config BCMDHD
- tristate "Broadcom 4329/30 wireless cards support"
- depends on MMC
+ tristate "Broadcom FullMAC wireless cards support"
---help---
This module adds support for wireless adapters based on
- Broadcom 4329/30 chipset.
+ Broadcom FullMAC chipset.
config BCMDHD_FW_PATH
depends on BCMDHD
Enables WEXT support
choice
- depends on BCMDHD
- prompt "Interrupt type"
+ depends on BCMDHD
+ prompt "Interrupt type"
+ ---help---
+ Interrupt type
config BCMDHD_OOB
depends on BCMDHD
bool "Out-of-Band Interrupt"
default y
---help---
- Interrupt from WL_HOST_WAKE.
+ Interrupt from WL_HOST_WAKE.
config BCMDHD_SDIO_IRQ
- depends on BCMDHD
+ depends on BCMDHD
bool "In-Band Interrupt"
---help---
- Interrupt from SDIO DAT[1]
+ Interrupt from SDIO DAT[1]
endchoice
+
# bcmdhd
-# 1. WL_ENABLE_P2P_IF must be removed if kernel version is 3.8 later, otherwise please remove it.
-# 2. WL_IFACE_COMB_NUM_CHANNELS must be added if Android version is 4.4 with Kernel version 3.0~3.4,
+# 1. WL_IFACE_COMB_NUM_CHANNELS must be added if the Android version is 4.4 and the kernel version is 3.0~3.4,
# otherwise please remove it.
-
MODULE_NAME = bcmdhd
#CONFIG_RKWIFI = m
CONFIG_CFG80211 = y
CONFIG_BCMDHD_OOB = y
-#CONFIG_BCMDHD_POWER_OFF_IN_SUSPEND = y
-DHDCFLAGS = -Wall -Wstrict-prototypes -Dlinux -DBCMDRIVER \
- -DBCMDONGLEHOST -DUNRELEASEDCHIP -DBCMDMA32 -DWLBTAMP -DBCMFILEIMAGE \
- -DDHDTHREAD -DDHD_DEBUG -DSDTEST -DBDC -DTOE \
- -DDHD_BCMEVENTS -DSHOW_EVENTS -DBCMDBG \
- -DMMC_SDIO_ABORT -DBCMSDIO -DBCMLXSDMMC -DBCMPLATFORM_BUS \
+DHDCFLAGS = -Wall -Wstrict-prototypes -Dlinux -DBCMDRIVER \
+ -DBCMDONGLEHOST -DUNRELEASEDCHIP -DBCMDMA32 -DBCMFILEIMAGE \
+ -DDHDTHREAD -DDHD_DEBUG -DSHOW_EVENTS -DBCMDBG \
-DWIFI_ACT_FRAME -DARP_OFFLOAD_SUPPORT \
- -DKEEP_ALIVE -DPKT_FILTER_SUPPORT -DGET_CUSTOM_MAC_ENABLE \
- -DEMBEDDED_PLATFORM -DPNO_SUPPORT -DENABLE_INSMOD_NO_FW_LOAD \
+ -DKEEP_ALIVE -DPKT_FILTER_SUPPORT \
+ -DEMBEDDED_PLATFORM -DENABLE_INSMOD_NO_FW_LOAD -DPNO_SUPPORT \
-DDHD_USE_IDLECOUNT -DSET_RANDOM_MAC_SOFTAP -DVSDB \
- -DWL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST -DSDIO_CRC_ERROR_FIX \
- -DESCAN_RESULT_PATCH -DHT40_GO -DPASS_ARP_PACKET -DSUPPORT_PM2_ONLY \
- -DDHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT -DAMPDU_HOSTREORDER \
- -DCUSTOM_SDIO_F2_BLKSIZE=128 -DWL_SDO -DWLTDLS \
- -DWL_SUPPORT_BACKPORTED_KPATCHES \
- -Idrivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd \
- -Idrivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include
+ -DWL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST \
+ -DESCAN_RESULT_PATCH -DSUPPORT_PM2_ONLY -DWLTDLS \
+ -DDHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT -DRXFRAME_THREAD \
+ -DMIRACAST_AMPDU_SIZE=8 \
+ -DSDTEST -DBDC -DDHD_BCMEVENTS -DPROP_TXSTATUS -DPROP_TXSTATUS_VSDB \
+ -DWL_SUPPORT_BACKPORTED_KPATCHES -DDHDTCPACK_SUPPRESS \
+ -Idrivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd \
+ -Idrivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/include
-DHDOFILES = aiutils.o bcmsdh_sdmmc_linux.o dhd_linux.o siutils.o bcmutils.o \
- dhd_linux_sched.o bcmwifi_channels.o dhd_sdio.o bcmevent.o dhd_bta.o hndpmu.o \
- bcmsdh.o dhd_cdc.o bcmsdh_linux.o dhd_common.o dhd_wlfc.o dhd_ip.o linux_osl.o \
- bcmsdh_sdmmc.o dhd_custom_gpio.o sbutils.o wldev_common.o wl_android.o \
- dhd_pno.o dhd_config.o dhd_gpio.o
+DHDCFLAGS += \
+ -DMMC_SDIO_ABORT -DBCMSDIO -DBCMLXSDMMC -DSDIO_CRC_ERROR_FIX \
+ -DCUSTOM_SDIO_F2_BLKSIZE=128 -DUSE_SDIOFIFO_IOVAR
-$(MODULE_NAME)-y += $(DHDOFILES)
+DHDOFILES = aiutils.o siutils.o sbutils.o bcmutils.o bcmwifi_channels.o \
+ dhd_linux.o dhd_linux_platdev.o dhd_linux_sched.o dhd_pno.o \
+ dhd_common.o dhd_ip.o dhd_linux_wq.o dhd_custom_gpio.o \
+ bcmevent.o hndpmu.o linux_osl.o wldev_common.o wl_android.o \
+ hnd_pktq.o hnd_pktpool.o dhd_config.o
-DHDCFLAGS += -DCUSTOMER_HW
+DHDOFILES += bcmsdh.o bcmsdh_linux.o bcmsdh_sdmmc.o bcmsdh_sdmmc_linux.o \
+ dhd_sdio.o dhd_cdc.o dhd_wlfc.o
-#DHDCFLAGS += -DSTATIC_WL_PRIV_STRUCT -DCONFIG_DHD_USE_STATIC_BUF -DENHANCED_STATIC_BUF
+$(MODULE_NAME)-y += $(DHDOFILES)
+
+DHDOFILES += dhd_gpio.o
+DHDCFLAGS += -DCUSTOMER_HW -DDHD_OF_SUPPORT
+#DHDCFLAGS += -DBCMWAPI_WPI -DBCMWAPI_WAI
ifeq ($(CONFIG_BCMDHD_OOB),y)
DHDCFLAGS += -DOOB_INTR_ONLY -DHW_OOB -DCUSTOMER_OOB
+ifeq ($(CONFIG_BCMDHD_DISABLE_WOWLAN),y)
+DHDCFLAGS += -DDISABLE_WOWLAN
+endif
else
DHDCFLAGS += -DSDIO_ISR_THREAD
endif
-ifeq ($(CONFIG_BCMDHD_POWER_OFF_IN_SUSPEND),y)
-DHDCFLAGS += -DPOWER_OFF_IN_SUSPEND
-endif
-
-#ifeq ($(CONFIG_BCMDHD_AG),y)
DHDCFLAGS += -DBAND_AG
-#endif
+
+ifeq ($(CONFIG_DHD_USE_STATIC_BUF),y)
+DHDCFLAGS += -DSTATIC_WL_PRIV_STRUCT
+endif
ifneq ($(CONFIG_WIRELESS_EXT),)
-$(MODULE_NAME)-y += wl_iw.o
+bcmdhd-objs += wl_iw.o
DHDCFLAGS += -DSOFTAP -DWL_WIRELESS_EXT -DUSE_IW
endif
-ifeq ($(CONFIG_CFG80211),y)
-$(MODULE_NAME)-y += wl_cfg80211.o wl_cfgp2p.o wl_linux_mon.o dhd_cfg80211.o
-DHDCFLAGS += -DWL_CFG80211 -DWLP2P -DWL_CFG80211_STA_EVENT
+ifneq ($(CONFIG_CFG80211),)
+bcmdhd-objs += wl_cfg80211.o wl_cfgp2p.o wl_linux_mon.o dhd_cfg80211.o wl_cfg_btcoex.o
+#DHDCFLAGS += -DWL_CFG80211 -DWLP2P -DWL_CFG80211_STA_EVENT -DWL_ENABLE_P2P_IF
+#DHDCFLAGS += -DWL_IFACE_COMB_NUM_CHANNELS
+DHDCFLAGS += -DWL_CFG80211 -DWLP2P -DWL_CFG80211_STA_EVENT
DHDCFLAGS += -DCUSTOM_ROAM_TRIGGER_SETTING=-65
DHDCFLAGS += -DCUSTOM_ROAM_DELTA_SETTING=15
DHDCFLAGS += -DCUSTOM_KEEP_ALIVE_SETTING=28000
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: aiutils.c 385510 2013-02-15 21:02:07Z $
+ * $Id: aiutils.c 467150 2014-04-02 17:30:43Z $
*/
#include <bcm_cfg.h>
#include <typedefs.h>
#define BCM47162_DMP() (0)
#define BCM5357_DMP() (0)
#define BCM4707_DMP() (0)
+#define PMU_DMP() (0)
#define remap_coreid(sih, coreid) (coreid)
#define remap_corerev(sih, corerev) (corerev)
-
+/* EROM parsing */
static uint32
get_erom_ent(si_t *sih, uint32 **eromptr, uint32 mask, uint32 match)
{
- uint32 ent = 0;
+ uint32 ent;
uint inv = 0, nom = 0;
- uint retry = 20;
- while (retry--) {
+ while (TRUE) {
ent = R_REG(si_osh(sih), *eromptr);
(*eromptr)++;
nom++;
}
- if (!retry)
- SI_ERROR(("%s: WiFi read register fail, retry = %d.\n", __FUNCTION__, retry));
SI_VMSG(("%s: Returning ent 0x%08x\n", __FUNCTION__, ent));
if (inv + nom) {
if (((asd & ER_TAG1) != ER_ADD) ||
(((asd & AD_SP_MASK) >> AD_SP_SHIFT) != sp) ||
((asd & AD_ST_MASK) != st)) {
-
+ /* This is not what we want, "push" it back */
(*eromptr)--;
return 0;
}
}
-
+/* parse the enumeration rom to identify all cores */
void
ai_scan(si_t *sih, void *regs, uint devid)
{
si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
chipcregs_t *cc = (chipcregs_t *)regs;
uint32 erombase, *eromptr, *eromlim;
break;
case PCI_BUS:
-
+ /* Set wrappers address */
sii->curwrap = (void *)((uintptr)regs + SI_CORE_SIZE);
-
+ /* Now point the window at the erom */
OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, erombase);
eromptr = regs;
break;
+#ifdef BCMSDIO
case SPI_BUS:
case SDIO_BUS:
eromptr = (uint32 *)(uintptr)erombase;
break;
+#endif /* BCMSDIO */
case PCMCIA_BUS:
default:
br = FALSE;
-
+ /* Grok a component */
cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI);
if (cia == (ER_END | ER_VALID)) {
SI_VMSG(("Found END of erom after %d cores\n", sii->numcores));
if (((mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) || (nsp == 0))
continue;
if ((nmw + nsw == 0)) {
-
+ /* A component which is not a core */
if (cid == OOB_ROUTER_CORE_ID) {
asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE,
&addrl, &addrh, &sizel, &sizeh);
sii->oob_router = addrl;
}
}
- if (cid != GMAC_COMMON_4706_CORE_ID && cid != NS_CCB_CORE_ID)
+ if (cid != GMAC_COMMON_4706_CORE_ID && cid != NS_CCB_CORE_ID &&
+ cid != PMU_CORE_ID && cid != GCI_CORE_ID)
continue;
}
idx = sii->numcores;
- sii->cia[idx] = cia;
- sii->cib[idx] = cib;
- sii->coreid[idx] = remap_coreid(sih, cid);
+ cores_info->cia[idx] = cia;
+ cores_info->cib[idx] = cib;
+ cores_info->coreid[idx] = remap_coreid(sih, cid);
for (i = 0; i < nmp; i++) {
mpd = get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
(mpd & MPD_MUI_MASK) >> MPD_MUI_SHIFT));
}
-
+ /* First Slave Address Descriptor should be port 0:
+ * the main register space for the core
+ */
asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh);
if (asd == 0) {
do {
-
+ /* Try again to see if it is a bridge */
asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh,
&sizel, &sizeh);
if (asd != 0)
}
} while (1);
}
- sii->coresba[idx] = addrl;
- sii->coresba_size[idx] = sizel;
-
+ cores_info->coresba[idx] = addrl;
+ cores_info->coresba_size[idx] = sizel;
+ /* Get any more ASDs in port 0 */
j = 1;
do {
asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh,
&sizel, &sizeh);
if ((asd != 0) && (j == 1) && (sizel == SI_CORE_SIZE)) {
- sii->coresba2[idx] = addrl;
- sii->coresba2_size[idx] = sizel;
+ cores_info->coresba2[idx] = addrl;
+ cores_info->coresba2_size[idx] = sizel;
}
j++;
} while (asd != 0);
-
+ /* Go through the ASDs for other slave ports */
for (i = 1; i < nsp; i++) {
j = 0;
do {
}
}
-
+ /* Now get master wrappers */
for (i = 0; i < nmw; i++) {
asd = get_asd(sih, &eromptr, i, 0, AD_ST_MWRAP, &addrl, &addrh,
&sizel, &sizeh);
goto error;
}
if (i == 0)
- sii->wrapba[idx] = addrl;
+ cores_info->wrapba[idx] = addrl;
}
-
+ /* And finally slave wrappers */
for (i = 0; i < nsw; i++) {
uint fwp = (nsp == 1) ? 0 : 1;
asd = get_asd(sih, &eromptr, fwp + i, 0, AD_ST_SWRAP, &addrl, &addrh,
goto error;
}
if ((nmw == 0) && (i == 0))
- sii->wrapba[idx] = addrl;
+ cores_info->wrapba[idx] = addrl;
}
-
+ /* Don't record bridges */
if (br)
continue;
-
+ /* Done with core */
sii->numcores++;
}
return;
}
+#define AI_SETCOREIDX_MAPSIZE(coreid) \
+ (((coreid) == NS_CCB_CORE_ID) ? 15 * SI_CORE_SIZE : SI_CORE_SIZE)
+/* This function changes the logical "focus" to the indicated core.
+ * Return the current core's virtual address.
+ */
void *
ai_setcoreidx(si_t *sih, uint coreidx)
{
si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
uint32 addr, wrap;
void *regs;
if (coreidx >= MIN(sii->numcores, SI_MAXCORES))
return (NULL);
- addr = sii->coresba[coreidx];
- wrap = sii->wrapba[coreidx];
+ addr = cores_info->coresba[coreidx];
+ wrap = cores_info->wrapba[coreidx];
-
+ /*
+ * If the user has provided an interrupt mask enabled function,
+ * then assert interrupts are disabled before switching the core.
+ */
ASSERT((sii->intrsenabled_fn == NULL) || !(*(sii)->intrsenabled_fn)((sii)->intr_arg));
switch (BUSTYPE(sih->bustype)) {
case SI_BUS:
-
- if (!sii->regs[coreidx]) {
- sii->regs[coreidx] = REG_MAP(addr, SI_CORE_SIZE);
- ASSERT(GOODREGS(sii->regs[coreidx]));
+ /* map new one */
+ if (!cores_info->regs[coreidx]) {
+ cores_info->regs[coreidx] = REG_MAP(addr,
+ AI_SETCOREIDX_MAPSIZE(cores_info->coreid[coreidx]));
+ ASSERT(GOODREGS(cores_info->regs[coreidx]));
}
- sii->curmap = regs = sii->regs[coreidx];
- if (!sii->wrappers[coreidx] && (wrap != 0)) {
- sii->wrappers[coreidx] = REG_MAP(wrap, SI_CORE_SIZE);
- ASSERT(GOODREGS(sii->wrappers[coreidx]));
+ sii->curmap = regs = cores_info->regs[coreidx];
+ if (!cores_info->wrappers[coreidx] && (wrap != 0)) {
+ cores_info->wrappers[coreidx] = REG_MAP(wrap, SI_CORE_SIZE);
+ ASSERT(GOODREGS(cores_info->wrappers[coreidx]));
}
- sii->curwrap = sii->wrappers[coreidx];
+ sii->curwrap = cores_info->wrappers[coreidx];
break;
+ case PCI_BUS:
+ /* point bar0 window */
+ OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, addr);
+ regs = sii->curmap;
+ /* point bar0 2nd 4KB window to the primary wrapper */
+ if (PCIE_GEN2(sii))
+ OSL_PCI_WRITE_CONFIG(sii->osh, PCIE2_BAR0_WIN2, 4, wrap);
+ else
+ OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN2, 4, wrap);
+ break;
+#ifdef BCMSDIO
case SPI_BUS:
case SDIO_BUS:
sii->curmap = regs = (void *)((uintptr)addr);
sii->curwrap = (void *)((uintptr)wrap);
break;
+#endif /* BCMSDIO */
case PCMCIA_BUS:
default:
return regs;
}
+
void
ai_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size)
{
si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
chipcregs_t *cc = NULL;
uint32 erombase, *eromptr, *eromlim;
uint i, j, cidx;
uint32 asd, addrl, addrh, sizel, sizeh;
for (i = 0; i < sii->numcores; i++) {
- if (sii->coreid[i] == CC_CORE_ID) {
- cc = (chipcregs_t *)sii->regs[i];
+ if (cores_info->coreid[i] == CC_CORE_ID) {
+ cc = (chipcregs_t *)cores_info->regs[i];
break;
}
}
eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32));
cidx = sii->curidx;
- cia = sii->cia[cidx];
- cib = sii->cib[cidx];
+ cia = cores_info->cia[cidx];
+ cib = cores_info->cib[cidx];
nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;
-
+ /* scan for cores */
while (eromptr < eromlim) {
if ((get_erom_ent(sih, &eromptr, ER_TAG, ER_CI) == cia) &&
(get_erom_ent(sih, &eromptr, 0, 0) == cib)) {
}
}
-
+ /* skip master ports */
for (i = 0; i < nmp; i++)
get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
-
+ /* Skip ASDs in port 0 */
asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh);
if (asd == 0) {
-
+ /* Try again to see if it is a bridge */
asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh,
&sizel, &sizeh);
}
j++;
} while (asd != 0);
-
+ /* Go through the ASDs for other slave ports */
for (i = 1; i < nsp; i++) {
j = 0;
do {
return;
}
-
+/* Return the number of address spaces in current core */
int
ai_numaddrspaces(si_t *sih)
{
return 2;
}
-
+/* Return the address of the nth address space in the current core */
uint32
ai_addrspace(si_t *sih, uint asidx)
{
- si_info_t *sii;
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
uint cidx;
- sii = SI_INFO(sih);
cidx = sii->curidx;
if (asidx == 0)
- return sii->coresba[cidx];
+ return cores_info->coresba[cidx];
else if (asidx == 1)
- return sii->coresba2[cidx];
+ return cores_info->coresba2[cidx];
else {
SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n",
__FUNCTION__, asidx));
}
}
-
+/* Return the size of the nth address space in the current core */
uint32
ai_addrspacesize(si_t *sih, uint asidx)
{
- si_info_t *sii;
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
uint cidx;
- sii = SI_INFO(sih);
cidx = sii->curidx;
if (asidx == 0)
- return sii->coresba_size[cidx];
+ return cores_info->coresba_size[cidx];
else if (asidx == 1)
- return sii->coresba2_size[cidx];
+ return cores_info->coresba2_size[cidx];
else {
SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n",
__FUNCTION__, asidx));
uint
ai_flag(si_t *sih)
{
- si_info_t *sii;
+ si_info_t *sii = SI_INFO(sih);
aidmp_t *ai;
- sii = SI_INFO(sih);
if (BCM47162_DMP()) {
SI_ERROR(("%s: Attempting to read MIPS DMP registers on 47162a0", __FUNCTION__));
return sii->curidx;
__FUNCTION__));
return sii->curidx;
}
+
+#ifdef REROUTE_OOBINT
+ if (PMU_DMP()) {
+ SI_ERROR(("%s: Attempting to read PMU DMP registers\n",
+ __FUNCTION__));
+ return PMU_OOB_BIT;
+ }
+#endif /* REROUTE_OOBINT */
+
ai = sii->curwrap;
+ ASSERT(ai != NULL);
return (R_REG(sii->osh, &ai->oobselouta30) & 0x1f);
}
uint
ai_flag_alt(si_t *sih)
{
- si_info_t *sii;
+ si_info_t *sii = SI_INFO(sih);
aidmp_t *ai;
- sii = SI_INFO(sih);
if (BCM47162_DMP()) {
SI_ERROR(("%s: Attempting to read MIPS DMP registers on 47162a0", __FUNCTION__));
return sii->curidx;
__FUNCTION__));
return sii->curidx;
}
+#ifdef REROUTE_OOBINT
+ if (PMU_DMP()) {
+ SI_ERROR(("%s: Attempting to read PMU DMP registers\n",
+ __FUNCTION__));
+ return PMU_OOB_BIT;
+ }
+#endif /* REROUTE_OOBINT */
+
ai = sii->curwrap;
return ((R_REG(sii->osh, &ai->oobselouta30) >> AI_OOBSEL_1_SHIFT) & AI_OOBSEL_MASK);
uint32 w = R_REG(sii->osh, map+(offset/4));
w &= ~mask;
w |= val;
- W_REG(sii->osh, map+(offset/4), val);
+ W_REG(sii->osh, map+(offset/4), w);
}
return (R_REG(sii->osh, map+(offset/4)));
uint
ai_corevendor(si_t *sih)
{
- si_info_t *sii;
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
uint32 cia;
- sii = SI_INFO(sih);
- cia = sii->cia[sii->curidx];
+ cia = cores_info->cia[sii->curidx];
return ((cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT);
}
uint
ai_corerev(si_t *sih)
{
- si_info_t *sii;
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
uint32 cib;
- sii = SI_INFO(sih);
- cib = sii->cib[sii->curidx];
+
+ cib = cores_info->cib[sii->curidx];
return remap_corerev(sih, (cib & CIB_REV_MASK) >> CIB_REV_SHIFT);
}
bool
ai_iscoreup(si_t *sih)
{
- si_info_t *sii;
+ si_info_t *sii = SI_INFO(sih);
aidmp_t *ai;
- sii = SI_INFO(sih);
ai = sii->curwrap;
return (((R_REG(sii->osh, &ai->ioctrl) & (SICF_FGC | SICF_CLOCK_EN)) == SICF_CLOCK_EN) &&
((R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) == 0));
}
-
+/*
+ * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
+ * switch back to the original core, and return the new value.
+ *
+ * When using the silicon backplane, no fiddling with interrupts or core switches is needed.
+ *
+ * Also, when using pci/pcie, we can optimize away the core switching for pci registers
+ * and (on newer pci cores) chipcommon registers.
+ */
uint
ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
{
uint w;
uint intr_val = 0;
bool fast = FALSE;
- si_info_t *sii;
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
- sii = SI_INFO(sih);
ASSERT(GOODIDX(coreidx));
ASSERT(regoff < SI_CORE_SIZE);
return 0;
if (BUSTYPE(sih->bustype) == SI_BUS) {
-
+ /* If internal bus, we can always get at everything */
fast = TRUE;
-
- if (!sii->regs[coreidx]) {
- sii->regs[coreidx] = REG_MAP(sii->coresba[coreidx],
+ /* map if does not exist */
+ if (!cores_info->regs[coreidx]) {
+ cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
SI_CORE_SIZE);
- ASSERT(GOODREGS(sii->regs[coreidx]));
+ ASSERT(GOODREGS(cores_info->regs[coreidx]));
}
- r = (uint32 *)((uchar *)sii->regs[coreidx] + regoff);
+ r = (uint32 *)((uchar *)cores_info->regs[coreidx] + regoff);
} else if (BUSTYPE(sih->bustype) == PCI_BUS) {
-
+ /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
- if ((sii->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
-
+ if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
+ /* Chipc registers are mapped at 12KB */
fast = TRUE;
r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff);
} else if (sii->pub.buscoreidx == coreidx) {
-
+ /* pci registers are at either in the last 2KB of an 8KB window
+ * or, in pcie and pci rev 13 at 8KB
+ */
fast = TRUE;
if (SI_FAST(sii))
r = (uint32 *)((char *)sii->curmap +
if (!fast) {
INTR_OFF(sii, intr_val);
-
+ /* save current core index */
origidx = si_coreidx(&sii->pub);
-
+ /* switch core */
r = (uint32*) ((uchar*) ai_setcoreidx(&sii->pub, coreidx) + regoff);
}
ASSERT(r != NULL);
-
+ /* mask and set */
if (mask || val) {
w = (R_REG(sii->osh, r) & ~mask) | val;
W_REG(sii->osh, r, w);
}
-
+ /* readback */
w = R_REG(sii->osh, r);
if (!fast) {
-
+ /* restore core index */
if (origidx != coreidx)
ai_setcoreidx(&sii->pub, origidx);
return (w);
}
+/*
+ * If there is no need for fiddling with interrupts or core switches (typically silicon
+ * backplane registers, pci registers and chipcommon registers), this function
+ * translates the register offset on this core into a mapped address. This address can
+ * be used with W_REG/R_REG directly.
+ *
+ * For accessing registers that would need a core switch, this function will return
+ * NULL.
+ */
+uint32 *
+ai_corereg_addr(si_t *sih, uint coreidx, uint regoff)
+{
+ uint32 *r = NULL;
+ bool fast = FALSE;
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+
+ ASSERT(GOODIDX(coreidx));
+ ASSERT(regoff < SI_CORE_SIZE);
+
+ if (coreidx >= SI_MAXCORES)
+ return 0;
+
+ if (BUSTYPE(sih->bustype) == SI_BUS) {
+ /* If internal bus, we can always get at everything */
+ fast = TRUE;
+ /* map if does not exist */
+ if (!cores_info->regs[coreidx]) {
+ cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
+ SI_CORE_SIZE);
+ ASSERT(GOODREGS(cores_info->regs[coreidx]));
+ }
+ r = (uint32 *)((uchar *)cores_info->regs[coreidx] + regoff);
+ } else if (BUSTYPE(sih->bustype) == PCI_BUS) {
+ /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
+
+ if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
+ /* Chipc registers are mapped at 12KB */
+
+ fast = TRUE;
+ r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff);
+ } else if (sii->pub.buscoreidx == coreidx) {
+ /* pci registers are at either in the last 2KB of an 8KB window
+ * or, in pcie and pci rev 13 at 8KB
+ */
+ fast = TRUE;
+ if (SI_FAST(sii))
+ r = (uint32 *)((char *)sii->curmap +
+ PCI_16KB0_PCIREGS_OFFSET + regoff);
+ else
+ r = (uint32 *)((char *)sii->curmap +
+ ((regoff >= SBCONFIGOFF) ?
+ PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
+ regoff);
+ }
+ }
+
+ if (!fast)
+ return 0;
+
+ return (r);
+}
+
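A minimal editorial sketch, not part of the patch, of how a caller is expected to use the fast-path helper above: cache the address returned by ai_corereg_addr() and use it with R_REG()/W_REG() directly, falling back to ai_corereg() when a core switch is required (the helper name example_read_fast is hypothetical).

static uint
example_read_fast(si_t *sih, uint coreidx, uint regoff)
{
	si_info_t *sii = SI_INFO(sih);
	uint32 *addr = ai_corereg_addr(sih, coreidx, regoff);

	if (addr != NULL)
		return R_REG(sii->osh, addr);	/* fast path: no interrupt/core-switch fiddling */

	/* slow path: ai_corereg() switches cores, reads (mask = 0, val = 0) and switches back */
	return ai_corereg(sih, coreidx, regoff, 0, 0);
}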
void
ai_core_disable(si_t *sih, uint32 bits)
{
- si_info_t *sii;
+ si_info_t *sii = SI_INFO(sih);
volatile uint32 dummy;
uint32 status;
aidmp_t *ai;
- sii = SI_INFO(sih);
ASSERT(GOODREGS(sii->curwrap));
ai = sii->curwrap;
-
+ /* if core is already in reset, just return */
if (R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET)
return;
-
+ /* ensure there are no pending backplane operations */
SPINWAIT(((status = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);
-
+ /* if pending backplane ops still, try waiting longer */
if (status != 0) {
-
-
+ /* 300usecs was sufficient to allow backplane ops to clear for big hammer */
+ /* during driver load we may need more time */
SPINWAIT(((status = R_REG(sii->osh, &ai->resetstatus)) != 0), 10000);
-
-
+ /* if still pending ops, continue on and try disable anyway */
+ /* this is in big hammer path, so don't call wl_reinit in this case... */
}
W_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
OSL_DELAY(10);
}
-
+/* reset and re-enable a core
+ * inputs:
+ * bits - core specific bits that are set during and after reset sequence
+ * resetbits - core specific bits that are set only during reset sequence
+ */
void
ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
{
- si_info_t *sii;
+ si_info_t *sii = SI_INFO(sih);
aidmp_t *ai;
volatile uint32 dummy;
+ uint loop_counter = 10;
- sii = SI_INFO(sih);
ASSERT(GOODREGS(sii->curwrap));
ai = sii->curwrap;
-
- ai_core_disable(sih, (bits | resetbits));
+ /* ensure there are no pending backplane operations */
+ SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);
+
-
- W_REG(sii->osh, &ai->ioctrl, (bits | SICF_FGC | SICF_CLOCK_EN));
+ /* put core into reset state */
+ W_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
+ OSL_DELAY(10);
+
+ /* ensure there are no pending backplane operations */
+ SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300);
+
+ W_REG(sii->osh, &ai->ioctrl, (bits | resetbits | SICF_FGC | SICF_CLOCK_EN));
dummy = R_REG(sii->osh, &ai->ioctrl);
BCM_REFERENCE(dummy);
- W_REG(sii->osh, &ai->resetctrl, 0);
- dummy = R_REG(sii->osh, &ai->resetctrl);
- BCM_REFERENCE(dummy);
- OSL_DELAY(1);
+ /* ensure there are no pending backplane operations */
+ SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);
+
+
+ while (R_REG(sii->osh, &ai->resetctrl) != 0 && --loop_counter != 0) {
+ /* ensure there are no pending backplane operations */
+ SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);
+
+
+ /* take core out of reset */
+ W_REG(sii->osh, &ai->resetctrl, 0);
+
+ /* ensure there are no pending backplane operations */
+ SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300);
+ }
+
W_REG(sii->osh, &ai->ioctrl, (bits | SICF_CLOCK_EN));
dummy = R_REG(sii->osh, &ai->ioctrl);
void
ai_core_cflags_wo(si_t *sih, uint32 mask, uint32 val)
{
- si_info_t *sii;
+ si_info_t *sii = SI_INFO(sih);
aidmp_t *ai;
uint32 w;
- sii = SI_INFO(sih);
if (BCM47162_DMP()) {
SI_ERROR(("%s: Accessing MIPS DMP register (ioctrl) on 47162a0",
__FUNCTION__));
return;
}
+ if (PMU_DMP()) {
+ SI_ERROR(("%s: Accessing PMU DMP register (ioctrl)\n",
+ __FUNCTION__));
+ return;
+ }
ASSERT(GOODREGS(sii->curwrap));
ai = sii->curwrap;
uint32
ai_core_cflags(si_t *sih, uint32 mask, uint32 val)
{
- si_info_t *sii;
+ si_info_t *sii = SI_INFO(sih);
aidmp_t *ai;
uint32 w;
- sii = SI_INFO(sih);
if (BCM47162_DMP()) {
SI_ERROR(("%s: Accessing MIPS DMP register (ioctrl) on 47162a0",
__FUNCTION__));
return 0;
}
+ if (PMU_DMP()) {
+ SI_ERROR(("%s: Accessing PMU DMP register (ioctrl)\n",
+ __FUNCTION__));
+ return 0;
+ }
ASSERT(GOODREGS(sii->curwrap));
ai = sii->curwrap;
uint32
ai_core_sflags(si_t *sih, uint32 mask, uint32 val)
{
- si_info_t *sii;
+ si_info_t *sii = SI_INFO(sih);
aidmp_t *ai;
uint32 w;
- sii = SI_INFO(sih);
if (BCM47162_DMP()) {
SI_ERROR(("%s: Accessing MIPS DMP register (iostatus) on 47162a0",
__FUNCTION__));
__FUNCTION__));
return 0;
}
+ if (PMU_DMP()) {
+ SI_ERROR(("%s: Accessing PMU DMP register (ioctrl)\n",
+ __FUNCTION__));
+ return 0;
+ }
ASSERT(GOODREGS(sii->curwrap));
ai = sii->curwrap;
return R_REG(sii->osh, &ai->iostatus);
}
+
+#if defined(BCMDBG_PHYDUMP)
+/* print interesting aidmp registers */
+void
+ai_dumpregs(si_t *sih, struct bcmstrbuf *b)
+{
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+ osl_t *osh;
+ aidmp_t *ai;
+ uint i;
+
+ osh = sii->osh;
+
+ for (i = 0; i < sii->numcores; i++) {
+ si_setcoreidx(&sii->pub, i);
+ ai = sii->curwrap;
+
+ bcm_bprintf(b, "core 0x%x: \n", cores_info->coreid[i]);
+ if (BCM47162_DMP()) {
+ bcm_bprintf(b, "Skipping mips74k in 47162a0\n");
+ continue;
+ }
+ if (BCM5357_DMP()) {
+ bcm_bprintf(b, "Skipping usb20h in 5357\n");
+ continue;
+ }
+ if (BCM4707_DMP()) {
+ bcm_bprintf(b, "Skipping chipcommonb in 4707\n");
+ continue;
+ }
+
+ if (PMU_DMP()) {
+ bcm_bprintf(b, "Skipping pmu core\n");
+ continue;
+ }
+
+ bcm_bprintf(b, "ioctrlset 0x%x ioctrlclear 0x%x ioctrl 0x%x iostatus 0x%x"
+ "ioctrlwidth 0x%x iostatuswidth 0x%x\n"
+ "resetctrl 0x%x resetstatus 0x%x resetreadid 0x%x resetwriteid 0x%x\n"
+ "errlogctrl 0x%x errlogdone 0x%x errlogstatus 0x%x"
+ "errlogaddrlo 0x%x errlogaddrhi 0x%x\n"
+ "errlogid 0x%x errloguser 0x%x errlogflags 0x%x\n"
+ "intstatus 0x%x config 0x%x itcr 0x%x\n",
+ R_REG(osh, &ai->ioctrlset),
+ R_REG(osh, &ai->ioctrlclear),
+ R_REG(osh, &ai->ioctrl),
+ R_REG(osh, &ai->iostatus),
+ R_REG(osh, &ai->ioctrlwidth),
+ R_REG(osh, &ai->iostatuswidth),
+ R_REG(osh, &ai->resetctrl),
+ R_REG(osh, &ai->resetstatus),
+ R_REG(osh, &ai->resetreadid),
+ R_REG(osh, &ai->resetwriteid),
+ R_REG(osh, &ai->errlogctrl),
+ R_REG(osh, &ai->errlogdone),
+ R_REG(osh, &ai->errlogstatus),
+ R_REG(osh, &ai->errlogaddrlo),
+ R_REG(osh, &ai->errlogaddrhi),
+ R_REG(osh, &ai->errlogid),
+ R_REG(osh, &ai->errloguser),
+ R_REG(osh, &ai->errlogflags),
+ R_REG(osh, &ai->intstatus),
+ R_REG(osh, &ai->config),
+ R_REG(osh, &ai->itcr));
+ }
+}
+#endif
* bcmevent read-only data shared by kernel or app layers
*
* $Copyright Open Broadcom Corporation$
- * $Id: bcmevent.c 389384 2013-03-06 12:20:17Z $
+ * $Id: bcmevent.c 487838 2014-06-27 05:51:44Z $
*/
#include <typedefs.h>
#include <proto/bcmeth.h>
#include <proto/bcmevent.h>
-#if WLC_E_LAST != 125
-#error "You need to add an entry to bcmevent_names[] for the new event"
-#endif
-const bcmevent_name_t bcmevent_names[] = {
- { WLC_E_SET_SSID, "SET_SSID" },
- { WLC_E_JOIN, "JOIN" },
- { WLC_E_START, "START" },
- { WLC_E_AUTH, "AUTH" },
- { WLC_E_AUTH_IND, "AUTH_IND" },
- { WLC_E_DEAUTH, "DEAUTH" },
- { WLC_E_DEAUTH_IND, "DEAUTH_IND" },
- { WLC_E_ASSOC, "ASSOC" },
- { WLC_E_ASSOC_IND, "ASSOC_IND" },
- { WLC_E_REASSOC, "REASSOC" },
- { WLC_E_REASSOC_IND, "REASSOC_IND" },
- { WLC_E_DISASSOC, "DISASSOC" },
- { WLC_E_DISASSOC_IND, "DISASSOC_IND" },
- { WLC_E_QUIET_START, "START_QUIET" },
- { WLC_E_QUIET_END, "END_QUIET" },
- { WLC_E_BEACON_RX, "BEACON_RX" },
- { WLC_E_LINK, "LINK" },
- { WLC_E_MIC_ERROR, "MIC_ERROR" },
- { WLC_E_NDIS_LINK, "NDIS_LINK" },
- { WLC_E_ROAM, "ROAM" },
- { WLC_E_TXFAIL, "TXFAIL" },
- { WLC_E_PMKID_CACHE, "PMKID_CACHE" },
- { WLC_E_RETROGRADE_TSF, "RETROGRADE_TSF" },
- { WLC_E_PRUNE, "PRUNE" },
- { WLC_E_AUTOAUTH, "AUTOAUTH" },
- { WLC_E_EAPOL_MSG, "EAPOL_MSG" },
- { WLC_E_SCAN_COMPLETE, "SCAN_COMPLETE" },
- { WLC_E_ADDTS_IND, "ADDTS_IND" },
- { WLC_E_DELTS_IND, "DELTS_IND" },
- { WLC_E_BCNSENT_IND, "BCNSENT_IND" },
- { WLC_E_BCNRX_MSG, "BCNRX_MSG" },
- { WLC_E_BCNLOST_MSG, "BCNLOST_IND" },
- { WLC_E_ROAM_PREP, "ROAM_PREP" },
- { WLC_E_PFN_NET_FOUND, "PFNFOUND_IND" },
- { WLC_E_PFN_NET_LOST, "PFNLOST_IND" },
+/* Table of event name strings for UIs and debugging dumps */
+typedef struct {
+ uint event;
+ const char *name;
+} bcmevent_name_str_t;
+
+/* Use the actual name for event tracing */
+#define BCMEVENT_NAME(_event) {(_event), #_event}
+
+static const bcmevent_name_str_t bcmevent_names[] = {
+ BCMEVENT_NAME(WLC_E_SET_SSID),
+ BCMEVENT_NAME(WLC_E_JOIN),
+ BCMEVENT_NAME(WLC_E_START),
+ BCMEVENT_NAME(WLC_E_AUTH),
+ BCMEVENT_NAME(WLC_E_AUTH_IND),
+ BCMEVENT_NAME(WLC_E_DEAUTH),
+ BCMEVENT_NAME(WLC_E_DEAUTH_IND),
+ BCMEVENT_NAME(WLC_E_ASSOC),
+ BCMEVENT_NAME(WLC_E_ASSOC_IND),
+ BCMEVENT_NAME(WLC_E_REASSOC),
+ BCMEVENT_NAME(WLC_E_REASSOC_IND),
+ BCMEVENT_NAME(WLC_E_DISASSOC),
+ BCMEVENT_NAME(WLC_E_DISASSOC_IND),
+ BCMEVENT_NAME(WLC_E_QUIET_START),
+ BCMEVENT_NAME(WLC_E_QUIET_END),
+ BCMEVENT_NAME(WLC_E_BEACON_RX),
+ BCMEVENT_NAME(WLC_E_LINK),
+ BCMEVENT_NAME(WLC_E_MIC_ERROR),
+ BCMEVENT_NAME(WLC_E_NDIS_LINK),
+ BCMEVENT_NAME(WLC_E_ROAM),
+ BCMEVENT_NAME(WLC_E_TXFAIL),
+ BCMEVENT_NAME(WLC_E_PMKID_CACHE),
+ BCMEVENT_NAME(WLC_E_RETROGRADE_TSF),
+ BCMEVENT_NAME(WLC_E_PRUNE),
+ BCMEVENT_NAME(WLC_E_AUTOAUTH),
+ BCMEVENT_NAME(WLC_E_EAPOL_MSG),
+ BCMEVENT_NAME(WLC_E_SCAN_COMPLETE),
+ BCMEVENT_NAME(WLC_E_ADDTS_IND),
+ BCMEVENT_NAME(WLC_E_DELTS_IND),
+ BCMEVENT_NAME(WLC_E_BCNSENT_IND),
+ BCMEVENT_NAME(WLC_E_BCNRX_MSG),
+ BCMEVENT_NAME(WLC_E_BCNLOST_MSG),
+ BCMEVENT_NAME(WLC_E_ROAM_PREP),
+ BCMEVENT_NAME(WLC_E_PFN_NET_FOUND),
+ BCMEVENT_NAME(WLC_E_PFN_NET_LOST),
#if defined(IBSS_PEER_DISCOVERY_EVENT)
- { WLC_E_IBSS_ASSOC, "IBSS_ASSOC" },
+ BCMEVENT_NAME(WLC_E_IBSS_ASSOC),
#endif /* defined(IBSS_PEER_DISCOVERY_EVENT) */
- { WLC_E_RADIO, "RADIO" },
- { WLC_E_PSM_WATCHDOG, "PSM_WATCHDOG" },
+ BCMEVENT_NAME(WLC_E_RADIO),
+ BCMEVENT_NAME(WLC_E_PSM_WATCHDOG),
#if defined(BCMCCX) && defined(CCX_SDK)
- { WLC_E_CCX_ASSOC_START, "CCX_ASSOC_START" },
- { WLC_E_CCX_ASSOC_ABORT, "CCX_ASSOC_ABORT" },
+ BCMEVENT_NAME(WLC_E_CCX_ASSOC_START),
+ BCMEVENT_NAME(WLC_E_CCX_ASSOC_ABORT),
#endif /* BCMCCX && CCX_SDK */
- { WLC_E_PROBREQ_MSG, "PROBE_REQ_MSG" },
- { WLC_E_SCAN_CONFIRM_IND, "SCAN_CONFIRM_IND" },
- { WLC_E_PSK_SUP, "PSK_SUP" },
- { WLC_E_COUNTRY_CODE_CHANGED, "CNTRYCODE_IND" },
- { WLC_E_EXCEEDED_MEDIUM_TIME, "EXCEEDED_MEDIUM_TIME" },
- { WLC_E_ICV_ERROR, "ICV_ERROR" },
- { WLC_E_UNICAST_DECODE_ERROR, "UNICAST_DECODE_ERROR" },
- { WLC_E_MULTICAST_DECODE_ERROR, "MULTICAST_DECODE_ERROR" },
- { WLC_E_TRACE, "TRACE" },
+ BCMEVENT_NAME(WLC_E_PROBREQ_MSG),
+ BCMEVENT_NAME(WLC_E_SCAN_CONFIRM_IND),
+ BCMEVENT_NAME(WLC_E_PSK_SUP),
+ BCMEVENT_NAME(WLC_E_COUNTRY_CODE_CHANGED),
+ BCMEVENT_NAME(WLC_E_EXCEEDED_MEDIUM_TIME),
+ BCMEVENT_NAME(WLC_E_ICV_ERROR),
+ BCMEVENT_NAME(WLC_E_UNICAST_DECODE_ERROR),
+ BCMEVENT_NAME(WLC_E_MULTICAST_DECODE_ERROR),
+ BCMEVENT_NAME(WLC_E_TRACE),
#ifdef WLBTAMP
- { WLC_E_BTA_HCI_EVENT, "BTA_HCI_EVENT" },
+ BCMEVENT_NAME(WLC_E_BTA_HCI_EVENT),
#endif
- { WLC_E_IF, "IF" },
+ BCMEVENT_NAME(WLC_E_IF),
#ifdef WLP2P
- { WLC_E_P2P_DISC_LISTEN_COMPLETE, "WLC_E_P2P_DISC_LISTEN_COMPLETE" },
+ BCMEVENT_NAME(WLC_E_P2P_DISC_LISTEN_COMPLETE),
#endif
- { WLC_E_RSSI, "RSSI" },
- { WLC_E_PFN_SCAN_COMPLETE, "SCAN_COMPLETE" },
- { WLC_E_EXTLOG_MSG, "EXTERNAL LOG MESSAGE" },
+ BCMEVENT_NAME(WLC_E_RSSI),
+ BCMEVENT_NAME(WLC_E_PFN_SCAN_COMPLETE),
+ BCMEVENT_NAME(WLC_E_EXTLOG_MSG),
#ifdef WIFI_ACT_FRAME
- { WLC_E_ACTION_FRAME, "ACTION_FRAME" },
- { WLC_E_ACTION_FRAME_RX, "ACTION_FRAME_RX" },
- { WLC_E_ACTION_FRAME_COMPLETE, "ACTION_FRAME_COMPLETE" },
+ BCMEVENT_NAME(WLC_E_ACTION_FRAME),
+ BCMEVENT_NAME(WLC_E_ACTION_FRAME_RX),
+ BCMEVENT_NAME(WLC_E_ACTION_FRAME_COMPLETE),
#endif
#if 0 && (NDISVER >= 0x0620)
- { WLC_E_PRE_ASSOC_IND, "ASSOC_RECV" },
- { WLC_E_PRE_REASSOC_IND, "REASSOC_RECV" },
- { WLC_E_CHANNEL_ADOPTED, "CHANNEL_ADOPTED" },
- { WLC_E_AP_STARTED, "AP_STARTED" },
- { WLC_E_DFS_AP_STOP, "DFS_AP_STOP" },
- { WLC_E_DFS_AP_RESUME, "DFS_AP_RESUME" },
- { WLC_E_ASSOC_IND_NDIS, "ASSOC_IND_NDIS"},
- { WLC_E_REASSOC_IND_NDIS, "REASSOC_IND_NDIS"},
- { WLC_E_ACTION_FRAME_RX_NDIS, "WLC_E_ACTION_FRAME_RX_NDIS" },
- { WLC_E_AUTH_REQ, "WLC_E_AUTH_REQ" },
- { WLC_E_IBSS_COALESCE, "IBSS COALESCE" },
+ BCMEVENT_NAME(WLC_E_PRE_ASSOC_IND),
+ BCMEVENT_NAME(WLC_E_PRE_REASSOC_IND),
+ BCMEVENT_NAME(WLC_E_CHANNEL_ADOPTED),
+ BCMEVENT_NAME(WLC_E_AP_STARTED),
+ BCMEVENT_NAME(WLC_E_DFS_AP_STOP),
+ BCMEVENT_NAME(WLC_E_DFS_AP_RESUME),
+ BCMEVENT_NAME(WLC_E_ASSOC_IND_NDIS),
+ BCMEVENT_NAME(WLC_E_REASSOC_IND_NDIS),
+ BCMEVENT_NAME(WLC_E_ACTION_FRAME_RX_NDIS),
+ BCMEVENT_NAME(WLC_E_AUTH_REQ),
+ BCMEVENT_NAME(WLC_E_IBSS_COALESCE),
#endif
#ifdef BCMWAPI_WAI
- { WLC_E_WAI_STA_EVENT, "WAI_STA_EVENT" },
- { WLC_E_WAI_MSG, "WAI_MSG" },
+ BCMEVENT_NAME(WLC_E_WAI_STA_EVENT),
+ BCMEVENT_NAME(WLC_E_WAI_MSG),
#endif /* BCMWAPI_WAI */
- { WLC_E_ESCAN_RESULT, "WLC_E_ESCAN_RESULT" },
- { WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE, "WLC_E_AF_OFF_CHAN_COMPLETE" },
+ BCMEVENT_NAME(WLC_E_ESCAN_RESULT),
+ BCMEVENT_NAME(WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE),
#ifdef WLP2P
- { WLC_E_PROBRESP_MSG, "PROBE_RESP_MSG" },
- { WLC_E_P2P_PROBREQ_MSG, "P2P PROBE_REQ_MSG" },
+ BCMEVENT_NAME(WLC_E_PROBRESP_MSG),
+ BCMEVENT_NAME(WLC_E_P2P_PROBREQ_MSG),
#endif
#ifdef PROP_TXSTATUS
- { WLC_E_FIFO_CREDIT_MAP, "FIFO_CREDIT_MAP" },
+ BCMEVENT_NAME(WLC_E_FIFO_CREDIT_MAP),
#endif
- { WLC_E_WAKE_EVENT, "WAKE_EVENT" },
- { WLC_E_DCS_REQUEST, "DCS_REQUEST" },
- { WLC_E_RM_COMPLETE, "RM_COMPLETE" },
+ BCMEVENT_NAME(WLC_E_WAKE_EVENT),
+ BCMEVENT_NAME(WLC_E_DCS_REQUEST),
+ BCMEVENT_NAME(WLC_E_RM_COMPLETE),
#ifdef WLMEDIA_HTSF
- { WLC_E_HTSFSYNC, "HTSF_SYNC_EVENT" },
+ BCMEVENT_NAME(WLC_E_HTSFSYNC),
#endif
- { WLC_E_OVERLAY_REQ, "OVERLAY_REQ_EVENT" },
- { WLC_E_CSA_COMPLETE_IND, "WLC_E_CSA_COMPLETE_IND"},
- { WLC_E_EXCESS_PM_WAKE_EVENT, "EXCESS_PM_WAKE_EVENT" },
- { WLC_E_PFN_SCAN_NONE, "PFN_SCAN_NONE" },
- { WLC_E_PFN_SCAN_ALLGONE, "PFN_SCAN_ALLGONE" },
+ BCMEVENT_NAME(WLC_E_OVERLAY_REQ),
+ BCMEVENT_NAME(WLC_E_CSA_COMPLETE_IND),
+ BCMEVENT_NAME(WLC_E_EXCESS_PM_WAKE_EVENT),
+ BCMEVENT_NAME(WLC_E_PFN_SCAN_NONE),
+ BCMEVENT_NAME(WLC_E_PFN_SCAN_ALLGONE),
#ifdef SOFTAP
- { WLC_E_GTK_PLUMBED, "GTK_PLUMBED" },
+ BCMEVENT_NAME(WLC_E_GTK_PLUMBED),
#endif
- { WLC_E_ASSOC_REQ_IE, "ASSOC_REQ_IE" },
- { WLC_E_ASSOC_RESP_IE, "ASSOC_RESP_IE" },
- { WLC_E_ACTION_FRAME_RX_NDIS, "WLC_E_ACTION_FRAME_RX_NDIS" },
+ BCMEVENT_NAME(WLC_E_ASSOC_REQ_IE),
+ BCMEVENT_NAME(WLC_E_ASSOC_RESP_IE),
+ BCMEVENT_NAME(WLC_E_BEACON_FRAME_RX),
#ifdef WLTDLS
- { WLC_E_TDLS_PEER_EVENT, "TDLS_PEER_EVENT" },
+ BCMEVENT_NAME(WLC_E_TDLS_PEER_EVENT),
#endif /* WLTDLS */
- { WLC_E_NATIVE, "NATIVE" },
+ BCMEVENT_NAME(WLC_E_NATIVE),
#ifdef WLPKTDLYSTAT
- { WLC_E_PKTDELAY_IND, "PKTDELAY_IND" },
+ BCMEVENT_NAME(WLC_E_PKTDELAY_IND),
#endif /* WLPKTDLYSTAT */
- { WLC_E_SERVICE_FOUND, "SERVICE_FOUND" },
- { WLC_E_GAS_FRAGMENT_RX, "GAS_FRAGMENT_RX" },
- { WLC_E_GAS_COMPLETE, "GAS_COMPLETE" },
- { WLC_E_P2PO_ADD_DEVICE, "P2PO_DEV_FOUND" },
- { WLC_E_P2PO_DEL_DEVICE, "P2PO_DEV_LOST" },
+ BCMEVENT_NAME(WLC_E_SERVICE_FOUND),
+ BCMEVENT_NAME(WLC_E_GAS_FRAGMENT_RX),
+ BCMEVENT_NAME(WLC_E_GAS_COMPLETE),
+ BCMEVENT_NAME(WLC_E_P2PO_ADD_DEVICE),
+ BCMEVENT_NAME(WLC_E_P2PO_DEL_DEVICE),
#ifdef WLWNM
- { WLC_E_WNM_STA_SLEEP, "WMM_STA_SLEEP" },
+ BCMEVENT_NAME(WLC_E_WNM_STA_SLEEP),
#endif /* WLWNM */
#if defined(WL_PROXDETECT)
- { WLC_E_PROXD, "WLC_E_PROXD" },
+ BCMEVENT_NAME(WLC_E_PROXD),
+#endif
+ BCMEVENT_NAME(WLC_E_CCA_CHAN_QUAL),
+ BCMEVENT_NAME(WLC_E_BSSID),
+#ifdef PROP_TXSTATUS
+ BCMEVENT_NAME(WLC_E_BCMC_CREDIT_SUPPORT),
+#endif
+ BCMEVENT_NAME(WLC_E_TXFAIL_THRESH),
+#ifdef WLAIBSS
+ BCMEVENT_NAME(WLC_E_AIBSS_TXFAIL),
+#endif /* WLAIBSS */
+#ifdef WLBSSLOAD_REPORT
+ BCMEVENT_NAME(WLC_E_BSS_LOAD),
#endif
- { WLC_E_CCA_CHAN_QUAL, "CCA_BASED_CHANNEL_QUALITY" },
+#if defined(BT_WIFI_HANDOVER) || defined(WL_TBOW)
+ BCMEVENT_NAME(WLC_E_BT_WIFI_HANDOVER_REQ),
+#endif
+#ifdef WLFBT
+ BCMEVENT_NAME(WLC_E_FBT_AUTH_REQ_IND),
+#endif /* WLFBT */
+ BCMEVENT_NAME(WLC_E_RMC_EVENT),
};
-const int bcmevent_names_size = ARRAYSIZE(bcmevent_names);
+
+const char *bcmevent_get_name(uint event_type)
+{
+ /* note: first coded this as a static const but some
+ * ROMs already have something called event_name so
+ * changed it so we don't have a variable for the
+ * 'unknown' string
+ */
+ const char *event_name = NULL;
+
+ uint idx;
+ for (idx = 0; idx < (uint)ARRAYSIZE(bcmevent_names); idx++) {
+
+ if (bcmevent_names[idx].event == event_type) {
+ event_name = bcmevent_names[idx].name;
+ break;
+ }
+ }
+
+ /* if we find an event name in the array, return it.
+ * otherwise return unknown string.
+ */
+ return ((event_name) ? event_name : "Unknown Event");
+}
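An editorial sketch, not part of the patch: debug paths that previously scanned bcmevent_names[] against bcmevent_names_size can now call the lookup helper directly; unknown event codes simply come back as "Unknown Event", so no bounds check is needed (the wrapper name below is hypothetical).

static void
example_trace_event(uint event_type)
{
	printf("wl event %u (%s)\n", event_type, bcmevent_get_name(event_type));
}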
*
* $ Copyright Open Broadcom Corporation $
*
- * $Id: bcmsdh.c 373331 2012-12-07 04:46:22Z $
+ * $Id: bcmsdh.c 450676 2014-01-22 22:45:13Z $
*/
/**
#include <bcmsdh.h> /* BRCM API for SDIO clients (such as wl, dhd) */
#include <bcmsdbus.h> /* common SDIO/controller interface */
#include <sbsdio.h> /* SDIO device core hardware definitions. */
-
#include <sdio.h> /* SDIO Device and Protocol Specs */
#define SDIOH_API_ACCESS_RETRY_LIMIT 2
const uint bcmsdh_msglevel = BCMSDH_ERROR_VAL;
-/**
- * BCMSDH API context
- */
-struct bcmsdh_info
-{
- bool init_success; /* underlying driver successfully attached */
- void *sdioh; /* handler for sdioh */
- uint32 vendevid; /* Target Vendor and Device ID on SD bus */
- osl_t *osh;
- bool regfail; /* Save status of last reg_read/reg_write call */
- uint32 sbwad; /* Save backplane window address */
-};
/* local copy of bcm sd handler */
bcmsdh_info_t * l_bcmsdh = NULL;
+#if 0 && (NDISVER < 0x0630)
+extern SDIOH_API_RC sdioh_detach(osl_t *osh, sdioh_info_t *sd);
+#endif
+
#if defined(OOB_INTR_ONLY) && defined(HW_OOB)
extern int
sdioh_enable_hw_oob_intr(void *sdioh, bool enable);
* @return bcmsdh_info_t Handle to BCMSDH context.
*/
bcmsdh_info_t *
-bcmsdh_attach(osl_t *osh, void *cfghdl, void **regsva, uint irq)
+bcmsdh_attach(osl_t *osh, void *sdioh, ulong *regsva)
{
bcmsdh_info_t *bcmsdh;
return NULL;
}
bzero((char *)bcmsdh, sizeof(bcmsdh_info_t));
-
- /* save the handler locally */
- l_bcmsdh = bcmsdh;
-
- if (!(bcmsdh->sdioh = sdioh_attach(osh, cfghdl, irq))) {
- bcmsdh_detach(osh, bcmsdh);
- return NULL;
- }
-
+ bcmsdh->sdioh = sdioh;
bcmsdh->osh = osh;
bcmsdh->init_success = TRUE;
-
- *regsva = (uint32 *)SI_ENUM_BASE;
+ *regsva = SI_ENUM_BASE;
/* Report the BAR, to fix if needed */
bcmsdh->sbwad = SI_ENUM_BASE;
+
+ /* save the handler locally */
+ l_bcmsdh = bcmsdh;
+
return bcmsdh;
}
bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
if (bcmsdh != NULL) {
- if (bcmsdh->sdioh) {
+#if 0 && (NDISVER < 0x0630)
+ if (bcmsdh->sdioh)
sdioh_detach(osh, bcmsdh->sdioh);
- bcmsdh->sdioh = NULL;
- }
+#endif
MFREE(osh, bcmsdh, sizeof(bcmsdh_info_t));
}
l_bcmsdh = NULL;
+
return 0;
}
bcmsdh_waitlockfree(void *sdh)
{
bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
- if (!bcmsdh)
- bcmsdh = l_bcmsdh;
return sdioh_waitlockfree(bcmsdh->sdioh);
}
return sdioh_gpioout(sd, gpio, enab);
}
-
-#ifdef BCMSDIOH_TXGLOM
-void
-bcmsdh_glom_post(void *sdh, uint8 *frame, void *pkt, uint len)
-{
- bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
- sdioh_glom_post(bcmsdh->sdioh, frame, pkt, len);
-}
-
-void
-bcmsdh_glom_clear(void *sdh)
-{
- bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
- sdioh_glom_clear(bcmsdh->sdioh);
-}
-
-uint
-bcmsdh_set_mode(void *sdh, uint mode)
-{
- bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)sdh;
- return (sdioh_set_mode(bcmsdh->sdioh, mode));
-}
-
-bool
-bcmsdh_glom_enabled(void)
-{
- return (sdioh_glom_enabled());
-}
-#endif /* BCMSDIOH_TXGLOM */
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: bcmsdh_linux.c 414953 2013-07-26 17:36:27Z $
+ * $Id: bcmsdh_linux.c 461444 2014-03-12 02:55:28Z $
*/
/**
#include <typedefs.h>
#include <linuxver.h>
-
#include <linux/pci.h>
#include <linux/completion.h>
#include <pcicfg.h>
#include <bcmdefs.h>
#include <bcmdevs.h>
-
-#if defined(OOB_INTR_ONLY)
#include <linux/irq.h>
extern void dhdsdio_isr(void * args);
#include <bcmutils.h>
#include <dngl_stats.h>
#include <dhd.h>
-#endif
-
-/**
- * SDIO Host Controller info
- */
-typedef struct bcmsdh_hc bcmsdh_hc_t;
-
-struct bcmsdh_hc {
- bcmsdh_hc_t *next;
-#ifdef BCMPLATFORM_BUS
- struct device *dev; /* platform device handle */
-#else
- struct pci_dev *dev; /* pci device handle */
-#endif /* BCMPLATFORM_BUS */
- osl_t *osh;
- void *regs; /* SDIO Host Controller address */
- bcmsdh_info_t *sdh; /* SDIO Host Controller handle */
- void *ch;
- unsigned int oob_irq;
- unsigned long oob_flags; /* OOB Host specifiction as edge and etc */
- bool oob_irq_registered;
- bool oob_irq_enable_flag;
-#if defined(OOB_INTR_ONLY)
- spinlock_t irq_lock;
-#endif
-};
-static bcmsdh_hc_t *sdhcinfo = NULL;
+#if defined(CONFIG_ARCH_ODIN)
+#include <linux/platform_data/gpio-odin.h>
+#endif /* defined(CONFIG_ARCH_ODIN) */
+#include <dhd_linux.h>
/* driver info, initialized when bcmsdh_register is called */
-static bcmsdh_driver_t drvinfo = {NULL, NULL};
+static bcmsdh_driver_t drvinfo = {NULL, NULL, NULL, NULL};
+
+typedef enum {
+ DHD_INTR_INVALID = 0,
+ DHD_INTR_INBAND,
+ DHD_INTR_HWOOB,
+ DHD_INTR_SWOOB
+} DHD_HOST_INTR_TYPE;
+
+/* the BCMSDH module comprises the generic part (bcmsdh.c) and OS specific layer (e.g.
+ * bcmsdh_linux.c). Put all OS specific variables (e.g. irq number and flags) here rather
+ * than in the common structure bcmsdh_info. bcmsdh_info only keeps a handle (os_cxt) to this
+ * structure.
+ */
+typedef struct bcmsdh_os_info {
+ DHD_HOST_INTR_TYPE intr_type;
+ int oob_irq_num; /* valid when hardware or software oob in use */
+ unsigned long oob_irq_flags; /* valid when hardware or software oob in use */
+ bool oob_irq_registered;
+ bool oob_irq_enabled;
+ bool oob_irq_wake_enabled;
+ spinlock_t oob_irq_spinlock;
+ bcmsdh_cb_fn_t oob_irq_handler;
+ void *oob_irq_handler_context;
+ void *context; /* context returned from upper layer */
+ void *sdioh; /* handle to lower layer (sdioh) */
+ void *dev; /* handle to the underlying device */
+ bool dev_wake_enabled;
+} bcmsdh_os_info_t;
/* debugging macros */
#define SDLX_MSG(x) printf x
return (FALSE);
}
-#if defined(BCMPLATFORM_BUS)
-#if defined(BCMLXSDMMC)
-/* forward declarations */
-int bcmsdh_probe(struct device *dev);
-int bcmsdh_remove(struct device *dev);
-
-EXPORT_SYMBOL(bcmsdh_probe);
-EXPORT_SYMBOL(bcmsdh_remove);
-
-#else
-/* forward declarations */
-static int __devinit bcmsdh_probe(struct device *dev);
-static int __devexit bcmsdh_remove(struct device *dev);
-#endif
-
-#if !defined(BCMLXSDMMC)
-static
-#endif
-int bcmsdh_probe(struct device *dev)
+void* bcmsdh_probe(osl_t *osh, void *dev, void *sdioh, void *adapter_info, uint bus_type,
+ uint bus_num, uint slot_num)
{
- osl_t *osh = NULL;
- bcmsdh_hc_t *sdhc = NULL;
- ulong regs = 0;
- bcmsdh_info_t *sdh = NULL;
-#if !defined(BCMLXSDMMC) && defined(BCMPLATFORM_BUS)
- struct platform_device *pdev;
- struct resource *r;
-#endif
- int irq = 0;
+ ulong regs;
+ bcmsdh_info_t *bcmsdh;
uint32 vendevid;
- unsigned long irq_flags = 0;
-
-#if !defined(BCMLXSDMMC) && defined(BCMPLATFORM_BUS)
- pdev = to_platform_device(dev);
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- irq = platform_get_irq(pdev, 0);
- if (!r || irq < 0)
- return -ENXIO;
-#endif
-
-#if defined(OOB_INTR_ONLY)
-#ifdef HW_OOB
- irq_flags =
- IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL | IORESOURCE_IRQ_SHAREABLE;
-#else
- irq_flags = IRQF_TRIGGER_FALLING;
-#endif /* HW_OOB */
+ bcmsdh_os_info_t *bcmsdh_osinfo = NULL;
- /* Get customer specific OOB IRQ parametres: IRQ number as IRQ type */
- irq = dhd_customer_oob_irq_map(&irq_flags);
- if (irq < 0) {
- SDLX_MSG(("%s: Host irq is not defined\n", __FUNCTION__));
- goto err;
- }
-#endif
- /* allocate SDIO Host Controller state info */
- if (!(osh = osl_attach(dev, PCI_BUS, FALSE))) {
- SDLX_MSG(("%s: osl_attach failed\n", __FUNCTION__));
+ bcmsdh = bcmsdh_attach(osh, sdioh, &regs);
+ if (bcmsdh == NULL) {
+ SDLX_MSG(("%s: bcmsdh_attach failed\n", __FUNCTION__));
goto err;
}
- if (!(sdhc = MALLOC(osh, sizeof(bcmsdh_hc_t)))) {
- SDLX_MSG(("%s: out of memory, allocated %d bytes\n",
- __FUNCTION__,
- MALLOCED(osh)));
+ bcmsdh_osinfo = MALLOC(osh, sizeof(bcmsdh_os_info_t));
+ if (bcmsdh_osinfo == NULL) {
+ SDLX_MSG(("%s: failed to allocate bcmsdh_os_info_t\n", __FUNCTION__));
goto err;
}
- bzero(sdhc, sizeof(bcmsdh_hc_t));
- sdhc->osh = osh;
+ bzero((char *)bcmsdh_osinfo, sizeof(bcmsdh_os_info_t));
+ bcmsdh->os_cxt = bcmsdh_osinfo;
+ bcmsdh_osinfo->sdioh = sdioh;
+ bcmsdh_osinfo->dev = dev;
+ osl_set_bus_handle(osh, bcmsdh);
- sdhc->dev = (void *)dev;
+#if !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+ if (dev && device_init_wakeup(dev, true) == 0)
+ bcmsdh_osinfo->dev_wake_enabled = TRUE;
+#endif /* !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */
-#if defined(BCMLXSDMMC)
- if (!(sdh = bcmsdh_attach(osh, (void *)0,
- (void **)&regs, irq))) {
- SDLX_MSG(("%s: bcmsdh_attach failed\n", __FUNCTION__));
- goto err;
- }
-#else
- if (!(sdh = bcmsdh_attach(osh, (void *)r->start,
- (void **)&regs, irq))) {
- SDLX_MSG(("%s: bcmsdh_attach failed\n", __FUNCTION__));
+#if defined(OOB_INTR_ONLY)
+ spin_lock_init(&bcmsdh_osinfo->oob_irq_spinlock);
+ /* Get customer specific OOB IRQ parameters: IRQ number and IRQ type */
+ bcmsdh_osinfo->oob_irq_num = wifi_platform_get_irq_number(adapter_info,
+ &bcmsdh_osinfo->oob_irq_flags);
+ if (bcmsdh_osinfo->oob_irq_num < 0) {
+ SDLX_MSG(("%s: Host OOB irq is not defined\n", __FUNCTION__));
goto err;
}
-#endif
- sdhc->sdh = sdh;
- sdhc->oob_irq = irq;
- sdhc->oob_flags = irq_flags;
- sdhc->oob_irq_registered = FALSE; /* to make sure.. */
- sdhc->oob_irq_enable_flag = FALSE;
-#if defined(OOB_INTR_ONLY)
- spin_lock_init(&sdhc->irq_lock);
-#endif
-
- /* chain SDIO Host Controller info together */
- sdhc->next = sdhcinfo;
- sdhcinfo = sdhc;
+#endif /* defined(BCMLXSDMMC) */
/* Read the vendor/device ID from the CIS */
- vendevid = bcmsdh_query_device(sdh);
+ vendevid = bcmsdh_query_device(bcmsdh);
/* try to attach to the target device */
- if (!(sdhc->ch = drvinfo.attach((vendevid >> 16),
- (vendevid & 0xFFFF), 0, 0, 0, 0,
- (void *)regs, NULL, sdh))) {
+ bcmsdh_osinfo->context = drvinfo.probe((vendevid >> 16), (vendevid & 0xFFFF), bus_num,
+ slot_num, 0, bus_type, (void *)regs, osh, bcmsdh);
+ if (bcmsdh_osinfo->context == NULL) {
SDLX_MSG(("%s: device attach failed\n", __FUNCTION__));
goto err;
}
- return 0;
+ return bcmsdh;
/* error handling */
err:
- if (sdhc) {
- if (sdhc->sdh)
- bcmsdh_detach(sdhc->osh, sdhc->sdh);
- MFREE(osh, sdhc, sizeof(bcmsdh_hc_t));
- }
- if (osh)
- osl_detach(osh);
- return -ENODEV;
+ if (bcmsdh != NULL)
+ bcmsdh_detach(osh, bcmsdh);
+ if (bcmsdh_osinfo != NULL)
+ MFREE(osh, bcmsdh_osinfo, sizeof(bcmsdh_os_info_t));
+ return NULL;
}
-#if !defined(BCMLXSDMMC)
-static
-#endif
-int bcmsdh_remove(struct device *dev)
+int bcmsdh_remove(bcmsdh_info_t *bcmsdh)
{
- bcmsdh_hc_t *sdhc, *prev;
- osl_t *osh;
-
- sdhc = sdhcinfo;
- drvinfo.detach(sdhc->ch);
- bcmsdh_detach(sdhc->osh, sdhc->sdh);
-
- /* find the SDIO Host Controller state for this pdev and take it out from the list */
- for (sdhc = sdhcinfo, prev = NULL; sdhc; sdhc = sdhc->next) {
- if (sdhc->dev == (void *)dev) {
- if (prev)
- prev->next = sdhc->next;
- else
- sdhcinfo = NULL;
- break;
- }
- prev = sdhc;
- }
- if (!sdhc) {
- SDLX_MSG(("%s: failed\n", __FUNCTION__));
- return 0;
- }
+ bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
- /* release SDIO Host Controller info */
- osh = sdhc->osh;
- MFREE(osh, sdhc, sizeof(bcmsdh_hc_t));
- osl_detach(osh);
+#if !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+ if (bcmsdh_osinfo->dev)
+ device_init_wakeup(bcmsdh_osinfo->dev, false);
+ bcmsdh_osinfo->dev_wake_enabled = FALSE;
+#endif /* !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */
-#if !defined(BCMLXSDMMC) || defined(OOB_INTR_ONLY)
- dev_set_drvdata(dev, NULL);
-#endif
+ drvinfo.remove(bcmsdh_osinfo->context);
+ MFREE(bcmsdh->osh, bcmsdh->os_cxt, sizeof(bcmsdh_os_info_t));
+ bcmsdh_detach(bcmsdh->osh, bcmsdh);
return 0;
}
-#else /* BCMPLATFORM_BUS */
-
-#if !defined(BCMLXSDMMC)
-/* forward declarations for PCI probe and remove functions. */
-static int __devinit bcmsdh_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
-static void __devexit bcmsdh_pci_remove(struct pci_dev *pdev);
-
-/**
- * pci id table
- */
-static struct pci_device_id bcmsdh_pci_devid[] __devinitdata = {
- { vendor: PCI_ANY_ID,
- device: PCI_ANY_ID,
- subvendor: PCI_ANY_ID,
- subdevice: PCI_ANY_ID,
- class: 0,
- class_mask: 0,
- driver_data: 0,
- },
- { 0, }
-};
-MODULE_DEVICE_TABLE(pci, bcmsdh_pci_devid);
-
-/**
- * SDIO Host Controller pci driver info
- */
-static struct pci_driver bcmsdh_pci_driver = {
- node: {},
- name: "bcmsdh",
- id_table: bcmsdh_pci_devid,
- probe: bcmsdh_pci_probe,
- remove: bcmsdh_pci_remove,
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
- save_state: NULL,
-#endif
- suspend: NULL,
- resume: NULL,
- };
-
-
-extern uint sd_pci_slot; /* Force detection to a particular PCI */
- /* slot only . Allows for having multiple */
- /* WL devices at once in a PC */
- /* Only one instance of dhd will be */
- /* usable at a time */
- /* Upper word is bus number, */
- /* lower word is slot number */
- /* Default value of 0xffffffff turns this */
- /* off */
-module_param(sd_pci_slot, uint, 0);
-
-
-/**
- * Detect supported SDIO Host Controller and attach if found.
- *
- * Determine if the device described by pdev is a supported SDIO Host
- * Controller. If so, attach to it and attach to the target device.
- */
-static int __devinit
-bcmsdh_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+int bcmsdh_suspend(bcmsdh_info_t *bcmsdh)
{
- osl_t *osh = NULL;
- bcmsdh_hc_t *sdhc = NULL;
- ulong regs;
- bcmsdh_info_t *sdh = NULL;
- int rc;
-
- if (sd_pci_slot != 0xFFFFffff) {
- if (pdev->bus->number != (sd_pci_slot>>16) ||
- PCI_SLOT(pdev->devfn) != (sd_pci_slot&0xffff)) {
- SDLX_MSG(("%s: %s: bus %X, slot %X, vend %X, dev %X\n",
- __FUNCTION__,
- bcmsdh_chipmatch(pdev->vendor, pdev->device)
- ?"Found compatible SDIOHC"
- :"Probing unknown device",
- pdev->bus->number, PCI_SLOT(pdev->devfn), pdev->vendor,
- pdev->device));
- return -ENODEV;
- }
- SDLX_MSG(("%s: %s: bus %X, slot %X, vendor %X, device %X (good PCI location)\n",
- __FUNCTION__,
- bcmsdh_chipmatch(pdev->vendor, pdev->device)
- ?"Using compatible SDIOHC"
- :"WARNING, forced use of unkown device",
- pdev->bus->number, PCI_SLOT(pdev->devfn), pdev->vendor, pdev->device));
- }
-
- if ((pdev->vendor == VENDOR_TI) && ((pdev->device == PCIXX21_FLASHMEDIA_ID) ||
- (pdev->device == PCIXX21_FLASHMEDIA0_ID))) {
- uint32 config_reg;
-
- SDLX_MSG(("%s: Disabling TI FlashMedia Controller.\n", __FUNCTION__));
- if (!(osh = osl_attach(pdev, PCI_BUS, FALSE))) {
- SDLX_MSG(("%s: osl_attach failed\n", __FUNCTION__));
- goto err;
- }
-
- config_reg = OSL_PCI_READ_CONFIG(osh, 0x4c, 4);
-
- /*
- * Set MMC_SD_DIS bit in FlashMedia Controller.
- * Disbling the SD/MMC Controller in the FlashMedia Controller
- * allows the Standard SD Host Controller to take over control
- * of the SD Slot.
- */
- config_reg |= 0x02;
- OSL_PCI_WRITE_CONFIG(osh, 0x4c, 4, config_reg);
- osl_detach(osh);
- }
- /* match this pci device with what we support */
- /* we can't solely rely on this to believe it is our SDIO Host Controller! */
- if (!bcmsdh_chipmatch(pdev->vendor, pdev->device)) {
- if (pdev->vendor == VENDOR_BROADCOM) {
- SDLX_MSG(("%s: Unknown Broadcom device (vendor: %#x, device: %#x).\n",
- __FUNCTION__, pdev->vendor, pdev->device));
- }
- return -ENODEV;
- }
-
- /* this is a pci device we might support */
- SDLX_MSG(("%s: Found possible SDIO Host Controller: bus %d slot %d func %d irq %d\n",
- __FUNCTION__,
- pdev->bus->number, PCI_SLOT(pdev->devfn),
- PCI_FUNC(pdev->devfn), pdev->irq));
-
- /* use bcmsdh_query_device() to get the vendor ID of the target device so
- * it will eventually appear in the Broadcom string on the console
- */
-
- /* allocate SDIO Host Controller state info */
- if (!(osh = osl_attach(pdev, PCI_BUS, FALSE))) {
- SDLX_MSG(("%s: osl_attach failed\n", __FUNCTION__));
- goto err;
- }
- if (!(sdhc = MALLOC(osh, sizeof(bcmsdh_hc_t)))) {
- SDLX_MSG(("%s: out of memory, allocated %d bytes\n",
- __FUNCTION__,
- MALLOCED(osh)));
- goto err;
- }
- bzero(sdhc, sizeof(bcmsdh_hc_t));
- sdhc->osh = osh;
-
- sdhc->dev = pdev;
-
- /* map to address where host can access */
- pci_set_master(pdev);
- rc = pci_enable_device(pdev);
- if (rc) {
- SDLX_MSG(("%s: Cannot enable PCI device\n", __FUNCTION__));
- goto err;
- }
- if (!(sdh = bcmsdh_attach(osh, (void *)(uintptr)pci_resource_start(pdev, 0),
- (void **)&regs, pdev->irq))) {
- SDLX_MSG(("%s: bcmsdh_attach failed\n", __FUNCTION__));
- goto err;
- }
-
- sdhc->sdh = sdh;
-
- /* try to attach to the target device */
- if (!(sdhc->ch = drvinfo.attach(VENDOR_BROADCOM, /* pdev->vendor, */
- bcmsdh_query_device(sdh) & 0xFFFF, 0, 0, 0, 0,
- (void *)regs, NULL, sdh))) {
- SDLX_MSG(("%s: device attach failed\n", __FUNCTION__));
- goto err;
- }
-
- /* chain SDIO Host Controller info together */
- sdhc->next = sdhcinfo;
- sdhcinfo = sdhc;
+ bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+ if (drvinfo.suspend && drvinfo.suspend(bcmsdh_osinfo->context))
+ return -EBUSY;
return 0;
-
- /* error handling */
-err:
- if (sdhc) {
- if (sdhc->sdh)
- bcmsdh_detach(sdhc->osh, sdhc->sdh);
- MFREE(osh, sdhc, sizeof(bcmsdh_hc_t));
- }
- if (osh)
- osl_detach(osh);
- return -ENODEV;
}
-
-/**
- * Detach from target devices and SDIO Host Controller
- */
-static void __devexit
-bcmsdh_pci_remove(struct pci_dev *pdev)
+int bcmsdh_resume(bcmsdh_info_t *bcmsdh)
{
- bcmsdh_hc_t *sdhc, *prev;
- osl_t *osh;
-
- /* find the SDIO Host Controller state for this pdev and take it out from the list */
- for (sdhc = sdhcinfo, prev = NULL; sdhc; sdhc = sdhc->next) {
- if (sdhc->dev == pdev) {
- if (prev)
- prev->next = sdhc->next;
- else
- sdhcinfo = NULL;
- break;
- }
- prev = sdhc;
- }
- if (!sdhc)
- return;
+ bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
- drvinfo.detach(sdhc->ch);
-
- bcmsdh_detach(sdhc->osh, sdhc->sdh);
-
- /* release SDIO Host Controller info */
- osh = sdhc->osh;
- MFREE(osh, sdhc, sizeof(bcmsdh_hc_t));
- osl_detach(osh);
+ if (drvinfo.resume)
+ return drvinfo.resume(bcmsdh_osinfo->context);
+ return 0;
}
-#endif /* BCMLXSDMMC */
-#endif /* BCMPLATFORM_BUS */
-
-extern int sdio_function_init(void);
+extern int bcmsdh_register_client_driver(void);
+extern void bcmsdh_unregister_client_driver(void);
extern int sdio_func_reg_notify(void* semaphore);
extern void sdio_func_unreg_notify(void);
int error = 0;
drvinfo = *driver;
-
-#if defined(BCMPLATFORM_BUS)
- SDLX_MSG(("%s: Linux Kernel SDIO/MMC Driver\n", __FUNCTION__));
- error = sdio_function_init();
- return error;
-#endif /* defined(BCMPLATFORM_BUS) */
-
-#if !defined(BCMPLATFORM_BUS) && !defined(BCMLXSDMMC)
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
- if (!(error = pci_module_init(&bcmsdh_pci_driver)))
- return 0;
-#else
- if (!(error = pci_register_driver(&bcmsdh_pci_driver)))
- return 0;
-#endif
-
- SDLX_MSG(("%s: pci_module_init failed 0x%x\n", __FUNCTION__, error));
-#endif /* BCMPLATFORM_BUS */
+ SDLX_MSG(("%s: register client driver\n", __FUNCTION__));
+ error = bcmsdh_register_client_driver();
+ if (error)
+ SDLX_MSG(("%s: failed %d\n", __FUNCTION__, error));
return error;
}
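An editorial sketch, not part of the patch, of how a client such as dhd registers against the extended callback table; the field names follow the drvinfo.probe/.remove/.suspend/.resume accesses above, while the dhdsdio_* names are hypothetical placeholders for the client's real callbacks.

static bcmsdh_driver_t example_sdio_driver = {
	.probe   = dhdsdio_probe,
	.remove  = dhdsdio_remove,
	.suspend = dhdsdio_suspend,
	.resume  = dhdsdio_resume,
};

/* then, from the client's module init path: bcmsdh_register(&example_sdio_driver); */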
-extern void sdio_function_cleanup(void);
-
void
bcmsdh_unregister(void)
{
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
- if (bcmsdh_pci_driver.node.next)
+ if (bcmsdh_pci_driver.node.next == NULL)
+ return;
#endif
-#if defined(BCMLXSDMMC)
- sdio_function_cleanup();
-#endif /* BCMLXSDMMC */
+ bcmsdh_unregister_client_driver();
+}
-#if !defined(BCMPLATFORM_BUS) && !defined(BCMLXSDMMC)
- pci_unregister_driver(&bcmsdh_pci_driver);
-#endif /* BCMPLATFORM_BUS */
+void bcmsdh_dev_pm_stay_awake(bcmsdh_info_t *bcmsdh)
+{
+#if !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+ bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+ pm_stay_awake(bcmsdh_osinfo->dev);
+#endif /* !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */
}
-int bcmsdh_set_drvdata(void * dhdp)
+void bcmsdh_dev_relax(bcmsdh_info_t *bcmsdh)
{
- SDLX_MSG(("%s: Enter\n", __FUNCTION__));
+#if !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+ bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
+ pm_relax(bcmsdh_osinfo->dev);
+#endif /* !defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36)) */
+}
- dev_set_drvdata(sdhcinfo->dev, dhdp);
+bool bcmsdh_dev_pm_enabled(bcmsdh_info_t *bcmsdh)
+{
+ bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
- return 0;
+ return bcmsdh_osinfo->dev_wake_enabled;
}
#if defined(OOB_INTR_ONLY)
-void bcmsdh_oob_intr_set(bool enable)
+void bcmsdh_oob_intr_set(bcmsdh_info_t *bcmsdh, bool enable)
{
- static bool curstate = 1;
unsigned long flags;
+ bcmsdh_os_info_t *bcmsdh_osinfo;
- spin_lock_irqsave(&sdhcinfo->irq_lock, flags);
- if (curstate != enable) {
+ if (!bcmsdh)
+ return;
+
+ bcmsdh_osinfo = bcmsdh->os_cxt;
+ spin_lock_irqsave(&bcmsdh_osinfo->oob_irq_spinlock, flags);
+ if (bcmsdh_osinfo->oob_irq_enabled != enable) {
if (enable)
- enable_irq(sdhcinfo->oob_irq);
+ enable_irq(bcmsdh_osinfo->oob_irq_num);
else
- disable_irq_nosync(sdhcinfo->oob_irq);
- curstate = enable;
+ disable_irq_nosync(bcmsdh_osinfo->oob_irq_num);
+ bcmsdh_osinfo->oob_irq_enabled = enable;
}
- spin_unlock_irqrestore(&sdhcinfo->irq_lock, flags);
+ spin_unlock_irqrestore(&bcmsdh_osinfo->oob_irq_spinlock, flags);
}
static irqreturn_t wlan_oob_irq(int irq, void *dev_id)
{
- dhd_pub_t *dhdp;
-
- dhdp = (dhd_pub_t *)dev_get_drvdata(sdhcinfo->dev);
-
- bcmsdh_oob_intr_set(0);
-
- if (dhdp == NULL) {
- SDLX_MSG(("%s: Out of band GPIO interrupt fired way too early\n",
- __FUNCTION__));
- return IRQ_HANDLED;
- }
+ bcmsdh_info_t *bcmsdh = (bcmsdh_info_t *)dev_id;
+ bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
- dhdsdio_isr((void *)dhdp->bus);
+ bcmsdh_oob_intr_set(bcmsdh, FALSE);
+ bcmsdh_osinfo->oob_irq_handler(bcmsdh_osinfo->oob_irq_handler_context);
return IRQ_HANDLED;
}
-int bcmsdh_register_oob_intr(void * dhdp)
+int bcmsdh_oob_intr_register(bcmsdh_info_t *bcmsdh, bcmsdh_cb_fn_t oob_irq_handler,
+ void* oob_irq_handler_context)
{
- int error = 0;
-
- SDLX_MSG(("%s: Enter \n", __FUNCTION__));
-
- /* IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL | IORESOURCE_IRQ_SHAREABLE; */
-
- dev_set_drvdata(sdhcinfo->dev, dhdp);
-
- if (!sdhcinfo->oob_irq_registered) {
- SDLX_MSG(("%s: IRQ=%d Type=%X \n", __FUNCTION__,
- (int)sdhcinfo->oob_irq, (int)sdhcinfo->oob_flags));
- /* Refer to customer Host IRQ docs about proper irqflags definition */
- error = request_irq(sdhcinfo->oob_irq, wlan_oob_irq, sdhcinfo->oob_flags,
- "bcmsdh_sdmmc", NULL);
- if (error)
- return -ENODEV;
+ int err = 0;
+ bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
-#if defined(CONFIG_ARCH_RHEA) || defined(CONFIG_ARCH_CAPRI)
- if (device_may_wakeup(sdhcinfo->dev)) {
-#endif
- error = enable_irq_wake(sdhcinfo->oob_irq);
-#if defined(CONFIG_ARCH_RHEA) || defined(CONFIG_ARCH_CAPRI)
- }
+ SDLX_MSG(("%s: Enter\n", __FUNCTION__));
+ if (bcmsdh_osinfo->oob_irq_registered) {
+ SDLX_MSG(("%s: irq is already registered\n", __FUNCTION__));
+ return -EBUSY;
+ }
+ SDLX_MSG(("%s OOB irq=%d flags=%X\n", __FUNCTION__,
+ (int)bcmsdh_osinfo->oob_irq_num, (int)bcmsdh_osinfo->oob_irq_flags));
+ bcmsdh_osinfo->oob_irq_handler = oob_irq_handler;
+ bcmsdh_osinfo->oob_irq_handler_context = oob_irq_handler_context;
+#if defined(CONFIG_ARCH_ODIN)
+ err = odin_gpio_sms_request_irq(bcmsdh_osinfo->oob_irq_num, wlan_oob_irq,
+ bcmsdh_osinfo->oob_irq_flags, "bcmsdh_sdmmc", bcmsdh);
+#else
+ err = request_irq(bcmsdh_osinfo->oob_irq_num, wlan_oob_irq,
+ bcmsdh_osinfo->oob_irq_flags, "bcmsdh_sdmmc", bcmsdh);
+#endif /* defined(CONFIG_ARCH_ODIN) */
+ if (err) {
+ SDLX_MSG(("%s: request_irq failed with %d\n", __FUNCTION__, err));
+ return err;
+ }
+
+#if defined(DISABLE_WOWLAN)
+ SDLX_MSG(("%s: disable_irq_wake\n", __FUNCTION__));
+ err = disable_irq_wake(bcmsdh_osinfo->oob_irq_num);
+ if (err)
+ SDLX_MSG(("%s: disable_irq_wake failed with %d\n", __FUNCTION__, err));
+ else
+ bcmsdh_osinfo->oob_irq_wake_enabled = FALSE;
+#else
+ SDLX_MSG(("%s: enable_irq_wake\n", __FUNCTION__));
+ err = enable_irq_wake(bcmsdh_osinfo->oob_irq_num);
+ if (err)
+ SDLX_MSG(("%s: enable_irq_wake failed with %d\n", __FUNCTION__, err));
+ else
+ bcmsdh_osinfo->oob_irq_wake_enabled = TRUE;
#endif
- if (error)
- SDLX_MSG(("%s: enable_irq_wake error=%d \n", __FUNCTION__, error));
- sdhcinfo->oob_irq_registered = TRUE;
- sdhcinfo->oob_irq_enable_flag = TRUE;
- }
-
- return 0;
+ bcmsdh_osinfo->oob_irq_enabled = TRUE;
+ bcmsdh_osinfo->oob_irq_registered = TRUE;
+ return err;
}
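
bcmsdh_oob_intr_register() above replaces the old global-state registration: the caller passes a handler plus an opaque context, and the wlan_oob_irq() trampoline masks the OOB line and forwards to that handler. Below is a self-contained sketch of the same handler-plus-context pattern; my_dpc_schedule and the other names are illustrative, not driver symbols.

/* Sketch of the handler-plus-context registration used by the OOB interrupt path. */
#include <stdio.h>

typedef void (*oob_cb_fn)(void *ctx);

struct oob_state {
	oob_cb_fn handler;
	void *handler_ctx;
	int irq_enabled;
};

static void oob_trampoline(struct oob_state *st)
{
	st->irq_enabled = 0;		/* mirrors bcmsdh_oob_intr_set(bcmsdh, FALSE) */
	st->handler(st->handler_ctx);	/* forward to the registered handler */
}

static void my_dpc_schedule(void *ctx)	/* hypothetical client handler */
{
	printf("OOB interrupt for %s\n", (const char *)ctx);
}

int main(void)
{
	struct oob_state st = { .handler = my_dpc_schedule,
	                        .handler_ctx = "mmc0", .irq_enabled = 1 };
	oob_trampoline(&st);		/* simulate one out-of-band interrupt */
	return 0;
}
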
-void bcmsdh_set_irq(int flag)
+void bcmsdh_oob_intr_unregister(bcmsdh_info_t *bcmsdh)
{
- if (sdhcinfo->oob_irq_registered && sdhcinfo->oob_irq_enable_flag != flag) {
- SDLX_MSG(("%s: Flag = %d\n", __FUNCTION__, flag));
- sdhcinfo->oob_irq_enable_flag = flag;
- if (flag) {
- enable_irq(sdhcinfo->oob_irq);
-#if defined(CONFIG_ARCH_RHEA) || defined(CONFIG_ARCH_CAPRI)
- if (device_may_wakeup(sdhcinfo->dev))
-#endif
- enable_irq_wake(sdhcinfo->oob_irq);
- } else {
-#if defined(CONFIG_ARCH_RHEA) || defined(CONFIG_ARCH_CAPRI)
- if (device_may_wakeup(sdhcinfo->dev))
-#endif
- disable_irq_wake(sdhcinfo->oob_irq);
- disable_irq(sdhcinfo->oob_irq);
- }
- }
-}
+ int err = 0;
+ bcmsdh_os_info_t *bcmsdh_osinfo = bcmsdh->os_cxt;
-void bcmsdh_unregister_oob_intr(void)
-{
SDLX_MSG(("%s: Enter\n", __FUNCTION__));
-
- if (sdhcinfo->oob_irq_registered == TRUE) {
- bcmsdh_set_irq(FALSE);
- free_irq(sdhcinfo->oob_irq, NULL);
- sdhcinfo->oob_irq_registered = FALSE;
+ if (!bcmsdh_osinfo->oob_irq_registered) {
+ SDLX_MSG(("%s: irq is not registered\n", __FUNCTION__));
+ return;
}
-}
-
-bool bcmsdh_is_oob_intr_registered(void)
-{
- if (sdhcinfo)
- return sdhcinfo->oob_irq_registered;
- else
- return FALSE;
+ if (bcmsdh_osinfo->oob_irq_wake_enabled) {
+ err = disable_irq_wake(bcmsdh_osinfo->oob_irq_num);
+ if (!err)
+ bcmsdh_osinfo->oob_irq_wake_enabled = FALSE;
+ }
+ if (bcmsdh_osinfo->oob_irq_enabled) {
+ disable_irq(bcmsdh_osinfo->oob_irq_num);
+ bcmsdh_osinfo->oob_irq_enabled = FALSE;
+ }
+ free_irq(bcmsdh_osinfo->oob_irq_num, bcmsdh);
+ bcmsdh_osinfo->oob_irq_registered = FALSE;
}
#endif
-#if defined(BCMLXSDMMC)
-void *bcmsdh_get_drvdata(void)
-{
- if (!sdhcinfo)
- return NULL;
- return dev_get_drvdata(sdhcinfo->dev);
-}
-#endif
-
/* Module parameters specific to each host-controller driver */
extern uint sd_msglevel; /* Debug message level */
module_param(sd_tuning_period, uint, 0);
extern int sd_delay_value;
module_param(sd_delay_value, uint, 0);
-#endif
-#ifdef BCMSDIOH_TXGLOM
-extern uint sd_txglom;
-module_param(sd_txglom, uint, 0);
+/* SDIO Drive Strength for UHSI mode specific to SDIO3.0 */
+extern char dhd_sdiod_uhsi_ds_override[2];
+module_param_string(dhd_sdiod_uhsi_ds_override, dhd_sdiod_uhsi_ds_override, 2, 0);
+
#endif
#ifdef BCMSDH_MODULE
/*
* BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
*
- * Copyright (C) 1999-2013, Broadcom Corporation
+ * Copyright (C) 1999-2014, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: bcmsdh_sdmmc.c 418714 2013-08-16 13:21:09Z $
+ * $Id: bcmsdh_sdmmc.c 459285 2014-03-03 02:54:39Z $
*/
#include <typedefs.h>
#include <sdiovar.h> /* ioctl/iovars */
#include <linux/mmc/core.h>
+#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#define CUSTOM_SDIO_F2_BLKSIZE DEFAULT_SDIO_F2_BLKSIZE
#endif
+#define MAX_IO_RW_EXTENDED_BLK 511
+
uint sd_sdmode = SDIOH_MODE_SD4; /* Use SD4 mode by default */
uint sd_f2_blocksize = CUSTOM_SDIO_F2_BLKSIZE;
uint sd_divisor = 2; /* Default 48MHz/2 = 24MHz */
uint sd_msglevel = 0x01;
uint sd_use_dma = TRUE;
-#ifdef BCMSDIOH_TXGLOM
-#ifndef CUSTOM_TXGLOM
-#define CUSTOM_TXGLOM 0
-#endif
-uint sd_txglom = CUSTOM_TXGLOM;
-#endif /* BCMSDIOH_TXGLOM */
-
#ifndef CUSTOM_RXCHAIN
#define CUSTOM_RXCHAIN 0
#endif
sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr));
/* Enable Function 1 */
- sdio_claim_host(gInstance->func[1]);
- err_ret = sdio_enable_func(gInstance->func[1]);
- sdio_release_host(gInstance->func[1]);
+ sdio_claim_host(sd->func[1]);
+ err_ret = sdio_enable_func(sd->func[1]);
+ sdio_release_host(sd->func[1]);
if (err_ret) {
sd_err(("bcmsdh_sdmmc: Failed to enable F1 Err: 0x%08x", err_ret));
}
* Public entry points & extern's
*/
extern sdioh_info_t *
-sdioh_attach(osl_t *osh, void *bar0, uint irq)
+sdioh_attach(osl_t *osh, struct sdio_func *func)
{
- sdioh_info_t *sd;
+ sdioh_info_t *sd = NULL;
int err_ret;
sd_trace(("%s\n", __FUNCTION__));
- if (gInstance == NULL) {
- sd_err(("%s: SDIO Device not present\n", __FUNCTION__));
+ if (func == NULL) {
+ sd_err(("%s: sdio function device is NULL\n", __FUNCTION__));
return NULL;
}
}
bzero((char *)sd, sizeof(sdioh_info_t));
sd->osh = osh;
- if (sdioh_sdmmc_osinit(sd) != 0) {
- sd_err(("%s:sdioh_sdmmc_osinit() failed\n", __FUNCTION__));
- MFREE(sd->osh, sd, sizeof(sdioh_info_t));
- return NULL;
- }
-
+ sd->fake_func0.num = 0;
+ sd->fake_func0.card = func->card;
+ sd->func[0] = &sd->fake_func0;
+ if (func->num == 2)
+ sd->func[1] = gInstance->func[1];
+ sd->func[2] = func->card->sdio_func[1];
+ sd->func[func->num] = func;
sd->num_funcs = 2;
sd->sd_blockmode = TRUE;
sd->use_client_ints = TRUE;
sd->client_block_size[0] = 64;
sd->use_rxchain = CUSTOM_RXCHAIN;
-
- gInstance->sd = sd;
-
- /* Claim host controller */
- if (gInstance->func[1]) {
- sdio_claim_host(gInstance->func[1]);
-
- sd->client_block_size[1] = 64;
- err_ret = sdio_set_block_size(gInstance->func[1], 64);
- /* Release host controller F1 */
- sdio_release_host(gInstance->func[1]);
- if (err_ret) {
- sd_err(("bcmsdh_sdmmc: Failed to set F1 blocksize\n"));
- MFREE(sd->osh, sd, sizeof(sdioh_info_t));
- return NULL;
- }
-
- } else {
- sd_err(("%s:gInstance->func[1] is null\n", __FUNCTION__));
- MFREE(sd->osh, sd, sizeof(sdioh_info_t));
- return NULL;
+ if (sd->func[1] == NULL || sd->func[2] == NULL) {
+ sd_err(("%s: func 1 or 2 is null \n", __FUNCTION__));
+ goto fail;
}
+ sdio_set_drvdata(sd->func[1], sd);
- if (gInstance->func[2]) {
- /* Claim host controller F2 */
- sdio_claim_host(gInstance->func[2]);
-
- sd->client_block_size[2] = sd_f2_blocksize;
- err_ret = sdio_set_block_size(gInstance->func[2], sd_f2_blocksize);
- /* Release host controller F2 */
- sdio_release_host(gInstance->func[2]);
- if (err_ret) {
- sd_err(("bcmsdh_sdmmc: Failed to set F2 blocksize to %d\n",
- sd_f2_blocksize));
- MFREE(sd->osh, sd, sizeof(sdioh_info_t));
- return NULL;
- }
+ sdio_claim_host(sd->func[1]);
+ sd->client_block_size[1] = 64;
+ err_ret = sdio_set_block_size(sd->func[1], 64);
+ sdio_release_host(sd->func[1]);
+ if (err_ret) {
+ sd_err(("bcmsdh_sdmmc: Failed to set F1 blocksize(%d)\n", err_ret));
+ goto fail;
+ }
- } else {
- sd_err(("%s:gInstance->func[2] is null\n", __FUNCTION__));
- MFREE(sd->osh, sd, sizeof(sdioh_info_t));
- return NULL;
+ sdio_claim_host(sd->func[2]);
+ sd->client_block_size[2] = sd_f2_blocksize;
+ err_ret = sdio_set_block_size(sd->func[2], sd_f2_blocksize);
+ sdio_release_host(sd->func[2]);
+ if (err_ret) {
+ sd_err(("bcmsdh_sdmmc: Failed to set F2 blocksize to %d(%d)\n",
+ sd_f2_blocksize, err_ret));
+ goto fail;
}
sdioh_sdmmc_card_enablefuncs(sd);
sd_trace(("%s: Done\n", __FUNCTION__));
return sd;
+
+fail:
+ MFREE(sd->osh, sd, sizeof(sdioh_info_t));
+ return NULL;
}
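
The sd->func[] setup in the rewritten sdioh_attach() above mixes two index conventions: the MMC core's card->sdio_func[] array is zero-based (entry 0 is SDIO function 1), while sd->func[] is indexed by SDIO function number and gets a locally fabricated entry for function 0. A small self-contained sketch of that mapping, with simplified stand-in types:

/* Sketch of the function-index mapping; real sdio_func structs are far richer. */
#include <stdio.h>

struct fake_func { int num; };

int main(void)
{
	struct fake_func card_funcs[2] = { { .num = 1 }, { .num = 2 } };	/* card->sdio_func[] */
	struct fake_func func0 = { .num = 0 };					/* sd->fake_func0 */
	struct fake_func *sd_func[3];

	sd_func[0] = &func0;			/* CCCR access goes through the fake F0 */
	sd_func[1] = &card_funcs[0];		/* sdio_func[0]  ->  F1 */
	sd_func[2] = &card_funcs[1];		/* sdio_func[1]  ->  F2 */

	for (int i = 0; i < 3; i++)
		printf("sd->func[%d] maps to SDIO function %d\n", i, sd_func[i]->num);
	return 0;
}
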
if (sd) {
/* Disable Function 2 */
- sdio_claim_host(gInstance->func[2]);
- sdio_disable_func(gInstance->func[2]);
- sdio_release_host(gInstance->func[2]);
+ if (sd->func[2]) {
+ sdio_claim_host(sd->func[2]);
+ sdio_disable_func(sd->func[2]);
+ sdio_release_host(sd->func[2]);
+ }
/* Disable Function 1 */
- if (gInstance->func[1]) {
- sdio_claim_host(gInstance->func[1]);
- sdio_disable_func(gInstance->func[1]);
- sdio_release_host(gInstance->func[1]);
+ if (sd->func[1]) {
+ sdio_claim_host(sd->func[1]);
+ sdio_disable_func(sd->func[1]);
+ sdio_release_host(sd->func[1]);
}
- gInstance->func[1] = NULL;
- gInstance->func[2] = NULL;
-
- /* deregister irq */
- sdioh_sdmmc_osfree(sd);
+ sd->func[1] = NULL;
+ sd->func[2] = NULL;
MFREE(sd->osh, sd, sizeof(sdioh_info_t));
}
#if defined(OOB_INTR_ONLY) && defined(HW_OOB)
extern SDIOH_API_RC
-sdioh_enable_func_intr(void)
+sdioh_enable_func_intr(sdioh_info_t *sd)
{
uint8 reg;
int err;
- if (gInstance->func[0]) {
- sdio_claim_host(gInstance->func[0]);
-
- reg = sdio_readb(gInstance->func[0], SDIOD_CCCR_INTEN, &err);
- if (err) {
- sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
- sdio_release_host(gInstance->func[0]);
- return SDIOH_API_RC_FAIL;
- }
-
- /* Enable F1 and F2 interrupts, clear master enable */
- reg &= ~INTR_CTL_MASTER_EN;
- reg |= (INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN);
- sdio_writeb(gInstance->func[0], reg, SDIOD_CCCR_INTEN, &err);
- sdio_release_host(gInstance->func[0]);
+ if (sd->func[0] == NULL) {
+ sd_err(("%s: function 0 pointer is NULL\n", __FUNCTION__));
+ return SDIOH_API_RC_FAIL;
+ }
- if (err) {
- sd_err(("%s: error for write SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
- return SDIOH_API_RC_FAIL;
- }
+ sdio_claim_host(sd->func[0]);
+ reg = sdio_readb(sd->func[0], SDIOD_CCCR_INTEN, &err);
+ if (err) {
+ sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
+ sdio_release_host(sd->func[0]);
+ return SDIOH_API_RC_FAIL;
+ }
+ /* Enable F1 and F2 interrupts, clear master enable */
+ reg &= ~INTR_CTL_MASTER_EN;
+ reg |= (INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN);
+ sdio_writeb(sd->func[0], reg, SDIOD_CCCR_INTEN, &err);
+ sdio_release_host(sd->func[0]);
+
+ if (err) {
+ sd_err(("%s: error for write SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
+ return SDIOH_API_RC_FAIL;
}
return SDIOH_API_RC_SUCCESS;
}
extern SDIOH_API_RC
-sdioh_disable_func_intr(void)
+sdioh_disable_func_intr(sdioh_info_t *sd)
{
uint8 reg;
int err;
- if (gInstance->func[0]) {
- sdio_claim_host(gInstance->func[0]);
- reg = sdio_readb(gInstance->func[0], SDIOD_CCCR_INTEN, &err);
- if (err) {
- sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
- sdio_release_host(gInstance->func[0]);
- return SDIOH_API_RC_FAIL;
- }
-
- reg &= ~(INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN);
- /* Disable master interrupt with the last function interrupt */
- if (!(reg & 0xFE))
- reg = 0;
- sdio_writeb(gInstance->func[0], reg, SDIOD_CCCR_INTEN, &err);
+ if (sd->func[0] == NULL) {
+ sd_err(("%s: function 0 pointer is NULL\n", __FUNCTION__));
+ return SDIOH_API_RC_FAIL;
+ }
- sdio_release_host(gInstance->func[0]);
- if (err) {
- sd_err(("%s: error for write SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
- return SDIOH_API_RC_FAIL;
- }
+ sdio_claim_host(sd->func[0]);
+ reg = sdio_readb(sd->func[0], SDIOD_CCCR_INTEN, &err);
+ if (err) {
+ sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
+ sdio_release_host(sd->func[0]);
+ return SDIOH_API_RC_FAIL;
+ }
+ reg &= ~(INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN);
+ /* Disable master interrupt with the last function interrupt */
+ if (!(reg & 0xFE))
+ reg = 0;
+ sdio_writeb(sd->func[0], reg, SDIOD_CCCR_INTEN, &err);
+ sdio_release_host(sd->func[0]);
+
+ if (err) {
+ sd_err(("%s: error for write SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
+ return SDIOH_API_RC_FAIL;
}
+
return SDIOH_API_RC_SUCCESS;
}
#endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */
sd->intr_handler_valid = TRUE;
/* register and unmask irq */
- if (gInstance->func[2]) {
- sdio_claim_host(gInstance->func[2]);
- sdio_claim_irq(gInstance->func[2], IRQHandlerF2);
- sdio_release_host(gInstance->func[2]);
+ if (sd->func[2]) {
+ sdio_claim_host(sd->func[2]);
+ sdio_claim_irq(sd->func[2], IRQHandlerF2);
+ sdio_release_host(sd->func[2]);
}
- if (gInstance->func[1]) {
- sdio_claim_host(gInstance->func[1]);
- sdio_claim_irq(gInstance->func[1], IRQHandler);
- sdio_release_host(gInstance->func[1]);
+ if (sd->func[1]) {
+ sdio_claim_host(sd->func[1]);
+ sdio_claim_irq(sd->func[1], IRQHandler);
+ sdio_release_host(sd->func[1]);
}
#elif defined(HW_OOB)
- sdioh_enable_func_intr();
+ sdioh_enable_func_intr(sd);
#endif /* !defined(OOB_INTR_ONLY) */
return SDIOH_API_RC_SUCCESS;
sd_trace(("%s: Entering\n", __FUNCTION__));
#if !defined(OOB_INTR_ONLY)
- if (gInstance->func[1]) {
+ if (sd->func[1]) {
/* register and unmask irq */
- sdio_claim_host(gInstance->func[1]);
- sdio_release_irq(gInstance->func[1]);
- sdio_release_host(gInstance->func[1]);
+ sdio_claim_host(sd->func[1]);
+ sdio_release_irq(sd->func[1]);
+ sdio_release_host(sd->func[1]);
}
- if (gInstance->func[2]) {
+ if (sd->func[2]) {
/* Claim host controller F2 */
- sdio_claim_host(gInstance->func[2]);
- sdio_release_irq(gInstance->func[2]);
+ sdio_claim_host(sd->func[2]);
+ sdio_release_irq(sd->func[2]);
/* Release host controller F2 */
- sdio_release_host(gInstance->func[2]);
+ sdio_release_host(sd->func[2]);
}
sd->intr_handler_valid = FALSE;
sd->intr_handler = NULL;
sd->intr_handler_arg = NULL;
#elif defined(HW_OOB)
- if (dhd_download_fw_on_driverload)
- sdioh_disable_func_intr();
+ if (dhd_download_fw_on_driverload)
+ sdioh_disable_func_intr(sd);
#endif /* !defined(OOB_INTR_ONLY) */
return SDIOH_API_RC_SUCCESS;
}
return SDIOH_API_RC_FAIL;
}
- sd_err(("%s: func_cis_ptr[%d]=0x%04x\n", __FUNCTION__, func, sd->func_cis_ptr[func]));
+ sd_trace(("%s: func_cis_ptr[%d]=0x%04x\n", __FUNCTION__, func, sd->func_cis_ptr[func]));
for (count = 0; count < length; count++) {
offset = sd->func_cis_ptr[func] + count;
* as a special case.
*/
if (regaddr == SDIOD_CCCR_IOEN) {
- if (gInstance->func[2]) {
- sdio_claim_host(gInstance->func[2]);
+ if (sd->func[2]) {
+ sdio_claim_host(sd->func[2]);
if (*byte & SDIO_FUNC_ENABLE_2) {
/* Enable Function 2 */
- err_ret = sdio_enable_func(gInstance->func[2]);
+ err_ret = sdio_enable_func(sd->func[2]);
if (err_ret) {
- sd_err(("bcmsdh_sdmmc: enable F2 failed:%d",
+ sd_err(("bcmsdh_sdmmc: enable F2 failed:%d\n",
err_ret));
}
} else {
/* Disable Function 2 */
- err_ret = sdio_disable_func(gInstance->func[2]);
+ err_ret = sdio_disable_func(sd->func[2]);
if (err_ret) {
- sd_err(("bcmsdh_sdmmc: Disab F2 failed:%d",
+ sd_err(("bcmsdh_sdmmc: Disab F2 failed:%d\n",
err_ret));
}
}
- sdio_release_host(gInstance->func[2]);
+ sdio_release_host(sd->func[2]);
}
}
#if defined(MMC_SDIO_ABORT)
/* to allow abort command through F1 */
else if (regaddr == SDIOD_CCCR_IOABORT) {
while (sdio_abort_retry--) {
- if (gInstance->func[func]) {
- sdio_claim_host(gInstance->func[func]);
+ if (sd->func[func]) {
+ sdio_claim_host(sd->func[func]);
/*
* this sdio_f0_writeb() can be replaced with
* another api depending upon MMC driver change.
				 * As of this time, this is a temporary one
*/
- sdio_writeb(gInstance->func[func],
+ sdio_writeb(sd->func[func],
*byte, regaddr, &err_ret);
- sdio_release_host(gInstance->func[func]);
+ sdio_release_host(sd->func[func]);
}
if (!err_ret)
break;
sd_err(("bcmsdh_sdmmc: F0 Wr:0x%02x: write disallowed\n", regaddr));
} else {
/* Claim host controller, perform F0 write, and release */
- if (gInstance->func[func]) {
- sdio_claim_host(gInstance->func[func]);
- sdio_f0_writeb(gInstance->func[func],
+ if (sd->func[func]) {
+ sdio_claim_host(sd->func[func]);
+ sdio_f0_writeb(sd->func[func],
*byte, regaddr, &err_ret);
- sdio_release_host(gInstance->func[func]);
+ sdio_release_host(sd->func[func]);
}
}
} else {
/* Claim host controller, perform Fn write, and release */
- if (gInstance->func[func]) {
- sdio_claim_host(gInstance->func[func]);
- sdio_writeb(gInstance->func[func], *byte, regaddr, &err_ret);
- sdio_release_host(gInstance->func[func]);
+ if (sd->func[func]) {
+ sdio_claim_host(sd->func[func]);
+ sdio_writeb(sd->func[func], *byte, regaddr, &err_ret);
+ sdio_release_host(sd->func[func]);
}
}
} else { /* CMD52 Read */
/* Claim host controller, perform Fn read, and release */
- if (gInstance->func[func]) {
- sdio_claim_host(gInstance->func[func]);
+ if (sd->func[func]) {
+ sdio_claim_host(sd->func[func]);
if (func == 0) {
- *byte = sdio_f0_readb(gInstance->func[func], regaddr, &err_ret);
+ *byte = sdio_f0_readb(sd->func[func], regaddr, &err_ret);
} else {
- *byte = sdio_readb(gInstance->func[func], regaddr, &err_ret);
+ *byte = sdio_readb(sd->func[func], regaddr, &err_ret);
}
- sdio_release_host(gInstance->func[func]);
+ sdio_release_host(sd->func[func]);
}
}
if (err_ret) {
- if ((regaddr == 0x1001F) && (err_ret == -110)) {
+ if ((regaddr == 0x1001F) && ((err_ret == -ETIMEDOUT) || (err_ret == -EILSEQ))) {
} else {
sd_err(("bcmsdh_sdmmc: Failed to %s byte F%d:@0x%05x=%02x, Err: %d\n",
rw ? "Write" : "Read", func, regaddr, *byte, err_ret));
DHD_PM_RESUME_WAIT(sdioh_request_word_wait);
DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
/* Claim host controller */
- sdio_claim_host(gInstance->func[func]);
+ sdio_claim_host(sd->func[func]);
if(rw) { /* CMD52 Write */
if (nbytes == 4) {
- sdio_writel(gInstance->func[func], *word, addr, &err_ret);
+ sdio_writel(sd->func[func], *word, addr, &err_ret);
} else if (nbytes == 2) {
- sdio_writew(gInstance->func[func], (*word & 0xFFFF), addr, &err_ret);
+ sdio_writew(sd->func[func], (*word & 0xFFFF), addr, &err_ret);
} else {
sd_err(("%s: Invalid nbytes: %d\n", __FUNCTION__, nbytes));
}
} else { /* CMD52 Read */
if (nbytes == 4) {
- *word = sdio_readl(gInstance->func[func], addr, &err_ret);
+ *word = sdio_readl(sd->func[func], addr, &err_ret);
} else if (nbytes == 2) {
- *word = sdio_readw(gInstance->func[func], addr, &err_ret) & 0xFFFF;
+ *word = sdio_readw(sd->func[func], addr, &err_ret) & 0xFFFF;
} else {
sd_err(("%s: Invalid nbytes: %d\n", __FUNCTION__, nbytes));
}
}
/* Release host controller */
- sdio_release_host(gInstance->func[func]);
+ sdio_release_host(sd->func[func]);
if (err_ret) {
#if defined(MMC_SDIO_ABORT)
/* Any error on CMD53 transaction should abort that function using function 0. */
while (sdio_abort_retry--) {
- if (gInstance->func[0]) {
- sdio_claim_host(gInstance->func[0]);
+ if (sd->func[0]) {
+ sdio_claim_host(sd->func[0]);
/*
* this sdio_f0_writeb() can be replaced with another api
* depending upon MMC driver change.
				 * As of this time, this is a temporary one
*/
- sdio_writeb(gInstance->func[0],
+ sdio_writeb(sd->func[0],
func, SDIOD_CCCR_IOABORT, &err_ret2);
- sdio_release_host(gInstance->func[0]);
+ sdio_release_host(sd->func[0]);
}
if (!err_ret2)
break;
return (((err_ret == 0)&&(err_ret2 == 0)) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
}
-#ifdef BCMSDIOH_TXGLOM
-void
-sdioh_glom_post(sdioh_info_t *sd, uint8 *frame, void *pkt, uint len)
-{
- void *phead = sd->glom_info.glom_pkt_head;
- void *ptail = sd->glom_info.glom_pkt_tail;
-
- BCM_REFERENCE(frame);
-
- ASSERT(!PKTLINK(pkt));
- if (!phead) {
- ASSERT(!phead);
- sd->glom_info.glom_pkt_head = sd->glom_info.glom_pkt_tail = pkt;
- }
- else {
- ASSERT(ptail);
- PKTSETNEXT(sd->osh, ptail, pkt);
- sd->glom_info.glom_pkt_tail = pkt;
- }
- sd->glom_info.count++;
-}
-
-void
-sdioh_glom_clear(sdioh_info_t *sd)
-{
- void *pnow, *pnext;
-
- pnext = sd->glom_info.glom_pkt_head;
-
- if (!pnext) {
- sd_err(("sdioh_glom_clear: no first packet to clear!\n"));
- return;
- }
-
- while (pnext) {
- pnow = pnext;
- pnext = PKTNEXT(sd->osh, pnow);
- PKTSETNEXT(sd->osh, pnow, NULL);
- sd->glom_info.count--;
- }
-
- sd->glom_info.glom_pkt_head = NULL;
- sd->glom_info.glom_pkt_tail = NULL;
- if (sd->glom_info.count != 0) {
- sd_err(("sdioh_glom_clear: glom count mismatch!\n"));
- sd->glom_info.count = 0;
- }
-}
-
-uint
-sdioh_set_mode(sdioh_info_t *sd, uint mode)
-{
- if (mode == SDPCM_TXGLOM_CPY)
- sd->txglom_mode = mode;
- else if (mode == SDPCM_TXGLOM_MDESC)
- sd->txglom_mode = mode;
-
- return (sd->txglom_mode);
-}
-
-bool
-sdioh_glom_enabled(void)
-{
- return sd_txglom;
-}
-#endif /* BCMSDIOH_TXGLOM */
-
-static INLINE int sdioh_request_packet_align(uint pkt_len, uint write, uint func, int blk_size)
-{
- /* Align Patch */
- if (!write || pkt_len < 32)
- pkt_len = (pkt_len + 3) & 0xFFFFFFFC;
- else if ((pkt_len > blk_size) && (pkt_len % blk_size)) {
- if (func == SDIO_FUNC_2) {
- sd_err(("%s: [%s] dhd_sdio must align %d bytes"
- " packet larger than a %d bytes blk size by a blk size\n",
- __FUNCTION__, write ? "W" : "R", pkt_len, blk_size));
- }
- pkt_len += blk_size - (pkt_len % blk_size);
- }
-#ifdef CONFIG_MMC_MSM7X00A
- if ((pkt_len % 64) == 32) {
- sd_err(("%s: Rounding up TX packet +=32\n", __FUNCTION__));
- pkt_len += 32;
- }
-#endif /* CONFIG_MMC_MSM7X00A */
- return pkt_len;
-}
-
static SDIOH_API_RC
-sdioh_request_packet(sdioh_info_t *sd, uint fix_inc, uint write, uint func,
+sdioh_request_packet_chain(sdioh_info_t *sd, uint fix_inc, uint write, uint func,
uint addr, void *pkt)
{
bool fifo = (fix_inc == SDIOH_DATA_FIX);
- uint32 SGCount = 0;
int err_ret = 0;
void *pnext;
- uint ttl_len, dma_len, lft_len, xfred_len, pkt_len;
+ uint ttl_len, pkt_offset;
uint blk_num;
- int blk_size;
+ uint blk_size;
+ uint max_blk_count;
+ uint max_req_size;
struct mmc_request mmc_req;
struct mmc_command mmc_cmd;
struct mmc_data mmc_dat;
-#ifdef BCMSDIOH_TXGLOM
- uint8 *localbuf = NULL;
- uint local_plen = 0;
- bool need_txglom = write && sdioh_glom_enabled() &&
- (pkt == sd->glom_info.glom_pkt_tail) &&
- (sd->glom_info.glom_pkt_head != sd->glom_info.glom_pkt_tail);
-#endif /* BCMSDIOH_TXGLOM */
+ uint32 sg_count;
+ struct sdio_func *sdio_func = sd->func[func];
+ struct mmc_host *host = sdio_func->card->host;
sd_trace(("%s: Enter\n", __FUNCTION__));
-
ASSERT(pkt);
DHD_PM_RESUME_WAIT(sdioh_request_packet_wait);
DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
- ttl_len = xfred_len = 0;
-#ifdef BCMSDIOH_TXGLOM
- if (need_txglom) {
- pkt = sd->glom_info.glom_pkt_head;
- }
-#endif /* BCMSDIOH_TXGLOM */
-
- /* at least 4 bytes alignment of skb buff is guaranteed */
- for (pnext = pkt; pnext; pnext = PKTNEXT(sd->osh, pnext))
- ttl_len += PKTLEN(sd->osh, pnext);
-
blk_size = sd->client_block_size[func];
- if (((!write && sd->use_rxchain) ||
-#ifdef BCMSDIOH_TXGLOM
- (need_txglom && sd->txglom_mode == SDPCM_TXGLOM_MDESC) ||
-#endif
- 0) && (ttl_len >= blk_size)) {
- blk_num = ttl_len / blk_size;
- dma_len = blk_num * blk_size;
- } else {
- blk_num = 0;
- dma_len = 0;
- }
+ max_blk_count = min(host->max_blk_count, (uint)MAX_IO_RW_EXTENDED_BLK);
+ max_req_size = min(max_blk_count * blk_size, host->max_req_size);
- lft_len = ttl_len - dma_len;
+ pkt_offset = 0;
+ pnext = pkt;
- sd_trace(("%s: %s %dB to func%d:%08x, %d blks with DMA, %dB leftover\n",
- __FUNCTION__, write ? "W" : "R",
- ttl_len, func, addr, blk_num, lft_len));
-
- if (0 != dma_len) {
+ while (pnext != NULL) {
+ ttl_len = 0;
+ sg_count = 0;
memset(&mmc_req, 0, sizeof(struct mmc_request));
memset(&mmc_cmd, 0, sizeof(struct mmc_command));
memset(&mmc_dat, 0, sizeof(struct mmc_data));
+ sg_init_table(sd->sg_list, ARRAYSIZE(sd->sg_list));
- /* Set up DMA descriptors */
- for (pnext = pkt;
- pnext && dma_len;
- pnext = PKTNEXT(sd->osh, pnext)) {
- pkt_len = PKTLEN(sd->osh, pnext);
-
- if (dma_len > pkt_len)
- dma_len -= pkt_len;
- else {
- pkt_len = xfred_len = dma_len;
- dma_len = 0;
- pkt = pnext;
- }
-
- sg_set_buf(&sd->sg_list[SGCount++],
- (uint8*)PKTDATA(sd->osh, pnext),
- pkt_len);
+		/* Set up the scatter-gather DMA descriptors. This loop determines the
+		 * maximum amount of data we can transfer with one CMD53. The number of
+		 * blocks per command is limited by the host max_req_size and the 9-bit
+		 * block count field. When the total length of the packet chain exceeds
+		 * max_req_size, multiple SD_IO_RW_EXTENDED commands are issued (each
+		 * transfer is still block aligned)
+ */
+ while (pnext != NULL && ttl_len < max_req_size) {
+ int pkt_len;
+ int sg_data_size;
+ uint8 *pdata = (uint8*)PKTDATA(sd->osh, pnext);
- if (SGCount >= SDIOH_SDMMC_MAX_SG_ENTRIES) {
- sd_err(("%s: sg list entries exceed limit\n",
- __FUNCTION__));
+ ASSERT(pdata != NULL);
+ pkt_len = PKTLEN(sd->osh, pnext);
+ sd_trace(("%s[%d] data=%p, len=%d\n", __FUNCTION__, write, pdata, pkt_len));
+			/* sg_count is unlikely to exceed the array size, and this is
+			 * NOT something we can handle here; if it does happen, please put
+			 * a restriction on the max tx/glom count (based on host->max_segs).
+			 */
+ if (sg_count >= ARRAYSIZE(sd->sg_list)) {
+ sd_err(("%s: sg list entries exceed limit\n", __FUNCTION__));
return (SDIOH_API_RC_FAIL);
}
+ pdata += pkt_offset;
+
+ sg_data_size = pkt_len - pkt_offset;
+ if (sg_data_size > max_req_size - ttl_len)
+ sg_data_size = max_req_size - ttl_len;
+			/* Some platforms restrict the data size of each scatter-gather
+			 * DMA descriptor; use multiple sg buffers when the segment is
+			 * bigger than host->max_seg_size.
+			 */
+ if (sg_data_size > host->max_seg_size)
+ sg_data_size = host->max_seg_size;
+ sg_set_buf(&sd->sg_list[sg_count++], pdata, sg_data_size);
+
+ ttl_len += sg_data_size;
+ pkt_offset += sg_data_size;
+ if (pkt_offset == pkt_len) {
+ pnext = PKTNEXT(sd->osh, pnext);
+ pkt_offset = 0;
+ }
}
+ if (ttl_len % blk_size != 0) {
+ sd_err(("%s, data length %d not aligned to block size %d\n",
+ __FUNCTION__, ttl_len, blk_size));
+ return SDIOH_API_RC_FAIL;
+ }
+ blk_num = ttl_len / blk_size;
mmc_dat.sg = sd->sg_list;
- mmc_dat.sg_len = SGCount;
+ mmc_dat.sg_len = sg_count;
mmc_dat.blksz = blk_size;
mmc_dat.blocks = blk_num;
mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
-
- mmc_cmd.opcode = 53; /* SD_IO_RW_EXTENDED */
+ mmc_cmd.opcode = 53; /* SD_IO_RW_EXTENDED */
mmc_cmd.arg = write ? 1<<31 : 0;
mmc_cmd.arg |= (func & 0x7) << 28;
mmc_cmd.arg |= 1<<27;
mmc_cmd.arg |= (addr & 0x1FFFF) << 9;
mmc_cmd.arg |= blk_num & 0x1FF;
mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
-
mmc_req.cmd = &mmc_cmd;
mmc_req.data = &mmc_dat;
+ if (!fifo)
+ addr += ttl_len;
- sdio_claim_host(gInstance->func[func]);
- mmc_set_data_timeout(&mmc_dat, gInstance->func[func]->card);
- mmc_wait_for_req(gInstance->func[func]->card->host, &mmc_req);
- sdio_release_host(gInstance->func[func]);
+ sdio_claim_host(sdio_func);
+ mmc_set_data_timeout(&mmc_dat, sdio_func->card);
+ mmc_wait_for_req(host, &mmc_req);
+ sdio_release_host(sdio_func);
err_ret = mmc_cmd.error? mmc_cmd.error : mmc_dat.error;
if (0 != err_ret) {
sd_err(("%s:CMD53 %s failed with code %d\n",
- __FUNCTION__,
- write ? "write" : "read",
- err_ret));
- }
- if (!fifo) {
- addr = addr + ttl_len - lft_len - dma_len;
+ __FUNCTION__, write ? "write" : "read", err_ret));
+ return SDIOH_API_RC_FAIL;
}
}
- /* PIO mode */
- if (0 != lft_len) {
- /* Claim host controller */
- sdio_claim_host(gInstance->func[func]);
- for (pnext = pkt; pnext; pnext = PKTNEXT(sd->osh, pnext)) {
- uint8 *buf = (uint8*)PKTDATA(sd->osh, pnext) +
- xfred_len;
- uint pad = 0;
- pkt_len = PKTLEN(sd->osh, pnext);
- if (0 != xfred_len) {
- pkt_len -= xfred_len;
- xfred_len = 0;
- }
-#ifdef BCMSDIOH_TXGLOM
- if (need_txglom) {
- if (!localbuf) {
- uint prev_lft_len = lft_len;
- lft_len = sdioh_request_packet_align(lft_len, write,
- func, blk_size);
-
- if (lft_len > prev_lft_len) {
- sd_err(("%s: padding is unexpected! lft_len %d,"
- " prev_lft_len %d %s\n",
- __FUNCTION__, lft_len, prev_lft_len,
- write ? "Write" : "Read"));
- }
-
- localbuf = (uint8 *)MALLOC(sd->osh, lft_len);
- if (localbuf == NULL) {
- sd_err(("%s: %s TXGLOM: localbuf malloc FAILED\n",
- __FUNCTION__, (write) ? "TX" : "RX"));
- need_txglom = FALSE;
- goto txglomfail;
- }
- }
- bcopy(buf, (localbuf + local_plen), pkt_len);
- local_plen += pkt_len;
-
- if (PKTNEXT(sd->osh, pnext)) {
- continue;
- }
+ sd_trace(("%s: Exit\n", __FUNCTION__));
+ return SDIOH_API_RC_SUCCESS;
+}
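
For reference, the mmc_cmd.arg assembled in sdioh_request_packet_chain() above follows the SD_IO_RW_EXTENDED (CMD53) argument layout from the SDIO specification: bit 31 R/W, bits 30:28 function number, bit 27 block mode, bit 26 OP code (incrementing address), bits 25:9 register address, bits 8:0 block/byte count. A self-contained sketch that assembles the same word; the values passed in main() are illustrative only.

/* Sketch of the CMD53 argument encoding per the SDIO spec. */
#include <stdio.h>
#include <stdint.h>

static uint32_t cmd53_arg(int write, unsigned func, int block_mode,
                          int incr_addr, uint32_t addr, uint32_t count)
{
	uint32_t arg = 0;

	arg |= (write ? 1u : 0u) << 31;
	arg |= (func & 0x7u) << 28;
	arg |= (block_mode ? 1u : 0u) << 27;
	arg |= (incr_addr ? 1u : 0u) << 26;	/* 0 = FIFO (fixed address) */
	arg |= (addr & 0x1FFFFu) << 9;
	arg |= count & 0x1FFu;
	return arg;
}

int main(void)
{
	/* e.g. a block-mode write of 8 blocks to function 2 at address 0x8000 */
	printf("arg = 0x%08x\n", (unsigned)cmd53_arg(1, 2, 1, 1, 0x8000, 8));
	return 0;
}
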
- buf = localbuf;
- pkt_len = local_plen;
- }
+static SDIOH_API_RC
+sdioh_buffer_tofrom_bus(sdioh_info_t *sd, uint fix_inc, uint write, uint func,
+ uint addr, uint8 *buf, uint len)
+{
+ bool fifo = (fix_inc == SDIOH_DATA_FIX);
+ int err_ret = 0;
-txglomfail:
-#endif /* BCMSDIOH_TXGLOM */
+ sd_trace(("%s: Enter\n", __FUNCTION__));
+ ASSERT(buf);
+
+	/* NOTE:
+	 * For all writes, each packet length is aligned to 32 (or 4)
+	 * bytes in dhdsdio_txpkt_preprocess, and for glom the last packet length
+	 * is aligned to a block boundary. If you need to align each packet to
+	 * a custom size, please do it in dhdsdio_txpkt_preprocess, NOT here.
+	 *
+	 * For reads, the alignment is done in sdioh_request_buffer.
+	 */
+ sdio_claim_host(sd->func[func]);
+
+ if ((write) && (!fifo))
+ err_ret = sdio_memcpy_toio(sd->func[func], addr, buf, len);
+ else if (write)
+ err_ret = sdio_memcpy_toio(sd->func[func], addr, buf, len);
+ else if (fifo)
+ err_ret = sdio_readsb(sd->func[func], buf, addr, len);
+ else
+ err_ret = sdio_memcpy_fromio(sd->func[func], buf, addr, len);
- if (
-#ifdef BCMSDIOH_TXGLOM
- !need_txglom &&
-#endif
- TRUE) {
- pkt_len = sdioh_request_packet_align(pkt_len, write,
- func, blk_size);
-
- pad = pkt_len - PKTLEN(sd->osh, pnext);
-
- if (pad > 0) {
- if (func == SDIO_FUNC_2) {
- sd_err(("%s: padding is unexpected! pkt_len %d,"
- " PKTLEN %d lft_len %d %s\n",
- __FUNCTION__, pkt_len, PKTLEN(sd->osh, pnext),
- lft_len, write ? "Write" : "Read"));
- }
- if (PKTTAILROOM(sd->osh, pkt) < pad) {
- sd_info(("%s: insufficient tailroom %d, pad %d,"
- " lft_len %d pktlen %d, func %d %s\n",
- __FUNCTION__, (int)PKTTAILROOM(sd->osh, pkt),
- pad, lft_len, PKTLEN(sd->osh, pnext), func,
- write ? "W" : "R"));
- if (PKTPADTAILROOM(sd->osh, pkt, pad)) {
- sd_err(("%s: padding error size %d.\n",
- __FUNCTION__, pad));
- return SDIOH_API_RC_FAIL;
- }
- }
- }
- }
+ sdio_release_host(sd->func[func]);
- if ((write) && (!fifo))
- err_ret = sdio_memcpy_toio(
- gInstance->func[func],
- addr, buf, pkt_len);
- else if (write)
- err_ret = sdio_memcpy_toio(
- gInstance->func[func],
- addr, buf, pkt_len);
- else if (fifo)
- err_ret = sdio_readsb(
- gInstance->func[func],
- buf, addr, pkt_len);
- else
- err_ret = sdio_memcpy_fromio(
- gInstance->func[func],
- buf, addr, pkt_len);
-
- if (err_ret)
- sd_err(("%s: %s FAILED %p[%d], addr=0x%05x, pkt_len=%d, ERR=%d\n",
- __FUNCTION__,
- (write) ? "TX" : "RX",
- pnext, SGCount, addr, pkt_len, err_ret));
- else
- sd_trace(("%s: %s xfr'd %p[%d], addr=0x%05x, len=%d\n",
- __FUNCTION__,
- (write) ? "TX" : "RX",
- pnext, SGCount, addr, pkt_len));
-
- if (!fifo)
- addr += pkt_len;
- SGCount ++;
- }
- sdio_release_host(gInstance->func[func]);
- }
-#ifdef BCMSDIOH_TXGLOM
- if (localbuf)
- MFREE(sd->osh, localbuf, lft_len);
-#endif /* BCMSDIOH_TXGLOM */
+ if (err_ret)
+ sd_err(("%s: %s FAILED %p, addr=0x%05x, pkt_len=%d, ERR=%d\n", __FUNCTION__,
+ (write) ? "TX" : "RX", buf, addr, len, err_ret));
+ else
+ sd_trace(("%s: %s xfr'd %p, addr=0x%05x, len=%d\n", __FUNCTION__,
+ (write) ? "TX" : "RX", buf, addr, len));
sd_trace(("%s: Exit\n", __FUNCTION__));
return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
*/
extern SDIOH_API_RC
sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint write, uint func,
- uint addr, uint reg_width, uint buflen_u, uint8 *buffer, void *pkt)
+ uint addr, uint reg_width, uint buf_len, uint8 *buffer, void *pkt)
{
- SDIOH_API_RC Status;
+ SDIOH_API_RC status;
void *tmppkt;
- void *orig_buf = NULL;
- uint copylen = 0;
sd_trace(("%s: Enter\n", __FUNCTION__));
-
DHD_PM_RESUME_WAIT(sdioh_request_buffer_wait);
DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
- if (pkt == NULL) {
- /* Case 1: we don't have a packet. */
- orig_buf = buffer;
- copylen = buflen_u;
- } else if ((ulong)PKTDATA(sd->osh, pkt) & DMA_ALIGN_MASK) {
- /* Case 2: We have a packet, but it is unaligned.
- * in this case, we cannot have a chain.
+ if (pkt) {
+		/* packet chain, only used for tx/rx glom; every packet length
+		 * is aligned and the total length is a multiple of the block size
*/
- ASSERT(PKTNEXT(sd->osh, pkt) == NULL);
+ if (PKTNEXT(sd->osh, pkt))
+ return sdioh_request_packet_chain(sd, fix_inc, write, func, addr, pkt);
- orig_buf = PKTDATA(sd->osh, pkt);
- copylen = PKTLEN(sd->osh, pkt);
+ /* non-glom mode, ignore the buffer parameter and use the packet pointer
+ * (this shouldn't happen)
+ */
+ buffer = PKTDATA(sd->osh, pkt);
+ buf_len = PKTLEN(sd->osh, pkt);
}
- tmppkt = pkt;
- if (copylen) {
- tmppkt = PKTGET_STATIC(sd->osh, copylen, write ? TRUE : FALSE);
- if (tmppkt == NULL) {
- sd_err(("%s: PKTGET failed: len %d\n", __FUNCTION__, copylen));
- return SDIOH_API_RC_FAIL;
- }
- /* For a write, copy the buffer data into the packet. */
- if (write)
- bcopy(orig_buf, PKTDATA(sd->osh, tmppkt), copylen);
- }
+ ASSERT(buffer);
+
+	/* if both the buffer and the length are aligned, use them directly to avoid a memory copy */
+ if (((ulong)buffer & DMA_ALIGN_MASK) == 0 && (buf_len & DMA_ALIGN_MASK) == 0)
+ return sdioh_buffer_tofrom_bus(sd, fix_inc, write, func, addr, buffer, buf_len);
- Status = sdioh_request_packet(sd, fix_inc, write, func, addr, tmppkt);
+ sd_trace(("%s: [%d] doing memory copy buf=%p, len=%d\n",
+ __FUNCTION__, write, buffer, buf_len));
- if (copylen) {
- /* For a read, copy the packet data back to the buffer. */
- if (!write)
- bcopy(PKTDATA(sd->osh, tmppkt), orig_buf, PKTLEN(sd->osh, tmppkt));
- PKTFREE_STATIC(sd->osh, tmppkt, write ? TRUE : FALSE);
+ /* otherwise, a memory copy is needed as the input buffer is not aligned */
+ tmppkt = PKTGET_STATIC(sd->osh, buf_len + DEFAULT_SDIO_F2_BLKSIZE, write ? TRUE : FALSE);
+ if (tmppkt == NULL) {
+ sd_err(("%s: PKTGET failed: len %d\n", __FUNCTION__, buf_len));
+ return SDIOH_API_RC_FAIL;
}
- return (Status);
+ if (write)
+ bcopy(buffer, PKTDATA(sd->osh, tmppkt), buf_len);
+
+ status = sdioh_buffer_tofrom_bus(sd, fix_inc, write, func, addr,
+ PKTDATA(sd->osh, tmppkt), ROUNDUP(buf_len, (DMA_ALIGN_MASK+1)));
+
+ if (!write)
+ bcopy(PKTDATA(sd->osh, tmppkt), buffer, buf_len);
+
+ PKTFREE_STATIC(sd->osh, tmppkt, write ? TRUE : FALSE);
+
+ return status;
}
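
sdioh_request_buffer() above only falls back to the bounce packet when the caller's buffer or length is not DMA aligned, and it rounds the copy length up to the alignment before handing it to sdioh_buffer_tofrom_bus(). A self-contained sketch of that alignment test and round-up, assuming DMA_ALIGN_MASK is (alignment - 1) for a power-of-two alignment:

/* Sketch of the alignment check and round-up; ALIGN_MASK stands in for DMA_ALIGN_MASK. */
#include <stdio.h>
#include <stdint.h>

#define ALIGN_MASK 0x03u			/* 4-byte alignment assumed for illustration */
#define ROUND_UP(x, a) (((x) + (a) - 1u) & ~((a) - 1u))

static int is_dma_aligned(uintptr_t buf, uint32_t len)
{
	/* both the start address and the length must be multiples of the alignment */
	return ((buf & ALIGN_MASK) == 0) && ((len & ALIGN_MASK) == 0);
}

int main(void)
{
	uint8_t raw[64];
	uintptr_t p = (uintptr_t)raw + 1;	/* deliberately misaligned */

	printf("aligned: %d\n", is_dma_aligned(p, 30));
	printf("rounded length: %u\n", ROUND_UP(30u, ALIGN_MASK + 1u));	/* -> 32 */
	return 0;
}
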
/* this function performs "abort" for both of host & device */
{
sdioh_info_t *sd;
- sd_trace(("bcmsdh_sdmmc: ***IRQHandler\n"));
- sd = gInstance->sd;
+ sd = sdio_get_drvdata(func);
ASSERT(sd != NULL);
- sdio_release_host(gInstance->func[0]);
+ sdio_release_host(sd->func[0]);
if (sd->use_client_ints) {
sd->intrcount++;
__FUNCTION__, sd->client_intr_enabled, sd->intr_handler));
}
- sdio_claim_host(gInstance->func[0]);
+ sdio_claim_host(sd->func[0]);
}
/* bcmsdh_sdmmc interrupt handler for F2 (dummy handler) */
static void IRQHandlerF2(struct sdio_func *func)
{
- sdioh_info_t *sd;
-
sd_trace(("bcmsdh_sdmmc: ***IRQHandlerF2\n"));
-
- sd = gInstance->sd;
-
- ASSERT(sd != NULL);
- BCM_REFERENCE(sd);
}
#endif /* !defined(OOB_INTR_ONLY) */
#endif /* NOTUSED */
int
-sdioh_start(sdioh_info_t *si, int stage)
+sdioh_start(sdioh_info_t *sd, int stage)
{
int ret;
- sdioh_info_t *sd = gInstance->sd;
if (!sd) {
sd_err(("%s Failed, sd is NULL\n", __FUNCTION__));
	   downloading of the firmware is complete, otherwise polling
	   sdio access will get in the way
*/
- if (gInstance->func[0]) {
+ if (sd->func[0]) {
if (stage == 0) {
/* Since the power to the chip is killed, we will have
re enumerate the device again. Set the block size
2.6.27. The implementation prior to that is buggy, and needs broadcom's
patch for it
*/
- if ((ret = sdio_reset_comm(gInstance->func[0]->card))) {
+ if ((ret = sdio_reset_comm(sd->func[0]->card))) {
sd_err(("%s Failed, error = %d\n", __FUNCTION__, ret));
return ret;
}
sd->use_client_ints = TRUE;
sd->client_block_size[0] = 64;
- if (gInstance->func[1]) {
+ if (sd->func[1]) {
/* Claim host controller */
- sdio_claim_host(gInstance->func[1]);
+ sdio_claim_host(sd->func[1]);
sd->client_block_size[1] = 64;
- if (sdio_set_block_size(gInstance->func[1], 64)) {
- sd_err(("bcmsdh_sdmmc: Failed to set F1 blocksize\n"));
+ ret = sdio_set_block_size(sd->func[1], 64);
+ if (ret) {
+ sd_err(("bcmsdh_sdmmc: Failed to set F1 "
+ "blocksize(%d)\n", ret));
}
/* Release host controller F1 */
- sdio_release_host(gInstance->func[1]);
+ sdio_release_host(sd->func[1]);
}
- if (gInstance->func[2]) {
+ if (sd->func[2]) {
/* Claim host controller F2 */
- sdio_claim_host(gInstance->func[2]);
+ sdio_claim_host(sd->func[2]);
sd->client_block_size[2] = sd_f2_blocksize;
- if (sdio_set_block_size(gInstance->func[2],
- sd_f2_blocksize)) {
+ ret = sdio_set_block_size(sd->func[2], sd_f2_blocksize);
+ if (ret) {
sd_err(("bcmsdh_sdmmc: Failed to set F2 "
- "blocksize to %d\n", sd_f2_blocksize));
+ "blocksize to %d(%d)\n", sd_f2_blocksize, ret));
}
/* Release host controller F2 */
- sdio_release_host(gInstance->func[2]);
+ sdio_release_host(sd->func[2]);
}
sdioh_sdmmc_card_enablefuncs(sd);
}
} else {
#if !defined(OOB_INTR_ONLY)
- sdio_claim_host(gInstance->func[0]);
- if (gInstance->func[2])
- sdio_claim_irq(gInstance->func[2], IRQHandlerF2);
- if (gInstance->func[1])
- sdio_claim_irq(gInstance->func[1], IRQHandler);
- sdio_release_host(gInstance->func[0]);
+ sdio_claim_host(sd->func[0]);
+ if (sd->func[2])
+ sdio_claim_irq(sd->func[2], IRQHandlerF2);
+ if (sd->func[1])
+ sdio_claim_irq(sd->func[1], IRQHandler);
+ sdio_release_host(sd->func[0]);
#else /* defined(OOB_INTR_ONLY) */
#if defined(HW_OOB)
- sdioh_enable_func_intr();
+ sdioh_enable_func_intr(sd);
#endif
- bcmsdh_oob_intr_set(TRUE);
+ bcmsdh_oob_intr_set(sd->bcmsdh, TRUE);
#endif /* !defined(OOB_INTR_ONLY) */
}
}
}
int
-sdioh_stop(sdioh_info_t *si)
+sdioh_stop(sdioh_info_t *sd)
{
/* MSM7201A Android sdio stack has bug with interrupt
	   So internally within SDIO stack they are polling
unregister interrupt with SDIO stack to stop the
polling
*/
- if (gInstance->func[0]) {
+ if (sd->func[0]) {
#if !defined(OOB_INTR_ONLY)
- sdio_claim_host(gInstance->func[0]);
- if (gInstance->func[1])
- sdio_release_irq(gInstance->func[1]);
- if (gInstance->func[2])
- sdio_release_irq(gInstance->func[2]);
- sdio_release_host(gInstance->func[0]);
+ sdio_claim_host(sd->func[0]);
+ if (sd->func[1])
+ sdio_release_irq(sd->func[1]);
+ if (sd->func[2])
+ sdio_release_irq(sd->func[2]);
+ sdio_release_host(sd->func[0]);
#else /* defined(OOB_INTR_ONLY) */
#if defined(HW_OOB)
- sdioh_disable_func_intr();
+ sdioh_disable_func_intr(sd);
#endif
- bcmsdh_oob_intr_set(FALSE);
+ bcmsdh_oob_intr_set(sd->bcmsdh, FALSE);
#endif /* !defined(OOB_INTR_ONLY) */
}
else
/*
* BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
*
- * Copyright (C) 1999-2013, Broadcom Corporation
+ * Copyright (C) 1999-2014, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: bcmsdh_sdmmc_linux.c 404103 2013-05-23 20:07:27Z $
+ * $Id: bcmsdh_sdmmc_linux.c 434777 2013-11-07 09:30:27Z $
*/
#include <typedefs.h>
#include <linux/mmc/core.h>
#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
-#include <dhd_config.h>
+#include <dhd_linux.h>
+#include <bcmsdh_sdmmc.h>
+#include <dhd_dbg.h>
#if !defined(SDIO_VENDOR_ID_BROADCOM)
#define SDIO_VENDOR_ID_BROADCOM 0x02d0
#define SDIO_DEVICE_ID_BROADCOM_43239 43239
#endif /* !defined(SDIO_DEVICE_ID_BROADCOM_43239) */
-
-#include <bcmsdh_sdmmc.h>
-
-#include <dhd_dbg.h>
-
-#ifdef WL_CFG80211
extern void wl_cfg80211_set_parent_dev(void *dev);
-#endif
-
extern void sdioh_sdmmc_devintr_off(sdioh_info_t *sd);
extern void sdioh_sdmmc_devintr_on(sdioh_info_t *sd);
-extern int dhd_os_check_wakelock(void *dhdp);
-extern int dhd_os_check_if_up(void *dhdp);
-extern void *bcmsdh_get_drvdata(void);
+extern void* bcmsdh_probe(osl_t *osh, void *dev, void *sdioh, void *adapter_info, uint bus_type,
+ uint bus_num, uint slot_num);
+extern int bcmsdh_remove(bcmsdh_info_t *bcmsdh);
int sdio_function_init(void);
void sdio_function_cleanup(void);
/* Maximum number of bcmsdh_sdmmc devices supported by driver */
#define BCMSDH_SDMMC_MAX_DEVICES 1
-extern int bcmsdh_probe(struct device *dev);
-extern int bcmsdh_remove(struct device *dev);
extern volatile bool dhd_mmc_suspend;
+static int sdioh_probe(struct sdio_func *func)
+{
+ int host_idx = func->card->host->index;
+ uint32 rca = func->card->rca;
+ wifi_adapter_info_t *adapter;
+ osl_t *osh = NULL;
+ sdioh_info_t *sdioh = NULL;
+
+ sd_err(("bus num (host idx)=%d, slot num (rca)=%d\n", host_idx, rca));
+ adapter = dhd_wifi_platform_get_adapter(SDIO_BUS, host_idx, rca);
+ if (adapter != NULL)
+ sd_err(("found adapter info '%s'\n", adapter->name));
+ else
+ sd_err(("can't find adapter info for this chip\n"));
+
+#ifdef WL_CFG80211
+ wl_cfg80211_set_parent_dev(&func->dev);
+#endif
+
+ /* allocate SDIO Host Controller state info */
+ osh = osl_attach(&func->dev, SDIO_BUS, TRUE);
+ if (osh == NULL) {
+ sd_err(("%s: osl_attach failed\n", __FUNCTION__));
+ goto fail;
+ }
+ osl_static_mem_init(osh, adapter);
+ sdioh = sdioh_attach(osh, func);
+ if (sdioh == NULL) {
+ sd_err(("%s: sdioh_attach failed\n", __FUNCTION__));
+ goto fail;
+ }
+ sdioh->bcmsdh = bcmsdh_probe(osh, &func->dev, sdioh, adapter, SDIO_BUS, host_idx, rca);
+ if (sdioh->bcmsdh == NULL) {
+ sd_err(("%s: bcmsdh_probe failed\n", __FUNCTION__));
+ goto fail;
+ }
+
+ sdio_set_drvdata(func, sdioh);
+ return 0;
+
+fail:
+ if (sdioh != NULL)
+ sdioh_detach(osh, sdioh);
+ if (osh != NULL)
+ osl_detach(osh);
+ return -ENOMEM;
+}
+
+static void sdioh_remove(struct sdio_func *func)
+{
+ sdioh_info_t *sdioh;
+ osl_t *osh;
+
+ sdioh = sdio_get_drvdata(func);
+ if (sdioh == NULL) {
+ sd_err(("%s: error, no sdioh handler found\n", __FUNCTION__));
+ return;
+ }
+
+ osh = sdioh->osh;
+ bcmsdh_remove(sdioh->bcmsdh);
+ sdioh_detach(osh, sdioh);
+ osl_detach(osh);
+}
+
static int bcmsdh_sdmmc_probe(struct sdio_func *func,
const struct sdio_device_id *id)
{
int ret = 0;
- static struct sdio_func sdio_func_0;
- if (!gInstance)
+ if (func == NULL)
return -EINVAL;
- if (func) {
- sd_trace(("bcmsdh_sdmmc: %s Enter\n", __FUNCTION__));
- sd_trace(("sdio_bcmsdh: func->class=%x\n", func->class));
- sd_trace(("sdio_vendor: 0x%04x\n", func->vendor));
- sd_trace(("sdio_device: 0x%04x\n", func->device));
- sd_trace(("Function#: 0x%04x\n", func->num));
-
- if (func->num == 1) {
- sdio_func_0.num = 0;
- sdio_func_0.card = func->card;
- gInstance->func[0] = &sdio_func_0;
- if(func->device == 0x4) { /* 4318 */
- gInstance->func[2] = NULL;
- sd_trace(("NIC found, calling bcmsdh_probe...\n"));
- ret = bcmsdh_probe(&func->dev);
- }
- }
-
- gInstance->func[func->num] = func;
-
- if (func->num == 2) {
- #ifdef WL_CFG80211
- wl_cfg80211_set_parent_dev(&func->dev);
- #endif
- sd_trace(("F2 found, calling bcmsdh_probe...\n"));
- ret = bcmsdh_probe(&func->dev);
- if (ret < 0)
- gInstance->func[2] = NULL;
- }
- } else {
- ret = -ENODEV;
- }
-#ifdef POWER_OFF_IN_SUSPEND
- dhd_conf_register_wifi_suspend(func);
-#endif
+ sd_err(("bcmsdh_sdmmc: %s Enter\n", __FUNCTION__));
+ sd_info(("sdio_bcmsdh: func->class=%x\n", func->class));
+ sd_info(("sdio_vendor: 0x%04x\n", func->vendor));
+ sd_info(("sdio_device: 0x%04x\n", func->device));
+ sd_info(("Function#: 0x%04x\n", func->num));
+
+ gInstance->func[func->num] = func;
+
+ /* 4318 doesn't have function 2 */
+ if ((func->num == 2) || (func->num == 1 && func->device == 0x4))
+ ret = sdioh_probe(func);
return ret;
}
static void bcmsdh_sdmmc_remove(struct sdio_func *func)
{
- if (func) {
- sd_trace(("bcmsdh_sdmmc: %s Enter\n", __FUNCTION__));
- sd_info(("sdio_bcmsdh: func->class=%x\n", func->class));
- sd_info(("sdio_vendor: 0x%04x\n", func->vendor));
- sd_info(("sdio_device: 0x%04x\n", func->device));
- sd_info(("Function#: 0x%04x\n", func->num));
-
-#ifdef POWER_OFF_IN_SUSPEND
- dhd_conf_unregister_wifi_suspend(func);
-#endif
- if (gInstance->func[2]) {
- sd_trace(("F2 found, calling bcmsdh_remove...\n"));
- bcmsdh_remove(&func->dev);
- gInstance->func[2] = NULL;
- }
- if (func->num == 1) {
- sdio_claim_host(func);
- sdio_disable_func(func);
- sdio_release_host(func);
- gInstance->func[1] = NULL;
- }
+ if (func == NULL) {
+ sd_err(("%s is called with NULL SDIO function pointer\n", __FUNCTION__));
+ return;
}
+
+ sd_trace(("bcmsdh_sdmmc: %s Enter\n", __FUNCTION__));
+ sd_info(("sdio_bcmsdh: func->class=%x\n", func->class));
+ sd_info(("sdio_vendor: 0x%04x\n", func->vendor));
+ sd_info(("sdio_device: 0x%04x\n", func->device));
+ sd_info(("Function#: 0x%04x\n", func->num));
+
+ if ((func->num == 2) || (func->num == 1 && func->device == 0x4))
+ sdioh_remove(func);
}
/* devices we support, null terminated */
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM)
static int bcmsdh_sdmmc_suspend(struct device *pdev)
{
+ int err;
+ sdioh_info_t *sdioh;
struct sdio_func *func = dev_to_sdio_func(pdev);
-#ifndef POWER_OFF_IN_SUSPEND
mmc_pm_flag_t sdio_flags;
- int ret;
-#endif
+ printk("%s Enter\n", __FUNCTION__);
if (func->num != 2)
return 0;
- sd_trace(("%s Enter\n", __FUNCTION__));
- if (dhd_os_check_wakelock(bcmsdh_get_drvdata()))
- return -EBUSY;
+ sdioh = sdio_get_drvdata(func);
+ err = bcmsdh_suspend(sdioh->bcmsdh);
+ if (err) {
+ printk("%s bcmsdh_suspend err=%d\n", __FUNCTION__, err);
+ return err;
+ }
-#ifdef POWER_OFF_IN_SUSPEND
- dhd_conf_wifi_suspend(func);
-#else
sdio_flags = sdio_get_host_pm_caps(func);
-
if (!(sdio_flags & MMC_PM_KEEP_POWER)) {
sd_err(("%s: can't keep power while host is suspended\n", __FUNCTION__));
return -EINVAL;
}
/* keep power while host suspended */
- ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
- if (ret) {
+ err = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
+ if (err) {
sd_err(("%s: error while trying to keep power\n", __FUNCTION__));
- return ret;
+ return err;
}
#if defined(OOB_INTR_ONLY)
- bcmsdh_oob_intr_set(0);
-#endif
+ bcmsdh_oob_intr_set(sdioh->bcmsdh, FALSE);
#endif
dhd_mmc_suspend = TRUE;
smp_mb();
+ printk("%s Exit\n", __FUNCTION__);
return 0;
}
static int bcmsdh_sdmmc_resume(struct device *pdev)
{
-#if defined(OOB_INTR_ONLY) || defined(POWER_OFF_IN_SUSPEND)
- struct sdio_func *func = dev_to_sdio_func(pdev);
+#if defined(OOB_INTR_ONLY)
+ sdioh_info_t *sdioh;
#endif
- sd_trace(("%s Enter\n", __FUNCTION__));
+ struct sdio_func *func = dev_to_sdio_func(pdev);
+
+ printk("%s Enter\n", __FUNCTION__);
+ if (func->num != 2)
+ return 0;
+
dhd_mmc_suspend = FALSE;
-
-#ifdef POWER_OFF_IN_SUSPEND
- gInstance->func[func->num] = func;
-#else
#if defined(OOB_INTR_ONLY)
- if ((func->num == 2) && dhd_os_check_if_up(bcmsdh_get_drvdata()))
- bcmsdh_oob_intr_set(1);
+ sdioh = sdio_get_drvdata(func);
+ bcmsdh_resume(sdioh->bcmsdh);
#endif
-#endif
+
smp_mb();
+ printk("%s Exit\n", __FUNCTION__);
return 0;
}
#if defined(BCMLXSDMMC)
static struct semaphore *notify_semaphore = NULL;
-/*
+
static int dummy_probe(struct sdio_func *func,
const struct sdio_device_id *id)
{
+ if (func && (func->num != 2)) {
+ return 0;
+ }
+
if (notify_semaphore)
up(notify_semaphore);
return 0;
.name = "dummy_sdmmc",
.id_table = bcmsdh_sdmmc_ids,
};
-*/
+
int sdio_func_reg_notify(void* semaphore)
{
notify_semaphore = semaphore;
- if (notify_semaphore)
- up(notify_semaphore);
- return 0;
- //return sdio_register_driver(&dummy_sdmmc_driver);
+ return sdio_register_driver(&dummy_sdmmc_driver);
}
void sdio_func_unreg_notify(void)
{
- //sdio_unregister_driver(&dummy_sdmmc_driver);
+ OSL_SLEEP(15);
+ sdio_unregister_driver(&dummy_sdmmc_driver);
}
#endif /* defined(BCMLXSDMMC) */
spinlock_t lock;
};
-
-int
-sdioh_sdmmc_osinit(sdioh_info_t *sd)
-{
- struct sdos_info *sdos;
-
- if (!sd)
- return BCME_BADARG;
-
- sdos = (struct sdos_info*)MALLOC(sd->osh, sizeof(struct sdos_info));
- sd->sdos_info = (void*)sdos;
- if (sdos == NULL)
- return BCME_NOMEM;
-
- sdos->sd = sd;
- spin_lock_init(&sdos->lock);
- return BCME_OK;
-}
-
-void
-sdioh_sdmmc_osfree(sdioh_info_t *sd)
-{
- struct sdos_info *sdos;
- ASSERT(sd && sd->sdos_info);
-
- sdos = (struct sdos_info *)sd->sdos_info;
- MFREE(sd->osh, sdos, sizeof(struct sdos_info));
-}
-
/* Interrupt enable/disable */
SDIOH_API_RC
sdioh_interrupt_set(sdioh_info_t *sd, bool enable)
{
- ulong flags;
- struct sdos_info *sdos;
-
if (!sd)
return BCME_BADARG;
sd_trace(("%s: %s\n", __FUNCTION__, enable ? "Enabling" : "Disabling"));
-
- sdos = (struct sdos_info *)sd->sdos_info;
- ASSERT(sdos);
-
-#if !defined(OOB_INTR_ONLY)
- if (enable && !(sd->intr_handler && sd->intr_handler_arg)) {
- sd_err(("%s: no handler registered, will not enable\n", __FUNCTION__));
- return SDIOH_API_RC_FAIL;
- }
-#endif /* !defined(OOB_INTR_ONLY) */
-
- /* Ensure atomicity for enable/disable calls */
- spin_lock_irqsave(&sdos->lock, flags);
-
- sd->client_intr_enabled = enable;
- if (enable) {
- sdioh_sdmmc_devintr_on(sd);
- } else {
- sdioh_sdmmc_devintr_off(sd);
- }
-
- spin_unlock_irqrestore(&sdos->lock, flags);
-
return SDIOH_API_RC_SUCCESS;
}
-
#ifdef BCMSDH_MODULE
static int __init
bcmsdh_module_init(void)
/*
* module init
*/
-int sdio_function_init(void)
+int bcmsdh_register_client_driver(void)
{
- int error = 0;
- sd_trace(("bcmsdh_sdmmc: %s Enter\n", __FUNCTION__));
-
gInstance = kzalloc(sizeof(BCMSDH_SDMMC_INSTANCE), GFP_KERNEL);
if (!gInstance)
return -ENOMEM;
- error = sdio_register_driver(&bcmsdh_sdmmc_driver);
- if (error) {
- kfree(gInstance);
- gInstance = NULL;
- }
-
- return error;
+ return sdio_register_driver(&bcmsdh_sdmmc_driver);
}
/*
* module cleanup
*/
-extern int bcmsdh_remove(struct device *dev);
-void sdio_function_cleanup(void)
+void bcmsdh_unregister_client_driver(void)
{
- sd_trace(("%s Enter\n", __FUNCTION__));
-
-
sdio_unregister_driver(&bcmsdh_sdmmc_driver);
-
- if (gInstance) {
+ if (gInstance)
kfree(gInstance);
- gInstance = NULL;
- }
}
* Driver O/S-independent utility routines
*
* $Copyright Open Broadcom Corporation$
- * $Id: bcmutils.c 412804 2013-07-16 16:26:39Z $
+ * $Id: bcmutils.c 488316 2014-06-30 15:22:21Z $
*/
#include <bcm_cfg.h>
#include <bcm_osl.h>
#endif
+#ifndef ASSERT
+#define ASSERT(exp)
+#endif
#endif /* !BCMDRIVER */
#include <proto/bcmip.h>
#include <proto/802.1d.h>
#include <proto/802.11.h>
+
+
void *_bcmutils_dummy_fn = NULL;
+#ifdef CUSTOM_DSCP_TO_PRIO_MAPPING
+#define CUST_IPV4_TOS_PREC_MASK 0x3F
+#define DCSP_MAX_VALUE 64
+/* 0:BE, 1:BK, 2:RESV(BK), 3:EE, 4:CL, 5:VI, 6:VO, 7:NC */
+int dscp2priomap[DCSP_MAX_VALUE]=
+{
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, /* BK->BE */
+ 2, 0, 0, 0, 0, 0, 0, 0,
+ 3, 0, 0, 0, 0, 0, 0, 0,
+ 4, 0, 0, 0, 0, 0, 0, 0,
+ 5, 0, 0, 0, 0, 0, 0, 0,
+ 6, 0, 0, 0, 0, 0, 0, 0,
+ 7, 0, 0, 0, 0, 0, 0, 0
+};
+#endif /* CUSTOM_DSCP_TO_PRIO_MAPPING */
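
The dscp2priomap[] table added above maps a 6-bit DSCP value to an 802.1D priority. A small self-contained sketch of how such a table would typically be consulted, assuming the DSCP field is taken from the upper six bits of the IPv4 TOS byte; the lookup helper and its name are illustrative, not the driver's code.

/* Sketch of a DSCP-to-priority lookup; only the CS class points are non-zero here. */
#include <stdio.h>
#include <stdint.h>

static const uint8_t dscp_to_prio[64] = {
	[16] = 2,	/* CS2 */
	[24] = 3,	/* CS3 */
	[32] = 4,	/* CS4 */
	[40] = 5,	/* CS5 */
	[48] = 6,	/* CS6 */
	[56] = 7,	/* CS7 / network control */
};

static unsigned tos_to_prio(uint8_t tos)
{
	return dscp_to_prio[(tos >> 2) & 0x3F];	/* DSCP = upper six bits of TOS */
}

int main(void)
{
	printf("TOS 0xE0 (DSCP 56, CS7) -> prio %u\n", tos_to_prio(0xE0));
	return 0;
}
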
+
+
#ifdef BCMDRIVER
{
uint n, ret = 0;
+
/* skip 'offset' bytes */
for (; p && offset; p = PKTNEXT(osh, p)) {
if (offset < (uint)PKTLEN(osh, p))
for (; p; p = PKTNEXT(osh, p)) {
len = PKTLEN(osh, p);
total += len;
+#ifdef BCMLFRAG
+ if (BCMLFRAG_ENAB()) {
+ if (PKTISFRAG(osh, p)) {
+ total += PKTFRAGTOTLEN(osh, p);
+ }
+ }
+#endif
}
return (total);
{
uint cnt;
- for (cnt = 0; p; p = PKTNEXT(osh, p))
+ for (cnt = 0; p; p = PKTNEXT(osh, p)) {
cnt++;
+#ifdef BCMLFRAG
+ if (BCMLFRAG_ENAB()) {
+ if (PKTISFRAG(osh, p)) {
+ cnt += PKTFRAGTOTNUM(osh, p);
+ }
+ }
+#endif
+ }
return cnt;
}
return p;
}
-/*
- * osl multiple-precedence packet queue
- * hi_prec is always >= the number of the highest non-empty precedence
- */
-void * BCMFASTPATH
-pktq_penq(struct pktq *pq, int prec, void *p)
-{
- struct pktq_prec *q;
-
- ASSERT(prec >= 0 && prec < pq->num_prec);
- ASSERT(PKTLINK(p) == NULL); /* queueing chains not allowed */
-
- ASSERT(!pktq_full(pq));
- ASSERT(!pktq_pfull(pq, prec));
-
- q = &pq->q[prec];
-
- if (q->head)
- PKTSETLINK(q->tail, p);
- else
- q->head = p;
-
- q->tail = p;
- q->len++;
-
- pq->len++;
-
- if (pq->hi_prec < prec)
- pq->hi_prec = (uint8)prec;
-
- return p;
-}
-
-void * BCMFASTPATH
-pktq_penq_head(struct pktq *pq, int prec, void *p)
-{
- struct pktq_prec *q;
-
- ASSERT(prec >= 0 && prec < pq->num_prec);
- ASSERT(PKTLINK(p) == NULL); /* queueing chains not allowed */
-
- ASSERT(!pktq_full(pq));
- ASSERT(!pktq_pfull(pq, prec));
-
- q = &pq->q[prec];
-
- if (q->head == NULL)
- q->tail = p;
-
- PKTSETLINK(p, q->head);
- q->head = p;
- q->len++;
-
- pq->len++;
-
- if (pq->hi_prec < prec)
- pq->hi_prec = (uint8)prec;
-
- return p;
-}
-
-void * BCMFASTPATH
-pktq_pdeq(struct pktq *pq, int prec)
-{
- struct pktq_prec *q;
- void *p;
-
- ASSERT(prec >= 0 && prec < pq->num_prec);
-
- q = &pq->q[prec];
-
- if ((p = q->head) == NULL)
- return NULL;
-
- if ((q->head = PKTLINK(p)) == NULL)
- q->tail = NULL;
-
- q->len--;
-
- pq->len--;
-
- PKTSETLINK(p, NULL);
-
- return p;
-}
-
-void * BCMFASTPATH
-pktq_pdeq_prev(struct pktq *pq, int prec, void *prev_p)
-{
- struct pktq_prec *q;
- void *p;
-
- ASSERT(prec >= 0 && prec < pq->num_prec);
-
- q = &pq->q[prec];
-
- if (prev_p == NULL)
- return NULL;
-
- if ((p = PKTLINK(prev_p)) == NULL)
- return NULL;
-
- q->len--;
-
- pq->len--;
-
- PKTSETLINK(prev_p, PKTLINK(p));
- PKTSETLINK(p, NULL);
-
- return p;
-}
-
-void * BCMFASTPATH
-pktq_pdeq_with_fn(struct pktq *pq, int prec, ifpkt_cb_t fn, int arg)
-{
- struct pktq_prec *q;
- void *p, *prev = NULL;
-
- ASSERT(prec >= 0 && prec < pq->num_prec);
-
- q = &pq->q[prec];
- p = q->head;
-
- while (p) {
- if (fn == NULL || (*fn)(p, arg)) {
- break;
- } else {
- prev = p;
- p = PKTLINK(p);
- }
- }
- if (p == NULL)
- return NULL;
-
- if (prev == NULL) {
- if ((q->head = PKTLINK(p)) == NULL)
- q->tail = NULL;
- } else {
- PKTSETLINK(prev, PKTLINK(p));
- }
-
- q->len--;
-
- pq->len--;
-
- PKTSETLINK(p, NULL);
-
- return p;
-}
-
-void * BCMFASTPATH
-pktq_pdeq_tail(struct pktq *pq, int prec)
-{
- struct pktq_prec *q;
- void *p, *prev;
-
- ASSERT(prec >= 0 && prec < pq->num_prec);
-
- q = &pq->q[prec];
-
- if ((p = q->head) == NULL)
- return NULL;
-
- for (prev = NULL; p != q->tail; p = PKTLINK(p))
- prev = p;
-
- if (prev)
- PKTSETLINK(prev, NULL);
- else
- q->head = NULL;
-
- q->tail = prev;
- q->len--;
-
- pq->len--;
-
- return p;
-}
-
-void
-pktq_pflush(osl_t *osh, struct pktq *pq, int prec, bool dir, ifpkt_cb_t fn, int arg)
-{
- struct pktq_prec *q;
- void *p, *prev = NULL;
-
- q = &pq->q[prec];
- p = q->head;
- while (p) {
- if (fn == NULL || (*fn)(p, arg)) {
- bool head = (p == q->head);
- if (head)
- q->head = PKTLINK(p);
- else
- PKTSETLINK(prev, PKTLINK(p));
- PKTSETLINK(p, NULL);
- PKTFREE(osh, p, dir);
- q->len--;
- pq->len--;
- p = (head ? q->head : PKTLINK(prev));
- } else {
- prev = p;
- p = PKTLINK(p);
- }
- }
-
- if (q->head == NULL) {
- ASSERT(q->len == 0);
- q->tail = NULL;
- }
-}
-
-bool BCMFASTPATH
-pktq_pdel(struct pktq *pq, void *pktbuf, int prec)
-{
- struct pktq_prec *q;
- void *p;
-
- ASSERT(prec >= 0 && prec < pq->num_prec);
-
- if (!pktbuf)
- return FALSE;
-
- q = &pq->q[prec];
-
- if (q->head == pktbuf) {
- if ((q->head = PKTLINK(pktbuf)) == NULL)
- q->tail = NULL;
- } else {
- for (p = q->head; p && PKTLINK(p) != pktbuf; p = PKTLINK(p))
- ;
- if (p == NULL)
- return FALSE;
-
- PKTSETLINK(p, PKTLINK(pktbuf));
- if (q->tail == pktbuf)
- q->tail = p;
- }
-
- q->len--;
- pq->len--;
- PKTSETLINK(pktbuf, NULL);
- return TRUE;
-}
-
-void
-pktq_init(struct pktq *pq, int num_prec, int max_len)
-{
- int prec;
-
- ASSERT(num_prec > 0 && num_prec <= PKTQ_MAX_PREC);
-
- /* pq is variable size; only zero out what's requested */
- bzero(pq, OFFSETOF(struct pktq, q) + (sizeof(struct pktq_prec) * num_prec));
-
- pq->num_prec = (uint16)num_prec;
-
- pq->max = (uint16)max_len;
-
- for (prec = 0; prec < num_prec; prec++)
- pq->q[prec].max = pq->max;
-}
-
-void
-pktq_set_max_plen(struct pktq *pq, int prec, int max_len)
-{
- ASSERT(prec >= 0 && prec < pq->num_prec);
-
- if (prec < pq->num_prec)
- pq->q[prec].max = (uint16)max_len;
-}
-
-void * BCMFASTPATH
-pktq_deq(struct pktq *pq, int *prec_out)
-{
- struct pktq_prec *q;
- void *p;
- int prec;
-
- if (pq->len == 0)
- return NULL;
-
- while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
- pq->hi_prec--;
-
- q = &pq->q[prec];
-
- if ((p = q->head) == NULL)
- return NULL;
-
- if ((q->head = PKTLINK(p)) == NULL)
- q->tail = NULL;
-
- q->len--;
-
- pq->len--;
-
- if (prec_out)
- *prec_out = prec;
-
- PKTSETLINK(p, NULL);
-
- return p;
-}
-
-void * BCMFASTPATH
-pktq_deq_tail(struct pktq *pq, int *prec_out)
-{
- struct pktq_prec *q;
- void *p, *prev;
- int prec;
-
- if (pq->len == 0)
- return NULL;
-
- for (prec = 0; prec < pq->hi_prec; prec++)
- if (pq->q[prec].head)
- break;
-
- q = &pq->q[prec];
-
- if ((p = q->head) == NULL)
- return NULL;
-
- for (prev = NULL; p != q->tail; p = PKTLINK(p))
- prev = p;
-
- if (prev)
- PKTSETLINK(prev, NULL);
- else
- q->head = NULL;
-
- q->tail = prev;
- q->len--;
-
- pq->len--;
-
- if (prec_out)
- *prec_out = prec;
-
- PKTSETLINK(p, NULL);
-
- return p;
-}
-
-void *
-pktq_peek(struct pktq *pq, int *prec_out)
-{
- int prec;
-
- if (pq->len == 0)
- return NULL;
-
- while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
- pq->hi_prec--;
-
- if (prec_out)
- *prec_out = prec;
-
- return (pq->q[prec].head);
-}
-
-void *
-pktq_peek_tail(struct pktq *pq, int *prec_out)
-{
- int prec;
-
- if (pq->len == 0)
- return NULL;
-
- for (prec = 0; prec < pq->hi_prec; prec++)
- if (pq->q[prec].head)
- break;
-
- if (prec_out)
- *prec_out = prec;
-
- return (pq->q[prec].tail);
-}
-
-void
-pktq_flush(osl_t *osh, struct pktq *pq, bool dir, ifpkt_cb_t fn, int arg)
-{
- int prec;
-
- /* Optimize flush, if pktq len = 0, just return.
- * pktq len of 0 means pktq's prec q's are all empty.
- */
- if (pq->len == 0) {
- return;
- }
-
- for (prec = 0; prec < pq->num_prec; prec++)
- pktq_pflush(osh, pq, prec, dir, fn, arg);
- if (fn == NULL)
- ASSERT(pq->len == 0);
-}
-
-/* Return sum of lengths of a specific set of precedences */
-int
-pktq_mlen(struct pktq *pq, uint prec_bmp)
-{
- int prec, len;
-
- len = 0;
-
- for (prec = 0; prec <= pq->hi_prec; prec++)
- if (prec_bmp & (1 << prec))
- len += pq->q[prec].len;
-
- return len;
-}
-
-/* Priority peek from a specific set of precedences */
-void * BCMFASTPATH
-pktq_mpeek(struct pktq *pq, uint prec_bmp, int *prec_out)
-{
- struct pktq_prec *q;
- void *p;
- int prec;
-
- if (pq->len == 0)
- {
- return NULL;
- }
- while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
- pq->hi_prec--;
-
- while ((prec_bmp & (1 << prec)) == 0 || pq->q[prec].head == NULL)
- if (prec-- == 0)
- return NULL;
-
- q = &pq->q[prec];
-
- if ((p = q->head) == NULL)
- return NULL;
-
- if (prec_out)
- *prec_out = prec;
-
- return p;
-}
-/* Priority dequeue from a specific set of precedences */
-void * BCMFASTPATH
-pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out)
-{
- struct pktq_prec *q;
- void *p;
- int prec;
-
- if (pq->len == 0)
- return NULL;
-
- while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
- pq->hi_prec--;
-
- while ((pq->q[prec].head == NULL) || ((prec_bmp & (1 << prec)) == 0))
- if (prec-- == 0)
- return NULL;
-
- q = &pq->q[prec];
-
- if ((p = q->head) == NULL)
- return NULL;
-
- if ((q->head = PKTLINK(p)) == NULL)
- q->tail = NULL;
-
- q->len--;
-
- if (prec_out)
- *prec_out = prec;
-
- pq->len--;
-
- PKTSETLINK(p, NULL);
-
- return p;
-}
-
#endif /* BCMDRIVER */
#if !defined(BCMROMOFFLOAD_EXCLUDE_BCMUTILS_FUNCS)
if ((haystack == NULL) || (needle == NULL))
return DISCARD_QUAL(haystack, char);
- nlen = strlen(needle);
- len = strlen(haystack) - nlen + 1;
+ nlen = (int)strlen(needle);
+ len = (int)strlen(haystack) - nlen + 1;
for (i = 0; i < len; i++)
if (memcmp(needle, &haystack[i], nlen) == 0)
return (NULL);
}
+char *
+bcmstrnstr(const char *s, uint s_len, const char *substr, uint substr_len)
+{
+ for (; s_len >= substr_len; s++, s_len--)
+ if (strncmp(s, substr, substr_len) == 0)
+ return DISCARD_QUAL(s, char);
+
+ return NULL;
+}
+
char *
bcmstrcat(char *dest, const char *src)
{
return (i == 6);
}
-#endif /* !BCMROMOFFLOAD_EXCLUDE_BCMUTILS_FUNCS */
+int
+bcm_atoipv4(const char *p, struct ipv4_addr *ip)
+{
-#if defined(CONFIG_USBRNDIS_RETAIL) || defined(NDIS_MINIPORT_DRIVER)
-/* registry routine buffer preparation utility functions:
- * parameter order is like strncpy, but returns count
+ int i = 0;
+ char *c;
+ for (;;) {
+ ip->addr[i++] = (uint8)bcm_strtoul(p, &c, 0);
+ if (*c++ != '.' || i == IPV4_ADDR_LEN)
+ break;
+ p = c;
+ }
+ return (i == IPV4_ADDR_LEN);
+}
+#endif /* !BCMROMOFFLOAD_EXCLUDE_BCMUTILS_FUNCS */
+
+
+#if defined(CONFIG_USBRNDIS_RETAIL) || defined(NDIS_MINIPORT_DRIVER)
+/* registry routine buffer preparation utility functions:
+ * parameter order is like strncpy, but returns count
* of bytes copied. Minimum bytes copied is null char(1)/wchar(2)
*/
ulong
return (buf);
}
+char *
+bcm_ipv6_ntoa(void *ipv6, char *buf)
+{
+ /* Implementing RFC 5952 Sections 4 + 5 */
+ /* Not thoroughly tested */
+ uint16 tmp[8];
+ uint16 *a = &tmp[0];
+ char *p = buf;
+ int i, i_max = -1, cnt = 0, cnt_max = 1;
+ uint8 *a4 = NULL;
+ memcpy((uint8 *)&tmp[0], (uint8 *)ipv6, IPV6_ADDR_LEN);
+
+ for (i = 0; i < IPV6_ADDR_LEN/2; i++) {
+ if (a[i]) {
+ if (cnt > cnt_max) {
+ cnt_max = cnt;
+ i_max = i - cnt;
+ }
+ cnt = 0;
+ } else
+ cnt++;
+ }
+ if (cnt > cnt_max) {
+ cnt_max = cnt;
+ i_max = i - cnt;
+ }
+ if (i_max == 0 &&
+ /* IPv4-translated: ::ffff:0:a.b.c.d */
+ ((cnt_max == 4 && a[4] == 0xffff && a[5] == 0) ||
+ /* IPv4-mapped: ::ffff:a.b.c.d */
+ (cnt_max == 5 && a[5] == 0xffff)))
+ a4 = (uint8*) (a + 6);
+
+ for (i = 0; i < IPV6_ADDR_LEN/2; i++) {
+ if ((uint8*) (a + i) == a4) {
+ snprintf(p, 16, ":%u.%u.%u.%u", a4[0], a4[1], a4[2], a4[3]);
+ break;
+ } else if (i == i_max) {
+ *p++ = ':';
+ i += cnt_max - 1;
+ p[0] = ':';
+ p[1] = '\0';
+ } else {
+ if (i)
+ *p++ = ':';
+ p += snprintf(p, 8, "%x", ntoh16(a[i]));
+ }
+ }
+
+ return buf;
+}
#ifdef BCMDRIVER
void
int priority = 0;
int rc = 0;
- pktdata = (uint8 *)PKTDATA(NULL, pkt);
+ pktdata = (uint8 *)PKTDATA(OSH_NULL, pkt);
ASSERT(ISALIGNED((uintptr)pktdata, sizeof(uint16)));
eh = (struct ether_header *) pktdata;
vlan_tag = ntoh16(evh->vlan_tag);
vlan_prio = (int) (vlan_tag >> VLAN_PRI_SHIFT) & VLAN_PRI_MASK;
- if (evh->ether_type == hton16(ETHER_TYPE_IP)) {
+ if ((evh->ether_type == hton16(ETHER_TYPE_IP)) ||
+ (evh->ether_type == hton16(ETHER_TYPE_IPV6))) {
uint8 *ip_body = pktdata + sizeof(struct ethervlan_header);
uint8 tos_tc = IP_TOS46(ip_body);
dscp_prio = (int)(tos_tc >> IPV4_TOS_PREC_SHIFT);
evh->vlan_tag = hton16(vlan_tag);
rc |= PKTPRIO_UPD;
}
- } else if (eh->ether_type == hton16(ETHER_TYPE_IP)) {
+ } else if ((eh->ether_type == hton16(ETHER_TYPE_IP)) ||
+ (eh->ether_type == hton16(ETHER_TYPE_IPV6))) {
uint8 *ip_body = pktdata + sizeof(struct ether_header);
uint8 tos_tc = IP_TOS46(ip_body);
uint8 dscp = tos_tc >> IPV4_TOS_DSCP_SHIFT;
priority = PRIO_8021D_EE;
break;
default:
+#ifndef CUSTOM_DSCP_TO_PRIO_MAPPING
priority = (int)(tos_tc >> IPV4_TOS_PREC_SHIFT);
+#else
+ priority = (int)dscp2priomap[((tos_tc >> IPV4_TOS_DSCP_SHIFT)
+ & CUST_IPV4_TOS_PREC_MASK)];
+#endif
break;
}
return (rc | priority);
}
+/* Returns TRUE and sets *dscp if an IP header is found, FALSE otherwise.
+ */
+bool BCMFASTPATH
+pktgetdscp(uint8 *pktdata, uint pktlen, uint8 *dscp)
+{
+ struct ether_header *eh;
+ struct ethervlan_header *evh;
+ uint8 *ip_body;
+ bool rc = FALSE;
+
+ /* minimum length is ether header and IP header */
+ if (pktlen < sizeof(struct ether_header) + IPV4_MIN_HEADER_LEN)
+ return FALSE;
+
+ eh = (struct ether_header *) pktdata;
+
+ if (eh->ether_type == HTON16(ETHER_TYPE_IP)) {
+ ip_body = pktdata + sizeof(struct ether_header);
+ *dscp = IP_DSCP46(ip_body);
+ rc = TRUE;
+ }
+ else if (eh->ether_type == HTON16(ETHER_TYPE_8021Q)) {
+ evh = (struct ethervlan_header *)eh;
+
+ /* minimum length is ethervlan header and IP header */
+ if (pktlen >= sizeof(struct ethervlan_header) + IPV4_MIN_HEADER_LEN &&
+ evh->ether_type == HTON16(ETHER_TYPE_IP)) {
+ ip_body = pktdata + sizeof(struct ethervlan_header);
+ *dscp = IP_DSCP46(ip_body);
+ rc = TRUE;
+ }
+ }
+
+ return rc;
+}
+
+/* The 0.5KB string table is not removed by compiler even though it's unused */
static char bcm_undeferrstr[32];
static const char *bcmerrorstrtable[] = BCMERRSTRINGTABLE;
/* iovar table lookup */
+/* could mandate sorted tables and do a binary search */
const bcm_iovar_t*
bcm_iovar_lookup(const bcm_iovar_t *table, const char *name)
{
#endif /* BCMDRIVER */
+uint8 *
+bcm_write_tlv(int type, const void *data, int datalen, uint8 *dst)
+{
+ uint8 *new_dst = dst;
+ bcm_tlv_t *dst_tlv = (bcm_tlv_t *)dst;
+
+ /* dst buffer should always be valid */
+ ASSERT(dst);
+
+ /* data len must be within valid range */
+ ASSERT((datalen >= 0) && (datalen <= BCM_TLV_MAX_DATA_SIZE));
+
+ /* source data buffer pointer should be valid, unless datalen is 0
+ * meaning no data with this TLV
+ */
+ ASSERT((data != NULL) || (datalen == 0));
+
+ /* only do work if the inputs are valid
+ * - must have a dst to write to AND
+ * - datalen must be within range AND
+ * - the source data pointer must be non-NULL if datalen is non-zero
+ * (this last condition detects datalen > 0 with a NULL data pointer)
+ */
+ if ((dst != NULL) &&
+ ((datalen >= 0) && (datalen <= BCM_TLV_MAX_DATA_SIZE)) &&
+ ((data != NULL) || (datalen == 0))) {
+
+ /* write type, len fields */
+ dst_tlv->id = (uint8)type;
+ dst_tlv->len = (uint8)datalen;
+
+ /* if data is present, copy to the output buffer and update
+ * pointer to output buffer
+ */
+ if (datalen > 0) {
+
+ memcpy(dst_tlv->data, data, datalen);
+ }
+
+		/* update the output destination pointer to point past
+ * the TLV written
+ */
+ new_dst = dst + BCM_TLV_HDR_SIZE + datalen;
+ }
+
+ return (new_dst);
+}
+
+uint8 *
+bcm_write_tlv_safe(int type, const void *data, int datalen, uint8 *dst, int dst_maxlen)
+{
+ uint8 *new_dst = dst;
+
+ if ((datalen >= 0) && (datalen <= BCM_TLV_MAX_DATA_SIZE)) {
+
+ /* if len + tlv hdr len is more than destlen, don't do anything
+ * just return the buffer untouched
+ */
+ if ((int)(datalen + BCM_TLV_HDR_SIZE) <= dst_maxlen) {
+
+ new_dst = bcm_write_tlv(type, data, datalen, dst);
+ }
+ }
+
+ return (new_dst);
+}
+
+uint8 *
+bcm_copy_tlv(const void *src, uint8 *dst)
+{
+ uint8 *new_dst = dst;
+ const bcm_tlv_t *src_tlv = (const bcm_tlv_t *)src;
+ uint totlen;
+
+ ASSERT(dst && src);
+ if (dst && src) {
+
+ totlen = BCM_TLV_HDR_SIZE + src_tlv->len;
+ memcpy(dst, src_tlv, totlen);
+ new_dst = dst + totlen;
+ }
+
+ return (new_dst);
+}
+
+
+uint8 *bcm_copy_tlv_safe(const void *src, uint8 *dst, int dst_maxlen)
+{
+ uint8 *new_dst = dst;
+ const bcm_tlv_t *src_tlv = (const bcm_tlv_t *)src;
+
+ ASSERT(src);
+ if (src) {
+ if (bcm_valid_tlv(src_tlv, dst_maxlen)) {
+ new_dst = bcm_copy_tlv(src, dst);
+ }
+ }
+
+ return (new_dst);
+}
+
+
#if !defined(BCMROMOFFLOAD_EXCLUDE_BCMUTILS_FUNCS)
/*******************************************************************************
* crc8
int len;
/* validate current elt */
- if (!bcm_valid_tlv(elt, *buflen))
+ if (!bcm_valid_tlv(elt, *buflen)) {
return NULL;
+ }
/* advance to next elt */
len = elt->len;
*buflen -= (TLV_HDR_LEN + len);
/* validate next elt */
- if (!bcm_valid_tlv(elt, *buflen))
+ if (!bcm_valid_tlv(elt, *buflen)) {
return NULL;
+ }
return elt;
}
int len = elt->len;
/* validate remaining totlen */
- if ((elt->id == key) &&
- (totlen >= (len + TLV_HDR_LEN)))
+ if ((elt->id == key) && (totlen >= (int)(len + TLV_HDR_LEN))) {
+
return (elt);
+ }
elt = (bcm_tlv_t*)((uint8*)elt + (len + TLV_HDR_LEN));
totlen -= (len + TLV_HDR_LEN);
return NULL;
}
+/*
+ * Traverse a string of 1-byte tag/1-byte length/variable-length value
+ * triples, returning a pointer to the substring whose first element
+ * matches tag
+ * return NULL if not found or length field < min_varlen
+ */
+bcm_tlv_t *
+bcm_parse_tlvs_min_bodylen(void *buf, int buflen, uint key, int min_bodylen)
+{
+ bcm_tlv_t * ret = bcm_parse_tlvs(buf, buflen, key);
+ if (ret == NULL || ret->len < min_bodylen) {
+ return NULL;
+ }
+ return ret;
+}
+
/*
* Traverse a string of 1-byte tag/1-byte length/variable-length value
* triples, returning a pointer to the substring whose first element
int len = elt->len;
/* Punt if we start seeing IDs > than target key */
- if (id > key)
+ if (id > key) {
return (NULL);
+ }
/* validate remaining totlen */
- if ((id == key) &&
- (totlen >= (len + TLV_HDR_LEN)))
+ if ((id == key) && (totlen >= (int)(len + TLV_HDR_LEN))) {
return (elt);
+ }
elt = (bcm_tlv_t*)((uint8*)elt + (len + TLV_HDR_LEN));
totlen -= (len + TLV_HDR_LEN);
return (int)(p - buf);
}
+#endif
/* print bytes formatted as hex to a string. return the resulting string length */
int
}
return (int)(p - str);
}
-#endif
/* pretty hex print a contiguous buffer */
void
"CKIP",
"CKIP_MMH",
"WEP_MMH",
- "NALG"
+ "NALG",
#else
- "NALG"
+ "NALG",
"UNDEF",
"UNDEF",
"UNDEF",
#endif /* BCMCCX */
-#ifdef BCMWAPI_WPI
"WAPI",
-#endif /* BCMWAPI_WPI */
+ "PMK",
+ "BIP",
+ "AES_GCM",
+ "AES_CCM256",
+ "AES_GCM256",
+ "BIP_CMAC256",
+ "BIP_GMAC",
+ "BIP_GMAC256",
"UNDEF"
};
uint len, max_len;
char c;
- len = strlen(buf);
+ len = (uint)strlen(buf);
max_len = BUFSIZE_TODUMP_ATONCE;
{
uint len;
- len = strlen(name) + 1;
+ len = (uint)strlen(name) + 1;
if ((len + datalen) > buflen)
return 0;
/* Look for vendor-specific IE with specified OUI and optional type */
bcm_tlv_t *
-find_vendor_ie(void *tlvs, int tlvs_len, const char *voui, uint8 *type, int type_len)
+bcm_find_vendor_ie(void *tlvs, int tlvs_len, const char *voui, uint8 *type, int type_len)
{
bcm_tlv_t *ie;
uint8 ie_len;
ie = (bcm_tlv_t*)tlvs;
/* make sure we are looking at a valid IE */
- if (ie == NULL ||
- !bcm_valid_tlv(ie, tlvs_len))
+ if (ie == NULL || !bcm_valid_tlv(ie, tlvs_len)) {
return NULL;
+ }
/* Walk through the IEs looking for an OUI match */
do {
*r = r0;
}
-#ifndef setbit /* As in the header file */
+#ifndef setbit /* As in the header file */
#ifdef BCMUTILS_BIT_MACROS_USE_FUNCS
/* Set bit in byte array. */
void
}
#endif /* BCMUTILS_BIT_MACROS_USE_FUNCS */
#endif /* setbit */
+
+void
+set_bitrange(void *array, uint start, uint end, uint maxbit)
+{
+ uint startbyte = start/NBBY;
+ uint endbyte = end/NBBY;
+ uint i, startbytelastbit, endbytestartbit;
+
+ if (end >= start) {
+ if (endbyte - startbyte > 1)
+ {
+ startbytelastbit = (startbyte+1)*NBBY - 1;
+ endbytestartbit = endbyte*NBBY;
+ for (i = startbyte+1; i < endbyte; i++)
+ ((uint8 *)array)[i] = 0xFF;
+ for (i = start; i <= startbytelastbit; i++)
+ setbit(array, i);
+ for (i = endbytestartbit; i <= end; i++)
+ setbit(array, i);
+ } else {
+ for (i = start; i <= end; i++)
+ setbit(array, i);
+ }
+ }
+ else {
+ set_bitrange(array, start, maxbit, maxbit);
+ set_bitrange(array, 0, end, maxbit);
+ }
+}
+
+void
+bcm_bitprint32(const uint32 u32)
+{
+ int i;
+ for (i = NBITS(uint32) - 1; i >= 0; i--) {
+ isbitset(u32, i) ? printf("1") : printf("0");
+ if ((i % NBBY) == 0) printf(" ");
+ }
+ printf("\n");
+}
+
+/* calculate checksum for ip header, tcp / udp header / data */
+uint16
+bcm_ip_cksum(uint8 *buf, uint32 len, uint32 sum)
+{
+ while (len > 1) {
+ sum += (buf[0] << 8) | buf[1];
+ buf += 2;
+ len -= 2;
+ }
+
+ if (len > 0) {
+ sum += (*buf) << 8;
+ }
+
+ while (sum >> 16) {
+ sum = (sum & 0xffff) + (sum >> 16);
+ }
+
+ return ((uint16)~sum);
+}
+
+#ifdef BCMDRIVER
+/*
+ * Hierarchical Multiword bitmap based small id allocator.
+ *
+ * Multilevel hierarchy bitmap. (maximum 2 levels)
+ * First hierarchy uses a multiword bitmap to identify 32bit words in the
+ * second hierarchy that have at least a single bit set. Each bit in a word of
+ * the second hierarchy represents a unique ID that may be allocated.
+ *
+ * BCM_MWBMAP_ITEMS_MAX: Maximum number of IDs managed.
+ * BCM_MWBMAP_BITS_WORD: Number of bits in a bitmap word.
+ * BCM_MWBMAP_WORDS_MAX: Maximum number of bitmap words needed for free IDs.
+ * BCM_MWBMAP_WDMAP_MAX: Maximum number of bitmap words identifying the first
+ * non-zero bitmap word carrying at least one free ID.
+ * BCM_MWBMAP_SHIFT_OP: Used in MOD, DIV and MUL operations.
+ * BCM_MWBMAP_INVALID_IDX: Value ~0U is treated as an invalid ID
+ *
+ * Design Notes:
+ * BCM_MWBMAP_USE_CNTSETBITS trades CPU for memory. When it is defined, the
+ * number of set bits is computed on each allocation and deallocation, costing
+ * 4 array-indexed accesses and 3 arithmetic operations. When it is not
+ * defined, a running count of set bits is maintained instead, needing up to
+ * 32 bytes per 1024 IDs; a 4K max ID allocator therefore uses up to 128 bytes
+ * per instantiation. In a memory-limited system, e.g. dongle builds, CPU may
+ * be traded for memory by defining BCM_MWBMAP_USE_CNTSETBITS.
+ *
+ * Note: wd_bitmap[] is statically declared and is not ROM friendly; the array
+ * size is fixed. There is no intention to support allocation of more than 4K
+ * indices. ID allocators for ranges smaller than 4K waste only 12 bytes, with
+ * the savings of not needing an indirect access had the array been dynamically
+ * allocated.
+ */
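A minimal usage sketch of the allocator described above (illustration only, not part of this patch; it assumes the caller already holds a valid osl_t handle, here named osh, and that the function name mwbmap_example is hypothetical):

	static void mwbmap_example(osl_t *osh)
	{
		struct bcm_mwbmap *map;
		uint32 id;

		map = bcm_mwbmap_init(osh, 64);		/* manage ids 0..63 */
		if (map == BCM_MWBMAP_INVALID_HDL)
			return;

		id = bcm_mwbmap_alloc(map);		/* BCM_MWBMAP_INVALID_IDX when no id is free */
		if (id != BCM_MWBMAP_INVALID_IDX) {
			ASSERT(!bcm_mwbmap_isfree(map, id));	/* id is now marked in use */
			bcm_mwbmap_free(map, id);		/* id becomes available again */
		}

		bcm_mwbmap_fini(osh, map);		/* release the allocator itself */
	}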
+#define BCM_MWBMAP_ITEMS_MAX (4 * 1024) /* May increase to 16K */
+
+#define BCM_MWBMAP_BITS_WORD (NBITS(uint32))
+#define BCM_MWBMAP_WORDS_MAX (BCM_MWBMAP_ITEMS_MAX / BCM_MWBMAP_BITS_WORD)
+#define BCM_MWBMAP_WDMAP_MAX (BCM_MWBMAP_WORDS_MAX / BCM_MWBMAP_BITS_WORD)
+#define BCM_MWBMAP_SHIFT_OP (5)
+#define BCM_MWBMAP_MODOP(ix) ((ix) & (BCM_MWBMAP_BITS_WORD - 1))
+#define BCM_MWBMAP_DIVOP(ix) ((ix) >> BCM_MWBMAP_SHIFT_OP)
+#define BCM_MWBMAP_MULOP(ix) ((ix) << BCM_MWBMAP_SHIFT_OP)
+
+/* Redefine PTR() and/or HDL() conversion to invoke audit for debugging */
+#define BCM_MWBMAP_PTR(hdl) ((struct bcm_mwbmap *)(hdl))
+#define BCM_MWBMAP_HDL(ptr) ((void *)(ptr))
+
+#if defined(BCM_MWBMAP_DEBUG)
+#define BCM_MWBMAP_AUDIT(mwb) \
+ do { \
+ ASSERT((mwb != NULL) && \
+ (((struct bcm_mwbmap *)(mwb))->magic == (void *)(mwb))); \
+ bcm_mwbmap_audit(mwb); \
+ } while (0)
+#define MWBMAP_ASSERT(exp) ASSERT(exp)
+#define MWBMAP_DBG(x) printf x
+#else /* !BCM_MWBMAP_DEBUG */
+#define BCM_MWBMAP_AUDIT(mwb) do {} while (0)
+#define MWBMAP_ASSERT(exp) do {} while (0)
+#define MWBMAP_DBG(x)
+#endif /* !BCM_MWBMAP_DEBUG */
+
+
+typedef struct bcm_mwbmap { /* Hierarchical multiword bitmap allocator */
+ uint16 wmaps; /* Total number of words in free wd bitmap */
+ uint16 imaps; /* Total number of words in free id bitmap */
+ int16 ifree; /* Count of free indices. Used only in audits */
+ uint16 total; /* Total indices managed by multiword bitmap */
+
+ void * magic; /* Audit handle parameter from user */
+
+ uint32 wd_bitmap[BCM_MWBMAP_WDMAP_MAX]; /* 1st level bitmap of */
+#if !defined(BCM_MWBMAP_USE_CNTSETBITS)
+ int8 wd_count[BCM_MWBMAP_WORDS_MAX]; /* free id running count, 1st lvl */
+#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
+
+ uint32 id_bitmap[0]; /* Second level bitmap */
+} bcm_mwbmap_t;
+
+/* Incarnate a hierarchical multiword bitmap based small index allocator. */
+struct bcm_mwbmap *
+bcm_mwbmap_init(osl_t *osh, uint32 items_max)
+{
+ struct bcm_mwbmap * mwbmap_p;
+ uint32 wordix, size, words, extra;
+
+ /* Implementation Constraint: Uses 32bit word bitmap */
+ MWBMAP_ASSERT(BCM_MWBMAP_BITS_WORD == 32U);
+ MWBMAP_ASSERT(BCM_MWBMAP_SHIFT_OP == 5U);
+ MWBMAP_ASSERT(ISPOWEROF2(BCM_MWBMAP_ITEMS_MAX));
+ MWBMAP_ASSERT((BCM_MWBMAP_ITEMS_MAX % BCM_MWBMAP_BITS_WORD) == 0U);
+
+ ASSERT(items_max <= BCM_MWBMAP_ITEMS_MAX);
+
+ /* Determine the number of words needed in the multiword bitmap */
+ extra = BCM_MWBMAP_MODOP(items_max);
+ words = BCM_MWBMAP_DIVOP(items_max) + ((extra != 0U) ? 1U : 0U);
+
+ /* Allocate runtime state of multiword bitmap */
+ /* Note: wd_count[] or wd_bitmap[] are not dynamically allocated */
+ size = sizeof(bcm_mwbmap_t) + (sizeof(uint32) * words);
+ mwbmap_p = (bcm_mwbmap_t *)MALLOC(osh, size);
+ if (mwbmap_p == (bcm_mwbmap_t *)NULL) {
+ ASSERT(0);
+ goto error1;
+ }
+ memset(mwbmap_p, 0, size);
+
+ /* Initialize runtime multiword bitmap state */
+ mwbmap_p->imaps = (uint16)words;
+ mwbmap_p->ifree = (int16)items_max;
+ mwbmap_p->total = (uint16)items_max;
+
+ /* Setup magic, for use in audit of handle */
+ mwbmap_p->magic = BCM_MWBMAP_HDL(mwbmap_p);
+
+ /* Setup the second level bitmap of free indices */
+ /* Mark all indices as available */
+ for (wordix = 0U; wordix < mwbmap_p->imaps; wordix++) {
+ mwbmap_p->id_bitmap[wordix] = (uint32)(~0U);
+#if !defined(BCM_MWBMAP_USE_CNTSETBITS)
+ mwbmap_p->wd_count[wordix] = BCM_MWBMAP_BITS_WORD;
+#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
+ }
+
+ /* Ensure that extra indices are tagged as un-available */
+ if (extra) { /* fixup the free ids in last bitmap and wd_count */
+ uint32 * bmap_p = &mwbmap_p->id_bitmap[mwbmap_p->imaps - 1];
+ *bmap_p ^= (uint32)(~0U << extra); /* fixup bitmap */
+#if !defined(BCM_MWBMAP_USE_CNTSETBITS)
+ mwbmap_p->wd_count[mwbmap_p->imaps - 1] = (int8)extra; /* fixup count */
+#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
+ }
+
+ /* Setup the first level bitmap hierarchy */
+ extra = BCM_MWBMAP_MODOP(mwbmap_p->imaps);
+ words = BCM_MWBMAP_DIVOP(mwbmap_p->imaps) + ((extra != 0U) ? 1U : 0U);
+
+ mwbmap_p->wmaps = (uint16)words;
+
+ for (wordix = 0U; wordix < mwbmap_p->wmaps; wordix++)
+ mwbmap_p->wd_bitmap[wordix] = (uint32)(~0U);
+ if (extra) {
+ uint32 * bmap_p = &mwbmap_p->wd_bitmap[mwbmap_p->wmaps - 1];
+ *bmap_p ^= (uint32)(~0U << extra); /* fixup bitmap */
+ }
+
+ return mwbmap_p;
+
+error1:
+ return BCM_MWBMAP_INVALID_HDL;
+}
+
+/* Release resources used by multiword bitmap based small index allocator. */
+void
+bcm_mwbmap_fini(osl_t * osh, struct bcm_mwbmap * mwbmap_hdl)
+{
+ bcm_mwbmap_t * mwbmap_p;
+
+ BCM_MWBMAP_AUDIT(mwbmap_hdl);
+ mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+
+ MFREE(osh, mwbmap_p, sizeof(struct bcm_mwbmap)
+ + (sizeof(uint32) * mwbmap_p->imaps));
+ return;
+}
+
+/* Allocate a unique small index using a multiword bitmap index allocator. */
+uint32 BCMFASTPATH
+bcm_mwbmap_alloc(struct bcm_mwbmap * mwbmap_hdl)
+{
+ bcm_mwbmap_t * mwbmap_p;
+ uint32 wordix, bitmap;
+
+ BCM_MWBMAP_AUDIT(mwbmap_hdl);
+ mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+
+ /* Start with the first hierarchy */
+ for (wordix = 0; wordix < mwbmap_p->wmaps; ++wordix) {
+
+ bitmap = mwbmap_p->wd_bitmap[wordix]; /* get the word bitmap */
+
+ if (bitmap != 0U) {
+
+ uint32 count, bitix, *bitmap_p;
+
+ bitmap_p = &mwbmap_p->wd_bitmap[wordix];
+
+ /* clear all except trailing 1 */
+ bitmap = (uint32)(((int)(bitmap)) & (-((int)(bitmap))));
+ MWBMAP_ASSERT(C_bcm_count_leading_zeros(bitmap) ==
+ bcm_count_leading_zeros(bitmap));
+ bitix = (BCM_MWBMAP_BITS_WORD - 1)
+ - bcm_count_leading_zeros(bitmap); /* use asm clz */
+ wordix = BCM_MWBMAP_MULOP(wordix) + bitix;
+
+ /* Clear bit if wd count is 0, without conditional branch */
+#if defined(BCM_MWBMAP_USE_CNTSETBITS)
+ count = bcm_cntsetbits(mwbmap_p->id_bitmap[wordix]) - 1;
+#else /* ! BCM_MWBMAP_USE_CNTSETBITS */
+ mwbmap_p->wd_count[wordix]--;
+ count = mwbmap_p->wd_count[wordix];
+ MWBMAP_ASSERT(count ==
+ (bcm_cntsetbits(mwbmap_p->id_bitmap[wordix]) - 1));
+#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
+ MWBMAP_ASSERT(count >= 0);
+
+ /* clear wd_bitmap bit if id_map count is 0 */
+ bitmap = (count == 0) << bitix;
+
+ MWBMAP_DBG((
+ "Lvl1: bitix<%02u> wordix<%02u>: %08x ^ %08x = %08x wfree %d",
+ bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) ^ bitmap, count));
+
+ *bitmap_p ^= bitmap;
+
+ /* Use bitix in the second hierarchy */
+ bitmap_p = &mwbmap_p->id_bitmap[wordix];
+
+ bitmap = mwbmap_p->id_bitmap[wordix]; /* get the id bitmap */
+ MWBMAP_ASSERT(bitmap != 0U);
+
+ /* clear all except trailing 1 */
+ bitmap = (uint32)(((int)(bitmap)) & (-((int)(bitmap))));
+ MWBMAP_ASSERT(C_bcm_count_leading_zeros(bitmap) ==
+ bcm_count_leading_zeros(bitmap));
+ bitix = BCM_MWBMAP_MULOP(wordix)
+ + (BCM_MWBMAP_BITS_WORD - 1)
+ - bcm_count_leading_zeros(bitmap); /* use asm clz */
+
+ mwbmap_p->ifree--; /* decrement system wide free count */
+ MWBMAP_ASSERT(mwbmap_p->ifree >= 0);
+
+ MWBMAP_DBG((
+ "Lvl2: bitix<%02u> wordix<%02u>: %08x ^ %08x = %08x ifree %d",
+ bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) ^ bitmap,
+ mwbmap_p->ifree));
+
+ *bitmap_p ^= bitmap; /* mark as allocated = 1b0 */
+
+ return bitix;
+ }
+ }
+
+ ASSERT(mwbmap_p->ifree == 0);
+
+ return BCM_MWBMAP_INVALID_IDX;
+}
+
+/* Force an index at a specified position to be in use */
+void
+bcm_mwbmap_force(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix)
+{
+ bcm_mwbmap_t * mwbmap_p;
+ uint32 count, wordix, bitmap, *bitmap_p;
+
+ BCM_MWBMAP_AUDIT(mwbmap_hdl);
+ mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+
+ ASSERT(bitix < mwbmap_p->total);
+
+ /* Start with second hierarchy */
+ wordix = BCM_MWBMAP_DIVOP(bitix);
+ bitmap = (uint32)(1U << BCM_MWBMAP_MODOP(bitix));
+ bitmap_p = &mwbmap_p->id_bitmap[wordix];
+
+ ASSERT((*bitmap_p & bitmap) == bitmap);
+
+ mwbmap_p->ifree--; /* update free count */
+ ASSERT(mwbmap_p->ifree >= 0);
+
+ MWBMAP_DBG(("Lvl2: bitix<%u> wordix<%u>: %08x ^ %08x = %08x ifree %d",
+ bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) ^ bitmap,
+ mwbmap_p->ifree));
+
+ *bitmap_p ^= bitmap; /* mark as in use */
+
+ /* Update first hierarchy */
+ bitix = wordix;
+
+ wordix = BCM_MWBMAP_DIVOP(bitix);
+ bitmap_p = &mwbmap_p->wd_bitmap[wordix];
+
+#if defined(BCM_MWBMAP_USE_CNTSETBITS)
+ count = bcm_cntsetbits(mwbmap_p->id_bitmap[bitix]);
+#else /* ! BCM_MWBMAP_USE_CNTSETBITS */
+ mwbmap_p->wd_count[bitix]--;
+ count = mwbmap_p->wd_count[bitix];
+ MWBMAP_ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[bitix]));
+#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
+ MWBMAP_ASSERT(count >= 0);
+
+ bitmap = (count == 0) << BCM_MWBMAP_MODOP(bitix);
+
+ MWBMAP_DBG(("Lvl1: bitix<%02lu> wordix<%02u>: %08x ^ %08x = %08x wfree %d",
+ BCM_MWBMAP_MODOP(bitix), wordix, *bitmap_p, bitmap,
+ (*bitmap_p) ^ bitmap, count));
+
+ *bitmap_p ^= bitmap; /* mark as in use */
+
+ return;
+}
+
+/* Free a previously allocated index back into the multiword bitmap allocator */
+void BCMFASTPATH
+bcm_mwbmap_free(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix)
+{
+ bcm_mwbmap_t * mwbmap_p;
+ uint32 wordix, bitmap, *bitmap_p;
+
+ BCM_MWBMAP_AUDIT(mwbmap_hdl);
+ mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+
+ ASSERT(bitix < mwbmap_p->total);
+
+ /* Start with second level hierarchy */
+ wordix = BCM_MWBMAP_DIVOP(bitix);
+ bitmap = (1U << BCM_MWBMAP_MODOP(bitix));
+ bitmap_p = &mwbmap_p->id_bitmap[wordix];
+
+ ASSERT((*bitmap_p & bitmap) == 0U); /* ASSERT not a double free */
+
+ mwbmap_p->ifree++; /* update free count */
+ ASSERT(mwbmap_p->ifree <= mwbmap_p->total);
+
+ MWBMAP_DBG(("Lvl2: bitix<%02u> wordix<%02u>: %08x | %08x = %08x ifree %d",
+ bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) | bitmap,
+ mwbmap_p->ifree));
+
+ *bitmap_p |= bitmap; /* mark as available */
+
+ /* Now update first level hierarchy */
+
+ bitix = wordix;
+
+ wordix = BCM_MWBMAP_DIVOP(bitix); /* first level's word index */
+ bitmap = (1U << BCM_MWBMAP_MODOP(bitix));
+ bitmap_p = &mwbmap_p->wd_bitmap[wordix];
+
+#if !defined(BCM_MWBMAP_USE_CNTSETBITS)
+ mwbmap_p->wd_count[bitix]++;
+#endif
+
+#if defined(BCM_MWBMAP_DEBUG)
+ {
+ uint32 count;
+#if defined(BCM_MWBMAP_USE_CNTSETBITS)
+ count = bcm_cntsetbits(mwbmap_p->id_bitmap[bitix]);
+#else /* ! BCM_MWBMAP_USE_CNTSETBITS */
+ count = mwbmap_p->wd_count[bitix];
+ MWBMAP_ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[bitix]));
+#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
+
+ MWBMAP_ASSERT(count <= BCM_MWBMAP_BITS_WORD);
+
+ MWBMAP_DBG(("Lvl1: bitix<%02u> wordix<%02u>: %08x | %08x = %08x wfree %d",
+ bitix, wordix, *bitmap_p, bitmap, (*bitmap_p) | bitmap, count));
+ }
+#endif /* BCM_MWBMAP_DEBUG */
+
+ *bitmap_p |= bitmap;
+
+ return;
+}
+
+/* Fetch the total number of free indices in the multiword bitmap allocator */
+uint32
+bcm_mwbmap_free_cnt(struct bcm_mwbmap * mwbmap_hdl)
+{
+ bcm_mwbmap_t * mwbmap_p;
+
+ BCM_MWBMAP_AUDIT(mwbmap_hdl);
+ mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+
+ ASSERT(mwbmap_p->ifree >= 0);
+
+ return mwbmap_p->ifree;
+}
+
+/* Determine whether an index is inuse or free */
+bool
+bcm_mwbmap_isfree(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix)
+{
+ bcm_mwbmap_t * mwbmap_p;
+ uint32 wordix, bitmap;
+
+ BCM_MWBMAP_AUDIT(mwbmap_hdl);
+ mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+
+ ASSERT(bitix < mwbmap_p->total);
+
+ wordix = BCM_MWBMAP_DIVOP(bitix);
+ bitmap = (1U << BCM_MWBMAP_MODOP(bitix));
+
+ return ((mwbmap_p->id_bitmap[wordix] & bitmap) != 0U);
+}
+
+/* Debug dump a multiword bitmap allocator */
+void
+bcm_mwbmap_show(struct bcm_mwbmap * mwbmap_hdl)
+{
+ uint32 ix, count;
+ bcm_mwbmap_t * mwbmap_p;
+
+ BCM_MWBMAP_AUDIT(mwbmap_hdl);
+ mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+
+ printf("mwbmap_p %p wmaps %u imaps %u ifree %d total %u\n", mwbmap_p,
+ mwbmap_p->wmaps, mwbmap_p->imaps, mwbmap_p->ifree, mwbmap_p->total);
+ for (ix = 0U; ix < mwbmap_p->wmaps; ix++) {
+ printf("\tWDMAP:%2u. 0x%08x\t", ix, mwbmap_p->wd_bitmap[ix]);
+ bcm_bitprint32(mwbmap_p->wd_bitmap[ix]);
+ printf("\n");
+ }
+ for (ix = 0U; ix < mwbmap_p->imaps; ix++) {
+#if defined(BCM_MWBMAP_USE_CNTSETBITS)
+ count = bcm_cntsetbits(mwbmap_p->id_bitmap[ix]);
+#else /* ! BCM_MWBMAP_USE_CNTSETBITS */
+ count = mwbmap_p->wd_count[ix];
+ MWBMAP_ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[ix]));
+#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
+ printf("\tIDMAP:%2u. 0x%08x %02u\t", ix, mwbmap_p->id_bitmap[ix], count);
+ bcm_bitprint32(mwbmap_p->id_bitmap[ix]);
+ printf("\n");
+ }
+
+ return;
+}
+
+/* Audit a hierarchical multiword bitmap */
+void
+bcm_mwbmap_audit(struct bcm_mwbmap * mwbmap_hdl)
+{
+ bcm_mwbmap_t * mwbmap_p;
+ uint32 count, free_cnt = 0U, wordix, idmap_ix, bitix, *bitmap_p;
+
+ mwbmap_p = BCM_MWBMAP_PTR(mwbmap_hdl);
+
+ for (wordix = 0U; wordix < mwbmap_p->wmaps; ++wordix) {
+
+ bitmap_p = &mwbmap_p->wd_bitmap[wordix];
+
+ for (bitix = 0U; bitix < BCM_MWBMAP_BITS_WORD; bitix++) {
+ if ((*bitmap_p) & (1 << bitix)) {
+ idmap_ix = BCM_MWBMAP_MULOP(wordix) + bitix;
+#if defined(BCM_MWBMAP_USE_CNTSETBITS)
+ count = bcm_cntsetbits(mwbmap_p->id_bitmap[idmap_ix]);
+#else /* ! BCM_MWBMAP_USE_CNTSETBITS */
+ count = mwbmap_p->wd_count[idmap_ix];
+ ASSERT(count == bcm_cntsetbits(mwbmap_p->id_bitmap[idmap_ix]));
+#endif /* ! BCM_MWBMAP_USE_CNTSETBITS */
+ ASSERT(count != 0U);
+ free_cnt += count;
+ }
+ }
+ }
+
+ ASSERT((int)free_cnt == mwbmap_p->ifree);
+}
+/* END : Multiword bitmap based 64bit to Unique 32bit Id allocator. */
+
+/* Simple 16bit Id allocator using a stack implementation. */
+typedef struct id16_map {
+ uint16 total; /* total number of ids managed by allocator */
+ uint16 start; /* start value of 16bit ids to be managed */
+ uint32 failures; /* count of failures */
+ void *dbg; /* debug placeholder */
+ int stack_idx; /* index into stack of available ids */
+ uint16 stack[0]; /* stack of 16 bit ids */
+} id16_map_t;
+
+#define ID16_MAP_SZ(items) (sizeof(id16_map_t) + \
+ (sizeof(uint16) * (items)))
+
+#if defined(BCM_DBG)
+
+/* Uncomment BCM_DBG_ID16 to debug double free */
+/* #define BCM_DBG_ID16 */
+
+typedef struct id16_map_dbg {
+ uint16 total;
+ bool avail[0];
+} id16_map_dbg_t;
+#define ID16_MAP_DBG_SZ(items) (sizeof(id16_map_dbg_t) + \
+ (sizeof(bool) * (items)))
+#define ID16_MAP_MSG(x)      printf x
+#else
+#define ID16_MAP_MSG(x)
+#endif /* BCM_DBG */
+
+void * /* Construct an id16 allocator: [start_val16 .. start_val16+total_ids) */
+id16_map_init(osl_t *osh, uint16 total_ids, uint16 start_val16)
+{
+ uint16 idx, val16;
+ id16_map_t * id16_map;
+
+ ASSERT(total_ids > 0);
+ ASSERT((start_val16 + total_ids) < ID16_INVALID);
+
+ id16_map = (id16_map_t *) MALLOC(osh, ID16_MAP_SZ(total_ids));
+ if (id16_map == NULL) {
+ return NULL;
+ }
+
+ id16_map->total = total_ids;
+ id16_map->start = start_val16;
+ id16_map->failures = 0;
+ id16_map->dbg = NULL;
+
+ /* Populate stack with 16bit id values, commencing with start_val16 */
+ id16_map->stack_idx = 0;
+ val16 = start_val16;
+
+ for (idx = 0; idx < total_ids; idx++, val16++) {
+ id16_map->stack_idx = idx;
+ id16_map->stack[id16_map->stack_idx] = val16;
+ }
+
+#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
+ id16_map->dbg = MALLOC(osh, ID16_MAP_DBG_SZ(total_ids));
+
+ if (id16_map->dbg) {
+ id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg;
+
+ id16_map_dbg->total = total_ids;
+ for (idx = 0; idx < total_ids; idx++) {
+ id16_map_dbg->avail[idx] = TRUE;
+ }
+ }
+#endif /* BCM_DBG && BCM_DBG_ID16 */
+
+ return (void *)id16_map;
+}
+
+void * /* Destruct an id16 allocator instance */
+id16_map_fini(osl_t *osh, void * id16_map_hndl)
+{
+ uint16 total_ids;
+ id16_map_t * id16_map;
+
+ if (id16_map_hndl == NULL)
+ return NULL;
+
+ id16_map = (id16_map_t *)id16_map_hndl;
+
+ total_ids = id16_map->total;
+ ASSERT(total_ids > 0);
+
+#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
+ if (id16_map->dbg) {
+ MFREE(osh, id16_map->dbg, ID16_MAP_DBG_SZ(total_ids));
+ id16_map->dbg = NULL;
+ }
+#endif /* BCM_DBG && BCM_DBG_ID16 */
+
+ id16_map->total = 0;
+ MFREE(osh, id16_map, ID16_MAP_SZ(total_ids));
+
+ return NULL;
+}
+
+uint16 BCMFASTPATH /* Allocate a unique 16bit id */
+id16_map_alloc(void * id16_map_hndl)
+{
+ uint16 val16;
+ id16_map_t * id16_map;
+
+ ASSERT(id16_map_hndl != NULL);
+
+ id16_map = (id16_map_t *)id16_map_hndl;
+
+ ASSERT(id16_map->total > 0);
+
+ if (id16_map->stack_idx < 0) {
+ id16_map->failures++;
+ return ID16_INVALID;
+ }
+
+ val16 = id16_map->stack[id16_map->stack_idx];
+ id16_map->stack_idx--;
+
+#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
+
+ ASSERT(val16 < (id16_map->start + id16_map->total));
+
+ if (id16_map->dbg) { /* Validate val16 */
+ id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg;
+
+ ASSERT(id16_map_dbg->avail[val16 - id16_map->start] == TRUE);
+ id16_map_dbg->avail[val16 - id16_map->start] = FALSE;
+ }
+#endif /* BCM_DBG && BCM_DBG_ID16 */
+
+ return val16;
+}
+
+
+void BCMFASTPATH /* Free a 16bit id value into the id16 allocator */
+id16_map_free(void * id16_map_hndl, uint16 val16)
+{
+ id16_map_t * id16_map;
+
+ ASSERT(id16_map_hndl != NULL);
+
+ id16_map = (id16_map_t *)id16_map_hndl;
+
+#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
+
+ ASSERT(val16 < (id16_map->start + id16_map->total));
+
+ if (id16_map->dbg) { /* Validate val16 */
+ id16_map_dbg_t *id16_map_dbg = (id16_map_dbg_t *)id16_map->dbg;
+
+ ASSERT(id16_map_dbg->avail[val16 - id16_map->start] == FALSE);
+ id16_map_dbg->avail[val16 - id16_map->start] = TRUE;
+ }
+#endif /* BCM_DBG && BCM_DBG_ID16 */
+
+ id16_map->stack_idx++;
+ id16_map->stack[id16_map->stack_idx] = val16;
+}
+
+uint32 /* Returns number of failures to allocate an unique id16 */
+id16_map_failures(void * id16_map_hndl)
+{
+ ASSERT(id16_map_hndl != NULL);
+ return ((id16_map_t *)id16_map_hndl)->failures;
+}
+
+bool
+id16_map_audit(void * id16_map_hndl)
+{
+ int idx;
+ int insane = 0;
+ id16_map_t * id16_map;
+
+ ASSERT(id16_map_hndl != NULL);
+
+ id16_map = (id16_map_t *)id16_map_hndl;
+
+ ASSERT((id16_map->stack_idx > 0) && (id16_map->stack_idx < id16_map->total));
+ for (idx = 0; idx <= id16_map->stack_idx; idx++) {
+ ASSERT(id16_map->stack[idx] >= id16_map->start);
+ ASSERT(id16_map->stack[idx] < (id16_map->start + id16_map->total));
+
+#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
+ if (id16_map->dbg) {
+ uint16 val16 = id16_map->stack[idx];
+			if (((id16_map_dbg_t *)(id16_map->dbg))->avail[val16 - id16_map->start] != TRUE) {
+ insane |= 1;
+ ID16_MAP_MSG(("id16_map<%p>: stack_idx %u invalid val16 %u\n",
+ id16_map_hndl, idx, val16));
+ }
+ }
+#endif /* BCM_DBG && BCM_DBG_ID16 */
+ }
+
+#if defined(BCM_DBG) && defined(BCM_DBG_ID16)
+ if (id16_map->dbg) {
+ uint16 avail = 0; /* Audit available ids counts */
+		for (idx = 0; idx < ((id16_map_dbg_t *)(id16_map->dbg))->total; idx++) {
+			if (((id16_map_dbg_t *)(id16_map->dbg))->avail[idx] == TRUE)
+ avail++;
+ }
+ if (avail && (avail != (id16_map->stack_idx + 1))) {
+ insane |= 1;
+ ID16_MAP_MSG(("id16_map<%p>: avail %u stack_idx %u\n",
+ id16_map_hndl, avail, id16_map->stack_idx));
+ }
+ }
+#endif /* BCM_DBG && BCM_DBG_ID16 */
+
+ return (!!insane);
+}
+/* END: Simple id16 allocator */
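A minimal usage sketch of the stack-based id16 allocator above (illustration only, not part of this patch; osh is assumed to be a valid osl_t handle and the function name id16_example is hypothetical):

	static void id16_example(osl_t *osh)
	{
		void *pool;
		uint16 id;

		pool = id16_map_init(osh, 16, 0x100);	/* hand out ids 0x100..0x10F */
		if (pool == NULL)
			return;

		id = id16_map_alloc(pool);		/* ID16_INVALID when the pool is exhausted */
		if (id != ID16_INVALID)
			id16_map_free(pool, id);	/* push the id back onto the free stack */

		pool = id16_map_fini(osh, pool);	/* returns NULL once the pool is freed */
	}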
+
+
+#endif /* BCMDRIVER */
+
+/* calculate (a >> b) and return only the lower 32 bits */
+void
+bcm_uint64_right_shift(uint32* r, uint32 a_high, uint32 a_low, uint32 b)
+{
+ uint32 a1 = a_high, a0 = a_low, r0 = 0;
+
+ if (b == 0) {
+ r0 = a_low;
+ *r = r0;
+ return;
+ }
+
+ if (b < 32) {
+ a0 = a0 >> b;
+ a1 = a1 & ((1 << b) - 1);
+ a1 = a1 << (32 - b);
+ r0 = a0 | a1;
+ *r = r0;
+ return;
+ } else {
+ r0 = a1 >> (b - 32);
+ *r = r0;
+ return;
+ }
+
+}
+
+/* calculate a + b where a is a 64 bit number and b is a 32 bit number */
+void
+bcm_add_64(uint32* r_hi, uint32* r_lo, uint32 offset)
+{
+ uint32 r1_lo = *r_lo;
+ (*r_lo) += offset;
+ if (*r_lo < r1_lo)
+ (*r_hi) ++;
+}
+
+/* calculate a - b where a is a 64 bit number and b is a 32 bit number */
+void
+bcm_sub_64(uint32* r_hi, uint32* r_lo, uint32 offset)
+{
+ uint32 r1_lo = *r_lo;
+ (*r_lo) -= offset;
+ if (*r_lo > r1_lo)
+ (*r_hi) --;
+}
+
+#ifdef DEBUG_COUNTER
+#if (OSL_SYSUPTIME_SUPPORT == TRUE)
+void counter_printlog(counter_tbl_t *ctr_tbl)
+{
+ uint32 now;
+
+ if (!ctr_tbl->enabled)
+ return;
+
+ now = OSL_SYSUPTIME();
+
+ if (now - ctr_tbl->prev_log_print > ctr_tbl->log_print_interval) {
+ uint8 i = 0;
+ printf("counter_print(%s %d):", ctr_tbl->name, now - ctr_tbl->prev_log_print);
+
+ for (i = 0; i < ctr_tbl->needed_cnt; i++) {
+ printf(" %u", ctr_tbl->cnt[i]);
+ }
+ printf("\n");
+
+ ctr_tbl->prev_log_print = now;
+ bzero(ctr_tbl->cnt, CNTR_TBL_MAX * sizeof(uint));
+ }
+}
+#else
+/* OSL_SYSUPTIME is not supported so no way to get time */
+#define counter_printlog(a) do {} while (0)
+#endif /* OSL_SYSUPTIME_SUPPORT == TRUE */
+#endif /* DEBUG_COUNTER */
+
+#ifdef BCMDRIVER
+void
+dll_pool_detach(void * osh, dll_pool_t * pool, uint16 elems_max, uint16 elem_size)
+{
+ uint32 mem_size;
+ mem_size = sizeof(dll_pool_t) + (elems_max * elem_size);
+ if (pool)
+ MFREE(osh, pool, mem_size);
+}
+dll_pool_t *
+dll_pool_init(void * osh, uint16 elems_max, uint16 elem_size)
+{
+ uint32 mem_size, i;
+ dll_pool_t * dll_pool_p;
+ dll_t * elem_p;
+
+ ASSERT(elem_size > sizeof(dll_t));
+
+ mem_size = sizeof(dll_pool_t) + (elems_max * elem_size);
+
+ if ((dll_pool_p = (dll_pool_t *)MALLOC(osh, mem_size)) == NULL) {
+ printf("dll_pool_init: elems_max<%u> elem_size<%u> malloc failure\n",
+ elems_max, elem_size);
+ ASSERT(0);
+ return dll_pool_p;
+ }
+
+ bzero(dll_pool_p, mem_size);
+
+ dll_init(&dll_pool_p->free_list);
+ dll_pool_p->elems_max = elems_max;
+ dll_pool_p->elem_size = elem_size;
+
+ elem_p = dll_pool_p->elements;
+ for (i = 0; i < elems_max; i++) {
+ dll_append(&dll_pool_p->free_list, elem_p);
+ elem_p = (dll_t *)((uintptr)elem_p + elem_size);
+ }
+
+ dll_pool_p->free_count = elems_max;
+
+ return dll_pool_p;
+}
+
+
+void *
+dll_pool_alloc(dll_pool_t * dll_pool_p)
+{
+ dll_t * elem_p;
+
+ if (dll_pool_p->free_count == 0) {
+ ASSERT(dll_empty(&dll_pool_p->free_list));
+ return NULL;
+ }
+
+ elem_p = dll_head_p(&dll_pool_p->free_list);
+ dll_delete(elem_p);
+ dll_pool_p->free_count -= 1;
+
+ return (void *)elem_p;
+}
+
+void
+dll_pool_free(dll_pool_t * dll_pool_p, void * elem_p)
+{
+ dll_t * node_p = (dll_t *)elem_p;
+ dll_prepend(&dll_pool_p->free_list, node_p);
+ dll_pool_p->free_count += 1;
+}
+
+
+void
+dll_pool_free_tail(dll_pool_t * dll_pool_p, void * elem_p)
+{
+ dll_t * node_p = (dll_t *)elem_p;
+ dll_append(&dll_pool_p->free_list, node_p);
+ dll_pool_p->free_count += 1;
+}
+
+#endif /* BCMDRIVER */
#ifndef ASSERT
#define ASSERT(exp)
#endif
-#endif
+#endif /* BCMDRIVER */
-#ifdef _bcmwifi_c_
-
-#include <bcmwifi.h>
-#else
#include <bcmwifi_channels.h>
-#endif
#if defined(WIN32) && (defined(BCMDLL) || defined(WLMDLL))
-#include <bcmstdlib.h>
+#include <bcmstdlib.h> /* For wl/exe/GNUmakefile.brcm_wlu and GNUmakefile.wlm_dll */
#endif
-#ifndef D11AC_IOTYPES
-
-
-
-
-
-
-
-char *
-wf_chspec_ntoa(chanspec_t chspec, char *buf)
-{
- const char *band, *bw, *sb;
- uint channel;
-
- band = "";
- bw = "";
- sb = "";
- channel = CHSPEC_CHANNEL(chspec);
-
- if ((CHSPEC_IS2G(chspec) && channel > CH_MAX_2G_CHANNEL) ||
- (CHSPEC_IS5G(chspec) && channel <= CH_MAX_2G_CHANNEL))
- band = (CHSPEC_IS2G(chspec)) ? "b" : "a";
- if (CHSPEC_IS40(chspec)) {
- if (CHSPEC_SB_UPPER(chspec)) {
- sb = "u";
- channel += CH_10MHZ_APART;
- } else {
- sb = "l";
- channel -= CH_10MHZ_APART;
- }
- } else if (CHSPEC_IS10(chspec)) {
- bw = "n";
- }
-
-
- snprintf(buf, 6, "%d%s%s%s", channel, band, bw, sb);
- return (buf);
-}
-
-
-chanspec_t
-wf_chspec_aton(const char *a)
-{
- char *endp = NULL;
- uint channel, band, bw, ctl_sb;
- char c;
-
- channel = strtoul(a, &endp, 10);
-
-
- if (endp == a)
- return 0;
-
- if (channel > MAXCHANNEL)
- return 0;
-
- band = ((channel <= CH_MAX_2G_CHANNEL) ? WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G);
- bw = WL_CHANSPEC_BW_20;
- ctl_sb = WL_CHANSPEC_CTL_SB_NONE;
-
- a = endp;
-
- c = tolower(a[0]);
- if (c == '\0')
- goto done;
-
-
- if (c == 'a' || c == 'b') {
- band = (c == 'a') ? WL_CHANSPEC_BAND_5G : WL_CHANSPEC_BAND_2G;
- a++;
- c = tolower(a[0]);
- if (c == '\0')
- goto done;
- }
-
-
- if (c == 'n') {
- bw = WL_CHANSPEC_BW_10;
- } else if (c == 'l') {
- bw = WL_CHANSPEC_BW_40;
- ctl_sb = WL_CHANSPEC_CTL_SB_LOWER;
-
- if (channel <= (MAXCHANNEL - CH_20MHZ_APART))
- channel += CH_10MHZ_APART;
- else
- return 0;
- } else if (c == 'u') {
- bw = WL_CHANSPEC_BW_40;
- ctl_sb = WL_CHANSPEC_CTL_SB_UPPER;
-
- if (channel > CH_20MHZ_APART)
- channel -= CH_10MHZ_APART;
- else
- return 0;
- } else {
- return 0;
- }
-
-done:
- return (channel | band | bw | ctl_sb);
-}
-
-
-bool
-wf_chspec_malformed(chanspec_t chanspec)
-{
-
- if (!CHSPEC_IS5G(chanspec) && !CHSPEC_IS2G(chanspec))
- return TRUE;
-
- if (!CHSPEC_IS40(chanspec) && !CHSPEC_IS20(chanspec))
- return TRUE;
-
-
- if (CHSPEC_IS20(chanspec)) {
- if (!CHSPEC_SB_NONE(chanspec))
- return TRUE;
- } else {
- if (!CHSPEC_SB_UPPER(chanspec) && !CHSPEC_SB_LOWER(chanspec))
- return TRUE;
- }
-
- return FALSE;
-}
-
-
-uint8
-wf_chspec_ctlchan(chanspec_t chspec)
-{
- uint8 ctl_chan;
-
-
- if (CHSPEC_CTL_SB(chspec) == WL_CHANSPEC_CTL_SB_NONE) {
- return CHSPEC_CHANNEL(chspec);
- } else {
-
- ASSERT(CHSPEC_BW(chspec) == WL_CHANSPEC_BW_40);
-
- if (CHSPEC_CTL_SB(chspec) == WL_CHANSPEC_CTL_SB_UPPER) {
-
- ctl_chan = UPPER_20_SB(CHSPEC_CHANNEL(chspec));
- } else {
- ASSERT(CHSPEC_CTL_SB(chspec) == WL_CHANSPEC_CTL_SB_LOWER);
-
- ctl_chan = LOWER_20_SB(CHSPEC_CHANNEL(chspec));
- }
- }
-
- return ctl_chan;
-}
-
-chanspec_t
-wf_chspec_ctlchspec(chanspec_t chspec)
-{
- chanspec_t ctl_chspec = 0;
- uint8 channel;
-
- ASSERT(!wf_chspec_malformed(chspec));
-
-
- if (CHSPEC_CTL_SB(chspec) == WL_CHANSPEC_CTL_SB_NONE) {
- return chspec;
- } else {
- if (CHSPEC_CTL_SB(chspec) == WL_CHANSPEC_CTL_SB_UPPER) {
- channel = UPPER_20_SB(CHSPEC_CHANNEL(chspec));
- } else {
- channel = LOWER_20_SB(CHSPEC_CHANNEL(chspec));
- }
- ctl_chspec = channel | WL_CHANSPEC_BW_20 | WL_CHANSPEC_CTL_SB_NONE;
- ctl_chspec |= CHSPEC_BAND(chspec);
- }
- return ctl_chspec;
-}
-
-#else
-
-
-
-
+/* Definitions for D11AC capable Chanspec type */
+/* Chanspec ASCII representation with 802.11ac capability:
+ * [<band> 'g'] <channel> ['/'<bandwidth> [<ctl-sideband>]['/'<1st80channel>'-'<2nd80channel>]]
+ *
+ * <band>:
+ * (optional) 2, 3, 4, 5 for 2.4GHz, 3GHz, 4GHz, and 5GHz respectively.
+ * Default value is 2g if channel <= 14, otherwise 5g.
+ * <channel>:
+ * channel number of the 5MHz, 10MHz, 20MHz channel,
+ * or primary channel of 40MHz, 80MHz, 160MHz, or 80+80MHz channel.
+ * <bandwidth>:
+ * (optional) 5, 10, 20, 40, 80, 160, or 80+80. Default value is 20.
+ * <primary-sideband>:
+ * (only for 2.4GHz band 40MHz) U for upper sideband primary, L for lower.
+ *
+ * For 2.4GHz band 40MHz channels, the same primary channel may be the
+ * upper sideband for one 40MHz channel, and the lower sideband for an
+ * overlapping 40MHz channel. The U/L disambiguates which 40MHz channel
+ * is being specified.
+ *
+ * For 40MHz in the 5GHz band and all channel bandwidths greater than
+ *	40MHz, the U/L specification is not allowed since the channels are
+ * non-overlapping and the primary sub-band is derived from its
+ * position in the wide bandwidth channel.
+ *
+ * <1st80Channel>:
+ * <2nd80Channel>:
+ * Required for 80+80, otherwise not allowed.
+ * Specifies the center channel of the first and second 80MHz band.
+ *
+ * In its simplest form, it is a 20MHz channel number, with the implied band
+ * of 2.4GHz if channel number <= 14, and 5GHz otherwise.
+ *
+ * To allow for backward compatibility with scripts, the old form for
+ * 40MHz channels is also allowed: <channel><ctl-sideband>
+ *
+ * <channel>:
+ * primary channel of 40MHz, channel <= 14 is 2GHz, otherwise 5GHz
+ * <ctl-sideband>:
+ * "U" for upper, "L" for lower (or lower case "u" "l")
+ *
+ * 5 GHz Examples:
+ * Chanspec BW Center Ch Channel Range Primary Ch
+ * 5g8 20MHz 8 - -
+ * 52 20MHz 52 - -
+ * 52/40 40MHz 54 52-56 52
+ * 56/40 40MHz 54 52-56 56
+ * 52/80 80MHz 58 52-64 52
+ * 56/80 80MHz 58 52-64 56
+ * 60/80 80MHz 58 52-64 60
+ * 64/80 80MHz 58 52-64 64
+ * 52/160 160MHz 50 36-64 52
+ *	36/160	    	160MHz 	50	36-64		36
+ * 36/80+80/42-106 80+80MHz 42,106 36-48,100-112 36
+ *
+ * 2 GHz Examples:
+ * Chanspec BW Center Ch Channel Range Primary Ch
+ * 2g8 20MHz 8 - -
+ * 8 20MHz 8 - -
+ * 6 20MHz 6 - -
+ * 6/40l 40MHz 8 6-10 6
+ * 6l 40MHz 8 6-10 6
+ * 6/40u 40MHz 4 2-6 6
+ * 6u 40MHz 4 2-6 6
+ */
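As a small sketch of how the string form above maps to chanspec values (illustration only, not part of this patch), "36/80" names primary channel 36 of the 80MHz channel centered on 42 and round-trips through the parse/format helpers added below; the function name chanspec_example is hypothetical:

	static void chanspec_example(void)
	{
		char buf[CHANSPEC_STR_LEN];
		chanspec_t chspec = wf_chspec_aton("36/80");	/* 5GHz, 80MHz, primary ch 36 */

		if (chspec != 0 && !wf_chspec_malformed(chspec))
			printf("chanspec 0x%04x is %s\n", chspec, wf_chspec_ntoa_ex(chspec, buf));
	}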
+/* bandwidth ASCII string */
static const char *wf_chspec_bw_str[] =
{
"5",
#define WF_NUM_BW \
(sizeof(wf_chspec_bw_mhz)/sizeof(uint8))
-
+/* 40MHz channels in 5GHz band */
static const uint8 wf_5g_40m_chans[] =
{38, 46, 54, 62, 102, 110, 118, 126, 134, 142, 151, 159};
#define WF_NUM_5G_40M_CHANS \
(sizeof(wf_5g_40m_chans)/sizeof(uint8))
-
+/* 80MHz channels in 5GHz band */
static const uint8 wf_5g_80m_chans[] =
{42, 58, 106, 122, 138, 155};
#define WF_NUM_5G_80M_CHANS \
(sizeof(wf_5g_80m_chans)/sizeof(uint8))
-
+/* 160MHz channels in 5GHz band */
static const uint8 wf_5g_160m_chans[] =
{50, 114};
#define WF_NUM_5G_160M_CHANS \
(sizeof(wf_5g_160m_chans)/sizeof(uint8))
-
+/* convert bandwidth from chanspec to MHz */
static uint
bw_chspec_to_mhz(chanspec_t chspec)
{
return (bw >= WF_NUM_BW ? 0 : wf_chspec_bw_mhz[bw]);
}
-
+/* bw in MHz, return the channel count from the center channel to the
+ * channel at the edge of the band
+ */
static uint8
center_chan_to_edge(uint bw)
{
-
+ /* edge channels separated by BW - 10MHz on each side
+ * delta from cf to edge is half of that,
+ * MHz to channel num conversion is 5MHz/channel
+ */
return (uint8)(((bw - 20) / 2) / 5);
}
-
+/* return channel number of the low edge of the band
+ * given the center channel and BW
+ */
static uint8
channel_low_edge(uint center_ch, uint bw)
{
return (uint8)(center_ch - center_chan_to_edge(bw));
}
-
+/* return side band number given center channel and control channel
+ * return -1 on error
+ */
static int
channel_to_sb(uint center_ch, uint ctl_ch, uint bw)
{
uint sb;
if ((ctl_ch - lowest) % 4) {
-
+ /* bad ctl channel, not mult 4 */
return -1;
}
sb = ((ctl_ch - lowest) / 4);
-
+	/* sb must be an index to a 20MHz channel in range */
if (sb >= (bw / 20)) {
-
+ /* ctl_ch must have been too high for the center_ch */
return -1;
}
return sb;
}
-
+/* return control channel given center channel and side band */
static uint8
channel_to_ctl_chan(uint center_ch, uint bw, uint sb)
{
return (uint8)(channel_low_edge(center_ch, bw) + sb * 4);
}
-
+/* return index of 80MHz channel from channel number
+ * return -1 on error
+ */
static int
channel_80mhz_to_id(uint ch)
{
return -1;
}
+/* wrapper function for wf_chspec_ntoa. In case of an error it puts
+ * the original chanspec in the output buffer, prepended with "invalid".
+ * Can be directly used in print routines as it takes care of null
+ */
+char *
+wf_chspec_ntoa_ex(chanspec_t chspec, char *buf)
+{
+ if (wf_chspec_ntoa(chspec, buf) == NULL)
+ snprintf(buf, CHANSPEC_STR_LEN, "invalid 0x%04x", chspec);
+ return buf;
+}
+/* given a chanspec and a string buffer, format the chanspec as a
+ * string, and return the original pointer buf.
+ * Min buffer length must be CHANSPEC_STR_LEN.
+ * On error return NULL
+ */
char *
wf_chspec_ntoa(chanspec_t chspec, char *buf)
{
band = "";
-
+ /* check for non-default band spec */
if ((CHSPEC_IS2G(chspec) && CHSPEC_CHANNEL(chspec) > CH_MAX_2G_CHANNEL) ||
(CHSPEC_IS5G(chspec) && CHSPEC_CHANNEL(chspec) <= CH_MAX_2G_CHANNEL))
band = (CHSPEC_IS2G(chspec)) ? "2g" : "5g";
-
+ /* ctl channel */
ctl_chan = wf_chspec_ctlchan(chspec);
-
+ /* bandwidth and ctl sideband */
if (CHSPEC_IS20(chspec)) {
snprintf(buf, CHANSPEC_STR_LEN, "%s%d", band, ctl_chan);
} else if (!CHSPEC_IS8080(chspec)) {
bw = wf_chspec_bw_str[(chspec & WL_CHANSPEC_BW_MASK) >> WL_CHANSPEC_BW_SHIFT];
#ifdef CHANSPEC_NEW_40MHZ_FORMAT
-
+ /* ctl sideband string if needed for 2g 40MHz */
if (CHSPEC_IS40(chspec) && CHSPEC_IS2G(chspec)) {
sb = CHSPEC_SB_UPPER(chspec) ? "u" : "l";
}
snprintf(buf, CHANSPEC_STR_LEN, "%s%d/%s%s", band, ctl_chan, bw, sb);
#else
-
+ /* ctl sideband string instead of BW for 40MHz */
if (CHSPEC_IS40(chspec)) {
sb = CHSPEC_SB_UPPER(chspec) ? "u" : "l";
snprintf(buf, CHANSPEC_STR_LEN, "%s%d%s", band, ctl_chan, sb);
} else {
snprintf(buf, CHANSPEC_STR_LEN, "%s%d/%s", band, ctl_chan, bw);
}
-#endif
+#endif /* CHANSPEC_NEW_40MHZ_FORMAT */
} else {
-
+ /* 80+80 */
uint chan1 = (chspec & WL_CHANSPEC_CHAN1_MASK) >> WL_CHANSPEC_CHAN1_SHIFT;
uint chan2 = (chspec & WL_CHANSPEC_CHAN2_MASK) >> WL_CHANSPEC_CHAN2_SHIFT;
-
+ /* convert to channel number */
chan1 = (chan1 < WF_NUM_5G_80M_CHANS) ? wf_5g_80m_chans[chan1] : 0;
chan2 = (chan2 < WF_NUM_5G_80M_CHANS) ? wf_5g_80m_chans[chan2] : 0;
-
+ /* Outputs a max of CHANSPEC_STR_LEN chars including '\0' */
snprintf(buf, CHANSPEC_STR_LEN, "%d/80+80/%d-%d", ctl_chan, chan1, chan2);
}
char *endp = NULL;
val = strtoul(*p, &endp, 10);
-
+ /* if endp is the initial pointer value, then a number was not read */
if (endp == *p)
return 0;
-
+ /* advance the buffer pointer to the end of the integer string */
*p = endp;
-
+ /* return the parsed integer */
*num = (unsigned int)val;
return 1;
}
-
+/* given a chanspec string, convert to a chanspec.
+ * On error return 0
+ */
chanspec_t
wf_chspec_aton(const char *a)
{
chspec_sb = 0;
chspec_ch = ch1 = ch2 = 0;
-
+ /* parse channel num or band */
if (!read_uint(&a, &num))
return 0;
-
+ /* if we are looking at a 'g', then the first number was a band */
c = tolower((int)a[0]);
if (c == 'g') {
- a ++;
+ a ++; /* consume the char */
-
+ /* band must be "2" or "5" */
if (num == 2)
chspec_band = WL_CHANSPEC_BAND_2G;
else if (num == 5)
else
return 0;
-
+ /* read the channel number */
if (!read_uint(&a, &ctl_ch))
return 0;
c = tolower((int)a[0]);
}
else {
-
+ /* first number is channel, use default for band */
ctl_ch = num;
chspec_band = ((ctl_ch <= CH_MAX_2G_CHANNEL) ?
WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G);
}
if (c == '\0') {
-
+ /* default BW of 20MHz */
chspec_bw = WL_CHANSPEC_BW_20;
goto done_read;
}
- a ++;
+ a ++; /* consume the 'u','l', or '/' */
-
+ /* check 'u'/'l' */
if (c == 'u' || c == 'l') {
sb_ul = c;
chspec_bw = WL_CHANSPEC_BW_40;
goto done_read;
}
-
+ /* next letter must be '/' */
if (c != '/')
return 0;
-
+ /* read bandwidth */
if (!read_uint(&a, &bw))
return 0;
-
+ /* convert to chspec value */
if (bw == 20) {
chspec_bw = WL_CHANSPEC_BW_20;
} else if (bw == 40) {
return 0;
}
-
+ /* So far we have <band>g<chan>/<bw>
+ * Can now be followed by u/l if bw = 40,
+ * or '+80' if bw = 80, to make '80+80' bw.
+ */
c = tolower((int)a[0]);
-
+ /* if we have a 2g/40 channel, we should have a l/u spec now */
if (chspec_band == WL_CHANSPEC_BAND_2G && bw == 40) {
if (c == 'u' || c == 'l') {
- a ++;
+ a ++; /* consume the u/l char */
sb_ul = c;
goto done_read;
}
}
-
+ /* check for 80+80 */
if (c == '+') {
-
+ /* 80+80 */
static const char *plus80 = "80/";
-
+ /* must be looking at '+80/'
+ * check and consume this string.
+ */
chspec_bw = WL_CHANSPEC_BW_8080;
- a ++;
+ a ++; /* consume the char '+' */
-
+ /* consume the '80/' string */
for (i = 0; i < 3; i++) {
if (*a++ != *plus80++) {
return 0;
}
}
-
+ /* read primary 80MHz channel */
if (!read_uint(&a, &ch1))
return 0;
-
+	/* must be followed by '-' */
if (a[0] != '-')
return 0;
- a ++;
+ a ++; /* consume the char */
-
+ /* read secondary 80MHz channel */
if (!read_uint(&a, &ch2))
return 0;
}
done_read:
-
+ /* skip trailing white space */
while (a[0] == ' ') {
a ++;
}
-
+ /* must be end of string */
if (a[0] != '\0')
return 0;
-
+ /* Now have all the chanspec string parts read;
+ * chspec_band, ctl_ch, chspec_bw, sb_ul, ch1, ch2.
+ * chspec_band and chspec_bw are chanspec values.
+ * Need to convert ctl_ch, sb_ul, and ch1,ch2 into
+ * a center channel (or two) and sideband.
+ */
-
+ /* if a sb u/l string was given, just use that,
+	 * guaranteed to be bw = 40 by string parse.
+ */
if (sb_ul != '\0') {
if (sb_ul == 'l') {
chspec_ch = UPPER_20_SB(ctl_ch);
chspec_sb = WL_CHANSPEC_CTL_SB_LLU;
}
}
-
+ /* if the bw is 20, center and sideband are trivial */
else if (chspec_bw == WL_CHANSPEC_BW_20) {
chspec_ch = ctl_ch;
- chspec_sb = 0;
+ chspec_sb = WL_CHANSPEC_CTL_SB_NONE;
}
-
+ /* if the bw is 40/80/160, not 80+80, a single method
+	 * can be used to find the center and sideband
+ */
else if (chspec_bw != WL_CHANSPEC_BW_8080) {
-
+ /* figure out ctl sideband based on ctl channel and bandwidth */
const uint8 *center_ch = NULL;
int num_ch = 0;
int sb = -1;
}
}
-
+ /* check for no matching sb/center */
if (sb < 0) {
return 0;
}
}
-
+ /* Otherwise, bw is 80+80. Figure out channel pair and sb */
else {
int ch1_id = 0, ch2_id = 0;
int sb;
+ /* look up the channel ID for the specified channel numbers */
ch1_id = channel_80mhz_to_id(ch1);
ch2_id = channel_80mhz_to_id(ch2);
-
- if (ch1 >= ch2 || ch1_id < 0 || ch2_id < 0)
+ /* validate channels */
+ if (ch1_id < 0 || ch2_id < 0)
return 0;
-
- chspec_ch = (((uint16)ch1_id << WL_CHANSPEC_CHAN1_SHIFT) |
- ((uint16)ch2_id << WL_CHANSPEC_CHAN2_SHIFT));
+ /* combine 2 channel IDs in channel field of chspec */
+ chspec_ch = (((uint)ch1_id << WL_CHANSPEC_CHAN1_SHIFT) |
+ ((uint)ch2_id << WL_CHANSPEC_CHAN2_SHIFT));
-
+ /* figure out primary 20 MHz sideband */
-
+ /* is the primary channel contained in the 1st 80MHz channel? */
sb = channel_to_sb(ch1, ctl_ch, bw);
if (sb < 0) {
-
- sb = channel_to_sb(ch2, ctl_ch, bw);
- if (sb < 0) {
-
- return 0;
- }
-
- sb += 4;
+ /* no match for primary channel 'ctl_ch' in segment0 80MHz channel */
+ return 0;
}
chspec_sb = sb << WL_CHANSPEC_CTL_SB_SHIFT;
return chspec;
}
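+
+/* Illustrative examples only (not part of the driver) of chanspec strings in
+ * the "<band>g<chan>" form handled by the parser above; the full grammar in
+ * the original source may be wider than what is shown here:
+ *   "2g6"     - 2.4GHz band, 20MHz channel 6
+ *   "5g36l"   - 5GHz band, 40MHz channel with 36 as the lower (control) 20MHz sideband
+ *   "5g36/80" - 5GHz band, 80MHz channel with control channel 36
+ * For 80+80, the bandwidth is followed by "+80/" and the two 80MHz center
+ * channels separated by '-', as consumed by the '+' branch above.
+ */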
-
+/*
+ * Verify the chanspec is using a legal set of parameters, i.e. that the
+ * chanspec specified a band, bw, ctl_sb and channel and that the
+ * combination could be legal given any set of circumstances.
+ * RETURNS: TRUE if the chanspec is malformed, FALSE if it looks good.
+ */
bool
wf_chspec_malformed(chanspec_t chanspec)
{
uint chspec_bw = CHSPEC_BW(chanspec);
uint chspec_ch = CHSPEC_CHANNEL(chanspec);
-
+ /* must be 2G or 5G band */
if (CHSPEC_IS2G(chanspec)) {
-
+ /* must be valid bandwidth */
if (chspec_bw != WL_CHANSPEC_BW_20 &&
chspec_bw != WL_CHANSPEC_BW_40) {
return TRUE;
if (chspec_bw == WL_CHANSPEC_BW_8080) {
uint ch1_id, ch2_id;
-
+ /* channel IDs in 80+80 must be in range */
ch1_id = CHSPEC_CHAN1(chanspec);
ch2_id = CHSPEC_CHAN2(chanspec);
if (ch1_id >= WF_NUM_5G_80M_CHANS || ch2_id >= WF_NUM_5G_80M_CHANS)
return TRUE;
-
- if (ch2_id <= ch1_id)
- return TRUE;
} else if (chspec_bw == WL_CHANSPEC_BW_20 || chspec_bw == WL_CHANSPEC_BW_40 ||
chspec_bw == WL_CHANSPEC_BW_80 || chspec_bw == WL_CHANSPEC_BW_160) {
return TRUE;
}
} else {
-
+ /* invalid bandwidth */
return TRUE;
}
} else {
-
+ /* must be 2G or 5G band */
return TRUE;
}
-
+ /* side band needs to be consistent with bandwidth */
if (chspec_bw == WL_CHANSPEC_BW_20) {
if (CHSPEC_CTL_SB(chanspec) != WL_CHANSPEC_CTL_SB_LLL)
return TRUE;
} else if (chspec_bw == WL_CHANSPEC_BW_40) {
if (CHSPEC_CTL_SB(chanspec) > WL_CHANSPEC_CTL_SB_LLU)
return TRUE;
- } else if (chspec_bw == WL_CHANSPEC_BW_80) {
+ } else if (chspec_bw == WL_CHANSPEC_BW_80 ||
+ chspec_bw == WL_CHANSPEC_BW_8080) {
if (CHSPEC_CTL_SB(chanspec) > WL_CHANSPEC_CTL_SB_LUU)
return TRUE;
}
-
+ else if (chspec_bw == WL_CHANSPEC_BW_160) {
+ ASSERT(CHSPEC_CTL_SB(chanspec) <= WL_CHANSPEC_CTL_SB_UUU);
+ }
return FALSE;
}
-
+/*
+ * Verify the chanspec specifies a valid channel according to 802.11.
+ * RETURNS: TRUE if the chanspec is a valid 802.11 channel
+ */
bool
wf_chspec_valid(chanspec_t chanspec)
{
return FALSE;
if (CHSPEC_IS2G(chanspec)) {
-
+ /* must be valid bandwidth and channel range */
if (chspec_bw == WL_CHANSPEC_BW_20) {
if (chspec_ch >= 1 && chspec_ch <= 14)
return TRUE;
ch1 = wf_5g_80m_chans[CHSPEC_CHAN1(chanspec)];
ch2 = wf_5g_80m_chans[CHSPEC_CHAN2(chanspec)];
-
- if (ch2 > ch1 + CH_80MHZ_APART)
+ /* the two channels must be separated by more than 80MHz by VHT req */
+ if ((ch2 > ch1 + CH_80MHZ_APART) ||
+ (ch1 > ch2 + CH_80MHZ_APART))
return TRUE;
} else {
const uint8 *center_ch;
center_ch = wf_5g_160m_chans;
num_ch = WF_NUM_5G_160M_CHANS;
} else {
-
+ /* invalid bandwidth */
return FALSE;
}
-
+ /* check for a valid center channel */
if (chspec_bw == WL_CHANSPEC_BW_20) {
-
+ /* We don't have an array of legal 20MHz 5G channels, but they are
+ * each side of the legal 40MHz channels. Check the chanspec
+ * channel against either side of the 40MHz channels.
+ */
for (i = 0; i < num_ch; i ++) {
if (chspec_ch == (uint)LOWER_20_SB(center_ch[i]) ||
chspec_ch == (uint)UPPER_20_SB(center_ch[i]))
- break;
+ break; /* match found */
}
if (i == num_ch) {
-
+ /* check for channel 165, which is not a sideband
+ * of any 40MHz 5G channel
+ */
if (chspec_ch == 165)
i = 0;
-
+ /* check for legacy JP channels on failure */
if (chspec_ch == 34 || chspec_ch == 38 ||
chspec_ch == 42 || chspec_ch == 46)
i = 0;
}
} else {
-
+ /* check the chanspec channel to each legal channel */
for (i = 0; i < num_ch; i ++) {
if (chspec_ch == center_ch[i])
- break;
+ break; /* match found */
}
}
if (i < num_ch) {
-
+ /* match found */
return TRUE;
}
}
return FALSE;
}
-
+/*
+ * This function returns the channel number on which control traffic is sent. For 20MHz
+ * channels this is just the channel number; for 40MHz, 80MHz and 160MHz channels it is the
+ * 20MHz sideband selected by the chanspec.
+ */
uint8
wf_chspec_ctlchan(chanspec_t chspec)
{
ASSERT(!wf_chspec_malformed(chspec));
-
+ /* Is there a sideband ? */
if (CHSPEC_IS20(chspec)) {
return CHSPEC_CHANNEL(chspec);
} else {
sb = CHSPEC_CTL_SB(chspec) >> WL_CHANSPEC_CTL_SB_SHIFT;
if (CHSPEC_IS8080(chspec)) {
- bw_mhz = 80;
+ /* For an 80+80 MHz channel, the sideband 'sb' field is an 80 MHz sideband
+ * (LL, LU, UL, UU) for the 80 MHz frequency segment 0.
+ */
+ uint chan_id = CHSPEC_CHAN1(chspec);
- if (sb < 4) {
- center_chan = CHSPEC_CHAN1(chspec);
- }
- else {
- center_chan = CHSPEC_CHAN2(chspec);
- sb -= 4;
- }
+ bw_mhz = 80;
-
- center_chan = wf_5g_80m_chans[center_chan];
+ /* convert from channel index to channel number */
+ center_chan = wf_5g_80m_chans[chan_id];
}
else {
bw_mhz = bw_chspec_to_mhz(chspec);
}
}
+/* given a chanspec, return the bandwidth string */
+char *
+wf_chspec_to_bw_str(chanspec_t chspec)
+{
+ return (char *)wf_chspec_bw_str[(CHSPEC_BW(chspec) >> WL_CHANSPEC_BW_SHIFT)];
+}
+/*
+ * This function returns the chanspec of the control channel of a given chanspec
+ */
chanspec_t
wf_chspec_ctlchspec(chanspec_t chspec)
{
ASSERT(!wf_chspec_malformed(chspec));
-
+ /* Is there a sideband ? */
if (!CHSPEC_IS20(chspec)) {
ctl_chan = wf_chspec_ctlchan(chspec);
ctl_chspec = ctl_chan | WL_CHANSPEC_BW_20;
return ctl_chspec;
}
-
+/* return chanspec given control channel and bandwidth
+ * return 0 on error
+ */
uint16
wf_channel2chspec(uint ctl_ch, uint bw)
{
}
}
-
+ /* check for no matching sb/center */
if (sb < 0) {
return 0;
}
return chspec;
}
-#endif
-
-
+/*
+ * This function returns the chanspec for the primary 40MHz of an 80MHz channel.
+ * The control sideband specifies the same 20MHz channel that the 80MHz channel is using
+ * as the primary 20MHz channel.
+ */
extern chanspec_t wf_chspec_primary40_chspec(chanspec_t chspec)
{
chanspec_t chspec40 = chspec;
ASSERT(!wf_chspec_malformed(chspec));
+ /* if the chanspec is > 80MHz, use the helper routine to find the primary 80 MHz channel */
+ if (CHSPEC_IS8080(chspec) || CHSPEC_IS160(chspec)) {
+ chspec = wf_chspec_primary80_chspec(chspec);
+ }
+
+ /* determine primary 40 MHz sub-channel of an 80 MHz chanspec */
if (CHSPEC_IS80(chspec)) {
center_chan = CHSPEC_CHANNEL(chspec);
sb = CHSPEC_CTL_SB(chspec);
- if (sb == WL_CHANSPEC_CTL_SB_UL) {
-
- sb = WL_CHANSPEC_CTL_SB_L;
- center_chan += CH_20MHZ_APART;
- } else if (sb == WL_CHANSPEC_CTL_SB_UU) {
-
- sb = WL_CHANSPEC_CTL_SB_U;
- center_chan += CH_20MHZ_APART;
- } else {
-
-
+ if (sb < WL_CHANSPEC_CTL_SB_UL) {
+ /* Primary 40MHz is on lower side */
center_chan -= CH_20MHZ_APART;
+ /* sideband bits are the same for LL/LU and L/U */
+ } else {
+ /* Primary 40MHz is on upper side */
+ center_chan += CH_20MHZ_APART;
+ /* sideband bits need to be adjusted by UL offset */
+ sb -= WL_CHANSPEC_CTL_SB_UL;
}
-
+ /* Create primary 40MHz chanspec */
chspec40 = (WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_40 |
sb | center_chan);
}
return chspec40;
}
-
+/*
+ * Return the channel number for a given frequency and base frequency.
+ * The returned channel number is relative to the given base frequency.
+ * If the given base frequency is zero, a base frequency of 5 GHz is assumed for
+ * frequencies from 5 - 6 GHz, and 2.407 GHz is assumed for 2.4 - 2.5 GHz.
+ *
+ * Frequency is specified in MHz.
+ * The base frequency is specified as (start_factor * 500 kHz).
+ * Constants WF_CHAN_FACTOR_2_4_G, WF_CHAN_FACTOR_5_G are defined for
+ * 2.4 GHz and 5 GHz bands.
+ *
+ * The returned channel will be in the range [1, 14] in the 2.4 GHz band
+ * and [0, 200] otherwise.
+ * -1 is returned if the start_factor is WF_CHAN_FACTOR_2_4_G and the
+ * frequency is not a 2.4 GHz channel, or if the frequency is not an even
+ * multiple of 5 MHz from the base frequency to the base plus 1 GHz.
+ *
+ * Reference 802.11 REVma, section 17.3.8.3, and 802.11B section 18.4.6.2
+ */
int
wf_mhz2channel(uint freq, uint start_factor)
{
uint base;
int offset;
-
+ /* take the default channel start frequency */
if (start_factor == 0) {
if (freq >= 2400 && freq <= 2500)
start_factor = WF_CHAN_FACTOR_2_4_G;
base = start_factor / 2;
-
+ /* check that the frequency is in 1GHz range of the base */
if ((freq < base) || (freq > base + 1000))
return -1;
offset = freq - base;
ch = offset / 5;
-
+ /* check that frequency is a 5MHz multiple from the base */
if (offset != (ch * 5))
return -1;
-
+ /* restricted channel range check for 2.4G */
if (start_factor == WF_CHAN_FACTOR_2_4_G && (ch < 1 || ch > 13))
return -1;
return ch;
}
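+
+/* A minimal worked example (illustrative only, not part of the driver):
+ * channel 6 sits at 2437 MHz, i.e. 30 MHz above the 2.407 GHz base, so the
+ * conversion is (2437 - 2407) / 5 = 6; channel 36 sits 180 MHz above the
+ * 5 GHz base, giving 180 / 5 = 36.
+ */
+static void wf_mhz2channel_example(void)
+{
+	/* 2437 MHz against the 2.4 GHz base (start_factor 4814 = 2407 * 2) */
+	ASSERT(wf_mhz2channel(2437, WF_CHAN_FACTOR_2_4_G) == 6);
+
+	/* 5180 MHz against the 5 GHz base */
+	ASSERT(wf_mhz2channel(5180, WF_CHAN_FACTOR_5_G) == 36);
+
+	/* 2437 MHz is outside the 1 GHz window above the 5 GHz base */
+	ASSERT(wf_mhz2channel(2437, WF_CHAN_FACTOR_5_G) == -1);
+}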
-
+/*
+ * Return the center frequency in MHz of the given channel and base frequency.
+ * The channel number is interpreted relative to the given base frequency.
+ *
+ * The valid channel range is [1, 14] in the 2.4 GHz band and [0, 200] otherwise.
+ * The base frequency is specified as (start_factor * 500 kHz).
+ * Constants WF_CHAN_FACTOR_2_4_G, WF_CHAN_FACTOR_4_G, and WF_CHAN_FACTOR_5_G
+ * are defined for 2.4 GHz, 4 GHz, and 5 GHz bands.
+ * The channel range of [1, 14] is only checked for a start_factor of
+ * WF_CHAN_FACTOR_2_4_G (4814 = 2407 * 2).
+ * Odd start_factors produce channels on .5 MHz boundaries, in which case
+ * the answer is rounded down to an integral MHz.
+ * -1 is returned for an out of range channel.
+ *
+ * Reference 802.11 REVma, section 17.3.8.3, and 802.11B section 18.4.6.2
+ */
int
wf_channel2mhz(uint ch, uint start_factor)
{
return freq;
}
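+
+/* A minimal worked example (illustrative only, not part of the driver): the
+ * center frequency is channel * 5 MHz above the base, so channel 6 on the
+ * 2.407 GHz base is 2437 MHz and channel 36 on the 5 GHz base is 5180 MHz.
+ */
+static void wf_channel2mhz_example(void)
+{
+	ASSERT(wf_channel2mhz(6, WF_CHAN_FACTOR_2_4_G) == 2437);
+	ASSERT(wf_channel2mhz(36, WF_CHAN_FACTOR_5_G) == 5180);
+	ASSERT(wf_channel2mhz(201, WF_CHAN_FACTOR_5_G) == -1);	/* out of range */
+}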
-
-static const struct chan_info {
- uint16 chan;
- uint16 freq;
-} chan_info[] = {
-
- {1, 2412},
- {2, 2417},
- {3, 2422},
- {4, 2427},
- {5, 2432},
- {6, 2437},
- {7, 2442},
- {8, 2447},
- {9, 2452},
- {10, 2457},
- {11, 2462},
- {12, 2467},
- {13, 2472},
- {14, 2484},
-
-#ifdef BAND5G
-
- {34, 5170},
- {38, 5190},
- {42, 5210},
- {46, 5230},
-
-
- {36, 5180},
- {40, 5200},
- {44, 5220},
- {48, 5240},
- {52, 5260},
- {56, 5280},
- {60, 5300},
- {64, 5320},
-
-
- {100, 5500},
- {104, 5520},
- {108, 5540},
- {112, 5560},
- {116, 5580},
- {120, 5600},
- {124, 5620},
- {128, 5640},
- {132, 5660},
- {136, 5680},
- {140, 5700},
-
-
- {149, 5745},
- {153, 5765},
- {157, 5785},
- {161, 5805},
- {165, 5825},
-
-
- {184, 4920},
- {188, 4940},
- {192, 4960},
- {196, 4980},
- {200, 5000},
- {204, 5020},
- {208, 5040},
- {212, 5060},
- {216, 5080}
-#endif
+static const uint16 sidebands[] = {
+ WL_CHANSPEC_CTL_SB_LLL, WL_CHANSPEC_CTL_SB_LLU,
+ WL_CHANSPEC_CTL_SB_LUL, WL_CHANSPEC_CTL_SB_LUU,
+ WL_CHANSPEC_CTL_SB_ULL, WL_CHANSPEC_CTL_SB_ULU,
+ WL_CHANSPEC_CTL_SB_UUL, WL_CHANSPEC_CTL_SB_UUU
};
-
-uint
-wf_freq2channel(uint freq)
+/*
+ * Returns the 80MHz chanspec corresponding to the following input
+ * parameters
+ *
+ * primary_channel - primary 20MHz channel
+ * center_channel - center channel number of the 80MHz channel
+ *
+ * The center_channel can be one of {42, 58, 106, 122, 138, 155}
+ *
+ * returns INVCHANSPEC in case of error
+ */
+chanspec_t
+wf_chspec_80(uint8 center_channel, uint8 primary_channel)
{
+
+ chanspec_t chanspec = INVCHANSPEC;
+ chanspec_t chanspec_cur;
uint i;
- for (i = 0; i < ARRAYSIZE(chan_info); i++) {
- if (chan_info[i].freq == freq)
- return (chan_info[i].chan);
+ for (i = 0; i < WF_NUM_SIDEBANDS_80MHZ; i++) {
+ chanspec_cur = CH80MHZ_CHSPEC(center_channel, sidebands[i]);
+ if (primary_channel == wf_chspec_ctlchan(chanspec_cur)) {
+ chanspec = chanspec_cur;
+ break;
+ }
}
- return (0);
+ /* If the loop ended early, we are good; otherwise we did not
+ * find an 80MHz chanspec with the given center_channel that had a primary channel
+ * matching the given primary_channel.
+ */
+ return chanspec;
}
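+
+/* Usage sketch (illustrative only, not part of the driver): build the 80MHz
+ * chanspec centered on channel 42 whose primary 20MHz channel is 36, then
+ * read the control channel back out of it.
+ */
+static void wf_chspec_80_example(void)
+{
+	chanspec_t chspec = wf_chspec_80(42, 36);
+
+	if (chspec != INVCHANSPEC) {
+		/* by construction, the control channel is the requested primary */
+		ASSERT(wf_chspec_ctlchan(chspec) == 36);
+	}
+}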
+/*
+ * Returns the 80+80 chanspec corresponding to the following input parameters
+ *
+ * primary_20mhz - Primary 20 MHz channel
+ * chan0 - center channel number of one frequency segment
+ * chan1 - center channel number of the other frequency segment
+ *
+ * Parameters chan0 and chan1 are channel numbers in {42, 58, 106, 122, 138, 155}.
+ * The primary channel must be contained in one of the 80MHz channels. This routine
+ * will determine which frequency segment is the primary 80 MHz segment.
+ *
+ * Returns INVCHANSPEC in case of error.
+ *
+ * Refer to IEEE802.11ac section 22.3.14 "Channelization".
+ */
+chanspec_t
+wf_chspec_get8080_chspec(uint8 primary_20mhz, uint8 chan0, uint8 chan1)
+{
+ int sb = 0;
+ uint16 chanspec = 0;
+ int chan0_id = 0, chan1_id = 0;
+ int seg0, seg1;
+
+ chan0_id = channel_80mhz_to_id(chan0);
+ chan1_id = channel_80mhz_to_id(chan1);
+
+ /* make sure the channel numbers were valid */
+ if (chan0_id == -1 || chan1_id == -1)
+ return INVCHANSPEC;
+
+ /* does the primary channel fit with the 1st 80MHz channel ? */
+ sb = channel_to_sb(chan0, primary_20mhz, 80);
+ if (sb >= 0) {
+ /* yes, so chan0 is frequency segment 0, and chan1 is seg 1 */
+ seg0 = chan0_id;
+ seg1 = chan1_id;
+ } else {
+ /* no, so does the primary channel fit with the 2nd 80MHz channel ? */
+ sb = channel_to_sb(chan1, primary_20mhz, 80);
+ if (sb < 0) {
+ /* no match for ctl_ch to either 80MHz center channel */
+ return INVCHANSPEC;
+ }
+ /* swapped, so chan1 is frequency segment 0, and chan0 is seg 1 */
+ seg0 = chan1_id;
+ seg1 = chan0_id;
+ }
+
+ chanspec = ((seg0 << WL_CHANSPEC_CHAN1_SHIFT) |
+ (seg1 << WL_CHANSPEC_CHAN2_SHIFT) |
+ (sb << WL_CHANSPEC_CTL_SB_SHIFT) |
+ WL_CHANSPEC_BW_8080 |
+ WL_CHANSPEC_BAND_5G);
-uint
-wf_channel2freq(uint channel)
+ return chanspec;
+}
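+
+/* Usage sketch (illustrative only, not part of the driver): primary 20MHz
+ * channel 36 lies inside the 80MHz segment centered at 42, so 42 becomes
+ * frequency segment 0 and 106 becomes segment 1; swapping the last two
+ * arguments gives the same result because the routine orders the segments.
+ */
+static void wf_chspec_8080_example(void)
+{
+	chanspec_t chspec = wf_chspec_get8080_chspec(36, 42, 106);
+
+	if (chspec != INVCHANSPEC) {
+		ASSERT(CHSPEC_IS8080(chspec));
+		ASSERT(wf_chspec_ctlchan(chspec) == 36);
+	}
+}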
+
+/*
+ * This function returns the 80MHz channel for the given id.
+ */
+static uint8
+wf_chspec_get80Mhz_ch(uint8 chan_80Mhz_id)
{
- uint i;
+ if (chan_80Mhz_id < WF_NUM_5G_80M_CHANS)
+ return wf_5g_80m_chans[chan_80Mhz_id];
+
+ return 0;
+}
- for (i = 0; i < ARRAYSIZE(chan_info); i++)
- if (chan_info[i].chan == channel)
- return (chan_info[i].freq);
- return (0);
+/*
+ * Returns the primary 80 MHz channel for the provided chanspec
+ *
+ * chanspec - Input chanspec for which the 80MHz primary channel has to be retrieved
+ *
+ * returns -1 in case the provided chanspec is a 20/40 MHz chanspec
+ */
+
+uint8
+wf_chspec_primary80_channel(chanspec_t chanspec)
+{
+ uint8 primary80_chan;
+
+ if (CHSPEC_IS80(chanspec)) {
+ primary80_chan = CHSPEC_CHANNEL(chanspec);
+ }
+ else if (CHSPEC_IS8080(chanspec)) {
+ /* Channel ID 1 corresponds to frequency segment 0, the primary 80 MHz segment */
+ primary80_chan = wf_chspec_get80Mhz_ch(CHSPEC_CHAN1(chanspec));
+ }
+ else if (CHSPEC_IS160(chanspec)) {
+ uint8 center_chan = CHSPEC_CHANNEL(chanspec);
+ uint sb = CHSPEC_CTL_SB(chanspec) >> WL_CHANSPEC_CTL_SB_SHIFT;
+
+ /* Based on the sb value the primary 80 MHz channel can be retrieved:
+ * if sb is in the range 0 to 3, the lower band is the primary 80 MHz band
+ */
+ if (sb < 4) {
+ primary80_chan = center_chan - CH_40MHZ_APART;
+ }
+ /* if sb is in the range 4 to 7, the upper band is the primary 80 MHz band */
+ else
+ {
+ primary80_chan = center_chan + CH_40MHZ_APART;
+ }
+ }
+ else {
+ /* for 20 and 40 MHz */
+ primary80_chan = -1;
+ }
+ return primary80_chan;
+}
+
+/*
+ * Returns the secondary 80 MHz channel for the provided chanspec
+ *
+ * chanspec - Input chanspec for which the 80MHz secondary channel has to be retrieved
+ *
+ * returns -1 in case the provided chanspec is a 20/40/80 MHz chanspec
+ */
+uint8
+wf_chspec_secondary80_channel(chanspec_t chanspec)
+{
+ uint8 secondary80_chan;
+
+ if (CHSPEC_IS8080(chanspec)) {
+ secondary80_chan = wf_chspec_get80Mhz_ch(CHSPEC_CHAN2(chanspec));
+ }
+ else if (CHSPEC_IS160(chanspec)) {
+ uint8 center_chan = CHSPEC_CHANNEL(chanspec);
+ uint sb = CHSPEC_CTL_SB(chanspec) >> WL_CHANSPEC_CTL_SB_SHIFT;
+
+ /* Based on the sb value the secondary 80 MHz channel can be retrieved:
+ * if sb is in the range 0 to 3, the upper band is the secondary 80 MHz band
+ */
+ if (sb < 4) {
+ secondary80_chan = center_chan + CH_40MHZ_APART;
+ }
+ /* if sb is in the range 4 to 7, the lower band is the secondary 80 MHz band */
+ else
+ {
+ secondary80_chan = center_chan - CH_40MHZ_APART;
+ }
+ }
+ else {
+ /* for 20, 40, and 80 MHz */
+ secondary80_chan = -1;
+ }
+ return secondary80_chan;
+}
+
+/*
+ * This function returns the chanspec for the primary 80MHz of a 160MHz or 80+80 channel.
+ *
+ * chanspec - Input chanspec for which the primary 80MHz chanspec has to be retrieved
+ *
+ * returns the input chanspec in case the provided chanspec is an 80 MHz chanspec
+ * returns INVCHANSPEC in case the provided chanspec is a 20/40 MHz chanspec
+ */
+chanspec_t
+wf_chspec_primary80_chspec(chanspec_t chspec)
+{
+ chanspec_t chspec80;
+ uint center_chan;
+ uint sb;
+
+ ASSERT(!wf_chspec_malformed(chspec));
+ if (CHSPEC_IS80(chspec)) {
+ chspec80 = chspec;
+ }
+ else if (CHSPEC_IS8080(chspec)) {
+
+ /* Channel ID 1 corresponds to frequency segment 0, the primary 80 MHz segment */
+ center_chan = wf_chspec_get80Mhz_ch(CHSPEC_CHAN1(chspec));
+
+ sb = CHSPEC_CTL_SB(chspec);
+
+ /* Create primary 80MHz chanspec */
+ chspec80 = (WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_80 | sb | center_chan);
+ }
+ else if (CHSPEC_IS160(chspec)) {
+ center_chan = CHSPEC_CHANNEL(chspec);
+ sb = CHSPEC_CTL_SB(chspec);
+
+ if (sb < WL_CHANSPEC_CTL_SB_ULL) {
+ /* Primary 80MHz is on lower side */
+ center_chan -= CH_40MHZ_APART;
+ }
+ else {
+ /* Primary 80MHz is on upper side */
+ center_chan += CH_40MHZ_APART;
+ sb -= WL_CHANSPEC_CTL_SB_ULL;
+ }
+ /* Create primary 80MHz chanspec */
+ chspec80 = (WL_CHANSPEC_BAND_5G | WL_CHANSPEC_BW_80 | sb | center_chan);
+ }
+ else {
+ chspec80 = INVCHANSPEC;
+ }
+
+ return chspec80;
+}
+
+#ifdef WL11AC_80P80
+uint8
+wf_chspec_channel(chanspec_t chspec)
+{
+ if (CHSPEC_IS8080(chspec)) {
+ return wf_chspec_primary80_channel(chspec);
+ }
+ else {
+ return ((uint8)((chspec) & WL_CHANSPEC_CHAN_MASK));
+ }
}
+#endif /* WL11AC_80P80 */
--- /dev/null
+/** @file circularbuf.c
+ *
+ * PCIe host driver and dongle firmware need to communicate with each other. The mechanism consists
+ * of multiple circular buffers located in (DMA'able) host memory. A circular buffer is either used
+ * for host -> dongle (h2d) or dongle -> host communication. Both host driver and firmware make use
+ * of this source file. This source file contains functions to manage such a set of circular
+ * buffers, but does not contain the code to read or write the data itself into the buffers. It
+ * leaves that up to the software layer that uses this file, which can be implemented either using
+ * pio or DMA transfers. It also leaves the format of the data that is written and read to a higher
+ * layer. Typically the data is in the form of so-called 'message buffers'.
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: circularbuf.c 467150 2014-04-02 17:30:43Z $
+ */
+
+#include <circularbuf.h>
+#include <bcmmsgbuf.h>
+#include <osl.h>
+
+#define CIRCULARBUF_READ_SPACE_AT_END(x) \
+ ((x->w_ptr >= x->rp_ptr) ? (x->w_ptr - x->rp_ptr) : (x->e_ptr - x->rp_ptr))
+
+#define CIRCULARBUF_READ_SPACE_AVAIL(x) \
+ (((CIRCULARBUF_READ_SPACE_AT_END(x) == 0) && (x->w_ptr < x->rp_ptr)) ? \
+ x->w_ptr : CIRCULARBUF_READ_SPACE_AT_END(x))
+
+int cbuf_msg_level = CBUF_ERROR_VAL | CBUF_TRACE_VAL | CBUF_INFORM_VAL;
+
+/* #define CBUF_DEBUG */
+#ifdef CBUF_DEBUG
+#define CBUF_DEBUG_CHECK(x) x
+#else
+#define CBUF_DEBUG_CHECK(x)
+#endif /* CBUF_DEBUG */
+
+/**
+ * -----------------------------------------------------------------------------
+ * Function : circularbuf_init
+ * Description:
+ * Sets up a circular buffer handle over the caller-provided buffer: records the
+ * buffer address and total depth and resets the read/write (and pending) pointers.
+ *
+ * Input Args : buf_base_addr: address of DMA'able host memory provided by caller
+ * total_buf_len: length in bytes of that buffer
+ *
+ * Return Values :
+ * None.
+ * -----------------------------------------------------------------------------
+ */
+void
+circularbuf_init(circularbuf_t *handle, void *buf_base_addr, uint16 total_buf_len)
+{
+ handle->buf_addr = buf_base_addr;
+
+ handle->depth = handle->e_ptr = HTOL32(total_buf_len);
+
+ /* Initialize Read and Write pointers */
+ handle->w_ptr = handle->r_ptr = handle->wp_ptr = handle->rp_ptr = HTOL32(0);
+ handle->mb_ring_bell = NULL;
+ handle->mb_ctx = NULL;
+
+ return;
+}
+
+/**
+ * When an item is added to the circular buffer by the producing party, the consuming party has to
+ * be notified by means of a 'door bell' or 'ring'. This function allows the caller to register a
+ * 'ring' function that will be called when a 'write complete' occurs.
+ */
+void
+circularbuf_register_cb(circularbuf_t *handle, mb_ring_t mb_ring_func, void *ctx)
+{
+ handle->mb_ring_bell = mb_ring_func;
+ handle->mb_ctx = ctx;
+}
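+
+/* Usage sketch (illustrative only, not part of the driver): the doorbell
+ * callback just tells the consuming side that new data is in the ring. The
+ * mb_ring_t signature is assumed here to be 'void (*)(void *ctx)', matching
+ * how mb_ring_bell is invoked in circularbuf_write_complete() below.
+ */
+static void example_ring_doorbell(void *ctx)
+{
+	/* e.g. raise a mailbox interrupt towards the peer, using 'ctx' */
+	(void)ctx;
+}
+
+static void example_register(circularbuf_t *cbuf, void *dev_ctx)
+{
+	circularbuf_register_cb(cbuf, example_ring_doorbell, dev_ctx);
+}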
+
+#ifdef CBUF_DEBUG
+static void
+circularbuf_check_sanity(circularbuf_t *handle)
+{
+ if ((handle->e_ptr > handle->depth) ||
+ (handle->r_ptr > handle->e_ptr) ||
+ (handle->rp_ptr > handle->e_ptr) ||
+ (handle->w_ptr > handle->e_ptr))
+ {
+ printf("%s:%d: Pointers are corrupted.\n", __FUNCTION__, __LINE__);
+ circularbuf_debug_print(handle);
+ ASSERT(0);
+ }
+ return;
+}
+#endif /* CBUF_DEBUG */
+
+/**
+ * -----------------------------------------------------------------------------
+ * Function : circularbuf_reserve_for_write
+ *
+ * Description:
+ * This function reserves N bytes for write in the circular buffer. The circularbuf
+ * implementation will only reserve space in the circular buffer and return
+ * the pointer to the address where the new data can be written.
+ * The actual write implementation (bcopy/dma) is outside the scope of
+ * circularbuf implementation.
+ *
+ * Input Args :
+ * size - No. of bytes to reserve for write
+ *
+ * Return Values :
+ * void * : Pointer to the reserved location. This is the address
+ * that will be used for write (dma/bcopy)
+ *
+ * -----------------------------------------------------------------------------
+ */
+void * BCMFASTPATH
+circularbuf_reserve_for_write(circularbuf_t *handle, uint16 size)
+{
+ int16 avail_space;
+ void *ret_ptr = NULL;
+
+ CBUF_DEBUG_CHECK(circularbuf_check_sanity(handle));
+ ASSERT(size < handle->depth);
+
+ if (handle->wp_ptr >= handle->r_ptr)
+ avail_space = handle->depth - handle->wp_ptr;
+ else
+ avail_space = handle->r_ptr - handle->wp_ptr;
+
+ ASSERT(avail_space <= handle->depth);
+ if (avail_space > size)
+ {
+ /* Great. We have enough space. */
+ ret_ptr = CIRCULARBUF_START(handle) + handle->wp_ptr;
+
+ /*
+ * We need to update the wp_ptr for the next writer.
+ *
+ * Please Note : We are not updating the write pointer here. This can be
+ * done only after write is complete (In case of DMA, we can only schedule
+ * the DMA. Actual completion will be known only on DMA complete interrupt).
+ */
+ handle->wp_ptr += size;
+ return ret_ptr;
+ }
+
+ /*
+ * If there is no available space, we should check if there is some space left
+ * in the beginning of the circular buffer. Wrap-around case, where there is
+ * not enough space in the end of the circular buffer. But, there might be
+ * room in the beginning of the buffer.
+ */
+ if (handle->wp_ptr >= handle->r_ptr)
+ {
+ avail_space = handle->r_ptr;
+ if (avail_space > size)
+ {
+ /* OK. There is room in the beginning. Let's go ahead and use that.
+ * But, before that, we have left a hole at the end of the circular
+ * buffer as that was not sufficient to accommodate the requested
+ * size. Let's make sure this is updated in the circularbuf structure
+ * so that the consumer does not use the hole.
+ */
+ handle->e_ptr = handle->wp_ptr;
+ handle->wp_ptr = size;
+
+ return CIRCULARBUF_START(handle);
+ }
+ }
+
+ /* We have tried enough to accommodate the new packet. There is no room for now. */
+ return NULL;
+}
+
+/**
+ * -----------------------------------------------------------------------------
+ * Function : circularbuf_write_complete
+ *
+ * Description:
+ * This function has to be called by the producer end of circularbuf to indicate to
+ * the circularbuf layer that data has been written and the write pointer can be
+ * updated. In the process, if there was a doorbell callback registered, that
+ * function would also be invoked to notify the consuming party.
+ *
+ * Input Args :
+ * dest_addr : Address where the data was written. This would be the
+ * same address that was reserved earlier.
+ * bytes_written : Length of data written
+ *
+ * -----------------------------------------------------------------------------
+ */
+void BCMFASTPATH
+circularbuf_write_complete(circularbuf_t *handle, uint16 bytes_written)
+{
+ CBUF_DEBUG_CHECK(circularbuf_check_sanity(handle));
+
+ /* Update the write pointer */
+ if ((handle->w_ptr + bytes_written) >= handle->depth) {
+ OSL_CACHE_FLUSH((void *) CIRCULARBUF_START(handle), bytes_written);
+ handle->w_ptr = bytes_written;
+ } else {
+ OSL_CACHE_FLUSH((void *) (CIRCULARBUF_START(handle) + handle->w_ptr),
+ bytes_written);
+ handle->w_ptr += bytes_written;
+ }
+
+ /* And ring the door bell (mail box interrupt) to indicate to the peer that
+ * message is available for consumption.
+ */
+ if (handle->mb_ring_bell)
+ handle->mb_ring_bell(handle->mb_ctx);
+}
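+
+/* Producer-side usage sketch (illustrative only, not part of the driver):
+ * reserve space, copy the message in (a DMA could be scheduled instead),
+ * then mark the write complete so the registered doorbell fires.
+ */
+static int example_produce(circularbuf_t *cbuf, const void *msg, uint16 len)
+{
+	void *dst = circularbuf_reserve_for_write(cbuf, len);
+
+	if (dst == NULL)
+		return -1;	/* ring is full for now; retry later */
+
+	memcpy(dst, msg, len);	/* pio copy; a DMA transfer is equally valid */
+	circularbuf_write_complete(cbuf, len);
+	return 0;
+}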
+
+/**
+ * -----------------------------------------------------------------------------
+ * Function : circularbuf_get_read_ptr
+ *
+ * Description:
+ * This function will be called by the consumer of circularbuf for reading data from
+ * the circular buffer. This will typically be invoked when the consumer gets a
+ * doorbell interrupt.
+ * Please note that the function only returns the pointer (and length) from
+ * where the data can be read. Actual read implementation is up to the
+ * consumer. It could be a bcopy or dma.
+ *
+ * Input Args :
+ * available_len : out parameter, set to the length of data available for read.
+ *
+ * Return Values :
+ * void * : Address from where the data can be read, or NULL if the buffer is empty.
+ *
+ * -----------------------------------------------------------------------------
+ */
+void * BCMFASTPATH
+circularbuf_get_read_ptr(circularbuf_t *handle, uint16 *available_len)
+{
+ uint8 *ret_addr;
+
+ CBUF_DEBUG_CHECK(circularbuf_check_sanity(handle));
+
+ /* First check if there is any data available in the circular buffer */
+ *available_len = CIRCULARBUF_READ_SPACE_AVAIL(handle);
+ if (*available_len == 0)
+ return NULL;
+
+ /*
+ * Although there might be data in the circular buffer to read, in
+ * cases where the write has wrapped around while the read is still at the end of the
+ * buffer, we might have to wrap the read pending pointer around as well.
+ */
+ if (CIRCULARBUF_READ_SPACE_AT_END(handle) == 0)
+ handle->rp_ptr = 0;
+
+ ret_addr = CIRCULARBUF_START(handle) + handle->rp_ptr;
+
+ /*
+ * Please note that we do not update the read pointer here. Only the
+ * read pending pointer is updated, so that the next reader knows where
+ * to read data from.
+ * The read pointer can only be updated when the read is complete.
+ */
+ handle->rp_ptr = (uint16)(ret_addr - CIRCULARBUF_START(handle) + *available_len);
+
+ ASSERT(*available_len <= handle->depth);
+
+ OSL_CACHE_INV((void *) ret_addr, *available_len);
+
+ return ret_addr;
+}
+
+/**
+ * -----------------------------------------------------------------------------
+ * Function : circularbuf_read_complete
+ * Description:
+ * This function has to be called by the consumer end of circularbuf to indicate
+ * that data has been consumed and the read pointer can be updated, so the producing side
+ * can use the freed space for new entries.
+ *
+ *
+ * Input Args :
+ * bytes_read : No. of bytes consumed by the consumer. This has to match
+ * the length returned by circularbuf_get_read_ptr
+ *
+ * Return Values :
+ * CIRCULARBUF_SUCCESS : On success.
+ *
+ * -----------------------------------------------------------------------------
+ */
+circularbuf_ret_t BCMFASTPATH
+circularbuf_read_complete(circularbuf_t *handle, uint16 bytes_read)
+{
+ CBUF_DEBUG_CHECK(circularbuf_check_sanity(handle));
+ ASSERT(bytes_read < handle->depth);
+
+ /* Update the read pointer */
+ if ((handle->w_ptr < handle->e_ptr) && (handle->r_ptr + bytes_read) > handle->e_ptr)
+ handle->r_ptr = bytes_read;
+ else
+ handle->r_ptr += bytes_read;
+
+ return CIRCULARBUF_SUCCESS;
+}
+
+/**
+ * -----------------------------------------------------------------------------
+ * Function : circularbuf_revert_rp_ptr
+ *
+ * Description:
+ * The rp_ptr update during circularbuf_get_read_ptr() is done to reflect the amount of data
+ * that is sent out to be read by the consumer. But the consumer may not always read the
+ * entire data. In such a case, the rp_ptr needs to be reverted back by 'left' bytes, where
+ * 'left' is the no. of bytes left unread.
+ *
+ * Input args:
+ * bytes : The no. of bytes left unread by the consumer
+ *
+ * -----------------------------------------------------------------------------
+ */
+circularbuf_ret_t
+circularbuf_revert_rp_ptr(circularbuf_t *handle, uint16 bytes)
+{
+ CBUF_DEBUG_CHECK(circularbuf_check_sanity(handle));
+ ASSERT(bytes < handle->depth);
+
+ handle->rp_ptr -= bytes;
+
+ return CIRCULARBUF_SUCCESS;
+}
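+
+/* Consumer-side usage sketch (illustrative only, not part of the driver):
+ * fetch a readable region, process it, then either acknowledge the whole
+ * region or hand back the unread tail with circularbuf_revert_rp_ptr().
+ */
+static void example_consume(circularbuf_t *cbuf)
+{
+	uint16 len;
+	void *src = circularbuf_get_read_ptr(cbuf, &len);
+
+	if (src == NULL)
+		return;		/* nothing to read */
+
+	/* ... process 'len' bytes starting at 'src' (bcopy/dma) ... */
+
+	/* everything was consumed, so free the space for the producer */
+	circularbuf_read_complete(cbuf, len);
+}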
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: dhd.h 419132 2013-08-19 21:33:05Z $
+ * $Id: dhd.h 491170 2014-07-15 06:23:58Z $
*/
/****************
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
-#include <linux/string.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_HAS_WAKELOCK)
struct task_struct;
struct sched_param;
int setScheduler(struct task_struct *p, int policy, struct sched_param *param);
+int get_scheduler_policy(struct task_struct *p);
+#define MAX_EVENT 16
#define ALL_INTERFACES 0xff
#include <wlioctl.h>
#include <wlfc_proto.h>
-#if defined(NDISVER) && (NDISVER >= 0x0600)
+#if defined(BCMWDF)
#include <wdf.h>
#include <WdfMiniport.h>
-#endif /* (NDISVER >= 0x0600) */
+#endif /* (BCMWDF) */
+
+#if defined(WL11U) && !defined(MFP)
+#define MFP /* Applying interaction with MFP by spec HS2.0 REL2 */
+#endif /* WL11U */
#if defined(KEEP_ALIVE)
/* Default KEEP_ALIVE Period is 55 sec to prevent AP from sending Keep Alive probe frame */
struct dhd_prot;
struct dhd_info;
struct dhd_ioctl;
-struct dhd_cmn;
/* The level of bus communication with the dongle */
enum dhd_bus_state {
DHD_BUS_DOWN, /* Not ready for frame transfers */
DHD_BUS_LOAD, /* Download access only (CPU reset) */
- DHD_BUS_DATA /* Ready for frame transfers */
+ DHD_BUS_DATA, /* Ready for frame transfers */
+ DHD_BUS_SUSPEND, /* Bus has been suspended */
};
#if defined(NDISVER) && (NDISVER >= 0x0600)
#define CONCURENT_MASK 0x00F0
#endif /* (NDISVER >= 0x0600) */
+/* For supporting multiple interfaces */
+#define DHD_MAX_IFS 16
+#define DHD_DEL_IF -0xE
+#define DHD_BAD_IF -0xF
+
enum dhd_op_flags {
/* Firmware requested operation mode */
DHD_FLAG_STA_MODE = (1 << (0)), /* STA only */
DHD_FLAG_P2P_GC_MODE = (1 << (5)),
DHD_FLAG_P2P_GO_MODE = (1 << (6)),
DHD_FLAG_MBSS_MODE = (1 << (7)), /* MBSS in future */
- DHD_FLAG_IBSS_MODE = (1 << (8))
+ DHD_FLAG_IBSS_MODE = (1 << (8)),
+ DHD_FLAG_MFG_MODE = (1 << (9))
};
-#define MANUFACTRING_FW "WLTEST"
-
/* Max sequential TX/RX Control timeouts to set HANG event */
#ifndef MAX_CNTL_TX_TIMEOUT
#define MAX_CNTL_TX_TIMEOUT 2
#define POWERUP_MAX_RETRY 3 /* how many times we retry to power up the chip */
#endif
#ifndef POWERUP_WAIT_MS
-#define POWERUP_WAIT_MS 5000 /* ms: time out in waiting wifi to come up */
+#define POWERUP_WAIT_MS 2000 /* ms: timeout waiting for wifi to come up */
#endif
enum dhd_bus_wake_state {
DHD_PREALLOC_DHD_INFO = 7
};
-typedef enum {
- DHD_IF_NONE = 0,
- DHD_IF_ADD,
- DHD_IF_DEL,
- DHD_IF_CHANGE,
- DHD_IF_DELETING
-} dhd_if_state_t;
-
-
-typedef enum {
- DHD_IPV6_ADDR_NONE = 0,
- DHD_IPV6_ADDR_ADD,
- DHD_IPV6_ADDR_DELETE
-} dhd_ipv6_op_t;
-
-
-#if defined(CONFIG_DHD_USE_STATIC_BUF)
-
-uint8* dhd_os_prealloc(void *osh, int section, uint size);
-void dhd_os_prefree(void *osh, void *addr, uint size);
-#define DHD_OS_PREALLOC(osh, section, size) dhd_os_prealloc(osh, section, size)
-#define DHD_OS_PREFREE(osh, addr, size) dhd_os_prefree(osh, addr, size)
-
-#else
-
-#define DHD_OS_PREALLOC(osh, section, size) MALLOC(osh, size)
-#define DHD_OS_PREFREE(osh, addr, size) MFREE(osh, addr, size)
-
-#endif /* defined(CONFIG_DHD_USE_STATIC_BUF) */
-
/* Packet alignment for most efficient SDIO (can change based on platform) */
#ifndef DHD_SDALIGN
#define DHD_SDALIGN 32
} reorder_info_t;
#ifdef DHDTCPACK_SUPPRESS
-#define MAXTCPSTREAMS 4 /* Keep this to be power of 2 */
-typedef struct tcp_ack_info {
- void *p_tcpackinqueue;
- uint32 tcpack_number;
- uint ip_tcp_ttllen;
- uint8 ipaddrs[8]; /* Each 4bytes src and dst IP addrs */
- uint8 tcpports[4]; /* Each 2bytes src and dst port number */
-} tcp_ack_info_t;
-
-void dhd_onoff_tcpack_sup(void *pub, bool on);
+
+enum {
+ /* TCPACK suppress off */
+ TCPACK_SUP_OFF,
+ /* Replace TCPACK in txq when new coming one has higher ACK number. */
+ TCPACK_SUP_REPLACE,
+ /* TCPACK_SUP_REPLACE + delayed TCPACK TX unless ACK to PSH DATA.
+ * This benefits half-duplex bus interfaces (e.g. SDIO) in two ways:
+ * 1. TCP DATA packets can be read from the bus first.
+ * 2. TCPACKs that are not urgent remain longer in the TXQ and so can be suppressed.
+ */
+ TCPACK_SUP_DELAYTX,
+ TCPACK_SUP_LAST_MODE
+};
#endif /* DHDTCPACK_SUPPRESS */
+
+/* DMA'ing r/w indices for rings supported */
+#ifdef BCM_INDX_TCM /* FW gets r/w indices in TCM */
+#define DMA_INDX_ENAB(dma_indxsup) 0
+#elif defined BCM_INDX_DMA /* FW gets r/w indices from Host memory */
+#define DMA_INDX_ENAB(dma_indxsup) 1
+#else /* r/w indices in TCM or host memory based on FW/Host agreement */
+#define DMA_INDX_ENAB(dma_indxsup) dma_indxsup
+#endif /* BCM_INDX_TCM */
+
+#if defined(WLTDLS) && defined(PCIE_FULL_DONGLE)
+struct tdls_peer_node {
+ uint8 addr[ETHER_ADDR_LEN];
+ struct tdls_peer_node *next;
+};
+typedef struct tdls_peer_node tdls_peer_node_t;
+typedef struct {
+ tdls_peer_node_t *node;
+ uint8 tdls_peer_count;
+} tdls_peer_tbl_t;
+#endif /* defined(WLTDLS) && defined(PCIE_FULL_DONGLE) */
+
/* Common structure for module and instance linkage */
typedef struct dhd_pub {
 /* Linkage pointers */
struct dhd_bus *bus; /* Bus module handle */
struct dhd_prot *prot; /* Protocol module handle */
struct dhd_info *info; /* Info module handle */
- struct dhd_cmn *cmn; /* dhd_common module handle */
+
+ /* To NDIS developers: the dhd_common structure is redundant;
+ * please do NOT merge it back from other branches!
+ */
+
/* Internal dhd items */
bool up; /* Driver up/down (to OS) */
/* Additional stats for the bus level */
ulong tx_packets; /* Data packets sent to dongle */
+ ulong tx_dropped; /* Data packets dropped in dhd */
ulong tx_multicast; /* Multicast data packets sent to dongle */
ulong tx_errors; /* Errors in sending data to dongle */
ulong tx_ctlpkts; /* Control packets sent to dongle */
/* Suspend disable flag and "in suspend" flag */
int suspend_disable_flag; /* "1" to disable all extra powersaving during suspend */
int in_suspend; /* flag set to 1 when early suspend called */
+#ifdef PNO_SUPPORT
+ int pno_enable; /* pno status : "1" is pno enable */
+ int pno_suspend; /* pno suspend status : "1" is pno suspended */
+#endif /* PNO_SUPPORT */
/* DTIM skip value, default 0(or 1) means wake each DTIM
* 3 means skip 2 DTIMs and wake up 3rd DTIM(9th beacon when AP DTIM is 3)
*/
uint16 maxdatablks;
#endif /* WLBTAMP */
#ifdef PROP_TXSTATUS
- int wlfc_enabled;
+ bool wlfc_enabled;
+ int wlfc_mode;
void* wlfc_state;
- int ptx_opt_enabled;
- bool pending_tx_pkts;
+ /*
+ Mode in which the dhd flow control shall operate. Must be set before
+ traffic starts to the device.
+ 0 - Do not do any proptxstatus flow control
+ 1 - Use implied credit from a packet status
+ 2 - Use explicit credit
+ 3 - Only AMPDU hostreorder is used; no wlfc.
+ */
+ uint8 proptxstatus_mode;
+ bool proptxstatus_txoff;
+ bool proptxstatus_module_ignore;
+ bool proptxstatus_credit_ignore;
+ bool proptxstatus_txstatus_ignore;
+ bool wlfc_rxpkt_chk;
/*
* implement below functions in each platform if needed.
*/
/* platform specific function whether to skip flow control */
bool (*skip_fc)(void);
/* platform specific function for wlfc_enable and wlfc_deinit */
- void (*plat_enable)(void *dhd);
+ void (*plat_init)(void *dhd);
void (*plat_deinit)(void *dhd);
-#endif
+#endif /* PROP_TXSTATUS */
#ifdef PNO_SUPPORT
void *pno_state;
#endif
#endif
struct reorder_info *reorder_bufs[WLHOST_REORDERDATA_MAXFLOWS];
char fw_capabilities[WLC_IOCTL_SMLEN];
-#ifdef RXFRAME_THREAD
-#define MAXSKBPEND 1024
+ #define MAXSKBPEND 1024
void *skbbuf[MAXSKBPEND];
uint32 store_idx;
uint32 sent_idx;
-#endif /* RXFRAME_THREAD */
#ifdef DHDTCPACK_SUPPRESS
- int tcp_ack_info_cnt;
- tcp_ack_info_t tcp_ack_info_tbl[MAXTCPSTREAMS];
+ uint8 tcpack_sup_mode; /* TCPACK suppress mode */
+ void *tcpack_sup_module; /* TCPACK suppress module */
#endif /* DHDTCPACK_SUPPRESS */
+#if defined(ARP_OFFLOAD_SUPPORT)
uint32 arp_version;
+#endif
#if defined(BCMSUP_4WAY_HANDSHAKE) && defined(WLAN_AKM_SUITE_FT_8021X)
 bool fw_4way_handshake; /* Whether firmware will do the 4way handshake. */
#endif
+#ifdef CUSTOM_SET_CPUCORE
+ struct task_struct * current_dpc;
+ struct task_struct * current_rxf;
+ int chan_isvht80;
+#endif /* CUSTOM_SET_CPUCORE */
+
+
+ void *sta_pool; /* pre-allocated pool of sta objects */
+ void *staid_allocator; /* allocator of sta indexes */
+
+ void *flowid_allocator; /* unique flowid allocator */
+ void *flow_ring_table; /* flow ring table, include prot and bus info */
+ void *if_flow_lkup; /* per interface flowid lkup hash table */
+ uint32 num_flow_rings;
+ uint8 flow_prio_map[NUMPRIO];
+ uint8 flow_prio_map_type;
+ char enable_log[MAX_EVENT];
+ bool dma_d2h_ring_upd_support;
+ bool dma_h2d_ring_upd_support;
+#ifdef DHD_WMF
+ bool wmf_ucast_igmp;
+#ifdef DHD_IGMP_UCQUERY
+ bool wmf_ucast_igmp_query;
+#endif
+#ifdef DHD_UCAST_UPNP
+ bool wmf_ucast_upnp;
+#endif
+#endif /* DHD_WMF */
+#ifdef DHD_UNICAST_DHCP
+ bool dhcp_unicast;
+#endif /* DHD_UNICAST_DHCP */
+#ifdef DHD_L2_FILTER
+ bool block_ping;
+#endif
+#if defined(WLTDLS) && defined(PCIE_FULL_DONGLE)
+ tdls_peer_tbl_t peer_tbl;
+#endif /* defined(WLTDLS) && defined(PCIE_FULL_DONGLE) */
char *conf_path; /* module_param: path to config vars file */
struct dhd_conf *conf; /* Bus module handle */
} dhd_pub_t;
-typedef struct dhd_cmn {
- osl_t *osh; /* OSL handle */
- dhd_pub_t *dhd;
-} dhd_cmn_t;
+#if defined(BCMWDF)
+typedef struct {
+ dhd_pub_t *dhd_pub;
+} dhd_workitem_context_t;
+
+WDF_DECLARE_CONTEXT_TYPE_WITH_NAME(dhd_workitem_context_t, dhd_get_dhd_workitem_context)
+#endif /* (BCMWDF) */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
} while (0)
#define DHD_PM_RESUME_WAIT(a) _DHD_PM_RESUME_WAIT(a, 200)
#define DHD_PM_RESUME_WAIT_FOREVER(a) _DHD_PM_RESUME_WAIT(a, ~0)
- #define DHD_PM_RESUME_RETURN_ERROR(a) do { if (dhd_mmc_suspend) return a; } while (0)
+ #ifdef CUSTOMER_HW4
+ #define DHD_PM_RESUME_RETURN_ERROR(a) do { \
+ if (dhd_mmc_suspend) { \
+ printf("%s[%d]: mmc is still in suspend state!!!\n", \
+ __FUNCTION__, __LINE__); \
+ return a; \
+ } \
+ } while (0)
+ #else
+ #define DHD_PM_RESUME_RETURN_ERROR(a) do { \
+ if (dhd_mmc_suspend) return a; } while (0)
+ #endif
#define DHD_PM_RESUME_RETURN do { if (dhd_mmc_suspend) return; } while (0)
#define DHD_SPINWAIT_SLEEP_INIT(a) DECLARE_WAIT_QUEUE_HEAD(a);
} while (0)
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */
-#ifndef DHDTHREAD
-#undef SPINWAIT_SLEEP
-#define SPINWAIT_SLEEP(a, exp, us) SPINWAIT(exp, us)
-#endif /* DHDTHREAD */
#ifndef OSL_SLEEP
#define OSL_SLEEP(ms) OSL_DELAY(ms*1000)
#define DHD_IF_VIF 0x01 /* Virtual IF (Hidden from user) */
-unsigned long dhd_os_spin_lock(dhd_pub_t *pub);
-void dhd_os_spin_unlock(dhd_pub_t *pub, unsigned long flags);
-
-/* Wakelock Functions */
+#ifdef PNO_SUPPORT
+int dhd_pno_clean(dhd_pub_t *dhd);
+#endif /* PNO_SUPPORT */
+/*
+ * Wake locks are an Android power management concept. They are used by applications and services
+ * to request CPU resources.
+ */
extern int dhd_os_wake_lock(dhd_pub_t *pub);
extern int dhd_os_wake_unlock(dhd_pub_t *pub);
extern int dhd_os_wake_lock_timeout(dhd_pub_t *pub);
extern int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t *pub, int val);
extern int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t *pub, int val);
+extern int dhd_os_wake_lock_ctrl_timeout_cancel(dhd_pub_t *pub);
extern int dhd_os_wd_wake_lock(dhd_pub_t *pub);
extern int dhd_os_wd_wake_unlock(dhd_pub_t *pub);
+extern int dhd_os_wake_lock_waive(dhd_pub_t *pub);
+extern int dhd_os_wake_lock_restore(dhd_pub_t *pub);
inline static void MUTEX_LOCK_SOFTAP_SET_INIT(dhd_pub_t * dhdp)
{
#define DHD_OS_WAKE_LOCK(pub) dhd_os_wake_lock(pub)
#define DHD_OS_WAKE_UNLOCK(pub) dhd_os_wake_unlock(pub)
-#define DHD_OS_WD_WAKE_LOCK(pub) dhd_os_wd_wake_lock(pub)
-#define DHD_OS_WD_WAKE_UNLOCK(pub) dhd_os_wd_wake_unlock(pub)
#define DHD_OS_WAKE_LOCK_TIMEOUT(pub) dhd_os_wake_lock_timeout(pub)
#define DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(pub, val) \
dhd_os_wake_lock_rx_timeout_enable(pub, val)
#define DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(pub, val) \
dhd_os_wake_lock_ctrl_timeout_enable(pub, val)
-#define DHD_PACKET_TIMEOUT_MS 1000
+#define DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_CANCEL(pub) \
+ dhd_os_wake_lock_ctrl_timeout_cancel(pub)
+#define DHD_OS_WAKE_LOCK_WAIVE(pub) dhd_os_wake_lock_waive(pub)
+#define DHD_OS_WAKE_LOCK_RESTORE(pub) dhd_os_wake_lock_restore(pub)
+
+#define DHD_OS_WD_WAKE_LOCK(pub) dhd_os_wd_wake_lock(pub)
+#define DHD_OS_WD_WAKE_UNLOCK(pub) dhd_os_wd_wake_unlock(pub)
+#define DHD_PACKET_TIMEOUT_MS 500
#define DHD_EVENT_TIMEOUT_MS 1500
+
/* interface operations (register, remove) should be atomic, use this lock to prevent race
* condition among wifi on/off and interface operation functions
*/
void dhd_net_if_unlock(struct net_device *dev);
#if defined(MULTIPLE_SUPPLICANT)
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1 && 1
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1 && defined(BCMSDIO)
extern struct mutex _dhd_sdio_mutex_lock_;
#endif
#endif /* MULTIPLE_SUPPLICANT */
-typedef struct dhd_if_event {
- uint8 ifidx;
- uint8 action;
- uint8 flags;
- uint8 bssidx;
- uint8 is_AP;
-} dhd_if_event_t;
-
typedef enum dhd_attach_states
{
DHD_ATTACH_STATE_INIT = 0x0,
* Exported from dhd OS modules (dhd_linux/dhd_ndis)
*/
-/* To allow osl_attach/detach calls from os-independent modules */
-osl_t *dhd_osl_attach(void *pdev, uint bustype);
-void dhd_osl_detach(osl_t *osh);
-
/* Indication from bus module regarding presence/insertion of dongle.
* Return dhd_pub_t pointer, used as handle to OS module in later calls.
* Returned structure should have bus and prot pointers filled in.
extern int dhd_attach_p2p(dhd_pub_t *);
extern int dhd_detach_p2p(dhd_pub_t *);
#endif /* WLP2P && WL_CFG80211 */
-extern int dhd_net_attach(dhd_pub_t *dhdp, int idx);
+extern int dhd_register_if(dhd_pub_t *dhdp, int idx, bool need_rtnl_lock);
/* Indication from bus module regarding removal/absence of dongle */
extern void dhd_detach(dhd_pub_t *dhdp);
extern int dhd_os_ioctl_resp_wake(dhd_pub_t * pub);
extern unsigned int dhd_os_get_ioctl_resp_timeout(void);
extern void dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec);
-extern void * dhd_os_open_image(char * filename);
+#if 0 && (NDISVER >= 0x0600)
+#define dhd_os_open_image(a) wl_os_open_image(a)
+#define dhd_os_close_image(a) wl_os_close_image(a)
+#define dhd_os_get_image_block(a, b, c) wl_os_get_image_block(a, b, c)
+#endif /* (NDISVER >= 0x0600) */
+
extern int dhd_os_get_image_block(char * buf, int len, void * image);
+extern void * dhd_os_open_image(char * filename);
extern void dhd_os_close_image(void * image);
extern void dhd_os_wd_timer(void *bus, uint wdtick);
extern void dhd_os_sdlock(dhd_pub_t * pub);
extern void dhd_os_tcpackunlock(dhd_pub_t *pub);
#endif /* DHDTCPACK_SUPPRESS */
-extern void dhd_customer_gpio_wlan_ctrl(int onoff);
-extern int dhd_custom_get_mac_address(unsigned char *buf);
+extern int dhd_customer_oob_irq_map(void *adapter, unsigned long *irq_flags_ptr);
+extern int dhd_customer_gpio_wlan_ctrl(void *adapter, int onoff);
+extern int dhd_custom_get_mac_address(void *adapter, unsigned char *buf);
+extern void get_customized_country_code(void *adapter, char *country_iso_code, wl_country_t *cspec);
extern void dhd_os_sdunlock_sndup_rxq(dhd_pub_t * pub);
extern void dhd_os_sdlock_eventq(dhd_pub_t * pub);
extern void dhd_os_sdunlock_eventq(dhd_pub_t * pub);
extern bool dhd_os_check_hang(dhd_pub_t *dhdp, int ifidx, int ret);
extern int dhd_os_send_hang_message(dhd_pub_t *dhdp);
extern void dhd_set_version_info(dhd_pub_t *pub, char *fw);
+extern bool dhd_os_check_if_up(dhd_pub_t *pub);
+extern int dhd_os_check_wakelock(dhd_pub_t *pub);
+extern int dhd_get_instance(dhd_pub_t *pub);
+#ifdef CUSTOM_SET_CPUCORE
+extern void dhd_set_cpucore(dhd_pub_t *dhd, int set);
+#endif /* CUSTOM_SET_CPUCORE */
#if defined(KEEP_ALIVE)
extern int dhd_keep_alive_onoff(dhd_pub_t *dhd);
#ifdef DHD_DEBUG
extern int write_to_file(dhd_pub_t *dhd, uint8 *buf, int size);
#endif /* DHD_DEBUG */
-#if defined(OOB_INTR_ONLY)
-extern int dhd_customer_oob_irq_map(unsigned long *irq_flags_ptr);
-#endif
-extern void dhd_os_sdtxlock(dhd_pub_t * pub);
-extern void dhd_os_sdtxunlock(dhd_pub_t * pub);
typedef struct {
uint32 limit; /* Expiration time (usec) */
uint32 tick; /* O/S tick time (usec) */
} dhd_timeout_t;
+#ifdef SHOW_LOGTRACE
+typedef struct {
+ int num_fmts;
+ char **fmts;
+ char *raw_fmts;
+} dhd_event_log_t;
+#endif /* SHOW_LOGTRACE */
+
extern void dhd_timeout_start(dhd_timeout_t *tmo, uint usec);
extern int dhd_timeout_expired(dhd_timeout_t *tmo);
extern int dhd_ifname2idx(struct dhd_info *dhd, char *name);
+extern int dhd_ifidx2hostidx(struct dhd_info *dhd, int ifidx);
extern int dhd_net2idx(struct dhd_info *dhd, struct net_device *net);
extern struct net_device * dhd_idx2net(void *pub, int ifidx);
extern int net_os_send_hang_message(struct net_device *dev);
extern int wl_host_event(dhd_pub_t *dhd_pub, int *idx, void *pktdata,
- wl_event_msg_t *, void **data_ptr);
+ wl_event_msg_t *, void **data_ptr, void *);
extern void wl_event_to_host_order(wl_event_msg_t * evt);
extern int dhd_wl_ioctl(dhd_pub_t *dhd_pub, int ifindex, wl_ioctl_t *ioc, void *buf, int len);
extern int dhd_wl_ioctl_cmd(dhd_pub_t *dhd_pub, int cmd, void *arg, int len, uint8 set,
int ifindex);
extern void dhd_common_init(osl_t *osh);
-extern void dhd_common_deinit(dhd_pub_t *dhd_pub, dhd_cmn_t *sa_cmn);
extern int dhd_do_driver_init(struct net_device *net);
-extern int dhd_add_if(struct dhd_info *dhd, int ifidx, void *handle,
- char *name, uint8 *mac_addr, uint32 flags, uint8 bssidx);
-extern void dhd_del_if(struct dhd_info *dhd, int ifidx);
-
+extern int dhd_event_ifadd(struct dhd_info *dhd, struct wl_event_data_if *ifevent,
+ char *name, uint8 *mac);
+extern int dhd_event_ifdel(struct dhd_info *dhd, struct wl_event_data_if *ifevent,
+ char *name, uint8 *mac);
+extern struct net_device* dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, char *name,
+ uint8 *mac, uint8 bssidx, bool need_rtnl_lock);
+extern int dhd_remove_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock);
extern void dhd_vif_add(struct dhd_info *dhd, int ifidx, char * name);
extern void dhd_vif_del(struct dhd_info *dhd, int ifidx);
-
extern void dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx);
extern void dhd_vif_sendup(struct dhd_info *dhd, int ifidx, uchar *cp, int len);
-
/* Send packet to dongle via data channel */
extern int dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pkt);
extern void dhd_sendup_event_common(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data);
/* Send event to host */
extern void dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data);
+#ifdef LOG_INTO_TCPDUMP
+extern void dhd_sendup_log(dhd_pub_t *dhdp, void *data, int len);
+#endif /* LOG_INTO_TCPDUMP */
extern int dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag);
extern uint dhd_bus_status(dhd_pub_t *dhdp);
extern int dhd_bus_start(dhd_pub_t *dhdp);
+extern int dhd_bus_suspend(dhd_pub_t *dhdpub);
+extern int dhd_bus_resume(dhd_pub_t *dhdpub, int stage);
extern int dhd_bus_membytes(dhd_pub_t *dhdp, bool set, uint32 address, uint8 *data, uint size);
extern void dhd_print_buf(void *pbuf, int len, int bytes_per_line);
extern bool dhd_is_associated(dhd_pub_t *dhd, void *bss_buf, int *retval);
+#if defined(BCMSDIO) || defined(BCMPCIE)
extern uint dhd_bus_chip_id(dhd_pub_t *dhdp);
extern uint dhd_bus_chiprev_id(dhd_pub_t *dhdp);
extern uint dhd_bus_chippkg_id(dhd_pub_t *dhdp);
+#endif /* defined(BCMSDIO) || defined(BCMPCIE) */
#if defined(KEEP_ALIVE)
extern int dhd_keep_alive_onoff(dhd_pub_t *dhd);
#endif /* KEEP_ALIVE */
+/* OS spin lock API */
+extern void *dhd_os_spin_lock_init(osl_t *osh);
+extern void dhd_os_spin_lock_deinit(osl_t *osh, void *lock);
+extern unsigned long dhd_os_spin_lock(void *lock);
+void dhd_os_spin_unlock(void *lock, unsigned long flags);
+
+/*
+ * Manage sta objects in an interface. Interface is identified by an ifindex and
+ * sta(s) within an interface are managed using the MAC address of the sta.
+ */
+struct dhd_sta;
+extern struct dhd_sta *dhd_findadd_sta(void *pub, int ifidx, void *ea);
+extern void dhd_del_sta(void *pub, int ifidx, void *ea);
+extern int dhd_get_ap_isolate(dhd_pub_t *dhdp, uint32 idx);
+extern int dhd_set_ap_isolate(dhd_pub_t *dhdp, uint32 idx, int val);
+extern int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx);
+
extern bool dhd_is_concurrent_mode(dhd_pub_t *dhd);
extern int dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *cmd_buf, uint cmd_len, int set);
typedef enum cust_gpio_modes {
#endif /* defined(DHD_DEBUG) */
extern uint android_msg_level;
extern uint config_msg_level;
+extern uint sd_msglevel;
#ifdef WL_WIRELESS_EXT
extern uint iw_msg_level;
#endif
#ifdef WL_CFG80211
extern uint wl_dbg_level;
#endif
+
extern uint dhd_slpauto;
/* Use interrupts */
#ifndef CUSTOM_PNO_EVENT_LOCK_xTIME
#define CUSTOM_PNO_EVENT_LOCK_xTIME DEFAULT_PNO_EVENT_LOCK_xTIME
#endif
-
/* hooks for custom dhd_dpc_prio setting option via Makefile */
#define DEFAULT_DHP_DPC_PRIO 1
#ifndef CUSTOM_DPC_PRIO_SETTING
#define CUSTOM_SUSPEND_BCN_LI_DTIM DEFAULT_SUSPEND_BCN_LI_DTIM
#endif
-#define DEFAULT_WIFI_TURNOFF_DELAY 0
+#ifndef CUSTOM_RXF_PRIO_SETTING
+#define CUSTOM_RXF_PRIO_SETTING MAX((CUSTOM_DPC_PRIO_SETTING - 1), 1)
+#endif
+
+#define DEFAULT_WIFI_TURNOFF_DELAY 0
#ifndef WIFI_TURNOFF_DELAY
#define WIFI_TURNOFF_DELAY DEFAULT_WIFI_TURNOFF_DELAY
#endif /* WIFI_TURNOFF_DELAY */
-#ifdef RXFRAME_THREAD
-#ifndef CUSTOM_RXF_PRIO_SETTING
-#define CUSTOM_RXF_PRIO_SETTING MAX((CUSTOM_DPC_PRIO_SETTING - 1), 1)
-#endif
-#endif /* RXFRAME_THREAD */
+#define DEFAULT_WIFI_TURNON_DELAY 200
+#ifndef WIFI_TURNON_DELAY
+#define WIFI_TURNON_DELAY DEFAULT_WIFI_TURNON_DELAY
+#endif /* WIFI_TURNON_DELAY */
+
+#define DEFAULT_DHD_WATCHDOG_INTERVAL_MS 10 /* msec */
+#ifndef CUSTOM_DHD_WATCHDOG_MS
+#define CUSTOM_DHD_WATCHDOG_MS DEFAULT_DHD_WATCHDOG_INTERVAL_MS
+#endif /* DEFAULT_DHD_WATCHDOG_INTERVAL_MS */
#ifdef WLTDLS
#ifndef CUSTOM_TDLS_IDLE_MODE_SETTING
#endif /* WLTDLS */
-#define MAX_DTIM_SKIP_BEACON_ITERVAL 100 /* max allowed associated AP beacon for dtim skip */
-
+#define MAX_DTIM_SKIP_BEACON_INTERVAL 100 /* max allowed associated AP beacon for DTIM skip */
+#ifndef MAX_DTIM_ALLOWED_INTERVAL
+#define MAX_DTIM_ALLOWED_INTERVAL 600 /* max allowed total beacon interval for DTIM skip */
+#endif
+#define NO_DTIM_SKIP 1
#ifdef SDTEST
/* Echo packet generator (SDIO), pkts/s */
extern uint dhd_pktgen;
/* optionally set by a module_param_string() */
#define MOD_PARAM_PATHLEN 2048
-extern char fw_path[MOD_PARAM_PATHLEN];
-extern char nv_path[MOD_PARAM_PATHLEN];
-extern char conf_path[MOD_PARAM_PATHLEN];
-
#define MOD_PARAM_INFOLEN 512
+
#ifdef SOFTAP
extern char fw_path2[MOD_PARAM_PATHLEN];
#endif
extern uint dhd_download_fw_on_driverload;
-/* For supporting multiple interfaces */
-#define DHD_MAX_IFS 16
-#define DHD_DEL_IF -0xe
-#define DHD_BAD_IF -0xf
-
-#ifdef PROP_TXSTATUS
-/* Please be mindful that total pkttag space is 32 octets only */
-typedef struct dhd_pkttag {
- /*
- b[14:13] - encryption exemption
- b[12 ] - 1 = event channel
- b[11 ] - 1 = this packet was sent in response to one time packet request,
- do not increment credit on status for this one. [WLFC_CTL_TYPE_MAC_REQUEST_PACKET].
- b[10 ] - 1 = signal-only-packet to firmware [i.e. nothing to piggyback on]
- b[9 ] - 1 = packet is host->firmware (transmit direction)
- - 0 = packet received from firmware (firmware->host)
- b[8 ] - 1 = packet was sent due to credit_request (pspoll),
- packet does not count against FIFO credit.
- - 0 = normal transaction, packet counts against FIFO credit
- b[7 ] - 1 = AP, 0 = STA
- b[6:4] - AC FIFO number
- b[3:0] - interface index
- */
- uint16 if_flags;
- /* destination MAC address for this packet so that not every
- module needs to open the packet to find this
- */
- uint8 dstn_ether[ETHER_ADDR_LEN];
- /*
- This 32-bit goes from host to device for every packet.
- */
- uint32 htod_tag;
- /* bus specific stuff */
- union {
- struct {
- void* stuff;
- uint32 thing1;
- uint32 thing2;
- } sd;
- struct {
- void* bus;
- void* urb;
- } usb;
- } bus_specific;
-} dhd_pkttag_t;
-
-#define DHD_PKTTAG_SET_H2DTAG(tag, h2dvalue) ((dhd_pkttag_t*)(tag))->htod_tag = (h2dvalue)
-#define DHD_PKTTAG_H2DTAG(tag) (((dhd_pkttag_t*)(tag))->htod_tag)
-
-#define DHD_PKTTAG_IFMASK 0xf
-#define DHD_PKTTAG_IFTYPE_MASK 0x1
-#define DHD_PKTTAG_IFTYPE_SHIFT 7
-#define DHD_PKTTAG_FIFO_MASK 0x7
-#define DHD_PKTTAG_FIFO_SHIFT 4
-
-#define DHD_PKTTAG_SIGNALONLY_MASK 0x1
-#define DHD_PKTTAG_SIGNALONLY_SHIFT 10
-
-#define DHD_PKTTAG_ONETIMEPKTRQST_MASK 0x1
-#define DHD_PKTTAG_ONETIMEPKTRQST_SHIFT 11
-
-#define DHD_PKTTAG_EVENT_MASK 0x1
-#define DHD_PKTTAG_EVENT_SHIFT 12
-
-#define DHD_PKTTAG_EXEMPT_MASK 0x3
-#define DHD_PKTTAG_EXEMPT_SHIFT 13
-
-#define DHD_PKTTAG_PKTDIR_MASK 0x1
-#define DHD_PKTTAG_PKTDIR_SHIFT 9
-
-#define DHD_PKTTAG_CREDITCHECK_MASK 0x1
-#define DHD_PKTTAG_CREDITCHECK_SHIFT 8
-
-#define DHD_PKTTAG_INVALID_FIFOID 0x7
-
-#define DHD_PKTTAG_SETFIFO(tag, fifo) ((dhd_pkttag_t*)(tag))->if_flags = \
- (((dhd_pkttag_t*)(tag))->if_flags & ~(DHD_PKTTAG_FIFO_MASK << DHD_PKTTAG_FIFO_SHIFT)) | \
- (((fifo) & DHD_PKTTAG_FIFO_MASK) << DHD_PKTTAG_FIFO_SHIFT)
-#define DHD_PKTTAG_FIFO(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \
- DHD_PKTTAG_FIFO_SHIFT) & DHD_PKTTAG_FIFO_MASK)
-
-#define DHD_PKTTAG_SETIF(tag, if) ((dhd_pkttag_t*)(tag))->if_flags = \
- (((dhd_pkttag_t*)(tag))->if_flags & ~DHD_PKTTAG_IFMASK) | ((if) & DHD_PKTTAG_IFMASK)
-#define DHD_PKTTAG_IF(tag) (((dhd_pkttag_t*)(tag))->if_flags & DHD_PKTTAG_IFMASK)
-
-#define DHD_PKTTAG_SETIFTYPE(tag, isAP) ((dhd_pkttag_t*)(tag))->if_flags = \
- (((dhd_pkttag_t*)(tag))->if_flags & \
- ~(DHD_PKTTAG_IFTYPE_MASK << DHD_PKTTAG_IFTYPE_SHIFT)) | \
- (((isAP) & DHD_PKTTAG_IFTYPE_MASK) << DHD_PKTTAG_IFTYPE_SHIFT)
-#define DHD_PKTTAG_IFTYPE(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \
- DHD_PKTTAG_IFTYPE_SHIFT) & DHD_PKTTAG_IFTYPE_MASK)
-
-#define DHD_PKTTAG_SETCREDITCHECK(tag, check) ((dhd_pkttag_t*)(tag))->if_flags = \
- (((dhd_pkttag_t*)(tag))->if_flags & \
- ~(DHD_PKTTAG_CREDITCHECK_MASK << DHD_PKTTAG_CREDITCHECK_SHIFT)) | \
- (((check) & DHD_PKTTAG_CREDITCHECK_MASK) << DHD_PKTTAG_CREDITCHECK_SHIFT)
-#define DHD_PKTTAG_CREDITCHECK(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \
- DHD_PKTTAG_CREDITCHECK_SHIFT) & DHD_PKTTAG_CREDITCHECK_MASK)
-
-#define DHD_PKTTAG_SETPKTDIR(tag, dir) ((dhd_pkttag_t*)(tag))->if_flags = \
- (((dhd_pkttag_t*)(tag))->if_flags & \
- ~(DHD_PKTTAG_PKTDIR_MASK << DHD_PKTTAG_PKTDIR_SHIFT)) | \
- (((dir) & DHD_PKTTAG_PKTDIR_MASK) << DHD_PKTTAG_PKTDIR_SHIFT)
-#define DHD_PKTTAG_PKTDIR(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \
- DHD_PKTTAG_PKTDIR_SHIFT) & DHD_PKTTAG_PKTDIR_MASK)
-
-#define DHD_PKTTAG_SETSIGNALONLY(tag, signalonly) ((dhd_pkttag_t*)(tag))->if_flags = \
- (((dhd_pkttag_t*)(tag))->if_flags & \
- ~(DHD_PKTTAG_SIGNALONLY_MASK << DHD_PKTTAG_SIGNALONLY_SHIFT)) | \
- (((signalonly) & DHD_PKTTAG_SIGNALONLY_MASK) << DHD_PKTTAG_SIGNALONLY_SHIFT)
-#define DHD_PKTTAG_SIGNALONLY(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \
- DHD_PKTTAG_SIGNALONLY_SHIFT) & DHD_PKTTAG_SIGNALONLY_MASK)
-
-#define DHD_PKTTAG_SETONETIMEPKTRQST(tag) ((dhd_pkttag_t*)(tag))->if_flags = \
- (((dhd_pkttag_t*)(tag))->if_flags & \
- ~(DHD_PKTTAG_ONETIMEPKTRQST_MASK << DHD_PKTTAG_ONETIMEPKTRQST_SHIFT)) | \
- (1 << DHD_PKTTAG_ONETIMEPKTRQST_SHIFT)
-#define DHD_PKTTAG_ONETIMEPKTRQST(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \
- DHD_PKTTAG_ONETIMEPKTRQST_SHIFT) & DHD_PKTTAG_ONETIMEPKTRQST_MASK)
-
-#define DHD_PKTTAG_SETEVENT(tag, event) ((dhd_pkttag_t*)(tag))->if_flags = \
- (((dhd_pkttag_t*)(tag))->if_flags & \
- ~(DHD_PKTTAG_EVENT_MASK << DHD_PKTTAG_EVENT_SHIFT)) | \
- (((event) & DHD_PKTTAG_EVENT_MASK) << DHD_PKTTAG_EVENT_SHIFT)
-#define DHD_PKTTAG_EVENT(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \
- DHD_PKTTAG_EVENT_SHIFT) & DHD_PKTTAG_EVENT_MASK)
-
-#define DHD_PKTTAG_EXEMPT_SET(tag, value) ((dhd_pkttag_t*)(tag))->if_flags = \
- (((dhd_pkttag_t*)(tag))->if_flags & \
- ~(DHD_PKTTAG_EXEMPT_MASK << DHD_PKTTAG_EXEMPT_SHIFT)) | \
- (((value) & DHD_PKTTAG_EXEMPT_MASK) << DHD_PKTTAG_EXEMPT_SHIFT)
-#define DHD_PKTTAG_EXEMPT(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \
- DHD_PKTTAG_EXEMPT_SHIFT) & DHD_PKTTAG_EXEMPT_MASK)
-
-#define DHD_PKTTAG_SETDSTN(tag, dstn_MAC_ea) memcpy(((dhd_pkttag_t*)((tag)))->dstn_ether, \
- (dstn_MAC_ea), ETHER_ADDR_LEN)
-#define DHD_PKTTAG_DSTN(tag) ((dhd_pkttag_t*)(tag))->dstn_ether
-
-typedef int (*f_commitpkt_t)(void* ctx, void* p);
-int dhd_os_wlfc_block(dhd_pub_t *pub);
-int dhd_os_wlfc_unblock(dhd_pub_t *pub);
-void dhd_schedule_tx(struct dhd_info *dhd);
-
-#ifdef PROP_TXSTATUS_DEBUG
-#define DHD_WLFC_CTRINC_MAC_CLOSE(entry) do { (entry)->closed_ct++; } while (0)
-#define DHD_WLFC_CTRINC_MAC_OPEN(entry) do { (entry)->opened_ct++; } while (0)
-#else
-#define DHD_WLFC_CTRINC_MAC_CLOSE(entry) do {} while (0)
-#define DHD_WLFC_CTRINC_MAC_OPEN(entry) do {} while (0)
-#endif
-
-#endif /* PROP_TXSTATUS */
-
extern void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar);
extern void dhd_wait_event_wakeup(dhd_pub_t*dhd);
void dhd_arp_offload_add_ip(dhd_pub_t *dhd, uint32 ipaddr, int idx);
#endif /* ARP_OFFLOAD_SUPPORT */
#ifdef WLTDLS
-int dhd_tdls_enable_disable(dhd_pub_t *dhd, bool flag);
-#endif
+int dhd_tdls_enable(struct net_device *dev, bool tdls_on, bool auto_on, struct ether_addr *mac);
+#ifdef PCIE_FULL_DONGLE
+void dhd_tdls_update_peer_info(struct net_device *dev, bool connect_disconnect, uint8 *addr);
+#endif /* PCIE_FULL_DONGLE */
+#endif /* WLTDLS */
/* Neighbor Discovery Offload Support */
int dhd_ndo_enable(dhd_pub_t * dhd, int ndo_enable);
int dhd_ndo_add_ip(dhd_pub_t *dhd, char* ipaddr, int idx);
int dhd_ndo_remove_ip(dhd_pub_t *dhd, int idx);
/* ioctl processing for nl80211 */
-int dhd_ioctl_process(dhd_pub_t *pub, int ifidx, struct dhd_ioctl *ioc);
+int dhd_ioctl_process(dhd_pub_t *pub, int ifidx, struct dhd_ioctl *ioc, void *data_buf);
+void dhd_bus_update_fw_nv_path(struct dhd_bus *bus, char *pfw_path, char *pnv_path, char *pconf_path);
void dhd_set_bus_state(void *bus, uint32 state);
/* Remove proper pkts(either one no-frag pkt or whole fragmented pkts) */
-extern bool dhd_prec_drop_pkts(osl_t *osh, struct pktq *pq, int prec);
+typedef int (*f_droppkt_t)(dhd_pub_t *dhdp, int prec, void* p, bool bPktInQ);
+extern bool dhd_prec_drop_pkts(dhd_pub_t *dhdp, struct pktq *pq, int prec, f_droppkt_t fn);
+
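/*
 * A minimal sketch of an f_droppkt_t hook for the reworked
 * dhd_prec_drop_pkts() above, assuming the hook is expected to account for
 * and free the evicted packet (the my_droppkt name is illustrative only):
 *
 *	static int my_droppkt(dhd_pub_t *dhdp, int prec, void *p, bool in_q)
 *	{
 *		dhdp->tx_dropped++;
 *		PKTFREE(dhdp->osh, p, TRUE);
 *		return BCME_OK;
 *	}
 */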
+#ifdef PROP_TXSTATUS
+int dhd_os_wlfc_block(dhd_pub_t *pub);
+int dhd_os_wlfc_unblock(dhd_pub_t *pub);
+extern const uint8 prio2fifo[];
+#endif /* PROP_TXSTATUS */
+
+uint8* dhd_os_prealloc(dhd_pub_t *dhdpub, int section, uint size, bool kmalloc_if_fail);
+void dhd_os_prefree(dhd_pub_t *dhdpub, void *addr, uint size);
+
+int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost);
+
+#if defined(CONFIG_DHD_USE_STATIC_BUF)
+#define DHD_OS_PREALLOC(dhdpub, section, size) dhd_os_prealloc(dhdpub, section, size, FALSE)
+#define DHD_OS_PREFREE(dhdpub, addr, size) dhd_os_prefree(dhdpub, addr, size)
+#else
+#define DHD_OS_PREALLOC(dhdpub, section, size) MALLOC(dhdpub->osh, size)
+#define DHD_OS_PREFREE(dhdpub, addr, size) MFREE(dhdpub->osh, addr, size)
+#endif /* defined(CONFIG_DHD_USE_STATIC_BUF) */
+
+
+#define dhd_add_flowid(pub, ifidx, ac_prio, ea, flowid) do {} while (0)
+#define dhd_del_flowid(pub, ifidx, flowid) do {} while (0)
+
+extern unsigned long dhd_os_general_spin_lock(dhd_pub_t *pub);
+extern void dhd_os_general_spin_unlock(dhd_pub_t *pub, unsigned long flags);
+
+/** Miscellaneous DHD Spin Locks */
+
+/* Disable router 3GMAC bypass path perimeter lock */
+#define DHD_PERIM_LOCK(dhdp) do {} while (0)
+#define DHD_PERIM_UNLOCK(dhdp) do {} while (0)
+
+/* Enable DHD general spin lock/unlock */
+#define DHD_GENERAL_LOCK(dhdp, flags) \
+ (flags) = dhd_os_general_spin_lock(dhdp)
+#define DHD_GENERAL_UNLOCK(dhdp, flags) \
+ dhd_os_general_spin_unlock((dhdp), (flags))
+
+/* Enable DHD flowring queue spin lock/unlock */
+#define DHD_QUEUE_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock)
+#define DHD_QUEUE_UNLOCK(lock, flags) dhd_os_spin_unlock((lock), (flags))
+
+
+
+typedef struct wl_io_pport {
+ dhd_pub_t *dhd_pub;
+ uint ifidx;
+} wl_io_pport_t;
+extern void *dhd_pub_wlinfo(dhd_pub_t *dhd_pub);
+#ifdef EXYNOS5433_PCIE_WAR
+extern void exynos_pcie_set_l1_exit(void);
+extern void exynos_pcie_clear_l1_exit(void);
+extern int enum_wifi;
+#endif /* EXYNOS5433_PCIE_WAR */
#endif /* _dhd_h_ */
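/*
 * A minimal usage sketch for the general spin lock macros declared above,
 * assuming a caller that protects shared dhd_pub_t state; the IRQ flags
 * returned by the lock are kept in a local and handed back on unlock:
 *
 *	unsigned long flags;
 *
 *	DHD_GENERAL_LOCK(dhdp, flags);
 *	... update shared state ...
 *	DHD_GENERAL_UNLOCK(dhdp, flags);
 */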
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: dhd_bta.c 379512 2013-01-17 22:49:08Z $
+ * $Id: dhd_bta.c 434434 2013-11-06 07:16:02Z $
*/
#ifndef WLBTAMP
#error "WLBTAMP is not defined"
void *pkt = pktq_pdeq(q, prec);
int ifidx;
- PKTPULL(pub->osh, pkt, dhd_bus_hdrlen(pub->bus));
dhd_prot_hdrpull(pub, &ifidx, pkt, NULL, NULL);
if (PKTLEN(pub->osh, pkt) >= RFC1042_HDR_LEN) {
}
dhd_prot_hdrpush(pub, ifidx, pkt);
- PKTPUSH(pub->osh, pkt, dhd_bus_hdrlen(pub->bus));
if (head_pkt == NULL)
head_pkt = pkt;
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: dhd_bus.h 335569 2012-05-29 12:04:43Z $
+ * $Id: dhd_bus.h 491657 2014-07-17 06:29:40Z $
*/
#ifndef _dhd_bus_h_
extern void dhd_bus_unregister(void);
/* Download firmware image and nvram image */
-extern bool dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh,
+extern int dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh,
char *fw_path, char *nv_path, char *conf_path);
/* Stop bus module: clear pending frames, disable data flow */
extern void dhd_bus_setidletime(dhd_pub_t *dhdp, int idle_time);
/* Send a data frame to the dongle. Callee disposes of txp. */
+#ifdef BCMPCIE
+extern int dhd_bus_txdata(struct dhd_bus *bus, void *txp, uint8 ifidx);
+#else
extern int dhd_bus_txdata(struct dhd_bus *bus, void *txp);
+#endif
+
/* Send/receive a control message to/from the dongle.
* Expects caller to enforce a single outstanding transaction.
/* Watchdog timer function */
extern bool dhd_bus_watchdog(dhd_pub_t *dhd);
-extern void dhd_disable_intr(dhd_pub_t *dhd);
+
+extern int dhd_bus_oob_intr_register(dhd_pub_t *dhdp);
+extern void dhd_bus_oob_intr_unregister(dhd_pub_t *dhdp);
+extern void dhd_bus_oob_intr_set(dhd_pub_t *dhdp, bool enable);
+extern void dhd_bus_dev_pm_stay_awake(dhd_pub_t *dhdpub);
+extern void dhd_bus_dev_pm_relax(dhd_pub_t *dhdpub);
+extern bool dhd_bus_dev_pm_enabled(dhd_pub_t *dhdpub);
#if defined(DHD_DEBUG)
/* Device console input function */
/* return the dongle chipid */
extern uint dhd_bus_chip(struct dhd_bus *bus);
+/* return the dongle chiprev */
+extern uint dhd_bus_chiprev(struct dhd_bus *bus);
+
/* Set user-specified nvram parameters. */
extern void dhd_bus_set_nvram_params(struct dhd_bus * bus, const char *nvram_params);
extern void *dhd_bus_pub(struct dhd_bus *bus);
extern void *dhd_bus_txq(struct dhd_bus *bus);
+extern void *dhd_bus_sih(struct dhd_bus *bus);
extern uint dhd_bus_hdrlen(struct dhd_bus *bus);
-
+#ifdef BCMSDIO
+extern void dhd_bus_set_dotxinrx(struct dhd_bus *bus, bool val);
+#else
+#define dhd_bus_set_dotxinrx(a, b) do {} while (0)
+#endif
#define DHD_SET_BUS_STATE_DOWN(_bus) do { \
(_bus)->dhd->busstate = DHD_BUS_DOWN; \
/* Register a dummy SDIO client driver in order to be notified of new SDIO device */
extern int dhd_bus_reg_sdio_notify(void* semaphore);
extern void dhd_bus_unreg_sdio_notify(void);
-
extern void dhd_txglom_enable(dhd_pub_t *dhdp, bool enable);
-
+extern int dhd_bus_get_ids(struct dhd_bus *bus, uint32 *bus_type, uint32 *bus_num,
+ uint32 *slot_num);
+
+#ifdef BCMPCIE
+enum {
+ DNGL_TO_HOST_BUF_IOCT,
+ DNGL_TO_HOST_DMA_SCRATCH_BUFFER,
+ DNGL_TO_HOST_DMA_SCRATCH_BUFFER_LEN,
+ HOST_TO_DNGL_DMA_WRITEINDX_BUFFER,
+ HOST_TO_DNGL_DMA_READINDX_BUFFER,
+ DNGL_TO_HOST_DMA_WRITEINDX_BUFFER,
+ DNGL_TO_HOST_DMA_READINDX_BUFFER,
+ TOTAL_LFRAG_PACKET_CNT,
+ HTOD_MB_DATA,
+ DTOH_MB_DATA,
+ RING_BUF_ADDR,
+ H2D_DMA_WRITEINDX,
+ H2D_DMA_READINDX,
+ D2H_DMA_WRITEINDX,
+ D2H_DMA_READINDX,
+ RING_READ_PTR,
+ RING_WRITE_PTR,
+ RING_LEN_ITEMS,
+ RING_MAX_ITEM,
+ MAX_HOST_RXBUFS
+};
+typedef void (*dhd_mb_ring_t) (struct dhd_bus *, uint32);
+extern void dhd_bus_cmn_writeshared(struct dhd_bus *bus, void * data, uint32 len, uint8 type,
+ uint16 ringid);
+extern void dhd_bus_ringbell(struct dhd_bus *bus, uint32 value);
+extern void dhd_bus_cmn_readshared(struct dhd_bus *bus, void* data, uint8 type, uint16 ringid);
+extern uint32 dhd_bus_get_sharedflags(struct dhd_bus *bus);
+extern void dhd_bus_rx_frame(struct dhd_bus *bus, void* pkt, int ifidx, uint pkt_count);
+extern void dhd_bus_start_queue(struct dhd_bus *bus);
+extern void dhd_bus_stop_queue(struct dhd_bus *bus);
+extern void dhd_bus_update_retlen(struct dhd_bus *bus, uint32 retlen, uint32 cmd_id, uint16 status,
+ uint32 resp_len);
+extern dhd_mb_ring_t dhd_bus_get_mbintr_fn(struct dhd_bus *bus);
+extern void dhd_bus_write_flow_ring_states(struct dhd_bus *bus,
+ void * data, uint16 flowid);
+extern void dhd_bus_read_flow_ring_states(struct dhd_bus *bus,
+ void * data, uint8 flowid);
+extern int dhd_bus_flow_ring_create_request(struct dhd_bus *bus, void *flow_ring_node);
+extern void dhd_bus_clean_flow_ring(struct dhd_bus *bus, uint16 flowid);
+extern void dhd_bus_flow_ring_create_response(struct dhd_bus *bus, uint16 flow_id, int32 status);
+extern int dhd_bus_flow_ring_delete_request(struct dhd_bus *bus, void *flow_ring_node);
+extern void dhd_bus_flow_ring_delete_response(struct dhd_bus *bus, uint16 flowid, uint32 status);
+extern int dhd_bus_flow_ring_flush_request(struct dhd_bus *bus, void *flow_ring_node);
+extern void dhd_bus_flow_ring_flush_response(struct dhd_bus *bus, uint16 flowid, uint32 status);
+extern uint8 dhd_bus_is_txmode_push(struct dhd_bus *bus);
+extern uint32 dhd_bus_max_h2d_queues(struct dhd_bus *bus, uint8 *txpush);
+extern int dhd_bus_schedule_queue(struct dhd_bus *bus, uint16 flow_id, bool txs);
+extern int dhdpcie_bus_clock_start(struct dhd_bus *bus);
+extern int dhdpcie_bus_clock_stop(struct dhd_bus *bus);
+extern int dhdpcie_bus_enable_device(struct dhd_bus *bus);
+extern int dhdpcie_bus_disable_device(struct dhd_bus *bus);
+extern int dhdpcie_bus_alloc_resource(struct dhd_bus *bus);
+extern void dhdpcie_bus_free_resource(struct dhd_bus *bus);
+extern bool dhdpcie_bus_dongle_attach(struct dhd_bus *bus);
+extern int dhd_bus_release_dongle(struct dhd_bus *bus);
+
+
+#endif /* BCMPCIE */
#endif /* _dhd_bus_h_ */
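/*
 * A minimal caller-side sketch for the dhd_bus_txdata() prototypes above,
 * assuming the transmit path picks the variant at compile time; with
 * BCMPCIE the per-packet interface index travels down to the bus layer:
 *
 *	#ifdef BCMPCIE
 *		ret = dhd_bus_txdata(dhdp->bus, pkt, (uint8)ifidx);
 *	#else
 *		ret = dhd_bus_txdata(dhdp->bus, pkt);
 *	#endif
 */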
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: dhd_cdc.c 416698 2013-08-06 07:53:34Z $
+ * $Id: dhd_cdc.c 472193 2014-04-23 06:27:38Z $
*
* BDC is like CDC, except it includes a header for data packets to convey
* packet priority over the bus, and flags (e.g. to indicate checksum status
return -EIO;
}
-
memset(msg, 0, sizeof(cdc_ioctl_t));
msg->cmd = htol32(cmd);
{
bcm_bprintf(strbuf, "Protocol CDC: reqid %d\n", dhdp->prot->reqid);
#ifdef PROP_TXSTATUS
- if (dhdp->wlfc_state)
- dhd_wlfc_dump(dhdp, strbuf);
+ dhd_wlfc_dump(dhdp, strbuf);
#endif
}
}
#undef PKTBUF /* Only defined in the above routine */
+uint
+dhd_prot_hdrlen(dhd_pub_t *dhd, void *PKTBUF)
+{
+ uint hdrlen = 0;
+#ifdef BDC
+ /* Length of BDC(+WLFC) headers pushed */
+ hdrlen = BDC_HEADER_LEN + (((struct bdc_header *)PKTBUF)->dataOffset * 4);
+#endif
+ return hdrlen;
+}
+
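/*
 * A worked example for dhd_prot_hdrlen() above, assuming BDC_HEADER_LEN is
 * 4 bytes and dataOffset counts 32-bit words: a packet with dataOffset == 2
 * reports 4 + 2 * 4 = 12 bytes of BDC(+WLFC) header.
 */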
int
dhd_prot_hdrpull(dhd_pub_t *dhd, int *ifidx, void *pktbuf, uchar *reorder_buf_info,
uint *reorder_info_len)
PKTPULL(dhd->osh, pktbuf, BDC_HEADER_LEN);
#endif /* BDC */
+#if defined(NDISVER) && (NDISVER < 0x0630)
+ if (PKTLEN(dhd->osh, pktbuf) < (uint32) (data_offset << 2)) {
+ DHD_ERROR(("%s: rx data too short (%d < %d)\n", __FUNCTION__,
+ PKTLEN(dhd->osh, pktbuf), (data_offset * 4)));
+ return BCME_ERROR;
+ }
+#endif /* (NDISVER < 0x0630) */
+
#ifdef PROP_TXSTATUS
- dhd_os_wlfc_block(dhd);
- if (dhd->wlfc_state &&
- ((athost_wl_status_info_t*)dhd->wlfc_state)->proptxstatus_mode
- != WLFC_FCMODE_NONE &&
- (!DHD_PKTTAG_PKTDIR(PKTTAG(pktbuf)))) {
+ if (!DHD_PKTTAG_PKTDIR(PKTTAG(pktbuf))) {
/*
- parse txstatus only for packets that came from the firmware
*/
dhd_wlfc_parse_header_info(dhd, pktbuf, (data_offset << 2),
reorder_buf_info, reorder_info_len);
- ((athost_wl_status_info_t*)dhd->wlfc_state)->stats.dhd_hdrpulls++;
}
- dhd_os_wlfc_unblock(dhd);
#endif /* PROP_TXSTATUS */
exit:
- PKTPULL(dhd->osh, pktbuf, (data_offset << 2));
+ PKTPULL(dhd->osh, pktbuf, (data_offset << 2));
return 0;
}
-#if defined(PROP_TXSTATUS)
-void
-dhd_wlfc_trigger_pktcommit(dhd_pub_t *dhd)
-{
- dhd_os_wlfc_block(dhd);
- if (dhd->wlfc_state &&
- (((athost_wl_status_info_t*)dhd->wlfc_state)->proptxstatus_mode
- != WLFC_FCMODE_NONE)) {
- dhd_wlfc_commit_packets(dhd->wlfc_state, (f_commitpkt_t)dhd_bus_txdata,
- (void *)dhd->bus, NULL);
- }
- dhd_os_wlfc_unblock(dhd);
-}
-#endif
-
int
dhd_prot_attach(dhd_pub_t *dhd)
{
dhd_prot_t *cdc;
- if (!(cdc = (dhd_prot_t *)DHD_OS_PREALLOC(dhd->osh, DHD_PREALLOC_PROT,
- sizeof(dhd_prot_t)))) {
- DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
- goto fail;
- }
+ if (!(cdc = (dhd_prot_t *)DHD_OS_PREALLOC(dhd, DHD_PREALLOC_PROT, sizeof(dhd_prot_t)))) {
+ DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
+ goto fail;
+ }
memset(cdc, 0, sizeof(dhd_prot_t));
/* ensure that the msg buf directly follows the cdc msg struct */
return 0;
fail:
-#ifndef CONFIG_DHD_USE_STATIC_BUF
if (cdc != NULL)
- MFREE(dhd->osh, cdc, sizeof(dhd_prot_t));
-#endif /* CONFIG_DHD_USE_STATIC_BUF */
+ DHD_OS_PREFREE(dhd, cdc, sizeof(dhd_prot_t));
return BCME_NOMEM;
}
{
#ifdef PROP_TXSTATUS
dhd_wlfc_deinit(dhd);
- if (dhd->plat_deinit)
- dhd->plat_deinit((void *)dhd);
#endif
-#ifndef CONFIG_DHD_USE_STATIC_BUF
- MFREE(dhd->osh, dhd->prot, sizeof(dhd_prot_t));
-#endif /* CONFIG_DHD_USE_STATIC_BUF */
+ DHD_OS_PREFREE(dhd, dhd->prot, sizeof(dhd_prot_t));
dhd->prot = NULL;
}
void
dhd_prot_dstats(dhd_pub_t *dhd)
{
- /* No stats from dongle added yet, copy bus stats */
+ /* copy bus stats */
+
dhd->dstats.tx_packets = dhd->tx_packets;
dhd->dstats.tx_errors = dhd->tx_errors;
dhd->dstats.rx_packets = dhd->rx_packets;
}
int
-dhd_prot_init(dhd_pub_t *dhd)
+dhd_sync_with_dongle(dhd_pub_t *dhd)
{
int ret = 0;
wlc_rev_info_t revinfo;
goto done;
-#if defined(WL_CFG80211)
- if (dhd_download_fw_on_driverload)
-#endif /* defined(WL_CFG80211) */
- ret = dhd_preinit_ioctls(dhd);
+ dhd_process_cid_mac(dhd, TRUE);
+
+ ret = dhd_preinit_ioctls(dhd);
+
+ if (!ret)
+ dhd_process_cid_mac(dhd, FALSE);
+
/* Always assumes wl for now */
dhd->iswl = TRUE;
return ret;
}
+int dhd_prot_init(dhd_pub_t *dhd)
+{
+ return TRUE;
+}
+
void
dhd_prot_stop(dhd_pub_t *dhd)
{
- /* Nothing to do for CDC */
+/* Nothing to do for CDC */
}
dhd_get_hostreorder_pkts(void *osh, struct reorder_info *ptr, void **pkt,
uint32 *pkt_count, void **pplast, uint8 start, uint8 end)
{
- uint i;
void *plast = NULL, *p;
uint32 pkt_cnt = 0;
*pkt = NULL;
return;
}
- if (start == end)
- i = ptr->max_idx + 1;
- else {
- if (start > end)
- i = ((ptr->max_idx + 1) - start) + end;
- else
- i = end - start;
- }
- while (i) {
+ do {
p = (void *)(ptr->p[start]);
ptr->p[start] = NULL;
plast = p;
pkt_cnt++;
}
- i--;
- if (start++ == ptr->max_idx)
+ start++;
+ if (start > ptr->max_idx)
start = 0;
- }
+ } while (start != end);
*pplast = plast;
- *pkt_count = (uint32)pkt_cnt;
+ *pkt_count = pkt_cnt;
+ ptr->pend_pkts -= (uint8)pkt_cnt;
}
int
dhd_get_hostreorder_pkts(dhd->osh, ptr, pkt, &cnt, &plast,
cur_idx, exp_idx);
- ptr->pend_pkts -= (uint8)cnt;
*pkt_count = cnt;
DHD_REORDER(("%s: freeing up buffers %d, still pending %d\n",
__FUNCTION__, cnt, ptr->pend_pkts));
end_idx = exp_idx;
dhd_get_hostreorder_pkts(dhd->osh, ptr, pkt, &cnt, &plast, ptr->exp_idx, end_idx);
- ptr->pend_pkts -= (uint8)cnt;
if (plast)
PKTSETNEXT(dhd->osh, plast, cur_pkt);
else
* $Id: wl_cfg80211.c,v 1.1.4.1.2.14 2011/02/09 01:40:07 Exp $
*/
+#include <linux/vmalloc.h>
#include <net/rtnetlink.h>
#include <bcmutils.h>
#include <dhd.h>
#endif
-extern struct wl_priv *wlcfg_drv_priv;
+extern struct bcm_cfg80211 *g_bcm_cfg;
#ifdef PKT_FILTER_SUPPORT
extern uint dhd_pkt_filter_enable;
#include <dhd.h>
#include <dhdioctl.h>
#include <wlioctl.h>
+#include <brcm_nl80211.h>
#include <dhd_cfg80211.h>
-static s32 wl_dongle_up(struct net_device *ndev, u32 up);
+static s32 wl_dongle_up(struct net_device *ndev);
+static s32 wl_dongle_down(struct net_device *ndev);
/**
* Function implementations
*/
-s32 dhd_cfg80211_init(struct wl_priv *wl)
+s32 dhd_cfg80211_init(struct bcm_cfg80211 *cfg)
{
dhd_dongle_up = FALSE;
return 0;
}
-s32 dhd_cfg80211_deinit(struct wl_priv *wl)
+s32 dhd_cfg80211_deinit(struct bcm_cfg80211 *cfg)
{
dhd_dongle_up = FALSE;
return 0;
}
-s32 dhd_cfg80211_down(struct wl_priv *wl)
+s32 dhd_cfg80211_down(struct bcm_cfg80211 *cfg)
{
+ struct net_device *ndev;
+ s32 err = 0;
+
+ WL_TRACE(("In\n"));
+ if (!dhd_dongle_up) {
+ WL_ERR(("Dongle is already down\n"));
+ return err;
+ }
+
+ ndev = bcmcfg_to_prmry_ndev(cfg);
+ wl_dongle_down(ndev);
dhd_dongle_up = FALSE;
return 0;
}
-s32 dhd_cfg80211_set_p2p_info(struct wl_priv *wl, int val)
+s32 dhd_cfg80211_set_p2p_info(struct bcm_cfg80211 *cfg, int val)
{
- dhd_pub_t *dhd = (dhd_pub_t *)(wl->pub);
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
dhd->op_mode |= val;
WL_ERR(("Set : op_mode=0x%04x\n", dhd->op_mode));
#ifdef ARP_OFFLOAD_SUPPORT
return 0;
}
-s32 dhd_cfg80211_clean_p2p_info(struct wl_priv *wl)
+s32 dhd_cfg80211_clean_p2p_info(struct bcm_cfg80211 *cfg)
{
- dhd_pub_t *dhd = (dhd_pub_t *)(wl->pub);
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
dhd->op_mode &= ~(DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE);
WL_ERR(("Clean : op_mode=0x%04x\n", dhd->op_mode));
return 0;
}
-static s32 wl_dongle_up(struct net_device *ndev, u32 up)
+struct net_device* wl_cfg80211_allocate_if(struct bcm_cfg80211 *cfg, int ifidx, char *name,
+ uint8 *mac, uint8 bssidx)
{
- s32 err = 0;
-
- err = wldev_ioctl(ndev, WLC_UP, &up, sizeof(up), true);
- if (unlikely(err)) {
- WL_ERR(("WLC_UP error (%d)\n", err));
- }
- return err;
+ return dhd_allocate_if(cfg->pub, ifidx, name, mac, bssidx, FALSE);
}
-s32 dhd_config_dongle(struct wl_priv *wl, bool need_lock)
-{
-#ifndef DHD_SDALIGN
-#define DHD_SDALIGN 32
-#endif
- struct net_device *ndev;
- s32 err = 0;
-
- WL_TRACE(("In\n"));
- if (dhd_dongle_up) {
- WL_ERR(("Dongle is already up\n"));
- return err;
- }
-
- ndev = wl_to_prmry_ndev(wl);
-
- if (need_lock)
- rtnl_lock();
-
- err = wl_dongle_up(ndev, 0);
- if (unlikely(err)) {
- WL_ERR(("wl_dongle_up failed\n"));
- goto default_conf_out;
- }
- dhd_dongle_up = true;
-
-default_conf_out:
- if (need_lock)
- rtnl_unlock();
- return err;
-}
-
-#ifdef CONFIG_NL80211_TESTMODE
-int dhd_cfg80211_testmode_cmd(struct wiphy *wiphy, void *data, int len)
-{
- struct sk_buff *reply;
- struct wl_priv *wl;
- dhd_pub_t *dhd;
- dhd_ioctl_t *ioc = data;
- int err = 0;
-
- WL_TRACE(("entry: cmd = %d\n", ioc->cmd));
- wl = wiphy_priv(wiphy);
- dhd = wl->pub;
-
- DHD_OS_WAKE_LOCK(dhd);
-
- /* send to dongle only if we are not waiting for reload already */
- if (dhd->hang_was_sent) {
- WL_ERR(("HANG was sent up earlier\n"));
- DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhd, DHD_EVENT_TIMEOUT_MS);
- DHD_OS_WAKE_UNLOCK(dhd);
- return OSL_ERROR(BCME_DONGLE_DOWN);
- }
-
- /* currently there is only one wiphy for ifidx 0 */
- err = dhd_ioctl_process(dhd, 0, ioc);
- if (err)
- goto done;
-
- /* response data is in ioc->buf so return ioc here */
- reply = cfg80211_testmode_alloc_reply_skb(wiphy, sizeof(*ioc));
- nla_put(reply, NL80211_ATTR_TESTDATA, sizeof(*ioc), ioc);
- err = cfg80211_testmode_reply(reply);
-done:
- DHD_OS_WAKE_UNLOCK(dhd);
- return err;
-}
-#endif /* CONFIG_NL80211_TESTMODE */
-
-/* TODO: clean up the BT-Coex code, it still have some legacy ioctl/iovar functions */
-#define COEX_DHCP
-
-#if defined(COEX_DHCP)
-
-/* use New SCO/eSCO smart YG suppression */
-#define BT_DHCP_eSCO_FIX
-/* this flag boost wifi pkt priority to max, caution: -not fair to sco */
-#define BT_DHCP_USE_FLAGS
-/* T1 start SCO/ESCo priority suppression */
-#define BT_DHCP_OPPR_WIN_TIME 2500
-/* T2 turn off SCO/SCO supperesion is (timeout) */
-#define BT_DHCP_FLAG_FORCE_TIME 5500
-
-enum wl_cfg80211_btcoex_status {
- BT_DHCP_IDLE,
- BT_DHCP_START,
- BT_DHCP_OPPR_WIN,
- BT_DHCP_FLAG_FORCE_TIMEOUT
-};
-
-/*
- * get named driver variable to uint register value and return error indication
- * calling example: dev_wlc_intvar_get_reg(dev, "btc_params",66, &reg_value)
- */
-static int
-dev_wlc_intvar_get_reg(struct net_device *dev, char *name,
- uint reg, int *retval)
+int wl_cfg80211_register_if(struct bcm_cfg80211 *cfg, int ifidx, struct net_device* ndev)
{
- union {
- char buf[WLC_IOCTL_SMLEN];
- int val;
- } var;
- int error;
-
-	bcm_mkiovar(name, (char *)(&reg), sizeof(reg),
- (char *)(&var), sizeof(var.buf));
- error = wldev_ioctl(dev, WLC_GET_VAR, (char *)(&var), sizeof(var.buf), false);
-
- *retval = dtoh32(var.val);
- return (error);
+ return dhd_register_if(cfg->pub, ifidx, FALSE);
}
-static int
-dev_wlc_bufvar_set(struct net_device *dev, char *name, char *buf, int len)
-{
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
- char ioctlbuf_local[1024];
-#else
- static char ioctlbuf_local[1024];
-#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
-
- bcm_mkiovar(name, buf, len, ioctlbuf_local, sizeof(ioctlbuf_local));
-
- return (wldev_ioctl(dev, WLC_SET_VAR, ioctlbuf_local, sizeof(ioctlbuf_local), true));
-}
-/*
-get named driver variable to uint register value and return error indication
-calling example: dev_wlc_intvar_set_reg(dev, "btc_params",66, value)
-*/
-static int
-dev_wlc_intvar_set_reg(struct net_device *dev, char *name, char *addr, char * val)
+int wl_cfg80211_remove_if(struct bcm_cfg80211 *cfg, int ifidx, struct net_device* ndev)
{
- char reg_addr[8];
-
- memset(reg_addr, 0, sizeof(reg_addr));
-	memcpy((char *)&reg_addr[0], (char *)addr, 4);
-	memcpy((char *)&reg_addr[4], (char *)val, 4);
-
-	return (dev_wlc_bufvar_set(dev, name, (char *)&reg_addr[0], sizeof(reg_addr)));
+ return dhd_remove_if(cfg->pub, ifidx, FALSE);
}
-static bool btcoex_is_sco_active(struct net_device *dev)
+struct net_device * dhd_cfg80211_netdev_free(struct net_device *ndev)
{
- int ioc_res = 0;
- bool res = FALSE;
- int sco_id_cnt = 0;
- int param27;
- int i;
-
- for (i = 0; i < 12; i++) {
-
-		ioc_res = dev_wlc_intvar_get_reg(dev, "btc_params", 27, &param27);
-
- WL_TRACE(("sample[%d], btc params: 27:%x\n", i, param27));
-
- if (ioc_res < 0) {
- WL_ERR(("ioc read btc params error\n"));
- break;
- }
-
- if ((param27 & 0x6) == 2) { /* count both sco & esco */
- sco_id_cnt++;
+ if (ndev) {
+ if (ndev->ieee80211_ptr) {
+ kfree(ndev->ieee80211_ptr);
+ ndev->ieee80211_ptr = NULL;
}
-
- if (sco_id_cnt > 2) {
- WL_TRACE(("sco/esco detected, pkt id_cnt:%d samples:%d\n",
- sco_id_cnt, i));
- res = TRUE;
- break;
- }
-
- OSL_SLEEP(5);
+ free_netdev(ndev);
+ return NULL;
}
- return res;
+ return ndev;
}
-#if defined(BT_DHCP_eSCO_FIX)
-/* Enhanced BT COEX settings for eSCO compatibility during DHCP window */
-static int set_btc_esco_params(struct net_device *dev, bool trump_sco)
+void dhd_netdev_free(struct net_device *ndev)
{
- static bool saved_status = FALSE;
-
- char buf_reg50va_dhcp_on[8] =
- { 50, 00, 00, 00, 0x22, 0x80, 0x00, 0x00 };
- char buf_reg51va_dhcp_on[8] =
- { 51, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
- char buf_reg64va_dhcp_on[8] =
- { 64, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
- char buf_reg65va_dhcp_on[8] =
- { 65, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
- char buf_reg71va_dhcp_on[8] =
- { 71, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
- uint32 regaddr;
- static uint32 saved_reg50;
- static uint32 saved_reg51;
- static uint32 saved_reg64;
- static uint32 saved_reg65;
- static uint32 saved_reg71;
-
- if (trump_sco) {
- /* this should reduce eSCO agressive retransmit
- * w/o breaking it
- */
-
- /* 1st save current */
- WL_TRACE(("Do new SCO/eSCO coex algo {save &"
- "override}\n"));
- if ((!dev_wlc_intvar_get_reg(dev, "btc_params", 50, &saved_reg50)) &&
- (!dev_wlc_intvar_get_reg(dev, "btc_params", 51, &saved_reg51)) &&
- (!dev_wlc_intvar_get_reg(dev, "btc_params", 64, &saved_reg64)) &&
- (!dev_wlc_intvar_get_reg(dev, "btc_params", 65, &saved_reg65)) &&
- (!dev_wlc_intvar_get_reg(dev, "btc_params", 71, &saved_reg71))) {
- saved_status = TRUE;
- WL_TRACE(("saved bt_params[50,51,64,65,71]:"
- "0x%x 0x%x 0x%x 0x%x 0x%x\n",
- saved_reg50, saved_reg51,
- saved_reg64, saved_reg65, saved_reg71));
- } else {
- WL_ERR((":%s: save btc_params failed\n",
- __FUNCTION__));
- saved_status = FALSE;
- return -1;
- }
-
- WL_TRACE(("override with [50,51,64,65,71]:"
- "0x%x 0x%x 0x%x 0x%x 0x%x\n",
- *(u32 *)(buf_reg50va_dhcp_on+4),
- *(u32 *)(buf_reg51va_dhcp_on+4),
- *(u32 *)(buf_reg64va_dhcp_on+4),
- *(u32 *)(buf_reg65va_dhcp_on+4),
- *(u32 *)(buf_reg71va_dhcp_on+4)));
-
- dev_wlc_bufvar_set(dev, "btc_params",
- (char *)&buf_reg50va_dhcp_on[0], 8);
- dev_wlc_bufvar_set(dev, "btc_params",
- (char *)&buf_reg51va_dhcp_on[0], 8);
- dev_wlc_bufvar_set(dev, "btc_params",
- (char *)&buf_reg64va_dhcp_on[0], 8);
- dev_wlc_bufvar_set(dev, "btc_params",
- (char *)&buf_reg65va_dhcp_on[0], 8);
- dev_wlc_bufvar_set(dev, "btc_params",
- (char *)&buf_reg71va_dhcp_on[0], 8);
-
- saved_status = TRUE;
- } else if (saved_status) {
- /* restore previously saved bt params */
- WL_TRACE(("Do new SCO/eSCO coex algo {save &"
- "override}\n"));
-
- regaddr = 50;
- dev_wlc_intvar_set_reg(dev, "btc_params",
-			(char *)&regaddr, (char *)&saved_reg50);
- regaddr = 51;
- dev_wlc_intvar_set_reg(dev, "btc_params",
-			(char *)&regaddr, (char *)&saved_reg51);
- regaddr = 64;
- dev_wlc_intvar_set_reg(dev, "btc_params",
-			(char *)&regaddr, (char *)&saved_reg64);
- regaddr = 65;
- dev_wlc_intvar_set_reg(dev, "btc_params",
-			(char *)&regaddr, (char *)&saved_reg65);
- regaddr = 71;
- dev_wlc_intvar_set_reg(dev, "btc_params",
-			(char *)&regaddr, (char *)&saved_reg71);
-
- WL_TRACE(("restore bt_params[50,51,64,65,71]:"
- "0x%x 0x%x 0x%x 0x%x 0x%x\n",
- saved_reg50, saved_reg51, saved_reg64,
- saved_reg65, saved_reg71));
-
- saved_status = FALSE;
- } else {
- WL_ERR((":%s att to restore not saved BTCOEX params\n",
- __FUNCTION__));
- return -1;
- }
- return 0;
-}
-#endif /* BT_DHCP_eSCO_FIX */
-
-static void
-wl_cfg80211_bt_setflag(struct net_device *dev, bool set)
-{
-#if defined(BT_DHCP_USE_FLAGS)
- char buf_flag7_dhcp_on[8] = { 7, 00, 00, 00, 0x1, 0x0, 0x00, 0x00 };
- char buf_flag7_default[8] = { 7, 00, 00, 00, 0x0, 0x00, 0x00, 0x00};
+#ifdef WL_CFG80211
+ ndev = dhd_cfg80211_netdev_free(ndev);
#endif
-
-
-#if defined(BT_DHCP_eSCO_FIX)
- /* set = 1, save & turn on 0 - off & restore prev settings */
- set_btc_esco_params(dev, set);
-#endif
-
-#if defined(BT_DHCP_USE_FLAGS)
- WL_TRACE(("WI-FI priority boost via bt flags, set:%d\n", set));
- if (set == TRUE)
- /* Forcing bt_flag7 */
- dev_wlc_bufvar_set(dev, "btc_flags",
- (char *)&buf_flag7_dhcp_on[0],
- sizeof(buf_flag7_dhcp_on));
- else
- /* Restoring default bt flag7 */
- dev_wlc_bufvar_set(dev, "btc_flags",
- (char *)&buf_flag7_default[0],
- sizeof(buf_flag7_default));
-#endif
-}
-
-static void wl_cfg80211_bt_timerfunc(ulong data)
-{
- struct btcoex_info *bt_local = (struct btcoex_info *)data;
- WL_TRACE(("Enter\n"));
- bt_local->timer_on = 0;
- schedule_work(&bt_local->work);
+ if (ndev)
+ free_netdev(ndev);
}
-static void wl_cfg80211_bt_handler(struct work_struct *work)
+static s32
+wl_dongle_up(struct net_device *ndev)
{
- struct btcoex_info *btcx_inf;
-
- btcx_inf = container_of(work, struct btcoex_info, work);
-
- if (btcx_inf->timer_on) {
- btcx_inf->timer_on = 0;
- del_timer_sync(&btcx_inf->timer);
- }
+ s32 err = 0;
+ u32 up = 0;
- switch (btcx_inf->bt_state) {
- case BT_DHCP_START:
- /* DHCP started
- * provide OPPORTUNITY window to get DHCP address
- */
- WL_TRACE(("bt_dhcp stm: started \n"));
-
- btcx_inf->bt_state = BT_DHCP_OPPR_WIN;
- mod_timer(&btcx_inf->timer,
- jiffies + msecs_to_jiffies(BT_DHCP_OPPR_WIN_TIME));
- btcx_inf->timer_on = 1;
- break;
-
- case BT_DHCP_OPPR_WIN:
- if (btcx_inf->dhcp_done) {
- WL_TRACE(("DHCP Done before T1 expiration\n"));
- goto btc_coex_idle;
- }
-
- /* DHCP is not over yet, start lowering BT priority
- * enforce btc_params + flags if necessary
- */
- WL_TRACE(("DHCP T1:%d expired\n", BT_DHCP_OPPR_WIN_TIME));
- if (btcx_inf->dev)
- wl_cfg80211_bt_setflag(btcx_inf->dev, TRUE);
- btcx_inf->bt_state = BT_DHCP_FLAG_FORCE_TIMEOUT;
- mod_timer(&btcx_inf->timer,
- jiffies + msecs_to_jiffies(BT_DHCP_FLAG_FORCE_TIME));
- btcx_inf->timer_on = 1;
- break;
-
- case BT_DHCP_FLAG_FORCE_TIMEOUT:
- if (btcx_inf->dhcp_done) {
- WL_TRACE(("DHCP Done before T2 expiration\n"));
- } else {
- /* Noo dhcp during T1+T2, restore BT priority */
- WL_TRACE(("DHCP wait interval T2:%d msec expired\n",
- BT_DHCP_FLAG_FORCE_TIME));
- }
-
- /* Restoring default bt priority */
- if (btcx_inf->dev)
- wl_cfg80211_bt_setflag(btcx_inf->dev, FALSE);
-btc_coex_idle:
- btcx_inf->bt_state = BT_DHCP_IDLE;
- btcx_inf->timer_on = 0;
- break;
-
- default:
- WL_ERR(("error g_status=%d !!!\n", btcx_inf->bt_state));
- if (btcx_inf->dev)
- wl_cfg80211_bt_setflag(btcx_inf->dev, FALSE);
- btcx_inf->bt_state = BT_DHCP_IDLE;
- btcx_inf->timer_on = 0;
- break;
+ err = wldev_ioctl(ndev, WLC_UP, &up, sizeof(up), true);
+ if (unlikely(err)) {
+ WL_ERR(("WLC_UP error (%d)\n", err));
}
-
- net_os_wake_unlock(btcx_inf->dev);
-}
-
-int wl_cfg80211_btcoex_init(struct wl_priv *wl)
-{
- struct btcoex_info *btco_inf = NULL;
-
- btco_inf = kmalloc(sizeof(struct btcoex_info), GFP_KERNEL);
- if (!btco_inf)
- return -ENOMEM;
-
- btco_inf->bt_state = BT_DHCP_IDLE;
- btco_inf->ts_dhcp_start = 0;
- btco_inf->ts_dhcp_ok = 0;
- /* Set up timer for BT */
- btco_inf->timer_ms = 10;
- init_timer(&btco_inf->timer);
- btco_inf->timer.data = (ulong)btco_inf;
- btco_inf->timer.function = wl_cfg80211_bt_timerfunc;
-
- btco_inf->dev = wl->wdev->netdev;
-
- INIT_WORK(&btco_inf->work, wl_cfg80211_bt_handler);
-
- wl->btcoex_info = btco_inf;
- return 0;
+ return err;
}
-void wl_cfg80211_btcoex_deinit(struct wl_priv *wl)
+static s32
+wl_dongle_down(struct net_device *ndev)
{
- if (!wl->btcoex_info)
- return;
+ s32 err = 0;
+ u32 down = 0;
- if (wl->btcoex_info->timer_on) {
- wl->btcoex_info->timer_on = 0;
- del_timer_sync(&wl->btcoex_info->timer);
+ err = wldev_ioctl(ndev, WLC_DOWN, &down, sizeof(down), true);
+ if (unlikely(err)) {
+ WL_ERR(("WLC_DOWN error (%d)\n", err));
}
-
- cancel_work_sync(&wl->btcoex_info->work);
-
- kfree(wl->btcoex_info);
- wl->btcoex_info = NULL;
+ return err;
}
-int wl_cfg80211_set_btcoex_dhcp(struct net_device *dev, char *command)
-{
-
- struct wl_priv *wl = wlcfg_drv_priv;
- char powermode_val = 0;
- char buf_reg66va_dhcp_on[8] = { 66, 00, 00, 00, 0x10, 0x27, 0x00, 0x00 };
- char buf_reg41va_dhcp_on[8] = { 41, 00, 00, 00, 0x33, 0x00, 0x00, 0x00 };
- char buf_reg68va_dhcp_on[8] = { 68, 00, 00, 00, 0x90, 0x01, 0x00, 0x00 };
-
- uint32 regaddr;
- static uint32 saved_reg66;
- static uint32 saved_reg41;
- static uint32 saved_reg68;
- static bool saved_status = FALSE;
-
-#ifdef COEX_DHCP
- char buf_flag7_default[8] = { 7, 00, 00, 00, 0x0, 0x00, 0x00, 0x00};
- struct btcoex_info *btco_inf = wl->btcoex_info;
-#endif /* COEX_DHCP */
-#ifdef PKT_FILTER_SUPPORT
- dhd_pub_t *dhd = (dhd_pub_t *)(wl->pub);
-#endif
-
- /* Figure out powermode 1 or o command */
- strncpy((char *)&powermode_val, command + strlen("BTCOEXMODE") +1, 1);
-
- if (strnicmp((char *)&powermode_val, "1", strlen("1")) == 0) {
- WL_TRACE_HW4(("DHCP session starts\n"));
-
-#if defined(DHCP_SCAN_SUPPRESS)
- /* Suppress scan during the DHCP */
- wl_cfg80211_scan_suppress(dev, 1);
-#endif /* OEM_ANDROID */
-
-#ifdef PKT_FILTER_SUPPORT
- dhd->dhcp_in_progress = 1;
-
- if (dhd->early_suspended) {
- WL_TRACE_HW4(("DHCP in progressing , disable packet filter!!!\n"));
- dhd_enable_packet_filter(0, dhd);
- }
+s32 dhd_config_dongle(struct bcm_cfg80211 *cfg)
+{
+#ifndef DHD_SDALIGN
+#define DHD_SDALIGN 32
#endif
+ struct net_device *ndev;
+ s32 err = 0;
- /* Retrieve and saved orig regs value */
- if ((saved_status == FALSE) &&
- (!dev_wlc_intvar_get_reg(dev, "btc_params", 66, &saved_reg66)) &&
- (!dev_wlc_intvar_get_reg(dev, "btc_params", 41, &saved_reg41)) &&
- (!dev_wlc_intvar_get_reg(dev, "btc_params", 68, &saved_reg68))) {
- saved_status = TRUE;
- WL_TRACE(("Saved 0x%x 0x%x 0x%x\n",
- saved_reg66, saved_reg41, saved_reg68));
-
- /* Disable PM mode during dhpc session */
-
- /* Disable PM mode during dhpc session */
-#ifdef COEX_DHCP
- /* Start BT timer only for SCO connection */
- if (btcoex_is_sco_active(dev)) {
- /* btc_params 66 */
- dev_wlc_bufvar_set(dev, "btc_params",
- (char *)&buf_reg66va_dhcp_on[0],
- sizeof(buf_reg66va_dhcp_on));
- /* btc_params 41 0x33 */
- dev_wlc_bufvar_set(dev, "btc_params",
- (char *)&buf_reg41va_dhcp_on[0],
- sizeof(buf_reg41va_dhcp_on));
- /* btc_params 68 0x190 */
- dev_wlc_bufvar_set(dev, "btc_params",
- (char *)&buf_reg68va_dhcp_on[0],
- sizeof(buf_reg68va_dhcp_on));
- saved_status = TRUE;
-
- btco_inf->bt_state = BT_DHCP_START;
- btco_inf->timer_on = 1;
- mod_timer(&btco_inf->timer, btco_inf->timer.expires);
- WL_TRACE(("enable BT DHCP Timer\n"));
- }
-#endif /* COEX_DHCP */
- }
- else if (saved_status == TRUE) {
- WL_ERR(("was called w/o DHCP OFF. Continue\n"));
- }
+ WL_TRACE(("In\n"));
+ if (dhd_dongle_up) {
+ WL_ERR(("Dongle is already up\n"));
+ return err;
}
- else if (strnicmp((char *)&powermode_val, "2", strlen("2")) == 0) {
-
-
-#if defined(DHCP_SCAN_SUPPRESS)
- /* Since DHCP is complete, enable the scan back */
- wl_cfg80211_scan_suppress(dev, 0);
-#endif /* OEM_ANDROID */
-
-#ifdef PKT_FILTER_SUPPORT
- dhd->dhcp_in_progress = 0;
- WL_TRACE_HW4(("DHCP is complete \n"));
-
- /* Enable packet filtering */
- if (dhd->early_suspended) {
- WL_TRACE_HW4(("DHCP is complete , enable packet filter!!!\n"));
- dhd_enable_packet_filter(1, dhd);
- }
-#endif /* PKT_FILTER_SUPPORT */
-
- /* Restoring PM mode */
-
-#ifdef COEX_DHCP
- /* Stop any bt timer because DHCP session is done */
- WL_TRACE(("disable BT DHCP Timer\n"));
- if (btco_inf->timer_on) {
- btco_inf->timer_on = 0;
- del_timer_sync(&btco_inf->timer);
-
- if (btco_inf->bt_state != BT_DHCP_IDLE) {
- /* need to restore original btc flags & extra btc params */
- WL_TRACE(("bt->bt_state:%d\n", btco_inf->bt_state));
- /* wake up btcoex thread to restore btlags+params */
- schedule_work(&btco_inf->work);
- }
- }
- /* Restoring btc_flag paramter anyway */
- if (saved_status == TRUE)
- dev_wlc_bufvar_set(dev, "btc_flags",
- (char *)&buf_flag7_default[0], sizeof(buf_flag7_default));
-#endif /* COEX_DHCP */
-
- /* Restore original values */
- if (saved_status == TRUE) {
- regaddr = 66;
- dev_wlc_intvar_set_reg(dev, "btc_params",
-			(char *)&regaddr, (char *)&saved_reg66);
- regaddr = 41;
- dev_wlc_intvar_set_reg(dev, "btc_params",
-			(char *)&regaddr, (char *)&saved_reg41);
- regaddr = 68;
- dev_wlc_intvar_set_reg(dev, "btc_params",
-			(char *)&regaddr, (char *)&saved_reg68);
-
- WL_TRACE(("restore regs {66,41,68} <- 0x%x 0x%x 0x%x\n",
- saved_reg66, saved_reg41, saved_reg68));
- }
- saved_status = FALSE;
+ ndev = bcmcfg_to_prmry_ndev(cfg);
+ err = wl_dongle_up(ndev);
+ if (unlikely(err)) {
+ WL_ERR(("wl_dongle_up failed\n"));
+ goto default_conf_out;
}
- else {
- WL_ERR(("Unkwown yet power setting, ignored\n"));
- }
+ dhd_dongle_up = true;
+
+default_conf_out:
- snprintf(command, 3, "OK");
+ return err;
- return (strlen("OK"));
}
-#endif
#include <wl_cfg80211.h>
#include <wl_cfgp2p.h>
-s32 dhd_cfg80211_init(struct wl_priv *wl);
-s32 dhd_cfg80211_deinit(struct wl_priv *wl);
-s32 dhd_cfg80211_down(struct wl_priv *wl);
-s32 dhd_cfg80211_set_p2p_info(struct wl_priv *wl, int val);
-s32 dhd_cfg80211_clean_p2p_info(struct wl_priv *wl);
-s32 dhd_config_dongle(struct wl_priv *wl, bool need_lock);
-
-#ifdef CONFIG_NL80211_TESTMODE
-int dhd_cfg80211_testmode_cmd(struct wiphy *wiphy, void *data, int len);
-#else
-static inline int dhd_cfg80211_testmode_cmd(struct wiphy *wiphy, void *data, int len)
-{
- return 0;
-}
+#ifndef WL_ERR
+#define WL_ERR CFG80211_ERR
+#endif
+#ifndef WL_TRACE
+#define WL_TRACE CFG80211_TRACE
#endif
-int wl_cfg80211_btcoex_init(struct wl_priv *wl);
-void wl_cfg80211_btcoex_deinit(struct wl_priv *wl);
+s32 dhd_cfg80211_init(struct bcm_cfg80211 *cfg);
+s32 dhd_cfg80211_deinit(struct bcm_cfg80211 *cfg);
+s32 dhd_cfg80211_down(struct bcm_cfg80211 *cfg);
+s32 dhd_cfg80211_set_p2p_info(struct bcm_cfg80211 *cfg, int val);
+s32 dhd_cfg80211_clean_p2p_info(struct bcm_cfg80211 *cfg);
+s32 dhd_config_dongle(struct bcm_cfg80211 *cfg);
#endif /* __DHD_CFG80211__ */
--- /dev/null
+/*
+ * Linux cfg80211 vendor command/event handlers of DHD
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: dhd_cfg_vendor.c 487126 2014-06-24 23:06:12Z $
+ */
+
+#include <linuxver.h>
+#include <net/cfg80211.h>
+#include <net/netlink.h>
+
+#include <bcmutils.h>
+#include <wl_cfg80211.h>
+#include <wl_cfgvendor.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhdioctl.h>
+#include <brcm_nl80211.h>
+
+#ifdef VENDOR_EXT_SUPPORT
+static int dhd_cfgvendor_priv_string_handler(struct wiphy *wiphy,
+ struct wireless_dev *wdev, const void *data, int len)
+{
+ const struct bcm_nlmsg_hdr *nlioc = data;
+ struct net_device *ndev = NULL;
+ struct bcm_cfg80211 *cfg;
+ struct sk_buff *reply;
+ void *buf = NULL, *cur;
+ dhd_pub_t *dhd;
+ dhd_ioctl_t ioc = { 0 };
+ int ret = 0, ret_len, payload, msglen;
+ int maxmsglen = PAGE_SIZE - 0x100;
+ int8 index;
+
+ WL_TRACE(("entry: cmd = %d\n", nlioc->cmd));
+ DHD_ERROR(("entry: cmd = %d\n", nlioc->cmd));
+
+ cfg = wiphy_priv(wiphy);
+ dhd = cfg->pub;
+
+ DHD_OS_WAKE_LOCK(dhd);
+
+ /* send to dongle only if we are not waiting for reload already */
+ if (dhd->hang_was_sent) {
+ WL_ERR(("HANG was sent up earlier\n"));
+ DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhd, DHD_EVENT_TIMEOUT_MS);
+ DHD_OS_WAKE_UNLOCK(dhd);
+ return OSL_ERROR(BCME_DONGLE_DOWN);
+ }
+
+ len -= sizeof(struct bcm_nlmsg_hdr);
+ ret_len = nlioc->len;
+ if (ret_len > 0 || len > 0) {
+ if (len > DHD_IOCTL_MAXLEN) {
+ WL_ERR(("oversize input buffer %d\n", len));
+ len = DHD_IOCTL_MAXLEN;
+ }
+ if (ret_len > DHD_IOCTL_MAXLEN) {
+ WL_ERR(("oversize return buffer %d\n", ret_len));
+ ret_len = DHD_IOCTL_MAXLEN;
+ }
+ payload = max(ret_len, len) + 1;
+ buf = vzalloc(payload);
+ if (!buf) {
+ DHD_OS_WAKE_UNLOCK(dhd);
+ return -ENOMEM;
+ }
+ memcpy(buf, (void *)nlioc + nlioc->offset, len);
+ *(char *)(buf + len) = '\0';
+ }
+
+ ndev = wdev_to_wlc_ndev(wdev, cfg);
+ index = dhd_net2idx(dhd->info, ndev);
+ if (index == DHD_BAD_IF) {
+ WL_ERR(("Bad ifidx from wdev:%p\n", wdev));
+ ret = BCME_ERROR;
+ goto done;
+ }
+
+ ioc.cmd = nlioc->cmd;
+ ioc.len = nlioc->len;
+ ioc.set = nlioc->set;
+ ioc.driver = nlioc->magic;
+ ret = dhd_ioctl_process(dhd, index, &ioc, buf);
+ if (ret) {
+ WL_TRACE(("dhd_ioctl_process return err %d\n", ret));
+ ret = OSL_ERROR(ret);
+ goto done;
+ }
+
+ cur = buf;
+ while (ret_len > 0) {
+ msglen = nlioc->len > maxmsglen ? maxmsglen : ret_len;
+ ret_len -= msglen;
+ payload = msglen + sizeof(msglen);
+ reply = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, payload);
+ if (!reply) {
+ WL_ERR(("Failed to allocate reply msg\n"));
+ ret = -ENOMEM;
+ break;
+ }
+
+ if (nla_put(reply, BCM_NLATTR_DATA, msglen, cur) ||
+ nla_put_u16(reply, BCM_NLATTR_LEN, msglen)) {
+ kfree_skb(reply);
+ ret = -ENOBUFS;
+ break;
+ }
+
+ ret = cfg80211_vendor_cmd_reply(reply);
+ if (ret) {
+ WL_ERR(("testmode reply failed:%d\n", ret));
+ break;
+ }
+ cur += msglen;
+ }
+
+done:
+ vfree(buf);
+ DHD_OS_WAKE_UNLOCK(dhd);
+ return ret;
+}
+
+const struct wiphy_vendor_command dhd_cfgvendor_cmds [] = {
+ {
+ {
+ .vendor_id = OUI_BRCM,
+ .subcmd = BRCM_VENDOR_SCMD_PRIV_STR
+ },
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV | WIPHY_VENDOR_CMD_NEED_NETDEV,
+ .doit = dhd_cfgvendor_priv_string_handler
+ },
+};
+
+int cfgvendor_attach(struct wiphy *wiphy)
+{
+ wiphy->vendor_commands = dhd_cfgvendor_cmds;
+ wiphy->n_vendor_commands = ARRAY_SIZE(dhd_cfgvendor_cmds);
+
+ return 0;
+}
+
+int cfgvendor_detach(struct wiphy *wiphy)
+{
+ wiphy->vendor_commands = NULL;
+ wiphy->n_vendor_commands = 0;
+
+ return 0;
+}
+#endif /* VENDOR_EXT_SUPPORT */
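/*
 * A minimal usage sketch, assuming the cfg80211 glue wires these helpers in
 * around wiphy registration (the surrounding calls are illustrative, not
 * part of this file):
 *
 *	cfgvendor_attach(wiphy);	before wiphy_register(wiphy)
 *	...
 *	cfgvendor_detach(wiphy);	before the wiphy is freed
 */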
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: dhd_common.c 419132 2013-08-19 21:33:05Z $
+ * $Id: dhd_common.c 490628 2014-07-11 07:13:31Z $
*/
#include <typedefs.h>
#include <osl.h>
#include <wlioctl.h>
#include <dhd.h>
#include <dhd_ip.h>
-
#include <proto/bcmevent.h>
-#include <proto/bcmip.h>
+
+#ifdef SHOW_LOGTRACE
+#include <event_log.h>
+#endif /* SHOW_LOGTRACE */
+
+#ifdef BCMPCIE
+#include <dhd_flowring.h>
+#endif
#include <dhd_bus.h>
#include <dhd_proto.h>
#ifdef PNO_SUPPORT
#include <dhd_pno.h>
#endif
-#ifdef SET_RANDOM_MAC_SOFTAP
-#include <linux/random.h>
-#include <linux/jiffies.h>
-#endif
-#define htod32(i) i
-#define htod16(i) i
-#define dtoh32(i) i
-#define dtoh16(i) i
-#define htodchanspec(i) i
-#define dtohchanspec(i) i
+#define htod32(i) (i)
+#define htod16(i) (i)
+#define dtoh32(i) (i)
+#define dtoh16(i) (i)
+#define htodchanspec(i) (i)
+#define dtohchanspec(i) (i)
#ifdef PROP_TXSTATUS
#include <wlfc_proto.h>
#include <dhd_wlfc.h>
#endif
+#ifdef DHD_WMF
+#include <dhd_linux.h>
+#include <dhd_wmf_linux.h>
+#endif /* DHD_WMF */
+
+
#ifdef WLMEDIA_HTSF
extern void htsf_update(struct dhd_info *dhd, void *data);
#endif
#include <wl_iw.h>
-char fw_path[MOD_PARAM_PATHLEN];
-char nv_path[MOD_PARAM_PATHLEN];
-// terence 20130703: customer can add some parameters to configure driver
-char conf_path[MOD_PARAM_PATHLEN];
-
#ifdef SOFTAP
char fw_path2[MOD_PARAM_PATHLEN];
extern bool softap_enabled;
uint32 dhd_conn_status;
uint32 dhd_conn_reason;
+#if defined(SHOW_EVENTS) && defined(SHOW_LOGTRACE)
+static int check_event_log_sequence_number(uint32 seq_no);
+#endif /* defined(SHOW_EVENTS) && defined(SHOW_LOGTRACE) */
extern int dhd_iscan_request(void * dhdp, uint16 action);
extern void dhd_ind_scan_confirm(void *h, bool status);
extern int dhd_iscan_in_progress(void *h);
bool ap_cfg_running = FALSE;
bool ap_fw_loaded = FALSE;
-//(gwl)
-#define CONFIG_BCMDHD_CONFIG_PATH "/system/etc/firmware/config.txt"
-extern char CONFIG_BCMDHD_FW_PATH[];
-extern char CONFIG_BCMDHD_NVRAM_PATH[];
-
+/* Version string to report */
#ifdef DHD_DEBUG
-const char dhd_version[] = "Dongle Host Driver, version " EPI_VERSION_STR "\nCompiled on "
- __DATE__ " at " __TIME__;
-#else
-const char dhd_version[] = "Dongle Host Driver, version " EPI_VERSION_STR;
+#ifndef SRCBASE
+#define SRCBASE "drivers/net/wireless/bcmdhd"
#endif
+#define DHD_COMPILED "\nCompiled in " SRCBASE
+#endif /* DHD_DEBUG */
+
+#if defined(DHD_DEBUG)
+const char dhd_version[] = "Dongle Host Driver, version " EPI_VERSION_STR
+ DHD_COMPILED " on " __DATE__ " at " __TIME__;
+#else
+const char dhd_version[] = "\nDongle Host Driver, version " EPI_VERSION_STR "\nCompiled from ";
+#endif
void dhd_set_timer(void *bus, uint wdtick);
+
+
/* IOVar table */
enum {
IOV_VERSION = 1,
IOV_QMON_TIME_THRES,
IOV_QMON_TIME_PERCENT,
#endif /* QMONITOR */
+ IOV_PROPTXSTATUS_MODULE_IGNORE,
+ IOV_PROPTXSTATUS_CREDIT_IGNORE,
+ IOV_PROPTXSTATUS_TXSTATUS_IGNORE,
+ IOV_PROPTXSTATUS_RXPKT_CHK,
#endif /* PROP_TXSTATUS */
IOV_BUS_TYPE,
#ifdef WLMEDIA_HTSF
#endif
IOV_CHANGEMTU,
IOV_HOSTREORDER_FLOWS,
+#ifdef DHDTCPACK_SUPPRESS
+ IOV_TCPACK_SUPPRESS,
+#endif /* DHDTCPACK_SUPPRESS */
+#ifdef DHD_WMF
+ IOV_WMF_BSS_ENAB,
+ IOV_WMF_UCAST_IGMP,
+ IOV_WMF_MCAST_DATA_SENDUP,
+#ifdef WL_IGMP_UCQUERY
+ IOV_WMF_UCAST_IGMP_QUERY,
+#endif /* WL_IGMP_UCQUERY */
+#ifdef DHD_UCAST_UPNP
+ IOV_WMF_UCAST_UPNP,
+#endif /* DHD_UCAST_UPNP */
+#endif /* DHD_WMF */
+ IOV_AP_ISOLATE,
+#ifdef DHD_UNICAST_DHCP
+ IOV_DHCP_UNICAST,
+#endif /* DHD_UNICAST_DHCP */
+#ifdef DHD_L2_FILTER
+ IOV_BLOCK_PING,
+#endif
IOV_LAST
};
const bcm_iovar_t dhd_iovars[] = {
- {"version", IOV_VERSION, 0, IOVT_BUFFER, sizeof(dhd_version) },
+ {"version", IOV_VERSION, 0, IOVT_BUFFER, sizeof(dhd_version) },
{"wlmsglevel", IOV_WLMSGLEVEL, 0, IOVT_UINT32, 0 },
#ifdef DHD_DEBUG
{"msglevel", IOV_MSGLEVEL, 0, IOVT_UINT32, 0 },
{"HCI_ACL_data", IOV_HCI_ACL_DATA, 0, IOVT_BUFFER, 0},
#endif
#ifdef PROP_TXSTATUS
- {"proptx", IOV_PROPTXSTATUS_ENABLE, 0, IOVT_UINT32, 0 },
+ {"proptx", IOV_PROPTXSTATUS_ENABLE, 0, IOVT_BOOL, 0 },
/*
set the proptxtstatus operation mode:
0 - Do not do any proptxtstatus flow control
2 - Use explicit credit
*/
{"ptxmode", IOV_PROPTXSTATUS_MODE, 0, IOVT_UINT32, 0 },
- {"proptx_opt", IOV_PROPTXSTATUS_OPT, 0, IOVT_UINT32, 0 },
+ {"proptx_opt", IOV_PROPTXSTATUS_OPT, 0, IOVT_UINT32, 0 },
#ifdef QMONITOR
{"qtime_thres", IOV_QMON_TIME_THRES, 0, IOVT_UINT32, 0 },
{"qtime_percent", IOV_QMON_TIME_PERCENT, 0, IOVT_UINT32, 0 },
#endif /* QMONITOR */
+ {"pmodule_ignore", IOV_PROPTXSTATUS_MODULE_IGNORE, 0, IOVT_BOOL, 0 },
+ {"pcredit_ignore", IOV_PROPTXSTATUS_CREDIT_IGNORE, 0, IOVT_BOOL, 0 },
+ {"ptxstatus_ignore", IOV_PROPTXSTATUS_TXSTATUS_IGNORE, 0, IOVT_BOOL, 0 },
+ {"rxpkt_chk", IOV_PROPTXSTATUS_RXPKT_CHK, 0, IOVT_BOOL, 0 },
#endif /* PROP_TXSTATUS */
{"bustype", IOV_BUS_TYPE, 0, IOVT_UINT32, 0},
#ifdef WLMEDIA_HTSF
{"changemtu", IOV_CHANGEMTU, 0, IOVT_UINT32, 0 },
{"host_reorder_flows", IOV_HOSTREORDER_FLOWS, 0, IOVT_BUFFER,
(WLHOST_REORDERDATA_MAXFLOWS + 1) },
+#ifdef DHDTCPACK_SUPPRESS
+ {"tcpack_suppress", IOV_TCPACK_SUPPRESS, 0, IOVT_UINT8, 0 },
+#endif /* DHDTCPACK_SUPPRESS */
+#ifdef DHD_WMF
+ {"wmf_bss_enable", IOV_WMF_BSS_ENAB, 0, IOVT_BOOL, 0 },
+ {"wmf_ucast_igmp", IOV_WMF_UCAST_IGMP, 0, IOVT_BOOL, 0 },
+ {"wmf_mcast_data_sendup", IOV_WMF_MCAST_DATA_SENDUP, 0, IOVT_BOOL, 0 },
+#ifdef WL_IGMP_UCQUERY
+ {"wmf_ucast_igmp_query", IOV_WMF_UCAST_IGMP_QUERY, (0), IOVT_BOOL, 0 },
+#endif /* WL_IGMP_UCQUERY */
+#ifdef DHD_UCAST_UPNP
+ {"wmf_ucast_upnp", IOV_WMF_UCAST_UPNP, (0), IOVT_BOOL, 0 },
+#endif /* DHD_UCAST_UPNP */
+#endif /* DHD_WMF */
+#ifdef DHD_UNICAST_DHCP
+ {"dhcp_unicast", IOV_DHCP_UNICAST, (0), IOVT_BOOL, 0 },
+#endif /* DHD_UNICAST_DHCP */
+ {"ap_isolate", IOV_AP_ISOLATE, (0), IOVT_BOOL, 0},
+#ifdef DHD_L2_FILTER
+ {"block_ping", IOV_BLOCK_PING, (0), IOVT_BOOL, 0},
+#endif
{NULL, 0, 0, 0, 0 }
};
-#define DHD_IOVAR_BUF_SIZE 128
-//(gwl)
-extern int rkwifi_set_firmware(char *fw, char *nvram);
-
-void
-dhd_common_init(osl_t *osh)
-{
-//#ifdef CONFIG_BCMDHD_FW_PATH
- if(strlen(CONFIG_BCMDHD_FW_PATH))
- bcm_strncpy_s(fw_path, sizeof(fw_path), CONFIG_BCMDHD_FW_PATH, MOD_PARAM_PATHLEN-1);
-//#else /* CONFIG_BCMDHD_FW_PATH */
- else
- fw_path[0] = '\0';
-//#endif /* CONFIG_BCMDHD_FW_PATH */
-//#ifdef CONFIG_BCMDHD_NVRAM_PATH
- if(strlen(CONFIG_BCMDHD_NVRAM_PATH))
- bcm_strncpy_s(nv_path, sizeof(nv_path), CONFIG_BCMDHD_NVRAM_PATH, MOD_PARAM_PATHLEN-1);
-//#else /* CONFIG_BCMDHD_NVRAM_PATH */
- else
- nv_path[0] = '\0';
-//#endif /* CONFIG_BCMDHD_NVRAM_PATH */
-#ifdef CONFIG_BCMDHD_CONFIG_PATH
- bcm_strncpy_s(conf_path, sizeof(conf_path), CONFIG_BCMDHD_CONFIG_PATH, MOD_PARAM_PATHLEN-1);
-#else /* CONFIG_BCMDHD_CONFIG_PATH */
- conf_path[0] = '\0';
-#endif /* CONFIG_BCMDHD_CONFIG_PATH */
- rkwifi_set_firmware(fw_path, nv_path);
- //bcm_strncpy_s(firmware_path, sizeof(firmware_path), fw_path, MOD_PARAM_PATHLEN-1);
- printk("FW_PATH = %s\nNVRAM_PATH = %s\n", fw_path, nv_path); //(gwl)
-#ifdef SOFTAP
- fw_path2[0] = '\0';
-#endif
-}
-
-void
-dhd_common_deinit(dhd_pub_t *dhd_pub, dhd_cmn_t *sa_cmn)
-{
- osl_t *osh;
- dhd_cmn_t *cmn;
-
- if (dhd_pub != NULL)
- cmn = dhd_pub->cmn;
- else
- cmn = sa_cmn;
- if (!cmn)
- return;
-
- osh = cmn->osh;
-
- if (dhd_pub != NULL)
- dhd_pub->cmn = NULL;
+#define DHD_IOVAR_BUF_SIZE 128
- MFREE(osh, cmn, sizeof(dhd_cmn_t));
-}
+/* to NDIS developer, the structure dhd_common is redundant,
+ * please do NOT merge it back from other branches !!!
+ */
static int
dhd_dump(dhd_pub_t *dhdp, char *buf, int buflen)
bcm_bprintf(strbuf, "multicast %lu\n", dhdp->dstats.multicast);
bcm_bprintf(strbuf, "bus stats:\n");
- bcm_bprintf(strbuf, "tx_packets %lu tx_multicast %lu tx_errors %lu\n",
- dhdp->tx_packets, dhdp->tx_multicast, dhdp->tx_errors);
+ bcm_bprintf(strbuf, "tx_packets %lu tx_dropped %lu tx_multicast %lu tx_errors %lu\n",
+ dhdp->tx_packets, dhdp->tx_dropped, dhdp->tx_multicast, dhdp->tx_errors);
bcm_bprintf(strbuf, "tx_ctlpkts %lu tx_ctlerrs %lu\n",
dhdp->tx_ctlpkts, dhdp->tx_ctlerrs);
bcm_bprintf(strbuf, "rx_packets %lu rx_multicast %lu rx_errors %lu \n",
/* Add any bus info */
dhd_bus_dump(dhdp, strbuf);
+
return (!strbuf->size ? BCME_BUFTOOSHORT : 0);
}
int
-dhd_wl_ioctl_cmd(dhd_pub_t *dhd_pub, int cmd, void *arg, int len, uint8 set, int ifindex)
+dhd_wl_ioctl_cmd(dhd_pub_t *dhd_pub, int cmd, void *arg, int len, uint8 set, int ifidx)
{
wl_ioctl_t ioc;
ioc.len = len;
ioc.set = set;
- return dhd_wl_ioctl(dhd_pub, ifindex, &ioc, arg, len);
+ return dhd_wl_ioctl(dhd_pub, ifidx, &ioc, arg, len);
}
-
int
-dhd_wl_ioctl(dhd_pub_t *dhd_pub, int ifindex, wl_ioctl_t *ioc, void *buf, int len)
+dhd_wl_ioctl(dhd_pub_t *dhd_pub, int ifidx, wl_ioctl_t *ioc, void *buf, int len)
{
- int ret = 0;
+ int ret = BCME_ERROR;
if (dhd_os_proto_block(dhd_pub))
{
- ret = dhd_prot_ioctl(dhd_pub, ifindex, ioc, buf, len);
- if ((ret) && (dhd_pub->up))
+#if defined(WL_WLC_SHIM)
+ wl_info_t *wl = dhd_pub_wlinfo(dhd_pub);
+
+ wl_io_pport_t io_pport;
+ io_pport.dhd_pub = dhd_pub;
+ io_pport.ifidx = ifidx;
+
+ ret = wl_shim_ioctl(wl->shim, ioc, &io_pport);
+ if (ret != BCME_OK) {
+ DHD_ERROR(("%s: wl_shim_ioctl(%d) ERR %d\n", __FUNCTION__, ioc->cmd, ret));
+ }
+#else
+ ret = dhd_prot_ioctl(dhd_pub, ifidx, ioc, buf, len);
+#endif /* defined(WL_WLC_SHIM) */
+
+ if (ret && dhd_pub->up) {
/* Send hang event only if dhd_open() was success */
- dhd_os_check_hang(dhd_pub, ifindex, ret);
+ dhd_os_check_hang(dhd_pub, ifidx, ret);
+ }
if (ret == -ETIMEDOUT && !dhd_pub->up) {
DHD_ERROR(("%s: 'resumed on timeout' error is "
dhd_os_proto_unblock(dhd_pub);
}
+
return ret;
}
+uint wl_get_port_num(wl_io_pport_t *io_pport)
+{
+ return 0;
+}
+
+/* Get bssidx from iovar params
+ * Input: dhd_pub - pointer to dhd_pub_t
+ * params - IOVAR params
+ * Output: idx - BSS index
+ *         val - pointer to the IOVAR arguments
+ */
+static int
+dhd_iovar_parse_bssidx(dhd_pub_t *dhd_pub, char *params, int *idx, char **val)
+{
+ char *prefix = "bsscfg:";
+ uint32 bssidx;
+
+ if (!(strncmp(params, prefix, strlen(prefix)))) {
+ /* per bss setting should be prefixed with 'bsscfg:' */
+ char *p = (char *)params + strlen(prefix);
+
+ /* Skip Name */
+ while (*p != '\0')
+ p++;
+		/* step past the NUL terminator of the name */
+ p = p + 1;
+ bcopy(p, &bssidx, sizeof(uint32));
+ /* Get corresponding dhd index */
+ bssidx = dhd_bssidx2idx(dhd_pub, bssidx);
+
+ if (bssidx >= DHD_MAX_IFS) {
+ DHD_ERROR(("%s Wrong bssidx provided\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ /* skip bss idx */
+ p += sizeof(uint32);
+ *val = p;
+ *idx = bssidx;
+ } else {
+ DHD_ERROR(("%s: bad parameter for per bss iovar\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ return BCME_OK;
+}
+
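/*
 * A sketch of the parameter layout dhd_iovar_parse_bssidx() above expects
 * for a per-BSS iovar, assuming a caller that builds the buffer by hand:
 *
 *	"bsscfg:" <iovar name> '\0' <uint32 bssidx> <iovar value ...>
 *
 * e.g. for "wmf_bss_enable" on BSS 1 the buffer carries the NUL-terminated
 * name "bsscfg:wmf_bss_enable", then the 32-bit index 1, then the value.
 */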
static int
dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, uint32 actionid, const char *name,
void *params, int plen, void *arg, int len, int val_size)
dhd_pub->tx_errors = dhd_pub->rx_errors = 0;
dhd_pub->tx_ctlpkts = dhd_pub->rx_ctlpkts = 0;
dhd_pub->tx_ctlerrs = dhd_pub->rx_ctlerrs = 0;
+ dhd_pub->tx_dropped = 0;
dhd_pub->rx_dropped = 0;
dhd_pub->rx_readahead_cnt = 0;
dhd_pub->tx_realloc = 0;
dhd_bus_clearcounts(dhd_pub);
#ifdef PROP_TXSTATUS
/* clear proptxstatus related counters */
- if (dhd_pub->wlfc_state) {
- athost_wl_status_info_t *wlfc =
- (athost_wl_status_info_t*)dhd_pub->wlfc_state;
- wlfc_hanger_t* hanger;
-
- memset(&wlfc->stats, 0, sizeof(athost_wl_stat_counters_t));
-
- hanger = (wlfc_hanger_t*)wlfc->hanger;
- hanger->pushed = 0;
- hanger->popped = 0;
- hanger->failed_slotfind = 0;
- hanger->failed_to_pop = 0;
- hanger->failed_to_push = 0;
- }
+ dhd_wlfc_clear_counts(dhd_pub);
#endif /* PROP_TXSTATUS */
break;
+
case IOV_GVAL(IOV_IOCTLTIMEOUT): {
int_val = (int32)dhd_os_get_ioctl_resp_timeout();
bcopy(&int_val, arg, sizeof(int_val));
#endif /* WLBTAMP */
#ifdef PROP_TXSTATUS
- case IOV_GVAL(IOV_PROPTXSTATUS_ENABLE):
- int_val = dhd_pub->wlfc_enabled? 1 : 0;
+ case IOV_GVAL(IOV_PROPTXSTATUS_ENABLE): {
+ bool wlfc_enab = FALSE;
+ bcmerror = dhd_wlfc_get_enable(dhd_pub, &wlfc_enab);
+ if (bcmerror != BCME_OK)
+ goto exit;
+ int_val = wlfc_enab ? 1 : 0;
bcopy(&int_val, arg, val_size);
break;
+ }
+ case IOV_SVAL(IOV_PROPTXSTATUS_ENABLE): {
+ bool wlfc_enab = FALSE;
+ bcmerror = dhd_wlfc_get_enable(dhd_pub, &wlfc_enab);
+ if (bcmerror != BCME_OK)
+ goto exit;
- case IOV_SVAL(IOV_PROPTXSTATUS_ENABLE):
- dhd_pub->wlfc_enabled = int_val? 1 : 0;
- break;
+ /* wlfc is already set as desired */
+ if (wlfc_enab == (int_val == 0 ? FALSE : TRUE))
+ goto exit;
+
+ if (int_val == TRUE)
+ bcmerror = dhd_wlfc_init(dhd_pub);
+ else
+ bcmerror = dhd_wlfc_deinit(dhd_pub);
- case IOV_GVAL(IOV_PROPTXSTATUS_MODE): {
- athost_wl_status_info_t *wlfc =
- (athost_wl_status_info_t*)dhd_pub->wlfc_state;
- int_val = dhd_pub->wlfc_state ? (int32)wlfc->proptxstatus_mode : 0;
- bcopy(&int_val, arg, val_size);
break;
}
+ case IOV_GVAL(IOV_PROPTXSTATUS_MODE):
+ bcmerror = dhd_wlfc_get_mode(dhd_pub, &int_val);
+ if (bcmerror != BCME_OK)
+ goto exit;
+ bcopy(&int_val, arg, val_size);
+ break;
case IOV_SVAL(IOV_PROPTXSTATUS_MODE):
- if (dhd_pub->wlfc_state) {
- athost_wl_status_info_t *wlfc =
- (athost_wl_status_info_t*)dhd_pub->wlfc_state;
- wlfc->proptxstatus_mode = int_val & 0xff;
- }
+ dhd_wlfc_set_mode(dhd_pub, int_val);
break;
#ifdef QMONITOR
case IOV_GVAL(IOV_QMON_TIME_THRES): {
break;
}
#endif /* QMONITOR */
+
+ case IOV_GVAL(IOV_PROPTXSTATUS_MODULE_IGNORE):
+ bcmerror = dhd_wlfc_get_module_ignore(dhd_pub, &int_val);
+ if (bcmerror != BCME_OK)
+ goto exit;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_PROPTXSTATUS_MODULE_IGNORE):
+ dhd_wlfc_set_module_ignore(dhd_pub, int_val);
+ break;
+
+ case IOV_GVAL(IOV_PROPTXSTATUS_CREDIT_IGNORE):
+ bcmerror = dhd_wlfc_get_credit_ignore(dhd_pub, &int_val);
+ if (bcmerror != BCME_OK)
+ goto exit;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_PROPTXSTATUS_CREDIT_IGNORE):
+ dhd_wlfc_set_credit_ignore(dhd_pub, int_val);
+ break;
+
+ case IOV_GVAL(IOV_PROPTXSTATUS_TXSTATUS_IGNORE):
+ bcmerror = dhd_wlfc_get_txstatus_ignore(dhd_pub, &int_val);
+ if (bcmerror != BCME_OK)
+ goto exit;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_PROPTXSTATUS_TXSTATUS_IGNORE):
+ dhd_wlfc_set_txstatus_ignore(dhd_pub, int_val);
+ break;
+
+ case IOV_GVAL(IOV_PROPTXSTATUS_RXPKT_CHK):
+ bcmerror = dhd_wlfc_get_rxpkt_chk(dhd_pub, &int_val);
+ if (bcmerror != BCME_OK)
+ goto exit;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_PROPTXSTATUS_RXPKT_CHK):
+ dhd_wlfc_set_rxpkt_chk(dhd_pub, int_val);
+ break;
+
#endif /* PROP_TXSTATUS */
case IOV_GVAL(IOV_BUS_TYPE):
#ifdef BCMDHDUSB
int_val = BUS_TYPE_USB;
#endif
+#ifdef BCMSDIO
int_val = BUS_TYPE_SDIO;
+#endif
+#ifdef PCIE_FULL_DONGLE
+ int_val = BUS_TYPE_PCIE;
+#endif
bcopy(&int_val, arg, val_size);
break;
*ptr = count;
break;
}
+#ifdef DHDTCPACK_SUPPRESS
+ case IOV_GVAL(IOV_TCPACK_SUPPRESS): {
+ int_val = (uint32)dhd_pub->tcpack_sup_mode;
+ bcopy(&int_val, arg, val_size);
+ break;
+ }
+ case IOV_SVAL(IOV_TCPACK_SUPPRESS): {
+ bcmerror = dhd_tcpack_suppress_set(dhd_pub, (uint8)int_val);
+ break;
+ }
+#endif /* DHDTCPACK_SUPPRESS */
+#ifdef DHD_WMF
+ case IOV_GVAL(IOV_WMF_BSS_ENAB): {
+ uint32 bssidx;
+ dhd_wmf_t *wmf;
+ char *val;
+
+ if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) {
+ DHD_ERROR(("%s: wmf_bss_enable: bad parameter\n", __FUNCTION__));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+
+ wmf = dhd_wmf_conf(dhd_pub, bssidx);
+		int_val = wmf->wmf_enable ? 1 : 0;
+ bcopy(&int_val, arg, val_size);
+ break;
+ }
+ case IOV_SVAL(IOV_WMF_BSS_ENAB): {
+ /* Enable/Disable WMF */
+ uint32 bssidx;
+ dhd_wmf_t *wmf;
+ char *val;
+
+ if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) {
+ DHD_ERROR(("%s: wmf_bss_enable: bad parameter\n", __FUNCTION__));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+
+ ASSERT(val);
+ bcopy(val, &int_val, sizeof(uint32));
+ wmf = dhd_wmf_conf(dhd_pub, bssidx);
+ if (wmf->wmf_enable == int_val)
+ break;
+ if (int_val) {
+ /* Enable WMF */
+ if (dhd_wmf_instance_add(dhd_pub, bssidx) != BCME_OK) {
+ DHD_ERROR(("%s: Error in creating WMF instance\n",
+ __FUNCTION__));
+ break;
+ }
+ if (dhd_wmf_start(dhd_pub, bssidx) != BCME_OK) {
+ DHD_ERROR(("%s: Failed to start WMF\n", __FUNCTION__));
+ break;
+ }
+ wmf->wmf_enable = TRUE;
+ } else {
+ /* Disable WMF */
+ wmf->wmf_enable = FALSE;
+ dhd_wmf_stop(dhd_pub, bssidx);
+ dhd_wmf_instance_del(dhd_pub, bssidx);
+ }
+ break;
+ }
+ case IOV_GVAL(IOV_WMF_UCAST_IGMP):
+ int_val = dhd_pub->wmf_ucast_igmp ? 1 : 0;
+ bcopy(&int_val, arg, val_size);
+ break;
+ case IOV_SVAL(IOV_WMF_UCAST_IGMP):
+ if (dhd_pub->wmf_ucast_igmp == int_val)
+ break;
+
+ if (int_val >= OFF && int_val <= ON)
+ dhd_pub->wmf_ucast_igmp = int_val;
+ else
+ bcmerror = BCME_RANGE;
+ break;
+ case IOV_GVAL(IOV_WMF_MCAST_DATA_SENDUP):
+ int_val = dhd_wmf_mcast_data_sendup(dhd_pub, 0, FALSE, FALSE);
+ bcopy(&int_val, arg, val_size);
+ break;
+ case IOV_SVAL(IOV_WMF_MCAST_DATA_SENDUP):
+ dhd_wmf_mcast_data_sendup(dhd_pub, 0, TRUE, int_val);
+ break;
+
+#ifdef WL_IGMP_UCQUERY
+ case IOV_GVAL(IOV_WMF_UCAST_IGMP_QUERY):
+ int_val = dhd_pub->wmf_ucast_igmp_query ? 1 : 0;
+ bcopy(&int_val, arg, val_size);
+ break;
+ case IOV_SVAL(IOV_WMF_UCAST_IGMP_QUERY):
+ if (dhd_pub->wmf_ucast_igmp_query == int_val)
+ break;
+
+ if (int_val >= OFF && int_val <= ON)
+ dhd_pub->wmf_ucast_igmp_query = int_val;
+ else
+ bcmerror = BCME_RANGE;
+ break;
+#endif /* WL_IGMP_UCQUERY */
+#ifdef DHD_UCAST_UPNP
+ case IOV_GVAL(IOV_WMF_UCAST_UPNP):
+ int_val = dhd_pub->wmf_ucast_upnp ? 1 : 0;
+ bcopy(&int_val, arg, val_size);
+ break;
+ case IOV_SVAL(IOV_WMF_UCAST_UPNP):
+ if (dhd_pub->wmf_ucast_upnp == int_val)
+ break;
+
+ if (int_val >= OFF && int_val <= ON)
+ dhd_pub->wmf_ucast_upnp = int_val;
+ else
+ bcmerror = BCME_RANGE;
+ break;
+#endif /* DHD_UCAST_UPNP */
+#endif /* DHD_WMF */
+
+
+#ifdef DHD_UNICAST_DHCP
+ case IOV_GVAL(IOV_DHCP_UNICAST):
+ int_val = dhd_pub->dhcp_unicast;
+ bcopy(&int_val, arg, val_size);
+ break;
+ case IOV_SVAL(IOV_DHCP_UNICAST):
+ if (dhd_pub->dhcp_unicast == int_val)
+ break;
+
+		if (int_val >= OFF && int_val <= ON) {
+ dhd_pub->dhcp_unicast = int_val;
+ } else {
+ bcmerror = BCME_RANGE;
+ }
+ break;
+#endif /* DHD_UNICAST_DHCP */
+#ifdef DHD_L2_FILTER
+ case IOV_GVAL(IOV_BLOCK_PING):
+ int_val = dhd_pub->block_ping;
+ bcopy(&int_val, arg, val_size);
+ break;
+ case IOV_SVAL(IOV_BLOCK_PING):
+ if (dhd_pub->block_ping == int_val)
+ break;
+		if (int_val >= OFF && int_val <= ON) {
+ dhd_pub->block_ping = int_val;
+ } else {
+ bcmerror = BCME_RANGE;
+ }
+ break;
+#endif
+
+ case IOV_GVAL(IOV_AP_ISOLATE): {
+ uint32 bssidx;
+ char *val;
+
+ if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) {
+			DHD_ERROR(("%s: ap isolate: bad parameter\n", __FUNCTION__));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+
+ int_val = dhd_get_ap_isolate(dhd_pub, bssidx);
+ bcopy(&int_val, arg, val_size);
+ break;
+ }
+ case IOV_SVAL(IOV_AP_ISOLATE): {
+ uint32 bssidx;
+ char *val;
+
+ if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) {
+ DHD_ERROR(("%s: ap isolate: bad parameter\n", __FUNCTION__));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+
+ ASSERT(val);
+ bcopy(val, &int_val, sizeof(uint32));
+ dhd_set_ap_isolate(dhd_pub, bssidx, int_val);
+ break;
+ }
default:
bcmerror = BCME_UNSUPPORTED;
/* Evict packet according to discard policy */
p = discard_oldest ? pktq_pdeq(q, eprec) : pktq_pdeq_tail(q, eprec);
ASSERT(p);
-
+#ifdef DHDTCPACK_SUPPRESS
+ if (dhd_tcpack_check_xmit(dhdp, p) == BCME_ERROR) {
+ DHD_ERROR(("%s %d: tcpack_suppress ERROR!!! Stop using it\n",
+ __FUNCTION__, __LINE__));
+ dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF);
+ }
+#endif /* DHDTCPACK_SUPPRESS */
PKTFREE(dhdp->osh, p, TRUE);
}
* If can't find pkts matching upper 2 cases, drop first pkt anyway
*/
bool
-dhd_prec_drop_pkts(osl_t *osh, struct pktq *pq, int prec)
+dhd_prec_drop_pkts(dhd_pub_t *dhdp, struct pktq *pq, int prec, f_droppkt_t fn)
{
struct pktq_prec *q = NULL;
void *p, *prev = NULL, *next = NULL, *first = NULL, *last = NULL, *prev_first = NULL;
pkt_frag_t frag_info;
- ASSERT(osh && pq);
+ ASSERT(dhdp && pq);
ASSERT(prec >= 0 && prec < pq->num_prec);
q = &pq->q[prec];
return FALSE;
while (p) {
- frag_info = pkt_frag_info(osh, p);
+ frag_info = pkt_frag_info(dhdp->osh, p);
if (frag_info == DHD_PKT_FRAG_NONE) {
break;
} else if (frag_info == DHD_PKT_FRAG_FIRST) {
if (first) {
/* No last frag pkt, use prev as last */
last = prev;
+ break;
} else {
first = p;
prev_first = prev;
PKTSETLINK(p, NULL);
- PKTFREE(osh, p, TRUE);
+ if (fn)
+ fn(dhdp, prec, p, TRUE);
if (p == last)
break;
q->tail = NULL;
} else {
PKTSETLINK(prev_first, next);
+ if (!next)
+ q->tail = prev_first;
}
return TRUE;
}
#ifdef SHOW_EVENTS
+#ifdef SHOW_LOGTRACE
+
+#define AVOID_BYTE 64
+#define MAX_NO_OF_ARG 16
+
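+/* Track the last WLC_E_TRACE log-trace sequence number seen; return -1 for a
+ * duplicate packet (the caller drops it) and 0 for an in-order, rolled-over,
+ * or lost-sequence packet (the latter is only logged).
+ */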
+static int
+check_event_log_sequence_number(uint32 seq_no)
+{
+ int32 diff;
+	int ret;
+ static uint32 logtrace_seqnum_prev = 0;
+
+ diff = ntoh32(seq_no)-logtrace_seqnum_prev;
+ switch (diff)
+ {
+ case 0:
+			ret = -1; /* duplicate packet, drop it */
+ break;
+
+ case 1:
+			ret = 0; /* in order */
+ break;
+
+ default:
+ if ((ntoh32(seq_no) == 0) &&
+ (logtrace_seqnum_prev == 0xFFFFFFFF) ) { /* in-order - Roll over */
+ ret = 0;
+ } else {
+
+ if (diff > 0) {
+ DHD_EVENT(("WLC_E_TRACE:"
+ "Event lost (log) seqnum %d nblost %d\n",
+ ntoh32(seq_no), (diff-1)));
+ } else {
+ DHD_EVENT(("WLC_E_TRACE:"
+ "Event Packets coming out of order!!\n"));
+ }
+ ret = 0;
+ }
+ }
+
+ logtrace_seqnum_prev = ntoh32(seq_no);
+
+ return ret;
+}
+#endif /* SHOW_LOGTRACE */
+
static void
-wl_show_host_event(wl_event_msg_t *event, void *event_data)
+wl_show_host_event(dhd_pub_t *dhd_pub, wl_event_msg_t *event, void *event_data,
+ void *raw_event_ptr, char *eventmask)
{
uint i, status, reason;
bool group = FALSE, flush_txq = FALSE, link = FALSE;
(uchar)event->addr.octet[4]&0xff,
(uchar)event->addr.octet[5]&0xff);
- event_name = "UNKNOWN";
- for (i = 0; i < (uint)bcmevent_names_size; i++)
- if (bcmevent_names[i].event == event_type)
- event_name = bcmevent_names[i].name;
+ event_name = bcmevent_get_name(event_type);
+ BCM_REFERENCE(event_name);
if (flags & WLC_EVENT_MSG_LINK)
link = TRUE;
break;
#endif /* WIFI_ACT_FRAME */
- case WLC_E_TRACE: {
- static uint32 seqnum_prev = 0;
- static uint32 logtrace_seqnum_prev = 0;
+#ifdef SHOW_LOGTRACE
+ case WLC_E_TRACE:
+ {
msgtrace_hdr_t hdr;
uint32 nblost;
+ uint8 count;
char *s, *p;
+ static uint32 seqnum_prev = 0;
+ uint32 *record = NULL;
+ uint32 *log_ptr = NULL;
+ uint32 writeindex = 0;
+ event_log_hdr_t event_hdr;
+ int no_of_fmts = 0;
+ char *fmt = NULL;
+ dhd_event_log_t *raw_event = (dhd_event_log_t *) raw_event_ptr;
buf = (uchar *) event_data;
memcpy(&hdr, buf, MSGTRACE_HDRLEN);
if (hdr.version != MSGTRACE_VERSION) {
- printf("\nMACEVENT: %s [unsupported version --> "
- "dhd version:%d dongle version:%d]\n",
- event_name, MSGTRACE_VERSION, hdr.version);
+ DHD_EVENT(("\nMACEVENT: %s [unsupported version --> "
+ "dhd version:%d dongle version:%d]\n",
+ event_name, MSGTRACE_VERSION, hdr.version));
/* Reset datalen to avoid display below */
datalen = 0;
break;
buf[MSGTRACE_HDRLEN + ntoh16(hdr.len)] = '\0';
if (ntoh32(hdr.discarded_bytes) || ntoh32(hdr.discarded_printf)) {
- printf("\nWLC_E_TRACE: [Discarded traces in dongle -->"
- "discarded_bytes %d discarded_printf %d]\n",
- ntoh32(hdr.discarded_bytes), ntoh32(hdr.discarded_printf));
+ DHD_EVENT(("WLC_E_TRACE: [Discarded traces in dongle -->"
+ "discarded_bytes %d discarded_printf %d]\n",
+ ntoh32(hdr.discarded_bytes),
+ ntoh32(hdr.discarded_printf)));
}
nblost = ntoh32(hdr.seqnum) - seqnum_prev - 1;
if (nblost > 0) {
- printf("\nWLC_E_TRACE: [Event lost (msg) --> seqnum %d nblost %d\n",
- ntoh32(hdr.seqnum), nblost);
+ DHD_EVENT(("WLC_E_TRACE:"
+ "[Event lost (msg) --> seqnum %d nblost %d\n",
+ ntoh32(hdr.seqnum), nblost));
}
seqnum_prev = ntoh32(hdr.seqnum);
- /* Display the trace buffer. Advance from \n to \n to avoid display big
+ /* Display the trace buffer. Advance from
+ * \n to \n to avoid display big
* printf (issue with Linux printk )
*/
p = (char *)&buf[MSGTRACE_HDRLEN];
- while (*p != '\0' && (s = strstr(p, "\n")) != NULL) {
+ while (*p != '\0' && (s = strstr(p, "\n")) != NULL) {
*s = '\0';
- printf("%s\n", p);
+ DHD_EVENT(("%s\n", p));
p = s+1;
}
- if (*p) printf("%s", p);
+ if (*p)
+ DHD_EVENT(("%s", p));
/* Reset datalen to avoid display below */
datalen = 0;
} else if (hdr.trace_type == MSGTRACE_HDR_TYPE_LOG) {
/* Let the standard event printing work for now */
- uint32 timestamp, w;
- if (ntoh32(hdr.seqnum) == logtrace_seqnum_prev) {
- printf("\nWLC_E_TRACE: [Event duplicate (log) %d",
- logtrace_seqnum_prev);
- } else {
- nblost = ntoh32(hdr.seqnum) - logtrace_seqnum_prev - 1;
- if (nblost > 0) {
- printf("\nWLC_E_TRACE: [Event lost (log)"
- " --> seqnum %d nblost %d\n",
- ntoh32(hdr.seqnum), nblost);
+ uint32 timestamp, w, malloc_len;
+
+ if (check_event_log_sequence_number(hdr.seqnum)) {
+
+ DHD_EVENT(("%s: WLC_E_TRACE:"
+ "[Event duplicate (log) %d] dropping!!\n",
+ __FUNCTION__, hdr.seqnum));
+ return; /* drop duplicate events */
+ }
+
+ p = (char *)&buf[MSGTRACE_HDRLEN];
+ datalen -= MSGTRACE_HDRLEN;
+ w = ntoh32((uint32)*p);
+ p += 4;
+ datalen -= 4;
+ timestamp = ntoh32((uint32)*p);
+ BCM_REFERENCE(timestamp);
+ BCM_REFERENCE(w);
+
+ DHD_EVENT(("timestamp %x%x\n", timestamp, w));
+
+ if (raw_event->fmts) {
+				malloc_len = datalen + AVOID_BYTE;
+ record = (uint32 *)MALLOC(dhd_pub->osh, malloc_len);
+ if (record == NULL) {
+ DHD_EVENT(("MSGTRACE_HDR_TYPE_LOG:"
+ "malloc failed\n"));
+ return;
+ }
+ log_ptr = (uint32 *) (p + datalen);
+ writeindex = datalen/4;
+
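+				/* Event log records carry their header word at the end, so parse
+				 * the payload back-to-front: skip NULL and timestamp tags and
+				 * repack each record (header first) into 'record' for the
+				 * format loop below. */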
+ if (record) {
+ while (datalen > 4) {
+ log_ptr--;
+ datalen -= 4;
+ event_hdr.t = *log_ptr;
+ /*
+					 * Check for partially overwritten entries
+ */
+ if (log_ptr - (uint32 *) p < event_hdr.count) {
+ break;
+ }
+ /*
+ * Check for end of the Frame.
+ */
+ if (event_hdr.tag == EVENT_LOG_TAG_NULL) {
+ continue;
+ }
+ /*
+ * Check For Special Time Stamp Packet
+ */
+ if (event_hdr.tag == EVENT_LOG_TAG_TS) {
+ datalen -= 12;
+ log_ptr = log_ptr - 3;
+ continue;
+ }
+
+ log_ptr[0] = event_hdr.t;
+ if (event_hdr.count > MAX_NO_OF_ARG) {
+ break;
+ }
+ /* Now place the header at the front
+ * and copy back.
+ */
+ log_ptr -= event_hdr.count;
+
+ writeindex = writeindex - event_hdr.count;
+ record[writeindex++] = event_hdr.t;
+ for (count = 0; count < (event_hdr.count-1);
+ count++) {
+ record[writeindex++] = log_ptr[count];
+ }
+ writeindex = writeindex - event_hdr.count;
+ datalen = datalen - (event_hdr.count * 4);
+ no_of_fmts++;
+ }
}
- logtrace_seqnum_prev = ntoh32(hdr.seqnum);
- p = (char *)&buf[MSGTRACE_HDRLEN];
- datalen -= MSGTRACE_HDRLEN;
- w = ntoh32((uint32) *p);
- p += 4;
- datalen -= 4;
- timestamp = ntoh32((uint32) *p);
- printf("Logtrace %x timestamp %x %x",
- logtrace_seqnum_prev, timestamp, w);
+ while (no_of_fmts--)
+ {
+ event_log_hdr_t event_hdr;
+ event_hdr.t = record[writeindex];
+
+ if ((event_hdr.fmt_num>>2) < raw_event->num_fmts) {
+ fmt = raw_event->fmts[event_hdr.fmt_num>>2];
+ DHD_EVENT((fmt,
+ record[writeindex + 1],
+ record[writeindex + 2],
+ record[writeindex + 3],
+ record[writeindex + 4],
+ record[writeindex + 5],
+ record[writeindex + 6],
+ record[writeindex + 7],
+ record[writeindex + 8],
+ record[writeindex + 9],
+ record[writeindex + 10],
+ record[writeindex + 11],
+ record[writeindex + 12],
+ record[writeindex + 13],
+ record[writeindex + 14],
+ record[writeindex + 15],
+ record[writeindex + 16]));
+
+ if (fmt[strlen(fmt) - 1] != '\n') {
+ /* Add newline if missing */
+ DHD_EVENT(("\n"));
+ }
+ }
+
+ writeindex = writeindex + event_hdr.count;
+ }
+ if (record) {
+ MFREE(dhd_pub->osh, record, malloc_len);
+ record = NULL;
+ }
+ } else {
while (datalen > 4) {
p += 4;
datalen -= 4;
/* Print each word. DO NOT ntoh it. */
- printf(" %8.8x", *((uint32 *) p));
+ DHD_EVENT((" %8.8x", *((uint32 *) p)));
}
- printf("\n");
+ DHD_EVENT(("\n"));
}
datalen = 0;
}
-
break;
}
-
+#endif /* SHOW_LOGTRACE */
case WLC_E_RSSI:
DHD_EVENT(("MACEVENT: %s %d\n", event_name, ntoh32(*((int *)event_data))));
DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
break;
+#ifdef BT_WIFI_HANDOBER
+ case WLC_E_BT_WIFI_HANDOVER_REQ:
+ DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
+ break;
+#endif
+
default:
DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d\n",
event_name, event_type, eabuf, (int)status, (int)reason,
/* show any appended data */
if (DHD_BYTES_ON() && DHD_EVENT_ON() && datalen) {
buf = (uchar *) event_data;
+ BCM_REFERENCE(buf);
DHD_EVENT((" data (%d) : ", datalen));
for (i = 0; i < datalen; i++)
DHD_EVENT((" 0x%02x ", *buf++));
int
wl_host_event(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata,
- wl_event_msg_t *event, void **data_ptr)
+ wl_event_msg_t *event, void **data_ptr, void *raw_event)
{
/* check whether packet is a BRCM event pkt */
bcm_event_t *pvt_data = (bcm_event_t *)pktdata;
uint32 type, status, datalen;
uint16 flags;
int evlen;
+ int hostidx;
if (bcmp(BRCM_OUI, &pvt_data->bcm_hdr.oui[0], DOT11_OUI_LEN)) {
DHD_ERROR(("%s: mismatched OUI, bailing\n", __FUNCTION__));
*data_ptr = &pvt_data[1];
event_data = *data_ptr;
+
/* memcpy since BRCM event pkt may be unaligned. */
memcpy(event, &pvt_data->event, sizeof(wl_event_msg_t));
datalen = ntoh32_ua((void *)&event->datalen);
evlen = datalen + sizeof(bcm_event_t);
+ /* find equivalent host index for event ifidx */
+ hostidx = dhd_ifidx2hostidx(dhd_pub->info, event->ifidx);
+
switch (type) {
#ifdef PROP_TXSTATUS
case WLC_E_FIFO_CREDIT_MAP:
- dhd_os_wlfc_block(dhd_pub);
- dhd_wlfc_event(dhd_pub->info);
- dhd_wlfc_FIFOcreditmap_event(dhd_pub->info, event_data);
- dhd_os_wlfc_unblock(dhd_pub);
+ dhd_wlfc_enable(dhd_pub);
+ dhd_wlfc_FIFOcreditmap_event(dhd_pub, event_data);
WLFC_DBGMESG(("WLC_E_FIFO_CREDIT_MAP:(AC0,AC1,AC2,AC3),(BC_MC),(OTHER): "
"(%d,%d,%d,%d),(%d),(%d)\n", event_data[0], event_data[1],
event_data[2],
event_data[3], event_data[4], event_data[5]));
break;
+
+ case WLC_E_BCMC_CREDIT_SUPPORT:
+ dhd_wlfc_BCMCCredit_support_event(dhd_pub);
+ break;
#endif
case WLC_E_IF:
{
- dhd_if_event_t *ifevent = (dhd_if_event_t *)event_data;
+ struct wl_event_data_if *ifevent = (struct wl_event_data_if *)event_data;
/* Ignore the event if NOIF is set */
- if (ifevent->flags & WLC_E_IF_FLAGS_BSSCFG_NOIF) {
- WLFC_DBGMESG(("WLC_E_IF: NO_IF set, event Ignored\r\n"));
- return (BCME_OK);
+ if (ifevent->reserved & WLC_E_IF_FLAGS_BSSCFG_NOIF) {
+ DHD_ERROR(("WLC_E_IF: NO_IF set, event Ignored\r\n"));
+ return (BCME_UNSUPPORTED);
}
-
+#ifdef PCIE_FULL_DONGLE
+ dhd_update_interface_flow_info(dhd_pub, ifevent->ifidx,
+ ifevent->opcode, ifevent->role);
+#endif
#ifdef PROP_TXSTATUS
- {
- uint8* ea = pvt_data->eth.ether_dhost;
- WLFC_DBGMESG(("WLC_E_IF: idx:%d, action:%s, iftype:%s, "
- "[%02x:%02x:%02x:%02x:%02x:%02x]\n",
- ifevent->ifidx,
- ((ifevent->action == WLC_E_IF_ADD) ? "ADD":"DEL"),
- ((ifevent->is_AP == 0) ? "STA":"AP "),
- ea[0], ea[1], ea[2], ea[3], ea[4], ea[5]));
- (void)ea;
-
- dhd_os_wlfc_block(dhd_pub);
- if (ifevent->action == WLC_E_IF_CHANGE)
- dhd_wlfc_interface_event(dhd_pub->info,
- eWLFC_MAC_ENTRY_ACTION_UPDATE,
- ifevent->ifidx, ifevent->is_AP, ea);
- else
- dhd_wlfc_interface_event(dhd_pub->info,
- ((ifevent->action == WLC_E_IF_ADD) ?
- eWLFC_MAC_ENTRY_ACTION_ADD : eWLFC_MAC_ENTRY_ACTION_DEL),
- ifevent->ifidx, ifevent->is_AP, ea);
- dhd_os_wlfc_unblock(dhd_pub);
-
- /* dhd already has created an interface by default, for 0 */
- if (ifevent->ifidx == 0)
- break;
- }
+ {
+ uint8* ea = pvt_data->eth.ether_dhost;
+ WLFC_DBGMESG(("WLC_E_IF: idx:%d, action:%s, iftype:%s, "
+ "[%02x:%02x:%02x:%02x:%02x:%02x]\n",
+ ifevent->ifidx,
+ ((ifevent->opcode == WLC_E_IF_ADD) ? "ADD":"DEL"),
+ ((ifevent->role == 0) ? "STA":"AP "),
+ ea[0], ea[1], ea[2], ea[3], ea[4], ea[5]));
+ (void)ea;
+
+ if (ifevent->opcode == WLC_E_IF_CHANGE)
+ dhd_wlfc_interface_event(dhd_pub,
+ eWLFC_MAC_ENTRY_ACTION_UPDATE,
+ ifevent->ifidx, ifevent->role, ea);
+ else
+ dhd_wlfc_interface_event(dhd_pub,
+ ((ifevent->opcode == WLC_E_IF_ADD) ?
+ eWLFC_MAC_ENTRY_ACTION_ADD : eWLFC_MAC_ENTRY_ACTION_DEL),
+ ifevent->ifidx, ifevent->role, ea);
+
+ /* dhd already has created an interface by default, for 0 */
+ if (ifevent->ifidx == 0)
+ break;
+ }
#endif /* PROP_TXSTATUS */
+ if (ifevent->ifidx > 0 && ifevent->ifidx < DHD_MAX_IFS) {
+ if (ifevent->opcode == WLC_E_IF_ADD) {
+ if (dhd_event_ifadd(dhd_pub->info, ifevent, event->ifname,
+ event->addr.octet)) {
+
+ DHD_ERROR(("%s: dhd_event_ifadd failed ifidx: %d %s\n",
+ __FUNCTION__, ifevent->ifidx, event->ifname));
+ return (BCME_ERROR);
+ }
+ } else if (ifevent->opcode == WLC_E_IF_DEL) {
+ dhd_event_ifdel(dhd_pub->info, ifevent, event->ifname,
+ event->addr.octet);
+ } else if (ifevent->opcode == WLC_E_IF_CHANGE) {
#ifdef WL_CFG80211
- if (wl_cfg80211_is_progress_ifchange()) {
- DHD_ERROR(("%s: ifidx %d for %s action %d\n",
- __FUNCTION__, ifevent->ifidx,
- event->ifname, ifevent->action));
- if (ifevent->action == WLC_E_IF_ADD ||
- ifevent->action == WLC_E_IF_CHANGE)
- wl_cfg80211_notify_ifchange();
- return (BCME_OK);
- }
+ wl_cfg80211_notify_ifchange(ifevent->ifidx,
+ event->ifname, event->addr.octet, ifevent->bssidx);
#endif /* WL_CFG80211 */
- if (ifevent->ifidx > 0 && ifevent->ifidx < DHD_MAX_IFS) {
- if (ifevent->action == WLC_E_IF_ADD) {
- if (dhd_add_if(dhd_pub->info, ifevent->ifidx,
- NULL, event->ifname,
- event->addr.octet,
- ifevent->flags, ifevent->bssidx)) {
- DHD_ERROR(("%s: dhd_add_if failed!!"
- " ifidx: %d for %s\n",
- __FUNCTION__,
- ifevent->ifidx,
- event->ifname));
- return (BCME_ERROR);
- }
- }
- else if (ifevent->action == WLC_E_IF_DEL)
- dhd_del_if(dhd_pub->info, ifevent->ifidx);
+ }
} else {
-#ifndef PROP_TXSTATUS
+#if !defined(PROP_TXSTATUS) || !defined(PCIE_FULL_DONGLE)
DHD_ERROR(("%s: Invalid ifidx %d for %s\n",
__FUNCTION__, ifevent->ifidx, event->ifname));
#endif /* !PROP_TXSTATUS */
}
- }
/* send up the if event: btamp user needs it */
- *ifidx = dhd_ifname2idx(dhd_pub->info, event->ifname);
+ *ifidx = hostidx;
/* push up to external supp/auth */
dhd_event(dhd_pub->info, (char *)pvt_data, evlen, *ifidx);
break;
-
+ }
#ifdef WLMEDIA_HTSF
case WLC_E_HTSFSYNC:
htsf_update(dhd_pub->info, event_data);
break;
#endif /* WLMEDIA_HTSF */
+#if defined(NDISVER) && (NDISVER >= 0x0630)
+ case WLC_E_NDIS_LINK:
+ break;
+#else
case WLC_E_NDIS_LINK: {
uint32 temp = hton32(WLC_E_LINK);
memcpy((void *)(&pvt_data->event.event_type), &temp,
sizeof(pvt_data->event.event_type));
+ break;
}
+#endif /* NDISVER >= 0x0630 */
case WLC_E_PFN_NET_FOUND:
case WLC_E_PFN_NET_LOST:
break;
+#if defined(PNO_SUPPORT)
case WLC_E_PFN_BSSID_NET_FOUND:
case WLC_E_PFN_BSSID_NET_LOST:
case WLC_E_PFN_BEST_BATCHING:
-#ifdef PNO_SUPPORT
dhd_pno_event_handler(dhd_pub, event, (void *)event_data);
-#endif
break;
+#endif
/* These are what external supplicant/authenticator wants */
- /* fall through */
+ case WLC_E_ASSOC_IND:
+ case WLC_E_AUTH_IND:
+ case WLC_E_REASSOC_IND:
+ dhd_findadd_sta(dhd_pub, hostidx, &event->addr.octet);
+ break;
case WLC_E_LINK:
+#ifdef PCIE_FULL_DONGLE
+ if (dhd_update_interface_link_status(dhd_pub, (uint8)hostidx,
+ (uint8)flags) != BCME_OK)
+ break;
+ if (!flags) {
+ dhd_flow_rings_delete(dhd_pub, hostidx);
+ }
+ /* fall through */
+#endif
case WLC_E_DEAUTH:
case WLC_E_DEAUTH_IND:
case WLC_E_DISASSOC:
case WLC_E_DISASSOC_IND:
+ if (type != WLC_E_LINK) {
+ dhd_del_sta(dhd_pub, hostidx, &event->addr.octet);
+ }
DHD_EVENT(("%s: Link event %d, flags %x, status %x\n",
__FUNCTION__, type, flags, status));
+#ifdef PCIE_FULL_DONGLE
+ if (type != WLC_E_LINK) {
+ uint8 ifindex = (uint8)hostidx;
+ uint8 role = dhd_flow_rings_ifindex2role(dhd_pub, ifindex);
+ if (role == WLC_E_IF_ROLE_STA) {
+ dhd_flow_rings_delete(dhd_pub, ifindex);
+ } else {
+ dhd_flow_rings_delete_for_peer(dhd_pub, ifindex,
+ &event->addr.octet[0]);
+ }
+ }
+#endif
/* fall through */
default:
- *ifidx = dhd_ifname2idx(dhd_pub->info, event->ifname);
+ *ifidx = hostidx;
/* push up to external supp/auth */
dhd_event(dhd_pub->info, (char *)pvt_data, evlen, *ifidx);
DHD_TRACE(("%s: MAC event %d, flags %x, status %x\n",
BCM_REFERENCE(flags);
BCM_REFERENCE(status);
- /* put it back to WLC_E_NDIS_LINK */
- if (type == WLC_E_NDIS_LINK) {
- uint32 temp;
-
- temp = ntoh32_ua((void *)&event->event_type);
- DHD_TRACE(("Converted to WLC_E_LINK type %d\n", temp));
-
- temp = ntoh32(WLC_E_NDIS_LINK);
- memcpy((void *)(&pvt_data->event.event_type), &temp,
- sizeof(pvt_data->event.event_type));
- }
break;
}
#ifdef SHOW_EVENTS
- wl_show_host_event(event, (void *)event_data);
+ wl_show_host_event(dhd_pub, event,
+ (void *)event_data, raw_event, dhd_pub->enable_log);
#endif /* SHOW_EVENTS */
return (BCME_OK);
printf("\n");
#endif /* DHD_DEBUG */
}
-
#ifndef strtoul
#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
#endif
{
char *argv[8];
int i = 0;
- const char *str;
+ const char *str;
int buf_len;
int str_len;
char *arg_save = 0, *arg_org = 0;
int rc;
- char buf[128];
+ char buf[32] = {0};
wl_pkt_filter_enable_t enable_parm;
wl_pkt_filter_enable_t * pkt_filterp;
return;
if (!(arg_save = MALLOC(dhd->osh, strlen(arg) + 1))) {
- DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
+ DHD_ERROR(("%s: malloc failed\n", __FUNCTION__));
goto fail;
}
arg_org = arg_save;
str = "pkt_filter_enable";
str_len = strlen(str);
- bcm_strncpy_s(buf, sizeof(buf), str, str_len);
- buf[str_len] = '\0';
+ bcm_strncpy_s(buf, sizeof(buf) - 1, str, sizeof(buf) - 1);
+ buf[ sizeof(buf) - 1 ] = '\0';
buf_len = str_len + 1;
pkt_filterp = (wl_pkt_filter_enable_t *)(buf + str_len + 1);
/* Parse packet filter id. */
enable_parm.id = htod32(strtoul(argv[i], NULL, 0));
+ if (dhd_conf_del_pkt_filter(dhd, enable_parm.id))
+ goto fail;
/* Parse enable/disable value. */
enable_parm.enable = htod32(enable);
rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
rc = rc >= 0 ? 0 : rc;
if (rc)
- DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n",
- __FUNCTION__, arg, rc));
+ DHD_TRACE(("%s: failed to %s pktfilter %s, retcode = %d\n",
+ __FUNCTION__, enable?"enable":"disable", arg, rc));
else
- DHD_TRACE(("%s: successfully added pktfilter %s\n",
- __FUNCTION__, arg));
+ DHD_TRACE(("%s: successfully %s pktfilter %s\n",
+ __FUNCTION__, enable?"enable":"disable", arg));
/* Contorl the master mode */
bcm_mkiovar("pkt_filter_mode", (char *)&master_mode, 4, buf, sizeof(buf));
rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
rc = rc >= 0 ? 0 : rc;
if (rc)
- DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n",
- __FUNCTION__, arg, rc));
+ DHD_TRACE(("%s: failed to set pkt_filter_mode %d, retcode = %d\n",
+ __FUNCTION__, master_mode, rc));
fail:
if (arg_org)
return;
if (!(arg_save = MALLOC(dhd->osh, strlen(arg) + 1))) {
- DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
+ DHD_ERROR(("%s: malloc failed\n", __FUNCTION__));
goto fail;
}
arg_org = arg_save;
if (!(buf = MALLOC(dhd->osh, BUF_SIZE))) {
- DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
+ DHD_ERROR(("%s: malloc failed\n", __FUNCTION__));
goto fail;
}
/* Parse packet filter id. */
pkt_filter.id = htod32(strtoul(argv[i], NULL, 0));
+ if (dhd_conf_del_pkt_filter(dhd, pkt_filter.id))
+ goto fail;
if (argv[++i] == NULL) {
DHD_ERROR(("Polarity not provided\n"));
DHD_ERROR(("%s: Failed to delete filter ID:%d, ret=%d\n",
__FUNCTION__, id, ret));
}
+ else
+ DHD_TRACE(("%s: successfully deleted pktfilter %d\n",
+ __FUNCTION__, id));
}
#endif /* PKT_FILTER_SUPPORT */
void
dhd_arp_offload_set(dhd_pub_t * dhd, int arp_mode)
{
- char iovbuf[32];
+ char iovbuf[DHD_IOVAR_BUF_SIZE];
+ int iovar_len;
int retcode;
- bcm_mkiovar("arp_ol", (char *)&arp_mode, 4, iovbuf, sizeof(iovbuf));
- retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+ iovar_len = bcm_mkiovar("arp_ol", (char *)&arp_mode, 4, iovbuf, sizeof(iovbuf));
+ if (!iovar_len) {
+ DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
+ __FUNCTION__, sizeof(iovbuf)));
+ return;
+ }
+
+ retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iovar_len, TRUE, 0);
retcode = retcode >= 0 ? 0 : retcode;
if (retcode)
- DHD_TRACE(("%s: failed to set ARP offload mode to 0x%x, retcode = %d\n",
+ DHD_ERROR(("%s: failed to set ARP offload mode to 0x%x, retcode = %d\n",
__FUNCTION__, arp_mode, retcode));
else
- DHD_TRACE(("%s: successfully set ARP offload mode to 0x%x\n",
+ DHD_ARPOE(("%s: successfully set ARP offload mode to 0x%x\n",
__FUNCTION__, arp_mode));
}
void
dhd_arp_offload_enable(dhd_pub_t * dhd, int arp_enable)
{
- char iovbuf[32];
+ char iovbuf[DHD_IOVAR_BUF_SIZE];
+ int iovar_len;
int retcode;
- bcm_mkiovar("arpoe", (char *)&arp_enable, 4, iovbuf, sizeof(iovbuf));
- retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+ iovar_len = bcm_mkiovar("arpoe", (char *)&arp_enable, 4, iovbuf, sizeof(iovbuf));
+ if (!iovar_len) {
+ DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
+ __FUNCTION__, sizeof(iovbuf)));
+ return;
+ }
+
+ retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iovar_len, TRUE, 0);
retcode = retcode >= 0 ? 0 : retcode;
if (retcode)
- DHD_TRACE(("%s: failed to enabe ARP offload to %d, retcode = %d\n",
+		DHD_ERROR(("%s: failed to enable ARP offload to %d, retcode = %d\n",
__FUNCTION__, arp_enable, retcode));
else
- DHD_TRACE(("%s: successfully enabed ARP offload to %d\n",
+		DHD_ARPOE(("%s: successfully enabled ARP offload to %d\n",
__FUNCTION__, arp_enable));
if (arp_enable) {
uint32 version;
idx = 0;
iov_len = bcm_mkiovar("arp_table_clear", 0, 0, iovbuf, sizeof(iovbuf));
+ if (!iov_len) {
+ DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
+ __FUNCTION__, sizeof(iovbuf)));
+ return;
+ }
if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx)) < 0)
DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
}
idx = 0;
iov_len = bcm_mkiovar("arp_hostip_clear", 0, 0, iovbuf, sizeof(iovbuf));
+ if (!iov_len) {
+ DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
+ __FUNCTION__, sizeof(iovbuf)));
+ return;
+ }
if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx)) < 0)
DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
}
idx = 0;
iov_len = bcm_mkiovar("arp_hostip", (char *)&ipaddr,
sizeof(ipaddr), iovbuf, sizeof(iovbuf));
+ if (!iov_len) {
+ DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
+ __FUNCTION__, sizeof(iovbuf)));
+ return;
+ }
retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx);
if (retcode)
- DHD_TRACE(("%s: ARP ip addr add failed, retcode = %d\n",
- __FUNCTION__, retcode));
+ DHD_ERROR(("%s: ARP ip addr add failed, retcode = %d\n",
+ __FUNCTION__, retcode));
else
- DHD_TRACE(("%s: sARP H ipaddr entry added \n",
- __FUNCTION__));
+ DHD_ARPOE(("%s: sARP H ipaddr entry added \n",
+ __FUNCTION__));
}
int
retcode = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, buflen, FALSE, idx);
if (retcode) {
- DHD_TRACE(("%s: ioctl WLC_GET_VAR error %d\n",
- __FUNCTION__, retcode));
+ DHD_ERROR(("%s: ioctl WLC_GET_VAR error %d\n",
+ __FUNCTION__, retcode));
return -1;
}
return 0;
}
#endif /* ARP_OFFLOAD_SUPPORT */
+
/*
* Neighbor Discovery Offload: enable NDO feature
* Called by ipv6 event handler when interface comes up/goes down
dhd_ndo_enable(dhd_pub_t * dhd, int ndo_enable)
{
char iovbuf[DHD_IOVAR_BUF_SIZE];
+ int iov_len;
int retcode;
if (dhd == NULL)
return -1;
- bcm_mkiovar("ndoe", (char *)&ndo_enable, 4, iovbuf, sizeof(iovbuf));
- retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+ iov_len = bcm_mkiovar("ndoe", (char *)&ndo_enable, 4, iovbuf, sizeof(iovbuf));
+ if (!iov_len) {
+ DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
+ __FUNCTION__, sizeof(iovbuf)));
+ return -1;
+ }
+ retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, 0);
if (retcode)
DHD_ERROR(("%s: failed to enabe ndo to %d, retcode = %d\n",
__FUNCTION__, ndo_enable, retcode));
}
/*
- * Neighbor Discover Offload: add host ipv6 ip into firmware
+ * Neighbor Discovery Offload: add host ipv6 address into firmware
* Called by ipv6 event handler when interface comes up
*/
int
char iovbuf[DHD_IOVAR_BUF_SIZE];
int retcode;
- if (dhd == NULL || ipv6addr == NULL)
+ if (dhd == NULL)
return -1;
- iov_len = bcm_mkiovar("nd_hostip", ipv6addr,
+ iov_len = bcm_mkiovar("nd_hostip", (char *)ipv6addr,
IPV6_ADDR_LEN, iovbuf, sizeof(iovbuf));
+ if (!iov_len) {
+ DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
+ __FUNCTION__, sizeof(iovbuf)));
+ return -1;
+ }
retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx);
if (retcode)
DHD_ERROR(("%s: ndo ip addr add failed, retcode = %d\n",
__FUNCTION__, retcode));
else
- DHD_ERROR(("%s: ndo ipaddr entry added \n",
+ DHD_TRACE(("%s: ndo ipaddr entry added \n",
__FUNCTION__));
+
return retcode;
}
/*
- * Neighbor Discover Offload: disable NDO feature
+ * Neighbor Discovery Offload: clear host ipv6 addresses from firmware
* Called by ipv6 event handler when interface goes down
*/
int
if (dhd == NULL)
return -1;
- iov_len = bcm_mkiovar("nd_hostip_clear", (char *)NULL,
+ iov_len = bcm_mkiovar("nd_hostip_clear", NULL,
0, iovbuf, sizeof(iovbuf));
+ if (!iov_len) {
+ DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
+ __FUNCTION__, sizeof(iovbuf)));
+ return -1;
+ }
retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx);
if (retcode)
}
}
-
/* Function to estimate possible DTIM_SKIP value */
int
dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd)
{
int bcn_li_dtim = 1; /* deafult no dtim skip setting */
int ret = -1;
- int dtim_assoc = 0;
+ int dtim_period = 0;
int ap_beacon = 0;
-
+ int allowed_skip_dtim_cnt = 0;
/* Check if associated */
if (dhd_is_associated(dhd, NULL, NULL) == FALSE) {
DHD_TRACE(("%s NOT assoc ret %d\n", __FUNCTION__, ret));
goto exit;
}
- /* if associated APs Beacon more that 100msec do no dtim skip */
- if (ap_beacon > MAX_DTIM_SKIP_BEACON_ITERVAL) {
- DHD_ERROR(("%s NO dtim skip for AP with beacon %d ms\n", __FUNCTION__, ap_beacon));
- goto exit;
- }
-
/* read associated ap's dtim setup */
if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_DTIMPRD,
- &dtim_assoc, sizeof(dtim_assoc), FALSE, 0)) < 0) {
+ &dtim_period, sizeof(dtim_period), FALSE, 0)) < 0) {
DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
goto exit;
}
/* if not assocated just eixt */
- if (dtim_assoc == 0) {
+ if (dtim_period == 0) {
goto exit;
}
bcn_li_dtim = dhd->suspend_bcn_li_dtim;
/* check if sta listen interval fits into AP dtim */
- if (dtim_assoc > CUSTOM_LISTEN_INTERVAL) {
+ if (dtim_period > CUSTOM_LISTEN_INTERVAL) {
/* AP DTIM to big for our Listen Interval : no dtim skiping */
- bcn_li_dtim = 1;
+ bcn_li_dtim = NO_DTIM_SKIP;
DHD_ERROR(("%s DTIM=%d > Listen=%d : too big ...\n",
- __FUNCTION__, dtim_assoc, CUSTOM_LISTEN_INTERVAL));
+ __FUNCTION__, dtim_period, CUSTOM_LISTEN_INTERVAL));
goto exit;
}
- if ((bcn_li_dtim * dtim_assoc) > CUSTOM_LISTEN_INTERVAL) {
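+	/* Cap the total skip interval (DTIM period * beacon period * skip count)
+	 * at MAX_DTIM_ALLOWED_INTERVAL by shrinking the skip count if needed. */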
+ if ((dtim_period * ap_beacon * bcn_li_dtim) > MAX_DTIM_ALLOWED_INTERVAL) {
+ allowed_skip_dtim_cnt = MAX_DTIM_ALLOWED_INTERVAL / (dtim_period * ap_beacon);
+ bcn_li_dtim = (allowed_skip_dtim_cnt != 0) ? allowed_skip_dtim_cnt : NO_DTIM_SKIP;
+ }
+
+ if ((bcn_li_dtim * dtim_period) > CUSTOM_LISTEN_INTERVAL) {
/* Round up dtim_skip to fit into STAs Listen Interval */
- bcn_li_dtim = (int)(CUSTOM_LISTEN_INTERVAL / dtim_assoc);
+ bcn_li_dtim = (int)(CUSTOM_LISTEN_INTERVAL / dtim_period);
DHD_TRACE(("%s agjust dtim_skip as %d\n", __FUNCTION__, bcn_li_dtim));
}
DHD_ERROR(("%s beacon=%d bcn_li_dtim=%d DTIM=%d Listen=%d\n",
- __FUNCTION__, ap_beacon, bcn_li_dtim, dtim_assoc, CUSTOM_LISTEN_INTERVAL));
+ __FUNCTION__, ap_beacon, bcn_li_dtim, dtim_period, CUSTOM_LISTEN_INTERVAL));
exit:
return bcn_li_dtim;
#if defined(KEEP_ALIVE)
int dhd_keep_alive_onoff(dhd_pub_t *dhd)
{
- char buf[256];
- const char *str;
+ char buf[32] = {0};
+ const char *str;
wl_mkeep_alive_pkt_t mkeep_alive_pkt = {0};
wl_mkeep_alive_pkt_t *mkeep_alive_pktp;
int buf_len;
int str_len;
- int res = -1;
+ int res = -1;
if (!dhd_support_sta_mode(dhd))
return res;
str = "mkeep_alive";
str_len = strlen(str);
- strncpy(buf, str, str_len);
- buf[ str_len ] = '\0';
+ strncpy(buf, str, sizeof(buf) - 1);
+ buf[ sizeof(buf) - 1 ] = '\0';
mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) (buf + str_len + 1);
mkeep_alive_pkt.period_msec = dhd->conf->keep_alive_period;
buf_len = str_len + 1;
#include <osl.h>\r
\r
#include <bcmutils.h>\r
+#include <hndsoc.h>\r
#if defined(HW_OOB)\r
#include <bcmdefs.h>\r
#include <bcmsdh.h>\r
-#include <hndsoc.h>\r
#include <sdio.h>\r
#include <sbchipc.h>\r
#endif\r
} \
} while (0)\r
\r
-#define MAXSZ_BUF 512\r
+#define SBSDIO_CIS_SIZE_LIMIT 0x200 /* maximum bytes in one CIS */\r
+#define MAXSZ_BUF 1000\r
#define MAXSZ_CONFIG 4096\r
\r
#define BCM43362A0_CHIP_REV 0\r
#define BCM43362A2_CHIP_REV 1\r
+#define BCM43430A0_CHIP_REV 0\r
#define BCM4330B2_CHIP_REV 4\r
+#define BCM43340B0_CHIP_REV 2\r
#define BCM43341B0_CHIP_REV 2\r
#define BCM43241B4_CHIP_REV 5\r
#define BCM4335A0_CHIP_REV 2\r
#define BCM4339A0_CHIP_REV 1\r
+#define BCM4354A1_CHIP_REV 1\r
+#define BCM4356A2_CHIP_REV 2\r
\r
#define FW_TYPE_STA 0\r
#define FW_TYPE_APSTA 1\r
"fw_RK903b2_mfg.bin"\r
};\r
\r
-const static char *bcm4330b2ag_fw_name[] = {\r
+const static char *bcm4330b2_ag_fw_name[] = {\r
"fw_RK903_ag.bin",\r
"fw_RK903_ag_apsta.bin",\r
"fw_RK903_ag_p2p.bin",\r
"fw_RK901a2_mfg.bin"\r
};\r
\r
-const static char *bcm43341b0ag_fw_name[] = {\r
+const static char *bcm43438a0_fw_name[] = {\r
+ "fw_bcm43438a0.bin",\r
+ "fw_bcm43438a0_apsta.bin",\r
+ "fw_bcm43438a0_p2p.bin",\r
+ "fw_bcm43438a0_mfg.bin"\r
+};\r
+\r
+const static char *bcm43341b0_ag_fw_name[] = {\r
"fw_bcm43341b0_ag.bin",\r
"fw_bcm43341b0_ag_apsta.bin",\r
"fw_bcm43341b0_ag_p2p.bin",\r
"fw_bcm43341b0_ag_mfg.bin"\r
};\r
\r
-const static char *bcm43241b4ag_fw_name[] = {\r
+const static char *bcm43241b4_ag_fw_name[] = {\r
"fw_bcm43241b4_ag.bin",\r
"fw_bcm43241b4_ag_apsta.bin",\r
"fw_bcm43241b4_ag_p2p.bin",\r
"fw_bcm43241b4_ag_mfg.bin"\r
};\r
\r
-const static char *bcm4339a0ag_fw_name[] = {\r
+const static char *bcm4339a0_ag_fw_name[] = {\r
"fw_bcm4339a0_ag.bin",\r
"fw_bcm4339a0_ag_apsta.bin",\r
"fw_bcm4339a0_ag_p2p.bin",\r
"fw_bcm4339a0_ag_mfg.bin"\r
};\r
\r
+const static char *bcm4354a1_ag_fw_name[] = {\r
+ "fw_bcm4354a1_ag.bin",\r
+ "fw_bcm4354a1_ag_apsta.bin",\r
+ "fw_bcm4354a1_ag_p2p.bin",\r
+ "fw_bcm4354a1_ag_mfg.bin"\r
+};\r
+\r
+const static char *bcm4356a2_ag_fw_name[] = {\r
+ "fw_bcm4356a2_ag.bin",\r
+ "fw_bcm4356a2_ag_apsta.bin",\r
+ "fw_bcm4356a2_ag_p2p.bin",\r
+ "fw_bcm4356a2_ag_mfg.bin"\r
+};\r
+\r
+#define htod32(i) i
+#define htod16(i) i
+#define dtoh32(i) i
+#define dtoh16(i) i
+#define htodchanspec(i) i
+#define dtohchanspec(i) i\r
+\r
void\r
-dhd_conf_set_fw_name_by_chip(dhd_pub_t *dhd, char *dst, char *src)\r
+dhd_conf_free_mac_list(wl_mac_list_ctrl_t *mac_list)\r
{\r
- int fw_type, ag_type;\r
- static uint chip, chiprev, first=1;\r
- int i;\r
-\r
- //if(src[0] == '\0')\r
- // rule out bcm4330 (gwl)\r
- if (strstr(dst, "bcm4330"))\r
- return;\r
-\r
- //strcpy(dst, src);\r
-\r
- /* find out the last '/' */\r
- i = strlen(dst);\r
- while (i>0){\r
- if (dst[i] == '/') break;\r
- i--;\r
- }\r
-\r
- ag_type = strstr(&dst[i], "_ag") ? FW_TYPE_AG : FW_TYPE_G;\r
- fw_type = (strstr(&src[i], "_mfg") ?\r
- FW_TYPE_MFG : (strstr(&src[i], "_apsta") ?\r
- FW_TYPE_APSTA : (strstr(&src[i], "_p2p") ?\r
- FW_TYPE_P2P : FW_TYPE_STA)));\r
-\r
- if (first) {\r
- chip = dhd_bus_chip_id(dhd);\r
- chiprev = dhd_bus_chiprev_id(dhd);\r
- first = 0;\r
- }\r
-\r
- if (ag_type == FW_TYPE_G) {\r
- switch (chip) {\r
- case BCM4330_CHIP_ID:\r
- strcpy(&dst[i+1], bcm4330b2_fw_name[fw_type]);\r
- break;\r
- case BCM43362_CHIP_ID:\r
- if (chiprev==BCM43362A0_CHIP_REV)\r
- strcpy(&dst[i+1], bcm43362a0_fw_name[fw_type]);\r
- else\r
- strcpy(&dst[i+1], bcm43362a2_fw_name[fw_type]);\r
- break;\r
- }\r
- } else {
- switch (chip) {
- case BCM4330_CHIP_ID:
- strcpy(&dst[i+1], bcm4330b2ag_fw_name[fw_type]);
- break;
- case BCM43341_CHIP_ID:
- if (chiprev == BCM43341B0_CHIP_REV)
- strcpy(&dst[i+1], bcm43341b0ag_fw_name[fw_type]);
- break;
- case BCM4324_CHIP_ID:
- if (chiprev == BCM43241B4_CHIP_REV)
- strcpy(&dst[i+1], bcm43241b4ag_fw_name[fw_type]);
- break;
- case BCM4335_CHIP_ID:
- if (chiprev == BCM4335A0_CHIP_REV)
- strcpy(&dst[i+1], bcm4339a0ag_fw_name[fw_type]);
- break;
- case BCM4339_CHIP_ID:
- if (chiprev == BCM4339A0_CHIP_REV)
- strcpy(&dst[i+1], bcm4339a0ag_fw_name[fw_type]);
- break;
- case BCM43362_CHIP_ID:
- if (chiprev==BCM43362A0_CHIP_REV)
- strcpy(&dst[i+1], bcm43362a0_fw_name[fw_type]);
- else
- strcpy(&dst[i+1], bcm43362a2_fw_name[fw_type]);
- bzero(nv_path, MOD_PARAM_PATHLEN);
- strcpy(nv_path, "/system/etc/firmware/nvram_AP6210.txt");
- break;
- }
- }
-
- printk("%s: firmware_path=%s\n", __FUNCTION__, dst);\r
+ CONFIG_TRACE(("%s called\n", __FUNCTION__));\r
+\r
+ if (mac_list->m_mac_list_head) {\r
+ CONFIG_TRACE(("%s Free %p\n", __FUNCTION__, mac_list->m_mac_list_head));\r
+ if (mac_list->m_mac_list_head->mac) {\r
+ CONFIG_TRACE(("%s Free %p\n", __FUNCTION__, mac_list->m_mac_list_head->mac));\r
+ kfree(mac_list->m_mac_list_head->mac);\r
+ }\r
+ kfree(mac_list->m_mac_list_head);\r
+ }\r
+ mac_list->count = 0;\r
+}\r
+\r
+int\r
+dhd_conf_get_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh, uint8 *mac)\r
+{\r
+ int i, err = -1;\r
+ uint8 *ptr = 0;\r
+ unsigned char tpl_code, tpl_link='\0';\r
+ uint8 header[3] = {0x80, 0x07, 0x19};\r
+ uint8 *cis;\r
+\r
+ if (!(cis = MALLOC(dhd->osh, SBSDIO_CIS_SIZE_LIMIT))) {\r
+ CONFIG_ERROR(("%s: cis malloc failed\n", __FUNCTION__));\r
+ return err;\r
+ }\r
+ bzero(cis, SBSDIO_CIS_SIZE_LIMIT);\r
+\r
+ if ((err = bcmsdh_cis_read(sdh, 0, cis, SBSDIO_CIS_SIZE_LIMIT))) {\r
+ CONFIG_ERROR(("%s: cis read err %d\n", __FUNCTION__, err));\r
+ MFREE(dhd->osh, cis, SBSDIO_CIS_SIZE_LIMIT);\r
+ return err;\r
+ }\r
+ err = -1; // reset err;\r
+ ptr = cis;\r
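+	/* Walk the CIS tuple chain (<code><link><data...>): stop on an 0xff code or\r
+	 * link, skip null tuples, and look for the vendor tuple (code 0x80, link 0x07,\r
+	 * tag 0x19) that carries the 6-byte MAC address. */\r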
+ do {\r
+ /* 0xff means we're done */\r
+ tpl_code = *ptr;\r
+ ptr++;\r
+ if (tpl_code == 0xff)\r
+ break;\r
+\r
+ /* null entries have no link field or data */\r
+ if (tpl_code == 0x00)\r
+ continue;\r
+\r
+ tpl_link = *ptr;\r
+ ptr++;\r
+ /* a size of 0xff also means we're done */\r
+ if (tpl_link == 0xff)\r
+ break;\r
+ if (config_msg_level & CONFIG_TRACE_LEVEL) {\r
+ printf("%s: tpl_code=0x%02x, tpl_link=0x%02x, tag=0x%02x\n",\r
+ __FUNCTION__, tpl_code, tpl_link, *ptr);\r
+ printf("%s: value:", __FUNCTION__);\r
+ for (i=0; i<tpl_link-1; i++) {\r
+ printf("%02x ", ptr[i+1]);\r
+ if ((i+1)%16==0)\r
+ printf("\n");\r
+ }\r
+ printf("\n");\r
+ }\r
+\r
+ if (tpl_code == 0x80 && tpl_link == 0x07 && *ptr == 0x19)\r
+ break;\r
+\r
+ ptr += tpl_link;\r
+ } while (1);\r
+\r
+ if (tpl_code == 0x80 && tpl_link == 0x07 && *ptr == 0x19) {\r
+ /* Normal OTP */\r
+ memcpy(mac, ptr+1, 6);\r
+ err = 0;\r
+ } else {\r
+ ptr = cis;\r
+ /* Special OTP */\r
+ if (bcmsdh_reg_read(sdh, SI_ENUM_BASE, 4) == 0x16044330) {\r
+ for (i=0; i<SBSDIO_CIS_SIZE_LIMIT; i++) {\r
+ if (!memcmp(header, ptr, 3)) {\r
+ memcpy(mac, ptr+1, 6);\r
+ err = 0;\r
+ break;\r
+ }\r
+ ptr++;\r
+ }\r
+ }\r
+ }\r
+\r
+ ASSERT(cis);\r
+ MFREE(dhd->osh, cis, SBSDIO_CIS_SIZE_LIMIT);\r
+\r
+ return err;\r
+}\r
+\r
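+/* Select a firmware file by MAC address: read the MAC from CIS/OTP, match its\r
+ * OUI/NIC against the fw_by_mac ranges, and keep the current firmware type\r
+ * (sta/apsta/p2p/mfg) when replacing the basename of fw_path. */\r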
+void\r
+dhd_conf_set_fw_name_by_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh, char *fw_path)\r
+{\r
+ int i, j;\r
+ uint8 mac[6]={0};\r
+ int fw_num=0, mac_num=0;\r
+ uint32 oui, nic;\r
+ wl_mac_list_t *mac_list;\r
+ wl_mac_range_t *mac_range;\r
+ char *pfw_name;\r
+ int fw_type, fw_type_new;\r
+\r
+ mac_list = dhd->conf->fw_by_mac.m_mac_list_head;\r
+ fw_num = dhd->conf->fw_by_mac.count;\r
+ if (!mac_list || !fw_num)\r
+ return;\r
+\r
+ if (dhd_conf_get_mac(dhd, sdh, mac)) {\r
+ CONFIG_ERROR(("%s: Can not read MAC address\n", __FUNCTION__));\r
+ return;\r
+ }\r
+ oui = (mac[0] << 16) | (mac[1] << 8) | (mac[2]);\r
+ nic = (mac[3] << 16) | (mac[4] << 8) | (mac[5]);\r
+\r
+ /* find out the last '/' */\r
+ i = strlen(fw_path);\r
+ while (i>0){\r
+ if (fw_path[i] == '/') break;\r
+ i--;\r
+ }\r
+ pfw_name = &fw_path[i+1];\r
+ fw_type = (strstr(pfw_name, "_mfg") ?\r
+ FW_TYPE_MFG : (strstr(pfw_name, "_apsta") ?\r
+ FW_TYPE_APSTA : (strstr(pfw_name, "_p2p") ?\r
+ FW_TYPE_P2P : FW_TYPE_STA)));\r
+\r
+ for (i=0; i<fw_num; i++) {\r
+ mac_num = mac_list[i].count;\r
+ mac_range = mac_list[i].mac;\r
+ fw_type_new = (strstr(mac_list[i].name, "_mfg") ?\r
+ FW_TYPE_MFG : (strstr(mac_list[i].name, "_apsta") ?\r
+ FW_TYPE_APSTA : (strstr(mac_list[i].name, "_p2p") ?\r
+ FW_TYPE_P2P : FW_TYPE_STA)));\r
+ if (fw_type != fw_type_new) {\r
+			printf("%s: fw_type=%d != fw_type_new=%d\n", __FUNCTION__, fw_type, fw_type_new);\r
+ continue;\r
+ }\r
+ for (j=0; j<mac_num; j++) {\r
+ if (oui == mac_range[j].oui) {\r
+ if (nic >= mac_range[j].nic_start && nic <= mac_range[j].nic_end) {\r
+ strcpy(pfw_name, mac_list[i].name);\r
+ printf("%s: matched oui=0x%06X, nic=0x%06X\n",\r
+ __FUNCTION__, oui, nic);\r
+ printf("%s: fw_path=%s\n", __FUNCTION__, fw_path);\r
+ return;\r
+ }\r
+ }\r
+ }\r
+ }\r
+}\r
+\r
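+/* Same scheme as dhd_conf_set_fw_name_by_mac, but picks the NVRAM file from\r
+ * the nv_by_mac OUI/NIC ranges. */\r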
+void\r
+dhd_conf_set_nv_name_by_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh, char *nv_path)\r
+{\r
+ int i, j;\r
+ uint8 mac[6]={0};\r
+ int nv_num=0, mac_num=0;\r
+ uint32 oui, nic;\r
+ wl_mac_list_t *mac_list;\r
+ wl_mac_range_t *mac_range;\r
+ char *pnv_name;\r
+\r
+ mac_list = dhd->conf->nv_by_mac.m_mac_list_head;\r
+ nv_num = dhd->conf->nv_by_mac.count;\r
+ if (!mac_list || !nv_num)\r
+ return;\r
+\r
+ if (dhd_conf_get_mac(dhd, sdh, mac)) {\r
+ CONFIG_ERROR(("%s: Can not read MAC address\n", __FUNCTION__));\r
+ return;\r
+ }\r
+ oui = (mac[0] << 16) | (mac[1] << 8) | (mac[2]);\r
+ nic = (mac[3] << 16) | (mac[4] << 8) | (mac[5]);\r
+\r
+ /* find out the last '/' */\r
+ i = strlen(nv_path);\r
+ while (i>0){\r
+ if (nv_path[i] == '/') break;\r
+ i--;\r
+ }\r
+ pnv_name = &nv_path[i+1];\r
+\r
+ for (i=0; i<nv_num; i++) {\r
+ mac_num = mac_list[i].count;\r
+ mac_range = mac_list[i].mac;\r
+ for (j=0; j<mac_num; j++) {\r
+ if (oui == mac_range[j].oui) {\r
+ if (nic >= mac_range[j].nic_start && nic <= mac_range[j].nic_end) {\r
+ strcpy(pnv_name, mac_list[i].name);\r
+ printf("%s: matched oui=0x%06X, nic=0x%06X\n",\r
+ __FUNCTION__, oui, nic);\r
+ printf("%s: nv_path=%s\n", __FUNCTION__, nv_path);\r
+ return;\r
+ }\r
+ }\r
+ }\r
+ }\r
+}\r
+\r
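+/* Replace the basename of fw_path with the firmware matching the probed chip\r
+ * ID and revision; only effective when FW_PATH_AUTO_SELECT is defined. */\r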
+void\r
+dhd_conf_set_fw_name_by_chip(dhd_pub_t *dhd, char *fw_path)\r
+{\r
+ int fw_type, ag_type;\r
+ uint chip, chiprev;\r
+ int i;\r
+\r
+ chip = dhd->conf->chip;\r
+ chiprev = dhd->conf->chiprev;\r
+\r
+ if (fw_path[0] == '\0') {\r
+#ifdef CONFIG_BCMDHD_FW_PATH\r
+ bcm_strncpy_s(fw_path, MOD_PARAM_PATHLEN-1, CONFIG_BCMDHD_FW_PATH, MOD_PARAM_PATHLEN-1);\r
+ if (fw_path[0] == '\0')\r
+#endif\r
+ {\r
+ printf("firmware path is null\n");\r
+ return;\r
+ }\r
+ }\r
+#ifndef FW_PATH_AUTO_SELECT\r
+ return;\r
+#endif\r
+\r
+ /* find out the last '/' */\r
+ i = strlen(fw_path);\r
+ while (i>0){\r
+ if (fw_path[i] == '/') break;\r
+ i--;\r
+ }\r
+#ifdef BAND_AG\r
+ ag_type = FW_TYPE_AG;\r
+#else\r
+ ag_type = strstr(&fw_path[i], "_ag") ? FW_TYPE_AG : FW_TYPE_G;\r
+#endif\r
+ fw_type = (strstr(&fw_path[i], "_mfg") ?\r
+ FW_TYPE_MFG : (strstr(&fw_path[i], "_apsta") ?\r
+ FW_TYPE_APSTA : (strstr(&fw_path[i], "_p2p") ?\r
+ FW_TYPE_P2P : FW_TYPE_STA)));\r
+\r
+ switch (chip) {\r
+ case BCM4330_CHIP_ID:\r
+ if (ag_type == FW_TYPE_G) {\r
+ if (chiprev == BCM4330B2_CHIP_REV)\r
+ strcpy(&fw_path[i+1], bcm4330b2_fw_name[fw_type]);\r
+ break;\r
+ } else {\r
+ if (chiprev == BCM4330B2_CHIP_REV)\r
+ strcpy(&fw_path[i+1], bcm4330b2_ag_fw_name[fw_type]);\r
+ break;\r
+ }\r
+ case BCM43362_CHIP_ID:\r
+ if (chiprev == BCM43362A0_CHIP_REV)\r
+ strcpy(&fw_path[i+1], bcm43362a0_fw_name[fw_type]);\r
+ else\r
+ strcpy(&fw_path[i+1], bcm43362a2_fw_name[fw_type]);\r
+ break;\r
+ case BCM43430_CHIP_ID:\r
+ if (chiprev == BCM43430A0_CHIP_REV)\r
+ strcpy(&fw_path[i+1], bcm43438a0_fw_name[fw_type]);\r
+ break;\r
+ case BCM43340_CHIP_ID:\r
+ if (chiprev == BCM43340B0_CHIP_REV)\r
+ strcpy(&fw_path[i+1], bcm43341b0_ag_fw_name[fw_type]);\r
+ break;\r
+ case BCM43341_CHIP_ID:\r
+ if (chiprev == BCM43341B0_CHIP_REV)\r
+ strcpy(&fw_path[i+1], bcm43341b0_ag_fw_name[fw_type]);\r
+ break;\r
+ case BCM4324_CHIP_ID:\r
+ if (chiprev == BCM43241B4_CHIP_REV)\r
+ strcpy(&fw_path[i+1], bcm43241b4_ag_fw_name[fw_type]);\r
+ break;\r
+ case BCM4335_CHIP_ID:\r
+ if (chiprev == BCM4335A0_CHIP_REV)\r
+ strcpy(&fw_path[i+1], bcm4339a0_ag_fw_name[fw_type]);\r
+ break;\r
+ case BCM4339_CHIP_ID:\r
+ if (chiprev == BCM4339A0_CHIP_REV)\r
+ strcpy(&fw_path[i+1], bcm4339a0_ag_fw_name[fw_type]);\r
+ break;\r
+ case BCM4354_CHIP_ID:\r
+ if (chiprev == BCM4354A1_CHIP_REV)\r
+ strcpy(&fw_path[i+1], bcm4354a1_ag_fw_name[fw_type]);\r
+ else if (chiprev == BCM4356A2_CHIP_REV)\r
+ strcpy(&fw_path[i+1], bcm4356a2_ag_fw_name[fw_type]);\r
+ break;\r
+ }\r
+\r
+ printf("%s: firmware_path=%s\n", __FUNCTION__, fw_path);\r
}\r
\r
#if defined(HW_OOB)\r
}\r
\r
int\r
-dhd_conf_get_country(dhd_pub_t *dhd)\r
+dhd_conf_get_country(dhd_pub_t *dhd, wl_country_t *cspec)\r
{\r
int bcmerror = -1;\r
- wl_country_t cspec;\r
\r
- memset(&cspec, 0, sizeof(wl_country_t));\r
- bcm_mkiovar("country", NULL, 0, (char*)&cspec, sizeof(wl_country_t));\r
- if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, &cspec, sizeof(wl_country_t), FALSE, 0)) < 0)\r
+ memset(cspec, 0, sizeof(wl_country_t));\r
+ bcm_mkiovar("country", NULL, 0, (char*)cspec, sizeof(wl_country_t));\r
+ if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, cspec, sizeof(wl_country_t), FALSE, 0)) < 0)\r
printf("%s: country code getting failed %d\n", __FUNCTION__, bcmerror);\r
else\r
- printf("Country code: %s (%s/%d)\n", cspec.country_abbrev, cspec.ccode, cspec.rev);\r
+ printf("Country code: %s (%s/%d)\n", cspec->country_abbrev, cspec->ccode, cspec->rev);\r
+\r
+ return bcmerror;\r
+}\r
+\r
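+/* Sanity-check the programmed country: if reading the valid channel list fails,\r
+ * or the 2.4G band reports fewer than 11 channels, re-apply the configured\r
+ * country code, falling back to US/0. */\r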
+int\r
+dhd_conf_fix_country(dhd_pub_t *dhd)\r
+{\r
+ int bcmerror = -1;\r
+ uint band;\r
+ wl_uint32_list_t *list;\r
+ u8 valid_chan_list[sizeof(u32)*(WL_NUMCHANNELS + 1)];\r
+\r
+ if (!(dhd && dhd->conf)) {\r
+ return bcmerror;\r
+ }\r
+
+ memset(valid_chan_list, 0, sizeof(valid_chan_list));\r
+ list = (wl_uint32_list_t *)(void *) valid_chan_list;
+ list->count = htod32(WL_NUMCHANNELS);
+ if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_GET_VALID_CHANNELS, valid_chan_list, sizeof(valid_chan_list), FALSE, 0)) < 0) {\r
+ CONFIG_ERROR(("%s: get channels failed with %d\n", __FUNCTION__, bcmerror));\r
+ }\r
+\r
+ band = dhd_conf_get_band(dhd);\r
+\r
+ if (bcmerror || ((band==WLC_BAND_AUTO || band==WLC_BAND_2G) &&\r
+ dtoh32(list->count)<11)) {\r
+ CONFIG_ERROR(("%s: bcmerror=%d, # of channels %d\n",\r
+ __FUNCTION__, bcmerror, dtoh32(list->count)));\r
+ if ((bcmerror = dhd_conf_set_country(dhd)) < 0) {\r
+ strcpy(dhd->conf->cspec.country_abbrev, "US");\r
+ dhd->conf->cspec.rev = 0;\r
+ strcpy(dhd->conf->cspec.ccode, "US");\r
+ dhd_conf_set_country(dhd);\r
+ }\r
+ }\r
\r
return bcmerror;\r
}\r
{\r
int bcmerror = -1;\r
char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */\r
+ struct dhd_conf *conf = dhd->conf;\r
\r
- printf("%s: Set roam_off %d\n", __FUNCTION__, dhd->conf->roam_off);\r
- dhd_roam_disable = dhd->conf->roam_off;\r
- bcm_mkiovar("roam_off", (char *)&dhd->conf->roam_off, 4, iovbuf, sizeof(iovbuf));\r
+ printf("%s: Set roam_off %d\n", __FUNCTION__, conf->roam_off);\r
+ dhd_roam_disable = conf->roam_off;\r
+ bcm_mkiovar("roam_off", (char *)&conf->roam_off, 4, iovbuf, sizeof(iovbuf));\r
dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);\r
\r
- if (!dhd->conf->roam_off || !dhd->conf->roam_off_suspend) {\r
- printf("%s: Set roam_trigger %d\n", __FUNCTION__, dhd->conf->roam_trigger[0]);\r
- if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_TRIGGER, dhd->conf->roam_trigger,\r
- sizeof(dhd->conf->roam_trigger), TRUE, 0)) < 0)\r
+ if (!conf->roam_off || !conf->roam_off_suspend) {\r
+ printf("%s: Set roam_trigger %d\n", __FUNCTION__, conf->roam_trigger[0]);\r
+ if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_TRIGGER, conf->roam_trigger,\r
+ sizeof(conf->roam_trigger), TRUE, 0)) < 0)\r
CONFIG_ERROR(("%s: roam trigger setting failed %d\n", __FUNCTION__, bcmerror));\r
\r
- printf("%s: Set roam_scan_period %d\n", __FUNCTION__, dhd->conf->roam_scan_period[0]);\r
- if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_SCAN_PERIOD, dhd->conf->roam_scan_period,\r
- sizeof(dhd->conf->roam_scan_period), TRUE, 0)) < 0)\r
+ printf("%s: Set roam_scan_period %d\n", __FUNCTION__, conf->roam_scan_period[0]);\r
+ if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_SCAN_PERIOD, conf->roam_scan_period,\r
+ sizeof(conf->roam_scan_period), TRUE, 0)) < 0)\r
CONFIG_ERROR(("%s: roam scan period setting failed %d\n", __FUNCTION__, bcmerror));\r
\r
- printf("%s: Set roam_delta %d\n", __FUNCTION__, dhd->conf->roam_delta[0]);\r
- if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_DELTA, dhd->conf->roam_delta,\r
- sizeof(dhd->conf->roam_delta), TRUE, 0)) < 0)\r
+ printf("%s: Set roam_delta %d\n", __FUNCTION__, conf->roam_delta[0]);\r
+ if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_DELTA, conf->roam_delta,\r
+ sizeof(conf->roam_delta), TRUE, 0)) < 0)\r
CONFIG_ERROR(("%s: roam delta setting failed %d\n", __FUNCTION__, bcmerror));\r
\r
- printf("%s: Set fullroamperiod %d\n", __FUNCTION__, dhd->conf->fullroamperiod);\r
- bcm_mkiovar("fullroamperiod", (char *)&dhd->conf->fullroamperiod, 4, iovbuf, sizeof(iovbuf));\r
+ printf("%s: Set fullroamperiod %d\n", __FUNCTION__, conf->fullroamperiod);\r
+ bcm_mkiovar("fullroamperiod", (char *)&conf->fullroamperiod, 4, iovbuf, sizeof(iovbuf));\r
if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)\r
CONFIG_ERROR(("%s: roam fullscan period setting failed %d\n", __FUNCTION__, bcmerror));\r
}\r
}\r
\r
void\r
-dhd_conf_set_bw(dhd_pub_t *dhd)\r
+dhd_conf_set_mimo_bw_cap(dhd_pub_t *dhd)\r
{\r
int bcmerror = -1;\r
char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */\r
- uint32 mimo_bw_cap = 1; /* Turn HT40 on in 2.4 GHz */\r
+ uint32 mimo_bw_cap;\r
\r
- if (dhd_bus_chip_id(dhd) == BCM43341_CHIP_ID ||
- dhd_bus_chip_id(dhd) == BCM4324_CHIP_ID ||
- dhd_bus_chip_id(dhd) == BCM4335_CHIP_ID ||\r
- dhd_bus_chip_id(dhd) == BCM4339_CHIP_ID) {\r
+ if (dhd->conf->mimo_bw_cap >= 0) {\r
+ mimo_bw_cap = (uint)dhd->conf->mimo_bw_cap;\r
if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, NULL, 0, TRUE, 0)) < 0)\r
CONFIG_ERROR(("%s: WLC_DOWN setting failed %d\n", __FUNCTION__, bcmerror));\r
- /* Enable HT40 in 2.4 GHz */\r
- printf("%s: Enable HT40 in 2.4 GHz\n", __FUNCTION__);\r
- bcm_mkiovar("mimo_bw_cap", (char *)&mimo_bw_cap, 4, iovbuf, sizeof(iovbuf));
+ /* 0:HT20 in ALL, 1:HT40 in ALL, 2: HT20 in 2G HT40 in 5G */\r
+ printf("%s: Set mimo_bw_cap %d\n", __FUNCTION__, mimo_bw_cap);\r
+ bcm_mkiovar("mimo_bw_cap", (char *)&mimo_bw_cap, 4, iovbuf, sizeof(iovbuf));\r
if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)\r
CONFIG_ERROR(("%s: mimo_bw_cap setting failed %d\n", __FUNCTION__, bcmerror));\r
}\r
int bcmerror = -1;\r
char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */\r
\r
- if (dhd_bus_chip_id(dhd) == BCM43362_CHIP_ID && dhd->conf->force_wme_ac) {\r
+ if (dhd->conf->chip == BCM43362_CHIP_ID && dhd->conf->force_wme_ac) {\r
bcm_mkiovar("force_wme_ac", (char *)&dhd->conf->force_wme_ac, 4, iovbuf, sizeof(iovbuf));\r
if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)\r
CONFIG_ERROR(("%s: force_wme_ac setting failed %d\n", __FUNCTION__, bcmerror));\r
memcpy((char*)acp, iovbuf, sizeof(edcf_acparam_t)*AC_COUNT);\r
\r
acparam = &acp[AC_BK];\r
- CONFIG_TRACE(("BK: aci %d aifsn %d ecwmin %d ecwmax %d size %d\n",\r
+ CONFIG_TRACE(("%s: BK: aci %d aifsn %d ecwmin %d ecwmax %d size %d\n", __FUNCTION__,\r
acparam->ACI, acparam->ACI&EDCF_AIFSN_MASK,\r
acparam->ECW&EDCF_ECWMIN_MASK, (acparam->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT,\r
sizeof(acp)));\r
acparam = &acp[AC_BE];\r
- CONFIG_TRACE(("BE: aci %d aifsn %d ecwmin %d ecwmax %d size %d\n",\r
+ CONFIG_TRACE(("%s: BE: aci %d aifsn %d ecwmin %d ecwmax %d size %d\n", __FUNCTION__,\r
acparam->ACI, acparam->ACI&EDCF_AIFSN_MASK,\r
acparam->ECW&EDCF_ECWMIN_MASK, (acparam->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT,\r
sizeof(acp)));\r
acparam = &acp[AC_VI];\r
- CONFIG_TRACE(("VI: aci %d aifsn %d ecwmin %d ecwmax %d size %d\n",\r
+ CONFIG_TRACE(("%s: VI: aci %d aifsn %d ecwmin %d ecwmax %d size %d\n", __FUNCTION__,\r
acparam->ACI, acparam->ACI&EDCF_AIFSN_MASK,\r
acparam->ECW&EDCF_ECWMIN_MASK, (acparam->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT,\r
sizeof(acp)));\r
acparam = &acp[AC_VO];\r
- CONFIG_TRACE(("VO: aci %d aifsn %d ecwmin %d ecwmax %d size %d\n",\r
+ CONFIG_TRACE(("%s: VO: aci %d aifsn %d ecwmin %d ecwmax %d size %d\n", __FUNCTION__,\r
acparam->ACI, acparam->ACI&EDCF_AIFSN_MASK,\r
acparam->ECW&EDCF_ECWMIN_MASK, (acparam->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT,\r
sizeof(acp)));\r
int aifsn, ecwmin, ecwmax;\r
edcf_acparam_t *acp;\r
char iovbuf[WLC_IOCTL_SMLEN];\r
+ struct dhd_conf *conf = dhd->conf;\r
\r
/* Default value */\r
aifsn = acparam_cur->ACI&EDCF_AIFSN_MASK;\r
ecwmax = (acparam_cur->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT;\r
\r
/* Modified value */\r
- if (dhd->conf->wme.aifsn[aci] > 0)\r
- aifsn = dhd->conf->wme.aifsn[aci];\r
- if (dhd->conf->wme.cwmin[aci] > 0)\r
- ecwmin = dhd->conf->wme.cwmin[aci];\r
- if (dhd->conf->wme.cwmax[aci] > 0)\r
- ecwmax = dhd->conf->wme.cwmax[aci];\r
+ if (conf->wme.aifsn[aci] > 0)\r
+ aifsn = conf->wme.aifsn[aci];\r
+ if (conf->wme.cwmin[aci] > 0)\r
+ ecwmin = conf->wme.cwmin[aci];\r
+ if (conf->wme.cwmax[aci] > 0)\r
+ ecwmax = conf->wme.cwmax[aci];\r
\r
/* Update */\r
acp = acparam_cur;\r
acp->ECW = ((ecwmax << EDCF_ECWMAX_SHIFT) & EDCF_ECWMAX_MASK) | (acp->ECW & EDCF_ECWMIN_MASK);\r
acp->ECW = ((acp->ECW & EDCF_ECWMAX_MASK) | (ecwmin & EDCF_ECWMIN_MASK));\r
\r
- CONFIG_TRACE(("mod aci %d aifsn %d ecwmin %d ecwmax %d size %d\n",\r
+ CONFIG_TRACE(("%s: mod aci %d aifsn %d ecwmin %d ecwmax %d size %d\n", __FUNCTION__,\r
acp->ACI, acp->ACI&EDCF_AIFSN_MASK,\r
acp->ECW&EDCF_ECWMIN_MASK, (acp->ECW&EDCF_ECWMAX_MASK)>>EDCF_ECWMAX_SHIFT,\r
sizeof(edcf_acparam_t)));\r
return;\r
}\r
\r
- CONFIG_TRACE(("Before change:\n"));\r
+ CONFIG_TRACE(("%s: Before change:\n", __FUNCTION__));\r
dhd_conf_get_wme(dhd, acparam_cur);\r
\r
dhd_conf_update_wme(dhd, &acparam_cur[AC_BK], AC_BK);\r
dhd_conf_update_wme(dhd, &acparam_cur[AC_VI], AC_VI);\r
dhd_conf_update_wme(dhd, &acparam_cur[AC_VO], AC_VO);\r
\r
- CONFIG_TRACE(("After change:\n"));\r
+ CONFIG_TRACE(("%s: After change:\n", __FUNCTION__));\r
dhd_conf_get_wme(dhd, acparam_cur);\r
} else {\r
CONFIG_ERROR(("%s: dhd or conf is NULL\n", __FUNCTION__));\r
char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */\r
uint stbc = 0;\r
\r
- if (dhd_bus_chip_id(dhd) == BCM4324_CHIP_ID) {\r
- if (dhd->conf->stbc >= 0) {\r
- stbc = (uint)dhd->conf->stbc;\r
- printf("%s: set stbc_tx %d\n", __FUNCTION__, stbc);\r
- bcm_mkiovar("stbc_tx", (char *)&stbc, 4, iovbuf, sizeof(iovbuf));\r
- if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)\r
- CONFIG_ERROR(("%s: stbc_tx setting failed %d\n", __FUNCTION__, bcmerror));\r
-\r
- printf("%s: set stbc_rx %d\n", __FUNCTION__, stbc);\r
- bcm_mkiovar("stbc_rx", (char *)&stbc, 4, iovbuf, sizeof(iovbuf));\r
- if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)\r
- CONFIG_ERROR(("%s: stbc_rx setting failed %d\n", __FUNCTION__, bcmerror));\r
- }\r
+ if (dhd->conf->chip == BCM4324_CHIP_ID && dhd->conf->stbc >= 0) {\r
+ stbc = (uint)dhd->conf->stbc;\r
+ printf("%s: set stbc_tx %d\n", __FUNCTION__, stbc);\r
+ bcm_mkiovar("stbc_tx", (char *)&stbc, 4, iovbuf, sizeof(iovbuf));\r
+ if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)\r
+ CONFIG_ERROR(("%s: stbc_tx setting failed %d\n", __FUNCTION__, bcmerror));\r
+\r
+ printf("%s: set stbc_rx %d\n", __FUNCTION__, stbc);\r
+ bcm_mkiovar("stbc_rx", (char *)&stbc, 4, iovbuf, sizeof(iovbuf));\r
+ if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)\r
+ CONFIG_ERROR(("%s: stbc_rx setting failed %d\n", __FUNCTION__, bcmerror));\r
}\r
}\r
\r
char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */\r
uint phy_oclscdenable = 0;\r
\r
- if (dhd_bus_chip_id(dhd) == BCM4324_CHIP_ID) {\r
- if (dhd->conf->phy_oclscdenable >= 0) {\r
- phy_oclscdenable = (uint)dhd->conf->phy_oclscdenable;\r
- printf("%s: set stbc_tx %d\n", __FUNCTION__, phy_oclscdenable);\r
- bcm_mkiovar("phy_oclscdenable", (char *)&phy_oclscdenable, 4, iovbuf, sizeof(iovbuf));\r
- if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)\r
- CONFIG_ERROR(("%s: stbc_tx setting failed %d\n", __FUNCTION__, bcmerror));\r
+ if (dhd->conf->chip == BCM4324_CHIP_ID && dhd->conf->phy_oclscdenable >= 0) {\r
+ phy_oclscdenable = (uint)dhd->conf->phy_oclscdenable;\r
+		printf("%s: set phy_oclscdenable %d\n", __FUNCTION__, phy_oclscdenable);\r
+ bcm_mkiovar("phy_oclscdenable", (char *)&phy_oclscdenable, 4, iovbuf, sizeof(iovbuf));\r
+ if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)\r
+			CONFIG_ERROR(("%s: phy_oclscdenable setting failed %d\n", __FUNCTION__, bcmerror));\r
+ }\r
+}\r
+\r
+#ifdef PKT_FILTER_SUPPORT\r
+void\r
+dhd_conf_add_pkt_filter(dhd_pub_t *dhd)\r
+{\r
+ int i;\r
+\r
+ /*\r
+ All pkt: pkt_filter_add=99 0 0 0 0x000000000000 0x000000000000
+	Netbios pkt: 120 0 0 12 0xFFFF000000000000000000FF000000000000000000000000FFFF 0x0800000000000000000000110000000000000000000000000089\r
+ */\r
+ for(i=0; i<dhd->conf->pkt_filter_add.count; i++) {\r
+ dhd->pktfilter[i+dhd->pktfilter_count] = dhd->conf->pkt_filter_add.filter[i];\r
+ printf("%s: %s\n", __FUNCTION__, dhd->pktfilter[i+dhd->pktfilter_count]);\r
+ }\r
+ dhd->pktfilter_count += i;\r
+}\r
+\r
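+/* Return true if the given filter id is listed in pkt_filter_del, meaning the\r
+ * caller should skip installing or enabling that filter. */\r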
+bool\r
+dhd_conf_del_pkt_filter(dhd_pub_t *dhd, uint32 id)\r
+{\r
+ int i;\r
+\r
+ for(i=0; i<dhd->conf->pkt_filter_del.count; i++) {\r
+ if (id == dhd->conf->pkt_filter_del.id[i]) {\r
+ printf("%s: %d\n", __FUNCTION__, dhd->conf->pkt_filter_del.id[i]);\r
+ return true;\r
+ }\r
+ }\r
+ return false;\r
+}\r
+\r
+void\r
+dhd_conf_discard_pkt_filter(dhd_pub_t *dhd)\r
+{\r
+ dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = NULL;\r
+ dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF";\r
+ dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = "102 0 0 0 0xFFFFFF 0x01005E";\r
+ dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = "103 0 0 0 0xFFFF 0x3333";\r
+ dhd->pktfilter[DHD_MDNS_FILTER_NUM] = NULL;\r
+ /* Do not enable ARP to pkt filter if dhd_master_mode is false.*/\r
+ dhd->pktfilter[DHD_ARP_FILTER_NUM] = NULL;\r
+\r
+ /* IPv4 broadcast address XXX.XXX.XXX.255 */\r
+ dhd->pktfilter[dhd->pktfilter_count] = "110 0 0 12 0xFFFF00000000000000000000000000000000000000FF 0x080000000000000000000000000000000000000000FF";\r
+ dhd->pktfilter_count++;\r
+ /* discard IPv4 multicast address 224.0.0.0/4 */\r
+ dhd->pktfilter[dhd->pktfilter_count] = "111 0 0 12 0xFFFF00000000000000000000000000000000F0 0x080000000000000000000000000000000000E0";\r
+ dhd->pktfilter_count++;\r
+ /* discard IPv6 multicast address FF00::/8 */\r
+ dhd->pktfilter[dhd->pktfilter_count] = "112 0 0 12 0xFFFF000000000000000000000000000000000000000000000000FF 0x86DD000000000000000000000000000000000000000000000000FF";\r
+ dhd->pktfilter_count++;\r
+ /* discard Netbios pkt */\r
+ dhd->pktfilter[dhd->pktfilter_count] = "120 0 0 12 0xFFFF000000000000000000FF000000000000000000000000FFFF 0x0800000000000000000000110000000000000000000000000089";\r
+ dhd->pktfilter_count++;\r
+\r
+}\r
+#endif /* PKT_FILTER_SUPPORT */\r
+\r
+void\r
+dhd_conf_set_srl(dhd_pub_t *dhd)\r
+{\r
+ int bcmerror = -1;\r
+ uint srl = 0;\r
+\r
+ if (dhd->conf->srl >= 0) {\r
+ srl = (uint)dhd->conf->srl;\r
+ printf("%s: set srl %d\n", __FUNCTION__, srl);\r
+ if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_SET_SRL, &srl , sizeof(srl), true, 0)) < 0)\r
+ CONFIG_ERROR(("%s: WLC_SET_SRL setting failed %d\n", __FUNCTION__, bcmerror));\r
+ }\r
+}\r
+\r
+void\r
+dhd_conf_set_lrl(dhd_pub_t *dhd)\r
+{\r
+ int bcmerror = -1;\r
+ uint lrl = 0;\r
+\r
+ if (dhd->conf->lrl >= 0) {\r
+ lrl = (uint)dhd->conf->lrl;\r
+ printf("%s: set lrl %d\n", __FUNCTION__, lrl);\r
+ if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_SET_LRL, &lrl , sizeof(lrl), true, 0)) < 0)\r
+ CONFIG_ERROR(("%s: WLC_SET_LRL setting failed %d\n", __FUNCTION__, bcmerror));\r
+ }\r
+}\r
+\r
+void\r
+dhd_conf_set_glom(dhd_pub_t *dhd)\r
+{\r
+ int bcmerror = -1;\r
+ char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */\r
+ uint32 bus_txglom = 0;\r
+\r
+ if (dhd->conf->bus_txglom) {\r
+ bus_txglom = (uint)dhd->conf->bus_txglom;\r
+ printf("%s: set bus:txglom %d\n", __FUNCTION__, bus_txglom);\r
+ bcm_mkiovar("bus:txglom", (char *)&bus_txglom, 4, iovbuf, sizeof(iovbuf));\r
+ if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)\r
+ CONFIG_ERROR(("%s: bus:txglom setting failed %d\n", __FUNCTION__, bcmerror));\r
+ }\r
+}\r
+\r
+void\r
+dhd_conf_set_ampdu_ba_wsize(dhd_pub_t *dhd)\r
+{\r
+ int bcmerror = -1;\r
+ char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */\r
+ uint32 ampdu_ba_wsize = dhd->conf->ampdu_ba_wsize;\r
+\r
+ /* Set ampdu ba wsize */\r
+ if (ampdu_ba_wsize > 0) {\r
+ printf("%s: set ampdu_ba_wsize %d\n", __FUNCTION__, ampdu_ba_wsize);\r
+		bcm_mkiovar("ampdu_ba_wsize", (char *)&ampdu_ba_wsize, 4, iovbuf, sizeof(iovbuf));\r
+ if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,\r
+ sizeof(iovbuf), TRUE, 0)) < 0) {\r
+ DHD_ERROR(("%s Set ampdu_ba_wsize to %d failed %d\n",\r
+ __FUNCTION__, ampdu_ba_wsize, bcmerror));\r
}\r
}\r
}\r
\r
+void\r
+dhd_conf_set_spect(dhd_pub_t *dhd)\r
+{\r
+ int bcmerror = -1;\r
+ uint spect = 0;\r
+\r
+ if (dhd->conf->spect >= 0) {\r
+ spect = (uint)dhd->conf->spect;\r
+ if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, NULL, 0, TRUE, 0)) < 0)\r
+ CONFIG_ERROR(("%s: WLC_DOWN setting failed %d\n", __FUNCTION__, bcmerror));\r
+ printf("%s: set spect %d\n", __FUNCTION__, spect);\r
+ if ((bcmerror = dhd_wl_ioctl_cmd(dhd, WLC_SET_SPECT_MANAGMENT, &spect , sizeof(spect), true, 0)) < 0)\r
+ CONFIG_ERROR(("%s: WLC_SET_SPECT_MANAGMENT setting failed %d\n", __FUNCTION__, bcmerror));\r
+ }\r
+}\r
+\r
unsigned int\r
process_config_vars(char *varbuf, unsigned int len, char *pickbuf, char *param)\r
{\r
- bool findNewline, pick=FALSE;\r
+	bool findNewline=FALSE, changenewline=FALSE, pick=FALSE;\r
int column;\r
unsigned int n, pick_column=0;\r
\r
for (n = 0; n < len; n++) {\r
if (varbuf[n] == '\r')\r
continue;\r
- if (findNewline && varbuf[n] != '\n')\r
+ if ((findNewline || changenewline) && varbuf[n] != '\n')\r
continue;\r
findNewline = FALSE;\r
if (varbuf[n] == '#') {\r
findNewline = TRUE;\r
continue;\r
}\r
- if (varbuf[n] == '\n') {\r
+ if (varbuf[n] == '\\') {\r
+ changenewline = TRUE;\r
+ continue;\r
+ }\r
+ if (!changenewline && varbuf[n] == '\n') {\r
if (column == 0)\r
continue;\r
column = 0;\r
continue;\r
}\r
+ if (changenewline && varbuf[n] == '\n') {\r
+ changenewline = FALSE;\r
+ continue;\r
+ }\r
if (!memcmp(&varbuf[n], param, strlen(param)) && column==0) {\r
pick = TRUE;\r
column = strlen(param);\r
column++;\r
}\r
if (pick) {\r
+ if (varbuf[n] == 0x9)\r
+ continue;\r
+ if (pick_column>0 && pickbuf[pick_column-1]==' ' && varbuf[n]==' ')\r
+ continue;\r
pickbuf[pick_column] = varbuf[n];\r
pick_column++;\r
}\r
return pick_column;\r
}\r
\r
+void\r
+dhd_conf_read_log_level(dhd_pub_t *dhd, char *bufp, uint len)\r
+{\r
+ uint len_val;\r
+ char pick[MAXSZ_BUF];\r
+\r
+ /* Process dhd_msglevel */\r
+ memset(pick, 0, MAXSZ_BUF);\r
+ len_val = process_config_vars(bufp, len, pick, "msglevel=");\r
+ if (len_val) {\r
+ dhd_msg_level = (int)simple_strtol(pick, NULL, 0);\r
+ printf("%s: dhd_msg_level = 0x%X\n", __FUNCTION__, dhd_msg_level);\r
+ }\r
+ /* Process sd_msglevel */\r
+ memset(pick, 0, MAXSZ_BUF);\r
+ len_val = process_config_vars(bufp, len, pick, "sd_msglevel=");\r
+ if (len_val) {\r
+ sd_msglevel = (int)simple_strtol(pick, NULL, 0);\r
+ printf("%s: sd_msglevel = 0x%X\n", __FUNCTION__, sd_msglevel);\r
+ }\r
+ /* Process android_msg_level */\r
+ memset(pick, 0, MAXSZ_BUF);\r
+ len_val = process_config_vars(bufp, len, pick, "android_msg_level=");\r
+ if (len_val) {\r
+ android_msg_level = (int)simple_strtol(pick, NULL, 0);\r
+ printf("%s: android_msg_level = 0x%X\n", __FUNCTION__, android_msg_level);\r
+ }\r
+ /* Process config_msg_level */\r
+ memset(pick, 0, MAXSZ_BUF);\r
+ len_val = process_config_vars(bufp, len, pick, "config_msg_level=");\r
+ if (len_val) {\r
+ config_msg_level = (int)simple_strtol(pick, NULL, 0);\r
+ printf("%s: config_msg_level = 0x%X\n", __FUNCTION__, config_msg_level);\r
+ }\r
+#ifdef WL_CFG80211\r
+ /* Process wl_dbg_level */\r
+ memset(pick, 0, MAXSZ_BUF);\r
+ len_val = process_config_vars(bufp, len, pick, "wl_dbg_level=");\r
+ if (len_val) {\r
+ wl_dbg_level = (int)simple_strtol(pick, NULL, 0);\r
+ printf("%s: wl_dbg_level = 0x%X\n", __FUNCTION__, wl_dbg_level);\r
+ }\r
+#endif\r
+#if defined(WL_WIRELESS_EXT)\r
+ /* Process iw_msg_level */\r
+ memset(pick, 0, MAXSZ_BUF);\r
+ len_val = process_config_vars(bufp, len, pick, "iw_msg_level=");\r
+ if (len_val) {\r
+ iw_msg_level = (int)simple_strtol(pick, NULL, 0);\r
+ printf("%s: iw_msg_level = 0x%X\n", __FUNCTION__, iw_msg_level);\r
+ }\r
+#endif\r
+\r
+ /* Process dhd_console_ms */\r
+ memset(pick, 0, MAXSZ_BUF);\r
+ len_val = process_config_vars(bufp, len, pick, "dhd_console_ms=");\r
+ if (len_val) {\r
+ dhd_console_ms = (int)simple_strtol(pick, NULL, 0);\r
+ printf("%s: dhd_console_ms = 0x%X\n", __FUNCTION__, dhd_console_ms);\r
+ }\r
+}\r
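+\r
+/*\r
+ * Illustrative config file entries handled by dhd_conf_read_log_level()\r
+ * (example values only, not driver defaults):\r
+ *   msglevel=0x1\r
+ *   sd_msglevel=0x1\r
+ *   android_msg_level=0x1\r
+ *   config_msg_level=0x1\r
+ *   dhd_console_ms=250\r
+ */\r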
+\r
+/*\r
+ * [fw_by_mac]:\r
+ * fw_by_mac=[fw_mac_num] \\r
+ * [fw_name1] [mac_num1] [oui1-1] [nic_start1-1] [nic_end1-1] \\r
+ * [oui1-1] [nic_start1-1] [nic_end1-1]... \\r
+ * [oui1-n] [nic_start1-n] [nic_end1-n] \\r
+ * [fw_name2] [mac_num2] [oui2-1] [nic_start2-1] [nic_end2-1] \\r
+ * [oui2-1] [nic_start2-1] [nic_end2-1]... \\r
+ * [oui2-n] [nic_start2-n] [nic_end2-n] \\r
+ * Ex: fw_by_mac=2 \\r
+ * fw_bcmdhd1.bin 2 0x0022F4 0xE85408 0xE8549D 0x983B16 0x3557A9 0x35582A \\r
+ * fw_bcmdhd2.bin 3 0x0022F4 0xE85408 0xE8549D 0x983B16 0x3557A9 0x35582A \\r
+ * 0x983B16 0x916157 0x916487\r
+ * [nv_by_mac]: The same format as fw_by_mac\r
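+ * Ex (nvram file name here is hypothetical; MAC range reuses the values above):\r
+ *     nv_by_mac=1 \\r
+ *     nvram_bcmdhd1.txt 1 0x0022F4 0xE85408 0xE8549D\r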
+ *\r
+*/\r
+\r
int\r
-dhd_conf_download_config(dhd_pub_t *dhd)\r
+dhd_conf_read_config(dhd_pub_t *dhd)\r
{\r
- int bcmerror = -1, i;\r
+ int bcmerror = -1, i, j;\r
uint len, len_val;\r
void * image = NULL;\r
char * memblock = NULL;\r
char *bufp, pick[MAXSZ_BUF], *pch, *pick_tmp;\r
char *pconf_path;\r
bool conf_file_exists;\r
+ wl_mac_list_t *mac_list;\r
+ wl_mac_range_t *mac_range;\r
+ struct dhd_conf *conf = dhd->conf;\r
\r
pconf_path = dhd->conf_path;\r
\r
memblock = MALLOC(dhd->osh, MAXSZ_CONFIG);\r
if (memblock == NULL) {\r
CONFIG_ERROR(("%s: Failed to allocate memory %d bytes\n",\r
- __FUNCTION__, MAXSZ_CONFIG));\r
+ __FUNCTION__, MAXSZ_CONFIG));\r
goto err;\r
}\r
\r
- /* Download variables */\r
+ /* Read variables */\r
if (conf_file_exists) {\r
len = dhd_os_get_image_block(memblock, MAXSZ_CONFIG, image);\r
}\r
bufp = (char *)memblock;\r
bufp[len] = 0;\r
\r
+ /* Process log_level */\r
+ dhd_conf_read_log_level(dhd, bufp, len);\r
+\r
+ /* Process fw_by_mac */\r
+ memset(pick, 0, MAXSZ_BUF);\r
+ len_val = process_config_vars(bufp, len, pick, "fw_by_mac=");\r
+ if (len_val) {\r
+ pick_tmp = pick;\r
+ pch = bcmstrtok(&pick_tmp, " ", 0);\r
+ conf->fw_by_mac.count = (uint32)simple_strtol(pch, NULL, 0);\r
+ if (!(mac_list = kmalloc(sizeof(wl_mac_list_t)*conf->fw_by_mac.count, GFP_KERNEL))) {\r
+ conf->fw_by_mac.count = 0;\r
+ CONFIG_ERROR(("%s: kmalloc failed\n", __FUNCTION__));\r
+ }\r
+ printf("%s: fw_count=%d\n", __FUNCTION__, conf->fw_by_mac.count);\r
+ conf->fw_by_mac.m_mac_list_head = mac_list;\r
+ for (i=0; i<conf->fw_by_mac.count; i++) {\r
+ pch = bcmstrtok(&pick_tmp, " ", 0);\r
+ strcpy(mac_list[i].name, pch);\r
+ pch = bcmstrtok(&pick_tmp, " ", 0);\r
+ mac_list[i].count = (uint32)simple_strtol(pch, NULL, 0);\r
+ printf("%s: name=%s, mac_count=%d\n", __FUNCTION__,\r
+ mac_list[i].name, mac_list[i].count);\r
+ if (!(mac_range = kmalloc(sizeof(wl_mac_range_t)*mac_list[i].count, GFP_KERNEL))) {\r
+ mac_list[i].count = 0;\r
+ CONFIG_ERROR(("%s: kmalloc failed\n", __FUNCTION__));\r
+ break;\r
+ }\r
+ mac_list[i].mac = mac_range;\r
+ for (j=0; j<mac_list[i].count; j++) {\r
+ pch = bcmstrtok(&pick_tmp, " ", 0);\r
+ mac_range[j].oui = (uint32)simple_strtol(pch, NULL, 0);\r
+ pch = bcmstrtok(&pick_tmp, " ", 0);\r
+ mac_range[j].nic_start = (uint32)simple_strtol(pch, NULL, 0);\r
+ pch = bcmstrtok(&pick_tmp, " ", 0);\r
+ mac_range[j].nic_end = (uint32)simple_strtol(pch, NULL, 0);\r
+ printf("%s: oui=0x%06X, nic_start=0x%06X, nic_end=0x%06X\n",\r
+ __FUNCTION__, mac_range[j].oui,\r
+ mac_range[j].nic_start, mac_range[j].nic_end);\r
+ }\r
+ }\r
+ }\r
+\r
+ /* Process nv_by_mac */\r
+ memset(pick, 0, MAXSZ_BUF);\r
+ len_val = process_config_vars(bufp, len, pick, "nv_by_mac=");\r
+ if (len_val) {\r
+ pick_tmp = pick;\r
+ pch = bcmstrtok(&pick_tmp, " ", 0);\r
+ conf->nv_by_mac.count = (uint32)simple_strtol(pch, NULL, 0);\r
+ if (!(mac_list = kmalloc(sizeof(wl_mac_list_t)*conf->nv_by_mac.count, GFP_KERNEL))) {\r
+ conf->nv_by_mac.count = 0;\r
+ CONFIG_ERROR(("%s: kmalloc failed\n", __FUNCTION__));\r
+ }\r
+ printf("%s: nv_count=%d\n", __FUNCTION__, conf->nv_by_mac.count);\r
+ conf->nv_by_mac.m_mac_list_head = mac_list;\r
+ for (i=0; i<conf->nv_by_mac.count; i++) {\r
+ pch = bcmstrtok(&pick_tmp, " ", 0);\r
+ strcpy(mac_list[i].name, pch);\r
+ pch = bcmstrtok(&pick_tmp, " ", 0);\r
+ mac_list[i].count = (uint32)simple_strtol(pch, NULL, 0);\r
+ printf("%s: name=%s, mac_count=%d\n", __FUNCTION__,\r
+ mac_list[i].name, mac_list[i].count);\r
+ if (!(mac_range = kmalloc(sizeof(wl_mac_range_t)*mac_list[i].count, GFP_KERNEL))) {\r
+ mac_list[i].count = 0;\r
+ CONFIG_ERROR(("%s: kmalloc failed\n", __FUNCTION__));\r
+ break;\r
+ }\r
+ mac_list[i].mac = mac_range;\r
+ for (j=0; j<mac_list[i].count; j++) {\r
+ pch = bcmstrtok(&pick_tmp, " ", 0);\r
+ mac_range[j].oui = (uint32)simple_strtol(pch, NULL, 0);\r
+ pch = bcmstrtok(&pick_tmp, " ", 0);\r
+ mac_range[j].nic_start = (uint32)simple_strtol(pch, NULL, 0);\r
+ pch = bcmstrtok(&pick_tmp, " ", 0);\r
+ mac_range[j].nic_end = (uint32)simple_strtol(pch, NULL, 0);\r
+ printf("%s: oui=0x%06X, nic_start=0x%06X, nic_end=0x%06X\n",\r
+ __FUNCTION__, mac_range[j].oui,\r
+ mac_range[j].nic_start, mac_range[j].nic_end);\r
+ }\r
+ }\r
+ }\r
+\r
/* Process firmware path */\r
memset(pick, 0, MAXSZ_BUF);\r
len_val = process_config_vars(bufp, len, pick, "fw_path=");\r
if (len_val) {\r
- memcpy(dhd->conf->fw_path, pick, len_val);\r
- printf("%s: fw_path = %s\n", __FUNCTION__, dhd->conf->fw_path);\r
+ memcpy(conf->fw_path, pick, len_val);\r
+ printf("%s: fw_path = %s\n", __FUNCTION__, conf->fw_path);\r
}\r
\r
/* Process nvram path */\r
memset(pick, 0, MAXSZ_BUF);\r
len_val = process_config_vars(bufp, len, pick, "nv_path=");\r
if (len_val) {\r
- memcpy(dhd->conf->nv_path, pick, len_val);\r
- printf("%s: nv_path = %s\n", __FUNCTION__, dhd->conf->nv_path);\r
+ memcpy(conf->nv_path, pick, len_val);\r
+ printf("%s: nv_path = %s\n", __FUNCTION__, conf->nv_path);\r
}\r
\r
/* Process band */\r
len_val = process_config_vars(bufp, len, pick, "band=");\r
if (len_val) {\r
if (!strncmp(pick, "b", len_val))\r
- dhd->conf->band = WLC_BAND_2G;\r
+ conf->band = WLC_BAND_2G;\r
else if (!strncmp(pick, "a", len_val))\r
- dhd->conf->band = WLC_BAND_5G;\r
+ conf->band = WLC_BAND_5G;\r
else\r
- dhd->conf->band = WLC_BAND_AUTO;\r
- printf("%s: band = %d\n", __FUNCTION__, dhd->conf->band);\r
+ conf->band = WLC_BAND_AUTO;\r
+ printf("%s: band = %d\n", __FUNCTION__, conf->band);\r
+ }\r
+\r
+ /* Process bandwidth */\r
+ memset(pick, 0, MAXSZ_BUF);\r
+ len_val = process_config_vars(bufp, len, pick, "mimo_bw_cap=");\r
+ if (len_val) {\r
+ conf->mimo_bw_cap = (uint)simple_strtol(pick, NULL, 10);\r
+ printf("%s: mimo_bw_cap = %d\n", __FUNCTION__, conf->mimo_bw_cap);\r
}\r
\r
/* Process country code */\r
memset(pick, 0, MAXSZ_BUF);\r
len_val = process_config_vars(bufp, len, pick, "ccode=");\r
if (len_val) {\r
- memset(&dhd->conf->cspec, 0, sizeof(wl_country_t));\r
- memcpy(dhd->conf->cspec.country_abbrev, pick, len_val);\r
- memcpy(dhd->conf->cspec.ccode, pick, len_val);\r
+ memset(&conf->cspec, 0, sizeof(wl_country_t));\r
+ memcpy(conf->cspec.country_abbrev, pick, len_val);\r
+ memcpy(conf->cspec.ccode, pick, len_val);\r
memset(pick, 0, MAXSZ_BUF);\r
len_val = process_config_vars(bufp, len, pick, "regrev=");\r
if (len_val)\r
- dhd->conf->cspec.rev = (int32)simple_strtol(pick, NULL, 10);\r
+ conf->cspec.rev = (int32)simple_strtol(pick, NULL, 10);\r
}\r
\r
/* Process channels */\r
pch = bcmstrtok(&pick_tmp, " ,.-", 0);\r
i=0;\r
while (pch != NULL && i<WL_NUMCHANNELS) {\r
- dhd->conf->channels.channel[i] = (uint32)simple_strtol(pch, NULL, 10);\r
+ conf->channels.channel[i] = (uint32)simple_strtol(pch, NULL, 10);\r
pch = bcmstrtok(&pick_tmp, " ,.-", 0);\r
i++;\r
}\r
- dhd->conf->channels.count = i;\r
+ conf->channels.count = i;\r
printf("%s: channels = ", __FUNCTION__);\r
- for (i=0; i<dhd->conf->channels.count; i++)\r
- printf("%d ", dhd->conf->channels.channel[i]);\r
+ for (i=0; i<conf->channels.count; i++)\r
+ printf("%d ", conf->channels.channel[i]);\r
printf("\n");\r
}\r
\r
len_val = process_config_vars(bufp, len, pick, "roam_off=");\r
if (len_val) {\r
if (!strncmp(pick, "0", len_val))\r
- dhd->conf->roam_off = 0;\r
+ conf->roam_off = 0;\r
else\r
- dhd->conf->roam_off = 1;\r
- printf("%s: roam_off = %d\n", __FUNCTION__, dhd->conf->roam_off);\r
+ conf->roam_off = 1;\r
+ printf("%s: roam_off = %d\n", __FUNCTION__, conf->roam_off);\r
}\r
\r
memset(pick, 0, MAXSZ_BUF);\r
len_val = process_config_vars(bufp, len, pick, "roam_off_suspend=");\r
if (len_val) {\r
if (!strncmp(pick, "0", len_val))\r
- dhd->conf->roam_off_suspend = 0;\r
+ conf->roam_off_suspend = 0;\r
else\r
- dhd->conf->roam_off_suspend = 1;\r
+ conf->roam_off_suspend = 1;\r
printf("%s: roam_off_suspend = %d\n", __FUNCTION__,\r
- dhd->conf->roam_off_suspend);\r
+ conf->roam_off_suspend);\r
}\r
\r
- if (!dhd->conf->roam_off || !dhd->conf->roam_off_suspend) {\r
+ if (!conf->roam_off || !conf->roam_off_suspend) {\r
memset(pick, 0, MAXSZ_BUF);\r
len_val = process_config_vars(bufp, len, pick, "roam_trigger=");\r
if (len_val)\r
- dhd->conf->roam_trigger[0] = (int)simple_strtol(pick, NULL, 10);\r
+ conf->roam_trigger[0] = (int)simple_strtol(pick, NULL, 10);\r
printf("%s: roam_trigger = %d\n", __FUNCTION__,\r
- dhd->conf->roam_trigger[0]);\r
+ conf->roam_trigger[0]);\r
\r
memset(pick, 0, MAXSZ_BUF);\r
len_val = process_config_vars(bufp, len, pick, "roam_scan_period=");\r
if (len_val)\r
- dhd->conf->roam_scan_period[0] = (int)simple_strtol(pick, NULL, 10);\r
+ conf->roam_scan_period[0] = (int)simple_strtol(pick, NULL, 10);\r
printf("%s: roam_scan_period = %d\n", __FUNCTION__,\r
- dhd->conf->roam_scan_period[0]);\r
+ conf->roam_scan_period[0]);\r
\r
memset(pick, 0, MAXSZ_BUF);\r
len_val = process_config_vars(bufp, len, pick, "roam_delta=");\r
if (len_val)\r
- dhd->conf->roam_delta[0] = (int)simple_strtol(pick, NULL, 10);\r
- printf("%s: roam_delta = %d\n", __FUNCTION__, dhd->conf->roam_delta[0]);\r
+ conf->roam_delta[0] = (int)simple_strtol(pick, NULL, 10);\r
+ printf("%s: roam_delta = %d\n", __FUNCTION__, conf->roam_delta[0]);\r
\r
memset(pick, 0, MAXSZ_BUF);\r
len_val = process_config_vars(bufp, len, pick, "fullroamperiod=");\r
if (len_val)\r
- dhd->conf->fullroamperiod = (int)simple_strtol(pick, NULL, 10);\r
+ conf->fullroamperiod = (int)simple_strtol(pick, NULL, 10);\r
printf("%s: fullroamperiod = %d\n", __FUNCTION__,\r
- dhd->conf->fullroamperiod);\r
- }\r
-\r
- /* Process filter out all packets */\r
- memset(pick, 0, MAXSZ_BUF);\r
- len_val = process_config_vars(bufp, len, pick, "filter_out_all_packets=");\r
- if (len_val) {\r
- if (!strncmp(pick, "0", len_val))\r
- dhd->conf->filter_out_all_packets = 0;\r
- else\r
- dhd->conf->filter_out_all_packets = 1;\r
- printf("%s: filter_out_all_packets = %d\n", __FUNCTION__,\r
- dhd->conf->filter_out_all_packets);\r
+ conf->fullroamperiod);\r
}\r
\r
/* Process keep alive period */\r
memset(pick, 0, MAXSZ_BUF);\r
len_val = process_config_vars(bufp, len, pick, "keep_alive_period=");\r
if (len_val) {\r
- dhd->conf->keep_alive_period = (int)simple_strtol(pick, NULL, 10);\r
+ conf->keep_alive_period = (int)simple_strtol(pick, NULL, 10);\r
printf("%s: keep_alive_period = %d\n", __FUNCTION__,\r
- dhd->conf->keep_alive_period);\r
+ conf->keep_alive_period);\r
}\r
\r
/* Process WMM parameters */\r
memset(pick, 0, MAXSZ_BUF);\r
len_val = process_config_vars(bufp, len, pick, "force_wme_ac=");\r
if (len_val) {\r
- dhd->conf->force_wme_ac = (int)simple_strtol(pick, NULL, 10);\r
- printf("%s: force_wme_ac = %d\n", __FUNCTION__, dhd->conf->force_wme_ac);\r
+ conf->force_wme_ac = (int)simple_strtol(pick, NULL, 10);\r
+ printf("%s: force_wme_ac = %d\n", __FUNCTION__, conf->force_wme_ac);\r
}\r
\r
- if (dhd->conf->force_wme_ac) {\r
+ if (conf->force_wme_ac) {\r
memset(pick, 0, MAXSZ_BUF);\r
len_val = process_config_vars(bufp, len, pick, "bk_aifsn=");\r
if (len_val) {\r
- dhd->conf->wme.aifsn[AC_BK] = (int)simple_strtol(pick, NULL, 10);\r
- printf("%s: AC_BK aifsn = %d\n", __FUNCTION__, dhd->conf->wme.aifsn[AC_BK]);\r
+ conf->wme.aifsn[AC_BK] = (int)simple_strtol(pick, NULL, 10);\r
+ printf("%s: AC_BK aifsn = %d\n", __FUNCTION__, conf->wme.aifsn[AC_BK]);\r
}\r
\r
memset(pick, 0, MAXSZ_BUF);\r
len_val = process_config_vars(bufp, len, pick, "bk_cwmin=");\r
if (len_val) {\r
- dhd->conf->wme.cwmin[AC_BK] = (int)simple_strtol(pick, NULL, 10);\r
- printf("%s: AC_BK cwmin = %d\n", __FUNCTION__, dhd->conf->wme.cwmin[AC_BK]);\r
+ conf->wme.cwmin[AC_BK] = (int)simple_strtol(pick, NULL, 10);\r
+ printf("%s: AC_BK cwmin = %d\n", __FUNCTION__, conf->wme.cwmin[AC_BK]);\r
}\r
\r
memset(pick, 0, MAXSZ_BUF);\r
len_val = process_config_vars(bufp, len, pick, "bk_cwmax=");\r
if (len_val) {\r
- dhd->conf->wme.cwmax[AC_BK] = (int)simple_strtol(pick, NULL, 10);\r
- printf("%s: AC_BK cwmax = %d\n", __FUNCTION__, dhd->conf->wme.cwmax[AC_BK]);\r
+ conf->wme.cwmax[AC_BK] = (int)simple_strtol(pick, NULL, 10);\r
+ printf("%s: AC_BK cwmax = %d\n", __FUNCTION__, conf->wme.cwmax[AC_BK]);\r
}\r
\r
memset(pick, 0, MAXSZ_BUF);\r
len_val = process_config_vars(bufp, len, pick, "be_aifsn=");\r
if (len_val) {\r
- dhd->conf->wme.aifsn[AC_BE] = (int)simple_strtol(pick, NULL, 10);\r
- printf("%s: AC_BE aifsn = %d\n", __FUNCTION__, dhd->conf->wme.aifsn[AC_BE]);\r
+ conf->wme.aifsn[AC_BE] = (int)simple_strtol(pick, NULL, 10);\r
+ printf("%s: AC_BE aifsn = %d\n", __FUNCTION__, conf->wme.aifsn[AC_BE]);\r
}\r
\r
memset(pick, 0, MAXSZ_BUF);\r
len_val = process_config_vars(bufp, len, pick, "be_cwmin=");\r
if (len_val) {\r
- dhd->conf->wme.cwmin[AC_BE] = (int)simple_strtol(pick, NULL, 10);\r
- printf("%s: AC_BE cwmin = %d\n", __FUNCTION__, dhd->conf->wme.cwmin[AC_BE]);\r
+ conf->wme.cwmin[AC_BE] = (int)simple_strtol(pick, NULL, 10);\r
+ printf("%s: AC_BE cwmin = %d\n", __FUNCTION__, conf->wme.cwmin[AC_BE]);\r
}\r
\r
memset(pick, 0, MAXSZ_BUF);\r
len_val = process_config_vars(bufp, len, pick, "be_cwmax=");\r
if (len_val) {\r
- dhd->conf->wme.cwmax[AC_BE] = (int)simple_strtol(pick, NULL, 10);\r
- printf("%s: AC_BE cwmax = %d\n", __FUNCTION__, dhd->conf->wme.cwmax[AC_BE]);\r
+ conf->wme.cwmax[AC_BE] = (int)simple_strtol(pick, NULL, 10);\r
+ printf("%s: AC_BE cwmax = %d\n", __FUNCTION__, conf->wme.cwmax[AC_BE]);\r
}\r
\r
memset(pick, 0, MAXSZ_BUF);\r
len_val = process_config_vars(bufp, len, pick, "vi_aifsn=");\r
if (len_val) {\r
- dhd->conf->wme.aifsn[AC_VI] = (int)simple_strtol(pick, NULL, 10);\r
- printf("%s: AC_VI aifsn = %d\n", __FUNCTION__, dhd->conf->wme.aifsn[AC_VI]);\r
+ conf->wme.aifsn[AC_VI] = (int)simple_strtol(pick, NULL, 10);\r
+ printf("%s: AC_VI aifsn = %d\n", __FUNCTION__, conf->wme.aifsn[AC_VI]);\r
}\r
\r
memset(pick, 0, MAXSZ_BUF);\r
len_val = process_config_vars(bufp, len, pick, "vi_cwmin=");\r
if (len_val) {\r
- dhd->conf->wme.cwmin[AC_VI] = (int)simple_strtol(pick, NULL, 10);\r
- printf("%s: AC_VI cwmin = %d\n", __FUNCTION__, dhd->conf->wme.cwmin[AC_VI]);\r
+ conf->wme.cwmin[AC_VI] = (int)simple_strtol(pick, NULL, 10);\r
+ printf("%s: AC_VI cwmin = %d\n", __FUNCTION__, conf->wme.cwmin[AC_VI]);\r
}\r
\r
memset(pick, 0, MAXSZ_BUF);\r
len_val = process_config_vars(bufp, len, pick, "vi_cwmax=");\r
if (len_val) {\r
- dhd->conf->wme.cwmax[AC_VI] = (int)simple_strtol(pick, NULL, 10);\r
- printf("%s: AC_VI cwmax = %d\n", __FUNCTION__, dhd->conf->wme.cwmax[AC_VI]);\r
+ conf->wme.cwmax[AC_VI] = (int)simple_strtol(pick, NULL, 10);\r
+ printf("%s: AC_VI cwmax = %d\n", __FUNCTION__, conf->wme.cwmax[AC_VI]);\r
}\r
\r
memset(pick, 0, MAXSZ_BUF);\r
len_val = process_config_vars(bufp, len, pick, "vo_aifsn=");\r
if (len_val) {\r
- dhd->conf->wme.aifsn[AC_VO] = (int)simple_strtol(pick, NULL, 10);\r
- printf("%s: AC_VO aifsn = %d\n", __FUNCTION__, dhd->conf->wme.aifsn[AC_VO]);\r
+ conf->wme.aifsn[AC_VO] = (int)simple_strtol(pick, NULL, 10);\r
+ printf("%s: AC_VO aifsn = %d\n", __FUNCTION__, conf->wme.aifsn[AC_VO]);\r
}\r
\r
memset(pick, 0, MAXSZ_BUF);\r
len_val = process_config_vars(bufp, len, pick, "vo_cwmin=");\r
if (len_val) {\r
- dhd->conf->wme.cwmin[AC_VO] = (int)simple_strtol(pick, NULL, 10);\r
- printf("%s: AC_VO cwmin = %d\n", __FUNCTION__, dhd->conf->wme.cwmin[AC_VO]);\r
+ conf->wme.cwmin[AC_VO] = (int)simple_strtol(pick, NULL, 10);\r
+ printf("%s: AC_VO cwmin = %d\n", __FUNCTION__, conf->wme.cwmin[AC_VO]);\r
}\r
\r
memset(pick, 0, MAXSZ_BUF);\r
len_val = process_config_vars(bufp, len, pick, "vo_cwmax=");\r
if (len_val) {\r
- dhd->conf->wme.cwmax[AC_VO] = (int)simple_strtol(pick, NULL, 10);\r
- printf("%s: AC_VO cwmax = %d\n", __FUNCTION__, dhd->conf->wme.cwmax[AC_VO]);\r
+ conf->wme.cwmax[AC_VO] = (int)simple_strtol(pick, NULL, 10);\r
+ printf("%s: AC_VO cwmax = %d\n", __FUNCTION__, conf->wme.cwmax[AC_VO]);\r
}\r
}\r
\r
memset(pick, 0, MAXSZ_BUF);\r
len_val = process_config_vars(bufp, len, pick, "stbc=");\r
if (len_val) {\r
- dhd->conf->stbc = (int)simple_strtol(pick, NULL, 10);\r
- printf("%s: stbc = %d\n", __FUNCTION__, dhd->conf->stbc);\r
+ conf->stbc = (int)simple_strtol(pick, NULL, 10);\r
+ printf("%s: stbc = %d\n", __FUNCTION__, conf->stbc);\r
}\r
\r
/* Process phy_oclscdenable parameters */\r
memset(pick, 0, MAXSZ_BUF);\r
len_val = process_config_vars(bufp, len, pick, "phy_oclscdenable=");\r
if (len_val) {\r
- dhd->conf->phy_oclscdenable = (int)simple_strtol(pick, NULL, 10);\r
- printf("%s: phy_oclscdenable = %d\n", __FUNCTION__, dhd->conf->phy_oclscdenable);\r
+ conf->phy_oclscdenable = (int)simple_strtol(pick, NULL, 10);\r
+ printf("%s: phy_oclscdenable = %d\n", __FUNCTION__, conf->phy_oclscdenable);\r
+ }\r
+\r
+ /* Process dhd_doflow parameters */\r
+ memset(pick, 0, MAXSZ_BUF);\r
+ len_val = process_config_vars(bufp, len, pick, "dhd_doflow=");\r
+ if (len_val) {\r
+ if (!strncmp(pick, "0", len_val))\r
+ dhd_doflow = FALSE;\r
+ else\r
+ dhd_doflow = TRUE;\r
+ printf("%s: dhd_doflow = %d\n", __FUNCTION__, dhd_doflow);\r
+ }\r
+\r
+ /* Process dhd_master_mode parameters */\r
+ memset(pick, 0, MAXSZ_BUF);\r
+ len_val = process_config_vars(bufp, len, pick, "dhd_master_mode=");\r
+ if (len_val) {\r
+ if (!strncmp(pick, "0", len_val))\r
+ dhd_master_mode = FALSE;\r
+ else\r
+ dhd_master_mode = TRUE;\r
+ printf("%s: dhd_master_mode = %d\n", __FUNCTION__, dhd_master_mode);\r
+ }\r
+\r
+ /* Process pkt_filter_add */\r
+ memset(pick, 0, MAXSZ_BUF);\r
+ len_val = process_config_vars(bufp, len, pick, "pkt_filter_add=");\r
+ pick_tmp = pick;\r
+ if (len_val) {\r
+ pch = bcmstrtok(&pick_tmp, ",.-", 0);\r
+ i=0;\r
+ while (pch != NULL && i<DHD_CONF_FILTER_MAX) {\r
+ strcpy(&conf->pkt_filter_add.filter[i][0], pch);\r
+ printf("%s: pkt_filter_add[%d][] = %s\n", __FUNCTION__, i, &conf->pkt_filter_add.filter[i][0]);\r
+ pch = bcmstrtok(&pick_tmp, ",.-", 0);\r
+ i++;\r
+ }\r
+ conf->pkt_filter_add.count = i;\r
+ }\r
+\r
+ /* Process pkt_filter_del */\r
+ memset(pick, 0, MAXSZ_BUF);\r
+ len_val = process_config_vars(bufp, len, pick, "pkt_filter_del=");\r
+ pick_tmp = pick;\r
+ if (len_val) {\r
+ pch = bcmstrtok(&pick_tmp, " ,.-", 0);\r
+ i=0;\r
+ while (pch != NULL && i<DHD_CONF_FILTER_MAX) {\r
+ conf->pkt_filter_del.id[i] = (uint32)simple_strtol(pch, NULL, 10);\r
+ pch = bcmstrtok(&pick_tmp, " ,.-", 0);\r
+ i++;\r
+ }\r
+ conf->pkt_filter_del.count = i;\r
+ printf("%s: pkt_filter_del id = ", __FUNCTION__);\r
+ for (i=0; i<conf->pkt_filter_del.count; i++)\r
+ printf("%d ", conf->pkt_filter_del.id[i]);\r
+ printf("\n");\r
}\r
\r
+ /* Process srl parameters */\r
+ memset(pick, 0, MAXSZ_BUF);\r
+ len_val = process_config_vars(bufp, len, pick, "srl=");\r
+ if (len_val) {\r
+ conf->srl = (int)simple_strtol(pick, NULL, 10);\r
+ printf("%s: srl = %d\n", __FUNCTION__, conf->srl);\r
+ }\r
+\r
+ /* Process lrl parameters */\r
+ memset(pick, 0, MAXSZ_BUF);\r
+ len_val = process_config_vars(bufp, len, pick, "lrl=");\r
+ if (len_val) {\r
+ conf->lrl = (int)simple_strtol(pick, NULL, 10);\r
+ printf("%s: lrl = %d\n", __FUNCTION__, conf->lrl);\r
+ }\r
+\r
+ /* Process beacon timeout parameters */\r
+ memset(pick, 0, MAXSZ_BUF);\r
+ len_val = process_config_vars(bufp, len, pick, "bcn_timeout=");\r
+ if (len_val) {\r
+		conf->bcn_timeout = (int)simple_strtol(pick, NULL, 10);\r
+ printf("%s: bcn_timeout = %d\n", __FUNCTION__, conf->bcn_timeout);\r
+ }\r
+\r
+ /* Process bus_txglom */\r
+ memset(pick, 0, MAXSZ_BUF);\r
+ len_val = process_config_vars(bufp, len, pick, "bus:txglom=");\r
+ if (len_val) {\r
+ conf->bus_txglom = (int)simple_strtol(pick, NULL, 10);\r
+ printf("%s: bus:txglom = %d\n", __FUNCTION__, conf->bus_txglom);\r
+ }\r
+\r
+ /* Process ampdu_ba_wsize parameters */\r
+ memset(pick, 0, MAXSZ_BUF);\r
+ len_val = process_config_vars(bufp, len, pick, "ampdu_ba_wsize=");\r
+ if (len_val) {\r
+ conf->ampdu_ba_wsize = (int)simple_strtol(pick, NULL, 10);\r
+ printf("%s: ampdu_ba_wsize = %d\n", __FUNCTION__, conf->ampdu_ba_wsize);\r
+ }\r
+\r
+ /* Process kso parameters */\r
+ memset(pick, 0, MAXSZ_BUF);\r
+ len_val = process_config_vars(bufp, len, pick, "kso_enable=");\r
+ if (len_val) {\r
+ if (!strncmp(pick, "0", len_val))\r
+ conf->kso_enable = FALSE;\r
+ else\r
+ conf->kso_enable = TRUE;\r
+ printf("%s: kso_enable = %d\n", __FUNCTION__, conf->kso_enable);\r
+ }\r
+\r
+ /* Process spect parameters */\r
+ memset(pick, 0, MAXSZ_BUF);\r
+ len_val = process_config_vars(bufp, len, pick, "spect=");\r
+ if (len_val) {\r
+ conf->spect = (int)simple_strtol(pick, NULL, 10);\r
+ printf("%s: spect = %d\n", __FUNCTION__, conf->spect);\r
+ }\r
+\r
bcmerror = 0;\r
} else {\r
CONFIG_ERROR(("%s: error reading config file: %d\n", __FUNCTION__, len));\r
return bcmerror;\r
}\r
\r
+int\r
+dhd_conf_set_chiprev(dhd_pub_t *dhd, uint chip, uint chiprev)\r
+{\r
+ dhd->conf->chip = chip;\r
+ dhd->conf->chiprev = chiprev;\r
+ return 0;\r
+}\r
+\r
+uint\r
+dhd_conf_get_chip(void *context)\r
+{\r
+ dhd_pub_t *dhd = context;\r
+\r
+ if (dhd && dhd->conf)\r
+ return dhd->conf->chip;\r
+ return 0;\r
+}\r
+\r
+uint\r
+dhd_conf_get_chiprev(void *context)\r
+{\r
+ dhd_pub_t *dhd = context;\r
+\r
+ if (dhd && dhd->conf)\r
+ return dhd->conf->chiprev;\r
+ return 0;\r
+}\r
+\r
int\r
dhd_conf_preinit(dhd_pub_t *dhd)\r
{\r
- CONFIG_TRACE(("%s: Enter\n", __FUNCTION__));\r
+ struct dhd_conf *conf = dhd->conf;\r
\r
- memset(dhd->conf, 0, sizeof(dhd_conf_t));\r
+ CONFIG_TRACE(("%s: Enter\n", __FUNCTION__));\r
\r
- dhd->conf->band = WLC_BAND_AUTO;\r
- strcpy(dhd->conf->cspec.country_abbrev, "ALL");\r
- dhd->conf->cspec.rev = 0;\r
- strcpy(dhd->conf->cspec.ccode, "ALL");\r
- memset(&dhd->conf->channels, 0, sizeof(wl_channel_list_t));\r
- dhd->conf->roam_off = 1;\r
- dhd->conf->roam_off_suspend = 1;\r
+ dhd_conf_free_mac_list(&conf->fw_by_mac);\r
+ dhd_conf_free_mac_list(&conf->nv_by_mac);\r
+ conf->band = WLC_BAND_AUTO;\r
+ conf->mimo_bw_cap = -1;\r
+ if (conf->chip == BCM43362_CHIP_ID || conf->chip == BCM4330_CHIP_ID) {\r
+ strcpy(conf->cspec.country_abbrev, "ALL");\r
+ strcpy(conf->cspec.ccode, "ALL");\r
+ conf->cspec.rev = 0;\r
+ } else if (conf->chip == BCM4335_CHIP_ID || conf->chip == BCM4339_CHIP_ID ||\r
+ conf->chip == BCM4354_CHIP_ID) {\r
+ strcpy(conf->cspec.country_abbrev, "CN");\r
+ strcpy(conf->cspec.ccode, "CN");\r
+ conf->cspec.rev = 38;\r
+ } else {\r
+ strcpy(conf->cspec.country_abbrev, "CN");\r
+ strcpy(conf->cspec.ccode, "CN");\r
+ conf->cspec.rev = 0;\r
+ }\r
+ memset(&conf->channels, 0, sizeof(wl_channel_list_t));\r
+ conf->roam_off = 1;\r
+ conf->roam_off_suspend = 1;\r
#ifdef CUSTOM_ROAM_TRIGGER_SETTING\r
- dhd->conf->roam_trigger[0] = CUSTOM_ROAM_TRIGGER_SETTING;\r
+ conf->roam_trigger[0] = CUSTOM_ROAM_TRIGGER_SETTING;\r
#else\r
- dhd->conf->roam_trigger[0] = -65;\r
+ conf->roam_trigger[0] = -65;\r
#endif\r
- dhd->conf->roam_trigger[1] = WLC_BAND_ALL;\r
- dhd->conf->roam_scan_period[0] = 10;\r
- dhd->conf->roam_scan_period[1] = WLC_BAND_ALL;\r
+ conf->roam_trigger[1] = WLC_BAND_ALL;\r
+ conf->roam_scan_period[0] = 10;\r
+ conf->roam_scan_period[1] = WLC_BAND_ALL;\r
#ifdef CUSTOM_ROAM_DELTA_SETTING\r
- dhd->conf->roam_delta[0] = CUSTOM_ROAM_DELTA_SETTING;\r
+ conf->roam_delta[0] = CUSTOM_ROAM_DELTA_SETTING;\r
#else\r
- dhd->conf->roam_delta[0] = 15;\r
+ conf->roam_delta[0] = 15;\r
#endif\r
- dhd->conf->roam_delta[1] = WLC_BAND_ALL;\r
+ conf->roam_delta[1] = WLC_BAND_ALL;\r
#ifdef FULL_ROAMING_SCAN_PERIOD_60_SEC
- dhd->conf->fullroamperiod = 60;\r
+ conf->fullroamperiod = 60;\r
#else /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
- dhd->conf->fullroamperiod = 120;\r
+ conf->fullroamperiod = 120;\r
#endif /* FULL_ROAMING_SCAN_PERIOD_60_SEC */\r
- dhd->conf->filter_out_all_packets = 0;\r
#ifdef CUSTOM_KEEP_ALIVE_SETTING\r
- dhd->conf->keep_alive_period = CUSTOM_KEEP_ALIVE_SETTING;\r
+ conf->keep_alive_period = CUSTOM_KEEP_ALIVE_SETTING;\r
#else\r
- dhd->conf->keep_alive_period = 28000;\r
+ conf->keep_alive_period = 28000;\r
+#endif\r
+ conf->force_wme_ac = 0;\r
+ conf->stbc = -1;\r
+ conf->phy_oclscdenable = -1;\r
+#ifdef PKT_FILTER_SUPPORT\r
+ memset(&conf->pkt_filter_add, 0, sizeof(conf_pkt_filter_add_t));\r
+ memset(&conf->pkt_filter_del, 0, sizeof(conf_pkt_filter_del_t));\r
#endif\r
- dhd->conf->force_wme_ac = 0;\r
- dhd->conf->stbc = -1;\r
- dhd->conf->phy_oclscdenable = -1;\r
+ conf->srl = -1;\r
+ conf->lrl = -1;\r
+ conf->bcn_timeout = 15;\r
+ if (conf->chip == BCM4339_CHIP_ID) {\r
+ conf->bus_txglom = 8;\r
+ conf->ampdu_ba_wsize = 40;\r
+ }\r
+ conf->kso_enable = TRUE;\r
+ conf->spect = -1;\r
\r
return 0;\r
}\r
\r
+int\r
+dhd_conf_reset(dhd_pub_t *dhd)\r
+{\r
+ dhd_conf_free_mac_list(&dhd->conf->fw_by_mac);\r
+ dhd_conf_free_mac_list(&dhd->conf->nv_by_mac);\r
+ memset(dhd->conf, 0, sizeof(dhd_conf_t));\r
+ return 0;\r
+}\r
+\r
int\r
dhd_conf_attach(dhd_pub_t *dhd)\r
{\r
{\r
CONFIG_TRACE(("%s: Enter\n", __FUNCTION__));\r
\r
- if (dhd->conf)\r
+ if (dhd->conf) {\r
+ dhd_conf_free_mac_list(&dhd->conf->fw_by_mac);\r
+ dhd_conf_free_mac_list(&dhd->conf->nv_by_mac);\r
MFREE(dhd->osh, dhd->conf, sizeof(dhd_conf_t));\r
- dhd->conf = NULL;\r
-}\r
-\r
-#ifdef POWER_OFF_IN_SUSPEND\r
-struct net_device *g_netdev;\r
-#if defined(CONFIG_HAS_EARLYSUSPEND)\r
-#include <linux/earlysuspend.h>\r
-struct sdio_early_suspend_info {\r
- struct sdio_func *func;\r
- struct early_suspend sdio_early_suspend;\r
- struct work_struct tqueue;\r
- int do_late_resume;\r
-};\r
-struct sdio_early_suspend_info sdioinfo[4];\r
-\r
-void\r
-dhd_conf_wifi_stop(struct net_device *dev)\r
-{\r
- if (!dev) {\r
- CONFIG_ERROR(("%s: dev is null\n", __FUNCTION__));\r
- return;\r
- }\r
-\r
- printk("%s in 1\n", __FUNCTION__);\r
- dhd_net_if_lock(dev);\r
- printk("%s in 2: g_wifi_on=%d, name=%s\n", __FUNCTION__, g_wifi_on, dev->name);\r
- if (g_wifi_on) {\r
-#ifdef WL_CFG80211\r
- wl_cfg80211_user_sync(true);\r
- wl_cfg80211_stop();\r
-#endif\r
- dhd_bus_devreset(bcmsdh_get_drvdata(), true);\r
- sdioh_stop(NULL);\r
- dhd_customer_gpio_wlan_ctrl(WLAN_RESET_OFF);\r
- g_wifi_on = FALSE;\r
-#ifdef WL_CFG80211\r
- wl_cfg80211_user_sync(false);\r
-#endif\r
- }\r
- printk("%s out\n", __FUNCTION__);\r
- dhd_net_if_unlock(dev);\r
-\r
-}\r
-\r
-void\r
-dhd_conf_wifi_power(bool on)\r
-{\r
- printk("%s: Enter %d\n", __FUNCTION__, on);\r
- if (on) {\r
-#ifdef WL_CFG80211\r
- wl_cfg80211_user_sync(true);\r
-#endif\r
- wl_android_wifi_on(g_netdev);\r
-#ifdef WL_CFG80211\r
- wl_cfg80211_send_disconnect();\r
- wl_cfg80211_user_sync(false);\r
-#endif\r
- } else {\r
- dhd_conf_wifi_stop(g_netdev);\r
- }\r
-}\r
-\r
-void\r
-dhd_conf_power_workqueue(struct work_struct *work)\r
-{\r
- dhd_conf_wifi_power(true);\r
-}\r
-\r
-void\r
-dhd_conf_early_suspend(struct early_suspend *h)\r
-{\r
- struct sdio_early_suspend_info *sdioinfo = container_of(h, struct sdio_early_suspend_info, sdio_early_suspend);\r
-\r
- printk("%s: Enter\n", __FUNCTION__);\r
- if(sdioinfo->func->num == 2)\r
- sdioinfo->do_late_resume = 0;\r
-}\r
-\r
-void\r
-dhd_conf_late_resume(struct early_suspend *h)\r
-{\r
- struct sdio_early_suspend_info *sdioinfo = container_of(h, struct sdio_early_suspend_info, sdio_early_suspend);\r
-\r
- printk("%s: Enter\n", __FUNCTION__);\r
- if(sdioinfo->func->num == 2 && sdioinfo->do_late_resume ){\r
- sdioinfo->do_late_resume = 0;\r
- schedule_work(&sdioinfo->tqueue);\r
}\r
+ dhd->conf = NULL;\r
}\r
-#endif /* defined(CONFIG_HAS_EARLYSUSPEND) */\r
-\r
-void\r
-dhd_conf_wifi_suspend(struct sdio_func *func)\r
-{\r
-#ifdef CONFIG_HAS_EARLYSUSPEND\r
- if (!sdioinfo[func->num].do_late_resume) {\r
- dhd_conf_wifi_power(false);\r
- sdioinfo[func->num].do_late_resume = 1;\r
- }\r
-#endif\r
-}\r
-\r
-void\r
-dhd_conf_register_wifi_suspend(struct sdio_func *func)\r
-{\r
-#ifdef CONFIG_HAS_EARLYSUSPEND\r
- if (func->num == 2) {\r
- sdioinfo[func->num].func = func;\r
- sdioinfo[func->num].do_late_resume = 0;\r
- sdioinfo[func->num].sdio_early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 30;\r
- sdioinfo[func->num].sdio_early_suspend.suspend = dhd_conf_early_suspend;\r
- sdioinfo[func->num].sdio_early_suspend.resume = dhd_conf_late_resume;\r
- register_early_suspend(&sdioinfo[func->num].sdio_early_suspend);\r
- INIT_WORK(&sdioinfo[func->num].tqueue, dhd_conf_power_workqueue);\r
- }\r
-#endif\r
-}\r
-\r
-void\r
-dhd_conf_unregister_wifi_suspend(struct sdio_func *func)\r
-{\r
-#ifdef CONFIG_HAS_EARLYSUSPEND\r
- if (func->num == 2) {\r
- if (sdioinfo[func->num].sdio_early_suspend.suspend) {\r
- unregister_early_suspend(&sdioinfo[func->num].sdio_early_suspend);\r
- sdioinfo[func->num].sdio_early_suspend.suspend = NULL;\r
- }\r
- }\r
-#endif\r
-}\r
-#endif\r
-\r
#include <dhd.h>\r
#include <wlioctl.h>\r
#include <proto/802.11.h>\r
-#ifdef POWER_OFF_IN_SUSPEND\r
-#include <wl_android.h>\r
-#include <bcmsdbus.h>\r
-#include <linux/mmc/sdio_func.h>\r
-#endif\r
\r
#define FW_PATH_AUTO_SELECT 1\r
extern char firmware_path[MOD_PARAM_PATHLEN];\r
extern int disable_proptx;\r
+extern uint dhd_doflow;\r
+\r
+/* mac range */\r
+typedef struct wl_mac_range {\r
+ uint32 oui;\r
+ uint32 nic_start;\r
+ uint32 nic_end;\r
+} wl_mac_range_t;\r
+\r
+/* mac list */\r
+typedef struct wl_mac_list {\r
+ int count;\r
+ wl_mac_range_t *mac;\r
+ char name[MOD_PARAM_PATHLEN]; /* path */\r
+} wl_mac_list_t;\r
+\r
+/* mac list head */\r
+typedef struct wl_mac_list_ctrl {\r
+ int count;\r
+ struct wl_mac_list *m_mac_list_head;\r
+} wl_mac_list_ctrl_t;\r
\r
/* channel list */\r
typedef struct wl_channel_list {\r
int cwmax[AC_COUNT];\r
} wme_param_t;\r
\r
+#ifdef PKT_FILTER_SUPPORT\r
+#define DHD_CONF_FILTER_MAX 8\r
+/* filter list */\r
+#define PKT_FILTER_LEN 150\r
+typedef struct conf_pkt_filter_add {\r
+	/* number of filters in the list */\r
+ uint32 count;\r
+ /* variable length filter list */\r
+ char filter[DHD_CONF_FILTER_MAX][PKT_FILTER_LEN];\r
+} conf_pkt_filter_add_t;\r
+\r
+/* pkt_filter_del list */\r
+typedef struct conf_pkt_filter_del {\r
+	/* number of filter ids in the list */\r
+ uint32 count;\r
+	/* filter id list */\r
+ uint32 id[DHD_CONF_FILTER_MAX];\r
+} conf_pkt_filter_del_t;\r
+#endif\r
+\r
typedef struct dhd_conf {\r
+ uint chip; /* chip number */
+ uint chiprev; /* chip revision */\r
+ wl_mac_list_ctrl_t fw_by_mac; /* Firmware auto selection by MAC */\r
+ wl_mac_list_ctrl_t nv_by_mac; /* NVRAM auto selection by MAC */\r
char fw_path[MOD_PARAM_PATHLEN]; /* Firmware path */\r
char nv_path[MOD_PARAM_PATHLEN]; /* NVRAM path */\r
uint band; /* Band, b:2.4G only, otherwise for auto */\r
+ int mimo_bw_cap; /* Bandwidth, 0:HT20ALL, 1: HT40ALL, 2:HT20IN2G_HT40PIN5G */\r
wl_country_t cspec; /* Country */\r
wl_channel_list_t channels; /* Support channels */\r
uint roam_off; /* Roaming, 0:enable, 1:disable */\r
int roam_scan_period[2]; /* Roaming scan period */\r
int roam_delta[2]; /* Roaming candidate qualification delta */\r
int fullroamperiod; /* Full Roaming period */\r
- uint filter_out_all_packets; /* Filter out all packets in early suspend */\r
uint keep_alive_period; /* The perioid in ms to send keep alive packet */\r
uint force_wme_ac;\r
wme_param_t wme; /* WME parameters */\r
int stbc; /* STBC for Tx/Rx */\r
int phy_oclscdenable; /* phy_oclscdenable */\r
+#ifdef PKT_FILTER_SUPPORT\r
+ conf_pkt_filter_add_t pkt_filter_add; /* Packet filter add */\r
+	conf_pkt_filter_del_t pkt_filter_del; /* Packet filter delete */\r
+#endif\r
+ int srl; /* short retry limit */\r
+ int lrl; /* long retry limit */\r
+ uint bcn_timeout; /* beacon timeout */\r
+ uint32 bus_txglom; /* bus:txglom */\r
+ uint32 ampdu_ba_wsize;\r
+ bool kso_enable;\r
+ int spect;\r
} dhd_conf_t;\r
\r
-void dhd_conf_set_fw_name_by_chip(dhd_pub_t *dhd, char *dst, char *src);\r
+void dhd_conf_set_fw_name_by_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh, char *fw_path);\r
+void dhd_conf_set_nv_name_by_mac(dhd_pub_t *dhd, bcmsdh_info_t *sdh, char *nv_path);\r
+void dhd_conf_set_fw_name_by_chip(dhd_pub_t *dhd, char *fw_path);\r
+#if defined(HW_OOB)\r
void dhd_conf_set_hw_oob_intr(bcmsdh_info_t *sdh, uint chip);\r
+#endif\r
void dhd_conf_set_fw_path(dhd_pub_t *dhd, char *fw_path);\r
void dhd_conf_set_nv_path(dhd_pub_t *dhd, char *nv_path);\r
int dhd_conf_set_band(dhd_pub_t *dhd);\r
uint dhd_conf_get_band(dhd_pub_t *dhd);\r
int dhd_conf_set_country(dhd_pub_t *dhd);\r
-int dhd_conf_get_country(dhd_pub_t *dhd);\r
+int dhd_conf_get_country(dhd_pub_t *dhd, wl_country_t *cspec);\r
+int dhd_conf_fix_country(dhd_pub_t *dhd);\r
bool dhd_conf_match_channel(dhd_pub_t *dhd, uint32 channel);\r
int dhd_conf_set_roam(dhd_pub_t *dhd);\r
-void dhd_conf_set_bw(dhd_pub_t *dhd);\r
+void dhd_conf_set_mimo_bw_cap(dhd_pub_t *dhd);\r
void dhd_conf_force_wme(dhd_pub_t *dhd);\r
void dhd_conf_get_wme(dhd_pub_t *dhd, edcf_acparam_t *acp);\r
void dhd_conf_set_wme(dhd_pub_t *dhd);\r
void dhd_conf_set_stbc(dhd_pub_t *dhd);\r
void dhd_conf_set_phyoclscdenable(dhd_pub_t *dhd);\r
-int dhd_conf_download_config(dhd_pub_t *dhd);\r
+void dhd_conf_add_pkt_filter(dhd_pub_t *dhd);\r
+bool dhd_conf_del_pkt_filter(dhd_pub_t *dhd, uint32 id);\r
+void dhd_conf_discard_pkt_filter(dhd_pub_t *dhd);\r
+void dhd_conf_set_srl(dhd_pub_t *dhd);\r
+void dhd_conf_set_lrl(dhd_pub_t *dhd);\r
+void dhd_conf_set_glom(dhd_pub_t *dhd);\r
+void dhd_conf_set_ampdu_ba_wsize(dhd_pub_t *dhd);\r
+void dhd_conf_set_spect(dhd_pub_t *dhd);\r
+int dhd_conf_read_config(dhd_pub_t *dhd);\r
+int dhd_conf_set_chiprev(dhd_pub_t *dhd, uint chip, uint chiprev);\r
+uint dhd_conf_get_chip(void *context);\r
+uint dhd_conf_get_chiprev(void *context);\r
int dhd_conf_preinit(dhd_pub_t *dhd);\r
+int dhd_conf_reset(dhd_pub_t *dhd);\r
int dhd_conf_attach(dhd_pub_t *dhd);\r
void dhd_conf_detach(dhd_pub_t *dhd);\r
-\r
-extern void *bcmsdh_get_drvdata(void);\r
-\r
-#ifdef POWER_OFF_IN_SUSPEND\r
-extern struct net_device *g_netdev;\r
-#if defined(CONFIG_HAS_EARLYSUSPEND)\r
-extern int g_wifi_on;\r
-#ifdef WL_CFG80211\r
-void wl_cfg80211_stop(void);\r
-void wl_cfg80211_send_disconnect(void);\r
-void wl_cfg80211_user_sync(bool lock);\r
-#endif\r
-#endif\r
-void dhd_conf_wifi_suspend(struct sdio_func *func);\r
-void dhd_conf_register_wifi_suspend(struct sdio_func *func);\r
-void dhd_conf_unregister_wifi_suspend(struct sdio_func *func);\r
-#endif\r
+void *dhd_get_pub(struct net_device *dev);\r
\r
#endif /* _dhd_config_ */\r
* Customer code to add GPIO control during WLAN start/stop
* $Copyright Open Broadcom Corporation$
*
-* $Id: dhd_custom_gpio.c 417465 2013-08-09 11:47:27Z $
+* $Id: dhd_custom_gpio.c 447105 2014-01-08 05:27:09Z $
*/
#include <typedefs.h>
#include <linuxver.h>
#include <osl.h>
#include <bcmutils.h>
-
#include <dngl_stats.h>
#include <dhd.h>
+#include <dhd_linux.h>
#include <wlioctl.h>
#include <wl_iw.h>
#define WL_ERROR(x) printf x
#define WL_TRACE(x)
-#ifdef CUSTOMER_HW
-#if defined(CUSTOMER_OOB)
-extern int bcm_wlan_get_oob_irq(void);
-#endif
-extern void bcm_wlan_power_off(int);
-extern void bcm_wlan_power_on(int);
-#endif /* CUSTOMER_HW */
#if defined(CUSTOMER_HW2)
#if defined(PLATFORM_MPS)
int __attribute__ ((weak)) wifi_get_fw_nv_path(char *fw, char *nv) { return 0;};
#endif
-#ifdef CONFIG_WIFI_CONTROL_FUNC
-int wifi_set_power(int on, unsigned long msec);
-int wifi_get_irq_number(unsigned long *irq_flags_ptr);
-int wifi_get_mac_addr(unsigned char *buf);
-void *wifi_get_country_code(char *ccode);
-#else
-int wifi_set_power(int on, unsigned long msec) { return -1; }
-int wifi_get_irq_number(unsigned long *irq_flags_ptr) { return -1; }
-int wifi_get_mac_addr(unsigned char *buf) { return -1; }
-void *wifi_get_country_code(char *ccode) { return NULL; }
-#endif /* CONFIG_WIFI_CONTROL_FUNC */
#endif
#if defined(OOB_INTR_ONLY)
* Broadcom provides just reference settings as example.
*
*/
-int dhd_customer_oob_irq_map(unsigned long *irq_flags_ptr)
+int dhd_customer_oob_irq_map(void *adapter, unsigned long *irq_flags_ptr)
{
int host_oob_irq = 0;
#if defined(CUSTOMER_HW2) && !defined(PLATFORM_MPS)
- host_oob_irq = wifi_get_irq_number(irq_flags_ptr);
-
-#elif defined(CUSTOMER_OOB)
- host_oob_irq = bcm_wlan_get_oob_irq();
+ host_oob_irq = wifi_platform_get_irq_number(adapter, irq_flags_ptr);
#else
#if defined(CUSTOM_OOB_GPIO_NUM)
WL_ERROR(("%s: customer specific Host GPIO number is (%d)\n",
__FUNCTION__, dhd_oob_gpio_num));
-#if defined CUSTOMER_HW
- host_oob_irq = MSM_GPIO_TO_INT(dhd_oob_gpio_num);
-#elif defined CUSTOMER_HW3 || defined(PLATFORM_MPS)
+#if defined CUSTOMER_HW3 || defined(PLATFORM_MPS)
gpio_request(dhd_oob_gpio_num, "oob irq");
host_oob_irq = gpio_to_irq(dhd_oob_gpio_num);
gpio_direction_input(dhd_oob_gpio_num);
-#endif /* CUSTOMER_HW */
+#endif /* defined CUSTOMER_HW3 || defined(PLATFORM_MPS) */
#endif
return (host_oob_irq);
#endif
/* Customer function to control hw specific wlan gpios */
-void
-dhd_customer_gpio_wlan_ctrl(int onoff)
+int
+dhd_customer_gpio_wlan_ctrl(void *adapter, int onoff)
{
- switch (onoff) {
- case WLAN_RESET_OFF:
- WL_TRACE(("%s: call customer specific GPIO to insert WLAN RESET\n",
- __FUNCTION__));
-#ifdef CUSTOMER_HW
- bcm_wlan_power_off(2);
-#endif /* CUSTOMER_HW */
-#if defined(CUSTOMER_HW2)
- wifi_set_power(0, WIFI_TURNOFF_DELAY);
-#endif
- WL_ERROR(("=========== WLAN placed in RESET ========\n"));
- break;
-
- case WLAN_RESET_ON:
- WL_TRACE(("%s: call customer specific GPIO to remove WLAN RESET\n",
- __FUNCTION__));
-#ifdef CUSTOMER_HW
- bcm_wlan_power_on(2);
- //OSL_DELAY(200);
-#endif /* CUSTOMER_HW */
-#if defined(CUSTOMER_HW2)
- wifi_set_power(1, 200);
-#endif
- WL_ERROR(("=========== WLAN going back to live ========\n"));
- break;
-
- case WLAN_POWER_OFF:
- WL_TRACE(("%s: call customer specific GPIO to turn off WL_REG_ON\n",
- __FUNCTION__));
-#ifdef CUSTOMER_HW
- bcm_wlan_power_off(1);
-#endif /* CUSTOMER_HW */
- WL_ERROR(("=========== WLAN placed in POWER OFF ========\n"));
- break;
-
- case WLAN_POWER_ON:
- WL_TRACE(("%s: call customer specific GPIO to turn on WL_REG_ON\n",
- __FUNCTION__));
-#ifdef CUSTOMER_HW
- bcm_wlan_power_on(1);
-#endif /* CUSTOMER_HW */
- /* Lets customer power to get stable */
- //OSL_DELAY(200);
- WL_ERROR(("=========== WLAN placed in POWER ON ========\n"));
- break;
- }
+ int err = 0;
+
+ return err;
}
#ifdef GET_CUSTOM_MAC_ENABLE
#endif
/* Function to get custom MAC address */
int
-dhd_custom_get_mac_address(unsigned char *buf)
+dhd_custom_get_mac_address(void *adapter, unsigned char *buf)
{
int ret = 0;
return -EINVAL;
/* Customer access to MAC address stored outside of DHD driver */
-#if defined(CUSTOMER_HW2) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
- ret = wifi_get_mac_addr(buf);
+#if (defined(CUSTOMER_HW2) || defined(CUSTOMER_HW10)) && (LINUX_VERSION_CODE >= \
+ KERNEL_VERSION(2, 6, 35))
+ ret = wifi_platform_get_mac_addr(adapter, buf);
#endif
#if defined(CUSTOMER_HW)
{"TR", "TR", 0},
{"NO", "NO", 0},
#endif /* EXMAPLE_TABLE */
+#if defined(CUSTOMER_HW2)
+#if defined(BCM4335_CHIP)
+ {"", "XZ", 11}, /* Universal if Country code is unknown or empty */
+#endif
+ {"AE", "AE", 1},
+ {"AR", "AR", 1},
+ {"AT", "AT", 1},
+ {"AU", "AU", 2},
+ {"BE", "BE", 1},
+ {"BG", "BG", 1},
+ {"BN", "BN", 1},
+ {"CA", "CA", 2},
+ {"CH", "CH", 1},
+ {"CY", "CY", 1},
+ {"CZ", "CZ", 1},
+ {"DE", "DE", 3},
+ {"DK", "DK", 1},
+ {"EE", "EE", 1},
+ {"ES", "ES", 1},
+ {"FI", "FI", 1},
+ {"FR", "FR", 1},
+ {"GB", "GB", 1},
+ {"GR", "GR", 1},
+ {"HR", "HR", 1},
+ {"HU", "HU", 1},
+ {"IE", "IE", 1},
+ {"IS", "IS", 1},
+ {"IT", "IT", 1},
+ {"ID", "ID", 1},
+ {"JP", "JP", 8},
+ {"KR", "KR", 24},
+ {"KW", "KW", 1},
+ {"LI", "LI", 1},
+ {"LT", "LT", 1},
+ {"LU", "LU", 1},
+ {"LV", "LV", 1},
+ {"MA", "MA", 1},
+ {"MT", "MT", 1},
+ {"MX", "MX", 1},
+ {"NL", "NL", 1},
+ {"NO", "NO", 1},
+ {"PL", "PL", 1},
+ {"PT", "PT", 1},
+ {"PY", "PY", 1},
+ {"RO", "RO", 1},
+ {"SE", "SE", 1},
+ {"SI", "SI", 1},
+ {"SK", "SK", 1},
+ {"TR", "TR", 7},
+ {"TW", "TW", 1},
+ {"IR", "XZ", 11}, /* Universal if Country code is IRAN, (ISLAMIC REPUBLIC OF) */
+ {"SD", "XZ", 11}, /* Universal if Country code is SUDAN */
+ {"SY", "XZ", 11}, /* Universal if Country code is SYRIAN ARAB REPUBLIC */
+ {"GL", "XZ", 11}, /* Universal if Country code is GREENLAND */
+ {"PS", "XZ", 11}, /* Universal if Country code is PALESTINIAN TERRITORY, OCCUPIED */
+ {"TL", "XZ", 11}, /* Universal if Country code is TIMOR-LESTE (EAST TIMOR) */
+ {"MH", "XZ", 11}, /* Universal if Country code is MARSHALL ISLANDS */
+#ifdef BCM4330_CHIP
+ {"RU", "RU", 1},
+ {"US", "US", 5}
+#endif
+#endif /* CUSTOMER_HW2 */
};
* input : ISO 3166-1 country abbreviation
* output: customized cspec
*/
-void get_customized_country_code(char *country_iso_code, wl_country_t *cspec)
+void get_customized_country_code(void *adapter, char *country_iso_code, wl_country_t *cspec)
{
#if defined(CUSTOMER_HW2) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
if (!cspec)
return;
- cloc_ptr = wifi_get_country_code(country_iso_code);
+ cloc_ptr = wifi_platform_get_country_code(adapter, country_iso_code);
if (cloc_ptr) {
strlcpy(cspec->ccode, cloc_ptr->custom_locale, WLC_CNTRY_BUF_SZ);
cspec->rev = cloc_ptr->custom_locale_rev;
*
* $ Copyright Open Broadcom Corporation $
*
- * $Id: dhd_dbg.h 419132 2013-08-19 21:33:05Z $
+ * $Id: dhd_dbg.h 491225 2014-07-15 11:58:29Z $
*/
#ifndef _dhd_dbg_
#define _dhd_dbg_
-#define USE_NET_RATELIMIT net_ratelimit()
+#define USE_NET_RATELIMIT 1
#if defined(DHD_DEBUG)
#define DHD_PNO(args) do {if (dhd_msg_level & DHD_PNO_VAL) printf args;} while (0)
#define DHD_TRACE_HW4 DHD_TRACE
+#define DHD_INFO_HW4 DHD_INFO
#define DHD_ERROR_ON() (dhd_msg_level & DHD_ERROR_VAL)
#define DHD_TRACE_ON() (dhd_msg_level & DHD_TRACE_VAL)
#define DHD_PNO(args)
#define DHD_TRACE_HW4 DHD_TRACE
+#define DHD_INFO_HW4 DHD_INFO
#define DHD_ERROR_ON() 0
#define DHD_TRACE_ON() 0
-/*
-* Customer code to add GPIO control during WLAN start/stop
-* Copyright (C) 1999-2011, Broadcom Corporation
-*
-* Unless you and Broadcom execute a separate written software license
-* agreement governing use of this software, this software is licensed to you
-* under the terms of the GNU General Public License version 2 (the "GPL"),
-* available at http://www.broadcom.com/licenses/GPLv2.php, with the
-* following added to such license:
-*
-* As a special exception, the copyright holders of this software give you
-* permission to link this software with independent modules, and to copy and
-* distribute the resulting executable under terms of your choice, provided that
-* you also meet, for each linked independent module, the terms and conditions of
-* the license of that module. An independent module is a module which is not
-* derived from this software. The special exception does not apply to any
-* modifications of the software.
-*
-* Notwithstanding the above, under no circumstances may you combine this
-* software in any way with any other Broadcom software provided under a license
-* other than the GPL, without Broadcom's express prior written consent.
-*
-* $Id: dhd_custom_gpio.c,v 1.2.42.1 2010-10-19 00:41:09 Exp $
-*/
-
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/module.h>
-#include <linux/rfkill.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
+
+#include <osl.h>
#include <linux/rfkill-wlan.h>
#ifdef CUSTOMER_HW
+
+#ifdef CONFIG_MACH_ODROID_4210
+#include <mach/gpio.h>
+#include <mach/regs-gpio.h>
+#include <plat/gpio-cfg.h>
+
+#include <plat/sdhci.h>
+#include <plat/devs.h> // modified plat-samsung/dev-hsmmcX.c: EXPORT_SYMBOL(s3c_device_hsmmcx) added
+
+#define sdmmc_channel s3c_device_hsmmc0
+#endif
+
+struct wifi_platform_data {
+ int (*set_power)(bool val);
+ int (*set_carddetect)(bool val);
+ void *(*mem_prealloc)(int section, unsigned long size);
+ int (*get_mac_addr)(unsigned char *buf);
+ void *(*get_country_code)(char *ccode);
+};
+
+struct resource dhd_wlan_resources = {0};
+struct wifi_platform_data dhd_wlan_control = {0};
+
#ifdef CUSTOMER_OOB
-int bcm_wlan_get_oob_irq(void)
+uint bcm_wlan_get_oob_irq(void)
{
- return rockchip_wifi_get_oob_irq();
+	return rockchip_wifi_get_oob_irq();
+}
+
+uint bcm_wlan_get_oob_irq_flags(void)
+{
+ uint host_oob_irq_flags = 0;
+
+ host_oob_irq_flags = (IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL | IORESOURCE_IRQ_SHAREABLE) & IRQF_TRIGGER_MASK;
+ printk("host_oob_irq_flags=%d\n", host_oob_irq_flags);
+
+ return host_oob_irq_flags;
}
#endif
-void bcm_wlan_power_on(int flag)
+int bcm_wlan_set_power(bool on)
{
- if (flag == 1) {
+ int err = 0;
+
+ if (on) {
printk("======== PULL WL_REG_ON HIGH! ========\n");
rockchip_wifi_power(1);
- rockchip_wifi_set_carddetect(1);
} else {
- printk("======== PULL WL_REG_ON HIGH! (flag = %d) ========\n", flag);
- rockchip_wifi_power(1);
+ printk("======== PULL WL_REG_ON LOW! ========\n");
+ rockchip_wifi_power(0);
}
+
+ return err;
}
-void bcm_wlan_power_off(int flag)
+int bcm_wlan_set_carddetect(bool present)
{
- if (flag == 1) {
- printk("======== Card detection to remove SDIO card! ========\n");
- rockchip_wifi_power(1);
- rockchip_wifi_set_carddetect(0);
- rockchip_wifi_power(0);
+ int err = 0;
+
+ if (present) {
+ printk("======== Card detection to detect SDIO card! ========\n");
+ rockchip_wifi_set_carddetect(1);
} else {
- printk("======== PULL WL_REG_ON LOW! (flag = %d) ========\n", flag);
- rockchip_wifi_power(0);
+ printk("======== Card detection to remove SDIO card! ========\n");
+ rockchip_wifi_set_carddetect(0);
+ }
+
+ return err;
+}
+
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+extern void *bcmdhd_mem_prealloc(int section, unsigned long size);
+void* bcm_wlan_prealloc(int section, unsigned long size)
+{
+ void *alloc_ptr = NULL;
+ alloc_ptr = bcmdhd_mem_prealloc(section, size);
+ if (alloc_ptr) {
+ printk("success alloc section %d, size %ld\n", section, size);
+ if (size != 0L)
+ bzero(alloc_ptr, size);
+ return alloc_ptr;
}
+ printk("can't alloc section %d\n", section);
+ return NULL;
+}
+#endif
+
+int bcm_wlan_set_plat_data(void)
+{
+ printk("======== %s ========\n", __FUNCTION__);
+ dhd_wlan_control.set_power = bcm_wlan_set_power;
+ dhd_wlan_control.set_carddetect = bcm_wlan_set_carddetect;
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+ dhd_wlan_control.mem_prealloc = bcm_wlan_prealloc;
+#endif
+ return 0;
}
#endif /* CUSTOMER_HW */
*
* $Copyright Open Broadcom Corporation$
*
- * $Id$
+ * $Id: dhd_ip.c 468932 2014-04-09 06:58:15Z $
*/
#include <typedefs.h>
#include <osl.h>
#include <dhd_ip.h>
+#ifdef DHDTCPACK_SUPPRESS
+#include <dhd_bus.h>
+#include <dhd_proto.h>
+#include <proto/bcmtcp.h>
+#endif /* DHDTCPACK_SUPPRESS */
+
/* special values */
/* 802.3 llc/snap header */
static const uint8 llc_snap_hdr[SNAP_HDR_LEN] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
}
iph = (struct ipv4_hdr *)(pt + ETHER_TYPE_LEN);
- ipl = length - (pt + ETHER_TYPE_LEN - frame);
+ ipl = (uint)(length - (pt + ETHER_TYPE_LEN - frame));
/* We support IPv4 only */
if ((ipl < IPV4_OPTIONS_OFFSET) || (IP_VER(iph) != IP_VER_4)) {
return (iph_frag & IPV4_FRAG_OFFSET_MASK)? DHD_PKT_FRAG_CONT : DHD_PKT_FRAG_FIRST;
}
}
+
+#ifdef DHDTCPACK_SUPPRESS
+
+typedef struct {
+ void *pkt_in_q; /* TCP ACK packet that is already in txq or DelayQ */
+ void *pkt_ether_hdr; /* Ethernet header pointer of pkt_in_q */
+} tcpack_info_t;
+
+typedef struct _tdata_psh_info_t {
+ uint32 end_seq; /* end seq# of a received TCP PSH DATA pkt */
+ struct _tdata_psh_info_t *next; /* next pointer of the link chain */
+} tdata_psh_info_t;
+
+typedef struct {
+ uint8 src_ip_addr[IPV4_ADDR_LEN]; /* SRC ip addrs of this TCP stream */
+ uint8 dst_ip_addr[IPV4_ADDR_LEN]; /* DST ip addrs of this TCP stream */
+ uint8 src_tcp_port[TCP_PORT_LEN]; /* SRC tcp ports of this TCP stream */
+ uint8 dst_tcp_port[TCP_PORT_LEN]; /* DST tcp ports of this TCP stream */
+ tdata_psh_info_t *tdata_psh_info_head; /* Head of received TCP PSH DATA chain */
+ tdata_psh_info_t *tdata_psh_info_tail; /* Tail of received TCP PSH DATA chain */
+ uint32 last_used_time; /* The last time this tcpdata_info was used(in ms) */
+} tcpdata_info_t;
+
+/* TCPACK SUPPRESS module */
+typedef struct {
+ int tcpack_info_cnt;
+ tcpack_info_t tcpack_info_tbl[TCPACK_INFO_MAXNUM]; /* Info of TCP ACK to send */
+ int tcpdata_info_cnt;
+ tcpdata_info_t tcpdata_info_tbl[TCPDATA_INFO_MAXNUM]; /* Info of received TCP DATA */
+ tdata_psh_info_t *tdata_psh_info_pool; /* Pointer to tdata_psh_info elements pool */
+ tdata_psh_info_t *tdata_psh_info_free; /* free tdata_psh_info elements chain in pool */
+#ifdef DHDTCPACK_SUP_DBG
+ int psh_info_enq_num; /* Number of free TCP PSH DATA info elements in pool */
+#endif /* DHDTCPACK_SUP_DBG */
+} tcpack_sup_module_t;
+
+#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
+counter_tbl_t tack_tbl = {"tcpACK", 0, 1000, 10, {0, }, 1};
+#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */
+
+static void
+_tdata_psh_info_pool_enq(tcpack_sup_module_t *tcpack_sup_mod,
+ tdata_psh_info_t *tdata_psh_info)
+{
+ if ((tcpack_sup_mod == NULL) || (tdata_psh_info == NULL)) {
+ DHD_ERROR(("%s %d: ERROR %p %p\n", __FUNCTION__, __LINE__,
+ tcpack_sup_mod, tdata_psh_info));
+ return;
+ }
+
+ ASSERT(tdata_psh_info->next == NULL);
+ tdata_psh_info->next = tcpack_sup_mod->tdata_psh_info_free;
+ tcpack_sup_mod->tdata_psh_info_free = tdata_psh_info;
+#ifdef DHDTCPACK_SUP_DBG
+ tcpack_sup_mod->psh_info_enq_num++;
+#endif
+}
+
+static tdata_psh_info_t*
+_tdata_psh_info_pool_deq(tcpack_sup_module_t *tcpack_sup_mod)
+{
+ tdata_psh_info_t *tdata_psh_info = NULL;
+
+ if (tcpack_sup_mod == NULL) {
+ DHD_ERROR(("%s %d: ERROR %p\n", __FUNCTION__, __LINE__,
+ tcpack_sup_mod));
+ return NULL;
+ }
+
+ tdata_psh_info = tcpack_sup_mod->tdata_psh_info_free;
+ if (tdata_psh_info == NULL)
+ DHD_ERROR(("%s %d: Out of tdata_disc_grp\n", __FUNCTION__, __LINE__));
+ else {
+ tcpack_sup_mod->tdata_psh_info_free = tdata_psh_info->next;
+ tdata_psh_info->next = NULL;
+#ifdef DHDTCPACK_SUP_DBG
+ tcpack_sup_mod->psh_info_enq_num--;
+#endif /* DHDTCPACK_SUP_DBG */
+ }
+
+ return tdata_psh_info;
+}
+
+static int _tdata_psh_info_pool_init(dhd_pub_t *dhdp,
+ tcpack_sup_module_t *tcpack_sup_mod)
+{
+ tdata_psh_info_t *tdata_psh_info_pool = NULL;
+ uint i;
+
+ DHD_TRACE(("%s %d: Enter\n", __FUNCTION__, __LINE__));
+
+ if (tcpack_sup_mod == NULL)
+ return BCME_ERROR;
+
+ ASSERT(tcpack_sup_mod->tdata_psh_info_pool == NULL);
+ ASSERT(tcpack_sup_mod->tdata_psh_info_free == NULL);
+
+ tdata_psh_info_pool =
+ MALLOC(dhdp->osh, sizeof(tdata_psh_info_t) * TCPDATA_PSH_INFO_MAXNUM);
+
+ if (tdata_psh_info_pool == NULL)
+ return BCME_NOMEM;
+ bzero(tdata_psh_info_pool, sizeof(tdata_psh_info_t) * TCPDATA_PSH_INFO_MAXNUM);
+#ifdef DHDTCPACK_SUP_DBG
+ tcpack_sup_mod->psh_info_enq_num = 0;
+#endif /* DHDTCPACK_SUP_DBG */
+
+ /* Enqueue newly allocated tcpdata psh info elements to the pool */
+ for (i = 0; i < TCPDATA_PSH_INFO_MAXNUM; i++)
+ _tdata_psh_info_pool_enq(tcpack_sup_mod, &tdata_psh_info_pool[i]);
+
+ ASSERT(tcpack_sup_mod->tdata_psh_info_free != NULL);
+ tcpack_sup_mod->tdata_psh_info_pool = tdata_psh_info_pool;
+
+ return BCME_OK;
+}
+
+static void _tdata_psh_info_pool_deinit(dhd_pub_t *dhdp,
+ tcpack_sup_module_t *tcpack_sup_mod)
+{
+ uint i;
+ tdata_psh_info_t *tdata_psh_info;
+
+ DHD_TRACE(("%s %d: Enter\n", __FUNCTION__, __LINE__));
+
+ if (tcpack_sup_mod == NULL) {
+ DHD_ERROR(("%s %d: ERROR tcpack_sup_mod NULL!\n",
+ __FUNCTION__, __LINE__));
+ return;
+ }
+
+ for (i = 0; i < tcpack_sup_mod->tcpdata_info_cnt; i++) {
+ tcpdata_info_t *tcpdata_info = &tcpack_sup_mod->tcpdata_info_tbl[i];
+ /* Return tdata_psh_info elements allocated to each tcpdata_info to the pool */
+ while ((tdata_psh_info = tcpdata_info->tdata_psh_info_head)) {
+ tcpdata_info->tdata_psh_info_head = tdata_psh_info->next;
+ tdata_psh_info->next = NULL;
+ _tdata_psh_info_pool_enq(tcpack_sup_mod, tdata_psh_info);
+ }
+ tcpdata_info->tdata_psh_info_tail = NULL;
+ }
+#ifdef DHDTCPACK_SUP_DBG
+ DHD_ERROR(("%s %d: PSH INFO ENQ %d\n",
+ __FUNCTION__, __LINE__, tcpack_sup_mod->psh_info_enq_num));
+#endif /* DHDTCPACK_SUP_DBG */
+
+ i = 0;
+ /* Be sure we recollected all tdata_psh_info elements */
+ while ((tdata_psh_info = tcpack_sup_mod->tdata_psh_info_free)) {
+ tcpack_sup_mod->tdata_psh_info_free = tdata_psh_info->next;
+ tdata_psh_info->next = NULL;
+ i++;
+ }
+ ASSERT(i == TCPDATA_PSH_INFO_MAXNUM);
+ MFREE(dhdp->osh, tcpack_sup_mod->tdata_psh_info_pool,
+ sizeof(tdata_psh_info_t) * TCPDATA_PSH_INFO_MAXNUM);
+ tcpack_sup_mod->tdata_psh_info_pool = NULL;
+
+ return;
+}
+
+int dhd_tcpack_suppress_set(dhd_pub_t *dhdp, uint8 mode)
+{
+ int ret = BCME_OK;
+
+ dhd_os_tcpacklock(dhdp);
+
+ if (dhdp->tcpack_sup_mode == mode) {
+ DHD_ERROR(("%s %d: already set to %d\n", __FUNCTION__, __LINE__, mode));
+ goto exit;
+ }
+
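+ /* Reject out-of-range modes (and DELAYTX on non-SDIO builds); the trailing
+ * FALSE keeps the expression well formed when the #ifndef branch is compiled out.
+ */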
+ if (mode >= TCPACK_SUP_LAST_MODE ||
+#ifndef BCMSDIO
+ mode == TCPACK_SUP_DELAYTX ||
+#endif
+ FALSE) {
+ DHD_ERROR(("%s %d: Invalid mode %d\n", __FUNCTION__, __LINE__, mode));
+ ret = BCME_BADARG;
+ goto exit;
+ }
+
+ DHD_TRACE(("%s: %d -> %d\n",
+ __FUNCTION__, dhdp->tcpack_sup_mode, mode));
+
+ /* Old tcpack_sup_mode is TCPACK_SUP_DELAYTX */
+ if (dhdp->tcpack_sup_mode == TCPACK_SUP_DELAYTX) {
+ tcpack_sup_module_t *tcpack_sup_mod = dhdp->tcpack_sup_module;
+ /* We won't need the tdata_psh_info pool and tcpdata_info_tbl anymore */
+ _tdata_psh_info_pool_deinit(dhdp, tcpack_sup_mod);
+ tcpack_sup_mod->tcpdata_info_cnt = 0;
+ bzero(tcpack_sup_mod->tcpdata_info_tbl,
+ sizeof(tcpdata_info_t) * TCPDATA_INFO_MAXNUM);
+ /* For half duplex bus interface, tx precedes rx by default */
+ if (dhdp->bus)
+ dhd_bus_set_dotxinrx(dhdp->bus, TRUE);
+ }
+
+ dhdp->tcpack_sup_mode = mode;
+
+ if (mode == TCPACK_SUP_OFF) {
+ ASSERT(dhdp->tcpack_sup_module != NULL);
+ MFREE(dhdp->osh, dhdp->tcpack_sup_module, sizeof(tcpack_sup_module_t));
+ dhdp->tcpack_sup_module = NULL;
+ goto exit;
+ }
+
+ if (dhdp->tcpack_sup_module == NULL) {
+ tcpack_sup_module_t *tcpack_sup_mod =
+ MALLOC(dhdp->osh, sizeof(tcpack_sup_module_t));
+ if (tcpack_sup_mod == NULL) {
+ DHD_ERROR(("%s %d: No MEM\n", __FUNCTION__, __LINE__));
+ dhdp->tcpack_sup_mode = TCPACK_SUP_OFF;
+ ret = BCME_NOMEM;
+ goto exit;
+ }
+ bzero(tcpack_sup_mod, sizeof(tcpack_sup_module_t));
+ dhdp->tcpack_sup_module = tcpack_sup_mod;
+ }
+
+ if (mode == TCPACK_SUP_DELAYTX) {
+ ret = _tdata_psh_info_pool_init(dhdp, dhdp->tcpack_sup_module);
+ if (ret != BCME_OK)
+ DHD_ERROR(("%s %d: pool init fail with %d\n", __FUNCTION__, __LINE__, ret));
+ else if (dhdp->bus)
+ dhd_bus_set_dotxinrx(dhdp->bus, FALSE);
+ }
+
+exit:
+ dhd_os_tcpackunlock(dhdp);
+ return ret;
+}
+
+void
+dhd_tcpack_info_tbl_clean(dhd_pub_t *dhdp)
+{
+ tcpack_sup_module_t *tcpack_sup_mod = dhdp->tcpack_sup_module;
+
+ if (dhdp->tcpack_sup_mode == TCPACK_SUP_OFF)
+ goto exit;
+
+ dhd_os_tcpacklock(dhdp);
+
+ if (!tcpack_sup_mod) {
+ DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n",
+ __FUNCTION__, __LINE__));
+ dhd_os_tcpackunlock(dhdp);
+ goto exit;
+ }
+
+ tcpack_sup_mod->tcpack_info_cnt = 0;
+ bzero(tcpack_sup_mod->tcpack_info_tbl, sizeof(tcpack_info_t) * TCPACK_INFO_MAXNUM);
+ dhd_os_tcpackunlock(dhdp);
+
+exit:
+ return;
+}
+
+inline int dhd_tcpack_check_xmit(dhd_pub_t *dhdp, void *pkt)
+{
+ uint8 i;
+ tcpack_sup_module_t *tcpack_sup_mod;
+ tcpack_info_t *tcpack_info_tbl;
+ int tbl_cnt;
+ int ret = BCME_OK;
+ void *pdata;
+ uint32 pktlen;
+
+ if (dhdp->tcpack_sup_mode == TCPACK_SUP_OFF)
+ goto exit;
+
+ pdata = PKTDATA(dhdp->osh, pkt);
+ pktlen = PKTLEN(dhdp->osh, pkt) - dhd_prot_hdrlen(dhdp, pdata);
+
+ if (pktlen < TCPACKSZMIN || pktlen > TCPACKSZMAX) {
+ DHD_TRACE(("%s %d: Too short or long length %d to be TCP ACK\n",
+ __FUNCTION__, __LINE__, pktlen));
+ goto exit;
+ }
+
+ dhd_os_tcpacklock(dhdp);
+ tcpack_sup_mod = dhdp->tcpack_sup_module;
+
+ if (!tcpack_sup_mod) {
+ DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", __FUNCTION__, __LINE__));
+ ret = BCME_ERROR;
+ dhd_os_tcpackunlock(dhdp);
+ goto exit;
+ }
+ tbl_cnt = tcpack_sup_mod->tcpack_info_cnt;
+ tcpack_info_tbl = tcpack_sup_mod->tcpack_info_tbl;
+
+ ASSERT(tbl_cnt <= TCPACK_INFO_MAXNUM);
+
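+ /* The table is kept dense: when the departing packet's entry is found, the
+ * last entry is moved into its slot and the count is decremented.
+ */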
+ for (i = 0; i < tbl_cnt; i++) {
+ if (tcpack_info_tbl[i].pkt_in_q == pkt) {
+ DHD_TRACE(("%s %d: pkt %p sent out. idx %d, tbl_cnt %d\n",
+ __FUNCTION__, __LINE__, pkt, i, tbl_cnt));
+ /* This pkt is being transmitted, so remove its tcp_ack_info entry. */
+ if (i < tbl_cnt - 1) {
+ bcopy(&tcpack_info_tbl[tbl_cnt - 1],
+ &tcpack_info_tbl[i], sizeof(tcpack_info_t));
+ }
+ bzero(&tcpack_info_tbl[tbl_cnt - 1], sizeof(tcpack_info_t));
+ if (--tcpack_sup_mod->tcpack_info_cnt < 0) {
+ DHD_ERROR(("%s %d: ERROR!!! tcp_ack_info_cnt %d\n",
+ __FUNCTION__, __LINE__, tcpack_sup_mod->tcpack_info_cnt));
+ ret = BCME_ERROR;
+ }
+ break;
+ }
+ }
+ dhd_os_tcpackunlock(dhdp);
+
+exit:
+ return ret;
+}
+
+static INLINE bool dhd_tcpdata_psh_acked(dhd_pub_t *dhdp, uint8 *ip_hdr,
+ uint8 *tcp_hdr, uint32 tcp_ack_num)
+{
+ tcpack_sup_module_t *tcpack_sup_mod;
+ int i;
+ tcpdata_info_t *tcpdata_info = NULL;
+ tdata_psh_info_t *tdata_psh_info = NULL;
+ bool ret = FALSE;
+
+ if (dhdp->tcpack_sup_mode != TCPACK_SUP_DELAYTX)
+ goto exit;
+
+ tcpack_sup_mod = dhdp->tcpack_sup_module;
+
+ if (!tcpack_sup_mod) {
+ DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", __FUNCTION__, __LINE__));
+ goto exit;
+ }
+
+ DHD_TRACE(("%s %d: IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR
+ " TCP port %d %d, ack %u\n", __FUNCTION__, __LINE__,
+ IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_SRC_IP_OFFSET])),
+ IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_DEST_IP_OFFSET])),
+ ntoh16_ua(&tcp_hdr[TCP_SRC_PORT_OFFSET]),
+ ntoh16_ua(&tcp_hdr[TCP_DEST_PORT_OFFSET]),
+ tcp_ack_num));
+
+ for (i = 0; i < tcpack_sup_mod->tcpdata_info_cnt; i++) {
+ tcpdata_info_t *tcpdata_info_tmp = &tcpack_sup_mod->tcpdata_info_tbl[i];
+ DHD_TRACE(("%s %d: data info[%d], IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR
+ " TCP port %d %d\n", __FUNCTION__, __LINE__, i,
+ IPV4_ADDR_TO_STR(ntoh32_ua(tcpdata_info_tmp->src_ip_addr)),
+ IPV4_ADDR_TO_STR(ntoh32_ua(tcpdata_info_tmp->dst_ip_addr)),
+ ntoh16_ua(tcpdata_info_tmp->src_tcp_port),
+ ntoh16_ua(tcpdata_info_tmp->dst_tcp_port)));
+
+ /* This is our stream only if both IP address pairs and both TCP ports match. */
+ if (memcmp(&ip_hdr[IPV4_SRC_IP_OFFSET],
+ tcpdata_info_tmp->dst_ip_addr, IPV4_ADDR_LEN) == 0 &&
+ memcmp(&ip_hdr[IPV4_DEST_IP_OFFSET],
+ tcpdata_info_tmp->src_ip_addr, IPV4_ADDR_LEN) == 0 &&
+ memcmp(&tcp_hdr[TCP_SRC_PORT_OFFSET],
+ tcpdata_info_tmp->dst_tcp_port, TCP_PORT_LEN) == 0 &&
+ memcmp(&tcp_hdr[TCP_DEST_PORT_OFFSET],
+ tcpdata_info_tmp->src_tcp_port, TCP_PORT_LEN) == 0) {
+ tcpdata_info = tcpdata_info_tmp;
+ break;
+ }
+ }
+
+ if (tcpdata_info == NULL) {
+ DHD_TRACE(("%s %d: no tcpdata_info!\n", __FUNCTION__, __LINE__));
+ goto exit;
+ }
+
+ if (tcpdata_info->tdata_psh_info_head == NULL) {
+ DHD_TRACE(("%s %d: No PSH DATA to be acked!\n", __FUNCTION__, __LINE__));
+ }
+
+ while ((tdata_psh_info = tcpdata_info->tdata_psh_info_head)) {
+ if (IS_TCPSEQ_GE(tcp_ack_num, tdata_psh_info->end_seq)) {
+ DHD_TRACE(("%s %d: PSH ACKED! %u >= %u\n",
+ __FUNCTION__, __LINE__, tcp_ack_num, tdata_psh_info->end_seq));
+ tcpdata_info->tdata_psh_info_head = tdata_psh_info->next;
+ tdata_psh_info->next = NULL;
+ _tdata_psh_info_pool_enq(tcpack_sup_mod, tdata_psh_info);
+ ret = TRUE;
+ } else
+ break;
+ }
+ if (tdata_psh_info == NULL)
+ tcpdata_info->tdata_psh_info_tail = NULL;
+
+#ifdef DHDTCPACK_SUP_DBG
+ DHD_TRACE(("%s %d: PSH INFO ENQ %d\n",
+ __FUNCTION__, __LINE__, tcpack_sup_mod->psh_info_enq_num));
+#endif /* DHDTCPACK_SUP_DBG */
+
+exit:
+ return ret;
+}
+
+bool
+dhd_tcpack_suppress(dhd_pub_t *dhdp, void *pkt)
+{
+ uint8 *new_ether_hdr; /* Ethernet header of the new packet */
+ uint16 new_ether_type; /* Ethernet type of the new packet */
+ uint8 *new_ip_hdr; /* IP header of the new packet */
+ uint8 *new_tcp_hdr; /* TCP header of the new packet */
+ uint32 new_ip_hdr_len; /* IP header length of the new packet */
+ uint32 cur_framelen;
+ uint32 new_tcp_ack_num; /* TCP acknowledge number of the new packet */
+ uint16 new_ip_total_len; /* Total length of IP packet for the new packet */
+ uint32 new_tcp_hdr_len; /* TCP header length of the new packet */
+ tcpack_sup_module_t *tcpack_sup_mod;
+ tcpack_info_t *tcpack_info_tbl;
+ int i;
+ bool ret = FALSE;
+ bool set_dotxinrx = TRUE;
+
+ if (dhdp->tcpack_sup_mode == TCPACK_SUP_OFF)
+ goto exit;
+
+ new_ether_hdr = PKTDATA(dhdp->osh, pkt);
+ cur_framelen = PKTLEN(dhdp->osh, pkt);
+
+ if (cur_framelen < TCPACKSZMIN || cur_framelen > TCPACKSZMAX) {
+ DHD_TRACE(("%s %d: Too short or long length %d to be TCP ACK\n",
+ __FUNCTION__, __LINE__, cur_framelen));
+ goto exit;
+ }
+
+ new_ether_type = new_ether_hdr[12] << 8 | new_ether_hdr[13];
+
+ if (new_ether_type != ETHER_TYPE_IP) {
+ DHD_TRACE(("%s %d: Not an IP packet 0x%x\n",
+ __FUNCTION__, __LINE__, new_ether_type));
+ goto exit;
+ }
+
+ DHD_TRACE(("%s %d: IP pkt! 0x%x\n", __FUNCTION__, __LINE__, new_ether_type));
+
+ new_ip_hdr = new_ether_hdr + ETHER_HDR_LEN;
+ cur_framelen -= ETHER_HDR_LEN;
+
+ ASSERT(cur_framelen >= IPV4_MIN_HEADER_LEN);
+
+ new_ip_hdr_len = IPV4_HLEN(new_ip_hdr);
+ if (IP_VER(new_ip_hdr) != IP_VER_4 || IPV4_PROT(new_ip_hdr) != IP_PROT_TCP) {
+ DHD_TRACE(("%s %d: Not an IPv4 TCP packet! ip ver %d, prot %d\n",
+ __FUNCTION__, __LINE__, IP_VER(new_ip_hdr), IPV4_PROT(new_ip_hdr)));
+ goto exit;
+ }
+
+ new_tcp_hdr = new_ip_hdr + new_ip_hdr_len;
+ cur_framelen -= new_ip_hdr_len;
+
+ ASSERT(cur_framelen >= TCP_MIN_HEADER_LEN);
+
+ DHD_TRACE(("%s %d: TCP pkt!\n", __FUNCTION__, __LINE__));
+
+ /* Is this a pure ACK? Only the ACK flag may be set, so that packets carrying other flags are never suppressed. */
+ if (new_tcp_hdr[TCP_FLAGS_OFFSET] != TCP_FLAG_ACK) {
+ DHD_TRACE(("%s %d: Do not touch TCP flag 0x%x\n",
+ __FUNCTION__, __LINE__, new_tcp_hdr[TCP_FLAGS_OFFSET]));
+ goto exit;
+ }
+
+ new_ip_total_len = ntoh16_ua(&new_ip_hdr[IPV4_PKTLEN_OFFSET]);
+ new_tcp_hdr_len = 4 * TCP_HDRLEN(new_tcp_hdr[TCP_HLEN_OFFSET]);
+
+ /* This packet has TCP data, so just send */
+ if (new_ip_total_len > new_ip_hdr_len + new_tcp_hdr_len) {
+ DHD_TRACE(("%s %d: Do nothing for TCP DATA\n", __FUNCTION__, __LINE__));
+ goto exit;
+ }
+
+ ASSERT(new_ip_total_len == new_ip_hdr_len + new_tcp_hdr_len);
+
+ new_tcp_ack_num = ntoh32_ua(&new_tcp_hdr[TCP_ACK_NUM_OFFSET]);
+
+ DHD_TRACE(("%s %d: TCP ACK with zero DATA length"
+ " IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR" TCP port %d %d\n",
+ __FUNCTION__, __LINE__,
+ IPV4_ADDR_TO_STR(ntoh32_ua(&new_ip_hdr[IPV4_SRC_IP_OFFSET])),
+ IPV4_ADDR_TO_STR(ntoh32_ua(&new_ip_hdr[IPV4_DEST_IP_OFFSET])),
+ ntoh16_ua(&new_tcp_hdr[TCP_SRC_PORT_OFFSET]),
+ ntoh16_ua(&new_tcp_hdr[TCP_DEST_PORT_OFFSET])));
+
+ /* Look for tcp_ack_info that has the same ip src/dst addrs and tcp src/dst ports */
+ dhd_os_tcpacklock(dhdp);
+#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
+ counter_printlog(&tack_tbl);
+ tack_tbl.cnt[0]++;
+#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */
+
+ tcpack_sup_mod = dhdp->tcpack_sup_module;
+
+ if (!tcpack_sup_mod) {
+ DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", __FUNCTION__, __LINE__));
+ ret = BCME_ERROR;
+ dhd_os_tcpackunlock(dhdp);
+ goto exit;
+ }
+ tcpack_info_tbl = tcpack_sup_mod->tcpack_info_tbl;
+
+ if (dhd_tcpdata_psh_acked(dhdp, new_ip_hdr, new_tcp_hdr, new_tcp_ack_num)) {
+ /* This TCPACK is ACK to TCPDATA PSH pkt, so keep set_dotxinrx TRUE */
+#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
+ tack_tbl.cnt[5]++;
+#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */
+ } else
+ set_dotxinrx = FALSE;
+
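+ /* Walk the queued-ACK table: an ACK for the same 4-tuple with a higher ack
+ * number overwrites the queued packet in place (the new one is freed), an
+ * equal ack number is treated as a retransmission, and a lower one is left alone.
+ */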
+ for (i = 0; i < tcpack_sup_mod->tcpack_info_cnt; i++) {
+ void *oldpkt; /* TCPACK packet that is already in txq or DelayQ */
+ uint8 *old_ether_hdr, *old_ip_hdr, *old_tcp_hdr;
+ uint32 old_ip_hdr_len, old_tcp_hdr_len;
+ uint32 old_tcpack_num; /* TCP ACK number of old TCPACK packet in Q */
+
+ if ((oldpkt = tcpack_info_tbl[i].pkt_in_q) == NULL) {
+ DHD_ERROR(("%s %d: Unexpected error!! cur idx %d, ttl cnt %d\n",
+ __FUNCTION__, __LINE__, i, tcpack_sup_mod->tcpack_info_cnt));
+ break;
+ }
+
+ if (PKTDATA(dhdp->osh, oldpkt) == NULL) {
+ DHD_ERROR(("%s %d: oldpkt data NULL!! cur idx %d, ttl cnt %d\n",
+ __FUNCTION__, __LINE__, i, tcpack_sup_mod->tcpack_info_cnt));
+ break;
+ }
+
+ old_ether_hdr = tcpack_info_tbl[i].pkt_ether_hdr;
+ old_ip_hdr = old_ether_hdr + ETHER_HDR_LEN;
+ old_ip_hdr_len = IPV4_HLEN(old_ip_hdr);
+ old_tcp_hdr = old_ip_hdr + old_ip_hdr_len;
+ old_tcp_hdr_len = 4 * TCP_HDRLEN(old_tcp_hdr[TCP_HLEN_OFFSET]);
+
+ DHD_TRACE(("%s %d: oldpkt %p[%d], IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR
+ " TCP port %d %d\n", __FUNCTION__, __LINE__, oldpkt, i,
+ IPV4_ADDR_TO_STR(ntoh32_ua(&old_ip_hdr[IPV4_SRC_IP_OFFSET])),
+ IPV4_ADDR_TO_STR(ntoh32_ua(&old_ip_hdr[IPV4_DEST_IP_OFFSET])),
+ ntoh16_ua(&old_tcp_hdr[TCP_SRC_PORT_OFFSET]),
+ ntoh16_ua(&old_tcp_hdr[TCP_DEST_PORT_OFFSET])));
+
+ /* If either the IP addresses or the TCP ports do not match, skip this entry. */
+ if (memcmp(&new_ip_hdr[IPV4_SRC_IP_OFFSET],
+ &old_ip_hdr[IPV4_SRC_IP_OFFSET], IPV4_ADDR_LEN * 2) ||
+ memcmp(&new_tcp_hdr[TCP_SRC_PORT_OFFSET],
+ &old_tcp_hdr[TCP_SRC_PORT_OFFSET], TCP_PORT_LEN * 2))
+ continue;
+
+ old_tcpack_num = ntoh32_ua(&old_tcp_hdr[TCP_ACK_NUM_OFFSET]);
+
+ if (IS_TCPSEQ_GT(new_tcp_ack_num, old_tcpack_num)) {
+ /* New packet has higher TCP ACK number, so it replaces the old packet */
+ if (new_ip_hdr_len == old_ip_hdr_len &&
+ new_tcp_hdr_len == old_tcp_hdr_len) {
+ ASSERT(memcmp(new_ether_hdr, old_ether_hdr, ETHER_HDR_LEN) == 0);
+ bcopy(new_ip_hdr, old_ip_hdr, new_ip_total_len);
+ PKTFREE(dhdp->osh, pkt, FALSE);
+ DHD_TRACE(("%s %d: TCP ACK replace %u -> %u\n",
+ __FUNCTION__, __LINE__, old_tcpack_num, new_tcp_ack_num));
+#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
+ tack_tbl.cnt[2]++;
+#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */
+ ret = TRUE;
+ } else {
+#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
+ tack_tbl.cnt[6]++;
+#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */
+ DHD_TRACE(("%s %d: length mismatch %d != %d || %d != %d"
+ " ACK %u -> %u\n", __FUNCTION__, __LINE__,
+ new_ip_hdr_len, old_ip_hdr_len,
+ new_tcp_hdr_len, old_tcp_hdr_len,
+ old_tcpack_num, new_tcp_ack_num));
+ }
+ } else if (new_tcp_ack_num == old_tcpack_num) {
+ set_dotxinrx = TRUE;
+ /* TCPACK retransmission */
+#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
+ tack_tbl.cnt[3]++;
+#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */
+ } else {
+ DHD_TRACE(("%s %d: ACK number reverse old %u(0x%p) new %u(0x%p)\n",
+ __FUNCTION__, __LINE__, old_tcpack_num, oldpkt,
+ new_tcp_ack_num, pkt));
+ }
+ dhd_os_tcpackunlock(dhdp);
+ goto exit;
+ }
+
+ if (i == tcpack_sup_mod->tcpack_info_cnt && i < TCPACK_INFO_MAXNUM) {
+ /* No TCPACK packet with the same IP addr and TCP port is found
+ * in tcp_ack_info_tbl. So add this packet to the table.
+ */
+ DHD_TRACE(("%s %d: Add pkt 0x%p(ether_hdr 0x%p) to tbl[%d]\n",
+ __FUNCTION__, __LINE__, pkt, new_ether_hdr,
+ tcpack_sup_mod->tcpack_info_cnt));
+
+ tcpack_info_tbl[tcpack_sup_mod->tcpack_info_cnt].pkt_in_q = pkt;
+ tcpack_info_tbl[tcpack_sup_mod->tcpack_info_cnt].pkt_ether_hdr = new_ether_hdr;
+ tcpack_sup_mod->tcpack_info_cnt++;
+#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
+ tack_tbl.cnt[1]++;
+#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */
+ } else {
+ ASSERT(i == tcpack_sup_mod->tcpack_info_cnt);
+ DHD_TRACE(("%s %d: No empty tcp ack info tbl\n",
+ __FUNCTION__, __LINE__));
+ }
+ dhd_os_tcpackunlock(dhdp);
+
+exit:
+ /* Unless the mode is TCPACK_SUP_DELAYTX, dotxinrx is always TRUE, so there is no need to set it here */
+ if (dhdp->tcpack_sup_mode == TCPACK_SUP_DELAYTX && set_dotxinrx)
+ dhd_bus_set_dotxinrx(dhdp->bus, TRUE);
+
+ return ret;
+}
+
+bool
+dhd_tcpdata_info_get(dhd_pub_t *dhdp, void *pkt)
+{
+ uint8 *ether_hdr; /* Ethernet header of the new packet */
+ uint16 ether_type; /* Ethernet type of the new packet */
+ uint8 *ip_hdr; /* IP header of the new packet */
+ uint8 *tcp_hdr; /* TCP header of the new packet */
+ uint32 ip_hdr_len; /* IP header length of the new packet */
+ uint32 cur_framelen;
+ uint16 ip_total_len; /* Total length of IP packet for the new packet */
+ uint32 tcp_hdr_len; /* TCP header length of the new packet */
+ uint32 tcp_seq_num; /* TCP sequence number of the new packet */
+ uint16 tcp_data_len; /* TCP DATA length that excludes IP and TCP headers */
+ uint32 end_tcp_seq_num; /* TCP seq number of the last byte in the new packet */
+ tcpack_sup_module_t *tcpack_sup_mod;
+ tcpdata_info_t *tcpdata_info = NULL;
+ tdata_psh_info_t *tdata_psh_info;
+
+ int i;
+ bool ret = FALSE;
+
+ if (dhdp->tcpack_sup_mode != TCPACK_SUP_DELAYTX)
+ goto exit;
+
+ ether_hdr = PKTDATA(dhdp->osh, pkt);
+ cur_framelen = PKTLEN(dhdp->osh, pkt);
+
+ ether_type = ether_hdr[12] << 8 | ether_hdr[13];
+
+ if (ether_type != ETHER_TYPE_IP) {
+ DHD_TRACE(("%s %d: Not an IP packet 0x%x\n",
+ __FUNCTION__, __LINE__, ether_type));
+ goto exit;
+ }
+
+ DHD_TRACE(("%s %d: IP pkt! 0x%x\n", __FUNCTION__, __LINE__, ether_type));
+
+ ip_hdr = ether_hdr + ETHER_HDR_LEN;
+ cur_framelen -= ETHER_HDR_LEN;
+
+ ASSERT(cur_framelen >= IPV4_MIN_HEADER_LEN);
+
+ ip_hdr_len = IPV4_HLEN(ip_hdr);
+ if (IP_VER(ip_hdr) != IP_VER_4 || IPV4_PROT(ip_hdr) != IP_PROT_TCP) {
+ DHD_TRACE(("%s %d: Not an IPv4 TCP packet! ip ver %d, prot %d\n",
+ __FUNCTION__, __LINE__, IP_VER(ip_hdr), IPV4_PROT(ip_hdr)));
+ goto exit;
+ }
+
+ tcp_hdr = ip_hdr + ip_hdr_len;
+ cur_framelen -= ip_hdr_len;
+
+ ASSERT(cur_framelen >= TCP_MIN_HEADER_LEN);
+
+ DHD_TRACE(("%s %d: TCP pkt!\n", __FUNCTION__, __LINE__));
+
+ ip_total_len = ntoh16_ua(&ip_hdr[IPV4_PKTLEN_OFFSET]);
+ tcp_hdr_len = 4 * TCP_HDRLEN(tcp_hdr[TCP_HLEN_OFFSET]);
+
+ /* This packet is merely a TCP ACK with no payload, so do nothing */
+ if (ip_total_len == ip_hdr_len + tcp_hdr_len) {
+ DHD_TRACE(("%s %d: Do nothing for no data TCP ACK\n", __FUNCTION__, __LINE__));
+ goto exit;
+ }
+
+ ASSERT(ip_total_len > ip_hdr_len + tcp_hdr_len);
+
+ if ((tcp_hdr[TCP_FLAGS_OFFSET] & TCP_FLAG_PSH) == 0) {
+ DHD_TRACE(("%s %d: Not interested TCP DATA packet\n", __FUNCTION__, __LINE__));
+ goto exit;
+ }
+
+ DHD_TRACE(("%s %d: TCP DATA with nonzero DATA length"
+ " IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR" TCP port %d %d, flag 0x%x\n",
+ __FUNCTION__, __LINE__,
+ IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_SRC_IP_OFFSET])),
+ IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_DEST_IP_OFFSET])),
+ ntoh16_ua(&tcp_hdr[TCP_SRC_PORT_OFFSET]),
+ ntoh16_ua(&tcp_hdr[TCP_DEST_PORT_OFFSET]),
+ tcp_hdr[TCP_FLAGS_OFFSET]));
+
+ dhd_os_tcpacklock(dhdp);
+ tcpack_sup_mod = dhdp->tcpack_sup_module;
+
+ if (!tcpack_sup_mod) {
+ DHD_ERROR(("%s %d: tcpack suppress module NULL!!\n", __FUNCTION__, __LINE__));
+ ret = BCME_ERROR;
+ dhd_os_tcpackunlock(dhdp);
+ goto exit;
+ }
+
+ /* Look for tcpdata_info that has the same ip src/dst addrs and tcp src/dst ports */
+ i = 0;
+ while (i < tcpack_sup_mod->tcpdata_info_cnt) {
+ tcpdata_info_t *tdata_info_tmp = &tcpack_sup_mod->tcpdata_info_tbl[i];
+ uint32 now_in_ms = OSL_SYSUPTIME();
+ DHD_TRACE(("%s %d: data info[%d], IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR
+ " TCP port %d %d\n", __FUNCTION__, __LINE__, i,
+ IPV4_ADDR_TO_STR(ntoh32_ua(tdata_info_tmp->src_ip_addr)),
+ IPV4_ADDR_TO_STR(ntoh32_ua(tdata_info_tmp->dst_ip_addr)),
+ ntoh16_ua(tdata_info_tmp->src_tcp_port),
+ ntoh16_ua(tdata_info_tmp->dst_tcp_port)));
+
+ /* If both IP address and TCP port number match, we found it so break. */
+ if (memcmp(&ip_hdr[IPV4_SRC_IP_OFFSET],
+ tdata_info_tmp->src_ip_addr, IPV4_ADDR_LEN * 2) == 0 &&
+ memcmp(&tcp_hdr[TCP_SRC_PORT_OFFSET],
+ tdata_info_tmp->src_tcp_port, TCP_PORT_LEN * 2) == 0) {
+ tcpdata_info = tdata_info_tmp;
+ tcpdata_info->last_used_time = now_in_ms;
+ break;
+ }
+
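+ /* Age out streams that have been idle longer than TCPDATA_INFO_TIMEOUT:
+ * return their PSH records to the pool and compact the table.
+ */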
+ if (now_in_ms - tdata_info_tmp->last_used_time > TCPDATA_INFO_TIMEOUT) {
+ tdata_psh_info_t *tdata_psh_info_tmp;
+ tcpdata_info_t *last_tdata_info;
+
+ while ((tdata_psh_info_tmp = tdata_info_tmp->tdata_psh_info_head)) {
+ tdata_info_tmp->tdata_psh_info_head = tdata_psh_info_tmp->next;
+ tdata_psh_info_tmp->next = NULL;
+ DHD_TRACE(("%s %d: Clean tdata_psh_info(end_seq %u)!\n",
+ __FUNCTION__, __LINE__, tdata_psh_info_tmp->end_seq));
+ _tdata_psh_info_pool_enq(tcpack_sup_mod, tdata_psh_info_tmp);
+ }
+#ifdef DHDTCPACK_SUP_DBG
+ DHD_ERROR(("%s %d: PSH INFO ENQ %d\n",
+ __FUNCTION__, __LINE__, tcpack_sup_mod->psh_info_enq_num));
+#endif /* DHDTCPACK_SUP_DBG */
+ tcpack_sup_mod->tcpdata_info_cnt--;
+ ASSERT(tcpack_sup_mod->tcpdata_info_cnt >= 0);
+
+ last_tdata_info =
+ &tcpack_sup_mod->tcpdata_info_tbl[tcpack_sup_mod->tcpdata_info_cnt];
+ if (i < tcpack_sup_mod->tcpdata_info_cnt) {
+ ASSERT(last_tdata_info != tdata_info_tmp);
+ bcopy(last_tdata_info, tdata_info_tmp, sizeof(tcpdata_info_t));
+ }
+ bzero(last_tdata_info, sizeof(tcpdata_info_t));
+ DHD_TRACE(("%s %d: tcpdata_info(idx %d) is aged out. ttl cnt is now %d\n",
+ __FUNCTION__, __LINE__, i, tcpack_sup_mod->tcpdata_info_cnt));
+ /* Don't increment "i" here, so that the entry just moved into this slot is also checked */
+ } else
+ i++;
+ }
+
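+ /* end_tcp_seq_num is one past the last data byte of this segment; an ACK at
+ * or beyond it means the segment has been fully acknowledged
+ * (see dhd_tcpdata_psh_acked()).
+ */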
+ tcp_seq_num = ntoh32_ua(&tcp_hdr[TCP_SEQ_NUM_OFFSET]);
+ tcp_data_len = ip_total_len - ip_hdr_len - tcp_hdr_len;
+ end_tcp_seq_num = tcp_seq_num + tcp_data_len;
+
+ if (tcpdata_info == NULL) {
+ ASSERT(i == tcpack_sup_mod->tcpdata_info_cnt);
+ if (i >= TCPDATA_INFO_MAXNUM) {
+ DHD_TRACE(("%s %d: tcp_data_info_tbl FULL! %d %d"
+ " IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR" TCP port %d %d\n",
+ __FUNCTION__, __LINE__, i, tcpack_sup_mod->tcpdata_info_cnt,
+ IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_SRC_IP_OFFSET])),
+ IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_DEST_IP_OFFSET])),
+ ntoh16_ua(&tcp_hdr[TCP_SRC_PORT_OFFSET]),
+ ntoh16_ua(&tcp_hdr[TCP_DEST_PORT_OFFSET])));
+ dhd_os_tcpackunlock(dhdp);
+ goto exit;
+ }
+ tcpdata_info = &tcpack_sup_mod->tcpdata_info_tbl[i];
+
+ /* No TCP flow with the same IP addr and TCP port is found
+ * in tcp_data_info_tbl. So add this flow to the table.
+ */
+ DHD_TRACE(("%s %d: Add data info to tbl[%d]: IP addr "IPV4_ADDR_STR" "IPV4_ADDR_STR
+ " TCP port %d %d\n",
+ __FUNCTION__, __LINE__, tcpack_sup_mod->tcpdata_info_cnt,
+ IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_SRC_IP_OFFSET])),
+ IPV4_ADDR_TO_STR(ntoh32_ua(&ip_hdr[IPV4_DEST_IP_OFFSET])),
+ ntoh16_ua(&tcp_hdr[TCP_SRC_PORT_OFFSET]),
+ ntoh16_ua(&tcp_hdr[TCP_DEST_PORT_OFFSET])));
+
+ bcopy(&ip_hdr[IPV4_SRC_IP_OFFSET], tcpdata_info->src_ip_addr,
+ IPV4_ADDR_LEN * 2);
+ bcopy(&tcp_hdr[TCP_SRC_PORT_OFFSET], tcpdata_info->src_tcp_port,
+ TCP_PORT_LEN * 2);
+
+ tcpdata_info->last_used_time = OSL_SYSUPTIME();
+ tcpack_sup_mod->tcpdata_info_cnt++;
+ }
+
+ ASSERT(tcpdata_info != NULL);
+
+ tdata_psh_info = _tdata_psh_info_pool_deq(tcpack_sup_mod);
+#ifdef DHDTCPACK_SUP_DBG
+ DHD_TRACE(("%s %d: PSH INFO ENQ %d\n",
+ __FUNCTION__, __LINE__, tcpack_sup_mod->psh_info_enq_num));
+#endif /* DHDTCPACK_SUP_DBG */
+
+ if (tdata_psh_info == NULL) {
+ DHD_ERROR(("%s %d: No more free tdata_psh_info!!\n", __FUNCTION__, __LINE__));
+ ret = BCME_ERROR;
+ dhd_os_tcpackunlock(dhdp);
+ goto exit;
+ }
+ tdata_psh_info->end_seq = end_tcp_seq_num;
+
+#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
+ tack_tbl.cnt[4]++;
+#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */
+
+ DHD_TRACE(("%s %d: TCP PSH DATA recvd! end seq %u\n",
+ __FUNCTION__, __LINE__, tdata_psh_info->end_seq));
+
+ ASSERT(tdata_psh_info->next == NULL);
+
+ if (tcpdata_info->tdata_psh_info_head == NULL)
+ tcpdata_info->tdata_psh_info_head = tdata_psh_info;
+ else {
+ ASSERT(tcpdata_info->tdata_psh_info_tail);
+ tcpdata_info->tdata_psh_info_tail->next = tdata_psh_info;
+ }
+ tcpdata_info->tdata_psh_info_tail = tdata_psh_info;
+
+ dhd_os_tcpackunlock(dhdp);
+
+exit:
+ return ret;
+}
+
+#endif /* DHDTCPACK_SUPPRESS */
*
* $Copyright Open Broadcom Corporation$
*
- * $Id$
+ * $Id: dhd_ip.h 458522 2014-02-27 02:26:15Z $
*/
#ifndef _dhd_ip_h_
#define _dhd_ip_h_
+#ifdef DHDTCPACK_SUPPRESS
+#include <dngl_stats.h>
+#include <bcmutils.h>
+#include <dhd.h>
+#endif /* DHDTCPACK_SUPPRESS */
+
typedef enum pkt_frag
{
DHD_PKT_FRAG_NONE = 0,
extern pkt_frag_t pkt_frag_info(osl_t *osh, void *p);
+#ifdef DHDTCPACK_SUPPRESS
+#define TCPACKSZMIN (ETHER_HDR_LEN + IPV4_MIN_HEADER_LEN + TCP_MIN_HEADER_LEN)
+/* Size of MAX possible TCP ACK packet. Extra bytes for IP/TCP option fields */
+#define TCPACKSZMAX (TCPACKSZMIN + 100)
+
+/* Max number of TCP streams that have their own src/dst IP addrs and TCP ports */
+#define TCPACK_INFO_MAXNUM 4
+#define TCPDATA_INFO_MAXNUM 4
+#define TCPDATA_PSH_INFO_MAXNUM (8 * TCPDATA_INFO_MAXNUM)
+
+#define TCPDATA_INFO_TIMEOUT 5000 /* Remove tcpdata_info if inactive for this time (in ms) */
+
+extern int dhd_tcpack_suppress_set(dhd_pub_t *dhdp, uint8 on);
+extern void dhd_tcpack_info_tbl_clean(dhd_pub_t *dhdp);
+extern int dhd_tcpack_check_xmit(dhd_pub_t *dhdp, void *pkt);
+extern bool dhd_tcpack_suppress(dhd_pub_t *dhdp, void *pkt);
+extern bool dhd_tcpdata_info_get(dhd_pub_t *dhdp, void *pkt);
+
+/* #define DHDTCPACK_SUP_DBG */
+#if defined(DEBUG_COUNTER) && defined(DHDTCPACK_SUP_DBG)
+extern counter_tbl_t tack_tbl;
+#endif /* DEBUG_COUNTER && DHDTCPACK_SUP_DBG */
+#endif /* DHDTCPACK_SUPPRESS */
+
#endif /* _dhd_ip_h_ */
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: dhd_linux.c 419821 2013-08-22 21:43:26Z $
+ * $Id: dhd_linux.c 491481 2014-07-16 14:08:43Z $
*/
#include <typedefs.h>
#include <linuxver.h>
#include <osl.h>
+#ifdef SHOW_LOGTRACE
+#include <linux/syscalls.h>
+#include <event_log.h>
+#endif /* SHOW_LOGTRACE */
+
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/fs.h>
#include <linux/ip.h>
+#include <linux/reboot.h>
+#include <linux/notifier.h>
#include <net/addrconf.h>
+#ifdef ENABLE_ADAPTIVE_SCHED
+#include <linux/cpufreq.h>
+#endif /* ENABLE_ADAPTIVE_SCHED */
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <bcmdevs.h>
#include <proto/ethernet.h>
-#include <proto/bcmip.h>
+#include <proto/bcmevent.h>
+#include <proto/vlan.h>
+#include <proto/bcmudp.h>
+#include <proto/bcmdhcp.h>
+#ifdef DHD_L2_FILTER
+#include <proto/bcmicmp.h>
+#endif
+#include <proto/802.3.h>
+
#include <dngl_stats.h>
+#include <dhd_linux_wq.h>
#include <dhd.h>
+#include <dhd_linux.h>
+#ifdef PCIE_FULL_DONGLE
+#include <dhd_flowring.h>
+#endif
#include <dhd_bus.h>
#include <dhd_proto.h>
#include <dhd_config.h>
#include <dhd_bta.h>
#endif
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+#endif
+
+#ifdef DHD_WMF
+#include <dhd_wmf_linux.h>
+#endif /* DHD_WMF */
+
+#ifdef AMPDU_VO_ENABLE
+#include <proto/802.1d.h>
+#endif /* AMPDU_VO_ENABLE */
+#ifdef DHDTCPACK_SUPPRESS
+#include <dhd_ip.h>
+#endif /* DHDTCPACK_SUPPRESS */
+
+#if defined(DHD_TCP_WINSIZE_ADJUST)
+#include <linux/tcp.h>
+#include <net/tcp.h>
+#endif /* DHD_TCP_WINSIZE_ADJUST */
+
#ifdef WLMEDIA_HTSF
#include <linux/time.h>
#include <htsf.h>
static histo_t vi_d1, vi_d2, vi_d3, vi_d4;
#endif /* WLMEDIA_HTSF */
+#if defined(DHD_TCP_WINSIZE_ADJUST)
+#define MIN_TCP_WIN_SIZE 18000
+#define WIN_SIZE_SCALE_FACTOR 2
+#define MAX_TARGET_PORTS 5
+
+static uint target_ports[MAX_TARGET_PORTS] = {20, 0, 0, 0, 0};
+static uint dhd_use_tcp_window_size_adjust = FALSE;
+static void dhd_adjust_tcp_winsize(int op_mode, struct sk_buff *skb);
+#endif /* DHD_TCP_WINSIZE_ADJUST */
+
#if defined(SOFTAP)
extern bool ap_cfg_running;
#endif
+#ifdef ENABLE_ADAPTIVE_SCHED
+#define DEFAULT_CPUFREQ_THRESH 1000000 /* threshold frequency : 1000000 = 1GHz */
+#ifndef CUSTOM_CPUFREQ_THRESH
+#define CUSTOM_CPUFREQ_THRESH DEFAULT_CPUFREQ_THRESH
+#endif /* CUSTOM_CPUFREQ_THRESH */
+#endif /* ENABLE_ADAPTIVE_SCHED */
+
/* enable HOSTIP cache update from the host side when an eth0:N is up */
#define AOE_IP_ALIAS_SUPPORT 1
#include <wl_android.h>
+/* Maximum STA per radio */
+#define DHD_MAX_STA 32
+
+
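+/* prio2fifo maps an 802.1D priority to a fifo index and wme_fifo2ac maps a
+ * fifo index to a WME access category; WME_PRIO2AC() composes the two.
+ */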
+const uint8 wme_fifo2ac[] = { 0, 1, 2, 3, 1, 1 };
+const uint8 prio2fifo[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
+#define WME_PRIO2AC(prio) wme_fifo2ac[prio2fifo[(prio)]]
+
#ifdef ARP_OFFLOAD_SUPPORT
void aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx);
-static int dhd_device_event(struct notifier_block *this,
- unsigned long event,
- void *ptr);
-
-static struct notifier_block dhd_notifier = {
- .notifier_call = dhd_device_event
+static int dhd_inetaddr_notifier_call(struct notifier_block *this,
+ unsigned long event, void *ptr);
+static struct notifier_block dhd_inetaddr_notifier = {
+ .notifier_call = dhd_inetaddr_notifier_call
};
+/* to make sure we won't register the same notifier twice; otherwise a loop is likely to be
+ * created in the kernel notifier linked list (with 'next' pointing to itself)
+ */
+static bool dhd_inetaddr_notifier_registered = FALSE;
#endif /* ARP_OFFLOAD_SUPPORT */
-static int dhd_device_ipv6_event(struct notifier_block *this,
- unsigned long event,
- void *ptr);
-static struct notifier_block dhd_notifier_ipv6 = {
- .notifier_call = dhd_device_ipv6_event
+#ifdef CONFIG_IPV6
+static int dhd_inet6addr_notifier_call(struct notifier_block *this,
+ unsigned long event, void *ptr);
+static struct notifier_block dhd_inet6addr_notifier = {
+ .notifier_call = dhd_inet6addr_notifier_call
};
+/* to make sure we won't register the same notifier twice; otherwise a loop is likely to be
+ * created in the kernel notifier linked list (with 'next' pointing to itself)
+ */
+static bool dhd_inet6addr_notifier_registered = FALSE;
+#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
#include <linux/suspend.h>
extern void dhd_enable_oob_intr(struct dhd_bus *bus, bool enable);
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (1)
-static void dhd_hang_process(struct work_struct *work);
+static void dhd_hang_process(void *dhd_info, void *event_data, u8 event);
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
MODULE_LICENSE("GPL v2");
#ifdef PROP_TXSTATUS
extern bool dhd_wlfc_skip_fc(void);
-extern void dhd_wlfc_plat_enable(void *dhd);
+extern void dhd_wlfc_plat_init(void *dhd);
extern void dhd_wlfc_plat_deinit(void *dhd);
#endif /* PROP_TXSTATUS */
#else
static inline int dhd_write_macaddr(struct ether_addr *mac) { return 0; }
#endif
-struct ipv6_addr {
- char ipv6_addr[IPV6_ADDR_LEN];
- dhd_ipv6_op_t ipv6_oper;
- struct list_head list;
+
+#if defined(SOFTAP_TPUT_ENHANCE)
+extern void dhd_bus_setidletime(dhd_pub_t *dhdp, int idle_time);
+extern void dhd_bus_getidletime(dhd_pub_t *dhdp, int* idle_time);
+#endif /* SOFTAP_TPUT_ENHANCE */
+
+
+static int dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused);
+static struct notifier_block dhd_reboot_notifier = {
+ .notifier_call = dhd_reboot_callback,
+ .priority = 1,
};
+
+typedef struct dhd_if_event {
+ struct list_head list;
+ wl_event_data_if_t event;
+ char name[IFNAMSIZ+1];
+ uint8 mac[ETHER_ADDR_LEN];
+} dhd_if_event_t;
+
/* Interface control information */
typedef struct dhd_if {
struct dhd_info *info; /* back pointer to dhd_info */
/* OS/stack specifics */
struct net_device *net;
- struct net_device_stats stats;
- int idx; /* iface idx in dongle */
- dhd_if_state_t state; /* interface state */
- uint subunit; /* subunit */
+ int idx; /* iface idx in dongle */
+ uint subunit; /* subunit */
uint8 mac_addr[ETHER_ADDR_LEN]; /* assigned MAC address */
+ bool set_macaddress;
+ bool set_multicast;
+ uint8 bssidx; /* bsscfg index for the interface */
bool attached; /* Delayed attachment when unset */
bool txflowcontrol; /* Per interface flow control indicator */
char name[IFNAMSIZ+1]; /* linux interface name */
- uint8 bssidx; /* bsscfg index for the interface */
- bool set_multicast;
- struct list_head ipv6_list;
- spinlock_t ipv6_lock;
- bool event2cfg80211; /* To determine if pass event to cfg80211 */
+ struct net_device_stats stats;
+#ifdef DHD_WMF
+ dhd_wmf_t wmf; /* per bsscfg wmf setting */
+#endif /* DHD_WMF */
+#ifdef PCIE_FULL_DONGLE
+ struct list_head sta_list; /* sll of associated stations */
+#if !defined(BCM_GMAC3)
+ spinlock_t sta_list_lock; /* lock for manipulating sll */
+#endif /* ! BCM_GMAC3 */
+#endif /* PCIE_FULL_DONGLE */
+ uint32 ap_isolate; /* ap-isolation settings */
} dhd_if_t;
#ifdef WLMEDIA_HTSF
#endif /* WLMEDIA_HTSF */
+struct ipv6_work_info_t {
+ uint8 if_idx;
+ char ipv6_addr[16];
+ unsigned long event;
+};
+
+/* When Perimeter locks are deployed, any blocking calls must be preceded
+ * by a PERIM UNLOCK and followed by a PERIM LOCK.
+ * Examples of blocking calls are: schedule_timeout(), down_interruptible(),
+ * wait_event_timeout().
+ */
+
/* Local private structure (extension of pub) */
typedef struct dhd_info {
#if defined(WL_WIRELESS_EXT)
wl_iw_t iw; /* wireless extensions state (must be first) */
#endif /* defined(WL_WIRELESS_EXT) */
-
dhd_pub_t pub;
+ dhd_if_t *iflist[DHD_MAX_IFS]; /* for supporting multiple interfaces */
- /* For supporting multiple interfaces */
- dhd_if_t *iflist[DHD_MAX_IFS];
+ void *adapter; /* adapter information, interrupt, fw path etc. */
+ char fw_path[PATH_MAX]; /* path to firmware image */
+ char nv_path[PATH_MAX]; /* path to nvram vars file */
+ char conf_path[PATH_MAX]; /* path to config vars file */
struct semaphore proto_sem;
#ifdef PROP_TXSTATUS
spinlock_t wlfc_spinlock;
+
#endif /* PROP_TXSTATUS */
#ifdef WLMEDIA_HTSF
htsf_t htsf;
spinlock_t sdlock;
spinlock_t txqlock;
spinlock_t dhd_lock;
-#ifdef DHDTHREAD
- /* Thread based operation */
- bool threads_only;
- struct semaphore sdsem;
+ struct semaphore sdsem;
tsk_ctl_t thr_dpc_ctl;
tsk_ctl_t thr_wdt_ctl;
-#ifdef RXFRAME_THREAD
+
tsk_ctl_t thr_rxf_ctl;
spinlock_t rxf_lock;
-#endif /* RXFRAME_THREAD */
-#endif /* DHDTHREAD */
- bool dhd_tasklet_create;
- tsk_ctl_t thr_sysioc_ctl;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
- struct work_struct work_hang;
-#endif
+ bool rxthread_enabled;
/* Wakelocks */
#if defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
struct mutex dhd_suspend_mutex;
#endif
spinlock_t wakelock_spinlock;
- int wakelock_counter;
+ uint32 wakelock_counter;
int wakelock_wd_counter;
int wakelock_rx_timeout_enable;
int wakelock_ctrl_timeout_enable;
+ bool waive_wakelock;
+ uint32 wakelock_before_waive;
/* Thread to issue ioctl for multicast */
- unsigned char set_macaddress;
- struct ether_addr macvalue;
wait_queue_head_t ctrl_wait;
atomic_t pend_8021x_cnt;
dhd_attach_states_t dhd_state;
+#ifdef SHOW_LOGTRACE
+ dhd_event_log_t event_data;
+#endif /* SHOW_LOGTRACE */
#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
struct early_suspend early_suspend;
#ifdef DHDTCPACK_SUPPRESS
spinlock_t tcpack_lock;
#endif /* DHDTCPACK_SUPPRESS */
+ void *dhd_deferred_wq;
+#ifdef DEBUG_CPU_FREQ
+ struct notifier_block freq_trans;
+ int __percpu *new_freq;
+#endif
+ unsigned int unit;
+ struct notifier_block pm_notifier;
} dhd_info_t;
+#define DHDIF_FWDER(dhdif) FALSE
+
/* Flag to indicate if we should download firmware on driver load */
uint dhd_download_fw_on_driverload = TRUE;
char nvram_path[MOD_PARAM_PATHLEN];
char config_path[MOD_PARAM_PATHLEN];
+/* backup buffer for firmware and nvram path */
+char fw_bak_path[MOD_PARAM_PATHLEN];
+char nv_bak_path[MOD_PARAM_PATHLEN];
+
/* information string to keep firmware/chip version info visible from the log */
char info_string[MOD_PARAM_INFOLEN];
module_param_string(info_string, info_string, MOD_PARAM_INFOLEN, 0444);
int disable_proptx = 0;
module_param(op_mode, int, 0644);
extern int wl_control_wl_start(struct net_device *dev);
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(BCMLXSDMMC)
struct semaphore dhd_registration_sem;
-struct semaphore dhd_chipup_sem;
-int dhd_registration_check = FALSE;
-
-#define DHD_REGISTRATION_TIMEOUT 12000 /* msec : allowed time to finished dhd registration */
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
-/* Spawn a thread for system ioctls (set mac, set mcast) */
-uint dhd_sysioc = TRUE;
-module_param(dhd_sysioc, uint, 0);
+/* deferred handlers */
+static void dhd_ifadd_event_handler(void *handle, void *event_info, u8 event);
+static void dhd_ifdel_event_handler(void *handle, void *event_info, u8 event);
+static void dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event);
+static void dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event);
+#ifdef CONFIG_IPV6
+static void dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event);
+#endif
+
+#ifdef WL_CFG80211
+extern void dhd_netdev_free(struct net_device *ndev);
+#endif /* WL_CFG80211 */
/* Error bits */
module_param(dhd_msg_level, int, 0);
module_param(dhd_arp_mode, uint, 0);
#endif /* ARP_OFFLOAD_SUPPORT */
-
-
/* Disable Prop tx */
module_param(disable_proptx, int, 0644);
/* load firmware and/or nvram values from the filesystem */
module_param_string(firmware_path, firmware_path, MOD_PARAM_PATHLEN, 0660);
-module_param_string(nvram_path, nvram_path, MOD_PARAM_PATHLEN, 0);
+module_param_string(nvram_path, nvram_path, MOD_PARAM_PATHLEN, 0660);
module_param_string(config_path, config_path, MOD_PARAM_PATHLEN, 0);
/* Watchdog interval */
/* extend watchdog expiration to 2 seconds when DPC is running */
#define WATCHDOG_EXTEND_INTERVAL (2000)
-uint dhd_watchdog_ms = 10;
+uint dhd_watchdog_ms = CUSTOM_DHD_WATCHDOG_MS;
module_param(dhd_watchdog_ms, uint, 0);
#if defined(DHD_DEBUG)
module_param(dhd_console_ms, uint, 0644);
#endif /* defined(DHD_DEBUG) */
+
uint dhd_slpauto = TRUE;
module_param(dhd_slpauto, uint, 0);
module_param(dhd_pkt_filter_init, uint, 0);
/* Pkt filter mode control */
-uint dhd_master_mode = TRUE;
+uint dhd_master_mode = FALSE;
module_param(dhd_master_mode, uint, 0);
-#ifdef DHDTHREAD
int dhd_watchdog_prio = 0;
module_param(dhd_watchdog_prio, int, 0);
int dhd_dpc_prio = CUSTOM_DPC_PRIO_SETTING;
module_param(dhd_dpc_prio, int, 0);
-#ifdef RXFRAME_THREAD
/* RX frame thread priority */
int dhd_rxf_prio = CUSTOM_RXF_PRIO_SETTING;
module_param(dhd_rxf_prio, int, 0);
-#endif /* RXFRAME_THREAD */
-/* DPC thread priority, -1 to use tasklet */
+#if !defined(BCMDHDUSB)
extern int dhd_dongle_ramsize;
module_param(dhd_dongle_ramsize, int, 0);
-#endif /* DHDTHREAD */
+#endif /* BCMDHDUSB */
+
+/* Keep track of number of instances */
+static int dhd_found = 0;
+static int instance_base = 0; /* Starting instance number */
+module_param(instance_base, int, 0644);
+
+
+/* DHD Perimeter lock, only used in routers with bypass forwarding. */
+#define DHD_PERIM_RADIO_INIT() do { /* noop */ } while (0)
+#define DHD_PERIM_LOCK_TRY(unit, flag) do { /* noop */ } while (0)
+#define DHD_PERIM_UNLOCK_TRY(unit, flag) do { /* noop */ } while (0)
+#define DHD_PERIM_LOCK_ALL() do { /* noop */ } while (0)
+#define DHD_PERIM_UNLOCK_ALL() do { /* noop */ } while (0)
+
+#ifdef PCIE_FULL_DONGLE
+#if defined(BCM_GMAC3)
+#define DHD_IF_STA_LIST_LOCK_INIT(ifp) do { /* noop */ } while (0)
+#define DHD_IF_STA_LIST_LOCK(ifp, flags) ({ BCM_REFERENCE(flags); })
+#define DHD_IF_STA_LIST_UNLOCK(ifp, flags) ({ BCM_REFERENCE(flags); })
+#else /* ! BCM_GMAC3 */
+#define DHD_IF_STA_LIST_LOCK_INIT(ifp) spin_lock_init(&(ifp)->sta_list_lock)
+#define DHD_IF_STA_LIST_LOCK(ifp, flags) \
+ spin_lock_irqsave(&(ifp)->sta_list_lock, (flags))
+#define DHD_IF_STA_LIST_UNLOCK(ifp, flags) \
+ spin_unlock_irqrestore(&(ifp)->sta_list_lock, (flags))
+#endif /* ! BCM_GMAC3 */
+#endif /* PCIE_FULL_DONGLE */
+
/* Control fw roaming */
#ifdef BCMCCX
uint dhd_roam_disable = 0;
uint dhd_sdiod_drive_strength = 6;
module_param(dhd_sdiod_drive_strength, uint, 0);
+#ifdef BCMSDIO
/* Tx/Rx bounds */
extern uint dhd_txbound;
extern uint dhd_rxbound;
extern void dhd_dbg_remove(void);
#endif /* BCMDBGFS */
+#endif /* BCMSDIO */
#ifdef SDTEST
module_param(dhd_pktgen_len, uint, 0);
#endif /* SDTEST */
-/* Version string to report */
-#ifdef DHD_DEBUG
-#ifndef SRCBASE
-#define SRCBASE "drivers/net/wireless/bcmdhd"
-#endif
-#define DHD_COMPILED "\nCompiled in " SRCBASE
-#else
-#define DHD_COMPILED
-#endif /* DHD_DEBUG */
+#if defined(BCMSUP_4WAY_HANDSHAKE)
+/* Use in dongle supplicant for 4-way handshake */
+uint dhd_use_idsup = 0;
+module_param(dhd_use_idsup, uint, 0);
+#endif /* BCMSUP_4WAY_HANDSHAKE */
-static char dhd_version[] = "Dongle Host Driver, version " EPI_VERSION_STR
-#ifdef DHD_DEBUG
-"\nCompiled in " SRCBASE " on " __DATE__ " at " __TIME__
-#endif
-;
+extern char dhd_version[];
+
+int dhd_net_bus_devreset(struct net_device *dev, uint8 flag);
static void dhd_net_if_lock_local(dhd_info_t *dhd);
static void dhd_net_if_unlock_local(dhd_info_t *dhd);
static void dhd_suspend_lock(dhd_pub_t *dhdp);
int dhd_monitor_uninit(void);
-
#if defined(WL_WIRELESS_EXT)
struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev);
#endif /* defined(WL_WIRELESS_EXT) */
static int dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
wl_event_msg_t *event_ptr, void **data_ptr);
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
- KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM_SLEEP)
-static int dhd_sleep_pm_callback(struct notifier_block *nfb, unsigned long action, void *ignored)
+#ifdef DHD_UNICAST_DHCP
+static const uint8 llc_snap_hdr[SNAP_HDR_LEN] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
+static int dhd_get_pkt_ip_type(dhd_pub_t *dhd, void *skb, uint8 **data_ptr,
+ int *len_ptr, uint8 *prot_ptr);
+static int dhd_get_pkt_ether_type(dhd_pub_t *dhd, void *skb, uint8 **data_ptr,
+ int *len_ptr, uint16 *et_ptr, bool *snap_ptr);
+
+static int dhd_convert_dhcp_broadcast_ack_to_unicast(dhd_pub_t *pub, void *pktbuf, int ifidx);
+#endif /* DHD_UNICAST_DHCP */
+#ifdef DHD_L2_FILTER
+static int dhd_l2_filter_block_ping(dhd_pub_t *pub, void *pktbuf, int ifidx);
+#endif
+#if defined(CONFIG_PM_SLEEP)
+static int dhd_pm_callback(struct notifier_block *nfb, unsigned long action, void *ignored)
{
int ret = NOTIFY_DONE;
+ bool suspend = FALSE;
+ dhd_info_t *dhdinfo = (dhd_info_t*)container_of(nfb, struct dhd_info, pm_notifier);
+ BCM_REFERENCE(dhdinfo);
switch (action) {
case PM_HIBERNATION_PREPARE:
case PM_SUSPEND_PREPARE:
- dhd_mmc_suspend = TRUE;
- ret = NOTIFY_OK;
+ suspend = TRUE;
break;
case PM_POST_HIBERNATION:
case PM_POST_SUSPEND:
- dhd_mmc_suspend = FALSE;
- ret = NOTIFY_OK;
+ suspend = FALSE;
break;
}
+
+#if defined(SUPPORT_P2P_GO_PS)
+#ifdef PROP_TXSTATUS
+ if (suspend) {
+ DHD_OS_WAKE_LOCK_WAIVE(&dhdinfo->pub);
+ dhd_wlfc_suspend(&dhdinfo->pub);
+ DHD_OS_WAKE_LOCK_RESTORE(&dhdinfo->pub);
+ } else
+ dhd_wlfc_resume(&dhdinfo->pub);
+#endif
+#endif /* defined(SUPPORT_P2P_GO_PS) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
+ KERNEL_VERSION(2, 6, 39))
+ dhd_mmc_suspend = suspend;
smp_mb();
+#endif
+
return ret;
}
-static struct notifier_block dhd_sleep_pm_notifier = {
- .notifier_call = dhd_sleep_pm_callback,
+static struct notifier_block dhd_pm_notifier = {
+ .notifier_call = dhd_pm_callback,
.priority = 10
};
+/* to make sure we won't register the same notifier twice; otherwise a loop is likely to be
+ * created in the kernel notifier linked list (with 'next' pointing to itself)
+ */
+static bool dhd_pm_notifier_registered = FALSE;
+
extern int register_pm_notifier(struct notifier_block *nb);
extern int unregister_pm_notifier(struct notifier_block *nb);
-#endif /* (LINUX_VERSION >= 2.6.27 && LINUX_VERSION <= 2.6.39 && CONFIG_PM_SLEEP */
+#endif /* CONFIG_PM_SLEEP */
-#if defined(DHDTHREAD) && defined(RXFRAME_THREAD)
/* Request scheduling of the bus rx frame */
static void dhd_sched_rxf(dhd_pub_t *dhdp, void *skb);
static void dhd_os_rxflock(dhd_pub_t *pub);
static void dhd_os_rxfunlock(dhd_pub_t *pub);
+/** priv_link is the link between netdev and the dhdif and dhd_info structs. */
+typedef struct dhd_dev_priv {
+ dhd_info_t * dhd; /* cached pointer to dhd_info in netdevice priv */
+ dhd_if_t * ifp; /* cached pointer to dhd_if in netdevice priv */
+ int ifidx; /* interface index */
+} dhd_dev_priv_t;
+
+#define DHD_DEV_PRIV_SIZE (sizeof(dhd_dev_priv_t))
+#define DHD_DEV_PRIV(dev) ((dhd_dev_priv_t *)DEV_PRIV(dev))
+#define DHD_DEV_INFO(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->dhd)
+#define DHD_DEV_IFP(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifp)
+#define DHD_DEV_IFIDX(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifidx)
+
+/** Clear the dhd net_device's private structure. */
+static inline void
+dhd_dev_priv_clear(struct net_device * dev)
+{
+ dhd_dev_priv_t * dev_priv;
+ ASSERT(dev != (struct net_device *)NULL);
+ dev_priv = DHD_DEV_PRIV(dev);
+ dev_priv->dhd = (dhd_info_t *)NULL;
+ dev_priv->ifp = (dhd_if_t *)NULL;
+ dev_priv->ifidx = DHD_BAD_IF;
+}
+
+/** Setup the dhd net_device's private structure. */
+static inline void
+dhd_dev_priv_save(struct net_device * dev, dhd_info_t * dhd, dhd_if_t * ifp,
+ int ifidx)
+{
+ dhd_dev_priv_t * dev_priv;
+ ASSERT(dev != (struct net_device *)NULL);
+ dev_priv = DHD_DEV_PRIV(dev);
+ dev_priv->dhd = dhd;
+ dev_priv->ifp = ifp;
+ dev_priv->ifidx = ifidx;
+}
+
+#ifdef PCIE_FULL_DONGLE
+
+/** Dummy objects are defined with state representing bad|down.
+ * Performance gains come from reducing branch conditionals, better instruction
+ * parallelism and dual issue, fewer load shadows, and the availability of larger pipelines.
+ * Use DHD_XXX_NULL instead of (dhd_xxx_t *)NULL whenever an object pointer
+ * is accessed via the dhd_sta_t.
+ */
+
+/* Dummy dhd_info object */
+dhd_info_t dhd_info_null = {
+#if defined(BCM_GMAC3)
+ .fwdh = FWDER_NULL,
+#endif
+ .pub = {
+ .info = &dhd_info_null,
+#ifdef DHDTCPACK_SUPPRESS
+ .tcpack_sup_mode = TCPACK_SUP_REPLACE,
+#endif /* DHDTCPACK_SUPPRESS */
+ .up = FALSE, .busstate = DHD_BUS_DOWN
+ }
+};
+#define DHD_INFO_NULL (&dhd_info_null)
+#define DHD_PUB_NULL (&dhd_info_null.pub)
+
+/* Dummy netdevice object */
+struct net_device dhd_net_dev_null = {
+ .reg_state = NETREG_UNREGISTERED
+};
+#define DHD_NET_DEV_NULL (&dhd_net_dev_null)
+
+/* Dummy dhd_if object */
+dhd_if_t dhd_if_null = {
+#if defined(BCM_GMAC3)
+ .fwdh = FWDER_NULL,
+#endif
+#ifdef WMF
+ .wmf = { .wmf_enable = TRUE },
+#endif
+ .info = DHD_INFO_NULL,
+ .net = DHD_NET_DEV_NULL,
+ .idx = DHD_BAD_IF
+};
+#define DHD_IF_NULL (&dhd_if_null)
+
+#define DHD_STA_NULL ((dhd_sta_t *)NULL)
+
+/** Interface STA list management. */
+
+/** Fetch the dhd_if object, given the interface index in the dhd. */
+static inline dhd_if_t *dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx);
+
+/** Alloc/Free a dhd_sta object from the dhd instances' sta_pool. */
+static void dhd_sta_free(dhd_pub_t *pub, dhd_sta_t *sta);
+static dhd_sta_t * dhd_sta_alloc(dhd_pub_t * dhdp);
+
+/* Delete a dhd_sta or flush all dhd_sta in an interface's sta_list. */
+static void dhd_if_del_sta_list(dhd_if_t * ifp);
+static void dhd_if_flush_sta(dhd_if_t * ifp);
+
+/* Construct/Destruct a sta pool. */
+static int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta);
+static void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta);
+
+
+/* Return interface pointer */
+static inline dhd_if_t *dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx)
+{
+ ASSERT(ifidx < DHD_MAX_IFS);
+ return dhdp->info->iflist[ifidx];
+}
+
+/** Reset a dhd_sta object and free into the dhd pool. */
+static void
+dhd_sta_free(dhd_pub_t * dhdp, dhd_sta_t * sta)
+{
+ int prio;
+
+ ASSERT((sta != DHD_STA_NULL) && (sta->idx != ID16_INVALID));
+
+ ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));
+ id16_map_free(dhdp->staid_allocator, sta->idx);
+ for (prio = 0; prio < (int)NUMPRIO; prio++)
+ sta->flowid[prio] = FLOWID_INVALID;
+ sta->ifp = DHD_IF_NULL; /* dummy dhd_if object */
+ sta->ifidx = DHD_BAD_IF;
+ bzero(sta->ea.octet, ETHER_ADDR_LEN);
+ INIT_LIST_HEAD(&sta->list);
+ sta->idx = ID16_INVALID; /* implying free */
+}
+
+/** Allocate a dhd_sta object from the dhd pool. */
+static dhd_sta_t *
+dhd_sta_alloc(dhd_pub_t * dhdp)
+{
+ uint16 idx;
+ dhd_sta_t * sta;
+ dhd_sta_pool_t * sta_pool;
+
+ ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));
+
+ idx = id16_map_alloc(dhdp->staid_allocator);
+ if (idx == ID16_INVALID) {
+ DHD_ERROR(("%s: cannot get free staid\n", __FUNCTION__));
+ return DHD_STA_NULL;
+ }
+
+ sta_pool = (dhd_sta_pool_t *)(dhdp->sta_pool);
+ sta = &sta_pool[idx];
+
+ ASSERT((sta->idx == ID16_INVALID) &&
+ (sta->ifp == DHD_IF_NULL) && (sta->ifidx == DHD_BAD_IF));
+ sta->idx = idx; /* implying allocated */
+
+ return sta;
+}
+
+/** Delete all STAs in an interface's STA list. */
+static void
+dhd_if_del_sta_list(dhd_if_t *ifp)
+{
+ dhd_sta_t *sta, *next;
+ unsigned long flags;
+
+ DHD_IF_STA_LIST_LOCK(ifp, flags);
+
+ list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
+#if defined(BCM_GMAC3)
+ if (ifp->fwdh) {
+ /* Remove sta from WOFA forwarder. */
+ fwder_deassoc(ifp->fwdh, (uint16 *)(sta->ea.octet), (wofa_t)sta);
+ }
+#endif /* BCM_GMAC3 */
+ list_del(&sta->list);
+ dhd_sta_free(&ifp->info->pub, sta);
+ }
+
+ DHD_IF_STA_LIST_UNLOCK(ifp, flags);
+
+ return;
+}
+
+/** Router/GMAC3: Flush all station entries in the forwarder's WOFA database. */
+static void
+dhd_if_flush_sta(dhd_if_t * ifp)
+{
+#if defined(BCM_GMAC3)
+
+ if (ifp && (ifp->fwdh != FWDER_NULL)) {
+ dhd_sta_t *sta, *next;
+ unsigned long flags;
+
+ DHD_IF_STA_LIST_LOCK(ifp, flags);
+
+ list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
+ /* Remove any sta entry from WOFA forwarder. */
+ fwder_flush(ifp->fwdh, (wofa_t)sta);
+ }
+
+ DHD_IF_STA_LIST_UNLOCK(ifp, flags);
+ }
+#endif /* BCM_GMAC3 */
+}
+
+/** Construct a pool of dhd_sta_t objects to be used by interfaces. */
+static int
+dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta)
+{
+ int idx, sta_pool_memsz;
+ dhd_sta_t * sta;
+ dhd_sta_pool_t * sta_pool;
+ void * staid_allocator;
+
+ ASSERT(dhdp != (dhd_pub_t *)NULL);
+ ASSERT((dhdp->staid_allocator == NULL) && (dhdp->sta_pool == NULL));
+
+ /* dhd_sta objects per radio are managed in a table. id#0 reserved. */
+ staid_allocator = id16_map_init(dhdp->osh, max_sta, 1);
+ if (staid_allocator == NULL) {
+ DHD_ERROR(("%s: sta id allocator init failure\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ /* Pre allocate a pool of dhd_sta objects (one extra). */
+ sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t)); /* skip idx 0 */
+ sta_pool = (dhd_sta_pool_t *)MALLOC(dhdp->osh, sta_pool_memsz);
+ if (sta_pool == NULL) {
+ DHD_ERROR(("%s: sta table alloc failure\n", __FUNCTION__));
+ id16_map_fini(dhdp->osh, staid_allocator);
+ return BCME_ERROR;
+ }
+
+ dhdp->sta_pool = sta_pool;
+ dhdp->staid_allocator = staid_allocator;
+
+ /* Initialize all sta(s) for the pre-allocated free pool. */
+ bzero((uchar *)sta_pool, sta_pool_memsz);
+ for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
+ sta = &sta_pool[idx];
+ sta->idx = id16_map_alloc(staid_allocator);
+ ASSERT(sta->idx <= max_sta);
+ }
+ /* Now place them into the pre-allocated free pool. */
+ for (idx = 1; idx <= max_sta; idx++) {
+ sta = &sta_pool[idx];
+ dhd_sta_free(dhdp, sta);
+ }
+
+ return BCME_OK;
+}
+
+/** Destruct the pool of dhd_sta_t objects.
+ * Caller must ensure that no STA objects are currently associated with an if.
+ */
+static void
+dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta)
+{
+ dhd_sta_pool_t * sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
+
+ if (sta_pool) {
+ int idx;
+ int sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
+ for (idx = 1; idx <= max_sta; idx++) {
+ ASSERT(sta_pool[idx].ifp == DHD_IF_NULL);
+ ASSERT(sta_pool[idx].idx == ID16_INVALID);
+ }
+ MFREE(dhdp->osh, dhdp->sta_pool, sta_pool_memsz);
+ dhdp->sta_pool = NULL;
+ }
+
+ id16_map_fini(dhdp->osh, dhdp->staid_allocator);
+ dhdp->staid_allocator = NULL;
+}
+
+/** Find STA with MAC address ea in an interface's STA list. */
+dhd_sta_t *
+dhd_find_sta(void *pub, int ifidx, void *ea)
+{
+ dhd_sta_t *sta;
+ dhd_if_t *ifp;
+ unsigned long flags;
+
+ ASSERT(ea != NULL);
+ ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
+
+ DHD_IF_STA_LIST_LOCK(ifp, flags);
+
+ list_for_each_entry(sta, &ifp->sta_list, list) {
+ if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
+ DHD_IF_STA_LIST_UNLOCK(ifp, flags);
+ return sta;
+ }
+ }
+
+ DHD_IF_STA_LIST_UNLOCK(ifp, flags);
+
+ return DHD_STA_NULL;
+}
+
+/** Add STA into the interface's STA list. */
+dhd_sta_t *
+dhd_add_sta(void *pub, int ifidx, void *ea)
+{
+ dhd_sta_t *sta;
+ dhd_if_t *ifp;
+ unsigned long flags;
+
+ ASSERT(ea != NULL);
+ ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
+
+ sta = dhd_sta_alloc((dhd_pub_t *)pub);
+ if (sta == DHD_STA_NULL) {
+ DHD_ERROR(("%s: Alloc failed\n", __FUNCTION__));
+ return DHD_STA_NULL;
+ }
+
+ memcpy(sta->ea.octet, ea, ETHER_ADDR_LEN);
+
+ /* link the sta and the dhd interface */
+ sta->ifp = ifp;
+ sta->ifidx = ifidx;
+ INIT_LIST_HEAD(&sta->list);
+
+ DHD_IF_STA_LIST_LOCK(ifp, flags);
+
+ list_add_tail(&sta->list, &ifp->sta_list);
+
+#if defined(BCM_GMAC3)
+ if (ifp->fwdh) {
+ ASSERT(ISALIGNED(ea, 2));
+ /* Add sta to WOFA forwarder. */
+ fwder_reassoc(ifp->fwdh, (uint16 *)ea, (wofa_t)sta);
+ }
+#endif /* BCM_GMAC3 */
+
+ DHD_IF_STA_LIST_UNLOCK(ifp, flags);
+
+ return sta;
+}
+
+/** Delete STA from the interface's STA list. */
+void
+dhd_del_sta(void *pub, int ifidx, void *ea)
+{
+ dhd_sta_t *sta, *next;
+ dhd_if_t *ifp;
+ unsigned long flags;
+
+ ASSERT(ea != NULL);
+ ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
+
+ DHD_IF_STA_LIST_LOCK(ifp, flags);
+
+ list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
+ if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
+#if defined(BCM_GMAC3)
+ if (ifp->fwdh) { /* Found a sta, remove from WOFA forwarder. */
+ ASSERT(ISALIGNED(ea, 2));
+ fwder_deassoc(ifp->fwdh, (uint16 *)ea, (wofa_t)sta);
+ }
+#endif /* BCM_GMAC3 */
+ list_del(&sta->list);
+ dhd_sta_free(&ifp->info->pub, sta);
+ }
+ }
+
+ DHD_IF_STA_LIST_UNLOCK(ifp, flags);
+
+ return;
+}
+
+/** Add STA if it doesn't exist. Not reentrant. */
+dhd_sta_t*
+dhd_findadd_sta(void *pub, int ifidx, void *ea)
+{
+ dhd_sta_t *sta;
+
+ sta = dhd_find_sta(pub, ifidx, ea);
+
+ if (!sta) {
+ /* Add entry */
+ sta = dhd_add_sta(pub, ifidx, ea);
+ }
+
+ return sta;
+}
+#else
+static inline void dhd_if_flush_sta(dhd_if_t * ifp) { }
+static inline void dhd_if_del_sta_list(dhd_if_t *ifp) {}
+static inline int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta) { return BCME_OK; }
+static inline void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta) {}
+dhd_sta_t *dhd_findadd_sta(void *pub, int ifidx, void *ea) { return NULL; }
+void dhd_del_sta(void *pub, int ifidx, void *ea) {}
+#endif /* PCIE_FULL_DONGLE */
+
+
+/* Returns the dhd iflist index corresponding to the bssidx provided by apps */
+int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx)
+{
+ dhd_if_t *ifp;
+ dhd_info_t *dhd = dhdp->info;
+ int i;
+
+ ASSERT(bssidx < DHD_MAX_IFS);
+ ASSERT(dhdp);
+
+ for (i = 0; i < DHD_MAX_IFS; i++) {
+ ifp = dhd->iflist[i];
+ if (ifp && (ifp->bssidx == bssidx)) {
+ DHD_TRACE(("Index manipulated for %s from %d to %d\n",
+ ifp->name, bssidx, i));
+ break;
+ }
+ }
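+	/* If no interface matches, i == DHD_MAX_IFS is returned to the caller. */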
+ return i;
+}
+
static inline int dhd_rxf_enqueue(dhd_pub_t *dhdp, void* skb)
{
uint32 store_idx;
sent_idx = dhdp->sent_idx;
if (dhdp->skbbuf[store_idx] != NULL) {
/* Make sure the previous packets are processed */
- /* Do I need to make this context sleep here? Definitely in Single processor case */
dhd_os_rxfunlock(dhdp);
+#ifdef RXF_DEQUEUE_ON_BUSY
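+		/* With RXF_DEQUEUE_ON_BUSY the caller (dhd_sched_rxf) retries on
+		 * BCME_BUSY and finally falls back to netif_rx_ni() if the queue
+		 * stays full, so just report busy here.
+		 */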
+ DHD_TRACE(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
+ skb, store_idx, sent_idx));
+ return BCME_BUSY;
+#else /* RXF_DEQUEUE_ON_BUSY */
DHD_ERROR(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
skb, store_idx, sent_idx));
- msleep(1);
+	/* msleep() was removed here; wait_event_timeout() should be used instead
+	 * if the rx frame thread needs a chance to run
+	 */
+#if defined(WAIT_DEQUEUE)
+ OSL_SLEEP(1);
+#endif
return BCME_ERROR;
+#endif /* RXF_DEQUEUE_ON_BUSY */
}
DHD_TRACE(("dhd_rxf_enqueue: Store SKB %p. idx %d -> %d\n",
skb, store_idx, (store_idx + 1) & (MAXSKBPEND - 1)));
return skb;
}
-#endif /* defined(DHDTHREAD) && defined(RXFRAME_THREAD) */
-static int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost)
+int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost)
{
+#ifndef CUSTOMER_HW10
dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+#endif /* !CUSTOMER_HW10 */
if (prepost) { /* pre process */
dhd_read_macaddr(dhd);
{
for (i = 0; i < dhd->pktfilter_count; i++) {
#ifndef GAN_LITE_NAT_KEEPALIVE_FILTER
- if (!dhd->conf->filter_out_all_packets &&
- value && (i == DHD_ARP_FILTER_NUM) &&
+ if (value && (i == DHD_ARP_FILTER_NUM) &&
!_turn_on_arp_filter(dhd, dhd->op_mode)) {
DHD_TRACE(("Do not turn on ARP white list pkt filter:"
"val %d, cnt %d, op_mode 0x%x\n",
char iovbuf[32];
int bcn_li_dtim = 0; /* Default bcn_li_dtim in resume mode is 0 */
uint roamvar = dhd->conf->roam_off_suspend;
+ uint nd_ra_filter = 0;
+ int ret = 0;
if (!dhd)
return -ENODEV;
__FUNCTION__, value, dhd->in_suspend));
dhd_suspend_lock(dhd);
+
+#ifdef CUSTOM_SET_CPUCORE
+ DHD_TRACE(("%s set cpucore(suspend%d)\n", __FUNCTION__, value));
+ /* set specific cpucore */
+ dhd_set_cpucore(dhd, TRUE);
+#endif /* CUSTOM_SET_CPUCORE */
if (dhd->up) {
if (value && dhd->in_suspend) {
#ifdef PKT_FILTER_SUPPORT
/* Disable firmware roaming during suspend */
bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+ if (FW_SUPPORTED(dhd, ndoe)) {
+ /* enable IPv6 RA filter in firmware during suspend */
+ nd_ra_filter = 1;
+ bcm_mkiovar("nd_ra_filter_enable", (char *)&nd_ra_filter, 4,
+ iovbuf, sizeof(iovbuf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+ sizeof(iovbuf), TRUE, 0)) < 0)
+ DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
+ ret));
+ }
} else {
#ifdef PKT_FILTER_SUPPORT
dhd->early_suspended = 0;
roamvar = dhd_roam_disable;
bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf));
dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+ if (FW_SUPPORTED(dhd, ndoe)) {
+					/* disable the IPv6 RA filter in firmware on resume */
+ nd_ra_filter = 0;
+ bcm_mkiovar("nd_ra_filter_enable", (char *)&nd_ra_filter, 4,
+ iovbuf, sizeof(iovbuf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+ sizeof(iovbuf), TRUE, 0)) < 0)
+ DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
+ ret));
+ }
}
}
dhd_suspend_unlock(dhd);
int ret = 0;
DHD_OS_WAKE_LOCK(dhdp);
+ DHD_PERIM_LOCK(dhdp);
+
/* Set flag when early suspend was called */
dhdp->in_suspend = val;
if ((force || !dhdp->suspend_disable_flag) &&
ret = dhd_set_suspend(val, dhdp);
}
+ DHD_PERIM_UNLOCK(dhdp);
DHD_OS_WAKE_UNLOCK(dhdp);
return ret;
}
/* Add the delay that's about to take place */
tmo->elapsed += tmo->increment;
- if (tmo->increment < tmo->tick) {
+ if ((!CAN_SLEEP()) || tmo->increment < tmo->tick) {
OSL_DELAY(tmo->increment);
tmo->increment *= 2;
if (tmo->increment > tmo->tick)
init_waitqueue_head(&delay_wait);
add_wait_queue(&delay_wait, &wait);
set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(1);
+ (void)schedule_timeout(1);
remove_wait_queue(&delay_wait, &wait);
set_current_state(TASK_RUNNING);
}
ASSERT(dhd);
while (i < DHD_MAX_IFS) {
- if (dhd->iflist[i] && (dhd->iflist[i]->net == net))
+ if (dhd->iflist[i] && dhd->iflist[i]->net && (dhd->iflist[i]->net == net))
return i;
i++;
}
return i; /* default - the primary interface */
}
+int
+dhd_ifidx2hostidx(dhd_info_t *dhd, int ifidx)
+{
+ int i = DHD_MAX_IFS;
+
+ ASSERT(dhd);
+
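+	/* Walk the interface list from the highest index down; fall through to
+	 * index 0 (the primary interface) when no match is found.
+	 */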
+ while (--i > 0)
+ if (dhd->iflist[i] && (dhd->iflist[i]->idx == ifidx))
+ break;
+
+ DHD_TRACE(("%s: return hostidx %d for ifidx %d\n", __FUNCTION__, i, ifidx));
+
+ return i; /* default - the primary interface */
+}
+
char *
dhd_ifname(dhd_pub_t *dhdp, int ifidx)
{
uint buflen;
int ret;
- ASSERT(dhd && dhd->iflist[ifidx]);
- dev = dhd->iflist[ifidx]->net;
- if (!dev)
- return;
+ ASSERT(dhd && dhd->iflist[ifidx]);
+ dev = dhd->iflist[ifidx]->net;
+ if (!dev)
+ return;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
- netif_addr_lock_bh(dev);
+ netif_addr_lock_bh(dev);
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
- cnt = netdev_mc_count(dev);
+ cnt = netdev_mc_count(dev);
#else
- cnt = dev->mc_count;
+ cnt = dev->mc_count;
#endif /* LINUX_VERSION_CODE */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
- netif_addr_unlock_bh(dev);
+ netif_addr_unlock_bh(dev);
#endif
- /* Determine initial value of allmulti flag */
+ /* Determine initial value of allmulti flag */
allmulti = (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE;
/* Send down the multicast list first. */
memcpy(bufp, &cnt, sizeof(cnt));
bufp += sizeof(cnt);
+
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
- netif_addr_lock_bh(dev);
+ netif_addr_lock_bh(dev);
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
- netdev_for_each_mc_addr(ha, dev) {
- if (!cnt)
- break;
- memcpy(bufp, ha->addr, ETHER_ADDR_LEN);
- bufp += ETHER_ADDR_LEN;
- cnt--;
+ netdev_for_each_mc_addr(ha, dev) {
+ if (!cnt)
+ break;
+ memcpy(bufp, ha->addr, ETHER_ADDR_LEN);
+ bufp += ETHER_ADDR_LEN;
+ cnt--;
}
#else
for (mclist = dev->mc_list; (mclist && (cnt > 0));
cnt--, mclist = mclist->next) {
- memcpy(bufp, (void *)mclist->dmi_addr, ETHER_ADDR_LEN);
- bufp += ETHER_ADDR_LEN;
- }
+ memcpy(bufp, (void *)mclist->dmi_addr, ETHER_ADDR_LEN);
+ bufp += ETHER_ADDR_LEN;
+ }
#endif /* LINUX_VERSION_CODE */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
- netif_addr_unlock_bh(dev);
+ netif_addr_unlock_bh(dev);
#endif
memset(&ioc, 0, sizeof(ioc));
/* Finally, pick up the PROMISC flag as well, like the NIC driver does */
allmulti = (dev->flags & IFF_PROMISC) ? TRUE : FALSE;
+
allmulti = htol32(allmulti);
memset(&ioc, 0, sizeof(ioc));
}
int
-_dhd_set_mac_address(dhd_info_t *dhd, int ifidx, struct ether_addr *addr)
+_dhd_set_mac_address(dhd_info_t *dhd, int ifidx, uint8 *addr)
{
char buf[32];
wl_ioctl_t ioc;
DHD_ERROR(("%s: set cur_etheraddr failed\n", dhd_ifname(&dhd->pub, ifidx)));
} else {
memcpy(dhd->iflist[ifidx]->net->dev_addr, addr, ETHER_ADDR_LEN);
- memcpy(dhd->pub.mac.octet, addr, ETHER_ADDR_LEN);
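+		/* Only a primary interface (ifidx 0) change updates the cached pub.mac. */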
+ if (ifidx == 0)
+ memcpy(dhd->pub.mac.octet, addr, ETHER_ADDR_LEN);
}
return ret;
#endif
static void
-dhd_op_if(dhd_if_t *ifp)
+dhd_ifadd_event_handler(void *handle, void *event_info, u8 event)
{
- dhd_info_t *dhd;
- int ret = 0, err = 0;
-#ifdef SOFTAP
- unsigned long flags;
-#endif
+ dhd_info_t *dhd = handle;
+ dhd_if_event_t *if_event = event_info;
+ struct net_device *ndev;
+ int ifidx, bssidx;
+ int ret;
+#if 1 && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
+ struct wireless_dev *vwdev, *primary_wdev;
+ struct net_device *primary_ndev;
+#endif /* OEM_ANDROID && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) */
- if (!ifp || !ifp->info || !ifp->idx)
+ if (event != DHD_WQ_WORK_IF_ADD) {
+ DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
return;
- ASSERT(ifp && ifp->info && ifp->idx); /* Virtual interfaces only */
- dhd = ifp->info;
+ }
- DHD_TRACE(("%s: idx %d, state %d\n", __FUNCTION__, ifp->idx, ifp->state));
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
+ return;
+ }
-#ifdef WL_CFG80211
- if (wl_cfg80211_is_progress_ifchange())
- return;
+ if (!if_event) {
+ DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
+ return;
+ }
-#endif
- switch (ifp->state) {
- case DHD_IF_ADD:
- /*
- * Delete the existing interface before overwriting it
- * in case we missed the WLC_E_IF_DEL event.
- */
- if (ifp->net != NULL) {
- DHD_ERROR(("%s: ERROR: netdev:%s already exists, try free & unregister \n",
- __FUNCTION__, ifp->net->name));
- netif_stop_queue(ifp->net);
- unregister_netdev(ifp->net);
- free_netdev(ifp->net);
- }
- /* Allocate etherdev, including space for private structure */
- if (!(ifp->net = alloc_etherdev(sizeof(dhd)))) {
- DHD_ERROR(("%s: OOM - alloc_etherdev(%d)\n", __FUNCTION__, sizeof(dhd)));
- ret = -ENOMEM;
- }
- if (ret == 0) {
- strncpy(ifp->net->name, ifp->name, IFNAMSIZ);
- ifp->net->name[IFNAMSIZ - 1] = '\0';
- memcpy(netdev_priv(ifp->net), &dhd, sizeof(dhd));
-#ifdef WL_CFG80211
- if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)
- if (!wl_cfg80211_notify_ifadd(ifp->net, ifp->idx, ifp->bssidx,
- (void*)dhd_net_attach)) {
- ifp->state = DHD_IF_NONE;
- ifp->event2cfg80211 = TRUE;
- return;
- }
-#endif
- if ((err = dhd_net_attach(&dhd->pub, ifp->idx)) != 0) {
- DHD_ERROR(("%s: dhd_net_attach failed, err %d\n",
- __FUNCTION__, err));
- ret = -EOPNOTSUPP;
- } else {
-#if defined(SOFTAP)
- if (ap_fw_loaded && !(dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)) {
- /* semaphore that the soft AP CODE waits on */
- flags = dhd_os_spin_lock(&dhd->pub);
+ dhd_net_if_lock_local(dhd);
+ DHD_OS_WAKE_LOCK(&dhd->pub);
+ DHD_PERIM_LOCK(&dhd->pub);
- /* save ptr to wl0.1 netdev for use in wl_iw.c */
- ap_net_dev = ifp->net;
- /* signal to the SOFTAP 'sleeper' thread, wl0.1 is ready */
- up(&ap_eth_ctl.sema);
- dhd_os_spin_unlock(&dhd->pub, flags);
- }
-#endif
- DHD_TRACE(("\n ==== pid:%x, net_device for if:%s created ===\n\n",
- current->pid, ifp->net->name));
- ifp->state = DHD_IF_NONE;
- }
- }
- break;
- case DHD_IF_DEL:
- /* Make sure that we don't enter again here if .. */
- /* dhd_op_if is called again from some other context */
- ifp->state = DHD_IF_DELETING;
- if (ifp->net != NULL) {
- DHD_TRACE(("\n%s: got 'DHD_IF_DEL' state\n", __FUNCTION__));
- netif_stop_queue(ifp->net);
-#ifdef WL_CFG80211
- if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) {
- wl_cfg80211_ifdel_ops(ifp->net);
- }
-#endif
- unregister_netdev(ifp->net);
- ret = DHD_DEL_IF; /* Make sure the free_netdev() is called */
-#ifdef WL_CFG80211
- if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) {
- wl_cfg80211_notify_ifdel();
- }
-#endif
- }
- break;
- case DHD_IF_DELETING:
- break;
- default:
- DHD_ERROR(("%s: bad op %d\n", __FUNCTION__, ifp->state));
- ASSERT(!ifp->state);
- break;
+ ifidx = if_event->event.ifidx;
+ bssidx = if_event->event.bssidx;
+ DHD_TRACE(("%s: registering if with ifidx %d\n", __FUNCTION__, ifidx));
+
+ ndev = dhd_allocate_if(&dhd->pub, ifidx, if_event->name,
+ if_event->mac, bssidx, TRUE);
+ if (!ndev) {
+ DHD_ERROR(("%s: net device alloc failed \n", __FUNCTION__));
+ goto done;
}
- if (ret < 0) {
- ifp->set_multicast = FALSE;
- if (ifp->net) {
- free_netdev(ifp->net);
- ifp->net = NULL;
- }
- dhd->iflist[ifp->idx] = NULL;
-#ifdef SOFTAP
- flags = dhd_os_spin_lock(&dhd->pub);
- if (ifp->net == ap_net_dev)
- ap_net_dev = NULL; /* NULL SOFTAP global wl0.1 as well */
- dhd_os_spin_unlock(&dhd->pub, flags);
-#endif /* SOFTAP */
- MFREE(dhd->pub.osh, ifp, sizeof(*ifp));
+#if 1 && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
+ vwdev = kzalloc(sizeof(*vwdev), GFP_KERNEL);
+ if (unlikely(!vwdev)) {
+ WL_ERR(("Could not allocate wireless device\n"));
+ goto done;
}
+ primary_ndev = dhd->pub.info->iflist[0]->net;
+ primary_wdev = ndev_to_wdev(primary_ndev);
+ vwdev->wiphy = primary_wdev->wiphy;
+ vwdev->iftype = if_event->event.role;
+ vwdev->netdev = ndev;
+ ndev->ieee80211_ptr = vwdev;
+ SET_NETDEV_DEV(ndev, wiphy_dev(vwdev->wiphy));
+ DHD_ERROR(("virtual interface(%s) is created\n", if_event->name));
+#endif /* OEM_ANDROID && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) */
+
+ DHD_PERIM_UNLOCK(&dhd->pub);
+ ret = dhd_register_if(&dhd->pub, ifidx, TRUE);
+ DHD_PERIM_LOCK(&dhd->pub);
+ if (ret != BCME_OK) {
+ DHD_ERROR(("%s: dhd_register_if failed\n", __FUNCTION__));
+ dhd_remove_if(&dhd->pub, ifidx, TRUE);
+ }
+#ifdef PCIE_FULL_DONGLE
+ /* Turn on AP isolation in the firmware for interfaces operating in AP mode */
+ if (FW_SUPPORTED((&dhd->pub), ap) && (if_event->event.role != WLC_E_IF_ROLE_STA)) {
+ char iovbuf[WLC_IOCTL_SMLEN];
+ uint32 var_int = 1;
+
+ memset(iovbuf, 0, sizeof(iovbuf));
+ bcm_mkiovar("ap_isolate", (char *)&var_int, 4, iovbuf, sizeof(iovbuf));
+ dhd_wl_ioctl_cmd(&dhd->pub, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, ifidx);
+ }
+#endif /* PCIE_FULL_DONGLE */
+done:
+ MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
+
+ DHD_PERIM_UNLOCK(&dhd->pub);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ dhd_net_if_unlock_local(dhd);
}
-#ifdef DHDTCPACK_SUPPRESS
-uint dhd_use_tcpack_suppress = TRUE;
-module_param(dhd_use_tcpack_suppress, uint, FALSE);
-extern bool dhd_tcpack_suppress(dhd_pub_t *dhdp, void *pkt);
-#endif /* DHDTCPACK_SUPPRESS */
+static void
+dhd_ifdel_event_handler(void *handle, void *event_info, u8 event)
+{
+ dhd_info_t *dhd = handle;
+ int ifidx;
+ dhd_if_event_t *if_event = event_info;
-static int
-_dhd_sysioc_thread(void *data)
+
+ if (event != DHD_WQ_WORK_IF_DEL) {
+ DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
+ return;
+ }
+
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
+ return;
+ }
+
+ if (!if_event) {
+ DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
+ return;
+ }
+
+ dhd_net_if_lock_local(dhd);
+ DHD_OS_WAKE_LOCK(&dhd->pub);
+ DHD_PERIM_LOCK(&dhd->pub);
+
+ ifidx = if_event->event.ifidx;
+ DHD_TRACE(("Removing interface with idx %d\n", ifidx));
+
+ dhd_remove_if(&dhd->pub, ifidx, TRUE);
+
+ MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
+
+ DHD_PERIM_UNLOCK(&dhd->pub);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ dhd_net_if_unlock_local(dhd);
+}
+
+static void
+dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event)
{
- tsk_ctl_t *tsk = (tsk_ctl_t *)data;
- dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
- struct ipv6_addr *iter, *next;
- int i, ret;
-#ifdef SOFTAP
- bool in_ap = FALSE;
- unsigned long flags;
-#endif
+ dhd_info_t *dhd = handle;
+ dhd_if_t *ifp = event_info;
- while (down_interruptible(&tsk->sema) == 0) {
+	if (event != DHD_WQ_WORK_SET_MAC) {
+		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
+		return;
+	}
- SMP_RD_BARRIER_DEPENDS();
- if (tsk->terminated) {
- break;
- }
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
+ return;
+ }
- dhd_net_if_lock_local(dhd);
- DHD_OS_WAKE_LOCK(&dhd->pub);
+ dhd_net_if_lock_local(dhd);
+ DHD_OS_WAKE_LOCK(&dhd->pub);
+ DHD_PERIM_LOCK(&dhd->pub);
- for (i = 0; i < DHD_MAX_IFS; i++) {
- if (dhd->iflist[i]) {
- DHD_TRACE(("%s: interface %d\n", __FUNCTION__, i));
#ifdef SOFTAP
- flags = dhd_os_spin_lock(&dhd->pub);
- in_ap = (ap_net_dev != NULL);
- dhd_os_spin_unlock(&dhd->pub, flags);
+ {
+ unsigned long flags;
+ bool in_ap = FALSE;
+ DHD_GENERAL_LOCK(&dhd->pub, flags);
+ in_ap = (ap_net_dev != NULL);
+ DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+
+ if (in_ap) {
+ DHD_ERROR(("attempt to set MAC for %s in AP Mode, blocked. \n",
+ ifp->net->name));
+ goto done;
+ }
+ }
#endif /* SOFTAP */
- if (dhd->iflist[i] && dhd->iflist[i]->state)
- dhd_op_if(dhd->iflist[i]);
- if (dhd->iflist[i] == NULL) {
- DHD_TRACE(("\n\n %s: interface %d just been removed,"
- "!\n\n", __FUNCTION__, i));
- continue;
- }
-#ifdef SOFTAP
- if (in_ap && dhd->set_macaddress == i+1) {
- DHD_TRACE(("attempt to set MAC for %s in AP Mode,"
- "blocked. \n", dhd->iflist[i]->net->name));
- dhd->set_macaddress = 0;
- continue;
- }
+ if (ifp == NULL || !dhd->pub.up) {
+ DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
+ goto done;
+ }
- if (in_ap && dhd->iflist[i]->set_multicast) {
- DHD_TRACE(("attempt to set MULTICAST list for %s"
- "in AP Mode, blocked. \n", dhd->iflist[i]->net->name));
- dhd->iflist[i]->set_multicast = FALSE;
- continue;
- }
-#endif /* SOFTAP */
- if (dhd->pub.up == 0)
- continue;
- if (dhd->iflist[i]->set_multicast) {
- dhd->iflist[i]->set_multicast = FALSE;
- _dhd_set_multicast_list(dhd, i);
+	DHD_ERROR(("%s: MACID will be overwritten\n", __FUNCTION__));
+ ifp->set_macaddress = FALSE;
+ if (_dhd_set_mac_address(dhd, ifp->idx, ifp->mac_addr) == 0)
+ DHD_INFO(("%s: MACID is overwritten\n", __FUNCTION__));
+ else
+ DHD_ERROR(("%s: _dhd_set_mac_address() failed\n", __FUNCTION__));
- }
- list_for_each_entry_safe(iter, next,
- &dhd->iflist[i]->ipv6_list, list) {
- spin_lock_bh(&dhd->iflist[i]->ipv6_lock);
- list_del(&iter->list);
- spin_unlock_bh(&dhd->iflist[i]->ipv6_lock);
- if (iter->ipv6_oper == DHD_IPV6_ADDR_ADD) {
- ret = dhd_ndo_enable(&dhd->pub, TRUE);
- if (ret < 0) {
- DHD_ERROR(("%s: Enabling NDO Failed %d\n",
- __FUNCTION__, ret));
- continue;
- }
- ret = dhd_ndo_add_ip(&dhd->pub,
- (char*)&iter->ipv6_addr[0], i);
- if (ret < 0) {
- DHD_ERROR(("%s: Adding host ip fail %d\n",
- __FUNCTION__, ret));
- continue;
- }
- } else {
- ret = dhd_ndo_remove_ip(&dhd->pub, i);
- if (ret < 0) {
- DHD_ERROR(("%s: Removing host ip fail %d\n",
- __FUNCTION__, ret));
- continue;
- }
- }
- NATIVE_MFREE(dhd->pub.osh, iter, sizeof(struct ipv6_addr));
- }
- if (dhd->set_macaddress == i+1) {
- dhd->set_macaddress = 0;
- if (_dhd_set_mac_address(dhd, i, &dhd->macvalue) == 0) {
- DHD_INFO(("%s: MACID is overwritten\n", __FUNCTION__));
- } else {
- DHD_ERROR(("%s: _dhd_set_mac_address() failed\n",
- __FUNCTION__));
- }
- }
- }
+done:
+ DHD_PERIM_UNLOCK(&dhd->pub);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ dhd_net_if_unlock_local(dhd);
+}
+
+static void
+dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event)
+{
+ dhd_info_t *dhd = handle;
+ dhd_if_t *ifp = event_info;
+ int ifidx;
+
+ if (event != DHD_WQ_WORK_SET_MCAST_LIST) {
+ DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
+ return;
+ }
+
+ if (!dhd) {
+ DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
+ return;
+ }
+
+ dhd_net_if_lock_local(dhd);
+ DHD_OS_WAKE_LOCK(&dhd->pub);
+ DHD_PERIM_LOCK(&dhd->pub);
+
+#ifdef SOFTAP
+ {
+ bool in_ap = FALSE;
+ unsigned long flags;
+ DHD_GENERAL_LOCK(&dhd->pub, flags);
+ in_ap = (ap_net_dev != NULL);
+ DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+
+ if (in_ap) {
+ DHD_ERROR(("set MULTICAST list for %s in AP Mode, blocked. \n",
+ ifp->net->name));
+ ifp->set_multicast = FALSE;
+ goto done;
}
+ }
+#endif /* SOFTAP */
- DHD_OS_WAKE_UNLOCK(&dhd->pub);
- dhd_net_if_unlock_local(dhd);
+ if (ifp == NULL || !dhd->pub.up) {
+ DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
+ goto done;
}
- DHD_TRACE(("%s: stopped\n", __FUNCTION__));
- complete_and_exit(&tsk->completed, 0);
+
+ ifidx = ifp->idx;
+
+
+ _dhd_set_multicast_list(dhd, ifidx);
+ DHD_INFO(("%s: set multicast list for if %d\n", __FUNCTION__, ifidx));
+
+done:
+ DHD_PERIM_UNLOCK(&dhd->pub);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ dhd_net_if_unlock_local(dhd);
}
static int
{
int ret = 0;
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
struct sockaddr *sa = (struct sockaddr *)addr;
int ifidx;
+ dhd_if_t *dhdif;
ifidx = dhd_net2idx(dhd, dev);
if (ifidx == DHD_BAD_IF)
return -1;
- ASSERT(dhd->thr_sysioc_ctl.thr_pid >= 0);
- memcpy(&dhd->macvalue, sa->sa_data, ETHER_ADDR_LEN);
- dhd->set_macaddress = ifidx+1;
- up(&dhd->thr_sysioc_ctl.sema);
+ dhdif = dhd->iflist[ifidx];
+ dhd_net_if_lock_local(dhd);
+ memcpy(dhdif->mac_addr, sa->sa_data, ETHER_ADDR_LEN);
+ dhdif->set_macaddress = TRUE;
+ dhd_net_if_unlock_local(dhd);
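+	/* The actual firmware ioctl is issued later from dhd_set_mac_addr_handler()
+	 * on the deferred work queue.
+	 */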
+ dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhdif, DHD_WQ_WORK_SET_MAC,
+ dhd_set_mac_addr_handler, DHD_WORK_PRIORITY_LOW);
return ret;
}
static void
dhd_set_multicast_list(struct net_device *dev)
{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
int ifidx;
ifidx = dhd_net2idx(dhd, dev);
if (ifidx == DHD_BAD_IF)
return;
- ASSERT(dhd->thr_sysioc_ctl.thr_pid >= 0);
dhd->iflist[ifidx]->set_multicast = TRUE;
- up(&dhd->thr_sysioc_ctl.sema);
+ dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhd->iflist[ifidx],
+ DHD_WQ_WORK_SET_MCAST_LIST, dhd_set_mcast_list_handler, DHD_WORK_PRIORITY_LOW);
}
#ifdef PROP_TXSTATUS
return 1;
}
-const uint8 wme_fifo2ac[] = { 0, 1, 2, 3, 1, 1 };
-uint8 prio2fifo[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
-#define WME_PRIO2AC(prio) wme_fifo2ac[prio2fifo[(prio)]]
-
#endif /* PROP_TXSTATUS */
-int
+
+int BCMFASTPATH
dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
{
int ret = BCME_OK;
return -ENODEV;
}
+#ifdef PCIE_FULL_DONGLE
+ if (dhdp->busstate == DHD_BUS_SUSPEND) {
+ DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__));
+ PKTFREE(dhdp->osh, pktbuf, TRUE);
+ return -EBUSY;
+ }
+#endif /* PCIE_FULL_DONGLE */
+
+#ifdef DHD_UNICAST_DHCP
+	/* If dhcp_unicast is enabled, convert broadcast DHCP ACK/REPLY
+	 * packets to unicast.
+	 */
+ if (dhdp->dhcp_unicast) {
+ dhd_convert_dhcp_broadcast_ack_to_unicast(dhdp, pktbuf, ifidx);
+ }
+#endif /* DHD_UNICAST_DHCP */
/* Update multicast statistic */
if (PKTLEN(dhdp->osh, pktbuf) >= ETHER_HDR_LEN) {
uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
if (ntoh16(eh->ether_type) == ETHER_TYPE_802_1X)
atomic_inc(&dhd->pend_8021x_cnt);
} else {
- PKTFREE(dhd->pub.osh, pktbuf, TRUE);
- return BCME_ERROR;
+ PKTFREE(dhd->pub.osh, pktbuf, TRUE);
+ return BCME_ERROR;
}
+#ifdef DHDTCPACK_SUPPRESS
+ /* If this packet has replaced another packet and got freed, just return */
+ if (dhd_tcpack_suppress(dhdp, pktbuf))
+ return ret;
+#endif /* DHDTCPACK_SUPPRESS */
+
/* Look into the packet and update the packet priority */
#ifndef PKTPRIO_OVERRIDE
if (PKTPRIO(pktbuf) == 0)
#endif
pktsetprio(pktbuf, FALSE);
+
+#ifdef PCIE_FULL_DONGLE
+ /*
+	 * Look up the per-interface hash table for a matching flowring. If one is not
+ * available, allocate a unique flowid and add a flowring entry.
+ * The found or newly created flowid is placed into the pktbuf's tag.
+ */
+ ret = dhd_flowid_update(dhdp, ifidx, dhdp->flow_prio_map[(PKTPRIO(pktbuf))], pktbuf);
+ if (ret != BCME_OK) {
+ PKTCFREE(dhd->pub.osh, pktbuf, TRUE);
+ return ret;
+ }
+#endif
+
#ifdef PROP_TXSTATUS
- if (dhdp->wlfc_state) {
+ if (dhd_wlfc_is_supported(dhdp)) {
/* store the interface ID */
DHD_PKTTAG_SETIF(PKTTAG(pktbuf), ifidx);
#ifdef WLMEDIA_HTSF
dhd_htsf_addtxts(dhdp, pktbuf);
#endif
-#ifdef DHDTCPACK_SUPPRESS
- if (dhd_use_tcpack_suppress && dhd_tcpack_suppress(dhdp, pktbuf))
- ret = BCME_OK;
- else
-#endif /* DHDTCPACK_SUPPRESS */
#ifdef PROP_TXSTATUS
{
- dhd_os_wlfc_block(dhdp);
- if (dhdp->wlfc_state && ((athost_wl_status_info_t*)dhdp->wlfc_state)->proptxstatus_mode
- != WLFC_FCMODE_NONE) {
- dhd_wlfc_commit_packets(dhdp->wlfc_state, (f_commitpkt_t)dhd_bus_txdata,
- dhdp->bus, pktbuf);
- if (((athost_wl_status_info_t*)dhdp->wlfc_state)->toggle_host_if) {
- ((athost_wl_status_info_t*)dhdp->wlfc_state)->toggle_host_if = 0;
+ if (dhd_wlfc_commit_packets(dhdp, (f_commitpkt_t)dhd_bus_txdata,
+ dhdp->bus, pktbuf, TRUE) == WLFC_UNSUPPORTED) {
+ /* non-proptxstatus way */
+#ifdef BCMPCIE
+ ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
+#else
+ ret = dhd_bus_txdata(dhdp->bus, pktbuf);
+#endif /* BCMPCIE */
}
- dhd_os_wlfc_unblock(dhdp);
- }
- else {
- dhd_os_wlfc_unblock(dhdp);
- /* non-proptxstatus way */
- ret = dhd_bus_txdata(dhdp->bus, pktbuf);
- }
}
+#else
+#ifdef BCMPCIE
+ ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
#else
ret = dhd_bus_txdata(dhdp->bus, pktbuf);
+#endif /* BCMPCIE */
#endif /* PROP_TXSTATUS */
return ret;
}
-int
+int BCMFASTPATH
dhd_start_xmit(struct sk_buff *skb, struct net_device *net)
{
int ret;
uint datalen;
void *pktbuf;
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
+ dhd_info_t *dhd = DHD_DEV_INFO(net);
dhd_if_t *ifp = NULL;
int ifidx;
#ifdef WLMEDIA_HTSF
#else
uint8 htsfdlystat_sz = 0;
#endif
+#ifdef DHD_WMF
+ struct ether_header *eh;
+ uint8 *iph;
+#endif /* DHD_WMF */
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
DHD_OS_WAKE_LOCK(&dhd->pub);
+ DHD_PERIM_LOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
/* Reject if down */
if (dhd->pub.busstate == DHD_BUS_DOWN || dhd->pub.hang_was_sent) {
DHD_ERROR(("%s: Event HANG sent up\n", __FUNCTION__));
net_os_send_hang_message(net);
}
+ DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
DHD_OS_WAKE_UNLOCK(&dhd->pub);
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
return -ENODEV;
#endif
}
- ifidx = dhd_net2idx(dhd, net);
+ ifp = DHD_DEV_IFP(net);
+ ifidx = DHD_DEV_IFIDX(net);
+
+ ASSERT(ifidx == dhd_net2idx(dhd, net));
+ ASSERT((ifp != NULL) && (ifp == dhd->iflist[ifidx]));
+
if (ifidx == DHD_BAD_IF) {
DHD_ERROR(("%s: bad ifidx %d\n", __FUNCTION__, ifidx));
netif_stop_queue(net);
+ DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
DHD_OS_WAKE_UNLOCK(&dhd->pub);
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20))
return -ENODEV;
#endif
}
- ifp = dhd->iflist[ifidx];
- datalen = PKTLEN(dhdp->osh, skb);
+	/* Re-align the socket buffer if "skb->data" is at an odd address */
+ if (((unsigned long)(skb->data)) & 0x1) {
+ unsigned char *data = skb->data;
+ uint32 length = skb->len;
+ PKTPUSH(dhd->pub.osh, skb, 1);
+ memmove(skb->data, data, length);
+ PKTSETLEN(dhd->pub.osh, skb, length);
+ }
+
+ datalen = PKTLEN(dhd->pub.osh, skb);
/* Make sure there's enough room for any header */
}
}
#endif
+#ifdef DHD_WMF
+ eh = (struct ether_header *)PKTDATA(dhd->pub.osh, pktbuf);
+ iph = (uint8 *)eh + ETHER_HDR_LEN;
+
+ /* WMF processing for multicast packets
+ * Only IPv4 packets are handled
+ */
+ if (ifp->wmf.wmf_enable && (ntoh16(eh->ether_type) == ETHER_TYPE_IP) &&
+ (IP_VER(iph) == IP_VER_4) && (ETHER_ISMULTI(eh->ether_dhost) ||
+ ((IPV4_PROT(iph) == IP_PROT_IGMP) && dhd->pub.wmf_ucast_igmp))) {
+#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
+ void *sdu_clone;
+ bool ucast_convert = FALSE;
+#ifdef DHD_UCAST_UPNP
+ uint32 dest_ip;
+
+ dest_ip = ntoh32(*((uint32 *)(iph + IPV4_DEST_IP_OFFSET)));
+ ucast_convert = dhd->pub.wmf_ucast_upnp && MCAST_ADDR_UPNP_SSDP(dest_ip);
+#endif /* DHD_UCAST_UPNP */
+#ifdef DHD_IGMP_UCQUERY
+ ucast_convert |= dhd->pub.wmf_ucast_igmp_query &&
+ (IPV4_PROT(iph) == IP_PROT_IGMP) &&
+ (*(iph + IPV4_HLEN(iph)) == IGMPV2_HOST_MEMBERSHIP_QUERY);
+#endif /* DHD_IGMP_UCQUERY */
+ if (ucast_convert) {
+ dhd_sta_t *sta;
+ unsigned long flags;
+
+ DHD_IF_STA_LIST_LOCK(ifp, flags);
+
+ /* Convert upnp/igmp query to unicast for each assoc STA */
+ list_for_each_entry(sta, &ifp->sta_list, list) {
+ if ((sdu_clone = PKTDUP(dhd->pub.osh, pktbuf)) == NULL) {
+ DHD_IF_STA_LIST_UNLOCK(ifp, flags);
+ DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ return (WMF_NOP);
+ }
+ dhd_wmf_forward(ifp->wmf.wmfh, sdu_clone, 0, sta, 1);
+ }
+
+ DHD_IF_STA_LIST_UNLOCK(ifp, flags);
+ DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+
+ PKTFREE(dhd->pub.osh, pktbuf, TRUE);
+ return NETDEV_TX_OK;
+ } else
+#endif /* defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP) */
+ {
+			/* There will be no STA info if the packet comes from a LAN host,
+			 * so pass NULL.
+			 */
+ ret = dhd_wmf_packets_handle(&dhd->pub, pktbuf, NULL, ifidx, 0);
+ switch (ret) {
+ case WMF_TAKEN:
+ case WMF_DROP:
+ /* Either taken by WMF or we should drop it.
+ * Exiting send path
+ */
+ DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ return NETDEV_TX_OK;
+ default:
+ /* Continue the transmit path */
+ break;
+ }
+ }
+ }
+#endif /* DHD_WMF */
ret = dhd_sendpkt(&dhd->pub, ifidx, pktbuf);
done:
if (ret) {
ifp->stats.tx_dropped++;
+ dhd->pub.tx_dropped++;
}
else {
dhd->pub.tx_packets++;
ifp->stats.tx_bytes += datalen;
}
+ DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), TRUE);
DHD_OS_WAKE_UNLOCK(&dhd->pub);
/* Return ok: we always eat the packet */
#endif
}
+
void
dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool state)
{
}
#endif /* DHD_RX_DUMP */
+
+#ifdef DHD_WMF
+bool
+dhd_is_rxthread_enabled(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd = dhdp->info;
+
+ return dhd->rxthread_enabled;
+}
+#endif /* DHD_WMF */
+
void
dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan)
{
wl_event_msg_t event;
int tout_rx = 0;
int tout_ctrl = 0;
-#if defined(DHDTHREAD) && defined(RXFRAME_THREAD)
void *skbhead = NULL;
void *skbprev = NULL;
-#endif /* defined(DHDTHREAD) && defined(RXFRAME_THREAD) */
-#ifdef DHD_RX_DUMP
-#ifdef DHD_RX_FULL_DUMP
- int k;
-#endif /* DHD_RX_FULL_DUMP */
+#if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP)
char *dump_data;
uint16 protocol;
-#endif /* DHD_RX_DUMP */
+#endif /* DHD_RX_DUMP || DHD_8021X_DUMP */
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
for (i = 0; pktbuf && i < numpkt; i++, pktbuf = pnext) {
-#ifdef WLBTAMP
struct ether_header *eh;
+#ifdef WLBTAMP
struct dot11_llc_snap_header *lsh;
#endif
pnext = PKTNEXT(dhdp->osh, pktbuf);
- PKTSETNEXT(wl->sh.osh, pktbuf, NULL);
+ PKTSETNEXT(dhdp->osh, pktbuf, NULL);
ifp = dhd->iflist[ifidx];
if (ifp == NULL) {
DHD_ERROR(("%s: ifp is NULL. drop packet\n",
__FUNCTION__));
- PKTFREE(dhdp->osh, pktbuf, TRUE);
+ PKTCFREE(dhdp->osh, pktbuf, FALSE);
continue;
}
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
- /* Dropping packets before registering net device to avoid kernel panic */
+
+ eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
+
+ /* Dropping only data packets before registering net device to avoid kernel panic */
#ifndef PROP_TXSTATUS_VSDB
- if (!ifp->net || ifp->net->reg_state != NETREG_REGISTERED)
+ if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED) &&
+ (ntoh16(eh->ether_type) != ETHER_TYPE_BRCM))
#else
- if (!ifp->net || ifp->net->reg_state != NETREG_REGISTERED || !dhd->pub.up)
+ if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED || !dhd->pub.up) &&
+ (ntoh16(eh->ether_type) != ETHER_TYPE_BRCM))
#endif /* PROP_TXSTATUS_VSDB */
{
DHD_ERROR(("%s: net device is NOT registered yet. drop packet\n",
__FUNCTION__));
- PKTFREE(dhdp->osh, pktbuf, TRUE);
+ PKTCFREE(dhdp->osh, pktbuf, FALSE);
continue;
}
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
#ifdef WLBTAMP
- eh = (struct ether_header *)PKTDATA(wl->sh.osh, pktbuf);
lsh = (struct dot11_llc_snap_header *)&eh[1];
if ((ntoh16(eh->ether_type) < ETHER_TYPE_MIN) &&
- (PKTLEN(wl->sh.osh, pktbuf) >= RFC1042_HDR_LEN) &&
+ (PKTLEN(dhdp->osh, pktbuf) >= RFC1042_HDR_LEN) &&
bcmp(lsh, BT_SIG_SNAP_MPROT, DOT11_LLC_SNAP_HDR_LEN - 2) == 0 &&
lsh->type == HTON16(BTA_PROT_L2CAP)) {
amp_hci_ACL_data_t *ACL_data = (amp_hci_ACL_data_t *)
#endif /* WLBTAMP */
#ifdef PROP_TXSTATUS
- if (dhdp->wlfc_state && PKTLEN(wl->sh.osh, pktbuf) == 0) {
+ if (dhd_wlfc_is_header_only_pkt(dhdp, pktbuf)) {
/* WLFC may send header only packet when
there is an urgent message but no packet to
piggy-back on
*/
- ((athost_wl_status_info_t*)dhdp->wlfc_state)->stats.wlfc_header_only_pkt++;
- PKTFREE(dhdp->osh, pktbuf, TRUE);
+ PKTCFREE(dhdp->osh, pktbuf, FALSE);
continue;
}
#endif
-
+#ifdef DHD_L2_FILTER
+ /* If block_ping is enabled drop the ping packet */
+ if (dhdp->block_ping) {
+ if (dhd_l2_filter_block_ping(dhdp, pktbuf, ifidx) == BCME_OK) {
+ PKTFREE(dhdp->osh, pktbuf, FALSE);
+ continue;
+ }
+ }
+#endif
+#ifdef DHD_WMF
+ /* WMF processing for multicast packets */
+ if (ifp->wmf.wmf_enable && (ETHER_ISMULTI(eh->ether_dhost))) {
+ dhd_sta_t *sta;
+ int ret;
+
+ sta = dhd_find_sta(dhdp, ifidx, (void *)eh->ether_shost);
+ ret = dhd_wmf_packets_handle(dhdp, pktbuf, sta, ifidx, 1);
+ switch (ret) {
+ case WMF_TAKEN:
+ /* The packet is taken by WMF. Continue to next iteration */
+ continue;
+ case WMF_DROP:
+ /* Packet DROP decision by WMF. Toss it */
+ DHD_ERROR(("%s: WMF decides to drop packet\n",
+ __FUNCTION__));
+ PKTCFREE(dhdp->osh, pktbuf, FALSE);
+ continue;
+ default:
+ /* Continue the transmit path */
+ break;
+ }
+ }
+#endif /* DHD_WMF */
+#ifdef DHDTCPACK_SUPPRESS
+ dhd_tcpdata_info_get(dhdp, pktbuf);
+#endif
skb = PKTTONATIVE(dhdp->osh, pktbuf);
- /* Get the protocol, maintain skb around eth_type_trans()
- * The main reason for this hack is for the limitation of
- * Linux 2.4 where 'eth_type_trans' uses the 'net->hard_header_len'
- * to perform skb_pull inside vs ETH_HLEN. Since to avoid
- * coping of the packet coming from the network stack to add
- * BDC, Hardware header etc, during network interface registration
- * we set the 'net->hard_header_len' to ETH_HLEN + extra space required
- * for BDC, Hardware header etc. and not just the ETH_HLEN
- */
- eth = skb->data;
+ ifp = dhd->iflist[ifidx];
+ if (ifp == NULL)
+ ifp = dhd->iflist[0];
+
+ ASSERT(ifp);
+ skb->dev = ifp->net;
+
+#ifdef PCIE_FULL_DONGLE
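+		/* Intra-BSS forwarding for AP/P2P-GO interfaces when ap_isolate is off:
+		 * unicast frames destined to an associated STA are sent back over the
+		 * air instead of up the stack, while multicast/broadcast frames are
+		 * duplicated and forwarded in addition to being passed up.
+		 */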
+ if ((DHD_IF_ROLE_AP(dhdp, ifidx) || DHD_IF_ROLE_P2PGO(dhdp, ifidx)) &&
+ (!ifp->ap_isolate)) {
+ eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
+ if (ETHER_ISUCAST(eh->ether_dhost)) {
+ if (dhd_find_sta(dhdp, ifidx, (void *)eh->ether_dhost)) {
+ dhd_sendpkt(dhdp, ifidx, pktbuf);
+ continue;
+ }
+ } else {
+ void *npktbuf = PKTDUP(dhdp->osh, pktbuf);
+ dhd_sendpkt(dhdp, ifidx, npktbuf);
+ }
+ }
+#endif /* PCIE_FULL_DONGLE */
+
+		/* Get the protocol, maintaining the skb around eth_type_trans().
+		 * The main reason for this hack is a limitation of Linux 2.4,
+		 * where 'eth_type_trans' uses 'net->hard_header_len' rather than
+		 * ETH_HLEN for its internal skb_pull. To avoid copying the packet
+		 * coming from the network stack when the BDC and hardware headers
+		 * are added, 'net->hard_header_len' is set at network interface
+		 * registration to ETH_HLEN plus the extra space required for the
+		 * BDC and hardware headers, not just ETH_HLEN.
+		 */
+ eth = skb->data;
len = skb->len;
-#ifdef DHD_RX_DUMP
+#if defined(DHD_RX_DUMP) || defined(DHD_8021X_DUMP)
dump_data = skb->data;
protocol = (dump_data[12] << 8) | dump_data[13];
- DHD_ERROR(("RX DUMP - %s\n", _get_packet_type_str(protocol)));
-#ifdef DHD_RX_FULL_DUMP
- if (protocol != ETHER_TYPE_BRCM) {
- for (k = 0; k < skb->len; k++) {
- DHD_ERROR(("%02X ", dump_data[k]));
- if ((k & 15) == 15)
- DHD_ERROR(("\n"));
- }
- DHD_ERROR(("\n"));
+ if (protocol == ETHER_TYPE_802_1X) {
+ DHD_ERROR(("ETHER_TYPE_802_1X: "
+ "ver %d, type %d, replay %d\n",
+ dump_data[14], dump_data[15],
+ dump_data[30]));
}
-#endif /* DHD_RX_FULL_DUMP */
-
+#endif /* DHD_RX_DUMP || DHD_8021X_DUMP */
+#if defined(DHD_RX_DUMP)
+ DHD_ERROR(("RX DUMP - %s\n", _get_packet_type_str(protocol)));
if (protocol != ETHER_TYPE_BRCM) {
if (dump_data[0] == 0xFF) {
DHD_ERROR(("%s: BROADCAST\n", __FUNCTION__));
DHD_ERROR(("%s: MULTICAST: " MACDBG "\n",
__FUNCTION__, MAC2STRDBG(dump_data)));
}
-
- if (protocol == ETHER_TYPE_802_1X) {
- DHD_ERROR(("ETHER_TYPE_802_1X: "
- "ver %d, type %d, replay %d\n",
- dump_data[14], dump_data[15],
- dump_data[30]));
+#ifdef DHD_RX_FULL_DUMP
+ {
+ int k;
+ for (k = 0; k < skb->len; k++) {
+ DHD_ERROR(("%02X ", dump_data[k]));
+ if ((k & 15) == 15)
+ DHD_ERROR(("\n"));
+ }
+ DHD_ERROR(("\n"));
}
+#endif /* DHD_RX_FULL_DUMP */
}
-
#endif /* DHD_RX_DUMP */
- ifp = dhd->iflist[ifidx];
- if (ifp == NULL)
- ifp = dhd->iflist[0];
-
- ASSERT(ifp);
- skb->dev = ifp->net;
skb->protocol = eth_type_trans(skb, skb->dev);
if (skb->pkt_type == PACKET_MULTICAST) {
dhd->pub.rx_multicast++;
+ ifp->stats.multicast++;
}
skb->data = eth;
skb_mac_header(skb),
#else
skb->mac.raw,
-#endif
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) */
&event,
&data);
#endif /* PNO_SUPPORT */
#ifdef DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT
- PKTFREE(dhdp->osh, pktbuf, TRUE);
+ PKTFREE(dhdp->osh, pktbuf, FALSE);
continue;
-#endif
+#endif /* DHD_DONOT_FORWARD_BCMEVENT_AS_NETWORK_PKT */
} else {
tout_rx = DHD_PACKET_TIMEOUT_MS;
+
+#ifdef PROP_TXSTATUS
+ dhd_wlfc_save_rxpath_ac_time(dhdp, (uint8)PKTPRIO(skb));
+#endif /* PROP_TXSTATUS */
}
ASSERT(ifidx < DHD_MAX_IFS && dhd->iflist[ifidx]);
- if (dhd->iflist[ifidx] && !dhd->iflist[ifidx]->state)
- ifp = dhd->iflist[ifidx];
+ ifp = dhd->iflist[ifidx];
if (ifp->net)
ifp->net->last_rx = jiffies;
- dhdp->dstats.rx_bytes += skb->len;
- dhdp->rx_packets++; /* Local count */
- ifp->stats.rx_bytes += skb->len;
- ifp->stats.rx_packets++;
+ if (ntoh16(skb->protocol) != ETHER_TYPE_BRCM) {
+ dhdp->dstats.rx_bytes += skb->len;
+ dhdp->rx_packets++; /* Local count */
+ ifp->stats.rx_bytes += skb->len;
+ ifp->stats.rx_packets++;
+ }
+#if defined(DHD_TCP_WINSIZE_ADJUST)
+ if (dhd_use_tcp_window_size_adjust) {
+ if (ifidx == 0 && ntoh16(skb->protocol) == ETHER_TYPE_IP) {
+ dhd_adjust_tcp_winsize(dhdp->op_mode, skb);
+ }
+ }
+#endif /* DHD_TCP_WINSIZE_ADJUST */
if (in_interrupt()) {
netif_rx(skb);
} else {
- /* If the receive is not processed inside an ISR,
- * the softirqd must be woken explicitly to service
- * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
- * by netif_rx_ni(), but in earlier kernels, we need
- * to do it manually.
- */
-#if defined(DHDTHREAD) && defined(RXFRAME_THREAD)
- if (!skbhead)
- skbhead = skb;
- else
- PKTSETNEXT(wl->sh.osh, skbprev, skb);
- skbprev = skb;
-#else
+ if (dhd->rxthread_enabled) {
+ if (!skbhead)
+ skbhead = skb;
+ else
+ PKTSETNEXT(dhdp->osh, skbprev, skb);
+ skbprev = skb;
+ } else {
+
+ /* If the receive is not processed inside an ISR,
+ * the softirqd must be woken explicitly to service
+ * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
+ * by netif_rx_ni(), but in earlier kernels, we need
+ * to do it manually.
+ */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
- netif_rx_ni(skb);
+ netif_rx_ni(skb);
#else
- ulong flags;
- netif_rx(skb);
- local_irq_save(flags);
- RAISE_RX_SOFTIRQ();
- local_irq_restore(flags);
+ ulong flags;
+ netif_rx(skb);
+ local_irq_save(flags);
+ RAISE_RX_SOFTIRQ();
+ local_irq_restore(flags);
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
-#endif /* defined(DHDTHREAD) && defined(RXFRAME_THREAD) */
+ }
}
}
-#if defined(DHDTHREAD) && defined(RXFRAME_THREAD)
- if (skbhead)
+
+ if (dhd->rxthread_enabled && skbhead)
dhd_sched_rxf(dhdp, skbhead);
-#endif
+
DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(dhdp, tout_rx);
DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhdp, tout_ctrl);
}
static struct net_device_stats *
dhd_get_stats(struct net_device *net)
{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
+ dhd_info_t *dhd = DHD_DEV_INFO(net);
dhd_if_t *ifp;
int ifidx;
ifidx = dhd_net2idx(dhd, net);
if (ifidx == DHD_BAD_IF) {
DHD_ERROR(("%s: BAD_IF\n", __FUNCTION__));
- return NULL;
+
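+		/* Return a zeroed stats block rather than NULL when the lookup fails. */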
+ memset(&net->stats, 0, sizeof(net->stats));
+ return &net->stats;
}
ifp = dhd->iflist[ifidx];
/* Use the protocol to get dongle stats */
dhd_prot_dstats(&dhd->pub);
}
-
- /* Copy dongle stats to net device stats */
- ifp->stats.rx_packets = dhd->pub.dstats.rx_packets;
- ifp->stats.tx_packets = dhd->pub.dstats.tx_packets;
- ifp->stats.rx_bytes = dhd->pub.dstats.rx_bytes;
- ifp->stats.tx_bytes = dhd->pub.dstats.tx_bytes;
- ifp->stats.rx_errors = dhd->pub.dstats.rx_errors;
- ifp->stats.tx_errors = dhd->pub.dstats.tx_errors;
- ifp->stats.rx_dropped = dhd->pub.dstats.rx_dropped;
- ifp->stats.tx_dropped = dhd->pub.dstats.tx_dropped;
- ifp->stats.multicast = dhd->pub.dstats.multicast;
-
return &ifp->stats;
}
-#ifdef DHDTHREAD
static int
dhd_watchdog_thread(void *data)
{
break;
}
- dhd_os_sdlock(&dhd->pub);
if (dhd->pub.dongle_reset == FALSE) {
DHD_TIMER(("%s:\n", __FUNCTION__));
/* Call the bus module watchdog */
dhd_bus_watchdog(&dhd->pub);
- flags = dhd_os_spin_lock(&dhd->pub);
+
+ DHD_GENERAL_LOCK(&dhd->pub, flags);
/* Count the tick for reference */
dhd->pub.tickcnt++;
time_lapse = jiffies - jiffies_at_start;
/* Reschedule the watchdog */
if (dhd->wd_timer_valid)
mod_timer(&dhd->timer,
- jiffies +
- msecs_to_jiffies(dhd_watchdog_ms) -
- min(msecs_to_jiffies(dhd_watchdog_ms), time_lapse));
- dhd_os_spin_unlock(&dhd->pub, flags);
- }
- dhd_os_sdunlock(&dhd->pub);
+ jiffies +
+ msecs_to_jiffies(dhd_watchdog_ms) -
+ min(msecs_to_jiffies(dhd_watchdog_ms), time_lapse));
+ DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+ }
} else {
break;
}
complete_and_exit(&tsk->completed, 0);
}
-#endif /* DHDTHREAD */
static void dhd_watchdog(ulong data)
{
return;
}
-#ifdef DHDTHREAD
if (dhd->thr_wdt_ctl.thr_pid >= 0) {
up(&dhd->thr_wdt_ctl.sema);
return;
}
-#endif /* DHDTHREAD */
- dhd_os_sdlock(&dhd->pub);
/* Call the bus module watchdog */
dhd_bus_watchdog(&dhd->pub);
- flags = dhd_os_spin_lock(&dhd->pub);
+ DHD_GENERAL_LOCK(&dhd->pub, flags);
/* Count the tick for reference */
dhd->pub.tickcnt++;
/* Reschedule the watchdog */
if (dhd->wd_timer_valid)
mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
- dhd_os_spin_unlock(&dhd->pub, flags);
- dhd_os_sdunlock(&dhd->pub);
+ DHD_GENERAL_UNLOCK(&dhd->pub, flags);
+
}
-#ifdef DHDTHREAD
+#ifdef ENABLE_ADAPTIVE_SCHED
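+/* Run at SCHED_NORMAL while CPU0 is clocked at or below CUSTOM_CPUFREQ_THRESH;
+ * otherwise promote the calling thread to SCHED_FIFO at the requested priority.
+ */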
+static void
+dhd_sched_policy(int prio)
+{
+ struct sched_param param;
+ if (cpufreq_quick_get(0) <= CUSTOM_CPUFREQ_THRESH) {
+ param.sched_priority = 0;
+		setScheduler(current, SCHED_NORMAL, &param);
+ } else {
+ if (get_scheduler_policy(current) != SCHED_FIFO) {
+ param.sched_priority = (prio < MAX_RT_PRIO)? prio : (MAX_RT_PRIO-1);
+			setScheduler(current, SCHED_FIFO, &param);
+ }
+ }
+}
+#endif /* ENABLE_ADAPTIVE_SCHED */
+#ifdef DEBUG_CPU_FREQ
+static int dhd_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
+{
+ dhd_info_t *dhd = container_of(nb, struct dhd_info, freq_trans);
+ struct cpufreq_freqs *freq = data;
+ if (dhd) {
+ if (!dhd->new_freq)
+ goto exit;
+ if (val == CPUFREQ_POSTCHANGE) {
+ DHD_ERROR(("cpu freq is changed to %u kHZ on CPU %d\n",
+ freq->new, freq->cpu));
+ *per_cpu_ptr(dhd->new_freq, freq->cpu) = freq->new;
+ }
+ }
+exit:
+ return 0;
+}
+#endif /* DEBUG_CPU_FREQ */
static int
dhd_dpc_thread(void *data)
{
#ifdef CUSTOM_DPC_CPUCORE
set_cpus_allowed_ptr(current, cpumask_of(CUSTOM_DPC_CPUCORE));
-#endif /* CUSTOM_DPC_CPUCORE */
+#endif
+#ifdef CUSTOM_SET_CPUCORE
+ dhd->pub.current_dpc = current;
+#endif /* CUSTOM_SET_CPUCORE */
/* Run until signal received */
while (1) {
if (!binary_sema_down(tsk)) {
-
+#ifdef ENABLE_ADAPTIVE_SCHED
+ dhd_sched_policy(dhd_dpc_prio);
+#endif /* ENABLE_ADAPTIVE_SCHED */
SMP_RD_BARRIER_DEPENDS();
if (tsk->terminated) {
break;
}
dhd_os_wd_timer_extend(&dhd->pub, FALSE);
DHD_OS_WAKE_UNLOCK(&dhd->pub);
+
} else {
if (dhd->pub.up)
dhd_bus_stop(dhd->pub.bus, TRUE);
complete_and_exit(&tsk->completed, 0);
}
-#ifdef RXFRAME_THREAD
static int
dhd_rxf_thread(void *data)
{
tsk_ctl_t *tsk = (tsk_ctl_t *)data;
dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
+#if defined(WAIT_DEQUEUE)
+#define RXF_WATCHDOG_TIME 250 /* BARK_TIME(1000) / */
+ ulong watchdogTime = OSL_SYSUPTIME(); /* msec */
+#endif
dhd_pub_t *pub = &dhd->pub;
/* This thread doesn't need any user-level access,
/* signal: thread has started */
complete(&tsk->completed);
+#ifdef CUSTOM_SET_CPUCORE
+ dhd->pub.current_rxf = current;
+#endif /* CUSTOM_SET_CPUCORE */
/* Run until signal received */
while (1) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
ulong flags;
#endif
+#ifdef ENABLE_ADAPTIVE_SCHED
+ dhd_sched_policy(dhd_rxf_prio);
+#endif /* ENABLE_ADAPTIVE_SCHED */
SMP_RD_BARRIER_DEPENDS();
#endif
skb = skbnext;
}
+#if defined(WAIT_DEQUEUE)
+ if (OSL_SYSUPTIME() - watchdogTime > RXF_WATCHDOG_TIME) {
+ OSL_SLEEP(1);
+ watchdogTime = OSL_SYSUPTIME();
+ }
+#endif
DHD_OS_WAKE_UNLOCK(pub);
}
complete_and_exit(&tsk->completed, 0);
}
-#endif /* RXFRAME_THREAD */
-#endif /* DHDTHREAD */
+
+#ifdef BCMPCIE
+void dhd_dpc_kill(dhd_pub_t *dhdp)
+{
+ dhd_info_t *dhd;
+
+ if (!dhdp)
+ return;
+
+ dhd = dhdp->info;
+
+ if (!dhd)
+ return;
+
+ tasklet_kill(&dhd->tasklet);
+ DHD_ERROR(("%s: tasklet disabled\n", __FUNCTION__));
+}
+#endif /* BCMPCIE */
static void
dhd_dpc(ulong data)
dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
DHD_OS_WAKE_LOCK(dhdp);
-#ifdef DHDTHREAD
if (dhd->thr_dpc_ctl.thr_pid >= 0) {
/* If the semaphore does not get up,
* wake unlock should be done here
if (!binary_sema_up(&dhd->thr_dpc_ctl))
DHD_OS_WAKE_UNLOCK(dhdp);
return;
- }
-#endif /* DHDTHREAD */
-
- if (dhd->dhd_tasklet_create)
+ } else {
tasklet_schedule(&dhd->tasklet);
+ }
}
-#if defined(DHDTHREAD) && defined(RXFRAME_THREAD)
static void
dhd_sched_rxf(dhd_pub_t *dhdp, void *skb)
{
dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+#ifdef RXF_DEQUEUE_ON_BUSY
+ int ret = BCME_OK;
+ int retry = 2;
+#endif /* RXF_DEQUEUE_ON_BUSY */
DHD_OS_WAKE_LOCK(dhdp);
DHD_TRACE(("dhd_sched_rxf: Enter\n"));
+#ifdef RXF_DEQUEUE_ON_BUSY
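+	/* Retry the enqueue a few times; if the rxf queue stays busy, push the skb
+	 * chain straight to the kernel backlog via netif_rx_ni() instead of blocking.
+	 */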
+ do {
+ ret = dhd_rxf_enqueue(dhdp, skb);
+ if (ret == BCME_OK || ret == BCME_ERROR)
+ break;
+ else
+ OSL_SLEEP(50); /* waiting for dequeueing */
+ } while (retry-- > 0);
+ if (retry <= 0 && ret == BCME_BUSY) {
+ void *skbp = skb;
+
+ while (skbp) {
+ void *skbnext = PKTNEXT(dhdp->osh, skbp);
+ PKTSETNEXT(dhdp->osh, skbp, NULL);
+ netif_rx_ni(skbp);
+ skbp = skbnext;
+ }
+ DHD_ERROR(("send skb to kernel backlog without rxf_thread\n"));
+ }
+ else {
+ if (dhd->thr_rxf_ctl.thr_pid >= 0) {
+ up(&dhd->thr_rxf_ctl.sema);
+ }
+ }
+#else /* RXF_DEQUEUE_ON_BUSY */
do {
if (dhd_rxf_enqueue(dhdp, skb) == BCME_OK)
break;
up(&dhd->thr_rxf_ctl.sema);
}
return;
+#endif /* RXF_DEQUEUE_ON_BUSY */
}
-#endif /* defined(DHDTHREAD) && defined(RXFRAME_THREAD) */
#ifdef TOE
/* Retrieve current toe component enables, which are kept as a bitmap in toe_ol iovar */
}
#endif /* TOE */
+#if defined(WL_CFG80211)
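+/* Read the firmware's scb_probe settings, raise scb_max_probe to
+ * NUM_SCB_MAX_PROBE and write them back; skipped in host AP mode.
+ */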
+void dhd_set_scb_probe(dhd_pub_t *dhd)
+{
+#define NUM_SCB_MAX_PROBE 3
+ int ret = 0;
+ wl_scb_probe_t scb_probe;
+ char iovbuf[WL_EVENTING_MASK_LEN + 12];
+
+ memset(&scb_probe, 0, sizeof(wl_scb_probe_t));
+
+ if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE)
+ return;
+
+ bcm_mkiovar("scb_probe", NULL, 0, iovbuf, sizeof(iovbuf));
+
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
+ DHD_ERROR(("%s: GET max_scb_probe failed\n", __FUNCTION__));
+
+ memcpy(&scb_probe, iovbuf, sizeof(wl_scb_probe_t));
+
+ scb_probe.scb_max_probe = NUM_SCB_MAX_PROBE;
+
+ bcm_mkiovar("scb_probe", (char *)&scb_probe,
+ sizeof(wl_scb_probe_t), iovbuf, sizeof(iovbuf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
+ DHD_ERROR(("%s: max_scb_probe setting failed\n", __FUNCTION__));
+#undef NUM_SCB_MAX_PROBE
+ return;
+}
+#endif /* WL_CFG80211 */
+
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
static void
dhd_ethtool_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
+ dhd_info_t *dhd = DHD_DEV_INFO(net);
snprintf(info->driver, sizeof(info->driver), "wl");
snprintf(info->version, sizeof(info->version), "%lu", dhd->pub.drv_version);
return FALSE;
dhd = (dhd_info_t *)dhdp->info;
- if (dhd->thr_sysioc_ctl.thr_pid < 0) {
+#if !defined(BCMPCIE)
+ if (dhd->thr_dpc_ctl.thr_pid < 0) {
DHD_ERROR(("%s : skipped due to negative pid - unloading?\n", __FUNCTION__));
return FALSE;
}
+#endif
if ((error == -ETIMEDOUT) || (error == -EREMOTEIO) ||
((dhdp->busstate == DHD_BUS_DOWN) && (!dhdp->dongle_reset))) {
return FALSE;
}
-int dhd_ioctl_process(dhd_pub_t *pub, int ifidx, dhd_ioctl_t *ioc)
+int dhd_ioctl_process(dhd_pub_t *pub, int ifidx, dhd_ioctl_t *ioc, void *data_buf)
{
int bcmerror = BCME_OK;
int buflen = 0;
- void *buf = NULL;
struct net_device *net;
net = dhd_idx2net(pub, ifidx);
goto done;
}
- /* Copy out any buffer passed */
- if (ioc->buf) {
- if (ioc->len == 0) {
- DHD_TRACE(("%s: ioc->len=0, returns BCME_BADARG \n", __FUNCTION__));
- bcmerror = BCME_BADARG;
- goto done;
- }
+ if (data_buf)
buflen = MIN(ioc->len, DHD_IOCTL_MAXLEN);
- /* optimization for direct ioctl calls from kernel */
- /*
- if (segment_eq(get_fs(), KERNEL_DS)) {
- buf = ioc->buf;
- } else {
- */
- {
- if (!(buf = MALLOC(pub->osh, buflen + 1))) {
- bcmerror = BCME_NOMEM;
- goto done;
- }
- if (copy_from_user(buf, ioc->buf, buflen)) {
- bcmerror = BCME_BADADDR;
- goto done;
- }
- *(char *)(buf + buflen) = '\0';
- }
- }
/* check for local dhd ioctl and handle it */
if (ioc->driver == DHD_IOCTL_MAGIC) {
- bcmerror = dhd_ioctl((void *)pub, ioc, buf, buflen);
+ bcmerror = dhd_ioctl((void *)pub, ioc, data_buf, buflen);
if (bcmerror)
pub->bcmerror = bcmerror;
goto done;
* prevent disassoc frame being sent before WPS-DONE frame.
*/
if (ioc->cmd == WLC_SET_KEY ||
- (ioc->cmd == WLC_SET_VAR && ioc->buf != NULL &&
- strncmp("wsec_key", ioc->buf, 9) == 0) ||
- (ioc->cmd == WLC_SET_VAR && ioc->buf != NULL &&
- strncmp("bsscfg:wsec_key", ioc->buf, 15) == 0) ||
+ (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
+ strncmp("wsec_key", data_buf, 9) == 0) ||
+ (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
+ strncmp("bsscfg:wsec_key", data_buf, 15) == 0) ||
ioc->cmd == WLC_DISASSOC)
dhd_wait_pend8021x(net);
#ifdef WLMEDIA_HTSF
- if (ioc->buf) {
+ if (data_buf) {
/* short cut wl ioctl calls here */
- if (strcmp("htsf", ioc->buf) == 0) {
+ if (strcmp("htsf", data_buf) == 0) {
dhd_ioctl_htsf_get(dhd, 0);
return BCME_OK;
}
- if (strcmp("htsflate", ioc->buf) == 0) {
+ if (strcmp("htsflate", data_buf) == 0) {
if (ioc->set) {
memset(ts, 0, sizeof(tstamp_t)*TSMAX);
memset(&maxdelayts, 0, sizeof(tstamp_t));
}
return BCME_OK;
}
- if (strcmp("htsfclear", ioc->buf) == 0) {
+ if (strcmp("htsfclear", data_buf) == 0) {
memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN);
memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN);
memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN);
htsf_seqnum = 0;
return BCME_OK;
}
- if (strcmp("htsfhis", ioc->buf) == 0) {
+ if (strcmp("htsfhis", data_buf) == 0) {
dhd_dump_htsfhisto(&vi_d1, "H to D");
dhd_dump_htsfhisto(&vi_d2, "D to D");
dhd_dump_htsfhisto(&vi_d3, "D to H");
dhd_dump_htsfhisto(&vi_d4, "H to H");
return BCME_OK;
}
- if (strcmp("tsport", ioc->buf) == 0) {
+ if (strcmp("tsport", data_buf) == 0) {
if (ioc->set) {
- memcpy(&tsport, ioc->buf + 7, 4);
+ memcpy(&tsport, data_buf + 7, 4);
} else {
DHD_ERROR(("current timestamp port: %d \n", tsport));
}
#endif /* WLMEDIA_HTSF */
if ((ioc->cmd == WLC_SET_VAR || ioc->cmd == WLC_GET_VAR) &&
- ioc->buf != NULL && strncmp("rpc_", ioc->buf, 4) == 0) {
+ data_buf != NULL && strncmp("rpc_", data_buf, 4) == 0) {
#ifdef BCM_FD_AGGR
- bcmerror = dhd_fdaggr_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, buf, buflen);
+ bcmerror = dhd_fdaggr_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen);
#else
bcmerror = BCME_UNSUPPORTED;
#endif
goto done;
}
- bcmerror = dhd_wl_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, buf, buflen);
+ bcmerror = dhd_wl_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen);
done:
dhd_check_hang(net, pub, bcmerror);
- if (!bcmerror && buf && ioc->buf) {
- if (copy_to_user(ioc->buf, buf, buflen))
- bcmerror = -EFAULT;
- }
-
- if (buf)
- MFREE(pub->osh, buf, buflen + 1);
-
return bcmerror;
}
static int
dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd)
{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
+ dhd_info_t *dhd = DHD_DEV_INFO(net);
dhd_ioctl_t ioc;
int bcmerror = 0;
int ifidx;
int ret;
+ void *local_buf = NULL;
+ u16 buflen = 0;
DHD_OS_WAKE_LOCK(&dhd->pub);
+ DHD_PERIM_LOCK(&dhd->pub);
+
+ /* Interface up check for built-in type */
+ if (!dhd_download_fw_on_driverload && dhd->pub.up == 0) {
+ DHD_ERROR(("%s: Interface is down \n", __FUNCTION__));
+ DHD_PERIM_UNLOCK(&dhd->pub);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ return BCME_NOTUP;
+ }
/* send to dongle only if we are not waiting for reload already */
if (dhd->pub.hang_was_sent) {
if (ifidx == DHD_BAD_IF) {
DHD_ERROR(("%s: BAD IF\n", __FUNCTION__));
+ DHD_PERIM_UNLOCK(&dhd->pub);
DHD_OS_WAKE_UNLOCK(&dhd->pub);
return -1;
}
if ((cmd >= SIOCIWFIRST) && (cmd <= SIOCIWLAST)) {
/* may recurse, do NOT lock */
ret = wl_iw_ioctl(net, ifr, cmd);
+ DHD_PERIM_UNLOCK(&dhd->pub);
DHD_OS_WAKE_UNLOCK(&dhd->pub);
return ret;
}
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2)
if (cmd == SIOCETHTOOL) {
ret = dhd_ethtool(dhd, (void*)ifr->ifr_data);
+ DHD_PERIM_UNLOCK(&dhd->pub);
DHD_OS_WAKE_UNLOCK(&dhd->pub);
return ret;
}
}
if (cmd != SIOCDEVPRIVATE) {
+ DHD_PERIM_UNLOCK(&dhd->pub);
DHD_OS_WAKE_UNLOCK(&dhd->pub);
return -EOPNOTSUPP;
}
memset(&ioc, 0, sizeof(ioc));
- /* Copy the ioc control structure part of ioctl request */
- if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) {
- bcmerror = BCME_BADADDR;
- goto done;
- }
+#ifdef CONFIG_COMPAT
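+	/* A 32-bit userspace task on a 64-bit kernel passes a compat_wl_ioctl_t;
+	 * convert it (including the buffer pointer, via compat_ptr) into the
+	 * native ioctl layout before processing.
+	 */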
+ if (is_compat_task()) {
+ compat_wl_ioctl_t compat_ioc;
+ if (copy_from_user(&compat_ioc, ifr->ifr_data, sizeof(compat_wl_ioctl_t))) {
+ bcmerror = BCME_BADADDR;
+ goto done;
+ }
+ ioc.cmd = compat_ioc.cmd;
+ ioc.buf = compat_ptr(compat_ioc.buf);
+ ioc.len = compat_ioc.len;
+ ioc.set = compat_ioc.set;
+ ioc.used = compat_ioc.used;
+ ioc.needed = compat_ioc.needed;
+		/* To differentiate between wl and dhd, read 4 more bytes */
+ if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(compat_wl_ioctl_t),
+ sizeof(uint)) != 0)) {
+ bcmerror = BCME_BADADDR;
+ goto done;
+ }
+ } else
+#endif /* CONFIG_COMPAT */
+ {
+ /* Copy the ioc control structure part of ioctl request */
+ if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) {
+ bcmerror = BCME_BADADDR;
+ goto done;
+ }
- /* To differentiate between wl and dhd read 4 more byes */
- if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t),
- sizeof(uint)) != 0)) {
- bcmerror = BCME_BADADDR;
- goto done;
+		/* To differentiate between wl and dhd, read 4 more bytes */
+ if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t),
+ sizeof(uint)) != 0)) {
+ bcmerror = BCME_BADADDR;
+ goto done;
+ }
}
if (!capable(CAP_NET_ADMIN)) {
goto done;
}
- bcmerror = dhd_ioctl_process(&dhd->pub, ifidx, &ioc);
-
-done:
- DHD_OS_WAKE_UNLOCK(&dhd->pub);
+ if (ioc.len > 0) {
+ buflen = MIN(ioc.len, DHD_IOCTL_MAXLEN);
+ if (!(local_buf = MALLOC(dhd->pub.osh, buflen+1))) {
+ bcmerror = BCME_NOMEM;
+ goto done;
+ }
- return OSL_ERROR(bcmerror);
-}
+ DHD_PERIM_UNLOCK(&dhd->pub);
+ if (copy_from_user(local_buf, ioc.buf, buflen)) {
+ DHD_PERIM_LOCK(&dhd->pub);
+ bcmerror = BCME_BADADDR;
+ goto done;
+ }
+ DHD_PERIM_LOCK(&dhd->pub);
-#ifdef WL_CFG80211
-static int
-dhd_cleanup_virt_ifaces(dhd_info_t *dhd)
-{
- int i = 1; /* Leave ifidx 0 [Primary Interface] */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
- int rollback_lock = FALSE;
-#endif
+ *(char *)(local_buf + buflen) = '\0';
+ }
- DHD_TRACE(("%s: Enter \n", __func__));
+ bcmerror = dhd_ioctl_process(&dhd->pub, ifidx, &ioc, local_buf);
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
- /* release lock for unregister_netdev */
- if (rtnl_is_locked()) {
- rtnl_unlock();
- rollback_lock = TRUE;
+ if (!bcmerror && buflen && local_buf && ioc.buf) {
+ DHD_PERIM_UNLOCK(&dhd->pub);
+ if (copy_to_user(ioc.buf, local_buf, buflen))
+ bcmerror = -EFAULT;
+ DHD_PERIM_LOCK(&dhd->pub);
}
-#endif
- for (i = 1; i < DHD_MAX_IFS; i++) {
- dhd_net_if_lock_local(dhd);
- if (dhd->iflist[i]) {
- DHD_TRACE(("Deleting IF: %d \n", i));
- if ((dhd->iflist[i]->state != DHD_IF_DEL) &&
- (dhd->iflist[i]->state != DHD_IF_DELETING)) {
- dhd->iflist[i]->state = DHD_IF_DEL;
- dhd->iflist[i]->idx = i;
- dhd_op_if(dhd->iflist[i]);
- }
- }
- dhd_net_if_unlock_local(dhd);
- }
+done:
+ if (local_buf)
+ MFREE(dhd->pub.osh, local_buf, buflen+1);
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
- if (rollback_lock)
- rtnl_lock();
-#endif
+ DHD_PERIM_UNLOCK(&dhd->pub);
+ DHD_OS_WAKE_UNLOCK(&dhd->pub);
- return 0;
+ return OSL_ERROR(bcmerror);
}
-#endif /* WL_CFG80211 */
+
static int
dhd_stop(struct net_device *net)
{
int ifidx = 0;
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
+ dhd_info_t *dhd = DHD_DEV_INFO(net);
DHD_OS_WAKE_LOCK(&dhd->pub);
- DHD_TRACE(("%s: Enter %p\n", __FUNCTION__, net));
-
+ DHD_PERIM_LOCK(&dhd->pub);
+ printk("%s: Enter %p\n", __FUNCTION__, net);
if (dhd->pub.up == 0) {
goto exit;
}
+
+ dhd_if_flush_sta(DHD_DEV_IFP(net));
+
+
ifidx = dhd_net2idx(dhd, net);
BCM_REFERENCE(ifidx);
if (!dhd_download_fw_on_driverload) {
if ((dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) &&
(dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)) {
- dhd_cleanup_virt_ifaces(dhd);
+ int i;
+
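+			/* Tear down all virtual interfaces; ifidx 0 (the primary) is kept. */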
+ dhd_net_if_lock_local(dhd);
+ for (i = 1; i < DHD_MAX_IFS; i++)
+ dhd_remove_if(&dhd->pub, i, FALSE);
+ dhd_net_if_unlock_local(dhd);
}
}
}
-#endif
+#endif /* WL_CFG80211 */
#ifdef PROP_TXSTATUS
- dhd_os_wlfc_block(&dhd->pub);
dhd_wlfc_cleanup(&dhd->pub, NULL, 0);
- dhd_os_wlfc_unblock(&dhd->pub);
#endif
/* Stop the protocol module */
dhd_prot_stop(&dhd->pub);
OLD_MOD_DEC_USE_COUNT;
exit:
-#if defined(WL_CFG80211)
if (ifidx == 0 && !dhd_download_fw_on_driverload)
wl_android_wifi_off(net);
-#endif
dhd->pub.rxcnt_timeout = 0;
dhd->pub.txcnt_timeout = 0;
+
dhd->pub.hang_was_sent = 0;
+	/* Clear country spec for built-in type driver */
+ if (!dhd_download_fw_on_driverload) {
+ dhd->pub.dhd_cspec.country_abbrev[0] = 0x00;
+ dhd->pub.dhd_cspec.rev = 0;
+ dhd->pub.dhd_cspec.ccode[0] = 0x00;
+ }
+
+ printk("%s: Exit\n", __FUNCTION__);
+ DHD_PERIM_UNLOCK(&dhd->pub);
DHD_OS_WAKE_UNLOCK(&dhd->pub);
return 0;
}
-/* (USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME) */
+#if defined(WL_CFG80211) && defined(USE_INITIAL_SHORT_DWELL_TIME)
+extern bool g_first_broadcast_scan;
+#endif
+
+#ifdef WL11U
+static int dhd_interworking_enable(dhd_pub_t *dhd)
+{
+ char iovbuf[WLC_IOCTL_SMLEN];
+ uint32 enable = true;
+ int ret = BCME_OK;
+
+ bcm_mkiovar("interworking", (char *)&enable, sizeof(enable), iovbuf, sizeof(iovbuf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
+		DHD_ERROR(("%s: enabling interworking failed, ret=%d\n", __FUNCTION__, ret));
+ }
+
+ if (ret == BCME_OK) {
+ /* basic capabilities for HS20 REL2 */
+ uint32 cap = WL_WNM_BSSTRANS | WL_WNM_NOTIF;
+ bcm_mkiovar("wnm", (char *)&cap, sizeof(cap), iovbuf, sizeof(iovbuf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
+ iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
+ DHD_ERROR(("%s: failed to set WNM info, ret=%d\n", __FUNCTION__, ret));
+ }
+ }
+
+ return ret;
+}
+#endif /* WL11U */
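
The interworking setup above follows the iovar pattern used throughout this driver: pack the variable with bcm_mkiovar() and push it to the firmware with dhd_wl_ioctl_cmd(..., WLC_SET_VAR, ...). A minimal, hedged sketch of that pattern as a standalone helper (the helper name and any example iovar are illustrative, not part of this patch):

static int dhd_set_uint32_iovar(dhd_pub_t *dhd, char *name, uint32 val)
{
	char iovbuf[WLC_IOCTL_SMLEN];
	int ret;

	/* pack "name" + value into iovbuf, then hand it to the firmware */
	bcm_mkiovar(name, (char *)&val, sizeof(val), iovbuf, sizeof(iovbuf));
	ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
	if (ret < 0)
		DHD_ERROR(("%s: setting %s failed, ret=%d\n", __FUNCTION__, name, ret));
	return ret;
}
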
+
static int
dhd_open(struct net_device *net)
{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net);
+ dhd_info_t *dhd = DHD_DEV_INFO(net);
#ifdef TOE
uint32 toe_ol;
#endif
int ifidx;
int32 ret = 0;
+#ifndef WL_CFG80211
+ u32 up = 0;
+#endif
+ printk("%s: Enter %p\n", __FUNCTION__, net);
#if defined(MULTIPLE_SUPPLICANT)
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1 && 1
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1 && defined(BCMSDIO)
if (mutex_is_locked(&_dhd_sdio_mutex_lock_) != 0) {
DHD_ERROR(("%s : dhd_open: call dev open before insmod complete!\n", __FUNCTION__));
}
#endif /* MULTIPLE_SUPPLICANT */
DHD_OS_WAKE_LOCK(&dhd->pub);
- /* Update FW path if it was changed */
- if (strlen(firmware_path) != 0) {
- if (firmware_path[strlen(firmware_path)-1] == '\n')
- firmware_path[strlen(firmware_path)-1] = '\0';
- dhd_conf_set_fw_name_by_chip(&dhd->pub, fw_path, firmware_path);
- }
-
-
+ DHD_PERIM_LOCK(&dhd->pub);
dhd->pub.dongle_trap_occured = 0;
dhd->pub.hang_was_sent = 0;
-#if !defined(WL_CFG80211)
+
+#if 0
/*
* Force start if ifconfig_up gets called before START command
* We keep WEXT's wl_control_wl_start to provide backward compatibility
goto exit;
}
- if (!dhd->iflist[ifidx] || dhd->iflist[ifidx]->state == DHD_IF_DEL) {
+ if (!dhd->iflist[ifidx]) {
DHD_ERROR(("%s: Error: called when IF already deleted\n", __FUNCTION__));
ret = -1;
goto exit;
if (ifidx == 0) {
atomic_set(&dhd->pend_8021x_cnt, 0);
-#if defined(WL_CFG80211)
- DHD_ERROR(("\n%s\n", dhd_version));
if (!dhd_download_fw_on_driverload) {
+ DHD_ERROR(("\n%s\n", dhd_version));
+#if defined(USE_INITIAL_SHORT_DWELL_TIME)
+ g_first_broadcast_scan = TRUE;
+#endif
ret = wl_android_wifi_on(net);
if (ret != 0) {
DHD_ERROR(("%s : wl_android_wifi_on failed (%d)\n",
goto exit;
}
}
-#endif
if (dhd->pub.busstate != DHD_BUS_DATA) {
/* try to bring up bus */
- if ((ret = dhd_bus_start(&dhd->pub)) != 0) {
+ DHD_PERIM_UNLOCK(&dhd->pub);
+ ret = dhd_bus_start(&dhd->pub);
+ DHD_PERIM_LOCK(&dhd->pub);
+ if (ret) {
DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
ret = -1;
goto exit;
}
- /* dhd_prot_init has been called in dhd_bus_start or wl_android_wifi_on */
+ /* dhd_sync_with_dongle has been called in dhd_bus_start or wl_android_wifi_on */
memcpy(net->dev_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
#ifdef TOE
ret = -1;
goto exit;
}
+ dhd_set_scb_probe(&dhd->pub);
+#else
+ ret = wldev_ioctl(net, WLC_UP, &up, sizeof(up), true);
+ if (unlikely(ret)) {
+ DHD_ERROR(("WLC_UP error (%d)\n", ret));
+ }
#endif /* WL_CFG80211 */
}
if (ret)
dhd_stop(net);
+ DHD_PERIM_UNLOCK(&dhd->pub);
DHD_OS_WAKE_UNLOCK(&dhd->pub);
#if defined(MULTIPLE_SUPPLICANT)
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1 && 1
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1 && defined(BCMSDIO)
mutex_unlock(&_dhd_sdio_mutex_lock_);
#endif
#endif /* MULTIPLE_SUPPLICANT */
+ printk("%s: Exit ret=%d\n", __FUNCTION__, ret);
return ret;
}
}
#ifdef MULTIPLE_SUPPLICANT
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1 && 1
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) && 1 && defined(BCMSDIO)
if (mutex_is_locked(&_dhd_sdio_mutex_lock_) != 0) {
DHD_ERROR(("%s : dhdsdio_probe is already running!\n", __FUNCTION__));
return 0;
#endif /* MULTIPLE_SUPPLICANT */
/* && defined(OEM_ANDROID) && defined(BCMSDIO) */
- dhd = *(dhd_info_t **)netdev_priv(net);
+ dhd = DHD_DEV_INFO(net);
/* If driver is already initialized, do nothing
*/
return 0;
}
-osl_t *
-dhd_osl_attach(void *pdev, uint bustype)
+int
+dhd_event_ifadd(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
{
- return osl_attach(pdev, bustype, TRUE);
-}
-void
-dhd_osl_detach(osl_t *osh)
-{
- if (MALLOCED(osh)) {
- DHD_ERROR(("%s: MEMORY LEAK %d bytes\n", __FUNCTION__, MALLOCED(osh)));
- }
- osl_detach(osh);
-#if 1 && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
- dhd_registration_check = FALSE;
- up(&dhd_registration_sem);
-#if defined(BCMLXSDMMC)
- up(&dhd_chipup_sem);
+#ifdef WL_CFG80211
+ if (wl_cfg80211_notify_ifadd(ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK)
+ return BCME_OK;
#endif
-#endif
+
+ /* handle IF event caused by wl commands, SoftAP, WEXT and
+ * anything else. This has to be done asynchronously otherwise
+ * DPC will be blocked (and iovars will timeout as DPC has no chance
+ * to read the response back)
+ */
+ if (ifevent->ifidx > 0) {
+ dhd_if_event_t *if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
+
+ memcpy(&if_event->event, ifevent, sizeof(if_event->event));
+ memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
+ strncpy(if_event->name, name, IFNAMSIZ);
+ if_event->name[IFNAMSIZ - 1] = '\0';
+ dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event,
+ DHD_WQ_WORK_IF_ADD, dhd_ifadd_event_handler, DHD_WORK_PRIORITY_LOW);
+ }
+
+ return BCME_OK;
}
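
As the comment above notes, interface-add events must not be handled in DPC context; the enqueue side copies the event into a heap-allocated dhd_if_event_t and defers it. A hedged sketch of the consuming side is shown below. The handler signature and the work performed are assumptions (the real dhd_ifadd_event_handler lives elsewhere in dhd_linux.c); it is kept here only to illustrate the producer/consumer split:

/* Assumed handler shape: (handle, event payload, event id). */
static void example_ifadd_handler(void *handle, void *event_info, u8 event)
{
	dhd_info_t *dhdinfo = (dhd_info_t *)handle;
	dhd_if_event_t *if_event = (dhd_if_event_t *)event_info;

	if (event != DHD_WQ_WORK_IF_ADD || if_event == NULL)
		return;

	/* Runs in process context, so the slow netdev allocation/registration
	 * can happen here without stalling DPC.
	 */
	dhd_allocate_if(&dhdinfo->pub, if_event->event.ifidx, if_event->name,
		if_event->mac, if_event->event.bssidx, TRUE);

	MFREE(dhdinfo->pub.osh, if_event, sizeof(dhd_if_event_t));
}
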
int
-dhd_add_if(dhd_info_t *dhd, int ifidx, void *handle, char *name,
- uint8 *mac_addr, uint32 flags, uint8 bssidx)
+dhd_event_ifdel(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
{
- dhd_if_t *ifp;
+ dhd_if_event_t *if_event;
- DHD_TRACE(("%s: idx %d, handle->%p\n", __FUNCTION__, ifidx, handle));
+#ifdef WL_CFG80211
+ if (wl_cfg80211_notify_ifdel(ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK)
+ return BCME_OK;
+#endif /* WL_CFG80211 */
- ASSERT(dhd && (ifidx < DHD_MAX_IFS));
+ /* handle IF event caused by wl commands, SoftAP, WEXT and
+ * anything else
+ */
+ if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
+ memcpy(&if_event->event, ifevent, sizeof(if_event->event));
+ memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
+ strncpy(if_event->name, name, IFNAMSIZ);
+ if_event->name[IFNAMSIZ - 1] = '\0';
+ dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event, DHD_WQ_WORK_IF_DEL,
+ dhd_ifdel_event_handler, DHD_WORK_PRIORITY_LOW);
+
+ return BCME_OK;
+}
+
+/* unregister and free the existing net_device interface (if any) in iflist and
+ * allocate a new one. the slot is reused. this function does NOT register the
+ * new interface to linux kernel. dhd_register_if does the job
+ */
+struct net_device*
+dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, char *name,
+ uint8 *mac, uint8 bssidx, bool need_rtnl_lock)
+{
+ dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
+ dhd_if_t *ifp;
+
+ ASSERT(dhdinfo && (ifidx < DHD_MAX_IFS));
+ ifp = dhdinfo->iflist[ifidx];
- ifp = dhd->iflist[ifidx];
if (ifp != NULL) {
if (ifp->net != NULL) {
- netif_stop_queue(ifp->net);
- unregister_netdev(ifp->net);
- free_netdev(ifp->net);
+ DHD_ERROR(("%s: free existing IF %s\n", __FUNCTION__, ifp->net->name));
+
+ dhd_dev_priv_clear(ifp->net); /* clear net_device private */
+
+ /* in unregister_netdev case, the interface gets freed by net->destructor
+ * (which is set to free_netdev)
+ */
+ if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
+ free_netdev(ifp->net);
+ } else {
+ netif_stop_queue(ifp->net);
+ if (need_rtnl_lock)
+ unregister_netdev(ifp->net);
+ else
+ unregister_netdevice(ifp->net);
+ }
+ ifp->net = NULL;
}
- } else
- if ((ifp = MALLOC(dhd->pub.osh, sizeof(dhd_if_t))) == NULL) {
- DHD_ERROR(("%s: OOM - dhd_if_t(%d)\n", __FUNCTION__, sizeof(dhd_if_t)));
- return -ENOMEM;
+ } else {
+ ifp = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_t));
+ if (ifp == NULL) {
+ DHD_ERROR(("%s: OOM - dhd_if_t(%zu)\n", __FUNCTION__, sizeof(dhd_if_t)));
+ return NULL;
}
+ }
memset(ifp, 0, sizeof(dhd_if_t));
- ifp->event2cfg80211 = FALSE;
- ifp->info = dhd;
- dhd->iflist[ifidx] = ifp;
- strncpy(ifp->name, name, IFNAMSIZ);
- ifp->name[IFNAMSIZ] = '\0';
- INIT_LIST_HEAD(&ifp->ipv6_list);
- spin_lock_init(&ifp->ipv6_lock);
- if (mac_addr != NULL)
- memcpy(&ifp->mac_addr, mac_addr, ETHER_ADDR_LEN);
-
- if (handle == NULL) {
- ifp->state = DHD_IF_ADD;
- ifp->idx = ifidx;
- ifp->bssidx = bssidx;
- ASSERT(dhd->thr_sysioc_ctl.thr_pid >= 0);
- up(&dhd->thr_sysioc_ctl.sema);
- } else
- ifp->net = (struct net_device *)handle;
+ ifp->info = dhdinfo;
+ ifp->idx = ifidx;
+ ifp->bssidx = bssidx;
+ if (mac != NULL)
+ memcpy(&ifp->mac_addr, mac, ETHER_ADDR_LEN);
- if (ifidx == 0) {
- ifp->event2cfg80211 = TRUE;
+ /* Allocate etherdev, including space for private structure */
+ ifp->net = alloc_etherdev(DHD_DEV_PRIV_SIZE);
+ if (ifp->net == NULL) {
+		DHD_ERROR(("%s: OOM - alloc_etherdev(%zu)\n", __FUNCTION__, (size_t)DHD_DEV_PRIV_SIZE));
+ goto fail;
}
- return 0;
+ /* Setup the dhd interface's netdevice private structure. */
+ dhd_dev_priv_save(ifp->net, dhdinfo, ifp, ifidx);
+
+ if (name && name[0]) {
+ strncpy(ifp->net->name, name, IFNAMSIZ);
+ ifp->net->name[IFNAMSIZ - 1] = '\0';
+ }
+#ifdef WL_CFG80211
+ if (ifidx == 0)
+ ifp->net->destructor = free_netdev;
+ else
+ ifp->net->destructor = dhd_netdev_free;
+#else
+ ifp->net->destructor = free_netdev;
+#endif /* WL_CFG80211 */
+ strncpy(ifp->name, ifp->net->name, IFNAMSIZ);
+ ifp->name[IFNAMSIZ - 1] = '\0';
+ dhdinfo->iflist[ifidx] = ifp;
+
+#ifdef PCIE_FULL_DONGLE
+ /* Initialize STA info list */
+ INIT_LIST_HEAD(&ifp->sta_list);
+ DHD_IF_STA_LIST_LOCK_INIT(ifp);
+#endif /* PCIE_FULL_DONGLE */
+
+ return ifp->net;
+
+fail:
+ if (ifp != NULL) {
+ if (ifp->net != NULL) {
+ dhd_dev_priv_clear(ifp->net);
+ free_netdev(ifp->net);
+ ifp->net = NULL;
+ }
+ MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
+ ifp = NULL;
+ }
+ dhdinfo->iflist[ifidx] = NULL;
+ return NULL;
}
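
dhd_allocate_if() relies on the netdev private area that replaces the old *(dhd_info_t **)netdev_priv(net) convention; DHD_DEV_INFO(), DHD_DEV_IFP() and dhd_dev_priv_save() all address that area. A hedged sketch of the layout this implies (field and macro bodies are assumptions reconstructed only from the calls visible in this hunk; the real definitions live elsewhere in the driver):

typedef struct dhd_dev_priv {
	dhd_info_t *dhd;   /* what DHD_DEV_INFO(dev) returns */
	dhd_if_t   *ifp;   /* what DHD_DEV_IFP(dev) returns */
	int         ifidx; /* index into dhd->iflist[] */
} dhd_dev_priv_t;

#define DHD_DEV_PRIV_SIZE   (sizeof(dhd_dev_priv_t))
#define DHD_DEV_PRIV(dev)   ((dhd_dev_priv_t *)netdev_priv(dev))
#define DHD_DEV_INFO(dev)   (DHD_DEV_PRIV(dev)->dhd)
#define DHD_DEV_IFP(dev)    (DHD_DEV_PRIV(dev)->ifp)

static inline void
dhd_dev_priv_save(struct net_device *dev, dhd_info_t *dhd, dhd_if_t *ifp, int ifidx)
{
	dhd_dev_priv_t *priv = DHD_DEV_PRIV(dev);
	priv->dhd = dhd;
	priv->ifp = ifp;
	priv->ifidx = ifidx;
}
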
-void
-dhd_del_if(dhd_info_t *dhd, int ifidx)
+/* unregister and free the net_device interface associated with the indexed
+ * slot, also free the slot memory and set the slot pointer to NULL
+ */
+int
+dhd_remove_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock)
{
+ dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
dhd_if_t *ifp;
- DHD_TRACE(("%s: idx %d\n", __FUNCTION__, ifidx));
+ ifp = dhdinfo->iflist[ifidx];
+ if (ifp != NULL) {
+ if (ifp->net != NULL) {
+ DHD_ERROR(("deleting interface '%s' idx %d\n", ifp->net->name, ifp->idx));
+
+ /* in unregister_netdev case, the interface gets freed by net->destructor
+ * (which is set to free_netdev)
+ */
+ if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
+ free_netdev(ifp->net);
+ } else {
+ netif_stop_queue(ifp->net);
+
+
+
+ if (need_rtnl_lock)
+ unregister_netdev(ifp->net);
+ else
+ unregister_netdevice(ifp->net);
+ }
+ ifp->net = NULL;
+ }
+#ifdef DHD_WMF
+ dhd_wmf_cleanup(dhdpub, ifidx);
+#endif /* DHD_WMF */
+
+ dhd_if_del_sta_list(ifp);
+
+ dhdinfo->iflist[ifidx] = NULL;
+ MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
- ASSERT(dhd && ifidx && (ifidx < DHD_MAX_IFS));
- ifp = dhd->iflist[ifidx];
- if (!ifp) {
- DHD_ERROR(("%s: Null interface\n", __FUNCTION__));
- return;
}
- ifp->state = DHD_IF_DEL;
- ifp->idx = ifidx;
- ASSERT(dhd->thr_sysioc_ctl.thr_pid >= 0);
- up(&dhd->thr_sysioc_ctl.sema);
+ return BCME_OK;
}
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
};
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) */
+#ifdef DEBUGGER
+extern void debugger_init(void *bus_handle);
+#endif
+
+
+#ifdef SHOW_LOGTRACE
+static char *logstrs_path = "/root/logstrs.bin";
+module_param(logstrs_path, charp, S_IRUGO);
+
+int
+dhd_init_logstrs_array(dhd_event_log_t *temp)
+{
+ struct file *filep = NULL;
+ struct kstat stat;
+ mm_segment_t fs;
+ char *raw_fmts = NULL;
+ int logstrs_size = 0;
+
+ logstr_header_t *hdr = NULL;
+ uint32 *lognums = NULL;
+ char *logstrs = NULL;
+ int ram_index = 0;
+ char **fmts;
+ int num_fmts = 0;
+ uint32 i = 0;
+ int error = 0;
+ set_fs(KERNEL_DS);
+ fs = get_fs();
+ filep = filp_open(logstrs_path, O_RDONLY, 0);
+ if (IS_ERR(filep)) {
+ DHD_ERROR(("Failed to open the file logstrs.bin in %s", __FUNCTION__));
+ goto fail;
+ }
+ error = vfs_stat(logstrs_path, &stat);
+ if (error) {
+ DHD_ERROR(("Failed in %s to find file stat", __FUNCTION__));
+ goto fail;
+ }
+ logstrs_size = (int) stat.size;
+
+ raw_fmts = kmalloc(logstrs_size, GFP_KERNEL);
+ if (raw_fmts == NULL) {
+ DHD_ERROR(("Failed to allocate raw_fmts memory"));
+ goto fail;
+ }
+ if (vfs_read(filep, raw_fmts, logstrs_size, &filep->f_pos) != logstrs_size) {
+ DHD_ERROR(("Error: Log strings file read failed"));
+ goto fail;
+ }
+
+ /* Remember header from the logstrs.bin file */
+ hdr = (logstr_header_t *) (raw_fmts + logstrs_size -
+ sizeof(logstr_header_t));
+
+ if (hdr->log_magic == LOGSTRS_MAGIC) {
+ /*
+ * logstrs.bin start with header.
+ */
+ num_fmts = hdr->rom_logstrs_offset / sizeof(uint32);
+ ram_index = (hdr->ram_lognums_offset -
+ hdr->rom_lognums_offset) / sizeof(uint32);
+ lognums = (uint32 *) &raw_fmts[hdr->rom_lognums_offset];
+ logstrs = (char *) &raw_fmts[hdr->rom_logstrs_offset];
+ } else {
+ /*
+ * Legacy logstrs.bin format without header.
+ */
+ num_fmts = *((uint32 *) (raw_fmts)) / sizeof(uint32);
+ if (num_fmts == 0) {
+ /* Legacy ROM/RAM logstrs.bin format:
+ * - ROM 'lognums' section
+ * - RAM 'lognums' section
+ * - ROM 'logstrs' section.
+ * - RAM 'logstrs' section.
+ *
+ * 'lognums' is an array of indexes for the strings in the
+ * 'logstrs' section. The first uint32 is 0 (index of first
+ * string in ROM 'logstrs' section).
+ *
+ * The 4324b5 is the only ROM that uses this legacy format. Use the
+ * fixed number of ROM fmtnums to find the start of the RAM
+ * 'lognums' section. Use the fixed first ROM string ("Con\n") to
+ * find the ROM 'logstrs' section.
+ */
+ #define NUM_4324B5_ROM_FMTS 186
+ #define FIRST_4324B5_ROM_LOGSTR "Con\n"
+ ram_index = NUM_4324B5_ROM_FMTS;
+ lognums = (uint32 *) raw_fmts;
+ num_fmts = ram_index;
+ logstrs = (char *) &raw_fmts[num_fmts << 2];
+ while (strncmp(FIRST_4324B5_ROM_LOGSTR, logstrs, 4)) {
+ num_fmts++;
+ logstrs = (char *) &raw_fmts[num_fmts << 2];
+ }
+ } else {
+ /* Legacy RAM-only logstrs.bin format:
+ * - RAM 'lognums' section
+ * - RAM 'logstrs' section.
+ *
+ * 'lognums' is an array of indexes for the strings in the
+ * 'logstrs' section. The first uint32 is an index to the
+ * start of 'logstrs'. Therefore, if this index is divided
+ * by 'sizeof(uint32)' it provides the number of logstr
+ * entries.
+ */
+ ram_index = 0;
+ lognums = (uint32 *) raw_fmts;
+ logstrs = (char *) &raw_fmts[num_fmts << 2];
+ }
+ }
+ fmts = kmalloc(num_fmts * sizeof(char *), GFP_KERNEL);
+ if (fmts == NULL) {
+ DHD_ERROR(("Failed to allocate fmts memory"));
+ goto fail;
+ }
+
+ for (i = 0; i < num_fmts; i++) {
+ /* ROM lognums index into logstrs using 'rom_logstrs_offset' as a base
+ * (they are 0-indexed relative to 'rom_logstrs_offset').
+ *
+ * RAM lognums are already indexed to point to the correct RAM logstrs (they
+ * are 0-indexed relative to the start of the logstrs.bin file).
+ */
+ if (i == ram_index) {
+ logstrs = raw_fmts;
+ }
+ fmts[i] = &logstrs[lognums[i]];
+ }
+ temp->fmts = fmts;
+ temp->raw_fmts = raw_fmts;
+ temp->num_fmts = num_fmts;
+ filp_close(filep, NULL);
+ set_fs(fs);
+ return 0;
+fail:
+ if (raw_fmts) {
+ kfree(raw_fmts);
+ raw_fmts = NULL;
+ }
+ if (!IS_ERR(filep))
+ filp_close(filep, NULL);
+ set_fs(fs);
+ temp->fmts = NULL;
+ return -1;
+}
+#endif /* SHOW_LOGTRACE */
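
dhd_init_logstrs_array() only builds the lookup table; the consuming side is not part of this hunk. Given fmts[i] = &logstrs[lognums[i]] above, resolving a firmware trace record to its printf format string is a bounded array lookup. A minimal sketch, assuming the record carries a format index ('fmtnum' is illustrative):

static const char *
dhd_logstr_lookup(dhd_event_log_t *ev, uint32 fmtnum)
{
	/* fmts[] was populated above; anything out of range is unknown */
	if (ev == NULL || ev->fmts == NULL || fmtnum >= (uint32)ev->num_fmts)
		return "(unknown format)";
	return ev->fmts[fmtnum];
}
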
+
+
dhd_pub_t *
dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen)
{
dhd_info_t *dhd = NULL;
struct net_device *net = NULL;
+ char if_name[IFNAMSIZ] = {'\0'};
+ uint32 bus_type = -1;
+ uint32 bus_num = -1;
+ uint32 slot_num = -1;
+ wifi_adapter_info_t *adapter = NULL;
dhd_attach_states_t dhd_state = DHD_ATTACH_STATE_INIT;
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
- /* updates firmware nvram path if it was provided as module parameters */
- if (strlen(nvram_path) != 0) {
- bzero(nv_path, MOD_PARAM_PATHLEN);
- strncpy(nv_path, nvram_path, sizeof(nv_path) -1);
- }
- if (strlen(config_path) != 0) {
- bzero(conf_path, MOD_PARAM_PATHLEN);
- strncpy(conf_path, config_path, sizeof(conf_path) -1);
- }
-
- /* Allocate etherdev, including space for private structure */
- if (!(net = alloc_etherdev(sizeof(dhd)))) {
- DHD_ERROR(("%s: OOM - alloc_etherdev\n", __FUNCTION__));
- goto fail;
- }
- dhd_state |= DHD_ATTACH_STATE_NET_ALLOC;
+ /* will implement get_ids for DBUS later */
+#if defined(BCMSDIO)
+ dhd_bus_get_ids(bus, &bus_type, &bus_num, &slot_num);
+#endif
+ adapter = dhd_wifi_platform_get_adapter(bus_type, bus_num, slot_num);
/* Allocate primary dhd_info */
-#if defined(CONFIG_DHD_USE_STATIC_BUF)
- dhd = (void *)dhd_os_prealloc(osh, DHD_PREALLOC_DHD_INFO, sizeof(dhd_info_t));
- if (!dhd) {
- DHD_INFO(("%s: OOM - Pre-alloc dhd_info\n", __FUNCTION__));
-#endif /* CONFIG_DHD_USE_STATIC_BUF */
- if (!(dhd = MALLOC(osh, sizeof(dhd_info_t)))) {
- DHD_ERROR(("%s: OOM - alloc dhd_info\n", __FUNCTION__));
- goto fail;
- }
-#if defined(CONFIG_DHD_USE_STATIC_BUF)
+ dhd = wifi_platform_prealloc(adapter, DHD_PREALLOC_DHD_INFO, sizeof(dhd_info_t));
+ if (dhd == NULL) {
+ dhd = MALLOC(osh, sizeof(dhd_info_t));
+ if (dhd == NULL) {
+ DHD_ERROR(("%s: OOM - alloc dhd_info\n", __FUNCTION__));
+ goto fail;
+ }
}
-#endif /* CONFIG_DHD_USE_STATIC_BUF */
memset(dhd, 0, sizeof(dhd_info_t));
+ dhd_state |= DHD_ATTACH_STATE_DHD_ALLOC;
-#ifdef DHDTHREAD
+ dhd->unit = dhd_found + instance_base; /* do not increment dhd_found, yet */
+
+ dhd->pub.osh = osh;
+ dhd->adapter = adapter;
+
+#ifdef GET_CUSTOM_MAC_ENABLE
+ wifi_platform_get_mac_addr(dhd->adapter, dhd->pub.mac.octet);
+#endif /* GET_CUSTOM_MAC_ENABLE */
dhd->thr_dpc_ctl.thr_pid = DHD_PID_KT_TL_INVALID;
dhd->thr_wdt_ctl.thr_pid = DHD_PID_KT_INVALID;
-#endif /* DHDTHREAD */
- dhd->dhd_tasklet_create = FALSE;
- dhd->thr_sysioc_ctl.thr_pid = DHD_PID_KT_INVALID;
- dhd_state |= DHD_ATTACH_STATE_DHD_ALLOC;
- /*
- * Save the dhd_info into the priv
- */
- memcpy((void *)netdev_priv(net), &dhd, sizeof(dhd));
- dhd->pub.osh = osh;
+ /* Initialize thread based operation and lock */
+ sema_init(&dhd->sdsem, 1);
/* Link to info module */
dhd->pub.info = dhd;
+
+
/* Link to bus module */
dhd->pub.bus = bus;
dhd->pub.hdrlen = bus_hdrlen;
- if (strlen(firmware_path) != 0)
- dhd_conf_set_fw_name_by_chip(&dhd->pub, fw_path, firmware_path);
+
+ /* dhd_conf must be attached after linking dhd to dhd->pub.info,
+	 * because dhd_detach checks whether .info is NULL.
+ */
+ if (dhd_conf_attach(&dhd->pub) != 0) {
+ DHD_ERROR(("dhd_conf_attach failed\n"));
+ goto fail;
+ }
+ dhd_conf_reset(&dhd->pub);
+ dhd_conf_set_chiprev(&dhd->pub, dhd_bus_chip(bus), dhd_bus_chiprev(bus));
+
+	/* Some DHD modules (e.g. cfg80211) configure the operation mode based on the firmware name.
+ * This is indeed a hack but we have to make it work properly before we have a better
+ * solution
+ */
+ dhd_update_fw_nv_path(dhd);
/* Set network interface name if it was provided as module parameter */
if (iface_name[0]) {
int len;
char ch;
- strncpy(net->name, iface_name, IFNAMSIZ);
- net->name[IFNAMSIZ - 1] = 0;
- len = strlen(net->name);
- ch = net->name[len - 1];
+ strncpy(if_name, iface_name, IFNAMSIZ);
+ if_name[IFNAMSIZ - 1] = 0;
+ len = strlen(if_name);
+ ch = if_name[len - 1];
if ((ch > '9' || ch < '0') && (len < IFNAMSIZ - 2))
- strcat(net->name, "%d");
+ strcat(if_name, "%d");
}
-
- if (dhd_add_if(dhd, 0, (void *)net, net->name, NULL, 0, 0) == DHD_BAD_IF)
+ net = dhd_allocate_if(&dhd->pub, 0, if_name, NULL, 0, TRUE);
+ if (net == NULL)
goto fail;
dhd_state |= DHD_ATTACH_STATE_ADD_IF;
#ifdef PROP_TXSTATUS
spin_lock_init(&dhd->wlfc_spinlock);
-#ifdef PROP_TXSTATUS_VSDB
- dhd->pub.wlfc_enabled = FALSE;
-#else
- if (!disable_proptx)
- dhd->pub.wlfc_enabled = TRUE;
- else
- dhd->pub.wlfc_enabled = FALSE;
-#endif /* PROP_TXSTATUS_VSDB */
- dhd->pub.ptx_opt_enabled = FALSE;
+
dhd->pub.skip_fc = dhd_wlfc_skip_fc;
- dhd->pub.plat_enable = dhd_wlfc_plat_enable;
+ dhd->pub.plat_init = dhd_wlfc_plat_init;
dhd->pub.plat_deinit = dhd_wlfc_plat_deinit;
#endif /* PROP_TXSTATUS */
spin_lock_init(&dhd->sdlock);
spin_lock_init(&dhd->txqlock);
spin_lock_init(&dhd->dhd_lock);
-#if defined(DHDTHREAD) && defined(RXFRAME_THREAD)
spin_lock_init(&dhd->rxf_lock);
-#endif /* defined(DHDTHREAD) && defined(RXFRAME_THREAD) */
+#if defined(RXFRAME_THREAD)
+ dhd->rxthread_enabled = TRUE;
+#endif /* defined(RXFRAME_THREAD) */
+
#ifdef DHDTCPACK_SUPPRESS
spin_lock_init(&dhd->tcpack_lock);
#endif /* DHDTCPACK_SUPPRESS */
#endif
dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;
- if (dhd_conf_attach(&dhd->pub) != 0) {
- DHD_ERROR(("dhd_conf_attach failed\n"));
- goto fail;
- }
-
/* Attach and link in the protocol */
if (dhd_prot_attach(&dhd->pub) != 0) {
DHD_ERROR(("dhd_prot_attach failed\n"));
}
#endif /* defined(WL_WIRELESS_EXT) */
+#ifdef SHOW_LOGTRACE
+ dhd_init_logstrs_array(&dhd->event_data);
+#endif /* SHOW_LOGTRACE */
+
+ if (dhd_sta_pool_init(&dhd->pub, DHD_MAX_STA) != BCME_OK) {
+ DHD_ERROR(("%s: Initializing %u sta\n", __FUNCTION__, DHD_MAX_STA));
+ goto fail;
+ }
+
/* Set up the watchdog timer */
init_timer(&dhd->timer);
dhd->timer.function = dhd_watchdog;
dhd->default_wd_interval = dhd_watchdog_ms;
-#ifdef DHDTHREAD
- /* Initialize thread based operation and lock */
- sema_init(&dhd->sdsem, 1);
- if ((dhd_watchdog_prio >= 0) && (dhd_dpc_prio >= 0)) {
- dhd->threads_only = TRUE;
- }
- else {
- dhd->threads_only = FALSE;
- }
-
if (dhd_watchdog_prio >= 0) {
/* Initialize watchdog thread */
PROC_START(dhd_watchdog_thread, dhd, &dhd->thr_wdt_ctl, 0, "dhd_watchdog_thread");
dhd->thr_wdt_ctl.thr_pid = -1;
}
+#ifdef DEBUGGER
+ debugger_init((void *) bus);
+#endif
+
/* Set up the bottom half handler */
if (dhd_dpc_prio >= 0) {
/* Initialize DPC thread */
tasklet_init(&dhd->tasklet, dhd_dpc, (ulong)dhd);
dhd->thr_dpc_ctl.thr_pid = -1;
}
-#ifdef RXFRAME_THREAD
- bzero(&dhd->pub.skbbuf[0], sizeof(void *) * MAXSKBPEND);
- /* Initialize RXF thread */
- PROC_START(dhd_rxf_thread, dhd, &dhd->thr_rxf_ctl, 0, "dhd_rxf");
-#endif
-#else
- /* Set up the bottom half handler */
- tasklet_init(&dhd->tasklet, dhd_dpc, (ulong)dhd);
- dhd->dhd_tasklet_create = TRUE;
-#endif /* DHDTHREAD */
- if (dhd_sysioc) {
- PROC_START(_dhd_sysioc_thread, dhd, &dhd->thr_sysioc_ctl, 0, "dhd_sysioc");
- } else {
- dhd->thr_sysioc_ctl.thr_pid = -1;
+ if (dhd->rxthread_enabled) {
+ bzero(&dhd->pub.skbbuf[0], sizeof(void *) * MAXSKBPEND);
+ /* Initialize RXF thread */
+ PROC_START(dhd_rxf_thread, dhd, &dhd->thr_rxf_ctl, 0, "dhd_rxf");
}
- dhd_state |= DHD_ATTACH_STATE_THREADS_CREATED;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (1)
- INIT_WORK(&dhd->work_hang, dhd_hang_process);
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
-
- /*
- * Save the dhd_info into the priv
- */
- memcpy(netdev_priv(net), &dhd, sizeof(dhd));
+ dhd_state |= DHD_ATTACH_STATE_THREADS_CREATED;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
- KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM_SLEEP)
- register_pm_notifier(&dhd_sleep_pm_notifier);
-#endif /* (LINUX_VERSION >= 2.6.27 && LINUX_VERSION <= 2.6.39 && CONFIG_PM_SLEEP */
+#if defined(CONFIG_PM_SLEEP)
+ if (!dhd_pm_notifier_registered) {
+ dhd_pm_notifier_registered = TRUE;
+ register_pm_notifier(&dhd_pm_notifier);
+ }
+#endif /* CONFIG_PM_SLEEP */
#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
dhd->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 20;
#ifdef ARP_OFFLOAD_SUPPORT
dhd->pend_ipaddr = 0;
- register_inetaddr_notifier(&dhd_notifier);
+ if (!dhd_inetaddr_notifier_registered) {
+ dhd_inetaddr_notifier_registered = TRUE;
+ register_inetaddr_notifier(&dhd_inetaddr_notifier);
+ }
#endif /* ARP_OFFLOAD_SUPPORT */
- register_inet6addr_notifier(&dhd_notifier_ipv6);
-
+#ifdef CONFIG_IPV6
+ if (!dhd_inet6addr_notifier_registered) {
+ dhd_inet6addr_notifier_registered = TRUE;
+ register_inet6addr_notifier(&dhd_inet6addr_notifier);
+ }
+#endif
+ dhd->dhd_deferred_wq = dhd_deferred_work_init((void *)dhd);
+#ifdef DEBUG_CPU_FREQ
+ dhd->new_freq = alloc_percpu(int);
+ dhd->freq_trans.notifier_call = dhd_cpufreq_notifier;
+ cpufreq_register_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
+#endif
#ifdef DHDTCPACK_SUPPRESS
- dhd->pub.tcp_ack_info_cnt = 0;
- bzero(dhd->pub.tcp_ack_info_tbl, sizeof(struct tcp_ack_info)*MAXTCPSTREAMS);
+#ifdef BCMSDIO
+ dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_DELAYTX);
+#elif defined(BCMPCIE)
+ dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_REPLACE);
+#else
+ dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
+#endif /* BCMSDIO */
#endif /* DHDTCPACK_SUPPRESS */
dhd_state |= DHD_ATTACH_STATE_DONE;
dhd->dhd_state = dhd_state;
+
+ dhd_found++;
return &dhd->pub;
fail:
- if (dhd_state < DHD_ATTACH_STATE_DHD_ALLOC) {
- if (net) free_netdev(net);
- } else {
+ if (dhd_state >= DHD_ATTACH_STATE_DHD_ALLOC) {
DHD_TRACE(("%s: Calling dhd_detach dhd_state 0x%x &dhd->pub %p\n",
__FUNCTION__, dhd_state, &dhd->pub));
dhd->dhd_state = dhd_state;
return NULL;
}
+int dhd_get_fw_mode(dhd_info_t *dhdinfo)
+{
+ if (strstr(dhdinfo->fw_path, "_apsta") != NULL)
+ return DHD_FLAG_HOSTAP_MODE;
+ if (strstr(dhdinfo->fw_path, "_p2p") != NULL)
+ return DHD_FLAG_P2P_MODE;
+ if (strstr(dhdinfo->fw_path, "_ibss") != NULL)
+ return DHD_FLAG_IBSS_MODE;
+ if (strstr(dhdinfo->fw_path, "_mfg") != NULL)
+ return DHD_FLAG_MFG_MODE;
+
+ return DHD_FLAG_STA_MODE;
+}
+
+extern int rkwifi_set_firmware(char *fw, char *nvram);
+
+bool dhd_update_fw_nv_path(dhd_info_t *dhdinfo)
+{
+ int fw_len;
+ int nv_len;
+ int conf_len;
+ const char *fw = NULL;
+ const char *nv = NULL;
+ const char *conf = NULL;
+ char firmware[100] = {0};
+ char nvram[100] = {0};
+ char config[100] = "/system/etc/firmware/config.txt";
+ wifi_adapter_info_t *adapter = dhdinfo->adapter;
+
+
+ /* Update firmware and nvram path. The path may be from adapter info or module parameter
+ * The path from adapter info is used for initialization only (as it won't change).
+ *
+ * The firmware_path/nvram_path module parameter may be changed by the system at run
+ * time. When it changes we need to copy it to dhdinfo->fw_path. Also Android private
+ * command may change dhdinfo->fw_path. As such we need to clear the path info in
+ * module parameter after it is copied. We won't update the path until the module parameter
+ * is changed again (first character is not '\0')
+ */
+
+ /* set default firmware and nvram path for built-in type driver */
+// if (!dhd_download_fw_on_driverload) {
+ rkwifi_set_firmware(firmware, nvram);
+#ifdef CONFIG_BCMDHD_FW_PATH
+ fw = CONFIG_BCMDHD_FW_PATH;
+#else
+ fw = firmware;
+#endif /* CONFIG_BCMDHD_FW_PATH */
+#ifdef CONFIG_BCMDHD_NVRAM_PATH
+ nv = CONFIG_BCMDHD_NVRAM_PATH;
+#else
+ nv = nvram;
+#endif /* CONFIG_BCMDHD_NVRAM_PATH */
+#ifdef CONFIG_BCMDHD_CONFIG_PATH
+ conf = CONFIG_BCMDHD_CONFIG_PATH;
+#else
+ conf = config;
+#endif /* CONFIG_BCMDHD_CONFIG_PATH */
+// }
+
+ /* check if we need to initialize the path */
+ if (dhdinfo->fw_path[0] == '\0') {
+ if (adapter && adapter->fw_path && adapter->fw_path[0] != '\0')
+ fw = adapter->fw_path;
+
+ }
+ if (dhdinfo->nv_path[0] == '\0') {
+ if (adapter && adapter->nv_path && adapter->nv_path[0] != '\0')
+ nv = adapter->nv_path;
+ }
+ if (dhdinfo->conf_path[0] == '\0') {
+ if (adapter && adapter->conf_path && adapter->conf_path[0] != '\0')
+ conf = adapter->conf_path;
+ }
+
+ /* Use module parameter if it is valid, EVEN IF the path has not been initialized
+ *
+ * TODO: need a solution for multi-chip, can't use the same firmware for all chips
+ */
+ if (firmware_path[0] != '\0')
+ fw = firmware_path;
+ if (nvram_path[0] != '\0')
+ nv = nvram_path;
+ if (config_path[0] != '\0')
+ conf = config_path;
+
+ if (fw && fw[0] != '\0') {
+ fw_len = strlen(fw);
+ if (fw_len >= sizeof(dhdinfo->fw_path)) {
+ DHD_ERROR(("fw path len exceeds max len of dhdinfo->fw_path\n"));
+ return FALSE;
+ }
+ strncpy(dhdinfo->fw_path, fw, sizeof(dhdinfo->fw_path));
+ if (dhdinfo->fw_path[fw_len-1] == '\n')
+ dhdinfo->fw_path[fw_len-1] = '\0';
+ }
+ if (nv && nv[0] != '\0') {
+ nv_len = strlen(nv);
+ if (nv_len >= sizeof(dhdinfo->nv_path)) {
+ DHD_ERROR(("nvram path len exceeds max len of dhdinfo->nv_path\n"));
+ return FALSE;
+ }
+ strncpy(dhdinfo->nv_path, nv, sizeof(dhdinfo->nv_path));
+ if (dhdinfo->nv_path[nv_len-1] == '\n')
+ dhdinfo->nv_path[nv_len-1] = '\0';
+ }
+ if (conf && conf[0] != '\0') {
+ conf_len = strlen(conf);
+ if (conf_len >= sizeof(dhdinfo->conf_path)) {
+ DHD_ERROR(("config path len exceeds max len of dhdinfo->conf_path\n"));
+ return FALSE;
+ }
+ strncpy(dhdinfo->conf_path, conf, sizeof(dhdinfo->conf_path));
+ if (dhdinfo->conf_path[conf_len-1] == '\n')
+ dhdinfo->conf_path[conf_len-1] = '\0';
+ }
+
+#if 0
+ /* clear the path in module parameter */
+ firmware_path[0] = '\0';
+ nvram_path[0] = '\0';
+ config_path[0] = '\0';
+#endif
+
+#ifndef BCMEMBEDIMAGE
+ /* fw_path and nv_path are not mandatory for BCMEMBEDIMAGE */
+ if (dhdinfo->fw_path[0] == '\0') {
+ DHD_ERROR(("firmware path not found\n"));
+ return FALSE;
+ }
+ if (dhdinfo->nv_path[0] == '\0') {
+ DHD_ERROR(("nvram path not found\n"));
+ return FALSE;
+ }
+ if (dhdinfo->conf_path[0] == '\0') {
+ DHD_ERROR(("config path not found\n"));
+ return FALSE;
+ }
+#endif /* BCMEMBEDIMAGE */
+
+ return TRUE;
+}
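
The net effect of dhd_update_fw_nv_path() is a fixed precedence: the rkwifi/Kconfig defaults are taken first, the adapter (platform) paths are used only while dhdinfo->fw_path is still empty, and a non-empty firmware_path/nvram_path/config_path module parameter always wins. A small worked example (all paths below are purely illustrative):

	firmware_path (module param)   adapter->fw_path          resulting dhdinfo->fw_path
	""                             unset                     path from rkwifi_set_firmware()
	""                             /vendor/fw_bcmdhd.bin     /vendor/fw_bcmdhd.bin  (first call only)
	/data/fw_test.bin              anything                  /data/fw_test.bin      (parameter wins)
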
+
+
+#ifdef EXYNOS5433_PCIE_WAR
+extern int enum_wifi;
+#endif /* EXYNOS5433_PCIE_WAR */
int
dhd_bus_start(dhd_pub_t *dhdp)
{
DHD_TRACE(("Enter %s:\n", __FUNCTION__));
-#ifdef DHDTHREAD
- if (dhd->threads_only)
- dhd_os_sdlock(dhdp);
-#endif /* DHDTHREAD */
-
+ DHD_PERIM_LOCK(dhdp);
/* try to download image and nvram to the dongle */
- if ((dhd->pub.busstate == DHD_BUS_DOWN) &&
- (fw_path[0] != '\0') && (nv_path[0] != '\0')) {
-#ifdef SHOW_NVRAM_TYPE
- { /* Show nvram type in the kernel log */
- int i;
- for (i = 0; nv_path[i] != '\0'; ++i) {
- if (nv_path[i] == '.') {
- ++i;
- break;
- }
- }
- DHD_ERROR(("%s: nvram_type = [%s]\n", __FUNCTION__, &nv_path[i]));
- }
-#endif /* SHOW_NVRAM_TYPE */
- /* wake lock moved to dhdsdio_download_firmware */
- if (!(dhd_bus_download_firmware(dhd->pub.bus, dhd->pub.osh,
- fw_path, nv_path, conf_path))) {
- DHD_ERROR(("%s: dhdsdio_probe_download failed. firmware = %s nvram = %s config = %s\n",
- __FUNCTION__, fw_path, nv_path, conf_path));
-#ifdef DHDTHREAD
- if (dhd->threads_only)
- dhd_os_sdunlock(dhdp);
-#endif /* DHDTHREAD */
- return -1;
+ if (dhd->pub.busstate == DHD_BUS_DOWN && dhd_update_fw_nv_path(dhd)) {
+ DHD_INFO(("%s download fw %s, nv %s, conf %s\n",
+ __FUNCTION__, dhd->fw_path, dhd->nv_path, dhd->conf_path));
+ ret = dhd_bus_download_firmware(dhd->pub.bus, dhd->pub.osh,
+ dhd->fw_path, dhd->nv_path, dhd->conf_path);
+ if (ret < 0) {
+#ifdef EXYNOS5433_PCIE_WAR
+ enum_wifi = 0;
+#endif /* EXYNOS5433_PCIE_WAR */
+ DHD_ERROR(("%s: failed to download firmware %s\n",
+ __FUNCTION__, dhd->fw_path));
+ DHD_PERIM_UNLOCK(dhdp);
+ return ret;
}
+#ifdef EXYNOS5433_PCIE_WAR
+ enum_wifi = 1;
+#endif /* EXYNOS5433_PCIE_WAR */
}
if (dhd->pub.busstate != DHD_BUS_LOAD) {
-#ifdef DHDTHREAD
- if (dhd->threads_only)
- dhd_os_sdunlock(dhdp);
-#endif /* DHDTHREAD */
+ DHD_PERIM_UNLOCK(dhdp);
return -ENETDOWN;
}
+ dhd_os_sdlock(dhdp);
+
/* Start the watchdog timer */
dhd->pub.tickcnt = 0;
dhd_os_wd_timer(&dhd->pub, dhd_watchdog_ms);
if ((ret = dhd_bus_init(&dhd->pub, FALSE)) != 0) {
DHD_ERROR(("%s, dhd_bus_init failed %d\n", __FUNCTION__, ret));
-#ifdef DHDTHREAD
- if (dhd->threads_only)
- dhd_os_sdunlock(dhdp);
-#endif /* DHDTHREAD */
+ dhd_os_sdunlock(dhdp);
+ DHD_PERIM_UNLOCK(dhdp);
return ret;
}
- bcmsdh_set_drvdata(dhdp); // terence 20130427: fix for null pointer issue
#if defined(OOB_INTR_ONLY)
/* Host registration for OOB interrupt */
- if (bcmsdh_register_oob_intr(dhdp)) {
+ if (dhd_bus_oob_intr_register(dhdp)) {
/* deactivate timer and wait for the handler to finish */
- flags = dhd_os_spin_lock(&dhd->pub);
+ DHD_GENERAL_LOCK(&dhd->pub, flags);
dhd->wd_timer_valid = FALSE;
- dhd_os_spin_unlock(&dhd->pub, flags);
+ DHD_GENERAL_UNLOCK(&dhd->pub, flags);
del_timer_sync(&dhd->timer);
DHD_ERROR(("%s Host failed to register for OOB\n", __FUNCTION__));
-#ifdef DHDTHREAD
- if (dhd->threads_only)
- dhd_os_sdunlock(dhdp);
-#endif /* DHDTHREAD */
+ dhd_os_sdunlock(dhdp);
+ DHD_PERIM_UNLOCK(dhdp);
DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
return -ENODEV;
}
/* Enable oob at firmware */
dhd_enable_oob_intr(dhd->pub.bus, TRUE);
#endif
+#ifdef PCIE_FULL_DONGLE
+ {
+ uint8 txpush = 0;
+ uint32 num_flowrings; /* includes H2D common rings */
+ num_flowrings = dhd_bus_max_h2d_queues(dhd->pub.bus, &txpush);
+ DHD_ERROR(("%s: Initializing %u flowrings\n", __FUNCTION__,
+ num_flowrings));
+ if ((ret = dhd_flow_rings_init(&dhd->pub, num_flowrings)) != BCME_OK) {
+ dhd_os_sdunlock(dhdp);
+ DHD_PERIM_UNLOCK(dhdp);
+ return ret;
+ }
+ }
+#endif /* PCIE_FULL_DONGLE */
+
+ /* Do protocol initialization necessary for IOCTL/IOVAR */
+ dhd_prot_init(&dhd->pub);
/* If bus is not ready, can't come up */
if (dhd->pub.busstate != DHD_BUS_DATA) {
- flags = dhd_os_spin_lock(&dhd->pub);
+ DHD_GENERAL_LOCK(&dhd->pub, flags);
dhd->wd_timer_valid = FALSE;
- dhd_os_spin_unlock(&dhd->pub, flags);
+ DHD_GENERAL_UNLOCK(&dhd->pub, flags);
del_timer_sync(&dhd->timer);
DHD_ERROR(("%s failed bus is not ready\n", __FUNCTION__));
-#ifdef DHDTHREAD
- if (dhd->threads_only)
- dhd_os_sdunlock(dhdp);
-#endif /* DHDTHREAD */
+ dhd_os_sdunlock(dhdp);
+ DHD_PERIM_UNLOCK(dhdp);
DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
return -ENODEV;
}
-#ifdef DHDTHREAD
- if (dhd->threads_only)
- dhd_os_sdunlock(dhdp);
-#endif /* DHDTHREAD */
-
- dhd_process_cid_mac(dhdp, TRUE);
+ dhd_os_sdunlock(dhdp);
- /* Bus is ready, do any protocol initialization */
- if ((ret = dhd_prot_init(&dhd->pub)) < 0)
+ /* Bus is ready, query any dongle information */
+ if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0) {
+ DHD_PERIM_UNLOCK(dhdp);
return ret;
-
- dhd_process_cid_mac(dhdp, FALSE);
+ }
#ifdef ARP_OFFLOAD_SUPPORT
if (dhd->pend_ipaddr) {
}
#endif /* ARP_OFFLOAD_SUPPORT */
+ DHD_PERIM_UNLOCK(dhdp);
return 0;
}
+
#ifdef WLTDLS
-int dhd_tdls_enable_disable(dhd_pub_t *dhd, bool flag)
+int _dhd_tdls_enable(dhd_pub_t *dhd, bool tdls_on, bool auto_on, struct ether_addr *mac)
{
char iovbuf[WLC_IOCTL_SMLEN];
- uint32 tdls = flag;
- int ret;
-#ifdef WLTDLS_AUTO_ENABLE
- uint32 tdls_auto_op = 1;
+ uint32 tdls = tdls_on;
+ int ret = 0;
+ uint32 tdls_auto_op = 0;
uint32 tdls_idle_time = CUSTOM_TDLS_IDLE_MODE_SETTING;
-#endif /* WLTDLS_AUTO_ENABLE */
+ int32 tdls_rssi_high = CUSTOM_TDLS_RSSI_THRESHOLD_HIGH;
+ int32 tdls_rssi_low = CUSTOM_TDLS_RSSI_THRESHOLD_LOW;
+ BCM_REFERENCE(mac);
if (!FW_SUPPORTED(dhd, tdls))
return BCME_ERROR;
+ if (dhd->tdls_enable == tdls_on)
+ goto auto_mode;
bcm_mkiovar("tdls_enable", (char *)&tdls, sizeof(tdls), iovbuf, sizeof(iovbuf));
if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
DHD_ERROR(("%s: tdls %d failed %d\n", __FUNCTION__, tdls, ret));
goto exit;
}
- dhd->tdls_enable = flag;
- if (!flag)
- goto exit;
-#ifdef WLTDLS_AUTO_ENABLE
+ dhd->tdls_enable = tdls_on;
+auto_mode:
+
+ tdls_auto_op = auto_on;
bcm_mkiovar("tdls_auto_op", (char *)&tdls_auto_op, sizeof(tdls_auto_op),
iovbuf, sizeof(iovbuf));
- if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+ sizeof(iovbuf), TRUE, 0)) < 0) {
DHD_ERROR(("%s: tdls_auto_op failed %d\n", __FUNCTION__, ret));
goto exit;
}
- bcm_mkiovar("tdls_idle_time", (char *)&tdls_idle_time, sizeof(tdls_idle_time),
- iovbuf, sizeof(iovbuf));
- if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
- DHD_ERROR(("%s: tdls_idle_time failed %d\n", __FUNCTION__, ret));
- goto exit;
+
+ if (tdls_auto_op) {
+ bcm_mkiovar("tdls_idle_time", (char *)&tdls_idle_time,
+ sizeof(tdls_idle_time), iovbuf, sizeof(iovbuf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+ sizeof(iovbuf), TRUE, 0)) < 0) {
+ DHD_ERROR(("%s: tdls_idle_time failed %d\n", __FUNCTION__, ret));
+ goto exit;
+ }
+ bcm_mkiovar("tdls_rssi_high", (char *)&tdls_rssi_high, 4, iovbuf, sizeof(iovbuf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+ sizeof(iovbuf), TRUE, 0)) < 0) {
+ DHD_ERROR(("%s: tdls_rssi_high failed %d\n", __FUNCTION__, ret));
+ goto exit;
+ }
+ bcm_mkiovar("tdls_rssi_low", (char *)&tdls_rssi_low, 4, iovbuf, sizeof(iovbuf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+ sizeof(iovbuf), TRUE, 0)) < 0) {
+ DHD_ERROR(("%s: tdls_rssi_low failed %d\n", __FUNCTION__, ret));
+ goto exit;
+ }
}
-#endif /* WLTDLS_AUTO_ENABLE */
+
exit:
return ret;
}
-#endif /* WLTDLS */
+
+int dhd_tdls_enable(struct net_device *dev, bool tdls_on, bool auto_on, struct ether_addr *mac)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ int ret = 0;
+ if (dhd)
+ ret = _dhd_tdls_enable(&dhd->pub, tdls_on, auto_on, mac);
+ else
+ ret = BCME_ERROR;
+ return ret;
+}
+#ifdef PCIE_FULL_DONGLE
+void dhd_tdls_update_peer_info(struct net_device *dev, bool connect, uint8 *da)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ dhd_pub_t *dhdp = (dhd_pub_t *)&dhd->pub;
+ tdls_peer_node_t *cur = dhdp->peer_tbl.node;
+ tdls_peer_node_t *new = NULL, *prev = NULL;
+ dhd_if_t *dhdif;
+ uint8 sa[ETHER_ADDR_LEN];
+ int ifidx = dhd_net2idx(dhd, dev);
+
+ if (ifidx == DHD_BAD_IF)
+ return;
+
+ dhdif = dhd->iflist[ifidx];
+ memcpy(sa, dhdif->mac_addr, ETHER_ADDR_LEN);
+
+ if (connect) {
+ while (cur != NULL) {
+ if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
+ DHD_ERROR(("%s: TDLS Peer exist already %d\n",
+ __FUNCTION__, __LINE__));
+ return;
+ }
+ cur = cur->next;
+ }
+
+ new = MALLOC(dhdp->osh, sizeof(tdls_peer_node_t));
+ if (new == NULL) {
+ DHD_ERROR(("%s: Failed to allocate memory\n", __FUNCTION__));
+ return;
+ }
+ memcpy(new->addr, da, ETHER_ADDR_LEN);
+ new->next = dhdp->peer_tbl.node;
+ dhdp->peer_tbl.node = new;
+ dhdp->peer_tbl.tdls_peer_count++;
+
+ } else {
+ while (cur != NULL) {
+ if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
+ dhd_flow_rings_delete_for_peer(dhdp, ifidx, da);
+ if (prev)
+ prev->next = cur->next;
+ else
+ dhdp->peer_tbl.node = cur->next;
+ MFREE(dhdp->osh, cur, sizeof(tdls_peer_node_t));
+ dhdp->peer_tbl.tdls_peer_count--;
+ return;
+ }
+ prev = cur;
+ cur = cur->next;
+ }
+ DHD_ERROR(("%s: TDLS Peer Entry Not found\n", __FUNCTION__));
+ }
+}
+#endif /* PCIE_FULL_DONGLE */
+#endif
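
For reference, the dhd_tdls_enable() wrapper added above is the intended entry point from netdev context; a hedged usage sketch (the helper name is illustrative):

static int example_enable_tdls_auto(struct net_device *ndev)
{
	/* tdls_on = TRUE, auto_on = TRUE (idle/RSSI-driven link setup), no fixed peer */
	return dhd_tdls_enable(ndev, TRUE, TRUE, NULL);
}
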
bool dhd_is_concurrent_mode(dhd_pub_t *dhd)
{
if (!dhd)
return FALSE;
- if (dhd->op_mode & DHD_FLAG_CONCURR_MULTI_CHAN_MODE)
- return TRUE;
- else if ((dhd->op_mode & DHD_FLAG_CONCURR_SINGLE_CHAN_MODE) ==
- DHD_FLAG_CONCURR_SINGLE_CHAN_MODE)
- return TRUE;
- else
- return FALSE;
+ if (dhd->op_mode & DHD_FLAG_CONCURR_MULTI_CHAN_MODE)
+ return TRUE;
+ else if ((dhd->op_mode & DHD_FLAG_CONCURR_SINGLE_CHAN_MODE) ==
+ DHD_FLAG_CONCURR_SINGLE_CHAN_MODE)
+ return TRUE;
+ else
+ return FALSE;
+}
+#if !defined(AP) && defined(WLP2P)
+/* From Android JerryBean release, the concurrent mode is enabled by default and the firmware
+ * name would be fw_bcmdhd.bin. So we need to determine whether P2P is enabled in the STA
+ * firmware and accordingly enable concurrent mode (Apply P2P settings). SoftAP firmware
+ * would still be named as fw_bcmdhd_apsta.
+ */
+uint32
+dhd_get_concurrent_capabilites(dhd_pub_t *dhd)
+{
+ int32 ret = 0;
+ char buf[WLC_IOCTL_SMLEN];
+ bool mchan_supported = FALSE;
+ /* if dhd->op_mode is already set for HOSTAP and Manufacturing
+ * test mode, that means we only will use the mode as it is
+ */
+ if (dhd->op_mode & (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))
+ return 0;
+ if (FW_SUPPORTED(dhd, vsdb)) {
+ mchan_supported = TRUE;
+ }
+ if (!FW_SUPPORTED(dhd, p2p)) {
+ DHD_TRACE(("Chip does not support p2p\n"));
+ return 0;
+ }
+ else {
+ /* Chip supports p2p but ensure that p2p is really implemented in firmware or not */
+ memset(buf, 0, sizeof(buf));
+ bcm_mkiovar("p2p", 0, 0, buf, sizeof(buf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf),
+ FALSE, 0)) < 0) {
+ DHD_ERROR(("%s: Get P2P failed (error=%d)\n", __FUNCTION__, ret));
+ return 0;
+ }
+ else {
+ if (buf[0] == 1) {
+ /* By default, chip supports single chan concurrency,
+ * now lets check for mchan
+ */
+ ret = DHD_FLAG_CONCURR_SINGLE_CHAN_MODE;
+ if (mchan_supported)
+ ret |= DHD_FLAG_CONCURR_MULTI_CHAN_MODE;
+#if defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF)
+ /* For customer_hw4, although ICS,
+ * we still support concurrent mode
+ */
+ return ret;
+#else
+ return 0;
+#endif
+ }
+ }
+ }
+ return 0;
+}
+#endif
+#if defined(READ_CONFIG_FROM_FILE)
+#include <linux/fs.h>
+#include <linux/ctype.h>
+
+#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
+bool PM_control = TRUE;
+
+static int dhd_preinit_proc(dhd_pub_t *dhd, int ifidx, char *name, char *value)
+{
+ int var_int;
+ wl_country_t cspec = {{0}, -1, {0}};
+ char *revstr;
+ char *endptr = NULL;
+ int iolen;
+ char smbuf[WLC_IOCTL_SMLEN*2];
+
+ if (!strcmp(name, "country")) {
+ revstr = strchr(value, '/');
+ if (revstr) {
+ cspec.rev = strtoul(revstr + 1, &endptr, 10);
+ memcpy(cspec.country_abbrev, value, WLC_CNTRY_BUF_SZ);
+ cspec.country_abbrev[2] = '\0';
+ memcpy(cspec.ccode, cspec.country_abbrev, WLC_CNTRY_BUF_SZ);
+ } else {
+ cspec.rev = -1;
+ memcpy(cspec.country_abbrev, value, WLC_CNTRY_BUF_SZ);
+ memcpy(cspec.ccode, value, WLC_CNTRY_BUF_SZ);
+ get_customized_country_code(dhd->info->adapter,
+ (char *)&cspec.country_abbrev, &cspec);
+ }
+ memset(smbuf, 0, sizeof(smbuf));
+ DHD_ERROR(("config country code is country : %s, rev : %d !!\n",
+ cspec.country_abbrev, cspec.rev));
+ iolen = bcm_mkiovar("country", (char*)&cspec, sizeof(cspec),
+ smbuf, sizeof(smbuf));
+ return dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
+ smbuf, iolen, TRUE, 0);
+ } else if (!strcmp(name, "roam_scan_period")) {
+ var_int = (int)simple_strtol(value, NULL, 0);
+ return dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_SCAN_PERIOD,
+ &var_int, sizeof(var_int), TRUE, 0);
+ } else if (!strcmp(name, "roam_delta")) {
+ struct {
+ int val;
+ int band;
+ } x;
+ x.val = (int)simple_strtol(value, NULL, 0);
+ /* x.band = WLC_BAND_AUTO; */
+ x.band = WLC_BAND_ALL;
+ return dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_DELTA, &x, sizeof(x), TRUE, 0);
+ } else if (!strcmp(name, "roam_trigger")) {
+ int ret = 0;
+
+ roam_trigger[0] = (int)simple_strtol(value, NULL, 0);
+ roam_trigger[1] = WLC_BAND_ALL;
+ ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_TRIGGER, &roam_trigger,
+ sizeof(roam_trigger), TRUE, 0);
+
+ return ret;
+ } else if (!strcmp(name, "PM")) {
+ int ret = 0;
+ var_int = (int)simple_strtol(value, NULL, 0);
+
+ ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_PM,
+ &var_int, sizeof(var_int), TRUE, 0);
+
+#if defined(CONFIG_PM_LOCK)
+ if (var_int == 0) {
+ g_pm_control = TRUE;
+ printk("%s var_int=%d don't control PM\n", __func__, var_int);
+ } else {
+ g_pm_control = FALSE;
+ printk("%s var_int=%d do control PM\n", __func__, var_int);
+ }
+#endif
+
+ return ret;
+ }
+#ifdef WLBTAMP
+ else if (!strcmp(name, "btamp_chan")) {
+ int btamp_chan;
+ int iov_len = 0;
+ char iovbuf[128];
+ int ret;
+
+ btamp_chan = (int)simple_strtol(value, NULL, 0);
+ iov_len = bcm_mkiovar("btamp_chan", (char *)&btamp_chan, 4, iovbuf, sizeof(iovbuf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, 0) < 0))
+ DHD_ERROR(("%s btamp_chan=%d set failed code %d\n",
+ __FUNCTION__, btamp_chan, ret));
+ else
+ DHD_ERROR(("%s btamp_chan %d set success\n",
+ __FUNCTION__, btamp_chan));
+ }
+#endif /* WLBTAMP */
+ else if (!strcmp(name, "band")) {
+ int ret;
+ if (!strcmp(value, "auto"))
+ var_int = WLC_BAND_AUTO;
+ else if (!strcmp(value, "a"))
+ var_int = WLC_BAND_5G;
+ else if (!strcmp(value, "b"))
+ var_int = WLC_BAND_2G;
+ else if (!strcmp(value, "all"))
+ var_int = WLC_BAND_ALL;
+ else {
+			printk(" set band value should be one of a, b, or all\n");
+ var_int = WLC_BAND_AUTO;
+ }
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_BAND, &var_int,
+ sizeof(var_int), TRUE, 0)) < 0)
+ printk(" set band err=%d\n", ret);
+ return ret;
+ } else if (!strcmp(name, "cur_etheraddr")) {
+ struct ether_addr ea;
+ char buf[32];
+ uint iovlen;
+ int ret;
+
+ bcm_ether_atoe(value, &ea);
+
+ ret = memcmp(&ea.octet, dhd->mac.octet, ETHER_ADDR_LEN);
+ if (ret == 0) {
+ DHD_ERROR(("%s: Same Macaddr\n", __FUNCTION__));
+ return 0;
+ }
+
+ DHD_ERROR(("%s: Change Macaddr = %02X:%02X:%02X:%02X:%02X:%02X\n", __FUNCTION__,
+ ea.octet[0], ea.octet[1], ea.octet[2],
+ ea.octet[3], ea.octet[4], ea.octet[5]));
+
+ iovlen = bcm_mkiovar("cur_etheraddr", (char*)&ea, ETHER_ADDR_LEN, buf, 32);
+
+ ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, iovlen, TRUE, 0);
+ if (ret < 0) {
+ DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret));
+ return ret;
+ }
+ else {
+ memcpy(dhd->mac.octet, (void *)&ea, ETHER_ADDR_LEN);
+ return ret;
+ }
+ } else {
+ uint iovlen;
+ char iovbuf[WLC_IOCTL_SMLEN];
+
+ /* wlu_iovar_setint */
+ var_int = (int)simple_strtol(value, NULL, 0);
+
+ /* Setup timeout bcn_timeout from dhd driver 4.217.48 */
+ if (!strcmp(name, "roam_off")) {
+ /* Setup timeout if Beacons are lost to report link down */
+ if (var_int) {
+ uint bcn_timeout = 2;
+ bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4,
+ iovbuf, sizeof(iovbuf));
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+ }
+ }
+ /* Setup timeout bcm_timeout from dhd driver 4.217.48 */
+
+ DHD_INFO(("%s:[%s]=[%d]\n", __FUNCTION__, name, var_int));
+
+ iovlen = bcm_mkiovar(name, (char *)&var_int, sizeof(var_int),
+ iovbuf, sizeof(iovbuf));
+ return dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
+ iovbuf, iovlen, TRUE, 0);
+ }
+
+ return 0;
}
-#if !defined(AP) && defined(WLP2P)
-/* From Android JerryBean release, the concurrent mode is enabled by default and the firmware
- * name would be fw_bcmdhd.bin. So we need to determine whether P2P is enabled in the STA
- * firmware and accordingly enable concurrent mode (Apply P2P settings). SoftAP firmware
- * would still be named as fw_bcmdhd_apsta.
- */
-uint32
-dhd_get_concurrent_capabilites(dhd_pub_t *dhd)
+
+static int dhd_preinit_config(dhd_pub_t *dhd, int ifidx)
{
- int32 ret = 0;
- char buf[WLC_IOCTL_SMLEN];
- bool mchan_supported = FALSE;
- /* if dhd->op_mode is already set for HOSTAP,
- * that means we only will use the mode as it is
- */
- if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE)
+ mm_segment_t old_fs;
+ struct kstat stat;
+ struct file *fp = NULL;
+ unsigned int len;
+ char *buf = NULL, *p, *name, *value;
+ int ret = 0;
+ char *config_path;
+
+ config_path = CONFIG_BCMDHD_CONFIG_PATH;
+
+ if (!config_path)
+ {
+		printk(KERN_ERR "config_path can't be read.\n");
return 0;
- if (FW_SUPPORTED(dhd, vsdb)) {
- mchan_supported = TRUE;
}
- if (!FW_SUPPORTED(dhd, p2p)) {
- DHD_TRACE(("Chip does not support p2p\n"));
- return 0;
+
+ old_fs = get_fs();
+ set_fs(get_ds());
+ if ((ret = vfs_stat(config_path, &stat))) {
+ set_fs(old_fs);
+ printk(KERN_ERR "%s: Failed to get information (%d)\n",
+ config_path, ret);
+ return ret;
}
- else {
- /* Chip supports p2p but ensure that p2p is really implemented in firmware or not */
- memset(buf, 0, sizeof(buf));
- bcm_mkiovar("p2p", 0, 0, buf, sizeof(buf));
- if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf),
- FALSE, 0)) < 0) {
- DHD_ERROR(("%s: Get P2P failed (error=%d)\n", __FUNCTION__, ret));
- return 0;
- }
- else {
- if (buf[0] == 1) {
- /* By default, chip supports single chan concurrency,
- * now lets check for mchan
- */
- ret = DHD_FLAG_CONCURR_SINGLE_CHAN_MODE;
- if (mchan_supported)
- ret |= DHD_FLAG_CONCURR_MULTI_CHAN_MODE;
-#if defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF)
- /* For customer_hw4, although ICS,
- * we still support concurrent mode
- */
- return ret;
-#else
- return 0;
-#endif
+ set_fs(old_fs);
+
+ if (!(buf = MALLOC(dhd->osh, stat.size + 1))) {
+ printk(KERN_ERR "Failed to allocate memory %llu bytes\n", stat.size);
+ return -ENOMEM;
+ }
+
+ printk("dhd_preinit_config : config path : %s \n", config_path);
+
+ if (!(fp = dhd_os_open_image(config_path)) ||
+ (len = dhd_os_get_image_block(buf, stat.size, fp)) < 0)
+ goto err;
+
+ buf[stat.size] = '\0';
+ for (p = buf; *p; p++) {
+ if (isspace(*p))
+ continue;
+ for (name = p++; *p && !isspace(*p); p++) {
+ if (*p == '=') {
+ *p = '\0';
+ p++;
+ for (value = p; *p && !isspace(*p); p++);
+ *p = '\0';
+ if ((ret = dhd_preinit_proc(dhd, ifidx, name, value)) < 0) {
+ printk(KERN_ERR "%s: %s=%s\n",
+ bcmerrorstr(ret), name, value);
+ }
+ break;
}
}
}
- return 0;
+ ret = 0;
+
+out:
+ if (fp)
+ dhd_os_close_image(fp);
+ if (buf)
+ MFREE(dhd->osh, buf, stat.size+1);
+ return ret;
+
+err:
+ ret = -1;
+ goto out;
}
-#endif
+#endif /* READ_CONFIG_FROM_FILE */
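
dhd_preinit_config() tokenizes the file as whitespace-separated name=value pairs and feeds each one to dhd_preinit_proc(); names it does not recognize fall through to the generic integer iovar path. A plausible config.txt for this parser (values are only illustrative):

	country=US/0
	roam_off=0
	roam_trigger=-75
	roam_delta=10
	PM=2
	band=auto
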
int
dhd_preinit_ioctls(dhd_pub_t *dhd)
char eventmask[WL_EVENTING_MASK_LEN];
char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */
uint32 buf_key_b4_m4 = 1;
+ uint8 msglen;
+ eventmsgs_ext_t *eventmask_msg;
+ char iov_buf[WLC_IOCTL_SMLEN];
+ int ret2 = 0;
+#ifdef WLAIBSS
+ aibss_bcn_force_config_t bcn_config;
+ uint32 aibss;
+#ifdef WLAIBSS_PS
+ uint32 aibss_ps;
+#endif /* WLAIBSS_PS */
+#endif /* WLAIBSS */
#if defined(BCMSUP_4WAY_HANDSHAKE) && defined(WLAN_AKM_SUITE_FT_8021X)
uint32 sup_wpa = 0;
#endif
-#ifdef CUSTOM_AMPDU_BA_WSIZE
- uint32 ampdu_ba_wsize = CUSTOM_AMPDU_BA_WSIZE;
-#endif /* CUSTOM_AMPDU_BA_WSIZE */
+#if defined(CUSTOM_AMPDU_BA_WSIZE) || (defined(WLAIBSS) && \
+ defined(CUSTOM_IBSS_AMPDU_BA_WSIZE))
+ uint32 ampdu_ba_wsize = 0;
+#endif /* CUSTOM_AMPDU_BA_WSIZE ||(WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE) */
+#if defined(CUSTOM_AMPDU_MPDU)
+ int32 ampdu_mpdu = 0;
+#endif
+#if defined(CUSTOM_AMPDU_RELEASE)
+ int32 ampdu_release = 0;
+#endif
+
+#if defined(BCMSDIO)
+#ifdef PROP_TXSTATUS
+ int wlfc_enable = TRUE;
+#ifndef DISABLE_11N
+ uint32 hostreorder = 1;
+ uint wl_down = 1;
+#endif /* DISABLE_11N */
+#endif /* PROP_TXSTATUS */
+#endif
+#ifdef PCIE_FULL_DONGLE
+ uint32 wl_ap_isolate;
+#endif /* PCIE_FULL_DONGLE */
+
#ifdef DHD_ENABLE_LPC
uint32 lpc = 1;
#endif /* DHD_ENABLE_LPC */
uint power_mode = PM_FAST;
uint32 dongle_align = DHD_SDALIGN;
+#if defined(BCMSDIO)
uint32 glom = CUSTOM_GLOM_SETTING;
- uint bcn_timeout = 4;
+#endif /* defined(BCMSDIO) */
+#if defined(CUSTOMER_HW2) && defined(USE_WL_CREDALL)
+ uint32 credall = 1;
+#endif
+ uint bcn_timeout = dhd->conf->bcn_timeout;
uint retry_max = 3;
#if defined(ARP_OFFLOAD_SUPPORT)
int arpoe = 1;
uint32 mpc = 0; /* Turn MPC off for AP/APSTA mode */
struct ether_addr p2p_ea;
#endif
+#ifdef BCMCCX
+ uint32 ccx = 1;
+#endif
-#if defined(AP) || defined(WLP2P)
+#if (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC)
uint32 apsta = 1; /* Enable APSTA mode */
-#endif /* defined(AP) || defined(WLP2P) */
+#elif defined(SOFTAP_AND_GC)
+ uint32 apsta = 0;
+ int ap_mode = 1;
+#endif /* (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC) */
#ifdef GET_CUSTOM_MAC_ENABLE
struct ether_addr ea_addr;
#endif /* GET_CUSTOM_MAC_ENABLE */
-#ifdef BCMCCX
- uint32 ccx = 1;
-#endif
#ifdef DISABLE_11N
uint32 nmode = 0;
#endif /* DISABLE_11N */
+
+#if defined(DISABLE_11AC)
+ uint32 vhtmode = 0;
+#endif /* DISABLE_11AC */
#ifdef USE_WL_TXBF
uint32 txbf = 1;
#endif /* USE_WL_TXBF */
+#ifdef AMPDU_VO_ENABLE
+ struct ampdu_tid_control tid;
+#endif
#ifdef USE_WL_FRAMEBURST
uint32 frameburst = 1;
#endif /* USE_WL_FRAMEBURST */
#ifdef SUPPORT_2G_VHT
uint32 vht_features = 0x3; /* 2G enable | rates all */
#endif /* SUPPORT_2G_VHT */
-#ifdef PROP_TXSTATUS
-#ifdef PROP_TXSTATUS_VSDB
- /* In case the host does not support proptxstatus, hostreorder in dongle should be off */
- uint32 hostreorder = 0;
- dhd->wlfc_enabled = FALSE;
- /* enable WLFC only if the firmware is VSDB */
-#else
- if (!disable_proptx)
- dhd->wlfc_enabled = TRUE;
- else
- dhd->wlfc_enabled = FALSE;
-#endif /* PROP_TXSTATUS_VSDB */
-#endif /* PROP_TXSTATUS */
-
+#ifdef CUSTOM_PSPRETEND_THR
+ uint32 pspretend_thr = CUSTOM_PSPRETEND_THR;
+#endif
+#ifdef PKT_FILTER_SUPPORT
+ dhd_pkt_filter_enable = TRUE;
+#endif /* PKT_FILTER_SUPPORT */
+#ifdef WLTDLS
+ dhd->tdls_enable = FALSE;
+#endif /* WLTDLS */
dhd->suspend_bcn_li_dtim = CUSTOM_SUSPEND_BCN_LI_DTIM;
DHD_TRACE(("Enter %s\n", __FUNCTION__));
dhd_conf_set_band(dhd);
dhd->op_mode = 0;
+ if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
+ (op_mode == DHD_FLAG_MFG_MODE)) {
+		/* Check and adjust IOCTL response timeout for Manufacturing firmware */
+ dhd_os_set_ioctl_resp_timeout(MFG_IOCTL_RESP_TIMEOUT);
+		DHD_ERROR(("%s : Set IOCTL response time for Manufacturing Firmware\n",
+ __FUNCTION__));
+ }
+ else {
+ dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
+ DHD_INFO(("%s : Set IOCTL response time.\n", __FUNCTION__));
+ }
#ifdef GET_CUSTOM_MAC_ENABLE
- ret = dhd_custom_get_mac_address(ea_addr.octet);
+ ret = wifi_platform_get_mac_addr(dhd->info->adapter, ea_addr.octet);
if (!ret) {
memset(buf, 0, sizeof(buf));
bcm_mkiovar("cur_etheraddr", (void *)&ea_addr, ETHER_ADDR_LEN, buf, sizeof(buf));
}
#endif /* GET_CUSTOM_MAC_ENABLE */
- DHD_TRACE(("Firmware = %s\n", fw_path));
/* get a capabilities from firmware */
memset(dhd->fw_capabilities, 0, sizeof(dhd->fw_capabilities));
bcm_mkiovar("cap", 0, 0, dhd->fw_capabilities, sizeof(dhd->fw_capabilities));
__FUNCTION__, ret));
return 0;
}
- if ((!op_mode && strstr(fw_path, "_apsta") != NULL) ||
+ if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_HOSTAP_MODE) ||
(op_mode == DHD_FLAG_HOSTAP_MODE)) {
#ifdef SET_RANDOM_MAC_SOFTAP
uint rand_mac;
DHD_ERROR(("%s mpc for HostAPD failed %d\n", __FUNCTION__, ret));
}
#endif
- }
- else {
+ } else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
+ (op_mode == DHD_FLAG_MFG_MODE)) {
+#if defined(ARP_OFFLOAD_SUPPORT)
+ arpoe = 0;
+#endif /* ARP_OFFLOAD_SUPPORT */
+#ifdef PKT_FILTER_SUPPORT
+ dhd_pkt_filter_enable = FALSE;
+#endif /* PKT_FILTER_SUPPORT */
+ dhd->op_mode = DHD_FLAG_MFG_MODE;
+ } else {
uint32 concurrent_mode = 0;
- if ((!op_mode && strstr(fw_path, "_p2p") != NULL) ||
+ if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_P2P_MODE) ||
(op_mode == DHD_FLAG_P2P_MODE)) {
#if defined(ARP_OFFLOAD_SUPPORT)
arpoe = 0;
dhd_pkt_filter_enable = FALSE;
#endif
dhd->op_mode = DHD_FLAG_P2P_MODE;
- } else if (op_mode == DHD_FLAG_IBSS_MODE ||
- (!op_mode && strstr(fw_path, "_ibss") != NULL)) {
+ } else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_IBSS_MODE) ||
+ (op_mode == DHD_FLAG_IBSS_MODE)) {
dhd->op_mode = DHD_FLAG_IBSS_MODE;
- } else {
+ } else
dhd->op_mode = DHD_FLAG_STA_MODE;
- }
#if !defined(AP) && defined(WLP2P)
if (dhd->op_mode != DHD_FLAG_IBSS_MODE &&
(concurrent_mode = dhd_get_concurrent_capabilites(dhd))) {
DHD_ERROR(("%s APSTA for P2P failed ret= %d\n", __FUNCTION__, ret));
}
+#if defined(SOFTAP_AND_GC)
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_AP,
+ (char *)&ap_mode, sizeof(ap_mode), TRUE, 0)) < 0) {
+ DHD_ERROR(("%s WLC_SET_AP failed %d\n", __FUNCTION__, ret));
+ }
+#endif
memcpy(&p2p_ea, &dhd->mac, ETHER_ADDR_LEN);
ETHER_SET_LOCALADDR(&p2p_ea);
bcm_mkiovar("p2p_da_override", (char *)&p2p_ea,
#endif
}
- DHD_ERROR(("Firmware up: op_mode=0x%04x, "
- "Broadcom Dongle Host Driver mac="MACDBG"\n",
- dhd->op_mode,
- MAC2STRDBG(dhd->mac.octet)));
+ DHD_ERROR(("Firmware up: op_mode=0x%04x, MAC="MACDBG"\n",
+ dhd->op_mode, MAC2STRDBG(dhd->mac.octet)));
/* Set Country code */
if (dhd->dhd_cspec.ccode[0] != 0) {
printf("Set country %s, revision %d\n", dhd->dhd_cspec.ccode, dhd->dhd_cspec.rev);
sizeof(wl_country_t), iovbuf, sizeof(iovbuf));
if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
printf("%s: country code setting failed %d\n", __FUNCTION__, ret);
- } else
+ } else {
dhd_conf_set_country(dhd);
- dhd_conf_get_country(dhd);
+ dhd_conf_fix_country(dhd);
+ }
+ dhd_conf_get_country(dhd, &dhd->dhd_cspec);
+
+#if defined(DISABLE_11AC)
+ bcm_mkiovar("vhtmode", (char *)&vhtmode, 4, iovbuf, sizeof(iovbuf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
+ DHD_ERROR(("%s wl vhtmode 0 failed %d\n", __FUNCTION__, ret));
+#endif /* DISABLE_11AC */
/* Set Listen Interval */
bcm_mkiovar("assoc_listen", (char *)&listen_interval, 4, iovbuf, sizeof(iovbuf));
#ifdef BCMCCX
bcm_mkiovar("ccx_enable", (char *)&ccx, 4, iovbuf, sizeof(iovbuf));
dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
-#endif
+#endif /* BCMCCX */
#ifdef WLTDLS
- dhd_tdls_enable_disable(dhd, 1);
+ /* by default TDLS on and auto mode off */
+ _dhd_tdls_enable(dhd, true, false, NULL);
#endif /* WLTDLS */
#ifdef DHD_ENABLE_LPC
bcm_mkiovar("bus:txglomalign", (char *)&dongle_align, 4, iovbuf, sizeof(iovbuf));
dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+#if defined(CUSTOMER_HW2) && defined(USE_WL_CREDALL)
+ /* enable credall to reduce the chance of running out of bus credits. */
+ bcm_mkiovar("bus:credall", (char *)&credall, 4, iovbuf, sizeof(iovbuf));
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+#endif
+
+#if defined(BCMSDIO)
if (glom != DEFAULT_GLOM_VALUE) {
DHD_INFO(("%s set glom=0x%X\n", __FUNCTION__, glom));
bcm_mkiovar("bus:txglom", (char *)&glom, 4, iovbuf, sizeof(iovbuf));
dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
}
+#endif /* defined(BCMSDIO) */
+ dhd_conf_set_glom(dhd);
/* Setup timeout if Beacons are lost and roam is off to report link down */
bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4, iovbuf, sizeof(iovbuf));
bcm_mkiovar("apsta", (char *)&apsta, 4, iovbuf, sizeof(iovbuf));
dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
#endif /* defined(AP) && !defined(WLP2P) */
- dhd_conf_set_bw(dhd);
+ dhd_conf_set_mimo_bw_cap(dhd);
dhd_conf_force_wme(dhd);
dhd_conf_set_stbc(dhd);
+ dhd_conf_set_srl(dhd);
+ dhd_conf_set_lrl(dhd);
+ dhd_conf_set_spect(dhd);
#if defined(SOFTAP)
if (ap_fw_loaded == TRUE) {
#if defined(SOFTAP)
if (ap_fw_loaded == FALSE)
#endif
- if (!(dhd->op_mode & DHD_FLAG_HOSTAP_MODE)) {
+ if (!(dhd->op_mode &
+ (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))) {
if ((res = dhd_keep_alive_onoff(dhd)) < 0)
DHD_ERROR(("%s set keeplive failed %d\n",
__FUNCTION__, res));
}
}
#endif /* defined(KEEP_ALIVE) */
+
#ifdef USE_WL_TXBF
bcm_mkiovar("txbf", (char *)&txbf, 4, iovbuf, sizeof(iovbuf));
if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
DHD_ERROR(("%s Set ack_ratio_depth failed %d\n", __FUNCTION__, ret));
}
#endif /* DHD_SET_FW_HIGHSPEED */
+#if defined(CUSTOM_AMPDU_BA_WSIZE) || (defined(WLAIBSS) && \
+ defined(CUSTOM_IBSS_AMPDU_BA_WSIZE))
+ /* Set ampdu ba wsize to 64 or 16 */
#ifdef CUSTOM_AMPDU_BA_WSIZE
- /* Set ampdu ba wsize to 64 */
- bcm_mkiovar("ampdu_ba_wsize", (char *)&du_ba_wsize, 4, iovbuf, sizeof(iovbuf));
- if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
- sizeof(iovbuf), TRUE, 0)) < 0) {
- DHD_ERROR(("%s Set ampdu_ba_wsize to %d failed %d\n",
- __FUNCTION__, CUSTOM_AMPDU_BA_WSIZE, ret));
+ ampdu_ba_wsize = CUSTOM_AMPDU_BA_WSIZE;
+#endif
+#if defined(WLAIBSS) && defined(CUSTOM_IBSS_AMPDU_BA_WSIZE)
+ if (dhd->op_mode == DHD_FLAG_IBSS_MODE)
+ ampdu_ba_wsize = CUSTOM_IBSS_AMPDU_BA_WSIZE;
+#endif /* WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE */
+ if (ampdu_ba_wsize != 0) {
+ bcm_mkiovar("ampdu_ba_wsize", (char *)&du_ba_wsize, 4, iovbuf, sizeof(iovbuf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+ sizeof(iovbuf), TRUE, 0)) < 0) {
+ DHD_ERROR(("%s Set ampdu_ba_wsize to %d failed %d\n",
+ __FUNCTION__, ampdu_ba_wsize, ret));
+ }
+ }
+#endif /* CUSTOM_AMPDU_BA_WSIZE || (WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE) */
+
+#ifdef WLAIBSS
+ /* Configure custom IBSS beacon transmission */
+ if (dhd->op_mode & DHD_FLAG_IBSS_MODE)
+ {
+ aibss = 1;
+ bcm_mkiovar("aibss", (char *)&aibss, 4, iovbuf, sizeof(iovbuf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+ sizeof(iovbuf), TRUE, 0)) < 0) {
+ DHD_ERROR(("%s Set aibss to %d failed %d\n",
+ __FUNCTION__, aibss, ret));
+ }
+#ifdef WLAIBSS_PS
+ aibss_ps = 1;
+ bcm_mkiovar("aibss_ps", (char *)&aibss_ps, 4, iovbuf, sizeof(iovbuf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+ sizeof(iovbuf), TRUE, 0)) < 0) {
+ DHD_ERROR(("%s Set aibss PS to %d failed %d\n",
+ __FUNCTION__, aibss_ps, ret));
+ }
+#endif /* WLAIBSS_PS */
+ }
+ memset(&bcn_config, 0, sizeof(bcn_config));
+ bcn_config.initial_min_bcn_dur = AIBSS_INITIAL_MIN_BCN_DUR;
+ bcn_config.min_bcn_dur = AIBSS_MIN_BCN_DUR;
+ bcn_config.bcn_flood_dur = AIBSS_BCN_FLOOD_DUR;
+ bcn_config.version = AIBSS_BCN_FORCE_CONFIG_VER_0;
+ bcn_config.len = sizeof(bcn_config);
+
+ bcm_mkiovar("aibss_bcn_force_config", (char *)&bcn_config,
+ sizeof(aibss_bcn_force_config_t), iov_buf, sizeof(iov_buf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iov_buf,
+ sizeof(iov_buf), TRUE, 0)) < 0) {
+ DHD_ERROR(("%s Set aibss_bcn_force_config to %d, %d, %d failed %d\n",
+ __FUNCTION__, AIBSS_INITIAL_MIN_BCN_DUR, AIBSS_MIN_BCN_DUR,
+ AIBSS_BCN_FLOOD_DUR, ret));
+ }
+#endif /* WLAIBSS */
+
+#if defined(CUSTOM_AMPDU_MPDU)
+ ampdu_mpdu = CUSTOM_AMPDU_MPDU;
+ if (ampdu_mpdu != 0 && (ampdu_mpdu <= ampdu_ba_wsize)) {
+ bcm_mkiovar("ampdu_mpdu", (char *)&du_mpdu, 4, iovbuf, sizeof(iovbuf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+ sizeof(iovbuf), TRUE, 0)) < 0) {
+ DHD_ERROR(("%s Set ampdu_mpdu to %d failed %d\n",
+ __FUNCTION__, CUSTOM_AMPDU_MPDU, ret));
+ }
}
-#endif /* CUSTOM_AMPDU_BA_WSIZE */
+#endif /* CUSTOM_AMPDU_MPDU */
+ dhd_conf_set_ampdu_ba_wsize(dhd);
+
+#if defined(CUSTOM_AMPDU_RELEASE)
+ ampdu_release = CUSTOM_AMPDU_RELEASE;
+ if (ampdu_release != 0 && (ampdu_release <= ampdu_ba_wsize)) {
+ bcm_mkiovar("ampdu_release", (char *)&du_release, 4, iovbuf, sizeof(iovbuf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+ sizeof(iovbuf), TRUE, 0)) < 0) {
+ DHD_ERROR(("%s Set ampdu_release to %d failed %d\n",
+ __FUNCTION__, CUSTOM_AMPDU_RELEASE, ret));
+ }
+ }
+#endif /* CUSTOM_AMPDU_RELEASE */
+
#if defined(BCMSUP_4WAY_HANDSHAKE) && defined(WLAN_AKM_SUITE_FT_8021X)
- /* Read 4-way handshake requirements. */
- bcm_mkiovar("sup_wpa", (char *)&sup_wpa, 4,
- iovbuf, sizeof(iovbuf));
- ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
- if (ret >= 0)
- dhd->fw_4way_handshake = TRUE;
- DHD_TRACE(("4-way handshake mode is: %d\n", dhd->fw_4way_handshake));
+ /* Read 4-way handshake requirements */
+ if (dhd_use_idsup == 1) {
+ bcm_mkiovar("sup_wpa", (char *)&sup_wpa, 4, iovbuf, sizeof(iovbuf));
+ ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
+ /* sup_wpa iovar returns NOTREADY status on some platforms using modularized
+ * in-dongle supplicant.
+ */
+ if (ret >= 0 || ret == BCME_NOTREADY)
+ dhd->fw_4way_handshake = TRUE;
+ DHD_TRACE(("4-way handshake mode is: %d\n", dhd->fw_4way_handshake));
+ }
#endif /* BCMSUP_4WAY_HANDSHAKE && WLAN_AKM_SUITE_FT_8021X */
#ifdef SUPPORT_2G_VHT
bcm_mkiovar("vht_features", (char *)&vht_features, 4, iovbuf, sizeof(iovbuf));
DHD_ERROR(("%s vht_features set failed %d\n", __FUNCTION__, ret));
}
#endif /* SUPPORT_2G_VHT */
+#ifdef CUSTOM_PSPRETEND_THR
+ /* Set the PS pretend threshold */
+ bcm_mkiovar("pspretend_threshold", (char *)&pspretend_thr, 4,
+ iovbuf, sizeof(iovbuf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
+ sizeof(iovbuf), TRUE, 0)) < 0) {
+ DHD_ERROR(("%s pspretend_threshold for HostAPD failed %d\n",
+ __FUNCTION__, ret));
+ }
+#endif
bcm_mkiovar("buf_key_b4_m4", (char *)&buf_key_b4_m4, 4, iovbuf, sizeof(iovbuf));
if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf,
setbit(eventmask, WLC_E_SET_SSID);
setbit(eventmask, WLC_E_PRUNE);
setbit(eventmask, WLC_E_AUTH);
+ setbit(eventmask, WLC_E_AUTH_IND);
setbit(eventmask, WLC_E_ASSOC);
setbit(eventmask, WLC_E_REASSOC);
setbit(eventmask, WLC_E_REASSOC_IND);
#endif /* PNO_SUPPORT */
/* enable dongle roaming event */
setbit(eventmask, WLC_E_ROAM);
+ setbit(eventmask, WLC_E_BSSID);
#ifdef BCMCCX
setbit(eventmask, WLC_E_ADDTS_IND);
setbit(eventmask, WLC_E_DELTS_IND);
setbit(eventmask, WLC_E_P2P_DISC_LISTEN_COMPLETE);
}
#endif /* WL_CFG80211 */
+#ifdef WLAIBSS
+ setbit(eventmask, WLC_E_AIBSS_TXFAIL);
+#endif /* WLAIBSS */
+ setbit(eventmask, WLC_E_TRACE);
/* Write updated Event mask */
bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf));
goto done;
}
+ /* make up event mask ext message iovar for events larger than 128 */
+ msglen = ROUNDUP(WLC_E_LAST, NBBY)/NBBY + EVENTMSGS_EXT_STRUCT_SIZE;
+ eventmask_msg = (eventmsgs_ext_t*)kmalloc(msglen, GFP_KERNEL);
+ if (eventmask_msg == NULL) {
+ DHD_ERROR(("failed to allocate %d bytes for event_msg_ext\n", msglen));
+ return BCME_NOMEM;
+ }
+ bzero(eventmask_msg, msglen);
+ eventmask_msg->ver = EVENTMSGS_VER;
+ eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY;
+
+ /* Read event_msgs_ext mask */
+ bcm_mkiovar("event_msgs_ext", (char *)eventmask_msg, msglen, iov_buf, sizeof(iov_buf));
+ ret2 = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iov_buf, sizeof(iov_buf), FALSE, 0);
+ if (ret2 != BCME_UNSUPPORTED)
+ ret = ret2;
+ if (ret2 == 0) { /* event_msgs_ext must be supported */
+ bcopy(iov_buf, eventmask_msg, msglen);
+
+#ifdef BT_WIFI_HANDOVER
+ setbit(eventmask_msg->mask, WLC_E_BT_WIFI_HANDOVER_REQ);
+#endif /* BT_WIFI_HANDOVER */
+
+ /* Write updated Event mask */
+ eventmask_msg->ver = EVENTMSGS_VER;
+ eventmask_msg->command = EVENTMSGS_SET_MASK;
+ eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY;
+ bcm_mkiovar("event_msgs_ext", (char *)eventmask_msg,
+ msglen, iov_buf, sizeof(iov_buf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR,
+ iov_buf, sizeof(iov_buf), TRUE, 0)) < 0) {
+ DHD_ERROR(("%s write event mask ext failed %d\n", __FUNCTION__, ret));
+ kfree(eventmask_msg);
+ goto done;
+ }
+ } else if (ret2 < 0 && ret2 != BCME_UNSUPPORTED) {
+ DHD_ERROR(("%s read event mask ext failed %d\n", __FUNCTION__, ret2));
+ kfree(eventmask_msg);
+ goto done;
+ } /* unsupported is ok */
+ kfree(eventmask_msg);
+
dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_CHANNEL_TIME, (char *)&scan_assoc_time,
sizeof(scan_assoc_time), TRUE, 0);
dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_UNASSOC_TIME, (char *)&scan_unassoc_time,
if (arpoe && !ap_fw_loaded)
#else
if (arpoe)
-#endif
+#endif
{
dhd_arp_offload_enable(dhd, TRUE);
dhd_arp_offload_set(dhd, dhd_arp_mode);
#endif /* ARP_OFFLOAD_SUPPORT */
#ifdef PKT_FILTER_SUPPORT
- if (dhd->conf->filter_out_all_packets) {
- dhd_master_mode = FALSE;
- dhd->pktfilter_count = 1;
- dhd->pktfilter[0] = "99 0 0 0 0x000000000000 0xFFFFFFFFFFFF";
- } else {
- /* Setup default defintions for pktfilter , enable in suspend */
- dhd->pktfilter_count = 6;
- /* Setup filter to allow only unicast */
+ /* Setup default definitions for pktfilter, enable in suspend */
+ dhd->pktfilter_count = 6;
+ /* Setup filter to allow only unicast */
+ if (dhd_master_mode) {
dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0x01 0x00";
dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = NULL;
dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = NULL;
dhd->pktfilter[DHD_MDNS_FILTER_NUM] = "104 0 0 0 0xFFFFFFFFFFFF 0x01005E0000FB";
/* apply APP pktfilter */
dhd->pktfilter[DHD_ARP_FILTER_NUM] = "105 0 0 12 0xFFFF 0x0806";
- }
+ } else
+ dhd_conf_discard_pkt_filter(dhd);
+ dhd_conf_add_pkt_filter(dhd);
#if defined(SOFTAP)
if (ap_fw_loaded) {
bcm_mkiovar("nmode", (char *)&nmode, 4, iovbuf, sizeof(iovbuf));
if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
DHD_ERROR(("%s wl nmode 0 failed %d\n", __FUNCTION__, ret));
-#else
-#if defined(PROP_TXSTATUS) && defined(PROP_TXSTATUS_VSDB)
- bcm_mkiovar("ampdu_hostreorder", (char *)&hostreorder, 4, buf, sizeof(buf));
- dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
-#endif
#endif /* DISABLE_11N */
+#ifdef AMPDU_VO_ENABLE
+ tid.tid = PRIO_8021D_VO; /* Enable TID(6) for voice */
+ tid.enable = TRUE;
+ bcm_mkiovar("ampdu_tid", (char *)&tid, sizeof(tid), iovbuf, sizeof(iovbuf));
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+
+ tid.tid = PRIO_8021D_NC; /* Enable TID(7) for network control */
+ tid.enable = TRUE;
+ bcm_mkiovar("ampdu_tid", (char *)&tid, sizeof(tid), iovbuf, sizeof(iovbuf));
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+#endif
+#if defined(SOFTAP_TPUT_ENHANCE)
+ if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
+ dhd_bus_setidletime(dhd, (int)100);
+#ifdef DHDTCPACK_SUPPRESS
+ dhd->tcpack_sup_enabled = FALSE;
+#endif
+#if defined(DHD_TCP_WINSIZE_ADJUST)
+ dhd_use_tcp_window_size_adjust = TRUE;
+#endif
+
+ memset(buf, 0, sizeof(buf));
+ bcm_mkiovar("bus:txglom_auto_control", 0, 0, buf, sizeof(buf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0)) < 0) {
+ glom = 0;
+ bcm_mkiovar("bus:txglom", (char *)&glom, 4, iovbuf, sizeof(iovbuf));
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+ }
+ else {
+ if (buf[0] == 0) {
+ glom = 1;
+ bcm_mkiovar("bus:txglom_auto_control", (char *)&glom, 4, iovbuf,
+ sizeof(iovbuf));
+ dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+ }
+ }
+ }
+#endif /* SOFTAP_TPUT_ENHANCE */
+
/* query for 'ver' to get version info from firmware */
memset(buf, 0, sizeof(buf));
ptr = buf;
bcmstrtok(&ptr, "\n", 0);
/* Print fw version info */
DHD_ERROR(("Firmware version = %s\n", buf));
+#if defined(BCMSDIO)
dhd_set_version_info(dhd, buf);
+#endif /* defined(BCMSDIO) */
+ }
- /* Check and adjust IOCTL response timeout for Manufactring firmware */
- if (strstr(buf, MANUFACTRING_FW) != NULL) {
- dhd_os_set_ioctl_resp_timeout(20000);
- DHD_ERROR(("%s : adjust IOCTL response time for Manufactring Firmware\n",
- __FUNCTION__));
- }
+#if defined(BCMSDIO)
+ dhd_txglom_enable(dhd, TRUE);
+#endif /* defined(BCMSDIO) */
+
+#if defined(BCMSDIO)
+#ifdef PROP_TXSTATUS
+ if (disable_proptx ||
+#ifdef PROP_TXSTATUS_VSDB
+ /* enable WLFC only if the firmware is VSDB when it is in STA mode */
+ (dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
+ dhd->op_mode != DHD_FLAG_IBSS_MODE) ||
+#endif /* PROP_TXSTATUS_VSDB */
+ FALSE) {
+ wlfc_enable = FALSE;
}
-#ifdef BCMSDIOH_TXGLOM
- if (bcmsdh_glom_enabled()) {
- dhd_txglom_enable(dhd, TRUE);
+#ifndef DISABLE_11N
+ ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_down, sizeof(wl_down), TRUE, 0);
+ bcm_mkiovar("ampdu_hostreorder", (char *)&hostreorder, 4, iovbuf, sizeof(iovbuf));
+ if ((ret2 = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) {
+ DHD_ERROR(("%s wl ampdu_hostreorder failed %d\n", __FUNCTION__, ret2));
+ if (ret2 != BCME_UNSUPPORTED)
+ ret = ret2;
+ if (ret2 != BCME_OK)
+ hostreorder = 0;
}
-#endif /* BCMSDIOH_TXGLOM */
+#endif /* DISABLE_11N */
-#if defined(PROP_TXSTATUS) && !defined(PROP_TXSTATUS_VSDB)
- dhd_wlfc_init(dhd);
-#endif /* PROP_TXSTATUS && !PROP_TXSTATUS_VSDB */
+#ifdef READ_CONFIG_FROM_FILE
+ dhd_preinit_config(dhd, 0);
+#endif /* READ_CONFIG_FROM_FILE */
+
+ if (wlfc_enable)
+ dhd_wlfc_init(dhd);
+#ifndef DISABLE_11N
+ else if (hostreorder)
+ dhd_wlfc_hostreorder_init(dhd);
+#endif /* DISABLE_11N */
+
+#endif /* PROP_TXSTATUS */
+#endif /* BCMSDIO || BCMBUS */
+#ifdef PCIE_FULL_DONGLE
+ /* For FD we need all the packets at DHD to handle intra-BSS forwarding */
+ if (FW_SUPPORTED(dhd, ap)) {
+ wl_ap_isolate = AP_ISOLATE_SENDUP_ALL;
+ bcm_mkiovar("ap_isolate", (char *)&wl_ap_isolate, 4, iovbuf, sizeof(iovbuf));
+ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0)
+ DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
+ }
+#endif /* PCIE_FULL_DONGLE */
#ifdef PNO_SUPPORT
if (!dhd->pno_state) {
dhd_pno_init(dhd);
}
#endif
+#ifdef WL11U
+ dhd_interworking_enable(dhd);
+#endif /* WL11U */
done:
return ret;
* whenever there is an event related to an IP address.
* ptr : kernel provided pointer to IP address that has changed
*/
-static int dhd_device_event(struct notifier_block *this,
+static int dhd_inetaddr_notifier_call(struct notifier_block *this,
unsigned long event,
void *ptr)
{
}
#endif /* LINUX_VERSION_CODE */
- dhd = *(dhd_info_t **)netdev_priv(ifa->ifa_dev->dev);
+ dhd = DHD_DEV_INFO(ifa->ifa_dev->dev);
if (!dhd)
return NOTIFY_DONE;
aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, TRUE, idx);
#endif /* AOE_IP_ALIAS_SUPPORT */
break;
-
+
+ case NETDEV_DOWN:
+ DHD_ARPOE(("%s: [%s] Down IP: 0x%x\n",
+ __FUNCTION__, ifa->ifa_label, ifa->ifa_address));
+ dhd->pend_ipaddr = 0;
+#ifdef AOE_IP_ALIAS_SUPPORT
+ DHD_ARPOE(("%s:interface is down, AOE clr all for this if\n",
+ __FUNCTION__));
+ aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, FALSE, idx);
+#else
+ dhd_aoe_hostip_clr(&dhd->pub, idx);
+ dhd_aoe_arp_clr(&dhd->pub, idx);
+#endif /* AOE_IP_ALIAS_SUPPORT */
+ break;
+
+ default:
+ DHD_ARPOE(("%s: do noting for [%s] Event: %lu\n",
+ __func__, ifa->ifa_label, event));
+ break;
+ }
+ return NOTIFY_DONE;
+}
+#endif /* ARP_OFFLOAD_SUPPORT */
+
+#ifdef CONFIG_IPV6
+/* Neighbor Discovery Offload: deferred handler */
+static void
+dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event)
+{
+ struct ipv6_work_info_t *ndo_work = (struct ipv6_work_info_t *)event_data;
+ dhd_pub_t *pub = &((dhd_info_t *)dhd_info)->pub;
+ int ret;
+
+ if (event != DHD_WQ_WORK_IPV6_NDO) {
+ DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
+ return;
+ }
+
+ if (!ndo_work) {
+ DHD_ERROR(("%s: ipv6 work info is not initialized \n", __FUNCTION__));
+ return;
+ }
+
+ if (!pub) {
+ DHD_ERROR(("%s: dhd pub is not initialized \n", __FUNCTION__));
+ return;
+ }
+
+ if (ndo_work->if_idx) {
+ DHD_ERROR(("%s: idx %d \n", __FUNCTION__, ndo_work->if_idx));
+ return;
+ }
+
+ switch (ndo_work->event) {
+ case NETDEV_UP:
+ DHD_TRACE(("%s: Enable NDO and add ipv6 into table \n ", __FUNCTION__));
+ ret = dhd_ndo_enable(pub, TRUE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: Enabling NDO Failed %d\n", __FUNCTION__, ret));
+ }
+
+ ret = dhd_ndo_add_ip(pub, &ndo_work->ipv6_addr[0], ndo_work->if_idx);
+ if (ret < 0) {
+ DHD_ERROR(("%s: Adding host ip for NDO failed %d\n",
+ __FUNCTION__, ret));
+ }
+ break;
case NETDEV_DOWN:
- DHD_ARPOE(("%s: [%s] Down IP: 0x%x\n",
- __FUNCTION__, ifa->ifa_label, ifa->ifa_address));
- dhd->pend_ipaddr = 0;
-#ifdef AOE_IP_ALIAS_SUPPORT
- DHD_ARPOE(("%s:interface is down, AOE clr all for this if\n",
- __FUNCTION__));
- aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, FALSE, idx);
-#else
- dhd_aoe_hostip_clr(&dhd->pub, idx);
- dhd_aoe_arp_clr(&dhd->pub, idx);
-#endif /* AOE_IP_ALIAS_SUPPORT */
- break;
+ DHD_TRACE(("%s: clear ipv6 table \n", __FUNCTION__));
+ ret = dhd_ndo_remove_ip(pub, ndo_work->if_idx);
+ if (ret < 0) {
+ DHD_ERROR(("%s: Removing host ip for NDO failed %d\n",
+ __FUNCTION__, ret));
+ goto done;
+ }
+ ret = dhd_ndo_enable(pub, FALSE);
+ if (ret < 0) {
+ DHD_ERROR(("%s: disabling NDO Failed %d\n", __FUNCTION__, ret));
+ goto done;
+ }
+ break;
default:
- DHD_ARPOE(("%s: do noting for [%s] Event: %lu\n",
- __func__, ifa->ifa_label, event));
+ DHD_ERROR(("%s: unknown notifier event \n", __FUNCTION__));
break;
}
- return NOTIFY_DONE;
+done:
+ /* free ndo_work, allocated while scheduling the work */
+ kfree(ndo_work);
+
+ return;
}
-#endif /* ARP_OFFLOAD_SUPPORT */
/*
* Neighbor Discovery Offload: Called when an interface
* is assigned with ipv6 address.
* Handles only primary interface
*/
-static int dhd_device_ipv6_event(struct notifier_block *this,
+static int dhd_inet6addr_notifier_call(struct notifier_block *this,
unsigned long event,
void *ptr)
{
dhd_info_t *dhd;
dhd_pub_t *dhd_pub;
- struct ipv6_addr *_ipv6_addr = NULL;
struct inet6_ifaddr *inet6_ifa = ptr;
- int idx = 0;
+ struct in6_addr *ipv6_addr = &inet6_ifa->addr;
+ struct ipv6_work_info_t *ndo_info;
+ int idx = 0; /* REVISIT */
+
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
/* Filter notifications meant for non Broadcom devices */
if (inet6_ifa->idev->dev->netdev_ops != &dhd_ops_pri) {
- goto exit;
+ return NOTIFY_DONE;
}
#endif /* LINUX_VERSION_CODE */
- dhd = *(dhd_info_t **)netdev_priv(inet6_ifa->idev->dev);
+ dhd = DHD_DEV_INFO(inet6_ifa->idev->dev);
if (!dhd)
- goto exit;
+ return NOTIFY_DONE;
- idx = dhd_net2idx(dhd, inet6_ifa->idev->dev);
- if (idx == DHD_BAD_IF) {
- DHD_ERROR(("Cannot find ifidx"));
- goto exit;
- }
+ if (dhd->iflist[idx] && dhd->iflist[idx]->net != inet6_ifa->idev->dev)
+ return NOTIFY_DONE;
dhd_pub = &dhd->pub;
if (!FW_SUPPORTED(dhd_pub, ndoe))
- goto exit;
- if (event == NETDEV_UP || event == NETDEV_DOWN) {
- _ipv6_addr = NATIVE_MALLOC(dhd_pub->osh, sizeof(struct ipv6_addr));
- if (_ipv6_addr == NULL) {
- DHD_ERROR(("Failed to allocate ipv6\n"));
- goto exit;
- }
- memcpy(&_ipv6_addr->ipv6_addr[0], &inet6_ifa->addr, IPV6_ADDR_LEN);
- DHD_TRACE(("IPV6 address : %pI6\n", &inet6_ifa->addr));
- }
- switch (event) {
- case NETDEV_UP:
- DHD_TRACE(("%s: Enable NDO and add ipv6 into table \n ", __FUNCTION__));
- _ipv6_addr->ipv6_oper = DHD_IPV6_ADDR_ADD;
- break;
- case NETDEV_DOWN:
- DHD_TRACE(("%s: clear ipv6 table \n", __FUNCTION__));
- _ipv6_addr->ipv6_oper = DHD_IPV6_ADDR_DELETE;
- break;
- default:
- DHD_ERROR(("%s: unknown notifier event \n", __FUNCTION__));
- goto exit;
+ return NOTIFY_DONE;
+
+ ndo_info = (struct ipv6_work_info_t *)kzalloc(sizeof(struct ipv6_work_info_t), GFP_ATOMIC);
+ if (!ndo_info) {
+ DHD_ERROR(("%s: ipv6 work alloc failed\n", __FUNCTION__));
+ return NOTIFY_DONE;
}
- spin_lock_bh(&dhd->iflist[idx]->ipv6_lock);
- list_add_tail(&_ipv6_addr->list, &dhd->iflist[idx]->ipv6_list);
- spin_unlock_bh(&dhd->iflist[idx]->ipv6_lock);
- up(&dhd->thr_sysioc_ctl.sema);
-exit:
+
+ ndo_info->event = event;
+ ndo_info->if_idx = idx;
+ memcpy(&ndo_info->ipv6_addr[0], ipv6_addr, IPV6_ADDR_LEN);
+
+ /* defer the work to a thread as it may block the kernel */
+ dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)ndo_info, DHD_WQ_WORK_IPV6_NDO,
+ dhd_inet6_work_handler, DHD_WORK_PRIORITY_LOW);
return NOTIFY_DONE;
}
+#endif /* #ifdef CONFIG_IPV6 */
int
-dhd_net_attach(dhd_pub_t *dhdp, int ifidx)
+dhd_register_if(dhd_pub_t *dhdp, int ifidx, bool need_rtnl_lock)
{
dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
+ dhd_if_t *ifp;
struct net_device *net = NULL;
int err = 0;
uint8 temp_addr[ETHER_ADDR_LEN] = { 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33 };
DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));
ASSERT(dhd && dhd->iflist[ifidx]);
-
- net = dhd->iflist[ifidx]->net;
- ASSERT(net);
+ ifp = dhd->iflist[ifidx];
+ net = ifp->net;
+ ASSERT(net && (ifp->idx == ifidx));
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
ASSERT(!net->open);
/*
* We have to use the primary MAC for virtual interfaces
*/
- memcpy(temp_addr, dhd->iflist[ifidx]->mac_addr, ETHER_ADDR_LEN);
+ memcpy(temp_addr, ifp->mac_addr, ETHER_ADDR_LEN);
/*
* Android sets the locally administered bit to indicate that this is a
* portable hotspot. This will not work in simultaneous AP/STA mode,
memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN);
- net->ifindex = 0;
- if ((err = register_netdev(net)) != 0) {
- DHD_ERROR(("couldn't register the net device, err %d\n", err));
+ if (ifidx == 0)
+ printf("%s\n", dhd_version);
+
+ if (need_rtnl_lock)
+ err = register_netdev(net);
+ else
+ err = register_netdevice(net);
+
+ if (err != 0) {
+ DHD_ERROR(("couldn't register the net device [%s], err %d\n", net->name, err));
goto fail;
}
- printf("Broadcom Dongle Host Driver: register interface [%s]"
- " MAC: "MACDBG"\n",
- net->name,
+
+
+
+ printf("Register interface [%s] MAC: "MACDBG"\n\n", net->name,
MAC2STRDBG(net->dev_addr));
#if defined(SOFTAP) && defined(WL_WIRELESS_EXT) && !defined(WL_CFG80211)
- wl_iw_iscan_set_scan_broadcast_prep(net, 1);
+// wl_iw_iscan_set_scan_broadcast_prep(net, 1);
#endif
-#if 1 && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+#if 1 && (defined(BCMPCIE) || (defined(BCMLXSDMMC) && (LINUX_VERSION_CODE >= \
+ KERNEL_VERSION(2, 6, 27))))
if (ifidx == 0) {
- dhd_registration_check = TRUE;
+#ifdef BCMLXSDMMC
up(&dhd_registration_sem);
+#endif
+ if (!dhd_download_fw_on_driverload) {
+ dhd_net_bus_devreset(net, TRUE);
+#ifdef BCMLXSDMMC
+ dhd_net_bus_suspend(net);
+#endif /* BCMLXSDMMC */
+ wifi_platform_set_power(dhdp->info->adapter, FALSE, WIFI_TURNOFF_DELAY);
+ }
}
-#endif
+#endif /* OEM_ANDROID && (BCMPCIE || (BCMLXSDMMC && KERNEL_VERSION >= 2.6.27)) */
return 0;
fail:
}
#if defined(OOB_INTR_ONLY)
- bcmsdh_unregister_oob_intr();
+ dhd_bus_oob_intr_unregister(dhdp);
#endif
}
}
return;
DHD_TRACE(("%s: Enter state 0x%x\n", __FUNCTION__, dhd->dhd_state));
-#ifdef ARP_OFFLOAD_SUPPORT
- unregister_inetaddr_notifier(&dhd_notifier);
-#endif /* ARP_OFFLOAD_SUPPORT */
- unregister_inet6addr_notifier(&dhd_notifier_ipv6);
dhd->pub.up = 0;
if (!(dhd->dhd_state & DHD_ATTACH_STATE_DONE)) {
}
if (dhd->dhd_state & DHD_ATTACH_STATE_PROT_ATTACH) {
+#ifdef PCIE_FULL_DONGLE
+ dhd_flow_rings_deinit(dhdp);
+#endif
dhd_bus_detach(dhdp);
if (dhdp->prot)
dhd_prot_detach(dhdp);
}
+#ifdef ARP_OFFLOAD_SUPPORT
+ if (dhd_inetaddr_notifier_registered) {
+ dhd_inetaddr_notifier_registered = FALSE;
+ unregister_inetaddr_notifier(&dhd_inetaddr_notifier);
+ }
+#endif /* ARP_OFFLOAD_SUPPORT */
+#ifdef CONFIG_IPV6
+ if (dhd_inet6addr_notifier_registered) {
+ dhd_inet6addr_notifier_registered = FALSE;
+ unregister_inet6addr_notifier(&dhd_inet6addr_notifier);
+ }
+#endif
+
#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
if (dhd->dhd_state & DHD_ATTACH_STATE_EARLYSUSPEND_DONE) {
if (dhd->early_suspend.suspend)
}
#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
- cancel_work_sync(&dhd->work_hang);
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
-
#if defined(WL_WIRELESS_EXT)
if (dhd->dhd_state & DHD_ATTACH_STATE_WL_ATTACH) {
/* Detatch and unlink in the iw */
}
#endif /* defined(WL_WIRELESS_EXT) */
- if (dhd->thr_sysioc_ctl.thr_pid >= 0) {
- PROC_STOP(&dhd->thr_sysioc_ctl);
- }
-
/* delete all interfaces, start with virtual */
if (dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) {
int i = 1;
dhd_if_t *ifp;
/* Cleanup virtual interfaces */
+ dhd_net_if_lock_local(dhd);
for (i = 1; i < DHD_MAX_IFS; i++) {
- dhd_net_if_lock_local(dhd);
- if (dhd->iflist[i]) {
- dhd->iflist[i]->state = DHD_IF_DEL;
- dhd->iflist[i]->idx = i;
- dhd_op_if(dhd->iflist[i]);
- }
-
- dhd_net_if_unlock_local(dhd);
+ if (dhd->iflist[i])
+ dhd_remove_if(&dhd->pub, i, TRUE);
}
+ dhd_net_if_unlock_local(dhd);
+
/* delete primary interface 0 */
ifp = dhd->iflist[0];
ASSERT(ifp);
ASSERT(ifp->net);
if (ifp && ifp->net) {
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
- if (ifp->net->open)
-#else
- if (ifp->net->netdev_ops == &dhd_ops_pri)
-#endif
- {
- unregister_netdev(ifp->net);
+
+
+
+ /* in unregister_netdev case, the interface gets freed by net->destructor
+ * (which is set to free_netdev)
+ */
+ if (ifp->net->reg_state == NETREG_UNINITIALIZED)
free_netdev(ifp->net);
- ifp->net = NULL;
- MFREE(dhd->pub.osh, ifp, sizeof(*ifp));
- dhd->iflist[0] = NULL;
- }
+ else
+ unregister_netdev(ifp->net);
+ ifp->net = NULL;
+#ifdef DHD_WMF
+ dhd_wmf_cleanup(dhdp, 0);
+#endif /* DHD_WMF */
+
+ dhd_if_del_sta_list(ifp);
+
+ MFREE(dhd->pub.osh, ifp, sizeof(*ifp));
+ dhd->iflist[0] = NULL;
}
}
/* Clear the watchdog timer */
- flags = dhd_os_spin_lock(&dhd->pub);
+ DHD_GENERAL_LOCK(&dhd->pub, flags);
timer_valid = dhd->wd_timer_valid;
dhd->wd_timer_valid = FALSE;
- dhd_os_spin_unlock(&dhd->pub, flags);
+ DHD_GENERAL_UNLOCK(&dhd->pub, flags);
if (timer_valid)
del_timer_sync(&dhd->timer);
if (dhd->dhd_state & DHD_ATTACH_STATE_THREADS_CREATED) {
-#ifdef DHDTHREAD
if (dhd->thr_wdt_ctl.thr_pid >= 0) {
PROC_STOP(&dhd->thr_wdt_ctl);
}
- if (dhd->thr_dpc_ctl.thr_pid >= 0) {
- PROC_STOP(&dhd->thr_dpc_ctl);
- }
-#ifdef RXFRAME_THREAD
- if (dhd->thr_rxf_ctl.thr_pid >= 0) {
+ if (dhd->rxthread_enabled && dhd->thr_rxf_ctl.thr_pid >= 0) {
PROC_STOP(&dhd->thr_rxf_ctl);
}
-#endif
- else
-#endif /* DHDTHREAD */
- tasklet_kill(&dhd->tasklet);
+
+ if (dhd->thr_dpc_ctl.thr_pid >= 0) {
+ PROC_STOP(&dhd->thr_dpc_ctl);
+ } else
+ tasklet_kill(&dhd->tasklet);
}
#ifdef WL_CFG80211
if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) {
dhd_monitor_uninit();
}
#endif
- dhd_conf_detach(dhdp);
+ /* free deferred work queue */
+ dhd_deferred_work_deinit(dhd->dhd_deferred_wq);
+ dhd->dhd_deferred_wq = NULL;
+
+#ifdef SHOW_LOGTRACE
+ if (dhd->event_data.fmts)
+ kfree(dhd->event_data.fmts);
+ if (dhd->event_data.raw_fmts)
+ kfree(dhd->event_data.raw_fmts);
+#endif /* SHOW_LOGTRACE */
#ifdef PNO_SUPPORT
if (dhdp->pno_state)
dhd_pno_deinit(dhdp);
#endif
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \
- KERNEL_VERSION(2, 6, 39)) && defined(CONFIG_PM_SLEEP)
- unregister_pm_notifier(&dhd_sleep_pm_notifier);
-#endif /* (LINUX_VERSION >= 2.6.27 && LINUX_VERSION <= 2.6.39 && CONFIG_PM_SLEEP */
-
+#if defined(CONFIG_PM_SLEEP)
+ if (dhd_pm_notifier_registered) {
+ unregister_pm_notifier(&dhd_pm_notifier);
+ dhd_pm_notifier_registered = FALSE;
+ }
+#endif /* CONFIG_PM_SLEEP */
+#ifdef DEBUG_CPU_FREQ
+ if (dhd->new_freq)
+ free_percpu(dhd->new_freq);
+ dhd->new_freq = NULL;
+ cpufreq_unregister_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
+#endif
if (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) {
DHD_TRACE(("wd wakelock count:%d\n", dhd->wakelock_wd_counter));
#ifdef CONFIG_HAS_WAKELOCK
wake_lock_destroy(&dhd->wl_wdwake);
#endif /* CONFIG_HAS_WAKELOCK */
}
+
+
+
+#ifdef DHDTCPACK_SUPPRESS
+ /* This will free all MEM allocated for TCPACK SUPPRESS */
+ dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
+#endif /* DHDTCPACK_SUPPRESS */
+ dhd_conf_detach(dhdp);
}
dhdp->reorder_bufs[i] = NULL;
}
}
+
+ dhd_sta_pool_fini(dhdp, DHD_MAX_STA);
+
dhd = (dhd_info_t *)dhdp->info;
-#if defined(CONFIG_DHD_USE_STATIC_BUF)
/* If pointer is allocated by dhd_os_prealloc then avoid MFREE */
- if (dhd != (dhd_info_t *)dhd_os_prealloc(NULL, DHD_PREALLOC_DHD_INFO, 0)) {
-#endif /* CONFIG_DHD_USE_STATIC_BUF */
- if (dhd)
- MFREE(dhd->pub.osh, dhd, sizeof(*dhd));
-#if defined(CONFIG_DHD_USE_STATIC_BUF)
- }
- else {
- if (dhd)
- dhd = NULL;
- }
-#endif /* CONFIG_DHD_USE_STATIC_BUF */
+ if (dhd &&
+ dhd != (dhd_info_t *)dhd_os_prealloc(dhdp, DHD_PREALLOC_DHD_INFO, 0, FALSE))
+ MFREE(dhd->pub.osh, dhd, sizeof(*dhd));
+ dhd = NULL;
}
}
-static void
+static void
dhd_module_cleanup(void)
{
- DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+ printk("%s: Enter\n", __FUNCTION__);
dhd_bus_unregister();
-#if defined(CONFIG_WIFI_CONTROL_FUNC)
- wl_android_wifictrl_func_del();
-#endif /* CONFIG_WIFI_CONTROL_FUNC */
wl_android_exit();
- /* Call customer gpio to turn off power with WL_REG_ON signal */
- dhd_customer_gpio_wlan_ctrl(WLAN_POWER_OFF);
+ dhd_wifi_platform_unregister_drv();
+ printk("%s: Exit\n", __FUNCTION__);
}
-#if defined(CONFIG_WIFI_CONTROL_FUNC)
-extern bool g_wifi_poweron;
-#endif /* CONFIG_WIFI_CONTROL_FUNC */
-
+static void
+dhd_module_exit(void)
+{
+ dhd_module_cleanup();
+ unregister_reboot_notifier(&dhd_reboot_notifier);
+}
static int
dhd_module_init(void)
{
- int error = 0;
-
-#if 1 && defined(BCMLXSDMMC) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
+ int err;
int retry = POWERUP_MAX_RETRY;
- int chip_up = 0;
-#endif
- DHD_TRACE(("%s: Enter\n", __FUNCTION__));
-
- wl_android_init();
+ printk("%s: in\n", __FUNCTION__);
-#if defined(DHDTHREAD)
- /* Sanity check on the module parameters */
- do {
- /* Both watchdog and DPC as tasklets are ok */
- if ((dhd_watchdog_prio < 0) && (dhd_dpc_prio < 0))
- break;
+ DHD_PERIM_RADIO_INIT();
- /* If both watchdog and DPC are threads, TX must be deferred */
- if ((dhd_watchdog_prio >= 0) && (dhd_dpc_prio >= 0) && dhd_deferred_tx)
- break;
+ if (firmware_path[0] != '\0') {
+ strncpy(fw_bak_path, firmware_path, MOD_PARAM_PATHLEN);
+ fw_bak_path[MOD_PARAM_PATHLEN-1] = '\0';
+ }
- DHD_ERROR(("Invalid module parameters.\n"));
- error = -EINVAL;
- } while (0);
-#endif
- if (error)
- goto fail_0;
+ if (nvram_path[0] != '\0') {
+ strncpy(nv_bak_path, nvram_path, MOD_PARAM_PATHLEN);
+ nv_bak_path[MOD_PARAM_PATHLEN-1] = '\0';
+ }
-#if 1 && defined(BCMLXSDMMC) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
do {
- sema_init(&dhd_chipup_sem, 0);
- dhd_bus_reg_sdio_notify(&dhd_chipup_sem);
- dhd_customer_gpio_wlan_ctrl(WLAN_POWER_ON);
-#if defined(CONFIG_WIFI_CONTROL_FUNC)
- if (wl_android_wifictrl_func_add() < 0) {
- dhd_bus_unreg_sdio_notify();
- goto fail_1;
- }
-#endif /* defined(CONFIG_WIFI_CONTROL_FUNC) */
- if (down_timeout(&dhd_chipup_sem,
- msecs_to_jiffies(POWERUP_WAIT_MS)) == 0) {
- dhd_bus_unreg_sdio_notify();
- chip_up = 1;
+ err = dhd_wifi_platform_register_drv();
+ if (!err) {
+ register_reboot_notifier(&dhd_reboot_notifier);
break;
}
- DHD_ERROR(("\nfailed to power up wifi chip, retry again (%d left) **\n\n",
- retry+1));
- dhd_bus_unreg_sdio_notify();
-#if defined(CONFIG_WIFI_CONTROL_FUNC)
- wl_android_wifictrl_func_del();
-#endif /* defined(CONFIG_WIFI_CONTROL_FUNC) */
- dhd_customer_gpio_wlan_ctrl(WLAN_POWER_OFF);
- } while (retry-- > 0);
-
- if (!chip_up) {
- DHD_ERROR(("\nfailed to power up wifi chip, max retry reached, exits **\n\n"));
- error = -ENODEV;
- goto fail_0;
- }
-#else
- dhd_customer_gpio_wlan_ctrl(WLAN_POWER_ON);
-#if defined(CONFIG_WIFI_CONTROL_FUNC)
- if (wl_android_wifictrl_func_add() < 0)
- goto fail_1;
-#endif /* defined(CONFIG_WIFI_CONTROL_FUNC) */
-
-#endif
-
-#if defined(CONFIG_WIFI_CONTROL_FUNC) && defined(BCMLXSDMMC)
- /* If the wifi_set_power() is failed,
- * we need to jump error handling routines.
- */
- if (!g_wifi_poweron) {
- printk("%s: wifi_set_power() failed\n", __FUNCTION__);
- error = -ENODEV;
- goto fail_1;
- }
-#endif /* defined(CONFIG_WIFI_CONTROL_FUNC) */
-
-#if 1 && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
- sema_init(&dhd_registration_sem, 0);
-#endif
-
+ else {
+ DHD_ERROR(("%s: Failed to load the driver, try cnt %d\n",
+ __FUNCTION__, retry));
+ strncpy(firmware_path, fw_bak_path, MOD_PARAM_PATHLEN);
+ firmware_path[MOD_PARAM_PATHLEN-1] = '\0';
+ strncpy(nvram_path, nv_bak_path, MOD_PARAM_PATHLEN);
+ nvram_path[MOD_PARAM_PATHLEN-1] = '\0';
+ }
+ } while (retry--);
- error = dhd_bus_register();
+ if (err)
+ DHD_ERROR(("%s: Failed to load driver max retry reached**\n", __FUNCTION__));
- if (!error)
- printf("\n%s\n", dhd_version);
- else {
- DHD_ERROR(("%s: sdio_register_driver failed\n", __FUNCTION__));
- goto fail_1;
- }
+ printk("%s: Exit err=%d\n", __FUNCTION__, err);
+ return err;
+}
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(BCMLXSDMMC)
- /*
- * Wait till MMC sdio_register_driver callback called and made driver attach.
- * It's needed to make sync up exit from dhd insmod and
- * Kernel MMC sdio device callback registration
- */
- if ((down_timeout(&dhd_registration_sem,
- msecs_to_jiffies(DHD_REGISTRATION_TIMEOUT)) != 0) ||
- (dhd_registration_check != TRUE)) {
- error = -ENODEV;
- DHD_ERROR(("%s: sdio_register_driver timeout or error \n", __FUNCTION__));
- goto fail_2;
+static int
+dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused)
+{
+ DHD_TRACE(("%s: code = %ld\n", __FUNCTION__, code));
+ if (code == SYS_RESTART) {
}
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
-#if defined(WL_CFG80211)
- wl_android_post_init();
-#endif /* defined(WL_CFG80211) */
-
- return error;
-
-#if 1 && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(BCMLXSDMMC)
-fail_2:
- dhd_bus_unregister();
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
-
-fail_1:
-
-#if defined(CONFIG_WIFI_CONTROL_FUNC)
- wl_android_wifictrl_func_del();
-#endif
-
- /* Call customer gpio to turn off power with WL_REG_ON signal */
- dhd_customer_gpio_wlan_ctrl(WLAN_POWER_OFF);
-
-fail_0:
-
- wl_android_exit();
- return error;
+ return NOTIFY_DONE;
}
extern char WIFI_MODULE_NAME[];
printk("=======================================================\n");
printk("== Dis-launching Wi-Fi driver! (Powered by Rockchip) ==\n");
printk("=======================================================\n");
- dhd_module_cleanup();
+ dhd_module_exit();
}
EXPORT_SYMBOL(rockchip_wifi_init_module_rkwifi);
EXPORT_SYMBOL(rockchip_wifi_exit_module_rkwifi);
-//late_initcall(rockchip_wifi_init_module);
-//module_exit(rockchip_wifi_exit_module);
-/*
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
-#ifdef USE_LATE_INITCALL_SYNC
-late_initcall_sync(dhd_module_init);
-#else
-late_initcall(dhd_module_init);
-#endif // USE_LATE_INITCALL_SYNC
-#else
-module_init(dhd_module_init);
-#endif // LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
-
-module_exit(dhd_module_cleanup);
-*/
+//#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
+//#if defined(CONFIG_DEFERRED_INITCALLS)
+//deferred_module_init(dhd_module_init);
+//#elif defined(USE_LATE_INITCALL_SYNC)
+//late_initcall_sync(dhd_module_init);
+//#else
+//late_initcall(dhd_module_init);
+//#endif /* USE_LATE_INITCALL_SYNC */
+//#else
+//module_init(dhd_module_init);
+//#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */
+//
+//module_exit(dhd_module_exit);
/*
* OS specific functions required to implement DHD driver in OS independent way
dhd_info_t * dhd = (dhd_info_t *)(pub->info);
if (dhd) {
+ DHD_PERIM_UNLOCK(pub);
+
down(&dhd->proto_sem);
+
+ DHD_PERIM_LOCK(pub);
return 1;
}
timeout = dhd_ioctl_timeout_msec * HZ / 1000;
#endif
+ DHD_PERIM_UNLOCK(pub);
+
timeout = wait_event_timeout(dhd->ioctl_resp_wait, (*condition), timeout);
+
+ DHD_PERIM_LOCK(pub);
+
return timeout;
}
{
dhd_info_t *dhd = (dhd_info_t *)(pub->info);
- if (waitqueue_active(&dhd->ioctl_resp_wait)) {
- wake_up(&dhd->ioctl_resp_wait);
- }
-
+ wake_up(&dhd->ioctl_resp_wait);
return 0;
}
return;
}
- flags = dhd_os_spin_lock(pub);
+ DHD_GENERAL_LOCK(pub, flags);
/* don't start the wd until fw is loaded */
if (pub->busstate == DHD_BUS_DOWN) {
- dhd_os_spin_unlock(pub, flags);
+ DHD_GENERAL_UNLOCK(pub, flags);
if (!wdtick)
DHD_OS_WD_WAKE_UNLOCK(pub);
return;
/* Totally stop the timer */
if (!wdtick && dhd->wd_timer_valid == TRUE) {
dhd->wd_timer_valid = FALSE;
- dhd_os_spin_unlock(pub, flags);
-#ifdef DHDTHREAD
+ DHD_GENERAL_UNLOCK(pub, flags);
del_timer_sync(&dhd->timer);
-#else
- del_timer(&dhd->timer);
-#endif /* DHDTHREAD */
DHD_OS_WD_WAKE_UNLOCK(pub);
return;
}
mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
dhd->wd_timer_valid = TRUE;
}
- dhd_os_spin_unlock(pub, flags);
+ DHD_GENERAL_UNLOCK(pub, flags);
}
void *
filp_close((struct file *)image, NULL);
}
-
void
dhd_os_sdlock(dhd_pub_t *pub)
{
dhd = (dhd_info_t *)(pub->info);
-#ifdef DHDTHREAD
- if (dhd->threads_only)
+ if (dhd_dpc_prio >= 0)
down(&dhd->sdsem);
else
-#endif /* DHDTHREAD */
- spin_lock_bh(&dhd->sdlock);
+ spin_lock_bh(&dhd->sdlock);
}
void
dhd = (dhd_info_t *)(pub->info);
-#ifdef DHDTHREAD
- if (dhd->threads_only)
+ if (dhd_dpc_prio >= 0)
up(&dhd->sdsem);
else
-#endif /* DHDTHREAD */
- spin_unlock_bh(&dhd->sdlock);
+ spin_unlock_bh(&dhd->sdlock);
}
void
{
}
-void
-dhd_os_sdtxlock(dhd_pub_t *pub)
-{
- dhd_os_sdlock(pub);
-}
-
-void
-dhd_os_sdtxunlock(dhd_pub_t *pub)
-{
- dhd_os_sdunlock(pub);
-}
-
-#if defined(DHDTHREAD) && defined(RXFRAME_THREAD)
static void
dhd_os_rxflock(dhd_pub_t *pub)
{
dhd = (dhd_info_t *)(pub->info);
spin_unlock_bh(&dhd->rxf_lock);
}
-#endif /* defined(DHDTHREAD) && defined(RXFRAME_THREAD) */
#ifdef DHDTCPACK_SUPPRESS
void
}
#endif /* DHDTCPACK_SUPPRESS */
-#if defined(CONFIG_DHD_USE_STATIC_BUF)
-uint8* dhd_os_prealloc(void *osh, int section, uint size)
+uint8* dhd_os_prealloc(dhd_pub_t *dhdpub, int section, uint size, bool kmalloc_if_fail)
{
- return (uint8*)wl_android_prealloc(section, size);
+ uint8* buf;
+ gfp_t flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
+
+ buf = (uint8*)wifi_platform_prealloc(dhdpub->info->adapter, section, size);
+ if (buf == NULL && kmalloc_if_fail)
+ buf = kmalloc(size, flags);
+
+ return buf;
}
-void dhd_os_prefree(void *osh, void *addr, uint size)
+void dhd_os_prefree(dhd_pub_t *dhdpub, void *addr, uint size)
{
}
-#endif /* defined(CONFIG_WIFI_CONTROL_FUNC) */
#if defined(WL_WIRELESS_EXT)
struct iw_statistics *
dhd_get_wireless_stats(struct net_device *dev)
{
int res = 0;
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
if (!dhd->pub.up) {
return NULL;
int bcmerror = 0;
ASSERT(dhd != NULL);
- bcmerror = wl_host_event(&dhd->pub, ifidx, pktdata, event, data);
+#ifdef SHOW_LOGTRACE
+ bcmerror = wl_host_event(&dhd->pub, ifidx, pktdata, event, data, &dhd->event_data);
+#else
+ bcmerror = wl_host_event(&dhd->pub, ifidx, pktdata, event, data, NULL);
+#endif /* SHOW_LOGTRACE */
+
if (bcmerror != BCME_OK)
return (bcmerror);
#endif /* defined(WL_WIRELESS_EXT) */
#ifdef WL_CFG80211
- if ((ntoh32(event->event_type) == WLC_E_IF) &&
- (((dhd_if_event_t *)*data)->action == WLC_E_IF_ADD))
- /* If ADD_IF has been called directly by wl utility then we
- * should not report this. In case if ADD_IF was called from
- * CFG stack, then too this event need not be reported back
- */
- return (BCME_OK);
- if ((wl_cfg80211_is_progress_ifchange() ||
- wl_cfg80211_is_progress_ifadd()) && (*ifidx != 0)) {
- /*
- * If IF_ADD/CHANGE operation is going on,
- * discard any event received on the virtual I/F
- */
- return (BCME_OK);
- }
-
ASSERT(dhd->iflist[*ifidx] != NULL);
ASSERT(dhd->iflist[*ifidx]->net != NULL);
- if (dhd->iflist[*ifidx]->event2cfg80211 && dhd->iflist[*ifidx]->net) {
+ if (dhd->iflist[*ifidx]->net)
wl_cfg80211_event(dhd->iflist[*ifidx]->net, event, *data);
- }
#endif /* defined(WL_CFG80211) */
return (bcmerror);
}
}
+#ifdef LOG_INTO_TCPDUMP
+void
+dhd_sendup_log(dhd_pub_t *dhdp, void *data, int data_len)
+{
+ struct sk_buff *p, *skb;
+ uint32 pktlen;
+ int len;
+ dhd_if_t *ifp;
+ dhd_info_t *dhd;
+ uchar *skb_data;
+ int ifidx = 0;
+ struct ether_header eth;
+
+ pktlen = sizeof(eth) + data_len;
+ dhd = dhdp->info;
+
+ if ((p = PKTGET(dhdp->osh, pktlen, FALSE))) {
+ ASSERT(ISALIGNED((uintptr)PKTDATA(dhdp->osh, p), sizeof(uint32)));
+
+ bcopy(&dhdp->mac, &eth.ether_dhost, ETHER_ADDR_LEN);
+ bcopy(&dhdp->mac, &eth.ether_shost, ETHER_ADDR_LEN);
+ ETHER_TOGGLE_LOCALADDR(&eth.ether_shost);
+ eth.ether_type = hton16(ETHER_TYPE_BRCM);
+
+ bcopy((void *)&eth, PKTDATA(dhdp->osh, p), sizeof(eth));
+ bcopy(data, PKTDATA(dhdp->osh, p) + sizeof(eth), data_len);
+ skb = PKTTONATIVE(dhdp->osh, p);
+ skb_data = skb->data;
+ len = skb->len;
+
+ ifidx = dhd_ifname2idx(dhd, "wlan0");
+ ifp = dhd->iflist[ifidx];
+ if (ifp == NULL)
+ ifp = dhd->iflist[0];
+
+ ASSERT(ifp);
+ skb->dev = ifp->net;
+ skb->protocol = eth_type_trans(skb, skb->dev);
+ skb->data = skb_data;
+ skb->len = len;
+
+ /* Strip header, count, deliver upward */
+ skb_pull(skb, ETH_HLEN);
+
+ /* Send the packet */
+ if (in_interrupt()) {
+ netif_rx(skb);
+ } else {
+ netif_rx_ni(skb);
+ }
+ }
+ else {
+ /* Could not allocate an sk_buff */
+ DHD_ERROR(("%s: unable to alloc sk_buff\n", __FUNCTION__));
+ }
+}
+#endif /* LOG_INTO_TCPDUMP */
+
void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar)
{
-#if 1 && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
struct dhd_info *dhdinfo = dhd->info;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
dhd_os_sdunlock(dhd);
wait_event_timeout(dhdinfo->ctrl_wait, (*lockvar == FALSE), timeout);
dhd_os_sdlock(dhd);
-#endif
+#endif /* defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */
return;
}
void dhd_wait_event_wakeup(dhd_pub_t *dhd)
{
-#if 1 && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+#if defined(BCMSDIO) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
struct dhd_info *dhdinfo = dhd->info;
if (waitqueue_active(&dhdinfo->ctrl_wait))
wake_up(&dhdinfo->ctrl_wait);
return;
}
+#if defined(BCMSDIO) || defined(BCMPCIE)
int
-dhd_dev_reset(struct net_device *dev, uint8 flag)
+dhd_net_bus_devreset(struct net_device *dev, uint8 flag)
{
- int ret;
-
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ int ret = 0;
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
if (flag == TRUE) {
/* Issue wl down command before resetting the chip */
if (dhd_wl_ioctl_cmd(&dhd->pub, WLC_DOWN, NULL, 0, TRUE, 0) < 0) {
DHD_TRACE(("%s: wl down failed\n", __FUNCTION__));
}
-#if defined(PROP_TXSTATUS) && !defined(PROP_TXSTATUS_VSDB)
- dhd_wlfc_deinit(&dhd->pub);
- if (dhd->pub.plat_deinit)
- dhd->pub.plat_deinit((void *)&dhd->pub);
-#endif /* PROP_TXSTATUS && !PROP_TXSTATUS_VSDB */
+#ifdef PROP_TXSTATUS
+ if (dhd->pub.wlfc_enabled)
+ dhd_wlfc_deinit(&dhd->pub);
+#endif /* PROP_TXSTATUS */
+#ifdef PNO_SUPPORT
+ if (dhd->pub.pno_state)
+ dhd_pno_deinit(&dhd->pub);
+#endif
+ }
+
+#ifdef BCMSDIO
+ if (!flag) {
+ dhd_update_fw_nv_path(dhd);
+ /* update firmware and nvram path to sdio bus */
+ dhd_bus_update_fw_nv_path(dhd->pub.bus,
+ dhd->fw_path, dhd->nv_path, dhd->conf_path);
}
+#endif /* BCMSDIO */
ret = dhd_bus_devreset(&dhd->pub, flag);
if (ret) {
return ret;
}
+#ifdef BCMSDIO
+int
+dhd_net_bus_suspend(struct net_device *dev)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ return dhd_bus_suspend(&dhd->pub);
+}
+
+int
+dhd_net_bus_resume(struct net_device *dev, uint8 stage)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ return dhd_bus_resume(&dhd->pub, stage);
+}
+
+#endif /* BCMSDIO */
+#endif /* BCMSDIO || BCMPCIE */
+
int net_os_set_suspend_disable(struct net_device *dev, int val)
{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
int ret = 0;
if (dhd) {
int net_os_set_suspend(struct net_device *dev, int val, int force)
{
int ret = 0;
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
if (dhd) {
#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
int net_os_set_suspend_bcn_li_dtim(struct net_device *dev, int val)
{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
if (dhd)
dhd->pub.suspend_bcn_li_dtim = val;
#ifdef PKT_FILTER_SUPPORT
int net_os_rxfilter_add_remove(struct net_device *dev, int add_remove, int num)
{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
char *filterp = NULL;
int filter_id = 0;
int ret = 0;
- if (dhd->pub.conf->filter_out_all_packets)
- return 0;
+ if (!dhd_master_mode)
+ add_remove = !add_remove;
if (!dhd || (num == DHD_UNICAST_FILTER_NUM) ||
(num == DHD_MDNS_FILTER_NUM))
dhd->pub.pktfilter[num] = filterp;
dhd_pktfilter_offload_set(&dhd->pub, dhd->pub.pktfilter[num]);
} else { /* Delete filter */
- dhd_pktfilter_offload_delete(&dhd->pub, filter_id);
+ if (dhd->pub.pktfilter[num] != NULL) {
+ dhd_pktfilter_offload_delete(&dhd->pub, filter_id);
+ dhd->pub.pktfilter[num] = NULL;
+ }
}
return ret;
}
int dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val)
+
{
int ret = 0;
/* function to enable/disable packet for Network device */
int net_os_enable_packet_filter(struct net_device *dev, int val)
{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
return dhd_os_enable_packet_filter(&dhd->pub, val);
}
int
dhd_dev_init_ioctl(struct net_device *dev)
{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
int ret;
- dhd_process_cid_mac(&dhd->pub, TRUE);
-
- if ((ret = dhd_preinit_ioctls(&dhd->pub)) < 0)
+ if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0)
goto done;
- dhd_process_cid_mac(&dhd->pub, FALSE);
-
done:
return ret;
}
int
dhd_dev_pno_stop_for_ssid(struct net_device *dev)
{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
return (dhd_pno_stop_for_ssid(&dhd->pub));
}
dhd_dev_pno_set_for_ssid(struct net_device *dev, wlc_ssid_t* ssids_local, int nssid,
uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan)
{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
return (dhd_pno_set_for_ssid(&dhd->pub, ssids_local, nssid, scan_fr,
pno_repeat, pno_freq_expo_max, channel_list, nchan));
int
dhd_dev_pno_enable(struct net_device *dev, int enable)
{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
return (dhd_pno_enable(&dhd->pub, enable));
}
dhd_dev_pno_set_for_hotlist(struct net_device *dev, wl_pfn_bssid_t *p_pfn_bssid,
struct dhd_pno_hotlist_params *hotlist_params)
{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
return (dhd_pno_set_for_hotlist(&dhd->pub, p_pfn_bssid, hotlist_params));
}
/* Linux wrapper to call common dhd_dev_pno_stop_for_batch */
int
dhd_dev_pno_stop_for_batch(struct net_device *dev)
{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
return (dhd_pno_stop_for_batch(&dhd->pub));
}
/* Linux wrapper to call common dhd_dev_pno_set_for_batch */
int
dhd_dev_pno_set_for_batch(struct net_device *dev, struct dhd_pno_batch_params *batch_params)
{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
return (dhd_pno_set_for_batch(&dhd->pub, batch_params));
}
/* Linux wrapper to call common dhd_dev_pno_get_for_batch */
int
dhd_dev_pno_get_for_batch(struct net_device *dev, char *buf, int bufsize)
{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
return (dhd_pno_get_for_batch(&dhd->pub, buf, bufsize, PNO_STATUS_NORMAL));
}
#endif /* PNO_SUPPORT */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (1)
-static void dhd_hang_process(struct work_struct *work)
+static void dhd_hang_process(void *dhd_info, void *event_info, u8 event)
{
dhd_info_t *dhd;
struct net_device *dev;
- dhd = (dhd_info_t *)container_of(work, dhd_info_t, work_hang);
+ dhd = (dhd_info_t *)dhd_info;
dev = dhd->iflist[0]->net;
if (dev) {
if (dhdp) {
if (!dhdp->hang_was_sent) {
dhdp->hang_was_sent = 1;
- schedule_work(&dhdp->info->work_hang);
+ dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dhdp,
+ DHD_WQ_WORK_HANG_MSG, dhd_hang_process, DHD_WORK_PRIORITY_HIGH);
}
}
return ret;
int net_os_send_hang_message(struct net_device *dev)
{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
int ret = 0;
if (dhd) {
}
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) && OEM_ANDROID */
+
+int dhd_net_wifi_platform_set_power(struct net_device *dev, bool on, unsigned long delay_msec)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ return wifi_platform_set_power(dhd->adapter, on, delay_msec);
+}
+
+void dhd_get_customized_country_code(struct net_device *dev, char *country_iso_code,
+ wl_country_t *cspec)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+ get_customized_country_code(dhd->adapter, country_iso_code, cspec);
+}
void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec, bool notify)
{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
if (dhd && dhd->pub.up) {
memcpy(&dhd->pub.dhd_cspec, cspec, sizeof(wl_country_t));
#ifdef WL_CFG80211
void dhd_bus_band_set(struct net_device *dev, uint band)
{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
if (dhd && dhd->pub.up) {
#ifdef WL_CFG80211
wl_update_wiphybands(NULL, true);
}
}
+int dhd_net_set_fw_path(struct net_device *dev, char *fw)
+{
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
+
+ if (!fw || fw[0] == '\0')
+ return -EINVAL;
+
+ strncpy(dhd->fw_path, fw, sizeof(dhd->fw_path) - 1);
+ dhd->fw_path[sizeof(dhd->fw_path)-1] = '\0';
+
+#if defined(SOFTAP)
+ if (strstr(fw, "apsta") != NULL) {
+ DHD_INFO(("GOT APSTA FIRMWARE\n"));
+ ap_fw_loaded = TRUE;
+ } else {
+ DHD_INFO(("GOT STA FIRMWARE\n"));
+ ap_fw_loaded = FALSE;
+ }
+#endif
+ return 0;
+}
+
void dhd_net_if_lock(struct net_device *dev)
{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
dhd_net_if_lock_local(dhd);
}
void dhd_net_if_unlock(struct net_device *dev)
{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
dhd_net_if_unlock_local(dhd);
}
#endif
}
-unsigned long dhd_os_spin_lock(dhd_pub_t *pub)
+unsigned long dhd_os_general_spin_lock(dhd_pub_t *pub)
{
dhd_info_t *dhd = (dhd_info_t *)(pub->info);
unsigned long flags = 0;
return flags;
}
-void dhd_os_spin_unlock(dhd_pub_t *pub, unsigned long flags)
+void dhd_os_general_spin_unlock(dhd_pub_t *pub, unsigned long flags)
{
dhd_info_t *dhd = (dhd_info_t *)(pub->info);
spin_unlock_irqrestore(&dhd->dhd_lock, flags);
}
+/* Linux specific multipurpose spinlock API */
+void *
+dhd_os_spin_lock_init(osl_t *osh)
+{
+ /* Adding 4 bytes since the sizeof(spinlock_t) could be 0 */
+ /* if CONFIG_SMP and CONFIG_DEBUG_SPINLOCK are not defined */
+ /* and this results in kernel asserts in internal builds */
+ spinlock_t * lock = MALLOC(osh, sizeof(spinlock_t) + 4);
+ if (lock)
+ spin_lock_init(lock);
+ return ((void *)lock);
+}
+void
+dhd_os_spin_lock_deinit(osl_t *osh, void *lock)
+{
+ MFREE(osh, lock, sizeof(spinlock_t) + 4);
+}
+unsigned long
+dhd_os_spin_lock(void *lock)
+{
+ unsigned long flags = 0;
+
+ if (lock)
+ spin_lock_irqsave((spinlock_t *)lock, flags);
+
+ return flags;
+}
+void
+dhd_os_spin_unlock(void *lock, unsigned long flags)
+{
+ if (lock)
+ spin_unlock_irqrestore((spinlock_t *)lock, flags);
+}
+
static int
dhd_get_pend_8021x_cnt(dhd_info_t *dhd)
{
return (atomic_read(&dhd->pend_8021x_cnt));
}
-#define MAX_WAIT_FOR_8021X_TX 50
+#define MAX_WAIT_FOR_8021X_TX 100
int
dhd_wait_pend8021x(struct net_device *dev)
{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
int timeout = msecs_to_jiffies(10);
int ntimes = MAX_WAIT_FOR_8021X_TX;
int pend = dhd_get_pend_8021x_cnt(dhd);
while (ntimes && pend) {
if (pend) {
set_current_state(TASK_INTERRUPTIBLE);
+ DHD_PERIM_UNLOCK(&dhd->pub);
schedule_timeout(timeout);
+ DHD_PERIM_LOCK(&dhd->pub);
set_current_state(TASK_RUNNING);
ntimes--;
}
int net_os_wake_lock_timeout(struct net_device *dev)
{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
int ret = 0;
if (dhd)
return 0;
}
+int dhd_os_wake_lock_ctrl_timeout_cancel(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ unsigned long flags;
+
+ if (dhd) {
+ spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+ dhd->wakelock_ctrl_timeout_enable = 0;
+#ifdef CONFIG_HAS_WAKELOCK
+ if (wake_lock_active(&dhd->wl_ctrlwake))
+ wake_unlock(&dhd->wl_ctrlwake);
+#endif
+ spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+ }
+ return 0;
+}
+
int net_os_wake_lock_rx_timeout_enable(struct net_device *dev, int val)
{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
int ret = 0;
if (dhd)
int net_os_wake_lock_ctrl_timeout_enable(struct net_device *dev, int val)
{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
int ret = 0;
if (dhd)
if (dhd) {
spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+
+ if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
#ifdef CONFIG_HAS_WAKELOCK
- if (!dhd->wakelock_counter)
wake_lock(&dhd->wl_wifi);
+#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+ dhd_bus_dev_pm_stay_awake(pub);
#endif
+ }
dhd->wakelock_counter++;
ret = dhd->wakelock_counter;
spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
int net_os_wake_lock(struct net_device *dev)
{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
int ret = 0;
if (dhd)
dhd_os_wake_lock_timeout(pub);
if (dhd) {
spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
- if (dhd->wakelock_counter) {
+ if (dhd->wakelock_counter > 0) {
dhd->wakelock_counter--;
+ if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
#ifdef CONFIG_HAS_WAKELOCK
- if (!dhd->wakelock_counter)
wake_unlock(&dhd->wl_wifi);
+#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+ dhd_bus_dev_pm_relax(pub);
#endif
+ }
ret = dhd->wakelock_counter;
}
spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
return ret;
}
-int dhd_os_check_wakelock(void *dhdp)
+int dhd_os_check_wakelock(dhd_pub_t *pub)
{
-#if defined(CONFIG_HAS_WAKELOCK) || (1 && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, \
- 36)))
- dhd_pub_t *pub = (dhd_pub_t *)dhdp;
+#if defined(CONFIG_HAS_WAKELOCK) || (defined(BCMSDIO) && (LINUX_VERSION_CODE > \
+ KERNEL_VERSION(2, 6, 36)))
dhd_info_t *dhd;
if (!pub)
if (dhd && (wake_lock_active(&dhd->wl_wifi) ||
(wake_lock_active(&dhd->wl_wdwake))))
return 1;
+#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+ if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub))
+ return 1;
#endif
return 0;
}
-
int net_os_wake_unlock(struct net_device *dev)
{
- dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
+ dhd_info_t *dhd = DHD_DEV_INFO(dev);
int ret = 0;
if (dhd)
return ret;
}
-int dhd_os_check_if_up(void *dhdp)
+/* Waive wakelocks for operations such as IOVARs in the suspend path; must be closed
+ * by a paired call to dhd_os_wake_lock_restore(). Returns the current wakelock counter.
+ */
+int dhd_os_wake_lock_waive(dhd_pub_t *pub)
{
- dhd_pub_t *pub = (dhd_pub_t *)dhdp;
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ unsigned long flags;
+ int ret = 0;
- if (!pub)
+ if (dhd) {
+ spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+ /* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
+ if (dhd->waive_wakelock == FALSE) {
+ /* record current lock status */
+ dhd->wakelock_before_waive = dhd->wakelock_counter;
+ dhd->waive_wakelock = TRUE;
+ }
+ ret = dhd->wakelock_wd_counter;
+ spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+ }
+ return ret;
+}
+
+int dhd_os_wake_lock_restore(dhd_pub_t *pub)
+{
+ dhd_info_t *dhd = (dhd_info_t *)(pub->info);
+ unsigned long flags;
+ int ret = 0;
+
+ if (!dhd)
return 0;
+
+ spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
+ /* dhd_wakelock_waive/dhd_wakelock_restore must be paired */
+ if (!dhd->waive_wakelock)
+ goto exit;
+
+ dhd->waive_wakelock = FALSE;
+	/* If somebody else acquired a wakelock between the waive and the restore,
+	 * make up for it here by calling wake_lock or pm_stay_awake; if somebody
+	 * released the lock in between, do the same by calling wake_unlock or
+	 * pm_relax.
+	 */
+ if (dhd->wakelock_before_waive == 0 && dhd->wakelock_counter > 0) {
+#ifdef CONFIG_HAS_WAKELOCK
+ wake_lock(&dhd->wl_wifi);
+#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+ dhd_bus_dev_pm_stay_awake(&dhd->pub);
+#endif
+ } else if (dhd->wakelock_before_waive > 0 && dhd->wakelock_counter == 0) {
+#ifdef CONFIG_HAS_WAKELOCK
+ wake_unlock(&dhd->wl_wifi);
+#elif defined(BCMSDIO) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36))
+ dhd_bus_dev_pm_relax(&dhd->pub);
+#endif
+ }
+ dhd->wakelock_before_waive = 0;
+exit:
+ ret = dhd->wakelock_wd_counter;
+ spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
+ return ret;
+}
+
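+/* Illustrative usage of the waive/restore pair (a sketch only; the exact call
+ * sites depend on the build): a suspend handler calls
+ * dhd_os_wake_lock_waive(&dhd->pub), issues its suspend-time IOVARs, then
+ * calls dhd_os_wake_lock_restore(&dhd->pub) so that any wake lock taken or
+ * released in between is accounted for.
+ */
+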
+bool dhd_os_check_if_up(dhd_pub_t *pub)
+{
+ if (!pub)
+ return FALSE;
return pub->up;
}
+#if defined(BCMSDIO)
/* function to collect firmware, chip id and chip version info */
void dhd_set_version_info(dhd_pub_t *dhdp, char *fw)
{
"\n Chip: %x Rev %x Pkg %x", dhd_bus_chip_id(dhdp),
dhd_bus_chiprev_id(dhdp), dhd_bus_chippkg_id(dhdp));
}
-
+#endif /* defined(BCMSDIO) */
int dhd_ioctl_entry_local(struct net_device *net, wl_ioctl_t *ioc, int cmd)
{
int ifidx;
int ret = 0;
dhd_info_t *dhd = NULL;
- if (!net || !netdev_priv(net)) {
+ if (!net || !DEV_PRIV(net)) {
DHD_ERROR(("%s invalid parameter\n", __FUNCTION__));
return -EINVAL;
}
- dhd = *(dhd_info_t **)netdev_priv(net);
+ dhd = DHD_DEV_INFO(net);
if (!dhd)
return -EINVAL;
}
DHD_OS_WAKE_LOCK(&dhd->pub);
+ DHD_PERIM_LOCK(&dhd->pub);
+
ret = dhd_wl_ioctl(&dhd->pub, ifidx, ioc, ioc->buf, ioc->len);
dhd_check_hang(net, &dhd->pub, ret);
+
+ DHD_PERIM_UNLOCK(&dhd->pub);
DHD_OS_WAKE_UNLOCK(&dhd->pub);
return ret;
struct net_device *net;
net = dhd_idx2net(dhdp, ifidx);
+ if (!net) {
+ DHD_ERROR(("%s : Invalid index : %d\n", __FUNCTION__, ifidx));
+ return -EINVAL;
+ }
+
return dhd_check_hang(net, dhdp, ret);
}
-
-#ifdef PROP_TXSTATUS
-extern int dhd_wlfc_interface_entry_update(void* state, ewlfc_mac_entry_action_t action, uint8 ifid,
- uint8 iftype, uint8* ea);
-extern int dhd_wlfc_FIFOcreditmap_update(void* state, uint8* credits);
-
-int dhd_wlfc_interface_event(struct dhd_info *dhd,
- ewlfc_mac_entry_action_t action, uint8 ifid, uint8 iftype, uint8* ea)
+/* Return instance */
+int dhd_get_instance(dhd_pub_t *dhdp)
{
- if (dhd->pub.wlfc_state == NULL)
- return BCME_OK;
-
- return dhd_wlfc_interface_entry_update(dhd->pub.wlfc_state, action, ifid, iftype, ea);
+ return dhdp->info->unit;
}
-int dhd_wlfc_FIFOcreditmap_event(struct dhd_info *dhd, uint8* event_data)
-{
- if (dhd->pub.wlfc_state == NULL)
- return BCME_OK;
-
- return dhd_wlfc_FIFOcreditmap_update(dhd->pub.wlfc_state, event_data);
-}
-int dhd_wlfc_event(struct dhd_info *dhd)
-{
- return dhd_wlfc_enable(&dhd->pub);
-}
+#ifdef PROP_TXSTATUS
-void dhd_wlfc_plat_enable(void *dhd)
+void dhd_wlfc_plat_init(void *dhd)
{
return;
}
bool dhd_wlfc_skip_fc(void)
{
-
-#ifdef WL_CFG80211
- extern struct wl_priv *wlcfg_drv_priv;
-
- /* enable flow control in vsdb mode */
- return !(wlcfg_drv_priv && wlcfg_drv_priv->vsdb_mode);
-#else
- return TRUE; /* skip flow control */
-#endif /* WL_CFG80211 */
+ return FALSE;
}
#endif /* PROP_TXSTATUS */
}
#endif /* WLMEDIA_HTSF */
+
+#ifdef CUSTOM_SET_CPUCORE
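+/* Pin the DPC and RXF kernel threads to DPC_CPUCORE/RXF_CPUCORE while a VHT80
+ * channel is active (set == TRUE) and move them back to PRIMARY_CPUCORE
+ * otherwise, retrying up to MAX_RETRY_SET_CPUCORE times per thread.
+ */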
+void dhd_set_cpucore(dhd_pub_t *dhd, int set)
+{
+ int e_dpc = 0, e_rxf = 0, retry_set = 0;
+
+ if (!(dhd->chan_isvht80)) {
+ DHD_ERROR(("%s: chan_status(%d) cpucore!!!\n", __FUNCTION__, dhd->chan_isvht80));
+ return;
+ }
+
+ if (DPC_CPUCORE) {
+ do {
+ if (set == TRUE) {
+ e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
+ cpumask_of(DPC_CPUCORE));
+ } else {
+ e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
+ cpumask_of(PRIMARY_CPUCORE));
+ }
+ if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
+ DHD_ERROR(("%s: dpc(%d) invalid cpu!\n", __FUNCTION__, e_dpc));
+ return;
+ }
+ if (e_dpc < 0)
+ OSL_SLEEP(1);
+ } while (e_dpc < 0);
+ }
+ if (RXF_CPUCORE) {
+ do {
+ if (set == TRUE) {
+ e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
+ cpumask_of(RXF_CPUCORE));
+ } else {
+ e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
+ cpumask_of(PRIMARY_CPUCORE));
+ }
+ if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
+ DHD_ERROR(("%s: rxf(%d) invalid cpu!\n", __FUNCTION__, e_rxf));
+ return;
+ }
+ if (e_rxf < 0)
+ OSL_SLEEP(1);
+ } while (e_rxf < 0);
+ }
+#ifdef DHD_OF_SUPPORT
+ interrupt_set_cpucore(set);
+#endif /* DHD_OF_SUPPORT */
+ DHD_TRACE(("%s: set(%d) cpucore success!\n", __FUNCTION__, set));
+
+ return;
+}
+#endif /* CUSTOM_SET_CPUCORE */
+#if defined(DHD_TCP_WINSIZE_ADJUST)
+static int dhd_port_list_match(int port)
+{
+ int i;
+ for (i = 0; i < MAX_TARGET_PORTS; i++) {
+ if (target_ports[i] == port)
+ return 1;
+ }
+ return 0;
+}
+static void dhd_adjust_tcp_winsize(int op_mode, struct sk_buff *skb)
+{
+ struct iphdr *ipheader;
+ struct tcphdr *tcpheader;
+ uint16 win_size;
+ int32 incremental_checksum;
+
+ if (!(op_mode & DHD_FLAG_HOSTAP_MODE))
+ return;
+ if (skb == NULL || skb->data == NULL)
+ return;
+
+ ipheader = (struct iphdr*)(skb->data);
+
+ if (ipheader->protocol == IPPROTO_TCP) {
+ tcpheader = (struct tcphdr*) skb_pull(skb, (ipheader->ihl)<<2);
+ if (tcpheader) {
+ win_size = ntoh16(tcpheader->window);
+ if (win_size < MIN_TCP_WIN_SIZE &&
+ dhd_port_list_match(ntoh16(tcpheader->dest))) {
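+				/* Patch the TCP checksum incrementally for the
+				 * rewritten window field instead of recomputing
+				 * it over the whole segment.
+				 */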
+ incremental_checksum = ntoh16(tcpheader->check);
+ incremental_checksum += win_size - win_size*WIN_SIZE_SCALE_FACTOR;
+ if (incremental_checksum < 0)
+ --incremental_checksum;
+ tcpheader->window = hton16(win_size*WIN_SIZE_SCALE_FACTOR);
+ tcpheader->check = hton16((unsigned short)incremental_checksum);
+ }
+ }
+ skb_push(skb, (ipheader->ihl)<<2);
+ }
+}
+#endif /* DHD_TCP_WINSIZE_ADJUST */
+
+/* Get interface specific ap_isolate configuration */
+int dhd_get_ap_isolate(dhd_pub_t *dhdp, uint32 idx)
+{
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp;
+
+ ASSERT(idx < DHD_MAX_IFS);
+
+ ifp = dhd->iflist[idx];
+
+ return ifp->ap_isolate;
+}
+
+/* Set interface specific ap_isolate configuration */
+int dhd_set_ap_isolate(dhd_pub_t *dhdp, uint32 idx, int val)
+{
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp;
+
+ ASSERT(idx < DHD_MAX_IFS);
+
+ ifp = dhd->iflist[idx];
+
+ ifp->ap_isolate = val;
+
+ return 0;
+}
+
+#ifdef DHD_WMF
+/* Returns interface specific WMF configuration */
+dhd_wmf_t* dhd_wmf_conf(dhd_pub_t *dhdp, uint32 idx)
+{
+ dhd_info_t *dhd = dhdp->info;
+ dhd_if_t *ifp;
+
+ ASSERT(idx < DHD_MAX_IFS);
+
+ ifp = dhd->iflist[idx];
+ return &ifp->wmf;
+}
+#endif /* DHD_WMF */
+
+
+#ifdef DHD_UNICAST_DHCP
+static int
+dhd_get_pkt_ether_type(dhd_pub_t *pub, void *pktbuf,
+ uint8 **data_ptr, int *len_ptr, uint16 *et_ptr, bool *snap_ptr)
+{
+ uint8 *frame = PKTDATA(pub->osh, pktbuf);
+ int length = PKTLEN(pub->osh, pktbuf);
+ uint8 *pt; /* Pointer to type field */
+ uint16 ethertype;
+ bool snap = FALSE;
+ /* Process Ethernet II or SNAP-encapsulated 802.3 frames */
+ if (length < ETHER_HDR_LEN) {
+ DHD_ERROR(("dhd: %s: short eth frame (%d)\n",
+ __FUNCTION__, length));
+ return BCME_ERROR;
+ } else if (ntoh16_ua(frame + ETHER_TYPE_OFFSET) >= ETHER_TYPE_MIN) {
+ /* Frame is Ethernet II */
+ pt = frame + ETHER_TYPE_OFFSET;
+ } else if (length >= ETHER_HDR_LEN + SNAP_HDR_LEN + ETHER_TYPE_LEN &&
+ !bcmp(llc_snap_hdr, frame + ETHER_HDR_LEN, SNAP_HDR_LEN)) {
+ pt = frame + ETHER_HDR_LEN + SNAP_HDR_LEN;
+ snap = TRUE;
+ } else {
+ DHD_INFO(("DHD: %s: non-SNAP 802.3 frame\n",
+ __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ ethertype = ntoh16_ua(pt);
+
+ /* Skip VLAN tag, if any */
+ if (ethertype == ETHER_TYPE_8021Q) {
+ pt += VLAN_TAG_LEN;
+
+ if ((pt + ETHER_TYPE_LEN) > (frame + length)) {
+ DHD_ERROR(("dhd: %s: short VLAN frame (%d)\n",
+ __FUNCTION__, length));
+ return BCME_ERROR;
+ }
+
+ ethertype = ntoh16_ua(pt);
+ }
+
+ *data_ptr = pt + ETHER_TYPE_LEN;
+ *len_ptr = length - (pt + ETHER_TYPE_LEN - frame);
+ *et_ptr = ethertype;
+ *snap_ptr = snap;
+ return BCME_OK;
+}
+
+static int
+dhd_get_pkt_ip_type(dhd_pub_t *pub, void *pktbuf,
+ uint8 **data_ptr, int *len_ptr, uint8 *prot_ptr)
+{
+ struct ipv4_hdr *iph; /* IP frame pointer */
+ int iplen; /* IP frame length */
+ uint16 ethertype, iphdrlen, ippktlen;
+ uint16 iph_frag;
+ uint8 prot;
+ bool snap;
+
+ if (dhd_get_pkt_ether_type(pub, pktbuf, (uint8 **)&iph,
+		&iplen, &ethertype, &snap) != 0)
+ return BCME_ERROR;
+
+ if (ethertype != ETHER_TYPE_IP) {
+ return BCME_ERROR;
+ }
+
+ /* We support IPv4 only */
+ if (iplen < IPV4_OPTIONS_OFFSET || (IP_VER(iph) != IP_VER_4)) {
+ return BCME_ERROR;
+ }
+
+ /* Header length sanity */
+ iphdrlen = IPV4_HLEN(iph);
+
+ /*
+ * Packet length sanity; sometimes we receive eth-frame size bigger
+ * than the IP content, which results in a bad tcp chksum
+ */
+ ippktlen = ntoh16(iph->tot_len);
+ if (ippktlen < iplen) {
+
+ DHD_INFO(("%s: extra frame length ignored\n",
+ __FUNCTION__));
+ iplen = ippktlen;
+ } else if (ippktlen > iplen) {
+ DHD_ERROR(("dhd: %s: truncated IP packet (%d)\n",
+ __FUNCTION__, ippktlen - iplen));
+ return BCME_ERROR;
+ }
+
+ if (iphdrlen < IPV4_OPTIONS_OFFSET || iphdrlen > iplen) {
+ DHD_ERROR(("DHD: %s: IP-header-len (%d) out of range (%d-%d)\n",
+ __FUNCTION__, iphdrlen, IPV4_OPTIONS_OFFSET, iplen));
+ return BCME_ERROR;
+ }
+
+ /*
+ * We don't handle fragmented IP packets. A first frag is indicated by the MF
+ * (more frag) bit and a subsequent frag is indicated by a non-zero frag offset.
+ */
+ iph_frag = ntoh16(iph->frag);
+
+ if ((iph_frag & IPV4_FRAG_MORE) || (iph_frag & IPV4_FRAG_OFFSET_MASK) != 0) {
+ DHD_INFO(("DHD:%s: IP fragment not handled\n",
+ __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ prot = IPV4_PROT(iph);
+
+ *data_ptr = (((uint8 *)iph) + iphdrlen);
+ *len_ptr = iplen - iphdrlen;
+ *prot_ptr = prot;
+ return BCME_OK;
+}
+
+/** check the packet type, if it is DHCP ACK/REPLY, convert into unicast packet */
+static
+int dhd_convert_dhcp_broadcast_ack_to_unicast(dhd_pub_t *pub, void *pktbuf, int ifidx)
+{
+ dhd_sta_t* stainfo;
+ uint8 *eh = PKTDATA(pub->osh, pktbuf);
+ uint8 *udph;
+ uint8 *dhcp;
+ uint8 *chaddr;
+ int udpl;
+ int dhcpl;
+ uint16 port;
+ uint8 prot;
+
+ if (!ETHER_ISMULTI(eh + ETHER_DEST_OFFSET))
+ return BCME_ERROR;
+ if (dhd_get_pkt_ip_type(pub, pktbuf, &udph, &udpl, &prot) != 0)
+ return BCME_ERROR;
+ if (prot != IP_PROT_UDP)
+ return BCME_ERROR;
+ /* check frame length, at least UDP_HDR_LEN */
+ if (udpl < UDP_HDR_LEN) {
+ DHD_ERROR(("DHD: %s: short UDP frame, ignored\n",
+ __FUNCTION__));
+ return BCME_ERROR;
+ }
+ port = ntoh16_ua(udph + UDP_DEST_PORT_OFFSET);
+ /* only process DHCP packets from server to client */
+ if (port != DHCP_PORT_CLIENT)
+ return BCME_ERROR;
+
+ dhcp = udph + UDP_HDR_LEN;
+ dhcpl = udpl - UDP_HDR_LEN;
+
+ if (dhcpl < DHCP_CHADDR_OFFSET + ETHER_ADDR_LEN) {
+ DHD_ERROR(("DHD: %s: short DHCP frame, ignored\n",
+ __FUNCTION__));
+ return BCME_ERROR;
+ }
+ /* only process DHCP reply(offer/ack) packets */
+ if (*(dhcp + DHCP_TYPE_OFFSET) != DHCP_TYPE_REPLY)
+ return BCME_ERROR;
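+	/* Look up the destination STA by the DHCP client hardware address
+	 * (chaddr); if it is known, rewrite the broadcast destination MAC of
+	 * the frame to that STA's unicast address.
+	 */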
+ chaddr = dhcp + DHCP_CHADDR_OFFSET;
+ stainfo = dhd_find_sta(pub, ifidx, chaddr);
+ if (stainfo) {
+ bcopy(chaddr, eh + ETHER_DEST_OFFSET, ETHER_ADDR_LEN);
+ return BCME_OK;
+ }
+ return BCME_ERROR;
+}
+#endif /* DHD_UNICAST_DHCP */
+#ifdef DHD_L2_FILTER
+/* Check if packet type is ICMP ECHO */
+static
+int dhd_l2_filter_block_ping(dhd_pub_t *pub, void *pktbuf, int ifidx)
+{
+ struct bcmicmp_hdr *icmph;
+ int udpl;
+ uint8 prot;
+
+ if (dhd_get_pkt_ip_type(pub, pktbuf, (uint8 **)&icmph, &udpl, &prot) != 0)
+ return BCME_ERROR;
+ if (prot == IP_PROT_ICMP) {
+ if (icmph->type == ICMP_TYPE_ECHO_REQUEST)
+ return BCME_OK;
+ }
+ return BCME_ERROR;
+}
+#endif /* DHD_L2_FILTER */
+
+void *dhd_get_pub(struct net_device *dev)
+{
+ dhd_info_t *dhdinfo = *(dhd_info_t **)netdev_priv(dev);
+ return (void *)&dhdinfo->pub;
+}
--- /dev/null
+/*
+ * DHD Linux header file (dhd_linux exports for cfg80211 and other components)
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: dhd_linux.h 399301 2013-04-29 21:41:52Z $
+ */
+
+/* Wifi platform functions for power, interrupt and pre-alloc, taken either
+ * from Android-style platform device data or from Broadcom wifi platform
+ * device data.
+ */
+#ifndef __DHD_LINUX_H__
+#define __DHD_LINUX_H__
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#ifdef DHD_WMF
+#include <dhd_wmf_linux.h>
+#endif
+/* Linux wireless extension support */
+#if defined(WL_WIRELESS_EXT)
+#include <wl_iw.h>
+#endif /* defined(WL_WIRELESS_EXT) */
+#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
+#include <linux/earlysuspend.h>
+#endif /* defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) */
+
+#define DHD_REGISTRATION_TIMEOUT  12000  /* msec : allowed time to finish dhd registration */
+
+typedef struct wifi_adapter_info {
+ const char *name;
+ uint irq_num;
+ uint intr_flags;
+ const char *fw_path;
+ const char *nv_path;
+ const char *conf_path;
+ void *wifi_plat_data; /* wifi ctrl func, for backward compatibility */
+ uint bus_type;
+ uint bus_num;
+ uint slot_num;
+} wifi_adapter_info_t;
+
+typedef struct bcmdhd_wifi_platdata {
+ uint num_adapters;
+ wifi_adapter_info_t *adapters;
+} bcmdhd_wifi_platdata_t;
+
+/** Per STA params. A list of dhd_sta objects is managed in dhd_if */
+typedef struct dhd_sta {
+ uint16 flowid[NUMPRIO]; /* allocated flow ring ids (by priority) */
+ void * ifp; /* associated dhd_if */
+	struct ether_addr ea;		/* station's ethernet mac address */
+ struct list_head list; /* link into dhd_if::sta_list */
+ int idx; /* index of self in dhd_pub::sta_pool[] */
+ int ifidx; /* index of interface in dhd */
+} dhd_sta_t;
+typedef dhd_sta_t dhd_sta_pool_t;
+
+int dhd_wifi_platform_register_drv(void);
+void dhd_wifi_platform_unregister_drv(void);
+wifi_adapter_info_t* dhd_wifi_platform_get_adapter(uint32 bus_type, uint32 bus_num,
+ uint32 slot_num);
+int wifi_platform_set_power(wifi_adapter_info_t *adapter, bool on, unsigned long msec);
+int wifi_platform_bus_enumerate(wifi_adapter_info_t *adapter, bool device_present);
+int wifi_platform_get_irq_number(wifi_adapter_info_t *adapter, unsigned long *irq_flags_ptr);
+int wifi_platform_get_mac_addr(wifi_adapter_info_t *adapter, unsigned char *buf);
+void *wifi_platform_get_country_code(wifi_adapter_info_t *adapter, char *ccode);
+void* wifi_platform_prealloc(wifi_adapter_info_t *adapter, int section, unsigned long size);
+void* wifi_platform_get_prealloc_func_ptr(wifi_adapter_info_t *adapter);
+
+int dhd_get_fw_mode(struct dhd_info *dhdinfo);
+bool dhd_update_fw_nv_path(struct dhd_info *dhdinfo);
+
+#ifdef DHD_WMF
+dhd_wmf_t* dhd_wmf_conf(dhd_pub_t *dhdp, uint32 idx);
+#endif /* DHD_WMF */
+#endif /* __DHD_LINUX_H__ */
--- /dev/null
+/*
+ * Linux platform device for DHD WLAN adapter
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: dhd_linux_platdev.c 401742 2013-05-13 15:03:21Z $
+ */
+#include <typedefs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <bcmutils.h>
+#include <linux_osl.h>
+#include <dhd_dbg.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_bus.h>
+#include <dhd_linux.h>
+#include <wl_android.h>
+#if defined(CONFIG_WIFI_CONTROL_FUNC)
+#include <linux/wlan_plat.h>
+#endif
+#ifdef CONFIG_DTS
+#include<linux/regulator/consumer.h>
+#include<linux/of_gpio.h>
+#endif /* CONFIG_DTS */
+
+#ifdef CUSTOMER_HW
+#if defined(CUSTOMER_OOB)
+extern uint bcm_wlan_get_oob_irq(void);
+extern uint bcm_wlan_get_oob_irq_flags(void);
+#endif
+extern int bcm_wlan_set_plat_data(void);
+#endif /* CUSTOMER_HW */
+
+struct wifi_platform_data {
+ int (*set_power)(bool val);
+ int (*set_carddetect)(bool val);
+ void *(*mem_prealloc)(int section, unsigned long size);
+ int (*get_mac_addr)(unsigned char *buf);
+ void *(*get_country_code)(char *ccode);
+};
+
+#define WIFI_PLAT_NAME "bcmdhd_wlan"
+#define WIFI_PLAT_NAME2 "bcm4329_wlan"
+#define WIFI_PLAT_EXT "bcmdhd_wifi_platform"
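+
+/* Illustrative sketch (not part of the driver) of how a board file could
+ * register the Android-style platform device that this driver probes; the
+ * callback names (dhd_wlan_power etc.) and the IRQ placeholder are
+ * hypothetical:
+ *
+ *	static struct wifi_platform_data dhd_wlan_control = {
+ *		.set_power	= dhd_wlan_power,
+ *		.set_carddetect	= dhd_wlan_set_carddetect,
+ *	};
+ *	static struct resource dhd_wlan_res = {
+ *		.name	= "bcmdhd_wlan_irq",
+ *		.start	= WLAN_HOST_WAKE_IRQ,	// hypothetical OOB IRQ number
+ *		.end	= WLAN_HOST_WAKE_IRQ,
+ *		.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
+ *	};
+ *	static struct platform_device dhd_wlan_device = {
+ *		.name		= "bcmdhd_wlan",
+ *		.id		= -1,
+ *		.resource	= &dhd_wlan_res,
+ *		.num_resources	= 1,
+ *		.dev		= { .platform_data = &dhd_wlan_control, },
+ *	};
+ *	platform_device_register(&dhd_wlan_device);
+ */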
+
+#ifdef CONFIG_DTS
+struct regulator *wifi_regulator = NULL;
+#endif /* CONFIG_DTS */
+
+bool cfg_multichip = FALSE;
+bcmdhd_wifi_platdata_t *dhd_wifi_platdata = NULL;
+static int wifi_plat_dev_probe_ret = 0;
+static bool is_power_on = FALSE;
+#if !defined(CONFIG_DTS)
+#if defined(DHD_OF_SUPPORT)
+static bool dts_enabled = TRUE;
+extern struct resource dhd_wlan_resources;
+extern struct wifi_platform_data dhd_wlan_control;
+#else
+static bool dts_enabled = FALSE;
+struct resource dhd_wlan_resources = {0};
+struct wifi_platform_data dhd_wlan_control = {0};
+#endif /* DHD_OF_SUPPORT */
+#endif /* !defined(CONFIG_DTS) */
+
+static int dhd_wifi_platform_load(void);
+
+extern void* wl_cfg80211_get_dhdp(void);
+
+#ifdef ENABLE_4335BT_WAR
+extern int bcm_bt_lock(int cookie);
+extern void bcm_bt_unlock(int cookie);
+static int lock_cookie_wifi = 'W' | 'i'<<8 | 'F'<<16 | 'i'<<24; /* cookie is "WiFi" */
+#endif /* ENABLE_4335BT_WAR */
+
+wifi_adapter_info_t* dhd_wifi_platform_get_adapter(uint32 bus_type, uint32 bus_num, uint32 slot_num)
+{
+ int i;
+
+ if (dhd_wifi_platdata == NULL)
+ return NULL;
+
+ for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) {
+ wifi_adapter_info_t *adapter = &dhd_wifi_platdata->adapters[i];
+ if ((adapter->bus_type == -1 || adapter->bus_type == bus_type) &&
+ (adapter->bus_num == -1 || adapter->bus_num == bus_num) &&
+ (adapter->slot_num == -1 || adapter->slot_num == slot_num)) {
+ DHD_TRACE(("found adapter info '%s'\n", adapter->name));
+ return adapter;
+ }
+ }
+ return NULL;
+}
+
+void* wifi_platform_prealloc(wifi_adapter_info_t *adapter, int section, unsigned long size)
+{
+ void *alloc_ptr = NULL;
+ struct wifi_platform_data *plat_data;
+
+ if (!adapter || !adapter->wifi_plat_data)
+ return NULL;
+ plat_data = adapter->wifi_plat_data;
+ if (plat_data->mem_prealloc) {
+ alloc_ptr = plat_data->mem_prealloc(section, size);
+ if (alloc_ptr) {
+ DHD_INFO(("success alloc section %d\n", section));
+ if (size != 0L)
+ bzero(alloc_ptr, size);
+ return alloc_ptr;
+ }
+ } else
+ return NULL;
+
+ DHD_ERROR(("%s: failed to alloc static mem section %d\n", __FUNCTION__, section));
+ return NULL;
+}
+
+void* wifi_platform_get_prealloc_func_ptr(wifi_adapter_info_t *adapter)
+{
+ struct wifi_platform_data *plat_data;
+
+ if (!adapter || !adapter->wifi_plat_data)
+ return NULL;
+ plat_data = adapter->wifi_plat_data;
+ return plat_data->mem_prealloc;
+}
+
+int wifi_platform_get_irq_number(wifi_adapter_info_t *adapter, unsigned long *irq_flags_ptr)
+{
+ if (adapter == NULL)
+ return -1;
+ if (irq_flags_ptr)
+ *irq_flags_ptr = adapter->intr_flags;
+ return adapter->irq_num;
+}
+
+int wifi_platform_set_power(wifi_adapter_info_t *adapter, bool on, unsigned long msec)
+{
+ int err = 0;
+#ifdef CONFIG_DTS
+ if (on) {
+ err = regulator_enable(wifi_regulator);
+ is_power_on = TRUE;
+ }
+ else {
+ err = regulator_disable(wifi_regulator);
+ is_power_on = FALSE;
+ }
+ if (err < 0)
+ DHD_ERROR(("%s: regulator enable/disable failed", __FUNCTION__));
+#else
+ struct wifi_platform_data *plat_data;
+
+ if (!adapter || !adapter->wifi_plat_data)
+ return -EINVAL;
+ plat_data = adapter->wifi_plat_data;
+
+ DHD_ERROR(("%s = %d\n", __FUNCTION__, on));
+ if (plat_data->set_power) {
+#ifdef ENABLE_4335BT_WAR
+ if (on) {
+ printk("WiFi: trying to acquire BT lock\n");
+ if (bcm_bt_lock(lock_cookie_wifi) != 0)
+ printk("** WiFi: timeout in acquiring bt lock**\n");
+ printk("%s: btlock acquired\n", __FUNCTION__);
+ }
+ else {
+			/* For an exceptional case, release the btlock */
+ bcm_bt_unlock(lock_cookie_wifi);
+ }
+#endif /* ENABLE_4335BT_WAR */
+
+ err = plat_data->set_power(on);
+ }
+
+ if (msec && !err)
+ OSL_SLEEP(msec);
+
+ if (on && !err)
+ is_power_on = TRUE;
+ else
+ is_power_on = FALSE;
+
+#endif /* CONFIG_DTS */
+
+ return err;
+}
+
+int wifi_platform_bus_enumerate(wifi_adapter_info_t *adapter, bool device_present)
+{
+ int err = 0;
+ struct wifi_platform_data *plat_data;
+
+ if (!adapter || !adapter->wifi_plat_data)
+ return -EINVAL;
+ plat_data = adapter->wifi_plat_data;
+
+ DHD_ERROR(("%s device present %d\n", __FUNCTION__, device_present));
+ if (plat_data->set_carddetect) {
+ err = plat_data->set_carddetect(device_present);
+ }
+ return err;
+
+}
+
+int wifi_platform_get_mac_addr(wifi_adapter_info_t *adapter, unsigned char *buf)
+{
+ struct wifi_platform_data *plat_data;
+
+ DHD_ERROR(("%s\n", __FUNCTION__));
+ if (!buf || !adapter || !adapter->wifi_plat_data)
+ return -EINVAL;
+ plat_data = adapter->wifi_plat_data;
+ if (plat_data->get_mac_addr) {
+ return plat_data->get_mac_addr(buf);
+ }
+ return -EOPNOTSUPP;
+}
+
+void *wifi_platform_get_country_code(wifi_adapter_info_t *adapter, char *ccode)
+{
+ /* get_country_code was added after 2.6.39 */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
+ struct wifi_platform_data *plat_data;
+
+ if (!ccode || !adapter || !adapter->wifi_plat_data)
+ return NULL;
+ plat_data = adapter->wifi_plat_data;
+
+ DHD_TRACE(("%s\n", __FUNCTION__));
+ if (plat_data->get_country_code) {
+ return plat_data->get_country_code(ccode);
+ }
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) */
+
+ return NULL;
+}
+
+static int wifi_plat_dev_drv_probe(struct platform_device *pdev)
+{
+ struct resource *resource;
+ wifi_adapter_info_t *adapter;
+#ifdef CONFIG_DTS
+ int irq, gpio;
+#endif /* CONFIG_DTS */
+
+ /* Android style wifi platform data device ("bcmdhd_wlan" or "bcm4329_wlan")
+ * is kept for backward compatibility and supports only 1 adapter
+ */
+ ASSERT(dhd_wifi_platdata != NULL);
+ ASSERT(dhd_wifi_platdata->num_adapters == 1);
+ adapter = &dhd_wifi_platdata->adapters[0];
+ adapter->wifi_plat_data = (struct wifi_platform_data *)(pdev->dev.platform_data);
+
+ resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "bcmdhd_wlan_irq");
+ if (resource == NULL)
+ resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "bcm4329_wlan_irq");
+ if (resource) {
+ adapter->irq_num = resource->start;
+ adapter->intr_flags = resource->flags & IRQF_TRIGGER_MASK;
+ }
+
+#ifdef CONFIG_DTS
+ wifi_regulator = regulator_get(&pdev->dev, "wlreg_on");
+ if (wifi_regulator == NULL) {
+ DHD_ERROR(("%s regulator is null\n", __FUNCTION__));
+ return -1;
+ }
+
+ /* This is to get the irq for the OOB */
+ gpio = of_get_gpio(pdev->dev.of_node, 0);
+
+ if (gpio < 0) {
+ DHD_ERROR(("%s gpio information is incorrect\n", __FUNCTION__));
+ return -1;
+ }
+ irq = gpio_to_irq(gpio);
+ if (irq < 0) {
+ DHD_ERROR(("%s irq information is incorrect\n", __FUNCTION__));
+ return -1;
+ }
+ adapter->irq_num = irq;
+
+ /* need to change the flags according to our requirement */
+ adapter->intr_flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL |
+ IORESOURCE_IRQ_SHAREABLE;
+#endif /* CONFIG_DTS */
+
+ wifi_plat_dev_probe_ret = dhd_wifi_platform_load();
+ return wifi_plat_dev_probe_ret;
+}
+
+static int wifi_plat_dev_drv_remove(struct platform_device *pdev)
+{
+ wifi_adapter_info_t *adapter;
+
+ /* Android style wifi platform data device ("bcmdhd_wlan" or "bcm4329_wlan")
+ * is kept for backward compatibility and supports only 1 adapter
+ */
+ ASSERT(dhd_wifi_platdata != NULL);
+ ASSERT(dhd_wifi_platdata->num_adapters == 1);
+ adapter = &dhd_wifi_platdata->adapters[0];
+ if (is_power_on) {
+#ifdef BCMPCIE
+ wifi_platform_bus_enumerate(adapter, FALSE);
+ wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY);
+#else
+ wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY);
+ wifi_platform_bus_enumerate(adapter, FALSE);
+#endif /* BCMPCIE */
+ }
+
+#ifdef CONFIG_DTS
+ regulator_put(wifi_regulator);
+#endif /* CONFIG_DTS */
+ return 0;
+}
+
+static int wifi_plat_dev_drv_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ DHD_TRACE(("##> %s\n", __FUNCTION__));
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 39)) && defined(OOB_INTR_ONLY) && \
+ defined(BCMSDIO)
+ bcmsdh_oob_intr_set(0);
+#endif /* (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 39)) && OOB_INTR_ONLY && BCMSDIO */
+ return 0;
+}
+
+static int wifi_plat_dev_drv_resume(struct platform_device *pdev)
+{
+ DHD_TRACE(("##> %s\n", __FUNCTION__));
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 39)) && defined(OOB_INTR_ONLY) && \
+ defined(BCMSDIO)
+ if (dhd_os_check_if_up(wl_cfg80211_get_dhdp()))
+ bcmsdh_oob_intr_set(1);
+#endif /* (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 39)) && OOB_INTR_ONLY && BCMSDIO */
+ return 0;
+}
+
+#ifdef CONFIG_DTS
+static const struct of_device_id wifi_device_dt_match[] = {
+ { .compatible = "android,bcmdhd_wlan", },
+ {},
+};
+#endif /* CONFIG_DTS */
+static struct platform_driver wifi_platform_dev_driver = {
+ .probe = wifi_plat_dev_drv_probe,
+ .remove = wifi_plat_dev_drv_remove,
+ .suspend = wifi_plat_dev_drv_suspend,
+ .resume = wifi_plat_dev_drv_resume,
+ .driver = {
+ .name = WIFI_PLAT_NAME,
+#ifdef CONFIG_DTS
+ .of_match_table = wifi_device_dt_match,
+#endif /* CONFIG_DTS */
+ }
+};
+
+static struct platform_driver wifi_platform_dev_driver_legacy = {
+ .probe = wifi_plat_dev_drv_probe,
+ .remove = wifi_plat_dev_drv_remove,
+ .suspend = wifi_plat_dev_drv_suspend,
+ .resume = wifi_plat_dev_drv_resume,
+ .driver = {
+ .name = WIFI_PLAT_NAME2,
+ }
+};
+
+static int wifi_platdev_match(struct device *dev, void *data)
+{
+ char *name = (char*)data;
+ struct platform_device *pdev = to_platform_device(dev);
+
+ if (strcmp(pdev->name, name) == 0) {
+ DHD_ERROR(("found wifi platform device %s\n", name));
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+static int wifi_ctrlfunc_register_drv(void)
+{
+ int err = 0;
+ struct device *dev1, *dev2;
+ wifi_adapter_info_t *adapter;
+
+ dev1 = bus_find_device(&platform_bus_type, NULL, WIFI_PLAT_NAME, wifi_platdev_match);
+ dev2 = bus_find_device(&platform_bus_type, NULL, WIFI_PLAT_NAME2, wifi_platdev_match);
+
+#if !defined(CONFIG_DTS)
+ if (!dts_enabled) {
+ if (dev1 == NULL && dev2 == NULL) {
+ DHD_ERROR(("no wifi platform data, skip\n"));
+ return -ENXIO;
+ }
+ }
+#endif /* !defined(CONFIG_DTS) */
+
+	/* multi-chip support is not enabled, so build a single adapter info
+	 * entry for DHD (either SDIO, USB or PCIe)
+	 */
+ adapter = kzalloc(sizeof(wifi_adapter_info_t), GFP_KERNEL);
+ adapter->name = "DHD generic adapter";
+ adapter->bus_type = -1;
+ adapter->bus_num = -1;
+ adapter->slot_num = -1;
+ adapter->irq_num = -1;
+ is_power_on = FALSE;
+ wifi_plat_dev_probe_ret = 0;
+ dhd_wifi_platdata = kzalloc(sizeof(bcmdhd_wifi_platdata_t), GFP_KERNEL);
+ dhd_wifi_platdata->num_adapters = 1;
+ dhd_wifi_platdata->adapters = adapter;
+
+ if (dev1) {
+ err = platform_driver_register(&wifi_platform_dev_driver);
+ if (err) {
+ DHD_ERROR(("%s: failed to register wifi ctrl func driver\n",
+ __FUNCTION__));
+ return err;
+ }
+ }
+ if (dev2) {
+ err = platform_driver_register(&wifi_platform_dev_driver_legacy);
+ if (err) {
+ DHD_ERROR(("%s: failed to register wifi ctrl func legacy driver\n",
+ __FUNCTION__));
+ return err;
+ }
+ }
+
+#if !defined(CONFIG_DTS)
+ if (dts_enabled) {
+ adapter->wifi_plat_data = (void *)&dhd_wlan_control;
+#ifdef CUSTOMER_HW
+ bcm_wlan_set_plat_data();
+#ifdef CUSTOMER_OOB
+ adapter->irq_num = bcm_wlan_get_oob_irq();
+ adapter->intr_flags = bcm_wlan_get_oob_irq_flags();
+#endif
+#else
+ struct resource *resource;
+ resource = &dhd_wlan_resources;
+ adapter->irq_num = resource->start;
+ adapter->intr_flags = resource->flags & IRQF_TRIGGER_MASK;
+#endif
+ wifi_plat_dev_probe_ret = dhd_wifi_platform_load();
+ }
+#endif /* !defined(CONFIG_DTS) */
+
+
+#ifdef CONFIG_DTS
+ wifi_plat_dev_probe_ret = platform_driver_register(&wifi_platform_dev_driver);
+#endif /* CONFIG_DTS */
+
+	/* return probe function's return value if registration succeeded */
+ return wifi_plat_dev_probe_ret;
+}
+
+void wifi_ctrlfunc_unregister_drv(void)
+{
+ struct device *dev1, *dev2;
+
+#ifdef CONFIG_DTS
+ DHD_ERROR(("unregister wifi platform drivers\n"));
+ platform_driver_unregister(&wifi_platform_dev_driver);
+#else
+ dev1 = bus_find_device(&platform_bus_type, NULL, WIFI_PLAT_NAME, wifi_platdev_match);
+ dev2 = bus_find_device(&platform_bus_type, NULL, WIFI_PLAT_NAME2, wifi_platdev_match);
+ if (!dts_enabled)
+ if (dev1 == NULL && dev2 == NULL)
+ return;
+
+ DHD_ERROR(("unregister wifi platform drivers\n"));
+ if (dev1)
+ platform_driver_unregister(&wifi_platform_dev_driver);
+ if (dev2)
+ platform_driver_unregister(&wifi_platform_dev_driver_legacy);
+ if (dts_enabled) {
+ wifi_adapter_info_t *adapter;
+ adapter = &dhd_wifi_platdata->adapters[0];
+ if (is_power_on) {
+ wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY);
+ wifi_platform_bus_enumerate(adapter, FALSE);
+ }
+ }
+#endif /* !defined(CONFIG_DTS) */
+
+ kfree(dhd_wifi_platdata->adapters);
+ dhd_wifi_platdata->adapters = NULL;
+ dhd_wifi_platdata->num_adapters = 0;
+ kfree(dhd_wifi_platdata);
+ dhd_wifi_platdata = NULL;
+}
+
+static int bcmdhd_wifi_plat_dev_drv_probe(struct platform_device *pdev)
+{
+ dhd_wifi_platdata = (bcmdhd_wifi_platdata_t *)(pdev->dev.platform_data);
+
+ return dhd_wifi_platform_load();
+}
+
+static int bcmdhd_wifi_plat_dev_drv_remove(struct platform_device *pdev)
+{
+ int i;
+ wifi_adapter_info_t *adapter;
+ ASSERT(dhd_wifi_platdata != NULL);
+
+ /* power down all adapters */
+ for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) {
+ adapter = &dhd_wifi_platdata->adapters[i];
+ wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY);
+ wifi_platform_bus_enumerate(adapter, FALSE);
+ }
+ return 0;
+}
+
+static struct platform_driver dhd_wifi_platform_dev_driver = {
+ .probe = bcmdhd_wifi_plat_dev_drv_probe,
+ .remove = bcmdhd_wifi_plat_dev_drv_remove,
+ .driver = {
+ .name = WIFI_PLAT_EXT,
+ }
+};
+
+int dhd_wifi_platform_register_drv(void)
+{
+ int err = 0;
+ struct device *dev;
+
+ /* register Broadcom wifi platform data driver if multi-chip is enabled,
+ * otherwise use Android style wifi platform data (aka wifi control function)
+ * if it exists
+ *
+ * to support multi-chip DHD, Broadcom wifi platform data device must
+ * be added in kernel early boot (e.g. board config file).
+ */
+ if (cfg_multichip) {
+ dev = bus_find_device(&platform_bus_type, NULL, WIFI_PLAT_EXT, wifi_platdev_match);
+ if (dev == NULL) {
+ DHD_ERROR(("bcmdhd wifi platform data device not found!!\n"));
+ return -ENXIO;
+ }
+ err = platform_driver_register(&dhd_wifi_platform_dev_driver);
+ } else {
+ err = wifi_ctrlfunc_register_drv();
+
+ /* no wifi ctrl func either, load bus directly and ignore this error */
+ if (err) {
+ if (err == -ENXIO) {
+ /* wifi ctrl function does not exist */
+ err = dhd_wifi_platform_load();
+ } else {
+ /* unregister driver due to initialization failure */
+ wifi_ctrlfunc_unregister_drv();
+ }
+ }
+ }
+
+ return err;
+}
+
+#ifdef BCMPCIE
+static int dhd_wifi_platform_load_pcie(void)
+{
+ int err = 0;
+ int i;
+ wifi_adapter_info_t *adapter;
+
+ BCM_REFERENCE(i);
+ BCM_REFERENCE(adapter);
+
+ if (dhd_wifi_platdata == NULL) {
+ err = dhd_bus_register();
+ } else {
+ if (dhd_download_fw_on_driverload) {
+ /* power up all adapters */
+ for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) {
+ int retry = POWERUP_MAX_RETRY;
+ adapter = &dhd_wifi_platdata->adapters[i];
+
+ DHD_ERROR(("Power-up adapter '%s'\n", adapter->name));
+ DHD_INFO((" - irq %d [flags %d], firmware: %s, nvram: %s\n",
+ adapter->irq_num, adapter->intr_flags, adapter->fw_path,
+ adapter->nv_path));
+ DHD_INFO((" - bus type %d, bus num %d, slot num %d\n\n",
+ adapter->bus_type, adapter->bus_num, adapter->slot_num));
+
+ do {
+ err = wifi_platform_set_power(adapter,
+ TRUE, WIFI_TURNON_DELAY);
+ if (err) {
+ DHD_ERROR(("failed to power up %s,"
+ " %d retry left\n",
+ adapter->name, retry));
+					/* WL_REG_ON state unknown, power off forcibly */
+ wifi_platform_set_power(adapter,
+ FALSE, WIFI_TURNOFF_DELAY);
+ continue;
+ } else {
+ err = wifi_platform_bus_enumerate(adapter, TRUE);
+ if (err) {
+ DHD_ERROR(("failed to enumerate bus %s, "
+ "%d retry left\n",
+ adapter->name, retry));
+ wifi_platform_set_power(adapter, FALSE,
+ WIFI_TURNOFF_DELAY);
+ } else {
+ break;
+ }
+ }
+ } while (retry--);
+
+ if (!retry) {
+ DHD_ERROR(("failed to power up %s, max retry reached**\n",
+ adapter->name));
+ return -ENODEV;
+ }
+ }
+ }
+
+ err = dhd_bus_register();
+
+ if (err) {
+ DHD_ERROR(("%s: pcie_register_driver failed\n", __FUNCTION__));
+ if (dhd_download_fw_on_driverload) {
+ /* power down all adapters */
+ for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) {
+ adapter = &dhd_wifi_platdata->adapters[i];
+ wifi_platform_bus_enumerate(adapter, FALSE);
+ wifi_platform_set_power(adapter,
+ FALSE, WIFI_TURNOFF_DELAY);
+ }
+ }
+ }
+ }
+
+ return err;
+}
+#else
+static int dhd_wifi_platform_load_pcie(void)
+{
+ return 0;
+}
+#endif /* BCMPCIE */
+
+
+void dhd_wifi_platform_unregister_drv(void)
+{
+ if (cfg_multichip)
+ platform_driver_unregister(&dhd_wifi_platform_dev_driver);
+ else
+ wifi_ctrlfunc_unregister_drv();
+}
+
+extern int dhd_watchdog_prio;
+extern int dhd_dpc_prio;
+extern uint dhd_deferred_tx;
+#if defined(BCMLXSDMMC)
+extern struct semaphore dhd_registration_sem;
+#endif
+
+#ifdef BCMSDIO
+static int dhd_wifi_platform_load_sdio(void)
+{
+ int i;
+ int err = 0;
+ wifi_adapter_info_t *adapter;
+
+ BCM_REFERENCE(i);
+ BCM_REFERENCE(adapter);
+ /* Sanity check on the module parameters
+ * - Both watchdog and DPC as tasklets are ok
+ * - If both watchdog and DPC are threads, TX must be deferred
+ */
+ if (!(dhd_watchdog_prio < 0 && dhd_dpc_prio < 0) &&
+ !(dhd_watchdog_prio >= 0 && dhd_dpc_prio >= 0 && dhd_deferred_tx))
+ return -EINVAL;
+
+#if defined(BCMLXSDMMC)
+ if (dhd_wifi_platdata == NULL) {
+ DHD_ERROR(("DHD wifi platform data is required for Android build\n"));
+ return -EINVAL;
+ }
+
+ sema_init(&dhd_registration_sem, 0);
+ /* power up all adapters */
+ for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) {
+ bool chip_up = FALSE;
+ int retry = POWERUP_MAX_RETRY;
+ struct semaphore dhd_chipup_sem;
+
+ adapter = &dhd_wifi_platdata->adapters[i];
+
+ DHD_ERROR(("Power-up adapter '%s'\n", adapter->name));
+ DHD_INFO((" - irq %d [flags %d], firmware: %s, nvram: %s\n",
+ adapter->irq_num, adapter->intr_flags, adapter->fw_path, adapter->nv_path));
+ DHD_INFO((" - bus type %d, bus num %d, slot num %d\n\n",
+ adapter->bus_type, adapter->bus_num, adapter->slot_num));
+
+ do {
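+			/* Register for the SDIO card-insert notification before
+			 * powering the adapter, then wait up to POWERUP_WAIT_MS
+			 * below for the chip to enumerate on the bus.
+			 */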
+ sema_init(&dhd_chipup_sem, 0);
+ err = dhd_bus_reg_sdio_notify(&dhd_chipup_sem);
+ if (err) {
+ DHD_ERROR(("%s dhd_bus_reg_sdio_notify fail(%d)\n\n",
+ __FUNCTION__, err));
+ return err;
+ }
+ err = wifi_platform_set_power(adapter, TRUE, WIFI_TURNON_DELAY);
+ if (err) {
+				/* WL_REG_ON state unknown, power off forcibly */
+ wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY);
+ continue;
+ } else {
+ wifi_platform_bus_enumerate(adapter, TRUE);
+ err = 0;
+ }
+
+ if (down_timeout(&dhd_chipup_sem, msecs_to_jiffies(POWERUP_WAIT_MS)) == 0) {
+ dhd_bus_unreg_sdio_notify();
+ chip_up = TRUE;
+ break;
+ }
+
+ DHD_ERROR(("failed to power up %s, %d retry left\n", adapter->name, retry));
+ dhd_bus_unreg_sdio_notify();
+ wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY);
+ wifi_platform_bus_enumerate(adapter, FALSE);
+ } while (retry--);
+
+ if (!chip_up) {
+ DHD_ERROR(("failed to power up %s, max retry reached**\n", adapter->name));
+ return -ENODEV;
+ }
+
+ }
+
+ err = dhd_bus_register();
+
+ if (err) {
+ DHD_ERROR(("%s: sdio_register_driver failed\n", __FUNCTION__));
+ goto fail;
+ }
+
+
+ /*
+	 * Wait until the MMC sdio_register_driver callback has run and driver
+	 * attach has completed. This synchronizes the exit from dhd insmod with
+	 * the kernel MMC sdio device callback registration.
+ */
+ err = down_timeout(&dhd_registration_sem, msecs_to_jiffies(DHD_REGISTRATION_TIMEOUT));
+ if (err) {
+ DHD_ERROR(("%s: sdio_register_driver timeout or error \n", __FUNCTION__));
+ dhd_bus_unregister();
+ goto fail;
+ }
+
+ return err;
+
+fail:
+ /* power down all adapters */
+ for (i = 0; i < dhd_wifi_platdata->num_adapters; i++) {
+ adapter = &dhd_wifi_platdata->adapters[i];
+ wifi_platform_set_power(adapter, FALSE, WIFI_TURNOFF_DELAY);
+ wifi_platform_bus_enumerate(adapter, FALSE);
+ }
+#else
+
+ /* x86 bring-up PC needs no power-up operations */
+ err = dhd_bus_register();
+
+#endif
+
+ return err;
+}
+#else /* BCMSDIO */
+static int dhd_wifi_platform_load_sdio(void)
+{
+ return 0;
+}
+#endif /* BCMSDIO */
+
+static int dhd_wifi_platform_load_usb(void)
+{
+ return 0;
+}
+
+static int dhd_wifi_platform_load()
+{
+ int err = 0;
+ printf("%s: Enter\n", __FUNCTION__);
+
+ wl_android_init();
+
+ if ((err = dhd_wifi_platform_load_usb()))
+ goto end;
+ else if ((err = dhd_wifi_platform_load_sdio()))
+ goto end;
+ else
+ err = dhd_wifi_platform_load_pcie();
+
+end:
+ if (err)
+ wl_android_exit();
+ else
+ wl_android_post_init();
+
+ return err;
+}
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: dhd_linux_sched.c 291086 2011-10-21 01:17:24Z $
+ * $Id: dhd_linux_sched.c 457570 2014-02-23 13:54:46Z $
*/
#include <linux/kernel.h>
#include <linux/module.h>
#endif /* LinuxVer */
return rc;
}
+
+int get_scheduler_policy(struct task_struct *p)
+{
+ int rc = SCHED_NORMAL;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+ rc = p->policy;
+#endif /* LinuxVer */
+ return rc;
+}
--- /dev/null
+/*
+ * Broadcom Dongle Host Driver (DHD), Generic work queue framework
+ * Generic interface to handle dhd deferred work events
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: dhd_linux_wq.c 449578 2014-01-17 13:53:20Z $
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/fcntl.h>
+#include <linux/fs.h>
+#include <linux/ip.h>
+#include <linux/kfifo.h>
+
+#include <linuxver.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <bcmendian.h>
+#include <bcmdevs.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_dbg.h>
+#include <dhd_linux_wq.h>
+
+struct dhd_deferred_event_t {
+ u8 event; /* holds the event */
+ void *event_data; /* Holds event specific data */
+ event_handler_t event_handler;
+};
+#define DEFRD_EVT_SIZE sizeof(struct dhd_deferred_event_t)
+
+struct dhd_deferred_wq {
+ struct work_struct deferred_work; /* should be the first member */
+
+ /*
+ * work events may occur simultaneously.
+	 * Can hold up to 64 low-priority events and 4 high-priority events
+ */
+#define DHD_PRIO_WORK_FIFO_SIZE (4 * sizeof(struct dhd_deferred_event_t))
+#define DHD_WORK_FIFO_SIZE (64 * sizeof(struct dhd_deferred_event_t))
+ struct kfifo *prio_fifo;
+ struct kfifo *work_fifo;
+ u8 *prio_fifo_buf;
+ u8 *work_fifo_buf;
+ spinlock_t work_lock;
+	void *dhd_info; /* back pointer to dhd_info (review: is this still required?) */
+};
+
+static inline struct kfifo*
+dhd_kfifo_init(u8 *buf, int size, spinlock_t *lock)
+{
+ struct kfifo *fifo;
+ gfp_t flags = CAN_SLEEP()? GFP_KERNEL : GFP_ATOMIC;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33))
+ fifo = kfifo_init(buf, size, flags, lock);
+#else
+ fifo = (struct kfifo *)kzalloc(sizeof(struct kfifo), flags);
+ if (!fifo) {
+ return NULL;
+ }
+ kfifo_init(fifo, buf, size);
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) */
+ return fifo;
+}
+
+static inline void
+dhd_kfifo_free(struct kfifo *fifo)
+{
+ kfifo_free(fifo);
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 31))
+	/* on older kernels (FC11) kfifo_free() releases the kfifo itself;
+	 * on newer kernels free the struct kzalloc'ed in dhd_kfifo_init here
+	 */
+ kfree(fifo);
+#endif
+}
+
+/* deferred work functions */
+static void dhd_deferred_work_handler(struct work_struct *data);
+
+void*
+dhd_deferred_work_init(void *dhd_info)
+{
+ struct dhd_deferred_wq *work = NULL;
+ u8* buf;
+ unsigned long fifo_size = 0;
+ gfp_t flags = CAN_SLEEP()? GFP_KERNEL : GFP_ATOMIC;
+
+ if (!dhd_info) {
+ DHD_ERROR(("%s: dhd info not initialized\n", __FUNCTION__));
+ goto return_null;
+ }
+
+ work = (struct dhd_deferred_wq *)kzalloc(sizeof(struct dhd_deferred_wq),
+ flags);
+
+ if (!work) {
+ DHD_ERROR(("%s: work queue creation failed \n", __FUNCTION__));
+ goto return_null;
+ }
+
+ INIT_WORK((struct work_struct *)work, dhd_deferred_work_handler);
+
+ /* initialize event fifo */
+ spin_lock_init(&work->work_lock);
+
+ /* allocate buffer to hold prio events */
+ fifo_size = DHD_PRIO_WORK_FIFO_SIZE;
+ fifo_size = is_power_of_2(fifo_size)? fifo_size : roundup_pow_of_two(fifo_size);
+ buf = (u8*)kzalloc(fifo_size, flags);
+ if (!buf) {
+ DHD_ERROR(("%s: prio work fifo allocation failed \n", __FUNCTION__));
+ goto return_null;
+ }
+
+ /* Initialize prio event fifo */
+ work->prio_fifo = dhd_kfifo_init(buf, fifo_size, &work->work_lock);
+ if (!work->prio_fifo) {
+ kfree(buf);
+ goto return_null;
+ }
+
+ /* allocate buffer to hold work events */
+ fifo_size = DHD_WORK_FIFO_SIZE;
+ fifo_size = is_power_of_2(fifo_size)? fifo_size : roundup_pow_of_two(fifo_size);
+ buf = (u8*)kzalloc(fifo_size, flags);
+ if (!buf) {
+ DHD_ERROR(("%s: work fifo allocation failed \n", __FUNCTION__));
+ goto return_null;
+ }
+
+ /* Initialize event fifo */
+ work->work_fifo = dhd_kfifo_init(buf, fifo_size, &work->work_lock);
+ if (!work->work_fifo) {
+ kfree(buf);
+ goto return_null;
+ }
+
+ work->dhd_info = dhd_info;
+ DHD_ERROR(("%s: work queue initialized \n", __FUNCTION__));
+ return work;
+
+return_null:
+
+ if (work)
+ dhd_deferred_work_deinit(work);
+
+ return NULL;
+}
+
+void
+dhd_deferred_work_deinit(void *work)
+{
+ struct dhd_deferred_wq *deferred_work = work;
+
+
+ if (!deferred_work) {
+		DHD_ERROR(("%s: deferred work has been freed already\n", __FUNCTION__));
+ return;
+ }
+
+ /* cancel the deferred work handling */
+ cancel_work_sync((struct work_struct *)deferred_work);
+
+ /*
+ * free work event fifo.
+ * kfifo_free frees locally allocated fifo buffer
+ */
+ if (deferred_work->prio_fifo)
+ dhd_kfifo_free(deferred_work->prio_fifo);
+
+ if (deferred_work->work_fifo)
+ dhd_kfifo_free(deferred_work->work_fifo);
+
+ kfree(deferred_work);
+}
+
+/*
+ * Prepares event to be queued
+ * Schedules the event
+ */
+int
+dhd_deferred_schedule_work(void *workq, void *event_data, u8 event,
+ event_handler_t event_handler, u8 priority)
+{
+ struct dhd_deferred_wq *deferred_wq = (struct dhd_deferred_wq *) workq;
+ struct dhd_deferred_event_t deferred_event;
+ int status;
+
+ if (!deferred_wq) {
+ DHD_ERROR(("%s: work queue not initialized \n", __FUNCTION__));
+ ASSERT(0);
+ return DHD_WQ_STS_UNINITIALIZED;
+ }
+
+ if (!event || (event >= DHD_MAX_WQ_EVENTS)) {
+ DHD_ERROR(("%s: Unknown event \n", __FUNCTION__));
+ return DHD_WQ_STS_UNKNOWN_EVENT;
+ }
+
+ /*
+	 * The default element size is 1 byte; it can be changed with
+	 * kfifo_esize(). Older kernels (FC11) do not support changing the
+	 * element size, so for compatibility changing it is not preferred.
+ */
+ ASSERT(kfifo_esize(deferred_wq->prio_fifo) == 1);
+ ASSERT(kfifo_esize(deferred_wq->work_fifo) == 1);
+
+ deferred_event.event = event;
+ deferred_event.event_data = event_data;
+ deferred_event.event_handler = event_handler;
+
+ if (priority == DHD_WORK_PRIORITY_HIGH) {
+ status = kfifo_in_spinlocked(deferred_wq->prio_fifo, &deferred_event,
+ DEFRD_EVT_SIZE, &deferred_wq->work_lock);
+ } else {
+ status = kfifo_in_spinlocked(deferred_wq->work_fifo, &deferred_event,
+ DEFRD_EVT_SIZE, &deferred_wq->work_lock);
+ }
+
+ if (!status) {
+ return DHD_WQ_STS_SCHED_FAILED;
+ }
+ schedule_work((struct work_struct *)deferred_wq);
+ return DHD_WQ_STS_OK;
+}
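+
+/* Illustrative call (a sketch only; the work-queue handle name and the handler
+ * are assumptions, not taken from this file):
+ *
+ *	dhd_deferred_schedule_work(dhd->dhd_deferred_wq, ifevent,
+ *		DHD_WQ_WORK_IF_ADD, dhd_ifadd_event_handler,
+ *		DHD_WORK_PRIORITY_LOW);
+ */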
+
+static int
+dhd_get_scheduled_work(struct dhd_deferred_wq *deferred_wq, struct dhd_deferred_event_t *event)
+{
+ int status = 0;
+
+ if (!deferred_wq) {
+ DHD_ERROR(("%s: work queue not initialized \n", __FUNCTION__));
+ return DHD_WQ_STS_UNINITIALIZED;
+ }
+
+ /*
+	 * The default element size is 1 byte; it can be changed with
+	 * kfifo_esize(). Older kernels (FC11) do not support changing the
+	 * element size, so for compatibility changing it is not preferred.
+ */
+ ASSERT(kfifo_esize(deferred_wq->prio_fifo) == 1);
+ ASSERT(kfifo_esize(deferred_wq->work_fifo) == 1);
+
+	/* first read the priority event fifo */
+ status = kfifo_out_spinlocked(deferred_wq->prio_fifo, event,
+ DEFRD_EVT_SIZE, &deferred_wq->work_lock);
+
+ if (!status) {
+ /* priority fifo is empty. Now read low prio work fifo */
+ status = kfifo_out_spinlocked(deferred_wq->work_fifo, event,
+ DEFRD_EVT_SIZE, &deferred_wq->work_lock);
+ }
+
+ return status;
+}
+
+/*
+ * Called when work is scheduled
+ */
+static void
+dhd_deferred_work_handler(struct work_struct *work)
+{
+ struct dhd_deferred_wq *deferred_work = (struct dhd_deferred_wq *)work;
+ struct dhd_deferred_event_t work_event;
+ int status;
+
+ if (!deferred_work) {
+ DHD_ERROR(("%s: work queue not initialized\n", __FUNCTION__));
+ return;
+ }
+
+ do {
+ status = dhd_get_scheduled_work(deferred_work, &work_event);
+ DHD_TRACE(("%s: event to handle %d \n", __FUNCTION__, status));
+ if (!status) {
+ DHD_TRACE(("%s: No event to handle %d \n", __FUNCTION__, status));
+ break;
+ }
+
+ if (work_event.event > DHD_MAX_WQ_EVENTS) {
+ DHD_TRACE(("%s: Unknown event %d \n", __FUNCTION__, work_event.event));
+ break;
+ }
+
+ if (work_event.event_handler) {
+ work_event.event_handler(deferred_work->dhd_info,
+ work_event.event_data, work_event.event);
+ } else {
+ DHD_ERROR(("%s: event not defined %d\n", __FUNCTION__, work_event.event));
+ }
+ } while (1);
+ return;
+}
--- /dev/null
+/*
+ * Broadcom Dongle Host Driver (DHD), Generic work queue framework
+ * Generic interface to handle dhd deferred work events
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: dhd_linux_wq.h 449578 2014-01-17 13:53:20Z $
+ */
+#ifndef _dhd_linux_wq_h_
+#define _dhd_linux_wq_h_
+/*
+ * Work event definitions
+ */
+enum _wq_event {
+ DHD_WQ_WORK_IF_ADD = 1,
+ DHD_WQ_WORK_IF_DEL,
+ DHD_WQ_WORK_SET_MAC,
+ DHD_WQ_WORK_SET_MCAST_LIST,
+ DHD_WQ_WORK_IPV6_NDO,
+ DHD_WQ_WORK_HANG_MSG,
+
+ DHD_MAX_WQ_EVENTS
+};
+
+/*
+ * Work event priority
+ */
+#define DHD_WORK_PRIORITY_LOW 0
+#define DHD_WORK_PRIORITY_HIGH 1
+
+/*
+ * Error definitions
+ */
+#define DHD_WQ_STS_OK 0
+#define DHD_WQ_STS_FAILED -1 /* General failure */
+#define DHD_WQ_STS_UNINITIALIZED -2
+#define DHD_WQ_STS_SCHED_FAILED -3
+#define DHD_WQ_STS_UNKNOWN_EVENT -4
+
+typedef void (*event_handler_t)(void *handle, void *event_data, u8 event);
+
+void *dhd_deferred_work_init(void *dhd);
+void dhd_deferred_work_deinit(void *workq);
+int dhd_deferred_schedule_work(void *workq, void *event_data, u8 event,
+ event_handler_t evt_handler, u8 priority);
+#endif /* _dhd_linux_wq_h_ */
--- /dev/null
+/*
+ * DHD logging module for internal debug
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: dhd_sdio.c 281456 2011-09-02 01:49:45Z $
+ */
+
+#include <typedefs.h>
+#include <osl.h>
+
+#include <proto/ethernet.h>
+#include <proto/802.1d.h>
+#include <proto/802.11.h>
+
+#include <linux/inet.h>
+
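+/* Send the given buffer as a single UDP datagram to a hard-coded debug host
+ * (10.19.74.43:7651). Intended for internal debugging only; the socket is
+ * created and released on every call.
+ */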
+void dhd_blog(char *cp, int size)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29))
+ static struct socket * _udpSocket = NULL;
+ struct sockaddr_in _saAddr;
+ struct iovec iov;
+ struct msghdr msg;
+ if (sock_create(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &_udpSocket) >= 0)
+ {
+
+ {
+ memset(&_saAddr, 0, sizeof(_saAddr));
+ _saAddr.sin_family = AF_INET;
+ _saAddr.sin_port = htons(7651);
+ _saAddr.sin_addr.s_addr = in_aton("10.19.74.43");
+
+ iov.iov_base = cp;
+ iov.iov_len = size;
+
+ msg.msg_name = &_saAddr;
+ msg.msg_namelen = sizeof(struct sockaddr_in);
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+ msg.msg_control = NULL;
+ msg.msg_controllen = 0;
+ msg.msg_flags = 0;
+
+ {
+ mm_segment_t fs = get_fs();
+ set_fs(get_ds());
+
+ sock_sendmsg(_udpSocket, &msg, size);
+
+ set_fs(fs);
+ }
+ }
+
+ sock_release(_udpSocket);
+ }
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)) */
+}
--- /dev/null
+/*
+ * Broadcom Dongle Host Driver (DHD), msgbuf protocol layer.
+ *
+ * Implements the host/dongle shared message-buffer (ring) interface used by
+ * the PCIe full-dongle bus.
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: dhd_msgbuf.c 490973 2014-07-14 12:32:56Z $
+ */
+#include <typedefs.h>
+#include <osl.h>
+
+#include <bcmutils.h>
+#include <bcmmsgbuf.h>
+#include <bcmendian.h>
+
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhd_proto.h>
+#include <dhd_bus.h>
+#include <dhd_dbg.h>
+
+#include <siutils.h>
+
+
+#include <dhd_flowring.h>
+
+#ifdef PROP_TXSTATUS
+#include <wlfc_proto.h>
+#include <dhd_wlfc.h>
+#endif
+
+#include <pcie_core.h>
+#include <bcmpcie.h>
+
+#define RETRIES 2 /* # of retries to retrieve matching ioctl response */
+#define IOCTL_HDR_LEN 12
+
+#define DEFAULT_RX_BUFFERS_TO_POST 256
+#define RXBUFPOST_THRESHOLD 32
+#define RX_BUF_BURST 16
+
+#define DHD_STOP_QUEUE_THRESHOLD 200
+#define DHD_START_QUEUE_THRESHOLD 100
+
+#define MODX(x, n) ((x) & ((n) -1))
+#define align(x, n) (MODX(x, n) ? ((x) - MODX(x, n) + (n)) : ((x) - MODX(x, n)))
+#define RX_DMA_OFFSET 8
+#define IOCT_RETBUF_SIZE (RX_DMA_OFFSET + WLC_IOCTL_MAXLEN)
+
+#define DMA_D2H_SCRATCH_BUF_LEN 8
+#define DMA_ALIGN_LEN 4
+#define DMA_XFER_LEN_LIMIT 0x400000
+
+#define DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ 8192
+
+#define DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D 1
+#define DHD_FLOWRING_MAX_EVENTBUF_POST 8
+#define DHD_FLOWRING_MAX_IOCTLRESPBUF_POST 8
+
+#define DHD_PROT_FUNCS 22
+
+typedef struct dhd_mem_map {
+ void *va;
+ dmaaddr_t pa;
+ void *dmah;
+} dhd_mem_map_t;
+
+typedef struct dhd_dmaxfer {
+ dhd_mem_map_t srcmem;
+ dhd_mem_map_t destmem;
+ uint32 len;
+ uint32 srcdelay;
+ uint32 destdelay;
+} dhd_dmaxfer_t;
+
+#define TXP_FLUSH_NITEMS
+#define TXP_FLUSH_MAX_ITEMS_FLUSH_CNT 48
+
+typedef struct msgbuf_ring {
+ bool inited;
+ uint16 idx;
+ uchar name[24];
+ dhd_mem_map_t ring_base;
+#ifdef TXP_FLUSH_NITEMS
+ void* start_addr;
+ uint16 pend_items_count;
+#endif /* TXP_FLUSH_NITEMS */
+ ring_mem_t *ringmem;
+ ring_state_t *ringstate;
+} msgbuf_ring_t;
+
+
+typedef struct dhd_prot {
+ osl_t *osh; /* OSL handle */
+ uint32 reqid;
+ uint32 lastcmd;
+ uint32 pending;
+ uint16 rxbufpost;
+ uint16 max_rxbufpost;
+ uint16 max_eventbufpost;
+ uint16 max_ioctlrespbufpost;
+ uint16 cur_event_bufs_posted;
+ uint16 cur_ioctlresp_bufs_posted;
+ uint16 active_tx_count;
+ uint16 max_tx_count;
+ uint16 txp_threshold;
+ /* Ring info */
+ msgbuf_ring_t *h2dring_txp_subn;
+ msgbuf_ring_t *h2dring_rxp_subn;
+ msgbuf_ring_t *h2dring_ctrl_subn; /* Cbuf handle for H2D ctrl ring */
+ msgbuf_ring_t *d2hring_tx_cpln;
+ msgbuf_ring_t *d2hring_rx_cpln;
+ msgbuf_ring_t *d2hring_ctrl_cpln; /* Cbuf handle for D2H ctrl ring */
+ uint32 rx_dataoffset;
+ dhd_mem_map_t retbuf;
+ dhd_mem_map_t ioctbuf; /* For holding ioct request buf */
+ dhd_mb_ring_t mb_ring_fn;
+
+	uint32 d2h_dma_scratch_buf_len; /* length of the D2H DMA scratch buffer */
+	dhd_mem_map_t	d2h_dma_scratch_buf;	/* For holding the D2H DMA scratch buffer */
+
+ uint32 h2d_dma_writeindx_buf_len; /* For holding dma ringupd buf - submission write */
+ dhd_mem_map_t h2d_dma_writeindx_buf; /* For holding dma ringupd buf - submission write */
+
+ uint32 h2d_dma_readindx_buf_len; /* For holding dma ringupd buf - submission read */
+ dhd_mem_map_t h2d_dma_readindx_buf; /* For holding dma ringupd buf - submission read */
+
+ uint32 d2h_dma_writeindx_buf_len; /* For holding dma ringupd buf - completion write */
+ dhd_mem_map_t d2h_dma_writeindx_buf; /* For holding dma ringupd buf - completion write */
+
+ uint32 d2h_dma_readindx_buf_len; /* For holding dma ringupd buf - completion read */
+ dhd_mem_map_t d2h_dma_readindx_buf; /* For holding dma ringupd buf - completion read */
+
+ dhd_dmaxfer_t dmaxfer;
+ bool dmaxfer_in_progress;
+
+ uint16 ioctl_seq_no;
+ uint16 data_seq_no;
+ uint16 ioctl_trans_id;
+ void *pktid_map_handle;
+ uint16 rx_metadata_offset;
+ uint16 tx_metadata_offset;
+ uint16 rx_cpln_early_upd_idx;
+} dhd_prot_t;
+
+static int dhdmsgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd,
+ void *buf, uint len, uint8 action);
+static int dhd_msgbuf_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd,
+ void *buf, uint len, uint8 action);
+static int dhdmsgbuf_cmplt(dhd_pub_t *dhd, uint32 id, uint32 len, void* buf, void* retbuf);
+
+static int dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd);
+static int dhd_prot_rxbufpost(dhd_pub_t *dhd, uint16 count);
+static void dhd_prot_return_rxbuf(dhd_pub_t *dhd, uint16 rxcnt);
+static void dhd_prot_rxcmplt_process(dhd_pub_t *dhd, void* buf, uint16 msglen);
+static void dhd_prot_event_process(dhd_pub_t *dhd, void* buf, uint16 len);
+static int dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8* buf, uint16 len);
+static int dhd_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8* buf, uint16 len);
+
+static void dhd_prot_txstatus_process(dhd_pub_t *dhd, void * buf, uint16 msglen);
+static void dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void * buf, uint16 msglen);
+static void dhd_prot_ioctack_process(dhd_pub_t *dhd, void * buf, uint16 msglen);
+static void dhd_prot_ringstatus_process(dhd_pub_t *dhd, void * buf, uint16 msglen);
+static void dhd_prot_genstatus_process(dhd_pub_t *dhd, void * buf, uint16 msglen);
+static void* dhd_alloc_ring_space(dhd_pub_t *dhd, msgbuf_ring_t *ring,
+ uint16 msglen, uint16 *alloced);
+static int dhd_fillup_ioct_reqst_ptrbased(dhd_pub_t *dhd, uint16 len, uint cmd, void* buf,
+ int ifidx);
+static INLINE void dhd_prot_packet_free(dhd_pub_t *dhd, uint32 pktid);
+static INLINE void *dhd_prot_packet_get(dhd_pub_t *dhd, uint32 pktid);
+static void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dma);
+static int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len, uint srcdelay,
+ uint destdelay, dhd_dmaxfer_t *dma);
+static void dhdmsgbuf_dmaxfer_compare(dhd_pub_t *dhd, void *buf, uint16 msglen);
+static void dhd_prot_process_flow_ring_create_response(dhd_pub_t *dhd, void* buf, uint16 msglen);
+static void dhd_prot_process_flow_ring_delete_response(dhd_pub_t *dhd, void* buf, uint16 msglen);
+static void dhd_prot_process_flow_ring_flush_response(dhd_pub_t *dhd, void* buf, uint16 msglen);
+
+
+
+
+#ifdef DHD_RX_CHAINING
+#define PKT_CTF_CHAINABLE(dhd, ifidx, evh, prio, h_sa, h_da, h_prio) \
+ (!ETHER_ISNULLDEST(((struct ether_header *)(evh))->ether_dhost) && \
+ !ETHER_ISMULTI(((struct ether_header *)(evh))->ether_dhost) && \
+ !eacmp((h_da), ((struct ether_header *)(evh))->ether_dhost) && \
+ !eacmp((h_sa), ((struct ether_header *)(evh))->ether_shost) && \
+ ((h_prio) == (prio)) && (dhd_ctf_hotbrc_check((dhd), (evh), (ifidx))) && \
+ ((((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IP)) || \
+ (((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IPV6))))
+
+static INLINE void BCMFASTPATH dhd_rxchain_reset(rxchain_info_t *rxchain);
+static void BCMFASTPATH dhd_rxchain_frame(dhd_pub_t *dhd, void *pkt, uint ifidx);
+static void BCMFASTPATH dhd_rxchain_commit(dhd_pub_t *dhd);
+
+#define DHD_PKT_CTF_MAX_CHAIN_LEN 64
+#endif /* DHD_RX_CHAINING */
+
+static uint16 dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, bool event_buf, uint32 max_to_post);
+static int dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *pub);
+static int dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *pub);
+
+static void dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t * ring);
+static void dhd_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring);
+static msgbuf_ring_t* prot_ring_attach(dhd_prot_t * prot, char* name, uint16 max_item,
+ uint16 len_item, uint16 ringid);
+static void* prot_get_ring_space(msgbuf_ring_t *ring, uint16 nitems, uint16 * alloced);
+static void dhd_set_dmaed_index(dhd_pub_t *dhd, uint8 type, uint16 ringid, uint16 new_index);
+static uint16 dhd_get_dmaed_index(dhd_pub_t *dhd, uint8 type, uint16 ringid);
+static void prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p, uint16 len);
+static void prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t * ring);
+static uint8* prot_get_src_addr(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint16 *available_len);
+static void prot_store_rxcpln_read_idx(dhd_pub_t *dhd, msgbuf_ring_t *ring);
+static void prot_early_upd_rxcpln_read_idx(dhd_pub_t *dhd, msgbuf_ring_t * ring);
+
+typedef void (*dhd_msgbuf_func_t)(dhd_pub_t *dhd, void * buf, uint16 msglen);
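+/* D2H message dispatch table, indexed by message type; NULL entries are
+ * message types without a dedicated handler.
+ */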
+static dhd_msgbuf_func_t table_lookup[DHD_PROT_FUNCS] = {
+ NULL,
+ dhd_prot_genstatus_process, /* MSG_TYPE_GEN_STATUS */
+ dhd_prot_ringstatus_process, /* MSG_TYPE_RING_STATUS */
+ NULL,
+ dhd_prot_process_flow_ring_create_response, /* MSG_TYPE_FLOW_RING_CREATE_CMPLT */
+ NULL,
+ dhd_prot_process_flow_ring_delete_response, /* MSG_TYPE_FLOW_RING_DELETE_CMPLT */
+ NULL,
+ dhd_prot_process_flow_ring_flush_response, /* MSG_TYPE_FLOW_RING_FLUSH_CMPLT */
+ NULL,
+ dhd_prot_ioctack_process, /* MSG_TYPE_IOCTLPTR_REQ_ACK */
+ NULL,
+ dhd_prot_ioctcmplt_process, /* MSG_TYPE_IOCTL_CMPLT */
+ NULL,
+ dhd_prot_event_process, /* MSG_TYPE_WL_EVENT */
+ NULL,
+ dhd_prot_txstatus_process, /* MSG_TYPE_TX_STATUS */
+ NULL,
+ dhd_prot_rxcmplt_process, /* MSG_TYPE_RX_CMPLT */
+ NULL,
+ dhdmsgbuf_dmaxfer_compare, /* MSG_TYPE_LPBK_DMAXFER_CMPLT */
+ NULL,
+};
+
+/*
+ * +---------------------------------------------------------------------------+
+ * PktId Map: Provides a native packet pointer to unique 32bit PktId mapping.
+ * The packet id map, also includes storage for some packet parameters that
+ * may be saved. A native packet pointer along with the parameters may be saved
+ * and a unique 32bit pkt id will be returned. Later, the saved packet pointer
+ * and the metadata may be retrieved using the previously allocated packet id.
+ * +---------------------------------------------------------------------------+
+ */
+#define MAX_PKTID_ITEMS (3072) /* Maximum number of pktids supported */
+
+typedef void * dhd_pktid_map_handle_t; /* opaque handle to a pktid map */
+
+/* Construct a packet id mapping table, returning an opaque map handle */
+static dhd_pktid_map_handle_t *dhd_pktid_map_init(void *osh, uint32 num_items);
+
+/* Destroy a packet id mapping table, freeing all packets active in the table */
+static void dhd_pktid_map_fini(dhd_pktid_map_handle_t *map);
+
+/* Determine number of pktids that are available */
+static INLINE uint32 dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *map);
+
+/* Allocate a unique pktid against which a pkt and some metadata is saved */
+static INLINE uint32 dhd_pktid_map_reserve(dhd_pktid_map_handle_t *handle,
+ void *pkt);
+static INLINE void dhd_pktid_map_save(dhd_pktid_map_handle_t *handle, void *pkt,
+ uint32 nkey, dmaaddr_t physaddr, uint32 len, uint8 dma);
+static uint32 dhd_pktid_map_alloc(dhd_pktid_map_handle_t *map, void *pkt,
+ dmaaddr_t physaddr, uint32 len, uint8 dma);
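+/*
+ * The reserve/save split lets a pktid be claimed first and the DMA address and
+ * length filled in later, once the buffer has been mapped; only the reserve
+ * step pops the shared key stack and therefore needs serialization (see the
+ * NATIVE_TO_PKTID_RSV / NATIVE_TO_PKTID_SAVE usage in dhd_prot_txdata()).
+ */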
+
+/* Release an allocated pktid, retrieving the previously saved pkt and metadata */
+static void *dhd_pktid_map_free(dhd_pktid_map_handle_t *map, uint32 id,
+ dmaaddr_t *physaddr, uint32 *len);
+
+/* Packet metadata saved in packet id mapper */
+typedef struct dhd_pktid_item {
+ bool inuse; /* tag an item to be in use */
+ uint8 dma; /* map direction: flush or invalidate */
+ uint16 len; /* length of mapped packet's buffer */
+ void *pkt; /* opaque native pointer to a packet */
+ dmaaddr_t physaddr; /* physical address of mapped packet's buffer */
+} dhd_pktid_item_t;
+
+typedef struct dhd_pktid_map {
+ void *osh;
+ int items; /* total items in map */
+ int avail; /* total available items */
+ int failures; /* lockers unavailable count */
+ uint32 keys[MAX_PKTID_ITEMS + 1]; /* stack of unique pkt ids */
+ dhd_pktid_item_t lockers[0]; /* metadata storage */
+} dhd_pktid_map_t;
+
+/*
+ * PktId (Locker) #0 is never allocated and is considered invalid.
+ *
+ * On request for a pktid, a value DHD_PKTID_INVALID must be treated as a
+ * depleted pktid pool and must not be used by the caller.
+ *
+ * Likewise, a caller must never free a pktid of value DHD_PKTID_INVALID.
+ */
+#define DHD_PKTID_INVALID (0U)
+
+#define DHD_PKTID_ITEM_SZ (sizeof(dhd_pktid_item_t))
+#define DHD_PKTID_MAP_SZ(items) (sizeof(dhd_pktid_map_t) + \
+ (DHD_PKTID_ITEM_SZ * ((items) + 1)))
+
+#define NATIVE_TO_PKTID_INIT(osh, items) dhd_pktid_map_init((osh), (items))
+#define NATIVE_TO_PKTID_FINI(map) dhd_pktid_map_fini(map)
+#define NATIVE_TO_PKTID_CLEAR(map) dhd_pktid_map_clear(map)
+
+#define NATIVE_TO_PKTID_RSV(map, pkt) dhd_pktid_map_reserve((map), (pkt))
+#define NATIVE_TO_PKTID_SAVE(map, pkt, nkey, pa, len, dma) \
+ dhd_pktid_map_save((map), (void *)(pkt), (nkey), (pa), (uint32)(len), (uint8)dma)
+#define NATIVE_TO_PKTID(map, pkt, pa, len, dma) \
+ dhd_pktid_map_alloc((map), (void *)(pkt), (pa), (uint32)(len), (uint8)dma)
+
+#define PKTID_TO_NATIVE(map, pktid, pa, len) \
+ dhd_pktid_map_free((map), (uint32)(pktid), \
+ (dmaaddr_t *)&(pa), (uint32 *)&(len))
+
+#define PKTID_AVAIL(map) dhd_pktid_map_avail_cnt(map)
+
+/*
+ * +---------------------------------------------------------------------------+
+ * Packet to Packet Id mapper using a <numbered_key, locker> paradigm.
+ *
+ * dhd_pktid_map manages a set of unique Packet Ids range[1..MAX_PKTID_ITEMS].
+ *
+ * dhd_pktid_map_alloc() may be used to save some packet metadata, and a unique
+ * packet id is returned. This unique packet id may be used to retrieve the
+ * previously saved packet metadata, using dhd_pktid_map_free(). On invocation
+ * of dhd_pktid_map_free(), the unique packet id is essentially freed. A
+ * subsequent call to dhd_pktid_map_alloc() may reuse this packet id.
+ *
+ * Implementation Note:
+ * Convert this into a <key,locker> abstraction and place into bcmutils !
+ * Locker abstraction should treat contents as opaque storage, and a
+ * callback should be registered to handle inuse lockers on destructor.
+ *
+ * +---------------------------------------------------------------------------+
+ */
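+
+/*
+ * Illustrative call sequence (a sketch only, not driver code; names as above):
+ *
+ *	pktid = NATIVE_TO_PKTID(map, pkt, physaddr, len, DMA_TX);
+ *	if (pktid == DHD_PKTID_INVALID)
+ *		-> pool depleted: do not post the packet, retry later
+ *	... pktid is carried to the dongle in cmn_hdr.request_id ...
+ *	pkt = PKTID_TO_NATIVE(map, pktid, physaddr, len);  -> pktid is free again
+ */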
+
+/* Allocate and initialize a mapper of num_items <numbered_key, locker> */
+static dhd_pktid_map_handle_t *
+dhd_pktid_map_init(void *osh, uint32 num_items)
+{
+ uint32 nkey;
+ dhd_pktid_map_t *map;
+ uint32 dhd_pktid_map_sz;
+
+	ASSERT((num_items >= 1) && (num_items <= MAX_PKTID_ITEMS));
+ dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(num_items);
+
+ if ((map = (dhd_pktid_map_t *)MALLOC(osh, dhd_pktid_map_sz)) == NULL) {
+ DHD_ERROR(("%s:%d: MALLOC failed for size %d\n",
+ __FUNCTION__, __LINE__, dhd_pktid_map_sz));
+ return NULL;
+ }
+ bzero(map, dhd_pktid_map_sz);
+
+ map->osh = osh;
+ map->items = num_items;
+ map->avail = num_items;
+
+ map->lockers[DHD_PKTID_INVALID].inuse = TRUE; /* tag locker #0 as inuse */
+
+ for (nkey = 1; nkey <= num_items; nkey++) { /* locker #0 is reserved */
+ map->keys[nkey] = nkey; /* populate with unique keys */
+ map->lockers[nkey].inuse = FALSE;
+ }
+
+ return (dhd_pktid_map_handle_t *)map; /* opaque handle */
+}
+
+/*
+ * Retrieve all allocated keys and free all <numbered_key, locker>.
+ * Freeing implies: unmapping the buffers and freeing the native packet
+ * This could have been a callback registered with the pktid mapper.
+ */
+static void
+dhd_pktid_map_fini(dhd_pktid_map_handle_t *handle)
+{
+ void *osh;
+ int nkey;
+ dhd_pktid_map_t *map;
+ uint32 dhd_pktid_map_sz;
+ dhd_pktid_item_t *locker;
+
+ if (handle == NULL)
+ return;
+
+ map = (dhd_pktid_map_t *)handle;
+ osh = map->osh;
+ dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items);
+
+ nkey = 1; /* skip reserved KEY #0, and start from 1 */
+ locker = &map->lockers[nkey];
+
+ for (; nkey <= map->items; nkey++, locker++) {
+ if (locker->inuse == TRUE) { /* numbered key still in use */
+ locker->inuse = FALSE; /* force open the locker */
+
+ { /* This could be a callback registered with dhd_pktid_map */
+ DMA_UNMAP(osh, locker->physaddr, locker->len,
+ locker->dma, 0, 0);
+ PKTFREE(osh, (ulong*)locker->pkt, FALSE);
+ }
+ }
+ }
+
+ MFREE(osh, handle, dhd_pktid_map_sz);
+}
+
+static void
+dhd_pktid_map_clear(dhd_pktid_map_handle_t *handle)
+{
+ void *osh;
+ int nkey;
+ dhd_pktid_map_t *map;
+ dhd_pktid_item_t *locker;
+
+ DHD_TRACE(("%s\n", __FUNCTION__));
+
+ if (handle == NULL)
+ return;
+
+ map = (dhd_pktid_map_t *)handle;
+ osh = map->osh;
+ map->failures = 0;
+
+ nkey = 1; /* skip reserved KEY #0, and start from 1 */
+ locker = &map->lockers[nkey];
+
+ for (; nkey <= map->items; nkey++, locker++) {
+ map->keys[nkey] = nkey; /* populate with unique keys */
+ if (locker->inuse == TRUE) { /* numbered key still in use */
+ locker->inuse = FALSE; /* force open the locker */
+ DHD_TRACE(("%s free id%d\n", __FUNCTION__, nkey));
+			DMA_UNMAP(osh, locker->physaddr, locker->len,
+				locker->dma, 0, 0);
+ PKTFREE(osh, (ulong*)locker->pkt, FALSE);
+ }
+ }
+ map->avail = map->items;
+}
+
+/* Get the pktid free count */
+static INLINE uint32 BCMFASTPATH
+dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *handle)
+{
+ dhd_pktid_map_t *map;
+
+ ASSERT(handle != NULL);
+ map = (dhd_pktid_map_t *)handle;
+
+ return map->avail;
+}
+
+/*
+ * Allocate locker, save pkt contents, and return the locker's numbered key.
+ * dhd_pktid_map_alloc() is not reentrant; serialization is the caller's responsibility.
+ * Caller must treat a returned value DHD_PKTID_INVALID as a failure case,
+ * implying a depleted pool of pktids.
+ */
+static INLINE uint32
+dhd_pktid_map_reserve(dhd_pktid_map_handle_t *handle, void *pkt)
+{
+ uint32 nkey;
+ dhd_pktid_map_t *map;
+ dhd_pktid_item_t *locker;
+
+ ASSERT(handle != NULL);
+ map = (dhd_pktid_map_t *)handle;
+
+ if (map->avail <= 0) { /* no more pktids to allocate */
+ map->failures++;
+ DHD_INFO(("%s:%d: failed, no free keys\n", __FUNCTION__, __LINE__));
+ return DHD_PKTID_INVALID; /* failed alloc request */
+ }
+ ASSERT(map->avail <= map->items);
+
+ nkey = map->keys[map->avail]; /* fetch a free locker, pop stack */
+ map->avail--;
+
+ locker = &map->lockers[nkey]; /* save packet metadata in locker */
+ locker->inuse = TRUE; /* reserve this locker */
+ locker->pkt = pkt;
+
+ ASSERT(nkey != DHD_PKTID_INVALID);
+ return nkey; /* return locker's numbered key */
+}
+
+static INLINE void
+dhd_pktid_map_save(dhd_pktid_map_handle_t *handle, void *pkt, uint32 nkey,
+ dmaaddr_t physaddr, uint32 len, uint8 dma)
+{
+ dhd_pktid_map_t *map;
+ dhd_pktid_item_t *locker;
+
+ ASSERT(handle != NULL);
+ map = (dhd_pktid_map_t *)handle;
+
+ ASSERT((nkey != DHD_PKTID_INVALID) && (nkey <= (uint32)map->items));
+
+ locker = &map->lockers[nkey];
+ ASSERT(locker->pkt == pkt);
+
+ locker->dma = dma; /* store contents in locker */
+ locker->physaddr = physaddr;
+ locker->len = (uint16)len; /* 16bit len */
+}
+
+static uint32 BCMFASTPATH
+dhd_pktid_map_alloc(dhd_pktid_map_handle_t *handle, void *pkt,
+ dmaaddr_t physaddr, uint32 len, uint8 dma)
+{
+ uint32 nkey = dhd_pktid_map_reserve(handle, pkt);
+ if (nkey != DHD_PKTID_INVALID) {
+ dhd_pktid_map_save(handle, pkt, nkey, physaddr, len, dma);
+ }
+ return nkey;
+}
+
+/*
+ * Given a numbered key, return the locker contents.
+ * dhd_pktid_map_free() is not reentrant; serialization is the caller's responsibility.
+ * Caller may not free a pktid value DHD_PKTID_INVALID or an arbitrary pktid
+ * value. Only a previously allocated pktid may be freed.
+ */
+static void * BCMFASTPATH
+dhd_pktid_map_free(dhd_pktid_map_handle_t *handle, uint32 nkey,
+ dmaaddr_t *physaddr, uint32 *len)
+{
+ dhd_pktid_map_t *map;
+ dhd_pktid_item_t *locker;
+
+ ASSERT(handle != NULL);
+
+ map = (dhd_pktid_map_t *)handle;
+ ASSERT((nkey != DHD_PKTID_INVALID) && (nkey <= (uint32)map->items));
+
+ locker = &map->lockers[nkey];
+
+ if (locker->inuse == FALSE) { /* Debug check for cloned numbered key */
+ DHD_ERROR(("%s:%d: Error! freeing invalid pktid<%u>\n",
+ __FUNCTION__, __LINE__, nkey));
+ ASSERT(locker->inuse != FALSE);
+ return NULL;
+ }
+
+ map->avail++;
+ map->keys[map->avail] = nkey; /* make this numbered key available */
+ locker->inuse = FALSE; /* open and free Locker */
+
+ *physaddr = locker->physaddr; /* return contents of locker */
+ *len = (uint32)locker->len;
+
+ return locker->pkt;
+}
+
+/* Linkage, sets prot link and updates hdrlen in pub */
+int dhd_prot_attach(dhd_pub_t *dhd)
+{
+ uint alloced = 0;
+
+ dhd_prot_t *prot;
+
+ /* Allocate prot structure */
+ if (!(prot = (dhd_prot_t *)DHD_OS_PREALLOC(dhd, DHD_PREALLOC_PROT,
+ sizeof(dhd_prot_t)))) {
+ DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
+ goto fail;
+ }
+ memset(prot, 0, sizeof(*prot));
+
+ prot->osh = dhd->osh;
+ dhd->prot = prot;
+
+	/* DMA'ing of ring update indices supported? FALSE by default */
+ dhd->dma_d2h_ring_upd_support = FALSE;
+ dhd->dma_h2d_ring_upd_support = FALSE;
+
+ /* Ring Allocations */
+ /* 1.0 H2D TXPOST ring */
+ if (!(prot->h2dring_txp_subn = prot_ring_attach(prot, "h2dtxp",
+ H2DRING_TXPOST_MAX_ITEM, H2DRING_TXPOST_ITEMSIZE,
+ BCMPCIE_H2D_TXFLOWRINGID))) {
+ DHD_ERROR(("%s: kmalloc for H2D TXPOST ring failed\n", __FUNCTION__));
+ goto fail;
+ }
+
+ /* 2.0 H2D RXPOST ring */
+ if (!(prot->h2dring_rxp_subn = prot_ring_attach(prot, "h2drxp",
+ H2DRING_RXPOST_MAX_ITEM, H2DRING_RXPOST_ITEMSIZE,
+ BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT))) {
+ DHD_ERROR(("%s: kmalloc for H2D RXPOST ring failed\n", __FUNCTION__));
+ goto fail;
+
+ }
+
+ /* 3.0 H2D CTRL_SUBMISSION ring */
+ if (!(prot->h2dring_ctrl_subn = prot_ring_attach(prot, "h2dctrl",
+ H2DRING_CTRL_SUB_MAX_ITEM, H2DRING_CTRL_SUB_ITEMSIZE,
+ BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT))) {
+ DHD_ERROR(("%s: kmalloc for H2D CTRL_SUBMISSION ring failed\n",
+ __FUNCTION__));
+ goto fail;
+
+ }
+
+ /* 4.0 D2H TX_COMPLETION ring */
+ if (!(prot->d2hring_tx_cpln = prot_ring_attach(prot, "d2htxcpl",
+ D2HRING_TXCMPLT_MAX_ITEM, D2HRING_TXCMPLT_ITEMSIZE,
+ BCMPCIE_D2H_MSGRING_TX_COMPLETE))) {
+ DHD_ERROR(("%s: kmalloc for D2H TX_COMPLETION ring failed\n",
+ __FUNCTION__));
+ goto fail;
+
+ }
+
+ /* 5.0 D2H RX_COMPLETION ring */
+ if (!(prot->d2hring_rx_cpln = prot_ring_attach(prot, "d2hrxcpl",
+ D2HRING_RXCMPLT_MAX_ITEM, D2HRING_RXCMPLT_ITEMSIZE,
+ BCMPCIE_D2H_MSGRING_RX_COMPLETE))) {
+ DHD_ERROR(("%s: kmalloc for D2H RX_COMPLETION ring failed\n",
+ __FUNCTION__));
+ goto fail;
+
+ }
+
+ /* 6.0 D2H CTRL_COMPLETION ring */
+ if (!(prot->d2hring_ctrl_cpln = prot_ring_attach(prot, "d2hctrl",
+ D2HRING_CTRL_CMPLT_MAX_ITEM, D2HRING_CTRL_CMPLT_ITEMSIZE,
+ BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE))) {
+ DHD_ERROR(("%s: kmalloc for D2H CTRL_COMPLETION ring failed\n",
+ __FUNCTION__));
+ goto fail;
+ }
+
+ /* Return buffer for ioctl */
+ prot->retbuf.va = DMA_ALLOC_CONSISTENT(dhd->osh, IOCT_RETBUF_SIZE, DMA_ALIGN_LEN,
+ &alloced, &prot->retbuf.pa, &prot->retbuf.dmah);
+ if (prot->retbuf.va == NULL) {
+ ASSERT(0);
+ return BCME_NOMEM;
+ }
+
+ ASSERT(MODX((unsigned long)prot->retbuf.va, DMA_ALIGN_LEN) == 0);
+ bzero(prot->retbuf.va, IOCT_RETBUF_SIZE);
+ OSL_CACHE_FLUSH((void *) prot->retbuf.va, IOCT_RETBUF_SIZE);
+
+ /* IOCTL request buffer */
+ prot->ioctbuf.va = DMA_ALLOC_CONSISTENT(dhd->osh, IOCT_RETBUF_SIZE, DMA_ALIGN_LEN,
+ &alloced, &prot->ioctbuf.pa, &prot->ioctbuf.dmah);
+
+ if (prot->ioctbuf.va == NULL) {
+ ASSERT(0);
+ return BCME_NOMEM;
+ }
+
+ ASSERT(MODX((unsigned long)prot->ioctbuf.va, DMA_ALIGN_LEN) == 0);
+ bzero(prot->ioctbuf.va, IOCT_RETBUF_SIZE);
+ OSL_CACHE_FLUSH((void *) prot->ioctbuf.va, IOCT_RETBUF_SIZE);
+
+ /* Scratch buffer for dma rx offset */
+ prot->d2h_dma_scratch_buf_len = DMA_D2H_SCRATCH_BUF_LEN;
+ prot->d2h_dma_scratch_buf.va = DMA_ALLOC_CONSISTENT(dhd->osh, DMA_D2H_SCRATCH_BUF_LEN,
+ DMA_ALIGN_LEN, &alloced, &prot->d2h_dma_scratch_buf.pa,
+ &prot->d2h_dma_scratch_buf.dmah);
+
+ if (prot->d2h_dma_scratch_buf.va == NULL) {
+ ASSERT(0);
+ return BCME_NOMEM;
+ }
+ ASSERT(MODX((unsigned long)prot->d2h_dma_scratch_buf.va, DMA_ALIGN_LEN) == 0);
+ bzero(prot->d2h_dma_scratch_buf.va, DMA_D2H_SCRATCH_BUF_LEN);
+ OSL_CACHE_FLUSH((void *)prot->d2h_dma_scratch_buf.va, DMA_D2H_SCRATCH_BUF_LEN);
+
+
+ /* PKTID handle INIT */
+ prot->pktid_map_handle = NATIVE_TO_PKTID_INIT(dhd->osh, MAX_PKTID_ITEMS);
+ if (prot->pktid_map_handle == NULL) {
+ ASSERT(0);
+ return BCME_NOMEM;
+ }
+
+ prot->dmaxfer.srcmem.va = NULL;
+ prot->dmaxfer.destmem.va = NULL;
+ prot->dmaxfer_in_progress = FALSE;
+
+ prot->rx_metadata_offset = 0;
+ prot->tx_metadata_offset = 0;
+
+#ifdef DHD_RX_CHAINING
+ dhd_rxchain_reset(&prot->rxchain);
+#endif
+
+ return 0;
+
+fail:
+#ifndef CONFIG_DHD_USE_STATIC_BUF
+ if (prot != NULL)
+ dhd_prot_detach(dhd);
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+ return BCME_NOMEM;
+}
+
+/* Init a host memory block used for DMA'ing ring indices */
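+/* Each index is DMA'd as a 32-bit word, so the block is sized at 4 * length bytes */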
+int
+dhd_prot_init_index_dma_block(dhd_pub_t *dhd, uint8 type, uint32 length)
+{
+ uint alloced = 0;
+
+ dhd_prot_t *prot = dhd->prot;
+ uint32 dma_block_size = 4 * length;
+
+ if (prot == NULL) {
+ DHD_ERROR(("prot is not inited\n"));
+ return BCME_ERROR;
+ }
+
+ switch (type) {
+ case HOST_TO_DNGL_DMA_WRITEINDX_BUFFER:
+ /* ring update dma buffer for submission write */
+ prot->h2d_dma_writeindx_buf_len = dma_block_size;
+ prot->h2d_dma_writeindx_buf.va = DMA_ALLOC_CONSISTENT(dhd->osh,
+ dma_block_size, DMA_ALIGN_LEN, &alloced,
+ &prot->h2d_dma_writeindx_buf.pa,
+ &prot->h2d_dma_writeindx_buf.dmah);
+
+ if (prot->h2d_dma_writeindx_buf.va == NULL) {
+ return BCME_NOMEM;
+ }
+
+ ASSERT(ISALIGNED(prot->h2d_dma_writeindx_buf.va, 4));
+ bzero(prot->h2d_dma_writeindx_buf.va, dma_block_size);
+ OSL_CACHE_FLUSH((void *)prot->h2d_dma_writeindx_buf.va, dma_block_size);
+ DHD_ERROR(("H2D_WRITEINDX_ARRAY_HOST: %d-bytes "
+ "inited for dma'ing h2d-w indices\n",
+ prot->h2d_dma_writeindx_buf_len));
+ break;
+
+ case HOST_TO_DNGL_DMA_READINDX_BUFFER:
+ /* ring update dma buffer for submission read */
+ prot->h2d_dma_readindx_buf_len = dma_block_size;
+ prot->h2d_dma_readindx_buf.va = DMA_ALLOC_CONSISTENT(dhd->osh,
+ dma_block_size, DMA_ALIGN_LEN, &alloced,
+ &prot->h2d_dma_readindx_buf.pa,
+ &prot->h2d_dma_readindx_buf.dmah);
+ if (prot->h2d_dma_readindx_buf.va == NULL) {
+ return BCME_NOMEM;
+ }
+
+ ASSERT(ISALIGNED(prot->h2d_dma_readindx_buf.va, 4));
+ bzero(prot->h2d_dma_readindx_buf.va, dma_block_size);
+ OSL_CACHE_FLUSH((void *)prot->h2d_dma_readindx_buf.va, dma_block_size);
+ DHD_ERROR(("H2D_READINDX_ARRAY_HOST %d-bytes "
+ "inited for dma'ing h2d-r indices\n",
+ prot->h2d_dma_readindx_buf_len));
+ break;
+
+ case DNGL_TO_HOST_DMA_WRITEINDX_BUFFER:
+ /* ring update dma buffer for completion write */
+ prot->d2h_dma_writeindx_buf_len = dma_block_size;
+ prot->d2h_dma_writeindx_buf.va = DMA_ALLOC_CONSISTENT(dhd->osh,
+ dma_block_size, DMA_ALIGN_LEN, &alloced,
+ &prot->d2h_dma_writeindx_buf.pa,
+ &prot->d2h_dma_writeindx_buf.dmah);
+
+ if (prot->d2h_dma_writeindx_buf.va == NULL) {
+ return BCME_NOMEM;
+ }
+
+ ASSERT(ISALIGNED(prot->d2h_dma_writeindx_buf.va, 4));
+ bzero(prot->d2h_dma_writeindx_buf.va, dma_block_size);
+ OSL_CACHE_FLUSH((void *)prot->d2h_dma_writeindx_buf.va, dma_block_size);
+ DHD_ERROR(("D2H_WRITEINDX_ARRAY_HOST %d-bytes "
+ "inited for dma'ing d2h-w indices\n",
+ prot->d2h_dma_writeindx_buf_len));
+ break;
+
+ case DNGL_TO_HOST_DMA_READINDX_BUFFER:
+ /* ring update dma buffer for completion read */
+ prot->d2h_dma_readindx_buf_len = dma_block_size;
+ prot->d2h_dma_readindx_buf.va = DMA_ALLOC_CONSISTENT(dhd->osh,
+ dma_block_size, DMA_ALIGN_LEN, &alloced,
+ &prot->d2h_dma_readindx_buf.pa,
+ &prot->d2h_dma_readindx_buf.dmah);
+
+ if (prot->d2h_dma_readindx_buf.va == NULL) {
+ return BCME_NOMEM;
+ }
+
+ ASSERT(ISALIGNED(prot->d2h_dma_readindx_buf.va, 4));
+ bzero(prot->d2h_dma_readindx_buf.va, dma_block_size);
+ OSL_CACHE_FLUSH((void *)prot->d2h_dma_readindx_buf.va, dma_block_size);
+ DHD_ERROR(("D2H_READINDX_ARRAY_HOST %d-bytes "
+ "inited for dma'ing d2h-r indices\n",
+ prot->d2h_dma_readindx_buf_len));
+ break;
+
+ default:
+ DHD_ERROR(("%s: Unexpected option\n", __FUNCTION__));
+ return BCME_BADOPTION;
+ }
+
+ return BCME_OK;
+
+}
+
+/* Unlink, frees allocated protocol memory (including dhd_prot) */
+void dhd_prot_detach(dhd_pub_t *dhd)
+{
+ dhd_prot_t *prot = dhd->prot;
+ /* Stop the protocol module */
+ if (dhd->prot) {
+
+ /* free up scratch buffer */
+ if (prot->d2h_dma_scratch_buf.va) {
+ DMA_FREE_CONSISTENT(dhd->osh, prot->d2h_dma_scratch_buf.va,
+ DMA_D2H_SCRATCH_BUF_LEN, prot->d2h_dma_scratch_buf.pa,
+ prot->d2h_dma_scratch_buf.dmah);
+ prot->d2h_dma_scratch_buf.va = NULL;
+ }
+ /* free up ring upd buffer for submission writes */
+ if (prot->h2d_dma_writeindx_buf.va) {
+ DMA_FREE_CONSISTENT(dhd->osh, prot->h2d_dma_writeindx_buf.va,
+ prot->h2d_dma_writeindx_buf_len, prot->h2d_dma_writeindx_buf.pa,
+ prot->h2d_dma_writeindx_buf.dmah);
+ prot->h2d_dma_writeindx_buf.va = NULL;
+ }
+
+ /* free up ring upd buffer for submission reads */
+ if (prot->h2d_dma_readindx_buf.va) {
+ DMA_FREE_CONSISTENT(dhd->osh, prot->h2d_dma_readindx_buf.va,
+ prot->h2d_dma_readindx_buf_len, prot->h2d_dma_readindx_buf.pa,
+ prot->h2d_dma_readindx_buf.dmah);
+ prot->h2d_dma_readindx_buf.va = NULL;
+ }
+
+ /* free up ring upd buffer for completion writes */
+ if (prot->d2h_dma_writeindx_buf.va) {
+ DMA_FREE_CONSISTENT(dhd->osh, prot->d2h_dma_writeindx_buf.va,
+ prot->d2h_dma_writeindx_buf_len, prot->d2h_dma_writeindx_buf.pa,
+ prot->d2h_dma_writeindx_buf.dmah);
+ prot->d2h_dma_writeindx_buf.va = NULL;
+ }
+
+		/* free up ring upd buffer for completion reads */
+ if (prot->d2h_dma_readindx_buf.va) {
+ DMA_FREE_CONSISTENT(dhd->osh, prot->d2h_dma_readindx_buf.va,
+ prot->d2h_dma_readindx_buf_len, prot->d2h_dma_readindx_buf.pa,
+ prot->d2h_dma_readindx_buf.dmah);
+ prot->d2h_dma_readindx_buf.va = NULL;
+ }
+
+ /* ioctl return buffer */
+ if (prot->retbuf.va) {
+ DMA_FREE_CONSISTENT(dhd->osh, dhd->prot->retbuf.va,
+ IOCT_RETBUF_SIZE, dhd->prot->retbuf.pa, dhd->prot->retbuf.dmah);
+ dhd->prot->retbuf.va = NULL;
+ }
+
+ /* ioctl request buffer */
+ if (prot->ioctbuf.va) {
+ DMA_FREE_CONSISTENT(dhd->osh, dhd->prot->ioctbuf.va,
+ IOCT_RETBUF_SIZE, dhd->prot->ioctbuf.pa, dhd->prot->ioctbuf.dmah);
+
+ dhd->prot->ioctbuf.va = NULL;
+ }
+
+
+ /* 1.0 H2D TXPOST ring */
+ dhd_prot_ring_detach(dhd, prot->h2dring_txp_subn);
+ /* 2.0 H2D RXPOST ring */
+ dhd_prot_ring_detach(dhd, prot->h2dring_rxp_subn);
+ /* 3.0 H2D CTRL_SUBMISSION ring */
+ dhd_prot_ring_detach(dhd, prot->h2dring_ctrl_subn);
+ /* 4.0 D2H TX_COMPLETION ring */
+ dhd_prot_ring_detach(dhd, prot->d2hring_tx_cpln);
+ /* 5.0 D2H RX_COMPLETION ring */
+ dhd_prot_ring_detach(dhd, prot->d2hring_rx_cpln);
+ /* 6.0 D2H CTRL_COMPLETION ring */
+ dhd_prot_ring_detach(dhd, prot->d2hring_ctrl_cpln);
+
+ NATIVE_TO_PKTID_FINI(dhd->prot->pktid_map_handle);
+
+#ifndef CONFIG_DHD_USE_STATIC_BUF
+ MFREE(dhd->osh, dhd->prot, sizeof(dhd_prot_t));
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+
+ dhd->prot = NULL;
+ }
+}
+
+void
+dhd_prot_rx_dataoffset(dhd_pub_t *dhd, uint32 rx_offset)
+{
+ dhd_prot_t *prot = dhd->prot;
+ prot->rx_dataoffset = rx_offset;
+}
+
+
+/* Initialize protocol: sync w/dongle state.
+ * Sets dongle media info (iswl, drv_version, mac address).
+ */
+int dhd_sync_with_dongle(dhd_pub_t *dhd)
+{
+ int ret = 0;
+ wlc_rev_info_t revinfo;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ /* Post event buffer after shim layer is attached */
+ ret = dhd_msgbuf_rxbuf_post_event_bufs(dhd);
+
+
+ /* Get the device rev info */
+ memset(&revinfo, 0, sizeof(revinfo));
+ ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_REVINFO, &revinfo, sizeof(revinfo), FALSE, 0);
+ if (ret < 0)
+ goto done;
+
+ dhd_process_cid_mac(dhd, TRUE);
+
+ ret = dhd_preinit_ioctls(dhd);
+
+ if (!ret)
+ dhd_process_cid_mac(dhd, FALSE);
+
+ /* Always assumes wl for now */
+ dhd->iswl = TRUE;
+done:
+ return ret;
+}
+
+/* This function does all the initialization needed
+ * for the IOCTL/IOVAR path.
+ */
+int dhd_prot_init(dhd_pub_t *dhd)
+{
+ int ret = 0;
+ dhd_prot_t *prot = dhd->prot;
+
+ /* Max pkts in ring */
+ prot->max_tx_count = H2DRING_TXPOST_MAX_ITEM;
+
+ DHD_INFO(("%s:%d: MAX_TX_COUNT = %d\n", __FUNCTION__, __LINE__, prot->max_tx_count));
+
+ /* Read max rx packets supported by dongle */
+ dhd_bus_cmn_readshared(dhd->bus, &prot->max_rxbufpost, MAX_HOST_RXBUFS, 0);
+ if (prot->max_rxbufpost == 0) {
+ /* This would happen if the dongle firmware is not */
+ /* using the latest shared structure template */
+ prot->max_rxbufpost = DEFAULT_RX_BUFFERS_TO_POST;
+ }
+ DHD_INFO(("%s:%d: MAX_RXBUFPOST = %d\n", __FUNCTION__, __LINE__, prot->max_rxbufpost));
+
+ prot->max_eventbufpost = DHD_FLOWRING_MAX_EVENTBUF_POST;
+ prot->max_ioctlrespbufpost = DHD_FLOWRING_MAX_IOCTLRESPBUF_POST;
+
+ prot->active_tx_count = 0;
+ prot->data_seq_no = 0;
+ prot->ioctl_seq_no = 0;
+ prot->txp_threshold = TXP_FLUSH_MAX_ITEMS_FLUSH_CNT;
+
+ prot->ioctl_trans_id = 1;
+
+ /* Register the interrupt function upfront */
+ /* remove corerev checks in data path */
+ prot->mb_ring_fn = dhd_bus_get_mbintr_fn(dhd->bus);
+
+ /* Initialise rings */
+ /* 1.0 H2D TXPOST ring */
+ if (dhd_bus_is_txmode_push(dhd->bus)) {
+ dhd_ring_init(dhd, prot->h2dring_txp_subn);
+ }
+
+ /* 2.0 H2D RXPOST ring */
+ dhd_ring_init(dhd, prot->h2dring_rxp_subn);
+ /* 3.0 H2D CTRL_SUBMISSION ring */
+ dhd_ring_init(dhd, prot->h2dring_ctrl_subn);
+ /* 4.0 D2H TX_COMPLETION ring */
+ dhd_ring_init(dhd, prot->d2hring_tx_cpln);
+ /* 5.0 D2H RX_COMPLETION ring */
+ dhd_ring_init(dhd, prot->d2hring_rx_cpln);
+ /* 6.0 D2H CTRL_COMPLETION ring */
+ dhd_ring_init(dhd, prot->d2hring_ctrl_cpln);
+
+ /* init the scratch buffer */
+ dhd_bus_cmn_writeshared(dhd->bus, &prot->d2h_dma_scratch_buf.pa,
+ sizeof(prot->d2h_dma_scratch_buf.pa), DNGL_TO_HOST_DMA_SCRATCH_BUFFER, 0);
+ dhd_bus_cmn_writeshared(dhd->bus, &prot->d2h_dma_scratch_buf_len,
+ sizeof(prot->d2h_dma_scratch_buf_len), DNGL_TO_HOST_DMA_SCRATCH_BUFFER_LEN, 0);
+
+ /* If supported by the host, indicate the memory block
+	 * for completion writes / submission reads to shared space
+ */
+ if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) {
+ dhd_bus_cmn_writeshared(dhd->bus, &prot->d2h_dma_writeindx_buf.pa,
+ sizeof(prot->d2h_dma_writeindx_buf.pa),
+ DNGL_TO_HOST_DMA_WRITEINDX_BUFFER, 0);
+ dhd_bus_cmn_writeshared(dhd->bus, &prot->h2d_dma_readindx_buf.pa,
+ sizeof(prot->h2d_dma_readindx_buf.pa),
+ HOST_TO_DNGL_DMA_READINDX_BUFFER, 0);
+ }
+
+ if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support)) {
+ dhd_bus_cmn_writeshared(dhd->bus, &prot->h2d_dma_writeindx_buf.pa,
+ sizeof(prot->h2d_dma_writeindx_buf.pa),
+ HOST_TO_DNGL_DMA_WRITEINDX_BUFFER, 0);
+ dhd_bus_cmn_writeshared(dhd->bus, &prot->d2h_dma_readindx_buf.pa,
+ sizeof(prot->d2h_dma_readindx_buf.pa),
+ DNGL_TO_HOST_DMA_READINDX_BUFFER, 0);
+
+ }
+
+ ret = dhd_msgbuf_rxbuf_post(dhd);
+ ret = dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
+
+ return ret;
+}
+
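+/* Set DHD_DBG_SHOW_METADATA to 1 to compile in dumps of the rx/tx metadata TLVs in the completion handlers below */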
+#define DHD_DBG_SHOW_METADATA 0
+#if DHD_DBG_SHOW_METADATA
+static void BCMFASTPATH
+dhd_prot_print_metadata(dhd_pub_t *dhd, void *ptr, int len)
+{
+ uint8 tlv_t;
+ uint8 tlv_l;
+ uint8 *tlv_v = (uint8 *)ptr;
+
+ if (len <= BCMPCIE_D2H_METADATA_HDRLEN)
+ return;
+
+ len -= BCMPCIE_D2H_METADATA_HDRLEN;
+ tlv_v += BCMPCIE_D2H_METADATA_HDRLEN;
+
+ while (len > TLV_HDR_LEN) {
+ tlv_t = tlv_v[TLV_TAG_OFF];
+ tlv_l = tlv_v[TLV_LEN_OFF];
+
+ len -= TLV_HDR_LEN;
+ tlv_v += TLV_HDR_LEN;
+ if (len < tlv_l)
+ break;
+ if ((tlv_t == 0) || (tlv_t == WLFC_CTL_TYPE_FILLER))
+ break;
+
+ switch (tlv_t) {
+ case WLFC_CTL_TYPE_TXSTATUS:
+ bcm_print_bytes("METADATA TX_STATUS", tlv_v, tlv_l);
+ break;
+
+ case WLFC_CTL_TYPE_RSSI:
+ bcm_print_bytes("METADATA RX_RSSI", tlv_v, tlv_l);
+ break;
+
+ case WLFC_CTL_TYPE_FIFO_CREDITBACK:
+ bcm_print_bytes("METADATA FIFO_CREDITBACK", tlv_v, tlv_l);
+ break;
+
+ case WLFC_CTL_TYPE_TX_ENTRY_STAMP:
+ bcm_print_bytes("METADATA TX_ENTRY", tlv_v, tlv_l);
+ break;
+
+ case WLFC_CTL_TYPE_RX_STAMP:
+ bcm_print_bytes("METADATA RX_TIMESTAMP", tlv_v, tlv_l);
+ break;
+
+ case WLFC_CTL_TYPE_TRANS_ID:
+ bcm_print_bytes("METADATA TRANS_ID", tlv_v, tlv_l);
+ break;
+
+ case WLFC_CTL_TYPE_COMP_TXSTATUS:
+ bcm_print_bytes("METADATA COMP_TXSTATUS", tlv_v, tlv_l);
+ break;
+
+ default:
+ bcm_print_bytes("METADATA UNKNOWN", tlv_v, tlv_l);
+ break;
+ }
+
+ len -= tlv_l;
+ tlv_v += tlv_l;
+ }
+}
+#endif /* DHD_DBG_SHOW_METADATA */
+
+static INLINE void BCMFASTPATH
+dhd_prot_packet_free(dhd_pub_t *dhd, uint32 pktid)
+{
+ void *PKTBUF;
+ dmaaddr_t pa;
+ uint32 pa_len;
+ PKTBUF = PKTID_TO_NATIVE(dhd->prot->pktid_map_handle, pktid, pa, pa_len);
+
+ if (PKTBUF) {
+ DMA_UNMAP(dhd->osh, pa, (uint) pa_len, DMA_TX, 0, 0);
+ PKTFREE(dhd->osh, PKTBUF, FALSE);
+ }
+ return;
+}
+
+static INLINE void * BCMFASTPATH
+dhd_prot_packet_get(dhd_pub_t *dhd, uint32 pktid)
+{
+ void *PKTBUF;
+ dmaaddr_t pa;
+ uint32 pa_len;
+ PKTBUF = PKTID_TO_NATIVE(dhd->prot->pktid_map_handle, pktid, pa, pa_len);
+ if (PKTBUF) {
+ DMA_UNMAP(dhd->osh, pa, (uint) pa_len, DMA_RX, 0, 0);
+ }
+
+ return PKTBUF;
+}
+
+static int BCMFASTPATH
+dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd)
+{
+ dhd_prot_t *prot = dhd->prot;
+ int16 fillbufs;
+ uint16 cnt = 64;
+ int retcount = 0;
+
+ fillbufs = prot->max_rxbufpost - prot->rxbufpost;
+ while (fillbufs > 0) {
+ cnt--;
+ if (cnt == 0) {
+ /* find a better way to reschedule rx buf post if space not available */
+ DHD_ERROR(("h2d rx post ring not available to post host buffers \n"));
+ DHD_ERROR(("Current posted host buf count %d \n", prot->rxbufpost));
+ break;
+ }
+
+		/* Post in bursts of RX_BUF_BURST buffers at a time */
+ fillbufs = MIN(fillbufs, RX_BUF_BURST);
+
+ /* Post buffers */
+ retcount = dhd_prot_rxbufpost(dhd, fillbufs);
+
+ if (retcount > 0) {
+ prot->rxbufpost += (uint16)retcount;
+
+ /* how many more to post */
+ fillbufs = prot->max_rxbufpost - prot->rxbufpost;
+ } else {
+ /* Make sure we don't run loop any further */
+ fillbufs = 0;
+ }
+ }
+
+ return 0;
+}
+
+/* Post count no of rx buffers down to dongle */
+static int BCMFASTPATH
+dhd_prot_rxbufpost(dhd_pub_t *dhd, uint16 count)
+{
+ void *p;
+ uint16 pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
+ uint8 *rxbuf_post_tmp;
+ host_rxbuf_post_t *rxbuf_post;
+ void* msg_start;
+ dmaaddr_t physaddr;
+ uint32 pktlen;
+ dhd_prot_t *prot = dhd->prot;
+ msgbuf_ring_t * ring = prot->h2dring_rxp_subn;
+ uint8 i = 0;
+ uint16 alloced = 0;
+ unsigned long flags;
+
+ DHD_GENERAL_LOCK(dhd, flags);
+ /* Claim space for 'count' no of messages */
+ msg_start = (void *)dhd_alloc_ring_space(dhd, ring, count, &alloced);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+
+ if (msg_start == NULL) {
+ DHD_INFO(("%s:%d: Rxbufpost Msgbuf Not available\n", __FUNCTION__, __LINE__));
+ return -1;
+ }
+	/* if msg_start != NULL, we should have alloced space for at least 1 item */
+ ASSERT(alloced > 0);
+
+ rxbuf_post_tmp = (uint8*)msg_start;
+
+ /* loop through each message */
+ for (i = 0; i < alloced; i++) {
+ rxbuf_post = (host_rxbuf_post_t *)rxbuf_post_tmp;
+ /* Create a rx buffer */
+ if ((p = PKTGET(dhd->osh, pktsz, FALSE)) == NULL) {
+ DHD_ERROR(("%s:%d: PKTGET for rxbuf failed\n", __FUNCTION__, __LINE__));
+ return -1;
+ }
+
+ pktlen = PKTLEN(dhd->osh, p);
+ physaddr = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0);
+ if (PHYSADDRISZERO(physaddr)) {
+ if (RING_WRITE_PTR(ring) < alloced - i)
+ RING_WRITE_PTR(ring) = RING_MAX_ITEM(ring) - alloced + i;
+ else
+ RING_WRITE_PTR(ring) -= alloced - i;
+ alloced = i;
+ DMA_UNMAP(dhd->osh, physaddr, pktlen, DMA_RX, 0, 0);
+ PKTFREE(dhd->osh, p, FALSE);
+ DHD_ERROR(("Invalid phyaddr 0\n"));
+ ASSERT(0);
+ break;
+ }
+
+ PKTPULL(dhd->osh, p, prot->rx_metadata_offset);
+ pktlen = PKTLEN(dhd->osh, p);
+
+ /* CMN msg header */
+ rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_RXBUF_POST;
+ rxbuf_post->cmn_hdr.if_id = 0;
+
+ /* get the lock before calling NATIVE_TO_PKTID */
+ DHD_GENERAL_LOCK(dhd, flags);
+
+ rxbuf_post->cmn_hdr.request_id =
+ htol32(NATIVE_TO_PKTID(dhd->prot->pktid_map_handle, p, physaddr,
+ pktlen, DMA_RX));
+
+ /* free lock */
+ DHD_GENERAL_UNLOCK(dhd, flags);
+
+ if (rxbuf_post->cmn_hdr.request_id == DHD_PKTID_INVALID) {
+ if (RING_WRITE_PTR(ring) < alloced - i)
+ RING_WRITE_PTR(ring) = RING_MAX_ITEM(ring) - alloced + i;
+ else
+ RING_WRITE_PTR(ring) -= alloced - i;
+ alloced = i;
+ DMA_UNMAP(dhd->osh, physaddr, pktlen, DMA_RX, 0, 0);
+ PKTFREE(dhd->osh, p, FALSE);
+ DHD_ERROR(("Pktid pool depleted.\n"));
+ break;
+ }
+
+ rxbuf_post->data_buf_len = htol16((uint16)pktlen);
+ rxbuf_post->data_buf_addr.high_addr = htol32(PHYSADDRHI(physaddr));
+ rxbuf_post->data_buf_addr.low_addr =
+ htol32(PHYSADDRLO(physaddr) + prot->rx_metadata_offset);
+
+ if (prot->rx_metadata_offset) {
+ rxbuf_post->metadata_buf_len = prot->rx_metadata_offset;
+ rxbuf_post->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(physaddr));
+ rxbuf_post->metadata_buf_addr.low_addr = htol32(PHYSADDRLO(physaddr));
+ } else {
+ rxbuf_post->metadata_buf_len = 0;
+ rxbuf_post->metadata_buf_addr.high_addr = 0;
+ rxbuf_post->metadata_buf_addr.low_addr = 0;
+ }
+
+ /* Move rxbuf_post_tmp to next item */
+ rxbuf_post_tmp = rxbuf_post_tmp + RING_LEN_ITEMS(ring);
+ }
+ /* Update the write pointer in TCM & ring bell */
+ if (alloced > 0)
+ prot_ring_write_complete(dhd, prot->h2dring_rxp_subn, msg_start, alloced);
+
+ return alloced;
+}
+
+static int
+dhd_prot_rxbufpost_ctrl(dhd_pub_t *dhd, bool event_buf)
+{
+ void *p;
+ uint16 pktsz;
+ ioctl_resp_evt_buf_post_msg_t *rxbuf_post;
+ dmaaddr_t physaddr;
+ uint32 pktlen;
+ dhd_prot_t *prot = dhd->prot;
+ uint16 alloced = 0;
+ unsigned long flags;
+
+ if (event_buf) {
+ /* Allocate packet for event buffer post */
+ pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
+ } else {
+ /* Allocate packet for ctrl/ioctl buffer post */
+ pktsz = DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ;
+ }
+
+ if ((p = PKTGET(dhd->osh, pktsz, FALSE)) == NULL) {
+ DHD_ERROR(("%s:%d: PKTGET for ctrl rxbuf failed\n", __FUNCTION__, __LINE__));
+ return -1;
+ }
+
+ pktlen = PKTLEN(dhd->osh, p);
+ physaddr = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0);
+ if (PHYSADDRISZERO(physaddr)) {
+
+ DHD_ERROR(("Invalid phyaddr 0\n"));
+ ASSERT(0);
+ goto free_pkt_return;
+ }
+
+ DHD_GENERAL_LOCK(dhd, flags);
+ rxbuf_post = (ioctl_resp_evt_buf_post_msg_t *)dhd_alloc_ring_space(dhd,
+ prot->h2dring_ctrl_subn, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced);
+ if (rxbuf_post == NULL) {
+ DHD_GENERAL_UNLOCK(dhd, flags);
+ DHD_ERROR(("%s:%d: Ctrl submit Msgbuf Not available to post buffer \n",
+ __FUNCTION__, __LINE__));
+ DMA_UNMAP(dhd->osh, physaddr, pktlen, DMA_RX, 0, 0);
+ goto free_pkt_return;
+ }
+
+ /* CMN msg header */
+ if (event_buf)
+ rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_EVENT_BUF_POST;
+ else
+ rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_IOCTLRESP_BUF_POST;
+ rxbuf_post->cmn_hdr.if_id = 0;
+
+ rxbuf_post->cmn_hdr.request_id =
+ htol32(NATIVE_TO_PKTID(dhd->prot->pktid_map_handle, p, physaddr, pktlen, DMA_RX));
+
+ if (rxbuf_post->cmn_hdr.request_id == DHD_PKTID_INVALID) {
+ if (RING_WRITE_PTR(prot->h2dring_ctrl_subn) == 0)
+ RING_WRITE_PTR(prot->h2dring_ctrl_subn) =
+ RING_MAX_ITEM(prot->h2dring_ctrl_subn) - 1;
+ else
+ RING_WRITE_PTR(prot->h2dring_ctrl_subn)--;
+ DHD_GENERAL_UNLOCK(dhd, flags);
+ DMA_UNMAP(dhd->osh, physaddr, pktlen, DMA_RX, 0, 0);
+ goto free_pkt_return;
+ }
+
+ rxbuf_post->cmn_hdr.flags = 0;
+ rxbuf_post->host_buf_len = htol16((uint16)PKTLEN(dhd->osh, p));
+ rxbuf_post->host_buf_addr.high_addr = htol32(PHYSADDRHI(physaddr));
+ rxbuf_post->host_buf_addr.low_addr = htol32(PHYSADDRLO(physaddr));
+
+ /* Update the write pointer in TCM & ring bell */
+ prot_ring_write_complete(dhd, prot->h2dring_ctrl_subn, rxbuf_post,
+ DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+
+ return 1;
+
+free_pkt_return:
+ PKTFREE(dhd->osh, p, FALSE);
+
+ return -1;
+}
+
+static uint16
+dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, bool event_buf, uint32 max_to_post)
+{
+ uint32 i = 0;
+ int32 ret_val;
+
+ DHD_INFO(("max to post %d, event %d \n", max_to_post, event_buf));
+ while (i < max_to_post) {
+ ret_val = dhd_prot_rxbufpost_ctrl(dhd, event_buf);
+ if (ret_val < 0)
+ break;
+ i++;
+ }
+ DHD_INFO(("posted %d buffers to event_pool/ioctl_resp_pool %d\n", i, event_buf));
+ return (uint16)i;
+}
+
+static int
+dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *dhd)
+{
+ dhd_prot_t *prot = dhd->prot;
+ uint16 retcnt = 0;
+
+ DHD_INFO(("ioctl resp buf post\n"));
+ retcnt = dhd_msgbuf_rxbuf_post_ctrlpath(dhd, FALSE,
+ prot->max_ioctlrespbufpost - prot->cur_ioctlresp_bufs_posted);
+ prot->cur_ioctlresp_bufs_posted += retcnt;
+ return 0;
+}
+
+static int
+dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *dhd)
+{
+ dhd_prot_t *prot = dhd->prot;
+ prot->cur_event_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd, TRUE,
+ prot->max_eventbufpost - prot->cur_event_bufs_posted);
+ return 0;
+}
+
+int BCMFASTPATH
+dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd)
+{
+ dhd_prot_t *prot = dhd->prot;
+
+ /* Process all the messages - DTOH direction */
+ while (TRUE) {
+ uint8 *src_addr;
+ uint16 src_len;
+
+ /* Store current read pointer */
+ /* Read pointer will be updated in prot_early_upd_rxcpln_read_idx */
+ prot_store_rxcpln_read_idx(dhd, prot->d2hring_rx_cpln);
+
+ /* Get the message from ring */
+ src_addr = prot_get_src_addr(dhd, prot->d2hring_rx_cpln, &src_len);
+ if (src_addr == NULL)
+ break;
+
+ /* Prefetch data to populate the cache */
+ OSL_PREFETCH(src_addr);
+
+ if (dhd_prot_process_msgtype(dhd, prot->d2hring_rx_cpln, src_addr,
+ src_len) != BCME_OK) {
+ prot_upd_read_idx(dhd, prot->d2hring_rx_cpln);
+ DHD_ERROR(("%s: Error at process rxpl msgbuf of len %d\n",
+ __FUNCTION__, src_len));
+ }
+ }
+
+ return 0;
+}
+
+void
+dhd_prot_update_txflowring(dhd_pub_t *dhd, uint16 flow_id, void *msgring_info)
+{
+ uint16 r_index = 0;
+ msgbuf_ring_t *ring = (msgbuf_ring_t *)msgring_info;
+
+ /* Update read pointer */
+ if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) {
+ r_index = dhd_get_dmaed_index(dhd, H2D_DMA_READINDX, ring->idx);
+ ring->ringstate->r_offset = r_index;
+ }
+
+ DHD_TRACE(("flow %d, write %d read %d \n\n", flow_id, RING_WRITE_PTR(ring),
+ RING_READ_PTR(ring)));
+
+ /* Need more logic here, but for now use it directly */
+ dhd_bus_schedule_queue(dhd->bus, flow_id, TRUE);
+}
+
+
+int BCMFASTPATH
+dhd_prot_process_msgbuf_txcpl(dhd_pub_t *dhd)
+{
+ dhd_prot_t *prot = dhd->prot;
+
+ /* Process all the messages - DTOH direction */
+ while (TRUE) {
+ uint8 *src_addr;
+ uint16 src_len;
+
+ src_addr = prot_get_src_addr(dhd, prot->d2hring_tx_cpln, &src_len);
+ if (src_addr == NULL)
+ break;
+
+ /* Prefetch data to populate the cache */
+ OSL_PREFETCH(src_addr);
+
+ if (dhd_prot_process_msgtype(dhd, prot->d2hring_tx_cpln, src_addr,
+ src_len) != BCME_OK) {
+ DHD_ERROR(("%s: Error at process txcmpl msgbuf of len %d\n",
+ __FUNCTION__, src_len));
+ }
+
+ /* Write to dngl rd ptr */
+ prot_upd_read_idx(dhd, prot->d2hring_tx_cpln);
+ }
+
+ return 0;
+}
+
+int BCMFASTPATH
+dhd_prot_process_ctrlbuf(dhd_pub_t * dhd)
+{
+ dhd_prot_t *prot = dhd->prot;
+
+ /* Process all the messages - DTOH direction */
+ while (TRUE) {
+ uint8 *src_addr;
+ uint16 src_len;
+ src_addr = prot_get_src_addr(dhd, prot->d2hring_ctrl_cpln, &src_len);
+
+ if (src_addr == NULL) {
+ break;
+ }
+
+ /* Prefetch data to populate the cache */
+ OSL_PREFETCH(src_addr);
+ if (dhd_prot_process_msgtype(dhd, prot->d2hring_ctrl_cpln, src_addr,
+ src_len) != BCME_OK) {
+ DHD_ERROR(("%s: Error at process ctrlmsgbuf of len %d\n",
+ __FUNCTION__, src_len));
+ }
+
+ /* Write to dngl rd ptr */
+ prot_upd_read_idx(dhd, prot->d2hring_ctrl_cpln);
+ }
+
+ return 0;
+}
+
+static int BCMFASTPATH
+dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8* buf, uint16 len)
+{
+ dhd_prot_t *prot = dhd->prot;
+ uint32 cur_dma_len = 0;
+ int ret = BCME_OK;
+
+ DHD_INFO(("%s: process msgbuf of len %d\n", __FUNCTION__, len));
+
+ while (len > 0) {
+ ASSERT(len > (sizeof(cmn_msg_hdr_t) + prot->rx_dataoffset));
+ if (prot->rx_dataoffset) {
+ cur_dma_len = *(uint32 *) buf;
+ ASSERT(cur_dma_len <= len);
+ buf += prot->rx_dataoffset;
+ len -= (uint16)prot->rx_dataoffset;
+ }
+ else {
+ cur_dma_len = len;
+ }
+ if (dhd_process_msgtype(dhd, ring, buf, (uint16)cur_dma_len) != BCME_OK) {
+ DHD_ERROR(("%s: Error at process msg of dmalen %d\n",
+ __FUNCTION__, cur_dma_len));
+ ret = BCME_ERROR;
+ }
+
+ len -= (uint16)cur_dma_len;
+ buf += cur_dma_len;
+ }
+ return ret;
+}
+
+#define PCIE_M2M_D2H_DMA_WAIT_TRIES 256
+#define PCIE_D2H_RESET_MARK 0xdeadbeef
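+/*
+ * After the host consumes a D2H slot it zeroes the message and re-arms the
+ * last 32-bit word with PCIE_D2H_RESET_MARK (see the *_process handlers
+ * below). dhd_msgbuf_d2h_check_cmplt() then polls that word, re-invalidating
+ * the cache, until the dongle's DMA has overwritten the mark with new content.
+ */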
+void dhd_msgbuf_d2h_check_cmplt(msgbuf_ring_t *ring, void *msg)
+{
+ uint32 tries;
+ uint32 *marker = (uint32 *)msg + RING_LEN_ITEMS(ring) / sizeof(uint32) - 1;
+
+ for (tries = 0; tries < PCIE_M2M_D2H_DMA_WAIT_TRIES; tries++) {
+ if (*(volatile uint32 *)marker != PCIE_D2H_RESET_MARK)
+ return;
+ OSL_CACHE_INV(msg, RING_LEN_ITEMS(ring));
+ }
+
+ /* only print error for data ring */
+ if (ring->idx == BCMPCIE_D2H_MSGRING_TX_COMPLETE ||
+ ring->idx == BCMPCIE_D2H_MSGRING_RX_COMPLETE)
+ DHD_ERROR(("%s: stale msgbuf content after %d retries\n",
+ __FUNCTION__, tries));
+}
+
+static int BCMFASTPATH
+dhd_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8* buf, uint16 len)
+{
+ uint16 pktlen = len;
+ uint16 msglen;
+ uint8 msgtype;
+ cmn_msg_hdr_t *msg = NULL;
+ int ret = BCME_OK;
+ uint8 *buf_head = buf;
+
+ ASSERT(ring && ring->ringmem);
+ msglen = RING_LEN_ITEMS(ring);
+ if (msglen == 0) {
+ DHD_ERROR(("%s: ringidx %d, msglen is %d, pktlen is %d \n",
+ __FUNCTION__, ring->idx, msglen, pktlen));
+ return BCME_ERROR;
+ }
+
+ while (pktlen > 0) {
+ msg = (cmn_msg_hdr_t *)buf;
+
+ dhd_msgbuf_d2h_check_cmplt(ring, msg);
+
+ msgtype = msg->msg_type;
+
+ /* Prefetch data to populate the cache */
+ OSL_PREFETCH(buf + msglen);
+
+ DHD_INFO(("msgtype %d, msglen is %d, pktlen is %d \n",
+ msgtype, msglen, pktlen));
+ if (msgtype == MSG_TYPE_LOOPBACK) {
+ bcm_print_bytes("LPBK RESP: ", (uint8 *)msg, msglen);
+ DHD_ERROR((" MSG_TYPE_LOOPBACK, len %d\n", msglen));
+ }
+
+ ASSERT(msgtype < DHD_PROT_FUNCS);
+ if (table_lookup[msgtype]) {
+ table_lookup[msgtype](dhd, buf, msglen);
+ }
+
+ if (pktlen < msglen) {
+ ret = BCME_ERROR;
+ goto done;
+ }
+ pktlen = pktlen - msglen;
+ buf = buf + msglen;
+
+ if (msgtype == MSG_TYPE_RX_CMPLT)
+ prot_early_upd_rxcpln_read_idx(dhd,
+ dhd->prot->d2hring_rx_cpln);
+ }
+done:
+ OSL_CACHE_FLUSH(buf_head, len - pktlen);
+
+#ifdef DHD_RX_CHAINING
+ dhd_rxchain_commit(dhd);
+#endif
+
+ return ret;
+}
+
+static void
+dhd_prot_ringstatus_process(dhd_pub_t *dhd, void * buf, uint16 msglen)
+{
+ pcie_ring_status_t * ring_status = (pcie_ring_status_t *)buf;
+ DHD_ERROR(("ring status: request_id %d, status 0x%04x, flow ring %d, w_offset %d \n",
+ ring_status->cmn_hdr.request_id, ring_status->compl_hdr.status,
+ ring_status->compl_hdr.flow_ring_id, ring_status->write_idx));
+	/* TODO: track this status so it can be paired with the originating request */
+ return;
+}
+
+static void
+dhd_prot_genstatus_process(dhd_pub_t *dhd, void * buf, uint16 msglen)
+{
+ pcie_gen_status_t * gen_status = (pcie_gen_status_t *)buf;
+ DHD_ERROR(("gen status: request_id %d, status 0x%04x, flow ring %d \n",
+ gen_status->cmn_hdr.request_id, gen_status->compl_hdr.status,
+ gen_status->compl_hdr.flow_ring_id));
+
+	/* TODO: track this status so it can be paired with the originating request */
+ return;
+}
+
+static void
+dhd_prot_ioctack_process(dhd_pub_t *dhd, void * buf, uint16 msglen)
+{
+ ioctl_req_ack_msg_t * ioct_ack = (ioctl_req_ack_msg_t *)buf;
+
+ DHD_CTL(("ioctl req ack: request_id %d, status 0x%04x, flow ring %d \n",
+ ioct_ack->cmn_hdr.request_id, ioct_ack->compl_hdr.status,
+ ioct_ack->compl_hdr.flow_ring_id));
+ if (ioct_ack->compl_hdr.status != 0) {
+ DHD_ERROR(("got an error status for the ioctl request...need to handle that\n"));
+ }
+
+ memset(buf, 0, msglen);
+ ioct_ack->marker = PCIE_D2H_RESET_MARK;
+}
+static void
+dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void * buf, uint16 msglen)
+{
+ uint16 status;
+ uint32 resp_len = 0;
+ uint32 pkt_id, xt_id;
+ ioctl_comp_resp_msg_t * ioct_resp = (ioctl_comp_resp_msg_t *)buf;
+
+ resp_len = ltoh16(ioct_resp->resp_len);
+ xt_id = ltoh16(ioct_resp->trans_id);
+ pkt_id = ltoh32(ioct_resp->cmn_hdr.request_id);
+ status = ioct_resp->compl_hdr.status;
+
+ memset(buf, 0, msglen);
+ ioct_resp->marker = PCIE_D2H_RESET_MARK;
+
+ DHD_CTL(("IOCTL_COMPLETE: pktid %x xtid %d status %x resplen %d\n",
+ pkt_id, xt_id, status, resp_len));
+
+ dhd_bus_update_retlen(dhd->bus, sizeof(ioctl_comp_resp_msg_t), pkt_id, status, resp_len);
+ dhd_os_ioctl_resp_wake(dhd);
+}
+
+static void BCMFASTPATH
+dhd_prot_txstatus_process(dhd_pub_t *dhd, void * buf, uint16 msglen)
+{
+ dhd_prot_t *prot = dhd->prot;
+ host_txbuf_cmpl_t * txstatus;
+ unsigned long flags;
+ uint32 pktid;
+ void *pkt;
+
+ /* locks required to protect circular buffer accesses */
+ DHD_GENERAL_LOCK(dhd, flags);
+
+ txstatus = (host_txbuf_cmpl_t *)buf;
+ pktid = ltoh32(txstatus->cmn_hdr.request_id);
+
+ DHD_INFO(("txstatus for pktid 0x%04x\n", pktid));
+ if (prot->active_tx_count)
+ prot->active_tx_count--;
+ else
+ DHD_ERROR(("Extra packets are freed\n"));
+
+ ASSERT(pktid != 0);
+ pkt = dhd_prot_packet_get(dhd, pktid);
+ if (pkt) {
+#if defined(BCMPCIE)
+ dhd_txcomplete(dhd, pkt, true);
+#endif
+
+#if DHD_DBG_SHOW_METADATA
+ if (dhd->prot->tx_metadata_offset && txstatus->metadata_len) {
+ uchar *ptr;
+			/* The Ethernet header of the TX frame was copied and removed.
+			 * Here, move the data pointer forward by the Ethernet header size.
+			 */
+ PKTPULL(dhd->osh, pkt, ETHER_HDR_LEN);
+ ptr = PKTDATA(dhd->osh, pkt) - (dhd->prot->tx_metadata_offset);
+ bcm_print_bytes("txmetadata", ptr, txstatus->metadata_len);
+ dhd_prot_print_metadata(dhd, ptr, txstatus->metadata_len);
+ }
+#endif /* DHD_DBG_SHOW_METADATA */
+ PKTFREE(dhd->osh, pkt, TRUE);
+ }
+
+ memset(buf, 0, msglen);
+ txstatus->marker = PCIE_D2H_RESET_MARK;
+
+ DHD_GENERAL_UNLOCK(dhd, flags);
+
+ return;
+}
+
+static void
+dhd_prot_event_process(dhd_pub_t *dhd, void* buf, uint16 len)
+{
+ wlevent_req_msg_t *evnt;
+ uint32 bufid;
+ uint16 buflen;
+ int ifidx = 0;
+ void* pkt;
+ unsigned long flags;
+ dhd_prot_t *prot = dhd->prot;
+
+ /* Event complete header */
+ evnt = (wlevent_req_msg_t *)buf;
+ bufid = ltoh32(evnt->cmn_hdr.request_id);
+ buflen = ltoh16(evnt->event_data_len);
+
+ ifidx = BCMMSGBUF_API_IFIDX(&evnt->cmn_hdr);
+
+ /* Post another rxbuf to the device */
+ if (prot->cur_event_bufs_posted)
+ prot->cur_event_bufs_posted--;
+ dhd_msgbuf_rxbuf_post_event_bufs(dhd);
+
+ memset(buf, 0, len);
+ evnt->marker = PCIE_D2H_RESET_MARK;
+
+ /* locks required to protect pktid_map */
+ DHD_GENERAL_LOCK(dhd, flags);
+ pkt = dhd_prot_packet_get(dhd, ltoh32(bufid));
+ DHD_GENERAL_UNLOCK(dhd, flags);
+
+ if (!pkt)
+ return;
+
+ /* DMA RX offset updated through shared area */
+ if (dhd->prot->rx_dataoffset)
+ PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset);
+
+ PKTSETLEN(dhd->osh, pkt, buflen);
+
+ dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1);
+}
+
+static void BCMFASTPATH
+dhd_prot_rxcmplt_process(dhd_pub_t *dhd, void* buf, uint16 msglen)
+{
+ host_rxbuf_cmpl_t *rxcmplt_h;
+ uint16 data_offset; /* offset at which data starts */
+ void * pkt;
+ unsigned long flags;
+ static uint8 current_phase = 0;
+ uint ifidx;
+
+ /* RXCMPLT HDR */
+ rxcmplt_h = (host_rxbuf_cmpl_t *)buf;
+
+ /* Post another set of rxbufs to the device */
+ dhd_prot_return_rxbuf(dhd, 1);
+
+ /* offset from which data starts is populated in rxstatus0 */
+ data_offset = ltoh16(rxcmplt_h->data_offset);
+
+ DHD_GENERAL_LOCK(dhd, flags);
+ pkt = dhd_prot_packet_get(dhd, ltoh32(rxcmplt_h->cmn_hdr.request_id));
+ DHD_GENERAL_UNLOCK(dhd, flags);
+
+ if (!pkt) {
+ return;
+ }
+
+ DHD_INFO(("id 0x%04x, offset %d, len %d, idx %d, phase 0x%02x, pktdata %p, metalen %d\n",
+ ltoh32(rxcmplt_h->cmn_hdr.request_id), data_offset, ltoh16(rxcmplt_h->data_len),
+ rxcmplt_h->cmn_hdr.if_id, rxcmplt_h->cmn_hdr.flags, PKTDATA(dhd->osh, pkt),
+ ltoh16(rxcmplt_h->metadata_len)));
+
+#if DHD_DBG_SHOW_METADATA
+ if (dhd->prot->rx_metadata_offset && rxcmplt_h->metadata_len) {
+ uchar *ptr;
+ ptr = PKTDATA(dhd->osh, pkt) - (dhd->prot->rx_metadata_offset);
+ /* header followed by data */
+ bcm_print_bytes("rxmetadata", ptr, rxcmplt_h->metadata_len);
+ dhd_prot_print_metadata(dhd, ptr, rxcmplt_h->metadata_len);
+ }
+#endif /* DHD_DBG_SHOW_METADATA */
+
+ if (current_phase != rxcmplt_h->cmn_hdr.flags) {
+ current_phase = rxcmplt_h->cmn_hdr.flags;
+ }
+ if (rxcmplt_h->flags & BCMPCIE_PKT_FLAGS_FRAME_802_11)
+ DHD_INFO(("D11 frame rxed \n"));
+ /* data_offset from buf start */
+ if (data_offset) {
+ /* data offset given from dongle after split rx */
+ PKTPULL(dhd->osh, pkt, data_offset); /* data offset */
+ } else {
+ /* DMA RX offset updated through shared area */
+ if (dhd->prot->rx_dataoffset)
+ PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset);
+ }
+ /* Actual length of the packet */
+ PKTSETLEN(dhd->osh, pkt, ltoh16(rxcmplt_h->data_len));
+
+ ifidx = rxcmplt_h->cmn_hdr.if_id;
+ memset(buf, 0, msglen);
+ rxcmplt_h->marker = PCIE_D2H_RESET_MARK;
+
+#ifdef DHD_RX_CHAINING
+ /* Chain the packets */
+ dhd_rxchain_frame(dhd, pkt, ifidx);
+#else /* ! DHD_RX_CHAINING */
+	/* No rx chaining: pass the frame up directly */
+ dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1);
+#endif /* ! DHD_RX_CHAINING */
+
+}
+
+/* Stop protocol: sync w/dongle state. */
+void dhd_prot_stop(dhd_pub_t *dhd)
+{
+ /* nothing to do for pcie */
+}
+
+/* Add any protocol-specific data header.
+ * Caller must reserve prot_hdrlen prepend space.
+ */
+void BCMFASTPATH
+dhd_prot_hdrpush(dhd_pub_t *dhd, int ifidx, void *PKTBUF)
+{
+ return;
+}
+
+uint
+dhd_prot_hdrlen(dhd_pub_t *dhd, void *PKTBUF)
+{
+ return 0;
+}
+
+
+#define PKTBUF pktbuf
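+/* PKTBUF is a local alias for the packet argument of dhd_prot_txdata(); it is #undef'd right after that routine */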
+
+int BCMFASTPATH
+dhd_prot_txdata(dhd_pub_t *dhd, void *PKTBUF, uint8 ifidx)
+{
+ unsigned long flags;
+ dhd_prot_t *prot = dhd->prot;
+ host_txbuf_post_t *txdesc = NULL;
+ dmaaddr_t physaddr, meta_physaddr;
+ uint8 *pktdata;
+ uint16 pktlen;
+ uint32 pktid;
+ uint8 prio;
+ uint16 flowid = 0;
+ uint16 alloced = 0;
+ uint16 headroom;
+
+ msgbuf_ring_t *msg_ring;
+
+ if (!dhd_bus_is_txmode_push(dhd->bus)) {
+ flow_ring_table_t *flow_ring_table;
+ flow_ring_node_t *flow_ring_node;
+
+ flowid = (uint16)DHD_PKTTAG_FLOWID((dhd_pkttag_fr_t*)PKTTAG(PKTBUF));
+
+ flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
+ flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid];
+
+ msg_ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
+ } else {
+ msg_ring = prot->h2dring_txp_subn;
+ }
+
+
+
+ DHD_GENERAL_LOCK(dhd, flags);
+
+ /* Create a unique 32-bit packet id */
+ pktid = NATIVE_TO_PKTID_RSV(dhd->prot->pktid_map_handle, PKTBUF);
+ if (pktid == DHD_PKTID_INVALID) {
+ DHD_ERROR(("Pktid pool depleted.\n"));
+		/*
+		 * If we return an error here, the caller will requeue the
+		 * packet. The original skb has not been freed, so the caller
+		 * can safely resubmit the same packet.
+		 */
+ goto err_no_res_pktfree;
+ }
+
+ /* Reserve space in the circular buffer */
+ txdesc = (host_txbuf_post_t *)dhd_alloc_ring_space(dhd,
+ msg_ring, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced);
+ if (txdesc == NULL) {
+ DHD_INFO(("%s:%d: HTOD Msgbuf Not available TxCount = %d\n",
+ __FUNCTION__, __LINE__, prot->active_tx_count));
+ /* Free up the PKTID */
+ PKTID_TO_NATIVE(dhd->prot->pktid_map_handle, pktid, physaddr,
+ pktlen);
+ goto err_no_res_pktfree;
+ }
+
+ /* Extract the data pointer and length information */
+ pktdata = PKTDATA(dhd->osh, PKTBUF);
+ pktlen = (uint16)PKTLEN(dhd->osh, PKTBUF);
+
+ /* Ethernet header: Copy before we cache flush packet using DMA_MAP */
+ bcopy(pktdata, txdesc->txhdr, ETHER_HDR_LEN);
+
+ /* Extract the ethernet header and adjust the data pointer and length */
+ pktdata = PKTPULL(dhd->osh, PKTBUF, ETHER_HDR_LEN);
+ pktlen -= ETHER_HDR_LEN;
+
+ /* Map the data pointer to a DMA-able address */
+ physaddr = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), pktlen, DMA_TX, PKTBUF, 0);
+ if ((PHYSADDRHI(physaddr) == 0) && (PHYSADDRLO(physaddr) == 0)) {
+ DHD_ERROR(("Something really bad, unless 0 is a valid phyaddr\n"));
+ ASSERT(0);
+ }
+
+ /* No need to lock. Save the rest of the packet's metadata */
+ NATIVE_TO_PKTID_SAVE(dhd->prot->pktid_map_handle, PKTBUF, pktid,
+ physaddr, pktlen, DMA_TX);
+
+#ifdef TXP_FLUSH_NITEMS
+ if (msg_ring->pend_items_count == 0)
+ msg_ring->start_addr = (void *)txdesc;
+ msg_ring->pend_items_count++;
+#endif
+
+ /* Form the Tx descriptor message buffer */
+
+ /* Common message hdr */
+ txdesc->cmn_hdr.msg_type = MSG_TYPE_TX_POST;
+ txdesc->cmn_hdr.request_id = htol32(pktid);
+ txdesc->cmn_hdr.if_id = ifidx;
+ txdesc->flags = BCMPCIE_PKT_FLAGS_FRAME_802_3;
+ prio = (uint8)PKTPRIO(PKTBUF);
+
+
+ txdesc->flags |= (prio & 0x7) << BCMPCIE_PKT_FLAGS_PRIO_SHIFT;
+ txdesc->seg_cnt = 1;
+
+ txdesc->data_len = htol16(pktlen);
+ txdesc->data_buf_addr.high_addr = htol32(PHYSADDRHI(physaddr));
+ txdesc->data_buf_addr.low_addr = htol32(PHYSADDRLO(physaddr));
+
+ /* Move data pointer to keep ether header in local PKTBUF for later reference */
+ PKTPUSH(dhd->osh, PKTBUF, ETHER_HDR_LEN);
+
+ /* Handle Tx metadata */
+ headroom = (uint16)PKTHEADROOM(dhd->osh, PKTBUF);
+ if (prot->tx_metadata_offset && (headroom < prot->tx_metadata_offset))
+ DHD_ERROR(("No headroom for Metadata tx %d %d\n",
+ prot->tx_metadata_offset, headroom));
+
+ if (prot->tx_metadata_offset && (headroom >= prot->tx_metadata_offset)) {
+ DHD_TRACE(("Metadata in tx %d\n", prot->tx_metadata_offset));
+
+ /* Adjust the data pointer to account for meta data in DMA_MAP */
+ PKTPUSH(dhd->osh, PKTBUF, prot->tx_metadata_offset);
+ meta_physaddr = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF),
+ prot->tx_metadata_offset, DMA_RX, PKTBUF, 0);
+ if (PHYSADDRISZERO(meta_physaddr)) {
+ DHD_ERROR(("Something really bad, unless 0 is a valid phyaddr\n"));
+ ASSERT(0);
+ }
+
+ /* Adjust the data pointer back to original value */
+ PKTPULL(dhd->osh, PKTBUF, prot->tx_metadata_offset);
+
+ txdesc->metadata_buf_len = prot->tx_metadata_offset;
+ txdesc->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(meta_physaddr));
+ txdesc->metadata_buf_addr.low_addr = htol32(PHYSADDRLO(meta_physaddr));
+ }
+ else {
+ txdesc->metadata_buf_len = htol16(0);
+ txdesc->metadata_buf_addr.high_addr = 0;
+ txdesc->metadata_buf_addr.low_addr = 0;
+ }
+
+
+ DHD_TRACE(("txpost: data_len %d, pktid 0x%04x\n", txdesc->data_len,
+ txdesc->cmn_hdr.request_id));
+
+ /* Update the write pointer in TCM & ring bell */
+#ifdef TXP_FLUSH_NITEMS
+ /* Flush if we have either hit the txp_threshold or if this msg is */
+ /* occupying the last slot in the flow_ring - before wrap around. */
+ if ((msg_ring->pend_items_count == prot->txp_threshold) ||
+ ((uint8 *) txdesc == (uint8 *) HOST_RING_END(msg_ring))) {
+ dhd_prot_txdata_write_flush(dhd, flowid, TRUE);
+ }
+#else
+ prot_ring_write_complete(dhd, msg_ring, txdesc, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
+#endif
+
+ prot->active_tx_count++;
+
+ DHD_GENERAL_UNLOCK(dhd, flags);
+
+ return BCME_OK;
+
+err_no_res_pktfree:
+
+
+
+ DHD_GENERAL_UNLOCK(dhd, flags);
+ return BCME_NORESOURCE;
+
+}
+
+/* Called with the general lock held when in_lock is TRUE; otherwise the lock is taken locally */
+void BCMFASTPATH
+dhd_prot_txdata_write_flush(dhd_pub_t *dhd, uint16 flowid, bool in_lock)
+{
+#ifdef TXP_FLUSH_NITEMS
+ unsigned long flags = 0;
+ flow_ring_table_t *flow_ring_table;
+ flow_ring_node_t *flow_ring_node;
+ msgbuf_ring_t *msg_ring;
+
+
+ if (!in_lock) {
+ DHD_GENERAL_LOCK(dhd, flags);
+ }
+
+ flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
+ flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid];
+ msg_ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
+
+ /* Update the write pointer in TCM & ring bell */
+ if (msg_ring->pend_items_count) {
+ prot_ring_write_complete(dhd, msg_ring, msg_ring->start_addr,
+ msg_ring->pend_items_count);
+ msg_ring->pend_items_count = 0;
+ msg_ring->start_addr = NULL;
+ }
+
+ if (!in_lock) {
+ DHD_GENERAL_UNLOCK(dhd, flags);
+ }
+#endif /* TXP_FLUSH_NITEMS */
+}
+
+#undef PKTBUF /* Only defined in the above routine */
+int BCMFASTPATH
+dhd_prot_hdrpull(dhd_pub_t *dhd, int *ifidx, void *pkt, uchar *buf, uint *len)
+{
+ return 0;
+}
+
+static void BCMFASTPATH
+dhd_prot_return_rxbuf(dhd_pub_t *dhd, uint16 rxcnt)
+{
+ dhd_prot_t *prot = dhd->prot;
+
+ if (prot->rxbufpost >= rxcnt) {
+ prot->rxbufpost -= rxcnt;
+ } else {
+ /* ASSERT(0); */
+ prot->rxbufpost = 0;
+ }
+
+ if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD))
+ dhd_msgbuf_rxbuf_post(dhd);
+
+ return;
+}
+
+
+
+/* Use protocol to issue ioctl to dongle */
+int dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len)
+{
+ dhd_prot_t *prot = dhd->prot;
+ int ret = -1;
+ uint8 action;
+
+ if ((dhd->busstate == DHD_BUS_DOWN) || dhd->hang_was_sent) {
+ DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
+ goto done;
+ }
+
+ if (dhd->busstate == DHD_BUS_SUSPEND) {
+ DHD_ERROR(("%s : bus is suspended\n", __FUNCTION__));
+ goto done;
+ }
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ ASSERT(len <= WLC_IOCTL_MAXLEN);
+
+ if (len > WLC_IOCTL_MAXLEN)
+ goto done;
+
+ if (prot->pending == TRUE) {
+ DHD_ERROR(("packet is pending!!!! cmd=0x%x (%lu) lastcmd=0x%x (%lu)\n",
+ ioc->cmd, (unsigned long)ioc->cmd, prot->lastcmd,
+ (unsigned long)prot->lastcmd));
+ if ((ioc->cmd == WLC_SET_VAR) || (ioc->cmd == WLC_GET_VAR)) {
+ DHD_TRACE(("iovar cmd=%s\n", (char*)buf));
+ }
+ goto done;
+ }
+
+ prot->pending = TRUE;
+ prot->lastcmd = ioc->cmd;
+ action = ioc->set;
+
+
+ if (action & WL_IOCTL_ACTION_SET) {
+ ret = dhd_msgbuf_set_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
+ } else {
+ ret = dhdmsgbuf_query_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
+ if (ret > 0)
+ ioc->used = ret;
+ }
+ /* Too many programs assume ioctl() returns 0 on success */
+ if (ret >= 0)
+ ret = 0;
+ else {
+ DHD_ERROR(("%s: status ret value is %d \n", __FUNCTION__, ret));
+ dhd->dongle_error = ret;
+ }
+
+ /* Intercept the wme_dp ioctl here */
+ if ((!ret) && (ioc->cmd == WLC_SET_VAR) && (!strcmp(buf, "wme_dp"))) {
+ int slen, val = 0;
+
+ slen = strlen("wme_dp") + 1;
+ if (len >= (int)(slen + sizeof(int)))
+ bcopy(((char *)buf + slen), &val, sizeof(int));
+ dhd->wme_dp = (uint8) ltoh32(val);
+ }
+
+
+ prot->pending = FALSE;
+
+done:
+ return ret;
+
+}
+
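+/* Post a MSG_TYPE_LOOPBACK request, filled with a byte pattern, on the control submission ring */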
+int
+dhdmsgbuf_lpbk_req(dhd_pub_t *dhd, uint len)
+{
+ unsigned long flags;
+ dhd_prot_t *prot = dhd->prot;
+ uint16 alloced = 0;
+
+ ioct_reqst_hdr_t *ioct_rqst;
+
+ uint16 hdrlen = sizeof(ioct_reqst_hdr_t);
+ uint16 msglen = len + hdrlen;
+
+
+ if (msglen > MSGBUF_MAX_MSG_SIZE)
+ msglen = MSGBUF_MAX_MSG_SIZE;
+
+ msglen = align(msglen, DMA_ALIGN_LEN);
+
+ DHD_GENERAL_LOCK(dhd, flags);
+ ioct_rqst = (ioct_reqst_hdr_t *)dhd_alloc_ring_space(dhd,
+ prot->h2dring_ctrl_subn, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced);
+
+ if (ioct_rqst == NULL) {
+ DHD_GENERAL_UNLOCK(dhd, flags);
+ return 0;
+ }
+
+ {
+ uint8 *ptr;
+ uint16 i;
+
+ ptr = (uint8 *)ioct_rqst;
+ for (i = 0; i < msglen; i++) {
+ ptr[i] = i % 256;
+ }
+ }
+
+
+ /* Common msg buf hdr */
+ ioct_rqst->msg.msg_type = MSG_TYPE_LOOPBACK;
+ ioct_rqst->msg.if_id = 0;
+
+ bcm_print_bytes("LPBK REQ: ", (uint8 *)ioct_rqst, msglen);
+
+ /* Update the write pointer in TCM & ring bell */
+ prot_ring_write_complete(dhd, prot->h2dring_ctrl_subn, ioct_rqst,
+ DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+
+ return 0;
+}
+
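+/* Free the source and destination DMA buffers used by the dma-xfer loopback test */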
+void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dma)
+{
+ if (dma == NULL)
+ return;
+
+ if (dma->srcmem.va) {
+ DMA_FREE_CONSISTENT(dhd->osh, dma->srcmem.va,
+ dma->len, dma->srcmem.pa, dma->srcmem.dmah);
+ dma->srcmem.va = NULL;
+ }
+ if (dma->destmem.va) {
+ DMA_FREE_CONSISTENT(dhd->osh, dma->destmem.va,
+ dma->len + 8, dma->destmem.pa, dma->destmem.dmah);
+ dma->destmem.va = NULL;
+ }
+}
+
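+/* Allocate a patterned source buffer and a zeroed destination buffer */
+/* for the dma-xfer loopback test */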
+int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len,
+ uint srcdelay, uint destdelay, dhd_dmaxfer_t *dma)
+{
+ uint i;
+
+ if (!dma)
+ return BCME_ERROR;
+
+	/* First free up existing buffers */
+ dmaxfer_free_dmaaddr(dhd, dma);
+
+ dma->srcmem.va = DMA_ALLOC_CONSISTENT(dhd->osh, len, DMA_ALIGN_LEN,
+ &i, &dma->srcmem.pa, &dma->srcmem.dmah);
+ if (dma->srcmem.va == NULL) {
+ return BCME_NOMEM;
+ }
+
+ /* Populate source with a pattern */
+ for (i = 0; i < len; i++) {
+ ((uint8*)dma->srcmem.va)[i] = i % 256;
+ }
+ OSL_CACHE_FLUSH(dma->srcmem.va, len);
+
+ dma->destmem.va = DMA_ALLOC_CONSISTENT(dhd->osh, len + 8, DMA_ALIGN_LEN,
+ &i, &dma->destmem.pa, &dma->destmem.dmah);
+ if (dma->destmem.va == NULL) {
+ DMA_FREE_CONSISTENT(dhd->osh, dma->srcmem.va,
+ dma->len, dma->srcmem.pa, dma->srcmem.dmah);
+ dma->srcmem.va = NULL;
+ return BCME_NOMEM;
+ }
+
+
+ /* Clear the destination buffer */
+	bzero(dma->destmem.va, len + 8);
+	OSL_CACHE_FLUSH(dma->destmem.va, len + 8);
+
+ dma->len = len;
+ dma->srcdelay = srcdelay;
+ dma->destdelay = destdelay;
+
+ return BCME_OK;
+}
+
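+/* Compare source and destination buffers after the dma-xfer completes, then free them */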
+static void
+dhdmsgbuf_dmaxfer_compare(dhd_pub_t *dhd, void * buf, uint16 msglen)
+{
+ dhd_prot_t *prot = dhd->prot;
+
+ OSL_CACHE_INV(prot->dmaxfer.destmem.va, prot->dmaxfer.len);
+ if (prot->dmaxfer.srcmem.va && prot->dmaxfer.destmem.va) {
+ if (memcmp(prot->dmaxfer.srcmem.va,
+ prot->dmaxfer.destmem.va,
+ prot->dmaxfer.len)) {
+ bcm_print_bytes("XFER SRC: ",
+ prot->dmaxfer.srcmem.va, prot->dmaxfer.len);
+ bcm_print_bytes("XFER DEST: ",
+ prot->dmaxfer.destmem.va, prot->dmaxfer.len);
+ }
+ else {
+ DHD_INFO(("DMA successful\n"));
+ }
+ }
+ dmaxfer_free_dmaaddr(dhd, &prot->dmaxfer);
+ dhd->prot->dmaxfer_in_progress = FALSE;
+}
+
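+/* Post a MSG_TYPE_LPBK_DMAXFER request asking the dongle to DMA from source to destination */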
+int
+dhdmsgbuf_dmaxfer_req(dhd_pub_t *dhd, uint len, uint srcdelay, uint destdelay)
+{
+ unsigned long flags;
+ int ret = BCME_OK;
+ dhd_prot_t *prot = dhd->prot;
+ pcie_dma_xfer_params_t *dmap;
+ uint32 xferlen = len > DMA_XFER_LEN_LIMIT ? DMA_XFER_LEN_LIMIT : len;
+ uint16 msglen = sizeof(pcie_dma_xfer_params_t);
+ uint16 alloced = 0;
+
+ if (prot->dmaxfer_in_progress) {
+ DHD_ERROR(("DMA is in progress...\n"));
+ return ret;
+ }
+ prot->dmaxfer_in_progress = TRUE;
+ if ((ret = dmaxfer_prepare_dmaaddr(dhd, xferlen, srcdelay, destdelay,
+ &prot->dmaxfer)) != BCME_OK) {
+ prot->dmaxfer_in_progress = FALSE;
+ return ret;
+ }
+
+
+ if (msglen > MSGBUF_MAX_MSG_SIZE)
+ msglen = MSGBUF_MAX_MSG_SIZE;
+
+ msglen = align(msglen, DMA_ALIGN_LEN);
+
+ DHD_GENERAL_LOCK(dhd, flags);
+ dmap = (pcie_dma_xfer_params_t *)dhd_alloc_ring_space(dhd,
+ prot->h2dring_ctrl_subn, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced);
+
+ if (dmap == NULL) {
+ dmaxfer_free_dmaaddr(dhd, &prot->dmaxfer);
+ prot->dmaxfer_in_progress = FALSE;
+ DHD_GENERAL_UNLOCK(dhd, flags);
+ return BCME_NOMEM;
+ }
+
+ /* Common msg buf hdr */
+ dmap->cmn_hdr.msg_type = MSG_TYPE_LPBK_DMAXFER;
+ dmap->cmn_hdr.request_id = 0x1234;
+
+ dmap->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.srcmem.pa));
+ dmap->host_input_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.srcmem.pa));
+ dmap->host_ouput_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.destmem.pa));
+ dmap->host_ouput_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.destmem.pa));
+ dmap->xfer_len = htol32(prot->dmaxfer.len);
+ dmap->srcdelay = htol32(prot->dmaxfer.srcdelay);
+ dmap->destdelay = htol32(prot->dmaxfer.destdelay);
+
+ /* Update the write pointer in TCM & ring bell */
+ prot_ring_write_complete(dhd, prot->h2dring_ctrl_subn, dmap,
+ DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+
+ DHD_ERROR(("DMA Started...\n"));
+
+ return BCME_OK;
+}
+
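+/* Issue a query (GET) ioctl: answer bcmerror/bcmerrorstr from the local cache, */
+/* otherwise post the request on the control ring and wait for the completion */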
+static int
+dhdmsgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
+{
+ dhd_prot_t *prot = dhd->prot;
+
+ int ret = 0;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ /* Respond "bcmerror" and "bcmerrorstr" with local cache */
+ if (cmd == WLC_GET_VAR && buf)
+ {
+ if (!strcmp((char *)buf, "bcmerrorstr"))
+ {
+ strncpy((char *)buf, bcmerrorstr(dhd->dongle_error), BCME_STRLEN);
+ goto done;
+ }
+ else if (!strcmp((char *)buf, "bcmerror"))
+ {
+ *(int *)buf = dhd->dongle_error;
+ goto done;
+ }
+ }
+
+ ret = dhd_fillup_ioct_reqst_ptrbased(dhd, (uint16)len, cmd, buf, ifidx);
+
+	DHD_INFO(("ACTION %d ifidx %d cmd %d len %d \n",
+ action, ifidx, cmd, len));
+
+ /* wait for interrupt and get first fragment */
+ ret = dhdmsgbuf_cmplt(dhd, prot->reqid, len, buf, prot->retbuf.va);
+
+done:
+ return ret;
+}
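+/* Wait for the ioctl completion message, copy the response payload into buf */
+/* and return the completion status reported by the dongle */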
+static int
+dhdmsgbuf_cmplt(dhd_pub_t *dhd, uint32 id, uint32 len, void* buf, void* retbuf)
+{
+ dhd_prot_t *prot = dhd->prot;
+ ioctl_comp_resp_msg_t ioct_resp;
+ void* pkt;
+ int retlen;
+ int msgbuf_len = 0;
+ unsigned long flags;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (prot->cur_ioctlresp_bufs_posted)
+ prot->cur_ioctlresp_bufs_posted--;
+
+ dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
+
+ retlen = dhd_bus_rxctl(dhd->bus, (uchar*)&ioct_resp, msgbuf_len);
+ if (retlen <= 0) {
+ DHD_ERROR(("IOCTL request failed with error code %d\n", retlen));
+ return retlen;
+ }
+ DHD_INFO(("ioctl resp retlen %d status %d, resp_len %d, pktid %d\n",
+ retlen, ioct_resp.compl_hdr.status, ioct_resp.resp_len,
+ ioct_resp.cmn_hdr.request_id));
+ if (ioct_resp.resp_len != 0) {
+ DHD_GENERAL_LOCK(dhd, flags);
+ pkt = dhd_prot_packet_get(dhd, ioct_resp.cmn_hdr.request_id);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+
+ DHD_INFO(("ioctl ret buf %p retlen %d status %x \n", pkt, retlen,
+ ioct_resp.compl_hdr.status));
+ /* get ret buf */
+ if ((buf) && (pkt)) {
+ /* bcopy(PKTDATA(dhd->osh, pkt), buf, ioct_resp.resp_len); */
+ /* ioct_resp.resp_len could have been changed to make it > 8 bytes */
+ bcopy(PKTDATA(dhd->osh, pkt), buf, len);
+ }
+ if (pkt) {
+ PKTFREE(dhd->osh, pkt, FALSE);
+ }
+ } else {
+ DHD_GENERAL_LOCK(dhd, flags);
+ dhd_prot_packet_free(dhd, ioct_resp.cmn_hdr.request_id);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+ }
+
+ return (int)(ioct_resp.compl_hdr.status);
+}
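+/* Issue a set ioctl: post the request on the control ring and wait for the completion */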
+static int
+dhd_msgbuf_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
+{
+ dhd_prot_t *prot = dhd->prot;
+
+ int ret = 0;
+
+ DHD_TRACE(("%s: Enter \n", __FUNCTION__));
+ DHD_TRACE(("%s: cmd %d len %d\n", __FUNCTION__, cmd, len));
+
+ if (dhd->busstate == DHD_BUS_DOWN) {
+ DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
+ return -EIO;
+ }
+
+ /* don't talk to the dongle if fw is about to be reloaded */
+ if (dhd->hang_was_sent) {
+ DHD_ERROR(("%s: HANG was sent up earlier. Not talking to the chip\n",
+ __FUNCTION__));
+ return -EIO;
+ }
+
+ /* Fill up msgbuf for ioctl req */
+ ret = dhd_fillup_ioct_reqst_ptrbased(dhd, (uint16)len, cmd, buf, ifidx);
+
+	DHD_INFO(("ACTION %d ifidx %d cmd %d len %d \n",
+ action, ifidx, cmd, len));
+
+ ret = dhdmsgbuf_cmplt(dhd, prot->reqid, len, buf, prot->retbuf.va);
+
+ return ret;
+}
+/* Handles a protocol control response asynchronously */
+int dhd_prot_ctl_complete(dhd_pub_t *dhd)
+{
+ return 0;
+}
+
+/* Check for and handle local prot-specific iovar commands */
+int dhd_prot_iovar_op(dhd_pub_t *dhd, const char *name,
+ void *params, int plen, void *arg, int len, bool set)
+{
+ return BCME_UNSUPPORTED;
+}
+
+/* Add prot dump output to a buffer */
+void dhd_prot_dump(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
+{
+
+}
+
+/* Update local copy of dongle statistics */
+void dhd_prot_dstats(dhd_pub_t *dhd)
+{
+ return;
+}
+
+int dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf,
+ uint reorder_info_len, void **pkt, uint32 *free_buf_count)
+{
+ return 0;
+}
+/* post a dummy message to interrupt dongle */
+/* used to process cons commands */
+int
+dhd_post_dummy_msg(dhd_pub_t *dhd)
+{
+ unsigned long flags;
+ hostevent_hdr_t *hevent = NULL;
+ uint16 alloced = 0;
+
+ dhd_prot_t *prot = dhd->prot;
+
+ DHD_GENERAL_LOCK(dhd, flags);
+ hevent = (hostevent_hdr_t *)dhd_alloc_ring_space(dhd,
+ prot->h2dring_ctrl_subn, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced);
+
+ if (hevent == NULL) {
+ DHD_GENERAL_UNLOCK(dhd, flags);
+ return -1;
+ }
+
+ /* CMN msg header */
+ hevent->msg.msg_type = MSG_TYPE_HOST_EVNT;
+ hevent->msg.if_id = 0;
+
+ /* Event payload */
+ hevent->evnt_pyld = htol32(HOST_EVENT_CONS_CMD);
+
+	/* Since we are filling the data directly into the bufptr obtained
+ * from the msgbuf, we can directly call the write_complete
+ */
+ prot_ring_write_complete(dhd, prot->h2dring_ctrl_subn, hevent,
+ DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+
+ return 0;
+}
+
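+/* Reserve space for nitems on a submission ring; if the ring looks full, */
+/* refresh the cached read index (DMA'd or from TCM) and retry once */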
+static void * BCMFASTPATH
+dhd_alloc_ring_space(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint16 nitems, uint16 * alloced)
+{
+ void * ret_buf;
+ uint16 r_index = 0;
+
+ /* Alloc space for nitems in the ring */
+ ret_buf = prot_get_ring_space(ring, nitems, alloced);
+
+ if (ret_buf == NULL) {
+ /* if alloc failed , invalidate cached read ptr */
+ if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) {
+ r_index = dhd_get_dmaed_index(dhd, H2D_DMA_READINDX, ring->idx);
+ ring->ringstate->r_offset = r_index;
+ } else
+ dhd_bus_cmn_readshared(dhd->bus, &(RING_READ_PTR(ring)),
+ RING_READ_PTR, ring->idx);
+
+ /* Try allocating once more */
+ ret_buf = prot_get_ring_space(ring, nitems, alloced);
+
+ if (ret_buf == NULL) {
+ DHD_INFO(("%s: Ring space not available \n", ring->name));
+ return NULL;
+ }
+ }
+
+ /* Return alloced space */
+ return ret_buf;
+}
+
+#define DHD_IOCTL_REQ_PKTID 0xFFFE
+
+/* Non-inline ioctl request */
+/* Form an ioctl request first as per the ioctptr_reqst_hdr_t header in the circular buffer */
+/* Form a separate request buffer where a 4-byte cmn header is added in the front */
+/* buf contents from the parent function are copied to the remaining section of this buffer */
+static int
+dhd_fillup_ioct_reqst_ptrbased(dhd_pub_t *dhd, uint16 len, uint cmd, void* buf, int ifidx)
+{
+ dhd_prot_t *prot = dhd->prot;
+ ioctl_req_msg_t *ioct_rqst;
+ void * ioct_buf; /* For ioctl payload */
+ uint16 rqstlen, resplen;
+ unsigned long flags;
+ uint16 alloced = 0;
+
+ rqstlen = len;
+ resplen = len;
+
+ /* Limit ioct request to MSGBUF_MAX_MSG_SIZE bytes including hdrs */
+ /* 8K allocation of dongle buffer fails */
+	/* dhd doesn't give separate input & output buf lens */
+ /* so making the assumption that input length can never be more than 1.5k */
+ rqstlen = MIN(rqstlen, MSGBUF_MAX_MSG_SIZE);
+
+ DHD_GENERAL_LOCK(dhd, flags);
+ /* Request for cbuf space */
+ ioct_rqst = (ioctl_req_msg_t*)dhd_alloc_ring_space(dhd, prot->h2dring_ctrl_subn,
+ DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced);
+ if (ioct_rqst == NULL) {
+ DHD_ERROR(("couldn't allocate space on msgring to send ioctl request\n"));
+ DHD_GENERAL_UNLOCK(dhd, flags);
+ return -1;
+ }
+
+ /* Common msg buf hdr */
+ ioct_rqst->cmn_hdr.msg_type = MSG_TYPE_IOCTLPTR_REQ;
+ ioct_rqst->cmn_hdr.if_id = (uint8)ifidx;
+ ioct_rqst->cmn_hdr.flags = 0;
+ ioct_rqst->cmn_hdr.request_id = DHD_IOCTL_REQ_PKTID;
+
+ ioct_rqst->cmd = htol32(cmd);
+ ioct_rqst->output_buf_len = htol16(resplen);
+ ioct_rqst->trans_id = prot->ioctl_trans_id ++;
+
+ /* populate ioctl buffer info */
+ ioct_rqst->input_buf_len = htol16(rqstlen);
+ ioct_rqst->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->ioctbuf.pa));
+ ioct_rqst->host_input_buf_addr.low = htol32(PHYSADDRLO(prot->ioctbuf.pa));
+ /* copy ioct payload */
+ ioct_buf = (void *) prot->ioctbuf.va;
+
+ if (buf)
+ memcpy(ioct_buf, buf, len);
+
+ OSL_CACHE_FLUSH((void *) prot->ioctbuf.va, len);
+
+ if ((ulong)ioct_buf % DMA_ALIGN_LEN)
+ DHD_ERROR(("host ioct address unaligned !!!!! \n"));
+
+ DHD_CTL(("submitted IOCTL request request_id %d, cmd %d, output_buf_len %d, tx_id %d\n",
+ ioct_rqst->cmn_hdr.request_id, cmd, ioct_rqst->output_buf_len,
+ ioct_rqst->trans_id));
+
+ /* upd wrt ptr and raise interrupt */
+ prot_ring_write_complete(dhd, prot->h2dring_ctrl_subn, ioct_rqst,
+ DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+
+ return 0;
+}
+
+/* Packet to PacketID mapper */
+typedef struct {
+ ulong native;
+ dmaaddr_t pa;
+ uint32 pa_len;
+ uchar dma;
+} pktid_t;
+
+typedef struct {
+ void *osh;
+ void *mwbmap_hdl;
+ pktid_t *pktid_list;
+ uint32 count;
+} pktid_map_t;
+
+
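+/* Allocate the packet ID mapper: a multi-word bitmap allocator */
+/* plus a table of packet pointers and their DMA mappings */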
+void *pktid_map_init(void *osh, uint32 count)
+{
+ pktid_map_t *handle;
+
+ handle = (pktid_map_t *) MALLOC(osh, sizeof(pktid_map_t));
+ if (handle == NULL) {
+ printf("%s:%d: MALLOC failed for size %d\n",
+ __FUNCTION__, __LINE__, (uint32) sizeof(pktid_map_t));
+ return NULL;
+ }
+ handle->osh = osh;
+ handle->count = count;
+ handle->mwbmap_hdl = bcm_mwbmap_init(osh, count);
+ if (handle->mwbmap_hdl == NULL) {
+ printf("%s:%d: bcm_mwbmap_init failed for count %d\n",
+ __FUNCTION__, __LINE__, count);
+ MFREE(osh, handle, sizeof(pktid_map_t));
+ return NULL;
+ }
+
+ handle->pktid_list = (pktid_t *) MALLOC(osh, sizeof(pktid_t) * (count+1));
+ if (handle->pktid_list == NULL) {
+ printf("%s:%d: MALLOC failed for count %d / total = %d\n",
+ __FUNCTION__, __LINE__, count, (uint32) sizeof(pktid_t) * count);
+ bcm_mwbmap_fini(osh, handle->mwbmap_hdl);
+ MFREE(osh, handle, sizeof(pktid_map_t));
+ return NULL;
+ }
+
+ return handle;
+}
+
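+/* Tear down the packet ID mapper, unmapping and freeing any packets still held in the table */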
+void
+pktid_map_uninit(void *pktid_map_handle)
+{
+ pktid_map_t *handle = (pktid_map_t *) pktid_map_handle;
+ uint32 ix;
+
+ if (handle != NULL) {
+ void *osh = handle->osh;
+ for (ix = 0; ix < MAX_PKTID_ITEMS; ix++)
+ {
+ if (!bcm_mwbmap_isfree(handle->mwbmap_hdl, ix)) {
+ /* Mark the slot as free */
+ bcm_mwbmap_free(handle->mwbmap_hdl, ix);
+				/*
+				 * Here we can do DMA unmapping for 32-bit as well.
+				 * Since this is in the removal path, it will not affect performance.
+				 */
+ DMA_UNMAP(osh, handle->pktid_list[ix+1].pa,
+ (uint) handle->pktid_list[ix+1].pa_len,
+ handle->pktid_list[ix+1].dma, 0, 0);
+ PKTFREE(osh, (unsigned long*)handle->pktid_list[ix+1].native, TRUE);
+ }
+ }
+ bcm_mwbmap_fini(osh, handle->mwbmap_hdl);
+ MFREE(osh, handle->pktid_list, sizeof(pktid_t) * (handle->count+1));
+ MFREE(osh, handle, sizeof(pktid_map_t));
+ }
+ return;
+}
+
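+/* Allocate a unique non-zero packet ID and record the packet and its DMA mapping against it */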
+uint32 BCMFASTPATH
+pktid_map_unique(void *pktid_map_handle, void *pkt, dmaaddr_t physaddr, uint32 physlen, uint32 dma)
+{
+ uint32 id;
+ pktid_map_t *handle = (pktid_map_t *) pktid_map_handle;
+
+ if (handle == NULL) {
+ printf("%s:%d: Error !!! pktid_map_unique called without initing pktid_map\n",
+ __FUNCTION__, __LINE__);
+ return 0;
+ }
+ id = bcm_mwbmap_alloc(handle->mwbmap_hdl);
+ if (id == BCM_MWBMAP_INVALID_IDX) {
+ printf("%s:%d: bcm_mwbmap_alloc failed. Free Count = %d\n",
+ __FUNCTION__, __LINE__, bcm_mwbmap_free_cnt(handle->mwbmap_hdl));
+ return 0;
+ }
+
+ /* id=0 is invalid as we use this for error checking in the dongle */
+ id += 1;
+ handle->pktid_list[id].native = (ulong) pkt;
+ handle->pktid_list[id].pa = physaddr;
+ handle->pktid_list[id].pa_len = (uint32) physlen;
+ handle->pktid_list[id].dma = (uchar)dma;
+
+ return id;
+}
+
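+/* Look up a packet ID, return the saved packet and DMA mapping, and release the ID */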
+void * BCMFASTPATH
+pktid_get_packet(void *pktid_map_handle, uint32 id, dmaaddr_t *physaddr, uint32 *physlen)
+{
+ void *native = NULL;
+ pktid_map_t *handle = (pktid_map_t *) pktid_map_handle;
+ if (handle == NULL) {
+ printf("%s:%d: Error !!! pktid_get_packet called without initing pktid_map\n",
+ __FUNCTION__, __LINE__);
+ return NULL;
+ }
+
+ /* Debug check */
+ if (bcm_mwbmap_isfree(handle->mwbmap_hdl, (id-1))) {
+ printf("%s:%d: Error !!!. slot (%d/0x%04x) free but the app is using it.\n",
+ __FUNCTION__, __LINE__, (id-1), (id-1));
+ return NULL;
+ }
+
+ native = (void *) handle->pktid_list[id].native;
+ *physaddr = handle->pktid_list[id].pa;
+ *physlen = (uint32) handle->pktid_list[id].pa_len;
+
+ /* Mark the slot as free */
+ bcm_mwbmap_free(handle->mwbmap_hdl, (id-1));
+
+ return native;
+}
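+/* Allocate a message ring: ring descriptor, DMA-coherent ring memory */
+/* (seeded with reset markers) and ring state */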
+static msgbuf_ring_t*
+prot_ring_attach(dhd_prot_t * prot, char* name, uint16 max_item, uint16 len_item, uint16 ringid)
+{
+ uint alloced = 0;
+ msgbuf_ring_t *ring;
+ dmaaddr_t physaddr;
+ uint16 size, cnt;
+ uint32 *marker;
+
+ ASSERT(name);
+ BCM_REFERENCE(physaddr);
+
+ /* allocate ring info */
+ ring = MALLOC(prot->osh, sizeof(msgbuf_ring_t));
+ if (ring == NULL) {
+ ASSERT(0);
+ return NULL;
+ }
+ bzero(ring, sizeof(*ring));
+
+ /* Init name */
+ strncpy(ring->name, name, sizeof(ring->name) - 1);
+
+ /* Ringid in the order given in bcmpcie.h */
+ ring->idx = ringid;
+
+ /* init ringmem */
+ ring->ringmem = MALLOC(prot->osh, sizeof(ring_mem_t));
+ if (ring->ringmem == NULL)
+ goto fail;
+ bzero(ring->ringmem, sizeof(*ring->ringmem));
+
+ ring->ringmem->max_item = max_item;
+ ring->ringmem->len_items = len_item;
+ size = max_item * len_item;
+
+	/* Ring memory allocation */
+ ring->ring_base.va = DMA_ALLOC_CONSISTENT(prot->osh, size, DMA_ALIGN_LEN,
+ &alloced, &ring->ring_base.pa, &ring->ring_base.dmah);
+
+ if (ring->ring_base.va == NULL)
+ goto fail;
+ ring->ringmem->base_addr.high_addr = htol32(PHYSADDRHI(ring->ring_base.pa));
+ ring->ringmem->base_addr.low_addr = htol32(PHYSADDRLO(ring->ring_base.pa));
+
+ ASSERT(MODX((unsigned long)ring->ring_base.va, DMA_ALIGN_LEN) == 0);
+ bzero(ring->ring_base.va, size);
+ for (cnt = 0; cnt < max_item; cnt++) {
+ marker = (uint32 *)ring->ring_base.va +
+ (cnt + 1) * len_item / sizeof(uint32) - 1;
+ *marker = PCIE_D2H_RESET_MARK;
+ }
+ OSL_CACHE_FLUSH((void *) ring->ring_base.va, size);
+
+ /* Ring state init */
+ ring->ringstate = MALLOC(prot->osh, sizeof(ring_state_t));
+ if (ring->ringstate == NULL)
+ goto fail;
+ bzero(ring->ringstate, sizeof(*ring->ringstate));
+
+ DHD_INFO(("RING_ATTACH : %s Max item %d len item %d total size %d "
+ "ring start %p buf phys addr %x:%x \n",
+ ring->name, ring->ringmem->max_item, ring->ringmem->len_items,
+ size, ring->ring_base.va, ring->ringmem->base_addr.high_addr,
+ ring->ringmem->base_addr.low_addr));
+ return ring;
+fail:
+	if (ring->ring_base.va) {
+		PHYSADDRHISET(physaddr, ring->ringmem->base_addr.high_addr);
+		PHYSADDRLOSET(physaddr, ring->ringmem->base_addr.low_addr);
+		size = ring->ringmem->max_item * ring->ringmem->len_items;
+		DMA_FREE_CONSISTENT(prot->osh, ring->ring_base.va, size, ring->ring_base.pa, NULL);
+		ring->ring_base.va = NULL;
+	}
+ if (ring->ringmem)
+ MFREE(prot->osh, ring->ringmem, sizeof(ring_mem_t));
+ MFREE(prot->osh, ring, sizeof(msgbuf_ring_t));
+ ASSERT(0);
+ return NULL;
+}
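+/* Publish the ring base address, depth and item size to the dongle via shared memory */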
+static void
+dhd_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring)
+{
+ /* update buffer address of ring */
+ dhd_bus_cmn_writeshared(dhd->bus, &ring->ringmem->base_addr,
+ sizeof(ring->ringmem->base_addr), RING_BUF_ADDR, ring->idx);
+
+ /* Update max items possible in ring */
+ dhd_bus_cmn_writeshared(dhd->bus, &ring->ringmem->max_item,
+ sizeof(ring->ringmem->max_item), RING_MAX_ITEM, ring->idx);
+
+ /* Update length of each item in the ring */
+ dhd_bus_cmn_writeshared(dhd->bus, &ring->ringmem->len_items,
+ sizeof(ring->ringmem->len_items), RING_LEN_ITEMS, ring->idx);
+
+ /* ring inited */
+ ring->inited = TRUE;
+}
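+/* Release a message ring: DMA ring memory, ring memory descriptor and ring state */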
+static void
+dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t * ring)
+{
+ dmaaddr_t phyaddr;
+ uint16 size;
+ dhd_prot_t *prot = dhd->prot;
+
+ BCM_REFERENCE(phyaddr);
+
+ if (ring == NULL)
+ return;
+
+
+ if (ring->ringmem == NULL) {
+ DHD_ERROR(("%s: ring->ringmem is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ ring->inited = FALSE;
+
+ PHYSADDRHISET(phyaddr, ring->ringmem->base_addr.high_addr);
+ PHYSADDRLOSET(phyaddr, ring->ringmem->base_addr.low_addr);
+ size = ring->ringmem->max_item * ring->ringmem->len_items;
+ /* Free up ring */
+ if (ring->ring_base.va) {
+ DMA_FREE_CONSISTENT(prot->osh, ring->ring_base.va, size, ring->ring_base.pa,
+ ring->ring_base.dmah);
+ ring->ring_base.va = NULL;
+ }
+
+ /* Free up ring mem space */
+ if (ring->ringmem) {
+ MFREE(prot->osh, ring->ringmem, sizeof(ring_mem_t));
+ ring->ringmem = NULL;
+ }
+
+ /* Free up ring state info */
+ if (ring->ringstate) {
+ MFREE(prot->osh, ring->ringstate, sizeof(ring_state_t));
+ ring->ringstate = NULL;
+ }
+
+ /* free up ring info */
+ MFREE(prot->osh, ring, sizeof(msgbuf_ring_t));
+}
+/* Assumes only one index is updated at a time */
+static void *BCMFASTPATH
+prot_get_ring_space(msgbuf_ring_t *ring, uint16 nitems, uint16 * alloced)
+{
+ void *ret_ptr = NULL;
+ uint16 ring_avail_cnt;
+
+ ASSERT(nitems <= RING_MAX_ITEM(ring));
+
+ ring_avail_cnt = CHECK_WRITE_SPACE(RING_READ_PTR(ring), RING_WRITE_PTR(ring),
+ RING_MAX_ITEM(ring));
+
+ if (ring_avail_cnt == 0) {
+ DHD_INFO(("RING space not available on ring %s for %d items \n",
+ ring->name, nitems));
+ DHD_INFO(("write %d read %d \n\n", RING_WRITE_PTR(ring),
+ RING_READ_PTR(ring)));
+ return NULL;
+ }
+ *alloced = MIN(nitems, ring_avail_cnt);
+
+ /* Return next available space */
+ ret_ptr = (char*)HOST_RING_BASE(ring) + (RING_WRITE_PTR(ring) * RING_LEN_ITEMS(ring));
+
+ /* Update write pointer */
+ if ((RING_WRITE_PTR(ring) + *alloced) == RING_MAX_ITEM(ring))
+ RING_WRITE_PTR(ring) = 0;
+ else if ((RING_WRITE_PTR(ring) + *alloced) < RING_MAX_ITEM(ring))
+ RING_WRITE_PTR(ring) += *alloced;
+ else {
+ /* Should never hit this */
+ ASSERT(0);
+ return NULL;
+ }
+
+ return ret_ptr;
+}
+
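+/* Flush the newly written items, publish the updated write index */
+/* (DMA'd host memory or TCM) and ring the H2D doorbell */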
+static void BCMFASTPATH
+prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p, uint16 nitems)
+{
+ dhd_prot_t *prot = dhd->prot;
+
+ /* cache flush */
+ OSL_CACHE_FLUSH(p, RING_LEN_ITEMS(ring) * nitems);
+
+ /* update write pointer */
+ /* If dma'ing h2d indices are supported
+ * update the values in the host memory
+ * o/w update the values in TCM
+ */
+ if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support))
+ dhd_set_dmaed_index(dhd, H2D_DMA_WRITEINDX,
+ ring->idx, (uint16)RING_WRITE_PTR(ring));
+ else
+ dhd_bus_cmn_writeshared(dhd->bus, &(RING_WRITE_PTR(ring)),
+ sizeof(uint16), RING_WRITE_PTR, ring->idx);
+
+ /* raise h2d interrupt */
+ prot->mb_ring_fn(dhd->bus, RING_WRITE_PTR(ring));
+}
+
+/* If dma'ing h2d indices are supported
+ * this function updates the indices in
+ * the host memory
+ */
+static void
+dhd_set_dmaed_index(dhd_pub_t *dhd, uint8 type, uint16 ringid, uint16 new_index)
+{
+ dhd_prot_t *prot = dhd->prot;
+
+ uint32 *ptr = NULL;
+ uint16 offset = 0;
+
+ switch (type) {
+ case H2D_DMA_WRITEINDX:
+ ptr = (uint32 *)(prot->h2d_dma_writeindx_buf.va);
+
+ /* Flow-Rings start at Id BCMPCIE_COMMON_MSGRINGS
+ * but in host memory their indices start
+ * after H2D Common Rings
+ */
+ if (ringid >= BCMPCIE_COMMON_MSGRINGS)
+ offset = ringid - BCMPCIE_COMMON_MSGRINGS +
+ BCMPCIE_H2D_COMMON_MSGRINGS;
+ else
+ offset = ringid;
+ ptr += offset;
+
+ *ptr = htol16(new_index);
+
+ /* cache flush */
+ OSL_CACHE_FLUSH((void *)prot->h2d_dma_writeindx_buf.va,
+ prot->h2d_dma_writeindx_buf_len);
+
+ break;
+
+ case D2H_DMA_READINDX:
+ ptr = (uint32 *)(prot->d2h_dma_readindx_buf.va);
+
+			/* D2H Common Rings start at Id BCMPCIE_H2D_COMMON_MSGRINGS */
+ offset = ringid - BCMPCIE_H2D_COMMON_MSGRINGS;
+ ptr += offset;
+
+ *ptr = htol16(new_index);
+ /* cache flush */
+ OSL_CACHE_FLUSH((void *)prot->d2h_dma_readindx_buf.va,
+ prot->d2h_dma_readindx_buf_len);
+
+ break;
+
+ default:
+ DHD_ERROR(("%s: Invalid option for DMAing read/write index\n",
+ __FUNCTION__));
+
+ break;
+ }
+ DHD_TRACE(("%s: Data 0x%p, ringId %d, new_index %d\n",
+ __FUNCTION__, ptr, ringid, new_index));
+}
+
+
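+/* Read a ring read/write index from the host memory blocks */
+/* into which the dongle DMAs the index updates */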
+static uint16
+dhd_get_dmaed_index(dhd_pub_t *dhd, uint8 type, uint16 ringid)
+{
+ uint32 *ptr = NULL;
+ uint16 data = 0;
+ uint16 offset = 0;
+
+ switch (type) {
+ case H2D_DMA_WRITEINDX:
+ OSL_CACHE_INV((void *)dhd->prot->h2d_dma_writeindx_buf.va,
+ dhd->prot->h2d_dma_writeindx_buf_len);
+ ptr = (uint32 *)(dhd->prot->h2d_dma_writeindx_buf.va);
+
+ /* Flow-Rings start at Id BCMPCIE_COMMON_MSGRINGS
+ * but in host memory their indices start
+ * after H2D Common Rings
+ */
+ if (ringid >= BCMPCIE_COMMON_MSGRINGS)
+ offset = ringid - BCMPCIE_COMMON_MSGRINGS +
+ BCMPCIE_H2D_COMMON_MSGRINGS;
+ else
+ offset = ringid;
+ ptr += offset;
+
+ data = LTOH16((uint16)*ptr);
+ break;
+
+ case H2D_DMA_READINDX:
+ OSL_CACHE_INV((void *)dhd->prot->h2d_dma_readindx_buf.va,
+ dhd->prot->h2d_dma_readindx_buf_len);
+ ptr = (uint32 *)(dhd->prot->h2d_dma_readindx_buf.va);
+
+ /* Flow-Rings start at Id BCMPCIE_COMMON_MSGRINGS
+ * but in host memory their indices start
+ * after H2D Common Rings
+ */
+ if (ringid >= BCMPCIE_COMMON_MSGRINGS)
+ offset = ringid - BCMPCIE_COMMON_MSGRINGS +
+ BCMPCIE_H2D_COMMON_MSGRINGS;
+ else
+ offset = ringid;
+ ptr += offset;
+
+ data = LTOH16((uint16)*ptr);
+ break;
+
+ case D2H_DMA_WRITEINDX:
+ OSL_CACHE_INV((void *)dhd->prot->d2h_dma_writeindx_buf.va,
+ dhd->prot->d2h_dma_writeindx_buf_len);
+ ptr = (uint32 *)(dhd->prot->d2h_dma_writeindx_buf.va);
+
+			/* D2H Common Rings start at Id BCMPCIE_H2D_COMMON_MSGRINGS */
+ offset = ringid - BCMPCIE_H2D_COMMON_MSGRINGS;
+ ptr += offset;
+
+ data = LTOH16((uint16)*ptr);
+ break;
+
+ case D2H_DMA_READINDX:
+ OSL_CACHE_INV((void *)dhd->prot->d2h_dma_readindx_buf.va,
+ dhd->prot->d2h_dma_readindx_buf_len);
+ ptr = (uint32 *)(dhd->prot->d2h_dma_readindx_buf.va);
+
+			/* D2H Common Rings start at Id BCMPCIE_H2D_COMMON_MSGRINGS */
+ offset = ringid - BCMPCIE_H2D_COMMON_MSGRINGS;
+ ptr += offset;
+
+ data = LTOH16((uint16)*ptr);
+ break;
+
+ default:
+ DHD_ERROR(("%s: Invalid option for DMAing read/write index\n",
+ __FUNCTION__));
+
+ break;
+ }
+ DHD_TRACE(("%s: Data 0x%p, data %d\n", __FUNCTION__, ptr, data));
+ return (data);
+}
+
+/* D2H direction: get next space to read from */
+static uint8*
+prot_get_src_addr(dhd_pub_t *dhd, msgbuf_ring_t * ring, uint16* available_len)
+{
+ uint16 w_ptr;
+ uint16 r_ptr;
+ uint16 depth;
+ void* ret_addr = NULL;
+ uint16 d2h_w_index = 0;
+
+ DHD_TRACE(("%s: h2d_dma_readindx_buf %p, d2h_dma_writeindx_buf %p\n",
+ __FUNCTION__, (uint32 *)(dhd->prot->h2d_dma_readindx_buf.va),
+ (uint32 *)(dhd->prot->d2h_dma_writeindx_buf.va)));
+
+ /* update write pointer */
+ if (DMA_INDX_ENAB(dhd->dma_d2h_ring_upd_support)) {
+ /* DMAing write/read indices supported */
+ d2h_w_index = dhd_get_dmaed_index(dhd, D2H_DMA_WRITEINDX, ring->idx);
+ ring->ringstate->w_offset = d2h_w_index;
+ } else
+ dhd_bus_cmn_readshared(dhd->bus,
+ &(RING_WRITE_PTR(ring)), RING_WRITE_PTR, ring->idx);
+
+ w_ptr = ring->ringstate->w_offset;
+ r_ptr = ring->ringstate->r_offset;
+ depth = ring->ringmem->max_item;
+
+ /* check for avail space */
+ *available_len = READ_AVAIL_SPACE(w_ptr, r_ptr, depth);
+ if (*available_len == 0)
+ return NULL;
+
+ ASSERT(*available_len <= ring->ringmem->max_item);
+
+ /* if space available, calculate address to be read */
+ ret_addr = (char*)ring->ring_base.va + (r_ptr * ring->ringmem->len_items);
+
+ /* update read pointer */
+ if ((ring->ringstate->r_offset + *available_len) >= ring->ringmem->max_item)
+ ring->ringstate->r_offset = 0;
+ else
+ ring->ringstate->r_offset += *available_len;
+
+ ASSERT(ring->ringstate->r_offset < ring->ringmem->max_item);
+
+ /* convert index to bytes */
+ *available_len = *available_len * ring->ringmem->len_items;
+
+ /* return read address */
+ return ret_addr;
+}
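+/* Publish the updated D2H read index back to the dongle (DMA'd host memory or TCM) */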
+static void
+prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t * ring)
+{
+ /* update read index */
+ /* If dma'ing h2d indices supported
+ * update the r -indices in the
+ * host memory o/w in TCM
+ */
+ if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support))
+ dhd_set_dmaed_index(dhd, D2H_DMA_READINDX,
+ ring->idx, (uint16)RING_READ_PTR(ring));
+ else
+ dhd_bus_cmn_writeshared(dhd->bus, &(RING_READ_PTR(ring)),
+ sizeof(uint16), RING_READ_PTR, ring->idx);
+}
+
+static void
+prot_store_rxcpln_read_idx(dhd_pub_t *dhd, msgbuf_ring_t * ring)
+{
+ dhd_prot_t *prot;
+
+ if (!dhd || !dhd->prot)
+ return;
+
+ prot = dhd->prot;
+ prot->rx_cpln_early_upd_idx = RING_READ_PTR(ring);
+}
+
+static void
+prot_early_upd_rxcpln_read_idx(dhd_pub_t *dhd, msgbuf_ring_t * ring)
+{
+ dhd_prot_t *prot;
+
+ if (!dhd || !dhd->prot)
+ return;
+
+ prot = dhd->prot;
+
+ if (prot->rx_cpln_early_upd_idx == RING_READ_PTR(ring))
+ return;
+
+ if (++prot->rx_cpln_early_upd_idx >= RING_MAX_ITEM(ring))
+ prot->rx_cpln_early_upd_idx = 0;
+
+ if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support))
+ dhd_set_dmaed_index(dhd, D2H_DMA_READINDX,
+ ring->idx, (uint16)prot->rx_cpln_early_upd_idx);
+ else
+ dhd_bus_cmn_writeshared(dhd->bus, &(prot->rx_cpln_early_upd_idx),
+ sizeof(uint16), RING_READ_PTR, ring->idx);
+}
+
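+/* Attach a new H2D tx flow ring and send a MSG_TYPE_FLOW_RING_CREATE request */
+/* to the dongle on the control submission ring */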
+int
+dhd_prot_flow_ring_create(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
+{
+ tx_flowring_create_request_t *flow_create_rqst;
+ msgbuf_ring_t *msgbuf_flow_info;
+ dhd_prot_t *prot = dhd->prot;
+ uint16 hdrlen = sizeof(tx_flowring_create_request_t);
+ uint16 msglen = hdrlen;
+ unsigned long flags;
+ char eabuf[ETHER_ADDR_STR_LEN];
+ uint16 alloced = 0;
+
+ if (!(msgbuf_flow_info = prot_ring_attach(prot, "h2dflr",
+ H2DRING_TXPOST_MAX_ITEM, H2DRING_TXPOST_ITEMSIZE,
+ BCMPCIE_H2D_TXFLOWRINGID +
+ (flow_ring_node->flowid - BCMPCIE_H2D_COMMON_MSGRINGS)))) {
+ DHD_ERROR(("%s: kmalloc for H2D TX Flow ring failed\n", __FUNCTION__));
+ return BCME_NOMEM;
+ }
+ /* Clear write pointer of the ring */
+ flow_ring_node->prot_info = (void *)msgbuf_flow_info;
+
+	/* align it to 4 bytes, so that all start addresses from cbuf are 4-byte aligned */
+ msglen = align(msglen, DMA_ALIGN_LEN);
+
+
+ DHD_GENERAL_LOCK(dhd, flags);
+ /* Request for ring buffer space */
+ flow_create_rqst = (tx_flowring_create_request_t *)dhd_alloc_ring_space(dhd,
+ prot->h2dring_ctrl_subn, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced);
+
+ if (flow_create_rqst == NULL) {
+ DHD_ERROR(("%s: No space in control ring for Flow create req\n", __FUNCTION__));
+ DHD_GENERAL_UNLOCK(dhd, flags);
+ return BCME_NOMEM;
+ }
+ msgbuf_flow_info->inited = TRUE;
+
+ /* Common msg buf hdr */
+ flow_create_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_CREATE;
+ flow_create_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
+ flow_create_rqst->msg.request_id = htol16(0); /* TBD */
+
+ /* Update flow create message */
+ flow_create_rqst->tid = flow_ring_node->flow_info.tid;
+ flow_create_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
+ memcpy(flow_create_rqst->sa, flow_ring_node->flow_info.sa, sizeof(flow_create_rqst->sa));
+ memcpy(flow_create_rqst->da, flow_ring_node->flow_info.da, sizeof(flow_create_rqst->da));
+ flow_create_rqst->flow_ring_ptr.low_addr = msgbuf_flow_info->ringmem->base_addr.low_addr;
+ flow_create_rqst->flow_ring_ptr.high_addr = msgbuf_flow_info->ringmem->base_addr.high_addr;
+ flow_create_rqst->max_items = htol16(H2DRING_TXPOST_MAX_ITEM);
+ flow_create_rqst->len_item = htol16(H2DRING_TXPOST_ITEMSIZE);
+ bcm_ether_ntoa((struct ether_addr *)flow_ring_node->flow_info.da, eabuf);
+ DHD_ERROR(("%s Send Flow create Req msglen flow ID %d for peer %s prio %d ifindex %d\n",
+ __FUNCTION__, flow_ring_node->flowid, eabuf, flow_ring_node->flow_info.tid,
+ flow_ring_node->flow_info.ifindex));
+
+ /* upd wrt ptr and raise interrupt */
+ prot_ring_write_complete(dhd, prot->h2dring_ctrl_subn, flow_create_rqst,
+ DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
+
+ /* If dma'ing indices supported
+ * update the w-index in host memory o/w in TCM
+ */
+ if (DMA_INDX_ENAB(dhd->dma_h2d_ring_upd_support))
+ dhd_set_dmaed_index(dhd, H2D_DMA_WRITEINDX,
+ msgbuf_flow_info->idx, (uint16)RING_WRITE_PTR(msgbuf_flow_info));
+ else
+ dhd_bus_cmn_writeshared(dhd->bus, &(RING_WRITE_PTR(msgbuf_flow_info)),
+ sizeof(uint16), RING_WRITE_PTR, msgbuf_flow_info->idx);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+
+ return BCME_OK;
+}
+
+static void
+dhd_prot_process_flow_ring_create_response(dhd_pub_t *dhd, void* buf, uint16 msglen)
+{
+ tx_flowring_create_response_t *flow_create_resp = (tx_flowring_create_response_t *)buf;
+
+ DHD_ERROR(("%s Flow create Response status = %d Flow %d\n", __FUNCTION__,
+ flow_create_resp->cmplt.status, flow_create_resp->cmplt.flow_ring_id));
+
+ dhd_bus_flow_ring_create_response(dhd->bus, flow_create_resp->cmplt.flow_ring_id,
+ flow_create_resp->cmplt.status);
+}
+
+void dhd_prot_clean_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info)
+{
+ msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)msgbuf_flow_info;
+ dhd_prot_ring_detach(dhd, flow_ring);
+ DHD_INFO(("%s Cleaning up Flow \n", __FUNCTION__));
+}
+
+void dhd_prot_print_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info,
+ struct bcmstrbuf *strbuf)
+{
+ msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)msgbuf_flow_info;
+ uint16 rd, wrt;
+ dhd_bus_cmn_readshared(dhd->bus, &rd, RING_READ_PTR, flow_ring->idx);
+ dhd_bus_cmn_readshared(dhd->bus, &wrt, RING_WRITE_PTR, flow_ring->idx);
+ bcm_bprintf(strbuf, "RD %d WR %d\n", rd, wrt);
+}
+
+void dhd_prot_print_info(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
+{
+ bcm_bprintf(strbuf, "CtrlPost: ");
+ dhd_prot_print_flow_ring(dhd, dhd->prot->h2dring_ctrl_subn, strbuf);
+ bcm_bprintf(strbuf, "CtrlCpl: ");
+ dhd_prot_print_flow_ring(dhd, dhd->prot->d2hring_ctrl_cpln, strbuf);
+ bcm_bprintf(strbuf, "RxPost: ");
+ bcm_bprintf(strbuf, "RBP %d ", dhd->prot->rxbufpost);
+ dhd_prot_print_flow_ring(dhd, dhd->prot->h2dring_rxp_subn, strbuf);
+ bcm_bprintf(strbuf, "RxCpl: ");
+ dhd_prot_print_flow_ring(dhd, dhd->prot->d2hring_rx_cpln, strbuf);
+ if (dhd_bus_is_txmode_push(dhd->bus)) {
+ bcm_bprintf(strbuf, "TxPost: ");
+ dhd_prot_print_flow_ring(dhd, dhd->prot->h2dring_txp_subn, strbuf);
+ }
+ bcm_bprintf(strbuf, "TxCpl: ");
+ dhd_prot_print_flow_ring(dhd, dhd->prot->d2hring_tx_cpln, strbuf);
+ bcm_bprintf(strbuf, "active_tx_count %d pktidmap_avail %d\n",
+ dhd->prot->active_tx_count,
+ dhd_pktid_map_avail_cnt(dhd->prot->pktid_map_handle));
+}
+
+int
+dhd_prot_flow_ring_delete(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
+{
+ tx_flowring_delete_request_t *flow_delete_rqst;
+ dhd_prot_t *prot = dhd->prot;
+ uint16 msglen = sizeof(tx_flowring_delete_request_t);
+ unsigned long flags;
+ uint16 alloced = 0;
+
+	/* align it to 4 bytes, so that all start addresses from cbuf are 4-byte aligned */
+ msglen = align(msglen, DMA_ALIGN_LEN);
+
+ /* Request for ring buffer space */
+ DHD_GENERAL_LOCK(dhd, flags);
+ flow_delete_rqst = (tx_flowring_delete_request_t *)dhd_alloc_ring_space(dhd,
+ prot->h2dring_ctrl_subn, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced);
+
+ if (flow_delete_rqst == NULL) {
+ DHD_GENERAL_UNLOCK(dhd, flags);
+ DHD_ERROR(("%s Flow Delete req failure no ring mem %d \n", __FUNCTION__, msglen));
+ return BCME_NOMEM;
+ }
+
+ /* Common msg buf hdr */
+ flow_delete_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_DELETE;
+ flow_delete_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
+ flow_delete_rqst->msg.request_id = htol16(0); /* TBD */
+
+ /* Update Delete info */
+ flow_delete_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
+ flow_delete_rqst->reason = htol16(BCME_OK);
+
+ DHD_ERROR(("%s sending FLOW RING Delete req msglen %d \n", __FUNCTION__, msglen));
+
+ /* upd wrt ptr and raise interrupt */
+ prot_ring_write_complete(dhd, prot->h2dring_ctrl_subn, flow_delete_rqst,
+ DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+
+ return BCME_OK;
+}
+
+static void
+dhd_prot_process_flow_ring_delete_response(dhd_pub_t *dhd, void* buf, uint16 msglen)
+{
+ tx_flowring_delete_response_t *flow_delete_resp = (tx_flowring_delete_response_t *)buf;
+
+ DHD_INFO(("%s Flow Delete Response status = %d \n", __FUNCTION__,
+ flow_delete_resp->cmplt.status));
+
+ dhd_bus_flow_ring_delete_response(dhd->bus, flow_delete_resp->cmplt.flow_ring_id,
+ flow_delete_resp->cmplt.status);
+}
+
+int
+dhd_prot_flow_ring_flush(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
+{
+ tx_flowring_flush_request_t *flow_flush_rqst;
+ dhd_prot_t *prot = dhd->prot;
+ uint16 msglen = sizeof(tx_flowring_flush_request_t);
+ unsigned long flags;
+ uint16 alloced = 0;
+
+	/* align it to 4 bytes, so that all start addresses from cbuf are 4-byte aligned */
+ msglen = align(msglen, DMA_ALIGN_LEN);
+
+ /* Request for ring buffer space */
+ DHD_GENERAL_LOCK(dhd, flags);
+ flow_flush_rqst = (tx_flowring_flush_request_t *)dhd_alloc_ring_space(dhd,
+ prot->h2dring_ctrl_subn, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced);
+ if (flow_flush_rqst == NULL) {
+ DHD_GENERAL_UNLOCK(dhd, flags);
+ DHD_ERROR(("%s Flow Flush req failure no ring mem %d \n", __FUNCTION__, msglen));
+ return BCME_NOMEM;
+ }
+
+ /* Common msg buf hdr */
+ flow_flush_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_FLUSH;
+ flow_flush_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
+ flow_flush_rqst->msg.request_id = htol16(0); /* TBD */
+
+ flow_flush_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
+ flow_flush_rqst->reason = htol16(BCME_OK);
+
+ DHD_INFO(("%s sending FLOW RING Flush req msglen %d \n", __FUNCTION__, msglen));
+
+ /* upd wrt ptr and raise interrupt */
+ prot_ring_write_complete(dhd, prot->h2dring_ctrl_subn, flow_flush_rqst,
+ DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
+ DHD_GENERAL_UNLOCK(dhd, flags);
+
+ return BCME_OK;
+}
+
+static void
+dhd_prot_process_flow_ring_flush_response(dhd_pub_t *dhd, void* buf, uint16 msglen)
+{
+ tx_flowring_flush_response_t *flow_flush_resp = (tx_flowring_flush_response_t *)buf;
+
+ DHD_INFO(("%s Flow Flush Response status = %d \n", __FUNCTION__,
+ flow_flush_resp->cmplt.status));
+
+ dhd_bus_flow_ring_flush_response(dhd->bus, flow_flush_resp->cmplt.flow_ring_id,
+ flow_flush_resp->cmplt.status);
+}
+
+int
+dhd_prot_ringupd_dump(dhd_pub_t *dhd, struct bcmstrbuf *b)
+{
+ uint32 *ptr;
+ uint32 value;
+ uint32 i;
+ uint8 txpush = 0;
+ uint32 max_h2d_queues = dhd_bus_max_h2d_queues(dhd->bus, &txpush);
+
+ OSL_CACHE_INV((void *)dhd->prot->d2h_dma_writeindx_buf.va,
+ dhd->prot->d2h_dma_writeindx_buf_len);
+
+ ptr = (uint32 *)(dhd->prot->d2h_dma_writeindx_buf.va);
+
+ bcm_bprintf(b, "\n max_tx_queues %d, txpush mode %d\n", max_h2d_queues, txpush);
+
+ bcm_bprintf(b, "\nRPTR block H2D common rings, 0x%04x\n", ptr);
+ value = ltoh32(*ptr);
+ bcm_bprintf(b, "\tH2D CTRL: value 0x%04x\n", value);
+ ptr++;
+ value = ltoh32(*ptr);
+ bcm_bprintf(b, "\tH2D RXPOST: value 0x%04x\n", value);
+
+ if (txpush) {
+ ptr++;
+ value = ltoh32(*ptr);
+ bcm_bprintf(b, "\tH2D TXPOST value 0x%04x\n", value);
+ }
+ else {
+ ptr++;
+ bcm_bprintf(b, "RPTR block Flow rings , 0x%04x\n", ptr);
+ for (i = BCMPCIE_H2D_COMMON_MSGRINGS; i < max_h2d_queues; i++) {
+ value = ltoh32(*ptr);
+ bcm_bprintf(b, "\tflowring ID %d: value 0x%04x\n", i, value);
+ ptr++;
+ }
+ }
+
+ OSL_CACHE_INV((void *)dhd->prot->h2d_dma_readindx_buf.va,
+ dhd->prot->h2d_dma_readindx_buf_len);
+
+ ptr = (uint32 *)(dhd->prot->h2d_dma_readindx_buf.va);
+
+ bcm_bprintf(b, "\nWPTR block D2H common rings, 0x%04x\n", ptr);
+ value = ltoh32(*ptr);
+ bcm_bprintf(b, "\tD2H CTRLCPLT: value 0x%04x\n", value);
+ ptr++;
+ value = ltoh32(*ptr);
+ bcm_bprintf(b, "\tD2H TXCPLT: value 0x%04x\n", value);
+ ptr++;
+ value = ltoh32(*ptr);
+ bcm_bprintf(b, "\tD2H RXCPLT: value 0x%04x\n", value);
+
+ return 0;
+}
+
+uint32
+dhd_prot_metadatalen_set(dhd_pub_t *dhd, uint32 val, bool rx)
+{
+ dhd_prot_t *prot = dhd->prot;
+ if (rx)
+ prot->rx_metadata_offset = (uint16)val;
+ else
+ prot->tx_metadata_offset = (uint16)val;
+ return dhd_prot_metadatalen_get(dhd, rx);
+}
+
+uint32
+dhd_prot_metadatalen_get(dhd_pub_t *dhd, bool rx)
+{
+ dhd_prot_t *prot = dhd->prot;
+ if (rx)
+ return prot->rx_metadata_offset;
+ else
+ return prot->tx_metadata_offset;
+}
+
+uint32
+dhd_prot_txp_threshold(dhd_pub_t *dhd, bool set, uint32 val)
+{
+ dhd_prot_t *prot = dhd->prot;
+ if (set)
+ prot->txp_threshold = (uint16)val;
+ val = prot->txp_threshold;
+ return val;
+}
+
+#ifdef DHD_RX_CHAINING
+static INLINE void BCMFASTPATH
+dhd_rxchain_reset(rxchain_info_t *rxchain)
+{
+ rxchain->pkt_count = 0;
+}
+
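+/* Chain rx packets that belong to the same flow (da/sa/prio/ifidx); */
+/* hand the chain up when the flow changes or the max chain length is hit */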
+static void BCMFASTPATH
+dhd_rxchain_frame(dhd_pub_t *dhd, void *pkt, uint ifidx)
+{
+ uint8 *eh;
+ uint8 prio;
+ dhd_prot_t *prot = dhd->prot;
+ rxchain_info_t *rxchain = &prot->rxchain;
+
+ eh = PKTDATA(dhd->osh, pkt);
+ prio = IP_TOS46(eh + ETHER_HDR_LEN) >> IPV4_TOS_PREC_SHIFT;
+
+ /* For routers, with HNDCTF, link the packets using PKTSETCLINK, */
+ /* so that the chain can be handed off to CTF bridge as is. */
+ if (rxchain->pkt_count == 0) {
+ /* First packet in chain */
+ rxchain->pkthead = rxchain->pkttail = pkt;
+
+ /* Keep a copy of ptr to ether_da, ether_sa and prio */
+ rxchain->h_da = ((struct ether_header *)eh)->ether_dhost;
+ rxchain->h_sa = ((struct ether_header *)eh)->ether_shost;
+ rxchain->h_prio = prio;
+ rxchain->ifidx = ifidx;
+ rxchain->pkt_count++;
+ } else {
+ if (PKT_CTF_CHAINABLE(dhd, ifidx, eh, prio, rxchain->h_sa,
+ rxchain->h_da, rxchain->h_prio)) {
+ /* Same flow - keep chaining */
+ PKTSETCLINK(rxchain->pkttail, pkt);
+ rxchain->pkttail = pkt;
+ rxchain->pkt_count++;
+ } else {
+ /* Different flow - First release the existing chain */
+ dhd_rxchain_commit(dhd);
+
+ /* Create a new chain */
+ rxchain->pkthead = rxchain->pkttail = pkt;
+
+ /* Keep a copy of ptr to ether_da, ether_sa and prio */
+ rxchain->h_da = ((struct ether_header *)eh)->ether_dhost;
+ rxchain->h_sa = ((struct ether_header *)eh)->ether_shost;
+ rxchain->h_prio = prio;
+ rxchain->ifidx = ifidx;
+ rxchain->pkt_count++;
+ }
+ }
+
+ if ((!ETHER_ISMULTI(rxchain->h_da)) &&
+ ((((struct ether_header *)eh)->ether_type == HTON16(ETHER_TYPE_IP)) ||
+ (((struct ether_header *)eh)->ether_type == HTON16(ETHER_TYPE_IPV6)))) {
+ PKTSETCHAINED(dhd->osh, pkt);
+ PKTCINCRCNT(rxchain->pkthead);
+ PKTCADDLEN(rxchain->pkthead, PKTLEN(dhd->osh, pkt));
+ } else {
+ dhd_rxchain_commit(dhd);
+ return;
+ }
+
+ /* If we have hit the max chain length, dispatch the chain and reset */
+ if (rxchain->pkt_count >= DHD_PKT_CTF_MAX_CHAIN_LEN) {
+ dhd_rxchain_commit(dhd);
+ }
+}
+
+static void BCMFASTPATH
+dhd_rxchain_commit(dhd_pub_t *dhd)
+{
+ dhd_prot_t *prot = dhd->prot;
+ rxchain_info_t *rxchain = &prot->rxchain;
+
+ if (rxchain->pkt_count == 0)
+ return;
+
+ /* Release the packets to dhd_linux */
+ dhd_bus_rx_frame(dhd->bus, rxchain->pkthead, rxchain->ifidx, rxchain->pkt_count);
+
+ /* Reset the chain */
+ dhd_rxchain_reset(rxchain);
+}
+#endif /* DHD_RX_CHAINING */
+
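+/* Zero a ring's memory and reset its read/write state */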
+static void
+dhd_prot_ring_clear(msgbuf_ring_t* ring)
+{
+ uint16 size;
+
+ DHD_TRACE(("%s\n", __FUNCTION__));
+
+ size = ring->ringmem->max_item * ring->ringmem->len_items;
+ ASSERT(MODX((unsigned long)ring->ring_base.va, DMA_ALIGN_LEN) == 0);
+ OSL_CACHE_INV((void *) ring->ring_base.va, size);
+ bzero(ring->ring_base.va, size);
+
+ OSL_CACHE_FLUSH((void *) ring->ring_base.va, size);
+
+ bzero(ring->ringstate, sizeof(*ring->ringstate));
+}
+
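+/* Reset all common rings, shared DMA buffers and protocol state when the bus is being stopped */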
+void
+dhd_prot_clear(dhd_pub_t *dhd)
+{
+ struct dhd_prot *prot = dhd->prot;
+
+ DHD_TRACE(("%s\n", __FUNCTION__));
+
+ if (prot == NULL)
+ return;
+
+ if (prot->h2dring_txp_subn)
+ dhd_prot_ring_clear(prot->h2dring_txp_subn);
+ if (prot->h2dring_rxp_subn)
+ dhd_prot_ring_clear(prot->h2dring_rxp_subn);
+ if (prot->h2dring_ctrl_subn)
+ dhd_prot_ring_clear(prot->h2dring_ctrl_subn);
+ if (prot->d2hring_tx_cpln)
+ dhd_prot_ring_clear(prot->d2hring_tx_cpln);
+ if (prot->d2hring_rx_cpln)
+ dhd_prot_ring_clear(prot->d2hring_rx_cpln);
+ if (prot->d2hring_ctrl_cpln)
+ dhd_prot_ring_clear(prot->d2hring_ctrl_cpln);
+
+ if (prot->retbuf.va) {
+ OSL_CACHE_INV((void *) prot->retbuf.va, IOCT_RETBUF_SIZE);
+ bzero(prot->retbuf.va, IOCT_RETBUF_SIZE);
+ OSL_CACHE_FLUSH((void *) prot->retbuf.va, IOCT_RETBUF_SIZE);
+ }
+
+ if (prot->ioctbuf.va) {
+ OSL_CACHE_INV((void *) prot->ioctbuf.va, IOCT_RETBUF_SIZE);
+ bzero(prot->ioctbuf.va, IOCT_RETBUF_SIZE);
+ OSL_CACHE_FLUSH((void *) prot->ioctbuf.va, IOCT_RETBUF_SIZE);
+ }
+
+ if (prot->d2h_dma_scratch_buf.va) {
+ OSL_CACHE_INV((void *)prot->d2h_dma_scratch_buf.va, DMA_D2H_SCRATCH_BUF_LEN);
+ bzero(prot->d2h_dma_scratch_buf.va, DMA_D2H_SCRATCH_BUF_LEN);
+ OSL_CACHE_FLUSH((void *)prot->d2h_dma_scratch_buf.va, DMA_D2H_SCRATCH_BUF_LEN);
+ }
+
+ if (prot->h2d_dma_readindx_buf.va) {
+ OSL_CACHE_INV((void *)prot->h2d_dma_readindx_buf.va,
+ prot->h2d_dma_readindx_buf_len);
+ bzero(prot->h2d_dma_readindx_buf.va,
+ prot->h2d_dma_readindx_buf_len);
+ OSL_CACHE_FLUSH((void *)prot->h2d_dma_readindx_buf.va,
+ prot->h2d_dma_readindx_buf_len);
+ }
+
+ if (prot->h2d_dma_writeindx_buf.va) {
+ OSL_CACHE_INV((void *)prot->h2d_dma_writeindx_buf.va,
+ prot->h2d_dma_writeindx_buf_len);
+ bzero(prot->h2d_dma_writeindx_buf.va, prot->h2d_dma_writeindx_buf_len);
+ OSL_CACHE_FLUSH((void *)prot->h2d_dma_writeindx_buf.va,
+ prot->h2d_dma_writeindx_buf_len);
+ }
+
+ if (prot->d2h_dma_readindx_buf.va) {
+ OSL_CACHE_INV((void *)prot->d2h_dma_readindx_buf.va,
+ prot->d2h_dma_readindx_buf_len);
+ bzero(prot->d2h_dma_readindx_buf.va, prot->d2h_dma_readindx_buf_len);
+ OSL_CACHE_FLUSH((void *)prot->d2h_dma_readindx_buf.va,
+ prot->d2h_dma_readindx_buf_len);
+ }
+
+ if (prot->d2h_dma_writeindx_buf.va) {
+ OSL_CACHE_INV((void *)prot->d2h_dma_writeindx_buf.va,
+ prot->d2h_dma_writeindx_buf_len);
+ bzero(prot->d2h_dma_writeindx_buf.va, prot->d2h_dma_writeindx_buf_len);
+ OSL_CACHE_FLUSH((void *)prot->d2h_dma_writeindx_buf.va,
+ prot->d2h_dma_writeindx_buf_len);
+ }
+
+ prot->rx_metadata_offset = 0;
+ prot->tx_metadata_offset = 0;
+
+ prot->rxbufpost = 0;
+ prot->cur_event_bufs_posted = 0;
+ prot->cur_ioctlresp_bufs_posted = 0;
+
+ prot->active_tx_count = 0;
+ prot->data_seq_no = 0;
+ prot->ioctl_seq_no = 0;
+ prot->pending = 0;
+ prot->lastcmd = 0;
+
+ prot->ioctl_trans_id = 1;
+
+ /* dhd_flow_rings_init is located at dhd_bus_start,
+ * so when stopping bus, flowrings shall be deleted
+ */
+ dhd_flow_rings_deinit(dhd);
+ NATIVE_TO_PKTID_CLEAR(prot->pktid_map_handle);
+}
--- /dev/null
+/*
+ * DHD Bus Module for PCIE
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: dhd_pcie.c 491657 2014-07-17 06:29:40Z $
+ */
+
+
+/* include files */
+#include <typedefs.h>
+#include <bcmutils.h>
+#include <bcmdevs.h>
+#include <siutils.h>
+#include <hndsoc.h>
+#include <hndpmu.h>
+#include <sbchipc.h>
+#if defined(DHD_DEBUG)
+#include <hnd_armtrap.h>
+#include <hnd_cons.h>
+#endif /* defined(DHD_DEBUG) */
+#include <dngl_stats.h>
+#include <pcie_core.h>
+#include <dhd.h>
+#include <dhd_bus.h>
+#include <dhd_flowring.h>
+#include <dhd_proto.h>
+#include <dhd_dbg.h>
+#include <dhdioctl.h>
+#include <sdiovar.h>
+#include <bcmmsgbuf.h>
+#include <pcicfg.h>
+#include <dhd_pcie.h>
+#include <bcmpcie.h>
+#include <bcmendian.h>
+#ifdef DHDTCPACK_SUPPRESS
+#include <dhd_ip.h>
+#endif /* DHDTCPACK_SUPPRESS */
+
+#ifdef BCMEMBEDIMAGE
+#include BCMEMBEDIMAGE
+#endif /* BCMEMBEDIMAGE */
+
+#define MEMBLOCK 2048 /* Block size used for downloading of dongle image */
+#define MAX_NVRAMBUF_SIZE 6144 /* max nvram buf size */
+
+#define ARMCR4REG_BANKIDX (0x40/sizeof(uint32))
+#define ARMCR4REG_BANKPDA (0x4C/sizeof(uint32))
+/* Temporary WAR to fix precommit till the sync issue between trunk & precommit branch is resolved */
+
+int dhd_dongle_memsize;
+int dhd_dongle_ramsize;
+#ifdef DHD_DEBUG
+static int dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size);
+static int dhdpcie_bus_readconsole(dhd_bus_t *bus);
+#endif
+static int dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uint size);
+static int dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid,
+ const char *name, void *params,
+ int plen, void *arg, int len, int val_size);
+static int dhdpcie_bus_lpback_req(struct dhd_bus *bus, uint32 intval);
+static int dhdpcie_bus_dmaxfer_req(struct dhd_bus *bus,
+ uint32 len, uint32 srcdelay, uint32 destdelay);
+static int dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter);
+static int _dhdpcie_download_firmware(struct dhd_bus *bus);
+static int dhdpcie_download_firmware(dhd_bus_t *bus, osl_t *osh);
+static int dhdpcie_bus_write_vars(dhd_bus_t *bus);
+static void dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus);
+static void dhdpci_bus_read_frames(dhd_bus_t *bus);
+static int dhdpcie_readshared(dhd_bus_t *bus);
+static void dhdpcie_init_shared_addr(dhd_bus_t *bus);
+static bool dhdpcie_dongle_attach(dhd_bus_t *bus);
+static void dhdpcie_bus_intr_enable(dhd_bus_t *bus);
+static void dhdpcie_bus_dongle_setmemsize(dhd_bus_t *bus, int mem_size);
+static void dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh,
+ bool dongle_isolation, bool reset_flag);
+static void dhdpcie_bus_release_malloc(dhd_bus_t *bus, osl_t *osh);
+static int dhdpcie_downloadvars(dhd_bus_t *bus, void *arg, int len);
+static uint8 dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset);
+static void dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data);
+static void dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data);
+static uint16 dhdpcie_bus_rtcm16(dhd_bus_t *bus, ulong offset);
+static void dhdpcie_bus_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data);
+static uint32 dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset);
+static void dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data);
+static uint64 dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset);
+static void dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t *bus, uint32 data);
+static void dhdpcie_bus_reg_unmap(osl_t *osh, ulong addr, int size);
+static int dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b);
+static void dhdpcie_send_mb_data(dhd_bus_t *bus, uint32 h2d_mb_data);
+static void dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info);
+extern void dhd_dpc_kill(dhd_pub_t *dhdp);
+
+#ifdef BCMEMBEDIMAGE
+static int dhdpcie_download_code_array(dhd_bus_t *bus);
+#endif /* BCMEMBEDIMAGE */
+
+
+
+#define PCI_VENDOR_ID_BROADCOM 0x14e4
+
+/* IOVar table */
+enum {
+ IOV_INTR = 1,
+ IOV_MEMBYTES,
+ IOV_MEMSIZE,
+ IOV_SET_DOWNLOAD_STATE,
+ IOV_DEVRESET,
+ IOV_VARS,
+ IOV_MSI_SIM,
+ IOV_PCIE_LPBK,
+ IOV_CC_NVMSHADOW,
+ IOV_RAMSIZE,
+ IOV_RAMSTART,
+ IOV_SLEEP_ALLOWED,
+ IOV_PCIE_DMAXFER,
+ IOV_PCIE_SUSPEND,
+ IOV_PCIEREG,
+ IOV_PCIECFGREG,
+ IOV_PCIECOREREG,
+ IOV_PCIESERDESREG,
+ IOV_BAR0_SECWIN_REG,
+ IOV_SBREG,
+ IOV_DONGLEISOLATION,
+ IOV_LTRSLEEPON_UNLOOAD,
+ IOV_RX_METADATALEN,
+ IOV_TX_METADATALEN,
+ IOV_TXP_THRESHOLD,
+ IOV_BUZZZ_DUMP,
+ IOV_DUMP_RINGUPD_BLOCK,
+ IOV_DMA_RINGINDICES,
+ IOV_DB1_FOR_MB,
+ IOV_FLOW_PRIO_MAP
+};
+
+
+const bcm_iovar_t dhdpcie_iovars[] = {
+ {"intr", IOV_INTR, 0, IOVT_BOOL, 0 },
+ {"membytes", IOV_MEMBYTES, 0, IOVT_BUFFER, 2 * sizeof(int) },
+ {"memsize", IOV_MEMSIZE, 0, IOVT_UINT32, 0 },
+ {"dwnldstate", IOV_SET_DOWNLOAD_STATE, 0, IOVT_BOOL, 0 },
+ {"vars", IOV_VARS, 0, IOVT_BUFFER, 0 },
+ {"devreset", IOV_DEVRESET, 0, IOVT_BOOL, 0 },
+ {"pcie_lpbk", IOV_PCIE_LPBK, 0, IOVT_UINT32, 0 },
+ {"cc_nvmshadow", IOV_CC_NVMSHADOW, 0, IOVT_BUFFER, 0 },
+ {"ramsize", IOV_RAMSIZE, 0, IOVT_UINT32, 0 },
+ {"ramstart", IOV_RAMSTART, 0, IOVT_UINT32, 0 },
+ {"pciereg", IOV_PCIEREG, 0, IOVT_BUFFER, 2 * sizeof(int32) },
+ {"pciecfgreg", IOV_PCIECFGREG, 0, IOVT_BUFFER, 2 * sizeof(int32) },
+ {"pciecorereg", IOV_PCIECOREREG, 0, IOVT_BUFFER, 2 * sizeof(int32) },
+ {"pcieserdesreg", IOV_PCIESERDESREG, 0, IOVT_BUFFER, 3 * sizeof(int32) },
+ {"bar0secwinreg", IOV_BAR0_SECWIN_REG, 0, IOVT_BUFFER, 2 * sizeof(int32) },
+ {"sbreg", IOV_SBREG, 0, IOVT_BUFFER, sizeof(sdreg_t) },
+ {"pcie_dmaxfer", IOV_PCIE_DMAXFER, 0, IOVT_BUFFER, 3 * sizeof(int32) },
+ {"pcie_suspend", IOV_PCIE_SUSPEND, 0, IOVT_UINT32, 0 },
+ {"sleep_allowed", IOV_SLEEP_ALLOWED, 0, IOVT_BOOL, 0 },
+ {"dngl_isolation", IOV_DONGLEISOLATION, 0, IOVT_UINT32, 0 },
+ {"ltrsleep_on_unload", IOV_LTRSLEEPON_UNLOOAD, 0, IOVT_UINT32, 0 },
+ {"dump_ringupdblk", IOV_DUMP_RINGUPD_BLOCK, 0, IOVT_BUFFER, 0 },
+ {"dma_ring_indices", IOV_DMA_RINGINDICES, 0, IOVT_UINT32, 0},
+ {"rx_metadata_len", IOV_RX_METADATALEN, 0, IOVT_UINT32, 0 },
+ {"tx_metadata_len", IOV_TX_METADATALEN, 0, IOVT_UINT32, 0 },
+ {"db1_for_mb", IOV_DB1_FOR_MB, 0, IOVT_UINT32, 0 },
+ {"txp_thresh", IOV_TXP_THRESHOLD, 0, IOVT_UINT32, 0 },
+ {"buzzz_dump", IOV_BUZZZ_DUMP, 0, IOVT_UINT32, 0 },
+ {"flow_prio_map", IOV_FLOW_PRIO_MAP, 0, IOVT_UINT32, 0 },
+ {NULL, 0, 0, 0, 0 }
+};
+
+#define MAX_READ_TIMEOUT	(5 * 1000 * 1000)
+
+/* Register/Unregister functions are called by the main DHD entry
+ * point (e.g. module insertion) to link with the bus driver, in
+ * order to look for or await the device.
+ */
+
+int
+dhd_bus_register(void)
+{
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ return dhdpcie_bus_register();
+}
+
+void
+dhd_bus_unregister(void)
+{
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ dhdpcie_bus_unregister();
+ return;
+}
+
+
+/** returns a host virtual address */
+uint32 *
+dhdpcie_bus_reg_map(osl_t *osh, ulong addr, int size)
+{
+ return (uint32 *)REG_MAP(addr, size);
+}
+
+void
+dhdpcie_bus_reg_unmap(osl_t *osh, ulong addr, int size)
+{
+ REG_UNMAP((void*)(uintptr)addr);
+ return;
+}
+
+/**
+ * 'regs' is the host virtual address that maps to the start of the PCIe BAR0 window. The first 4096
+ * bytes in this window are mapped to the backplane address in the PCIEBAR0Window register. The
+ * precondition is that the PCIEBAR0Window register 'points' at the PCIe core.
+ *
+ * 'tcm' is the *host* virtual address at which tcm is mapped.
+ */
+dhd_bus_t* dhdpcie_bus_attach(osl_t *osh, volatile char* regs, volatile char* tcm)
+{
+ dhd_bus_t *bus;
+
+ DHD_TRACE(("%s: ENTER\n", __FUNCTION__));
+
+ do {
+ if (!(bus = MALLOC(osh, sizeof(dhd_bus_t)))) {
+ DHD_ERROR(("%s: MALLOC of dhd_bus_t failed\n", __FUNCTION__));
+ break;
+ }
+ bzero(bus, sizeof(dhd_bus_t));
+ bus->regs = regs;
+ bus->tcm = tcm;
+ bus->osh = osh;
+
+ dll_init(&bus->const_flowring);
+
+ /* Attach pcie shared structure */
+ bus->pcie_sh = MALLOC(osh, sizeof(pciedev_shared_t));
+
+ /* dhd_common_init(osh); */
+
+ if (dhdpcie_dongle_attach(bus)) {
+ DHD_ERROR(("%s: dhdpcie_probe_attach failed\n", __FUNCTION__));
+ break;
+ }
+
+ /* software resources */
+ if (!(bus->dhd = dhd_attach(osh, bus, PCMSGBUF_HDRLEN))) {
+ DHD_ERROR(("%s: dhd_attach failed\n", __FUNCTION__));
+
+ break;
+ }
+ bus->dhd->busstate = DHD_BUS_DOWN;
+ bus->db1_for_mb = TRUE;
+ bus->dhd->hang_report = TRUE;
+
+ DHD_TRACE(("%s: EXIT SUCCESS\n",
+ __FUNCTION__));
+
+ return bus;
+ } while (0);
+
+ DHD_TRACE(("%s: EXIT FAILURE\n", __FUNCTION__));
+
+ return NULL;
+}
+
+uint
+dhd_bus_chip(struct dhd_bus *bus)
+{
+ ASSERT(bus->sih != NULL);
+ return bus->sih->chip;
+}
+
+uint
+dhd_bus_chiprev(struct dhd_bus *bus)
+{
+ ASSERT(bus);
+ ASSERT(bus->sih != NULL);
+ return bus->sih->chiprev;
+}
+
+void *
+dhd_bus_pub(struct dhd_bus *bus)
+{
+ return bus->dhd;
+}
+
+void *
+dhd_bus_sih(struct dhd_bus *bus)
+{
+ return (void *)bus->sih;
+}
+
+void *
+dhd_bus_txq(struct dhd_bus *bus)
+{
+ return &bus->txq;
+}
+
+/* Get Chip ID version */
+uint dhd_bus_chip_id(dhd_pub_t *dhdp)
+{
+ dhd_bus_t *bus = dhdp->bus;
+ return bus->sih->chip;
+}
+
+/* Get Chip Rev ID version */
+uint dhd_bus_chiprev_id(dhd_pub_t *dhdp)
+{
+ dhd_bus_t *bus = dhdp->bus;
+ return bus->sih->chiprev;
+}
+
+/* Get Chip Pkg ID version */
+uint dhd_bus_chippkg_id(dhd_pub_t *dhdp)
+{
+ dhd_bus_t *bus = dhdp->bus;
+ return bus->sih->chippkg;
+}
+
+
+/*
+
+Name: dhdpcie_bus_isr
+
+Parameters:
+
+1: IN int irq -- interrupt vector
+2: IN void *arg -- handle to private data structure
+
+Return value:
+
+Status (TRUE or FALSE)
+
+Description:
+The interrupt service routine checks the status register,
+disables the interrupt and queues the DPC if mailbox interrupts are raised.
+*/
+
+
+int32
+dhdpcie_bus_isr(dhd_bus_t *bus)
+{
+
+ do {
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+ /* verify argument */
+ if (!bus) {
+ DHD_ERROR(("%s : bus is null pointer , exit \n", __FUNCTION__));
+ break;
+ }
+
+ if (bus->dhd->busstate == DHD_BUS_DOWN) {
+			DHD_ERROR(("%s: bus is down, nothing to do\n", __FUNCTION__));
+ break;
+ }
+
+ /* Overall operation:
+ * - Mask further interrupts
+ * - Read/ack intstatus
+ * - Take action based on bits and state
+ * - Reenable interrupts (as per state)
+ */
+
+ /* Count the interrupt call */
+ bus->intrcount++;
+
+ /* read interrupt status register!! Status bits will be cleared in DPC !! */
+ bus->ipend = TRUE;
+ dhdpcie_bus_intr_disable(bus); /* Disable interrupt!! */
+ bus->intdis = TRUE;
+
+#if defined(PCIE_ISR_THREAD)
+
+ DHD_TRACE(("Calling dhd_bus_dpc() from %s\n", __FUNCTION__));
+ DHD_OS_WAKE_LOCK(bus->dhd);
+ while (dhd_bus_dpc(bus));
+ DHD_OS_WAKE_UNLOCK(bus->dhd);
+#else
+ bus->dpc_sched = TRUE;
+ dhd_sched_dpc(bus->dhd); /* queue DPC now!! */
+#endif /* defined(PCIE_ISR_THREAD) */
+
+ DHD_TRACE(("%s: Exit Success DPC Queued\n", __FUNCTION__));
+ return TRUE;
+
+ } while (0);
+
+ DHD_TRACE(("%s: Exit Failure\n", __FUNCTION__));
+ return FALSE;
+}
+
+static bool
+dhdpcie_dongle_attach(dhd_bus_t *bus)
+{
+
+ osl_t *osh = bus->osh;
+ void *regsva = (void*)bus->regs;
+ uint16 devid = bus->cl_devid;
+ uint32 val;
+ sbpcieregs_t *sbpcieregs;
+
+ DHD_TRACE(("%s: ENTER\n",
+ __FUNCTION__));
+
+ bus->alp_only = TRUE;
+ bus->sih = NULL;
+
+ /* Set bar0 window to si_enum_base */
+ dhdpcie_bus_cfg_set_bar0_win(bus, SI_ENUM_BASE);
+
+ /* si_attach() will provide an SI handle and scan the backplane */
+ if (!(bus->sih = si_attach((uint)devid, osh, regsva, PCI_BUS, bus,
+ &bus->vars, &bus->varsz))) {
+ DHD_ERROR(("%s: si_attach failed!\n", __FUNCTION__));
+ goto fail;
+ }
+
+
+ si_setcore(bus->sih, PCIE2_CORE_ID, 0);
+ sbpcieregs = (sbpcieregs_t*)(bus->regs);
+
+ /* WAR where the BAR1 window may not be sized properly */
+ W_REG(osh, &sbpcieregs->configaddr, 0x4e0);
+ val = R_REG(osh, &sbpcieregs->configdata);
+ W_REG(osh, &sbpcieregs->configdata, val);
+
+ /* Get info on the ARM and SOCRAM cores... */
+ /* Should really be qualified by device id */
+ if ((si_setcore(bus->sih, ARM7S_CORE_ID, 0)) ||
+ (si_setcore(bus->sih, ARMCM3_CORE_ID, 0)) ||
+ (si_setcore(bus->sih, ARMCR4_CORE_ID, 0))) {
+ bus->armrev = si_corerev(bus->sih);
+ } else {
+ DHD_ERROR(("%s: failed to find ARM core!\n", __FUNCTION__));
+ goto fail;
+ }
+
+ if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
+ if (!(bus->orig_ramsize = si_socram_size(bus->sih))) {
+ DHD_ERROR(("%s: failed to find SOCRAM memory!\n", __FUNCTION__));
+ goto fail;
+ }
+ } else {
+ /* cr4 has a different way to find the RAM size from TCM's */
+ if (!(bus->orig_ramsize = si_tcm_size(bus->sih))) {
+ DHD_ERROR(("%s: failed to find CR4-TCM memory!\n", __FUNCTION__));
+ goto fail;
+ }
+ /* also populate base address */
+ switch ((uint16)bus->sih->chip) {
+ case BCM4339_CHIP_ID:
+ case BCM4335_CHIP_ID:
+ bus->dongle_ram_base = CR4_4335_RAM_BASE;
+ break;
+ case BCM4358_CHIP_ID:
+ case BCM4356_CHIP_ID:
+ case BCM4354_CHIP_ID:
+ case BCM43567_CHIP_ID:
+ case BCM43569_CHIP_ID:
+ case BCM4350_CHIP_ID:
+ case BCM43570_CHIP_ID:
+ bus->dongle_ram_base = CR4_4350_RAM_BASE;
+ break;
+ case BCM4360_CHIP_ID:
+ bus->dongle_ram_base = CR4_4360_RAM_BASE;
+ break;
+ case BCM4345_CHIP_ID:
+ bus->dongle_ram_base = CR4_4345_RAM_BASE;
+ break;
+ case BCM43602_CHIP_ID:
+ bus->dongle_ram_base = CR4_43602_RAM_BASE;
+ break;
+ case BCM4349_CHIP_GRPID:
+ bus->dongle_ram_base = CR4_4349_RAM_BASE;
+ break;
+ default:
+ bus->dongle_ram_base = 0;
+ DHD_ERROR(("%s: WARNING: Using default ram base at 0x%x\n",
+ __FUNCTION__, bus->dongle_ram_base));
+ }
+ }
+ bus->ramsize = bus->orig_ramsize;
+ if (dhd_dongle_memsize)
+ dhdpcie_bus_dongle_setmemsize(bus, dhd_dongle_memsize);
+
+ DHD_ERROR(("DHD: dongle ram size is set to %d(orig %d) at 0x%x\n",
+ bus->ramsize, bus->orig_ramsize, bus->dongle_ram_base));
+
+ bus->srmemsize = si_socram_srmem_size(bus->sih);
+
+
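+	/* Default interrupt mask: dongle-to-host mailbox interrupts plus the
+	 * function-0 to-PCIe mailbox bits (per the mask macro names).
+	 */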
+ bus->def_intmask = PCIE_MB_D2H_MB_MASK | PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1;
+
+ /* Set the poll and/or interrupt flags */
+ bus->intr = (bool)dhd_intr;
+
+ bus->wait_for_d3_ack = 1;
+ DHD_TRACE(("%s: EXIT: SUCCESS\n",
+ __FUNCTION__));
+ return 0;
+
+fail:
+ if (bus->sih != NULL)
+ si_detach(bus->sih);
+ DHD_TRACE(("%s: EXIT: FAILURE\n",
+ __FUNCTION__));
+ return -1;
+}
+
+int
+dhpcie_bus_unmask_interrupt(dhd_bus_t *bus)
+{
+ dhdpcie_bus_cfg_write_dword(bus, PCIIntmask, 4, I_MB);
+ return 0;
+}
+int
+dhpcie_bus_mask_interrupt(dhd_bus_t *bus)
+{
+ dhdpcie_bus_cfg_write_dword(bus, PCIIntmask, 4, 0x0);
+ return 0;
+}
+
+void
+dhdpcie_bus_intr_enable(dhd_bus_t *bus)
+{
+ DHD_TRACE(("enable interrupts\n"));
+
+ if (!bus || !bus->sih)
+ return;
+
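+	/* PCIe core revisions 2, 4 and 6 are unmasked through the PCI config-space
+	 * intmask; newer revisions use the PCIMailBoxMask core register instead.
+	 */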
+ if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
+ (bus->sih->buscorerev == 4)) {
+ dhpcie_bus_unmask_interrupt(bus);
+ }
+ else if (bus->sih) {
+ si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxMask,
+ bus->def_intmask, bus->def_intmask);
+ }
+}
+
+void
+dhdpcie_bus_intr_disable(dhd_bus_t *bus)
+{
+
+ DHD_TRACE(("%s Enter\n", __FUNCTION__));
+
+ if (!bus || !bus->sih)
+ return;
+
+ if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
+ (bus->sih->buscorerev == 4)) {
+ dhpcie_bus_mask_interrupt(bus);
+ }
+ else if (bus->sih) {
+ si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxMask,
+ bus->def_intmask, 0);
+ }
+
+ DHD_TRACE(("%s Exit\n", __FUNCTION__));
+}
+
+
+/* Detach and free everything */
+void
+dhdpcie_bus_release(dhd_bus_t *bus)
+{
+ bool dongle_isolation = FALSE;
+ osl_t *osh = NULL;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (bus) {
+
+ osh = bus->osh;
+ ASSERT(osh);
+
+ if (bus->dhd) {
+ dongle_isolation = bus->dhd->dongle_isolation;
+ dhd_detach(bus->dhd);
+
+ if (bus->intr) {
+ dhdpcie_bus_intr_disable(bus);
+ dhdpcie_free_irq(bus);
+ }
+ dhdpcie_bus_release_dongle(bus, osh, dongle_isolation, TRUE);
+ dhd_free(bus->dhd);
+ bus->dhd = NULL;
+ }
+
+ /* unmap the regs and tcm here!! */
+ if (bus->regs) {
+ dhdpcie_bus_reg_unmap(osh, (ulong)bus->regs, DONGLE_REG_MAP_SIZE);
+ bus->regs = NULL;
+ }
+ if (bus->tcm) {
+ dhdpcie_bus_reg_unmap(osh, (ulong)bus->tcm, DONGLE_TCM_MAP_SIZE);
+ bus->tcm = NULL;
+ }
+
+ dhdpcie_bus_release_malloc(bus, osh);
+ /* Detach pcie shared structure */
+ if (bus->pcie_sh)
+ MFREE(osh, bus->pcie_sh, sizeof(pciedev_shared_t));
+
+#ifdef DHD_DEBUG
+
+ if (bus->console.buf != NULL)
+ MFREE(osh, bus->console.buf, bus->console.bufsize);
+#endif
+
+
+ /* Finally free bus info */
+ MFREE(osh, bus, sizeof(dhd_bus_t));
+
+ }
+
+ DHD_TRACE(("%s: Exit\n", __FUNCTION__));
+
+}
+
+
+void
+dhdpcie_bus_release_dongle(dhd_bus_t *bus, osl_t *osh, bool dongle_isolation, bool reset_flag)
+{
+
+ DHD_TRACE(("%s Enter\n", __FUNCTION__));
+
+ DHD_TRACE(("%s: Enter bus->dhd %p bus->dhd->dongle_reset %d \n", __FUNCTION__,
+ bus->dhd, bus->dhd->dongle_reset));
+
+ if ((bus->dhd && bus->dhd->dongle_reset) && reset_flag) {
+ DHD_TRACE(("%s Exit\n", __FUNCTION__));
+ return;
+ }
+
+ if (bus->sih) {
+
+ if (!dongle_isolation)
+ pcie_watchdog_reset(bus->osh, bus->sih, (sbpcieregs_t *)(bus->regs));
+
+ if (bus->ltrsleep_on_unload) {
+ si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, u.pcie2.ltr_state), ~0, 0);
+ }
+ si_detach(bus->sih);
+ if (bus->vars && bus->varsz)
+ MFREE(osh, bus->vars, bus->varsz);
+ bus->vars = NULL;
+ }
+
+ DHD_TRACE(("%s Exit\n", __FUNCTION__));
+}
+
+uint32
+dhdpcie_bus_cfg_read_dword(dhd_bus_t *bus, uint32 addr, uint32 size)
+{
+ uint32 data = OSL_PCI_READ_CONFIG(bus->osh, addr, size);
+ return data;
+}
+
+/* 32 bit config write */
+void
+dhdpcie_bus_cfg_write_dword(dhd_bus_t *bus, uint32 addr, uint32 size, uint32 data)
+{
+ OSL_PCI_WRITE_CONFIG(bus->osh, addr, size, data);
+}
+
+void
+dhdpcie_bus_cfg_set_bar0_win(dhd_bus_t *bus, uint32 data)
+{
+ OSL_PCI_WRITE_CONFIG(bus->osh, PCI_BAR0_WIN, 4, data);
+}
+
+void
+dhdpcie_bus_dongle_setmemsize(struct dhd_bus *bus, int mem_size)
+{
+ int32 min_size = DONGLE_MIN_MEMSIZE;
+ /* Restrict the memsize to user specified limit */
+ DHD_ERROR(("user: Restrict the dongle ram size to %d, min accepted %d\n",
+ dhd_dongle_memsize, min_size));
+ if ((dhd_dongle_memsize > min_size) &&
+ (dhd_dongle_memsize < (int32)bus->orig_ramsize))
+ bus->ramsize = dhd_dongle_memsize;
+}
+
+void
+dhdpcie_bus_release_malloc(dhd_bus_t *bus, osl_t *osh)
+{
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (bus->dhd && bus->dhd->dongle_reset)
+ return;
+
+ if (bus->vars && bus->varsz) {
+ MFREE(osh, bus->vars, bus->varsz);
+ bus->vars = NULL;
+ }
+
+ DHD_TRACE(("%s: Exit\n", __FUNCTION__));
+ return;
+
+}
+
+/* Stop bus module: clear pending frames, disable data flow */
+void dhd_bus_stop(struct dhd_bus *bus, bool enforce_mutex)
+{
+ uint32 status;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (!bus->dhd)
+ return;
+
+ if (bus->dhd->busstate == DHD_BUS_DOWN) {
+ DHD_ERROR(("%s: already down by net_dev_reset\n", __FUNCTION__));
+ goto done;
+ }
+
+ bus->dhd->busstate = DHD_BUS_DOWN;
+ dhdpcie_bus_intr_disable(bus);
+ status = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
+ dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, status);
+ if (!dhd_download_fw_on_driverload)
+ dhd_dpc_kill(bus->dhd);
+
+ /* Clear rx control and wake any waiters */
+ bus->rxlen = 0;
+ dhd_os_ioctl_resp_wake(bus->dhd);
+
+done:
+ return;
+}
+
+/* Watchdog timer function */
+bool dhd_bus_watchdog(dhd_pub_t *dhd)
+{
+#ifdef DHD_DEBUG
+ dhd_bus_t *bus;
+ bus = dhd->bus;
+
+
+
+ /* Poll for console output periodically */
+ if (dhd->busstate == DHD_BUS_DATA && dhd_console_ms != 0) {
+ bus->console.count += dhd_watchdog_ms;
+ if (bus->console.count >= dhd_console_ms) {
+ bus->console.count -= dhd_console_ms;
+ /* Make sure backplane clock is on */
+ if (dhdpcie_bus_readconsole(bus) < 0)
+ dhd_console_ms = 0; /* On error, stop trying */
+ }
+ }
+#endif /* DHD_DEBUG */
+
+ return FALSE;
+}
+
+
+
+/* Download firmware image and nvram image */
+int
+dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh,
+ char *pfw_path, char *pnv_path)
+{
+ int ret;
+
+ bus->fw_path = pfw_path;
+ bus->nv_path = pnv_path;
+
+ ret = dhdpcie_download_firmware(bus, osh);
+
+ return ret;
+}
+
+static int
+dhdpcie_download_firmware(struct dhd_bus *bus, osl_t *osh)
+{
+ int ret = 0;
+
+ DHD_TRACE_HW4(("%s: firmware path=%s, nvram path=%s\n",
+ __FUNCTION__, bus->fw_path, bus->nv_path));
+
+ DHD_OS_WAKE_LOCK(bus->dhd);
+
+ ret = _dhdpcie_download_firmware(bus);
+
+ DHD_OS_WAKE_UNLOCK(bus->dhd);
+ return ret;
+}
+
+static int
+dhdpcie_download_code_file(struct dhd_bus *bus, char *pfw_path)
+{
+ int bcmerror = -1;
+ int offset = 0;
+ int len;
+ void *image = NULL;
+ uint8 *memblock = NULL, *memptr;
+
+ DHD_ERROR(("%s: download firmware %s\n", __FUNCTION__, pfw_path));
+
+ /* Should succeed in opening image if it is actually given through registry
+ * entry or in module param.
+ */
+ image = dhd_os_open_image(pfw_path);
+ if (image == NULL)
+ goto err;
+
+ memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
+ if (memblock == NULL) {
+ DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK));
+ goto err;
+ }
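+	/* Round memptr up so transfers start on a DHD_SDALIGN boundary within the
+	 * over-allocated block.
+	 */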
+ if ((uint32)(uintptr)memblock % DHD_SDALIGN)
+ memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN));
+
+ /* Download image */
+ while ((len = dhd_os_get_image_block((char*)memptr, MEMBLOCK, image))) {
+ if (len < 0) {
+ DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n", __FUNCTION__, len));
+ bcmerror = BCME_ERROR;
+ goto err;
+ }
+ /* check if CR4 */
+ if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
+			/* If the offset is 0, save the reset instruction (first word of the
+			 * image) so it can be written to address 0 later
+			 */
+
+ if (offset == 0) {
+ bus->resetinstr = *(((uint32*)memptr));
+ /* Add start of RAM address to the address given by user */
+ offset += bus->dongle_ram_base;
+ }
+ }
+
+ bcmerror = dhdpcie_bus_membytes(bus, TRUE, offset, memptr, len);
+ if (bcmerror) {
+ DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
+ __FUNCTION__, bcmerror, MEMBLOCK, offset));
+ goto err;
+ }
+
+ offset += MEMBLOCK;
+ }
+
+err:
+ if (memblock)
+ MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN);
+
+ if (image)
+ dhd_os_close_image(image);
+
+ return bcmerror;
+}
+
+
+static int
+dhdpcie_download_nvram(struct dhd_bus *bus)
+{
+ int bcmerror = -1;
+ uint len;
+ void * image = NULL;
+ char * memblock = NULL;
+ char *bufp;
+ char *pnv_path;
+ bool nvram_file_exists;
+
+ pnv_path = bus->nv_path;
+
+ nvram_file_exists = ((pnv_path != NULL) && (pnv_path[0] != '\0'));
+ if (!nvram_file_exists && (bus->nvram_params == NULL))
+ return (0);
+
+ if (nvram_file_exists) {
+ image = dhd_os_open_image(pnv_path);
+ if (image == NULL)
+ goto err;
+ }
+
+ memblock = MALLOC(bus->dhd->osh, MAX_NVRAMBUF_SIZE);
+ if (memblock == NULL) {
+ DHD_ERROR(("%s: Failed to allocate memory %d bytes\n",
+ __FUNCTION__, MAX_NVRAMBUF_SIZE));
+ goto err;
+ }
+
+ /* Download variables */
+ if (nvram_file_exists) {
+ len = dhd_os_get_image_block(memblock, MAX_NVRAMBUF_SIZE, image);
+ }
+ else {
+
+		/* nvram_params may contain embedded NULs, so strlen() cannot be used;
+		 * use the stored length instead
+		 */
+ len = bus->nvram_params_len;
+ ASSERT(len <= MAX_NVRAMBUF_SIZE);
+ memcpy(memblock, bus->nvram_params, len);
+ }
+ if (len > 0 && len < MAX_NVRAMBUF_SIZE) {
+ bufp = (char *)memblock;
+ bufp[len] = 0;
+
+ if (nvram_file_exists)
+ len = process_nvram_vars(bufp, len);
+
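+		/* Pad the variable block to a 4-byte boundary and append the
+		 * terminating NUL before downloading it to the dongle.
+		 */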
+ if (len % 4) {
+ len += 4 - (len % 4);
+ }
+ bufp += len;
+ *bufp++ = 0;
+ if (len)
+ bcmerror = dhdpcie_downloadvars(bus, memblock, len + 1);
+ if (bcmerror) {
+ DHD_ERROR(("%s: error downloading vars: %d\n",
+ __FUNCTION__, bcmerror));
+ }
+ }
+ else {
+ DHD_ERROR(("%s: error reading nvram file: %d\n",
+ __FUNCTION__, len));
+ bcmerror = BCME_ERROR;
+ }
+
+err:
+ if (memblock)
+ MFREE(bus->dhd->osh, memblock, MAX_NVRAMBUF_SIZE);
+
+ if (image)
+ dhd_os_close_image(image);
+
+ return bcmerror;
+}
+
+
+#ifdef BCMEMBEDIMAGE
+int
+dhdpcie_download_code_array(struct dhd_bus *bus)
+{
+ int bcmerror = -1;
+ int offset = 0;
+ unsigned char *p_dlarray = NULL;
+ unsigned int dlarray_size = 0;
+ unsigned int downloded_len, remaining_len, len;
+ char *p_dlimagename, *p_dlimagever, *p_dlimagedate;
+ uint8 *memblock = NULL, *memptr;
+
+ downloded_len = 0;
+ remaining_len = 0;
+ len = 0;
+
+ p_dlarray = dlarray;
+ dlarray_size = sizeof(dlarray);
+ p_dlimagename = dlimagename;
+ p_dlimagever = dlimagever;
+ p_dlimagedate = dlimagedate;
+
+ if ((p_dlarray == 0) || (dlarray_size == 0) ||(dlarray_size > bus->ramsize) ||
+ (p_dlimagename == 0) || (p_dlimagever == 0) || (p_dlimagedate == 0))
+ goto err;
+
+ memptr = memblock = MALLOC(bus->dhd->osh, MEMBLOCK + DHD_SDALIGN);
+ if (memblock == NULL) {
+ DHD_ERROR(("%s: Failed to allocate memory %d bytes\n", __FUNCTION__, MEMBLOCK));
+ goto err;
+ }
+ if ((uint32)(uintptr)memblock % DHD_SDALIGN)
+ memptr += (DHD_SDALIGN - ((uint32)(uintptr)memblock % DHD_SDALIGN));
+
+ while (downloded_len < dlarray_size) {
+ remaining_len = dlarray_size - downloded_len;
+ if (remaining_len >= MEMBLOCK)
+ len = MEMBLOCK;
+ else
+ len = remaining_len;
+
+ memcpy(memptr, (p_dlarray + downloded_len), len);
+ /* check if CR4 */
+ if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
+			/* If the offset is 0, save the reset instruction (first word of the
+			 * image) so it can be written to address 0 later
+			 */
+ if (offset == 0) {
+ bus->resetinstr = *(((uint32*)memptr));
+ /* Add start of RAM address to the address given by user */
+ offset += bus->dongle_ram_base;
+ }
+ }
+ bcmerror = dhdpcie_bus_membytes(bus, TRUE, offset, (uint8 *)memptr, len);
+ downloded_len += len;
+ if (bcmerror) {
+ DHD_ERROR(("%s: error %d on writing %d membytes at 0x%08x\n",
+ __FUNCTION__, bcmerror, MEMBLOCK, offset));
+ goto err;
+ }
+ offset += MEMBLOCK;
+ }
+
+#ifdef DHD_DEBUG
+ /* Upload and compare the downloaded code */
+ {
+ unsigned char *ularray = NULL;
+ unsigned int uploded_len;
+ uploded_len = 0;
+ bcmerror = -1;
+ ularray = MALLOC(bus->dhd->osh, dlarray_size);
+ if (ularray == NULL)
+ goto upload_err;
+ /* Upload image to verify downloaded contents. */
+ offset = bus->dongle_ram_base;
+ memset(ularray, 0xaa, dlarray_size);
+ while (uploded_len < dlarray_size) {
+ remaining_len = dlarray_size - uploded_len;
+ if (remaining_len >= MEMBLOCK)
+ len = MEMBLOCK;
+ else
+ len = remaining_len;
+ bcmerror = dhdpcie_bus_membytes(bus, FALSE, offset,
+ (uint8 *)(ularray + uploded_len), len);
+ if (bcmerror) {
+ DHD_ERROR(("%s: error %d on reading %d membytes at 0x%08x\n",
+ __FUNCTION__, bcmerror, MEMBLOCK, offset));
+ goto upload_err;
+ }
+
+ uploded_len += len;
+ offset += MEMBLOCK;
+ }
+
+ if (memcmp(p_dlarray, ularray, dlarray_size)) {
+ DHD_ERROR(("%s: Downloaded image is corrupted (%s, %s, %s).\n",
+ __FUNCTION__, p_dlimagename, p_dlimagever, p_dlimagedate));
+ goto upload_err;
+
+ } else
+ DHD_ERROR(("%s: Download, Upload and compare succeeded (%s, %s, %s).\n",
+ __FUNCTION__, p_dlimagename, p_dlimagever, p_dlimagedate));
+upload_err:
+ if (ularray)
+ MFREE(bus->dhd->osh, ularray, dlarray_size);
+ }
+#endif /* DHD_DEBUG */
+err:
+
+ if (memblock)
+ MFREE(bus->dhd->osh, memblock, MEMBLOCK + DHD_SDALIGN);
+
+ return bcmerror;
+}
+#endif /* BCMEMBEDIMAGE */
+
+
+static int
+_dhdpcie_download_firmware(struct dhd_bus *bus)
+{
+ int bcmerror = -1;
+
+ bool embed = FALSE; /* download embedded firmware */
+ bool dlok = FALSE; /* download firmware succeeded */
+
+ /* Out immediately if no image to download */
+ if ((bus->fw_path == NULL) || (bus->fw_path[0] == '\0')) {
+#ifdef BCMEMBEDIMAGE
+ embed = TRUE;
+#else
+		DHD_ERROR(("%s: no firmware file\n", __FUNCTION__));
+ return 0;
+#endif
+ }
+
+ /* Keep arm in reset */
+ if (dhdpcie_bus_download_state(bus, TRUE)) {
+ DHD_ERROR(("%s: error placing ARM core in reset\n", __FUNCTION__));
+ goto err;
+ }
+
+ /* External image takes precedence if specified */
+ if ((bus->fw_path != NULL) && (bus->fw_path[0] != '\0')) {
+ if (dhdpcie_download_code_file(bus, bus->fw_path)) {
+ DHD_ERROR(("%s: dongle image file download failed\n", __FUNCTION__));
+#ifdef BCMEMBEDIMAGE
+ embed = TRUE;
+#else
+ goto err;
+#endif
+ }
+ else {
+ embed = FALSE;
+ dlok = TRUE;
+ }
+ }
+
+#ifdef BCMEMBEDIMAGE
+ if (embed) {
+ if (dhdpcie_download_code_array(bus)) {
+ DHD_ERROR(("%s: dongle image array download failed\n", __FUNCTION__));
+ goto err;
+ }
+ else {
+ dlok = TRUE;
+ }
+ }
+#else
+ BCM_REFERENCE(embed);
+#endif
+ if (!dlok) {
+ DHD_ERROR(("%s: dongle image download failed\n", __FUNCTION__));
+ goto err;
+ }
+
+ /* EXAMPLE: nvram_array */
+	/* If a valid nvram_array is specified as above, it can be passed down to the dongle */
+ /* dhd_bus_set_nvram_params(bus, (char *)&nvram_array); */
+
+
+ /* External nvram takes precedence if specified */
+ if (dhdpcie_download_nvram(bus)) {
+ DHD_ERROR(("%s: dongle nvram file download failed\n", __FUNCTION__));
+ goto err;
+ }
+
+ /* Take arm out of reset */
+ if (dhdpcie_bus_download_state(bus, FALSE)) {
+ DHD_ERROR(("%s: error getting out of ARM core reset\n", __FUNCTION__));
+ goto err;
+ }
+
+ bcmerror = 0;
+
+err:
+ return bcmerror;
+}
+
+int dhd_bus_rxctl(struct dhd_bus *bus, uchar *msg, uint msglen)
+{
+ int timeleft;
+ uint rxlen = 0;
+ bool pending;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (bus->dhd->dongle_reset)
+ return -EIO;
+
+ /* Wait until control frame is available */
+ timeleft = dhd_os_ioctl_resp_wait(bus->dhd, &bus->rxlen, &pending);
+ if (timeleft == 0) {
+ DHD_ERROR(("%s: resumed on timeout\n", __FUNCTION__));
+ bus->ioct_resp.cmn_hdr.request_id = 0;
+ bus->ioct_resp.compl_hdr.status = 0xffff;
+ bus->rxlen = 0;
+ }
+ rxlen = bus->rxlen;
+ bcopy(&bus->ioct_resp, msg, sizeof(ioctl_comp_resp_msg_t));
+ bus->rxlen = 0;
+
+ if (rxlen) {
+ DHD_CTL(("%s: resumed on rxctl frame, got %d\n", __FUNCTION__, rxlen));
+ } else if (timeleft == 0) {
+ DHD_ERROR(("%s: resumed on timeout\n", __FUNCTION__));
+ } else if (pending == TRUE) {
+ DHD_CTL(("%s: canceled\n", __FUNCTION__));
+ return -ERESTARTSYS;
+ } else {
+ DHD_CTL(("%s: resumed for unknown reason?\n", __FUNCTION__));
+ }
+ if (timeleft == 0) {
+ bus->dhd->rxcnt_timeout++;
+ DHD_ERROR(("%s: rxcnt_timeout=%d\n", __FUNCTION__, bus->dhd->rxcnt_timeout));
+ }
+ else
+ bus->dhd->rxcnt_timeout = 0;
+
+ if (rxlen)
+ bus->dhd->rx_ctlpkts++;
+ else
+ bus->dhd->rx_ctlerrs++;
+
+ if (bus->dhd->rxcnt_timeout >= MAX_CNTL_TX_TIMEOUT)
+ return -ETIMEDOUT;
+
+ if (bus->dhd->dongle_trap_occured)
+ return -EREMOTEIO;
+
+ return rxlen ? (int)rxlen : -EIO;
+
+}
+
+#define CONSOLE_LINE_MAX 192
+
+#ifdef DHD_DEBUG
+static int
+dhdpcie_bus_readconsole(dhd_bus_t *bus)
+{
+ dhd_console_t *c = &bus->console;
+ uint8 line[CONSOLE_LINE_MAX], ch;
+ uint32 n, idx, addr;
+ int rv;
+
+ /* Don't do anything until FWREADY updates console address */
+ if (bus->console_addr == 0)
+ return -1;
+
+ /* Read console log struct */
+ addr = bus->console_addr + OFFSETOF(hnd_cons_t, log);
+
+ if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)&c->log, sizeof(c->log))) < 0)
+ return rv;
+
+ /* Allocate console buffer (one time only) */
+ if (c->buf == NULL) {
+ c->bufsize = ltoh32(c->log.buf_size);
+ if ((c->buf = MALLOC(bus->dhd->osh, c->bufsize)) == NULL)
+ return BCME_NOMEM;
+ }
+ idx = ltoh32(c->log.idx);
+
+ /* Protect against corrupt value */
+ if (idx > c->bufsize)
+ return BCME_ERROR;
+
+ /* Skip reading the console buffer if the index pointer has not moved */
+ if (idx == c->last)
+ return BCME_OK;
+
+ /* Read the console buffer */
+ addr = ltoh32(c->log.buf);
+ if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, c->buf, c->bufsize)) < 0)
+ return rv;
+
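+	/* Walk the circular console log from the last consumed offset up to the
+	 * current index, emitting one newline-terminated line at a time.
+	 */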
+ while (c->last != idx) {
+ for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
+ if (c->last == idx) {
+ /* This would output a partial line. Instead, back up
+ * the buffer pointer and output this line next time around.
+ */
+ if (c->last >= n)
+ c->last -= n;
+ else
+ c->last = c->bufsize - n;
+ goto break2;
+ }
+ ch = c->buf[c->last];
+ c->last = (c->last + 1) % c->bufsize;
+ if (ch == '\n')
+ break;
+ line[n] = ch;
+ }
+
+ if (n > 0) {
+ if (line[n - 1] == '\r')
+ n--;
+ line[n] = 0;
+ printf("CONSOLE: %s\n", line);
+ }
+ }
+break2:
+
+ return BCME_OK;
+}
+
+static int
+dhdpcie_checkdied(dhd_bus_t *bus, char *data, uint size)
+{
+ int bcmerror = 0;
+ uint msize = 512;
+ char *mbuffer = NULL;
+ char *console_buffer = NULL;
+ uint maxstrlen = 256;
+ char *str = NULL;
+ trap_t tr;
+ pciedev_shared_t *pciedev_shared = bus->pcie_sh;
+ struct bcmstrbuf strbuf;
+ uint32 console_ptr, console_size, console_index;
+ uint8 line[CONSOLE_LINE_MAX], ch;
+ uint32 n, i, addr;
+ int rv;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (DHD_NOCHECKDIED_ON())
+ return 0;
+
+ if (data == NULL) {
+		/*
+		 * Called after an rx ctrl timeout when "data" is NULL;
+		 * allocate memory to trace the trap or assert.
+		 */
+ size = msize;
+ mbuffer = data = MALLOC(bus->dhd->osh, msize);
+
+ if (mbuffer == NULL) {
+ DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, msize));
+ bcmerror = BCME_NOMEM;
+ goto done;
+ }
+ }
+
+ if ((str = MALLOC(bus->dhd->osh, maxstrlen)) == NULL) {
+ DHD_ERROR(("%s: MALLOC(%d) failed \n", __FUNCTION__, maxstrlen));
+ bcmerror = BCME_NOMEM;
+ goto done;
+ }
+
+ if ((bcmerror = dhdpcie_readshared(bus)) < 0)
+ goto done;
+
+ bcm_binit(&strbuf, data, size);
+
+ bcm_bprintf(&strbuf, "msgtrace address : 0x%08X\nconsole address : 0x%08X\n",
+ pciedev_shared->msgtrace_addr, pciedev_shared->console_addr);
+
+ if ((pciedev_shared->flags & PCIE_SHARED_ASSERT_BUILT) == 0) {
+ /* NOTE: Misspelled assert is intentional - DO NOT FIX.
+ * (Avoids conflict with real asserts for programmatic parsing of output.)
+ */
+ bcm_bprintf(&strbuf, "Assrt not built in dongle\n");
+ }
+
+ if ((bus->pcie_sh->flags & (PCIE_SHARED_ASSERT|PCIE_SHARED_TRAP)) == 0) {
+ /* NOTE: Misspelled assert is intentional - DO NOT FIX.
+ * (Avoids conflict with real asserts for programmatic parsing of output.)
+ */
+ bcm_bprintf(&strbuf, "No trap%s in dongle",
+ (bus->pcie_sh->flags & PCIE_SHARED_ASSERT_BUILT)
+ ?"/assrt" :"");
+ } else {
+ if (bus->pcie_sh->flags & PCIE_SHARED_ASSERT) {
+ /* Download assert */
+ bcm_bprintf(&strbuf, "Dongle assert");
+ if (bus->pcie_sh->assert_exp_addr != 0) {
+ str[0] = '\0';
+ if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
+ bus->pcie_sh->assert_exp_addr,
+ (uint8 *)str, maxstrlen)) < 0)
+ goto done;
+
+ str[maxstrlen - 1] = '\0';
+ bcm_bprintf(&strbuf, " expr \"%s\"", str);
+ }
+
+ if (bus->pcie_sh->assert_file_addr != 0) {
+ str[0] = '\0';
+ if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
+ bus->pcie_sh->assert_file_addr,
+ (uint8 *)str, maxstrlen)) < 0)
+ goto done;
+
+ str[maxstrlen - 1] = '\0';
+ bcm_bprintf(&strbuf, " file \"%s\"", str);
+ }
+
+ bcm_bprintf(&strbuf, " line %d ", bus->pcie_sh->assert_line);
+ }
+
+ if (bus->pcie_sh->flags & PCIE_SHARED_TRAP) {
+ bus->dhd->dongle_trap_occured = TRUE;
+ if ((bcmerror = dhdpcie_bus_membytes(bus, FALSE,
+ bus->pcie_sh->trap_addr,
+ (uint8*)&tr, sizeof(trap_t))) < 0)
+ goto done;
+
+ bcm_bprintf(&strbuf,
+ "Dongle trap type 0x%x @ epc 0x%x, cpsr 0x%x, spsr 0x%x, sp 0x%x,"
+ "lp 0x%x, rpc 0x%x Trap offset 0x%x, "
+ "r0 0x%x, r1 0x%x, r2 0x%x, r3 0x%x, "
+ "r4 0x%x, r5 0x%x, r6 0x%x, r7 0x%x\n\n",
+ ltoh32(tr.type), ltoh32(tr.epc), ltoh32(tr.cpsr), ltoh32(tr.spsr),
+ ltoh32(tr.r13), ltoh32(tr.r14), ltoh32(tr.pc),
+ ltoh32(bus->pcie_sh->trap_addr),
+ ltoh32(tr.r0), ltoh32(tr.r1), ltoh32(tr.r2), ltoh32(tr.r3),
+ ltoh32(tr.r4), ltoh32(tr.r5), ltoh32(tr.r6), ltoh32(tr.r7));
+
+ addr = bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log);
+ if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
+ (uint8 *)&console_ptr, sizeof(console_ptr))) < 0)
+ goto printbuf;
+
+ addr = bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log.buf_size);
+ if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
+ (uint8 *)&console_size, sizeof(console_size))) < 0)
+ goto printbuf;
+
+ addr = bus->pcie_sh->console_addr + OFFSETOF(hnd_cons_t, log.idx);
+ if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr,
+ (uint8 *)&console_index, sizeof(console_index))) < 0)
+ goto printbuf;
+
+ console_ptr = ltoh32(console_ptr);
+ console_size = ltoh32(console_size);
+ console_index = ltoh32(console_index);
+
+ if (console_size > CONSOLE_BUFFER_MAX ||
+ !(console_buffer = MALLOC(bus->dhd->osh, console_size)))
+ goto printbuf;
+
+ if ((rv = dhdpcie_bus_membytes(bus, FALSE, console_ptr,
+ (uint8 *)console_buffer, console_size)) < 0)
+ goto printbuf;
+
+ for (i = 0, n = 0; i < console_size; i += n + 1) {
+ for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) {
+ ch = console_buffer[(console_index + i + n) % console_size];
+ if (ch == '\n')
+ break;
+ line[n] = ch;
+ }
+
+
+ if (n > 0) {
+ if (line[n - 1] == '\r')
+ n--;
+ line[n] = 0;
+ /* Don't use DHD_ERROR macro since we print
+ * a lot of information quickly. The macro
+ * will truncate a lot of the printfs
+ */
+
+ if (dhd_msg_level & DHD_ERROR_VAL)
+ printf("CONSOLE: %s\n", line);
+ }
+ }
+ }
+ }
+
+printbuf:
+ if (bus->pcie_sh->flags & (PCIE_SHARED_ASSERT | PCIE_SHARED_TRAP)) {
+ DHD_ERROR(("%s: %s\n", __FUNCTION__, strbuf.origbuf));
+ }
+
+done:
+ if (mbuffer)
+ MFREE(bus->dhd->osh, mbuffer, msize);
+ if (str)
+ MFREE(bus->dhd->osh, str, maxstrlen);
+
+ if (console_buffer)
+ MFREE(bus->dhd->osh, console_buffer, console_size);
+
+ return bcmerror;
+}
+#endif /* DHD_DEBUG */
+
+
+/**
+ * Transfers bytes from host to dongle using pio mode.
+ * Parameter 'address' is a backplane address.
+ */
+static int
+dhdpcie_bus_membytes(dhd_bus_t *bus, bool write, ulong address, uint8 *data, uint size)
+{
+ int bcmerror = 0;
+ uint dsize;
+ int detect_endian_flag = 0x01;
+ bool little_endian;
+
+ /* Detect endianness. */
+ little_endian = *(char *)&detect_endian_flag;
+
+ /* In remap mode, adjust address beyond socram and redirect
+ * to devram at SOCDEVRAM_BP_ADDR since remap address > orig_ramsize
+ * is not backplane accessible
+ */
+
+ /* Determine initial transfer parameters */
+ dsize = sizeof(uint64);
+
+ /* Do the transfer(s) */
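+	/* 64-bit TCM accesses are used while at least 8 bytes remain and the host
+	 * is little-endian; otherwise the transfer falls back to byte accesses.
+	 */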
+ if (write) {
+ while (size) {
+ if (size >= sizeof(uint64) && little_endian)
+ dhdpcie_bus_wtcm64(bus, address, *((uint64 *)data));
+ else {
+ dsize = sizeof(uint8);
+ dhdpcie_bus_wtcm8(bus, address, *data);
+ }
+
+ /* Adjust for next transfer (if any) */
+ if ((size -= dsize)) {
+ data += dsize;
+ address += dsize;
+ }
+ }
+ } else {
+ while (size) {
+ if (size >= sizeof(uint64) && little_endian)
+ *(uint64 *)data = dhdpcie_bus_rtcm64(bus, address);
+ else {
+ dsize = sizeof(uint8);
+ *data = dhdpcie_bus_rtcm8(bus, address);
+ }
+
+ /* Adjust for next transfer (if any) */
+ if ((size -= dsize) > 0) {
+ data += dsize;
+ address += dsize;
+ }
+ }
+ }
+ return bcmerror;
+}
+
+int BCMFASTPATH
+dhd_bus_schedule_queue(struct dhd_bus *bus, uint16 flow_id, bool txs)
+{
+ flow_ring_node_t *flow_ring_node;
+ int ret = BCME_OK;
+
+ DHD_INFO(("%s: flow_id is %d\n", __FUNCTION__, flow_id));
+ /* ASSERT on flow_id */
+ if (flow_id >= bus->max_sub_queues) {
+ DHD_ERROR(("%s: flow_id is invalid %d, max %d\n", __FUNCTION__,
+ flow_id, bus->max_sub_queues));
+ return 0;
+ }
+
+ flow_ring_node = DHD_FLOW_RING(bus->dhd, flow_id);
+
+ {
+ unsigned long flags;
+ void *txp = NULL;
+ flow_queue_t *queue;
+
+ queue = &flow_ring_node->queue; /* queue associated with flow ring */
+
+ DHD_QUEUE_LOCK(queue->lock, flags);
+
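+		/* Drain the flow queue: hand packets to the protocol layer until the
+		 * flow ring runs out of space, then re-insert the unsent packet at
+		 * the head of the queue and stop.
+		 */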
+ while ((txp = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
+ PKTORPHAN(txp);
+
+#ifdef DHDTCPACK_SUPPRESS
+ dhd_tcpack_check_xmit(bus->dhd, txp);
+#endif /* DHDTCPACK_SUPPRESS */
+ /* Attempt to transfer packet over flow ring */
+
+ ret = dhd_prot_txdata(bus->dhd, txp, flow_ring_node->flow_info.ifindex);
+ if (ret != BCME_OK) { /* may not have resources in flow ring */
+				DHD_INFO(("%s: Reinsert %d\n", __FUNCTION__, ret));
+ dhd_prot_txdata_write_flush(bus->dhd, flow_id, FALSE);
+ /* reinsert at head */
+ dhd_flow_queue_reinsert(bus->dhd, queue, txp);
+ DHD_QUEUE_UNLOCK(queue->lock, flags);
+
+ /* If we are able to requeue back, return success */
+ return BCME_OK;
+ }
+ }
+
+ dhd_prot_txdata_write_flush(bus->dhd, flow_id, FALSE);
+
+ DHD_QUEUE_UNLOCK(queue->lock, flags);
+ }
+
+ return ret;
+}
+
+/* Send a data frame to the dongle. Callee disposes of txp. */
+int BCMFASTPATH
+dhd_bus_txdata(struct dhd_bus *bus, void *txp, uint8 ifidx)
+{
+ unsigned long flags;
+ int ret = BCME_OK;
+ void *txp_pend = NULL;
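+	/* Pull mode: look up the packet's flow ring, enqueue it on the flow queue
+	 * and kick the scheduler. In txmode_push mode the packet is handed
+	 * straight to the protocol layer instead.
+	 */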
+ if (!bus->txmode_push) {
+ uint16 flowid;
+ flow_queue_t *queue;
+ flow_ring_node_t *flow_ring_node;
+ if (!bus->dhd->flowid_allocator) {
+			DHD_ERROR(("%s: Flow ring not initialized yet\n", __FUNCTION__));
+ goto toss;
+ }
+
+ flowid = DHD_PKTTAG_FLOWID((dhd_pkttag_fr_t*)PKTTAG(txp));
+
+ flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
+
+ DHD_TRACE(("%s: pkt flowid %d, status %d active %d\n",
+ __FUNCTION__, flowid, flow_ring_node->status,
+ flow_ring_node->active));
+
+ if ((flowid >= bus->dhd->num_flow_rings) ||
+ (!flow_ring_node->active) ||
+ (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING)) {
+ DHD_INFO(("%s: Dropping pkt flowid %d, status %d active %d\n",
+ __FUNCTION__, flowid, flow_ring_node->status,
+ flow_ring_node->active));
+ ret = BCME_ERROR;
+ goto toss;
+ }
+
+ queue = &flow_ring_node->queue; /* queue associated with flow ring */
+
+ DHD_QUEUE_LOCK(queue->lock, flags);
+
+ if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp)) != BCME_OK)
+ txp_pend = txp;
+
+ DHD_QUEUE_UNLOCK(queue->lock, flags);
+
+ if (flow_ring_node->status) {
+ DHD_INFO(("%s: Enq pkt flowid %d, status %d active %d\n",
+ __FUNCTION__, flowid, flow_ring_node->status,
+ flow_ring_node->active));
+ if (txp_pend) {
+ txp = txp_pend;
+ goto toss;
+ }
+ return BCME_OK;
+ }
+ ret = dhd_bus_schedule_queue(bus, flowid, FALSE);
+
+ /* If we have anything pending, try to push into q */
+ if (txp_pend) {
+ DHD_QUEUE_LOCK(queue->lock, flags);
+
+ if ((ret = dhd_flow_queue_enqueue(bus->dhd, queue, txp_pend)) != BCME_OK) {
+ DHD_QUEUE_UNLOCK(queue->lock, flags);
+ txp = txp_pend;
+ goto toss;
+ }
+
+ DHD_QUEUE_UNLOCK(queue->lock, flags);
+ }
+
+ return ret;
+
+ } else { /* bus->txmode_push */
+ return dhd_prot_txdata(bus->dhd, txp, ifidx);
+ }
+
+toss:
+ DHD_INFO(("%s: Toss %d\n", __FUNCTION__, ret));
+ PKTCFREE(bus->dhd->osh, txp, TRUE);
+ return ret;
+}
+
+
+void
+dhd_bus_stop_queue(struct dhd_bus *bus)
+{
+ dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON);
+ bus->bus_flowctrl = TRUE;
+}
+
+void
+dhd_bus_start_queue(struct dhd_bus *bus)
+{
+ dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, OFF);
+ bus->bus_flowctrl = TRUE;
+}
+
+void
+dhd_bus_update_retlen(dhd_bus_t *bus, uint32 retlen, uint32 pkt_id, uint16 status,
+ uint32 resp_len)
+{
+ bus->rxlen = retlen;
+ bus->ioct_resp.cmn_hdr.request_id = pkt_id;
+ bus->ioct_resp.compl_hdr.status = status;
+ bus->ioct_resp.resp_len = (uint16)resp_len;
+}
+
+#if defined(DHD_DEBUG)
+/* Device console input function */
+int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen)
+{
+ dhd_bus_t *bus = dhd->bus;
+ uint32 addr, val;
+ int rv;
+ /* Address could be zero if CONSOLE := 0 in dongle Makefile */
+ if (bus->console_addr == 0)
+ return BCME_UNSUPPORTED;
+
+ /* Don't allow input if dongle is in reset */
+ if (bus->dhd->dongle_reset) {
+ dhd_os_sdunlock(bus->dhd);
+ return BCME_NOTREADY;
+ }
+
+ /* Zero cbuf_index */
+ addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf_idx);
+ val = htol32(0);
+ if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
+ goto done;
+
+ /* Write message into cbuf */
+ addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf);
+ if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)msg, msglen)) < 0)
+ goto done;
+
+ /* Write length into vcons_in */
+ addr = bus->console_addr + OFFSETOF(hnd_cons_t, vcons_in);
+ val = htol32(msglen);
+ if ((rv = dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
+ goto done;
+
+	/* Generate an interrupt to the dongle to indicate that it needs to process the console command */
+ dhdpcie_send_mb_data(bus, H2D_HOST_CONS_INT);
+done:
+ return rv;
+}
+#endif /* defined(DHD_DEBUG) */
+
+/* Process an rx frame and send it up to the network interface layer */
+void BCMFASTPATH
+dhd_bus_rx_frame(struct dhd_bus *bus, void* pkt, int ifidx, uint pkt_count)
+{
+ dhd_rx_frame(bus->dhd, ifidx, pkt, pkt_count, 0);
+}
+
+/** 'offset' is a backplane address */
+void
+dhdpcie_bus_wtcm8(dhd_bus_t *bus, ulong offset, uint8 data)
+{
+#ifdef EXYNOS5433_PCIE_WAR
+ exynos_pcie_set_l1_exit();
+#endif /* EXYNOS5433_PCIE_WAR */
+ *(volatile uint8 *)(bus->tcm + offset) = (uint8)data;
+#ifdef EXYNOS5433_PCIE_WAR
+ exynos_pcie_clear_l1_exit();
+#endif /* EXYNOS5433_PCIE_WAR */
+}
+
+uint8
+dhdpcie_bus_rtcm8(dhd_bus_t *bus, ulong offset)
+{
+ volatile uint8 data;
+#ifdef EXYNOS5433_PCIE_WAR
+ exynos_pcie_set_l1_exit();
+#endif /* EXYNOS5433_PCIE_WAR */
+#ifdef BCM47XX_ACP_WAR
+ data = R_REG(bus->dhd->osh, (volatile uint8 *)(bus->tcm + offset));
+#else
+ data = *(volatile uint8 *)(bus->tcm + offset);
+#endif
+#ifdef EXYNOS5433_PCIE_WAR
+ exynos_pcie_clear_l1_exit();
+#endif /* EXYNOS5433_PCIE_WAR */
+ return data;
+}
+
+void
+dhdpcie_bus_wtcm32(dhd_bus_t *bus, ulong offset, uint32 data)
+{
+#ifdef EXYNOS5433_PCIE_WAR
+ exynos_pcie_set_l1_exit();
+#endif /* EXYNOS5433_PCIE_WAR */
+ *(volatile uint32 *)(bus->tcm + offset) = (uint32)data;
+#ifdef EXYNOS5433_PCIE_WAR
+ exynos_pcie_clear_l1_exit();
+#endif /* EXYNOS5433_PCIE_WAR */
+}
+void
+dhdpcie_bus_wtcm16(dhd_bus_t *bus, ulong offset, uint16 data)
+{
+#ifdef EXYNOS5433_PCIE_WAR
+ exynos_pcie_set_l1_exit();
+#endif /* EXYNOS5433_PCIE_WAR */
+ *(volatile uint16 *)(bus->tcm + offset) = (uint16)data;
+#ifdef EXYNOS5433_PCIE_WAR
+ exynos_pcie_clear_l1_exit();
+#endif /* EXYNOS5433_PCIE_WAR */
+}
+void
+dhdpcie_bus_wtcm64(dhd_bus_t *bus, ulong offset, uint64 data)
+{
+#ifdef EXYNOS5433_PCIE_WAR
+ exynos_pcie_set_l1_exit();
+#endif /* EXYNOS5433_PCIE_WAR */
+ *(volatile uint64 *)(bus->tcm + offset) = (uint64)data;
+#ifdef EXYNOS5433_PCIE_WAR
+ exynos_pcie_clear_l1_exit();
+#endif /* EXYNOS5433_PCIE_WAR */
+}
+
+uint16
+dhdpcie_bus_rtcm16(dhd_bus_t *bus, ulong offset)
+{
+ volatile uint16 data;
+#ifdef EXYNOS5433_PCIE_WAR
+ exynos_pcie_set_l1_exit();
+#endif /* EXYNOS5433_PCIE_WAR */
+#ifdef BCM47XX_ACP_WAR
+ data = R_REG(bus->dhd->osh, (volatile uint16 *)(bus->tcm + offset));
+#else
+ data = *(volatile uint16 *)(bus->tcm + offset);
+#endif
+#ifdef EXYNOS5433_PCIE_WAR
+ exynos_pcie_clear_l1_exit();
+#endif /* EXYNOS5433_PCIE_WAR */
+ return data;
+}
+
+uint32
+dhdpcie_bus_rtcm32(dhd_bus_t *bus, ulong offset)
+{
+ volatile uint32 data;
+#ifdef EXYNOS5433_PCIE_WAR
+ exynos_pcie_set_l1_exit();
+#endif /* EXYNOS5433_PCIE_WAR */
+#ifdef BCM47XX_ACP_WAR
+ data = R_REG(bus->dhd->osh, (volatile uint32 *)(bus->tcm + offset));
+#else
+ data = *(volatile uint32 *)(bus->tcm + offset);
+#endif
+#ifdef EXYNOS5433_PCIE_WAR
+ exynos_pcie_clear_l1_exit();
+#endif /* EXYNOS5433_PCIE_WAR */
+ return data;
+}
+
+uint64
+dhdpcie_bus_rtcm64(dhd_bus_t *bus, ulong offset)
+{
+ volatile uint64 data;
+#ifdef EXYNOS5433_PCIE_WAR
+ exynos_pcie_set_l1_exit();
+#endif /* EXYNOS5433_PCIE_WAR */
+#ifdef BCM47XX_ACP_WAR
+ data = R_REG(bus->dhd->osh, (volatile uint64 *)(bus->tcm + offset));
+#else
+ data = *(volatile uint64 *)(bus->tcm + offset);
+#endif
+#ifdef EXYNOS5433_PCIE_WAR
+ exynos_pcie_clear_l1_exit();
+#endif /* EXYNOS5433_PCIE_WAR */
+ return data;
+}
+
+void
+dhd_bus_cmn_writeshared(dhd_bus_t *bus, void * data, uint32 len, uint8 type, uint16 ringid)
+{
+ uint64 long_data;
+ ulong tcm_offset;
+ pciedev_shared_t *sh;
+ pciedev_shared_t *shmem = NULL;
+
+ sh = (pciedev_shared_t*)bus->shared_addr;
+
+ DHD_INFO(("%s: writing to msgbuf type %d, len %d\n", __FUNCTION__, type, len));
+
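+	/* Each case computes the TCM offset of the target field (in the shared
+	 * structure, ring info or ring state area) and writes the value converted
+	 * to little-endian.
+	 */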
+ switch (type) {
+ case DNGL_TO_HOST_DMA_SCRATCH_BUFFER:
+ long_data = HTOL64(*(uint64 *)data);
+ tcm_offset = (ulong)&(sh->host_dma_scratch_buffer);
+ dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8*) &long_data, len);
+ prhex(__FUNCTION__, data, len);
+ break;
+
+ case DNGL_TO_HOST_DMA_SCRATCH_BUFFER_LEN :
+ tcm_offset = (ulong)&(sh->host_dma_scratch_buffer_len);
+ dhdpcie_bus_wtcm32(bus, tcm_offset, (uint32) HTOL32(*(uint32 *)data));
+ prhex(__FUNCTION__, data, len);
+ break;
+
+ case HOST_TO_DNGL_DMA_WRITEINDX_BUFFER:
+ /* ring_info_ptr stored in pcie_sh */
+ shmem = (pciedev_shared_t *)bus->pcie_sh;
+
+ long_data = HTOL64(*(uint64 *)data);
+ tcm_offset = (ulong)shmem->rings_info_ptr;
+ tcm_offset += OFFSETOF(ring_info_t, h2d_w_idx_hostaddr);
+ dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8*) &long_data, len);
+ prhex(__FUNCTION__, data, len);
+ break;
+
+ case HOST_TO_DNGL_DMA_READINDX_BUFFER:
+ /* ring_info_ptr stored in pcie_sh */
+ shmem = (pciedev_shared_t *)bus->pcie_sh;
+
+ long_data = HTOL64(*(uint64 *)data);
+ tcm_offset = (ulong)shmem->rings_info_ptr;
+ tcm_offset += OFFSETOF(ring_info_t, h2d_r_idx_hostaddr);
+ dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8*) &long_data, len);
+ prhex(__FUNCTION__, data, len);
+ break;
+
+ case DNGL_TO_HOST_DMA_WRITEINDX_BUFFER:
+ /* ring_info_ptr stored in pcie_sh */
+ shmem = (pciedev_shared_t *)bus->pcie_sh;
+
+ long_data = HTOL64(*(uint64 *)data);
+ tcm_offset = (ulong)shmem->rings_info_ptr;
+ tcm_offset += OFFSETOF(ring_info_t, d2h_w_idx_hostaddr);
+ dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8*) &long_data, len);
+ prhex(__FUNCTION__, data, len);
+ break;
+
+ case DNGL_TO_HOST_DMA_READINDX_BUFFER:
+ /* ring_info_ptr stored in pcie_sh */
+ shmem = (pciedev_shared_t *)bus->pcie_sh;
+
+ long_data = HTOL64(*(uint64 *)data);
+ tcm_offset = (ulong)shmem->rings_info_ptr;
+ tcm_offset += OFFSETOF(ring_info_t, d2h_r_idx_hostaddr);
+ dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8*) &long_data, len);
+ prhex(__FUNCTION__, data, len);
+ break;
+
+ case RING_LEN_ITEMS :
+ tcm_offset = bus->ring_sh[ringid].ring_mem_addr;
+ tcm_offset += OFFSETOF(ring_mem_t, len_items);
+ dhdpcie_bus_wtcm16(bus, tcm_offset, (uint16) HTOL16(*(uint16 *)data));
+ break;
+
+ case RING_MAX_ITEM :
+ tcm_offset = bus->ring_sh[ringid].ring_mem_addr;
+ tcm_offset += OFFSETOF(ring_mem_t, max_item);
+ dhdpcie_bus_wtcm16(bus, tcm_offset, (uint16) HTOL16(*(uint16 *)data));
+ break;
+
+ case RING_BUF_ADDR :
+ long_data = HTOL64(*(uint64 *)data);
+ tcm_offset = bus->ring_sh[ringid].ring_mem_addr;
+ tcm_offset += OFFSETOF(ring_mem_t, base_addr);
+ dhdpcie_bus_membytes(bus, TRUE, tcm_offset, (uint8 *) &long_data, len);
+ prhex(__FUNCTION__, data, len);
+ break;
+
+ case RING_WRITE_PTR :
+ tcm_offset = bus->ring_sh[ringid].ring_state_w;
+ dhdpcie_bus_wtcm16(bus, tcm_offset, (uint16) HTOL16(*(uint16 *)data));
+ break;
+ case RING_READ_PTR :
+ tcm_offset = bus->ring_sh[ringid].ring_state_r;
+ dhdpcie_bus_wtcm16(bus, tcm_offset, (uint16) HTOL16(*(uint16 *)data));
+ break;
+
+ case DTOH_MB_DATA:
+ dhdpcie_bus_wtcm32(bus, bus->d2h_mb_data_ptr_addr,
+ (uint32) HTOL32(*(uint32 *)data));
+ break;
+
+ case HTOD_MB_DATA:
+ dhdpcie_bus_wtcm32(bus, bus->h2d_mb_data_ptr_addr,
+ (uint32) HTOL32(*(uint32 *)data));
+ break;
+ default:
+ break;
+ }
+}
+
+
+void
+dhd_bus_cmn_readshared(dhd_bus_t *bus, void* data, uint8 type, uint16 ringid)
+{
+ pciedev_shared_t *sh;
+ ulong tcm_offset;
+
+ sh = (pciedev_shared_t*)bus->shared_addr;
+
+ switch (type) {
+ case RING_WRITE_PTR :
+ tcm_offset = bus->ring_sh[ringid].ring_state_w;
+ *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, tcm_offset));
+ break;
+ case RING_READ_PTR :
+ tcm_offset = bus->ring_sh[ringid].ring_state_r;
+ *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus, tcm_offset));
+ break;
+ case TOTAL_LFRAG_PACKET_CNT :
+ *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus,
+ (ulong) &sh->total_lfrag_pkt_cnt));
+ break;
+ case HTOD_MB_DATA:
+ *(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, bus->h2d_mb_data_ptr_addr));
+ break;
+ case DTOH_MB_DATA:
+ *(uint32*)data = LTOH32(dhdpcie_bus_rtcm32(bus, bus->d2h_mb_data_ptr_addr));
+ break;
+ case MAX_HOST_RXBUFS :
+ *(uint16*)data = LTOH16(dhdpcie_bus_rtcm16(bus,
+ (ulong) &sh->max_host_rxbufs));
+ break;
+ default :
+ break;
+ }
+}
+
+uint32 dhd_bus_get_sharedflags(dhd_bus_t *bus)
+{
+ return ((pciedev_shared_t*)bus->pcie_sh)->flags;
+}
+
+void
+dhd_bus_clearcounts(dhd_pub_t *dhdp)
+{
+}
+
+int
+dhd_bus_iovar_op(dhd_pub_t *dhdp, const char *name,
+ void *params, int plen, void *arg, int len, bool set)
+{
+ dhd_bus_t *bus = dhdp->bus;
+ const bcm_iovar_t *vi = NULL;
+ int bcmerror = 0;
+ int val_size;
+ uint32 actionid;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ ASSERT(name);
+ ASSERT(len >= 0);
+
+ /* Get MUST have return space */
+ ASSERT(set || (arg && len));
+
+ /* Set does NOT take qualifiers */
+ ASSERT(!set || (!params && !plen));
+
+ DHD_INFO(("%s: %s %s, len %d plen %d\n", __FUNCTION__,
+ name, (set ? "set" : "get"), len, plen));
+
+ /* Look up var locally; if not found pass to host driver */
+ if ((vi = bcm_iovar_lookup(dhdpcie_iovars, name)) == NULL) {
+ goto exit;
+ }
+
+
+ /* set up 'params' pointer in case this is a set command so that
+ * the convenience int and bool code can be common to set and get
+ */
+ if (params == NULL) {
+ params = arg;
+ plen = len;
+ }
+
+ if (vi->type == IOVT_VOID)
+ val_size = 0;
+ else if (vi->type == IOVT_BUFFER)
+ val_size = len;
+ else
+ /* all other types are integer sized */
+ val_size = sizeof(int);
+
+ actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
+ bcmerror = dhdpcie_bus_doiovar(bus, vi, actionid, name, params, plen, arg, len, val_size);
+
+exit:
+ return bcmerror;
+}
+
+#ifdef BCM_BUZZZ
+#include <bcm_buzzz.h>
+
+int dhd_buzzz_dump_cntrs3(char *p, uint32 *core, uint32 * ovhd, uint32 *log)
+{
+ int bytes = 0;
+ uint32 ctr, curr[3], prev[3], delta[3];
+
+ /* Compute elapsed counter values per counter event type */
+ for (ctr = 0U; ctr < 3; ctr++) {
+ prev[ctr] = core[ctr];
+ curr[ctr] = *log++;
+ core[ctr] = curr[ctr]; /* saved for next log */
+
+ if (curr[ctr] < prev[ctr])
+ delta[ctr] = curr[ctr] + (~0U - prev[ctr]);
+ else
+ delta[ctr] = (curr[ctr] - prev[ctr]);
+
+ /* Adjust for instrumentation overhead */
+ if (delta[ctr] >= ovhd[ctr])
+ delta[ctr] -= ovhd[ctr];
+ else
+ delta[ctr] = 0;
+
+ bytes += sprintf(p + bytes, "%12u ", delta[ctr]);
+ }
+
+ return bytes;
+}
+
+typedef union cm3_cnts { /* export this in bcm_buzzz.h */
+ uint32 u32;
+ uint8 u8[4];
+ struct {
+ uint8 cpicnt;
+ uint8 exccnt;
+ uint8 sleepcnt;
+ uint8 lsucnt;
+ };
+} cm3_cnts_t;
+
+int dhd_buzzz_dump_cntrs6(char *p, uint32 *core, uint32 * ovhd, uint32 *log)
+{
+ int bytes = 0;
+
+ uint32 cyccnt, instrcnt;
+ cm3_cnts_t cm3_cnts;
+ uint8 foldcnt;
+
+ { /* 32bit cyccnt */
+ uint32 curr, prev, delta;
+ prev = core[0]; curr = *log++; core[0] = curr;
+ if (curr < prev)
+ delta = curr + (~0U - prev);
+ else
+ delta = (curr - prev);
+ if (delta >= ovhd[0])
+ delta -= ovhd[0];
+ else
+ delta = 0;
+
+ bytes += sprintf(p + bytes, "%12u ", delta);
+ cyccnt = delta;
+ }
+
+ { /* Extract the 4 cnts: cpi, exc, sleep and lsu */
+ int i;
+ uint8 max8 = ~0;
+ cm3_cnts_t curr, prev, delta;
+ prev.u32 = core[1]; curr.u32 = * log++; core[1] = curr.u32;
+ for (i = 0; i < 4; i++) {
+ if (curr.u8[i] < prev.u8[i])
+ delta.u8[i] = curr.u8[i] + (max8 - prev.u8[i]);
+ else
+ delta.u8[i] = (curr.u8[i] - prev.u8[i]);
+ if (delta.u8[i] >= ovhd[i + 1])
+ delta.u8[i] -= ovhd[i + 1];
+ else
+ delta.u8[i] = 0;
+ bytes += sprintf(p + bytes, "%4u ", delta.u8[i]);
+ }
+ cm3_cnts.u32 = delta.u32;
+ }
+
+ { /* Extract the foldcnt from arg0 */
+ uint8 curr, prev, delta, max8 = ~0;
+ buzzz_arg0_t arg0; arg0.u32 = *log;
+ prev = core[2]; curr = arg0.klog.cnt; core[2] = curr;
+ if (curr < prev)
+ delta = curr + (max8 - prev);
+ else
+ delta = (curr - prev);
+ if (delta >= ovhd[5])
+ delta -= ovhd[5];
+ else
+ delta = 0;
+ bytes += sprintf(p + bytes, "%4u ", delta);
+ foldcnt = delta;
+ }
+
+ instrcnt = cyccnt - (cm3_cnts.u8[0] + cm3_cnts.u8[1] + cm3_cnts.u8[2]
+ + cm3_cnts.u8[3]) + foldcnt;
+ if (instrcnt > 0xFFFFFF00)
+ bytes += sprintf(p + bytes, "[%10s] ", "~");
+ else
+ bytes += sprintf(p + bytes, "[%10u] ", instrcnt);
+ return bytes;
+}
+
+int dhd_buzzz_dump_log(char * p, uint32 * core, uint32 * log, buzzz_t * buzzz)
+{
+ int bytes = 0;
+ buzzz_arg0_t arg0;
+ static uint8 * fmt[] = BUZZZ_FMT_STRINGS;
+
+ if (buzzz->counters == 6) {
+ bytes += dhd_buzzz_dump_cntrs6(p, core, buzzz->ovhd, log);
+ log += 2; /* 32bit cyccnt + (4 x 8bit) CM3 */
+ } else {
+ bytes += dhd_buzzz_dump_cntrs3(p, core, buzzz->ovhd, log);
+ log += 3; /* (3 x 32bit) CR4 */
+ }
+
+ /* Dump the logged arguments using the registered formats */
+ arg0.u32 = *log++;
+
+ switch (arg0.klog.args) {
+ case 0:
+ bytes += sprintf(p + bytes, fmt[arg0.klog.id]);
+ break;
+ case 1:
+ {
+ uint32 arg1 = *log++;
+ bytes += sprintf(p + bytes, fmt[arg0.klog.id], arg1);
+ break;
+ }
+ default:
+ printf("Maximum one argument supported\n");
+ break;
+ }
+ bytes += sprintf(p + bytes, "\n");
+
+ return bytes;
+}
+
+void dhd_buzzz_dump(buzzz_t * buzzz_p, void * buffer_p, char * p)
+{
+ int i;
+ uint32 total, part1, part2, log_sz, core[BUZZZ_COUNTERS_MAX];
+ void * log;
+
+ for (i = 0; i < BUZZZ_COUNTERS_MAX; i++)
+ core[i] = 0;
+
+ log_sz = buzzz_p->log_sz;
+
+ part1 = ((uint32)buzzz_p->cur - (uint32)buzzz_p->log) / log_sz;
+
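+	/* part1 counts entries from the start of the log buffer up to the current
+	 * write pointer; when the log has wrapped, part2 counts the older entries
+	 * from the write pointer to the end, and those are dumped first.
+	 */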
+ if (buzzz_p->wrap == TRUE) {
+ part2 = ((uint32)buzzz_p->end - (uint32)buzzz_p->cur) / log_sz;
+ total = (buzzz_p->buffer_sz - BUZZZ_LOGENTRY_MAXSZ) / log_sz;
+ } else {
+ part2 = 0U;
+ total = buzzz_p->count;
+ }
+
+ if (total == 0U) {
+ printf("buzzz_dump total<%u> done\n", total);
+ return;
+ } else {
+ printf("buzzz_dump total<%u> : part2<%u> + part1<%u>\n",
+ total, part2, part1);
+ }
+
+ if (part2) { /* with wrap */
+ log = (void*)((size_t)buffer_p + (buzzz_p->cur - buzzz_p->log));
+ while (part2--) { /* from cur to end : part2 */
+ p[0] = '\0';
+ dhd_buzzz_dump_log(p, core, (uint32 *)log, buzzz_p);
+ printf("%s", p);
+ log = (void*)((size_t)log + buzzz_p->log_sz);
+ }
+ }
+
+ log = (void*)buffer_p;
+ while (part1--) {
+ p[0] = '\0';
+ dhd_buzzz_dump_log(p, core, (uint32 *)log, buzzz_p);
+ printf("%s", p);
+ log = (void*)((size_t)log + buzzz_p->log_sz);
+ }
+
+ printf("buzzz_dump done.\n");
+}
+
+int dhd_buzzz_dump_dngl(dhd_bus_t *bus)
+{
+ buzzz_t * buzzz_p = NULL;
+ void * buffer_p = NULL;
+ char * page_p = NULL;
+ pciedev_shared_t *sh;
+ int ret = 0;
+
+ if (bus->dhd->busstate != DHD_BUS_DATA) {
+ return BCME_UNSUPPORTED;
+ }
+ if ((page_p = (char *)MALLOC(bus->dhd->osh, 4096)) == NULL) {
+ printf("Page memory allocation failure\n");
+ goto done;
+ }
+ if ((buzzz_p = MALLOC(bus->dhd->osh, sizeof(buzzz_t))) == NULL) {
+ printf("Buzzz memory allocation failure\n");
+ goto done;
+ }
+
+ ret = dhdpcie_readshared(bus);
+ if (ret < 0) {
+		DHD_ERROR(("%s: Shared area read failed\n", __FUNCTION__));
+ goto done;
+ }
+
+ sh = bus->pcie_sh;
+
+ DHD_INFO(("%s buzzz:%08x\n", __FUNCTION__, sh->buzzz));
+
+ if (sh->buzzz != 0U) { /* Fetch and display dongle BUZZZ Trace */
+ dhdpcie_bus_membytes(bus, FALSE, (ulong)sh->buzzz,
+ (uint8 *)buzzz_p, sizeof(buzzz_t));
+ if (buzzz_p->count == 0) {
+ printf("Empty dongle BUZZZ trace\n\n");
+ goto done;
+ }
+ if (buzzz_p->counters != 3) { /* 3 counters for CR4 */
+ printf("Counters<%u> mismatch\n", buzzz_p->counters);
+ goto done;
+ }
+ /* Allocate memory for trace buffer and format strings */
+ buffer_p = MALLOC(bus->dhd->osh, buzzz_p->buffer_sz);
+ if (buffer_p == NULL) {
+ printf("Buffer memory allocation failure\n");
+ goto done;
+ }
+ /* Fetch the trace and format strings */
+ dhdpcie_bus_membytes(bus, FALSE, (uint32)buzzz_p->log, /* Trace */
+ (uint8 *)buffer_p, buzzz_p->buffer_sz);
+ /* Process and display the trace using formatted output */
+ printf("<#cycle> <#instruction> <#ctr3> <event information>\n");
+ dhd_buzzz_dump(buzzz_p, buffer_p, page_p);
+ printf("----- End of dongle BUZZZ Trace -----\n\n");
+ MFREE(bus->dhd->osh, buffer_p, buzzz_p->buffer_sz); buffer_p = NULL;
+ }
+
+done:
+
+ if (page_p) MFREE(bus->dhd->osh, page_p, 4096);
+ if (buzzz_p) MFREE(bus->dhd->osh, buzzz_p, sizeof(buzzz_t));
+ if (buffer_p) MFREE(bus->dhd->osh, buffer_p, buzzz_p->buffer_sz);
+
+ return BCME_OK;
+}
+#endif /* BCM_BUZZZ */
+
+#define PCIE_GEN2(sih) ((BUSTYPE((sih)->bustype) == PCI_BUS) && \
+ ((sih)->buscoretype == PCIE2_CORE_ID))
+
+static bool
+pcie2_mdiosetblock(dhd_bus_t *bus, uint blk)
+{
+ uint mdiodata, mdioctrl, i = 0;
+ uint pcie_serdes_spinwait = 200;
+
+ mdioctrl = MDIOCTL2_DIVISOR_VAL | (0x1F << MDIOCTL2_REGADDR_SHF);
+ mdiodata = (blk << MDIODATA2_DEVADDR_SHF) | MDIODATA2_DONE;
+
+ si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_CONTROL, ~0, mdioctrl);
+ si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_WR_DATA, ~0, mdiodata);
+
+ OSL_DELAY(10);
+ /* retry till the transaction is complete */
+ while (i < pcie_serdes_spinwait) {
+ uint mdioctrl_read = si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_WR_DATA,
+ 0, 0);
+ if (!(mdioctrl_read & MDIODATA2_DONE)) {
+ break;
+ }
+ OSL_DELAY(1000);
+ i++;
+ }
+
+ if (i >= pcie_serdes_spinwait) {
+		DHD_ERROR(("pcie2_mdiosetblock: timed out\n"));
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+
+static int
+pcie2_mdioop(dhd_bus_t *bus, uint physmedia, uint regaddr, bool write, uint *val,
+ bool slave_bypass)
+{
+ uint pcie_serdes_spinwait = 200, i = 0, mdio_ctrl;
+ uint32 reg32;
+
+ pcie2_mdiosetblock(bus, physmedia);
+
+ /* enable mdio access to SERDES */
+ mdio_ctrl = MDIOCTL2_DIVISOR_VAL;
+ mdio_ctrl |= (regaddr << MDIOCTL2_REGADDR_SHF);
+
+ if (slave_bypass)
+ mdio_ctrl |= MDIOCTL2_SLAVE_BYPASS;
+
+ if (!write)
+ mdio_ctrl |= MDIOCTL2_READ;
+
+ si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_CONTROL, ~0, mdio_ctrl);
+
+ if (write) {
+ reg32 = PCIE2_MDIO_WR_DATA;
+ si_corereg(bus->sih, bus->sih->buscoreidx, PCIE2_MDIO_WR_DATA, ~0,
+ *val | MDIODATA2_DONE);
+ }
+ else
+ reg32 = PCIE2_MDIO_RD_DATA;
+
+ /* retry till the transaction is complete */
+ while (i < pcie_serdes_spinwait) {
+ uint done_val = si_corereg(bus->sih, bus->sih->buscoreidx, reg32, 0, 0);
+ if (!(done_val & MDIODATA2_DONE)) {
+ if (!write) {
+ *val = si_corereg(bus->sih, bus->sih->buscoreidx,
+ PCIE2_MDIO_RD_DATA, 0, 0);
+ *val = *val & MDIODATA2_MASK;
+ }
+ return 0;
+ }
+ OSL_DELAY(1000);
+ i++;
+ }
+ return -1;
+}
+
+int
+dhd_bus_devreset(dhd_pub_t *dhdp, uint8 flag)
+{
+ dhd_bus_t *bus = dhdp->bus;
+ int bcmerror = 0;
+#ifdef CONFIG_ARCH_MSM
+ int retry = POWERUP_MAX_RETRY;
+#endif /* CONFIG_ARCH_MSM */
+
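+	/* flag == TRUE powers the dongle and PCIe bus down; flag == FALSE (with
+	 * the bus already down) powers everything back up and restarts DHD.
+	 */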
+ if (dhd_download_fw_on_driverload) {
+ bcmerror = dhd_bus_start(dhdp);
+ } else {
+ if (flag == TRUE) { /* Turn off WLAN */
+ /* Removing Power */
+ DHD_ERROR(("%s: == Power OFF ==\n", __FUNCTION__));
+ bus->dhd->up = FALSE;
+ if (bus->dhd->busstate != DHD_BUS_DOWN) {
+ dhd_prot_clear(dhdp);
+ dhd_os_wd_timer(dhdp, 0);
+ dhd_bus_stop(bus, TRUE);
+#ifdef CONFIG_ARCH_MSM
+ dhd_bus_release_dongle(bus);
+#endif /* CONFIG_ARCH_MSM */
+ dhdpcie_bus_free_resource(bus);
+ bcmerror = dhdpcie_bus_disable_device(bus);
+ if (bcmerror) {
+ DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n",
+ __FUNCTION__, bcmerror));
+ goto done;
+ }
+#ifdef CONFIG_ARCH_MSM
+ bcmerror = dhdpcie_bus_clock_stop(bus);
+ if (bcmerror) {
+ DHD_ERROR(("%s: host clock stop failed: %d\n",
+ __FUNCTION__, bcmerror));
+ goto done;
+ }
+#endif /* CONFIG_ARCH_MSM */
+ bus->dhd->busstate = DHD_BUS_DOWN;
+ } else {
+ dhd_prot_clear(dhdp);
+#ifdef CONFIG_ARCH_MSM
+ dhd_bus_release_dongle(bus);
+#endif /* CONFIG_ARCH_MSM */
+ dhdpcie_bus_free_resource(bus);
+ bcmerror = dhdpcie_bus_disable_device(bus);
+ if (bcmerror) {
+ DHD_ERROR(("%s: dhdpcie_bus_disable_device: %d\n",
+ __FUNCTION__, bcmerror));
+ goto done;
+ }
+
+#ifdef CONFIG_ARCH_MSM
+ bcmerror = dhdpcie_bus_clock_stop(bus);
+ if (bcmerror) {
+ DHD_ERROR(("%s: host clock stop failed: %d\n",
+ __FUNCTION__, bcmerror));
+ goto done;
+ }
+#endif /* CONFIG_ARCH_MSM */
+ }
+
+ bus->dhd->dongle_reset = TRUE;
+ DHD_ERROR(("%s: WLAN OFF Done\n", __FUNCTION__));
+
+ } else { /* Turn on WLAN */
+ if (bus->dhd->busstate == DHD_BUS_DOWN) {
+ /* Powering On */
+ DHD_ERROR(("%s: == Power ON ==\n", __FUNCTION__));
+#ifdef CONFIG_ARCH_MSM
+ while (retry--) {
+ bcmerror = dhdpcie_bus_clock_start(bus);
+ if (!bcmerror) {
+ DHD_ERROR(("%s: dhdpcie_bus_clock_start OK\n",
+ __FUNCTION__));
+ break;
+ }
+ else
+ OSL_SLEEP(10);
+ }
+
+ if (bcmerror && !retry) {
+ DHD_ERROR(("%s: host pcie clock enable failed: %d\n",
+ __FUNCTION__, bcmerror));
+ goto done;
+ }
+#endif /* CONFIG_ARCH_MSM */
+ bcmerror = dhdpcie_bus_enable_device(bus);
+ if (bcmerror) {
+ DHD_ERROR(("%s: host configuration restore failed: %d\n",
+ __FUNCTION__, bcmerror));
+ goto done;
+ }
+
+ bcmerror = dhdpcie_bus_alloc_resource(bus);
+ if (bcmerror) {
+					DHD_ERROR(("%s: dhdpcie_bus_alloc_resource failed: %d\n",
+ __FUNCTION__, bcmerror));
+ goto done;
+ }
+
+ bcmerror = dhdpcie_bus_dongle_attach(bus);
+ if (bcmerror) {
+ DHD_ERROR(("%s: dhdpcie_bus_dongle_attach: %d\n",
+ __FUNCTION__, bcmerror));
+ goto done;
+ }
+
+ bus->dhd->dongle_reset = FALSE;
+
+ bcmerror = dhd_bus_start(dhdp);
+ if (bcmerror) {
+ DHD_ERROR(("%s: dhd_bus_start: %d\n",
+ __FUNCTION__, bcmerror));
+ goto done;
+ }
+
+ bus->dhd->up = TRUE;
+ DHD_ERROR(("%s: WLAN Power On Done\n", __FUNCTION__));
+ } else {
+				DHD_ERROR(("%s: bus is already up, nothing to do\n", __FUNCTION__));
+ goto done;
+ }
+ }
+ }
+done:
+ if (bcmerror)
+ bus->dhd->busstate = DHD_BUS_DOWN;
+
+ return bcmerror;
+}
+
+static int
+dhdpcie_bus_doiovar(dhd_bus_t *bus, const bcm_iovar_t *vi, uint32 actionid, const char *name,
+ void *params, int plen, void *arg, int len, int val_size)
+{
+ int bcmerror = 0;
+ int32 int_val = 0;
+ int32 int_val2 = 0;
+ int32 int_val3 = 0;
+ bool bool_val = 0;
+
+ DHD_TRACE(("%s: Enter, action %d name %s params %p plen %d arg %p len %d val_size %d\n",
+ __FUNCTION__, actionid, name, params, plen, arg, len, val_size));
+
+ if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0)
+ goto exit;
+
+ if (plen >= (int)sizeof(int_val))
+ bcopy(params, &int_val, sizeof(int_val));
+
+ if (plen >= (int)sizeof(int_val) * 2)
+ bcopy((void*)((uintptr)params + sizeof(int_val)), &int_val2, sizeof(int_val2));
+
+ if (plen >= (int)sizeof(int_val) * 3)
+ bcopy((void*)((uintptr)params + 2 * sizeof(int_val)), &int_val3, sizeof(int_val3));
+
+ bool_val = (int_val != 0) ? TRUE : FALSE;
+
+ /* Check if dongle is in reset. If so, only allow DEVRESET iovars */
+ if (bus->dhd->dongle_reset && !(actionid == IOV_SVAL(IOV_DEVRESET) ||
+ actionid == IOV_GVAL(IOV_DEVRESET))) {
+ bcmerror = BCME_NOTREADY;
+ goto exit;
+ }
+
+ switch (actionid) {
+
+
+ case IOV_SVAL(IOV_VARS):
+ bcmerror = dhdpcie_downloadvars(bus, arg, len);
+ break;
+
+ case IOV_SVAL(IOV_PCIEREG):
+ si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configaddr), ~0,
+ int_val);
+ si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configdata), ~0,
+ int_val2);
+ break;
+
+ case IOV_GVAL(IOV_PCIEREG):
+ si_corereg(bus->sih, bus->sih->buscoreidx, OFFSETOF(sbpcieregs_t, configaddr), ~0,
+ int_val);
+ int_val = si_corereg(bus->sih, bus->sih->buscoreidx,
+ OFFSETOF(sbpcieregs_t, configdata), 0, 0);
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_GVAL(IOV_BAR0_SECWIN_REG):
+ {
+ uint32 cur_base, base;
+ uchar *bar0;
+ volatile uint32 *offset;
+ /* set the bar0 secondary window to this */
+ /* write the register value */
+ cur_base = dhdpcie_bus_cfg_read_dword(bus, PCIE2_BAR0_CORE2_WIN, sizeof(uint));
+ base = int_val & 0xFFFFF000;
+ dhdpcie_bus_cfg_write_dword(bus, PCIE2_BAR0_CORE2_WIN, sizeof(uint32), base);
+ bar0 = (uchar *)bus->regs;
+ offset = (uint32 *)(bar0 + 0x4000 + (int_val & 0xFFF));
+ int_val = *offset;
+ bcopy(&int_val, arg, val_size);
+ dhdpcie_bus_cfg_write_dword(bus, PCIE2_BAR0_CORE2_WIN, sizeof(uint32), cur_base);
+ }
+ break;
+ case IOV_SVAL(IOV_BAR0_SECWIN_REG):
+ {
+ uint32 cur_base, base;
+ uchar *bar0;
+ volatile uint32 *offset;
+ /* set the bar0 secondary window to this */
+ /* write the register value */
+ cur_base = dhdpcie_bus_cfg_read_dword(bus, PCIE2_BAR0_CORE2_WIN, sizeof(uint));
+ base = int_val & 0xFFFFF000;
+ dhdpcie_bus_cfg_write_dword(bus, PCIE2_BAR0_CORE2_WIN, sizeof(uint32), base);
+ bar0 = (uchar *)bus->regs;
+ offset = (uint32 *)(bar0 + 0x4000 + (int_val & 0xFFF));
+ *offset = int_val2;
+ bcopy(&int_val2, arg, val_size);
+ dhdpcie_bus_cfg_write_dword(bus, PCIE2_BAR0_CORE2_WIN, sizeof(uint32), cur_base);
+ }
+ break;
+
+ case IOV_SVAL(IOV_PCIECOREREG):
+ si_corereg(bus->sih, bus->sih->buscoreidx, int_val, ~0, int_val2);
+ break;
+ case IOV_GVAL(IOV_SBREG):
+ {
+ sdreg_t sdreg;
+ uint32 addr, coreidx;
+
+ bcopy(params, &sdreg, sizeof(sdreg));
+
+ addr = sdreg.offset;
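+		/* Bits 15:12 of the offset select the backplane core index,
+		 * bits 11:0 the register offset within that core.
+		 */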
+ coreidx = (addr & 0xF000) >> 12;
+
+ int_val = si_corereg(bus->sih, coreidx, (addr & 0xFFF), 0, 0);
+ bcopy(&int_val, arg, sizeof(int32));
+ break;
+ }
+
+ case IOV_SVAL(IOV_SBREG):
+ {
+ sdreg_t sdreg;
+ uint32 addr, coreidx;
+
+ bcopy(params, &sdreg, sizeof(sdreg));
+
+ addr = sdreg.offset;
+ coreidx = (addr & 0xF000) >> 12;
+
+ si_corereg(bus->sih, coreidx, (addr & 0xFFF), ~0, sdreg.value);
+
+ break;
+ }
+
+ case IOV_GVAL(IOV_PCIESERDESREG):
+ {
+ uint val;
+ if (!PCIE_GEN2(bus->sih)) {
+ DHD_ERROR(("supported only in pcie gen2\n"));
+ bcmerror = BCME_ERROR;
+ break;
+ }
+ if (!pcie2_mdioop(bus, int_val, int_val2, FALSE, &val, FALSE)) {
+ bcopy(&val, arg, sizeof(int32));
+ }
+ else {
+ DHD_ERROR(("pcie2_mdioop failed.\n"));
+ bcmerror = BCME_ERROR;
+ }
+ break;
+ }
+ case IOV_SVAL(IOV_PCIESERDESREG):
+ if (!PCIE_GEN2(bus->sih)) {
+ DHD_ERROR(("supported only in pcie gen2\n"));
+ bcmerror = BCME_ERROR;
+ break;
+ }
+ if (pcie2_mdioop(bus, int_val, int_val2, TRUE, &int_val3, FALSE)) {
+ DHD_ERROR(("pcie2_mdioop failed.\n"));
+ bcmerror = BCME_ERROR;
+ }
+ break;
+ case IOV_GVAL(IOV_PCIECOREREG):
+ int_val = si_corereg(bus->sih, bus->sih->buscoreidx, int_val, 0, 0);
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_PCIECFGREG):
+ OSL_PCI_WRITE_CONFIG(bus->osh, int_val, 4, int_val2);
+ break;
+
+ case IOV_GVAL(IOV_PCIECFGREG):
+ int_val = OSL_PCI_READ_CONFIG(bus->osh, int_val, 4);
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_PCIE_LPBK):
+ bcmerror = dhdpcie_bus_lpback_req(bus, int_val);
+ break;
+
+ case IOV_SVAL(IOV_PCIE_DMAXFER):
+ bcmerror = dhdpcie_bus_dmaxfer_req(bus, int_val, int_val2, int_val3);
+ break;
+
+ case IOV_GVAL(IOV_PCIE_SUSPEND):
+ int_val = (bus->dhd->busstate == DHD_BUS_SUSPEND) ? 1 : 0;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_PCIE_SUSPEND):
+ dhdpcie_bus_suspend(bus, bool_val);
+ break;
+
+ case IOV_GVAL(IOV_MEMSIZE):
+ int_val = (int32)bus->ramsize;
+ bcopy(&int_val, arg, val_size);
+ break;
+ case IOV_SVAL(IOV_MEMBYTES):
+ case IOV_GVAL(IOV_MEMBYTES):
+ {
+ uint32 address; /* absolute backplane address */
+ uint size, dsize;
+ uint8 *data;
+
+ bool set = (actionid == IOV_SVAL(IOV_MEMBYTES));
+
+ ASSERT(plen >= 2*sizeof(int));
+
+ address = (uint32)int_val;
+ bcopy((char *)params + sizeof(int_val), &int_val, sizeof(int_val));
+ size = (uint)int_val;
+
+ /* Do some validation */
+ dsize = set ? plen - (2 * sizeof(int)) : len;
+ if (dsize < size) {
+ DHD_ERROR(("%s: error on %s membytes, addr 0x%08x size %d dsize %d\n",
+ __FUNCTION__, (set ? "set" : "get"), address, size, dsize));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+
+		DHD_INFO(("%s: Request to %s %d bytes at address 0x%08x, dsize %d\n", __FUNCTION__,
+			(set ? "write" : "read"), size, address, dsize));
+
+ /* check if CR4 */
+ if (si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
+ /* if address is 0, store the reset instruction to be written in 0 */
+ if (set && address == bus->dongle_ram_base) {
+ bus->resetinstr = *(((uint32*)params) + 2);
+ }
+ } else {
+ /* If we know about SOCRAM, check for a fit */
+ if ((bus->orig_ramsize) &&
+ ((address > bus->orig_ramsize) || (address + size > bus->orig_ramsize)))
+ {
+ uint8 enable, protect, remap;
+ si_socdevram(bus->sih, FALSE, &enable, &protect, &remap);
+ if (!enable || protect) {
+ DHD_ERROR(("%s: ramsize 0x%08x doesn't have %d bytes at 0x%08x\n",
+ __FUNCTION__, bus->orig_ramsize, size, address));
+ DHD_ERROR(("%s: socram enable %d, protect %d\n",
+ __FUNCTION__, enable, protect));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+
+ if (!REMAP_ENAB(bus) && (address >= SOCDEVRAM_ARM_ADDR)) {
+ uint32 devramsize = si_socdevram_size(bus->sih);
+ if ((address < SOCDEVRAM_ARM_ADDR) ||
+ (address + size > (SOCDEVRAM_ARM_ADDR + devramsize))) {
+ DHD_ERROR(("%s: bad address 0x%08x, size 0x%08x\n",
+ __FUNCTION__, address, size));
+ DHD_ERROR(("%s: socram range 0x%08x,size 0x%08x\n",
+ __FUNCTION__, SOCDEVRAM_ARM_ADDR, devramsize));
+ bcmerror = BCME_BADARG;
+ break;
+ }
+ /* move it such that address is real now */
+ address -= SOCDEVRAM_ARM_ADDR;
+ address += SOCDEVRAM_BP_ADDR;
+ DHD_INFO(("%s: Request to %s %d bytes @ Mapped address 0x%08x\n",
+ __FUNCTION__, (set ? "write" : "read"), size, address));
+ } else if (REMAP_ENAB(bus) && REMAP_ISADDR(bus, address) && remap) {
+ /* Can not access remap region while devram remap bit is set
+ * ROM content would be returned in this case
+ */
+ DHD_ERROR(("%s: Need to disable remap for address 0x%08x\n",
+ __FUNCTION__, address));
+ bcmerror = BCME_ERROR;
+ break;
+ }
+ }
+ }
+
+ /* Generate the actual data pointer */
+ data = set ? (uint8*)params + 2 * sizeof(int): (uint8*)arg;
+
+ /* Call to do the transfer */
+ bcmerror = dhdpcie_bus_membytes(bus, set, address, data, size);
+
+ break;
+ }
+
+#ifdef BCM_BUZZZ
+ case IOV_GVAL(IOV_BUZZZ_DUMP):
+ bcmerror = dhd_buzzz_dump_dngl(bus);
+ break;
+#endif /* BCM_BUZZZ */
+
+ case IOV_SVAL(IOV_SET_DOWNLOAD_STATE):
+ bcmerror = dhdpcie_bus_download_state(bus, bool_val);
+ break;
+
+ case IOV_GVAL(IOV_RAMSIZE):
+ int_val = (int32)bus->ramsize;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_GVAL(IOV_RAMSTART):
+ int_val = (int32)bus->dongle_ram_base;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_GVAL(IOV_CC_NVMSHADOW):
+ {
+ struct bcmstrbuf dump_b;
+
+ bcm_binit(&dump_b, arg, len);
+ bcmerror = dhdpcie_cc_nvmshadow(bus, &dump_b);
+ break;
+ }
+
+ case IOV_GVAL(IOV_SLEEP_ALLOWED):
+ bool_val = bus->sleep_allowed;
+ bcopy(&bool_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_SLEEP_ALLOWED):
+ bus->sleep_allowed = bool_val;
+ break;
+
+ case IOV_GVAL(IOV_DONGLEISOLATION):
+ int_val = bus->dhd->dongle_isolation;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_DONGLEISOLATION):
+ bus->dhd->dongle_isolation = bool_val;
+ break;
+
+ case IOV_GVAL(IOV_LTRSLEEPON_UNLOOAD):
+ int_val = bus->ltrsleep_on_unload;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_LTRSLEEPON_UNLOOAD):
+ bus->ltrsleep_on_unload = bool_val;
+ break;
+
+ case IOV_GVAL(IOV_DUMP_RINGUPD_BLOCK):
+ {
+ struct bcmstrbuf dump_b;
+ bcm_binit(&dump_b, arg, len);
+ bcmerror = dhd_prot_ringupd_dump(bus->dhd, &dump_b);
+ break;
+ }
+ case IOV_GVAL(IOV_DMA_RINGINDICES):
+	{
+		int h2d_support, d2h_support;
+
+ d2h_support = DMA_INDX_ENAB(bus->dhd->dma_d2h_ring_upd_support) ? 1 : 0;
+ h2d_support = DMA_INDX_ENAB(bus->dhd->dma_h2d_ring_upd_support) ? 1 : 0;
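+		/* Bit 0 reports D2H index DMA support, bit 1 H2D index DMA support */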
+ int_val = d2h_support | (h2d_support << 1);
+ bcopy(&int_val, arg, val_size);
+ break;
+ }
+ case IOV_SVAL(IOV_DMA_RINGINDICES):
+ /* Can change it only during initialization/FW download */
+ if (bus->dhd->busstate == DHD_BUS_DOWN) {
+ if ((int_val > 3) || (int_val < 0)) {
+ DHD_ERROR(("Bad argument. Possible values: 0, 1, 2 & 3\n"));
+ bcmerror = BCME_BADARG;
+ } else {
+ bus->dhd->dma_d2h_ring_upd_support = (int_val & 1) ? TRUE : FALSE;
+ bus->dhd->dma_h2d_ring_upd_support = (int_val & 2) ? TRUE : FALSE;
+ }
+ } else {
+ DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
+ __FUNCTION__));
+ bcmerror = BCME_NOTDOWN;
+ }
+ break;
+
+ case IOV_GVAL(IOV_RX_METADATALEN):
+ int_val = dhd_prot_metadatalen_get(bus->dhd, TRUE);
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_RX_METADATALEN):
+ if (int_val > 64) {
+ bcmerror = BCME_BUFTOOLONG;
+ break;
+ }
+ dhd_prot_metadatalen_set(bus->dhd, int_val, TRUE);
+ break;
+
+ case IOV_SVAL(IOV_TXP_THRESHOLD):
+ dhd_prot_txp_threshold(bus->dhd, TRUE, int_val);
+ break;
+
+ case IOV_GVAL(IOV_TXP_THRESHOLD):
+ int_val = dhd_prot_txp_threshold(bus->dhd, FALSE, int_val);
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_DB1_FOR_MB):
+ if (int_val)
+ bus->db1_for_mb = TRUE;
+ else
+ bus->db1_for_mb = FALSE;
+ break;
+
+ case IOV_GVAL(IOV_DB1_FOR_MB):
+ if (bus->db1_for_mb)
+ int_val = 1;
+ else
+ int_val = 0;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_GVAL(IOV_TX_METADATALEN):
+ int_val = dhd_prot_metadatalen_get(bus->dhd, FALSE);
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_TX_METADATALEN):
+ if (int_val > 64) {
+ bcmerror = BCME_BUFTOOLONG;
+ break;
+ }
+ dhd_prot_metadatalen_set(bus->dhd, int_val, FALSE);
+ break;
+
+ case IOV_GVAL(IOV_FLOW_PRIO_MAP):
+ int_val = bus->dhd->flow_prio_map_type;
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ case IOV_SVAL(IOV_FLOW_PRIO_MAP):
+ int_val = (int32)dhd_update_flow_prio_map(bus->dhd, (uint8)int_val);
+ bcopy(&int_val, arg, val_size);
+ break;
+
+ default:
+ bcmerror = BCME_UNSUPPORTED;
+ break;
+ }
+
+exit:
+ return bcmerror;
+}
+
+/* Transfers bytes from host to dongle using pio mode */
+static int
+dhdpcie_bus_lpback_req(struct dhd_bus *bus, uint32 len)
+{
+ if (bus->dhd == NULL) {
+ DHD_ERROR(("bus not inited\n"));
+ return 0;
+ }
+ if (bus->dhd->prot == NULL) {
+ DHD_ERROR(("prot is not inited\n"));
+ return 0;
+ }
+ if (bus->dhd->busstate != DHD_BUS_DATA) {
+		DHD_ERROR(("bus is not in a ready state for LPBK\n"));
+ return 0;
+ }
+ dhdmsgbuf_lpbk_req(bus->dhd, len);
+ return 0;
+}
+
+void
+dhd_bus_set_suspend_resume(dhd_pub_t *dhdp, bool state)
+{
+ struct dhd_bus *bus = dhdp->bus;
+ if (bus) {
+ dhdpcie_bus_suspend(bus, state);
+ }
+}
+
+int
+dhdpcie_bus_suspend(struct dhd_bus *bus, bool state)
+{
+
+ int timeleft;
+ bool pending;
+ int rc = 0;
+
+ if (bus->dhd == NULL) {
+ DHD_ERROR(("bus not inited\n"));
+ return BCME_ERROR;
+ }
+ if (bus->dhd->prot == NULL) {
+ DHD_ERROR(("prot is not inited\n"));
+ return BCME_ERROR;
+ }
+ if (bus->dhd->busstate != DHD_BUS_DATA && bus->dhd->busstate != DHD_BUS_SUSPEND) {
+		DHD_ERROR(("bus is not in a ready state for suspend/resume\n"));
+ return BCME_ERROR;
+ }
+ if (bus->dhd->dongle_reset)
+ return -EIO;
+
+ if (state == (bus->dhd->busstate == DHD_BUS_SUSPEND)) /* Set to same state */
+ return BCME_OK;
+
+ if (state) {
+#ifdef EXYNOS5433_PCIE_WAR
+ exynos_pcie_set_l1_exit();
+#endif /* EXYNOS5433_PCIE_WAR */
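+		/* Inform the dongle that the host is entering D3 and wait for its
+		 * D3 ACK before suspending the PCIe link.
+		 */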
+ bus->wait_for_d3_ack = 0;
+ DHD_OS_WAKE_LOCK_WAIVE(bus->dhd);
+ dhdpcie_send_mb_data(bus, H2D_HOST_D3_INFORM);
+ timeleft = dhd_os_ioctl_resp_wait(bus->dhd, &bus->wait_for_d3_ack, &pending);
+ DHD_OS_WAKE_LOCK_RESTORE(bus->dhd);
+ if (bus->wait_for_d3_ack) {
+ /* Got D3 Ack. Suspend the bus */
+ rc = dhdpcie_pci_suspend_resume(bus->dev, state);
+ bus->dhd->busstate = DHD_BUS_SUSPEND;
+ } else if (timeleft == 0) {
+ DHD_ERROR(("%s: resumed on timeout\n", __FUNCTION__));
+#ifdef EXYNOS5433_PCIE_WAR
+ exynos_pcie_clear_l1_exit();
+#endif /* EXYNOS5433_PCIE_WAR */
+ return -ETIMEDOUT;
+ }
+ bus->wait_for_d3_ack = 1;
+ }
+ else {
+ /* Resume */
+ rc = dhdpcie_pci_suspend_resume(bus->dev, state);
+ bus->dhd->busstate = DHD_BUS_DATA;
+
+ }
+#ifdef EXYNOS5433_PCIE_WAR
+ exynos_pcie_clear_l1_exit();
+#endif /* EXYNOS5433_PCIE_WAR */
+ return rc;
+}
+
+/* Transfers bytes from host to dongle and to host again using DMA */
+static int
+dhdpcie_bus_dmaxfer_req(struct dhd_bus *bus, uint32 len, uint32 srcdelay, uint32 destdelay)
+{
+ if (bus->dhd == NULL) {
+ DHD_ERROR(("bus not inited\n"));
+ return BCME_ERROR;
+ }
+ if (bus->dhd->prot == NULL) {
+ DHD_ERROR(("prot is not inited\n"));
+ return BCME_ERROR;
+ }
+ if (bus->dhd->busstate != DHD_BUS_DATA) {
+		DHD_ERROR(("bus is not in a ready state for DMA transfer\n"));
+ return BCME_ERROR;
+ }
+
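+	/* Bound the transfer length: at least 5 bytes and at most 4 MB minus 8 bytes */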
+ if (len < 5 || len > 4194296) {
+ DHD_ERROR(("len is too small or too large\n"));
+ return BCME_ERROR;
+ }
+ return dhdmsgbuf_dmaxfer_req(bus->dhd, len, srcdelay, destdelay);
+}
+
+
+
+static int
+dhdpcie_bus_download_state(dhd_bus_t *bus, bool enter)
+{
+ int bcmerror = 0;
+ uint32 *cr4_regs;
+
+ if (!bus->sih)
+ return BCME_ERROR;
+ /* To enter download state, disable ARM and reset SOCRAM.
+ * To exit download state, simply reset ARM (default is RAM boot).
+ */
+ if (enter) {
+ bus->alp_only = TRUE;
+
+		/* some chips (e.g. 43602) have two ARM cores; the CR4 receives the firmware. */
+ cr4_regs = si_setcore(bus->sih, ARMCR4_CORE_ID, 0);
+
+ if (cr4_regs == NULL && !(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
+ !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
+ DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+
+ if (cr4_regs == NULL) { /* no CR4 present on chip */
+ si_core_disable(bus->sih, 0);
+
+ if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
+ DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+
+ si_core_reset(bus->sih, 0, 0);
+
+
+ /* Clear the top bit of memory */
+ if (bus->ramsize) {
+ uint32 zeros = 0;
+ if (dhdpcie_bus_membytes(bus, TRUE, bus->ramsize - 4,
+ (uint8*)&zeros, 4) < 0) {
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+ }
+ } else {
+ /* For CR4,
+ * Halt ARM
+ * Remove ARM reset
+ * Read RAM base address [0x18_0000]
+ * [next] Download firmware
+ * [done at else] Populate the reset vector
+ * [done at else] Remove ARM halt
+ */
+ /* Halt ARM & remove reset */
+ si_core_reset(bus->sih, SICF_CPUHALT, SICF_CPUHALT);
+ if (bus->sih->chip == BCM43602_CHIP_ID) {
+ W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKIDX, 5);
+ W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKPDA, 0);
+ W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKIDX, 7);
+ W_REG(bus->pcie_mb_intr_osh, cr4_regs + ARMCR4REG_BANKPDA, 0);
+ }
+ /* reset last 4 bytes of RAM address. to be used for shared area */
+ dhdpcie_init_shared_addr(bus);
+ }
+ } else {
+ if (!si_setcore(bus->sih, ARMCR4_CORE_ID, 0)) {
+ if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
+ DHD_ERROR(("%s: Failed to find SOCRAM core!\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+
+ if (!si_iscoreup(bus->sih)) {
+ DHD_ERROR(("%s: SOCRAM core is down after reset?\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+
+
+ /* Enable remap before ARM reset but after vars.
+ * No backplane access in remap mode
+ */
+
+ if (!si_setcore(bus->sih, PCMCIA_CORE_ID, 0) &&
+ !si_setcore(bus->sih, SDIOD_CORE_ID, 0)) {
+ DHD_ERROR(("%s: Can't change back to SDIO core?\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+
+
+ if (!(si_setcore(bus->sih, ARM7S_CORE_ID, 0)) &&
+ !(si_setcore(bus->sih, ARMCM3_CORE_ID, 0))) {
+ DHD_ERROR(("%s: Failed to find ARM core!\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+ } else {
+ if (bus->sih->chip == BCM43602_CHIP_ID) {
+ /* Firmware crashes on SOCSRAM access when core is in reset */
+ if (!(si_setcore(bus->sih, SOCRAM_CORE_ID, 0))) {
+ DHD_ERROR(("%s: Failed to find SOCRAM core!\n",
+ __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+ si_core_reset(bus->sih, 0, 0);
+ si_setcore(bus->sih, ARMCR4_CORE_ID, 0);
+ }
+
+ /* write vars */
+ if ((bcmerror = dhdpcie_bus_write_vars(bus))) {
+ DHD_ERROR(("%s: could not write vars to RAM\n", __FUNCTION__));
+ goto fail;
+ }
+
+
+ /* switch back to arm core again */
+ if (!(si_setcore(bus->sih, ARMCR4_CORE_ID, 0))) {
+ DHD_ERROR(("%s: Failed to find ARM CR4 core!\n", __FUNCTION__));
+ bcmerror = BCME_ERROR;
+ goto fail;
+ }
+
+ /* write address 0 with reset instruction */
+ bcmerror = dhdpcie_bus_membytes(bus, TRUE, 0,
+ (uint8 *)&bus->resetinstr, sizeof(bus->resetinstr));
+
+ /* now remove reset and halt and continue to run CR4 */
+ }
+
+ si_core_reset(bus->sih, 0, 0);
+
+ /* Allow HT Clock now that the ARM is running. */
+ bus->alp_only = FALSE;
+
+ bus->dhd->busstate = DHD_BUS_LOAD;
+ }
+
+fail:
+ /* Always return to PCIE core */
+ si_setcore(bus->sih, PCIE2_CORE_ID, 0);
+
+ return bcmerror;
+}
+
+static int
+dhdpcie_bus_write_vars(dhd_bus_t *bus)
+{
+ int bcmerror = 0;
+ uint32 varsize, phys_size;
+ uint32 varaddr;
+ uint8 *vbuffer;
+ uint32 varsizew;
+#ifdef DHD_DEBUG
+ uint8 *nvram_ularray;
+#endif /* DHD_DEBUG */
+
+	/* Even if there are no vars to be written, we still need to set the ramsize. */
+ varsize = bus->varsz ? ROUNDUP(bus->varsz, 4) : 0;
+ varaddr = (bus->ramsize - 4) - varsize;
+
+ varaddr += bus->dongle_ram_base;
+
+ if (bus->vars) {
+
+ vbuffer = (uint8 *)MALLOC(bus->dhd->osh, varsize);
+ if (!vbuffer)
+ return BCME_NOMEM;
+
+ bzero(vbuffer, varsize);
+ bcopy(bus->vars, vbuffer, bus->varsz);
+ /* Write the vars list */
+ bcmerror = dhdpcie_bus_membytes(bus, TRUE, varaddr, vbuffer, varsize);
+
+ /* Implement read back and verify later */
+#ifdef DHD_DEBUG
+ /* Verify NVRAM bytes */
+ DHD_INFO(("Compare NVRAM dl & ul; varsize=%d\n", varsize));
+ nvram_ularray = (uint8*)MALLOC(bus->dhd->osh, varsize);
+		if (!nvram_ularray) {
+			MFREE(bus->dhd->osh, vbuffer, varsize);
+			return BCME_NOMEM;
+		}
+
+ /* Upload image to verify downloaded contents. */
+ memset(nvram_ularray, 0xaa, varsize);
+
+ /* Read the vars list to temp buffer for comparison */
+ bcmerror = dhdpcie_bus_membytes(bus, FALSE, varaddr, nvram_ularray, varsize);
+ if (bcmerror) {
+ DHD_ERROR(("%s: error %d on reading %d nvram bytes at 0x%08x\n",
+ __FUNCTION__, bcmerror, varsize, varaddr));
+ }
+
+ /* Compare the org NVRAM with the one read from RAM */
+ if (memcmp(vbuffer, nvram_ularray, varsize)) {
+ DHD_ERROR(("%s: Downloaded NVRAM image is corrupted.\n", __FUNCTION__));
+ } else
+ DHD_ERROR(("%s: Download, Upload and compare of NVRAM succeeded.\n",
+ __FUNCTION__));
+
+ MFREE(bus->dhd->osh, nvram_ularray, varsize);
+#endif /* DHD_DEBUG */
+
+ MFREE(bus->dhd->osh, vbuffer, varsize);
+ }
+
+ phys_size = REMAP_ENAB(bus) ? bus->ramsize : bus->orig_ramsize;
+
+ phys_size += bus->dongle_ram_base;
+
+ /* adjust to the user specified RAM */
+ DHD_INFO(("Physical memory size: %d, usable memory size: %d\n",
+ phys_size, bus->ramsize));
+ DHD_INFO(("Vars are at %d, orig varsize is %d\n",
+ varaddr, varsize));
+ varsize = ((phys_size - 4) - varaddr);
+
+ /*
+ * Determine the length token:
+ * Varsize, converted to words, in lower 16-bits, checksum in upper 16-bits.
+ */
+ if (bcmerror) {
+ varsizew = 0;
+ bus->nvram_csm = varsizew;
+ } else {
+ varsizew = varsize / 4;
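+		/* Lower 16 bits hold the word count; upper 16 bits hold its bitwise
+		 * complement as a simple checksum.
+		 */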
+ varsizew = (~varsizew << 16) | (varsizew & 0x0000FFFF);
+ bus->nvram_csm = varsizew;
+ varsizew = htol32(varsizew);
+ }
+
+ DHD_INFO(("New varsize is %d, length token=0x%08x\n", varsize, varsizew));
+
+ /* Write the length token to the last word */
+ bcmerror = dhdpcie_bus_membytes(bus, TRUE, (phys_size - 4),
+ (uint8*)&varsizew, 4);
+
+ return bcmerror;
+}
+
+int
+dhdpcie_downloadvars(dhd_bus_t *bus, void *arg, int len)
+{
+ int bcmerror = BCME_OK;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ /* Basic sanity checks */
+ if (bus->dhd->up) {
+ bcmerror = BCME_NOTDOWN;
+ goto err;
+ }
+ if (!len) {
+ bcmerror = BCME_BUFTOOSHORT;
+ goto err;
+ }
+
+ /* Free the old ones and replace with passed variables */
+ if (bus->vars)
+ MFREE(bus->dhd->osh, bus->vars, bus->varsz);
+
+ bus->vars = MALLOC(bus->dhd->osh, len);
+ bus->varsz = bus->vars ? len : 0;
+ if (bus->vars == NULL) {
+ bcmerror = BCME_NOMEM;
+ goto err;
+ }
+
+ /* Copy the passed variables, which should include the terminating double-null */
+ bcopy(arg, bus->vars, bus->varsz);
+err:
+ return bcmerror;
+}
+
+/* Add bus dump output to a buffer */
+void dhd_bus_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
+{
+ uint16 flowid;
+ flow_ring_node_t *flow_ring_node;
+
+ dhd_prot_print_info(dhdp, strbuf);
+ for (flowid = 0; flowid < dhdp->num_flow_rings; flowid++) {
+ flow_ring_node = DHD_FLOW_RING(dhdp, flowid);
+ if (flow_ring_node->active) {
+ bcm_bprintf(strbuf, "Flow:%d IF %d Prio %d Qlen %d ",
+ flow_ring_node->flowid, flow_ring_node->flow_info.ifindex,
+ flow_ring_node->flow_info.tid, flow_ring_node->queue.len);
+ dhd_prot_print_flow_ring(dhdp, flow_ring_node->prot_info, strbuf);
+ }
+ }
+}
+
+static void
+dhd_update_txflowrings(dhd_pub_t *dhd)
+{
+ dll_t *item, *next;
+ flow_ring_node_t *flow_ring_node;
+ struct dhd_bus *bus = dhd->bus;
+
+ for (item = dll_head_p(&bus->const_flowring);
+ !dll_end(&bus->const_flowring, item); item = next) {
+ next = dll_next_p(item);
+
+ flow_ring_node = dhd_constlist_to_flowring(item);
+ ASSERT(flow_ring_node->active);
+ dhd_prot_update_txflowring(dhd, flow_ring_node->flowid, flow_ring_node->prot_info);
+ }
+}
+
+
+/* Mailbox ringbell Function */
+static void
+dhd_bus_gen_devmb_intr(struct dhd_bus *bus)
+{
+ if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
+ (bus->sih->buscorerev == 4)) {
+ DHD_ERROR(("mailbox communication not supported\n"));
+ return;
+ }
+ if (bus->db1_for_mb) {
+		/* this is a pcie core register, not the config register */
+ DHD_INFO(("writing a mail box interrupt to the device, through doorbell 1\n"));
+ si_corereg(bus->sih, bus->sih->buscoreidx, PCIH2D_DB1, ~0, 0x12345678);
+ }
+ else {
+ DHD_INFO(("writing a mail box interrupt to the device, through config space\n"));
+ dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 4, (1 << 0));
+ dhdpcie_bus_cfg_write_dword(bus, PCISBMbx, 4, (1 << 0));
+ }
+}
+
+/* doorbell ring Function */
+void
+dhd_bus_ringbell(struct dhd_bus *bus, uint32 value)
+{
+ if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
+ (bus->sih->buscorerev == 4)) {
+ si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, PCIE_INTB, PCIE_INTB);
+ } else {
+		/* this is a pcie core register, not the config register */
+ DHD_INFO(("writing a door bell to the device\n"));
+ si_corereg(bus->sih, bus->sih->buscoreidx, PCIH2D_MailBox, ~0, 0x12345678);
+ }
+}
+
+static void
+dhd_bus_ringbell_fast(struct dhd_bus *bus, uint32 value)
+{
+ W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, value);
+}
+
+static void
+dhd_bus_ringbell_oldpcie(struct dhd_bus *bus, uint32 value)
+{
+ uint32 w;
+ w = (R_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr) & ~PCIE_INTB) | PCIE_INTB;
+ W_REG(bus->pcie_mb_intr_osh, bus->pcie_mb_intr_addr, w);
+}
+
+dhd_mb_ring_t
+dhd_bus_get_mbintr_fn(struct dhd_bus *bus)
+{
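+	/* PCIe core revisions 2, 4 and 6 ring the dongle through PCIMailBoxInt;
+	 * other revisions use the PCIH2D_MailBox doorbell register. Fall back to
+	 * dhd_bus_ringbell() if the register address cannot be resolved.
+	 */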
+ if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
+ (bus->sih->buscorerev == 4)) {
+ bus->pcie_mb_intr_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
+ PCIMailBoxInt);
+ if (bus->pcie_mb_intr_addr) {
+ bus->pcie_mb_intr_osh = si_osh(bus->sih);
+ return dhd_bus_ringbell_oldpcie;
+ }
+ } else {
+ bus->pcie_mb_intr_addr = si_corereg_addr(bus->sih, bus->sih->buscoreidx,
+ PCIH2D_MailBox);
+ if (bus->pcie_mb_intr_addr) {
+ bus->pcie_mb_intr_osh = si_osh(bus->sih);
+ return dhd_bus_ringbell_fast;
+ }
+ }
+ return dhd_bus_ringbell;
+}
+
+bool BCMFASTPATH
+dhd_bus_dpc(struct dhd_bus *bus)
+{
+ uint32 intstatus = 0;
+ uint32 newstatus = 0;
+ bool resched = FALSE; /* Flag indicating resched wanted */
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (bus->dhd->busstate == DHD_BUS_DOWN) {
+ DHD_ERROR(("%s: Bus down, ret\n", __FUNCTION__));
+ bus->intstatus = 0;
+ return 0;
+ }
+
+ if (bus->dhd->busstate == DHD_BUS_SUSPEND) {
+ resched = TRUE;
+		DHD_ERROR(("%s: PCIe is still in suspend state\n", __FUNCTION__));
+ OSL_DELAY(20 * 1000); /* 20ms */
+ return resched;
+ }
+
+ intstatus = bus->intstatus;
+
+ if ((bus->sih->buscorerev == 6) || (bus->sih->buscorerev == 4) ||
+ (bus->sih->buscorerev == 2)) {
+ newstatus = dhdpcie_bus_cfg_read_dword(bus, PCIIntstatus, 4);
+ dhdpcie_bus_cfg_write_dword(bus, PCIIntstatus, 4, newstatus);
+ /* Merge new bits with previous */
+ intstatus |= newstatus;
+ bus->intstatus = 0;
+ if (intstatus & I_MB) {
+ dhdpcie_bus_process_mailbox_intr(bus, intstatus);
+ }
+ } else {
+ /* this is a PCIE core register..not a config register... */
+ newstatus = si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, 0, 0);
+ intstatus |= (newstatus & bus->def_intmask);
+ si_corereg(bus->sih, bus->sih->buscoreidx, PCIMailBoxInt, intstatus, intstatus);
+ if (intstatus & bus->def_intmask) {
+ dhdpcie_bus_process_mailbox_intr(bus, intstatus);
+ intstatus &= ~bus->def_intmask;
+ }
+ }
+
+ dhdpcie_bus_intr_enable(bus);
+ return resched;
+
+}
+
+
+static void
+dhdpcie_send_mb_data(dhd_bus_t *bus, uint32 h2d_mb_data)
+{
+ uint32 cur_h2d_mb_data = 0;
+
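+	/* Wait (up to ~1 ms) for the dongle to consume any previously posted
+	 * mailbox word before writing the new one.
+	 */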
+ dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, HTOD_MB_DATA, 0);
+
+ if (cur_h2d_mb_data != 0) {
+ uint32 i = 0;
+		DHD_INFO(("MB transaction is already pending 0x%04x\n", cur_h2d_mb_data));
+ while ((i++ < 100) && cur_h2d_mb_data) {
+ OSL_DELAY(10);
+ dhd_bus_cmn_readshared(bus, &cur_h2d_mb_data, HTOD_MB_DATA, 0);
+ }
+ if (i >= 100)
+ DHD_ERROR(("waited 1ms for the dngl to ack the previous mb transaction\n"));
+ }
+
+ dhd_bus_cmn_writeshared(bus, &h2d_mb_data, sizeof(uint32), HTOD_MB_DATA, 0);
+ dhd_bus_gen_devmb_intr(bus);
+
+ if (h2d_mb_data == H2D_HOST_D3_INFORM)
+ DHD_INFO_HW4(("%s: send H2D_HOST_D3_INFORM to dongle\n", __FUNCTION__));
+}
+
+static void
+dhdpcie_handle_mb_data(dhd_bus_t *bus)
+{
+ uint32 d2h_mb_data = 0;
+ uint32 zero = 0;
+ dhd_bus_cmn_readshared(bus, &d2h_mb_data, DTOH_MB_DATA, 0);
+ if (!d2h_mb_data)
+ return;
+
+ dhd_bus_cmn_writeshared(bus, &zero, sizeof(uint32), DTOH_MB_DATA, 0);
+
+ DHD_INFO(("D2H_MB_DATA: 0x%04x\n", d2h_mb_data));
+ if (d2h_mb_data & D2H_DEV_DS_ENTER_REQ) {
+ /* what should we do */
+ DHD_INFO(("D2H_MB_DATA: DEEP SLEEP REQ\n"));
+ dhdpcie_send_mb_data(bus, H2D_HOST_DS_ACK);
+ DHD_INFO(("D2H_MB_DATA: sent DEEP SLEEP ACK\n"));
+ }
+ if (d2h_mb_data & D2H_DEV_DS_EXIT_NOTE) {
+ /* what should we do */
+ DHD_INFO(("D2H_MB_DATA: DEEP SLEEP EXIT\n"));
+ }
+ if (d2h_mb_data & D2H_DEV_D3_ACK) {
+ /* what should we do */
+ DHD_INFO_HW4(("%s D2H_MB_DATA: Received D3 ACK\n", __FUNCTION__));
+ if (!bus->wait_for_d3_ack) {
+ bus->wait_for_d3_ack = 1;
+ dhd_os_ioctl_resp_wake(bus->dhd);
+ }
+ }
+ if (d2h_mb_data & D2H_DEV_FWHALT) {
+ DHD_INFO(("FW trap has happened\n"));
+#ifdef DHD_DEBUG
+ dhdpcie_checkdied(bus, NULL, 0);
+#endif
+ bus->dhd->busstate = DHD_BUS_DOWN;
+ }
+}
+
+static void
+dhdpcie_bus_process_mailbox_intr(dhd_bus_t *bus, uint32 intstatus)
+{
+
+ if ((bus->sih->buscorerev == 2) || (bus->sih->buscorerev == 6) ||
+ (bus->sih->buscorerev == 4)) {
+ /* Msg stream interrupt */
+ if (intstatus & I_BIT1) {
+ dhdpci_bus_read_frames(bus);
+ } else if (intstatus & I_BIT0) {
+ /* do nothing for Now */
+ }
+ }
+ else {
+ if (intstatus & (PCIE_MB_TOPCIE_FN0_0 | PCIE_MB_TOPCIE_FN0_1))
+ dhdpcie_handle_mb_data(bus);
+ if (intstatus & PCIE_MB_D2H_MB_MASK) {
+ dhdpci_bus_read_frames(bus);
+ }
+ }
+}
+
+/* Decode dongle to host message stream */
+static void
+dhdpci_bus_read_frames(dhd_bus_t *bus)
+{
+ /* There may be frames in both ctrl buf and data buf; check ctrl buf first */
+ DHD_PERIM_LOCK(bus->dhd); /* Take the perimeter lock */
+
+ dhd_prot_process_ctrlbuf(bus->dhd);
+
+ /* update the flow ring cpls */
+ dhd_update_txflowrings(bus->dhd);
+
+ dhd_prot_process_msgbuf_txcpl(bus->dhd);
+
+ dhd_prot_process_msgbuf_rxcpl(bus->dhd);
+
+ DHD_PERIM_UNLOCK(bus->dhd); /* Release the perimeter lock */
+}
+
+static int
+dhdpcie_readshared(dhd_bus_t *bus)
+{
+ uint32 addr = 0;
+ int rv, w_init, r_init;
+ uint32 shaddr = 0;
+ pciedev_shared_t *sh = bus->pcie_sh;
+ dhd_timeout_t tmo;
+
+ shaddr = bus->dongle_ram_base + bus->ramsize - 4;
+ /* start a timer for 5 seconds */
+ dhd_timeout_start(&tmo, MAX_READ_TIMEOUT);
+
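+	/* The last word of RAM holds the NVRAM length token written at download
+	 * time; the dongle overwrites it with the address of its pciedev_shared
+	 * structure once it is up, so poll until it changes or the timer expires.
+	 */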
+ while (((addr == 0) || (addr == bus->nvram_csm)) && !dhd_timeout_expired(&tmo)) {
+ /* Read last word in memory to determine address of sdpcm_shared structure */
+ addr = LTOH32(dhdpcie_bus_rtcm32(bus, shaddr));
+ }
+
+ if ((addr == 0) || (addr == bus->nvram_csm) || (addr < bus->dongle_ram_base) ||
+ (addr > shaddr)) {
+ DHD_ERROR(("%s: address (0x%08x) of pciedev_shared invalid\n",
+ __FUNCTION__, addr));
+ DHD_ERROR(("Waited %u usec, dongle is not ready\n", tmo.elapsed));
+ return BCME_ERROR;
+ } else {
+ bus->shared_addr = (ulong)addr;
+ DHD_ERROR(("PCIe shared addr read took %u usec "
+ "before dongle is ready\n", tmo.elapsed));
+ }
+
+ /* Read hndrte_shared structure */
+ if ((rv = dhdpcie_bus_membytes(bus, FALSE, addr, (uint8 *)sh,
+ sizeof(pciedev_shared_t))) < 0) {
+		DHD_ERROR(("Failed to read PCIe shared struct, "
+			"size read %d < %d\n", rv, (int)sizeof(pciedev_shared_t)));
+ return rv;
+ }
+
+ /* Endianness */
+ sh->flags = ltoh32(sh->flags);
+ sh->trap_addr = ltoh32(sh->trap_addr);
+ sh->assert_exp_addr = ltoh32(sh->assert_exp_addr);
+ sh->assert_file_addr = ltoh32(sh->assert_file_addr);
+ sh->assert_line = ltoh32(sh->assert_line);
+ sh->console_addr = ltoh32(sh->console_addr);
+ sh->msgtrace_addr = ltoh32(sh->msgtrace_addr);
+ sh->dma_rxoffset = ltoh32(sh->dma_rxoffset);
+ sh->rings_info_ptr = ltoh32(sh->rings_info_ptr);
+ /* load bus console address */
+
+#ifdef DHD_DEBUG
+ bus->console_addr = sh->console_addr;
+#endif
+
+ /* Read the dma rx offset */
+ bus->dma_rxoffset = bus->pcie_sh->dma_rxoffset;
+ dhd_prot_rx_dataoffset(bus->dhd, bus->dma_rxoffset);
+
+ DHD_ERROR(("DMA RX offset from shared Area %d\n", bus->dma_rxoffset));
+
+ if ((sh->flags & PCIE_SHARED_VERSION_MASK) > PCIE_SHARED_VERSION) {
+ DHD_ERROR(("%s: pcie_shared version %d in dhd "
+ "is older than pciedev_shared version %d in dongle\n",
+ __FUNCTION__, PCIE_SHARED_VERSION,
+ sh->flags & PCIE_SHARED_VERSION_MASK));
+ return BCME_ERROR;
+ }
+ if ((sh->flags & PCIE_SHARED_VERSION_MASK) >= 4) {
+ if (sh->flags & PCIE_SHARED_TXPUSH_SPRT) {
+#ifdef DHDTCPACK_SUPPRESS
+ /* Do not use tcpack suppress as packets don't stay in queue */
+ dhd_tcpack_suppress_set(bus->dhd, TCPACK_SUP_OFF);
+#endif
+ bus->txmode_push = TRUE;
+ } else
+ bus->txmode_push = FALSE;
+ }
+ DHD_ERROR(("bus->txmode_push is set to %d\n", bus->txmode_push));
+
+ /* Does the FW support DMA'ing r/w indices */
+ if (sh->flags & PCIE_SHARED_DMA_INDEX) {
+
+		DHD_ERROR(("%s: Host supports DMAing indices: H2D:%d - D2H:%d. FW supports it\n",
+ __FUNCTION__,
+ (DMA_INDX_ENAB(bus->dhd->dma_h2d_ring_upd_support) ? 1 : 0),
+ (DMA_INDX_ENAB(bus->dhd->dma_d2h_ring_upd_support) ? 1 : 0)));
+
+ } else if (DMA_INDX_ENAB(bus->dhd->dma_d2h_ring_upd_support) ||
+ DMA_INDX_ENAB(bus->dhd->dma_h2d_ring_upd_support)) {
+
+#ifdef BCM_INDX_DMA
+ DHD_ERROR(("%s: Incompatible FW. FW does not support DMAing indices\n",
+ __FUNCTION__));
+ return BCME_ERROR;
+#endif
+ DHD_ERROR(("%s: Host supports DMAing indices but FW does not\n",
+ __FUNCTION__));
+ bus->dhd->dma_d2h_ring_upd_support = FALSE;
+ bus->dhd->dma_h2d_ring_upd_support = FALSE;
+ }
+
+
+ /* get ring_info, ring_state and mb data ptrs and store the addresses in bus structure */
+ {
+ ring_info_t ring_info;
+
+ if ((rv = dhdpcie_bus_membytes(bus, FALSE, sh->rings_info_ptr,
+ (uint8 *)&ring_info, sizeof(ring_info_t))) < 0)
+ return rv;
+
+ bus->h2d_mb_data_ptr_addr = ltoh32(sh->h2d_mb_data_ptr);
+ bus->d2h_mb_data_ptr_addr = ltoh32(sh->d2h_mb_data_ptr);
+
+
+ bus->max_sub_queues = ltoh16(ring_info.max_sub_queues);
+
+ /* If both FW and Host support DMA'ing indices, allocate memory and notify FW
+ * The max_sub_queues is read from FW initialized ring_info
+ */
+ if (DMA_INDX_ENAB(bus->dhd->dma_h2d_ring_upd_support)) {
+ w_init = dhd_prot_init_index_dma_block(bus->dhd,
+ HOST_TO_DNGL_DMA_WRITEINDX_BUFFER,
+ bus->max_sub_queues);
+ r_init = dhd_prot_init_index_dma_block(bus->dhd,
+ DNGL_TO_HOST_DMA_READINDX_BUFFER,
+ BCMPCIE_D2H_COMMON_MSGRINGS);
+
+ if ((w_init != BCME_OK) || (r_init != BCME_OK)) {
+				DHD_ERROR(("%s: Failed to allocate memory for DMA'ing h2d indices. "
+					"Host will use w/r indices in TCM\n",
+ __FUNCTION__));
+ bus->dhd->dma_h2d_ring_upd_support = FALSE;
+ }
+ }
+
+ if (DMA_INDX_ENAB(bus->dhd->dma_d2h_ring_upd_support)) {
+ w_init = dhd_prot_init_index_dma_block(bus->dhd,
+ DNGL_TO_HOST_DMA_WRITEINDX_BUFFER,
+ BCMPCIE_D2H_COMMON_MSGRINGS);
+ r_init = dhd_prot_init_index_dma_block(bus->dhd,
+ HOST_TO_DNGL_DMA_READINDX_BUFFER,
+ bus->max_sub_queues);
+
+ if ((w_init != BCME_OK) || (r_init != BCME_OK)) {
+				DHD_ERROR(("%s: Failed to allocate memory for DMA'ing d2h indices. "
+					"Host will use w/r indices in TCM\n",
+ __FUNCTION__));
+ bus->dhd->dma_d2h_ring_upd_support = FALSE;
+ }
+ }
+
+ /* read ringmem and ringstate ptrs from shared area and store in host variables */
+ dhd_fillup_ring_sharedptr_info(bus, &ring_info);
+
+ bcm_print_bytes("ring_info_raw", (uchar *)&ring_info, sizeof(ring_info_t));
+ DHD_INFO(("ring_info\n"));
+
+ DHD_ERROR(("max H2D queues %d\n", ltoh16(ring_info.max_sub_queues)));
+
+ DHD_INFO(("mail box address\n"));
+ DHD_INFO(("h2d_mb_data_ptr_addr 0x%04x\n", bus->h2d_mb_data_ptr_addr));
+ DHD_INFO(("d2h_mb_data_ptr_addr 0x%04x\n", bus->d2h_mb_data_ptr_addr));
+ }
+ return BCME_OK;
+}
+/* Read ring mem and ring state ptr info from the shared area in TCM */
+static void
+dhd_fillup_ring_sharedptr_info(dhd_bus_t *bus, ring_info_t *ring_info)
+{
+ uint16 i = 0;
+ uint16 j = 0;
+ uint32 tcm_memloc;
+ uint32 d2h_w_idx_ptr, d2h_r_idx_ptr, h2d_w_idx_ptr, h2d_r_idx_ptr;
+
+ /* Ring mem ptr info */
+	/* Allocated in the order
+ H2D_MSGRING_CONTROL_SUBMIT 0
+ H2D_MSGRING_RXPOST_SUBMIT 1
+ D2H_MSGRING_CONTROL_COMPLETE 2
+ D2H_MSGRING_TX_COMPLETE 3
+ D2H_MSGRING_RX_COMPLETE 4
+ TX_FLOW_RING 5
+ */
+
+ {
+ /* ringmemptr holds start of the mem block address space */
+ tcm_memloc = ltoh32(ring_info->ringmem_ptr);
+
+		/* Find out the ringmem ptr for each common ring */
+ for (i = 0; i <= BCMPCIE_COMMON_MSGRING_MAX_ID; i++) {
+ bus->ring_sh[i].ring_mem_addr = tcm_memloc;
+ /* Update mem block */
+ tcm_memloc = tcm_memloc + sizeof(ring_mem_t);
+ DHD_INFO(("ring id %d ring mem addr 0x%04x \n",
+ i, bus->ring_sh[i].ring_mem_addr));
+ }
+
+ /* Tx flow Ring */
+ if (bus->txmode_push) {
+ bus->ring_sh[i].ring_mem_addr = tcm_memloc;
+ DHD_INFO(("TX ring ring id %d ring mem addr 0x%04x \n",
+ i, bus->ring_sh[i].ring_mem_addr));
+ }
+ }
+
+ /* Ring state mem ptr info */
+ {
+ d2h_w_idx_ptr = ltoh32(ring_info->d2h_w_idx_ptr);
+ d2h_r_idx_ptr = ltoh32(ring_info->d2h_r_idx_ptr);
+ h2d_w_idx_ptr = ltoh32(ring_info->h2d_w_idx_ptr);
+ h2d_r_idx_ptr = ltoh32(ring_info->h2d_r_idx_ptr);
+ /* Store h2d common ring write/read pointers */
+ for (i = 0; i < BCMPCIE_H2D_COMMON_MSGRINGS; i++) {
+ bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr;
+ bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr;
+
+ /* update mem block */
+ h2d_w_idx_ptr = h2d_w_idx_ptr + sizeof(uint32);
+ h2d_r_idx_ptr = h2d_r_idx_ptr + sizeof(uint32);
+
+ DHD_INFO(("h2d w/r : idx %d write %x read %x \n", i,
+ bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
+ }
+ /* Store d2h common ring write/read pointers */
+ for (j = 0; j < BCMPCIE_D2H_COMMON_MSGRINGS; j++, i++) {
+ bus->ring_sh[i].ring_state_w = d2h_w_idx_ptr;
+ bus->ring_sh[i].ring_state_r = d2h_r_idx_ptr;
+
+ /* update mem block */
+ d2h_w_idx_ptr = d2h_w_idx_ptr + sizeof(uint32);
+ d2h_r_idx_ptr = d2h_r_idx_ptr + sizeof(uint32);
+
+ DHD_INFO(("d2h w/r : idx %d write %x read %x \n", i,
+ bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
+ }
+
+ /* Store txflow ring write/read pointers */
+ if (bus->txmode_push) {
+ bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr;
+ bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr;
+
+ DHD_INFO(("txflow : idx %d write %x read %x \n", i,
+ bus->ring_sh[i].ring_state_w, bus->ring_sh[i].ring_state_r));
+ } else {
+ for (j = 0; j < (bus->max_sub_queues - BCMPCIE_H2D_COMMON_MSGRINGS);
+ i++, j++)
+ {
+ bus->ring_sh[i].ring_state_w = h2d_w_idx_ptr;
+ bus->ring_sh[i].ring_state_r = h2d_r_idx_ptr;
+
+ /* update mem block */
+ h2d_w_idx_ptr = h2d_w_idx_ptr + sizeof(uint32);
+ h2d_r_idx_ptr = h2d_r_idx_ptr + sizeof(uint32);
+
+ DHD_INFO(("FLOW Rings h2d w/r : idx %d write %x read %x \n", i,
+ bus->ring_sh[i].ring_state_w,
+ bus->ring_sh[i].ring_state_r));
+ }
+ }
+ }
+}
+/* Initialize bus module: prepare for communication w/dongle */
+int dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex)
+{
+ dhd_bus_t *bus = dhdp->bus;
+ int ret = 0;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ ASSERT(bus->dhd);
+ if (!bus->dhd)
+ return 0;
+
+ /* Make sure we're talking to the core. */
+ bus->reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0);
+ ASSERT(bus->reg != NULL);
+
+	/* before opening up the bus for data transfer, check if the shared area is intact */
+ ret = dhdpcie_readshared(bus);
+ if (ret < 0) {
+ DHD_ERROR(("%s :Shared area read failed \n", __FUNCTION__));
+ return ret;
+ }
+
+
+ /* Make sure we're talking to the core. */
+ bus->reg = si_setcore(bus->sih, PCIE2_CORE_ID, 0);
+ ASSERT(bus->reg != NULL);
+
+ /* Set bus state according to enable result */
+ dhdp->busstate = DHD_BUS_DATA;
+
+ /* Enable the interrupt after device is up */
+ dhdpcie_bus_intr_enable(bus);
+
+ /* bcmsdh_intr_unmask(bus->sdh); */
+
+ return ret;
+
+}
+
+
+static void
+dhdpcie_init_shared_addr(dhd_bus_t *bus)
+{
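+	/* Clear the last word of dongle RAM; the dongle later publishes the
+	 * address of its shared structure there (see dhdpcie_readshared).
+	 */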
+ uint32 addr = 0;
+ uint32 val = 0;
+ addr = bus->dongle_ram_base + bus->ramsize - 4;
+ dhdpcie_bus_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val));
+}
+
+
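+/* Returns 0 when the vendor/device pair is supported and a non-zero value
+ * (-ENODEV) otherwise, despite the bool return type.
+ */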
+bool
+dhdpcie_chipmatch(uint16 vendor, uint16 device)
+{
+ if (vendor != PCI_VENDOR_ID_BROADCOM) {
+ DHD_ERROR(("%s: Unsupported vendor %x device %x\n", __FUNCTION__,
+ vendor, device));
+ return (-ENODEV);
+ }
+
+ if ((device == BCM4350_D11AC_ID) || (device == BCM4350_D11AC2G_ID) ||
+ (device == BCM4350_D11AC5G_ID) || BCM4350_CHIP(device))
+ return 0;
+
+ if ((device == BCM4354_D11AC_ID) || (device == BCM4354_D11AC2G_ID) ||
+ (device == BCM4354_D11AC5G_ID) || (device == BCM4354_CHIP_ID))
+ return 0;
+
+ if ((device == BCM4356_D11AC_ID) || (device == BCM4356_D11AC2G_ID) ||
+ (device == BCM4356_D11AC5G_ID) || (device == BCM4356_CHIP_ID))
+ return 0;
+
+ if ((device == BCM4345_D11AC_ID) || (device == BCM4345_D11AC2G_ID) ||
+ (device == BCM4345_D11AC5G_ID) || (device == BCM4345_CHIP_ID))
+ return 0;
+
+ if ((device == BCM4335_D11AC_ID) || (device == BCM4335_D11AC2G_ID) ||
+ (device == BCM4335_D11AC5G_ID) || (device == BCM4335_CHIP_ID))
+ return 0;
+
+ if ((device == BCM43602_D11AC_ID) || (device == BCM43602_D11AC2G_ID) ||
+ (device == BCM43602_D11AC5G_ID) || (device == BCM43602_CHIP_ID))
+ return 0;
+
+ if ((device == BCM43569_D11AC_ID) || (device == BCM43569_D11AC2G_ID) ||
+ (device == BCM43569_D11AC5G_ID) || (device == BCM43569_CHIP_ID))
+ return 0;
+
+ if ((device == BCM4358_D11AC_ID) || (device == BCM4358_D11AC2G_ID) ||
+ (device == BCM4358_D11AC5G_ID) || (device == BCM4358_CHIP_ID))
+ return 0;
+
+ if ((device == BCM4349_D11AC_ID) || (device == BCM4349_D11AC2G_ID) ||
+ (device == BCM4349_D11AC5G_ID) || (device == BCM4349_CHIP_ID))
+ return 0;
+ if ((device == BCM4355_D11AC_ID) || (device == BCM4355_D11AC2G_ID) ||
+ (device == BCM4355_D11AC5G_ID) || (device == BCM4355_CHIP_ID))
+ return 0;
+ if ((device == BCM4359_D11AC_ID) || (device == BCM4359_D11AC2G_ID) ||
+ (device == BCM4359_D11AC5G_ID) || (device == BCM4359_CHIP_ID))
+ return 0;
+
+
+ DHD_ERROR(("%s: Unsupported vendor %x device %x\n", __FUNCTION__, vendor, device));
+ return (-ENODEV);
+}
+
+
+/*
+ * dhdpcie_cc_nvmshadow
+ *
+ * A shadow of the OTP/SPROM contents exists in the ChipCommon region
+ * between 0x800 and 0xBFF (backplane addresses 0x1800_0800 to 0x1800_0BFF).
+ * The strapping option (SPROM vs. OTP), the presence of OTP/SPROM and its
+ * size can also be read from the ChipCommon registers.
+ */
+
+static int
+dhdpcie_cc_nvmshadow(dhd_bus_t *bus, struct bcmstrbuf *b)
+{
+ uint16 dump_offset = 0;
+ uint32 dump_size = 0, otp_size = 0, sprom_size = 0;
+
+ /* Table for 65nm OTP Size (in bits) */
+ int otp_size_65nm[8] = {0, 2048, 4096, 8192, 4096, 6144, 512, 1024};
+
+ volatile uint16 *nvm_shadow;
+
+ uint cur_coreid;
+ uint chipc_corerev;
+ chipcregs_t *chipcregs;
+
+
+ /* Save the current core */
+ cur_coreid = si_coreid(bus->sih);
+ /* Switch to ChipC */
+ chipcregs = (chipcregs_t *)si_setcore(bus->sih, CC_CORE_ID, 0);
+ chipc_corerev = si_corerev(bus->sih);
+
+ /* Check ChipcommonCore Rev */
+ if (chipc_corerev < 44) {
+ DHD_ERROR(("%s: ChipcommonCore Rev %d < 44\n", __FUNCTION__, chipc_corerev));
+ return BCME_UNSUPPORTED;
+ }
+
+ /* Check ChipID */
+ if (((uint16)bus->sih->chip != BCM4350_CHIP_ID) &&
+ ((uint16)bus->sih->chip != BCM4345_CHIP_ID)) {
+ DHD_ERROR(("%s: cc_nvmdump cmd. supported for 4350/4345 only\n",
+ __FUNCTION__));
+ return BCME_UNSUPPORTED;
+ }
+
+ /* Check if SRC_PRESENT in SpromCtrl(0x190 in ChipCommon Regs) is set */
+ if (chipcregs->sromcontrol & SRC_PRESENT) {
+ /* SPROM Size: 1Kbits (0x0), 4Kbits (0x1), 16Kbits(0x2) */
+ sprom_size = (1 << (2 * ((chipcregs->sromcontrol & SRC_SIZE_MASK)
+ >> SRC_SIZE_SHIFT))) * 1024;
+ bcm_bprintf(b, "\nSPROM Present (Size %d bits)\n", sprom_size);
+ }
+
+ if (chipcregs->sromcontrol & SRC_OTPPRESENT) {
+ bcm_bprintf(b, "\nOTP Present");
+
+ if (((chipcregs->otplayout & OTPL_WRAP_TYPE_MASK) >> OTPL_WRAP_TYPE_SHIFT)
+ == OTPL_WRAP_TYPE_40NM) {
+ /* 40nm OTP: Size = (OtpSize + 1) * 1024 bits */
+ otp_size = (((chipcregs->capabilities & CC_CAP_OTPSIZE)
+ >> CC_CAP_OTPSIZE_SHIFT) + 1) * 1024;
+ bcm_bprintf(b, "(Size %d bits)\n", otp_size);
+ } else {
+ /* This part is untested since newer chips have 40nm OTP */
+ otp_size = otp_size_65nm[(chipcregs->capabilities & CC_CAP_OTPSIZE)
+ >> CC_CAP_OTPSIZE_SHIFT];
+ bcm_bprintf(b, "(Size %d bits)\n", otp_size);
+ DHD_INFO(("%s: 65nm/130nm OTP Size not tested. \n",
+ __FUNCTION__));
+ }
+ }
+
+ if (((chipcregs->sromcontrol & SRC_PRESENT) == 0) &&
+ ((chipcregs->capabilities & CC_CAP_OTPSIZE) == 0)) {
+ DHD_ERROR(("%s: SPROM and OTP could not be found \n",
+ __FUNCTION__));
+ return BCME_NOTFOUND;
+ }
+
+ /* Check the strapping option in SpromCtrl: Set = OTP otherwise SPROM */
+ if ((chipcregs->sromcontrol & SRC_OTPSEL) &&
+ (chipcregs->sromcontrol & SRC_OTPPRESENT)) {
+
+ bcm_bprintf(b, "OTP Strap selected.\n"
+ "\nOTP Shadow in ChipCommon:\n");
+
+ dump_size = otp_size / 16 ; /* 16bit words */
+
+ } else if (((chipcregs->sromcontrol & SRC_OTPSEL) == 0) &&
+ (chipcregs->sromcontrol & SRC_PRESENT)) {
+
+ bcm_bprintf(b, "SPROM Strap selected\n"
+ "\nSPROM Shadow in ChipCommon:\n");
+
+ /* If SPROM > 8K only 8Kbits is mapped to ChipCommon (0x800 - 0xBFF) */
+ /* dump_size in 16bit words */
+ dump_size = sprom_size > 8 ? (8 * 1024) / 16 : sprom_size / 16;
+ }
+ else {
+ DHD_ERROR(("%s: NVM Shadow does not exist in ChipCommon\n",
+ __FUNCTION__));
+ return BCME_NOTFOUND;
+ }
+
+ if (bus->regs == NULL) {
+ DHD_ERROR(("ChipCommon Regs. not initialized\n"));
+ return BCME_NOTREADY;
+ } else {
+		bcm_bprintf(b, "\n Offset:");
+
+ /* Point to the SPROM/OTP shadow in ChipCommon */
+ nvm_shadow = chipcregs->sromotp;
+
+ /*
+ * Read 16 bits / iteration.
+ * dump_size & dump_offset in 16-bit words
+ */
+ while (dump_offset < dump_size) {
+ if (dump_offset % 2 == 0)
+ /* Print the offset in the shadow space in Bytes */
+ bcm_bprintf(b, "\n 0x%04x", dump_offset * 2);
+
+ bcm_bprintf(b, "\t0x%04x", *(nvm_shadow + dump_offset));
+ dump_offset += 0x1;
+ }
+ }
+
+ /* Switch back to the original core */
+ si_setcore(bus->sih, cur_coreid, 0);
+
+ return BCME_OK;
+}
+
+
+uint8 BCMFASTPATH
+dhd_bus_is_txmode_push(dhd_bus_t *bus)
+{
+ return bus->txmode_push;
+}
+
+void dhd_bus_clean_flow_ring(dhd_bus_t *bus, uint16 flowid)
+{
+ void *pkt;
+ flow_queue_t *queue;
+ flow_ring_node_t *flow_ring_node;
+ unsigned long flags;
+
+ flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
+ ASSERT(flow_ring_node->flowid == flowid);
+
+ queue = &flow_ring_node->queue;
+
+ /* Call Flow ring clean up */
+ dhd_prot_clean_flow_ring(bus->dhd, flow_ring_node->prot_info);
+ dhd_flowid_free(bus->dhd, flow_ring_node->flow_info.ifindex,
+ flow_ring_node->flowid);
+
+ /* clean up BUS level info */
+ DHD_QUEUE_LOCK(queue->lock, flags);
+
+#ifdef DHDTCPACK_SUPPRESS
+ /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
+ * when there is a newly coming packet from network stack.
+ */
+ dhd_tcpack_info_tbl_clean(bus->dhd);
+#endif /* DHDTCPACK_SUPPRESS */
+ /* Flush all pending packets in the queue, if any */
+ while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
+ PKTFREE(bus->dhd->osh, pkt, TRUE);
+ }
+ ASSERT(flow_queue_empty(queue));
+
+ DHD_QUEUE_UNLOCK(queue->lock, flags);
+
+ flow_ring_node->active = FALSE;
+
+ dll_delete(&flow_ring_node->list);
+}
+
+/*
+ * Allocate a Flow ring buffer,
+ * Init Ring buffer,
+ * Send Msg to device about flow ring creation
+*/
+int
+dhd_bus_flow_ring_create_request(dhd_bus_t *bus, void *arg)
+{
+ flow_ring_node_t *flow_ring_node = (flow_ring_node_t *)arg;
+
+ DHD_INFO(("%s :Flow create\n", __FUNCTION__));
+
+ /* Send Msg to device about flow ring creation */
+ dhd_prot_flow_ring_create(bus->dhd, flow_ring_node);
+
+ flow_ring_node->status = FLOW_RING_STATUS_PENDING;
+
+ dll_prepend(&bus->const_flowring, &flow_ring_node->list);
+
+ return BCME_OK;
+}
+
+void
+dhd_bus_flow_ring_create_response(dhd_bus_t *bus, uint16 flowid, int32 status)
+{
+ flow_ring_node_t *flow_ring_node;
+
+ DHD_INFO(("%s :Flow Response %d \n", __FUNCTION__, flowid));
+
+ flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
+ ASSERT(flow_ring_node->flowid == flowid);
+
+ if (status != BCME_OK) {
+ DHD_ERROR(("%s Flow create Response failure error status = %d \n",
+ __FUNCTION__, status));
+ /* Call Flow clean up */
+ dhd_bus_clean_flow_ring(bus, flowid);
+ return;
+ }
+
+ flow_ring_node->status = FLOW_RING_STATUS_OPEN;
+
+ dhd_bus_schedule_queue(bus, flowid, FALSE);
+
+ return;
+}
+
+int
+dhd_bus_flow_ring_delete_request(dhd_bus_t *bus, void *arg)
+{
+ void * pkt;
+ flow_queue_t *queue;
+ flow_ring_node_t *flow_ring_node;
+ unsigned long flags;
+
+ DHD_INFO(("%s :Flow Delete\n", __FUNCTION__));
+
+ flow_ring_node = (flow_ring_node_t *)arg;
+
+ if (flow_ring_node->status & FLOW_RING_STATUS_DELETE_PENDING) {
+ DHD_ERROR(("%s :Delete Pending\n", __FUNCTION__));
+ return BCME_ERROR;
+ }
+
+ queue = &flow_ring_node->queue; /* queue associated with flow ring */
+
+ DHD_QUEUE_LOCK(queue->lock, flags);
+
+#ifdef DHDTCPACK_SUPPRESS
+ /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
+ * when there is a newly coming packet from network stack.
+ */
+ dhd_tcpack_info_tbl_clean(bus->dhd);
+#endif /* DHDTCPACK_SUPPRESS */
+ /* Flush all pending packets in the queue, if any */
+ while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
+ PKTFREE(bus->dhd->osh, pkt, TRUE);
+ }
+ ASSERT(flow_queue_empty(queue));
+
+ DHD_QUEUE_UNLOCK(queue->lock, flags);
+
+ /* Send Msg to device about flow ring deletion */
+ dhd_prot_flow_ring_delete(bus->dhd, flow_ring_node);
+
+ flow_ring_node->status = FLOW_RING_STATUS_DELETE_PENDING;
+ return BCME_OK;
+}
+
+void
+dhd_bus_flow_ring_delete_response(dhd_bus_t *bus, uint16 flowid, uint32 status)
+{
+ flow_ring_node_t *flow_ring_node;
+
+ DHD_INFO(("%s :Flow Delete Response %d \n", __FUNCTION__, flowid));
+
+ flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
+ ASSERT(flow_ring_node->flowid == flowid);
+
+ if (status != BCME_OK) {
+ DHD_ERROR(("%s Flow Delete Response failure error status = %d \n",
+ __FUNCTION__, status));
+ return;
+ }
+ /* Call Flow clean up */
+ dhd_bus_clean_flow_ring(bus, flowid);
+
+ flow_ring_node->status = FLOW_RING_STATUS_OPEN;
+ flow_ring_node->active = FALSE;
+ return;
+
+}
+
+int dhd_bus_flow_ring_flush_request(dhd_bus_t *bus, void *arg)
+{
+ void *pkt;
+ flow_queue_t *queue;
+ flow_ring_node_t *flow_ring_node;
+ unsigned long flags;
+
+	DHD_INFO(("%s :Flow Flush\n", __FUNCTION__));
+
+ flow_ring_node = (flow_ring_node_t *)arg;
+ queue = &flow_ring_node->queue; /* queue associated with flow ring */
+
+ DHD_QUEUE_LOCK(queue->lock, flags);
+
+#ifdef DHDTCPACK_SUPPRESS
+ /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
+ * when there is a newly coming packet from network stack.
+ */
+ dhd_tcpack_info_tbl_clean(bus->dhd);
+#endif /* DHDTCPACK_SUPPRESS */
+ /* Flush all pending packets in the queue, if any */
+ while ((pkt = dhd_flow_queue_dequeue(bus->dhd, queue)) != NULL) {
+ PKTFREE(bus->dhd->osh, pkt, TRUE);
+ }
+ ASSERT(flow_queue_empty(queue));
+
+ DHD_QUEUE_UNLOCK(queue->lock, flags);
+
+ /* Send Msg to device about flow ring flush */
+ dhd_prot_flow_ring_flush(bus->dhd, flow_ring_node);
+
+ flow_ring_node->status = FLOW_RING_STATUS_FLUSH_PENDING;
+ return BCME_OK;
+}
+
+void
+dhd_bus_flow_ring_flush_response(dhd_bus_t *bus, uint16 flowid, uint32 status)
+{
+ flow_ring_node_t *flow_ring_node;
+
+ if (status != BCME_OK) {
+ DHD_ERROR(("%s Flow flush Response failure error status = %d \n",
+ __FUNCTION__, status));
+ return;
+ }
+
+ flow_ring_node = DHD_FLOW_RING(bus->dhd, flowid);
+ ASSERT(flow_ring_node->flowid == flowid);
+
+ flow_ring_node->status = FLOW_RING_STATUS_OPEN;
+ return;
+}
+
+uint32
+dhd_bus_max_h2d_queues(struct dhd_bus *bus, uint8 *txpush)
+{
+ if (bus->txmode_push)
+ *txpush = 1;
+ else
+ *txpush = 0;
+ return bus->max_sub_queues;
+}
+
+int
+dhdpcie_bus_clock_start(struct dhd_bus *bus)
+{
+ return dhdpcie_start_host_pcieclock(bus);
+}
+
+int
+dhdpcie_bus_clock_stop(struct dhd_bus *bus)
+{
+ return dhdpcie_stop_host_pcieclock(bus);
+}
+
+int
+dhdpcie_bus_disable_device(struct dhd_bus *bus)
+{
+ return dhdpcie_disable_device(bus);
+}
+
+int
+dhdpcie_bus_enable_device(struct dhd_bus *bus)
+{
+ return dhdpcie_enable_device(bus);
+}
+
+int
+dhdpcie_bus_alloc_resource(struct dhd_bus *bus)
+{
+ return dhdpcie_alloc_resource(bus);
+}
+
+void
+dhdpcie_bus_free_resource(struct dhd_bus *bus)
+{
+ dhdpcie_free_resource(bus);
+}
+
+bool
+dhdpcie_bus_dongle_attach(struct dhd_bus *bus)
+{
+ return dhdpcie_dongle_attach(bus);
+}
+
+int
+dhd_bus_release_dongle(struct dhd_bus *bus)
+{
+ bool dongle_isolation;
+ osl_t *osh;
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ if (bus) {
+ osh = bus->osh;
+ ASSERT(osh);
+
+ if (bus->dhd) {
+ dongle_isolation = bus->dhd->dongle_isolation;
+ dhdpcie_bus_release_dongle(bus, osh, dongle_isolation, TRUE);
+ }
+ }
+
+ return 0;
+}
--- /dev/null
+/*
+ * Linux DHD Bus Module for PCIE
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: dhd_pcie.h 491657 2014-07-17 06:29:40Z $
+ */
+
+
+#ifndef dhd_pcie_h
+#define dhd_pcie_h
+
+#include <bcmpcie.h>
+#include <hnd_cons.h>
+
+/* defines */
+
+#define PCMSGBUF_HDRLEN 0
+#define DONGLE_REG_MAP_SIZE (32 * 1024)
+#define DONGLE_TCM_MAP_SIZE (4096 * 1024)
+#define DONGLE_MIN_MEMSIZE		(128 * 1024)
+#ifdef DHD_DEBUG
+#define DHD_PCIE_SUCCESS 0
+#define DHD_PCIE_FAILURE 1
+#endif /* DHD_DEBUG */
+#define REMAP_ENAB(bus) ((bus)->remap)
+#define REMAP_ISADDR(bus, a) (((a) >= ((bus)->orig_ramsize)) && ((a) < ((bus)->ramsize)))
+
+#define MAX_DHD_TX_FLOWS 256
+
+/* user defined data structures */
+#ifdef DHD_DEBUG
+/* Device console log buffer state */
+#define CONSOLE_LINE_MAX 192
+#define CONSOLE_BUFFER_MAX 2024
+
+
+typedef struct dhd_console {
+ uint count; /* Poll interval msec counter */
+ uint log_addr; /* Log struct address (fixed) */
+ hnd_log_t log; /* Log struct (host copy) */
+ uint bufsize; /* Size of log buffer */
+ uint8 *buf; /* Log buffer (host copy) */
+ uint last; /* Last buffer read index */
+} dhd_console_t;
+#endif /* DHD_DEBUG */
+typedef struct ring_sh_info {
+ uint32 ring_mem_addr;
+ uint32 ring_state_w;
+ uint32 ring_state_r;
+} ring_sh_info_t;
+
+typedef struct dhd_bus {
+ dhd_pub_t *dhd;
+ struct pci_dev *dev; /* pci device handle */
+ dll_t const_flowring; /* constructed list of tx flowring queues */
+
+ si_t *sih; /* Handle for SI calls */
+ char *vars; /* Variables (from CIS and/or other) */
+ uint varsz; /* Size of variables buffer */
+ uint32 sbaddr; /* Current SB window pointer (-1, invalid) */
+ sbpcieregs_t *reg; /* Registers for PCIE core */
+
+ uint armrev; /* CPU core revision */
+ uint ramrev; /* SOCRAM core revision */
+ uint32 ramsize; /* Size of RAM in SOCRAM (bytes) */
+ uint32 orig_ramsize; /* Size of RAM in SOCRAM (bytes) */
+ uint32 srmemsize; /* Size of SRMEM */
+
+ uint32 bus; /* gSPI or SDIO bus */
+ uint32 intstatus; /* Intstatus bits (events) pending */
+ bool dpc_sched; /* Indicates DPC schedule (intrpt rcvd) */
+ bool fcstate; /* State of dongle flow-control */
+
+ uint16 cl_devid; /* cached devid for dhdsdio_probe_attach() */
+ char *fw_path; /* module_param: path to firmware image */
+ char *nv_path; /* module_param: path to nvram vars file */
+ char *nvram_params; /* user specified nvram params. */
+ int nvram_params_len;
+
+ struct pktq txq; /* Queue length used for flow-control */
+
+ uint rxlen; /* Length of valid data in buffer */
+
+
+ bool intr; /* Use interrupts */
+ bool ipend; /* Device interrupt is pending */
+ bool intdis; /* Interrupts disabled by isr */
+ uint intrcount; /* Count of device interrupt callbacks */
+ uint lastintrs; /* Count as of last watchdog timer */
+
+#ifdef DHD_DEBUG
+ dhd_console_t console; /* Console output polling support */
+ uint console_addr; /* Console address from shared struct */
+#endif /* DHD_DEBUG */
+
+ bool alp_only; /* Don't use HT clock (ALP only) */
+
+ bool remap; /* Contiguous 1MB RAM: 512K socram + 512K devram
+ * Available with socram rev 16
+ * Remap region not DMA-able
+ */
+ uint32 resetinstr;
+ uint32 dongle_ram_base;
+
+ ulong shared_addr;
+ pciedev_shared_t *pcie_sh;
+ bool bus_flowctrl;
+ ioctl_comp_resp_msg_t ioct_resp;
+ uint32 dma_rxoffset;
+ volatile char *regs; /* pci device memory va */
+ volatile char *tcm; /* pci device memory va */
+ osl_t *osh;
+ uint32 nvram_csm; /* Nvram checksum */
+ uint16 pollrate;
+ uint16 polltick;
+
+ uint32 *pcie_mb_intr_addr;
+ void *pcie_mb_intr_osh;
+ bool sleep_allowed;
+
+ /* version 3 shared struct related info start */
+ ring_sh_info_t ring_sh[BCMPCIE_COMMON_MSGRINGS + MAX_DHD_TX_FLOWS];
+ uint8 h2d_ring_count;
+ uint8 d2h_ring_count;
+ uint32 ringmem_ptr;
+ uint32 ring_state_ptr;
+
+ uint32 d2h_dma_scratch_buffer_mem_addr;
+
+ uint32 h2d_mb_data_ptr_addr;
+ uint32 d2h_mb_data_ptr_addr;
+ /* version 3 shared struct related info end */
+
+ uint32 def_intmask;
+ bool ltrsleep_on_unload;
+ uint wait_for_d3_ack;
+ uint8 txmode_push;
+ uint32 max_sub_queues;
+ bool db1_for_mb;
+
+} dhd_bus_t;
+
+/* function declarations */
+
+extern uint32* dhdpcie_bus_reg_map(osl_t *osh, ulong addr, int size);
+extern int dhdpcie_bus_register(void);
+extern void dhdpcie_bus_unregister(void);
+extern bool dhdpcie_chipmatch(uint16 vendor, uint16 device);
+
+extern struct dhd_bus* dhdpcie_bus_attach(osl_t *osh, volatile char* regs, volatile char* tcm);
+extern uint32 dhdpcie_bus_cfg_read_dword(struct dhd_bus *bus, uint32 addr, uint32 size);
+extern void dhdpcie_bus_cfg_write_dword(struct dhd_bus *bus, uint32 addr, uint32 size, uint32 data);
+extern void dhdpcie_bus_intr_disable(struct dhd_bus *bus);
+extern void dhdpcie_bus_release(struct dhd_bus *bus);
+extern int32 dhdpcie_bus_isr(struct dhd_bus *bus);
+extern void dhdpcie_free_irq(dhd_bus_t *bus);
+extern int dhdpcie_bus_suspend(struct dhd_bus *bus, bool state);
+extern int dhdpcie_pci_suspend_resume(struct pci_dev *dev, bool state);
+extern int dhdpcie_start_host_pcieclock(dhd_bus_t *bus);
+extern int dhdpcie_stop_host_pcieclock(dhd_bus_t *bus);
+extern int dhdpcie_disable_device(dhd_bus_t *bus);
+extern int dhdpcie_enable_device(dhd_bus_t *bus);
+extern int dhdpcie_alloc_resource(dhd_bus_t *bus);
+extern void dhdpcie_free_resource(dhd_bus_t *bus);
+
+extern int dhd_buzzz_dump_dngl(dhd_bus_t *bus);
+#endif /* dhd_pcie_h */
--- /dev/null
+/*
+ * Linux DHD Bus Module for PCIE
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: dhd_pcie_linux.c 491657 2014-07-17 06:29:40Z $
+ */
+
+
+/* include files */
+#include <typedefs.h>
+#include <bcmutils.h>
+#include <bcmdevs.h>
+#include <siutils.h>
+#include <hndsoc.h>
+#include <hndpmu.h>
+#include <sbchipc.h>
+#if defined(DHD_DEBUG)
+#include <hnd_armtrap.h>
+#include <hnd_cons.h>
+#endif /* defined(DHD_DEBUG) */
+#include <dngl_stats.h>
+#include <pcie_core.h>
+#include <dhd.h>
+#include <dhd_bus.h>
+#include <dhd_proto.h>
+#include <dhd_dbg.h>
+#include <dhdioctl.h>
+#include <bcmmsgbuf.h>
+#include <pcicfg.h>
+#include <dhd_pcie.h>
+#include <dhd_linux.h>
+#ifdef CONFIG_ARCH_MSM
+#include <mach/msm_pcie.h>
+#endif
+
+#define PCI_CFG_RETRY 10
+#define OS_HANDLE_MAGIC 0x1234abcd /* Magic # to recognize osh */
+#define BCM_MEM_FILENAME_LEN 24 /* Mem. filename length */
+
+#define OSL_PKTTAG_CLEAR(p) \
+do { \
+ struct sk_buff *s = (struct sk_buff *)(p); \
+ ASSERT(OSL_PKTTAG_SZ == 32); \
+ *(uint32 *)(&s->cb[0]) = 0; *(uint32 *)(&s->cb[4]) = 0; \
+ *(uint32 *)(&s->cb[8]) = 0; *(uint32 *)(&s->cb[12]) = 0; \
+ *(uint32 *)(&s->cb[16]) = 0; *(uint32 *)(&s->cb[20]) = 0; \
+ *(uint32 *)(&s->cb[24]) = 0; *(uint32 *)(&s->cb[28]) = 0; \
+} while (0)
+
+
+/* user defined data structures */
+
+typedef struct dhd_pc_res {
+ uint32 bar0_size;
+ void* bar0_addr;
+ uint32 bar1_size;
+ void* bar1_addr;
+} pci_config_res, *pPci_config_res;
+
+typedef bool (*dhdpcie_cb_fn_t)(void *);
+
+typedef struct dhdpcie_info
+{
+ dhd_bus_t *bus;
+ osl_t *osh;
+ struct pci_dev *dev; /* pci device handle */
+	volatile char *regs;	/* pci device BAR0 memory va */
+	volatile char *tcm;	/* pci device BAR1 (TCM) memory va */
+ uint32 tcm_size; /* pci device memory size */
+ struct pcos_info *pcos_info;
+ uint16 last_intrstatus; /* to cache intrstatus */
+ int irq;
+ char pciname[32];
+ struct pci_saved_state* state;
+} dhdpcie_info_t;
+
+
+struct pcos_info {
+ dhdpcie_info_t *pc;
+ spinlock_t lock;
+ wait_queue_head_t intr_wait_queue;
+ struct timer_list tuning_timer;
+ int tuning_timer_exp;
+ atomic_t timer_enab;
+ struct tasklet_struct tuning_tasklet;
+};
+
+
+/* function declarations */
+static int __devinit
+dhdpcie_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+static void __devexit
+dhdpcie_pci_remove(struct pci_dev *pdev);
+static int dhdpcie_init(struct pci_dev *pdev);
+static irqreturn_t dhdpcie_isr(int irq, void *arg);
+/* OS Routine functions for PCI suspend/resume */
+
+static int dhdpcie_pci_suspend(struct pci_dev *dev, pm_message_t state);
+static int dhdpcie_set_suspend_resume(struct pci_dev *dev, bool state);
+static int dhdpcie_pci_resume(struct pci_dev *dev);
+static int dhdpcie_resume_dev(struct pci_dev *dev);
+static int dhdpcie_suspend_dev(struct pci_dev *dev);
+static struct pci_device_id dhdpcie_pci_devid[] __devinitdata = {
+ { vendor: 0x14e4,
+ device: PCI_ANY_ID,
+ subvendor: PCI_ANY_ID,
+ subdevice: PCI_ANY_ID,
+ class: PCI_CLASS_NETWORK_OTHER << 8,
+ class_mask: 0xffff00,
+ driver_data: 0,
+ },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, dhdpcie_pci_devid);
+
+static struct pci_driver dhdpcie_driver = {
+ node: {},
+ name: "pcieh",
+ id_table: dhdpcie_pci_devid,
+ probe: dhdpcie_pci_probe,
+ remove: dhdpcie_pci_remove,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+ save_state: NULL,
+#endif
+ suspend: dhdpcie_pci_suspend,
+ resume: dhdpcie_pci_resume,
+};
+
+int dhdpcie_init_succeeded = FALSE;
+
+static void dhdpcie_pme_active(struct pci_dev *pdev, bool enable)
+{
+ uint16 pmcsr;
+
+ pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
+ /* Clear PME Status by writing 1 to it and enable PME# */
+ pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
+ if (!enable)
+ pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
+
+ pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, pmcsr);
+}
+
+static int dhdpcie_set_suspend_resume(struct pci_dev *pdev, bool state)
+{
+ int ret = 0;
+ dhdpcie_info_t *pch = pci_get_drvdata(pdev);
+ dhd_bus_t *bus = NULL;
+
+ if (pch) {
+ bus = pch->bus;
+ }
+
+	/* When firmware is not loaded, do the PCI bus suspend/resume only */
+ if (bus && (bus->dhd->busstate == DHD_BUS_DOWN) &&
+ !bus->dhd->dongle_reset) {
+ ret = dhdpcie_pci_suspend_resume(bus->dev, state);
+ return ret;
+ }
+
+ if (bus && ((bus->dhd->busstate == DHD_BUS_SUSPEND)||
+ (bus->dhd->busstate == DHD_BUS_DATA))) {
+
+ ret = dhdpcie_bus_suspend(bus, state);
+ }
+ return ret;
+}
+
+static int dhdpcie_pci_suspend(struct pci_dev * pdev, pm_message_t state)
+{
+ BCM_REFERENCE(state);
+ return dhdpcie_set_suspend_resume(pdev, TRUE);
+}
+
+static int dhdpcie_pci_resume(struct pci_dev *pdev)
+{
+ return dhdpcie_set_suspend_resume(pdev, FALSE);
+}
+
+static int dhdpcie_suspend_dev(struct pci_dev *dev)
+{
+ int ret;
+ DHD_TRACE_HW4(("%s: Enter\n", __FUNCTION__));
+ dhdpcie_pme_active(dev, TRUE);
+ pci_save_state(dev);
+ pci_enable_wake(dev, PCI_D0, TRUE);
+ pci_disable_device(dev);
+ ret = pci_set_power_state(dev, PCI_D3hot);
+ if (ret) {
+ DHD_ERROR(("%s: pci_set_power_state error %d\n",
+ __FUNCTION__, ret));
+ }
+ return ret;
+}
+
+static int dhdpcie_resume_dev(struct pci_dev *dev)
+{
+ int err = 0;
+ DHD_TRACE_HW4(("%s: Enter\n", __FUNCTION__));
+ pci_restore_state(dev);
+ err = pci_enable_device(dev);
+ if (err) {
+ printf("%s:pci_enable_device error %d \n", __FUNCTION__, err);
+ return err;
+ }
+ pci_set_master(dev);
+ err = pci_set_power_state(dev, PCI_D0);
+ if (err) {
+ printf("%s:pci_set_power_state error %d \n", __FUNCTION__, err);
+ return err;
+ }
+ dhdpcie_pme_active(dev, FALSE);
+ return err;
+}
+
+int dhdpcie_pci_suspend_resume(struct pci_dev *dev, bool state)
+{
+ int rc;
+
+ if (state)
+ rc = dhdpcie_suspend_dev(dev);
+ else
+ rc = dhdpcie_resume_dev(dev);
+ return rc;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
+static int dhdpcie_device_scan(struct device *dev, void *data)
+{
+ struct pci_dev *pcidev;
+ int *cnt = data;
+
+ pcidev = container_of(dev, struct pci_dev, dev);
+ if (pcidev->vendor != 0x14e4)
+ return 0;
+
+ DHD_INFO(("Found Broadcom PCI device 0x%04x\n", pcidev->device));
+ *cnt += 1;
+ if (pcidev->driver && strcmp(pcidev->driver->name, dhdpcie_driver.name))
+		DHD_ERROR(("Broadcom PCI device 0x%04x is already bound to driver %s\n",
+ pcidev->device, pcidev->driver->name));
+
+ return 0;
+}
+#endif /* LINUX_VERSION >= 2.6.0 */
+
+int
+dhdpcie_bus_register(void)
+{
+ int error = 0;
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
+ if (!(error = pci_module_init(&dhdpcie_driver)))
+ return 0;
+
+ DHD_ERROR(("%s: pci_module_init failed 0x%x\n", __FUNCTION__, error));
+#else
+ if (!(error = pci_register_driver(&dhdpcie_driver))) {
+ bus_for_each_dev(dhdpcie_driver.driver.bus, NULL, &error, dhdpcie_device_scan);
+ if (!error) {
+ DHD_ERROR(("No Broadcom PCI device enumerated!\n"));
+ } else if (!dhdpcie_init_succeeded) {
+			DHD_ERROR(("%s: dhdpcie initialization failed.\n", __FUNCTION__));
+ } else {
+ return 0;
+ }
+
+ pci_unregister_driver(&dhdpcie_driver);
+ error = BCME_ERROR;
+ }
+#endif /* LINUX_VERSION < 2.6.0 */
+
+ return error;
+}
+
+
+void
+dhdpcie_bus_unregister(void)
+{
+ pci_unregister_driver(&dhdpcie_driver);
+}
+
+int __devinit
+dhdpcie_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+
+ if (dhdpcie_chipmatch (pdev->vendor, pdev->device)) {
+ DHD_ERROR(("%s: chipmatch failed!!\n", __FUNCTION__));
+ return -ENODEV;
+ }
+	printf("PCI_PROBE: bus %X, slot %X, vendor %X, device %X "
+		"(good PCI location)\n", pdev->bus->number,
+ PCI_SLOT(pdev->devfn), pdev->vendor, pdev->device);
+
+ if (dhdpcie_init (pdev)) {
+ DHD_ERROR(("%s: PCIe Enumeration failed\n", __FUNCTION__));
+ return -ENODEV;
+ }
+
+ DHD_TRACE(("%s: PCIe Enumeration done!!\n", __FUNCTION__));
+ return 0;
+}
+
+int
+dhdpcie_detach(dhdpcie_info_t *pch)
+{
+ if (pch) {
+ osl_t *osh = pch->osh;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+ if (!dhd_download_fw_on_driverload)
+ pci_load_and_free_saved_state(pch->dev, &pch->state);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+ MFREE(osh, pch, sizeof(dhdpcie_info_t));
+ }
+ return 0;
+}
+
+
+void __devexit
+dhdpcie_pci_remove(struct pci_dev *pdev)
+{
+ osl_t *osh = NULL;
+ dhdpcie_info_t *pch = NULL;
+ dhd_bus_t *bus = NULL;
+
+ DHD_TRACE(("%s Enter\n", __FUNCTION__));
+ pch = pci_get_drvdata(pdev);
+ bus = pch->bus;
+ osh = pch->osh;
+
+ dhdpcie_bus_release(bus);
+ pci_disable_device(pdev);
+ /* pcie info detach */
+ dhdpcie_detach(pch);
+ /* osl detach */
+ osl_detach(osh);
+
+ dhdpcie_init_succeeded = FALSE;
+
+ DHD_TRACE(("%s Exit\n", __FUNCTION__));
+
+ return;
+}
+
+/* Request Linux irq */
+int
+dhdpcie_request_irq(dhdpcie_info_t *dhdpcie_info)
+{
+ dhd_bus_t *bus = dhdpcie_info->bus;
+ struct pci_dev *pdev = dhdpcie_info->bus->dev;
+
+ snprintf(dhdpcie_info->pciname, sizeof(dhdpcie_info->pciname),
+ "dhdpcie:%s", pci_name(pdev));
+ if (request_irq(pdev->irq, dhdpcie_isr, IRQF_SHARED,
+ dhdpcie_info->pciname, bus) < 0) {
+ DHD_ERROR(("%s: request_irq() failed\n", __FUNCTION__));
+ return -1;
+ }
+
+ DHD_TRACE(("%s %s\n", __FUNCTION__, dhdpcie_info->pciname));
+
+
+ return 0; /* SUCCESS */
+}
+
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+#define PRINTF_RESOURCE "0x%016llx"
+#else
+#define PRINTF_RESOURCE "0x%08x"
+#endif
+
+/*
+
+Name: dhdpcie_get_resource
+
+Parameters:
+
+1: dhdpcie_info_t *dhdpcie_info -- per-device PCIe info structure
+
+Return value:
+
+int - 0 on success, -1 on failure
+
+Description:
+Enables the PCI device, reads the BAR0/BAR1 resources from PCI configuration space,
+maps the register and TCM regions, and updates them in the dhdpcie_info structure.
+
+ */
+int dhdpcie_get_resource(dhdpcie_info_t *dhdpcie_info)
+{
+ phys_addr_t bar0_addr, bar1_addr;
+ ulong bar1_size;
+ struct pci_dev *pdev = NULL;
+ pdev = dhdpcie_info->dev;
+ do {
+ if (pci_enable_device(pdev)) {
+ printf("%s: Cannot enable PCI device\n", __FUNCTION__);
+ break;
+ }
+ pci_set_master(pdev);
+ bar0_addr = pci_resource_start(pdev, 0); /* Bar-0 mapped address */
+ bar1_addr = pci_resource_start(pdev, 2); /* Bar-1 mapped address */
+
+ /* read Bar-1 mapped memory range */
+ bar1_size = pci_resource_len(pdev, 2);
+
+ if ((bar1_size == 0) || (bar1_addr == 0)) {
+ printf("%s: BAR1 Not enabled for this device size(%ld),"
+ " addr(0x"PRINTF_RESOURCE")\n",
+ __FUNCTION__, bar1_size, bar1_addr);
+ goto err;
+ }
+
+ dhdpcie_info->regs = (volatile char *) REG_MAP(bar0_addr, DONGLE_REG_MAP_SIZE);
+ dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, DONGLE_TCM_MAP_SIZE);
+ dhdpcie_info->tcm_size = DONGLE_TCM_MAP_SIZE;
+
+ if (!dhdpcie_info->regs || !dhdpcie_info->tcm) {
+ DHD_ERROR(("%s:ioremap() failed\n", __FUNCTION__));
+ break;
+ }
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+ if (!dhd_download_fw_on_driverload) {
+			/* Back up the PCIe configuration so that the Wi-Fi on/off
+			 * sequence works when the driver is built in
+ */
+ pci_save_state(pdev);
+ dhdpcie_info->state = pci_store_saved_state(pdev);
+
+ if (dhdpcie_info->state == NULL) {
+ DHD_ERROR(("%s pci_store_saved_state returns NULL\n",
+ __FUNCTION__));
+ REG_UNMAP(dhdpcie_info->regs);
+ REG_UNMAP(dhdpcie_info->tcm);
+ pci_disable_device(pdev);
+ break;
+ }
+ }
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+
+ DHD_TRACE(("%s:Phys addr : reg space = %p base addr 0x"PRINTF_RESOURCE" \n",
+ __FUNCTION__, dhdpcie_info->regs, bar0_addr));
+ DHD_TRACE(("%s:Phys addr : tcm_space = %p base addr 0x"PRINTF_RESOURCE" \n",
+ __FUNCTION__, dhdpcie_info->tcm, bar1_addr));
+
+ return 0; /* SUCCESS */
+ } while (0);
+err:
+ return -1; /* FAILURE */
+}
+
+int dhdpcie_scan_resource(dhdpcie_info_t *dhdpcie_info)
+{
+
+ DHD_TRACE(("%s: ENTER\n", __FUNCTION__));
+
+ do {
+ /* define it here only!! */
+ if (dhdpcie_get_resource (dhdpcie_info)) {
+ DHD_ERROR(("%s: Failed to get PCI resources\n", __FUNCTION__));
+ break;
+ }
+ DHD_TRACE(("%s:Exit - SUCCESS \n",
+ __FUNCTION__));
+
+ return 0; /* SUCCESS */
+
+ } while (0);
+
+ DHD_TRACE(("%s:Exit - FAILURE \n", __FUNCTION__));
+
+ return -1; /* FAILURE */
+
+}
+
+int dhdpcie_init(struct pci_dev *pdev)
+{
+
+ osl_t *osh = NULL;
+ dhd_bus_t *bus = NULL;
+ dhdpcie_info_t *dhdpcie_info = NULL;
+ wifi_adapter_info_t *adapter = NULL;
+
+ do {
+ /* osl attach */
+ if (!(osh = osl_attach(pdev, PCI_BUS, FALSE))) {
+ DHD_ERROR(("%s: osl_attach failed\n", __FUNCTION__));
+ break;
+ }
+
+ /* initialize static buffer */
+ adapter = dhd_wifi_platform_get_adapter(PCI_BUS, pdev->bus->number,
+ PCI_SLOT(pdev->devfn));
+ if (adapter != NULL)
+ DHD_ERROR(("%s: found adapter info '%s'\n", __FUNCTION__, adapter->name));
+ else
+ DHD_ERROR(("%s: can't find adapter info for this chip\n", __FUNCTION__));
+ osl_static_mem_init(osh, adapter);
+
+		/* allocate Linux-specific PCIe structure here */
+ if (!(dhdpcie_info = MALLOC(osh, sizeof(dhdpcie_info_t)))) {
+			DHD_ERROR(("%s: MALLOC of dhdpcie_info_t failed\n", __FUNCTION__));
+ break;
+ }
+ bzero(dhdpcie_info, sizeof(dhdpcie_info_t));
+ dhdpcie_info->osh = osh;
+ dhdpcie_info->dev = pdev;
+
+		/* Find the PCI resources, verify the vendor and device ID, */
+		/* map BAR regions and irq, and update the structures */
+ if (dhdpcie_scan_resource(dhdpcie_info)) {
+			DHD_ERROR(("%s: dhdpcie_scan_resource failed\n", __FUNCTION__));
+
+ break;
+ }
+
+ /* Bus initialization */
+ bus = dhdpcie_bus_attach(osh, dhdpcie_info->regs, dhdpcie_info->tcm);
+ if (!bus) {
+ DHD_ERROR(("%s:dhdpcie_bus_attach() failed\n", __FUNCTION__));
+ break;
+ }
+
+ dhdpcie_info->bus = bus;
+ dhdpcie_info->bus->dev = pdev;
+
+ if (bus->intr) {
+ /* Register interrupt callback, but mask it (not operational yet). */
+ DHD_INTR(("%s: Registering and masking interrupts\n", __FUNCTION__));
+ dhdpcie_bus_intr_disable(bus);
+
+ if (dhdpcie_request_irq(dhdpcie_info)) {
+ DHD_ERROR(("%s: request_irq() failed\n", __FUNCTION__));
+ break;
+ }
+ } else {
+ bus->pollrate = 1;
+ DHD_INFO(("%s: PCIe interrupt function is NOT registered "
+ "due to polling mode\n", __FUNCTION__));
+ }
+
+ if (dhd_download_fw_on_driverload) {
+ if (dhd_bus_start(bus->dhd)) {
+				DHD_ERROR(("%s: dhd_bus_start() failed\n", __FUNCTION__));
+ break;
+ }
+ }
+
+ /* set private data for pci_dev */
+ pci_set_drvdata(pdev, dhdpcie_info);
+
+ /* Attach to the OS network interface */
+ DHD_TRACE(("%s(): Calling dhd_register_if() \n", __FUNCTION__));
+ if (dhd_register_if(bus->dhd, 0, TRUE)) {
+ DHD_ERROR(("%s(): ERROR.. dhd_register_if() failed\n", __FUNCTION__));
+ break;
+ }
+
+ dhdpcie_init_succeeded = TRUE;
+
+ DHD_TRACE(("%s:Exit - SUCCESS \n", __FUNCTION__));
+ return 0; /* return SUCCESS */
+
+ } while (0);
+ /* reverse the initialization in order in case of error */
+
+ if (bus)
+ dhdpcie_bus_release(bus);
+
+ if (dhdpcie_info)
+ dhdpcie_detach(dhdpcie_info);
+ pci_disable_device(pdev);
+ if (osh)
+ osl_detach(osh);
+
+ dhdpcie_init_succeeded = FALSE;
+
+ DHD_TRACE(("%s:Exit - FAILURE \n", __FUNCTION__));
+
+ return -1; /* return FAILURE */
+}
+
+/* Free Linux irq */
+void
+dhdpcie_free_irq(dhd_bus_t *bus)
+{
+ struct pci_dev *pdev = NULL;
+
+ DHD_TRACE(("%s: freeing up the IRQ\n", __FUNCTION__));
+ if (bus) {
+ pdev = bus->dev;
+ free_irq(pdev->irq, bus);
+ }
+ DHD_TRACE(("%s: Exit\n", __FUNCTION__));
+ return;
+}
+
+/*
+
+Name: dhdpcie_isr
+
+Parameters:
+
+1: IN int irq -- interrupt vector
+2: IN void *arg -- handle to private data structure
+
+Return value:
+
+Status (TRUE or FALSE)
+
+Description:
+Interrupt service routine: checks the status register,
+disables interrupts and queues the DPC if mailbox interrupts are raised.
+*/
+
+
+irqreturn_t
+dhdpcie_isr(int irq, void *arg)
+{
+ dhd_bus_t *bus = (dhd_bus_t*)arg;
+ if (dhdpcie_bus_isr(bus))
+ return TRUE;
+ else
+ return FALSE;
+}
+
+int
+dhdpcie_start_host_pcieclock(dhd_bus_t *bus)
+{
+ int ret = 0;
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+ int options = 0;
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+ DHD_TRACE(("%s Enter:\n", __FUNCTION__));
+
+ if (bus == NULL)
+ return BCME_ERROR;
+
+ if (bus->dev == NULL)
+ return BCME_ERROR;
+
+#if defined(CONFIG_ARCH_MSM)
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+ if (bus->islinkdown) {
+ options = MSM_PCIE_CONFIG_NO_CFG_RESTORE;
+ }
+ ret = msm_pcie_pm_control(MSM_PCIE_RESUME, bus->dev->bus->number,
+ NULL, NULL, options);
+ if (bus->islinkdown && !ret) {
+ msm_pcie_recover_config(bus->dev);
+ if (bus->dhd)
+ DHD_OS_WAKE_UNLOCK(bus->dhd);
+ bus->islinkdown = FALSE;
+ }
+#else
+ ret = msm_pcie_pm_control(MSM_PCIE_RESUME, bus->dev->bus->number,
+ NULL, NULL, 0);
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+ if (ret) {
+ DHD_ERROR(("%s Failed to bring up PCIe link\n", __FUNCTION__));
+ goto done;
+ }
+
+done:
+#endif /* CONFIG_ARCH_MSM */
+ DHD_TRACE(("%s Exit:\n", __FUNCTION__));
+ return ret;
+}
+
+int
+dhdpcie_stop_host_pcieclock(dhd_bus_t *bus)
+{
+ int ret = 0;
+
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+ int options = 0;
+#endif
+ DHD_TRACE(("%s Enter:\n", __FUNCTION__));
+
+ if (bus == NULL)
+ return BCME_ERROR;
+
+ if (bus->dev == NULL)
+ return BCME_ERROR;
+
+#if defined(CONFIG_ARCH_MSM)
+#ifdef SUPPORT_LINKDOWN_RECOVERY
+ if (bus->islinkdown)
+ options = MSM_PCIE_CONFIG_NO_CFG_RESTORE | MSM_PCIE_CONFIG_LINKDOWN;
+
+ ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, bus->dev->bus->number,
+ NULL, NULL, options);
+#else
+ ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, bus->dev->bus->number,
+ NULL, NULL, 0);
+#endif /* SUPPORT_LINKDOWN_RECOVERY */
+ if (ret) {
+ DHD_ERROR(("Failed to stop PCIe link\n"));
+ goto done;
+ }
+done:
+#endif /* CONFIG_ARCH_MSM */
+ DHD_TRACE(("%s Exit:\n", __FUNCTION__));
+ return ret;
+}
+
+int
+dhdpcie_disable_device(dhd_bus_t *bus)
+{
+ if (bus == NULL)
+ return BCME_ERROR;
+
+ if (bus->dev == NULL)
+ return BCME_ERROR;
+
+ pci_disable_device(bus->dev);
+
+ return 0;
+}
+
+int
+dhdpcie_enable_device(dhd_bus_t *bus)
+{
+ int ret = BCME_ERROR;
+ dhdpcie_info_t *pch;
+
+ DHD_TRACE(("%s Enter:\n", __FUNCTION__));
+
+ if (bus == NULL)
+ return BCME_ERROR;
+
+ if (bus->dev == NULL)
+ return BCME_ERROR;
+
+ pch = pci_get_drvdata(bus->dev);
+ if (pch == NULL)
+ return BCME_ERROR;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+ if (pci_load_saved_state(bus->dev, pch->state))
+ pci_disable_device(bus->dev);
+ else {
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+ pci_restore_state(bus->dev);
+ ret = pci_enable_device(bus->dev);
+ if (!ret)
+ pci_set_master(bus->dev);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
+ }
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
+
+ if (ret)
+ pci_disable_device(bus->dev);
+
+ return ret;
+}
+
+int
+dhdpcie_alloc_resource(dhd_bus_t *bus)
+{
+ dhdpcie_info_t *dhdpcie_info;
+ phys_addr_t bar0_addr, bar1_addr;
+ ulong bar1_size;
+
+ do {
+ if (bus == NULL) {
+ DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
+ break;
+ }
+
+ if (bus->dev == NULL) {
+ DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
+ break;
+ }
+
+ dhdpcie_info = pci_get_drvdata(bus->dev);
+ if (dhdpcie_info == NULL) {
+ DHD_ERROR(("%s: dhdpcie_info is NULL\n", __FUNCTION__));
+ break;
+ }
+
+ bar0_addr = pci_resource_start(bus->dev, 0); /* Bar-0 mapped address */
+ bar1_addr = pci_resource_start(bus->dev, 2); /* Bar-1 mapped address */
+
+ /* read Bar-1 mapped memory range */
+ bar1_size = pci_resource_len(bus->dev, 2);
+
+ if ((bar1_size == 0) || (bar1_addr == 0)) {
+ printf("%s: BAR1 Not enabled for this device size(%ld),"
+ " addr(0x"PRINTF_RESOURCE")\n",
+ __FUNCTION__, bar1_size, bar1_addr);
+ break;
+ }
+
+ dhdpcie_info->regs = (volatile char *) REG_MAP(bar0_addr, DONGLE_REG_MAP_SIZE);
+ if (!dhdpcie_info->regs) {
+			DHD_ERROR(("%s: ioremap() for regs failed\n", __FUNCTION__));
+ break;
+ }
+
+ bus->regs = dhdpcie_info->regs;
+ dhdpcie_info->tcm = (volatile char *) REG_MAP(bar1_addr, DONGLE_TCM_MAP_SIZE);
+ dhdpcie_info->tcm_size = DONGLE_TCM_MAP_SIZE;
+ if (!dhdpcie_info->tcm) {
+			DHD_ERROR(("%s: ioremap() for tcm failed\n", __FUNCTION__));
+ REG_UNMAP(dhdpcie_info->regs);
+ bus->regs = NULL;
+ break;
+ }
+
+ bus->tcm = dhdpcie_info->tcm;
+
+ DHD_TRACE(("%s:Phys addr : reg space = %p base addr 0x"PRINTF_RESOURCE" \n",
+ __FUNCTION__, dhdpcie_info->regs, bar0_addr));
+ DHD_TRACE(("%s:Phys addr : tcm_space = %p base addr 0x"PRINTF_RESOURCE" \n",
+ __FUNCTION__, dhdpcie_info->tcm, bar1_addr));
+
+ return 0;
+ } while (0);
+
+ return BCME_ERROR;
+}
+
+void
+dhdpcie_free_resource(dhd_bus_t *bus)
+{
+ dhdpcie_info_t *dhdpcie_info;
+
+ if (bus == NULL) {
+ DHD_ERROR(("%s: bus is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ if (bus->dev == NULL) {
+ DHD_ERROR(("%s: bus->dev is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ dhdpcie_info = pci_get_drvdata(bus->dev);
+ if (dhdpcie_info == NULL) {
+ DHD_ERROR(("%s: dhdpcie_info is NULL\n", __FUNCTION__));
+ return;
+ }
+
+ if (bus->regs) {
+ REG_UNMAP(dhdpcie_info->regs);
+ bus->regs = NULL;
+ }
+
+ if (bus->tcm) {
+ REG_UNMAP(dhdpcie_info->tcm);
+ bus->tcm = NULL;
+ }
+}
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: dhd_pno.c 420056 2013-08-24 00:53:12Z $
+ * $Id: dhd_pno.c 423669 2013-09-18 13:01:55Z yangj$
*/
+#ifdef PNO_SUPPORT
#include <typedefs.h>
#include <osl.h>
#define htodchanspec(i) htod16(i)
#define dtohchanspec(i) dtoh16(i)
#else
-#define htod32(i) i
-#define htod16(i) i
-#define dtoh32(i) i
-#define dtoh16(i) i
-#define htodchanspec(i) i
-#define dtohchanspec(i) i
+#define htod32(i) (i)
+#define htod16(i) (i)
+#define dtoh32(i) (i)
+#define dtoh16(i) (i)
+#define htodchanspec(i) (i)
+#define dtohchanspec(i) (i)
#endif /* IL_BIGENDINA */
#define NULL_CHECK(p, s, err) \
else
return FALSE;
}
-static int
-_dhd_pno_clean(dhd_pub_t *dhd)
+int
+dhd_pno_clean(dhd_pub_t *dhd)
{
int pfn = 0;
int err;
dhd_pno_status_info_t *_pno_state;
NULL_CHECK(dhd, "dhd is NULL", err);
NULL_CHECK(dhd->pno_state, "pno_state is NULL", err);
+
DHD_PNO(("%s enter\n", __FUNCTION__));
+ _pno_state = PNO_GET_PNOSTATE(dhd);
err = dhd_iovar(dhd, 0, "pfn_suspend", (char *)&suspend, sizeof(suspend), 1);
if (err < 0) {
DHD_ERROR(("%s : failed to suspend pfn(error :%d)\n", __FUNCTION__, err));
if (mode == DHD_PNO_BATCH_MODE) {
int _tmp = pfn_param.bestn;
/* set bestn to calculate the max mscan which firmware supports */
- err = dhd_iovar(dhd, 0, "pfnmscan", (char *)&_tmp, sizeof(_tmp), 1);
+ err = dhd_iovar(dhd, 0, "pfnmem", (char *)&_tmp, sizeof(_tmp), 1);
if (err < 0) {
- DHD_ERROR(("%s : failed to set pfnmscan\n", __FUNCTION__));
+ DHD_ERROR(("%s : failed to set pfnmem\n", __FUNCTION__));
goto exit;
}
/* get max mscan which the firmware supports */
- err = dhd_iovar(dhd, 0, "pfnmscan", (char *)&_tmp, sizeof(_tmp), 0);
+ err = dhd_iovar(dhd, 0, "pfnmem", (char *)&_tmp, sizeof(_tmp), 0);
if (err < 0) {
- DHD_ERROR(("%s : failed to get pfnmscan\n", __FUNCTION__));
+ DHD_ERROR(("%s : failed to get pfnmem\n", __FUNCTION__));
goto exit;
}
DHD_PNO((" returned mscan : %d, set bestn : %d\n", _tmp, pfn_param.bestn));
kfree(iter);
}
}
+ params->params_legacy.nssid = 0;
params->params_legacy.scan_fr = 0;
params->params_legacy.pno_freq_expo_max = 0;
params->params_legacy.pno_repeat = 0;
dhd_pno_get_for_batch(dhd, NULL, 0, PNO_STATUS_DISABLE);
/* save current pno_mode before calling dhd_pno_clean */
mode = _pno_state->pno_mode;
- _dhd_pno_clean(dhd);
+ dhd_pno_clean(dhd);
/* restore previous pno_mode */
_pno_state->pno_mode = mode;
if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) {
}
}
} else {
- err = _dhd_pno_clean(dhd);
+ err = dhd_pno_clean(dhd);
if (err < 0) {
- DHD_ERROR(("%s : failed to call _dhd_pno_clean (err: %d)\n",
+ DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n",
__FUNCTION__, err));
goto exit;
}
scan_fr, pno_repeat, pno_freq_expo_max, nchan));
_params = &(_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS]);
- if (!(_pno_state->pno_mode & DHD_PNO_LEGACY_MODE)) {
- _pno_state->pno_mode |= DHD_PNO_LEGACY_MODE;
- err = _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_LEGACY_MODE);
+ if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) {
+ DHD_ERROR(("%s : Legacy PNO mode was already started, "
+ "will disable previous one to start new one\n", __FUNCTION__));
+ err = dhd_pno_stop_for_ssid(dhd);
if (err < 0) {
- DHD_ERROR(("%s : failed to reinitialize profile (err %d)\n",
+ DHD_ERROR(("%s : failed to stop legacy PNO (err %d)\n",
__FUNCTION__, err));
goto exit;
}
}
+ _pno_state->pno_mode |= DHD_PNO_LEGACY_MODE;
+ err = _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_LEGACY_MODE);
+ if (err < 0) {
+ DHD_ERROR(("%s : failed to reinitialize profile (err %d)\n",
+ __FUNCTION__, err));
+ goto exit;
+ }
memset(_chan_list, 0, sizeof(_chan_list));
tot_nchan = nchan;
if (tot_nchan > 0 && channel_list) {
goto exit;
}
if ((err = _dhd_pno_add_ssid(dhd, ssid_list, nssid)) < 0) {
- DHD_ERROR(("failed to add ssid list (err %d) in firmware\n", err));
+		DHD_ERROR(("failed to add ssid list to firmware (err %d), nssid %d\n", err, nssid));
goto exit;
}
for (i = 0; i < nssid; i++) {
__FUNCTION__));
goto exit;
}
+ } else {
+ /* batch mode is already started */
+ return -EBUSY;
}
_params->params_batch.scan_fr = batch_params->scan_fr;
_params->params_batch.bestn = batch_params->bestn;
}
DHD_PNO(("ver %d, status : %d, count %d\n", plbestnet->version,
plbestnet->status, plbestnet->count));
- if (plbestnet->version != PFN_LSCANRESULT_VERSION) {
+ if (plbestnet->version != PFN_SCANRESULT_VERSION) {
err = BCME_VERSION;
DHD_ERROR(("bestnet version(%d) is mismatch with Driver version(%d)\n",
- plbestnet->version, PFN_LSCANRESULT_VERSION));
+ plbestnet->version, PFN_SCANRESULT_VERSION));
goto exit;
}
plnetinfo = plbestnet->netinfo;
DHD_ERROR(("failed to allocate dhd_pno_bestnet_entry\n"));
goto exit;
}
+ memset(pbestnet_entry, 0, BESTNET_ENTRY_SIZE);
pbestnet_entry->recorded_time = jiffies; /* record the current time */
/* create header for the first entry */
allocate_header = (i == 0)? TRUE : FALSE;
/* fills the best network info */
pbestnet_entry->channel = plnetinfo->pfnsubnet.channel;
pbestnet_entry->RSSI = plnetinfo->RSSI;
- if (pbestnet_entry->RSSI > 0) {
+ if (plnetinfo->flags & PFN_PARTIAL_SCAN_MASK) {
/* if RSSI is positive value, we assume that
* this scan is aborted by other scan
*/
- pbestnet_entry->RSSI *= -1;
+ DHD_PNO(("This scan is aborted\n"));
pbestnetheader->reason = (ENABLE << PNO_STATUS_ABORT);
}
pbestnet_entry->rtt0 = plnetinfo->rtt0;
plnetinfo++;
}
}
+ if (pscan_results->cnt_header == 0) {
+		/* If we didn't get any data from the firmware,
+		 * remove the current scan_result list from get_batch.scan_results_list.
+ */
+ DHD_PNO(("NO BATCH DATA from Firmware, Delete current SCAN RESULT LIST\n"));
+ list_del(&pscan_results->list);
+ MFREE(dhd->osh, pscan_results, SCAN_RESULTS_SIZE);
+ _params->params_batch.get_batch.top_node_cnt--;
+ }
/* increase total scan count using current scan count */
_params->params_batch.get_batch.tot_scan_cnt += pscan_results->cnt_header;
_params->params_batch.get_batch.tot_scan_cnt = 0;
}
convert_format:
- if (!list_empty(&_params->params_batch.get_batch.expired_scan_results_list)) {
- err = _dhd_pno_convert_format(dhd, &_params->params_batch, buf, bufsize);
- if (err < 0) {
- DHD_ERROR(("failed to convert the data into upper layer format\n"));
- goto exit;
- }
+ err = _dhd_pno_convert_format(dhd, &_params->params_batch, buf, bufsize);
+ if (err < 0) {
+ DHD_ERROR(("failed to convert the data into upper layer format\n"));
+ goto exit;
}
}
exit:
if (plbestnet)
MFREE(dhd->osh, plbestnet, PNO_BESTNET_LEN);
- _params->params_batch.get_batch.buf = NULL;
- _params->params_batch.get_batch.bufsize = 0;
+ if (_params) {
+ _params->params_batch.get_batch.buf = NULL;
+ _params->params_batch.get_batch.bufsize = 0;
+ _params->params_batch.get_batch.bytes_written = err;
+ }
mutex_unlock(&_pno_state->pno_mutex);
- complete(&_pno_state->get_batch_done);
+ if (waitqueue_active(&_pno_state->get_batch_done.wait))
+ complete(&_pno_state->get_batch_done);
return err;
}
static void
dhd_pno_get_for_batch(dhd_pub_t *dhd, char *buf, int bufsize, int reason)
{
int err = BCME_OK;
+ char *pbuf = buf;
dhd_pno_status_info_t *_pno_state;
struct dhd_pno_batch_params *params_batch;
NULL_CHECK(dhd, "dhd is NULL", err);
params_batch = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS].params_batch;
if (!(_pno_state->pno_mode & DHD_PNO_BATCH_MODE)) {
DHD_ERROR(("%s: Batching SCAN mode is not enabled\n", __FUNCTION__));
+ memset(pbuf, 0, bufsize);
+ pbuf += sprintf(pbuf, "scancount=%d\n", 0);
+ sprintf(pbuf, "%s", RESULTS_END_MARKER);
+ err = strlen(buf);
goto exit;
}
params_batch->get_batch.buf = buf;
params_batch->get_batch.bufsize = bufsize;
params_batch->get_batch.reason = reason;
+ params_batch->get_batch.bytes_written = 0;
schedule_work(&_pno_state->work);
wait_for_completion(&_pno_state->get_batch_done);
+ err = params_batch->get_batch.bytes_written;
exit:
return err;
}
_pno_state->pno_mode &= ~DHD_PNO_BATCH_MODE;
if (_pno_state->pno_mode & (DHD_PNO_LEGACY_MODE | DHD_PNO_HOTLIST_MODE)) {
mode = _pno_state->pno_mode;
- _dhd_pno_clean(dhd);
+ dhd_pno_clean(dhd);
_pno_state->pno_mode = mode;
/* restart Legacy PNO if the Legacy PNO is on */
if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) {
}
}
} else {
- err = _dhd_pno_clean(dhd);
+ err = dhd_pno_clean(dhd);
if (err < 0) {
- DHD_ERROR(("%s : failed to call _dhd_pno_clean (err: %d)\n",
+ DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n",
__FUNCTION__, err));
goto exit;
}
dhd_pno_get_for_batch(dhd, NULL, 0, PNO_STATUS_DISABLE);
/* save current pno_mode before calling dhd_pno_clean */
mode = _pno_state->pno_mode;
- err = _dhd_pno_clean(dhd);
+ err = dhd_pno_clean(dhd);
if (err < 0) {
- DHD_ERROR(("%s : failed to call _dhd_pno_clean (err: %d)\n",
+ DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n",
__FUNCTION__, err));
goto exit;
}
}
}
} else {
- err = _dhd_pno_clean(dhd);
+ err = dhd_pno_clean(dhd);
if (err < 0) {
- DHD_ERROR(("%s : failed to call _dhd_pno_clean (err: %d)\n",
+ DHD_ERROR(("%s : failed to call dhd_pno_clean (err: %d)\n",
__FUNCTION__, err));
goto exit;
}
{
struct dhd_pno_batch_params *params_batch;
params_batch = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS].params_batch;
- DHD_PNO(("%s : WLC_E_PFN_BEST_BATCHING\n", __FUNCTION__));
- params_batch->get_batch.buf = NULL;
- params_batch->get_batch.bufsize = 0;
- params_batch->get_batch.reason = PNO_STATUS_EVENT;
- schedule_work(&_pno_state->work);
+ if (!waitqueue_active(&_pno_state->get_batch_done.wait)) {
+ DHD_PNO(("%s : WLC_E_PFN_BEST_BATCHING\n", __FUNCTION__));
+ params_batch->get_batch.buf = NULL;
+ params_batch->get_batch.bufsize = 0;
+ params_batch->get_batch.reason = PNO_STATUS_EVENT;
+ schedule_work(&_pno_state->work);
+ } else
+			DHD_PNO(("%s : WLC_E_PFN_BEST_BATCHING "
+ "will skip this event\n", __FUNCTION__));
break;
}
default:
if (dhd->pno_state)
goto exit;
dhd->pno_state = MALLOC(dhd->osh, sizeof(dhd_pno_status_info_t));
+ NULL_CHECK(dhd->pno_state, "failed to create dhd_pno_state", err);
memset(dhd->pno_state, 0, sizeof(dhd_pno_status_info_t));
- NULL_CHECK(dhd, "failed to create dhd_pno_state", err);
/* need to check whether current firmware support batching and hotlist scan */
_pno_state = PNO_GET_PNOSTATE(dhd);
_pno_state->wls_supported = TRUE;
int dhd_pno_deinit(dhd_pub_t *dhd)
{
int err = BCME_OK;
- dhd_pno_status_info_t *_pno_state = PNO_GET_PNOSTATE(dhd);
+ dhd_pno_status_info_t *_pno_state;
+ dhd_pno_params_t *_params;
NULL_CHECK(dhd, "dhd is NULL", err);
+
DHD_PNO(("%s enter\n", __FUNCTION__));
+ _pno_state = PNO_GET_PNOSTATE(dhd);
+ NULL_CHECK(_pno_state, "pno_state is NULL", err);
+ /* may need to free legacy ssid_list */
+ if (_pno_state->pno_mode & DHD_PNO_LEGACY_MODE) {
+ _params = &_pno_state->pno_params_arr[INDEX_OF_LEGACY_PARAMS];
+ _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_LEGACY_MODE);
+ }
+
+ if (_pno_state->pno_mode & DHD_PNO_BATCH_MODE) {
+ _params = &_pno_state->pno_params_arr[INDEX_OF_BATCH_PARAMS];
+ /* clear resource if the BATCH MODE is on */
+ _dhd_pno_reinitialize_prof(dhd, _params, DHD_PNO_BATCH_MODE);
+ }
cancel_work_sync(&_pno_state->work);
- if (dhd->pno_state)
- MFREE(dhd->osh, dhd->pno_state, sizeof(dhd_pno_status_info_t));
+ MFREE(dhd->osh, _pno_state, sizeof(dhd_pno_status_info_t));
dhd->pno_state = NULL;
return err;
}
+#endif /* PNO_SUPPORT */
* Prefered Network Offload code and Wi-Fi Location Service(WLS) code.
* $Copyright Open Broadcom Corporation$
*
- * $Id: dhd_pno.h 419969 2013-08-23 18:54:36Z $
+ * $Id: dhd_pno.h 423669 2013-09-18 13:01:55Z $
*/
#ifndef __DHD_PNO_H__
#define __DHD_PNO_H__
+#if defined(PNO_SUPPORT)
#define PNO_TLV_PREFIX 'S'
#define PNO_TLV_VERSION '1'
#define PNO_TLV_SUBTYPE_LEGACY_PNO '2'
} cmd_tlv_t;
typedef enum dhd_pno_mode {
/* Wi-Fi Legacy PNO Mode */
- DHD_PNO_NONE_MODE = 0,
+ DHD_PNO_NONE_MODE = 0,
DHD_PNO_LEGACY_MODE = (1 << (0)),
/* Wi-Fi Android BATCH SCAN Mode */
DHD_PNO_BATCH_MODE = (1 << (1)),
uint32 expired_tot_scan_cnt;
uint32 top_node_cnt;
uint32 bufsize;
+ uint32 bytes_written;
int reason;
struct list_head scan_results_list;
struct list_head expired_scan_results_list;
extern int dhd_pno_event_handler(dhd_pub_t *dhd, wl_event_msg_t *event, void *event_data);
extern int dhd_pno_init(dhd_pub_t *dhd);
extern int dhd_pno_deinit(dhd_pub_t *dhd);
+#endif
+
+#if (defined(NDISVER) && (NDISVER >= 0x0630)) && defined(PNO_SUPPORT)
+extern int dhd_pno_cfg(dhd_pub_t *dhd, wl_pfn_cfg_t *pcfg);
+extern int dhd_pno_suspend(dhd_pub_t *dhd, int pfn_suspend);
+extern int dhd_pno_set_add(dhd_pub_t *dhd, wl_pfn_t *netinfo, int nssid, ushort scan_fr,
+ ushort slowscan_fr, uint8 pno_repeat, uint8 pno_freq_expo_max, int16 flags);
+extern int dhd_pno_enable(dhd_pub_t *dhd, int pfn_enabled);
+extern int dhd_pno_clean(dhd_pub_t *dhd);
+#endif /* (defined(NDISVER) && (NDISVER >= 0x0630)) && defined(PNO_SUPPORT) */
#endif /* __DHD_PNO_H__ */
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: dhd_proto.h 390836 2013-03-13 23:43:53Z $
+ * $Id: dhd_proto.h 490409 2014-07-10 16:34:27Z $
*/
#ifndef _dhd_proto_h_
#include <dhdioctl.h>
#include <wlioctl.h>
+#ifdef BCMPCIE
+#include <dhd_flowring.h>
+#endif
#ifndef IOCTL_RESP_TIMEOUT
#define IOCTL_RESP_TIMEOUT 2000 /* In milli second default value for Production FW */
#endif /* IOCTL_RESP_TIMEOUT */
+#ifndef MFG_IOCTL_RESP_TIMEOUT
+#define MFG_IOCTL_RESP_TIMEOUT 20000 /* In milli second default value for MFG FW */
+#endif /* MFG_IOCTL_RESP_TIMEOUT */
+
/*
* Exported from the dhd protocol module (dhd_cdc, dhd_rndis)
*/
/* Linkage, sets prot link and updates hdrlen in pub */
extern int dhd_prot_attach(dhd_pub_t *dhdp);
+/* Initializes the index block for DMA'ing indices */
+extern int dhd_prot_init_index_dma_block(dhd_pub_t *dhdp, uint8 type, uint32 length);
+
/* Unlink, frees allocated protocol memory (including dhd_prot) */
extern void dhd_prot_detach(dhd_pub_t *dhdp);
/* Initialize protocol: sync w/dongle state.
* Sets dongle media info (iswl, drv_version, mac address).
*/
-extern int dhd_prot_init(dhd_pub_t *dhdp);
+extern int dhd_sync_with_dongle(dhd_pub_t *dhdp);
+
+/* Protocol initialization needed for IOCTL/IOVAR path */
+extern int dhd_prot_init(dhd_pub_t *dhd);
/* Stop protocol: sync w/dongle state. */
extern void dhd_prot_stop(dhd_pub_t *dhdp);
* Caller must reserve prot_hdrlen prepend space.
*/
extern void dhd_prot_hdrpush(dhd_pub_t *, int ifidx, void *txp);
+extern uint dhd_prot_hdrlen(dhd_pub_t *, void *txp);
/* Remove any protocol-specific data header. */
extern int dhd_prot_hdrpull(dhd_pub_t *, int *ifidx, void *rxp, uchar *buf, uint *len);
extern int dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf,
uint reorder_info_len, void **pkt, uint32 *free_buf_count);
+#ifdef BCMPCIE
+extern int dhd_prot_process_msgbuf_txcpl(dhd_pub_t *dhd);
+extern int dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd);
+extern int dhd_prot_process_ctrlbuf(dhd_pub_t * dhd);
+extern bool dhd_prot_dtohsplit(dhd_pub_t * dhd);
+extern int dhd_post_dummy_msg(dhd_pub_t *dhd);
+extern int dhdmsgbuf_lpbk_req(dhd_pub_t *dhd, uint len);
+extern void dhd_prot_rx_dataoffset(dhd_pub_t *dhd, uint32 offset);
+extern int dhd_prot_txdata(dhd_pub_t *dhd, void *p, uint8 ifidx);
+extern int dhdmsgbuf_dmaxfer_req(dhd_pub_t *dhd, uint len, uint srcdelay, uint destdelay);
+
+extern int dhd_prot_flow_ring_create(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node);
+extern void dhd_prot_clean_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info);
+extern int dhd_post_tx_ring_item(dhd_pub_t *dhd, void *PKTBUF, uint8 ifindex);
+extern int dhd_prot_flow_ring_delete(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node);
+extern int dhd_prot_flow_ring_flush(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node);
+extern int dhd_prot_ringupd_dump(dhd_pub_t *dhd, struct bcmstrbuf *b);
+extern uint32 dhd_prot_metadatalen_set(dhd_pub_t *dhd, uint32 val, bool rx);
+extern uint32 dhd_prot_metadatalen_get(dhd_pub_t *dhd, bool rx);
+extern void dhd_prot_print_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info,
+ struct bcmstrbuf *strbuf);
+extern void dhd_prot_print_info(dhd_pub_t *dhd, struct bcmstrbuf *strbuf);
+extern void dhd_prot_update_txflowring(dhd_pub_t *dhdp, uint16 flow_id, void *msgring_info);
+extern void dhd_prot_txdata_write_flush(dhd_pub_t *dhd, uint16 flow_id, bool in_lock);
+extern uint32 dhd_prot_txp_threshold(dhd_pub_t *dhd, bool set, uint32 val);
+extern void dhd_prot_clear(dhd_pub_t *dhd);
+
+#endif /* BCMPCIE */
/********************************
* For version-string expansion *
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: dhd_sdio.c 419981 2013-08-23 19:49:45Z $
+ * $Id: dhd_sdio.c 489913 2014-07-08 18:57:48Z $
*/
#include <typedefs.h>
#include <hndsoc.h>
#include <bcmsdpcm.h>
#if defined(DHD_DEBUG)
-#include <hndrte_armtrap.h>
-#include <hndrte_cons.h>
+#include <hnd_armtrap.h>
+#include <hnd_cons.h>
#endif /* defined(DHD_DEBUG) */
#include <sbchipc.h>
#include <sbhnddma.h>
#include <sdiovar.h>
#include <dhd_config.h>
+#ifdef PROP_TXSTATUS
+#include <dhd_wlfc.h>
+#endif
+#ifdef DHDTCPACK_SUPPRESS
+#include <dhd_ip.h>
+#endif /* DHDTCPACK_SUPPRESS */
+
bool dhd_mp_halting(dhd_pub_t *dhdp);
extern void bcmsdh_waitfor_iodrain(void *sdh);
extern void bcmsdh_reject_ioreqs(void *sdh, bool reject);
#define DHDSDIO_MEM_DUMP_FNAME "mem_dump"
#endif
-#define QLEN 256 /* bulk rx and tx queue lengths */
+#define QLEN (1024) /* bulk rx and tx queue lengths */
#define FCHI (QLEN - 10)
#define FCLOW (FCHI / 2)
#define PRIOMASK 7
#define TXRETRIES 2 /* # of retries for tx frames */
+#define READ_FRM_CNT_RETRIES 3
#ifndef DHD_RXBOUND
#define DHD_RXBOUND 50 /* Default for max rx frames in one scheduling */
#endif
#define MEMBLOCK 2048 /* Block size used for downloading of dongle image */
#define MAX_NVRAMBUF_SIZE 4096 /* max nvram buf size */
-#define MAX_DATA_BUF (32 * 1024) /* Must be large enough to hold biggest possible glom */
+#define MAX_DATA_BUF (64 * 1024) /* Must be large enough to hold biggest possible glom */
#ifndef DHD_FIRSTREAD
#define DHD_FIRSTREAD 32
#error DHD_FIRSTREAD is not a power of 2!
#endif
-#ifdef BCMSDIOH_TXGLOM
-/* Total length of TX frame header for dongle protocol */
-#define SDPCM_HDRLEN (SDPCM_FRAMETAG_LEN + SDPCM_HWEXT_LEN + SDPCM_SWHEADER_LEN)
-/* Total length of RX frame for dongle protocol */
-#else
-/* Total length of TX frame header for dongle protocol */
+/* Total length of frame header for dongle protocol */
#define SDPCM_HDRLEN (SDPCM_FRAMETAG_LEN + SDPCM_SWHEADER_LEN)
-#endif
-
-#define SDPCM_HDRLEN_RX (SDPCM_FRAMETAG_LEN + SDPCM_SWHEADER_LEN)
+#define SDPCM_HDRLEN_TXGLOM (SDPCM_HDRLEN + SDPCM_HWEXT_LEN)
+#define MAX_TX_PKTCHAIN_CNT SDPCM_MAXGLOM_SIZE
#ifdef SDTEST
#define SDPCM_RESERVE (SDPCM_HDRLEN + SDPCM_TEST_HDRLEN + DHD_SDALIGN)
#define PMU_MAX_TRANSITION_DLY 1000000
#endif
+/* hooks for limiting the custom threshold of tx packets handled during rx processing */
+#define DEFAULT_TXINRX_THRES 0
+#ifndef CUSTOM_TXINRX_THRES
+#define CUSTOM_TXINRX_THRES DEFAULT_TXINRX_THRES
+#endif
+
/* Value for ChipClockCSR during initial setup */
#define DHD_INIT_CLKCTL1 (SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ)
#define DHD_INIT_CLKCTL2 (SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP)
#define PKTFREE2() if ((bus->bus != SPI_BUS) || bus->usebufpool) \
PKTFREE(bus->dhd->osh, pkt, FALSE);
DHD_SPINWAIT_SLEEP_INIT(sdioh_spinwait_sleep);
-#if defined(OOB_INTR_ONLY)
-extern void bcmsdh_set_irq(int flag);
-#endif
-#ifdef PROP_TXSTATUS
-extern void dhd_wlfc_txcomplete(dhd_pub_t *dhd, void *txp, bool success);
-extern void dhd_wlfc_trigger_pktcommit(dhd_pub_t *dhd);
-#endif
#if defined(MULTIPLE_SUPPLICANT)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
typedef struct dhd_console {
uint count; /* Poll interval msec counter */
uint log_addr; /* Log struct address (fixed) */
- hndrte_log_t log; /* Log struct (host copy) */
+ hnd_log_t log; /* Log struct (host copy) */
uint bufsize; /* Size of log buffer */
uint8 *buf; /* Log buffer (host copy) */
uint last; /* Last buffer read index */
#define GPIO_DEV_WAKEUP 17 /* Host gpio17 mapped to device gpio1 wakeup */
#define CC_CHIPCTRL2_GPIO1_WAKEUP (1 << 0)
#define CC_CHIPCTRL3_SR_ENG_ENABLE (1 << 2)
-#define OVERFLOW_BLKSZ512_WM 48
+#define OVERFLOW_BLKSZ512_WM 96
#define OVERFLOW_BLKSZ512_MES 80
#define CC_PMUCC3 (0x3)
uint32 srmemsize; /* Size of SRMEM */
uint32 bus; /* gSPI or SDIO bus */
- uint32 hostintmask; /* Copy of Host Interrupt Mask */
+ uint32 bus_num; /* bus number */
+ uint32 slot_num; /* slot ID */
+ uint32 hostintmask; /* Copy of Host Interrupt Mask */
uint32 intstatus; /* Intstatus bits (events) pending */
bool dpc_sched; /* Indicates DPC schedule (intrpt rcvd) */
bool fcstate; /* State of dongle flow-control */
int32 sd_rxchain; /* If bcmsdh api accepts PKT chains */
bool use_rxchain; /* If dhd should use PKT chains */
bool sleeping; /* Is SDIO bus sleeping? */
+ wait_queue_head_t bus_sleep;
uint rxflow_mode; /* Rx flow control mode */
bool rxflow; /* Is rx flow control on */
uint prev_rxlim_hit; /* Is prev rx limit exceeded (per dpc schedule) */
bool alp_only; /* Don't use HT clock (ALP only) */
/* Field to decide if rx of control frames happen in rxbuf or lb-pool */
bool usebufpool;
-
+ int32 txinrx_thres; /* num of in-queued pkts */
+ int32 dotxinrx; /* tx first in dhdsdio_readframes */
#ifdef SDTEST
/* external loopback */
bool ext_loop;
uint f2rxdata; /* Number of frame data reads */
uint f2txdata; /* Number of f2 frame writes */
uint f1regdata; /* Number of f1 register accesses */
-
+#ifdef DHDENABLE_TAILPAD
+ uint tx_tailpad_chain; /* Number of tail padding by chaining pad_pkt */
+ uint tx_tailpad_pktget; /* Number of tail padding by new PKTGET */
+#endif /* DHDENABLE_TAILPAD */
uint8 *ctrl_frame_buf;
uint32 ctrl_frame_len;
bool ctrl_frame_stat;
bool reqbussleep;
uint32 resetinstr;
uint32 dongle_ram_base;
-#ifdef BCMSDIOH_TXGLOM
+
void *glom_pkt_arr[SDPCM_MAXGLOM_SIZE]; /* Array of pkts for glomming */
- uint16 glom_cnt; /* Number of pkts in the glom array */
- uint16 glom_total_len; /* Total length of pkts in glom array */
- bool glom_enable; /* Flag to indicate whether tx glom is enabled/disabled */
- uint8 glom_mode; /* Glom mode - 0-copy mode, 1 - Multi-descriptor mode */
- uint32 glomsize; /* Glom size limitation */
-#endif
+ uint32 txglom_cnt; /* Number of pkts in the glom array */
+ uint32 txglom_total_len; /* Total length of pkts in glom array */
+ bool txglom_enable; /* Flag to indicate whether tx glom is enabled/disabled */
+ uint32 txglomsize; /* Glom size limitation */
+#ifdef DHDENABLE_TAILPAD
+ void *pad_pkt;
+#endif /* DHDENABLE_TAILPAD */
} dhd_bus_t;
/* clkstate */
static bool retrydata;
#define RETRYCHAN(chan) (((chan) == SDPCM_EVENT_CHANNEL) || retrydata)
-#if defined(SDIO_CRC_ERROR_FIX)
-static uint watermark = 48;
-static uint mesbusyctrl = 80;
-#else
-static const uint watermark = 8;
-static const uint mesbusyctrl = 0;
-#endif
+static uint watermark = 8;
+static uint mesbusyctrl = 0;
static const uint firstread = DHD_FIRSTREAD;
-#define HDATLEN (firstread - (SDPCM_HDRLEN))
-
/* Retry count for register access failures */
static const uint retry_limit = 2;
#endif /* defined(OOB_INTR_ONLY) && defined(SDIO_ISR_THREAD) */
#define PKTALIGN(osh, p, len, align) \
do { \
- uint datalign; \
+ uintptr datalign; \
datalign = (uintptr)PKTDATA((osh), (p)); \
datalign = ROUNDUP(datalign, (align)) - datalign; \
ASSERT(datalign < (align)); \
ASSERT(PKTLEN((osh), (p)) >= ((len) + datalign)); \
if (datalign) \
- PKTPULL((osh), (p), datalign); \
+ PKTPULL((osh), (p), (uint)datalign); \
PKTSETLEN((osh), (p), (len)); \
} while (0)
void *pkt, bcmsdh_cmplt_fn_t complete, void *handle);
static int dhd_bcmsdh_send_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags,
uint8 *buf, uint nbytes,
- void *pkt, bcmsdh_cmplt_fn_t complete, void *handle);
-#ifdef BCMSDIOH_TXGLOM
-static void dhd_bcmsdh_glom_post(dhd_bus_t *bus, uint8 *frame, void *pkt, uint len);
-static void dhd_bcmsdh_glom_clear(dhd_bus_t *bus);
-#endif
-
-static bool dhdsdio_download_firmware(dhd_bus_t *bus, osl_t *osh, void *sdh);
+ void *pkt, bcmsdh_cmplt_fn_t complete, void *handle, int max_retry);
+static int dhdsdio_txpkt(dhd_bus_t *bus, uint chan, void** pkts, int num_pkt, bool free_pkt);
+static int dhdsdio_txpkt_preprocess(dhd_bus_t *bus, void *pkt, int chan, int txseq,
+ int prev_chain_total_len, bool last_chained_pkt,
+ int *pad_pkt_len, void **new_pkt);
+static int dhdsdio_txpkt_postprocess(dhd_bus_t *bus, void *pkt);
+
+static int dhdsdio_download_firmware(dhd_bus_t *bus, osl_t *osh, void *sdh);
static int _dhdsdio_download_firmware(dhd_bus_t *bus);
static int dhdsdio_download_code_file(dhd_bus_t *bus, char *image_path);
#endif /* WLMEDIA_HTSF */
static void
-dhd_overflow_war(struct dhd_bus *bus)
+dhdsdio_tune_fifoparam(struct dhd_bus *bus)
{
int err;
uint8 devctl, wm, mes;
- /* See .ppt in PR for these recommended values */
- if (bus->blocksize == 512) {
- wm = OVERFLOW_BLKSZ512_WM;
- mes = OVERFLOW_BLKSZ512_MES;
+ if (bus->sih->buscorerev >= 15) {
+ /* See .ppt in PR for these recommended values */
+ if (bus->blocksize == 512) {
+ wm = OVERFLOW_BLKSZ512_WM;
+ mes = OVERFLOW_BLKSZ512_MES;
+ } else {
+ mes = bus->blocksize/4;
+ wm = bus->blocksize/4;
+ }
+
+ watermark = wm;
+ mesbusyctrl = mes;
} else {
- mes = bus->blocksize/4;
- wm = bus->blocksize/4;
+		DHD_INFO(("skip fifotune: SdioRev(%d) is lower than the minimum required version\n",
+ bus->sih->buscorerev));
+ return;
}
-
/* Update watermark */
- bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_WATERMARK, wm, &err);
+ if (wm > 0) {
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_WATERMARK, wm, &err);
- devctl = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
- devctl |= SBSDIO_DEVCTL_F2WM_ENAB;
- bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err);
+ devctl = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
+ devctl |= SBSDIO_DEVCTL_F2WM_ENAB;
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, devctl, &err);
+ }
/* Update MES */
- bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_MESBUSYCTRL,
- (mes | SBSDIO_MESBUSYCTRL_ENAB), &err);
+ if (mes > 0) {
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_MESBUSYCTRL,
+ (mes | SBSDIO_MESBUSYCTRL_ENAB), &err);
+ }
DHD_INFO(("Apply overflow WAR: 0x%02x 0x%02x 0x%02x\n",
bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err),
{
bool cap = FALSE;
uint32 core_capext, addr, data;
+
+ if (bus->sih->chip == BCM43430_CHIP_ID) {
+ /* check if fw initialized sr engine */
+ addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, sr_control1);
+ if (bcmsdh_reg_read(bus->sdh, addr, 4) != 0)
+ cap = TRUE;
+
+ return cap;
+ }
if (bus->sih->chip == BCM4324_CHIP_ID) {
addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_addr);
data = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_data);
core_capext = FALSE;
} else if ((bus->sih->chip == BCM4335_CHIP_ID) ||
(bus->sih->chip == BCM4339_CHIP_ID) ||
+ (bus->sih->chip == BCM43349_CHIP_ID) ||
+ (bus->sih->chip == BCM4345_CHIP_ID) ||
+ (bus->sih->chip == BCM4354_CHIP_ID) ||
+ (bus->sih->chip == BCM4356_CHIP_ID) ||
+ (bus->sih->chip == BCM4358_CHIP_ID) ||
+ (BCM4349_CHIP(bus->sih->chip)) ||
(bus->sih->chip == BCM4350_CHIP_ID)) {
core_capext = TRUE;
} else {
/* FIX: Should change to query SR control register instead */
cap = TRUE;
} else if ((bus->sih->chip == BCM4335_CHIP_ID) ||
- (bus->sih->chip == BCM4339_CHIP_ID)) {
+ (bus->sih->chip == BCM4339_CHIP_ID) ||
+ (bus->sih->chip == BCM43349_CHIP_ID) ||
+ (bus->sih->chip == BCM4345_CHIP_ID) ||
+ (bus->sih->chip == BCM4354_CHIP_ID) ||
+ (bus->sih->chip == BCM4356_CHIP_ID) ||
+ (bus->sih->chip == BCM4358_CHIP_ID) ||
+ (bus->sih->chip == BCM4350_CHIP_ID)) {
uint32 enabval = 0;
addr = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_addr);
data = SI_ENUM_BASE + OFFSETOF(chipcregs_t, chipcontrol_data);
enabval = bcmsdh_reg_read(bus->sdh, data, 4);
if ((bus->sih->chip == BCM4350_CHIP_ID) ||
- 0)
- enabval &= CC_CHIPCTRL3_SR_ENG_ENABLE;
+ (bus->sih->chip == BCM4345_CHIP_ID) ||
+ (bus->sih->chip == BCM4354_CHIP_ID) ||
+ (bus->sih->chip == BCM4356_CHIP_ID) ||
+ (bus->sih->chip == BCM4358_CHIP_ID))
+ enabval &= CC_CHIPCTRL3_SR_ENG_ENABLE;
if (enabval)
cap = TRUE;
static int
dhdsdio_srwar_init(dhd_bus_t *bus)
{
+#if !defined(NDISVER) || (NDISVER < 0x0630)
bcmsdh_gpio_init(bus->sdh);
+#endif /* !defined(NDISVER) || (NDISVER < 0x0630) */
#ifdef USE_OOB_GPIO1
dhdsdio_oobwakeup_init(bus);
#define KSO_DBG(x)
#define KSO_WAIT_US 50
+#define KSO_WAIT_MS 1
+#define KSO_SLEEP_RETRY_COUNT 20
+#define ERROR_BCME_NODEVICE_MAX 1
+
#define MAX_KSO_ATTEMPTS (PMU_MAX_TRANSITION_DLY/KSO_WAIT_US)
static int
dhdsdio_clk_kso_enab(dhd_bus_t *bus, bool on)
int err = 0;
int try_cnt = 0;
+ if (!bus->dhd->conf->kso_enable)
+ return 0;
+
KSO_DBG(("%s> op:%s\n", __FUNCTION__, (on ? "KSO_SET" : "KSO_CLR")));
wr_val |= (on << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);
break;
KSO_DBG(("%s> KSO wr/rd retry:%d, ERR:%x \n", __FUNCTION__, try_cnt, err));
- OSL_DELAY(KSO_WAIT_US);
+
+ if (((try_cnt + 1) % KSO_SLEEP_RETRY_COUNT) == 0) {
+ OSL_SLEEP(KSO_WAIT_MS);
+ } else
+ OSL_DELAY(KSO_WAIT_US);
bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SLEEPCSR, wr_val, &err);
} while (try_cnt++ < MAX_KSO_ATTEMPTS);
err = dhdsdio_clk_kso_enab(bus, FALSE);
if (OOB_WAKEUP_ENAB(bus))
{
+#if !defined(NDISVER) || (NDISVER < 0x0630)
err = bcmsdh_gpioout(bus->sdh, GPIO_DEV_WAKEUP, FALSE); /* GPIO_1 is off */
+#endif /* !defined(NDISVER) || (NDISVER < 0x0630) */
}
#endif /* USE_CMD14 */
} else {
DHD_TRACE(("%s: Request SD clk\n", __FUNCTION__));
dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
}
+#if !defined(NDISVER) || (NDISVER < 0x0630)
if ((bus->sih->chip == BCM4334_CHIP_ID) && (bus->sih->chiprev == 2)) {
SPINWAIT_SLEEP(sdioh_spinwait_sleep,
DHD_ERROR(("ERROR: GPIO_DEV_SRSTATE still low!\n"));
}
}
+#endif
#ifdef USE_CMD14
err = bcmsdh_sleep(bus->sdh, FALSE);
if (SLPAUTO_ENAB(bus) && (err != 0)) {
#else
if (OOB_WAKEUP_ENAB(bus))
{
+#if !defined(NDISVER) || (NDISVER < 0x0630)
err = bcmsdh_gpioout(bus->sdh, GPIO_DEV_WAKEUP, TRUE); /* GPIO_1 is on */
+#endif /* !defined(NDISVER) || (NDISVER < 0x0630) */
}
do {
err = dhdsdio_clk_kso_enab(bus, TRUE);
SBSDIO_FUNC1_CHIPCLKCSR, &err)) & SBSDIO_HT_AVAIL) !=
(SBSDIO_HT_AVAIL)), (10000));
+ DHD_TRACE(("%s: SBSDIO_FUNC1_CHIPCLKCSR : 0x%x\n", __FUNCTION__, csr));
+ if (!err && ((csr & SBSDIO_HT_AVAIL) != SBSDIO_HT_AVAIL)) {
+ DHD_ERROR(("%s:ERROR: device NOT Ready! 0x%x\n",
+ __FUNCTION__, csr));
+ err = BCME_NODEVICE;
+ }
}
}
if (err == 0)
bus->kso = on ? FALSE : TRUE;
else {
- DHD_ERROR(("%s: Sleep request failed: on:%d err:%d\n", __FUNCTION__, on, err));
+ DHD_ERROR(("%s: Sleep request failed: kso:%d on:%d err:%d\n",
+ __FUNCTION__, bus->kso, on, err));
if (!on && retry > 2)
- bus->kso = TRUE;
+ bus->kso = FALSE;
}
return err;
#endif /* DHD_USE_IDLECOUNT */
} else {
clkreq = 0;
+
if (bus->clkstate == CLK_PENDING) {
/* Cancel CA-only interrupt filter */
devctl = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL, &err);
(sleep ? "SLEEP" : "WAKE"),
(bus->sleeping ? "SLEEP" : "WAKE")));
+ if (bus->dhd->hang_was_sent)
+ return BCME_ERROR;
+
/* Done if we're already in the requested state */
if (sleep == bus->sleeping)
return BCME_OK;
/* Change state */
bus->sleeping = TRUE;
-
+ wake_up(&bus->bus_sleep);
} else {
/* Waking up: bus power up is ok, set local state */
return err;
}
+
#if defined(OOB_INTR_ONLY)
void
dhd_enable_oob_intr(struct dhd_bus *bus, bool enable)
}
#endif
-#ifdef DHDTCPACK_SUPPRESS
-extern bool dhd_use_tcpack_suppress;
-
-/* Please be sure this function is called under dhd_os_tcpacklock() */
-void dhd_onoff_tcpack_sup(void *pub, bool on)
+int
+dhd_bus_txdata(struct dhd_bus *bus, void *pkt)
{
- dhd_pub_t *dhdp = (dhd_pub_t *)pub;
-
- if (dhd_use_tcpack_suppress != on) {
-
- DHD_ERROR(("dhd_onoff_tcpack_sup: %d -> %d\n", dhd_use_tcpack_suppress, on));
- dhd_use_tcpack_suppress = on;
- dhdp->tcp_ack_info_cnt = 0;
- bzero(dhdp->tcp_ack_info_tbl, sizeof(struct tcp_ack_info)*MAXTCPSTREAMS);
-
- } else
- DHD_ERROR(("dhd_onoff_tcpack_sup: alread %d\n", on));
-
- return;
-}
+ int ret = BCME_ERROR;
+ osl_t *osh;
+ uint datalen, prec;
+#if defined(DHD_TX_DUMP) || defined(DHD_8021X_DUMP)
+ uint8 *dump_data;
+ uint16 protocol;
+#endif /* DHD_TX_DUMP || DHD_8021X_DUMP */
-inline void dhd_tcpack_check_xmit(dhd_pub_t *dhdp, void *pkt)
-{
- uint8 i;
- tcp_ack_info_t *tcp_ack_info = NULL;
- int tbl_cnt;
-
- dhd_os_tcpacklock(dhdp);
- tbl_cnt = dhdp->tcp_ack_info_cnt;
- for (i = 0; i < tbl_cnt; i++) {
- tcp_ack_info = &dhdp->tcp_ack_info_tbl[i];
- if (tcp_ack_info->p_tcpackinqueue == pkt) {
- /* This pkt is being transmitted so remove the tcp_ack_info of it.
- * compact the array unless the last element,
- * then the pkt's array is removed.
- */
- if (i < tbl_cnt-1) {
- memmove(&dhdp->tcp_ack_info_tbl[i],
- &dhdp->tcp_ack_info_tbl[i+1],
- sizeof(struct tcp_ack_info)*(tbl_cnt - (i+1)));
- }
- bzero(&dhdp->tcp_ack_info_tbl[tbl_cnt-1], sizeof(struct tcp_ack_info));
- if (--dhdp->tcp_ack_info_cnt < 0) {
- DHD_ERROR(("dhdsdio_sendfromq:(ERROR) tcp_ack_info_cnt %d"
- " Stop using tcpack_suppress\n", dhdp->tcp_ack_info_cnt));
- dhd_onoff_tcpack_sup(dhdp, FALSE);
- }
- break;
- }
- }
- dhd_os_tcpackunlock(dhdp);
-}
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
-bool
-dhd_tcpack_suppress(dhd_pub_t *dhdp, void *pkt)
-{
- uint8 *eh_header;
- uint16 eh_type;
- uint8 *ip_header;
- uint8 *tcp_header;
- uint32 ip_hdr_len;
- uint32 cur_framelen;
- uint8 bdc_hdr_len = BDC_HEADER_LEN;
- uint8 wlfc_hdr_len = 0;
- uint8 *data = PKTDATA(dhdp->osh, pkt);
- cur_framelen = PKTLEN(dhdp->osh, pkt);
+ osh = bus->dhd->osh;
+ datalen = PKTLEN(osh, pkt);
-#ifdef PROP_TXSTATUS
- /* In this case, BDC header is not pushed in dhd_sendpkt() */
- if (dhdp->wlfc_state) {
- bdc_hdr_len = 0;
- wlfc_hdr_len = 8;
- }
-#endif
- if (cur_framelen < bdc_hdr_len + ETHER_HDR_LEN) {
- DHD_TRACE(("dhd_tcpack_suppress: Too short packet length %d\n", cur_framelen));
- return FALSE;
+#ifdef SDTEST
+ /* Push the test header if doing loopback */
+ if (bus->ext_loop) {
+ uint8* data;
+ PKTPUSH(osh, pkt, SDPCM_TEST_HDRLEN);
+ data = PKTDATA(osh, pkt);
+ *data++ = SDPCM_TEST_ECHOREQ;
+ *data++ = (uint8)bus->loopid++;
+ *data++ = (datalen >> 0);
+ *data++ = (datalen >> 8);
+ datalen += SDPCM_TEST_HDRLEN;
}
+#else /* SDTEST */
+ BCM_REFERENCE(datalen);
+#endif /* SDTEST */
- /* Get rid of BDC header */
- eh_header = data + bdc_hdr_len;
- cur_framelen -= bdc_hdr_len;
- eh_type = eh_header[12] << 8 | eh_header[13];
+#if defined(DHD_TX_DUMP) || defined(DHD_8021X_DUMP)
+ dump_data = PKTDATA(osh, pkt);
+ dump_data += 4; /* skip 4 bytes header */
+ protocol = (dump_data[12] << 8) | dump_data[13];
- if (eh_type != ETHER_TYPE_IP) {
- DHD_TRACE(("dhd_tcpack_suppress: Not a IP packet 0x%x\n", eh_type));
- return FALSE;
+ if (protocol == ETHER_TYPE_802_1X) {
+ DHD_ERROR(("ETHER_TYPE_802_1X [TX]: ver %d, type %d, replay %d\n",
+ dump_data[14], dump_data[15], dump_data[30]));
}
+#endif /* DHD_TX_DUMP || DHD_8021X_DUMP */
- DHD_TRACE(("dhd_tcpack_suppress: IP pkt! 0x%x\n", eh_type));
-
- ip_header = eh_header + ETHER_HDR_LEN;
- cur_framelen -= ETHER_HDR_LEN;
- ip_hdr_len = 4 * (ip_header[0] & 0x0f);
+#if defined(DHD_TX_DUMP) && defined(DHD_TX_FULL_DUMP)
+ {
+ int i;
+ DHD_ERROR(("TX DUMP\n"));
- if ((ip_header[0] & 0xf0) != 0x40) {
- DHD_TRACE(("dhd_tcpack_suppress: Not IPv4!\n"));
- return FALSE;
+ for (i = 0; i < (datalen - 4); i++) {
+ DHD_ERROR(("%02X ", dump_data[i]));
+ if ((i & 15) == 15)
+ printk("\n");
+ }
+ DHD_ERROR(("\n"));
}
+#endif /* DHD_TX_DUMP && DHD_TX_FULL_DUMP */
- if (cur_framelen < ip_hdr_len) {
- DHD_ERROR(("dhd_tcpack_suppress: IP packet length %d wrong!\n", cur_framelen));
- return FALSE;
- }
+ prec = PRIO2PREC((PKTPRIO(pkt) & PRIOMASK));
- /* not tcp */
- if (ip_header[9] != 0x06) {
- DHD_TRACE(("dhd_tcpack_suppress: Not a TCP packet 0x%x\n", ip_header[9]));
- return FALSE;
- }
+ /* Check for existing queue, current flow-control, pending event, or pending clock */
+ if (dhd_deferred_tx || bus->fcstate || pktq_len(&bus->txq) || bus->dpc_sched ||
+ (!DATAOK(bus)) || (bus->flowcontrol & NBITVAL(prec)) ||
+ (bus->clkstate != CLK_AVAIL)) {
+ bool deq_ret;
+ int pkq_len;
- DHD_TRACE(("dhd_tcpack_suppress: TCP pkt!\n"));
+ DHD_TRACE(("%s: deferring pktq len %d\n", __FUNCTION__, pktq_len(&bus->txq)));
+ bus->fcqueued++;
- tcp_header = ip_header + ip_hdr_len;
+ /* Priority based enq */
+ dhd_os_sdlock_txq(bus->dhd);
+ deq_ret = dhd_prec_enq(bus->dhd, &bus->txq, pkt, prec);
+ dhd_os_sdunlock_txq(bus->dhd);
- /* is it an ack ? */
- if (tcp_header[13] == 0x10) {
-#if defined(DHD_DEBUG)
- uint32 tcp_seq_num = tcp_header[4] << 24 | tcp_header[5] << 16 |
- tcp_header[6] << 8 | tcp_header[7];
-#endif
- uint32 tcp_ack_num = tcp_header[8] << 24 | tcp_header[9] << 16 |
- tcp_header[10] << 8 | tcp_header[11];
- uint16 ip_tcp_ttllen = (ip_header[3] & 0xff) + (ip_header[2] << 8);
- uint32 tcp_hdr_len = 4*((tcp_header[12] & 0xf0) >> 4);
- DHD_TRACE(("dhd_tcpack_suppress: TCP ACK seq %ud ack %ud\n",
- tcp_seq_num, tcp_ack_num));
-
-
- /* zero length ? */
- if (ip_tcp_ttllen == ip_hdr_len + tcp_hdr_len) {
- int i;
- tcp_ack_info_t *tcp_ack_info = NULL;
- DHD_TRACE(("dhd_tcpack_suppress: TCP ACK zero length\n"));
- /* Look for tcp_ack_info that has the same
- * ip src/dst addrs and tcp src/dst ports
- */
- dhd_os_tcpacklock(dhdp);
- for (i = 0; i < dhdp->tcp_ack_info_cnt; i++) {
- if (dhdp->tcp_ack_info_tbl[i].p_tcpackinqueue &&
- !memcmp(&ip_header[12], dhdp->tcp_ack_info_tbl[i].ipaddrs, 8) &&
- !memcmp(tcp_header, dhdp->tcp_ack_info_tbl[i].tcpports, 4)) {
- tcp_ack_info = &dhdp->tcp_ack_info_tbl[i];
- break;
+ if (!deq_ret) {
+#ifdef PROP_TXSTATUS
+ if (DHD_PKTTAG_WLFCPKT(PKTTAG(pkt)) == 0)
+#endif /* PROP_TXSTATUS */
+ {
+#ifdef DHDTCPACK_SUPPRESS
+ if (dhd_tcpack_check_xmit(bus->dhd, pkt) == BCME_ERROR) {
+ DHD_ERROR(("%s %d: tcpack_suppress ERROR!!! Stop using\n",
+ __FUNCTION__, __LINE__));
+ dhd_tcpack_suppress_set(bus->dhd, TCPACK_SUP_OFF);
}
+#endif /* DHDTCPACK_SUPPRESS */
+ dhd_txcomplete(bus->dhd, pkt, FALSE);
+ PKTFREE(osh, pkt, TRUE);
}
+ ret = BCME_NORESOURCE;
+ } else
+ ret = BCME_OK;
- if (i == dhdp->tcp_ack_info_cnt && i < MAXTCPSTREAMS)
- tcp_ack_info = &dhdp->tcp_ack_info_tbl[dhdp->tcp_ack_info_cnt++];
-
- if (!tcp_ack_info) {
- DHD_TRACE(("dhd_tcpack_suppress: No empty tcp ack info"
- "%d %d %d %d, %d %d %d %d\n",
- tcp_header[0], tcp_header[1], tcp_header[2], tcp_header[3],
- dhdp->tcp_ack_info_tbl[i].tcpports[0],
- dhdp->tcp_ack_info_tbl[i].tcpports[1],
- dhdp->tcp_ack_info_tbl[i].tcpports[2],
- dhdp->tcp_ack_info_tbl[i].tcpports[3]));
- dhd_os_tcpackunlock(dhdp);
- return FALSE;
- }
-
- if (tcp_ack_info->p_tcpackinqueue) {
- if (tcp_ack_num > tcp_ack_info->tcpack_number) {
- void *prevpkt = tcp_ack_info->p_tcpackinqueue;
- uint8 pushed_len = SDPCM_HDRLEN +
- (BDC_HEADER_LEN - bdc_hdr_len) + wlfc_hdr_len;
+ dhd_os_sdlock_txq(bus->dhd);
+ pkq_len = pktq_len(&bus->txq);
+ dhd_os_sdunlock_txq(bus->dhd);
+ if (pkq_len >= FCHI) {
+ bool wlfc_enabled = FALSE;
#ifdef PROP_TXSTATUS
- /* In case the prev pkt is delayenqueued
- * but not delayedequeued yet, it may not have
- * any additional header yet.
- */
- if (dhdp->wlfc_state && (PKTLEN(dhdp->osh, prevpkt) ==
- tcp_ack_info->ip_tcp_ttllen + ETHER_HDR_LEN))
- pushed_len = 0;
+ wlfc_enabled = (dhd_wlfc_flowcontrol(bus->dhd, ON, FALSE) !=
+ WLFC_UNSUPPORTED);
#endif
- if ((ip_tcp_ttllen == tcp_ack_info->ip_tcp_ttllen) &&
- (PKTLEN(dhdp->osh, pkt) ==
- PKTLEN(dhdp->osh, prevpkt) - pushed_len)) {
- bcopy(PKTDATA(dhdp->osh, pkt),
- PKTDATA(dhdp->osh, prevpkt) + pushed_len,
- PKTLEN(dhdp->osh, pkt));
- PKTFREE(dhdp->osh, pkt, FALSE);
- DHD_TRACE(("dhd_tcpack_suppress: pkt 0x%p"
- " TCP ACK replace %ud -> %ud\n", prevpkt,
- tcp_ack_info->tcpack_number, tcp_ack_num));
- tcp_ack_info->tcpack_number = tcp_ack_num;
- dhd_os_tcpackunlock(dhdp);
- return TRUE;
- } else
- DHD_TRACE(("dhd_tcpack_suppress: len mismatch"
- " %d(%d) %d(%d)\n",
- PKTLEN(dhdp->osh, pkt), ip_tcp_ttllen,
- PKTLEN(dhdp->osh, prevpkt),
- tcp_ack_info->ip_tcp_ttllen));
- } else {
-#ifdef TCPACK_TEST
- void *prevpkt = tcp_ack_info->p_tcpackinqueue;
+ if (!wlfc_enabled && dhd_doflow) {
+ dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON);
+ }
+ }
+
+#ifdef DHD_DEBUG
+ dhd_os_sdlock_txq(bus->dhd);
+ if (pktq_plen(&bus->txq, prec) > qcount[prec])
+ qcount[prec] = pktq_plen(&bus->txq, prec);
+ dhd_os_sdunlock_txq(bus->dhd);
#endif
- DHD_TRACE(("dhd_tcpack_suppress: TCP ACK number reverse"
- " prev %ud (0x%p) new %ud (0x%p)\n",
- tcp_ack_info->tcpack_number,
- tcp_ack_info->p_tcpackinqueue,
- tcp_ack_num, pkt));
-#ifdef TCPACK_TEST
- if (PKTLEN(dhdp->osh, pkt) == PKTLEN(dhdp->osh, prevpkt)) {
- PKTFREE(dhdp->osh, pkt, FALSE);
- dhd_os_tcpackunlock(dhdp);
- return TRUE;
- }
+
+ /* Schedule DPC if needed to send queued packet(s) */
+ if (dhd_deferred_tx && !bus->dpc_sched) {
+ bus->dpc_sched = TRUE;
+ dhd_sched_dpc(bus->dhd);
+ }
+ } else {
+ int chan = SDPCM_DATA_CHANNEL;
+
+#ifdef SDTEST
+ chan = (bus->ext_loop ? SDPCM_TEST_CHANNEL : SDPCM_DATA_CHANNEL);
#endif
- }
- } else {
- tcp_ack_info->p_tcpackinqueue = pkt;
- tcp_ack_info->tcpack_number = tcp_ack_num;
- tcp_ack_info->ip_tcp_ttllen = ip_tcp_ttllen;
- bcopy(&ip_header[12], tcp_ack_info->ipaddrs, 8);
- bcopy(tcp_header, tcp_ack_info->tcpports, 4);
- }
- dhd_os_tcpackunlock(dhdp);
- } else
- DHD_TRACE(("dhd_tcpack_suppress: TCP ACK with DATA len %d\n",
- ip_tcp_ttllen - ip_hdr_len - tcp_hdr_len));
+ /* Lock: we're about to use shared data/code (and SDIO) */
+ dhd_os_sdlock(bus->dhd);
+
+ /* Otherwise, send it now */
+ BUS_WAKE(bus);
+ /* Make sure back plane ht clk is on, no pending allowed */
+ dhdsdio_clkctl(bus, CLK_AVAIL, TRUE);
+
+ ret = dhdsdio_txpkt(bus, chan, &pkt, 1, TRUE);
+
+ if (ret != BCME_OK)
+ bus->dhd->tx_errors++;
+ else
+ bus->dhd->dstats.tx_bytes += datalen;
+
+ if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
+ bus->activity = FALSE;
+ dhdsdio_clkctl(bus, CLK_NONE, TRUE);
+ }
+
+ dhd_os_sdunlock(bus->dhd);
}
- return FALSE;
+
+ return ret;
}
-#endif /* DHDTCPACK_SUPPRESS */
-/* Writes a HW/SW header into the packet and sends it. */
-/* Assumes: (a) header space already there, (b) caller holds lock */
-static int
-dhdsdio_txpkt(dhd_bus_t *bus, void *pkt, uint chan, bool free_pkt, bool queue_only)
+
+/* align packet data pointer and packet length to n-byte boundary, process packet headers,
+ * a new packet may be allocated if there is not enough head and/or tail room for padding.
+ * the caller is responsible for updating the glom size in the head packet (when glom is
+ * used)
+ *
+ * pad_pkt_len: returns the length of extra padding needed from the padding packet, this parameter
+ *              is used in tx glom mode only
+ *
+ * new_pkt: out, pointer to the new packet allocated due to insufficient head room for alignment
+ * padding, NULL if not needed, the caller is responsible for freeing the new packet
+ *
+ * return: positive value - length of the packet, including head and tail padding
+ * negative value - errors
+ */
+static int dhdsdio_txpkt_preprocess(dhd_bus_t *bus, void *pkt, int chan, int txseq,
+ int prev_chain_total_len, bool last_chained_pkt,
+ int *pad_pkt_len, void **new_pkt)
{
- int ret;
osl_t *osh;
uint8 *frame;
- uint16 len, pad1 = 0, act_len = 0;
+ int pkt_len;
+ int modulo;
+ int head_padding;
+ int tail_padding = 0;
uint32 swheader;
- uint retries = 0;
- uint32 real_pad = 0;
- bcmsdh_info_t *sdh;
- void *new;
- int i;
- int pkt_cnt;
-#ifdef BCMSDIOH_TXGLOM
- uint8 *frame_tmp;
-#endif
-#ifdef WLMEDIA_HTSF
- char *p;
- htsfts_t *htsf_ts;
-#endif
+ uint32 swhdr_offset;
+ bool alloc_new_pkt = FALSE;
+ uint8 sdpcm_hdrlen = bus->txglom_enable ? SDPCM_HDRLEN_TXGLOM : SDPCM_HDRLEN;
- DHD_TRACE(("%s: Enter\n", __FUNCTION__));
-
- sdh = bus->sdh;
+ *new_pkt = NULL;
osh = bus->dhd->osh;
#ifdef DHDTCPACK_SUPPRESS
- if (dhd_use_tcpack_suppress) {
- dhd_tcpack_check_xmit(bus->dhd, pkt);
+ if (dhd_tcpack_check_xmit(bus->dhd, pkt) == BCME_ERROR) {
+ DHD_ERROR(("%s %d: tcpack_suppress ERROR!!! Stop using it\n",
+ __FUNCTION__, __LINE__));
+ dhd_tcpack_suppress_set(bus->dhd, TCPACK_SUP_OFF);
}
#endif /* DHDTCPACK_SUPPRESS */
- if (bus->dhd->dongle_reset) {
- ret = BCME_NOTREADY;
- goto done;
- }
+ /* Add space for the SDPCM hardware/software headers */
+ PKTPUSH(osh, pkt, sdpcm_hdrlen);
+ ASSERT(ISALIGNED((uintptr)PKTDATA(osh, pkt), 2));
frame = (uint8*)PKTDATA(osh, pkt);
+ pkt_len = (uint16)PKTLEN(osh, pkt);
#ifdef WLMEDIA_HTSF
+ frame = (uint8*)PKTDATA(osh, pkt);
if (PKTLEN(osh, pkt) >= 100) {
- p = PKTDATA(osh, pkt);
- htsf_ts = (htsfts_t*) (p + HTSF_HOSTOFFSET + 12);
+ htsf_ts = (htsfts_t*) (frame + HTSF_HOSTOFFSET + 12);
if (htsf_ts->magic == HTSFMAGIC) {
htsf_ts->c20 = get_cycles();
htsf_ts->t20 = dhd_get_htsf(bus->dhd->info, 0);
}
}
#endif /* WLMEDIA_HTSF */
+#ifdef DHD_DEBUG
+ if (PKTPRIO(pkt) < ARRAYSIZE(tx_packets))
+ tx_packets[PKTPRIO(pkt)]++;
+#endif /* DHD_DEBUG */
- /* Add alignment padding, allocate new packet if needed */
- if ((pad1 = ((uintptr)frame % DHD_SDALIGN))) {
- if (PKTHEADROOM(osh, pkt) < pad1) {
- DHD_INFO(("%s: insufficient headroom %d for %d pad1\n",
- __FUNCTION__, (int)PKTHEADROOM(osh, pkt), pad1));
- bus->dhd->tx_realloc++;
- new = PKTGET(osh, (PKTLEN(osh, pkt) + DHD_SDALIGN), TRUE);
- if (!new) {
- DHD_ERROR(("%s: couldn't allocate new %d-byte packet\n",
- __FUNCTION__, PKTLEN(osh, pkt) + DHD_SDALIGN));
- ret = BCME_NOMEM;
- goto done;
+ /* align the data pointer, allocate a new packet if there is not enough space (new
+ * packet data pointer will be aligned thus no padding will be needed)
+ */
+ head_padding = (ulong)frame % DHD_SDALIGN;
+ if (PKTHEADROOM(osh, pkt) < head_padding) {
+ head_padding = 0;
+ alloc_new_pkt = TRUE;
+ } else {
+ uint cur_chain_total_len;
+ int chain_tail_padding = 0;
+
+ /* All packets need to be aligned by DHD_SDALIGN */
+ modulo = (pkt_len + head_padding) % DHD_SDALIGN;
+ tail_padding = modulo > 0 ? (DHD_SDALIGN - modulo) : 0;
+
+ /* Total pkt chain length needs to be aligned by block size,
+ * unless it is a single pkt chain with total length less than one block size,
+ * which we prefer sending by byte mode.
+ *
+ * Do the chain alignment here if
+ * 1. This is the last pkt of the chain of multiple pkts or a single pkt.
+ * 2-1. This chain is of multiple pkts, or
+ * 2-2. This is a single pkt whose size is longer than one block size.
+ */
+ cur_chain_total_len = prev_chain_total_len +
+ (head_padding + pkt_len + tail_padding);
+ if (last_chained_pkt && bus->blocksize != 0 &&
+ (cur_chain_total_len > (int)bus->blocksize || prev_chain_total_len > 0)) {
+ modulo = cur_chain_total_len % bus->blocksize;
+ chain_tail_padding = modulo > 0 ? (bus->blocksize - modulo) : 0;
+ }
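+		/* Worked example of the arithmetic above (illustrative numbers only;
+		 * DHD_SDALIGN is commonly 32 and bus->blocksize commonly 512, but
+		 * both may differ on a given platform):
+		 *   frame % 32 = 6, pkt_len = 310 -> tail_padding = 32 - (316 % 32) = 4,
+		 *   so this pkt spans 6 + 310 + 4 = 320 bytes; with prev_chain_total_len
+		 *   = 1024 the chain now totals 1344, and for the last pkt
+		 *   chain_tail_padding = 512 - (1344 % 512) = 192, rounding the whole
+		 *   transfer up to 1536 bytes (3 full blocks).
+		 */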
+
+#ifdef DHDENABLE_TAILPAD
+ if (PKTTAILROOM(osh, pkt) < tail_padding) {
+ /* We don't have tail room to align by DHD_SDALIGN */
+ alloc_new_pkt = TRUE;
+ bus->tx_tailpad_pktget++;
+ } else if (PKTTAILROOM(osh, pkt) < tail_padding + chain_tail_padding) {
+ /* We have tail room for tail_padding of this pkt itself, but not for
+ * total pkt chain alignment by block size.
+ * Use the padding packet to avoid memory copy if applicable,
+ * otherwise, just allocate a new pkt.
+ */
+ if (bus->pad_pkt) {
+ *pad_pkt_len = chain_tail_padding;
+ bus->tx_tailpad_chain++;
+ } else {
+ alloc_new_pkt = TRUE;
+ bus->tx_tailpad_pktget++;
}
-
- PKTALIGN(osh, new, PKTLEN(osh, pkt), DHD_SDALIGN);
- bcopy(PKTDATA(osh, pkt), PKTDATA(osh, new), PKTLEN(osh, pkt));
- if (free_pkt)
- PKTFREE(osh, pkt, TRUE);
- /* free the pkt if canned one is not used */
- free_pkt = TRUE;
- pkt = new;
- frame = (uint8*)PKTDATA(osh, pkt);
- ASSERT(((uintptr)frame % DHD_SDALIGN) == 0);
- pad1 = 0;
- } else {
- PKTPUSH(osh, pkt, pad1);
- frame = (uint8*)PKTDATA(osh, pkt);
-
- ASSERT((pad1 + SDPCM_HDRLEN) <= (int) PKTLEN(osh, pkt));
- bzero(frame, pad1 + SDPCM_HDRLEN);
- }
+ } else
+ /* This last pkt's tailroom is sufficient to hold both tail_padding
+ * of the pkt itself and chain_tail_padding of total pkt chain
+ */
+#endif /* DHDENABLE_TAILPAD */
+ tail_padding += chain_tail_padding;
}
- ASSERT(pad1 < DHD_SDALIGN);
- /* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */
- len = (uint16)PKTLEN(osh, pkt);
- *(uint16*)frame = htol16(len);
- *(((uint16*)frame) + 1) = htol16(~len);
+ DHD_INFO(("%s sdhdr len + orig_pkt_len %d h_pad %d t_pad %d pad_pkt_len %d\n",
+ __FUNCTION__, pkt_len, head_padding, tail_padding, *pad_pkt_len));
-#ifdef BCMSDIOH_TXGLOM
- if (bus->glom_enable) {
- uint32 hwheader1 = 0, hwheader2 = 0;
- act_len = len;
+ if (alloc_new_pkt) {
+ void *tmp_pkt;
+ int newpkt_size;
+ int cur_total_len;
- /* Software tag: channel, sequence number, data offset */
- swheader = ((chan << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK) |
- ((bus->tx_seq + bus->glom_cnt) % SDPCM_SEQUENCE_WRAP) |
- (((pad1 + SDPCM_HDRLEN) << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK);
- htol32_ua_store(swheader, frame + SDPCM_FRAMETAG_LEN + SDPCM_HWEXT_LEN);
- htol32_ua_store(0, frame + SDPCM_FRAMETAG_LEN + SDPCM_HWEXT_LEN + sizeof(swheader));
-
- if (queue_only) {
- uint8 alignment = ALIGNMENT;
- if (forcealign && (len & (alignment - 1)))
- len = ROUNDUP(len, alignment);
- /* Hardware extention tag */
- /* 2byte frame length, 1byte-, 1byte frame flag,
- * 2byte-hdrlength, 2byte padlenght
- */
- hwheader1 = (act_len - SDPCM_FRAMETAG_LEN) | (0 << 24);
- hwheader2 = (len - act_len) << 16;
- htol32_ua_store(hwheader1, frame + SDPCM_FRAMETAG_LEN);
- htol32_ua_store(hwheader2, frame + SDPCM_FRAMETAG_LEN + 4);
- real_pad = len - act_len;
- if (PKTTAILROOM(osh, pkt) < real_pad) {
- DHD_INFO(("%s 1: insufficient tailroom %d for %d real_pad\n",
- __FUNCTION__, (int)PKTTAILROOM(osh, pkt), real_pad));
- if (PKTPADTAILROOM(osh, pkt, real_pad)) {
- DHD_ERROR(("CHK1: padding error size %d\n", real_pad));
- ret = BCME_NOMEM;
- goto done;
- }
-#ifndef BCMLXSDMMC
- else
- PKTSETLEN(osh, pkt, act_len);
-#endif
- }
-#ifdef BCMLXSDMMC
- PKTSETLEN(osh, pkt, len);
-#endif /* BCMLXSDMMC */
- /* Post the frame pointer to sdio glom array */
- dhd_bcmsdh_glom_post(bus, frame, pkt, len);
- /* Save the pkt pointer in bus glom array */
- bus->glom_pkt_arr[bus->glom_cnt] = pkt;
- bus->glom_total_len += len;
- bus->glom_cnt++;
- return BCME_OK;
- } else {
- /* Raise len to next SDIO block to eliminate tail command */
- if (bus->roundup && bus->blocksize &&
- ((bus->glom_total_len + len) > bus->blocksize)) {
- uint16 pad2 = bus->blocksize -
- ((bus->glom_total_len + len) % bus->blocksize);
- if ((pad2 <= bus->roundup) && (pad2 < bus->blocksize)) {
- len += pad2;
- } else {
- }
- } else if ((bus->glom_total_len + len) % DHD_SDALIGN) {
- len += DHD_SDALIGN
- - ((bus->glom_total_len + len) % DHD_SDALIGN);
- }
- if (forcealign && (len & (ALIGNMENT - 1))) {
- len = ROUNDUP(len, ALIGNMENT);
- }
+ ASSERT(*pad_pkt_len == 0);
- /* Hardware extention tag */
- /* 2byte frame length, 1byte-, 1byte frame flag,
- * 2byte-hdrlength, 2byte padlenght
- */
- hwheader1 = (act_len - SDPCM_FRAMETAG_LEN) | (1 << 24);
- hwheader2 = (len - act_len) << 16;
- htol32_ua_store(hwheader1, frame + SDPCM_FRAMETAG_LEN);
- htol32_ua_store(hwheader2, frame + SDPCM_FRAMETAG_LEN + 4);
- real_pad = len - act_len;
- if (PKTTAILROOM(osh, pkt) < real_pad) {
- DHD_INFO(("%s 2: insufficient tailroom %d"
- " for %d real_pad\n",
- __FUNCTION__, (int)PKTTAILROOM(osh, pkt), real_pad));
- if (PKTPADTAILROOM(osh, pkt, real_pad)) {
- DHD_ERROR(("CHK2: padding error size %d."
- " %d more pkts are discarded together.\n",
- real_pad, bus->glom_cnt));
- /* Save the pkt pointer in bus glom array
- * Otherwise, this last pkt will not be
- * cleaned under "goto done"
- */
- bus->glom_pkt_arr[bus->glom_cnt] = pkt;
- bus->glom_cnt++;
- bus->glom_total_len += len;
- ret = BCME_NOMEM;
- goto done;
- }
-#ifndef BCMLXSDMMC
- else
- PKTSETLEN(osh, pkt, act_len);
-#endif
- }
-#ifdef BCMLXSDMMC
- PKTSETLEN(osh, pkt, len);
-#endif /* BCMLXSDMMC */
+ DHD_INFO(("%s allocating new packet for padding\n", __FUNCTION__));
- /* Post the frame pointer to sdio glom array */
- dhd_bcmsdh_glom_post(bus, frame, pkt, len);
- /* Save the pkt pointer in bus glom array */
- bus->glom_pkt_arr[bus->glom_cnt] = pkt;
- bus->glom_cnt++;
- bus->glom_total_len += len;
+ /* head pointer is aligned now, no padding needed */
+ head_padding = 0;
- /* Update the total length on the first pkt */
- frame_tmp = (uint8*)PKTDATA(osh, bus->glom_pkt_arr[0]);
- *(uint16*)frame_tmp = htol16(bus->glom_total_len);
- *(((uint16*)frame_tmp) + 1) = htol16(~bus->glom_total_len);
+	/* update the tail padding as it depends on the head padding; since a new packet is
+	 * allocated, the head padding is no longer needed and the packet length changes
+ */
+
+ cur_total_len = prev_chain_total_len + pkt_len;
+ if (last_chained_pkt && bus->blocksize != 0 &&
+ (cur_total_len > (int)bus->blocksize || prev_chain_total_len > 0)) {
+ modulo = cur_total_len % bus->blocksize;
+ tail_padding = modulo > 0 ? (bus->blocksize - modulo) : 0;
+ }
+ else {
+ modulo = pkt_len % DHD_SDALIGN;
+ tail_padding = modulo > 0 ? (DHD_SDALIGN - modulo) : 0;
}
- } else
-#endif /* BCMSDIOH_TXGLOM */
- {
- act_len = len;
- /* Software tag: channel, sequence number, data offset */
- swheader = ((chan << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK) | bus->tx_seq |
- (((pad1 + SDPCM_HDRLEN) << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK);
- htol32_ua_store(swheader, frame + SDPCM_FRAMETAG_LEN);
- htol32_ua_store(0, frame + SDPCM_FRAMETAG_LEN + sizeof(swheader));
-#ifdef DHD_DEBUG
- if (PKTPRIO(pkt) < ARRAYSIZE(tx_packets)) {
- tx_packets[PKTPRIO(pkt)]++;
- }
- if (DHD_BYTES_ON() &&
- (((DHD_CTL_ON() && (chan == SDPCM_CONTROL_CHANNEL)) ||
- (DHD_DATA_ON() && (chan != SDPCM_CONTROL_CHANNEL))))) {
- prhex("Tx Frame", frame, len);
- } else if (DHD_HDRS_ON()) {
- prhex("TxHdr", frame, MIN(len, 16));
+ newpkt_size = PKTLEN(osh, pkt) + bus->blocksize + DHD_SDALIGN;
+ bus->dhd->tx_realloc++;
+ tmp_pkt = PKTGET(osh, newpkt_size, TRUE);
+ if (tmp_pkt == NULL) {
+ DHD_ERROR(("failed to alloc new %d byte packet\n", newpkt_size));
+ return BCME_NOMEM;
+ }
+ PKTALIGN(osh, tmp_pkt, PKTLEN(osh, pkt), DHD_SDALIGN);
+ bcopy(PKTDATA(osh, pkt), PKTDATA(osh, tmp_pkt), PKTLEN(osh, pkt));
+ *new_pkt = tmp_pkt;
+ pkt = tmp_pkt;
}
-#endif
- /* Raise len to next SDIO block to eliminate tail command */
- if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
- uint16 pad2 = bus->blocksize - (len % bus->blocksize);
- if ((pad2 <= bus->roundup) && (pad2 < bus->blocksize))
-#ifdef NOTUSED
- if (pad2 <= PKTTAILROOM(osh, pkt))
-#endif /* NOTUSED */
- len += pad2;
- } else if (len % DHD_SDALIGN) {
- len += DHD_SDALIGN - (len % DHD_SDALIGN);
- }
+ if (head_padding)
+ PKTPUSH(osh, pkt, head_padding);
- /* Some controllers have trouble with odd bytes -- round to even */
- if (forcealign && (len & (ALIGNMENT - 1))) {
-#ifdef NOTUSED
- if (PKTTAILROOM(osh, pkt))
-#endif
- len = ROUNDUP(len, ALIGNMENT);
-#ifdef NOTUSED
- else
- DHD_ERROR(("%s: sending unrounded %d-byte packet\n", __FUNCTION__, len));
-#endif
- }
- real_pad = len - act_len;
- if (PKTTAILROOM(osh, pkt) < real_pad) {
- DHD_INFO(("%s 3: insufficient tailroom %d for %d real_pad\n",
- __FUNCTION__, (int)PKTTAILROOM(osh, pkt), real_pad));
- if (PKTPADTAILROOM(osh, pkt, real_pad)) {
- DHD_ERROR(("CHK3: padding error size %d\n", real_pad));
- ret = BCME_NOMEM;
- goto done;
- }
-#ifndef BCMLXSDMMC
- else
- PKTSETLEN(osh, pkt, act_len);
-#endif
- }
-#ifdef BCMLXSDMMC
- PKTSETLEN(osh, pkt, len);
-#endif /* BCMLXSDMMC */
+ frame = (uint8*)PKTDATA(osh, pkt);
+ bzero(frame, head_padding + sdpcm_hdrlen);
+ pkt_len = (uint16)PKTLEN(osh, pkt);
+
+	/* the header has the following format
+	 * 4-byte HW frame tag: length, ~length (for glom this is the total length)
+	 *
+	 * 8-byte HW extension flags (glom mode only) as follows:
+	 * 2-byte packet length, excluding HW tag and padding
+	 * 2-byte frame channel and frame flags (e.g. next frame following)
+	 * 2-byte header length
+	 * 2-byte tail padding size
+	 *
+	 * 8-byte SW frame tags as follows
+ * 4-byte flags: host tx seq, channel, data offset
+ * 4-byte flags: TBD
+ */
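+	/* Byte-offset reading aid for the layout above (glom mode; in non-glom
+	 * mode the SW frame tags start right after the HW frame tag at offset 4):
+	 *   frame[0..1]   packet/glom length   (LE)
+	 *   frame[2..3]   ~length              (LE)
+	 *   frame[4..11]  HW extension flags   (SDPCM_HWEXT_LEN)
+	 *   frame[12..19] SW frame tags, two uint32 words
+	 */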
+
+ swhdr_offset = SDPCM_FRAMETAG_LEN;
+
+ /* hardware frame tag:
+ *
+	 * in tx-glom mode, the dongle only checks the hardware frame tag in the first
+	 * packet and sees it as the total length of the glom (including tail padding);
+	 * for each packet in the glom, the packet length needs to be updated (see
+	 * PKTSETLEN below)
+	 *
+	 * in non tx-glom mode, PKTLEN still needs to include tail padding as it is
+ * referred to in sdioh_request_buffer(). The tail length will be excluded in
+ * dhdsdio_txpkt_postprocess().
+ */
+ *(uint16*)frame = (uint16)htol16(pkt_len);
+ *(((uint16*)frame) + 1) = (uint16)htol16(~pkt_len);
+ pkt_len += tail_padding;
+
+	/* hardware extension flags */
+ if (bus->txglom_enable) {
+ uint32 hwheader1;
+ uint32 hwheader2;
+
+ swhdr_offset += SDPCM_HWEXT_LEN;
+ hwheader1 = (pkt_len - SDPCM_FRAMETAG_LEN - tail_padding) |
+ (last_chained_pkt << 24);
+ hwheader2 = (tail_padding) << 16;
+ htol32_ua_store(hwheader1, frame + SDPCM_FRAMETAG_LEN);
+ htol32_ua_store(hwheader2, frame + SDPCM_FRAMETAG_LEN + 4);
}
- do {
- ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
- frame, len, pkt, NULL, NULL);
- bus->f2txdata++;
- ASSERT(ret != BCME_PENDING);
+ PKTSETLEN((osh), (pkt), (pkt_len));
- if (ret == BCME_NODEVICE) {
- DHD_ERROR(("%s: Device asleep already\n", __FUNCTION__));
- } else if (ret < 0) {
- /* On failure, abort the command and terminate the frame */
- DHD_ERROR(("%s: sdio error %d, abort command and terminate frame.\n",
- __FUNCTION__, ret));
- bus->tx_sderrs++;
+ /* software frame tags */
+ swheader = ((chan << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK)
+ | (txseq % SDPCM_SEQUENCE_WRAP) |
+ (((head_padding + sdpcm_hdrlen) << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK);
+ htol32_ua_store(swheader, frame + swhdr_offset);
+ htol32_ua_store(0, frame + swhdr_offset + sizeof(swheader));
- bcmsdh_abort(sdh, SDIO_FUNC_2);
- bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL,
- SFC_WF_TERM, NULL);
- bus->f1regdata++;
+ return pkt_len;
+}
- for (i = 0; i < 3; i++) {
- uint8 hi, lo;
- hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
- SBSDIO_FUNC1_WFRAMEBCHI, NULL);
- lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
- SBSDIO_FUNC1_WFRAMEBCLO, NULL);
- bus->f1regdata += 2;
- if ((hi == 0) && (lo == 0))
- break;
- }
- }
- if (ret == 0) {
-#ifdef BCMSDIOH_TXGLOM
- if (bus->glom_enable) {
- bus->tx_seq = (bus->tx_seq + bus->glom_cnt) % SDPCM_SEQUENCE_WRAP;
- } else
-#endif
- {
- bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
- }
- }
- } while ((ret < 0) && retrydata && retries++ < TXRETRIES);
+static int dhdsdio_txpkt_postprocess(dhd_bus_t *bus, void *pkt)
+{
+ osl_t *osh;
+ uint8 *frame;
+ int data_offset;
+ int tail_padding;
+ int swhdr_offset = SDPCM_FRAMETAG_LEN + (bus->txglom_enable ? SDPCM_HWEXT_LEN : 0);
-done:
+ (void)osh;
+ osh = bus->dhd->osh;
-#ifdef BCMSDIOH_TXGLOM
- if (bus->glom_enable && !queue_only) {
- dhd_bcmsdh_glom_clear(bus);
- pkt_cnt = bus->glom_cnt;
- } else
-#endif
- {
- pkt_cnt = 1;
- }
- /* restore pkt buffer pointer before calling tx complete routine */
- while (pkt_cnt) {
-#ifdef BCMSDIOH_TXGLOM
- uint32 doff;
- if (bus->glom_enable) {
-#ifdef BCMLXSDMMC
- uint32 pad2 = 0;
-#endif /* BCMLXSDMMC */
- if (!queue_only)
- pkt = bus->glom_pkt_arr[bus->glom_cnt - pkt_cnt];
-
- frame = (uint8*)PKTDATA(osh, pkt);
- doff = ltoh32_ua(frame + SDPCM_FRAMETAG_LEN + SDPCM_HWEXT_LEN);
- doff = (doff & SDPCM_DOFFSET_MASK) >> SDPCM_DOFFSET_SHIFT;
-#ifdef BCMLXSDMMC
- pad2 = ltoh32_ua(frame + SDPCM_FRAMETAG_LEN + 4) >> 16;
- PKTSETLEN(osh, pkt, PKTLEN(osh, pkt) - pad2);
-#endif /* BCMLXSDMMC */
- PKTPULL(osh, pkt, doff);
- } else
-#endif /* BCMSDIOH_TXGLOM */
- {
-#ifdef BCMLXSDMMC
- if (act_len > 0)
- PKTSETLEN(osh, pkt, act_len);
-#endif /* BCMLXSDMMC */
- PKTPULL(osh, pkt, SDPCM_HDRLEN + pad1);
- }
-#ifdef PROP_TXSTATUS
- if (bus->dhd->wlfc_state) {
- dhd_os_sdunlock(bus->dhd);
- dhd_wlfc_txcomplete(bus->dhd, pkt, ret == 0);
- dhd_os_sdlock(bus->dhd);
+	/* restore the pkt buffer pointer, but keep the header pushed by dhd_prot_hdrpush */
+ frame = (uint8*)PKTDATA(osh, pkt);
+
+ DHD_INFO(("%s PKTLEN before postprocess %d",
+ __FUNCTION__, PKTLEN(osh, pkt)));
+
+ /* PKTLEN still includes tail_padding, so exclude it.
+ * We shall have head_padding + original pkt_len for PKTLEN afterwards.
+ */
+ if (bus->txglom_enable) {
+ /* txglom pkts have tail_padding length in HW ext header */
+ tail_padding = ltoh32_ua(frame + SDPCM_FRAMETAG_LEN + 4) >> 16;
+ PKTSETLEN(osh, pkt, PKTLEN(osh, pkt) - tail_padding);
+ DHD_INFO((" txglom pkt: tail_padding %d PKTLEN %d\n",
+ tail_padding, PKTLEN(osh, pkt)));
} else {
-#endif /* PROP_TXSTATUS */
-#ifdef SDTEST
- if (chan != SDPCM_TEST_CHANNEL) {
- dhd_txcomplete(bus->dhd, pkt, ret != 0);
+ /* non-txglom pkts have head_padding + original pkt length in HW frame tag.
+ * We cannot refer to this field for txglom pkts as the first pkt of the chain will
+ * have the field for the total length of the chain.
+ */
+ PKTSETLEN(osh, pkt, *(uint16*)frame);
+ DHD_INFO((" non-txglom pkt: HW frame tag len %d after PKTLEN %d\n",
+ *(uint16*)frame, PKTLEN(osh, pkt)));
}
-#else /* SDTEST */
- dhd_txcomplete(bus->dhd, pkt, ret != 0);
-#endif /* SDTEST */
- if (free_pkt)
- PKTFREE(osh, pkt, TRUE);
-#ifdef PROP_TXSTATUS
- }
-#endif
- pkt_cnt--;
- }
+ data_offset = ltoh32_ua(frame + swhdr_offset);
+ data_offset = (data_offset & SDPCM_DOFFSET_MASK) >> SDPCM_DOFFSET_SHIFT;
+ /* Get rid of sdpcm header + head_padding */
+ PKTPULL(osh, pkt, data_offset);
-#ifdef BCMSDIOH_TXGLOM
- /* Reset the glom array */
- if (bus->glom_enable && !queue_only) {
- bus->glom_cnt = 0;
- bus->glom_total_len = 0;
- }
-#endif
- return ret;
+ DHD_INFO(("%s data_offset %d, PKTLEN %d\n",
+ __FUNCTION__, data_offset, PKTLEN(osh, pkt)));
+
+ return BCME_OK;
}
-int
-dhd_bus_txdata(struct dhd_bus *bus, void *pkt)
+static int dhdsdio_txpkt(dhd_bus_t *bus, uint chan, void** pkts, int num_pkt, bool free_pkt)
{
- int ret = BCME_ERROR;
- osl_t *osh;
- uint datalen, prec;
-#ifdef DHD_TX_DUMP
- uint8 *dump_data;
- uint16 protocol;
-#ifdef DHD_TX_FULL_DUMP
int i;
-#endif /* DHD_TX_FULL_DUMP */
-#endif /* DHD_TX_DUMP */
- DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+ int ret = 0;
+ osl_t *osh;
+ bcmsdh_info_t *sdh;
+ void *pkt = NULL;
+ void *pkt_chain;
+ int total_len = 0;
+ void *head_pkt = NULL;
+ void *prev_pkt = NULL;
+ int pad_pkt_len = 0;
+ int new_pkt_num = 0;
+ void *new_pkts[MAX_TX_PKTCHAIN_CNT];
+ bool wlfc_enabled = FALSE;
+
+ if (bus->dhd->dongle_reset)
+ return BCME_NOTREADY;
+ sdh = bus->sdh;
osh = bus->dhd->osh;
- datalen = PKTLEN(osh, pkt);
+ /* init new_pkts[0] to make some compiler happy, not necessary as we check new_pkt_num */
+ new_pkts[0] = NULL;
+
+ for (i = 0; i < num_pkt; i++) {
+ int pkt_len;
+ bool last_pkt;
+ void *new_pkt = NULL;
+
+ pkt = pkts[i];
+ ASSERT(pkt);
+ last_pkt = (i == num_pkt - 1);
+ pkt_len = dhdsdio_txpkt_preprocess(bus, pkt, chan, bus->tx_seq + i,
+ total_len, last_pkt, &pad_pkt_len, &new_pkt);
+ if (pkt_len <= 0)
+ goto done;
+ if (new_pkt) {
+ pkt = new_pkt;
+ new_pkts[new_pkt_num++] = new_pkt;
+ }
+ total_len += pkt_len;
+
+ PKTSETNEXT(osh, pkt, NULL);
+ /* insert the packet into the list */
+ head_pkt ? PKTSETNEXT(osh, prev_pkt, pkt) : (head_pkt = pkt);
+ prev_pkt = pkt;
-#ifdef SDTEST
- /* Push the test header if doing loopback */
- if (bus->ext_loop) {
- uint8* data;
- PKTPUSH(osh, pkt, SDPCM_TEST_HDRLEN);
- data = PKTDATA(osh, pkt);
- *data++ = SDPCM_TEST_ECHOREQ;
- *data++ = (uint8)bus->loopid++;
- *data++ = (datalen >> 0);
- *data++ = (datalen >> 8);
- datalen += SDPCM_TEST_HDRLEN;
}
-#endif /* SDTEST */
-#ifdef DHD_TX_DUMP
- dump_data = PKTDATA(osh, pkt);
- dump_data += 4; /* skip 4 bytes header */
- protocol = (dump_data[12] << 8) | dump_data[13];
-#ifdef DHD_TX_FULL_DUMP
- DHD_ERROR(("TX DUMP\n"));
+ /* Update the HW frame tag (total length) in the first pkt of the glom */
+ if (bus->txglom_enable) {
+ uint8 *frame;
+
+ total_len += pad_pkt_len;
+ frame = (uint8*)PKTDATA(osh, head_pkt);
+ *(uint16*)frame = (uint16)htol16(total_len);
+ *(((uint16*)frame) + 1) = (uint16)htol16(~total_len);
- for (i = 0; i < (datalen - 4); i++) {
- DHD_ERROR(("%02X ", dump_data[i]));
- if ((i & 15) == 15)
- printk("\n");
}
- DHD_ERROR(("\n"));
-#endif /* DHD_TX_FULL_DUMP */
- if (protocol == ETHER_TYPE_802_1X) {
- DHD_ERROR(("ETHER_TYPE_802_1X: ver %d, type %d, replay %d\n",
- dump_data[14], dump_data[15], dump_data[30]));
+#ifdef DHDENABLE_TAILPAD
+	/* if a padding packet is needed, insert it at the end of the linked list */
+ if (pad_pkt_len) {
+ PKTSETLEN(osh, bus->pad_pkt, pad_pkt_len);
+ PKTSETNEXT(osh, pkt, bus->pad_pkt);
}
-#endif /* DHD_TX_DUMP */
+#endif /* DHDENABLE_TAILPAD */
- /* Add space for the header */
- PKTPUSH(osh, pkt, SDPCM_HDRLEN);
- ASSERT(ISALIGNED((uintptr)PKTDATA(osh, pkt), 2));
+	/* dhd_bcmsdh_send_buf ignores the buffer pointer if the packet
+	 * parameter is not NULL; for a single (non-chained) packet we pass a NULL
+	 * pkt pointer so it takes the aligned length and buffer pointer.
+ */
+ pkt_chain = PKTNEXT(osh, head_pkt) ? head_pkt : NULL;
+ ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+ PKTDATA(osh, head_pkt), total_len, pkt_chain, NULL, NULL, TXRETRIES);
+ if (ret == BCME_OK)
+ bus->tx_seq = (bus->tx_seq + num_pkt) % SDPCM_SEQUENCE_WRAP;
- prec = PRIO2PREC((PKTPRIO(pkt) & PRIOMASK));
-#ifndef DHDTHREAD
- /* Lock: we're about to use shared data/code (and SDIO) */
- dhd_os_sdlock(bus->dhd);
-#endif /* DHDTHREAD */
+	/* if a padding packet was needed, remove it from the linked list as it is not a data pkt */
+ if (pad_pkt_len && pkt)
+ PKTSETNEXT(osh, pkt, NULL);
- /* Check for existing queue, current flow-control, pending event, or pending clock */
- if (dhd_deferred_tx || bus->fcstate || pktq_len(&bus->txq) || bus->dpc_sched ||
- (!DATAOK(bus)) || (bus->flowcontrol & NBITVAL(prec)) ||
- (bus->clkstate != CLK_AVAIL)) {
- DHD_TRACE(("%s: deferring pktq len %d\n", __FUNCTION__,
- pktq_len(&bus->txq)));
- bus->fcqueued++;
+done:
+ pkt = head_pkt;
+ while (pkt) {
+ void *pkt_next = PKTNEXT(osh, pkt);
+ PKTSETNEXT(osh, pkt, NULL);
+ dhdsdio_txpkt_postprocess(bus, pkt);
+ pkt = pkt_next;
+ }
- /* Priority based enq */
- dhd_os_sdlock_txq(bus->dhd);
- if (dhd_prec_enq(bus->dhd, &bus->txq, pkt, prec) == FALSE) {
- PKTPULL(osh, pkt, SDPCM_HDRLEN);
-#ifndef DHDTHREAD
- /* Need to also release txqlock before releasing sdlock.
- * This thread still has txqlock and releases sdlock.
- * Deadlock happens when dpc() grabs sdlock first then
- * attempts to grab txqlock.
- */
- dhd_os_sdunlock_txq(bus->dhd);
- dhd_os_sdunlock(bus->dhd);
-#endif
-#ifdef PROP_TXSTATUS
- if (bus->dhd->wlfc_state)
- dhd_wlfc_txcomplete(bus->dhd, pkt, FALSE);
- else
-#endif
- dhd_txcomplete(bus->dhd, pkt, FALSE);
-#ifndef DHDTHREAD
- dhd_os_sdlock(bus->dhd);
- dhd_os_sdlock_txq(bus->dhd);
-#endif
+ /* new packets might be allocated due to insufficient room for padding, but we
+	 * still have to indicate the original packets to the upper layer
+ */
+ for (i = 0; i < num_pkt; i++) {
+ pkt = pkts[i];
+ wlfc_enabled = FALSE;
#ifdef PROP_TXSTATUS
- /* let the caller decide whether to free the packet */
- if (!bus->dhd->wlfc_state)
-#endif
- PKTFREE(osh, pkt, TRUE);
- ret = BCME_NORESOURCE;
- }
- else
- ret = BCME_OK;
-
- if ((pktq_len(&bus->txq) >= FCHI) && dhd_doflow)
- dhd_txflowcontrol(bus->dhd, ALL_INTERFACES, ON);
-
-#ifdef DHD_DEBUG
- if (pktq_plen(&bus->txq, prec) > qcount[prec])
- qcount[prec] = pktq_plen(&bus->txq, prec);
-#endif
- dhd_os_sdunlock_txq(bus->dhd);
-
- /* Schedule DPC if needed to send queued packet(s) */
- if (dhd_deferred_tx && !bus->dpc_sched) {
- bus->dpc_sched = TRUE;
- dhd_sched_dpc(bus->dhd);
+ if (DHD_PKTTAG_WLFCPKT(PKTTAG(pkt))) {
+ wlfc_enabled = (dhd_wlfc_txcomplete(bus->dhd, pkt, ret == 0) !=
+ WLFC_UNSUPPORTED);
}
- } else {
-#ifdef DHDTHREAD
- /* Lock: we're about to use shared data/code (and SDIO) */
- dhd_os_sdlock(bus->dhd);
-#endif /* DHDTHREAD */
-
- /* Otherwise, send it now */
- BUS_WAKE(bus);
- /* Make sure back plane ht clk is on, no pending allowed */
- dhdsdio_clkctl(bus, CLK_AVAIL, TRUE);
-#ifndef SDTEST
- ret = dhdsdio_txpkt(bus, pkt, SDPCM_DATA_CHANNEL, TRUE, FALSE);
-#else
- ret = dhdsdio_txpkt(bus, pkt,
- (bus->ext_loop ? SDPCM_TEST_CHANNEL : SDPCM_DATA_CHANNEL), TRUE, FALSE);
-#endif
- if (ret)
- bus->dhd->tx_errors++;
- else
- bus->dhd->dstats.tx_bytes += datalen;
-
- if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
- bus->activity = FALSE;
- dhdsdio_clkctl(bus, CLK_NONE, TRUE);
+#endif /* PROP_TXSTATUS */
+ if (!wlfc_enabled) {
+ PKTSETNEXT(osh, pkt, NULL);
+ dhd_txcomplete(bus->dhd, pkt, ret != 0);
+ if (free_pkt)
+ PKTFREE(osh, pkt, TRUE);
}
-
-#ifdef DHDTHREAD
- dhd_os_sdunlock(bus->dhd);
-#endif /* DHDTHREAD */
}
-#ifndef DHDTHREAD
- dhd_os_sdunlock(bus->dhd);
-#endif /* DHDTHREAD */
+ for (i = 0; i < new_pkt_num; i++)
+ PKTFREE(osh, new_pkts[i], TRUE);
return ret;
}
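+/* Usage sketch: dhd_bus_txdata() above submits a single packet as
+ *   dhdsdio_txpkt(bus, chan, &pkt, 1, TRUE);
+ * while dhdsdio_sendfromq() below hands over an array of up to
+ * MAX_TX_PKTCHAIN_CNT dequeued packets at a time when tx glom is enabled.
+ */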
static uint
dhdsdio_sendfromq(dhd_bus_t *bus, uint maxframes)
{
- void *pkt;
- uint32 intstatus = 0;
- uint retries = 0;
- int ret = 0, prec_out;
uint cnt = 0;
- uint datalen;
uint8 tx_prec_map;
uint16 txpktqlen = 0;
-#ifdef BCMSDIOH_TXGLOM
- uint i;
- uint8 glom_cnt;
-#endif
-
+ uint32 intstatus = 0;
+ uint retries = 0;
+ osl_t *osh;
+ uint datalen = 0;
dhd_pub_t *dhd = bus->dhd;
sdpcmd_regs_t *regs = bus->regs;
return BCME_NODEVICE;
}
+ osh = dhd->osh;
tx_prec_map = ~bus->flowcontrol;
+ for (cnt = 0; (cnt < maxframes) && DATAOK(bus);) {
+ int i;
+ int num_pkt = 1;
+ void *pkts[MAX_TX_PKTCHAIN_CNT];
+ int prec_out;
- /* Send frames until the limit or some other event */
- for (cnt = 0; (cnt < maxframes) && DATAOK(bus); cnt++) {
-#ifdef BCMSDIOH_TXGLOM
- if (bus->glom_enable) {
- void *pkttable[SDPCM_MAXGLOM_SIZE];
- dhd_os_sdlock_txq(bus->dhd);
- glom_cnt = MIN(DATABUFCNT(bus), bus->glomsize);
- glom_cnt = MIN(glom_cnt, pktq_mlen(&bus->txq, tx_prec_map));
- glom_cnt = MIN(glom_cnt, maxframes-cnt);
-
- /* Limiting the size to 2pkts in case of copy */
- if (bus->glom_mode == SDPCM_TXGLOM_CPY)
- glom_cnt = MIN(glom_cnt, 10);
-
- for (i = 0; i < glom_cnt; i++)
- pkttable[i] = pktq_mdeq(&bus->txq, tx_prec_map, &prec_out);
-
- txpktqlen = pktq_len(&bus->txq);
- dhd_os_sdunlock_txq(bus->dhd);
-
- if (glom_cnt == 0)
+ dhd_os_sdlock_txq(bus->dhd);
+ if (bus->txglom_enable) {
+ num_pkt = MIN((uint32)DATABUFCNT(bus), (uint32)bus->txglomsize);
+ num_pkt = MIN(num_pkt, ARRAYSIZE(pkts));
+ }
+ num_pkt = MIN(num_pkt, pktq_mlen(&bus->txq, tx_prec_map));
+ for (i = 0; i < num_pkt; i++) {
+ pkts[i] = pktq_mdeq(&bus->txq, ~bus->flowcontrol, &prec_out);
+ if (!pkts[i]) {
+ DHD_ERROR(("%s: pktq_mlen non-zero when no pkt\n",
+ __FUNCTION__));
+ ASSERT(0);
break;
- datalen = 0;
- for (i = 0; i < glom_cnt; i++) {
- uint datalen_tmp = 0;
-
- if ((pkt = pkttable[i]) == NULL) {
- /* This case should not happen */
- DHD_ERROR(("No pkts in the queue for glomming\n"));
- break;
- }
-
- datalen_tmp = (PKTLEN(bus->dhd->osh, pkt) - SDPCM_HDRLEN);
-
-#ifndef SDTEST
- ret = dhdsdio_txpkt(bus,
- pkt,
- SDPCM_DATA_CHANNEL,
- TRUE,
- (i == (glom_cnt-1))? FALSE: TRUE);
-#else
- ret = dhdsdio_txpkt(bus,
- pkt,
- (bus->ext_loop ? SDPCM_TEST_CHANNEL : SDPCM_DATA_CHANNEL),
- TRUE,
- (i == (glom_cnt-1))? FALSE: TRUE);
-#endif
- if (ret == BCME_OK)
- datalen += datalen_tmp;
}
- cnt += i-1;
- } else
-#endif /* BCMSDIOH_TXGLOM */
- {
- dhd_os_sdlock_txq(bus->dhd);
- if ((pkt = pktq_mdeq(&bus->txq, tx_prec_map, &prec_out)) == NULL) {
- txpktqlen = pktq_len(&bus->txq);
- dhd_os_sdunlock_txq(bus->dhd);
- break;
+ PKTORPHAN(pkts[i]);
+ datalen += PKTLEN(osh, pkts[i]);
}
- txpktqlen = pktq_len(&bus->txq);
dhd_os_sdunlock_txq(bus->dhd);
- datalen = PKTLEN(bus->dhd->osh, pkt) - SDPCM_HDRLEN;
-
-#ifndef SDTEST
- ret = dhdsdio_txpkt(bus, pkt, SDPCM_DATA_CHANNEL, TRUE, FALSE);
-#else
- ret = dhdsdio_txpkt(bus,
- pkt,
- (bus->ext_loop ? SDPCM_TEST_CHANNEL : SDPCM_DATA_CHANNEL),
- TRUE,
- FALSE);
-#endif
- }
- if (ret)
- bus->dhd->tx_errors++;
+ if (i == 0)
+ break;
+ if (dhdsdio_txpkt(bus, SDPCM_DATA_CHANNEL, pkts, i, TRUE) != BCME_OK)
+ dhd->tx_errors++;
else
- bus->dhd->dstats.tx_bytes += datalen;
+ dhd->dstats.tx_bytes += datalen;
+ cnt += i;
/* In poll mode, need to check for other events */
if (!bus->intr && cnt)
if (intstatus & bus->hostintmask)
bus->ipend = TRUE;
}
+
}
- /* Deflow-control stack if needed */
- if (dhd_doflow && dhd->up && (dhd->busstate == DHD_BUS_DATA) &&
- dhd->txoff && (txpktqlen < FCLOW))
- dhd_txflowcontrol(dhd, ALL_INTERFACES, OFF);
+ dhd_os_sdlock_txq(bus->dhd);
+ txpktqlen = pktq_len(&bus->txq);
+ dhd_os_sdunlock_txq(bus->dhd);
+
+ /* Do flow-control if needed */
+ if (dhd->up && (dhd->busstate == DHD_BUS_DATA) && (txpktqlen < FCLOW)) {
+ bool wlfc_enabled = FALSE;
+#ifdef PROP_TXSTATUS
+ wlfc_enabled = (dhd_wlfc_flowcontrol(dhd, OFF, TRUE) != WLFC_UNSUPPORTED);
+#endif
+ if (!wlfc_enabled && dhd_doflow && dhd->txoff) {
+ dhd_txflowcontrol(dhd, ALL_INTERFACES, OFF);
+ }
+ }
return cnt;
}
dhdsdio_sendpendctl(dhd_bus_t *bus)
{
bcmsdh_info_t *sdh = bus->sdh;
- int ret, i;
+ int ret;
uint8* frame_seq = bus->ctrl_frame_buf + SDPCM_FRAMETAG_LEN;
-#ifdef BCMSDIOH_TXGLOM
- if (bus->glom_enable)
+ if (bus->txglom_enable)
frame_seq += SDPCM_HWEXT_LEN;
-#endif
if (*frame_seq != bus->tx_seq) {
DHD_INFO(("%s IOCTL frame seq lag detected!"
ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
(uint8 *)bus->ctrl_frame_buf, (uint32)bus->ctrl_frame_len,
- NULL, NULL, NULL);
- ASSERT(ret != BCME_PENDING);
- if (ret == BCME_NODEVICE) {
- DHD_ERROR(("%s: Device asleep already\n", __FUNCTION__));
- } else if (ret < 0) {
- /* On failure, abort the command and terminate the frame */
- DHD_INFO(("%s: sdio error %d, abort command and terminate frame.\n",
- __FUNCTION__, ret));
- bus->tx_sderrs++;
-
- bcmsdh_abort(sdh, SDIO_FUNC_2);
-
- bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL,
- SFC_WF_TERM, NULL);
- bus->f1regdata++;
-
- for (i = 0; i < 3; i++) {
- uint8 hi, lo;
- hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
- SBSDIO_FUNC1_WFRAMEBCHI, NULL);
- lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
- SBSDIO_FUNC1_WFRAMEBCLO, NULL);
- bus->f1regdata += 2;
- if ((hi == 0) && (lo == 0))
- break;
- }
- }
- if (ret == 0) {
+ NULL, NULL, NULL, 1);
+ if (ret == BCME_OK)
bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
- }
bus->ctrl_frame_stat = FALSE;
dhd_wait_event_wakeup(bus->dhd);
int
dhd_bus_txctl(struct dhd_bus *bus, uchar *msg, uint msglen)
{
+ static int err_nodevice = 0;
uint8 *frame;
uint16 len;
uint32 swheader;
- uint retries = 0;
bcmsdh_info_t *sdh = bus->sdh;
uint8 doff = 0;
int ret = -1;
- int i;
+ uint8 sdpcm_hdrlen = bus->txglom_enable ? SDPCM_HDRLEN_TXGLOM : SDPCM_HDRLEN;
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
return -EIO;
/* Back the pointer to make a room for bus header */
- frame = msg - SDPCM_HDRLEN;
- len = (msglen += SDPCM_HDRLEN);
+ frame = msg - sdpcm_hdrlen;
+ len = (msglen += sdpcm_hdrlen);
/* Add alignment padding (optional for ctl frames) */
if (dhd_alignctl) {
frame -= doff;
len += doff;
msglen += doff;
- bzero(frame, doff + SDPCM_HDRLEN);
+ bzero(frame, doff + sdpcm_hdrlen);
}
ASSERT(doff < DHD_SDALIGN);
}
- doff += SDPCM_HDRLEN;
+ doff += sdpcm_hdrlen;
/* Round send length to next SDIO block */
if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
*(uint16*)frame = htol16((uint16)msglen);
*(((uint16*)frame) + 1) = htol16(~msglen);
-#ifdef BCMSDIOH_TXGLOM
- if (bus->glom_enable) {
+ if (bus->txglom_enable) {
uint32 hwheader1, hwheader2;
/* Software tag: channel, sequence number, data offset */
swheader = ((SDPCM_CONTROL_CHANNEL << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK)
*(uint16*)frame = htol16(len);
*(((uint16*)frame) + 1) = htol16(~(len));
- } else
-#endif /* BCMSDIOH_TXGLOM */
- {
- /* Software tag: channel, sequence number, data offset */
- swheader = ((SDPCM_CONTROL_CHANNEL << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK)
- | bus->tx_seq | ((doff << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK);
- htol32_ua_store(swheader, frame + SDPCM_FRAMETAG_LEN);
- htol32_ua_store(0, frame + SDPCM_FRAMETAG_LEN + sizeof(swheader));
+ } else {
+ /* Software tag: channel, sequence number, data offset */
+ swheader = ((SDPCM_CONTROL_CHANNEL << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK)
+ | bus->tx_seq | ((doff << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK);
+ htol32_ua_store(swheader, frame + SDPCM_FRAMETAG_LEN);
+ htol32_ua_store(0, frame + SDPCM_FRAMETAG_LEN + sizeof(swheader));
}
if (!TXCTLOK(bus)) {
DHD_INFO(("%s: No bus credit bus->tx_max %d, bus->tx_seq %d\n",
}
bus->dhd->txcnt_timeout = 0;
+ bus->ctrl_frame_stat = TRUE;
if (ret == -1) {
#ifdef DHD_DEBUG
prhex("TxHdr", frame, MIN(len, 16));
}
#endif
-
- do {
- ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
- frame, len, NULL, NULL, NULL);
- ASSERT(ret != BCME_PENDING);
-
- if (ret == BCME_NODEVICE) {
- DHD_ERROR(("%s: Device asleep already\n", __FUNCTION__));
- } else if (ret < 0) {
- /* On failure, abort the command and terminate the frame */
- DHD_INFO(("%s: sdio error %d, abort command and terminate frame.\n",
- __FUNCTION__, ret));
- bus->tx_sderrs++;
-
- bcmsdh_abort(sdh, SDIO_FUNC_2);
-
- bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL,
- SFC_WF_TERM, NULL);
- bus->f1regdata++;
-
- for (i = 0; i < 3; i++) {
- uint8 hi, lo;
- hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
- SBSDIO_FUNC1_WFRAMEBCHI, NULL);
- lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1,
- SBSDIO_FUNC1_WFRAMEBCLO, NULL);
- bus->f1regdata += 2;
- if ((hi == 0) && (lo == 0))
- break;
- }
- }
- if (ret == 0) {
- bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
- }
- } while ((ret < 0) && retries++ < TXRETRIES);
+ ret = dhd_bcmsdh_send_buf(bus, bcmsdh_cur_sbwad(sdh), SDIO_FUNC_2, F2SYNC,
+ frame, len, NULL, NULL, NULL, TXRETRIES);
+ if (ret == BCME_OK)
+ bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
}
+ bus->ctrl_frame_stat = FALSE;
done:
if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
if (bus->dhd->txcnt_timeout >= MAX_CNTL_TX_TIMEOUT)
return -ETIMEDOUT;
- return ret ? -EIO : 0;
+ if (ret == BCME_NODEVICE)
+ err_nodevice++;
+ else
+ err_nodevice = 0;
+
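+	/* Consecutive BCME_NODEVICE failures map to -ETIMEDOUT once
+	 * ERROR_BCME_NODEVICE_MAX is reached; any other failure maps to -EIO.
+	 */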
+ return ret ? err_nodevice >= ERROR_BCME_NODEVICE_MAX ? -ETIMEDOUT : -EIO : 0;
}
int
DHD_ERROR(("%s: resumed on timeout\n", __FUNCTION__));
#endif /* DHD_DEBUG */
#ifdef DHD_DEBUG
- dhd_os_sdlock(bus->dhd);
- dhdsdio_checkdied(bus, NULL, 0);
- dhd_os_sdunlock(bus->dhd);
+ dhd_os_sdlock(bus->dhd);
+ dhdsdio_checkdied(bus, NULL, 0);
+ dhd_os_sdunlock(bus->dhd);
#endif /* DHD_DEBUG */
} else if (pending == TRUE) {
/* signal pending */
DHD_ERROR(("%s: signal pending\n", __FUNCTION__));
return -EINTR;
+
} else {
DHD_CTL(("%s: resumed for unknown reason?\n", __FUNCTION__));
#ifdef DHD_DEBUG
IOV_SDALIGN,
IOV_DEVRESET,
IOV_CPU,
-#if defined(SDIO_CRC_ERROR_FIX)
+#if defined(USE_SDIOFIFO_IOVAR)
IOV_WATERMARK,
IOV_MESBUSYCTRL,
-#endif /* SDIO_CRC_ERROR_FIX */
+#endif /* USE_SDIOFIFO_IOVAR */
#ifdef SDTEST
IOV_PKTGEN,
IOV_EXTLOOP,
#endif
IOV_TXGLOMSIZE,
IOV_TXGLOMMODE,
- IOV_HANGREPORT
+ IOV_HANGREPORT,
+ IOV_TXINRX_THRES
};
const bcm_iovar_t dhdsdio_iovars[] = {
{"extloop", IOV_EXTLOOP, 0, IOVT_BOOL, 0 },
{"pktgen", IOV_PKTGEN, 0, IOVT_BUFFER, sizeof(dhd_pktgen_t) },
#endif /* SDTEST */
-#if defined(SDIO_CRC_ERROR_FIX)
+#if defined(USE_SDIOFIFO_IOVAR)
{"watermark", IOV_WATERMARK, 0, IOVT_UINT32, 0 },
{"mesbusyctrl", IOV_MESBUSYCTRL, 0, IOVT_UINT32, 0 },
-#endif /* SDIO_CRC_ERROR_FIX */
+#endif /* USE_SDIOFIFO_IOVAR */
{"devcap", IOV_DEVCAP, 0, IOVT_UINT32, 0 },
{"dngl_isolation", IOV_DONGLEISOLATION, 0, IOVT_UINT32, 0 },
{"kso", IOV_KSO, 0, IOVT_UINT32, 0 },
{"fwpath", IOV_FWPATH, 0, IOVT_BUFFER, 0 },
#endif
{"txglomsize", IOV_TXGLOMSIZE, 0, IOVT_UINT32, 0 },
- {"txglommode", IOV_TXGLOMMODE, 0, IOVT_UINT32, 0 },
{"fw_hang_report", IOV_HANGREPORT, 0, IOVT_BOOL, 0 },
+ {"txinrx_thres", IOV_TXINRX_THRES, 0, IOVT_INT32, 0 },
{NULL, 0, 0, 0, 0 }
};
bus->pollrate, bus->pollcnt, bus->regfails);
bcm_bprintf(strbuf, "\nAdditional counters:\n");
+#ifdef DHDENABLE_TAILPAD
+ bcm_bprintf(strbuf, "tx_tailpad_chain %u tx_tailpad_pktget %u\n",
+ bus->tx_tailpad_chain, bus->tx_tailpad_pktget);
+#endif /* DHDENABLE_TAILPAD */
bcm_bprintf(strbuf, "tx_sderrs %u fcqueued %u rxrtx %u rx_toolong %u rxc_errors %u\n",
bus->tx_sderrs, bus->fcqueued, bus->rxrtx, bus->rx_toolong,
bus->rxc_errors);
bus->intrcount = bus->lastintrs = bus->spurious = bus->regfails = 0;
bus->rxrtx = bus->rx_toolong = bus->rxc_errors = 0;
bus->rx_hdrfail = bus->rx_badhdr = bus->rx_badseq = 0;
+#ifdef DHDENABLE_TAILPAD
+ bus->tx_tailpad_chain = bus->tx_tailpad_pktget = 0;
+#endif /* DHDENABLE_TAILPAD */
bus->tx_sderrs = bus->fc_rcvd = bus->fc_xoff = bus->fc_xon = 0;
bus->rxglomfail = bus->rxglomframes = bus->rxglompkts = 0;
bus->f2rxhdrs = bus->f2rxdata = bus->f2txdata = bus->f1regdata = 0;
int rv, i;
uint32 shaddr = 0;
+ if (CHIPID(bus->sih->chip) == BCM43430_CHIP_ID && !dhdsdio_sr_cap(bus))
+ bus->srmemsize = 0;
+
shaddr = bus->dongle_ram_base + bus->ramsize - 4;
i = 0;
do {
return 0;
/* Read console log struct */
- addr = bus->console_addr + OFFSETOF(hndrte_cons_t, log);
+ addr = bus->console_addr + OFFSETOF(hnd_cons_t, log);
if ((rv = dhdsdio_membytes(bus, FALSE, addr, (uint8 *)&c->log, sizeof(c->log))) < 0)
return rv;
n--;
line[n] = 0;
printf("CONSOLE: %s\n", line);
+#ifdef LOG_INTO_TCPDUMP
+ dhd_sendup_log(bus->dhd, line, n);
+#endif /* LOG_INTO_TCPDUMP */
}
}
break2:
ltoh32(tr.r0), ltoh32(tr.r1), ltoh32(tr.r2), ltoh32(tr.r3),
ltoh32(tr.r4), ltoh32(tr.r5), ltoh32(tr.r6), ltoh32(tr.r7));
- addr = sdpcm_shared.console_addr + OFFSETOF(hndrte_cons_t, log);
+ addr = sdpcm_shared.console_addr + OFFSETOF(hnd_cons_t, log);
if ((rv = dhdsdio_membytes(bus, FALSE, addr,
(uint8 *)&console_ptr, sizeof(console_ptr))) < 0)
goto printbuf;
- addr = sdpcm_shared.console_addr + OFFSETOF(hndrte_cons_t, log.buf_size);
+ addr = sdpcm_shared.console_addr + OFFSETOF(hnd_cons_t, log.buf_size);
if ((rv = dhdsdio_membytes(bus, FALSE, addr,
(uint8 *)&console_size, sizeof(console_size))) < 0)
goto printbuf;
- addr = sdpcm_shared.console_addr + OFFSETOF(hndrte_cons_t, log.idx);
+ addr = sdpcm_shared.console_addr + OFFSETOF(hnd_cons_t, log.idx);
if ((rv = dhdsdio_membytes(bus, FALSE, addr,
(uint8 *)&console_index, sizeof(console_index))) < 0)
goto printbuf;
else if (bus->sih->chip == BCM4334_CHIP_ID ||
bus->sih->chip == BCM43340_CHIP_ID ||
bus->sih->chip == BCM43341_CHIP_ID ||
+ bus->sih->chip == BCM43342_CHIP_ID ||
0) {
if (enable) {
/* Moved to PMU chipcontrol 1 from 4330 */
if (bus->dhd->up) {
if (bus->intr) {
DHD_INTR(("%s: enable SDIO device interrupts\n", __FUNCTION__));
+			// terence 20141207: enable intdis
+ bus->intdis = TRUE;
bcmsdh_intr_enable(bus->sdh);
} else {
DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__));
* If address is start of RAM (i.e. a downloaded image),
* store the reset instruction to be written in 0
*/
- if (address == bus->dongle_ram_base) {
+ if (set && address == bus->dongle_ram_base) {
bus->resetinstr = *(((uint32*)params) + 2);
}
} else {
sd_ptr = (sdreg_t *)params;
- addr = (uintptr)bus->regs + sd_ptr->offset;
+ addr = (uint32)((ulong)bus->regs + sd_ptr->offset);
size = sd_ptr->func;
int_val = (int32)bcmsdh_reg_read(bus->sdh, addr, size);
if (bcmsdh_regfail(bus->sdh))
sd_ptr = (sdreg_t *)params;
- addr = (uintptr)bus->regs + sd_ptr->offset;
+ addr = (uint32)((ulong)bus->regs + sd_ptr->offset);
size = sd_ptr->func;
bcmsdh_reg_write(bus->sdh, addr, size, sd_ptr->value);
if (bcmsdh_regfail(bus->sdh))
break;
-
#endif /* DHD_DEBUG */
break;
#endif /* SDTEST */
-#if defined(SDIO_CRC_ERROR_FIX)
+#if defined(USE_SDIOFIFO_IOVAR)
case IOV_GVAL(IOV_WATERMARK):
int_val = (int32)watermark;
bcopy(&int_val, arg, val_size);
bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_MESBUSYCTRL,
((uint8)mesbusyctrl | 0x80), NULL);
break;
-#endif /* SDIO_CRC_ERROR_FIX */
+#endif
+
case IOV_GVAL(IOV_DONGLEISOLATION):
int_val = bus->dhd->dongle_isolation;
dhd_bus_devreset(bus->dhd, (uint8)bool_val);
break;
-#ifdef SOFTAP
- case IOV_GVAL(IOV_FWPATH):
- {
- uint32 fw_path_len;
-
- fw_path_len = strlen(bus->fw_path);
- DHD_INFO(("[softap] get fwpath, l=%d\n", len));
-
- if (fw_path_len > len-1) {
- bcmerror = BCME_BUFTOOSHORT;
- break;
- }
-
- if (fw_path_len) {
- bcopy(bus->fw_path, arg, fw_path_len);
- ((uchar*)arg)[fw_path_len] = 0;
- }
- break;
- }
-
- case IOV_SVAL(IOV_FWPATH):
- DHD_INFO(("[softap] set fwpath, idx=%d\n", int_val));
-
- switch (int_val) {
- case 1:
- bus->fw_path = fw_path; /* ordinary one */
- break;
- case 2:
- bus->fw_path = fw_path2;
- break;
- default:
- bcmerror = BCME_BADARG;
- break;
- }
-
- DHD_INFO(("[softap] new fw path: %s\n", (bus->fw_path[0] ? bus->fw_path : "NULL")));
- break;
+ /*
+ * softap firmware is updated through module parameter or android private command
+ */
-#endif /* SOFTAP */
case IOV_GVAL(IOV_DEVRESET):
DHD_TRACE(("%s: Called get IOV_DEVRESET\n", __FUNCTION__));
case IOV_SVAL(IOV_DEVCAP):
dhdsdio_devcap_set(bus, (uint8) int_val);
break;
-
-#ifdef BCMSDIOH_TXGLOM
case IOV_GVAL(IOV_TXGLOMSIZE):
- int_val = (int32)bus->glomsize;
+ int_val = (int32)bus->txglomsize;
bcopy(&int_val, arg, val_size);
break;
if (int_val > SDPCM_MAXGLOM_SIZE) {
bcmerror = BCME_ERROR;
} else {
- bus->glomsize = (uint)int_val;
- }
- break;
- case IOV_GVAL(IOV_TXGLOMMODE):
- int_val = (int32)bus->glom_mode;
- bcopy(&int_val, arg, val_size);
- break;
-
- case IOV_SVAL(IOV_TXGLOMMODE):
- if ((int_val != SDPCM_TXGLOM_CPY) && (int_val != SDPCM_TXGLOM_MDESC)) {
- bcmerror = BCME_RANGE;
- } else {
- if ((bus->glom_mode = bcmsdh_set_mode(bus->sdh, (uint)int_val)) != int_val)
- bcmerror = BCME_ERROR;
+ bus->txglomsize = (uint)int_val;
}
break;
-#endif /* BCMSDIOH_TXGLOM */
case IOV_SVAL(IOV_HANGREPORT):
bus->dhd->hang_report = bool_val;
DHD_ERROR(("%s: Set hang_report as %d\n", __FUNCTION__, bus->dhd->hang_report));
int_val = (int32)bus->dhd->hang_report;
bcopy(&int_val, arg, val_size);
break;
+
+ case IOV_GVAL(IOV_TXINRX_THRES):
+ int_val = bus->txinrx_thres;
+ bcopy(&int_val, arg, val_size);
+ break;
+ case IOV_SVAL(IOV_TXINRX_THRES):
+ if (int_val < 0) {
+ bcmerror = BCME_BADARG;
+ } else {
+ bus->txinrx_thres = int_val;
+ }
+ break;
+
default:
bcmerror = BCME_UNSUPPORTED;
break;
if (REMAP_ENAB(bus) && si_socdevram_remap_isenb(bus->sih))
dhdsdio_devram_remap(bus, FALSE);
+ if (CHIPID(bus->sih->chip) == BCM43430_CHIP_ID) {
+ /* Disabling Remap for SRAM_3 */
+ si_socram_set_bankpda(bus->sih, 0x3, 0x0);
+ }
+
/* Clear the top bit of memory */
if (bus->ramsize) {
uint32 zeros = 0;
DHD_INFO(("%s: noted %s update, value now %d\n",
__FUNCTION__, "sd_blocksize", bus->blocksize));
- if ((bus->sih->chip == BCM4335_CHIP_ID) ||
- (bus->sih->chip == BCM4339_CHIP_ID))
- dhd_overflow_war(bus);
+ dhdsdio_tune_fifoparam(bus);
}
}
bus->roundup = MIN(max_roundup, bus->blocksize);
uint8 saveclk;
uint retries;
int err;
+ bool wlfc_enabled = FALSE;
+
if (!bus->dhd)
return;
osh = bus->dhd->osh;
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
- bcmsdh_waitlockfree(NULL);
+ bcmsdh_waitlockfree(bus->sdh);
if (enforce_mutex)
dhd_os_sdlock(bus->dhd);
bus->hostintmask = 0;
bcmsdh_intr_disable(bus->sdh);
} else {
+
BUS_WAKE(bus);
/* Change our idea of bus state */
if (KSO_ENAB(bus)) {
- /* Enable clock for device interrupts */
- dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+ /* Enable clock for device interrupts */
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
- /* Disable and clear interrupts at the chip level also */
- W_SDREG(0, &bus->regs->hostintmask, retries);
- local_hostintmask = bus->hostintmask;
- bus->hostintmask = 0;
+ /* Disable and clear interrupts at the chip level also */
+ W_SDREG(0, &bus->regs->hostintmask, retries);
+ local_hostintmask = bus->hostintmask;
+ bus->hostintmask = 0;
- /* Force clocks on backplane to be sure F2 interrupt propagates */
- saveclk = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
- if (!err) {
- bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
- (saveclk | SBSDIO_FORCE_HT), &err);
- }
- if (err) {
- DHD_ERROR(("%s: Failed to force clock for F2: err %d\n", __FUNCTION__, err));
- }
+ /* Force clocks on backplane to be sure F2 interrupt propagates */
+ saveclk = bcmsdh_cfg_read(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR, &err);
+ if (!err) {
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_CHIPCLKCSR,
+ (saveclk | SBSDIO_FORCE_HT), &err);
+ }
+ if (err) {
+ DHD_ERROR(("%s: Failed to force clock for F2: err %d\n",
+ __FUNCTION__, err));
+ }
- /* Turn off the bus (F2), free any pending packets */
- DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__));
- bcmsdh_intr_disable(bus->sdh);
- bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, SDIO_FUNC_ENABLE_1, NULL);
+ /* Turn off the bus (F2), free any pending packets */
+ DHD_INTR(("%s: disable SDIO interrupts\n", __FUNCTION__));
+#if !defined(NDISVER) || (NDISVER < 0x0630)
+ bcmsdh_intr_disable(bus->sdh);
+#endif /* !defined(NDISVER) || (NDISVER < 0x0630) */
+ bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_0, SDIOD_CCCR_IOEN, SDIO_FUNC_ENABLE_1, NULL);
- /* Clear any pending interrupts now that F2 is disabled */
- W_SDREG(local_hostintmask, &bus->regs->intstatus, retries);
+ /* Clear any pending interrupts now that F2 is disabled */
+ W_SDREG(local_hostintmask, &bus->regs->intstatus, retries);
}
/* Turn off the backplane clock (only) */
dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
}
- /* Clear the data packet queues */
- pktq_flush(osh, &bus->txq, TRUE, NULL, 0);
+#ifdef PROP_TXSTATUS
+ wlfc_enabled = (dhd_wlfc_cleanup_txq(bus->dhd, NULL, 0) != WLFC_UNSUPPORTED);
+#endif
+ if (!wlfc_enabled) {
+#ifdef DHDTCPACK_SUPPRESS
+ /* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
+ * when there is a newly coming packet from network stack.
+ */
+ dhd_tcpack_info_tbl_clean(bus->dhd);
+#endif /* DHDTCPACK_SUPPRESS */
+ /* Clear the data packet queues */
+ pktq_flush(osh, &bus->txq, TRUE, NULL, 0);
+ }
/* Clear any held glomming stuff */
if (bus->glomd)
dhd_os_sdunlock(bus->dhd);
}
-#ifdef BCMSDIOH_TXGLOM
+#if defined(BCMSDIOH_TXGLOM) && defined(BCMSDIOH_STD)
+extern uint sd_txglom;
+#endif
void
dhd_txglom_enable(dhd_pub_t *dhdp, bool enable)
{
+ /* can't enable host txglom by default, some platforms have no
+ * (or crappy) ADMA support and txglom will cause kernel assertions (e.g.
+ * panda board)
+ */
dhd_bus_t *bus = dhdp->bus;
-
+#ifdef BCMSDIOH_TXGLOM
char buf[256];
uint32 rxglom;
int32 ret;
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+#ifdef BCMSDIOH_STD
+ if (enable)
+ enable = sd_txglom;
+#endif /* BCMSDIOH_STD */
+
if (enable) {
rxglom = 1;
memset(buf, 0, sizeof(buf));
- bcm_mkiovar("bus:rxglom",
- (void *)&rxglom,
- 4, buf, sizeof(buf));
- ret = dhd_wl_ioctl_cmd(dhdp,
- WLC_SET_VAR, buf,
- sizeof(buf), TRUE, 0);
- if (!(ret < 0)) {
- bus->glom_enable = TRUE;
- }
- } else {
- bus->glom_enable = FALSE;
- }
-}
+ bcm_mkiovar("bus:rxglom", (void *)&rxglom, 4, buf, sizeof(buf));
+ ret = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0);
+ if (ret >= 0)
+ bus->txglom_enable = TRUE;
+ else {
+#ifdef BCMSDIOH_STD
+ sd_txglom = 0;
+#endif /* BCMSDIOH_STD */
+ bus->txglom_enable = FALSE;
+ }
+ } else
#endif /* BCMSDIOH_TXGLOM */
+ bus->txglom_enable = FALSE;
+}
int
dhd_bus_init(dhd_pub_t *dhdp, bool enforce_mutex)
bus->hostintmask |= I_XMTDATA_AVAIL;
}
W_SDREG(bus->hostintmask, &bus->regs->hostintmask, retries);
-#ifdef SDIO_CRC_ERROR_FIX
- if (bus->blocksize < 512) {
- mesbusyctrl = watermark = bus->blocksize / 4;
- }
-#endif /* SDIO_CRC_ERROR_FIX */
- if (!((bus->sih->chip == BCM4335_CHIP_ID) ||
- (bus->sih->chip == BCM4339_CHIP_ID))) {
+
+ if (bus->sih->buscorerev < 15) {
bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_WATERMARK,
(uint8)watermark, &err);
}
-#ifdef SDIO_CRC_ERROR_FIX
- bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_FUNC1_MESBUSYCTRL,
- (uint8)mesbusyctrl|0x80, &err);
- bcmsdh_cfg_write(bus->sdh, SDIO_FUNC_1, SBSDIO_DEVICE_CTL,
- SBSDIO_DEVCTL_EN_F2_BLK_WATERMARK, NULL);
-#endif /* SDIO_CRC_ERROR_FIX */
/* Set bus state according to enable result */
dhdp->busstate = DHD_BUS_DATA;
sublen = ltoh16_ua(dptr);
dlen -= sizeof(uint16);
dptr += sizeof(uint16);
- if ((sublen < SDPCM_HDRLEN_RX) ||
- ((num == 0) && (sublen < (2 * SDPCM_HDRLEN_RX)))) {
+ if ((sublen < SDPCM_HDRLEN) ||
+ ((num == 0) && (sublen < (2 * SDPCM_HDRLEN)))) {
DHD_ERROR(("%s: descriptor len %d bad: %d\n",
__FUNCTION__, num, sublen));
pnext = NULL;
} else if (SDPCM_GLOMDESC(&dptr[SDPCM_FRAMETAG_LEN])) {
DHD_ERROR(("%s (superframe): got second descriptor?\n", __FUNCTION__));
errcode = -1;
- } else if ((doff < SDPCM_HDRLEN_RX) ||
- (doff > (PKTLEN(osh, pfirst) - SDPCM_HDRLEN_RX))) {
+ } else if ((doff < SDPCM_HDRLEN) ||
+ (doff > (PKTLEN(osh, pfirst) - SDPCM_HDRLEN))) {
DHD_ERROR(("%s (superframe): Bad data offset %d: HW %d pkt %d min %d\n",
__FUNCTION__, doff, sublen, PKTLEN(osh, pfirst),
- SDPCM_HDRLEN_RX));
+ SDPCM_HDRLEN));
errcode = -1;
}
}
/* Check window for sanity */
- if ((uint8)(txmax - bus->tx_seq) > 0x40) {
+ if ((uint8)(txmax - bus->tx_seq) > 0x70) {
DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n",
__FUNCTION__, txmax, bus->tx_seq));
txmax = bus->tx_max;
"len/check 0x%04x/0x%04x\n",
__FUNCTION__, num, sublen, check));
errcode = -1;
- } else if ((sublen > dlen) || (sublen < SDPCM_HDRLEN_RX)) {
+ } else if ((sublen > dlen) || (sublen < SDPCM_HDRLEN)) {
DHD_ERROR(("%s (subframe %d): length mismatch: "
"len 0x%04x, expect 0x%04x\n",
__FUNCTION__, num, sublen, dlen));
DHD_ERROR(("%s (subframe %d): bad channel %d\n",
__FUNCTION__, num, chan));
errcode = -1;
- } else if ((doff < SDPCM_HDRLEN_RX) || (doff > sublen)) {
+ } else if ((doff < SDPCM_HDRLEN) || (doff > sublen)) {
DHD_ERROR(("%s (subframe %d): Bad data offset %d: HW %d min %d\n",
- __FUNCTION__, num, doff, sublen, SDPCM_HDRLEN_RX));
+ __FUNCTION__, num, doff, sublen, SDPCM_HDRLEN));
errcode = -1;
}
}
for (rxseq = bus->rx_seq, rxleft = maxframes;
!bus->rxskip && rxleft && bus->dhd->busstate != DHD_BUS_DOWN;
rxseq++, rxleft--) {
-
-#ifdef DHDTHREAD
+#ifdef DHDTCPACK_SUP_DBG
+ if (bus->dhd->tcpack_sup_mode != TCPACK_SUP_DELAYTX) {
+ if (bus->dotxinrx == FALSE)
+				DHD_ERROR(("%s %d: dotxinrx FALSE with tcpack_sup_mode %d\n",
+ __FUNCTION__, __LINE__, bus->dhd->tcpack_sup_mode));
+ }
+#ifdef DEBUG_COUNTER
+ else if (pktq_mlen(&bus->txq, ~bus->flowcontrol) > 0) {
+ tack_tbl.cnt[bus->dotxinrx ? 6 : 7]++;
+ }
+#endif /* DEBUG_COUNTER */
+#endif /* DHDTCPACK_SUP_DBG */
/* tx more to improve rx performance */
if (TXCTLOK(bus) && bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL)) {
dhdsdio_sendpendctl(bus);
- } else if ((bus->clkstate == CLK_AVAIL) && !bus->fcstate &&
- pktq_mlen(&bus->txq, ~bus->flowcontrol) && DATAOK(bus)) {
+ } else if (bus->dotxinrx && (bus->clkstate == CLK_AVAIL) &&
+ !bus->fcstate && DATAOK(bus) &&
+ (pktq_mlen(&bus->txq, ~bus->flowcontrol) > bus->txinrx_thres)) {
dhdsdio_sendfromq(bus, dhd_txbound);
+#ifdef DHDTCPACK_SUPPRESS
+			/* In TCPACK_SUP_DELAYTX mode, do txinrx only if there is
+			 * 1. any DATA packet to TX, or
+			 * 2. a TCP ACK for a TCP DATA (PSH) packet
+			 * in the bus txq.
+			 */
+ bus->dotxinrx = (bus->dhd->tcpack_sup_mode == TCPACK_SUP_DELAYTX) ?
+ FALSE : TRUE;
+#endif
}
-#endif /* DHDTHREAD */
/* Handle glomming separately */
if (bus->glom || bus->glomd) {
dhd_os_sdunlock_rxq(bus->dhd);
/* Now check the header */
- bcopy(rxbuf, bus->rxhdr, SDPCM_HDRLEN_RX);
+ bcopy(rxbuf, bus->rxhdr, SDPCM_HDRLEN);
/* Extract hardware header fields */
len = ltoh16_ua(bus->rxhdr);
}
/* Validate frame length */
- if (len < SDPCM_HDRLEN_RX) {
+ if (len < SDPCM_HDRLEN) {
DHD_ERROR(("%s (nextlen): HW hdr length invalid: %d\n",
__FUNCTION__, len));
dhd_os_sdlock_rxq(bus->dhd);
}
/* Check window for sanity */
- if ((uint8)(txmax - bus->tx_seq) > 0x40) {
+ if ((uint8)(txmax - bus->tx_seq) > 0x70) {
DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n",
__FUNCTION__, txmax, bus->tx_seq));
txmax = bus->tx_max;
if (DHD_BYTES_ON() && DHD_DATA_ON()) {
prhex("Rx Data", rxbuf, len);
} else if (DHD_HDRS_ON()) {
- prhex("RxHdr", bus->rxhdr, SDPCM_HDRLEN_RX);
+ prhex("RxHdr", bus->rxhdr, SDPCM_HDRLEN);
}
#endif
}
/* Validate data offset */
- if ((doff < SDPCM_HDRLEN_RX) || (doff > len)) {
+ if ((doff < SDPCM_HDRLEN) || (doff > len)) {
DHD_ERROR(("%s (nextlen): bad data offset %d: HW len %d min %d\n",
- __FUNCTION__, doff, len, SDPCM_HDRLEN_RX));
+ __FUNCTION__, doff, len, SDPCM_HDRLEN));
dhd_os_sdlock_rxq(bus->dhd);
PKTFREE2();
dhd_os_sdunlock_rxq(bus->dhd);
#ifdef DHD_DEBUG
if (DHD_BYTES_ON() || DHD_HDRS_ON()) {
- prhex("RxHdr", bus->rxhdr, SDPCM_HDRLEN_RX);
+ prhex("RxHdr", bus->rxhdr, SDPCM_HDRLEN);
}
#endif
}
/* Validate frame length */
- if (len < SDPCM_HDRLEN_RX) {
+ if (len < SDPCM_HDRLEN) {
DHD_ERROR(("%s: HW hdr length invalid: %d\n", __FUNCTION__, len));
continue;
}
txmax = SDPCM_WINDOW_VALUE(&bus->rxhdr[SDPCM_FRAMETAG_LEN]);
/* Validate data offset */
- if ((doff < SDPCM_HDRLEN_RX) || (doff > len)) {
+ if ((doff < SDPCM_HDRLEN) || (doff > len)) {
DHD_ERROR(("%s: Bad data offset %d: HW len %d, min %d seq %d\n",
- __FUNCTION__, doff, len, SDPCM_HDRLEN_RX, seq));
+ __FUNCTION__, doff, len, SDPCM_HDRLEN, seq));
bus->rx_badhdr++;
ASSERT(0);
dhdsdio_rxfail(bus, FALSE, FALSE);
}
/* Check window for sanity */
- if ((uint8)(txmax - bus->tx_seq) > 0x40) {
+ if ((uint8)(txmax - bus->tx_seq) > 0x70) {
DHD_ERROR(("%s: got unlikely tx max %d with tx_seq %d\n",
__FUNCTION__, txmax, bus->tx_seq));
txmax = bus->tx_max;
}
#endif
PKTSETLEN(osh, pkt, len);
- ASSERT(doff == SDPCM_HDRLEN_RX);
- PKTPULL(osh, pkt, SDPCM_HDRLEN_RX);
+ ASSERT(doff == SDPCM_HDRLEN);
+ PKTPULL(osh, pkt, SDPCM_HDRLEN);
bus->glomd = pkt;
} else {
DHD_ERROR(("%s: glom superframe w/o descriptor!\n", __FUNCTION__));
bool resched = FALSE; /* Flag indicating resched wanted */
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+ dhd_os_sdlock(bus->dhd);
+
if (bus->dhd->busstate == DHD_BUS_DOWN) {
DHD_ERROR(("%s: Bus down, ret\n", __FUNCTION__));
bus->intstatus = 0;
+ dhd_os_sdunlock(bus->dhd);
return 0;
}
/* Start with leftover status bits */
intstatus = bus->intstatus;
- dhd_os_sdlock(bus->dhd);
-
- // terence 20131025: fix kernel panic issue if user is enabling and disabling Wi-Fi
- if (bus->dhd->busstate == DHD_BUS_DOWN) {
- DHD_ERROR(("%s: Bus down 2, ret\n", __FUNCTION__));
- bus->intstatus = 0;
- dhd_os_sdunlock(bus->dhd);
- return 0;
- }
-
if (!SLPAUTO_ENAB(bus) && !KSO_ENAB(bus)) {
DHD_ERROR(("%s: Device asleep\n", __FUNCTION__));
goto exit;
intstatus &= ~I_CHIPACTIVE;
}
+ if (intstatus & I_HMB_FC_STATE) {
+ DHD_INFO(("Dongle reports HMB_FC_STATE\n"));
+ intstatus &= ~I_HMB_FC_STATE;
+ }
+
/* Ignore frame indications if rxskip is set */
if (bus->rxskip) {
intstatus &= ~FRAME_AVAIL_MASK(bus);
__FUNCTION__, rxdone, framecnt));
bus->intdis = FALSE;
#if defined(OOB_INTR_ONLY)
- bcmsdh_oob_intr_set(1);
+ bcmsdh_oob_intr_set(bus->sdh, TRUE);
#endif /* defined(OOB_INTR_ONLY) */
+#if !defined(NDISVER) || (NDISVER < 0x0630)
bcmsdh_intr_enable(sdh);
+#endif /* !defined(NDISVER) || (NDISVER < 0x0630) */
}
#if defined(OOB_INTR_ONLY) && !defined(HW_OOB)
#endif /* defined(OOB_INTR_ONLY) && !defined(HW_OOB) */
#ifdef PROP_TXSTATUS
- dhd_wlfc_trigger_pktcommit(bus->dhd);
+ dhd_wlfc_commit_packets(bus->dhd, (f_commitpkt_t)dhd_bus_txdata, (void *)bus, NULL, FALSE);
#endif
if (TXCTLOK(bus) && bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL))
exit:
if (!resched && dhd_dpcpoll) {
- if (dhdsdio_readframes(bus, dhd_rxbound, &rxdone) != 0) {
+ if (dhdsdio_readframes(bus, dhd_rxbound, &rxdone) != 0)
resched = TRUE;
- }
}
dhd_os_sdunlock(bus->dhd);
DHD_ERROR(("dhdsdio_isr() w/o interrupt configured!\n"));
}
+#if !defined(NDISVER) || (NDISVER < 0x0630)
bcmsdh_intr_disable(sdh);
+#endif /* !defined(NDISVER) || (NDISVER < 0x0630) */
bus->intdis = TRUE;
#if defined(SDIO_ISR_THREAD)
DHD_OS_WAKE_UNLOCK(bus->dhd);
#else
+#if !defined(NDISVER) || (NDISVER < 0x0630)
bus->dpc_sched = TRUE;
dhd_sched_dpc(bus->dhd);
+#endif /* !defined(NDISVER) || (NDISVER < 0x0630) */
-#endif
+#endif /* defined(SDIO_ISR_THREAD) */
}
#endif
/* Send it */
- if (dhdsdio_txpkt(bus, pkt, SDPCM_TEST_CHANNEL, TRUE, FALSE)) {
+ if (dhdsdio_txpkt(bus, SDPCM_TEST_CHANNEL, &pkt, 1, TRUE) != BCME_OK) {
bus->pktgen_fail++;
if (bus->pktgen_stop && bus->pktgen_stop == bus->pktgen_fail)
bus->pktgen_count = 0;
*data++ = (uint8)(count >> 24);
/* Send it */
- if (dhdsdio_txpkt(bus, pkt, SDPCM_TEST_CHANNEL, TRUE, FALSE))
+ if (dhdsdio_txpkt(bus, SDPCM_TEST_CHANNEL, &pkt, 1, TRUE) != BCME_OK)
bus->pktgen_fail++;
}
case SDPCM_TEST_ECHOREQ:
/* Rx->Tx turnaround ok (even on NDIS w/current implementation) */
*(uint8 *)(PKTDATA(osh, pkt)) = SDPCM_TEST_ECHORSP;
- if (dhdsdio_txpkt(bus, pkt, SDPCM_TEST_CHANNEL, TRUE, FALSE) == 0) {
+ if (dhdsdio_txpkt(bus, SDPCM_TEST_CHANNEL, &pkt, 1, TRUE) == BCME_OK) {
bus->pktgen_sent++;
} else {
bus->pktgen_fail++;
}
#endif /* SDTEST */
-extern void
-dhd_disable_intr(dhd_pub_t *dhdp)
+int dhd_bus_oob_intr_register(dhd_pub_t *dhdp)
{
- dhd_bus_t *bus;
- bus = dhdp->bus;
- bcmsdh_intr_disable(bus->sdh);
+ int err = 0;
+
+#if defined(OOB_INTR_ONLY)
+ err = bcmsdh_oob_intr_register(dhdp->bus->sdh, dhdsdio_isr, dhdp->bus);
+#endif
+ return err;
+}
+
+void dhd_bus_oob_intr_unregister(dhd_pub_t *dhdp)
+{
+#if defined(OOB_INTR_ONLY)
+ bcmsdh_oob_intr_unregister(dhdp->bus->sdh);
+#endif
+}
+
+void dhd_bus_oob_intr_set(dhd_pub_t *dhdp, bool enable)
+{
+#if defined(OOB_INTR_ONLY)
+ bcmsdh_oob_intr_set(dhdp->bus->sdh, enable);
+#endif
+}
+
+void dhd_bus_dev_pm_stay_awake(dhd_pub_t *dhdpub)
+{
+ bcmsdh_dev_pm_stay_awake(dhdpub->bus->sdh);
+}
+
+void dhd_bus_dev_pm_relax(dhd_pub_t *dhdpub)
+{
+ bcmsdh_dev_relax(dhdpub->bus->sdh);
+}
+
+bool dhd_bus_dev_pm_enabled(dhd_pub_t *dhdpub)
+{
+ bool enabled = FALSE;
+
+ enabled = bcmsdh_dev_pm_enabled(dhdpub->bus->sdh);
+ return enabled;
}
extern bool
if (bus->dhd->dongle_reset)
return FALSE;
+ if (bus->dhd->hang_was_sent) {
+ dhd_os_wd_timer(bus->dhd, 0);
+ return FALSE;
+ }
+
/* Ignore the timer if simulating bus down */
if (!SLPAUTO_ENAB(bus) && bus->sleeping)
return FALSE;
if (dhdp->busstate == DHD_BUS_DOWN)
return FALSE;
+ dhd_os_sdlock(bus->dhd);
+
/* Poll period: check device if appropriate. */
if (!SLPAUTO_ENAB(bus) && (bus->poll && (++bus->polltick >= bus->pollrate))) {
uint32 intstatus = 0;
}
bus->dpc_sched = TRUE;
dhd_sched_dpc(bus->dhd);
-
}
}
}
#else
if ((bus->idletime > 0) && (bus->clkstate == CLK_AVAIL)) {
- if (++bus->idlecount > bus->idletime) {
+ if (++bus->idlecount >= bus->idletime) {
bus->idlecount = 0;
if (bus->activity) {
bus->activity = FALSE;
}
#endif /* DHD_USE_IDLECOUNT */
+ dhd_os_sdunlock(bus->dhd);
+
return bus->ipend;
}
dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
/* Zero cbuf_index */
- addr = bus->console_addr + OFFSETOF(hndrte_cons_t, cbuf_idx);
+ addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf_idx);
val = htol32(0);
if ((rv = dhdsdio_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
goto done;
/* Write message into cbuf */
- addr = bus->console_addr + OFFSETOF(hndrte_cons_t, cbuf);
+ addr = bus->console_addr + OFFSETOF(hnd_cons_t, cbuf);
if ((rv = dhdsdio_membytes(bus, TRUE, addr, (uint8 *)msg, msglen)) < 0)
goto done;
/* Write length into vcons_in */
- addr = bus->console_addr + OFFSETOF(hndrte_cons_t, vcons_in);
+ addr = bus->console_addr + OFFSETOF(hnd_cons_t, vcons_in);
val = htol32(msglen);
if ((rv = dhdsdio_membytes(bus, TRUE, addr, (uint8 *)&val, sizeof(val))) < 0)
goto done;
* sdpcm_sendup (RX) checks for virtual console input.
*/
if ((pkt = PKTGET(bus->dhd->osh, 4 + SDPCM_RESERVE, TRUE)) != NULL)
- dhdsdio_txpkt(bus, pkt, SDPCM_EVENT_CHANNEL, TRUE, FALSE);
+ rv = dhdsdio_txpkt(bus, SDPCM_EVENT_CHANNEL, &pkt, 1, TRUE);
done:
if ((bus->idletime == DHD_IDLE_IMMEDIATE) && !bus->dpc_sched) {
return TRUE;
if (chipid == BCM4339_CHIP_ID)
return TRUE;
+ if (chipid == BCM43349_CHIP_ID)
+ return TRUE;
+ if (chipid == BCM4345_CHIP_ID)
+ return TRUE;
if (chipid == BCM4350_CHIP_ID)
return TRUE;
+ if (chipid == BCM4354_CHIP_ID)
+ return TRUE;
+ if (chipid == BCM4356_CHIP_ID)
+ return TRUE;
+ if (chipid == BCM4358_CHIP_ID)
+ return TRUE;
+ if (chipid == BCM43430_CHIP_ID)
+ return TRUE;
+ if (BCM4349_CHIP(chipid))
+ return TRUE;
return FALSE;
}
{
int ret;
dhd_bus_t *bus;
-#ifdef GET_CUSTOM_MAC_ENABLE
- struct ether_addr ea_addr;
-#endif /* GET_CUSTOM_MAC_ENABLE */
#if defined(MULTIPLE_SUPPLICANT)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
}
if (osh == NULL) {
- /* Ask the OS interface part for an OSL handle */
- if (!(osh = dhd_osl_attach(sdh, DHD_BUS))) {
- DHD_ERROR(("%s: osl_attach failed!\n", __FUNCTION__));
- goto forcereturn;
- }
+ DHD_ERROR(("%s: osh is NULL!\n", __FUNCTION__));
+ goto forcereturn;
}
/* Allocate private bus interface state */
bus->sdh = sdh;
bus->cl_devid = (uint16)devid;
bus->bus = DHD_BUS;
+ bus->bus_num = bus_no;
+ bus->slot_num = slot;
bus->tx_seq = SDPCM_SEQUENCE_WRAP - 1;
bus->usebufpool = FALSE; /* Use bufpool if allocated, else use locally malloced rxbuf */
- /* attach the common module */
- dhd_common_init(osh);
-
/* attempt to attach to the dongle */
if (!(dhdsdio_probe_attach(bus, osh, sdh, regsva, devid))) {
DHD_ERROR(("%s: dhdsdio_probe_attach failed\n", __FUNCTION__));
goto fail;
}
-
+
#ifdef PROP_TXSTATUS
- if (bus->sih->chip == BCM4330_CHIP_ID ||
- bus->sih->chip == BCM43362_CHIP_ID ) {
- // terence 20131215: disable_proptx should be set before dhd_attach
- printf("%s: disable prop_txstatus\n", __FUNCTION__);
- disable_proptx = TRUE;
+ // terence 20131215: disable_proptx should be set before dhd_attach
+ if ((bus->sih->chip == BCM43362_CHIP_ID) || (bus->sih->chip == BCM4330_CHIP_ID)) {
+ printf("%s: Disable prop_txstatus\n", __FUNCTION__);
+ disable_proptx = 1;
}
#endif
DHD_INFO(("%s: completed!!\n", __FUNCTION__));
-#ifdef GET_CUSTOM_MAC_ENABLE
- /* Read MAC address from external customer place */
- memset(&ea_addr, 0, sizeof(ea_addr));
- ret = dhd_custom_get_mac_address(ea_addr.octet);
- if (!ret) {
- memcpy(bus->dhd->mac.octet, (void *)&ea_addr, ETHER_ADDR_LEN);
- }
-#endif /* GET_CUSTOM_MAC_ENABLE */
-
/* if firmware path present try to download and bring up bus */
bus->dhd->hang_report = TRUE;
if (dhd_download_fw_on_driverload) {
if ((ret = dhd_bus_start(bus->dhd)) != 0) {
DHD_ERROR(("%s: dhd_bus_start failed\n", __FUNCTION__));
- goto fail;
+ goto fail;
}
}
/* Ok, have the per-port tell the stack we're open for business */
- if (dhd_net_attach(bus->dhd, 0) != 0) {
+ if (dhd_register_if(bus->dhd, 0, TRUE) != 0) {
DHD_ERROR(("%s: Net attach failed!!\n", __FUNCTION__));
goto fail;
}
+
#if defined(MULTIPLE_SUPPLICANT)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
mutex_unlock(&_dhd_sdio_mutex_lock_);
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
#endif
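+	/* bus_sleep is waited on by dhdsdio_suspend() until the bus enters sleep */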
+ init_waitqueue_head(&bus->bus_sleep);
+
return bus;
fail:
return TRUE;
}
#endif /* REGON_BP_HANG_FIX */
-
static bool
dhdsdio_probe_attach(struct dhd_bus *bus, osl_t *osh, void *sdh, void *regsva,
uint16 devid)
DHD_ERROR(("%s: FAILED to return to SI_ENUM_BASE\n", __FUNCTION__));
}
+#if defined(DHD_DEBUG)
+ DHD_ERROR(("F1 signature read @0x18000000=0x%4x\n",
+ bcmsdh_reg_read(bus->sdh, SI_ENUM_BASE, 4)));
+#endif
+
/* Force PLL off until si_attach() programs PLL control regs */
goto fail;
}
-
#ifdef DHD_DEBUG
DHD_ERROR(("F1 signature OK, socitype:0x%x chip:0x%4x rev:0x%x pkg:0x%x\n",
- bus->sih->socitype, bus->sih->chip, bus->sih->chiprev,
- bus->sih->chippkg));
+ bus->sih->socitype, bus->sih->chip, bus->sih->chiprev, bus->sih->chippkg));
#endif /* DHD_DEBUG */
#ifdef REGON_BP_HANG_FIX
switch ((uint16)bus->sih->chip) {
case BCM4335_CHIP_ID:
case BCM4339_CHIP_ID:
+ case BCM43349_CHIP_ID:
bus->dongle_ram_base = CR4_4335_RAM_BASE;
break;
case BCM4350_CHIP_ID:
+ case BCM4354_CHIP_ID:
+ case BCM4356_CHIP_ID:
+ case BCM4358_CHIP_ID:
bus->dongle_ram_base = CR4_4350_RAM_BASE;
break;
case BCM4360_CHIP_ID:
bus->dongle_ram_base = CR4_4360_RAM_BASE;
break;
+ case BCM4345_CHIP_ID:
+ bus->dongle_ram_base = CR4_4345_RAM_BASE;
+ break;
+ case BCM4349_CHIP_GRPID:
+ bus->dongle_ram_base = CR4_4349_RAM_BASE;
+ break;
default:
bus->dongle_ram_base = 0;
DHD_ERROR(("%s: WARNING: Using default ram base at 0x%x\n",
if ((bus->poll = (bool)dhd_poll))
bus->pollrate = 1;
-#ifdef BCMSDIOH_TXGLOM
- /* Setting default Glom mode */
- bus->glom_mode = bcmsdh_set_mode(bus->sdh, SDPCM_DEFGLOM_MODE);
/* Setting default Glom size */
- bus->glomsize = SDPCM_DEFGLOM_SIZE;
-#endif
+ bus->txglomsize = SDPCM_DEFGLOM_SIZE;
return TRUE;
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
if (bus->dhd->maxctl) {
- bus->rxblen = ROUNDUP((bus->dhd->maxctl + SDPCM_HDRLEN), ALIGNMENT) + DHD_SDALIGN;
- if (!(bus->rxbuf = DHD_OS_PREALLOC(osh, DHD_PREALLOC_RXBUF, bus->rxblen))) {
+ bus->rxblen = ROUNDUP((bus->dhd->maxctl+SDPCM_HDRLEN), ALIGNMENT) + DHD_SDALIGN;
+ if (!(bus->rxbuf = DHD_OS_PREALLOC(bus->dhd, DHD_PREALLOC_RXBUF, bus->rxblen))) {
DHD_ERROR(("%s: MALLOC of %d-byte rxbuf failed\n",
__FUNCTION__, bus->rxblen));
goto fail;
}
}
/* Allocate buffer to receive glomed packet */
- if (!(bus->databuf = DHD_OS_PREALLOC(osh, DHD_PREALLOC_DATABUF, MAX_DATA_BUF))) {
+ if (!(bus->databuf = DHD_OS_PREALLOC(bus->dhd, DHD_PREALLOC_DATABUF, MAX_DATA_BUF))) {
DHD_ERROR(("%s: MALLOC of %d-byte databuf failed\n",
__FUNCTION__, MAX_DATA_BUF));
/* release rxbuf which was already located as above */
if (!bus->rxblen)
- DHD_OS_PREFREE(osh, bus->rxbuf, bus->rxblen);
+ DHD_OS_PREFREE(bus->dhd, bus->rxbuf, bus->rxblen);
goto fail;
}
DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+ bus->_srenab = FALSE;
+
#ifdef SDTEST
dhdsdio_pktgen_init(bus);
#endif /* SDTEST */
DHD_INFO(("%s: Initial value for %s is %d\n",
__FUNCTION__, "sd_blocksize", bus->blocksize));
- if ((bus->sih->chip == BCM4335_CHIP_ID) ||
- (bus->sih->chip == BCM4339_CHIP_ID))
- dhd_overflow_war(bus);
+ dhdsdio_tune_fifoparam(bus);
}
bus->roundup = MIN(max_roundup, bus->blocksize);
+#ifdef DHDENABLE_TAILPAD
+ if (bus->pad_pkt)
+ PKTFREE(osh, bus->pad_pkt, FALSE);
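+	/* Preallocate a block-size scratch packet up front so the TX path can
+	 * use it for tail padding without allocating at transmit time.
+	 */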
+ bus->pad_pkt = PKTGET(osh, SDIO_MAX_BLOCK_SIZE, FALSE);
+ if (bus->pad_pkt == NULL)
+ DHD_ERROR(("failed to allocate padding packet\n"));
+ else {
+ int alignment_offset = 0;
+ uintptr pktprt = (uintptr)PKTDATA(osh, bus->pad_pkt);
+ if (!(pktprt&1) && (pktprt = (pktprt % DHD_SDALIGN)))
+ PKTPUSH(osh, bus->pad_pkt, alignment_offset);
+ PKTSETNEXT(osh, bus->pad_pkt, NULL);
+ }
+#endif /* DHDENABLE_TAILPAD */
+
/* Query if bus module supports packet chaining, default to use if supported */
if (bcmsdh_iovar_op(sdh, "sd_rxchain", NULL, 0,
&bus->sd_rxchain, sizeof(int32), FALSE) != BCME_OK) {
__FUNCTION__, (bus->sd_rxchain ? "supports" : "does not support")));
}
bus->use_rxchain = (bool)bus->sd_rxchain;
+ bus->txinrx_thres = CUSTOM_TXINRX_THRES;
+ /* TX first in dhdsdio_readframes() */
+ bus->dotxinrx = TRUE;
return TRUE;
}
-bool
+int
dhd_bus_download_firmware(struct dhd_bus *bus, osl_t *osh,
char *pfw_path, char *pnv_path, char *pconf_path)
{
- bool ret;
+ int ret;
+
bus->fw_path = pfw_path;
bus->nv_path = pnv_path;
bus->dhd->conf_path = pconf_path;
return ret;
}
-static bool
+static int
dhdsdio_download_firmware(struct dhd_bus *bus, osl_t *osh, void *sdh)
{
- bool ret;
+ int ret;
+
+ DHD_TRACE_HW4(("%s: firmware path=%s, nvram path=%s\n",
+ __FUNCTION__, bus->fw_path, bus->nv_path));
DHD_OS_WAKE_LOCK(bus->dhd);
/* Download the firmware */
/* External conf takes precedence if specified */
dhd_conf_preinit(bus->dhd);
- dhd_conf_download_config(bus->dhd);
+ dhd_conf_read_config(bus->dhd);
+ dhd_conf_set_fw_name_by_chip(bus->dhd, bus->fw_path);
dhd_conf_set_fw_path(bus->dhd, bus->fw_path);
dhd_conf_set_nv_path(bus->dhd, bus->nv_path);
+ dhd_conf_set_fw_name_by_mac(bus->dhd, bus->sdh, bus->fw_path);
+ dhd_conf_set_nv_name_by_mac(bus->dhd, bus->sdh, bus->nv_path);
printk("Final fw_path=%s\n", bus->fw_path);
printk("Final nv_path=%s\n", bus->nv_path);
printk("Final conf_path=%s\n", bus->dhd->conf_path);
- ret = _dhdsdio_download_firmware(bus) == 0;
+ ret = _dhdsdio_download_firmware(bus);
dhdsdio_clkctl(bus, CLK_SDONLY, FALSE);
MFREE(osh, bus->console.buf, bus->console.bufsize);
#endif
+#ifdef DHDENABLE_TAILPAD
+ if (bus->pad_pkt)
+ PKTFREE(osh, bus->pad_pkt, FALSE);
+#endif /* DHDENABLE_TAILPAD */
+
MFREE(osh, bus, sizeof(dhd_bus_t));
}
- if (osh)
- dhd_osl_detach(osh);
-
DHD_TRACE(("%s: Disconnected\n", __FUNCTION__));
}
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) */
#endif /* LINUX */
+
DHD_TRACE(("%s: Disconnected\n", __FUNCTION__));
}
+static int
+dhdsdio_suspend(void *context)
+{
+ int ret = 0;
+
+ dhd_bus_t *bus = (dhd_bus_t*)context;
+ int wait_time = 0;
+ if (bus->idletime > 0) {
+ wait_time = msecs_to_jiffies(bus->idletime * dhd_watchdog_ms);
+ }
+
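+	/* Refuse to suspend (non-zero return) while a wakelock is held, or when
+	 * SLPAUTO is enabled and the bus fails to enter sleep within roughly
+	 * bus->idletime watchdog periods.
+	 */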
+ ret = dhd_os_check_wakelock(bus->dhd);
+ // terence 20141124: fix for suspend issue
+ if (SLPAUTO_ENAB(bus) && (!ret) && (bus->dhd->up)) {
+ if (wait_event_timeout(bus->bus_sleep, bus->sleeping, wait_time) == 0) {
+ if (!bus->sleeping) {
+ return 1;
+ }
+ }
+ }
+ return ret;
+}
+
+static int
+dhdsdio_resume(void *context)
+{
+#if defined(OOB_INTR_ONLY)
+ dhd_bus_t *bus = (dhd_bus_t*)context;
+
+ if (dhd_os_check_if_up(bus->dhd))
+ bcmsdh_oob_intr_set(bus->sdh, TRUE);
+#endif
+ return 0;
+}
+
/* Register/Unregister functions are called by the main DHD entry
* point (e.g. module insertion) to link with the bus driver, in
static bcmsdh_driver_t dhd_sdio = {
dhdsdio_probe,
- dhdsdio_disconnect
+ dhdsdio_disconnect,
+ dhdsdio_suspend,
+ dhdsdio_resume
};
int
static int
dhd_bcmsdh_send_buf(dhd_bus_t *bus, uint32 addr, uint fn, uint flags, uint8 *buf, uint nbytes,
- void *pkt, bcmsdh_cmplt_fn_t complete, void *handle)
+ void *pkt, bcmsdh_cmplt_fn_t complete, void *handle, int max_retry)
{
+ int ret;
+ int i = 0;
+ int retries = 0;
+ bcmsdh_info_t *sdh;
+
if (!KSO_ENAB(bus)) {
DHD_ERROR(("%s: Device asleep\n", __FUNCTION__));
return BCME_NODEVICE;
}
- return (bcmsdh_send_buf(bus->sdh, addr, fn, flags, buf, nbytes, pkt, complete, handle));
-}
+ sdh = bus->sdh;
+ do {
+ ret = bcmsdh_send_buf(bus->sdh, addr, fn, flags, buf, nbytes,
+ pkt, complete, handle);
-#ifdef BCMSDIOH_TXGLOM
-static void
-dhd_bcmsdh_glom_post(dhd_bus_t *bus, uint8 *frame, void *pkt, uint len)
-{
- bcmsdh_glom_post(bus->sdh, frame, pkt, len);
-}
+ bus->f2txdata++;
+ ASSERT(ret != BCME_PENDING);
-static void
-dhd_bcmsdh_glom_clear(dhd_bus_t *bus)
-{
- bcmsdh_glom_clear(bus->sdh);
+ if (ret == BCME_NODEVICE) {
+ DHD_ERROR(("%s: Device asleep already\n", __FUNCTION__));
+ } else if (ret < 0) {
+ /* On failure, abort the command and terminate the frame */
+ DHD_ERROR(("%s: sdio error %d, abort command and terminate frame.\n",
+ __FUNCTION__, ret));
+ bus->tx_sderrs++;
+ bus->f1regdata++;
+ bus->dhd->tx_errors++;
+ bcmsdh_abort(sdh, SDIO_FUNC_2);
+ bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_FRAMECTRL,
+ SFC_WF_TERM, NULL);
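+			/* Poll the write-frame byte counters until the terminated
+			 * frame has fully drained (both counters read zero).
+			 */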
+ for (i = 0; i < READ_FRM_CNT_RETRIES; i++) {
+ uint8 hi, lo;
+ hi = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WFRAMEBCHI,
+ NULL);
+ lo = bcmsdh_cfg_read(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_WFRAMEBCLO,
+ NULL);
+ bus->f1regdata += 2;
+ if ((hi == 0) && (lo == 0))
+ break;
+ }
+ }
+ } while ((ret < 0) && retrydata && ++retries < max_retry);
+
+ return ret;
}
-#endif
uint
dhd_bus_chip(struct dhd_bus *bus)
return bus->sih->chip;
}
+uint
+dhd_bus_chiprev(struct dhd_bus *bus)
+{
+ ASSERT(bus);
+ ASSERT(bus->sih != NULL);
+ return bus->sih->chiprev;
+}
+
void *
dhd_bus_pub(struct dhd_bus *bus)
{
return bus->dhd;
}
+void *
+dhd_bus_sih(struct dhd_bus *bus)
+{
+ return (void *)bus->sih;
+}
+
void *
dhd_bus_txq(struct dhd_bus *bus)
{
uint
dhd_bus_hdrlen(struct dhd_bus *bus)
{
- return SDPCM_HDRLEN;
+ return (bus->txglom_enable) ? SDPCM_HDRLEN_TXGLOM : SDPCM_HDRLEN;
+}
+
+void
+dhd_bus_set_dotxinrx(struct dhd_bus *bus, bool val)
+{
+ bus->dotxinrx = val;
}
int
#if defined(OOB_INTR_ONLY)
/* Clean up any pending IRQ */
- bcmsdh_set_irq(FALSE);
+ dhd_enable_oob_intr(bus, FALSE);
+ bcmsdh_oob_intr_set(bus->sdh, FALSE);
+ bcmsdh_oob_intr_unregister(bus->sdh);
#endif
/* Clean tx/rx buffer pointers, detach from the dongle */
bus->dhd->dongle_reset = TRUE;
bus->dhd->up = FALSE;
-#ifdef BCMSDIOH_TXGLOM
dhd_txglom_enable(dhdp, FALSE);
-#endif
dhd_os_sdunlock(dhdp);
- DHD_ERROR(("%s: WLAN OFF DONE\n", __FUNCTION__));
+ printk("%s: WLAN OFF DONE\n", __FUNCTION__);
/* App can now remove power from device */
} else
bcmerror = BCME_SDIO_ERROR;
} else {
/* App must have restored power to device before calling */
- DHD_ERROR(("\n\n%s: == WLAN ON ==\n", __FUNCTION__));
+ printk("\n\n%s: == WLAN ON ==\n", __FUNCTION__);
if (bus->dhd->dongle_reset) {
/* Turn on WLAN */
-#ifdef DHDTHREAD
dhd_os_sdlock(dhdp);
-#endif /* DHDTHREAD */
/* Reset SD client */
bcmsdh_reset(bus->sdh);
/* Attempt to re-attach & download */
if (dhdsdio_probe_attach(bus, bus->dhd->osh, bus->sdh,
- (uint32 *)SI_ENUM_BASE,
- bus->cl_devid)) {
+ (uint32 *)SI_ENUM_BASE,
+ bus->cl_devid)) {
/* Attempt to download binary to the dongle */
- dhd_conf_set_fw_name_by_chip(dhdp, fw_path, firmware_path); // terence
if (dhdsdio_probe_init(bus, bus->dhd->osh, bus->sdh) &&
- dhdsdio_download_firmware(bus, bus->dhd->osh, bus->sdh)) {
+ dhdsdio_download_firmware(bus, bus->dhd->osh, bus->sdh) >= 0) {
/* Re-init bus, enable F2 transfer */
bcmerror = dhd_bus_init((dhd_pub_t *) bus->dhd, FALSE);
if (bcmerror == BCME_OK) {
- bcmsdh_set_drvdata(dhdp); // terence 20131214: fix for null pointer issue
#if defined(OOB_INTR_ONLY)
- /* make sure oob intr get registered */
- if (!bcmsdh_is_oob_intr_registered()) {
- sdioh_start(NULL, 1);
- bcmsdh_register_oob_intr(dhdp);
- dhdp->iswl = TRUE;
- }
-
- bcmsdh_set_irq(TRUE);
dhd_enable_oob_intr(bus, TRUE);
+ bcmsdh_oob_intr_register(bus->sdh,
+ dhdsdio_isr, bus);
+ bcmsdh_oob_intr_set(bus->sdh, TRUE);
#endif
bus->dhd->dongle_reset = FALSE;
} else
bcmerror = BCME_SDIO_ERROR;
-#ifdef DHDTHREAD
- dhd_os_sdunlock(dhdp);
-#endif /* DHDTHREAD */
+ dhd_os_sdunlock(dhdp);
} else {
bcmerror = BCME_SDIO_ERROR;
- DHD_ERROR(("%s called when dongle is not in reset\n",
- __FUNCTION__));
- DHD_ERROR(("Will call dhd_bus_start instead\n"));
- sdioh_start(NULL, 1);
+ printk("%s called when dongle is not in reset\n",
+ __FUNCTION__);
+ printk("Will call dhd_bus_start instead\n");
+ dhd_bus_resume(dhdp, 1);
#if defined(HW_OOB)
dhd_conf_set_hw_oob_intr(bus->sdh, bus->sih->chip); // terence 20120615: fix for OOB initial issue
#endif
- dhd_conf_set_fw_name_by_chip(dhdp, fw_path, firmware_path);
if ((bcmerror = dhd_bus_start(dhdp)) != 0)
DHD_ERROR(("%s: dhd_bus_start fail with %d\n",
__FUNCTION__, bcmerror));
return bcmerror;
}
+int dhd_bus_suspend(dhd_pub_t *dhdpub)
+{
+ return bcmsdh_stop(dhdpub->bus->sdh);
+}
+
+int dhd_bus_resume(dhd_pub_t *dhdpub, int stage)
+{
+ return bcmsdh_start(dhdpub->bus->sdh, stage);
+}
+
/* Get Chip ID version */
uint dhd_bus_chip_id(dhd_pub_t *dhdp)
{
dhd_bus_t *bus = dhdp->bus;
- return bus->sih->chip;
+ if (bus && bus->sih)
+ return bus->sih->chip;
+ else
+ return 0;
}
/* Get Chip Rev ID version */
{
dhd_bus_t *bus = dhdp->bus;
- return bus->sih->chiprev;
+ if (bus && bus->sih)
+ return bus->sih->chiprev;
+ else
+ return 0;
}
/* Get Chip Pkg ID version */
return bus->sih->chippkg;
}
+int dhd_bus_get_ids(struct dhd_bus *bus, uint32 *bus_type, uint32 *bus_num, uint32 *slot_num)
+{
+ *bus_type = bus->bus;
+ *bus_num = bus->bus_num;
+ *slot_num = bus->slot_num;
+ return 0;
+}
+
int
dhd_bus_membytes(dhd_pub_t *dhdp, bool set, uint32 address, uint8 *data, uint size)
{
return dhdsdio_membytes(bus, set, address, data, size);
}
+#if defined(NDISVER) && (NDISVER >= 0x0630)
+void
+dhd_bus_reject_ioreqs(dhd_pub_t *dhdp, bool reject)
+{
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ bcmsdh_reject_ioreqs(dhdp->bus->sdh, reject);
+}
+
+void
+dhd_bus_waitfor_iodrain(dhd_pub_t *dhdp)
+{
+
+ DHD_TRACE(("%s: Enter\n", __FUNCTION__));
+
+ bcmsdh_waitfor_iodrain(dhdp->bus->sdh);
+}
+#endif /* (NDISVER) && (NDISVER >= 0x0630) */
+
+void
+dhd_bus_update_fw_nv_path(struct dhd_bus *bus, char *pfw_path, char *pnv_path, char *pconf_path)
+{
+ bus->fw_path = pfw_path;
+ bus->nv_path = pnv_path;
+ bus->dhd->conf_path = pconf_path;
+}
+
int
dhd_enableOOB(dhd_pub_t *dhd, bool sleep)
{
dhd_bus_pktq_flush(dhd_pub_t *dhdp)
{
dhd_bus_t *bus = dhdp->bus;
- /* Clear the data packet queues */
- pktq_flush(dhdp->osh, &bus->txq, TRUE, NULL, 0);
+ bool wlfc_enabled = FALSE;
+
+#ifdef PROP_TXSTATUS
+ wlfc_enabled = (dhd_wlfc_cleanup_txq(dhdp, NULL, 0) != WLFC_UNSUPPORTED);
+#endif
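+	/* When proptxstatus is active, dhd_wlfc_cleanup_txq() above reclaims the
+	 * queued packets; only flush the raw bus txq when wlfc is not in use.
+	 */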
+ if (!wlfc_enabled) {
+#ifdef DHDTCPACK_SUPPRESS
+		/* Clean tcp_ack_info_tbl to prevent access to a flushed packet
+		 * when a new packet arrives from the network stack.
+		 */
+ dhd_tcpack_info_tbl_clean(bus->dhd);
+#endif /* DHDTCPACK_SUPPRESS */
+ /* Clear the data packet queues */
+ pktq_flush(dhdp->osh, &bus->txq, TRUE, NULL, 0);
+ }
}
+#ifdef BCMSDIO
int
dhd_sr_config(dhd_pub_t *dhd, bool on)
{
else
return 0;
}
+#endif /* BCMSDIO */
+
+#ifdef DEBUGGER
+uint32 dhd_sdio_reg_read(void *h, uint32 addr)
+{
+ uint32 rval;
+ struct dhd_bus *bus = (struct dhd_bus *) h;
+
+ dhd_os_sdlock(bus->dhd);
+
+ BUS_WAKE(bus);
+
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+ rval = bcmsdh_reg_read(bus->sdh, addr, 4);
+
+ dhd_os_sdunlock(bus->dhd);
+
+ return rval;
+}
+
+void dhd_sdio_reg_write(void *h, uint32 addr, uint32 val)
+{
+ struct dhd_bus *bus = (struct dhd_bus *) h;
+
+ dhd_os_sdlock(bus->dhd);
+
+ BUS_WAKE(bus);
+
+ dhdsdio_clkctl(bus, CLK_AVAIL, FALSE);
+
+ bcmsdh_reg_write(bus->sdh, addr, 4, val);
+
+ dhd_os_sdunlock(bus->dhd);
+}
+#endif /* DEBUGGER */
+
+#if defined(SOFTAP_TPUT_ENHANCE)
+void dhd_bus_setidletime(dhd_pub_t *dhdp, int idle_time)
+{
+ if (!dhdp || !dhdp->bus) {
+ DHD_ERROR(("%s:Bus is Invalid\n", __FUNCTION__));
+ return;
+ }
+ dhdp->bus->idletime = idle_time;
+}
+
+void dhd_bus_getidletime(dhd_pub_t *dhdp, int* idle_time)
+{
+ if (!dhdp || !dhdp->bus) {
+ DHD_ERROR(("%s:Bus is Invalid\n", __FUNCTION__));
+ return;
+ }
+
+ if (!idle_time) {
+ DHD_ERROR(("%s:Arg idle_time is NULL\n", __FUNCTION__));
+ return;
+ }
+ *idle_time = dhdp->bus->idletime;
+}
+#endif /* SOFTAP_TPUT_ENHANCE */
--- /dev/null
+#include <linux/module.h>\r
+#include <linux/kernel.h>\r
+#include <linux/init.h> \r
+#include <linux/platform_device.h> \r
+#include <linux/delay.h> \r
+#include <linux/err.h> \r
+#include <linux/skbuff.h> \r
+#include <linux/wlan_plat.h> \r
+\r
+#define CONFIG_BROADCOM_WIFI_RESERVED_MEM\r
+\r
+#ifdef CONFIG_BROADCOM_WIFI_RESERVED_MEM\r
+\r
+#define WLAN_STATIC_PKT_BUF 4\r
+#define WLAN_STATIC_SCAN_BUF0 5\r
+#define WLAN_STATIC_SCAN_BUF1 6\r
+#define WLAN_STATIC_DHD_INFO 7\r
+#define PREALLOC_WLAN_SEC_NUM 5\r
+#define PREALLOC_WLAN_BUF_NUM 160\r
+#define PREALLOC_WLAN_SECTION_HEADER 24\r
+\r
+#define WLAN_SECTION_SIZE_0 (PREALLOC_WLAN_BUF_NUM * 128)\r
+#define WLAN_SECTION_SIZE_1 (PREALLOC_WLAN_BUF_NUM * 128)\r
+#define WLAN_SECTION_SIZE_2 (PREALLOC_WLAN_BUF_NUM * 512)\r
+#define WLAN_SECTION_SIZE_3 (PREALLOC_WLAN_BUF_NUM * 1024)\r
+#define WLAN_SECTION_SIZE_7 (PREALLOC_WLAN_BUF_NUM * 128)\r
+\r
+#define DHD_SKB_HDRSIZE 336\r
+#define DHD_SKB_1PAGE_BUFSIZE ((PAGE_SIZE*1)-DHD_SKB_HDRSIZE)\r
+#define DHD_SKB_2PAGE_BUFSIZE ((PAGE_SIZE*2)-DHD_SKB_HDRSIZE)\r
+#define DHD_SKB_4PAGE_BUFSIZE ((PAGE_SIZE*4)-DHD_SKB_HDRSIZE)\r
+\r
+#define WLAN_SKB_BUF_NUM 17\r
+\r
+static struct sk_buff *wlan_static_skb[WLAN_SKB_BUF_NUM];\r
+\r
+struct wlan_mem_prealloc {\r
+ void *mem_ptr;\r
+ unsigned long size;\r
+};\r
+\r
+static struct wlan_mem_prealloc wlan_mem_array[PREALLOC_WLAN_SEC_NUM] = {\r
+ {NULL, (WLAN_SECTION_SIZE_0 + PREALLOC_WLAN_SECTION_HEADER)},\r
+ {NULL, (WLAN_SECTION_SIZE_1 + PREALLOC_WLAN_SECTION_HEADER)},\r
+ {NULL, (WLAN_SECTION_SIZE_2 + PREALLOC_WLAN_SECTION_HEADER)},\r
+ {NULL, (WLAN_SECTION_SIZE_3 + PREALLOC_WLAN_SECTION_HEADER)},\r
+ {NULL, (WLAN_SECTION_SIZE_7 + PREALLOC_WLAN_SECTION_HEADER)}\r
+};\r
+\r
+void *wlan_static_scan_buf0;\r
+void *wlan_static_scan_buf1;\r
+void *bcmdhd_mem_prealloc(int section, unsigned long size)\r
+{\r
+ if (section == WLAN_STATIC_PKT_BUF) {\r
+ printk("1 %s: section=%d, wlan_static_skb=%p\n",\r
+ __FUNCTION__, section, wlan_static_skb);\r
+ return wlan_static_skb;\r
+ }\r
+ if (section == WLAN_STATIC_SCAN_BUF0) {\r
+ printk("2 %s: section=%d, wlan_static_scan_buf0=%p\n",\r
+ __FUNCTION__, section, wlan_static_scan_buf0);\r
+ return wlan_static_scan_buf0;\r
+ }\r
+ if (section == WLAN_STATIC_SCAN_BUF1) {\r
+ printk("3 %s: section=%d, wlan_static_scan_buf1=%p\n",\r
+ __FUNCTION__, section, wlan_static_scan_buf1);\r
+ return wlan_static_scan_buf1;\r
+ }\r
+ if (section == WLAN_STATIC_DHD_INFO) {\r
+ printk("4 %s: section=%d, wlan_mem_array[4]=%p\n",\r
+ __FUNCTION__, section, wlan_mem_array[4].mem_ptr);\r
+ return wlan_mem_array[4].mem_ptr;\r
+ }\r
+ if ((section < 0) || (section > PREALLOC_WLAN_SEC_NUM)) {\r
+ printk("5 %s: out of section %d\n", __FUNCTION__, section);\r
+ return NULL;\r
+ }\r
+\r
+ if (wlan_mem_array[section].size < size) {\r
+ printk("6 %s: wlan_mem_array[section].size=%lu, size=%lu\n",\r
+ __FUNCTION__, wlan_mem_array[section].size, size);\r
+ return NULL;\r
+ }\r
+ printk("7 %s: wlan_mem_array[section].mem_ptr=%p, size=%lu\n",\r
+		__FUNCTION__, wlan_mem_array[section].mem_ptr, size);\r
+\r
+ return wlan_mem_array[section].mem_ptr;\r
+}\r
+\r
+EXPORT_SYMBOL(bcmdhd_mem_prealloc);\r
+\r
+int bcmdhd_init_wlan_mem(void) \r
+{\r
+ int i;\r
+ int j;\r
+\r
+ for (i=0; i<8; i++) {\r
+ wlan_static_skb[i] = dev_alloc_skb(DHD_SKB_1PAGE_BUFSIZE);\r
+ if (!wlan_static_skb[i])\r
+ goto err_skb_alloc; \r
+ printk("1 %s: wlan_static_skb[%d]=%p, size=%lu\n",\r
+ __FUNCTION__, i, wlan_static_skb[i], DHD_SKB_1PAGE_BUFSIZE);\r
+ }\r
+\r
+ for (; i<16; i++) {\r
+ wlan_static_skb[i] = dev_alloc_skb(DHD_SKB_2PAGE_BUFSIZE);\r
+ if (!wlan_static_skb[i])\r
+ goto err_skb_alloc; \r
+ printk("2 %s: wlan_static_skb[%d]=%p, size=%lu\n",\r
+ __FUNCTION__, i, wlan_static_skb[i], DHD_SKB_2PAGE_BUFSIZE);\r
+ }\r
+\r
+ wlan_static_skb[i] = dev_alloc_skb(DHD_SKB_4PAGE_BUFSIZE);\r
+ if (!wlan_static_skb[i])\r
+ goto err_skb_alloc; \r
+ printk("3 %s: wlan_static_skb[%d]=%p, size=%lu\n",\r
+ __FUNCTION__, i, wlan_static_skb[i], DHD_SKB_4PAGE_BUFSIZE);\r
+\r
+ for (i=0; i<PREALLOC_WLAN_SEC_NUM; i++) {\r
+ wlan_mem_array[i].mem_ptr =\r
+ kmalloc(wlan_mem_array[i].size, GFP_KERNEL);\r
+\r
+ if (!wlan_mem_array[i].mem_ptr)\r
+ goto err_mem_alloc;\r
+ printk("4 %s: wlan_mem_array[%d]=%p, size=%lu\n",\r
+			__FUNCTION__, i, wlan_mem_array[i].mem_ptr, wlan_mem_array[i].size);\r
+ }\r
+\r
+ wlan_static_scan_buf0 = kmalloc (65536, GFP_KERNEL);\r
+ if(!wlan_static_scan_buf0)\r
+ goto err_mem_alloc;\r
+ printk("5 %s: wlan_static_scan_buf0=%p, size=%d\n",\r
+ __FUNCTION__, wlan_static_scan_buf0, 65536);\r
+\r
+ wlan_static_scan_buf1 = kmalloc (65536, GFP_KERNEL);\r
+ if(!wlan_static_scan_buf1)\r
+ goto err_mem_alloc;\r
+ printk("6 %s: wlan_static_scan_buf1=%p, size=%d\n",\r
+ __FUNCTION__, wlan_static_scan_buf1, 65536);\r
+\r
+ printk("%s: WIFI MEM Allocated\n", __FUNCTION__);\r
+ return 0;\r
+\r
+err_mem_alloc:\r
+ pr_err("Failed to mem_alloc for WLAN\n");\r
+ for (j=0; j<i; j++)\r
+ kfree(wlan_mem_array[j].mem_ptr);\r
+\r
+ i = WLAN_SKB_BUF_NUM;\r
+\r
+err_skb_alloc:\r
+ pr_err("Failed to skb_alloc for WLAN\n");\r
+ for (j=0; j<i; j++)\r
+ dev_kfree_skb(wlan_static_skb[j]);\r
+\r
+ return -ENOMEM;\r
+}\r
+#endif /* CONFIG_BROADCOM_WIFI_RESERVED_MEM */\r
+\r
+static int __init bcmdhd_wlan_init(void)\r
+{\r
+ printk("%s()\n", __FUNCTION__);\r
+\r
+#ifdef CONFIG_BROADCOM_WIFI_RESERVED_MEM\r
+ bcmdhd_init_wlan_mem();\r
+#endif\r
+\r
+ return 0;\r
+}\r
+\r
+__initcall(bcmdhd_wlan_init);\r
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: dhd_wlfc.c 412994 2013-07-17 12:38:03Z $
+ * $Id: dhd_wlfc.c 490028 2014-07-09 05:58:25Z $
*
*/
#include <wlfc_proto.h>
#include <dhd_wlfc.h>
#endif
+#ifdef DHDTCPACK_SUPPRESS
+#include <dhd_ip.h>
+#endif /* DHDTCPACK_SUPPRESS */
-
-
-#define BUS_RETRIES 1 /* # of retries before aborting a bus tx operation */
-
-#ifdef PROP_TXSTATUS
-typedef struct dhd_wlfc_commit_info {
- uint8 needs_hdr;
- uint8 ac_fifo_credit_spent;
- ewlfc_packet_state_t pkt_type;
- wlfc_mac_descriptor_t* mac_entry;
- void* p;
-} dhd_wlfc_commit_info_t;
-#endif /* PROP_TXSTATUS */
+/*
+ * wlfc naming and lock rules:
+ *
+ * 1. Private functions are named _dhd_wlfc_XXX, are declared static, and do not take the wlfc lock.
+ * 2. Public functions are named dhd_wlfc_XXX and take the wlfc lock where needed.
+ * 3. Modules outside proptxstatus call only the public functions and never take the wlfc lock.
+ */
#ifdef PROP_TXSTATUS
#define DHD_WLFC_QMON_COMPLETE(entry)
#endif /* QMONITOR */
-void
-dhd_wlfc_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
-{
- int i;
- uint8* ea;
- athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
- dhdp->wlfc_state;
- wlfc_hanger_t* h;
- wlfc_mac_descriptor_t* mac_table;
- wlfc_mac_descriptor_t* interfaces;
- char* iftypes[] = {"STA", "AP", "WDS", "p2pGO", "p2pCL"};
-
- if (wlfc == NULL) {
- bcm_bprintf(strbuf, "wlfc not initialized yet\n");
- return;
- }
- h = (wlfc_hanger_t*)wlfc->hanger;
- if (h == NULL) {
- bcm_bprintf(strbuf, "wlfc-hanger not initialized yet\n");
- }
-
- mac_table = wlfc->destination_entries.nodes;
- interfaces = wlfc->destination_entries.interfaces;
- bcm_bprintf(strbuf, "---- wlfc stats ----\n");
- if (h) {
- bcm_bprintf(strbuf, "wlfc hanger (pushed,popped,f_push,"
- "f_pop,f_slot, pending) = (%d,%d,%d,%d,%d,%d)\n",
- h->pushed,
- h->popped,
- h->failed_to_push,
- h->failed_to_pop,
- h->failed_slotfind,
- (h->pushed - h->popped));
- }
-
- bcm_bprintf(strbuf, "wlfc fail(tlv,credit_rqst,mac_update,psmode_update), "
- "(dq_full,rollback_fail) = (%d,%d,%d,%d), (%d,%d)\n",
- wlfc->stats.tlv_parse_failed,
- wlfc->stats.credit_request_failed,
- wlfc->stats.mac_update_failed,
- wlfc->stats.psmode_update_failed,
- wlfc->stats.delayq_full_error,
- wlfc->stats.rollback_failed);
-
- bcm_bprintf(strbuf, "PKTS (credit,sent) "
- "(AC0[%d,%d],AC1[%d,%d],AC2[%d,%d],AC3[%d,%d],BC_MC[%d,%d])\n",
- wlfc->FIFO_credit[0], wlfc->stats.send_pkts[0],
- wlfc->FIFO_credit[1], wlfc->stats.send_pkts[1],
- wlfc->FIFO_credit[2], wlfc->stats.send_pkts[2],
- wlfc->FIFO_credit[3], wlfc->stats.send_pkts[3],
- wlfc->FIFO_credit[4], wlfc->stats.send_pkts[4]);
-
- bcm_bprintf(strbuf, "\n");
- for (i = 0; i < WLFC_MAX_IFNUM; i++) {
- if (interfaces[i].occupied) {
- char* iftype_desc;
-
- if (interfaces[i].iftype > WLC_E_IF_ROLE_P2P_CLIENT)
- iftype_desc = "<Unknown";
- else
- iftype_desc = iftypes[interfaces[i].iftype];
+#define LIMIT_BORROW
- ea = interfaces[i].ea;
- bcm_bprintf(strbuf, "INTERFACE[%d].ea = "
- "[%02x:%02x:%02x:%02x:%02x:%02x], if:%d, type: %s"
- "netif_flow_control:%s\n", i,
- ea[0], ea[1], ea[2], ea[3], ea[4], ea[5],
- interfaces[i].interface_id,
- iftype_desc, ((wlfc->hostif_flow_state[i] == OFF)
- ? " OFF":" ON"));
- bcm_bprintf(strbuf, "INTERFACE[%d].DELAYQ(len,state,credit)"
- "= (%d,%s,%d)\n",
- i,
- interfaces[i].psq.len,
- ((interfaces[i].state ==
- WLFC_STATE_OPEN) ? " OPEN":"CLOSE"),
- interfaces[i].requested_credit);
+static uint16
+_dhd_wlfc_adjusted_seq(void* p, uint8 current_seq)
+{
+ uint16 seq;
- bcm_bprintf(strbuf, "INTERFACE[%d].DELAYQ"
- "(sup,ac0),(sup,ac1),(sup,ac2),(sup,ac3) = "
- "(%d,%d),(%d,%d),(%d,%d),(%d,%d)\n",
- i,
- interfaces[i].psq.q[0].len,
- interfaces[i].psq.q[1].len,
- interfaces[i].psq.q[2].len,
- interfaces[i].psq.q[3].len,
- interfaces[i].psq.q[4].len,
- interfaces[i].psq.q[5].len,
- interfaces[i].psq.q[6].len,
- interfaces[i].psq.q[7].len);
- }
+ if (!p) {
+ return 0xffff;
}
- bcm_bprintf(strbuf, "\n");
- for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) {
- if (mac_table[i].occupied) {
- ea = mac_table[i].ea;
- bcm_bprintf(strbuf, "MAC_table[%d].ea = "
- "[%02x:%02x:%02x:%02x:%02x:%02x], if:%d \n", i,
- ea[0], ea[1], ea[2], ea[3], ea[4], ea[5],
- mac_table[i].interface_id);
-
- bcm_bprintf(strbuf, "MAC_table[%d].DELAYQ(len,state,credit)"
- "= (%d,%s,%d)\n",
- i,
- mac_table[i].psq.len,
- ((mac_table[i].state ==
- WLFC_STATE_OPEN) ? " OPEN":"CLOSE"),
- mac_table[i].requested_credit);
-#ifdef PROP_TXSTATUS_DEBUG
- bcm_bprintf(strbuf, "MAC_table[%d]: (opened, closed) = (%d, %d)\n",
- i, mac_table[i].opened_ct, mac_table[i].closed_ct);
-#endif
- bcm_bprintf(strbuf, "MAC_table[%d].DELAYQ"
- "(sup,ac0),(sup,ac1),(sup,ac2),(sup,ac3) = "
- "(%d,%d),(%d,%d),(%d,%d),(%d,%d)\n",
- i,
- mac_table[i].psq.q[0].len,
- mac_table[i].psq.q[1].len,
- mac_table[i].psq.q[2].len,
- mac_table[i].psq.q[3].len,
- mac_table[i].psq.q[4].len,
- mac_table[i].psq.q[5].len,
- mac_table[i].psq.q[6].len,
- mac_table[i].psq.q[7].len);
- }
+ seq = WL_TXSTATUS_GET_FREERUNCTR(DHD_PKTTAG_H2DTAG(PKTTAG(p)));
+ if (seq < current_seq) {
+ /* wrap around */
+ seq += 256;
}
-#ifdef PROP_TXSTATUS_DEBUG
- {
- int avg;
- int moving_avg = 0;
- int moving_samples;
-
- if (wlfc->stats.latency_sample_count) {
- moving_samples = sizeof(wlfc->stats.deltas)/sizeof(uint32);
-
- for (i = 0; i < moving_samples; i++)
- moving_avg += wlfc->stats.deltas[i];
- moving_avg /= moving_samples;
-
- avg = (100 * wlfc->stats.total_status_latency) /
- wlfc->stats.latency_sample_count;
- bcm_bprintf(strbuf, "txstatus latency (average, last, moving[%d]) = "
- "(%d.%d, %03d, %03d)\n",
- moving_samples, avg/100, (avg - (avg/100)*100),
- wlfc->stats.latency_most_recent,
- moving_avg);
- }
- }
+ return seq;
+}
- bcm_bprintf(strbuf, "wlfc- fifo[0-5] credit stats: sent = (%d,%d,%d,%d,%d,%d), "
- "back = (%d,%d,%d,%d,%d,%d)\n",
- wlfc->stats.fifo_credits_sent[0],
- wlfc->stats.fifo_credits_sent[1],
- wlfc->stats.fifo_credits_sent[2],
- wlfc->stats.fifo_credits_sent[3],
- wlfc->stats.fifo_credits_sent[4],
- wlfc->stats.fifo_credits_sent[5],
+static void
+_dhd_wlfc_prec_enque(struct pktq *pq, int prec, void* p, bool qHead,
+ uint8 current_seq, bool reOrder)
+{
+ struct pktq_prec *q;
+ uint16 seq, seq2;
+ void *p2, *p2_prev;
- wlfc->stats.fifo_credits_back[0],
- wlfc->stats.fifo_credits_back[1],
- wlfc->stats.fifo_credits_back[2],
- wlfc->stats.fifo_credits_back[3],
- wlfc->stats.fifo_credits_back[4],
- wlfc->stats.fifo_credits_back[5]);
- {
- uint32 fifo_cr_sent = 0;
- uint32 fifo_cr_acked = 0;
- uint32 request_cr_sent = 0;
- uint32 request_cr_ack = 0;
- uint32 bc_mc_cr_ack = 0;
+ if (!p)
+ return;
- for (i = 0; i < sizeof(wlfc->stats.fifo_credits_sent)/sizeof(uint32); i++) {
- fifo_cr_sent += wlfc->stats.fifo_credits_sent[i];
- }
- for (i = 0; i < sizeof(wlfc->stats.fifo_credits_back)/sizeof(uint32); i++) {
- fifo_cr_acked += wlfc->stats.fifo_credits_back[i];
- }
+ ASSERT(prec >= 0 && prec < pq->num_prec);
+ ASSERT(PKTLINK(p) == NULL); /* queueing chains not allowed */
+
+ ASSERT(!pktq_full(pq));
+ ASSERT(!pktq_pfull(pq, prec));
+
+ q = &pq->q[prec];
+
+ PKTSETLINK(p, NULL);
+ if (q->head == NULL) {
+ /* empty queue */
+ q->head = p;
+ q->tail = p;
+ } else {
+ if (reOrder && (prec & 1)) {
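+			/* Odd precedence indices hold suppressed packets: insert by
+			 * wrap-adjusted free-running sequence so retransmissions keep
+			 * the original order.
+			 */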
+ seq = _dhd_wlfc_adjusted_seq(p, current_seq);
+ p2 = qHead ? q->head : q->tail;
+ seq2 = _dhd_wlfc_adjusted_seq(p2, current_seq);
+
+ if ((qHead &&((seq+1) > seq2)) || (!qHead && ((seq2+1) > seq))) {
+ /* need reorder */
+ p2 = q->head;
+ p2_prev = NULL;
+ seq2 = _dhd_wlfc_adjusted_seq(p2, current_seq);
+
+ while (seq > seq2) {
+ p2_prev = p2;
+ p2 = PKTLINK(p2);
+ if (!p2) {
+ break;
+ }
+ seq2 = _dhd_wlfc_adjusted_seq(p2, current_seq);
+ }
- for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) {
- if (wlfc->destination_entries.nodes[i].occupied) {
- request_cr_sent +=
- wlfc->destination_entries.nodes[i].dstncredit_sent_packets;
- }
- }
- for (i = 0; i < WLFC_MAX_IFNUM; i++) {
- if (wlfc->destination_entries.interfaces[i].occupied) {
- request_cr_sent +=
- wlfc->destination_entries.interfaces[i].dstncredit_sent_packets;
- }
- }
- for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) {
- if (wlfc->destination_entries.nodes[i].occupied) {
- request_cr_ack +=
- wlfc->destination_entries.nodes[i].dstncredit_acks;
+ if (p2_prev == NULL) {
+ /* insert head */
+ PKTSETLINK(p, q->head);
+ q->head = p;
+ } else if (p2 == NULL) {
+ /* insert tail */
+ PKTSETLINK(p2_prev, p);
+ q->tail = p;
+ } else {
+ /* insert after p2_prev */
+ PKTSETLINK(p, PKTLINK(p2_prev));
+ PKTSETLINK(p2_prev, p);
+ }
+ goto exit;
}
}
- for (i = 0; i < WLFC_MAX_IFNUM; i++) {
- if (wlfc->destination_entries.interfaces[i].occupied) {
- request_cr_ack +=
- wlfc->destination_entries.interfaces[i].dstncredit_acks;
- }
+
+ if (qHead) {
+ PKTSETLINK(p, q->head);
+ q->head = p;
+ } else {
+ PKTSETLINK(q->tail, p);
+ q->tail = p;
}
- bcm_bprintf(strbuf, "wlfc- (sent, status) => pq(%d,%d), vq(%d,%d),"
- "other:%d, bc_mc:%d, signal-only, (sent,freed): (%d,%d)",
- fifo_cr_sent, fifo_cr_acked,
- request_cr_sent, request_cr_ack,
- wlfc->destination_entries.other.dstncredit_acks,
- bc_mc_cr_ack,
- wlfc->stats.signal_only_pkts_sent, wlfc->stats.signal_only_pkts_freed);
}
-#endif /* PROP_TXSTATUS_DEBUG */
- bcm_bprintf(strbuf, "\n");
- bcm_bprintf(strbuf, "wlfc- pkt((in,2bus,txstats,hdrpull),(dropped,hdr_only,wlc_tossed)"
- "(freed,free_err,rollback)) = "
- "((%d,%d,%d,%d),(%d,%d,%d),(%d,%d,%d))\n",
- wlfc->stats.pktin,
- wlfc->stats.pkt2bus,
- wlfc->stats.txstatus_in,
- wlfc->stats.dhd_hdrpulls,
-
- wlfc->stats.pktdropped,
- wlfc->stats.wlfc_header_only_pkt,
- wlfc->stats.wlc_tossed_pkts,
-
- wlfc->stats.pkt_freed,
- wlfc->stats.pkt_free_err, wlfc->stats.rollback);
-
- bcm_bprintf(strbuf, "wlfc- suppress((d11,wlc,err),enq(d11,wl,hq,mac?),retx(d11,wlc,hq)) = "
- "((%d,%d,%d),(%d,%d,%d,%d),(%d,%d,%d))\n",
-
- wlfc->stats.d11_suppress,
- wlfc->stats.wl_suppress,
- wlfc->stats.bad_suppress,
- wlfc->stats.psq_d11sup_enq,
- wlfc->stats.psq_wlsup_enq,
- wlfc->stats.psq_hostq_enq,
- wlfc->stats.mac_handle_notfound,
+exit:
- wlfc->stats.psq_d11sup_retx,
- wlfc->stats.psq_wlsup_retx,
- wlfc->stats.psq_hostq_retx);
- bcm_bprintf(strbuf, "wlfc- generic error: %d", wlfc->stats.generic_error);
+ q->len++;
+ pq->len++;
- return;
+ if (pq->hi_prec < prec)
+ pq->hi_prec = (uint8)prec;
}
/* Create a place to store all packet pointers submitted to the firmware until
hang-er: noun, a contrivance on which things are hung, as a hook.
*/
static void*
-dhd_wlfc_hanger_create(osl_t *osh, int max_items)
+_dhd_wlfc_hanger_create(osl_t *osh, int max_items)
{
int i;
wlfc_hanger_t* hanger;
}
static int
-dhd_wlfc_hanger_delete(osl_t *osh, void* hanger)
+_dhd_wlfc_hanger_delete(osl_t *osh, void* hanger)
{
wlfc_hanger_t* h = (wlfc_hanger_t*)hanger;
}
static uint16
-dhd_wlfc_hanger_get_free_slot(void* hanger)
+_dhd_wlfc_hanger_get_free_slot(void* hanger)
{
uint32 i;
wlfc_hanger_t* h = (wlfc_hanger_t*)hanger;
}
static int
-dhd_wlfc_hanger_get_genbit(void* hanger, void* pkt, uint32 slot_id, int* gen)
+_dhd_wlfc_hanger_get_genbit(void* hanger, void* pkt, uint32 slot_id, int* gen)
{
int rc = BCME_OK;
wlfc_hanger_t* h = (wlfc_hanger_t*)hanger;
}
static int
-dhd_wlfc_hanger_pushpkt(void* hanger, void* pkt, uint32 slot_id)
+_dhd_wlfc_hanger_pushpkt(void* hanger, void* pkt, uint32 slot_id)
{
int rc = BCME_OK;
wlfc_hanger_t* h = (wlfc_hanger_t*)hanger;
if (h->items[slot_id].state == WLFC_HANGER_ITEM_STATE_FREE) {
h->items[slot_id].state = WLFC_HANGER_ITEM_STATE_INUSE;
h->items[slot_id].pkt = pkt;
- h->items[slot_id].identifier = slot_id;
+ h->items[slot_id].pkt_state = 0;
+ h->items[slot_id].pkt_txstatus = 0;
h->pushed++;
}
else {
}
static int
-dhd_wlfc_hanger_poppkt(void* hanger, uint32 slot_id, void** pktout, int remove_from_hanger)
+_dhd_wlfc_hanger_poppkt(void* hanger, uint32 slot_id, void** pktout, bool remove_from_hanger)
{
int rc = BCME_OK;
wlfc_hanger_t* h = (wlfc_hanger_t*)hanger;
h->items[slot_id].state =
WLFC_HANGER_ITEM_STATE_FREE;
h->items[slot_id].pkt = NULL;
- h->items[slot_id].identifier = 0;
h->items[slot_id].gen = 0xff;
+ h->items[slot_id].identifier = 0;
h->popped++;
}
}
}
static int
-dhd_wlfc_hanger_mark_suppressed(void* hanger, uint32 slot_id, uint8 gen)
+_dhd_wlfc_hanger_mark_suppressed(void* hanger, uint32 slot_id, uint8 gen)
{
int rc = BCME_OK;
wlfc_hanger_t* h = (wlfc_hanger_t*)hanger;
return rc;
}
-static int
-_dhd_wlfc_pushheader(athost_wl_status_info_t* ctx, void* p, bool tim_signal,
- uint8 tim_bmp, uint8 mac_handle, uint32 htodtag)
+/* remove reference of specific packet in hanger */
+static bool
+_dhd_wlfc_hanger_remove_reference(wlfc_hanger_t* h, void* pkt)
{
- uint32 wl_pktinfo = 0;
- uint8* wlh;
- uint8 dataOffset;
- uint8 fillers;
- uint8 tim_signal_len = 0;
+ int i;
- struct bdc_header *h;
+ if (!h || !pkt) {
+ return FALSE;
+ }
- if (tim_signal) {
- tim_signal_len = 1 + 1 + WLFC_CTL_VALUE_LEN_PENDING_TRAFFIC_BMP;
+ for (i = 0; i < h->max_items; i++) {
+ if (pkt == h->items[i].pkt) {
+ if ((h->items[i].state == WLFC_HANGER_ITEM_STATE_INUSE) ||
+ (h->items[i].state == WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED)) {
+ h->items[i].state = WLFC_HANGER_ITEM_STATE_FREE;
+ h->items[i].pkt = NULL;
+ h->items[i].gen = 0xff;
+ h->items[i].identifier = 0;
+ }
+ return TRUE;
+ }
}
- /* +2 is for Type[1] and Len[1] in TLV, plus TIM signal */
- dataOffset = WLFC_CTL_VALUE_LEN_PKTTAG + 2 + tim_signal_len;
- fillers = ROUNDUP(dataOffset, 4) - dataOffset;
- dataOffset += fillers;
+ return FALSE;
+}
- PKTPUSH(ctx->osh, p, dataOffset);
- wlh = (uint8*) PKTDATA(ctx->osh, p);
- wl_pktinfo = htol32(htodtag);
+static int
+_dhd_wlfc_enque_afq(athost_wl_status_info_t* ctx, void *p)
+{
+ wlfc_mac_descriptor_t* entry;
+ uint16 entry_idx = WL_TXSTATUS_GET_HSLOT(DHD_PKTTAG_H2DTAG(PKTTAG(p)));
+ uint8 prec = DHD_PKTTAG_FIFO(PKTTAG(p));
- wlh[0] = WLFC_CTL_TYPE_PKTTAG;
- wlh[1] = WLFC_CTL_VALUE_LEN_PKTTAG;
- memcpy(&wlh[2], &wl_pktinfo, sizeof(uint32));
+ if (entry_idx < WLFC_MAC_DESC_TABLE_SIZE)
+ entry = &ctx->destination_entries.nodes[entry_idx];
+ else if (entry_idx < (WLFC_MAC_DESC_TABLE_SIZE + WLFC_MAX_IFNUM))
+ entry = &ctx->destination_entries.interfaces[entry_idx - WLFC_MAC_DESC_TABLE_SIZE];
+ else
+ entry = &ctx->destination_entries.other;
- if (tim_signal_len) {
- wlh[dataOffset - fillers - tim_signal_len ] =
- WLFC_CTL_TYPE_PENDING_TRAFFIC_BMP;
- wlh[dataOffset - fillers - tim_signal_len + 1] =
- WLFC_CTL_VALUE_LEN_PENDING_TRAFFIC_BMP;
- wlh[dataOffset - fillers - tim_signal_len + 2] = mac_handle;
- wlh[dataOffset - fillers - tim_signal_len + 3] = tim_bmp;
- }
- if (fillers)
- memset(&wlh[dataOffset - fillers], WLFC_CTL_TYPE_FILLER, fillers);
+ pktq_penq(&entry->afq, prec, p);
+
+ return BCME_OK;
+}
+
+static int
+_dhd_wlfc_deque_afq(athost_wl_status_info_t* ctx, uint16 hslot, uint8 hcnt, uint8 prec,
+ void **pktout)
+{
+ wlfc_mac_descriptor_t *entry;
+ struct pktq *pq;
+ struct pktq_prec *q;
+ void *p, *b;
+
+ if (!ctx) {
+ DHD_ERROR(("%s: ctx(%p), pktout(%p)\n", __FUNCTION__, ctx, pktout));
+ return BCME_BADARG;
+ }
+
+ if (pktout) {
+ *pktout = NULL;
+ }
+
+ ASSERT(hslot < (WLFC_MAC_DESC_TABLE_SIZE + WLFC_MAX_IFNUM + 1));
+
+ if (hslot < WLFC_MAC_DESC_TABLE_SIZE)
+ entry = &ctx->destination_entries.nodes[hslot];
+ else if (hslot < (WLFC_MAC_DESC_TABLE_SIZE + WLFC_MAX_IFNUM))
+ entry = &ctx->destination_entries.interfaces[hslot - WLFC_MAC_DESC_TABLE_SIZE];
+ else
+ entry = &ctx->destination_entries.other;
+
+ pq = &entry->afq;
+
+ ASSERT(prec < pq->num_prec);
+
+ q = &pq->q[prec];
+
+ b = NULL;
+ p = q->head;
+
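+	/* Walk the active-frame queue for the packet whose free-running counter
+	 * matches this txstatus; completions may arrive out of order.
+	 */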
+ while (p && (hcnt != WL_TXSTATUS_GET_FREERUNCTR(DHD_PKTTAG_H2DTAG(PKTTAG(p)))))
+ {
+ b = p;
+ p = PKTLINK(p);
+ }
+
+ if (p == NULL) {
+ /* none is matched */
+ if (b) {
+ DHD_ERROR(("%s: can't find matching seq(%d)\n", __FUNCTION__, hcnt));
+ } else {
+ DHD_ERROR(("%s: queue is empty\n", __FUNCTION__));
+ }
+
+ return BCME_ERROR;
+ }
+
+ if (!b) {
+ /* head packet is matched */
+ if ((q->head = PKTLINK(p)) == NULL) {
+ q->tail = NULL;
+ }
+ } else {
+ /* middle packet is matched */
+ DHD_INFO(("%s: out of order, seq(%d), head_seq(%d)\n", __FUNCTION__, hcnt,
+ WL_TXSTATUS_GET_FREERUNCTR(DHD_PKTTAG_H2DTAG(PKTTAG(q->head)))));
+ ctx->stats.ooo_pkts[prec]++;
+ PKTSETLINK(b, PKTLINK(p));
+ if (PKTLINK(p) == NULL) {
+ q->tail = b;
+ }
+ }
+
+ q->len--;
+ pq->len--;
+
+ PKTSETLINK(p, NULL);
+
+ if (pktout) {
+ *pktout = p;
+ }
+
+ return BCME_OK;
+}
+
+static int
+_dhd_wlfc_pushheader(athost_wl_status_info_t* ctx, void* p, bool tim_signal,
+ uint8 tim_bmp, uint8 mac_handle, uint32 htodtag, uint16 htodseq, bool skip_wlfc_hdr)
+{
+ uint32 wl_pktinfo = 0;
+ uint8* wlh;
+ uint8 dataOffset = 0;
+ uint8 fillers;
+ uint8 tim_signal_len = 0;
+ dhd_pub_t *dhdp = (dhd_pub_t *)ctx->dhdp;
+
+ struct bdc_header *h;
+
+ if (skip_wlfc_hdr)
+ goto push_bdc_hdr;
+
+ if (tim_signal) {
+ tim_signal_len = TLV_HDR_LEN + WLFC_CTL_VALUE_LEN_PENDING_TRAFFIC_BMP;
+ }
+
+ /* +2 is for Type[1] and Len[1] in TLV, plus TIM signal */
+ dataOffset = WLFC_CTL_VALUE_LEN_PKTTAG + TLV_HDR_LEN + tim_signal_len;
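+	/* When sequence-number reuse is negotiated, reserve room for the 2-byte
+	 * sequence value that is appended to the PKTTAG TLV below.
+	 */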
+ if (WLFC_GET_REUSESEQ(dhdp->wlfc_mode)) {
+ dataOffset += WLFC_CTL_VALUE_LEN_SEQ;
+ }
+
+ fillers = ROUNDUP(dataOffset, 4) - dataOffset;
+ dataOffset += fillers;
+
+ PKTPUSH(ctx->osh, p, dataOffset);
+ wlh = (uint8*) PKTDATA(ctx->osh, p);
+
+ wl_pktinfo = htol32(htodtag);
+
+ wlh[TLV_TAG_OFF] = WLFC_CTL_TYPE_PKTTAG;
+ wlh[TLV_LEN_OFF] = WLFC_CTL_VALUE_LEN_PKTTAG;
+ memcpy(&wlh[TLV_HDR_LEN], &wl_pktinfo, sizeof(uint32));
+
+ if (WLFC_GET_REUSESEQ(dhdp->wlfc_mode)) {
+ uint16 wl_seqinfo = htol16(htodseq);
+ wlh[TLV_LEN_OFF] += WLFC_CTL_VALUE_LEN_SEQ;
+ memcpy(&wlh[TLV_HDR_LEN + WLFC_CTL_VALUE_LEN_PKTTAG], &wl_seqinfo,
+ WLFC_CTL_VALUE_LEN_SEQ);
+ }
+
+ if (tim_signal_len) {
+ wlh[dataOffset - fillers - tim_signal_len ] =
+ WLFC_CTL_TYPE_PENDING_TRAFFIC_BMP;
+ wlh[dataOffset - fillers - tim_signal_len + 1] =
+ WLFC_CTL_VALUE_LEN_PENDING_TRAFFIC_BMP;
+ wlh[dataOffset - fillers - tim_signal_len + 2] = mac_handle;
+ wlh[dataOffset - fillers - tim_signal_len + 3] = tim_bmp;
+ }
+ if (fillers)
+ memset(&wlh[dataOffset - fillers], WLFC_CTL_TYPE_FILLER, fillers);
+
+push_bdc_hdr:
PKTPUSH(ctx->osh, p, BDC_HEADER_LEN);
h = (struct bdc_header *)PKTDATA(ctx->osh, p);
struct bdc_header *h;
if (PKTLEN(ctx->osh, pktbuf) < BDC_HEADER_LEN) {
- WLFC_DBGMESG(("%s: rx data too short (%d < %d)\n", __FUNCTION__,
+ DHD_ERROR(("%s: rx data too short (%d < %d)\n", __FUNCTION__,
PKTLEN(ctx->osh, pktbuf), BDC_HEADER_LEN));
return BCME_ERROR;
}
/* pull BDC header */
PKTPULL(ctx->osh, pktbuf, BDC_HEADER_LEN);
- if (PKTLEN(ctx->osh, pktbuf) < (h->dataOffset << 2)) {
- WLFC_DBGMESG(("%s: rx data too short (%d < %d)\n", __FUNCTION__,
+ if (PKTLEN(ctx->osh, pktbuf) < (uint)(h->dataOffset << 2)) {
+ DHD_ERROR(("%s: rx data too short (%d < %d)\n", __FUNCTION__,
PKTLEN(ctx->osh, pktbuf), (h->dataOffset << 2)));
return BCME_ERROR;
}
wlfc_mac_descriptor_t* table = ctx->destination_entries.nodes;
uint8 ifid = DHD_PKTTAG_IF(PKTTAG(p));
uint8* dstn = DHD_PKTTAG_DSTN(PKTTAG(p));
- wlfc_mac_descriptor_t* entry = NULL;
+ wlfc_mac_descriptor_t* entry = DHD_PKTTAG_ENTRY(PKTTAG(p));
int iftype = ctx->destination_entries.interfaces[ifid].iftype;
- /* Multicast destination and P2P clients get the interface entry.
- * STA gets the interface entry if there is no exact match. For
- * example, TDLS destinations have their own entry.
+ /* saved one exists, return it */
+ if (entry)
+ return entry;
+
+	/* Multicast destinations, STA and P2P client interfaces use the interface entry.
+	 * STA/GC use a per-MAC entry only for TDLS destinations, which have their
+	 * own entries.
+	 */
if ((iftype == WLC_E_IF_ROLE_STA || ETHER_ISMULTI(dstn) ||
iftype == WLC_E_IF_ROLE_P2P_CLIENT) &&
entry = &ctx->destination_entries.interfaces[ifid];
}
- if (entry != NULL && ETHER_ISMULTI(dstn))
+ if (entry && ETHER_ISMULTI(dstn)) {
+ DHD_PKTTAG_SET_ENTRY(PKTTAG(p), entry);
return entry;
+ }
for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) {
if (table[i].occupied) {
}
}
- return entry != NULL ? entry : &ctx->destination_entries.other;
+ if (entry == NULL)
+ entry = &ctx->destination_entries.other;
+
+ DHD_PKTTAG_SET_ENTRY(PKTTAG(p), entry);
+
+ return entry;
+}
+
+static int
+_dhd_wlfc_prec_drop(dhd_pub_t *dhdp, int prec, void* p, bool bPktInQ)
+{
+ athost_wl_status_info_t* ctx;
+ void *pout = NULL;
+
+ ASSERT(dhdp && p);
+ ASSERT(prec >= 0 && prec <= WLFC_PSQ_PREC_COUNT);
+
+ ctx = (athost_wl_status_info_t*)dhdp->wlfc_state;
+
+ if (!WLFC_GET_AFQ(dhdp->wlfc_mode) && (prec & 1)) {
+ /* suppressed queue, need pop from hanger */
+ _dhd_wlfc_hanger_poppkt(ctx->hanger, WL_TXSTATUS_GET_HSLOT(DHD_PKTTAG_H2DTAG
+ (PKTTAG(p))), &pout, TRUE);
+ ASSERT(p == pout);
+ }
+
+ if (!(prec & 1)) {
+#ifdef DHDTCPACK_SUPPRESS
+ /* pkt in delayed q, so fake push BDC header for
+ * dhd_tcpack_check_xmit() and dhd_txcomplete().
+ */
+ _dhd_wlfc_pushheader(ctx, p, FALSE, 0, 0, 0, 0, TRUE);
+
+ /* This packet is about to be freed, so remove it from tcp_ack_info_tbl
+ * This must be one of...
+ * 1. A pkt already in delayQ is evicted by another pkt with higher precedence
+ * in _dhd_wlfc_prec_enq_with_drop()
+ * 2. A pkt could not be enqueued to delayQ because it is full,
+ * in _dhd_wlfc_enque_delayq().
+ * 3. A pkt could not be enqueued to delayQ because it is full,
+ * in _dhd_wlfc_rollback_packet_toq().
+ */
+ if (dhd_tcpack_check_xmit(dhdp, p) == BCME_ERROR) {
+ DHD_ERROR(("%s %d: tcpack_suppress ERROR!!!"
+ " Stop using it\n",
+ __FUNCTION__, __LINE__));
+ dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF);
+ }
+#endif /* DHDTCPACK_SUPPRESS */
+ }
+
+ if (bPktInQ) {
+ ctx->pkt_cnt_in_q[DHD_PKTTAG_IF(PKTTAG(p))][prec>>1]--;
+ ctx->pkt_cnt_per_ac[prec>>1]--;
+ }
+
+ ctx->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(p))][DHD_PKTTAG_FIFO(PKTTAG(p))]--;
+ ctx->stats.pktout++;
+ ctx->stats.drop_pkts[prec]++;
+
+ dhd_txcomplete(dhdp, p, FALSE);
+ PKTFREE(ctx->osh, p, TRUE);
+
+ return 0;
+}
+
+static bool
+_dhd_wlfc_prec_enq_with_drop(dhd_pub_t *dhdp, struct pktq *pq, void *pkt, int prec, bool qHead,
+ uint8 current_seq)
+{
+ void *p = NULL;
+ int eprec = -1; /* precedence to evict from */
+ athost_wl_status_info_t* ctx;
+
+ ASSERT(dhdp && pq && pkt);
+ ASSERT(prec >= 0 && prec < pq->num_prec);
+
+ ctx = (athost_wl_status_info_t*)dhdp->wlfc_state;
+
+ /* Fast case, precedence queue is not full and we are also not
+ * exceeding total queue length
+ */
+ if (!pktq_pfull(pq, prec) && !pktq_full(pq)) {
+ goto exit;
+ }
+
+ /* Determine precedence from which to evict packet, if any */
+ if (pktq_pfull(pq, prec))
+ eprec = prec;
+ else if (pktq_full(pq)) {
+ p = pktq_peek_tail(pq, &eprec);
+ if (!p) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return FALSE;
+ }
+ if ((eprec > prec) || (eprec < 0)) {
+ if (!pktq_pempty(pq, prec)) {
+ eprec = prec;
+ } else {
+ return FALSE;
+ }
+ }
+ }
+
+ /* Evict if needed */
+ if (eprec >= 0) {
+ /* Detect queueing to unconfigured precedence */
+ ASSERT(!pktq_pempty(pq, eprec));
+ /* Evict all fragmented frames */
+ dhd_prec_drop_pkts(dhdp, pq, eprec, _dhd_wlfc_prec_drop);
+ }
+
+exit:
+ /* Enqueue */
+ _dhd_wlfc_prec_enque(pq, prec, pkt, qHead, current_seq,
+ WLFC_GET_REORDERSUPP(dhdp->wlfc_mode));
+ ctx->pkt_cnt_in_q[DHD_PKTTAG_IF(PKTTAG(pkt))][prec>>1]++;
+ ctx->pkt_cnt_per_ac[prec>>1]++;
+
+ return TRUE;
}
+
static int
_dhd_wlfc_rollback_packet_toq(athost_wl_status_info_t* ctx,
void* p, ewlfc_packet_state_t pkt_type, uint32 hslot)
Note: hslot is used only when header removal is done.
*/
wlfc_mac_descriptor_t* entry;
- void* pktout;
int rc = BCME_OK;
- int prec;
+ int prec, fifo_id;
entry = _dhd_wlfc_find_table_entry(ctx, p);
prec = DHD_PKTTAG_FIFO(PKTTAG(p));
+ fifo_id = prec << 1;
+ if (pkt_type == eWLFC_PKTTYPE_SUPPRESSED)
+ fifo_id += 1;
if (entry != NULL) {
- if (pkt_type == eWLFC_PKTTYPE_SUPPRESSED) {
- /* wl-header is saved for suppressed packets */
- if (WLFC_PKTQ_PENQ_HEAD(&entry->psq, ((prec << 1) + 1), p) == NULL) {
- WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__));
- rc = BCME_ERROR;
- }
- }
- else {
- /* remove header first */
- rc = _dhd_wlfc_pullheader(ctx, p);
- if (rc != BCME_OK) {
- WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__));
- /* free the hanger slot */
- dhd_wlfc_hanger_poppkt(ctx->hanger, hslot, &pktout, 1);
- PKTFREE(ctx->osh, p, TRUE);
- ctx->stats.rollback_failed++;
- return BCME_ERROR;
- }
-
- if (pkt_type == eWLFC_PKTTYPE_DELAYED) {
- /* delay-q packets are going to delay-q */
- if (WLFC_PKTQ_PENQ_HEAD(&entry->psq, (prec << 1), p) == NULL) {
- WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__));
- rc = BCME_ERROR;
- }
- }
-
- /* free the hanger slot */
- dhd_wlfc_hanger_poppkt(ctx->hanger, hslot, &pktout, 1);
-
- /* decrement sequence count */
- WLFC_DECR_SEQCOUNT(entry, prec);
- }
/*
if this packet did not count against FIFO credit, it must have
taken a requested_credit from the firmware (for pspoll etc.)
*/
- if (!DHD_PKTTAG_CREDITCHECK(PKTTAG(p))) {
+ if ((prec != AC_COUNT) && !DHD_PKTTAG_CREDITCHECK(PKTTAG(p)))
entry->requested_credit++;
+
+ if (pkt_type == eWLFC_PKTTYPE_DELAYED) {
+ /* decrement sequence count */
+ WLFC_DECR_SEQCOUNT(entry, prec);
+ /* remove header first */
+ rc = _dhd_wlfc_pullheader(ctx, p);
+ if (rc != BCME_OK) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ goto exit;
+ }
}
- }
- else {
- WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+
+ if (_dhd_wlfc_prec_enq_with_drop(ctx->dhdp, &entry->psq, p, fifo_id, TRUE,
+ WLFC_SEQCOUNT(entry, fifo_id>>1))
+ == FALSE) {
+ /* enque failed */
+ DHD_ERROR(("Error: %s():%d, fifo_id(%d)\n",
+ __FUNCTION__, __LINE__, fifo_id));
+ rc = BCME_ERROR;
+ }
+ } else {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
rc = BCME_ERROR;
}
- if (rc != BCME_OK)
+exit:
+ if (rc != BCME_OK) {
ctx->stats.rollback_failed++;
+ _dhd_wlfc_prec_drop(ctx->dhdp, fifo_id, p, FALSE);
+ }
else
ctx->stats.rollback++;
return rc;
}
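+/*
+ * Decide whether host flow control may be applied for interface 'ifid'. Flow control
+ * is only allowed after the interface's in-driver traffic has consisted of a single AC
+ * for at least WLFC_FC_DEFER_PERIOD_MS; traffic on multiple ACs (or none) clears the
+ * permission and resets the defer timer.
+ */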
+static bool
+_dhd_wlfc_allow_fc(athost_wl_status_info_t* ctx, uint8 ifid)
+{
+ int prec, ac_traffic = WLFC_NO_TRAFFIC;
+
+ for (prec = 0; prec < AC_COUNT; prec++) {
+ if (ctx->pkt_cnt_in_drv[ifid][prec] > 0) {
+ if (ac_traffic == WLFC_NO_TRAFFIC)
+ ac_traffic = prec + 1;
+ else if (ac_traffic != (prec + 1))
+ ac_traffic = WLFC_MULTI_TRAFFIC;
+ }
+ }
+
+ if (ac_traffic >= 1 && ac_traffic <= AC_COUNT) {
+ /* single AC (BE/BK/VI/VO) in queue */
+ if (ctx->allow_fc) {
+ return TRUE;
+ } else {
+ uint32 delta;
+ uint32 curr_t = OSL_SYSUPTIME();
+
+ if (ctx->fc_defer_timestamp == 0) {
+				/* first single AC scenario */
+ ctx->fc_defer_timestamp = curr_t;
+ return FALSE;
+ }
+
+ /* single AC duration, this handles wrap around, e.g. 1 - ~0 = 2. */
+ delta = curr_t - ctx->fc_defer_timestamp;
+ if (delta >= WLFC_FC_DEFER_PERIOD_MS) {
+ ctx->allow_fc = TRUE;
+ }
+ }
+ } else {
+ /* multiple ACs or BCMC in queue */
+ ctx->allow_fc = FALSE;
+ ctx->fc_defer_timestamp = 0;
+ }
+
+ return ctx->allow_fc;
+}
+
static void
_dhd_wlfc_flow_control_check(athost_wl_status_info_t* ctx, struct pktq* pq, uint8 if_id)
{
ASSERT(ctx);
dhdp = (dhd_pub_t *)ctx->dhdp;
+ ASSERT(dhdp);
+
+ if (dhdp->skip_fc && dhdp->skip_fc())
+ return;
- if (dhdp && dhdp->skip_fc && dhdp->skip_fc())
+ if ((ctx->hostif_flow_state[if_id] == OFF) && !_dhd_wlfc_allow_fc(ctx, if_id))
return;
if ((pq->len <= WLFC_FLOWCONTROL_LOWATER) && (ctx->hostif_flow_state[if_id] == ON)) {
*/
WLFC_DBGMESG(("F"));
- dhd_txflowcontrol(ctx->dhdp, if_id, OFF);
+ dhd_txflowcontrol(dhdp, if_id, OFF);
ctx->toggle_host_if = 0;
}
+
if ((pq->len >= WLFC_FLOWCONTROL_HIWATER) && (ctx->hostif_flow_state[if_id] == OFF)) {
/* stop traffic */
ctx->hostif_flow_state[if_id] = ON;
*/
WLFC_DBGMESG(("N"));
- dhd_txflowcontrol(ctx->dhdp, if_id, ON);
+ dhd_txflowcontrol(dhdp, if_id, ON);
ctx->host_ifidx = if_id;
ctx->toggle_host_if = 1;
{
int rc = BCME_OK;
void* p = NULL;
- int dummylen = ((dhd_pub_t *)ctx->dhdp)->hdrlen+ 12;
+ int dummylen = ((dhd_pub_t *)ctx->dhdp)->hdrlen+ 16;
+ dhd_pub_t *dhdp = (dhd_pub_t *)ctx->dhdp;
+
+ if (dhdp->proptxstatus_txoff) {
+ rc = BCME_NORESOURCE;
+ return rc;
+ }
/* allocate a dummy packet */
p = PKTGET(ctx->osh, dummylen, TRUE);
if (p) {
PKTPULL(ctx->osh, p, dummylen);
DHD_PKTTAG_SET_H2DTAG(PKTTAG(p), 0);
- _dhd_wlfc_pushheader(ctx, p, TRUE, ta_bmp, entry->mac_handle, 0);
+ _dhd_wlfc_pushheader(ctx, p, TRUE, ta_bmp, entry->mac_handle, 0, 0, FALSE);
DHD_PKTTAG_SETSIGNALONLY(PKTTAG(p), 1);
+ DHD_PKTTAG_WLFCPKT_SET(PKTTAG(p), 1);
#ifdef PROP_TXSTATUS_DEBUG
ctx->stats.signal_only_pkts_sent++;
#endif
- rc = dhd_bus_txdata(((dhd_pub_t *)ctx->dhdp)->bus, p);
+
+#if defined(BCMPCIE)
+ rc = dhd_bus_txdata(dhdp->bus, p, ctx->host_ifidx);
+#else
+ rc = dhd_bus_txdata(dhdp->bus, p);
+#endif
if (rc != BCME_OK) {
+ _dhd_wlfc_pullheader(ctx, p);
PKTFREE(ctx->osh, p, TRUE);
}
}
entry = _dhd_wlfc_find_table_entry(ctx, p);
if (entry == NULL) {
- WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
return BCME_NOTFOUND;
}
/*
-	delayed packets go to sub_queue[2*prec + 0] to ensure
+	suppressed packets go to sub_queue[2*prec + 1] to ensure
order of delivery.
*/
- if (WLFC_PKTQ_PENQ(&entry->psq, ((prec << 1) + 1), p) == NULL) {
+ if (_dhd_wlfc_prec_enq_with_drop(ctx->dhdp, &entry->psq, p, ((prec << 1) + 1), FALSE,
+ WLFC_SEQCOUNT(entry, prec))
+ == FALSE) {
ctx->stats.delayq_full_error++;
/* WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__)); */
WLFC_DBGMESG(("s"));
return BCME_ERROR;
}
+
/* A packet has been pushed, update traffic availability bitmap, if applicable */
_dhd_wlfc_traffic_pending_check(ctx, entry, prec);
_dhd_wlfc_flow_control_check(ctx, &entry->psq, DHD_PKTTAG_IF(PKTTAG(p)));
int hslot = WLFC_HANGER_MAXITEMS;
bool send_tim_update = FALSE;
uint32 htod = 0;
+ uint16 htodseq = 0;
uint8 free_ctr;
+ int gen = 0xff;
+ dhd_pub_t *dhdp = (dhd_pub_t *)ctx->dhdp;
*slot = hslot;
}
if (entry == NULL) {
- WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
return BCME_ERROR;
}
+
if (entry->send_tim_signal) {
send_tim_update = TRUE;
entry->send_tim_signal = 0;
entry->traffic_lastreported_bmp = entry->traffic_pending_bmp;
}
+
if (header_needed) {
- hslot = dhd_wlfc_hanger_get_free_slot(ctx->hanger);
+ if (WLFC_GET_AFQ(dhdp->wlfc_mode)) {
+ hslot = (uint)(entry - &ctx->destination_entries.nodes[0]);
+ } else {
+ hslot = _dhd_wlfc_hanger_get_free_slot(ctx->hanger);
+ }
+ gen = entry->generation;
free_ctr = WLFC_SEQCOUNT(entry, DHD_PKTTAG_FIFO(PKTTAG(p)));
- DHD_PKTTAG_SET_H2DTAG(PKTTAG(p), htod);
- WLFC_PKTFLAG_SET_GENERATION(htod, entry->generation);
- entry->transit_count++;
+ } else {
+ if (WLFC_GET_REUSESEQ(dhdp->wlfc_mode)) {
+ htodseq = DHD_PKTTAG_H2DSEQ(PKTTAG(p));
+ }
+
+ hslot = WL_TXSTATUS_GET_HSLOT(DHD_PKTTAG_H2DTAG(PKTTAG(p)));
+
+ if (WLFC_GET_REORDERSUPP(dhdp->wlfc_mode)) {
+ gen = entry->generation;
+ } else if (WLFC_GET_AFQ(dhdp->wlfc_mode)) {
+ gen = WL_TXSTATUS_GET_GENERATION(DHD_PKTTAG_H2DTAG(PKTTAG(p)));
+ } else {
+ _dhd_wlfc_hanger_get_genbit(ctx->hanger, p, hslot, &gen);
+ }
+
+ free_ctr = WL_TXSTATUS_GET_FREERUNCTR(DHD_PKTTAG_H2DTAG(PKTTAG(p)));
+ /* remove old header */
+ _dhd_wlfc_pullheader(ctx, p);
}
- else {
- hslot = WLFC_PKTID_HSLOT_GET(DHD_PKTTAG_H2DTAG(PKTTAG(p)));
- free_ctr = WLFC_PKTID_FREERUNCTR_GET(DHD_PKTTAG_H2DTAG(PKTTAG(p)));
+
+ if (hslot >= WLFC_HANGER_MAXITEMS) {
+ DHD_ERROR(("Error: %s():no hanger slot available\n", __FUNCTION__));
+ return BCME_ERROR;
}
- WLFC_PKTID_HSLOT_SET(htod, hslot);
- WLFC_PKTID_FREERUNCTR_SET(htod, free_ctr);
- DHD_PKTTAG_SETPKTDIR(PKTTAG(p), 1);
- WL_TXSTATUS_SET_FLAGS(htod, WLFC_PKTFLAG_PKTFROMHOST);
- WL_TXSTATUS_SET_FIFO(htod, DHD_PKTTAG_FIFO(PKTTAG(p)));
+ WL_TXSTATUS_SET_FREERUNCTR(htod, free_ctr);
+ WL_TXSTATUS_SET_HSLOT(htod, hslot);
+ WL_TXSTATUS_SET_FIFO(htod, DHD_PKTTAG_FIFO(PKTTAG(p)));
+ WL_TXSTATUS_SET_FLAGS(htod, WLFC_PKTFLAG_PKTFROMHOST);
+ WL_TXSTATUS_SET_GENERATION(htod, gen);
+ DHD_PKTTAG_SETPKTDIR(PKTTAG(p), 1);
if (!DHD_PKTTAG_CREDITCHECK(PKTTAG(p))) {
/*
explicit request from the firmware side.
*/
WLFC_PKTFLAG_SET_PKTREQUESTED(htod);
- }
- else {
+ } else {
WLFC_PKTFLAG_CLR_PKTREQUESTED(htod);
}
- if (header_needed) {
- rc = _dhd_wlfc_pushheader(ctx, p, send_tim_update,
- entry->traffic_lastreported_bmp, entry->mac_handle, htod);
- if (rc == BCME_OK) {
- DHD_PKTTAG_SET_H2DTAG(PKTTAG(p), htod);
+
+ rc = _dhd_wlfc_pushheader(ctx, p, send_tim_update,
+ entry->traffic_lastreported_bmp, entry->mac_handle, htod, htodseq, FALSE);
+ if (rc == BCME_OK) {
+ DHD_PKTTAG_SET_H2DTAG(PKTTAG(p), htod);
+
+ if (!WLFC_GET_AFQ(dhdp->wlfc_mode) && header_needed) {
/*
a new header was created for this packet.
push to hanger slot and scrub q. Since bus
send succeeded, increment seq number as well.
*/
- rc = dhd_wlfc_hanger_pushpkt(ctx->hanger, p, hslot);
+ rc = _dhd_wlfc_hanger_pushpkt(ctx->hanger, p, hslot);
if (rc == BCME_OK) {
- /* increment free running sequence count */
- WLFC_INCR_SEQCOUNT(entry, DHD_PKTTAG_FIFO(PKTTAG(p)));
#ifdef PROP_TXSTATUS_DEBUG
((wlfc_hanger_t*)(ctx->hanger))->items[hslot].push_time =
OSL_SYSUPTIME();
#endif
- }
- else {
- WLFC_DBGMESG(("%s() hanger_pushpkt() failed, rc: %d\n",
+ } else {
+ DHD_ERROR(("%s() hanger_pushpkt() failed, rc: %d\n",
__FUNCTION__, rc));
}
}
- }
- else {
- int gen;
- /* remove old header */
- rc = _dhd_wlfc_pullheader(ctx, p);
- if (rc == BCME_OK) {
- hslot = WLFC_PKTID_HSLOT_GET(DHD_PKTTAG_H2DTAG(PKTTAG(p)));
- dhd_wlfc_hanger_get_genbit(ctx->hanger, p, hslot, &gen);
-
- WLFC_PKTFLAG_SET_GENERATION(htod, gen);
- free_ctr = WLFC_PKTID_FREERUNCTR_GET(DHD_PKTTAG_H2DTAG(PKTTAG(p)));
- /* push new header */
- _dhd_wlfc_pushheader(ctx, p, send_tim_update,
- entry->traffic_lastreported_bmp, entry->mac_handle, htod);
+ if ((rc == BCME_OK) && header_needed) {
+ /* increment free running sequence count */
+ WLFC_INCR_SEQCOUNT(entry, DHD_PKTTAG_FIFO(PKTTAG(p)));
}
}
*slot = hslot;
}
static int
-_dhd_wlfc_is_destination_closed(athost_wl_status_info_t* ctx,
+_dhd_wlfc_is_destination_open(athost_wl_status_info_t* ctx,
wlfc_mac_descriptor_t* entry, int prec)
{
+ if (entry->interface_id >= WLFC_MAX_IFNUM) {
+ ASSERT(&ctx->destination_entries.other == entry);
+ return 1;
+ }
if (ctx->destination_entries.interfaces[entry->interface_id].iftype ==
WLC_E_IF_ROLE_P2P_GO) {
/* - destination interface is of type p2p GO.
firmware storing the destination-specific-requested packet in queue.
*/
if ((entry->state == WLFC_STATE_CLOSE) && (entry->requested_credit == 0) &&
- (entry->requested_packet == 0))
- return 1;
+ (entry->requested_packet == 0)) {
+ return 0;
+ }
}
/* AP, p2p_go -> unicast desc entry, STA/p2p_cl -> interface desc. entry */
if (((entry->state == WLFC_STATE_CLOSE) && (entry->requested_credit == 0) &&
(entry->requested_packet == 0)) ||
- (!(entry->ac_bitmap & (1 << prec))))
- return 1;
+ (!(entry->ac_bitmap & (1 << prec)))) {
+ return 0;
+ }
- return 0;
+ return 1;
}
static void*
-_dhd_wlfc_deque_delayedq(athost_wl_status_info_t* ctx,
- int prec, uint8* ac_credit_spent, uint8* needs_hdr, wlfc_mac_descriptor_t** entry_out)
+_dhd_wlfc_deque_delayedq(athost_wl_status_info_t* ctx, int prec,
+ uint8* ac_credit_spent, uint8* needs_hdr, wlfc_mac_descriptor_t** entry_out,
+ bool only_no_credit)
{
+ dhd_pub_t *dhdp = (dhd_pub_t *)ctx->dhdp;
wlfc_mac_descriptor_t* entry;
- wlfc_mac_descriptor_t* table;
- uint8 token_pos;
int total_entries;
void* p = NULL;
- int pout;
int i;
*entry_out = NULL;
- token_pos = ctx->token_pos[prec];
/* most cases a packet will count against FIFO credit */
- *ac_credit_spent = 1;
- *needs_hdr = 1;
+ *ac_credit_spent = ((prec == AC_COUNT) && !ctx->bcmc_credit_supported) ? 0 : 1;
/* search all entries, include nodes as well as interfaces */
- table = (wlfc_mac_descriptor_t*)&ctx->destination_entries;
- total_entries = sizeof(ctx->destination_entries)/sizeof(wlfc_mac_descriptor_t);
+ if (only_no_credit) {
+ total_entries = ctx->requested_entry_count;
+ } else {
+ total_entries = ctx->active_entry_count;
+ }
for (i = 0; i < total_entries; i++) {
- entry = &table[(token_pos + i) % total_entries];
- if (entry->occupied && !entry->deleting) {
- if (!_dhd_wlfc_is_destination_closed(ctx, entry, prec)) {
- p = pktq_mdeq(&entry->psq,
- /* higher precedence will be picked up first,
- * i.e. suppressed packets before delayed ones
- */
- NBITVAL((prec << 1) + 1), &pout);
- *needs_hdr = 0;
-
- if (p == NULL) {
- if (entry->suppressed == TRUE) {
- if ((entry->suppr_transit_count <=
- entry->suppress_count)) {
- entry->suppressed = FALSE;
- } else {
- return NULL;
- }
- }
- /* De-Q from delay Q */
- p = pktq_mdeq(&entry->psq,
- NBITVAL((prec << 1)),
- &pout);
- *needs_hdr = 1;
+ if (only_no_credit) {
+ entry = ctx->requested_entry[i];
+ } else {
+ entry = ctx->active_entry_head;
+ /* move head to ensure fair round-robin */
+ ctx->active_entry_head = ctx->active_entry_head->next;
+ }
+ ASSERT(entry);
+
+ if (entry->occupied && _dhd_wlfc_is_destination_open(ctx, entry, prec) &&
+ (entry->transit_count < WL_TXSTATUS_FREERUNCTR_MASK) &&
+ !(WLFC_GET_REORDERSUPP(dhdp->wlfc_mode) && entry->suppressed)) {
+ if (entry->state == WLFC_STATE_CLOSE) {
+ *ac_credit_spent = 0;
+ }
+
+ /* higher precedence will be picked up first,
+ * i.e. suppressed packets before delayed ones
+ */
+ p = pktq_pdeq(&entry->psq, PSQ_SUP_IDX(prec));
+ *needs_hdr = 0;
+ if (p == NULL) {
+ if (entry->suppressed == TRUE) {
+ /* skip this entry */
+ continue;
}
+ /* De-Q from delay Q */
+ p = pktq_pdeq(&entry->psq, PSQ_DLY_IDX(prec));
+ *needs_hdr = 1;
+ }
- if (p != NULL) {
- /* did the packet come from suppress sub-queue? */
- if (entry->requested_credit > 0) {
- entry->requested_credit--;
+ if (p != NULL) {
+ /* did the packet come from suppress sub-queue? */
+ if (entry->requested_credit > 0) {
+ entry->requested_credit--;
#ifdef PROP_TXSTATUS_DEBUG
- entry->dstncredit_sent_packets++;
+ entry->dstncredit_sent_packets++;
#endif
- /*
- if the packet was pulled out while destination is in
- closed state but had a non-zero packets requested,
- then this should not count against the FIFO credit.
- That is due to the fact that the firmware will
- most likely hold onto this packet until a suitable
- time later to push it to the appropriate AC FIFO.
- */
- if (entry->state == WLFC_STATE_CLOSE)
- *ac_credit_spent = 0;
- }
- else if (entry->requested_packet > 0) {
- entry->requested_packet--;
- DHD_PKTTAG_SETONETIMEPKTRQST(PKTTAG(p));
- if (entry->state == WLFC_STATE_CLOSE)
- *ac_credit_spent = 0;
- }
- /* move token to ensure fair round-robin */
- ctx->token_pos[prec] =
- (token_pos + i + 1) % total_entries;
- *entry_out = entry;
- _dhd_wlfc_flow_control_check(ctx, &entry->psq,
- DHD_PKTTAG_IF(PKTTAG(p)));
- /*
- A packet has been picked up, update traffic
- availability bitmap, if applicable
- */
- _dhd_wlfc_traffic_pending_check(ctx, entry, prec);
- return p;
+ } else if (entry->requested_packet > 0) {
+ entry->requested_packet--;
+ DHD_PKTTAG_SETONETIMEPKTRQST(PKTTAG(p));
}
- }
- }
- }
- return NULL;
-}
-
-void *
-_dhd_wlfc_pktq_peek_tail(struct pktq *pq, int *prec_out)
-{
- int prec;
-
- ASSERT(pq);
-
- if (pq->len == 0)
- return NULL;
-
- for (prec = 0; prec < pq->hi_prec; prec++)
- /* only pick packets from dealyed-q */
- if (((prec & 1) == 0) && pq->q[prec].head)
- break;
-
- if (prec_out)
- *prec_out = prec;
-
- return (pq->q[prec].tail);
-}
-
-bool
-_dhd_wlfc_prec_enq_with_drop(dhd_pub_t *dhdp, struct pktq *pq, void *pkt, int prec)
-{
- void *p = NULL;
- int eprec = -1; /* precedence to evict from */
-
- ASSERT(dhdp && pq && pkt);
- ASSERT(prec >= 0 && prec < pq->num_prec);
-
- /* Fast case, precedence queue is not full and we are also not
- * exceeding total queue length
- */
- if (!pktq_pfull(pq, prec) && !pktq_full(pq)) {
- pktq_penq(pq, prec, pkt);
- return TRUE;
- }
- /* Determine precedence from which to evict packet, if any */
- if (pktq_pfull(pq, prec))
- eprec = prec;
- else if (pktq_full(pq)) {
- p = _dhd_wlfc_pktq_peek_tail(pq, &eprec);
- if (!p) {
- WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__));
- return FALSE;
- }
- if ((eprec > prec) || (eprec < 0)) {
- if (!pktq_pempty(pq, prec)) {
- eprec = prec;
- } else {
- return FALSE;
+ *entry_out = entry;
+ ctx->pkt_cnt_in_q[DHD_PKTTAG_IF(PKTTAG(p))][prec]--;
+ ctx->pkt_cnt_per_ac[prec]--;
+ _dhd_wlfc_flow_control_check(ctx, &entry->psq,
+ DHD_PKTTAG_IF(PKTTAG(p)));
+ /*
+ A packet has been picked up, update traffic
+ availability bitmap, if applicable
+ */
+ _dhd_wlfc_traffic_pending_check(ctx, entry, prec);
+ return p;
}
}
}
-
- /* Evict if needed */
- if (eprec >= 0) {
- /* Detect queueing to unconfigured precedence */
- ASSERT(!pktq_pempty(pq, eprec));
- /* Evict all fragmented frames */
- dhd_prec_drop_pkts(dhdp->osh, pq, eprec);
- }
-
- /* Enqueue */
- p = pktq_penq(pq, prec, pkt);
- if (!p) {
- WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__));
- return FALSE;
- }
-
- return TRUE;
+ return NULL;
}
static int
if (pktbuf != NULL) {
entry = _dhd_wlfc_find_table_entry(ctx, pktbuf);
-
if (entry == NULL) {
- WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
return BCME_ERROR;
}
- delayed packets go to sub_queue[2*prec + 0] to ensure
order of delivery.
*/
- if (_dhd_wlfc_prec_enq_with_drop(ctx->dhdp, &entry->psq, pktbuf, (prec << 1))
+ if (_dhd_wlfc_prec_enq_with_drop(ctx->dhdp, &entry->psq, pktbuf, (prec << 1),
+ FALSE, WLFC_SEQCOUNT(entry, prec))
== FALSE) {
WLFC_DBGMESG(("D"));
- /* dhd_txcomplete(ctx->dhdp, pktbuf, FALSE); */
- PKTFREE(ctx->osh, pktbuf, TRUE);
ctx->stats.delayq_full_error++;
return BCME_ERROR;
}
#ifdef QMONITOR
dhd_qmon_tx(&entry->qmon);
#endif
+
/*
A packet has been pushed, update traffic availability bitmap,
if applicable
*/
_dhd_wlfc_traffic_pending_check(ctx, entry, prec);
-
}
+
return BCME_OK;
}
-bool ifpkt_fn(void* p, int ifid)
+static bool _dhd_wlfc_ifpkt_fn(void* p, void *p_ifid)
+{
+ if (!p || !p_ifid)
+ return FALSE;
+
+ return (DHD_PKTTAG_WLFCPKT(PKTTAG(p))&& (*((uint8 *)p_ifid) == DHD_PKTTAG_IF(PKTTAG(p))));
+}
+
+static bool _dhd_wlfc_entrypkt_fn(void* p, void *entry)
+{
+ if (!p || !entry)
+ return FALSE;
+
+ return (DHD_PKTTAG_WLFCPKT(PKTTAG(p))&& (entry == DHD_PKTTAG_ENTRY(PKTTAG(p))));
+}
+
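+/*
+ * In implied-credit mode, give back the FIFO credit consumed by a credit-checked
+ * packet: repay the highest-priority lender AC that the packet's fifo still owes,
+ * otherwise credit the packet's own fifo.
+ */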
+static void
+_dhd_wlfc_return_implied_credit(athost_wl_status_info_t* wlfc, void* pkt)
+{
+ dhd_pub_t *dhdp;
+
+ if (!wlfc || !pkt) {
+ return;
+ }
+
+ dhdp = (dhd_pub_t *)(wlfc->dhdp);
+ if (dhdp && (dhdp->proptxstatus_mode == WLFC_FCMODE_IMPLIED_CREDIT) &&
+ DHD_PKTTAG_CREDITCHECK(PKTTAG(pkt))) {
+ int lender, credit_returned = 0;
+ uint8 fifo_id = DHD_PKTTAG_FIFO(PKTTAG(pkt));
+
+ /* Note that borrower is fifo_id */
+ /* Return credits to highest priority lender first */
+ for (lender = AC_COUNT; lender >= 0; lender--) {
+ if (wlfc->credits_borrowed[fifo_id][lender] > 0) {
+ wlfc->FIFO_credit[lender]++;
+ wlfc->credits_borrowed[fifo_id][lender]--;
+ credit_returned = 1;
+ break;
+ }
+ }
+
+ if (!credit_returned) {
+ wlfc->FIFO_credit[fifo_id]++;
+ }
+ }
+}
+
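+/*
+ * Record 'pkt_state' (and an optional 'pkt_txstatus') for hanger slot 'slot_id'. Once
+ * the slot's packet has seen both TXCOMPLETE and a TXSTATUS/CLEANUP event it is popped
+ * from the hanger; for the cleanup-only case its transit/suppress counters and implied
+ * credit are rolled back first, then the packet is completed and freed. A slot with no
+ * packet attached is simply marked free on TXSTATUS.
+ */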
+static void
+_dhd_wlfc_hanger_free_pkt(athost_wl_status_info_t* wlfc, uint32 slot_id, uint8 pkt_state,
+ int pkt_txstatus)
+{
+ wlfc_hanger_t* hanger;
+ wlfc_hanger_item_t* item;
+
+ if (!wlfc)
+ return;
+
+ hanger = (wlfc_hanger_t*)wlfc->hanger;
+ if (!hanger)
+ return;
+
+ if (slot_id == WLFC_HANGER_MAXITEMS)
+ return;
+
+ item = &hanger->items[slot_id];
+ item->pkt_state |= pkt_state;
+ if (pkt_txstatus != -1) {
+ item->pkt_txstatus = pkt_txstatus;
+ }
+
+ if (item->pkt) {
+ if ((item->pkt_state & WLFC_HANGER_PKT_STATE_TXCOMPLETE) &&
+ (item->pkt_state & (WLFC_HANGER_PKT_STATE_TXSTATUS |
+ WLFC_HANGER_PKT_STATE_CLEANUP))) {
+ void *p = NULL;
+ void *pkt = item->pkt;
+ uint8 old_state = item->state;
+ int ret = _dhd_wlfc_hanger_poppkt(wlfc->hanger, slot_id, &p, TRUE);
+ BCM_REFERENCE(ret);
+ BCM_REFERENCE(pkt);
+ ASSERT((ret == BCME_OK) && p && (pkt == p));
+
+ /* free packet */
+ if (!(item->pkt_state & WLFC_HANGER_PKT_STATE_TXSTATUS)) {
+ /* cleanup case */
+ wlfc_mac_descriptor_t *entry = _dhd_wlfc_find_table_entry(wlfc, p);
+
+ ASSERT(entry);
+ entry->transit_count--;
+ if (entry->suppressed &&
+ (--entry->suppr_transit_count == 0)) {
+ entry->suppressed = FALSE;
+ }
+ _dhd_wlfc_return_implied_credit(wlfc, p);
+ wlfc->stats.cleanup_fw_cnt++;
+ /* slot not freeable yet */
+ item->state = old_state;
+ }
+
+ wlfc->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(p))]
+ [DHD_PKTTAG_FIFO(PKTTAG(p))]--;
+ wlfc->stats.pktout++;
+ dhd_txcomplete((dhd_pub_t *)wlfc->dhdp, p, item->pkt_txstatus);
+ PKTFREE(wlfc->osh, p, TRUE);
+ }
+ } else {
+ if (item->pkt_state & WLFC_HANGER_PKT_STATE_TXSTATUS) {
+ /* free slot */
+ ASSERT(item->state != WLFC_HANGER_ITEM_STATE_FREE);
+ item->state = WLFC_HANGER_ITEM_STATE_FREE;
+ }
+ }
+}
+
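+/*
+ * Free every packet in 'pq' that matches 'fn(p, arg)' (all packets when fn is NULL).
+ * For PSQ queues this drops the hanger reference of suppressed packets (non-AFQ mode),
+ * updates in-queue counters and pushes a fake BDC header for delayed packets; for AFQ
+ * queues it rolls back transit/suppress counts and returns implied credit. When 'dir'
+ * is TRUE the packet is also reported through dhd_txcomplete() and counted as pktout.
+ */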
+static void
+_dhd_wlfc_pktq_flush(athost_wl_status_info_t* ctx, struct pktq *pq,
+ bool dir, f_processpkt_t fn, void *arg, q_type_t q_type)
+{
+ int prec;
+ dhd_pub_t *dhdp = (dhd_pub_t *)ctx->dhdp;
+
+ ASSERT(dhdp);
+
+ /* Optimize flush, if pktq len = 0, just return.
+ * pktq len of 0 means pktq's prec q's are all empty.
+ */
+ if (pq->len == 0) {
+ return;
+ }
+
+
+ for (prec = 0; prec < pq->num_prec; prec++) {
+ struct pktq_prec *q;
+ void *p, *prev = NULL;
+
+ q = &pq->q[prec];
+ p = q->head;
+ while (p) {
+ if (fn == NULL || (*fn)(p, arg)) {
+ bool head = (p == q->head);
+ if (head)
+ q->head = PKTLINK(p);
+ else
+ PKTSETLINK(prev, PKTLINK(p));
+ if (q_type == Q_TYPE_PSQ) {
+ if (!WLFC_GET_AFQ(dhdp->wlfc_mode) && (prec & 1)) {
+ _dhd_wlfc_hanger_remove_reference(ctx->hanger, p);
+ }
+ ctx->pkt_cnt_in_q[DHD_PKTTAG_IF(PKTTAG(p))][prec>>1]--;
+ ctx->pkt_cnt_per_ac[prec>>1]--;
+ ctx->stats.cleanup_psq_cnt++;
+ if (!(prec & 1)) {
+					/* pkt is in the delayed q, so push a fake BDC header for
+ * dhd_tcpack_check_xmit() and dhd_txcomplete().
+ */
+ _dhd_wlfc_pushheader(ctx, p, FALSE, 0, 0,
+ 0, 0, TRUE);
+#ifdef DHDTCPACK_SUPPRESS
+ if (dhd_tcpack_check_xmit(dhdp, p) == BCME_ERROR) {
+ DHD_ERROR(("%s %d: tcpack_suppress ERROR!!!"
+ " Stop using it\n",
+ __FUNCTION__, __LINE__));
+ dhd_tcpack_suppress_set(dhdp,
+ TCPACK_SUP_OFF);
+ }
+#endif /* DHDTCPACK_SUPPRESS */
+ }
+ } else if (q_type == Q_TYPE_AFQ) {
+ wlfc_mac_descriptor_t* entry =
+ _dhd_wlfc_find_table_entry(ctx, p);
+ entry->transit_count--;
+ if (entry->suppressed &&
+ (--entry->suppr_transit_count == 0)) {
+ entry->suppressed = FALSE;
+ }
+ _dhd_wlfc_return_implied_credit(ctx, p);
+ ctx->stats.cleanup_fw_cnt++;
+ }
+ PKTSETLINK(p, NULL);
+ if (dir) {
+ ctx->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(p))][prec>>1]--;
+ ctx->stats.pktout++;
+ dhd_txcomplete(dhdp, p, FALSE);
+ }
+ PKTFREE(ctx->osh, p, dir);
+
+ q->len--;
+ pq->len--;
+ p = (head ? q->head : PKTLINK(prev));
+ } else {
+ prev = p;
+ p = PKTLINK(p);
+ }
+ }
+
+ if (q->head == NULL) {
+ ASSERT(q->len == 0);
+ q->tail = NULL;
+ }
+
+ }
+
+ if (fn == NULL)
+ ASSERT(pq->len == 0);
+}
+
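+/*
+ * Dequeue and return the first packet at precedence 'prec' of 'pq' for which
+ * 'fn(p, arg)' is TRUE (the head packet when fn is NULL), or NULL if none matches.
+ */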
+static void*
+_dhd_wlfc_pktq_pdeq_with_fn(struct pktq *pq, int prec, f_processpkt_t fn, void *arg)
+{
+ struct pktq_prec *q;
+ void *p, *prev = NULL;
+
+ ASSERT(prec >= 0 && prec < pq->num_prec);
+
+ q = &pq->q[prec];
+ p = q->head;
+
+ while (p) {
+ if (fn == NULL || (*fn)(p, arg)) {
+ break;
+ } else {
+ prev = p;
+ p = PKTLINK(p);
+ }
+ }
+ if (p == NULL)
+ return NULL;
+
+ if (prev == NULL) {
+ if ((q->head = PKTLINK(p)) == NULL) {
+ q->tail = NULL;
+ }
+ } else {
+ PKTSETLINK(prev, PKTLINK(p));
+ if (q->tail == p) {
+ q->tail = prev;
+ }
+ }
+
+ q->len--;
+
+ pq->len--;
+
+ PKTSETLINK(p, NULL);
+
+ return p;
+}
+
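+/*
+ * Remove every packet matching 'fn(pkt, arg)' from the bus tx queue (under the txq
+ * lock), then for each one clear its TCPACK-suppress state, drop its hanger reference
+ * (non-AFQ mode), roll back transit/suppress counts, return implied credit and
+ * complete/free it.
+ */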
+static void
+_dhd_wlfc_cleanup_txq(dhd_pub_t *dhd, f_processpkt_t fn, void *arg)
+{
+ int prec;
+ void *pkt = NULL, *head = NULL, *tail = NULL;
+ struct pktq *txq = (struct pktq *)dhd_bus_txq(dhd->bus);
+ athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+ wlfc_hanger_t* h = (wlfc_hanger_t*)wlfc->hanger;
+ wlfc_mac_descriptor_t* entry;
+
+ dhd_os_sdlock_txq(dhd);
+ for (prec = 0; prec < txq->num_prec; prec++) {
+ while ((pkt = _dhd_wlfc_pktq_pdeq_with_fn(txq, prec, fn, arg))) {
+#ifdef DHDTCPACK_SUPPRESS
+ if (dhd_tcpack_check_xmit(dhd, pkt) == BCME_ERROR) {
+ DHD_ERROR(("%s %d: tcpack_suppress ERROR!!! Stop using it\n",
+ __FUNCTION__, __LINE__));
+ dhd_tcpack_suppress_set(dhd, TCPACK_SUP_OFF);
+ }
+#endif /* DHDTCPACK_SUPPRESS */
+ if (!head) {
+ head = pkt;
+ }
+ if (tail) {
+ PKTSETLINK(tail, pkt);
+ }
+ tail = pkt;
+ }
+ }
+ dhd_os_sdunlock_txq(dhd);
+
+
+ while ((pkt = head)) {
+ head = PKTLINK(pkt);
+ PKTSETLINK(pkt, NULL);
+ entry = _dhd_wlfc_find_table_entry(wlfc, pkt);
+
+ if (!WLFC_GET_AFQ(dhd->wlfc_mode) &&
+ !_dhd_wlfc_hanger_remove_reference(h, pkt)) {
+ DHD_ERROR(("%s: can't find pkt(%p) in hanger, free it anyway\n",
+ __FUNCTION__, pkt));
+ }
+ entry->transit_count--;
+ if (entry->suppressed &&
+ (--entry->suppr_transit_count == 0)) {
+ entry->suppressed = FALSE;
+ }
+ _dhd_wlfc_return_implied_credit(wlfc, pkt);
+ wlfc->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(pkt))][DHD_PKTTAG_FIFO(PKTTAG(pkt))]--;
+ wlfc->stats.pktout++;
+ wlfc->stats.cleanup_txq_cnt++;
+ dhd_txcomplete(dhd, pkt, FALSE);
+ PKTFREE(wlfc->osh, pkt, TRUE);
+ }
+}
+
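+/*
+ * Flush driver-held wlfc packets that match 'fn(pkt, arg)' (all packets when fn is
+ * NULL), in the order bus->txq, then each entry's psq (and afq in AFQ mode), then any
+ * packets still parked in the hanger, which remain pending until their txstatus arrives.
+ */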
+void
+_dhd_wlfc_cleanup(dhd_pub_t *dhd, f_processpkt_t fn, void *arg)
{
- return (ifid == DHD_PKTTAG_IF(PKTTAG(p)));
+ int i;
+ int total_entries;
+ athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+ wlfc_mac_descriptor_t* table;
+ wlfc_hanger_t* h = (wlfc_hanger_t*)wlfc->hanger;
+
+ wlfc->stats.cleanup_txq_cnt = 0;
+ wlfc->stats.cleanup_psq_cnt = 0;
+ wlfc->stats.cleanup_fw_cnt = 0;
+ /*
+	 * flush sequence should be txq -> psq -> hanger/afq; the hanger has to be flushed last
+ */
+ /* flush bus->txq */
+ _dhd_wlfc_cleanup_txq(dhd, fn, arg);
+
+
+ /* flush psq, search all entries, include nodes as well as interfaces */
+ total_entries = sizeof(wlfc->destination_entries)/sizeof(wlfc_mac_descriptor_t);
+ table = (wlfc_mac_descriptor_t*)&wlfc->destination_entries;
+
+ for (i = 0; i < total_entries; i++) {
+ if (table[i].occupied) {
+ /* release packets held in PSQ (both delayed and suppressed) */
+ if (table[i].psq.len) {
+ WLFC_DBGMESG(("%s(): PSQ[%d].len = %d\n",
+ __FUNCTION__, i, table[i].psq.len));
+ _dhd_wlfc_pktq_flush(wlfc, &table[i].psq, TRUE,
+ fn, arg, Q_TYPE_PSQ);
+ }
+
+ /* free packets held in AFQ */
+ if (WLFC_GET_AFQ(dhd->wlfc_mode) && (table[i].afq.len)) {
+ _dhd_wlfc_pktq_flush(wlfc, &table[i].afq, TRUE,
+ fn, arg, Q_TYPE_AFQ);
+ }
+
+ if ((fn == NULL) && (&table[i] != &wlfc->destination_entries.other)) {
+ table[i].occupied = 0;
+ if (table[i].transit_count || table[i].suppr_transit_count) {
+					DHD_ERROR(("%s: table[%d] transit(%d), suppr_transit(%d)\n",
+ __FUNCTION__, i,
+ table[i].transit_count,
+ table[i].suppr_transit_count));
+ }
+ }
+ }
+ }
+
+ /*
+	. flush remaining pkts held only in the hanger, i.e. in neither bus->txq nor psq.
+	. these pkts were already successfully downloaded to the dongle.
+	. a hanger slot cannot be marked free until its txstatus update is received.
+ */
+ if (!WLFC_GET_AFQ(dhd->wlfc_mode)) {
+ for (i = 0; i < h->max_items; i++) {
+ if ((h->items[i].state == WLFC_HANGER_ITEM_STATE_INUSE) ||
+ (h->items[i].state == WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED)) {
+ if (fn == NULL || (*fn)(h->items[i].pkt, arg)) {
+ _dhd_wlfc_hanger_free_pkt(wlfc, i,
+ WLFC_HANGER_PKT_STATE_CLEANUP, FALSE);
+ }
+ }
+ }
+ }
+
+ return;
}
static int
_dhd_wlfc_mac_entry_update(athost_wl_status_info_t* ctx, wlfc_mac_descriptor_t* entry,
- ewlfc_mac_entry_action_t action, uint8 ifid, uint8 iftype, uint8* ea)
+ uint8 action, uint8 ifid, uint8 iftype, uint8* ea,
+ f_processpkt_t fn, void *arg)
{
int rc = BCME_OK;
dhd_qmon_reset(&entry->qmon);
#endif
- if (action == eWLFC_MAC_ENTRY_ACTION_ADD) {
- entry->occupied = 1;
- entry->state = WLFC_STATE_OPEN;
- entry->requested_credit = 0;
- entry->interface_id = ifid;
- entry->iftype = iftype;
- entry->ac_bitmap = 0xff; /* update this when handling APSD */
- /* for an interface entry we may not care about the MAC address */
- if (ea != NULL)
- memcpy(&entry->ea[0], ea, ETHER_ADDR_LEN);
- pktq_init(&entry->psq, WLFC_PSQ_PREC_COUNT, WLFC_PSQ_LEN);
- }
- else if (action == eWLFC_MAC_ENTRY_ACTION_UPDATE) {
+ if ((action == eWLFC_MAC_ENTRY_ACTION_ADD) || (action == eWLFC_MAC_ENTRY_ACTION_UPDATE)) {
entry->occupied = 1;
entry->state = WLFC_STATE_OPEN;
entry->requested_credit = 0;
/* for an interface entry we may not care about the MAC address */
if (ea != NULL)
memcpy(&entry->ea[0], ea, ETHER_ADDR_LEN);
- }
- else if (action == eWLFC_MAC_ENTRY_ACTION_DEL) {
+
+ if (action == eWLFC_MAC_ENTRY_ACTION_ADD) {
+ dhd_pub_t *dhdp = (dhd_pub_t *)(ctx->dhdp);
+ pktq_init(&entry->psq, WLFC_PSQ_PREC_COUNT, WLFC_PSQ_LEN);
+ if (WLFC_GET_AFQ(dhdp->wlfc_mode)) {
+ pktq_init(&entry->afq, WLFC_AFQ_PREC_COUNT, WLFC_PSQ_LEN);
+ }
+
+ if (entry->next == NULL) {
+ /* not linked to anywhere, add to tail */
+ if (ctx->active_entry_head) {
+ entry->prev = ctx->active_entry_head->prev;
+ ctx->active_entry_head->prev->next = entry;
+ ctx->active_entry_head->prev = entry;
+ entry->next = ctx->active_entry_head;
+
+ } else {
+ ASSERT(ctx->active_entry_count == 0);
+ entry->prev = entry->next = entry;
+ ctx->active_entry_head = entry;
+ }
+ ctx->active_entry_count++;
+ } else {
+ DHD_ERROR(("%s():%d, entry(%d)\n", __FUNCTION__, __LINE__,
+ (int)(entry - &ctx->destination_entries.nodes[0])));
+ }
+ }
+ } else if (action == eWLFC_MAC_ENTRY_ACTION_DEL) {
/* When the entry is deleted, the packets that are queued in the entry must be
- cleanup. The cleanup action should be before the occupied is set as 0. The
- flag deleting is set to avoid de-queue action when these queues are being
- cleanup
+		   cleaned up. The cleanup action must take place before occupied is set to 0.
*/
- entry->deleting = 1;
- dhd_wlfc_cleanup(ctx->dhdp, ifpkt_fn, ifid);
+ _dhd_wlfc_cleanup(ctx->dhdp, fn, arg);
_dhd_wlfc_flow_control_check(ctx, &entry->psq, ifid);
- entry->deleting = 0;
entry->occupied = 0;
entry->suppressed = 0;
entry->requested_credit = 0;
entry->transit_count = 0;
entry->suppr_transit_count = 0;
- entry->suppress_count = 0;
memset(&entry->ea[0], 0, ETHER_ADDR_LEN);
- /* enable after packets are queued-deqeued properly.
- pktq_flush(dhd->osh, &entry->psq, FALSE, NULL, 0);
- */
+ if (entry->next) {
+ /* not floating, remove from Q */
+ if (ctx->active_entry_count <= 1) {
+ /* last item */
+ ctx->active_entry_head = NULL;
+ ctx->active_entry_count = 0;
+ } else {
+ entry->prev->next = entry->next;
+ entry->next->prev = entry->prev;
+ if (entry == ctx->active_entry_head) {
+ ctx->active_entry_head = entry->next;
+ }
+ ctx->active_entry_count--;
+ }
+ entry->next = entry->prev = NULL;
+ } else {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ }
}
return rc;
}
-int
-_dhd_wlfc_borrow_credit(athost_wl_status_info_t* ctx, uint8 available_credit_map, int borrower_ac)
+#ifdef LIMIT_BORROW
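+/*
+ * Borrow one credit for 'borrower_ac' from the lowest-priority AC up to
+ * 'highest_lender_ac' whose remaining credit exceeds its reserve
+ * (Init_FIFO_credit / WLFC_BORROW_LIMIT_RATIO, or zero when 'bBorrowAll').
+ * Returns the lender AC, or -1 if no AC can lend.
+ */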
+static int
+_dhd_wlfc_borrow_credit(athost_wl_status_info_t* ctx, int highest_lender_ac, int borrower_ac,
+ bool bBorrowAll)
{
- int lender_ac;
- int rc = BCME_ERROR;
+ int lender_ac, borrow_limit = 0;
+ int rc = -1;
- if (ctx == NULL || available_credit_map == 0) {
- WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__));
- return BCME_BADARG;
+ if (ctx == NULL) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return -1;
}
/* Borrow from lowest priority available AC (including BC/MC credits) */
- for (lender_ac = 0; lender_ac <= AC_COUNT; lender_ac++) {
- if ((available_credit_map && (1 << lender_ac)) &&
- (ctx->FIFO_credit[lender_ac] > 0)) {
+ for (lender_ac = 0; lender_ac <= highest_lender_ac; lender_ac++) {
+ if (!bBorrowAll) {
+ borrow_limit = ctx->Init_FIFO_credit[lender_ac]/WLFC_BORROW_LIMIT_RATIO;
+ } else {
+ borrow_limit = 0;
+ }
+
+ if (ctx->FIFO_credit[lender_ac] > borrow_limit) {
ctx->credits_borrowed[borrower_ac][lender_ac]++;
ctx->FIFO_credit[lender_ac]--;
- rc = BCME_OK;
+ rc = lender_ac;
break;
}
}
return rc;
}
-int
-dhd_wlfc_interface_entry_update(void* state,
- ewlfc_mac_entry_action_t action, uint8 ifid, uint8 iftype, uint8* ea)
+static int _dhd_wlfc_return_credit(athost_wl_status_info_t* ctx, int lender_ac, int borrower_ac)
+{
+ if ((ctx == NULL) || (lender_ac < 0) || (lender_ac > AC_COUNT) ||
+ (borrower_ac < 0) || (borrower_ac > AC_COUNT)) {
+ DHD_ERROR(("Error: %s():%d, ctx(%p), lender_ac(%d), borrower_ac(%d)\n",
+ __FUNCTION__, __LINE__, ctx, lender_ac, borrower_ac));
+
+ return BCME_BADARG;
+ }
+
+ ctx->credits_borrowed[borrower_ac][lender_ac]--;
+ ctx->FIFO_credit[lender_ac]++;
+
+ return BCME_OK;
+}
+#endif /* LIMIT_BORROW */
+
+static int
+_dhd_wlfc_interface_entry_update(void* state,
+ uint8 action, uint8 ifid, uint8 iftype, uint8* ea)
{
athost_wl_status_info_t* ctx = (athost_wl_status_info_t*)state;
wlfc_mac_descriptor_t* entry;
- int ret;
if (ifid >= WLFC_MAX_IFNUM)
return BCME_BADARG;
entry = &ctx->destination_entries.interfaces[ifid];
- ret = _dhd_wlfc_mac_entry_update(ctx, entry, action, ifid, iftype, ea);
- return ret;
+
+ return _dhd_wlfc_mac_entry_update(ctx, entry, action, ifid, iftype, ea,
+ _dhd_wlfc_ifpkt_fn, &ifid);
}
-int
-dhd_wlfc_FIFOcreditmap_update(void* state, uint8* credits)
+static int
+_dhd_wlfc_BCMCCredit_support_update(void* state)
+{
+ athost_wl_status_info_t* ctx = (athost_wl_status_info_t*)state;
+
+ ctx->bcmc_credit_supported = TRUE;
+ return BCME_OK;
+}
+
+static int
+_dhd_wlfc_FIFOcreditmap_update(void* state, uint8* credits)
{
athost_wl_status_info_t* ctx = (athost_wl_status_info_t*)state;
+ int i;
+
+ for (i = 0; i <= 4; i++) {
+ if (ctx->Init_FIFO_credit[i] != ctx->FIFO_credit[i]) {
+			DHD_ERROR(("%s: credit[%d] is not returned, (%d %d)\n",
+				__FUNCTION__, i, ctx->Init_FIFO_credit[i], ctx->FIFO_credit[i]));
+ }
+ }
/* update the AC FIFO credit map */
- ctx->FIFO_credit[0] = credits[0];
- ctx->FIFO_credit[1] = credits[1];
- ctx->FIFO_credit[2] = credits[2];
- ctx->FIFO_credit[3] = credits[3];
- /* credit for bc/mc packets */
- ctx->FIFO_credit[4] = credits[4];
+ ctx->FIFO_credit[0] += (credits[0] - ctx->Init_FIFO_credit[0]);
+ ctx->FIFO_credit[1] += (credits[1] - ctx->Init_FIFO_credit[1]);
+ ctx->FIFO_credit[2] += (credits[2] - ctx->Init_FIFO_credit[2]);
+ ctx->FIFO_credit[3] += (credits[3] - ctx->Init_FIFO_credit[3]);
+ ctx->FIFO_credit[4] += (credits[4] - ctx->Init_FIFO_credit[4]);
+
+ ctx->Init_FIFO_credit[0] = credits[0];
+ ctx->Init_FIFO_credit[1] = credits[1];
+ ctx->Init_FIFO_credit[2] = credits[2];
+ ctx->Init_FIFO_credit[3] = credits[3];
+ ctx->Init_FIFO_credit[4] = credits[4];
+
/* credit for ATIM FIFO is not used yet. */
- ctx->FIFO_credit[5] = 0;
+ ctx->Init_FIFO_credit[5] = ctx->FIFO_credit[5] = 0;
+
return BCME_OK;
}
-int
+static int
_dhd_wlfc_handle_packet_commit(athost_wl_status_info_t* ctx, int ac,
dhd_wlfc_commit_info_t *commit_info, f_commitpkt_t fcommit, void* commit_ctx)
{
uint32 hslot;
int rc;
+ dhd_pub_t *dhdp = (dhd_pub_t *)(ctx->dhdp);
/*
if ac_fifo_credit_spent = 0
rc = _dhd_wlfc_pretx_pktprocess(ctx, commit_info->mac_entry, commit_info->p,
commit_info->needs_hdr, &hslot);
- if (rc == BCME_OK)
- rc = fcommit(commit_ctx, commit_info->p);
- else
- ctx->stats.generic_error++;
-
if (rc == BCME_OK) {
- ctx->stats.pkt2bus++;
- if (commit_info->ac_fifo_credit_spent) {
- ctx->stats.send_pkts[ac]++;
- WLFC_HOST_FIFO_CREDIT_INC_SENTCTRS(ctx, ac);
- }
- } else if (rc == BCME_NORESOURCE)
- rc = BCME_ERROR;
- else {
- /*
- bus commit has failed, rollback.
- - remove wl-header for a delayed packet
- - save wl-header header for suppressed packets
- */
- rc = _dhd_wlfc_rollback_packet_toq(ctx, commit_info->p,
- (commit_info->pkt_type), hslot);
-
- rc = BCME_ERROR;
- }
-
- return rc;
-}
-
-int
-dhd_wlfc_commit_packets(void* state, f_commitpkt_t fcommit, void* commit_ctx, void *pktbuf)
-{
- int ac;
- int credit;
- int rc;
- dhd_wlfc_commit_info_t commit_info;
- athost_wl_status_info_t* ctx = (athost_wl_status_info_t*)state;
- int credit_count = 0;
- int bus_retry_count = 0;
- uint8 ac_available = 0; /* Bitmask for 4 ACs + BC/MC */
-
- if ((state == NULL) ||
- (fcommit == NULL)) {
- WLFC_DBGMESG(("Error: %s():%d\n", __FUNCTION__, __LINE__));
- return BCME_BADARG;
- }
-
- memset(&commit_info, 0, sizeof(commit_info));
-
- /*
- Commit packets for regular AC traffic. Higher priority first.
- First, use up FIFO credits available to each AC. Based on distribution
- and credits left, borrow from other ACs as applicable
-
- -NOTE:
- If the bus between the host and firmware is overwhelmed by the
- traffic from host, it is possible that higher priority traffic
- starves the lower priority queue. If that occurs often, we may
- have to employ weighted round-robin or ucode scheme to avoid
- low priority packet starvation.
- */
-
- if (pktbuf) {
- ac = DHD_PKTTAG_FIFO(PKTTAG(pktbuf));
- if (ETHER_ISMULTI(DHD_PKTTAG_DSTN(PKTTAG(pktbuf)))) {
- ASSERT(ac == AC_COUNT);
- commit_info.needs_hdr = 1;
- commit_info.mac_entry = NULL;
- commit_info.pkt_type = eWLFC_PKTTYPE_NEW;
- commit_info.p = pktbuf;
- if (ctx->FIFO_credit[ac]) {
- rc = _dhd_wlfc_handle_packet_commit(ctx, ac, &commit_info,
- fcommit, commit_ctx);
-
- /* Bus commits may fail (e.g. flow control); abort after retries */
- if (rc == BCME_OK) {
- if (commit_info.ac_fifo_credit_spent) {
- (void) _dhd_wlfc_borrow_credit(ctx,
- ac_available, ac);
- credit_count--;
- }
- } else {
- bus_retry_count++;
- if (bus_retry_count >= BUS_RETRIES) {
- DHD_ERROR((" %s: bus error %d\n",
- __FUNCTION__, rc));
- return rc;
- }
- }
+ rc = fcommit(commit_ctx, commit_info->p);
+ if (rc == BCME_OK) {
+ uint8 gen = WL_TXSTATUS_GET_GENERATION(
+ DHD_PKTTAG_H2DTAG(PKTTAG(commit_info->p)));
+ ctx->stats.pkt2bus++;
+ if (commit_info->ac_fifo_credit_spent || (ac == AC_COUNT)) {
+ ctx->stats.send_pkts[ac]++;
+ WLFC_HOST_FIFO_CREDIT_INC_SENTCTRS(ctx, ac);
}
- }
- else {
- /* en-queue the packets to respective queue. */
- rc = _dhd_wlfc_enque_delayq(ctx, pktbuf, ac);
- }
- }
-
- for (ac = AC_COUNT; ac >= 0; ac--) {
-
- bool bQueueIdle = TRUE;
-
- /* packets from delayQ with less priority are fresh and they'd need header and
- * have no MAC entry
- */
- commit_info.needs_hdr = 1;
- commit_info.mac_entry = NULL;
- commit_info.pkt_type = eWLFC_PKTTYPE_NEW;
- for (credit = 0; credit < ctx->FIFO_credit[ac];) {
- commit_info.p = _dhd_wlfc_deque_delayedq(ctx, ac,
- &(commit_info.ac_fifo_credit_spent),
- &(commit_info.needs_hdr),
- &(commit_info.mac_entry));
-
- if (commit_info.p == NULL)
- break;
-
- bQueueIdle = FALSE;
-
- commit_info.pkt_type = (commit_info.needs_hdr) ? eWLFC_PKTTYPE_DELAYED :
- eWLFC_PKTTYPE_SUPPRESSED;
-
- rc = _dhd_wlfc_handle_packet_commit(ctx, ac, &commit_info,
- fcommit, commit_ctx);
-
- /* Bus commits may fail (e.g. flow control); abort after retries */
- if (rc == BCME_OK) {
- if (commit_info.ac_fifo_credit_spent) {
- credit++;
+ if (gen != commit_info->mac_entry->generation) {
+ /* will be suppressed back by design */
+ if (!commit_info->mac_entry->suppressed) {
+ commit_info->mac_entry->suppressed = TRUE;
}
+ commit_info->mac_entry->suppr_transit_count++;
}
- else {
- bus_retry_count++;
- if (bus_retry_count >= BUS_RETRIES) {
- DHD_ERROR(("%s: bus error %d\n", __FUNCTION__, rc));
- ctx->FIFO_credit[ac] -= credit;
- return rc;
- }
+ commit_info->mac_entry->transit_count++;
+ } else if (commit_info->needs_hdr) {
+ if (!WLFC_GET_AFQ(dhdp->wlfc_mode)) {
+ void *pout = NULL;
+ /* pop hanger for delayed packet */
+ _dhd_wlfc_hanger_poppkt(ctx->hanger, WL_TXSTATUS_GET_HSLOT(
+ DHD_PKTTAG_H2DTAG(PKTTAG(commit_info->p))), &pout, TRUE);
+ ASSERT(commit_info->p == pout);
}
}
-
- ctx->FIFO_credit[ac] -= credit;
-
-
- /* If no pkts can be dequed, the credit can be borrowed */
- if (bQueueIdle) {
- ac_available |= (1 << ac);
- credit_count += ctx->FIFO_credit[ac];
- }
+ } else {
+ ctx->stats.generic_error++;
}
- /* We borrow only for AC_BE and only if no other traffic seen for DEFER_PERIOD
-
- Note that (ac_available & WLFC_AC_BE_TRAFFIC_ONLY) is done to:
- a) ignore BC/MC for deferring borrow
- b) ignore AC_BE being available along with other ACs
- (this should happen only for pure BC/MC traffic)
-
- i.e. AC_VI, AC_VO, AC_BK all MUST be available (i.e. no traffic) and
- we do not care if AC_BE and BC/MC are available or not
- */
- if ((ac_available & WLFC_AC_BE_TRAFFIC_ONLY) == WLFC_AC_BE_TRAFFIC_ONLY) {
-
- if (ctx->allow_credit_borrow) {
- ac = 1; /* Set ac to AC_BE and borrow credits */
- }
- else {
- int delta;
- int curr_t = OSL_SYSUPTIME();
-
- if (curr_t > ctx->borrow_defer_timestamp)
- delta = curr_t - ctx->borrow_defer_timestamp;
- else
- delta = 0xffffffff + curr_t - ctx->borrow_defer_timestamp;
-
- if (delta >= WLFC_BORROW_DEFER_PERIOD_MS) {
- /* Reset borrow but defer to next iteration (defensive borrowing) */
- ctx->allow_credit_borrow = TRUE;
- ctx->borrow_defer_timestamp = 0;
- }
- return BCME_OK;
- }
- }
- else {
- /* If we have multiple AC traffic, turn off borrowing, mark time and bail out */
- ctx->allow_credit_borrow = FALSE;
- ctx->borrow_defer_timestamp = OSL_SYSUPTIME();
- return BCME_OK;
+ if (rc != BCME_OK) {
+ /*
+ pretx pkt process or bus commit has failed, rollback.
+ - remove wl-header for a delayed packet
+ - save wl-header header for suppressed packets
+ - reset credit check flag
+ */
+ _dhd_wlfc_rollback_packet_toq(ctx, commit_info->p, commit_info->pkt_type, hslot);
+ DHD_PKTTAG_SETCREDITCHECK(PKTTAG(commit_info->p), 0);
}
- /* At this point, borrow all credits only for "ac" (which should be set above to AC_BE)
- Generically use "ac" only in case we extend to all ACs in future
- */
- for (; (credit_count > 0);) {
-
- commit_info.p = _dhd_wlfc_deque_delayedq(ctx, ac,
- &(commit_info.ac_fifo_credit_spent),
- &(commit_info.needs_hdr),
- &(commit_info.mac_entry));
- if (commit_info.p == NULL)
- break;
-
- commit_info.pkt_type = (commit_info.needs_hdr) ? eWLFC_PKTTYPE_DELAYED :
- eWLFC_PKTTYPE_SUPPRESSED;
-
- rc = _dhd_wlfc_handle_packet_commit(ctx, ac, &commit_info,
- fcommit, commit_ctx);
-
- /* Bus commits may fail (e.g. flow control); abort after retries */
- if (rc == BCME_OK) {
- if (commit_info.ac_fifo_credit_spent) {
- (void) _dhd_wlfc_borrow_credit(ctx, ac_available, ac);
- credit_count--;
- }
- }
- else {
- bus_retry_count++;
- if (bus_retry_count >= BUS_RETRIES) {
- DHD_ERROR(("%s: bus error %d\n", __FUNCTION__, rc));
- return rc;
- }
- }
- }
- return BCME_OK;
+ return rc;
}
static uint8
-dhd_wlfc_find_mac_desc_id_from_mac(dhd_pub_t *dhdp, uint8* ea)
+_dhd_wlfc_find_mac_desc_id_from_mac(dhd_pub_t *dhdp, uint8* ea)
{
wlfc_mac_descriptor_t* table =
((athost_wl_status_info_t*)dhdp->wlfc_state)->destination_entries.nodes;
return WLFC_MAC_DESC_ID_INVALID;
}
-void
-dhd_wlfc_txcomplete(dhd_pub_t *dhd, void *txp, bool success)
-{
- athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
- dhd->wlfc_state;
- void* p;
- int fifo_id;
-
- if (DHD_PKTTAG_SIGNALONLY(PKTTAG(txp))) {
-#ifdef PROP_TXSTATUS_DEBUG
- wlfc->stats.signal_only_pkts_freed++;
-#endif
- /* is this a signal-only packet? */
- if (success)
- PKTFREE(wlfc->osh, txp, TRUE);
- return;
- }
- if (!success) {
- WLFC_DBGMESG(("At: %s():%d, bus_complete() failure for %p, htod_tag:0x%08x\n",
- __FUNCTION__, __LINE__, txp, DHD_PKTTAG_H2DTAG(PKTTAG(txp))));
- dhd_wlfc_hanger_poppkt(wlfc->hanger, WLFC_PKTID_HSLOT_GET(DHD_PKTTAG_H2DTAG
- (PKTTAG(txp))), &p, 1);
-
- /* indicate failure and free the packet */
- dhd_txcomplete(dhd, txp, FALSE);
-
- /* return the credit, if necessary */
- if (DHD_PKTTAG_CREDITCHECK(PKTTAG(txp))) {
- int lender, credit_returned = 0; /* Note that borrower is fifo_id */
-
- fifo_id = DHD_PKTTAG_FIFO(PKTTAG(txp));
-
- /* Return credits to highest priority lender first */
- for (lender = AC_COUNT; lender >= 0; lender--) {
- if (wlfc->credits_borrowed[fifo_id][lender] > 0) {
- wlfc->FIFO_credit[lender]++;
- wlfc->credits_borrowed[fifo_id][lender]--;
- credit_returned = 1;
- break;
- }
- }
-
- if (!credit_returned) {
- wlfc->FIFO_credit[fifo_id]++;
- }
- }
-
- PKTFREE(wlfc->osh, txp, TRUE);
- }
- return;
-}
-
static int
-dhd_wlfc_compressed_txstatus_update(dhd_pub_t *dhd, uint8* pkt_info, uint8 len)
-{
- uint8 status_flag;
- uint32 status;
- int ret;
- int remove_from_hanger = 1;
- void* pktbuf;
- uint8 fifo_id;
- uint8 count = 0;
- uint32 status_g;
- uint32 hslot, hcnt;
+_dhd_wlfc_compressed_txstatus_update(dhd_pub_t *dhd, uint8* pkt_info, uint8 len, void** p_mac)
+{
+ uint8 status_flag;
+ uint32 status;
+ int ret = BCME_OK;
+ int remove_from_hanger = 1;
+ void* pktbuf = NULL;
+ uint8 fifo_id = 0, gen = 0, count = 0, hcnt;
+ uint16 hslot;
wlfc_mac_descriptor_t* entry = NULL;
- athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
- dhd->wlfc_state;
+ athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+ uint16 seq = 0, seq_fromfw = 0, seq_num = 0;
memcpy(&status, pkt_info, sizeof(uint32));
status_flag = WL_TXSTATUS_GET_FLAGS(status);
- status_g = status & 0xff000000;
- hslot = (status & 0x00ffff00) >> 8;
- hcnt = status & 0xff;
- len = pkt_info[4];
+ hcnt = WL_TXSTATUS_GET_FREERUNCTR(status);
+ hslot = WL_TXSTATUS_GET_HSLOT(status);
+ fifo_id = WL_TXSTATUS_GET_FIFO(status);
+ gen = WL_TXSTATUS_GET_GENERATION(status);
+
+ if (WLFC_GET_REUSESEQ(dhd->wlfc_mode)) {
+ memcpy(&seq, pkt_info + WLFC_CTL_VALUE_LEN_TXSTATUS, WLFC_CTL_VALUE_LEN_SEQ);
+ seq_fromfw = WL_SEQ_GET_FROMFW(seq);
+ seq_num = WL_SEQ_GET_NUM(seq);
+ }
- wlfc->stats.txstatus_in++;
+ wlfc->stats.txstatus_in += len;
if (status_flag == WLFC_CTL_PKTFLAG_DISCARD) {
- wlfc->stats.pkt_freed++;
+ wlfc->stats.pkt_freed += len;
+ }
+
+ else if (status_flag == WLFC_CTL_PKTFLAG_DISCARD_NOACK) {
+ wlfc->stats.pkt_freed += len;
}
else if (status_flag == WLFC_CTL_PKTFLAG_D11SUPPRESS) {
- wlfc->stats.d11_suppress++;
+ wlfc->stats.d11_suppress += len;
remove_from_hanger = 0;
}
else if (status_flag == WLFC_CTL_PKTFLAG_WLSUPPRESS) {
- wlfc->stats.wl_suppress++;
+ wlfc->stats.wl_suppress += len;
remove_from_hanger = 0;
}
else if (status_flag == WLFC_CTL_PKTFLAG_TOSSED_BYWLC) {
- wlfc->stats.wlc_tossed_pkts++;
+ wlfc->stats.wlc_tossed_pkts += len;
+ }
+
+ if (dhd->proptxstatus_txstatus_ignore) {
+ if (!remove_from_hanger) {
+ DHD_ERROR(("suppress txstatus: %d\n", status_flag));
+ }
+ return BCME_OK;
}
+
while (count < len) {
- status = (status_g << 24) | (hslot << 8) | (hcnt);
- count++;
- hslot++;
- hcnt++;
+ if (WLFC_GET_AFQ(dhd->wlfc_mode)) {
+ ret = _dhd_wlfc_deque_afq(wlfc, hslot, hcnt, fifo_id, &pktbuf);
+ } else {
+ ret = _dhd_wlfc_hanger_poppkt(wlfc->hanger, hslot, &pktbuf, FALSE);
+ if (!pktbuf) {
+ _dhd_wlfc_hanger_free_pkt(wlfc, hslot,
+ WLFC_HANGER_PKT_STATE_TXSTATUS, -1);
+ goto cont;
+ }
+ }
- ret = dhd_wlfc_hanger_poppkt(wlfc->hanger,
- WLFC_PKTID_HSLOT_GET(status), &pktbuf, remove_from_hanger);
- if (ret != BCME_OK) {
- /* do something */
- continue;
+ if ((ret != BCME_OK) || !pktbuf) {
+ goto cont;
}
+		/* set fifo_id to the correct value because not all FW fills it in */
+ fifo_id = DHD_PKTTAG_FIFO(PKTTAG(pktbuf));
+
entry = _dhd_wlfc_find_table_entry(wlfc, pktbuf);
if (!remove_from_hanger) {
/* this packet was suppressed */
- if (!entry->suppressed || entry->generation != WLFC_PKTID_GEN(status)) {
+ if (!entry->suppressed || (entry->generation != gen)) {
+ if (!entry->suppressed) {
+ entry->suppr_transit_count = entry->transit_count;
+ if (p_mac) {
+ *p_mac = entry;
+ }
+ } else {
+ DHD_ERROR(("gen(%d), entry->generation(%d)\n",
+ gen, entry->generation));
+ }
entry->suppressed = TRUE;
- entry->suppress_count = pktq_mlen(&entry->psq,
- NBITVAL((WL_TXSTATUS_GET_FIFO(status) << 1) + 1));
- entry->suppr_transit_count = entry->transit_count;
+
}
- entry->generation = WLFC_PKTID_GEN(status);
+ entry->generation = gen;
}
#ifdef PROP_TXSTATUS_DEBUG
+ if (!WLFC_GET_AFQ(dhd->wlfc_mode))
{
uint32 new_t = OSL_SYSUPTIME();
uint32 old_t;
uint32 delta;
- old_t = ((wlfc_hanger_t*)(wlfc->hanger))->items[
- WLFC_PKTID_HSLOT_GET(status)].push_time;
+ old_t = ((wlfc_hanger_t*)(wlfc->hanger))->items[hslot].push_time;
wlfc->stats.latency_sample_count++;
}
#endif /* PROP_TXSTATUS_DEBUG */
- fifo_id = DHD_PKTTAG_FIFO(PKTTAG(pktbuf));
-
/* pick up the implicit credit from this packet */
if (DHD_PKTTAG_CREDITCHECK(PKTTAG(pktbuf))) {
- if (wlfc->proptxstatus_mode == WLFC_FCMODE_IMPLIED_CREDIT) {
-
- int lender, credit_returned = 0; /* Note that borrower is fifo_id */
-
- /* Return credits to highest priority lender first */
- for (lender = AC_COUNT; lender >= 0; lender--) {
- if (wlfc->credits_borrowed[fifo_id][lender] > 0) {
- wlfc->FIFO_credit[lender]++;
- wlfc->credits_borrowed[fifo_id][lender]--;
- credit_returned = 1;
- break;
- }
- }
-
- if (!credit_returned) {
- wlfc->FIFO_credit[fifo_id]++;
- }
- }
- }
- else {
+ _dhd_wlfc_return_implied_credit(wlfc, pktbuf);
+ } else {
/*
if this packet did not count against FIFO credit, it must have
taken a requested_credit from the destination entry (for pspoll etc.)
*/
- if (!entry) {
-
- entry = _dhd_wlfc_find_table_entry(wlfc, pktbuf);
- }
if (!DHD_PKTTAG_ONETIMEPKTRQST(PKTTAG(pktbuf)))
entry->requested_credit++;
#ifdef PROP_TXSTATUS_DEBUG
entry->dstncredit_acks++;
#endif
}
+
if ((status_flag == WLFC_CTL_PKTFLAG_D11SUPPRESS) ||
(status_flag == WLFC_CTL_PKTFLAG_WLSUPPRESS)) {
+ /* save generation bit inside packet */
+ WL_TXSTATUS_SET_GENERATION(DHD_PKTTAG_H2DTAG(PKTTAG(pktbuf)), gen);
+
+ if (WLFC_GET_REUSESEQ(dhd->wlfc_mode)) {
+ WL_SEQ_SET_FROMDRV(DHD_PKTTAG_H2DSEQ(PKTTAG(pktbuf)), seq_fromfw);
+ WL_SEQ_SET_NUM(DHD_PKTTAG_H2DSEQ(PKTTAG(pktbuf)), seq_num);
+ }
ret = _dhd_wlfc_enque_suppressed(wlfc, fifo_id, pktbuf);
if (ret != BCME_OK) {
/* delay q is full, drop this packet */
- dhd_wlfc_hanger_poppkt(wlfc->hanger, WLFC_PKTID_HSLOT_GET(status),
- &pktbuf, 1);
-
- /* indicate failure and free the packet */
- dhd_txcomplete(dhd, pktbuf, FALSE);
- entry->transit_count--;
DHD_WLFC_QMON_COMPLETE(entry);
- /* packet is transmitted Successfully by dongle
- * after first suppress.
- */
- if (entry->suppressed) {
- entry->suppr_transit_count--;
- }
- PKTFREE(wlfc->osh, pktbuf, TRUE);
+ _dhd_wlfc_prec_drop(dhd, (fifo_id << 1) + 1, pktbuf, FALSE);
} else {
- /* Mark suppressed to avoid a double free during wlfc cleanup */
-
- dhd_wlfc_hanger_mark_suppressed(wlfc->hanger,
- WLFC_PKTID_HSLOT_GET(status), WLFC_PKTID_GEN(status));
- entry->suppress_count++;
+ if (!WLFC_GET_AFQ(dhd->wlfc_mode)) {
+ /* Mark suppressed to avoid a double free
+ during wlfc cleanup
+ */
+ _dhd_wlfc_hanger_mark_suppressed(wlfc->hanger, hslot, gen);
+ }
}
- }
- else {
- dhd_txcomplete(dhd, pktbuf, TRUE);
- entry->transit_count--;
+ } else {
+
DHD_WLFC_QMON_COMPLETE(entry);
- /* This packet is transmitted Successfully by dongle
- * even after first suppress.
- */
- if (entry->suppressed) {
- entry->suppr_transit_count--;
+ if (!WLFC_GET_AFQ(dhd->wlfc_mode)) {
+ _dhd_wlfc_hanger_free_pkt(wlfc, hslot,
+ WLFC_HANGER_PKT_STATE_TXSTATUS, TRUE);
+ } else {
+ dhd_txcomplete(dhd, pktbuf, TRUE);
+ wlfc->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(pktbuf))]
+ [DHD_PKTTAG_FIFO(PKTTAG(pktbuf))]--;
+ wlfc->stats.pktout++;
+ /* free the packet */
+ PKTFREE(wlfc->osh, pktbuf, TRUE);
}
- /* free the packet */
- PKTFREE(wlfc->osh, pktbuf, TRUE);
}
+ /* pkt back from firmware side */
+ entry->transit_count--;
+ if (entry->suppressed && (--entry->suppr_transit_count == 0)) {
+ entry->suppressed = FALSE;
+ }
+
+cont:
+ hcnt = (hcnt + 1) & WL_TXSTATUS_FREERUNCTR_MASK;
+ if (!WLFC_GET_AFQ(dhd->wlfc_mode)) {
+ hslot = (hslot + 1) & WL_TXSTATUS_HSLOT_MASK;
+ }
+
+ if (WLFC_GET_REUSESEQ(dhd->wlfc_mode) && seq_fromfw) {
+ seq_num = (seq_num + 1) & WL_SEQ_NUM_MASK;
+ }
+
+ count++;
}
return BCME_OK;
}
-/* Handle discard or suppress indication */
static int
-dhd_wlfc_txstatus_update(dhd_pub_t *dhd, uint8* pkt_info)
-{
- uint8 status_flag;
- uint32 status;
- int ret;
- int remove_from_hanger = 1;
- void* pktbuf;
- uint8 fifo_id;
- wlfc_mac_descriptor_t* entry = NULL;
- athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
- dhd->wlfc_state;
-
- memcpy(&status, pkt_info, sizeof(uint32));
- status_flag = WL_TXSTATUS_GET_FLAGS(status);
- wlfc->stats.txstatus_in++;
+_dhd_wlfc_fifocreditback_indicate(dhd_pub_t *dhd, uint8* credits)
+{
+ int i;
+ athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+ for (i = 0; i < WLFC_CTL_VALUE_LEN_FIFO_CREDITBACK; i++) {
+#ifdef PROP_TXSTATUS_DEBUG
+ wlfc->stats.fifo_credits_back[i] += credits[i];
+#endif
- if (status_flag == WLFC_CTL_PKTFLAG_DISCARD) {
- wlfc->stats.pkt_freed++;
- }
-
- else if (status_flag == WLFC_CTL_PKTFLAG_D11SUPPRESS) {
- wlfc->stats.d11_suppress++;
- remove_from_hanger = 0;
- }
-
- else if (status_flag == WLFC_CTL_PKTFLAG_WLSUPPRESS) {
- wlfc->stats.wl_suppress++;
- remove_from_hanger = 0;
- }
-
- else if (status_flag == WLFC_CTL_PKTFLAG_TOSSED_BYWLC) {
- wlfc->stats.wlc_tossed_pkts++;
- }
-
- ret = dhd_wlfc_hanger_poppkt(wlfc->hanger,
- WLFC_PKTID_HSLOT_GET(status), &pktbuf, remove_from_hanger);
- if (ret != BCME_OK) {
- /* do something */
- return ret;
- }
-
- entry = _dhd_wlfc_find_table_entry(wlfc, pktbuf);
-
- if (!remove_from_hanger) {
- /* this packet was suppressed */
- if (!entry->suppressed || entry->generation != WLFC_PKTID_GEN(status)) {
- entry->suppressed = TRUE;
- entry->suppress_count = pktq_mlen(&entry->psq,
- NBITVAL((WL_TXSTATUS_GET_FIFO(status) << 1) + 1));
- entry->suppr_transit_count = entry->transit_count;
- }
- entry->generation = WLFC_PKTID_GEN(status);
- }
-
-#ifdef PROP_TXSTATUS_DEBUG
- {
- uint32 new_t = OSL_SYSUPTIME();
- uint32 old_t;
- uint32 delta;
- old_t = ((wlfc_hanger_t*)(wlfc->hanger))->items[
- WLFC_PKTID_HSLOT_GET(status)].push_time;
-
-
- wlfc->stats.latency_sample_count++;
- if (new_t > old_t)
- delta = new_t - old_t;
- else
- delta = 0xffffffff + new_t - old_t;
- wlfc->stats.total_status_latency += delta;
- wlfc->stats.latency_most_recent = delta;
-
- wlfc->stats.deltas[wlfc->stats.idx_delta++] = delta;
- if (wlfc->stats.idx_delta == sizeof(wlfc->stats.deltas)/sizeof(uint32))
- wlfc->stats.idx_delta = 0;
- }
-#endif /* PROP_TXSTATUS_DEBUG */
-
- fifo_id = DHD_PKTTAG_FIFO(PKTTAG(pktbuf));
-
- /* pick up the implicit credit from this packet */
- if (DHD_PKTTAG_CREDITCHECK(PKTTAG(pktbuf))) {
- if (wlfc->proptxstatus_mode == WLFC_FCMODE_IMPLIED_CREDIT) {
-
- int lender, credit_returned = 0; /* Note that borrower is fifo_id */
-
- /* Return credits to highest priority lender first */
- for (lender = AC_COUNT; lender >= 0; lender--) {
- if (wlfc->credits_borrowed[fifo_id][lender] > 0) {
- wlfc->FIFO_credit[lender]++;
- wlfc->credits_borrowed[fifo_id][lender]--;
- credit_returned = 1;
- break;
- }
- }
-
- if (!credit_returned) {
- wlfc->FIFO_credit[fifo_id]++;
- }
- }
- }
- else {
- /*
- if this packet did not count against FIFO credit, it must have
- taken a requested_credit from the destination entry (for pspoll etc.)
- */
- if (!entry) {
-
- entry = _dhd_wlfc_find_table_entry(wlfc, pktbuf);
- }
- if (!DHD_PKTTAG_ONETIMEPKTRQST(PKTTAG(pktbuf)))
- entry->requested_credit++;
-#ifdef PROP_TXSTATUS_DEBUG
- entry->dstncredit_acks++;
-#endif
- }
- if ((status_flag == WLFC_CTL_PKTFLAG_D11SUPPRESS) ||
- (status_flag == WLFC_CTL_PKTFLAG_WLSUPPRESS)) {
-
- ret = _dhd_wlfc_enque_suppressed(wlfc, fifo_id, pktbuf);
- if (ret != BCME_OK) {
- /* delay q is full, drop this packet */
- dhd_wlfc_hanger_poppkt(wlfc->hanger, WLFC_PKTID_HSLOT_GET(status),
- &pktbuf, 1);
-
- /* indicate failure and free the packet */
- dhd_txcomplete(dhd, pktbuf, FALSE);
- entry->transit_count--;
- DHD_WLFC_QMON_COMPLETE(entry);
- /* This packet is transmitted Successfully by
- * dongle even after first suppress.
- */
- if (entry->suppressed) {
- entry->suppr_transit_count--;
- }
- PKTFREE(wlfc->osh, pktbuf, TRUE);
- } else {
- /* Mark suppressed to avoid a double free during wlfc cleanup */
-
- dhd_wlfc_hanger_mark_suppressed(wlfc->hanger,
- WLFC_PKTID_HSLOT_GET(status), WLFC_PKTID_GEN(status));
- entry->suppress_count++;
- }
- }
- else {
- dhd_txcomplete(dhd, pktbuf, TRUE);
- entry->transit_count--;
- DHD_WLFC_QMON_COMPLETE(entry);
-
- /* This packet is transmitted Successfully by dongle even after first suppress. */
- if (entry->suppressed) {
- entry->suppr_transit_count--;
- }
- /* free the packet */
- PKTFREE(wlfc->osh, pktbuf, TRUE);
- }
- return BCME_OK;
-}
-
-static int
-dhd_wlfc_fifocreditback_indicate(dhd_pub_t *dhd, uint8* credits)
-{
- int i;
- athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
- dhd->wlfc_state;
- for (i = 0; i < WLFC_CTL_VALUE_LEN_FIFO_CREDITBACK; i++) {
-#ifdef PROP_TXSTATUS_DEBUG
- wlfc->stats.fifo_credits_back[i] += credits[i];
-#endif
- /* update FIFO credits */
- if (wlfc->proptxstatus_mode == WLFC_FCMODE_EXPLICIT_CREDIT)
- {
- int lender; /* Note that borrower is i */
+ /* update FIFO credits */
+ if (dhd->proptxstatus_mode == WLFC_FCMODE_EXPLICIT_CREDIT)
+ {
+ int lender; /* Note that borrower is i */
/* Return credits to highest priority lender first */
for (lender = AC_COUNT; (lender >= 0) && (credits[i] > 0); lender--) {
if (wlfc->credits_borrowed[i][lender] > 0) {
if (credits[i] >= wlfc->credits_borrowed[i][lender]) {
- credits[i] -= wlfc->credits_borrowed[i][lender];
+ credits[i] -=
+ (uint8)wlfc->credits_borrowed[i][lender];
wlfc->FIFO_credit[lender] +=
wlfc->credits_borrowed[i][lender];
wlfc->credits_borrowed[i][lender] = 0;
if (credits[i] > 0) {
wlfc->FIFO_credit[i] += credits[i];
}
+
+ if (wlfc->FIFO_credit[i] > wlfc->Init_FIFO_credit[i]) {
+ wlfc->FIFO_credit[i] = wlfc->Init_FIFO_credit[i];
+ }
}
}
return BCME_OK;
}
+static void
+_dhd_wlfc_suppress_txq(dhd_pub_t *dhd, f_processpkt_t fn, void *arg)
+{
+ athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+ wlfc_mac_descriptor_t* entry;
+ int prec;
+ void *pkt = NULL, *head = NULL, *tail = NULL;
+ struct pktq *txq = (struct pktq *)dhd_bus_txq(dhd->bus);
+ uint8 results[WLFC_CTL_VALUE_LEN_TXSTATUS+WLFC_CTL_VALUE_LEN_SEQ];
+ uint8 credits[WLFC_CTL_VALUE_LEN_FIFO_CREDITBACK] = {0};
+ uint32 htod = 0;
+ uint16 htodseq = 0;
+ bool bCreditUpdate = FALSE;
+
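+	/* pull every packet matching fn/arg off the bus txq and chain them into a local list */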
+ dhd_os_sdlock_txq(dhd);
+ for (prec = 0; prec < txq->num_prec; prec++) {
+ while ((pkt = _dhd_wlfc_pktq_pdeq_with_fn(txq, prec, fn, arg))) {
+ if (!head) {
+ head = pkt;
+ }
+ if (tail) {
+ PKTSETLINK(tail, pkt);
+ }
+ tail = pkt;
+ }
+ }
+ dhd_os_sdunlock_txq(dhd);
+
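+	/* re-process each collected packet as if the firmware had suppressed it */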
+ while ((pkt = head)) {
+ head = PKTLINK(pkt);
+ PKTSETLINK(pkt, NULL);
+
+ entry = _dhd_wlfc_find_table_entry(wlfc, pkt);
+
+ /* fake a suppression txstatus */
+ htod = DHD_PKTTAG_H2DTAG(PKTTAG(pkt));
+ WL_TXSTATUS_SET_FLAGS(htod, WLFC_CTL_PKTFLAG_WLSUPPRESS);
+ WL_TXSTATUS_SET_GENERATION(htod, entry->generation);
+ memcpy(results, &htod, WLFC_CTL_VALUE_LEN_TXSTATUS);
+ if (WLFC_GET_REUSESEQ(dhd->wlfc_mode)) {
+ htodseq = DHD_PKTTAG_H2DSEQ(PKTTAG(pkt));
+ if (WL_SEQ_GET_FROMDRV(htodseq)) {
+ WL_SEQ_SET_FROMFW(htodseq, 1);
+ WL_SEQ_SET_FROMDRV(htodseq, 0);
+ }
+ memcpy(results + WLFC_CTL_VALUE_LEN_TXSTATUS, &htodseq,
+ WLFC_CTL_VALUE_LEN_SEQ);
+ }
+ if (WLFC_GET_AFQ(dhd->wlfc_mode)) {
+ _dhd_wlfc_enque_afq(wlfc, pkt);
+ }
+ _dhd_wlfc_compressed_txstatus_update(dhd, results, 1, NULL);
+
+ /* fake a fifo credit back */
+ if (DHD_PKTTAG_CREDITCHECK(PKTTAG(pkt))) {
+ credits[DHD_PKTTAG_FIFO(PKTTAG(pkt))]++;
+ bCreditUpdate = TRUE;
+ }
+ }
+
+ if (bCreditUpdate) {
+ _dhd_wlfc_fifocreditback_indicate(dhd, credits);
+ }
+}
+
+
static int
-dhd_wlfc_dbg_senum_check(dhd_pub_t *dhd, uint8 *value)
+_dhd_wlfc_dbg_senum_check(dhd_pub_t *dhd, uint8 *value)
{
uint32 timestamp;
return BCME_OK;
}
-
static int
-dhd_wlfc_rssi_indicate(dhd_pub_t *dhd, uint8* rssi)
+_dhd_wlfc_rssi_indicate(dhd_pub_t *dhd, uint8* rssi)
{
(void)dhd;
(void)rssi;
return BCME_OK;
}
+static void
+_dhd_wlfc_add_requested_entry(athost_wl_status_info_t* wlfc, wlfc_mac_descriptor_t* entry)
+{
+ int i;
+
+ if (!wlfc || !entry) {
+ return;
+ }
+
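+	/* check whether this entry is already on the requested-entry list */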
+ for (i = 0; i < wlfc->requested_entry_count; i++) {
+ if (entry == wlfc->requested_entry[i]) {
+ break;
+ }
+ }
+
+ if (i == wlfc->requested_entry_count) {
+ /* no match entry found */
+ ASSERT(wlfc->requested_entry_count <= (WLFC_MAC_DESC_TABLE_SIZE-1));
+ wlfc->requested_entry[wlfc->requested_entry_count++] = entry;
+ }
+}
+
+static void
+_dhd_wlfc_remove_requested_entry(athost_wl_status_info_t* wlfc, wlfc_mac_descriptor_t* entry)
+{
+ int i;
+
+ if (!wlfc || !entry) {
+ return;
+ }
+
+ for (i = 0; i < wlfc->requested_entry_count; i++) {
+ if (entry == wlfc->requested_entry[i]) {
+ break;
+ }
+ }
+
+ if (i < wlfc->requested_entry_count) {
+ /* found */
+ ASSERT(wlfc->requested_entry_count > 0);
+ wlfc->requested_entry_count--;
+ if (i != wlfc->requested_entry_count) {
+ wlfc->requested_entry[i] =
+ wlfc->requested_entry[wlfc->requested_entry_count];
+ }
+ wlfc->requested_entry[wlfc->requested_entry_count] = NULL;
+ }
+}
+
static int
-dhd_wlfc_mac_table_update(dhd_pub_t *dhd, uint8* value, uint8 type)
+_dhd_wlfc_mac_table_update(dhd_pub_t *dhd, uint8* value, uint8 type)
{
int rc;
- athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
- dhd->wlfc_state;
+ athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
wlfc_mac_descriptor_t* table;
uint8 existing_index;
uint8 table_index;
ifid = value[1];
ea = &value[2];
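+	/* an add/del on this slot invalidates any outstanding credit/packet request for it */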
+ _dhd_wlfc_remove_requested_entry(wlfc, &table[table_index]);
if (type == WLFC_CTL_TYPE_MACDESC_ADD) {
- existing_index = dhd_wlfc_find_mac_desc_id_from_mac(dhd, &value[2]);
- if (existing_index == WLFC_MAC_DESC_ID_INVALID) {
- /* this MAC entry does not exist, create one */
- if (!table[table_index].occupied) {
- table[table_index].mac_handle = value[0];
- rc = _dhd_wlfc_mac_entry_update(wlfc, &table[table_index],
- eWLFC_MAC_ENTRY_ACTION_ADD, ifid,
- wlfc->destination_entries.interfaces[ifid].iftype,
- ea);
- }
- else {
- /* the space should have been empty, but it's not */
- wlfc->stats.mac_update_failed++;
- }
- }
- else {
+ existing_index = _dhd_wlfc_find_mac_desc_id_from_mac(dhd, &value[2]);
+ if ((existing_index != WLFC_MAC_DESC_ID_INVALID) &&
+ (existing_index != table_index) && table[existing_index].occupied) {
/*
- there is an existing entry, move it to new index
- if necessary.
+ there is an existing different entry, free the old one
+ and move it to new index if necessary.
*/
- if (existing_index != table_index) {
- /* if we already have an entry, free the old one */
- table[existing_index].occupied = 0;
- table[existing_index].state = WLFC_STATE_CLOSE;
- table[existing_index].requested_credit = 0;
- table[existing_index].interface_id = 0;
- /* enable after packets are queued-deqeued properly.
- pktq_flush(dhd->osh, &table[existing_index].psq, FALSE, NULL, 0);
- */
- }
+ rc = _dhd_wlfc_mac_entry_update(wlfc, &table[existing_index],
+ eWLFC_MAC_ENTRY_ACTION_DEL, table[existing_index].interface_id,
+ table[existing_index].iftype, NULL, _dhd_wlfc_entrypkt_fn,
+ &table[existing_index]);
+ }
+
+ if (!table[table_index].occupied) {
+ /* this new MAC entry does not exist, create one */
+ table[table_index].mac_handle = value[0];
+ rc = _dhd_wlfc_mac_entry_update(wlfc, &table[table_index],
+ eWLFC_MAC_ENTRY_ACTION_ADD, ifid,
+ wlfc->destination_entries.interfaces[ifid].iftype,
+ ea, NULL, NULL);
+ } else {
+ /* the space should have been empty, but it's not */
+ wlfc->stats.mac_update_failed++;
}
}
+
if (type == WLFC_CTL_TYPE_MACDESC_DEL) {
if (table[table_index].occupied) {
rc = _dhd_wlfc_mac_entry_update(wlfc, &table[table_index],
eWLFC_MAC_ENTRY_ACTION_DEL, ifid,
wlfc->destination_entries.interfaces[ifid].iftype,
- ea);
- }
- else {
+ ea, _dhd_wlfc_entrypkt_fn, &table[table_index]);
+ } else {
/* the space should have been occupied, but it's not */
wlfc->stats.mac_update_failed++;
}
}
static int
-dhd_wlfc_psmode_update(dhd_pub_t *dhd, uint8* value, uint8 type)
+_dhd_wlfc_psmode_update(dhd_pub_t *dhd, uint8* value, uint8 type)
{
/* Handle PS on/off indication */
- athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
- dhd->wlfc_state;
+ athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
wlfc_mac_descriptor_t* table;
wlfc_mac_descriptor_t* desc;
uint8 mac_handle = value[0];
table = wlfc->destination_entries.nodes;
desc = &table[WLFC_MAC_DESC_GET_LOOKUP_INDEX(mac_handle)];
if (desc->occupied) {
- /* a fresh PS mode should wipe old ps credits? */
- desc->requested_credit = 0;
if (type == WLFC_CTL_TYPE_MAC_OPEN) {
desc->state = WLFC_STATE_OPEN;
+ desc->ac_bitmap = 0xff;
DHD_WLFC_CTRINC_MAC_OPEN(desc);
+ desc->requested_credit = 0;
+ desc->requested_packet = 0;
+ _dhd_wlfc_remove_requested_entry(wlfc, desc);
}
else {
desc->state = WLFC_STATE_CLOSE;
/*
Indicate to firmware if there is any traffic pending.
*/
- for (i = AC_BE; i < AC_COUNT; i++) {
+ for (i = 0; i < AC_COUNT; i++) {
_dhd_wlfc_traffic_pending_check(wlfc, desc, i);
}
}
}
static int
-dhd_wlfc_interface_update(dhd_pub_t *dhd, uint8* value, uint8 type)
+_dhd_wlfc_interface_update(dhd_pub_t *dhd, uint8* value, uint8 type)
{
/* Handle PS on/off indication */
- athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
- dhd->wlfc_state;
+ athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
wlfc_mac_descriptor_t* table;
uint8 if_id = value[0];
}
static int
-dhd_wlfc_credit_request(dhd_pub_t *dhd, uint8* value)
+_dhd_wlfc_credit_request(dhd_pub_t *dhd, uint8* value)
{
- athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
- dhd->wlfc_state;
+ athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
wlfc_mac_descriptor_t* table;
wlfc_mac_descriptor_t* desc;
uint8 mac_handle;
if (desc->occupied) {
desc->requested_credit = credit;
- desc->ac_bitmap = value[2];
+ desc->ac_bitmap = value[2] & (~(1<<AC_COUNT));
+ _dhd_wlfc_add_requested_entry(wlfc, desc);
}
else {
wlfc->stats.credit_request_failed++;
}
static int
-dhd_wlfc_packet_request(dhd_pub_t *dhd, uint8* value)
+_dhd_wlfc_packet_request(dhd_pub_t *dhd, uint8* value)
{
- athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
- dhd->wlfc_state;
+ athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
wlfc_mac_descriptor_t* table;
wlfc_mac_descriptor_t* desc;
uint8 mac_handle;
if (desc->occupied) {
desc->requested_packet = packet_count;
- desc->ac_bitmap = value[2];
+ desc->ac_bitmap = value[2] & (~(1<<AC_COUNT));
+ _dhd_wlfc_add_requested_entry(wlfc, desc);
}
else {
wlfc->stats.packet_request_failed++;
}
static void
-dhd_wlfc_reorderinfo_indicate(uint8 *val, uint8 len, uchar *info_buf, uint *info_len)
+_dhd_wlfc_reorderinfo_indicate(uint8 *val, uint8 len, uchar *info_buf, uint *info_len)
{
if (info_len) {
if (info_buf) {
}
}
+/*
+ * public functions
+ */
+
+bool dhd_wlfc_is_supported(dhd_pub_t *dhd)
+{
+ bool rc = TRUE;
+
+ if (dhd == NULL) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return FALSE;
+ }
+
+ dhd_os_wlfc_block(dhd);
+
+ if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+ rc = FALSE;
+ }
+
+ dhd_os_wlfc_unblock(dhd);
+
+ return rc;
+}
+
+int dhd_wlfc_enable(dhd_pub_t *dhd)
+{
+ int i, rc = BCME_OK;
+ athost_wl_status_info_t* wlfc;
+
+ if (dhd == NULL) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
+ }
+
+ dhd_os_wlfc_block(dhd);
+
+ if (!dhd->wlfc_enabled || dhd->wlfc_state) {
+ rc = BCME_OK;
+ goto exit;
+ }
+
+ /* allocate space to track txstatus propagated from firmware */
+ dhd->wlfc_state = MALLOC(dhd->osh, sizeof(athost_wl_status_info_t));
+ if (dhd->wlfc_state == NULL) {
+ rc = BCME_NOMEM;
+ goto exit;
+ }
+
+ /* initialize state space */
+ wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+ memset(wlfc, 0, sizeof(athost_wl_status_info_t));
+
+ /* remember osh & dhdp */
+ wlfc->osh = dhd->osh;
+ wlfc->dhdp = dhd;
+
+ if (!WLFC_GET_AFQ(dhd->wlfc_mode)) {
+ wlfc->hanger = _dhd_wlfc_hanger_create(dhd->osh, WLFC_HANGER_MAXITEMS);
+ if (wlfc->hanger == NULL) {
+ MFREE(dhd->osh, dhd->wlfc_state, sizeof(athost_wl_status_info_t));
+ dhd->wlfc_state = NULL;
+ rc = BCME_NOMEM;
+ goto exit;
+ }
+ }
+
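+	/* default to explicit credit signalling; the firmware returns credits via FIFO_CREDITBACK */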
+ dhd->proptxstatus_mode = WLFC_FCMODE_EXPLICIT_CREDIT;
+ /* default to check rx pkt */
+ if (dhd->op_mode & DHD_FLAG_IBSS_MODE) {
+ dhd->wlfc_rxpkt_chk = FALSE;
+ } else {
+ dhd->wlfc_rxpkt_chk = TRUE;
+ }
+
+
+ /* initialize all interfaces to accept traffic */
+ for (i = 0; i < WLFC_MAX_IFNUM; i++) {
+ wlfc->hostif_flow_state[i] = OFF;
+ }
+
+ _dhd_wlfc_mac_entry_update(wlfc, &wlfc->destination_entries.other,
+ eWLFC_MAC_ENTRY_ACTION_ADD, 0xff, 0, NULL, NULL, NULL);
+
+ wlfc->allow_credit_borrow = 0;
+ wlfc->single_ac = 0;
+ wlfc->single_ac_timestamp = 0;
+
+
+exit:
+ dhd_os_wlfc_unblock(dhd);
+
+ return rc;
+}
+#ifdef SUPPORT_P2P_GO_PS
+int
+dhd_wlfc_suspend(dhd_pub_t *dhd)
+{
+
+ uint32 iovbuf[4]; /* Room for "tlv" + '\0' + parameter */
+ uint32 tlv = 0;
+
+ DHD_TRACE(("%s: masking wlfc events\n", __FUNCTION__));
+ if (!dhd->wlfc_enabled)
+ return -1;
+
+ bcm_mkiovar("tlv", NULL, 0, (char*)iovbuf, sizeof(iovbuf));
+ if (dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0) < 0) {
+ DHD_ERROR(("%s: failed to get bdcv2 tlv signaling\n", __FUNCTION__));
+ return -1;
+ }
+ tlv = iovbuf[0];
+ if ((tlv & (WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS)) == 0)
+ return 0;
+ tlv &= ~(WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS);
+ bcm_mkiovar("tlv", (char *)&tlv, 4, (char*)iovbuf, sizeof(iovbuf));
+ if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0) < 0) {
+ DHD_ERROR(("%s: failed to set bdcv2 tlv signaling to 0x%x\n",
+ __FUNCTION__, tlv));
+ return -1;
+ }
+
+ return 0;
+}
+
+int
+dhd_wlfc_resume(dhd_pub_t *dhd)
+{
+ uint32 iovbuf[4]; /* Room for "tlv" + '\0' + parameter */
+ uint32 tlv = 0;
+
+ DHD_TRACE(("%s: unmasking wlfc events\n", __FUNCTION__));
+ if (!dhd->wlfc_enabled)
+ return -1;
+
+ bcm_mkiovar("tlv", NULL, 0, (char*)iovbuf, sizeof(iovbuf));
+ if (dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0) < 0) {
+ DHD_ERROR(("%s: failed to get bdcv2 tlv signaling\n", __FUNCTION__));
+ return -1;
+ }
+ tlv = iovbuf[0];
+ if ((tlv & (WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS)) ==
+ (WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS))
+ return 0;
+ tlv |= (WLFC_FLAGS_RSSI_SIGNALS | WLFC_FLAGS_XONXOFF_SIGNALS);
+ bcm_mkiovar("tlv", (char *)&tlv, 4, (char*)iovbuf, sizeof(iovbuf));
+ if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, (char*)iovbuf, sizeof(iovbuf), TRUE, 0) < 0) {
+ DHD_ERROR(("%s: failed to set bdcv2 tlv signaling to 0x%x\n",
+ __FUNCTION__, tlv));
+ return -1;
+ }
+
+ return 0;
+}
+#endif /* SUPPORT_P2P_GO_PS */
+
int
dhd_wlfc_parse_header_info(dhd_pub_t *dhd, void* pktbuf, int tlv_hdr_len, uchar *reorder_info_buf,
uint *reorder_info_len)
uint8 type, len;
uint8* value;
uint8* tmpbuf;
- uint16 remainder = tlv_hdr_len;
+ uint16 remainder = (uint16)tlv_hdr_len;
uint16 processed = 0;
- athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
- dhd->wlfc_state;
+ athost_wl_status_info_t* wlfc = NULL;
+ void* entry;
+
+ if ((dhd == NULL) || (pktbuf == NULL)) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
+ }
+
+ dhd_os_wlfc_block(dhd);
+
+ if (dhd->proptxstatus_mode != WLFC_ONLY_AMPDU_HOSTREORDER) {
+ if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+ dhd_os_wlfc_unblock(dhd);
+ return WLFC_UNSUPPORTED;
+ }
+ wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+ }
+
tmpbuf = (uint8*)PKTDATA(dhd->osh, pktbuf);
+
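+	/* walk the signal TLVs prepended by the firmware: 1-byte type, 1-byte length, then value */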
if (remainder) {
while ((processed < (WLFC_MAX_PENDING_DATALEN * 2)) && (remainder > 0)) {
type = tmpbuf[processed];
remainder -= 2 + len;
processed += 2 + len;
- if (type == WLFC_CTL_TYPE_TXSTATUS)
- dhd_wlfc_txstatus_update(dhd, value);
- if (type == WLFC_CTL_TYPE_COMP_TXSTATUS)
- dhd_wlfc_compressed_txstatus_update(dhd, value, len);
+ entry = NULL;
+
+ DHD_INFO(("%s():%d type %d remainder %d processed %d\n",
+ __FUNCTION__, __LINE__, type, remainder, processed));
- else if (type == WLFC_CTL_TYPE_HOST_REORDER_RXPKTS)
- dhd_wlfc_reorderinfo_indicate(value, len, reorder_info_buf,
+ if (type == WLFC_CTL_TYPE_HOST_REORDER_RXPKTS)
+ _dhd_wlfc_reorderinfo_indicate(value, len, reorder_info_buf,
reorder_info_len);
+
+ if (wlfc == NULL) {
+ ASSERT(dhd->proptxstatus_mode == WLFC_ONLY_AMPDU_HOSTREORDER);
+
+ if (type != WLFC_CTL_TYPE_HOST_REORDER_RXPKTS &&
+ type != WLFC_CTL_TYPE_TRANS_ID)
+				DHD_INFO(("%s():%d dhd->wlfc_state is still NULL!"
+ " type %d remainder %d processed %d\n",
+ __FUNCTION__, __LINE__, type, remainder, processed));
+ continue;
+ }
+
+ if (type == WLFC_CTL_TYPE_TXSTATUS) {
+ _dhd_wlfc_compressed_txstatus_update(dhd, value, 1, &entry);
+ }
+ else if (type == WLFC_CTL_TYPE_COMP_TXSTATUS) {
+ uint8 compcnt_offset = WLFC_CTL_VALUE_LEN_TXSTATUS;
+
+ if (WLFC_GET_REUSESEQ(dhd->wlfc_mode)) {
+ compcnt_offset += WLFC_CTL_VALUE_LEN_SEQ;
+ }
+ _dhd_wlfc_compressed_txstatus_update(dhd, value,
+ value[compcnt_offset], &entry);
+ }
else if (type == WLFC_CTL_TYPE_FIFO_CREDITBACK)
- dhd_wlfc_fifocreditback_indicate(dhd, value);
+ _dhd_wlfc_fifocreditback_indicate(dhd, value);
else if (type == WLFC_CTL_TYPE_RSSI)
- dhd_wlfc_rssi_indicate(dhd, value);
+ _dhd_wlfc_rssi_indicate(dhd, value);
else if (type == WLFC_CTL_TYPE_MAC_REQUEST_CREDIT)
- dhd_wlfc_credit_request(dhd, value);
+ _dhd_wlfc_credit_request(dhd, value);
else if (type == WLFC_CTL_TYPE_MAC_REQUEST_PACKET)
- dhd_wlfc_packet_request(dhd, value);
+ _dhd_wlfc_packet_request(dhd, value);
else if ((type == WLFC_CTL_TYPE_MAC_OPEN) ||
(type == WLFC_CTL_TYPE_MAC_CLOSE))
- dhd_wlfc_psmode_update(dhd, value, type);
+ _dhd_wlfc_psmode_update(dhd, value, type);
else if ((type == WLFC_CTL_TYPE_MACDESC_ADD) ||
(type == WLFC_CTL_TYPE_MACDESC_DEL))
- dhd_wlfc_mac_table_update(dhd, value, type);
+ _dhd_wlfc_mac_table_update(dhd, value, type);
else if (type == WLFC_CTL_TYPE_TRANS_ID)
- dhd_wlfc_dbg_senum_check(dhd, value);
+ _dhd_wlfc_dbg_senum_check(dhd, value);
else if ((type == WLFC_CTL_TYPE_INTERFACE_OPEN) ||
(type == WLFC_CTL_TYPE_INTERFACE_CLOSE)) {
- dhd_wlfc_interface_update(dhd, value, type);
+ _dhd_wlfc_interface_update(dhd, value, type);
+ }
+
+ if (entry && WLFC_GET_REORDERSUPP(dhd->wlfc_mode)) {
+ /* suppress all packets for this mac entry from bus->txq */
+ _dhd_wlfc_suppress_txq(dhd, _dhd_wlfc_entrypkt_fn, entry);
}
}
- if (remainder != 0) {
+ if (remainder != 0 && wlfc) {
/* trouble..., something is not right */
wlfc->stats.tlv_parse_failed++;
}
}
+
+ if (wlfc)
+ wlfc->stats.dhd_hdrpulls++;
+
+ dhd_os_wlfc_unblock(dhd);
return BCME_OK;
}
int
-dhd_wlfc_init(dhd_pub_t *dhd)
+dhd_wlfc_commit_packets(dhd_pub_t *dhdp, f_commitpkt_t fcommit, void* commit_ctx, void *pktbuf,
+ bool need_toggle_host_if)
{
- char iovbuf[12]; /* Room for "tlv" + '\0' + parameter */
- /* enable all signals & indicate host proptxstatus logic is active */
- uint32 tlv = dhd->wlfc_enabled?
- WLFC_FLAGS_RSSI_SIGNALS |
- WLFC_FLAGS_XONXOFF_SIGNALS |
- WLFC_FLAGS_CREDIT_STATUS_SIGNALS |
+ int ac, single_ac = 0, rc = BCME_OK;
+ dhd_wlfc_commit_info_t commit_info;
+ athost_wl_status_info_t* ctx;
+ int bus_retry_count = 0;
+
+ uint8 tx_map = 0; /* packets (send + in queue), Bitmask for 4 ACs + BC/MC */
+ uint8 rx_map = 0; /* received packets, Bitmask for 4 ACs + BC/MC */
+ uint8 packets_map = 0; /* packets in queue, Bitmask for 4 ACs + BC/MC */
+ bool no_credit = FALSE;
+
+ int lender;
+
+ if ((dhdp == NULL) || (fcommit == NULL)) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
+ }
+
+ dhd_os_wlfc_block(dhdp);
+
+ if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+ if (pktbuf) {
+ DHD_PKTTAG_WLFCPKT_SET(PKTTAG(pktbuf), 0);
+ }
+ rc = WLFC_UNSUPPORTED;
+ goto exit2;
+ }
+
+ ctx = (athost_wl_status_info_t*)dhdp->wlfc_state;
+
+
+ if (dhdp->proptxstatus_module_ignore) {
+ if (pktbuf) {
+ uint32 htod = 0;
+ WL_TXSTATUS_SET_FLAGS(htod, WLFC_PKTFLAG_PKTFROMHOST);
+ _dhd_wlfc_pushheader(ctx, pktbuf, FALSE, 0, 0, htod, 0, FALSE);
+ if (fcommit(commit_ctx, pktbuf))
+ PKTFREE(ctx->osh, pktbuf, TRUE);
+ rc = BCME_OK;
+ }
+ goto exit;
+ }
+
+ memset(&commit_info, 0, sizeof(commit_info));
+
+ /*
+	Commit packets for regular AC traffic, higher priority first.
+	First, use up the FIFO credits available to each AC. Based on the
+	distribution and the credits left, borrow from other ACs as applicable.
+
+	NOTE:
+	If the bus between the host and firmware is overwhelmed by traffic
+	from the host, higher priority traffic may starve the lower priority
+	queues. If that occurs often, we may have to employ a weighted
+	round-robin or ucode-based scheme to avoid low priority packet starvation.
+ */
+
+ if (pktbuf) {
+ DHD_PKTTAG_WLFCPKT_SET(PKTTAG(pktbuf), 1);
+ ac = DHD_PKTTAG_FIFO(PKTTAG(pktbuf));
+ /* en-queue the packets to respective queue. */
+ rc = _dhd_wlfc_enque_delayq(ctx, pktbuf, ac);
+ if (rc) {
+ _dhd_wlfc_prec_drop(ctx->dhdp, (ac << 1), pktbuf, FALSE);
+ } else {
+ ctx->stats.pktin++;
+ ctx->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(pktbuf))][ac]++;
+ }
+ }
+
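+	/* service FIFOs from index AC_COUNT (BC/MC) down through the four ACs */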
+ for (ac = AC_COUNT; ac >= 0; ac--) {
+ if (dhdp->wlfc_rxpkt_chk) {
+ /* check rx packet */
+ uint32 curr_t = OSL_SYSUPTIME(), delta;
+
+ delta = curr_t - ctx->rx_timestamp[ac];
+ if (delta < WLFC_RX_DETECTION_THRESHOLD_MS) {
+ rx_map |= (1 << ac);
+ }
+ }
+
+ if (ctx->pkt_cnt_per_ac[ac] == 0) {
+ continue;
+ }
+ tx_map |= (1 << ac);
+ single_ac = ac + 1;
+ while (FALSE == dhdp->proptxstatus_txoff) {
+ /* packets from delayQ with less priority are fresh and
+ * they'd need header and have no MAC entry
+ */
+ no_credit = (ctx->FIFO_credit[ac] < 1);
+ if (dhdp->proptxstatus_credit_ignore ||
+ ((ac == AC_COUNT) && !ctx->bcmc_credit_supported)) {
+ no_credit = FALSE;
+ }
+
+ lender = -1;
+#ifdef LIMIT_BORROW
+ if (no_credit && (ac < AC_COUNT) && (tx_map >= rx_map)) {
+ /* try borrow from lower priority */
+ lender = _dhd_wlfc_borrow_credit(ctx, ac - 1, ac, FALSE);
+ if (lender != -1) {
+ no_credit = FALSE;
+ }
+ }
+#endif
+ commit_info.needs_hdr = 1;
+ commit_info.mac_entry = NULL;
+ commit_info.p = _dhd_wlfc_deque_delayedq(ctx, ac,
+ &(commit_info.ac_fifo_credit_spent),
+ &(commit_info.needs_hdr),
+ &(commit_info.mac_entry),
+ no_credit);
+ commit_info.pkt_type = (commit_info.needs_hdr) ? eWLFC_PKTTYPE_DELAYED :
+ eWLFC_PKTTYPE_SUPPRESSED;
+
+ if (commit_info.p == NULL) {
+#ifdef LIMIT_BORROW
+ if (lender != -1) {
+ _dhd_wlfc_return_credit(ctx, lender, ac);
+ }
+#endif
+ break;
+ }
+
+ if (!dhdp->proptxstatus_credit_ignore && (lender == -1)) {
+ ASSERT(ctx->FIFO_credit[ac] >= commit_info.ac_fifo_credit_spent);
+ }
+			/* at this point we either hold a credit or no credit is needed */
+ rc = _dhd_wlfc_handle_packet_commit(ctx, ac, &commit_info, fcommit,
+ commit_ctx);
+
+ /* Bus commits may fail (e.g. flow control); abort after retries */
+ if (rc == BCME_OK) {
+ if (commit_info.ac_fifo_credit_spent && (lender == -1)) {
+ ctx->FIFO_credit[ac]--;
+ }
+#ifdef LIMIT_BORROW
+ else if (!commit_info.ac_fifo_credit_spent && (lender != -1)) {
+ _dhd_wlfc_return_credit(ctx, lender, ac);
+ }
+#endif
+ } else {
+#ifdef LIMIT_BORROW
+ if (lender != -1) {
+ _dhd_wlfc_return_credit(ctx, lender, ac);
+ }
+#endif
+ bus_retry_count++;
+ if (bus_retry_count >= BUS_RETRIES) {
+ DHD_ERROR(("%s: bus error %d\n", __FUNCTION__, rc));
+ goto exit;
+ }
+ }
+ }
+
+ if (ctx->pkt_cnt_per_ac[ac]) {
+ packets_map |= (1 << ac);
+ }
+ }
+
+ if ((tx_map == 0) || dhdp->proptxstatus_credit_ignore) {
+		/* no AC had traffic queued, or credit accounting is ignored; skip the borrow logic */
+ rc = BCME_OK;
+ goto exit;
+ }
+
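+	/* only consider borrowing once a single AC has been the sole traffic source for a defer period */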
+ if (((tx_map & (tx_map - 1)) == 0) && (tx_map >= rx_map)) {
+		/* only one tx AC has traffic and there is no rx activity on a higher AC */
+ if ((single_ac == ctx->single_ac) && ctx->allow_credit_borrow) {
+ ac = single_ac - 1;
+ } else {
+ uint32 delta;
+ uint32 curr_t = OSL_SYSUPTIME();
+
+ if (single_ac != ctx->single_ac) {
+ /* new single ac traffic (first single ac or different single ac) */
+ ctx->allow_credit_borrow = 0;
+ ctx->single_ac_timestamp = curr_t;
+ ctx->single_ac = (uint8)single_ac;
+ rc = BCME_OK;
+ goto exit;
+ }
+			/* same AC traffic; check whether it has lasted long enough */
+ delta = curr_t - ctx->single_ac_timestamp;
+
+ if (delta >= WLFC_BORROW_DEFER_PERIOD_MS) {
+				/* waited long enough; borrowing is now allowed */
+ ctx->allow_credit_borrow = 1;
+ ac = single_ac - 1;
+ } else {
+ rc = BCME_OK;
+ goto exit;
+ }
+ }
+ } else {
+ /* If we have multiple AC traffic, turn off borrowing, mark time and bail out */
+ ctx->allow_credit_borrow = 0;
+ ctx->single_ac_timestamp = 0;
+ ctx->single_ac = 0;
+ rc = BCME_OK;
+ goto exit;
+ }
+
+ if (packets_map == 0) {
+ /* nothing to send, skip borrow */
+ rc = BCME_OK;
+ goto exit;
+ }
+
+ /* At this point, borrow all credits only for ac */
+ while (FALSE == dhdp->proptxstatus_txoff) {
+#ifdef LIMIT_BORROW
+ if ((lender = _dhd_wlfc_borrow_credit(ctx, AC_COUNT, ac, TRUE)) == -1) {
+ break;
+ }
+#endif
+ commit_info.p = _dhd_wlfc_deque_delayedq(ctx, ac,
+ &(commit_info.ac_fifo_credit_spent),
+ &(commit_info.needs_hdr),
+ &(commit_info.mac_entry),
+ FALSE);
+ if (commit_info.p == NULL) {
+			/* before borrowing only one AC had traffic, and now even that AC's queue is empty */
+#ifdef LIMIT_BORROW
+ _dhd_wlfc_return_credit(ctx, lender, ac);
+#endif
+ break;
+ }
+
+ commit_info.pkt_type = (commit_info.needs_hdr) ? eWLFC_PKTTYPE_DELAYED :
+ eWLFC_PKTTYPE_SUPPRESSED;
+
+ rc = _dhd_wlfc_handle_packet_commit(ctx, ac, &commit_info,
+ fcommit, commit_ctx);
+
+ /* Bus commits may fail (e.g. flow control); abort after retries */
+ if (rc == BCME_OK) {
+
+ if (commit_info.ac_fifo_credit_spent) {
+#ifndef LIMIT_BORROW
+ ctx->FIFO_credit[ac]--;
+#endif
+ } else {
+#ifdef LIMIT_BORROW
+ _dhd_wlfc_return_credit(ctx, lender, ac);
+#endif
+ }
+ } else {
+#ifdef LIMIT_BORROW
+ _dhd_wlfc_return_credit(ctx, lender, ac);
+#endif
+ bus_retry_count++;
+ if (bus_retry_count >= BUS_RETRIES) {
+ DHD_ERROR(("%s: bus error %d\n", __FUNCTION__, rc));
+ goto exit;
+ }
+ }
+ }
+
+exit:
+ if (need_toggle_host_if && ctx->toggle_host_if) {
+ ctx->toggle_host_if = 0;
+ }
+
+exit2:
+ dhd_os_wlfc_unblock(dhdp);
+ return rc;
+}
+
+int
+dhd_wlfc_txcomplete(dhd_pub_t *dhd, void *txp, bool success)
+{
+ athost_wl_status_info_t* wlfc;
+ void* pout = NULL;
+ int rtn = BCME_OK;
+ if ((dhd == NULL) || (txp == NULL)) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
+ }
+
+ dhd_os_wlfc_block(dhd);
+
+ if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+ rtn = WLFC_UNSUPPORTED;
+ goto EXIT;
+ }
+
+ wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+ if (DHD_PKTTAG_SIGNALONLY(PKTTAG(txp))) {
+#ifdef PROP_TXSTATUS_DEBUG
+ wlfc->stats.signal_only_pkts_freed++;
+#endif
+ /* is this a signal-only packet? */
+ _dhd_wlfc_pullheader(wlfc, txp);
+ PKTFREE(wlfc->osh, txp, TRUE);
+ goto EXIT;
+ }
+
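+	/* the bus failed to send this packet (or txstatus is ignored): undo accounting and free it */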
+ if (!success || dhd->proptxstatus_txstatus_ignore) {
+ wlfc_mac_descriptor_t *entry = _dhd_wlfc_find_table_entry(wlfc, txp);
+
+ WLFC_DBGMESG(("At: %s():%d, bus_complete() failure for %p, htod_tag:0x%08x\n",
+ __FUNCTION__, __LINE__, txp, DHD_PKTTAG_H2DTAG(PKTTAG(txp))));
+ if (!WLFC_GET_AFQ(dhd->wlfc_mode)) {
+ _dhd_wlfc_hanger_poppkt(wlfc->hanger, WL_TXSTATUS_GET_HSLOT(
+ DHD_PKTTAG_H2DTAG(PKTTAG(txp))), &pout, TRUE);
+ ASSERT(txp == pout);
+ }
+
+ /* indicate failure and free the packet */
+ dhd_txcomplete(dhd, txp, success);
+
+ /* return the credit, if necessary */
+ _dhd_wlfc_return_implied_credit(wlfc, txp);
+
+ entry->transit_count--;
+ if (entry->suppressed && (--entry->suppr_transit_count == 0)) {
+ entry->suppressed = FALSE;
+ }
+ wlfc->pkt_cnt_in_drv[DHD_PKTTAG_IF(PKTTAG(txp))][DHD_PKTTAG_FIFO(PKTTAG(txp))]--;
+ wlfc->stats.pktout++;
+ PKTFREE(wlfc->osh, txp, TRUE);
+ } else {
+ /* bus confirmed pkt went to firmware side */
+ if (WLFC_GET_AFQ(dhd->wlfc_mode)) {
+ _dhd_wlfc_enque_afq(wlfc, txp);
+ } else {
+ int hslot = WL_TXSTATUS_GET_HSLOT(DHD_PKTTAG_H2DTAG(PKTTAG(txp)));
+ _dhd_wlfc_hanger_free_pkt(wlfc, hslot,
+ WLFC_HANGER_PKT_STATE_TXCOMPLETE, -1);
+ }
+ }
+
+EXIT:
+ dhd_os_wlfc_unblock(dhd);
+ return rtn;
+}
+
+int
+dhd_wlfc_init(dhd_pub_t *dhd)
+{
+ char iovbuf[14]; /* Room for "tlv" + '\0' + parameter */
+ /* enable all signals & indicate host proptxstatus logic is active */
+ uint32 tlv, mode, fw_caps;
+ int ret = 0;
+
+ if (dhd == NULL) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
+ }
+
+ dhd_os_wlfc_block(dhd);
+ if (dhd->wlfc_enabled) {
+ DHD_ERROR(("%s():%d, Already enabled!\n", __FUNCTION__, __LINE__));
+ dhd_os_wlfc_unblock(dhd);
+ return BCME_OK;
+ }
+ dhd->wlfc_enabled = TRUE;
+ dhd_os_wlfc_unblock(dhd);
+
+ tlv = WLFC_FLAGS_RSSI_SIGNALS |
+ WLFC_FLAGS_XONXOFF_SIGNALS |
+ WLFC_FLAGS_CREDIT_STATUS_SIGNALS |
WLFC_FLAGS_HOST_PROPTXSTATUS_ACTIVE |
- WLFC_FLAGS_HOST_RXRERODER_ACTIVE : 0;
- /* WLFC_FLAGS_HOST_PROPTXSTATUS_ACTIVE | WLFC_FLAGS_HOST_RXRERODER_ACTIVE : 0; */
+ WLFC_FLAGS_HOST_RXRERODER_ACTIVE;
+
+
+ /*
+	Try to enable/disable signaling by sending the "tlv" iovar. If that fails,
+	should we fall back to no flow control? For now, just print a message.
+ */
+
+	/* enable proptxstatus signaling by default */
+ bcm_mkiovar("tlv", (char *)&tlv, 4, iovbuf, sizeof(iovbuf));
+ if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0) < 0) {
+ DHD_ERROR(("dhd_wlfc_init(): failed to enable/disable bdcv2 tlv signaling\n"));
+ }
+ else {
+ /*
+ Leaving the message for now, it should be removed after a while; once
+		Leaving the message for now; it should be removed once
+		the tlv situation is stable.
+ DHD_ERROR(("dhd_wlfc_init(): successfully %s bdcv2 tlv signaling, %d\n",
+ dhd->wlfc_enabled?"enabled":"disabled", tlv));
+ }
+
+ /* query caps */
+ ret = bcm_mkiovar("wlfc_mode", (char *)&mode, 4, iovbuf, sizeof(iovbuf));
+ if (ret > 0) {
+ ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
+ }
+
+ if (ret >= 0) {
+ fw_caps = *((uint32 *)iovbuf);
+ mode = 0;
+		DHD_ERROR(("%s: query wlfc_mode succeeded, fw_caps=0x%x\n", __FUNCTION__, fw_caps));
+
+ if (WLFC_IS_OLD_DEF(fw_caps)) {
+			/* enable proptxstatus v2 by default */
+ mode = WLFC_MODE_AFQ;
+ } else {
+ WLFC_SET_AFQ(mode, WLFC_GET_AFQ(fw_caps));
+ WLFC_SET_REUSESEQ(mode, WLFC_GET_REUSESEQ(fw_caps));
+ WLFC_SET_REORDERSUPP(mode, WLFC_GET_REORDERSUPP(fw_caps));
+ }
+ ret = bcm_mkiovar("wlfc_mode", (char *)&mode, 4, iovbuf, sizeof(iovbuf));
+ if (ret > 0) {
+ ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+ }
+ }
+
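+	/* record the negotiated wlfc mode under the flow-control lock */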
+ dhd_os_wlfc_block(dhd);
+
+ dhd->wlfc_mode = 0;
+ if (ret >= 0) {
+ if (WLFC_IS_OLD_DEF(mode)) {
+ WLFC_SET_AFQ(dhd->wlfc_mode, (mode == WLFC_MODE_AFQ));
+ } else {
+ dhd->wlfc_mode = mode;
+ }
+ }
+ DHD_ERROR(("dhd_wlfc_init(): wlfc_mode=0x%x, ret=%d\n", dhd->wlfc_mode, ret));
+
+ dhd_os_wlfc_unblock(dhd);
+
+ if (dhd->plat_init)
+ dhd->plat_init((void *)dhd);
+
+ return BCME_OK;
+}
+
+int
+dhd_wlfc_hostreorder_init(dhd_pub_t *dhd)
+{
+ char iovbuf[14]; /* Room for "tlv" + '\0' + parameter */
+ /* enable only ampdu hostreorder here */
+ uint32 tlv;
+
+ if (dhd == NULL) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
+ }
+
+ DHD_TRACE(("%s():%d Enter\n", __FUNCTION__, __LINE__));
+
+ tlv = WLFC_FLAGS_HOST_RXRERODER_ACTIVE;
+
+	/* enable only the ampdu hostreorder signal */
+ bcm_mkiovar("tlv", (char *)&tlv, 4, iovbuf, sizeof(iovbuf));
+ if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0) < 0) {
+ DHD_ERROR(("%s(): failed to enable/disable bdcv2 tlv signaling\n",
+ __FUNCTION__));
+ }
+ else {
+ /*
+		Leaving the message for now; it should be removed once
+		the tlv situation is stable.
+ */
+ DHD_ERROR(("%s(): successful bdcv2 tlv signaling, %d\n",
+ __FUNCTION__, tlv));
+ }
+
+ dhd_os_wlfc_block(dhd);
+ dhd->proptxstatus_mode = WLFC_ONLY_AMPDU_HOSTREORDER;
+ dhd_os_wlfc_unblock(dhd);
+
+ return BCME_OK;
+}
+
+int
+dhd_wlfc_cleanup_txq(dhd_pub_t *dhd, f_processpkt_t fn, void *arg)
+{
+ if (dhd == NULL) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
+ }
+
+ dhd_os_wlfc_block(dhd);
+
+ if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+ dhd_os_wlfc_unblock(dhd);
+ return WLFC_UNSUPPORTED;
+ }
+
+ _dhd_wlfc_cleanup_txq(dhd, fn, arg);
+
+ dhd_os_wlfc_unblock(dhd);
+
+ return BCME_OK;
+}
+
+/* release all packet resources */
+int
+dhd_wlfc_cleanup(dhd_pub_t *dhd, f_processpkt_t fn, void *arg)
+{
+ if (dhd == NULL) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
+ }
+
+ dhd_os_wlfc_block(dhd);
+
+ if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+ dhd_os_wlfc_unblock(dhd);
+ return WLFC_UNSUPPORTED;
+ }
+
+ _dhd_wlfc_cleanup(dhd, fn, arg);
+
+ dhd_os_wlfc_unblock(dhd);
+
+ return BCME_OK;
+}
+
+int
+dhd_wlfc_deinit(dhd_pub_t *dhd)
+{
+ char iovbuf[32]; /* Room for "ampdu_hostreorder" or "tlv" + '\0' + parameter */
+ /* cleanup all psq related resources */
+ athost_wl_status_info_t* wlfc;
+ uint32 tlv = 0;
+ uint32 hostreorder = 0;
+ int ret = BCME_OK;
+
+ if (dhd == NULL) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
+ }
+
+ dhd_os_wlfc_block(dhd);
+ if (!dhd->wlfc_enabled) {
+ DHD_ERROR(("%s():%d, Already disabled!\n", __FUNCTION__, __LINE__));
+ dhd_os_wlfc_unblock(dhd);
+ return BCME_OK;
+ }
+ dhd->wlfc_enabled = FALSE;
+ dhd_os_wlfc_unblock(dhd);
+
+ /* query ampdu hostreorder */
+ bcm_mkiovar("ampdu_hostreorder", NULL, 0, iovbuf, sizeof(iovbuf));
+ ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
+ if (ret == BCME_OK)
+ hostreorder = *((uint32 *)iovbuf);
+ else {
+ hostreorder = 0;
+ DHD_ERROR(("%s():%d, ampdu_hostreorder get failed Err = %d\n",
+ __FUNCTION__, __LINE__, ret));
+ }
+
+ if (hostreorder) {
+ tlv = WLFC_FLAGS_HOST_RXRERODER_ACTIVE;
+		DHD_ERROR(("%s():%d, maintain HOST RXREORDER flag in tlv\n",
+ __FUNCTION__, __LINE__));
+ }
+
+	/* Disable proptxstatus signaling for deinit */
+ bcm_mkiovar("tlv", (char *)&tlv, 4, iovbuf, sizeof(iovbuf));
+ ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0);
+
+ if (ret == BCME_OK) {
+ /*
+		Leaving the message for now; it should be removed once
+		the tlv situation is stable.
+ */
+ DHD_ERROR(("%s():%d successfully %s bdcv2 tlv signaling, %d\n",
+ __FUNCTION__, __LINE__,
+ dhd->wlfc_enabled?"enabled":"disabled", tlv));
+ } else
+ DHD_ERROR(("%s():%d failed to enable/disable bdcv2 tlv signaling Err = %d\n",
+ __FUNCTION__, __LINE__, ret));
+
+ dhd_os_wlfc_block(dhd);
+
+ if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+ dhd_os_wlfc_unblock(dhd);
+ return WLFC_UNSUPPORTED;
+ }
+
+ wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+
+#ifdef PROP_TXSTATUS_DEBUG
+ if (!WLFC_GET_AFQ(dhd->wlfc_mode))
+ {
+ int i;
+ wlfc_hanger_t* h = (wlfc_hanger_t*)wlfc->hanger;
+ for (i = 0; i < h->max_items; i++) {
+ if (h->items[i].state != WLFC_HANGER_ITEM_STATE_FREE) {
+ WLFC_DBGMESG(("%s() pkt[%d] = 0x%p, FIFO_credit_used:%d\n",
+ __FUNCTION__, i, h->items[i].pkt,
+ DHD_PKTTAG_CREDITCHECK(PKTTAG(h->items[i].pkt))));
+ }
+ }
+ }
+#endif
+
+ _dhd_wlfc_cleanup(dhd, NULL, NULL);
+
+ if (!WLFC_GET_AFQ(dhd->wlfc_mode)) {
+ /* delete hanger */
+ _dhd_wlfc_hanger_delete(dhd->osh, wlfc->hanger);
+ }
+
+
+ /* free top structure */
+ MFREE(dhd->osh, dhd->wlfc_state, sizeof(athost_wl_status_info_t));
+ dhd->wlfc_state = NULL;
+ dhd->proptxstatus_mode = hostreorder ?
+ WLFC_ONLY_AMPDU_HOSTREORDER : WLFC_FCMODE_NONE;
+
+ dhd_os_wlfc_unblock(dhd);
+
+ if (dhd->plat_deinit)
+ dhd->plat_deinit((void *)dhd);
+ return BCME_OK;
+}
+
+int dhd_wlfc_interface_event(dhd_pub_t *dhdp, uint8 action, uint8 ifid, uint8 iftype, uint8* ea)
+{
+ int rc;
+
+ if (dhdp == NULL) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
+ }
+
+ dhd_os_wlfc_block(dhdp);
+
+ if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+ dhd_os_wlfc_unblock(dhdp);
+ return WLFC_UNSUPPORTED;
+ }
+
+ rc = _dhd_wlfc_interface_entry_update(dhdp->wlfc_state, action, ifid, iftype, ea);
+
+ dhd_os_wlfc_unblock(dhdp);
+ return rc;
+}
+
+int dhd_wlfc_FIFOcreditmap_event(dhd_pub_t *dhdp, uint8* event_data)
+{
+ int rc;
+
+ if (dhdp == NULL) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
+ }
+
+ dhd_os_wlfc_block(dhdp);
+
+ if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+ dhd_os_wlfc_unblock(dhdp);
+ return WLFC_UNSUPPORTED;
+ }
+
+ rc = _dhd_wlfc_FIFOcreditmap_update(dhdp->wlfc_state, event_data);
+
+ dhd_os_wlfc_unblock(dhdp);
+
+ return rc;
+}
+
+int dhd_wlfc_BCMCCredit_support_event(dhd_pub_t *dhdp)
+{
+ int rc;
+
+ if (dhdp == NULL) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
+ }
+
+ dhd_os_wlfc_block(dhdp);
+
+ if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+ dhd_os_wlfc_unblock(dhdp);
+ return WLFC_UNSUPPORTED;
+ }
+
+ rc = _dhd_wlfc_BCMCCredit_support_update(dhdp->wlfc_state);
+
+ dhd_os_wlfc_unblock(dhdp);
+ return rc;
+}
+
+int
+dhd_wlfc_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf)
+{
+ int i;
+ uint8* ea;
+ athost_wl_status_info_t* wlfc;
+ wlfc_hanger_t* h;
+ wlfc_mac_descriptor_t* mac_table;
+ wlfc_mac_descriptor_t* interfaces;
+ char* iftypes[] = {"STA", "AP", "WDS", "p2pGO", "p2pCL"};
+
+ if (!dhdp || !strbuf) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
+ }
+
+ dhd_os_wlfc_block(dhdp);
+
+ if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+ dhd_os_wlfc_unblock(dhdp);
+ return WLFC_UNSUPPORTED;
+ }
+
+ wlfc = (athost_wl_status_info_t*)dhdp->wlfc_state;
+
+ h = (wlfc_hanger_t*)wlfc->hanger;
+ if (h == NULL) {
+ bcm_bprintf(strbuf, "wlfc-hanger not initialized yet\n");
+ }
+
+ mac_table = wlfc->destination_entries.nodes;
+ interfaces = wlfc->destination_entries.interfaces;
+ bcm_bprintf(strbuf, "---- wlfc stats ----\n");
+
+ if (!WLFC_GET_AFQ(dhdp->wlfc_mode)) {
+ h = (wlfc_hanger_t*)wlfc->hanger;
+ if (h == NULL) {
+ bcm_bprintf(strbuf, "wlfc-hanger not initialized yet\n");
+ } else {
+ bcm_bprintf(strbuf, "wlfc hanger (pushed,popped,f_push,"
+ "f_pop,f_slot, pending) = (%d,%d,%d,%d,%d,%d)\n",
+ h->pushed,
+ h->popped,
+ h->failed_to_push,
+ h->failed_to_pop,
+ h->failed_slotfind,
+ (h->pushed - h->popped));
+ }
+ }
+
+ bcm_bprintf(strbuf, "wlfc fail(tlv,credit_rqst,mac_update,psmode_update), "
+ "(dq_full,rollback_fail) = (%d,%d,%d,%d), (%d,%d)\n",
+ wlfc->stats.tlv_parse_failed,
+ wlfc->stats.credit_request_failed,
+ wlfc->stats.mac_update_failed,
+ wlfc->stats.psmode_update_failed,
+ wlfc->stats.delayq_full_error,
+ wlfc->stats.rollback_failed);
+
+ bcm_bprintf(strbuf, "PKTS (init_credit,credit,sent,drop_d,drop_s,outoforder) "
+ "(AC0[%d,%d,%d,%d,%d,%d],AC1[%d,%d,%d,%d,%d,%d],AC2[%d,%d,%d,%d,%d,%d],"
+ "AC3[%d,%d,%d,%d,%d,%d],BC_MC[%d,%d,%d,%d,%d,%d])\n",
+ wlfc->Init_FIFO_credit[0], wlfc->FIFO_credit[0], wlfc->stats.send_pkts[0],
+ wlfc->stats.drop_pkts[0], wlfc->stats.drop_pkts[1], wlfc->stats.ooo_pkts[0],
+ wlfc->Init_FIFO_credit[1], wlfc->FIFO_credit[1], wlfc->stats.send_pkts[1],
+ wlfc->stats.drop_pkts[2], wlfc->stats.drop_pkts[3], wlfc->stats.ooo_pkts[1],
+ wlfc->Init_FIFO_credit[2], wlfc->FIFO_credit[2], wlfc->stats.send_pkts[2],
+ wlfc->stats.drop_pkts[4], wlfc->stats.drop_pkts[5], wlfc->stats.ooo_pkts[2],
+ wlfc->Init_FIFO_credit[3], wlfc->FIFO_credit[3], wlfc->stats.send_pkts[3],
+ wlfc->stats.drop_pkts[6], wlfc->stats.drop_pkts[7], wlfc->stats.ooo_pkts[3],
+ wlfc->Init_FIFO_credit[4], wlfc->FIFO_credit[4], wlfc->stats.send_pkts[4],
+ wlfc->stats.drop_pkts[8], wlfc->stats.drop_pkts[9], wlfc->stats.ooo_pkts[4]);
+
+ bcm_bprintf(strbuf, "\n");
+ for (i = 0; i < WLFC_MAX_IFNUM; i++) {
+ if (interfaces[i].occupied) {
+ char* iftype_desc;
+
+ if (interfaces[i].iftype > WLC_E_IF_ROLE_P2P_CLIENT)
+				iftype_desc = "<Unknown>";
+ else
+ iftype_desc = iftypes[interfaces[i].iftype];
+
+ ea = interfaces[i].ea;
+ bcm_bprintf(strbuf, "INTERFACE[%d].ea = "
+ "[%02x:%02x:%02x:%02x:%02x:%02x], if:%d, type: %s "
+ "netif_flow_control:%s\n", i,
+ ea[0], ea[1], ea[2], ea[3], ea[4], ea[5],
+ interfaces[i].interface_id,
+ iftype_desc, ((wlfc->hostif_flow_state[i] == OFF)
+ ? " OFF":" ON"));
+
+ bcm_bprintf(strbuf, "INTERFACE[%d].PSQ(len,state,credit),(trans,supp_trans)"
+ "= (%d,%s,%d),(%d,%d)\n",
+ i,
+ interfaces[i].psq.len,
+ ((interfaces[i].state ==
+ WLFC_STATE_OPEN) ? "OPEN":"CLOSE"),
+ interfaces[i].requested_credit,
+ interfaces[i].transit_count, interfaces[i].suppr_transit_count);
+
+ bcm_bprintf(strbuf, "INTERFACE[%d].PSQ"
+ "(delay0,sup0,afq0),(delay1,sup1,afq1),(delay2,sup2,afq2),"
+ "(delay3,sup3,afq3),(delay4,sup4,afq4) = (%d,%d,%d),"
+ "(%d,%d,%d),(%d,%d,%d),(%d,%d,%d),(%d,%d,%d)\n",
+ i,
+ interfaces[i].psq.q[0].len,
+ interfaces[i].psq.q[1].len,
+ interfaces[i].afq.q[0].len,
+ interfaces[i].psq.q[2].len,
+ interfaces[i].psq.q[3].len,
+ interfaces[i].afq.q[1].len,
+ interfaces[i].psq.q[4].len,
+ interfaces[i].psq.q[5].len,
+ interfaces[i].afq.q[2].len,
+ interfaces[i].psq.q[6].len,
+ interfaces[i].psq.q[7].len,
+ interfaces[i].afq.q[3].len,
+ interfaces[i].psq.q[8].len,
+ interfaces[i].psq.q[9].len,
+ interfaces[i].afq.q[4].len);
+ }
+ }
+
+ bcm_bprintf(strbuf, "\n");
+ for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) {
+ if (mac_table[i].occupied) {
+ ea = mac_table[i].ea;
+ bcm_bprintf(strbuf, "MAC_table[%d].ea = "
+ "[%02x:%02x:%02x:%02x:%02x:%02x], if:%d \n", i,
+ ea[0], ea[1], ea[2], ea[3], ea[4], ea[5],
+ mac_table[i].interface_id);
+
+ bcm_bprintf(strbuf, "MAC_table[%d].PSQ(len,state,credit),(trans,supp_trans)"
+ "= (%d,%s,%d),(%d,%d)\n",
+ i,
+ mac_table[i].psq.len,
+ ((mac_table[i].state ==
+ WLFC_STATE_OPEN) ? " OPEN":"CLOSE"),
+ mac_table[i].requested_credit,
+ mac_table[i].transit_count, mac_table[i].suppr_transit_count);
+#ifdef PROP_TXSTATUS_DEBUG
+ bcm_bprintf(strbuf, "MAC_table[%d]: (opened, closed) = (%d, %d)\n",
+ i, mac_table[i].opened_ct, mac_table[i].closed_ct);
+#endif
+ bcm_bprintf(strbuf, "MAC_table[%d].PSQ"
+ "(delay0,sup0,afq0),(delay1,sup1,afq1),(delay2,sup2,afq2),"
+ "(delay3,sup3,afq3),(delay4,sup4,afq4) =(%d,%d,%d),"
+ "(%d,%d,%d),(%d,%d,%d),(%d,%d,%d),(%d,%d,%d)\n",
+ i,
+ mac_table[i].psq.q[0].len,
+ mac_table[i].psq.q[1].len,
+ mac_table[i].afq.q[0].len,
+ mac_table[i].psq.q[2].len,
+ mac_table[i].psq.q[3].len,
+ mac_table[i].afq.q[1].len,
+ mac_table[i].psq.q[4].len,
+ mac_table[i].psq.q[5].len,
+ mac_table[i].afq.q[2].len,
+ mac_table[i].psq.q[6].len,
+ mac_table[i].psq.q[7].len,
+ mac_table[i].afq.q[3].len,
+ mac_table[i].psq.q[8].len,
+ mac_table[i].psq.q[9].len,
+ mac_table[i].afq.q[4].len);
+
+ }
+ }
+
+#ifdef PROP_TXSTATUS_DEBUG
+ {
+ int avg;
+ int moving_avg = 0;
+ int moving_samples;
+
+ if (wlfc->stats.latency_sample_count) {
+ moving_samples = sizeof(wlfc->stats.deltas)/sizeof(uint32);
+
+ for (i = 0; i < moving_samples; i++)
+ moving_avg += wlfc->stats.deltas[i];
+ moving_avg /= moving_samples;
+
+ avg = (100 * wlfc->stats.total_status_latency) /
+ wlfc->stats.latency_sample_count;
+ bcm_bprintf(strbuf, "txstatus latency (average, last, moving[%d]) = "
+ "(%d.%d, %03d, %03d)\n",
+ moving_samples, avg/100, (avg - (avg/100)*100),
+ wlfc->stats.latency_most_recent,
+ moving_avg);
+ }
+ }
+
+ bcm_bprintf(strbuf, "wlfc- fifo[0-5] credit stats: sent = (%d,%d,%d,%d,%d,%d), "
+ "back = (%d,%d,%d,%d,%d,%d)\n",
+ wlfc->stats.fifo_credits_sent[0],
+ wlfc->stats.fifo_credits_sent[1],
+ wlfc->stats.fifo_credits_sent[2],
+ wlfc->stats.fifo_credits_sent[3],
+ wlfc->stats.fifo_credits_sent[4],
+ wlfc->stats.fifo_credits_sent[5],
+
+ wlfc->stats.fifo_credits_back[0],
+ wlfc->stats.fifo_credits_back[1],
+ wlfc->stats.fifo_credits_back[2],
+ wlfc->stats.fifo_credits_back[3],
+ wlfc->stats.fifo_credits_back[4],
+ wlfc->stats.fifo_credits_back[5]);
+ {
+ uint32 fifo_cr_sent = 0;
+ uint32 fifo_cr_acked = 0;
+ uint32 request_cr_sent = 0;
+ uint32 request_cr_ack = 0;
+ uint32 bc_mc_cr_ack = 0;
+
+ for (i = 0; i < sizeof(wlfc->stats.fifo_credits_sent)/sizeof(uint32); i++) {
+ fifo_cr_sent += wlfc->stats.fifo_credits_sent[i];
+ }
+
+ for (i = 0; i < sizeof(wlfc->stats.fifo_credits_back)/sizeof(uint32); i++) {
+ fifo_cr_acked += wlfc->stats.fifo_credits_back[i];
+ }
+
+ for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) {
+ if (wlfc->destination_entries.nodes[i].occupied) {
+ request_cr_sent +=
+ wlfc->destination_entries.nodes[i].dstncredit_sent_packets;
+ }
+ }
+ for (i = 0; i < WLFC_MAX_IFNUM; i++) {
+ if (wlfc->destination_entries.interfaces[i].occupied) {
+ request_cr_sent +=
+ wlfc->destination_entries.interfaces[i].dstncredit_sent_packets;
+ }
+ }
+ for (i = 0; i < WLFC_MAC_DESC_TABLE_SIZE; i++) {
+ if (wlfc->destination_entries.nodes[i].occupied) {
+ request_cr_ack +=
+ wlfc->destination_entries.nodes[i].dstncredit_acks;
+ }
+ }
+ for (i = 0; i < WLFC_MAX_IFNUM; i++) {
+ if (wlfc->destination_entries.interfaces[i].occupied) {
+ request_cr_ack +=
+ wlfc->destination_entries.interfaces[i].dstncredit_acks;
+ }
+ }
+ bcm_bprintf(strbuf, "wlfc- (sent, status) => pq(%d,%d), vq(%d,%d),"
+ "other:%d, bc_mc:%d, signal-only, (sent,freed): (%d,%d)",
+ fifo_cr_sent, fifo_cr_acked,
+ request_cr_sent, request_cr_ack,
+ wlfc->destination_entries.other.dstncredit_acks,
+ bc_mc_cr_ack,
+ wlfc->stats.signal_only_pkts_sent, wlfc->stats.signal_only_pkts_freed);
+ }
+#endif /* PROP_TXSTATUS_DEBUG */
+ bcm_bprintf(strbuf, "\n");
+ bcm_bprintf(strbuf, "wlfc- pkt((in,2bus,txstats,hdrpull,out),(dropped,hdr_only,wlc_tossed)"
+ "(freed,free_err,rollback)) = "
+ "((%d,%d,%d,%d,%d),(%d,%d,%d),(%d,%d,%d))\n",
+ wlfc->stats.pktin,
+ wlfc->stats.pkt2bus,
+ wlfc->stats.txstatus_in,
+ wlfc->stats.dhd_hdrpulls,
+ wlfc->stats.pktout,
+
+ wlfc->stats.pktdropped,
+ wlfc->stats.wlfc_header_only_pkt,
+ wlfc->stats.wlc_tossed_pkts,
+
+ wlfc->stats.pkt_freed,
+ wlfc->stats.pkt_free_err, wlfc->stats.rollback);
+
+ bcm_bprintf(strbuf, "wlfc- suppress((d11,wlc,err),enq(d11,wl,hq,mac?),retx(d11,wlc,hq)) = "
+ "((%d,%d,%d),(%d,%d,%d,%d),(%d,%d,%d))\n",
+ wlfc->stats.d11_suppress,
+ wlfc->stats.wl_suppress,
+ wlfc->stats.bad_suppress,
+
+ wlfc->stats.psq_d11sup_enq,
+ wlfc->stats.psq_wlsup_enq,
+ wlfc->stats.psq_hostq_enq,
+ wlfc->stats.mac_handle_notfound,
+
+ wlfc->stats.psq_d11sup_retx,
+ wlfc->stats.psq_wlsup_retx,
+ wlfc->stats.psq_hostq_retx);
+
+ bcm_bprintf(strbuf, "wlfc- cleanup(txq,psq,fw) = (%d,%d,%d)\n",
+ wlfc->stats.cleanup_txq_cnt,
+ wlfc->stats.cleanup_psq_cnt,
+ wlfc->stats.cleanup_fw_cnt);
+
+ bcm_bprintf(strbuf, "wlfc- generic error: %d\n", wlfc->stats.generic_error);
+
+ for (i = 0; i < WLFC_MAX_IFNUM; i++) {
+ bcm_bprintf(strbuf, "wlfc- if[%d], pkt_cnt_in_q/AC[0-4] = (%d,%d,%d,%d,%d)\n", i,
+ wlfc->pkt_cnt_in_q[i][0],
+ wlfc->pkt_cnt_in_q[i][1],
+ wlfc->pkt_cnt_in_q[i][2],
+ wlfc->pkt_cnt_in_q[i][3],
+ wlfc->pkt_cnt_in_q[i][4]);
+ }
+ bcm_bprintf(strbuf, "\n");
+
+ dhd_os_wlfc_unblock(dhdp);
+ return BCME_OK;
+}
+
+int dhd_wlfc_clear_counts(dhd_pub_t *dhd)
+{
+ athost_wl_status_info_t* wlfc;
+ wlfc_hanger_t* hanger;
+ if (dhd == NULL) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
+ }
- /*
- try to enable/disable signaling by sending "tlv" iovar. if that fails,
- fallback to no flow control? Print a message for now.
- */
+ dhd_os_wlfc_block(dhd);
- /* enable proptxtstatus signaling by default */
- bcm_mkiovar("tlv", (char *)&tlv, 4, iovbuf, sizeof(iovbuf));
- if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0) < 0) {
- DHD_ERROR(("dhd_wlfc_init(): failed to enable/disable bdcv2 tlv signaling\n"));
+ if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+ dhd_os_wlfc_unblock(dhd);
+ return WLFC_UNSUPPORTED;
}
- else {
- /*
- Leaving the message for now, it should be removed after a while; once
- the tlv situation is stable.
- */
- DHD_ERROR(("dhd_wlfc_init(): successfully %s bdcv2 tlv signaling, %d\n",
- dhd->wlfc_enabled?"enabled":"disabled", tlv));
+
+ wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+
+ memset(&wlfc->stats, 0, sizeof(athost_wl_stat_counters_t));
+
+ if (!WLFC_GET_AFQ(dhd->wlfc_mode)) {
+ hanger = (wlfc_hanger_t*)wlfc->hanger;
+
+ hanger->pushed = 0;
+ hanger->popped = 0;
+ hanger->failed_slotfind = 0;
+ hanger->failed_to_pop = 0;
+ hanger->failed_to_push = 0;
}
+
+ dhd_os_wlfc_unblock(dhd);
+
return BCME_OK;
}
-int
-dhd_wlfc_enable(dhd_pub_t *dhd)
+int dhd_wlfc_get_enable(dhd_pub_t *dhd, bool *val)
+{
+ if (!dhd || !val) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
+ }
+
+ dhd_os_wlfc_block(dhd);
+
+ *val = dhd->wlfc_enabled;
+
+ dhd_os_wlfc_unblock(dhd);
+
+ return BCME_OK;
+}
+
+int dhd_wlfc_get_mode(dhd_pub_t *dhd, int *val)
+{
+ if (!dhd || !val) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
+ }
+
+ dhd_os_wlfc_block(dhd);
+
+ *val = dhd->wlfc_state ? dhd->proptxstatus_mode : 0;
+
+ dhd_os_wlfc_unblock(dhd);
+
+ return BCME_OK;
+}
+
+int dhd_wlfc_set_mode(dhd_pub_t *dhd, int val)
+{
+ if (!dhd) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
+ }
+
+ dhd_os_wlfc_block(dhd);
+
+ if (dhd->wlfc_state) {
+ dhd->proptxstatus_mode = val & 0xff;
+ }
+
+ dhd_os_wlfc_unblock(dhd);
+
+ return BCME_OK;
+}
+
+bool dhd_wlfc_is_header_only_pkt(dhd_pub_t * dhd, void *pktbuf)
{
- int i;
athost_wl_status_info_t* wlfc;
+ bool rc = FALSE;
- if (!dhd->wlfc_enabled || dhd->wlfc_state)
- return BCME_OK;
+ if (dhd == NULL) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return FALSE;
+ }
- /* allocate space to track txstatus propagated from firmware */
- dhd->wlfc_state = MALLOC(dhd->osh, sizeof(athost_wl_status_info_t));
- if (dhd->wlfc_state == NULL)
- return BCME_NOMEM;
+ dhd_os_wlfc_block(dhd);
+
+ if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+ dhd_os_wlfc_unblock(dhd);
+ return FALSE;
+ }
- /* initialize state space */
wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
- memset(wlfc, 0, sizeof(athost_wl_status_info_t));
- /* remember osh & dhdp */
- wlfc->osh = dhd->osh;
- wlfc->dhdp = dhd;
+ if (PKTLEN(wlfc->osh, pktbuf) == 0) {
+ wlfc->stats.wlfc_header_only_pkt++;
+ rc = TRUE;
+ }
+
+ dhd_os_wlfc_unblock(dhd);
+
+ return rc;
+}
- wlfc->hanger =
- dhd_wlfc_hanger_create(dhd->osh, WLFC_HANGER_MAXITEMS);
- if (wlfc->hanger == NULL) {
- MFREE(dhd->osh, dhd->wlfc_state, sizeof(athost_wl_status_info_t));
- dhd->wlfc_state = NULL;
- DHD_ERROR(("Failed to malloc dhd->wlfc_state\n"));
- return BCME_NOMEM;
+int dhd_wlfc_flowcontrol(dhd_pub_t *dhdp, bool state, bool bAcquireLock)
+{
+ if (dhdp == NULL) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
}
- /* initialize all interfaces to accept traffic */
- for (i = 0; i < WLFC_MAX_IFNUM; i++) {
- wlfc->hostif_flow_state[i] = OFF;
+ if (bAcquireLock) {
+ dhd_os_wlfc_block(dhdp);
}
- wlfc->destination_entries.other.state = WLFC_STATE_OPEN;
- /* bc/mc FIFO is always open [credit aside], i.e. b[5] */
- wlfc->destination_entries.other.ac_bitmap = 0x1f;
- wlfc->destination_entries.other.interface_id = 0;
+ if (!dhdp->wlfc_state || (dhdp->proptxstatus_mode == WLFC_FCMODE_NONE) ||
+ dhdp->proptxstatus_module_ignore) {
+ if (bAcquireLock) {
+ dhd_os_wlfc_unblock(dhdp);
+ }
+ return WLFC_UNSUPPORTED;
+ }
- wlfc->proptxstatus_mode = WLFC_FCMODE_EXPLICIT_CREDIT;
+ if (state != dhdp->proptxstatus_txoff) {
+ dhdp->proptxstatus_txoff = state;
+ }
- wlfc->allow_credit_borrow = TRUE;
- wlfc->borrow_defer_timestamp = 0;
+ if (bAcquireLock) {
+ dhd_os_wlfc_unblock(dhdp);
+ }
return BCME_OK;
}
-/* release all packet resources */
-void
-dhd_wlfc_cleanup(dhd_pub_t *dhd, ifpkt_cb_t fn, int arg)
+int dhd_wlfc_save_rxpath_ac_time(dhd_pub_t * dhd, uint8 prio)
{
- int i;
- int total_entries;
- athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
- dhd->wlfc_state;
- wlfc_mac_descriptor_t* table;
- wlfc_hanger_t* h;
- int prec;
- void *pkt = NULL;
- struct pktq *txq = NULL;
- if (dhd->wlfc_state == NULL)
- return;
- /* flush bus->txq */
- txq = dhd_bus_txq(dhd->bus);
- /* any in the hanger? */
- h = (wlfc_hanger_t*)wlfc->hanger;
- total_entries = sizeof(wlfc->destination_entries)/sizeof(wlfc_mac_descriptor_t);
- /* search all entries, include nodes as well as interfaces */
- table = (wlfc_mac_descriptor_t*)&wlfc->destination_entries;
+ athost_wl_status_info_t* wlfc;
+ int rx_path_ac = -1;
- for (i = 0; i < total_entries; i++) {
- if (table[i].occupied && (fn == NULL || (arg == table[i].interface_id))) {
- if (table[i].psq.len) {
- WLFC_DBGMESG(("%s(): DELAYQ[%d].len = %d\n",
- __FUNCTION__, i, table[i].psq.len));
- /* release packets held in DELAYQ */
- pktq_flush(wlfc->osh, &table[i].psq, TRUE, fn, arg);
- }
- if (fn == NULL)
- table[i].occupied = 0;
- }
+ if ((dhd == NULL) || (prio >= NUMPRIO)) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
}
- for (prec = 0; prec < txq->num_prec; prec++) {
- pkt = pktq_pdeq_with_fn(txq, prec, fn, arg);
- while (pkt) {
- for (i = 0; i < h->max_items; i++) {
- if (pkt == h->items[i].pkt) {
- if (h->items[i].state == WLFC_HANGER_ITEM_STATE_INUSE) {
- PKTFREE(wlfc->osh, h->items[i].pkt, TRUE);
- h->items[i].state = WLFC_HANGER_ITEM_STATE_FREE;
- } else if (h->items[i].state ==
- WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED) {
- /* These are already freed from the psq */
- h->items[i].state = WLFC_HANGER_ITEM_STATE_FREE;
- }
- break;
- }
- }
- pkt = pktq_pdeq(txq, prec);
+
+ dhd_os_wlfc_block(dhd);
+
+ if (!dhd->wlfc_rxpkt_chk) {
+ dhd_os_wlfc_unblock(dhd);
+ return BCME_OK;
+ }
+
+ if (!dhd->wlfc_state || (dhd->proptxstatus_mode == WLFC_FCMODE_NONE)) {
+ dhd_os_wlfc_unblock(dhd);
+ return WLFC_UNSUPPORTED;
+ }
+
+ wlfc = (athost_wl_status_info_t*)dhd->wlfc_state;
+
+ rx_path_ac = prio2fifo[prio];
+ wlfc->rx_timestamp[rx_path_ac] = OSL_SYSUPTIME();
+
+ dhd_os_wlfc_unblock(dhd);
+
+ return BCME_OK;
+}
+
+int dhd_wlfc_get_module_ignore(dhd_pub_t *dhd, int *val)
+{
+ if (!dhd || !val) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
+ }
+
+ dhd_os_wlfc_block(dhd);
+
+ *val = dhd->proptxstatus_module_ignore;
+
+ dhd_os_wlfc_unblock(dhd);
+
+ return BCME_OK;
+}
+
+int dhd_wlfc_set_module_ignore(dhd_pub_t *dhd, int val)
+{
+ char iovbuf[14]; /* Room for "tlv" + '\0' + parameter */
+ uint32 tlv = 0;
+ bool bChanged = FALSE;
+
+ if (!dhd) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
+ }
+
+ dhd_os_wlfc_block(dhd);
+
+ if ((bool)val != dhd->proptxstatus_module_ignore) {
+ dhd->proptxstatus_module_ignore = (val != 0);
+ /* force txstatus_ignore sync with proptxstatus_module_ignore */
+ dhd->proptxstatus_txstatus_ignore = dhd->proptxstatus_module_ignore;
+ if (FALSE == dhd->proptxstatus_module_ignore) {
+ tlv = WLFC_FLAGS_RSSI_SIGNALS |
+ WLFC_FLAGS_XONXOFF_SIGNALS |
+ WLFC_FLAGS_CREDIT_STATUS_SIGNALS |
+ WLFC_FLAGS_HOST_PROPTXSTATUS_ACTIVE;
}
+ /* always enable host reorder */
+ tlv |= WLFC_FLAGS_HOST_RXRERODER_ACTIVE;
+ bChanged = TRUE;
}
- /* flush remained pkt in hanger queue, not in bus->txq */
- for (i = 0; i < h->max_items; i++) {
- if (h->items[i].state == WLFC_HANGER_ITEM_STATE_INUSE) {
- if (fn == NULL || (*fn)(h->items[i].pkt, arg)) {
- PKTFREE(wlfc->osh, h->items[i].pkt, TRUE);
- h->items[i].state = WLFC_HANGER_ITEM_STATE_FREE;
- }
- } else if (h->items[i].state == WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED) {
- if (fn == NULL || (*fn)(h->items[i].pkt, arg)) {
- /* These are freed from the psq so no need to free again */
- h->items[i].state = WLFC_HANGER_ITEM_STATE_FREE;
- }
+
+ dhd_os_wlfc_unblock(dhd);
+
+ if (bChanged) {
+		/* update proptxstatus tlv signaling to match the new setting */
+ bcm_mkiovar("tlv", (char *)&tlv, 4, iovbuf, sizeof(iovbuf));
+ if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0) < 0) {
+ DHD_ERROR(("%s: failed to set bdcv2 tlv signaling to 0x%x\n",
+ __FUNCTION__, tlv));
+ }
+ else {
+ DHD_ERROR(("%s: successfully set bdcv2 tlv signaling to 0x%x\n",
+ __FUNCTION__, tlv));
}
}
- return;
+ return BCME_OK;
}
-void
-dhd_wlfc_deinit(dhd_pub_t *dhd)
+int dhd_wlfc_get_credit_ignore(dhd_pub_t *dhd, int *val)
{
- /* cleanup all psq related resources */
- athost_wl_status_info_t* wlfc = (athost_wl_status_info_t*)
- dhd->wlfc_state;
+ if (!dhd || !val) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
+ }
dhd_os_wlfc_block(dhd);
- if (dhd->wlfc_state == NULL) {
- dhd_os_wlfc_unblock(dhd);
- return;
+
+ *val = dhd->proptxstatus_credit_ignore;
+
+ dhd_os_wlfc_unblock(dhd);
+
+ return BCME_OK;
+}
+
+int dhd_wlfc_set_credit_ignore(dhd_pub_t *dhd, int val)
+{
+ if (!dhd) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
}
-#ifdef PROP_TXSTATUS_DEBUG
- {
- int i;
- wlfc_hanger_t* h = (wlfc_hanger_t*)wlfc->hanger;
- for (i = 0; i < h->max_items; i++) {
- if (h->items[i].state != WLFC_HANGER_ITEM_STATE_FREE) {
- WLFC_DBGMESG(("%s() pkt[%d] = 0x%p, FIFO_credit_used:%d\n",
- __FUNCTION__, i, h->items[i].pkt,
- DHD_PKTTAG_CREDITCHECK(PKTTAG(h->items[i].pkt))));
- }
- }
+ dhd_os_wlfc_block(dhd);
+
+ dhd->proptxstatus_credit_ignore = (val != 0);
+
+ dhd_os_wlfc_unblock(dhd);
+
+ return BCME_OK;
+}
+
+int dhd_wlfc_get_txstatus_ignore(dhd_pub_t *dhd, int *val)
+{
+ if (!dhd || !val) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
}
-#endif
- /* delete hanger */
- dhd_wlfc_hanger_delete(dhd->osh, wlfc->hanger);
- /* free top structure */
- MFREE(dhd->osh, dhd->wlfc_state, sizeof(athost_wl_status_info_t));
- dhd->wlfc_state = NULL;
+ dhd_os_wlfc_block(dhd);
+
+ *val = dhd->proptxstatus_txstatus_ignore;
+
dhd_os_wlfc_unblock(dhd);
- return;
+ return BCME_OK;
+}
+
+int dhd_wlfc_set_txstatus_ignore(dhd_pub_t *dhd, int val)
+{
+ if (!dhd) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
+ }
+
+ dhd_os_wlfc_block(dhd);
+
+ dhd->proptxstatus_txstatus_ignore = (val != 0);
+
+ dhd_os_wlfc_unblock(dhd);
+
+ return BCME_OK;
+}
+
+int dhd_wlfc_get_rxpkt_chk(dhd_pub_t *dhd, int *val)
+{
+ if (!dhd || !val) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
+ }
+
+ dhd_os_wlfc_block(dhd);
+
+ *val = dhd->wlfc_rxpkt_chk;
+
+ dhd_os_wlfc_unblock(dhd);
+
+ return BCME_OK;
+}
+
+int dhd_wlfc_set_rxpkt_chk(dhd_pub_t *dhd, int val)
+{
+ if (!dhd) {
+ DHD_ERROR(("Error: %s():%d\n", __FUNCTION__, __LINE__));
+ return BCME_BADARG;
+ }
+
+ dhd_os_wlfc_block(dhd);
+
+ dhd->wlfc_rxpkt_chk = (val != 0);
+
+ dhd_os_wlfc_unblock(dhd);
+
+ return BCME_OK;
}
#endif /* PROP_TXSTATUS */
/*
* $Copyright Open 2009 Broadcom Corporation$
-* $Id: dhd_wlfc.h 398418 2013-04-24 15:18:27Z $
+* $Id: dhd_wlfc.h 490028 2014-07-09 05:58:25Z $
*
*/
#ifndef __wlfc_host_driver_definitions_h__
#include <dhd_qmon.h>
#endif
+/* #define OOO_DEBUG */
-/* 16 bits will provide an absolute max of 65536 slots */
-#define WLFC_HANGER_MAXITEMS 1024
+#define WLFC_UNSUPPORTED -9999
-#define WLFC_HANGER_ITEM_STATE_FREE 1
-#define WLFC_HANGER_ITEM_STATE_INUSE 2
-#define WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED 3
+#define WLFC_NO_TRAFFIC -1
+#define WLFC_MULTI_TRAFFIC 0
-#define WLFC_PKTID_HSLOT_MASK 0xffff /* allow 16 bits only */
-#define WLFC_PKTID_HSLOT_SHIFT 8
+#define BUS_RETRIES 1 /* # of retries before aborting a bus tx operation */
-/* x -> TXSTATUS TAG to/from firmware */
-#define WLFC_PKTID_HSLOT_GET(x) \
- (((x) >> WLFC_PKTID_HSLOT_SHIFT) & WLFC_PKTID_HSLOT_MASK)
-#define WLFC_PKTID_HSLOT_SET(var, slot) \
- ((var) = ((var) & ~(WLFC_PKTID_HSLOT_MASK << WLFC_PKTID_HSLOT_SHIFT)) | \
- (((slot) & WLFC_PKTID_HSLOT_MASK) << WLFC_PKTID_HSLOT_SHIFT))
+/* 16 bits will provide an absolute max of 65536 slots */
+#define WLFC_HANGER_MAXITEMS 3072
-#define WLFC_PKTID_FREERUNCTR_MASK 0xff
+#define WLFC_HANGER_ITEM_STATE_FREE 1
+#define WLFC_HANGER_ITEM_STATE_INUSE 2
+#define WLFC_HANGER_ITEM_STATE_INUSE_SUPPRESSED 3
-#define WLFC_PKTID_FREERUNCTR_GET(x) ((x) & WLFC_PKTID_FREERUNCTR_MASK)
-#define WLFC_PKTID_FREERUNCTR_SET(var, ctr) \
- ((var) = (((var) & ~WLFC_PKTID_FREERUNCTR_MASK) | \
- (((ctr) & WLFC_PKTID_FREERUNCTR_MASK))))
+#define WLFC_HANGER_PKT_STATE_TXSTATUS 1
+#define WLFC_HANGER_PKT_STATE_TXCOMPLETE 2
+#define WLFC_HANGER_PKT_STATE_CLEANUP 4
-#define WLFC_PKTQ_PENQ(pq, prec, p) ((pktq_full((pq)) || pktq_pfull((pq), (prec)))? \
- NULL : pktq_penq((pq), (prec), (p)))
-#define WLFC_PKTQ_PENQ_HEAD(pq, prec, p) ((pktq_full((pq)) || pktq_pfull((pq), (prec))) ? \
- NULL : pktq_penq_head((pq), (prec), (p)))
+typedef enum {
+ Q_TYPE_PSQ,
+ Q_TYPE_AFQ
+} q_type_t;
typedef enum ewlfc_packet_state {
eWLFC_PKTTYPE_NEW,
typedef struct wlfc_hanger_item {
uint8 state;
uint8 gen;
- uint8 pad[2];
+ uint8 pkt_state;
+ uint8 pkt_txstatus;
uint32 identifier;
void* pkt;
#ifdef PROP_TXSTATUS_DEBUG
uint32 push_time;
#endif
+ struct wlfc_hanger_item *next;
} wlfc_hanger_item_t;
typedef struct wlfc_hanger {
#define WLFC_STATE_CLOSE 2
#define WLFC_PSQ_PREC_COUNT ((AC_COUNT + 1) * 2) /* 2 for each AC traffic and bc/mc */
+#define WLFC_AFQ_PREC_COUNT (AC_COUNT + 1)
#define WLFC_PSQ_LEN 2048
#define WLFC_FLOWCONTROL_HIWATER (2048 - 256)
#define WLFC_FLOWCONTROL_LOWATER 256
+#define WLFC_LOG_BUF_SIZE (1024*1024)
+
typedef struct wlfc_mac_descriptor {
uint8 occupied;
uint8 interface_id;
uint8 seq[AC_COUNT + 1];
uint8 generation;
struct pktq psq;
+ /* packets at firmware */
+ struct pktq afq;
/* The AC pending bitmap that was reported to the fw at last change */
uint8 traffic_lastreported_bmp;
/* The new AC pending bitmap */
/* 1= send on next opportunity */
uint8 send_tim_signal;
uint8 mac_handle;
- /* Number of packets in transit for this entry. */
+ /* Number of packets at dongle for this entry. */
uint transit_count;
/* Number of suppressions to wait for before evicting from delayQ */
uint suppr_transit_count;
- /* Used when a new suppress is detected to track the number of
- * packets getting suppressed
- */
- uint suppress_count;
/* flag. TRUE when in suppress state */
uint8 suppressed;
- uint8 deleting;
#ifdef QMONITOR
dhd_qmon_t qmon;
uint32 opened_ct;
uint32 closed_ct;
#endif
+ struct wlfc_mac_descriptor* prev;
+ struct wlfc_mac_descriptor* next;
} wlfc_mac_descriptor_t;
+typedef struct dhd_wlfc_commit_info {
+ uint8 needs_hdr;
+ uint8 ac_fifo_credit_spent;
+ ewlfc_packet_state_t pkt_type;
+ wlfc_mac_descriptor_t* mac_entry;
+ void* p;
+} dhd_wlfc_commit_info_t;
+
#define WLFC_DECR_SEQCOUNT(entry, prec) do { if (entry->seq[(prec)] == 0) {\
entry->seq[prec] = 0xff; } else entry->seq[prec]--;} while (0)
typedef struct athost_wl_stat_counters {
uint32 pktin;
+ uint32 pktout;
uint32 pkt2bus;
uint32 pktdropped;
uint32 tlv_parse_failed;
uint32 generic_error;
/* an extra one for bc/mc traffic */
uint32 send_pkts[AC_COUNT + 1];
+ uint32 drop_pkts[WLFC_PSQ_PREC_COUNT];
+ uint32 ooo_pkts[AC_COUNT + 1];
#ifdef PROP_TXSTATUS_DEBUG
/* all pkt2bus -> txstatus latency accumulated */
uint32 latency_sample_count;
uint32 total_status_latency;
uint32 latency_most_recent;
- int idx_delta;
+ int idx_delta;
uint32 deltas[10];
uint32 fifo_credits_sent[6];
uint32 fifo_credits_back[6];
uint32 signal_only_pkts_sent;
uint32 signal_only_pkts_freed;
#endif
+ uint32 cleanup_txq_cnt;
+ uint32 cleanup_psq_cnt;
+ uint32 cleanup_fw_cnt;
} athost_wl_stat_counters_t;
#ifdef PROP_TXSTATUS_DEBUG
#define WLFC_FCMODE_NONE 0
#define WLFC_FCMODE_IMPLIED_CREDIT 1
#define WLFC_FCMODE_EXPLICIT_CREDIT 2
+#define WLFC_ONLY_AMPDU_HOSTREORDER 3
+
+/* Reserved credits ratio when borrowed by higher priority */
+#define WLFC_BORROW_LIMIT_RATIO 4
/* How long to defer borrowing in milliseconds */
#define WLFC_BORROW_DEFER_PERIOD_MS 100
+/* How long to defer flow control in milliseconds */
+#define WLFC_FC_DEFER_PERIOD_MS 200
+
+/* Window for detecting traffic occurrence per AC, in milliseconds */
+#define WLFC_RX_DETECTION_THRESHOLD_MS 100
+
/* Mask to represent available ACs (note: BC/MC is ignored) */
#define WLFC_AC_MASK 0xF
-/* Mask to check for only on-going AC_BE traffic */
-#define WLFC_AC_BE_TRAFFIC_ONLY 0xD
-
typedef struct athost_wl_status_info {
uint8 last_seqid_to_wlc;
/* stats */
athost_wl_stat_counters_t stats;
+ int Init_FIFO_credit[AC_COUNT + 2];
+
/* the additional ones are for bc/mc and ATIM FIFO */
int FIFO_credit[AC_COUNT + 2];
/* A place holder for bc/mc and packets to unknown destinations */
wlfc_mac_descriptor_t other;
} destination_entries;
- /* token position for different priority packets */
- uint8 token_pos[AC_COUNT+1];
+
+ wlfc_mac_descriptor_t *active_entry_head;
+ int active_entry_count;
+
+ wlfc_mac_descriptor_t* requested_entry[WLFC_MAC_DESC_TABLE_SIZE];
+ int requested_entry_count;
+
+ /* pkt counts for each interface and ac */
+ int pkt_cnt_in_q[WLFC_MAX_IFNUM][AC_COUNT+1];
+ int pkt_cnt_per_ac[AC_COUNT+1];
+ int pkt_cnt_in_drv[WLFC_MAX_IFNUM][AC_COUNT+1];
+ uint8 allow_fc;
+ uint32 fc_defer_timestamp;
+ uint32 rx_timestamp[AC_COUNT+1];
/* ON/OFF state for flow control to the host network interface */
uint8 hostif_flow_state[WLFC_MAX_IFNUM];
uint8 host_ifidx;
/* to flow control an OS interface */
uint8 toggle_host_if;
- /*
- Mode in which the dhd flow control shall operate. Must be set before
- traffic starts to the device.
- 0 - Do not do any proptxtstatus flow control
- 1 - Use implied credit from a packet status
- 2 - Use explicit credit
- */
- uint8 proptxstatus_mode;
-
/* To borrow credits */
uint8 allow_credit_borrow;
- /* Timestamp to compute how long to defer borrowing for */
- uint32 borrow_defer_timestamp;
+ /* ac number for the first single ac traffic */
+ uint8 single_ac;
+
+ /* Timestamp for the first single ac traffic */
+ uint32 single_ac_timestamp;
+
+ bool bcmc_credit_supported;
} athost_wl_status_info_t;
-int dhd_wlfc_enable(dhd_pub_t *dhd);
-int dhd_wlfc_interface_event(struct dhd_info *,
- ewlfc_mac_entry_action_t action, uint8 ifid, uint8 iftype, uint8* ea);
-int dhd_wlfc_FIFOcreditmap_event(struct dhd_info *dhd, uint8* event_data);
-int dhd_wlfc_event(struct dhd_info *dhd);
-int dhd_os_wlfc_block(dhd_pub_t *pub);
-int dhd_os_wlfc_unblock(dhd_pub_t *pub);
+/* Please be mindful that total pkttag space is 32 octets only */
+typedef struct dhd_pkttag {
+ /*
+ b[15] - 1 = wlfc packet
+ b[14:13] - encryption exemption
+ b[12 ] - 1 = event channel
+ b[11 ] - 1 = this packet was sent in response to one time packet request,
+ do not increment credit on status for this one. [WLFC_CTL_TYPE_MAC_REQUEST_PACKET].
+ b[10 ] - 1 = signal-only-packet to firmware [i.e. nothing to piggyback on]
+ b[9 ] - 1 = packet is host->firmware (transmit direction)
+ - 0 = packet received from firmware (firmware->host)
+ b[8 ] - 1 = packet was sent due to credit_request (pspoll),
+ packet does not count against FIFO credit.
+ - 0 = normal transaction, packet counts against FIFO credit
+ b[7 ] - 1 = AP, 0 = STA
+ b[6:4] - AC FIFO number
+ b[3:0] - interface index
+ */
+ uint16 if_flags;
+ /* destination MAC address for this packet so that not every
+ module needs to open the packet to find this
+ */
+ uint8 dstn_ether[ETHER_ADDR_LEN];
+ /*
+ This 32-bit tag goes from host to device with every packet.
+ */
+ uint32 htod_tag;
-void dhd_wlfc_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf);
-int dhd_wlfc_init(dhd_pub_t *dhd);
-void dhd_wlfc_deinit(dhd_pub_t *dhd);
+ /*
+ This 16-bit field holds the original sequence number of every suppressed packet.
+ */
+ uint16 htod_seq;
+
+ /*
+ This pointer holds the MAC descriptor entry associated with the packet.
+ */
+ void* entry;
+ /* bus specific stuff */
+ union {
+ struct {
+ void* stuff;
+ uint32 thing1;
+ uint32 thing2;
+ } sd;
+ struct {
+ void* bus;
+ void* urb;
+ } usb;
+ } bus_specific;
+} dhd_pkttag_t;
+
+#define DHD_PKTTAG_WLFCPKT_MASK 0x1
+#define DHD_PKTTAG_WLFCPKT_SHIFT 15
+#define DHD_PKTTAG_WLFCPKT_SET(tag, value) ((dhd_pkttag_t*)(tag))->if_flags = \
+ (((dhd_pkttag_t*)(tag))->if_flags & \
+ ~(DHD_PKTTAG_WLFCPKT_MASK << DHD_PKTTAG_WLFCPKT_SHIFT)) | \
+ (((value) & DHD_PKTTAG_WLFCPKT_MASK) << DHD_PKTTAG_WLFCPKT_SHIFT)
+#define DHD_PKTTAG_WLFCPKT(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \
+ DHD_PKTTAG_WLFCPKT_SHIFT) & DHD_PKTTAG_WLFCPKT_MASK)
+
+#define DHD_PKTTAG_EXEMPT_MASK 0x3
+#define DHD_PKTTAG_EXEMPT_SHIFT 13
+#define DHD_PKTTAG_EXEMPT_SET(tag, value) ((dhd_pkttag_t*)(tag))->if_flags = \
+ (((dhd_pkttag_t*)(tag))->if_flags & \
+ ~(DHD_PKTTAG_EXEMPT_MASK << DHD_PKTTAG_EXEMPT_SHIFT)) | \
+ (((value) & DHD_PKTTAG_EXEMPT_MASK) << DHD_PKTTAG_EXEMPT_SHIFT)
+#define DHD_PKTTAG_EXEMPT(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \
+ DHD_PKTTAG_EXEMPT_SHIFT) & DHD_PKTTAG_EXEMPT_MASK)
+
+#define DHD_PKTTAG_EVENT_MASK 0x1
+#define DHD_PKTTAG_EVENT_SHIFT 12
+#define DHD_PKTTAG_SETEVENT(tag, event) ((dhd_pkttag_t*)(tag))->if_flags = \
+ (((dhd_pkttag_t*)(tag))->if_flags & \
+ ~(DHD_PKTTAG_EVENT_MASK << DHD_PKTTAG_EVENT_SHIFT)) | \
+ (((event) & DHD_PKTTAG_EVENT_MASK) << DHD_PKTTAG_EVENT_SHIFT)
+#define DHD_PKTTAG_EVENT(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \
+ DHD_PKTTAG_EVENT_SHIFT) & DHD_PKTTAG_EVENT_MASK)
+
+#define DHD_PKTTAG_ONETIMEPKTRQST_MASK 0x1
+#define DHD_PKTTAG_ONETIMEPKTRQST_SHIFT 11
+#define DHD_PKTTAG_SETONETIMEPKTRQST(tag) ((dhd_pkttag_t*)(tag))->if_flags = \
+ (((dhd_pkttag_t*)(tag))->if_flags & \
+ ~(DHD_PKTTAG_ONETIMEPKTRQST_MASK << DHD_PKTTAG_ONETIMEPKTRQST_SHIFT)) | \
+ (1 << DHD_PKTTAG_ONETIMEPKTRQST_SHIFT)
+#define DHD_PKTTAG_ONETIMEPKTRQST(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \
+ DHD_PKTTAG_ONETIMEPKTRQST_SHIFT) & DHD_PKTTAG_ONETIMEPKTRQST_MASK)
+
+#define DHD_PKTTAG_SIGNALONLY_MASK 0x1
+#define DHD_PKTTAG_SIGNALONLY_SHIFT 10
+#define DHD_PKTTAG_SETSIGNALONLY(tag, signalonly) ((dhd_pkttag_t*)(tag))->if_flags = \
+ (((dhd_pkttag_t*)(tag))->if_flags & \
+ ~(DHD_PKTTAG_SIGNALONLY_MASK << DHD_PKTTAG_SIGNALONLY_SHIFT)) | \
+ (((signalonly) & DHD_PKTTAG_SIGNALONLY_MASK) << DHD_PKTTAG_SIGNALONLY_SHIFT)
+#define DHD_PKTTAG_SIGNALONLY(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \
+ DHD_PKTTAG_SIGNALONLY_SHIFT) & DHD_PKTTAG_SIGNALONLY_MASK)
+
+#define DHD_PKTTAG_PKTDIR_MASK 0x1
+#define DHD_PKTTAG_PKTDIR_SHIFT 9
+#define DHD_PKTTAG_SETPKTDIR(tag, dir) ((dhd_pkttag_t*)(tag))->if_flags = \
+ (((dhd_pkttag_t*)(tag))->if_flags & \
+ ~(DHD_PKTTAG_PKTDIR_MASK << DHD_PKTTAG_PKTDIR_SHIFT)) | \
+ (((dir) & DHD_PKTTAG_PKTDIR_MASK) << DHD_PKTTAG_PKTDIR_SHIFT)
+#define DHD_PKTTAG_PKTDIR(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \
+ DHD_PKTTAG_PKTDIR_SHIFT) & DHD_PKTTAG_PKTDIR_MASK)
+
+#define DHD_PKTTAG_CREDITCHECK_MASK 0x1
+#define DHD_PKTTAG_CREDITCHECK_SHIFT 8
+#define DHD_PKTTAG_SETCREDITCHECK(tag, check) ((dhd_pkttag_t*)(tag))->if_flags = \
+ (((dhd_pkttag_t*)(tag))->if_flags & \
+ ~(DHD_PKTTAG_CREDITCHECK_MASK << DHD_PKTTAG_CREDITCHECK_SHIFT)) | \
+ (((check) & DHD_PKTTAG_CREDITCHECK_MASK) << DHD_PKTTAG_CREDITCHECK_SHIFT)
+#define DHD_PKTTAG_CREDITCHECK(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \
+ DHD_PKTTAG_CREDITCHECK_SHIFT) & DHD_PKTTAG_CREDITCHECK_MASK)
+
+#define DHD_PKTTAG_IFTYPE_MASK 0x1
+#define DHD_PKTTAG_IFTYPE_SHIFT 7
+#define DHD_PKTTAG_SETIFTYPE(tag, isAP) ((dhd_pkttag_t*)(tag))->if_flags = \
+ (((dhd_pkttag_t*)(tag))->if_flags & \
+ ~(DHD_PKTTAG_IFTYPE_MASK << DHD_PKTTAG_IFTYPE_SHIFT)) | \
+ (((isAP) & DHD_PKTTAG_IFTYPE_MASK) << DHD_PKTTAG_IFTYPE_SHIFT)
+#define DHD_PKTTAG_IFTYPE(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \
+ DHD_PKTTAG_IFTYPE_SHIFT) & DHD_PKTTAG_IFTYPE_MASK)
+
+#define DHD_PKTTAG_FIFO_MASK 0x7
+#define DHD_PKTTAG_FIFO_SHIFT 4
+#define DHD_PKTTAG_SETFIFO(tag, fifo) ((dhd_pkttag_t*)(tag))->if_flags = \
+ (((dhd_pkttag_t*)(tag))->if_flags & ~(DHD_PKTTAG_FIFO_MASK << DHD_PKTTAG_FIFO_SHIFT)) | \
+ (((fifo) & DHD_PKTTAG_FIFO_MASK) << DHD_PKTTAG_FIFO_SHIFT)
+#define DHD_PKTTAG_FIFO(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \
+ DHD_PKTTAG_FIFO_SHIFT) & DHD_PKTTAG_FIFO_MASK)
+
+#define DHD_PKTTAG_IF_MASK 0xf
+#define DHD_PKTTAG_IF_SHIFT 0
+#define DHD_PKTTAG_SETIF(tag, if) ((dhd_pkttag_t*)(tag))->if_flags = \
+ (((dhd_pkttag_t*)(tag))->if_flags & ~(DHD_PKTTAG_IF_MASK << DHD_PKTTAG_IF_SHIFT)) | \
+ (((if) & DHD_PKTTAG_IF_MASK) << DHD_PKTTAG_IF_SHIFT)
+#define DHD_PKTTAG_IF(tag) ((((dhd_pkttag_t*)(tag))->if_flags >> \
+ DHD_PKTTAG_IF_SHIFT) & DHD_PKTTAG_IF_MASK)
+
+#define DHD_PKTTAG_SETDSTN(tag, dstn_MAC_ea) memcpy(((dhd_pkttag_t*)((tag)))->dstn_ether, \
+ (dstn_MAC_ea), ETHER_ADDR_LEN)
+#define DHD_PKTTAG_DSTN(tag) ((dhd_pkttag_t*)(tag))->dstn_ether
+
+#define DHD_PKTTAG_SET_H2DTAG(tag, h2dvalue) ((dhd_pkttag_t*)(tag))->htod_tag = (h2dvalue)
+#define DHD_PKTTAG_H2DTAG(tag) (((dhd_pkttag_t*)(tag))->htod_tag)
+
+#define DHD_PKTTAG_SET_H2DSEQ(tag, seq) ((dhd_pkttag_t*)(tag))->htod_seq = (seq)
+#define DHD_PKTTAG_H2DSEQ(tag) (((dhd_pkttag_t*)(tag))->htod_seq)
+
+#define DHD_PKTTAG_SET_ENTRY(tag, entry) ((dhd_pkttag_t*)(tag))->entry = (entry)
+#define DHD_PKTTAG_ENTRY(tag) (((dhd_pkttag_t*)(tag))->entry)
+
+#define PSQ_SUP_IDX(x) (x * 2 + 1)
+#define PSQ_DLY_IDX(x) (x * 2)
+
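+/*
+ * Illustrative sketch only (not part of the driver flow): how the accessor
+ * macros above pack and read back fields in if_flags. The field values used
+ * here (interface 2, FIFO 1, host->firmware direction) are arbitrary examples.
+ *
+ *	dhd_pkttag_t tag = {0};
+ *	DHD_PKTTAG_SETIF(&tag, 2);	// b[3:0] = interface index
+ *	DHD_PKTTAG_SETFIFO(&tag, 1);	// b[6:4] = AC FIFO number
+ *	DHD_PKTTAG_SETPKTDIR(&tag, 1);	// b[9]   = host->firmware
+ *	ASSERT(DHD_PKTTAG_IF(&tag) == 2 && DHD_PKTTAG_FIFO(&tag) == 1);
+ */
+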
+typedef int (*f_commitpkt_t)(void* ctx, void* p);
+typedef bool (*f_processpkt_t)(void* p, void* arg);
+
+#ifdef PROP_TXSTATUS_DEBUG
+#define DHD_WLFC_CTRINC_MAC_CLOSE(entry) do { (entry)->closed_ct++; } while (0)
+#define DHD_WLFC_CTRINC_MAC_OPEN(entry) do { (entry)->opened_ct++; } while (0)
+#else
+#define DHD_WLFC_CTRINC_MAC_CLOSE(entry) do {} while (0)
+#define DHD_WLFC_CTRINC_MAC_OPEN(entry) do {} while (0)
+#endif
+
+/* public functions */
int dhd_wlfc_parse_header_info(dhd_pub_t *dhd, void* pktbuf, int tlv_hdr_len,
uchar *reorder_info_buf, uint *reorder_info_len);
-int dhd_wlfc_commit_packets(void* state, f_commitpkt_t fcommit,
- void* commit_ctx, void *pktbuf);
-void dhd_wlfc_cleanup(dhd_pub_t *dhd, ifpkt_cb_t fn, int arg);
-bool ifpkt_fn(void* p, int ifid);
+int dhd_wlfc_commit_packets(dhd_pub_t *dhdp, f_commitpkt_t fcommit,
+ void* commit_ctx, void *pktbuf, bool need_toggle_host_if);
+int dhd_wlfc_txcomplete(dhd_pub_t *dhd, void *txp, bool success);
+int dhd_wlfc_init(dhd_pub_t *dhd);
+#ifdef SUPPORT_P2P_GO_PS
+int dhd_wlfc_suspend(dhd_pub_t *dhd);
+int dhd_wlfc_resume(dhd_pub_t *dhd);
+#endif /* SUPPORT_P2P_GO_PS */
+int dhd_wlfc_hostreorder_init(dhd_pub_t *dhd);
+int dhd_wlfc_cleanup_txq(dhd_pub_t *dhd, f_processpkt_t fn, void *arg);
+int dhd_wlfc_cleanup(dhd_pub_t *dhd, f_processpkt_t fn, void* arg);
+int dhd_wlfc_deinit(dhd_pub_t *dhd);
+int dhd_wlfc_interface_event(dhd_pub_t *dhdp, uint8 action, uint8 ifid, uint8 iftype, uint8* ea);
+int dhd_wlfc_FIFOcreditmap_event(dhd_pub_t *dhdp, uint8* event_data);
+int dhd_wlfc_BCMCCredit_support_event(dhd_pub_t *dhdp);
+int dhd_wlfc_enable(dhd_pub_t *dhdp);
+int dhd_wlfc_dump(dhd_pub_t *dhdp, struct bcmstrbuf *strbuf);
+int dhd_wlfc_clear_counts(dhd_pub_t *dhd);
+int dhd_wlfc_get_enable(dhd_pub_t *dhd, bool *val);
+int dhd_wlfc_get_mode(dhd_pub_t *dhd, int *val);
+int dhd_wlfc_set_mode(dhd_pub_t *dhd, int val);
+bool dhd_wlfc_is_supported(dhd_pub_t *dhd);
+bool dhd_wlfc_is_header_only_pkt(dhd_pub_t * dhd, void *pktbuf);
+int dhd_wlfc_flowcontrol(dhd_pub_t *dhdp, bool state, bool bAcquireLock);
+int dhd_wlfc_save_rxpath_ac_time(dhd_pub_t * dhd, uint8 prio);
+
+int dhd_wlfc_get_module_ignore(dhd_pub_t *dhd, int *val);
+int dhd_wlfc_set_module_ignore(dhd_pub_t *dhd, int val);
+int dhd_wlfc_get_credit_ignore(dhd_pub_t *dhd, int *val);
+int dhd_wlfc_set_credit_ignore(dhd_pub_t *dhd, int val);
+int dhd_wlfc_get_txstatus_ignore(dhd_pub_t *dhd, int *val);
+int dhd_wlfc_set_txstatus_ignore(dhd_pub_t *dhd, int val);
+
+int dhd_wlfc_get_rxpkt_chk(dhd_pub_t *dhd, int *val);
+int dhd_wlfc_set_rxpkt_chk(dhd_pub_t *dhd, int val);
#endif /* __wlfc_host_driver_definitions_h__ */
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: dngl_stats.h 241182 2011-02-17 21:50:03Z $
+ * $Id: dngl_stats.h 464743 2014-03-25 21:04:32Z $
*/
#ifndef _dngl_stats_h_
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: dngl_wlhdr.h 241182 2011-02-17 21:50:03Z $
+ * $Id: dngl_wlhdr.h 464743 2014-03-25 21:04:32Z $
*/
#ifndef _dngl_wlhdr_h_
--- /dev/null
+/*
+ * HND generic packet pool operation primitives
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: $
+ */
+
+#include <typedefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <hnd_pktpool.h>
+
+/* Registry size is one larger than max pools, as slot #0 is reserved */
+#define PKTPOOLREG_RSVD_ID (0U)
+#define PKTPOOLREG_RSVD_PTR (POOLPTR(0xdeaddead))
+#define PKTPOOLREG_FREE_PTR (POOLPTR(NULL))
+
+#define PKTPOOL_REGISTRY_SET(id, pp) (pktpool_registry_set((id), (pp)))
+#define PKTPOOL_REGISTRY_CMP(id, pp) (pktpool_registry_cmp((id), (pp)))
+
+/* Tag a registry entry as free for use */
+#define PKTPOOL_REGISTRY_CLR(id) \
+ PKTPOOL_REGISTRY_SET((id), PKTPOOLREG_FREE_PTR)
+#define PKTPOOL_REGISTRY_ISCLR(id) \
+ (PKTPOOL_REGISTRY_CMP((id), PKTPOOLREG_FREE_PTR))
+
+/* Tag registry entry 0 as reserved */
+#define PKTPOOL_REGISTRY_RSV() \
+ PKTPOOL_REGISTRY_SET(PKTPOOLREG_RSVD_ID, PKTPOOLREG_RSVD_PTR)
+#define PKTPOOL_REGISTRY_ISRSVD() \
+ (PKTPOOL_REGISTRY_CMP(PKTPOOLREG_RSVD_ID, PKTPOOLREG_RSVD_PTR))
+
+/* Walk all un-reserved entries in registry */
+#define PKTPOOL_REGISTRY_FOREACH(id) \
+ for ((id) = 1U; (id) <= pktpools_max; (id)++)
+
+uint32 pktpools_max = 0U; /* maximum number of pools that may be initialized */
+pktpool_t *pktpools_registry[PKTPOOL_MAXIMUM_ID + 1]; /* Pktpool registry */
+
+/* Register/Deregister a pktpool with registry during pktpool_init/deinit */
+static int pktpool_register(pktpool_t * poolptr);
+static int pktpool_deregister(pktpool_t * poolptr);
+
+/** accessor functions required when ROMming this file, forced into RAM */
+static void
+BCMRAMFN(pktpool_registry_set)(int id, pktpool_t *pp)
+{
+ pktpools_registry[id] = pp;
+}
+
+static bool
+BCMRAMFN(pktpool_registry_cmp)(int id, pktpool_t *pp)
+{
+ return pktpools_registry[id] == pp;
+}
+
+int /* Construct a pool registry to serve a maximum of total_pools */
+pktpool_attach(osl_t *osh, uint32 total_pools)
+{
+ uint32 poolid;
+
+ if (pktpools_max != 0U) {
+ return BCME_ERROR;
+ }
+
+ ASSERT(total_pools <= PKTPOOL_MAXIMUM_ID);
+
+ /* Initialize registry: reserve slot#0 and tag others as free */
+ PKTPOOL_REGISTRY_RSV(); /* reserve slot#0 */
+
+ PKTPOOL_REGISTRY_FOREACH(poolid) { /* tag all unreserved entries as free */
+ PKTPOOL_REGISTRY_CLR(poolid);
+ }
+
+ pktpools_max = total_pools;
+
+ return (int)pktpools_max;
+}
+
+int /* Destruct the pool registry. Ascertain all pools were first de-inited */
+pktpool_dettach(osl_t *osh)
+{
+ uint32 poolid;
+
+ if (pktpools_max == 0U) {
+ return BCME_OK;
+ }
+
+ /* Ascertain that no pools are still registered */
+ ASSERT(PKTPOOL_REGISTRY_ISRSVD()); /* assert reserved slot */
+
+ PKTPOOL_REGISTRY_FOREACH(poolid) { /* ascertain all others are free */
+ ASSERT(PKTPOOL_REGISTRY_ISCLR(poolid));
+ }
+
+ pktpools_max = 0U; /* restore boot state */
+
+ return BCME_OK;
+}
+
+static int /* Register a pool in a free slot; return the registry slot index */
+pktpool_register(pktpool_t * poolptr)
+{
+ uint32 poolid;
+
+ if (pktpools_max == 0U) {
+ return PKTPOOL_INVALID_ID; /* registry has not yet been constructed */
+ }
+
+ ASSERT(pktpools_max != 0U);
+
+ /* find an empty slot in pktpools_registry */
+ PKTPOOL_REGISTRY_FOREACH(poolid) {
+ if (PKTPOOL_REGISTRY_ISCLR(poolid)) {
+ PKTPOOL_REGISTRY_SET(poolid, POOLPTR(poolptr)); /* register pool */
+ return (int)poolid; /* return pool ID */
+ }
+ } /* FOREACH */
+
+ return PKTPOOL_INVALID_ID; /* error: registry is full */
+}
+
+static int /* Deregister a pktpool, given the pool pointer; tag slot as free */
+pktpool_deregister(pktpool_t * poolptr)
+{
+ uint32 poolid;
+
+ ASSERT(POOLPTR(poolptr) != POOLPTR(NULL));
+
+ poolid = POOLID(poolptr);
+ ASSERT(poolid <= pktpools_max);
+
+ /* Ascertain that a previously registered poolptr is being de-registered */
+ if (PKTPOOL_REGISTRY_CMP(poolid, POOLPTR(poolptr))) {
+ PKTPOOL_REGISTRY_CLR(poolid); /* mark as free */
+ } else {
+ ASSERT(0);
+ return BCME_ERROR; /* mismatch in registry */
+ }
+
+ return BCME_OK;
+}
+
+
+/*
+ * pktpool_init:
+ * The user provides a pktpool_t structure and specifies the number of packets
+ * to be pre-filled into the pool (pplen). All packets in a pool must have the
+ * same size, which is specified by plen.
+ * pktpool_init first attempts to register the pool and fetch a unique poolid.
+ * If registration fails, it is considered a BCME_ERROR, caused either by the
+ * registry not having been pre-created (pktpool_attach) or by the registry
+ * being full.
+ * If registration succeeds, the requested number of packets is filled into
+ * the pool as part of initialization. If there is not enough memory to
+ * service the request, BCME_NOMEM is returned along with the count of how
+ * many packets were successfully allocated.
+ * In dongle builds, prior to memory reclamation, one should limit the number
+ * of packets allocated during pktpool_init and fill the pool up after the
+ * reclaim stage.
+ */
+int
+pktpool_init(osl_t *osh, pktpool_t *pktp, int *pplen, int plen, bool istx, uint8 type)
+{
+ int i, err = BCME_OK;
+ int pktplen;
+ uint8 pktp_id;
+
+ ASSERT(pktp != NULL);
+ ASSERT(osh != NULL);
+ ASSERT(pplen != NULL);
+
+ pktplen = *pplen;
+
+ bzero(pktp, sizeof(pktpool_t));
+
+ /* assign a unique pktpool id */
+ if ((pktp_id = (uint8) pktpool_register(pktp)) == PKTPOOL_INVALID_ID) {
+ return BCME_ERROR;
+ }
+ POOLSETID(pktp, pktp_id);
+
+ pktp->inited = TRUE;
+ pktp->istx = istx ? TRUE : FALSE;
+ pktp->plen = (uint16)plen;
+ pktp->type = type;
+
+ pktp->maxlen = PKTPOOL_LEN_MAX;
+ pktplen = LIMIT_TO_MAX(pktplen, pktp->maxlen);
+
+ for (i = 0; i < pktplen; i++) {
+ void *p;
+ p = PKTGET(osh, plen, TRUE);
+
+ if (p == NULL) {
+ /* Not able to allocate all requested pkts
+ * so just return what was actually allocated
+ * We can add to the pool later
+ */
+ if (pktp->freelist == NULL) /* pktpool free list is empty */
+ err = BCME_NOMEM;
+
+ goto exit;
+ }
+
+ PKTSETPOOL(osh, p, TRUE, pktp); /* Tag packet with pool ID */
+
+ PKTSETFREELIST(p, pktp->freelist); /* insert p at head of free list */
+ pktp->freelist = p;
+
+ pktp->avail++;
+
+#ifdef BCMDBG_POOL
+ pktp->dbg_q[pktp->dbg_qlen++].p = p;
+#endif
+ }
+
+exit:
+ pktp->len = pktp->avail;
+
+ *pplen = pktp->len;
+ return err;
+}
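+
+/*
+ * Illustrative usage sketch only, under assumed parameters: a registry of up
+ * to 4 pools and a pool of 64 packets of 2048 bytes each (the type value 0 is
+ * an arbitrary assumption for this sketch).
+ *
+ *	pktpool_t pool;
+ *	int n = 64;
+ *	if (pktpool_attach(osh, 4) > 0 &&
+ *	    pktpool_init(osh, &pool, &n, 2048, FALSE, 0) == BCME_OK) {
+ *		// n now reports how many packets were actually pre-filled
+ *	}
+ */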
+
+/*
+ * pktpool_deinit:
+ * Prior to freeing a pktpool, all packets must be first freed into the pktpool.
+ * Upon pktpool_deinit, all packets in the free pool will be freed to the heap.
+ * An assert is in place to ensure that there are no packets still lingering
+ * around. Packets freed to a pool after deinit will cause memory
+ * corruption, as the pktpool_t structure no longer exists.
+ */
+int
+pktpool_deinit(osl_t *osh, pktpool_t *pktp)
+{
+ uint16 freed = 0;
+
+ ASSERT(osh != NULL);
+ ASSERT(pktp != NULL);
+
+#ifdef BCMDBG_POOL
+ {
+ int i;
+ for (i = 0; i <= pktp->len; i++) {
+ pktp->dbg_q[i].p = NULL;
+ }
+ }
+#endif
+
+ while (pktp->freelist != NULL) {
+ void * p = pktp->freelist;
+
+ pktp->freelist = PKTFREELIST(p); /* unlink head packet from free list */
+ PKTSETFREELIST(p, NULL);
+
+ PKTSETPOOL(osh, p, FALSE, NULL); /* clear pool ID tag in pkt */
+
+ PKTFREE(osh, p, pktp->istx); /* free the packet */
+
+ freed++;
+ ASSERT(freed <= pktp->len);
+ }
+
+ pktp->avail -= freed;
+ ASSERT(pktp->avail == 0);
+
+ pktp->len -= freed;
+
+ pktpool_deregister(pktp); /* release previously acquired unique pool id */
+ POOLSETID(pktp, PKTPOOL_INVALID_ID);
+
+ pktp->inited = FALSE;
+
+ /* Are there still pending pkts? */
+ ASSERT(pktp->len == 0);
+
+ return 0;
+}
+
+int
+pktpool_fill(osl_t *osh, pktpool_t *pktp, bool minimal)
+{
+ void *p;
+ int err = 0;
+ int len, psize, maxlen;
+
+ ASSERT(pktp->plen != 0);
+
+ maxlen = pktp->maxlen;
+ psize = minimal ? (maxlen >> 2) : maxlen;
+ for (len = (int)pktp->len; len < psize; len++) {
+
+ p = PKTGET(osh, pktp->plen, TRUE);
+
+ if (p == NULL) {
+ err = BCME_NOMEM;
+ break;
+ }
+
+ if (pktpool_add(pktp, p) != BCME_OK) {
+ PKTFREE(osh, p, FALSE);
+ err = BCME_ERROR;
+ break;
+ }
+ }
+
+ return err;
+}
+
+static void *
+pktpool_deq(pktpool_t *pktp)
+{
+ void *p;
+
+ if (pktp->avail == 0)
+ return NULL;
+
+ ASSERT(pktp->freelist != NULL);
+
+ p = pktp->freelist; /* dequeue packet from head of pktpool free list */
+ pktp->freelist = PKTFREELIST(p); /* free list points to next packet */
+ PKTSETFREELIST(p, NULL);
+
+ pktp->avail--;
+
+ return p;
+}
+
+static void
+pktpool_enq(pktpool_t *pktp, void *p)
+{
+ ASSERT(p != NULL);
+
+ PKTSETFREELIST(p, pktp->freelist); /* insert at head of pktpool free list */
+ pktp->freelist = p; /* free list points to newly inserted packet */
+
+ pktp->avail++;
+ ASSERT(pktp->avail <= pktp->len);
+}
+
+/* utility for registering host addr fill function called from pciedev */
+int
+/* BCMATTACHFN */
+(pktpool_hostaddr_fill_register)(pktpool_t *pktp, pktpool_cb_extn_t cb, void *arg)
+{
+
+ ASSERT(cb != NULL);
+
+ ASSERT(pktp->cbext.cb == NULL);
+ pktp->cbext.cb = cb;
+ pktp->cbext.arg = arg;
+ return 0;
+}
+
+int
+pktpool_rxcplid_fill_register(pktpool_t *pktp, pktpool_cb_extn_t cb, void *arg)
+{
+
+ ASSERT(cb != NULL);
+
+ ASSERT(pktp->rxcplidfn.cb == NULL);
+ pktp->rxcplidfn.cb = cb;
+ pktp->rxcplidfn.arg = arg;
+ return 0;
+}
+/* Callback functions for split rx modes */
+/* whenever the host posts an rx buffer, invoke dma_rxfill from the pciedev layer */
+void
+pktpool_invoke_dmarxfill(pktpool_t *pktp)
+{
+ ASSERT(pktp->dmarxfill.cb);
+ ASSERT(pktp->dmarxfill.arg);
+
+ if (pktp->dmarxfill.cb)
+ pktp->dmarxfill.cb(pktp, pktp->dmarxfill.arg);
+}
+int
+pkpool_haddr_avail_register_cb(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
+{
+
+ ASSERT(cb != NULL);
+
+ pktp->dmarxfill.cb = cb;
+ pktp->dmarxfill.arg = arg;
+
+ return 0;
+}
+/* No BCMATTACHFN as it is used in xdc_enable_ep which is not an attach function */
+int
+pktpool_avail_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
+{
+ int i;
+
+ ASSERT(cb != NULL);
+
+ i = pktp->cbcnt;
+ if (i == PKTPOOL_CB_MAX)
+ return BCME_ERROR;
+
+ ASSERT(pktp->cbs[i].cb == NULL);
+ pktp->cbs[i].cb = cb;
+ pktp->cbs[i].arg = arg;
+ pktp->cbcnt++;
+
+ return 0;
+}
+
+int
+pktpool_empty_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
+{
+ int i;
+
+ ASSERT(cb != NULL);
+
+ i = pktp->ecbcnt;
+ if (i == PKTPOOL_CB_MAX)
+ return BCME_ERROR;
+
+ ASSERT(pktp->ecbs[i].cb == NULL);
+ pktp->ecbs[i].cb = cb;
+ pktp->ecbs[i].arg = arg;
+ pktp->ecbcnt++;
+
+ return 0;
+}
+
+static int
+pktpool_empty_notify(pktpool_t *pktp)
+{
+ int i;
+
+ pktp->empty = TRUE;
+ for (i = 0; i < pktp->ecbcnt; i++) {
+ ASSERT(pktp->ecbs[i].cb != NULL);
+ pktp->ecbs[i].cb(pktp, pktp->ecbs[i].arg);
+ }
+ pktp->empty = FALSE;
+
+ return 0;
+}
+
+#ifdef BCMDBG_POOL
+int
+pktpool_dbg_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
+{
+ int i;
+
+ ASSERT(cb);
+
+ i = pktp->dbg_cbcnt;
+ if (i == PKTPOOL_CB_MAX)
+ return BCME_ERROR;
+
+ ASSERT(pktp->dbg_cbs[i].cb == NULL);
+ pktp->dbg_cbs[i].cb = cb;
+ pktp->dbg_cbs[i].arg = arg;
+ pktp->dbg_cbcnt++;
+
+ return 0;
+}
+
+int pktpool_dbg_notify(pktpool_t *pktp);
+
+int
+pktpool_dbg_notify(pktpool_t *pktp)
+{
+ int i;
+
+ for (i = 0; i < pktp->dbg_cbcnt; i++) {
+ ASSERT(pktp->dbg_cbs[i].cb);
+ pktp->dbg_cbs[i].cb(pktp, pktp->dbg_cbs[i].arg);
+ }
+
+ return 0;
+}
+
+int
+pktpool_dbg_dump(pktpool_t *pktp)
+{
+ int i;
+
+ printf("pool len=%d maxlen=%d\n", pktp->dbg_qlen, pktp->maxlen);
+ for (i = 0; i < pktp->dbg_qlen; i++) {
+ ASSERT(pktp->dbg_q[i].p);
+ printf("%d, p: 0x%x dur:%lu us state:%d\n", i,
+ pktp->dbg_q[i].p, pktp->dbg_q[i].dur/100, PKTPOOLSTATE(pktp->dbg_q[i].p));
+ }
+
+ return 0;
+}
+
+int
+pktpool_stats_dump(pktpool_t *pktp, pktpool_stats_t *stats)
+{
+ int i;
+ int state;
+
+ bzero(stats, sizeof(pktpool_stats_t));
+ for (i = 0; i < pktp->dbg_qlen; i++) {
+ ASSERT(pktp->dbg_q[i].p != NULL);
+
+ state = PKTPOOLSTATE(pktp->dbg_q[i].p);
+ switch (state) {
+ case POOL_TXENQ:
+ stats->enq++; break;
+ case POOL_TXDH:
+ stats->txdh++; break;
+ case POOL_TXD11:
+ stats->txd11++; break;
+ case POOL_RXDH:
+ stats->rxdh++; break;
+ case POOL_RXD11:
+ stats->rxd11++; break;
+ case POOL_RXFILL:
+ stats->rxfill++; break;
+ case POOL_IDLE:
+ stats->idle++; break;
+ }
+ }
+
+ return 0;
+}
+
+int
+pktpool_start_trigger(pktpool_t *pktp, void *p)
+{
+ uint32 cycles, i;
+
+ if (!PKTPOOL(OSH_NULL, p))
+ return 0;
+
+ OSL_GETCYCLES(cycles);
+
+ for (i = 0; i < pktp->dbg_qlen; i++) {
+ ASSERT(pktp->dbg_q[i].p != NULL);
+
+ if (pktp->dbg_q[i].p == p) {
+ pktp->dbg_q[i].cycles = cycles;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+int pktpool_stop_trigger(pktpool_t *pktp, void *p);
+int
+pktpool_stop_trigger(pktpool_t *pktp, void *p)
+{
+ uint32 cycles, i;
+
+ if (!PKTPOOL(OSH_NULL, p))
+ return 0;
+
+ OSL_GETCYCLES(cycles);
+
+ for (i = 0; i < pktp->dbg_qlen; i++) {
+ ASSERT(pktp->dbg_q[i].p != NULL);
+
+ if (pktp->dbg_q[i].p == p) {
+ if (pktp->dbg_q[i].cycles == 0)
+ break;
+
+ if (cycles >= pktp->dbg_q[i].cycles)
+ pktp->dbg_q[i].dur = cycles - pktp->dbg_q[i].cycles;
+ else
+ pktp->dbg_q[i].dur =
+ (((uint32)-1) - pktp->dbg_q[i].cycles) + cycles + 1;
+
+ pktp->dbg_q[i].cycles = 0;
+ break;
+ }
+ }
+
+ return 0;
+}
+#endif /* BCMDBG_POOL */
+
+int
+pktpool_avail_notify_normal(osl_t *osh, pktpool_t *pktp)
+{
+ ASSERT(pktp);
+ pktp->availcb_excl = NULL;
+ return 0;
+}
+
+int
+pktpool_avail_notify_exclusive(osl_t *osh, pktpool_t *pktp, pktpool_cb_t cb)
+{
+ int i;
+
+ ASSERT(pktp);
+ ASSERT(pktp->availcb_excl == NULL);
+ for (i = 0; i < pktp->cbcnt; i++) {
+ if (cb == pktp->cbs[i].cb) {
+ pktp->availcb_excl = &pktp->cbs[i];
+ break;
+ }
+ }
+
+ if (pktp->availcb_excl == NULL)
+ return BCME_ERROR;
+ else
+ return 0;
+}
+
+static int
+pktpool_avail_notify(pktpool_t *pktp)
+{
+ int i, k, idx;
+ int avail;
+
+ ASSERT(pktp);
+ if (pktp->availcb_excl != NULL) {
+ pktp->availcb_excl->cb(pktp, pktp->availcb_excl->arg);
+ return 0;
+ }
+
+ k = pktp->cbcnt - 1;
+ for (i = 0; i < pktp->cbcnt; i++) {
+ avail = pktp->avail;
+
+ if (avail) {
+ if (pktp->cbtoggle)
+ idx = i;
+ else
+ idx = k--;
+
+ ASSERT(pktp->cbs[idx].cb != NULL);
+ pktp->cbs[idx].cb(pktp, pktp->cbs[idx].arg);
+ }
+ }
+
+ /* Alternate between filling from head or tail
+ */
+ pktp->cbtoggle ^= 1;
+
+ return 0;
+}
+
+void *
+pktpool_get(pktpool_t *pktp)
+{
+ void *p;
+
+ p = pktpool_deq(pktp);
+
+ if (p == NULL) {
+ /* Notify and try to reclaim tx pkts */
+ if (pktp->ecbcnt)
+ pktpool_empty_notify(pktp);
+
+ p = pktpool_deq(pktp);
+ if (p == NULL)
+ return NULL;
+ }
+
+ return p;
+}
+
+void
+pktpool_free(pktpool_t *pktp, void *p)
+{
+ ASSERT(p != NULL);
+#ifdef BCMDBG_POOL
+ /* pktpool_stop_trigger(pktp, p); */
+#endif
+
+ pktpool_enq(pktp, p);
+
+ if (pktp->emptycb_disable)
+ return;
+
+ if (pktp->cbcnt) {
+ if (pktp->empty == FALSE)
+ pktpool_avail_notify(pktp);
+ }
+}
+
+int
+pktpool_add(pktpool_t *pktp, void *p)
+{
+ ASSERT(p != NULL);
+
+ if (pktp->len == pktp->maxlen)
+ return BCME_RANGE;
+
+ /* pkts in pool have same length */
+ ASSERT(pktp->plen == PKTLEN(OSH_NULL, p));
+ PKTSETPOOL(OSH_NULL, p, TRUE, pktp);
+
+ pktp->len++;
+ pktpool_enq(pktp, p);
+
+#ifdef BCMDBG_POOL
+ pktp->dbg_q[pktp->dbg_qlen++].p = p;
+#endif
+
+ return 0;
+}
+
+/* Force pktpool_setmaxlen () into RAM as it uses a constant
+ * (PKTPOOL_LEN_MAX) that may be changed post tapeout for ROM-based chips.
+ */
+int
+BCMRAMFN(pktpool_setmaxlen)(pktpool_t *pktp, uint16 maxlen)
+{
+ if (maxlen > PKTPOOL_LEN_MAX)
+ maxlen = PKTPOOL_LEN_MAX;
+
+ /* If the pool is already beyond maxlen, just cap it there,
+ * since we currently do not shrink a pool that has already
+ * been allocated.
+ */
+ pktp->maxlen = (pktp->len > maxlen) ? pktp->len : maxlen;
+
+ return pktp->maxlen;
+}
+
+void
+pktpool_emptycb_disable(pktpool_t *pktp, bool disable)
+{
+ ASSERT(pktp);
+
+ pktp->emptycb_disable = disable;
+}
+
+bool
+pktpool_emptycb_disabled(pktpool_t *pktp)
+{
+ ASSERT(pktp);
+ return pktp->emptycb_disable;
+}
--- /dev/null
+/*
+ * HND generic pktq operation primitives
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: $
+ */
+
+#include <typedefs.h>
+#include <osl.h>
+#include <bcmutils.h>
+#include <hnd_pktq.h>
+
+/*
+ * osl multiple-precedence packet queue
+ * hi_prec is always >= the number of the highest non-empty precedence
+ */
+void * BCMFASTPATH
+pktq_penq(struct pktq *pq, int prec, void *p)
+{
+ struct pktq_prec *q;
+
+ ASSERT(prec >= 0 && prec < pq->num_prec);
+ ASSERT(PKTLINK(p) == NULL); /* queueing chains not allowed */
+
+ ASSERT(!pktq_full(pq));
+ ASSERT(!pktq_pfull(pq, prec));
+
+ q = &pq->q[prec];
+
+ if (q->head)
+ PKTSETLINK(q->tail, p);
+ else
+ q->head = p;
+
+ q->tail = p;
+ q->len++;
+
+ pq->len++;
+
+ if (pq->hi_prec < prec)
+ pq->hi_prec = (uint8)prec;
+
+ return p;
+}
+
+void * BCMFASTPATH
+pktq_penq_head(struct pktq *pq, int prec, void *p)
+{
+ struct pktq_prec *q;
+
+ ASSERT(prec >= 0 && prec < pq->num_prec);
+ ASSERT(PKTLINK(p) == NULL); /* queueing chains not allowed */
+
+ ASSERT(!pktq_full(pq));
+ ASSERT(!pktq_pfull(pq, prec));
+
+ q = &pq->q[prec];
+
+ if (q->head == NULL)
+ q->tail = p;
+
+ PKTSETLINK(p, q->head);
+ q->head = p;
+ q->len++;
+
+ pq->len++;
+
+ if (pq->hi_prec < prec)
+ pq->hi_prec = (uint8)prec;
+
+ return p;
+}
+
+/*
+ * Append spktq 'list' to the tail of pktq 'pq'
+ */
+void BCMFASTPATH
+pktq_append(struct pktq *pq, int prec, struct spktq *list)
+{
+ struct pktq_prec *q;
+ struct pktq_prec *list_q;
+
+ list_q = &list->q[0];
+
+ /* empty list check */
+ if (list_q->head == NULL)
+ return;
+
+ ASSERT(prec >= 0 && prec < pq->num_prec);
+ ASSERT(PKTLINK(list_q->tail) == NULL); /* terminated list */
+
+ ASSERT(!pktq_full(pq));
+ ASSERT(!pktq_pfull(pq, prec));
+
+ q = &pq->q[prec];
+
+ if (q->head)
+ PKTSETLINK(q->tail, list_q->head);
+ else
+ q->head = list_q->head;
+
+ q->tail = list_q->tail;
+ q->len += list_q->len;
+ pq->len += list_q->len;
+
+ if (pq->hi_prec < prec)
+ pq->hi_prec = (uint8)prec;
+
+ list_q->head = NULL;
+ list_q->tail = NULL;
+ list_q->len = 0;
+ list->len = 0;
+}
+
+/*
+ * Prepend spktq 'list' to the head of pktq 'pq'
+ */
+void BCMFASTPATH
+pktq_prepend(struct pktq *pq, int prec, struct spktq *list)
+{
+ struct pktq_prec *q;
+ struct pktq_prec *list_q;
+
+ list_q = &list->q[0];
+
+ /* empty list check */
+ if (list_q->head == NULL)
+ return;
+
+ ASSERT(prec >= 0 && prec < pq->num_prec);
+ ASSERT(PKTLINK(list_q->tail) == NULL); /* terminated list */
+
+ ASSERT(!pktq_full(pq));
+ ASSERT(!pktq_pfull(pq, prec));
+
+ q = &pq->q[prec];
+
+ /* set the tail packet of list to point at the former pq head */
+ PKTSETLINK(list_q->tail, q->head);
+ /* the new q head is the head of list */
+ q->head = list_q->head;
+
+ /* If the q tail was non-null, then it stays as is.
+ * If the q tail was null, it is now the tail of list
+ */
+ if (q->tail == NULL) {
+ q->tail = list_q->tail;
+ }
+
+ q->len += list_q->len;
+ pq->len += list_q->len;
+
+ if (pq->hi_prec < prec)
+ pq->hi_prec = (uint8)prec;
+
+ list_q->head = NULL;
+ list_q->tail = NULL;
+ list_q->len = 0;
+ list->len = 0;
+}
+
+void * BCMFASTPATH
+pktq_pdeq(struct pktq *pq, int prec)
+{
+ struct pktq_prec *q;
+ void *p;
+
+ ASSERT(prec >= 0 && prec < pq->num_prec);
+
+ q = &pq->q[prec];
+
+ if ((p = q->head) == NULL)
+ return NULL;
+
+ if ((q->head = PKTLINK(p)) == NULL)
+ q->tail = NULL;
+
+ q->len--;
+
+ pq->len--;
+
+ PKTSETLINK(p, NULL);
+
+ return p;
+}
+
+void * BCMFASTPATH
+pktq_pdeq_prev(struct pktq *pq, int prec, void *prev_p)
+{
+ struct pktq_prec *q;
+ void *p;
+
+ ASSERT(prec >= 0 && prec < pq->num_prec);
+
+ q = &pq->q[prec];
+
+ if (prev_p == NULL)
+ return NULL;
+
+ if ((p = PKTLINK(prev_p)) == NULL)
+ return NULL;
+
+ q->len--;
+
+ pq->len--;
+
+ PKTSETLINK(prev_p, PKTLINK(p));
+ PKTSETLINK(p, NULL);
+
+ return p;
+}
+
+void * BCMFASTPATH
+pktq_pdeq_with_fn(struct pktq *pq, int prec, ifpkt_cb_t fn, int arg)
+{
+ struct pktq_prec *q;
+ void *p, *prev = NULL;
+
+ ASSERT(prec >= 0 && prec < pq->num_prec);
+
+ q = &pq->q[prec];
+ p = q->head;
+
+ while (p) {
+ if (fn == NULL || (*fn)(p, arg)) {
+ break;
+ } else {
+ prev = p;
+ p = PKTLINK(p);
+ }
+ }
+ if (p == NULL)
+ return NULL;
+
+ if (prev == NULL) {
+ if ((q->head = PKTLINK(p)) == NULL) {
+ q->tail = NULL;
+ }
+ } else {
+ PKTSETLINK(prev, PKTLINK(p));
+ if (q->tail == p) {
+ q->tail = prev;
+ }
+ }
+
+ q->len--;
+
+ pq->len--;
+
+ PKTSETLINK(p, NULL);
+
+ return p;
+}
+
+void * BCMFASTPATH
+pktq_pdeq_tail(struct pktq *pq, int prec)
+{
+ struct pktq_prec *q;
+ void *p, *prev;
+
+ ASSERT(prec >= 0 && prec < pq->num_prec);
+
+ q = &pq->q[prec];
+
+ if ((p = q->head) == NULL)
+ return NULL;
+
+ for (prev = NULL; p != q->tail; p = PKTLINK(p))
+ prev = p;
+
+ if (prev)
+ PKTSETLINK(prev, NULL);
+ else
+ q->head = NULL;
+
+ q->tail = prev;
+ q->len--;
+
+ pq->len--;
+
+ return p;
+}
+
+void
+pktq_pflush(osl_t *osh, struct pktq *pq, int prec, bool dir, ifpkt_cb_t fn, int arg)
+{
+ struct pktq_prec *q;
+ void *p, *prev = NULL;
+
+ q = &pq->q[prec];
+ p = q->head;
+ while (p) {
+ if (fn == NULL || (*fn)(p, arg)) {
+ bool head = (p == q->head);
+ if (head)
+ q->head = PKTLINK(p);
+ else
+ PKTSETLINK(prev, PKTLINK(p));
+ PKTSETLINK(p, NULL);
+ PKTFREE(osh, p, dir);
+ q->len--;
+ pq->len--;
+ p = (head ? q->head : PKTLINK(prev));
+ } else {
+ prev = p;
+ p = PKTLINK(p);
+ }
+ }
+
+ if (q->head == NULL) {
+ ASSERT(q->len == 0);
+ q->tail = NULL;
+ }
+}
+
+bool BCMFASTPATH
+pktq_pdel(struct pktq *pq, void *pktbuf, int prec)
+{
+ struct pktq_prec *q;
+ void *p;
+
+ ASSERT(prec >= 0 && prec < pq->num_prec);
+
+ /* Should this just assert pktbuf? */
+ if (!pktbuf)
+ return FALSE;
+
+ q = &pq->q[prec];
+
+ if (q->head == pktbuf) {
+ if ((q->head = PKTLINK(pktbuf)) == NULL)
+ q->tail = NULL;
+ } else {
+ for (p = q->head; p && PKTLINK(p) != pktbuf; p = PKTLINK(p))
+ ;
+ if (p == NULL)
+ return FALSE;
+
+ PKTSETLINK(p, PKTLINK(pktbuf));
+ if (q->tail == pktbuf)
+ q->tail = p;
+ }
+
+ q->len--;
+ pq->len--;
+ PKTSETLINK(pktbuf, NULL);
+ return TRUE;
+}
+
+void
+pktq_init(struct pktq *pq, int num_prec, int max_len)
+{
+ int prec;
+
+ ASSERT(num_prec > 0 && num_prec <= PKTQ_MAX_PREC);
+
+ /* pq is variable size; only zero out what's requested */
+ bzero(pq, OFFSETOF(struct pktq, q) + (sizeof(struct pktq_prec) * num_prec));
+
+ pq->num_prec = (uint16)num_prec;
+
+ pq->max = (uint16)max_len;
+
+ for (prec = 0; prec < num_prec; prec++)
+ pq->q[prec].max = pq->max;
+}
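+
+/*
+ * Illustrative sketch only: a typical init/enqueue/dequeue sequence for the
+ * precedence-queue primitives above. 'pkt' is assumed to come from PKTGET()
+ * or a packet pool; the precedence values are arbitrary examples.
+ *
+ *	struct pktq q;
+ *	int prec;
+ *	pktq_init(&q, 4, 256);		// 4 precedences, 256 packets overall
+ *	(void)pktq_penq(&q, 2, pkt);	// enqueue at precedence 2
+ *	p = pktq_deq(&q, &prec);	// dequeues highest non-empty precedence
+ */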
+
+void
+pktq_set_max_plen(struct pktq *pq, int prec, int max_len)
+{
+ ASSERT(prec >= 0 && prec < pq->num_prec);
+
+ if (prec < pq->num_prec)
+ pq->q[prec].max = (uint16)max_len;
+}
+
+void * BCMFASTPATH
+pktq_deq(struct pktq *pq, int *prec_out)
+{
+ struct pktq_prec *q;
+ void *p;
+ int prec;
+
+ if (pq->len == 0)
+ return NULL;
+
+ while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
+ pq->hi_prec--;
+
+ q = &pq->q[prec];
+
+ if ((p = q->head) == NULL)
+ return NULL;
+
+ if ((q->head = PKTLINK(p)) == NULL)
+ q->tail = NULL;
+
+ q->len--;
+
+ pq->len--;
+
+ if (prec_out)
+ *prec_out = prec;
+
+ PKTSETLINK(p, NULL);
+
+ return p;
+}
+
+void * BCMFASTPATH
+pktq_deq_tail(struct pktq *pq, int *prec_out)
+{
+ struct pktq_prec *q;
+ void *p, *prev;
+ int prec;
+
+ if (pq->len == 0)
+ return NULL;
+
+ for (prec = 0; prec < pq->hi_prec; prec++)
+ if (pq->q[prec].head)
+ break;
+
+ q = &pq->q[prec];
+
+ if ((p = q->head) == NULL)
+ return NULL;
+
+ for (prev = NULL; p != q->tail; p = PKTLINK(p))
+ prev = p;
+
+ if (prev)
+ PKTSETLINK(prev, NULL);
+ else
+ q->head = NULL;
+
+ q->tail = prev;
+ q->len--;
+
+ pq->len--;
+
+ if (prec_out)
+ *prec_out = prec;
+
+ PKTSETLINK(p, NULL);
+
+ return p;
+}
+
+void *
+pktq_peek(struct pktq *pq, int *prec_out)
+{
+ int prec;
+
+ if (pq->len == 0)
+ return NULL;
+
+ while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
+ pq->hi_prec--;
+
+ if (prec_out)
+ *prec_out = prec;
+
+ return (pq->q[prec].head);
+}
+
+void *
+pktq_peek_tail(struct pktq *pq, int *prec_out)
+{
+ int prec;
+
+ if (pq->len == 0)
+ return NULL;
+
+ for (prec = 0; prec < pq->hi_prec; prec++)
+ if (pq->q[prec].head)
+ break;
+
+ if (prec_out)
+ *prec_out = prec;
+
+ return (pq->q[prec].tail);
+}
+
+void
+pktq_flush(osl_t *osh, struct pktq *pq, bool dir, ifpkt_cb_t fn, int arg)
+{
+ int prec;
+
+ /* Optimize flush, if pktq len = 0, just return.
+ * pktq len of 0 means pktq's prec q's are all empty.
+ */
+ if (pq->len == 0) {
+ return;
+ }
+
+ for (prec = 0; prec < pq->num_prec; prec++)
+ pktq_pflush(osh, pq, prec, dir, fn, arg);
+ if (fn == NULL)
+ ASSERT(pq->len == 0);
+}
+
+/* Return sum of lengths of a specific set of precedences */
+int
+pktq_mlen(struct pktq *pq, uint prec_bmp)
+{
+ int prec, len;
+
+ len = 0;
+
+ for (prec = 0; prec <= pq->hi_prec; prec++)
+ if (prec_bmp & (1 << prec))
+ len += pq->q[prec].len;
+
+ return len;
+}
+
+/* Priority peek from a specific set of precedences */
+void * BCMFASTPATH
+pktq_mpeek(struct pktq *pq, uint prec_bmp, int *prec_out)
+{
+ struct pktq_prec *q;
+ void *p;
+ int prec;
+
+ if (pq->len == 0)
+ {
+ return NULL;
+ }
+ while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
+ pq->hi_prec--;
+
+ while ((prec_bmp & (1 << prec)) == 0 || pq->q[prec].head == NULL)
+ if (prec-- == 0)
+ return NULL;
+
+ q = &pq->q[prec];
+
+ if ((p = q->head) == NULL)
+ return NULL;
+
+ if (prec_out)
+ *prec_out = prec;
+
+ return p;
+}
+/* Priority dequeue from a specific set of precedences */
+void * BCMFASTPATH
+pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out)
+{
+ struct pktq_prec *q;
+ void *p;
+ int prec;
+
+ if (pq->len == 0)
+ return NULL;
+
+ while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
+ pq->hi_prec--;
+
+ while ((pq->q[prec].head == NULL) || ((prec_bmp & (1 << prec)) == 0))
+ if (prec-- == 0)
+ return NULL;
+
+ q = &pq->q[prec];
+
+ if ((p = q->head) == NULL)
+ return NULL;
+
+ if ((q->head = PKTLINK(p)) == NULL)
+ q->tail = NULL;
+
+ q->len--;
+
+ if (prec_out)
+ *prec_out = prec;
+
+ pq->len--;
+
+ PKTSETLINK(p, NULL);
+
+ return p;
+}
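+
+/*
+ * Illustrative sketch only: draining a subset of precedences with a bitmap.
+ * The bit positions chosen here (1 and 2) are arbitrary examples.
+ *
+ *	void *p;
+ *	int prec;
+ *	while ((p = pktq_mdeq(&q, (1 << 1) | (1 << 2), &prec)) != NULL) {
+ *		// process p, which came from precedence 'prec'
+ *	}
+ */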
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: hndpmu.c 414368 2013-07-24 15:00:23Z $
+ * $Id: hndpmu.c 475037 2014-05-02 23:55:49Z $
*/
+
/*
* Note: this file contains PLL/FLL related functions. A chip can contain multiple PLLs/FLLs.
* However, in the context of this file the baseband ('BB') PLL/FLL is referred to.
*
* Throughout this code, the prefixes 'pmu0_', 'pmu1_' and 'pmu2_' are used.
* They refer to different revisions of the PMU (which is at revision 18 @ Apr 25, 2012)
- * pmu2_ marks the transition from PLL to ADFLL (Digital Frequency Locked Loop)
+ * pmu1_ marks the transition from PLL to ADFLL (Digital Frequency Locked Loop). It supports
+ * fractional frequency generation. pmu2_ does not support fractional frequency generation.
*/
#include <bcm_cfg.h>
*/
#define PMU_NONE(args)
+/** contains resource bit positions for a specific chip */
+struct rsc_per_chip_s {
+ uint8 ht_avail;
+ uint8 macphy_clkavail;
+ uint8 ht_start;
+ uint8 otp_pu;
+};
+
+typedef struct rsc_per_chip_s rsc_per_chip_t;
+
/* SDIO Pad drive strength to select value mappings.
* The last strength value in each table must be 0 (the tri-state value).
/* SDIO Drive Strength to sel value table for PMU Rev 13 (3.3v) */
-/* SDIO Drive Strength to sel value table for PMU Rev 17 (1.8v) */
+/** SDIO Drive Strength to sel value table for PMU Rev 17 (1.8v) */
static const sdiod_drive_str_t sdiod_drive_strength_tab6_1v8[] = {
{3, 0x3},
{2, 0x2},
{0, 0x0} };
-/*
+/**
* SDIO Drive Strength to sel value table for 43143 PMU Rev 17, see Confluence 43143 Toplevel
* architecture page, section 'PMU Chip Control 1 Register definition', click link to picture
* BCM43143_sel_sdio_signals.jpg. Valid after PMU Chip Control 0 Register, bit31 (override) has
void
si_sdiod_drive_strength_init(si_t *sih, osl_t *osh, uint32 drivestrength)
{
- chipcregs_t *cc;
+ chipcregs_t *cc = NULL;
uint origidx, intr_val = 0;
sdiod_drive_str_t *str_tab = NULL;
uint32 str_mask = 0; /* only alter desired bits in PMU chipcontrol 1 register */
}
/* Remember original core before switch to chipc */
- cc = (chipcregs_t *) si_switch_core(sih, CC_CORE_ID, &origidx, &intr_val);
+ if (CHIPID(sih->chip) == BCM43362_CHIP_ID)
+ cc = (chipcregs_t *) si_switch_core(sih, CC_CORE_ID, &origidx, &intr_val);
switch (SDIOD_DRVSTR_KEY(sih->chip, sih->pmurev)) {
case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 1):
break;
}
- if (str_tab != NULL && cc != NULL) {
+ if (CHIPID(sih->chip) == BCM43362_CHIP_ID) {
+ if (str_tab != NULL && cc != NULL) {
+ uint32 cc_data_temp;
+ int i;
+
+ /* Pick the lowest available drive strength equal or greater than the
+ * requested strength. Drive strength of 0 requests tri-state.
+ */
+ for (i = 0; drivestrength < str_tab[i].strength; i++)
+ ;
+
+ if (i > 0 && drivestrength > str_tab[i].strength)
+ i--;
+
+ W_REG(osh, &cc->chipcontrol_addr, PMU_CHIPCTL1);
+ cc_data_temp = R_REG(osh, &cc->chipcontrol_data);
+ cc_data_temp &= ~str_mask;
+ cc_data_temp |= str_tab[i].sel << str_shift;
+ W_REG(osh, &cc->chipcontrol_data, cc_data_temp);
+ if (str_ovr_pmuval) { /* enables the selected drive strength */
+ W_REG(osh, &cc->chipcontrol_addr, str_ovr_pmuctl);
+ OR_REG(osh, &cc->chipcontrol_data, str_ovr_pmuval);
+ }
+ PMU_MSG(("SDIO: %dmA drive strength requested; set to %dmA\n",
+ drivestrength, str_tab[i].strength));
+ }
+ /* Return to original core */
+ si_restore_core(sih, origidx, intr_val);
+ }
+ else if (str_tab != NULL) {
uint32 cc_data_temp;
int i;
if (i > 0 && drivestrength > str_tab[i].strength)
i--;
- W_REG(osh, &cc->chipcontrol_addr, PMU_CHIPCTL1);
- cc_data_temp = R_REG(osh, &cc->chipcontrol_data);
+ W_REG(osh, PMUREG(sih, chipcontrol_addr), PMU_CHIPCTL1);
+ cc_data_temp = R_REG(osh, PMUREG(sih, chipcontrol_data));
cc_data_temp &= ~str_mask;
cc_data_temp |= str_tab[i].sel << str_shift;
- W_REG(osh, &cc->chipcontrol_data, cc_data_temp);
+ W_REG(osh, PMUREG(sih, chipcontrol_data), cc_data_temp);
if (str_ovr_pmuval) { /* enables the selected drive strength */
- W_REG(osh, &cc->chipcontrol_addr, str_ovr_pmuctl);
- OR_REG(osh, &cc->chipcontrol_data, str_ovr_pmuval);
+ W_REG(osh, PMUREG(sih, chipcontrol_addr), str_ovr_pmuctl);
+ OR_REG(osh, PMUREG(sih, chipcontrol_data), str_ovr_pmuval);
}
PMU_MSG(("SDIO: %dmA drive strength requested; set to %dmA\n",
drivestrength, str_tab[i].strength));
}
-
- /* Return to original core */
- si_restore_core(sih, origidx, intr_val);
} /* si_sdiod_drive_strength_init */
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: aidmp.h 385510 2013-02-15 21:02:07Z $
+ * $Id: aidmp.h 456346 2014-02-18 16:48:52Z $
*/
#ifndef _AIDMP_H
#define _AIDMP_H
-
+/* Manufacturer Ids */
#define MFGID_ARM 0x43b
#define MFGID_BRCM 0x4bf
#define MFGID_MIPS 0x4a7
-
+/* Component Classes */
#define CC_SIM 0
#define CC_EROM 1
#define CC_CORESIGHT 9
#define CC_GEN 0xe
#define CC_PRIMECELL 0xf
-
+/* Enumeration ROM registers */
#define ER_EROMENTRY 0x000
#define ER_REMAPCONTROL 0xe00
#define ER_REMAPSELECT 0xe04
#define ER_ITCR 0xf00
#define ER_ITIP 0xf04
-
+/* Erom entries */
#define ER_TAG 0xe
#define ER_TAG1 0x6
#define ER_VALID 1
#define ER_END 0xe
#define ER_BAD 0xffffffff
-
+/* EROM CompIdentA */
#define CIA_MFG_MASK 0xfff00000
#define CIA_MFG_SHIFT 20
#define CIA_CID_MASK 0x000fff00
#define CIA_CCL_MASK 0x000000f0
#define CIA_CCL_SHIFT 4
-
+/* EROM CompIdentB */
#define CIB_REV_MASK 0xff000000
#define CIB_REV_SHIFT 24
#define CIB_NSW_MASK 0x00f80000
#define CIB_NMP_MASK 0x000001f0
#define CIB_NMP_SHIFT 4
-
+/* EROM MasterPortDesc */
#define MPD_MUI_MASK 0x0000ff00
#define MPD_MUI_SHIFT 8
#define MPD_MP_MASK 0x000000f0
#define MPD_MP_SHIFT 4
-
+/* EROM AddrDesc */
#define AD_ADDR_MASK 0xfffff000
#define AD_SP_MASK 0x00000f00
#define AD_SP_SHIFT 8
#define AD_SZ_SZD 0x00000030
#define AD_AG32 0x00000008
#define AD_ADDR_ALIGN 0x00000fff
-#define AD_SZ_BASE 0x00001000
-
+#define AD_SZ_BASE 0x00001000 /* 4KB */
+/* EROM SizeDesc */
#define SD_SZ_MASK 0xfffff000
#define SD_SG32 0x00000008
#define SD_SZ_ALIGN 0x00000fff
-#ifndef _LANGUAGE_ASSEMBLY
+#if !defined(_LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLY__)
typedef volatile struct _aidmp {
- uint32 oobselina30;
- uint32 oobselina74;
+ uint32 oobselina30; /* 0x000 */
+ uint32 oobselina74; /* 0x004 */
uint32 PAD[6];
- uint32 oobselinb30;
- uint32 oobselinb74;
+ uint32 oobselinb30; /* 0x020 */
+ uint32 oobselinb74; /* 0x024 */
uint32 PAD[6];
- uint32 oobselinc30;
- uint32 oobselinc74;
+ uint32 oobselinc30; /* 0x040 */
+ uint32 oobselinc74; /* 0x044 */
uint32 PAD[6];
- uint32 oobselind30;
- uint32 oobselind74;
+ uint32 oobselind30; /* 0x060 */
+ uint32 oobselind74; /* 0x064 */
uint32 PAD[38];
- uint32 oobselouta30;
- uint32 oobselouta74;
+ uint32 oobselouta30; /* 0x100 */
+ uint32 oobselouta74; /* 0x104 */
uint32 PAD[6];
- uint32 oobseloutb30;
- uint32 oobseloutb74;
+ uint32 oobseloutb30; /* 0x120 */
+ uint32 oobseloutb74; /* 0x124 */
uint32 PAD[6];
- uint32 oobseloutc30;
- uint32 oobseloutc74;
+ uint32 oobseloutc30; /* 0x140 */
+ uint32 oobseloutc74; /* 0x144 */
uint32 PAD[6];
- uint32 oobseloutd30;
- uint32 oobseloutd74;
+ uint32 oobseloutd30; /* 0x160 */
+ uint32 oobseloutd74; /* 0x164 */
uint32 PAD[38];
- uint32 oobsynca;
- uint32 oobseloutaen;
+ uint32 oobsynca; /* 0x200 */
+ uint32 oobseloutaen; /* 0x204 */
uint32 PAD[6];
- uint32 oobsyncb;
- uint32 oobseloutben;
+ uint32 oobsyncb; /* 0x220 */
+ uint32 oobseloutben; /* 0x224 */
uint32 PAD[6];
- uint32 oobsyncc;
- uint32 oobseloutcen;
+ uint32 oobsyncc; /* 0x240 */
+ uint32 oobseloutcen; /* 0x244 */
uint32 PAD[6];
- uint32 oobsyncd;
- uint32 oobseloutden;
+ uint32 oobsyncd; /* 0x260 */
+ uint32 oobseloutden; /* 0x264 */
uint32 PAD[38];
- uint32 oobaextwidth;
- uint32 oobainwidth;
- uint32 oobaoutwidth;
+ uint32 oobaextwidth; /* 0x300 */
+ uint32 oobainwidth; /* 0x304 */
+ uint32 oobaoutwidth; /* 0x308 */
uint32 PAD[5];
- uint32 oobbextwidth;
- uint32 oobbinwidth;
- uint32 oobboutwidth;
+ uint32 oobbextwidth; /* 0x320 */
+ uint32 oobbinwidth; /* 0x324 */
+ uint32 oobboutwidth; /* 0x328 */
uint32 PAD[5];
- uint32 oobcextwidth;
- uint32 oobcinwidth;
- uint32 oobcoutwidth;
+ uint32 oobcextwidth; /* 0x340 */
+ uint32 oobcinwidth; /* 0x344 */
+ uint32 oobcoutwidth; /* 0x348 */
uint32 PAD[5];
- uint32 oobdextwidth;
- uint32 oobdinwidth;
- uint32 oobdoutwidth;
+ uint32 oobdextwidth; /* 0x360 */
+ uint32 oobdinwidth; /* 0x364 */
+ uint32 oobdoutwidth; /* 0x368 */
uint32 PAD[37];
- uint32 ioctrlset;
- uint32 ioctrlclear;
- uint32 ioctrl;
+ uint32 ioctrlset; /* 0x400 */
+ uint32 ioctrlclear; /* 0x404 */
+ uint32 ioctrl; /* 0x408 */
uint32 PAD[61];
- uint32 iostatus;
+ uint32 iostatus; /* 0x500 */
uint32 PAD[127];
- uint32 ioctrlwidth;
- uint32 iostatuswidth;
+ uint32 ioctrlwidth; /* 0x700 */
+ uint32 iostatuswidth; /* 0x704 */
uint32 PAD[62];
- uint32 resetctrl;
- uint32 resetstatus;
- uint32 resetreadid;
- uint32 resetwriteid;
+ uint32 resetctrl; /* 0x800 */
+ uint32 resetstatus; /* 0x804 */
+ uint32 resetreadid; /* 0x808 */
+ uint32 resetwriteid; /* 0x80c */
uint32 PAD[60];
- uint32 errlogctrl;
- uint32 errlogdone;
- uint32 errlogstatus;
- uint32 errlogaddrlo;
- uint32 errlogaddrhi;
- uint32 errlogid;
- uint32 errloguser;
- uint32 errlogflags;
+ uint32 errlogctrl; /* 0x900 */
+ uint32 errlogdone; /* 0x904 */
+ uint32 errlogstatus; /* 0x908 */
+ uint32 errlogaddrlo; /* 0x90c */
+ uint32 errlogaddrhi; /* 0x910 */
+ uint32 errlogid; /* 0x914 */
+ uint32 errloguser; /* 0x918 */
+ uint32 errlogflags; /* 0x91c */
uint32 PAD[56];
- uint32 intstatus;
+ uint32 intstatus; /* 0xa00 */
uint32 PAD[255];
- uint32 config;
+ uint32 config; /* 0xe00 */
uint32 PAD[63];
- uint32 itcr;
+ uint32 itcr; /* 0xf00 */
uint32 PAD[3];
- uint32 itipooba;
- uint32 itipoobb;
- uint32 itipoobc;
- uint32 itipoobd;
+ uint32 itipooba; /* 0xf10 */
+ uint32 itipoobb; /* 0xf14 */
+ uint32 itipoobc; /* 0xf18 */
+ uint32 itipoobd; /* 0xf1c */
uint32 PAD[4];
- uint32 itipoobaout;
- uint32 itipoobbout;
- uint32 itipoobcout;
- uint32 itipoobdout;
+ uint32 itipoobaout; /* 0xf30 */
+ uint32 itipoobbout; /* 0xf34 */
+ uint32 itipoobcout; /* 0xf38 */
+ uint32 itipoobdout; /* 0xf3c */
uint32 PAD[4];
- uint32 itopooba;
- uint32 itopoobb;
- uint32 itopoobc;
- uint32 itopoobd;
+ uint32 itopooba; /* 0xf50 */
+ uint32 itopoobb; /* 0xf54 */
+ uint32 itopoobc; /* 0xf58 */
+ uint32 itopoobd; /* 0xf5c */
uint32 PAD[4];
- uint32 itopoobain;
- uint32 itopoobbin;
- uint32 itopoobcin;
- uint32 itopoobdin;
+ uint32 itopoobain; /* 0xf70 */
+ uint32 itopoobbin; /* 0xf74 */
+ uint32 itopoobcin; /* 0xf78 */
+ uint32 itopoobdin; /* 0xf7c */
uint32 PAD[4];
- uint32 itopreset;
+ uint32 itopreset; /* 0xf90 */
uint32 PAD[15];
- uint32 peripherialid4;
- uint32 peripherialid5;
- uint32 peripherialid6;
- uint32 peripherialid7;
- uint32 peripherialid0;
- uint32 peripherialid1;
- uint32 peripherialid2;
- uint32 peripherialid3;
- uint32 componentid0;
- uint32 componentid1;
- uint32 componentid2;
- uint32 componentid3;
+ uint32 peripherialid4; /* 0xfd0 */
+ uint32 peripherialid5; /* 0xfd4 */
+ uint32 peripherialid6; /* 0xfd8 */
+ uint32 peripherialid7; /* 0xfdc */
+ uint32 peripherialid0; /* 0xfe0 */
+ uint32 peripherialid1; /* 0xfe4 */
+ uint32 peripherialid2; /* 0xfe8 */
+ uint32 peripherialid3; /* 0xfec */
+ uint32 componentid0; /* 0xff0 */
+ uint32 componentid1; /* 0xff4 */
+ uint32 componentid2; /* 0xff8 */
+ uint32 componentid3; /* 0xffc */
} aidmp_t;
-#endif
-
+#endif /* !_LANGUAGE_ASSEMBLY && !__ASSEMBLY__ */
+/* Out-of-band Router registers */
#define OOB_BUSCONFIG 0x020
#define OOB_STATUSA 0x100
#define OOB_STATUSB 0x104
#define OOB_ITOPOOBC 0xf38
#define OOB_ITOPOOBD 0xf3c
-
+/* DMP wrapper registers */
#define AI_OOBSELINA30 0x000
#define AI_OOBSELINA74 0x004
#define AI_OOBSELINB30 0x020
#define AI_COMPONENTID2 0xff8
#define AI_COMPONENTID3 0xffc
-
+/* resetctrl */
#define AIRC_RESET 1
-
+/* config */
#define AICFG_OOB 0x00000020
#define AICFG_IOS 0x00000010
#define AICFG_IOC 0x00000008
#define AICFG_ERRL 0x00000002
#define AICFG_RST 0x00000001
-
+/* bit defines for AI_OOBSELOUTB74 reg */
#define OOB_SEL_OUTEN_B_5 15
#define OOB_SEL_OUTEN_B_6 23
-
+/* AI_OOBSEL for A/B/C/D, 0-7 */
#define AI_OOBSEL_MASK 0x1F
#define AI_OOBSEL_0_SHIFT 0
#define AI_OOBSEL_1_SHIFT 8
#define AI_OOBSEL_6_SHIFT 16
#define AI_OOBSEL_7_SHIFT 24
-#endif
+#endif /* _AIDMP_H */
#ifndef _bcm_cfg_h_
#define _bcm_cfg_h_
-#endif
+#endif /* _bcm_cfg_h_ */
*
* $Copyright Open Broadcom Corporation$
*
- * $Id$
+ * $Id: bcm_mpool_pub.h 407097 2013-06-11 18:43:16Z $
*/
#ifndef _BCM_MPOOL_PUB_H
int nobj,
void *memstart,
unsigned int memsize,
- char poolname[BCM_MP_NAMELEN],
+ const char poolname[BCM_MP_NAMELEN],
bcm_mp_pool_h *newp);
*
*/
int bcm_mpm_create_heap_pool(bcm_mpm_mgr_h mgr, unsigned int obj_sz,
- char poolname[BCM_MP_NAMELEN],
+ const char poolname[BCM_MP_NAMELEN],
bcm_mp_pool_h *newp);
#include <proto/ethernet.h>
typedef struct cdc_ioctl {
- uint32 cmd;
- uint32 len;
- uint32 flags;
- uint32 status;
+ uint32 cmd; /* ioctl command value */
+ uint32 len; /* lower 16: output buflen; upper 16: input buflen (excludes header) */
+ uint32 flags; /* flag defns given below */
+ uint32 status; /* status code returned from the device */
} cdc_ioctl_t;
-
+/* Max valid buffer size that can be sent to the dongle */
#define CDC_MAX_MSG_SIZE ETHER_MAX_LEN
-
-#define CDCL_IOC_OUTLEN_MASK 0x0000FFFF
-
+/* len field is divided into input and output buffer lengths */
+#define CDCL_IOC_OUTLEN_MASK 0x0000FFFF /* maximum or expected response length, */
+ /* excluding IOCTL header */
#define CDCL_IOC_OUTLEN_SHIFT 0
-#define CDCL_IOC_INLEN_MASK 0xFFFF0000
+#define CDCL_IOC_INLEN_MASK 0xFFFF0000 /* input buffer length, excluding IOCTL header */
#define CDCL_IOC_INLEN_SHIFT 16
-
-#define CDCF_IOC_ERROR 0x01
-#define CDCF_IOC_SET 0x02
-#define CDCF_IOC_OVL_IDX_MASK 0x3c
-#define CDCF_IOC_OVL_RSV 0x40
-#define CDCF_IOC_OVL 0x80
-#define CDCF_IOC_ACTION_MASK 0xfe
-#define CDCF_IOC_ACTION_SHIFT 1
-#define CDCF_IOC_IF_MASK 0xF000
+/* CDC flag definitions */
+#define CDCF_IOC_ERROR 0x01 /* 0=success, 1=ioctl cmd failed */
+#define CDCF_IOC_SET 0x02 /* 0=get, 1=set cmd */
+#define CDCF_IOC_OVL_IDX_MASK 0x3c /* overlay region index mask */
+#define CDCF_IOC_OVL_RSV 0x40 /* 1=reserve this overlay region */
+#define CDCF_IOC_OVL 0x80 /* 1=this ioctl corresponds to an overlay */
+#define CDCF_IOC_ACTION_MASK 0xfe /* SET/GET, OVL_IDX, OVL_RSV, OVL mask */
+#define CDCF_IOC_ACTION_SHIFT 1 /* SET/GET, OVL_IDX, OVL_RSV, OVL shift */
+#define CDCF_IOC_IF_MASK 0xF000 /* I/F index */
#define CDCF_IOC_IF_SHIFT 12
-#define CDCF_IOC_ID_MASK 0xFFFF0000
-#define CDCF_IOC_ID_SHIFT 16
+#define CDCF_IOC_ID_MASK 0xFFFF0000 /* used to uniquely id an ioctl req/resp pairing */
+#define CDCF_IOC_ID_SHIFT 16 /* # of bits of shift for ID Mask */
#define CDC_IOC_IF_IDX(flags) (((flags) & CDCF_IOC_IF_MASK) >> CDCF_IOC_IF_SHIFT)
#define CDC_IOC_ID(flags) (((flags) & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT)
#define CDC_SET_IF_IDX(hdr, idx) \
((hdr)->flags = (((hdr)->flags & ~CDCF_IOC_IF_MASK) | ((idx) << CDCF_IOC_IF_SHIFT)))
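/* Illustrative sketch (not part of the patch): filling a host-to-dongle CDC "set"
 * request with the masks documented above. The input/output buffer lengths go into
 * 'len' and the SET bit, request id and interface index go into 'flags'; the names
 * 'req', 'reqid' and 'ifidx' are only for the example.
 */
static void cdc_fill_set_request(cdc_ioctl_t *req, uint32 cmd,
                                 uint16 inlen, uint16 outlen,
                                 uint16 ifidx, uint16 reqid)
{
	req->cmd = cmd;
	/* lower 16 bits: expected response length, upper 16 bits: input length */
	req->len = ((uint32)inlen << CDCL_IOC_INLEN_SHIFT) |
	           ((uint32)outlen << CDCL_IOC_OUTLEN_SHIFT);
	req->flags = CDCF_IOC_SET |
	             (((uint32)reqid << CDCF_IOC_ID_SHIFT) & CDCF_IOC_ID_MASK);
	CDC_SET_IF_IDX(req, ifidx);	/* stuff the interface index into flags */
	req->status = 0;
}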
-
+/*
+ * BDC header
+ *
+ * The BDC header is used on data packets to convey priority across USB.
+ */
struct bdc_header {
- uint8 flags;
- uint8 priority;
+ uint8 flags; /* Flags */
+ uint8 priority; /* 802.1d Priority 0:2 bits, 4:7 USB flow control info */
uint8 flags2;
- uint8 dataOffset;
+ uint8 dataOffset; /* Offset from end of BDC header to packet data, in
+ * 4-byte words. Leaves room for optional headers.
+ */
};
#define BDC_HEADER_LEN 4
+/* flags field bitmap */
+#define BDC_FLAG_80211_PKT 0x01 /* Packet is in 802.11 format (dongle -> host) */
+#define BDC_FLAG_SUM_GOOD 0x04 /* Dongle has verified good RX checksums */
+#define BDC_FLAG_SUM_NEEDED 0x08 /* Dongle needs to do TX checksums: host->device */
+#define BDC_FLAG_EVENT_MSG 0x08 /* Payload contains an event msg: device->host */
+#define BDC_FLAG_VER_MASK 0xf0 /* Protocol version mask */
+#define BDC_FLAG_VER_SHIFT 4 /* Protocol version shift */
-#define BDC_FLAG_80211_PKT 0x01
-#define BDC_FLAG_SUM_GOOD 0x04
-#define BDC_FLAG_SUM_NEEDED 0x08
-#define BDC_FLAG_EVENT_MSG 0x08
-#define BDC_FLAG_VER_MASK 0xf0
-#define BDC_FLAG_VER_SHIFT 4
-
-
+/* priority field bitmap */
#define BDC_PRIORITY_MASK 0x07
-#define BDC_PRIORITY_FC_MASK 0xf0
-#define BDC_PRIORITY_FC_SHIFT 4
+#define BDC_PRIORITY_FC_MASK 0xf0 /* flow control info mask */
+#define BDC_PRIORITY_FC_SHIFT 4 /* flow control info shift */
-
-#define BDC_FLAG2_IF_MASK 0x0f
+/* flags2 field bitmap */
+#define BDC_FLAG2_IF_MASK 0x0f /* interface index (host <-> dongle) */
#define BDC_FLAG2_IF_SHIFT 0
-#define BDC_FLAG2_FC_FLAG 0x10
-
-
-
-#define BDC_PROTO_VER_1 1
-#define BDC_PROTO_VER 2
+#define BDC_FLAG2_FC_FLAG 0x10 /* flag to indicate if pkt contains */
+ /* FLOW CONTROL info only */
+/* version numbers */
+#define BDC_PROTO_VER_1 1 /* Old Protocol version */
+#define BDC_PROTO_VER 2 /* Protocol version */
+/* flags2.if field access macros */
#define BDC_GET_IF_IDX(hdr) \
((int)((((hdr)->flags2) & BDC_FLAG2_IF_MASK) >> BDC_FLAG2_IF_SHIFT))
#define BDC_SET_IF_IDX(hdr, idx) \
((hdr)->flags = (((hdr)->flags & ~BDC_FLAG_PAD_MASK) | \
(((idx) & BDC_FLAG_PAD_IDX) << BDC_FLAG_PAD_SHIFT)))
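/* Illustrative sketch (not part of the patch): initializing a BDC header for a
 * host-to-dongle data packet using the fields and masks described above. 'prio'
 * (802.1d priority) and 'ifidx' (destination interface) are example names only.
 */
static void bdc_init_tx_header(struct bdc_header *h, uint8 prio, uint8 ifidx)
{
	h->flags = (BDC_PROTO_VER << BDC_FLAG_VER_SHIFT);	/* current protocol version */
	h->priority = prio & BDC_PRIORITY_MASK;			/* 802.1d priority, bits 0:2 */
	h->flags2 = (ifidx << BDC_FLAG2_IF_SHIFT) & BDC_FLAG2_IF_MASK;
	h->dataOffset = 0;					/* no optional headers follow */
}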
-#endif
+#endif /* _bcmcdc_h_ */
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: bcmdefs.h 416231 2013-08-02 07:38:34Z $
+ * $Id: bcmdefs.h 474209 2014-04-30 12:16:47Z $
*/
#ifndef _bcmdefs_h_
#define _bcmdefs_h_
+/*
+ * One doesn't need to include this file explicitly; it is included automatically
+ * when typedefs.h is included.
+ */
-
-
+/* Use BCM_REFERENCE to suppress warnings about intentionally-unused function
+ * arguments or local variables.
+ */
#define BCM_REFERENCE(data) ((void)(data))
+/* Allow for suppressing unused variable warnings. */
+#ifdef __GNUC__
+#define UNUSED_VAR __attribute__ ((unused))
+#else
+#define UNUSED_VAR
+#endif
+/* Compile-time assert can be used in place of ASSERT if the expression evaluates
+ * to a constant at compile time.
+ */
#define STATIC_ASSERT(expr) { \
- \
- typedef enum { _STATIC_ASSERT_NOT_CONSTANT = (expr) } _static_assert_e; \
- \
- typedef char STATIC_ASSERT_FAIL[(expr) ? 1 : -1]; \
+ /* Make sure the expression is constant. */ \
+ typedef enum { _STATIC_ASSERT_NOT_CONSTANT = (expr) } _static_assert_e UNUSED_VAR; \
+ /* Make sure the expression is true. */ \
+ typedef char STATIC_ASSERT_FAIL[(expr) ? 1 : -1] UNUSED_VAR; \
}
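/* Illustrative sketch (not part of the patch): typical use of the two macros above.
 * BCM_REFERENCE() silences an unused-argument warning; STATIC_ASSERT() fails the
 * build if a size assumption is broken. example_handler() is a hypothetical function.
 */
static int example_handler(void *ctx, int arg)
{
	BCM_REFERENCE(ctx);			/* ctx intentionally unused here */
	STATIC_ASSERT(sizeof(uint32) == 4);	/* compile-time size check */
	return arg;
}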
-
+/* Reclaiming text and data:
+ * The following macros specify special linker sections that can be reclaimed
+ * after a system is considered 'up'.
+ * BCMATTACHFN is also used for detach functions (it's not worth having a BCMDETACHFN,
+ * as in most cases, the attach function calls the detach function to clean up on error).
+ */
#define bcmreclaimed 0
#define _data _data
#define BCMNMIATTACHDATA(_data) _data
#define CONST const
-
#undef BCM47XX_CA9
-#ifdef __ARM_ARCH_7A__
-#define BCM47XX_CA9
-#endif
+
#ifndef BCMFASTPATH
#if defined(BCM47XX_CA9)
#define BCMFASTPATH __attribute__ ((__section__ (".text.fastpath")))
#define BCMFASTPATH
#define BCMFASTPATH_HOST
#endif
-#endif
+#endif /* BCMFASTPATH */
+/* Use the BCMRAMFN() macro to tag functions in source that must be included in RAM (excluded from
+ * ROM). This should eliminate the need to manually specify these functions in the ROM config file.
+ * It should only be used in special cases where the function must be in RAM for *all* ROM-based
+ * chips.
+ */
+ #define BCMRAMFN(_fn) _fn
-#define _data _data
-#define BCMROMDAT_NAME(_data) _data
-#define _fn _fn
-#define _fn _fn
#define STATIC static
-#define BCMROMDAT_ARYSIZ(data) ARRAYSIZE(data)
-#define BCMROMDAT_SIZEOF(data) sizeof(data)
-#define BCMROMDAT_APATCH(data)
-#define BCMROMDAT_SPATCH(data)
-
-
-#define SI_BUS 0
-#define PCI_BUS 1
-#define PCMCIA_BUS 2
-#define SDIO_BUS 3
-#define JTAG_BUS 4
-#define USB_BUS 5
-#define SPI_BUS 6
-#define RPC_BUS 7
-
+/* Bus types */
+#define SI_BUS 0 /* SOC Interconnect */
+#define PCI_BUS 1 /* PCI target */
+#define PCMCIA_BUS 2 /* PCMCIA target */
+#define SDIO_BUS 3 /* SDIO target */
+#define JTAG_BUS 4 /* JTAG */
+#define USB_BUS 5 /* USB (does not support R/W REG) */
+#define SPI_BUS 6 /* gSPI target */
+#define RPC_BUS 7 /* RPC target */
+
+/* Allows size optimization for single-bus image */
#ifdef BCMBUSTYPE
#define BUSTYPE(bus) (BCMBUSTYPE)
#else
#define BUSTYPE(bus) (bus)
#endif
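/* Illustrative sketch (not part of the patch): how BUSTYPE() enables the size
 * optimization mentioned above. When the image is built with a fixed bus, e.g.
 * -DBCMBUSTYPE=SDIO_BUS, the switch collapses to the SDIO arm at compile time;
 * otherwise it remains a runtime dispatch. bus_attach() is a hypothetical function.
 */
static int bus_attach(uint bustype)
{
	switch (BUSTYPE(bustype)) {
	case SDIO_BUS:
		return 1;	/* SDIO-specific attach path */
	case PCI_BUS:
		return 2;	/* PCI-specific attach path */
	default:
		return 0;	/* unsupported bus */
	}
}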
-
+/* Allows size optimization for single-backplane image */
#ifdef BCMCHIPTYPE
#define CHIPTYPE(bus) (BCMCHIPTYPE)
#else
#endif
-
+/* Allows size optimization for SPROM support */
#if defined(BCMSPROMBUS)
#define SPROMBUS (BCMSPROMBUS)
#elif defined(SI_PCMCIA_SROM)
#define SPROMBUS (PCI_BUS)
#endif
-
+/* Allows size optimization for single-chip image */
#ifdef BCMCHIPID
#define CHIPID(chip) (BCMCHIPID)
#else
#define CHIPREV(rev) (rev)
#endif
+/* Defines for DMA Address Width - Shared between OSL and HNDDMA */
+#define DMADDR_MASK_32 0x0 /* Address mask for 32-bits */
+#define DMADDR_MASK_30 0xc0000000 /* Address mask for 30-bits */
+#define DMADDR_MASK_26 0xFC000000 /* Address mask for 26-bits */
+#define DMADDR_MASK_0 0xffffffff /* Address mask for 0-bits (hi-part) */
-#define DMADDR_MASK_32 0x0
-#define DMADDR_MASK_30 0xc0000000
-#define DMADDR_MASK_0 0xffffffff
-
-#define DMADDRWIDTH_30 30
-#define DMADDRWIDTH_32 32
-#define DMADDRWIDTH_63 63
-#define DMADDRWIDTH_64 64
+#define DMADDRWIDTH_26 26 /* 26-bit addressing capability */
+#define DMADDRWIDTH_30 30 /* 30-bit addressing capability */
+#define DMADDRWIDTH_32 32 /* 32-bit addressing capability */
+#define DMADDRWIDTH_63 63 /* 64-bit addressing capability */
+#define DMADDRWIDTH_64 64 /* 64-bit addressing capability */
-#ifdef BCMDMA64OSL
typedef struct {
uint32 loaddr;
uint32 hiaddr;
} dma64addr_t;
-typedef dma64addr_t dmaaddr_t;
-#define PHYSADDRHI(_pa) ((_pa).hiaddr)
-#define PHYSADDRHISET(_pa, _val) \
+#define PHYSADDR64HI(_pa) ((_pa).hiaddr)
+#define PHYSADDR64HISET(_pa, _val) \
do { \
(_pa).hiaddr = (_val); \
} while (0)
-#define PHYSADDRLO(_pa) ((_pa).loaddr)
-#define PHYSADDRLOSET(_pa, _val) \
+#define PHYSADDR64LO(_pa) ((_pa).loaddr)
+#define PHYSADDR64LOSET(_pa, _val) \
do { \
(_pa).loaddr = (_val); \
} while (0)
+#ifdef BCMDMA64OSL
+typedef dma64addr_t dmaaddr_t;
+#define PHYSADDRHI(_pa) PHYSADDR64HI(_pa)
+#define PHYSADDRHISET(_pa, _val) PHYSADDR64HISET(_pa, _val)
+#define PHYSADDRLO(_pa) PHYSADDR64LO(_pa)
+#define PHYSADDRLOSET(_pa, _val) PHYSADDR64LOSET(_pa, _val)
+
#else
typedef unsigned long dmaaddr_t;
#define PHYSADDRHI(_pa) (0)
do { \
(_pa) = (_val); \
} while (0)
-#endif
-
+#endif /* BCMDMA64OSL */
+#define PHYSADDRISZERO(_pa) (PHYSADDRLO(_pa) == 0 && PHYSADDRHI(_pa) == 0)
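/* Illustrative sketch (not part of the patch): by touching dmaaddr_t only through
 * the accessor macros above, the same code handles both the 32-bit scalar form and
 * the BCMDMA64OSL {lo,hi} pair. 'busaddr_lo'/'busaddr_hi' are example values, and
 * the hi write is assumed to be a no-op in 32-bit builds.
 */
static dmaaddr_t pack_dma_address(uint32 busaddr_lo, uint32 busaddr_hi)
{
	dmaaddr_t pa;

	PHYSADDRLOSET(pa, busaddr_lo);
	PHYSADDRHISET(pa, busaddr_hi);
	BCM_REFERENCE(busaddr_hi);	/* keeps 32-bit builds warning-free */
	return pa;			/* PHYSADDRISZERO(pa) can test for a null address */
}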
+/* One physical DMA segment */
typedef struct {
dmaaddr_t addr;
uint32 length;
} hnddma_seg_t;
-#define MAX_DMA_SEGS 4
+#define MAX_DMA_SEGS 8
typedef struct {
- void *oshdmah;
- uint origsize;
+ void *oshdmah; /* Opaque handle for OSL to store its information */
+ uint origsize; /* Size of the virtual packet */
uint nsegs;
hnddma_seg_t segs[MAX_DMA_SEGS];
} hnddma_seg_map_t;
-
+/* Packet headroom necessary to accommodate the largest header in the system (i.e. TXOFF).
+ * By doing so, we avoid the need to allocate an extra buffer for the header when bridging to WL.
+ * There is a compile-time check in wlc.c which ensures that this value is at least as big
+ * as TXOFF. This value is used in dma_rxfill (hnddma.c).
+ */
#if defined(BCM_RPC_NOCOPY) || defined(BCM_RCP_TXNOCOPY)
-
+/* add 40 bytes to allow for extra RPC header and info */
#define BCMEXTRAHDROOM 260
-#else
+#else /* BCM_RPC_NOCOPY || BCM_RPC_TXNOCOPY */
#if defined(BCM47XX_CA9)
#define BCMEXTRAHDROOM 224
#else
#define BCMEXTRAHDROOM 204
-#endif
-#endif
-
+#endif /* linux && BCM47XX_CA9 */
+#endif /* BCM_RPC_NOCOPY || BCM_RPC_TXNOCOPY */
+/* Packet alignment for most efficient SDIO (can change based on platform) */
#ifndef SDALIGN
#define SDALIGN 32
#endif
-
+/* Headroom required for dongle-to-host communication. Packets allocated
+ * locally in the dongle (e.g. for CDC ioctls or RNDIS messages) should
+ * leave this much room in front for low-level message headers which may
+ * be needed to get across the dongle bus to the host. (These messages
+ * don't go over the network, so room for the full WL header above would
+ * be a waste.)
+ */
#define BCMDONGLEHDRSZ 12
#define BCMDONGLEPADSZ 16
#define BCMASSERT_SUPPORT
#endif
-
+/* Macros for doing definition and get/set of bitfields
+ * Usage example, e.g. a three-bit field (bits 4-6):
+ * #define <NAME>_M BITFIELD_MASK(3)
+ * #define <NAME>_S 4
+ * ...
+ * regval = R_REG(osh, &regs->regfoo);
+ * field = GFIELD(regval, <NAME>);
+ * regval = SFIELD(regval, <NAME>, 1);
+ * W_REG(osh, &regs->regfoo, regval);
+ */
#define BITFIELD_MASK(width) \
(((unsigned)1 << (width)) - 1)
#define GFIELD(val, field) \
(((val) >> field ## _S) & field ## _M)
#define SFIELD(val, field, bits) \
(((val) & (~(field ## _M << field ## _S))) | \
((unsigned)(bits) << field ## _S))
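/* Illustrative sketch (not part of the patch): a concrete instance of the bitfield
 * pattern documented above, for a hypothetical 3-bit TXPWR field at bits 6..4 of a
 * register value.
 */
#define TXPWR_M		BITFIELD_MASK(3)
#define TXPWR_S		4

static uint32 bump_txpwr(uint32 regval)
{
	uint32 pwr = GFIELD(regval, TXPWR);			/* extract bits 6..4 */
	return SFIELD(regval, TXPWR, (pwr + 1) & TXPWR_M);	/* write the field back */
}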
-
+/* define BCMSMALL to remove misc features for memory-constrained environments */
#ifdef BCMSMALL
#undef BCMSPACE
-#define bcmspace FALSE
+#define bcmspace FALSE /* if (bcmspace) code is discarded */
#else
#define BCMSPACE
-#define bcmspace TRUE
+#define bcmspace TRUE /* if (bcmspace) code is retained */
#endif
-
+/* Max. nvram variable table size */
+#ifndef MAXSZ_NVRAM_VARS
#define MAXSZ_NVRAM_VARS 4096
+#endif
+/* WL_ENAB_RUNTIME_CHECK may be set based upon the #define below (for ROM builds). It may also
+ * be defined via makefiles (e.g. ROM auto abandon unoptimized compiles).
+ */
+
+
+#ifdef BCMLFRAG /* BCMLFRAG support enab macros */
+ extern bool _bcmlfrag;
+ #if defined(WL_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD)
+ #define BCMLFRAG_ENAB() (_bcmlfrag)
+ #elif defined(BCMLFRAG_DISABLED)
+ #define BCMLFRAG_ENAB() (0)
+ #else
+ #define BCMLFRAG_ENAB() (1)
+ #endif
+#else
+ #define BCMLFRAG_ENAB() (0)
+#endif /* BCMLFRAG_ENAB */
+#ifdef BCMSPLITRX /* BCMSPLITRX support enab macros */
+ extern bool _bcmsplitrx;
+ #if defined(WL_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD)
+ #define BCMSPLITRX_ENAB() (_bcmsplitrx)
+ #elif defined(BCMSPLITRX_DISABLED)
+ #define BCMSPLITRX_ENAB() (0)
+ #else
+ #define BCMSPLITRX_ENAB() (1)
+ #endif
+#else
+ #define BCMSPLITRX_ENAB() (0)
+#endif /* BCMSPLITRX */
+#ifdef BCM_SPLITBUF
+ extern bool _bcmsplitbuf;
+ #if defined(WL_ENAB_RUNTIME_CHECK) || !defined(DONGLEBUILD)
+ #define BCM_SPLITBUF_ENAB() (_bcmsplitbuf)
+ #elif defined(BCM_SPLITBUF_DISABLED)
+ #define BCM_SPLITBUF_ENAB() (0)
+ #else
+ #define BCM_SPLITBUF_ENAB() (1)
+ #endif
+#else
+ #define BCM_SPLITBUF_ENAB() (0)
+#endif /* BCM_SPLITBUF */
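/* Illustrative sketch (not part of the patch): callers are written once against the
 * *_ENAB() macro and work in all three configurations (feature compiled out, forced
 * on, or chosen at run time via the _bcmlfrag bool). rx_frag_path()/rx_normal_path()
 * are hypothetical helpers, stubbed here only to keep the example self-contained.
 */
static void rx_frag_path(void *pkt)   { BCM_REFERENCE(pkt); }
static void rx_normal_path(void *pkt) { BCM_REFERENCE(pkt); }

static void rx_dispatch(void *pkt)
{
	if (BCMLFRAG_ENAB())
		rx_frag_path(pkt);	/* this branch folds away when BCMLFRAG is off */
	else
		rx_normal_path(pkt);
}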
+/* Max size for reclaimable NVRAM array */
#ifdef DL_NVRAM
#define NVRAM_ARRAY_MAXSIZE DL_NVRAM
#else
#define NVRAM_ARRAY_MAXSIZE MAXSZ_NVRAM_VARS
-#endif
+#endif /* DL_NVRAM */
-#ifdef BCMUSBDEV_ENABLED
extern uint32 gFWID;
-#endif
-#endif
+
+#endif /* _bcmdefs_h_ */
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: bcmdevs.h 414368 2013-07-24 15:00:23Z $
+ * $Id: bcmdevs.h 484136 2014-06-12 04:36:10Z $
*/
#ifndef _BCMDEVS_H
#define _BCMDEVS_H
-
+/* PCI vendor IDs */
#define VENDOR_EPIGRAM 0xfeda
#define VENDOR_BROADCOM 0x14e4
#define VENDOR_3COM 0x10b7
#define VENDOR_HP 0x103c
#define VENDOR_HP_COMPAQ 0x0e11
#define VENDOR_APPLE 0x106b
-#define VENDOR_SI_IMAGE 0x1095
-#define VENDOR_BUFFALO 0x1154
-#define VENDOR_TI 0x104c
-#define VENDOR_RICOH 0x1180
+#define VENDOR_SI_IMAGE 0x1095 /* Silicon Image, used by Arasan SDIO Host */
+#define VENDOR_BUFFALO 0x1154 /* Buffalo vendor id */
+#define VENDOR_TI 0x104c /* Texas Instruments */
+#define VENDOR_RICOH 0x1180 /* Ricoh */
#define VENDOR_JMICRON 0x197b
-
+/* PCMCIA vendor IDs */
#define VENDOR_BROADCOM_PCMCIA 0x02d0
-
+/* SDIO vendor IDs */
#define VENDOR_BROADCOM_SDIO 0x00BF
-
+/* DONGLE VID/PIDs */
#define BCM_DNGL_VID 0x0a5c
#define BCM_DNGL_BL_PID_4328 0xbd12
#define BCM_DNGL_BL_PID_4322 0xbd13
#define BCM_DNGL_BL_PID_43242 0xbd1f
#define BCM_DNGL_BL_PID_43342 0xbd21
#define BCM_DNGL_BL_PID_4335 0xbd20
-#define BCM_DNGL_BL_PID_4350 0xbd23
#define BCM_DNGL_BL_PID_43341 0xbd22
+#define BCM_DNGL_BL_PID_4350 0xbd23
+#define BCM_DNGL_BL_PID_4345 0xbd24
+#define BCM_DNGL_BL_PID_4349 0xbd25
+#define BCM_DNGL_BL_PID_4354 0xbd26
+#define BCM_DNGL_BL_PID_43569 0xbd27
+#define BCM_DNGL_BL_PID_43909 0xbd28
#define BCM_DNGL_BDC_PID 0x0bdc
#define BCM_DNGL_JTAG_PID 0x4a44
-
+/* HW USB BLOCK [CPULESS USB] PIDs */
#define BCM_HWUSB_PID_43239 43239
-
-#define BCM4210_DEVICE_ID 0x1072
-#define BCM4230_DEVICE_ID 0x1086
-#define BCM4401_ENET_ID 0x170c
-#define BCM3352_DEVICE_ID 0x3352
-#define BCM3360_DEVICE_ID 0x3360
+/* PCI Device IDs */
+#define BCM4210_DEVICE_ID 0x1072 /* never used */
+#define BCM4230_DEVICE_ID 0x1086 /* never used */
+#define BCM4401_ENET_ID 0x170c /* 4401b0 production enet cards */
+#define BCM3352_DEVICE_ID 0x3352 /* bcm3352 device id */
+#define BCM3360_DEVICE_ID 0x3360 /* bcm3360 device id */
#define BCM4211_DEVICE_ID 0x4211
#define BCM4231_DEVICE_ID 0x4231
-#define BCM4303_D11B_ID 0x4303
-#define BCM4311_D11G_ID 0x4311
-#define BCM4311_D11DUAL_ID 0x4312
-#define BCM4311_D11A_ID 0x4313
-#define BCM4328_D11DUAL_ID 0x4314
-#define BCM4328_D11G_ID 0x4315
-#define BCM4328_D11A_ID 0x4316
-#define BCM4318_D11G_ID 0x4318
-#define BCM4318_D11DUAL_ID 0x4319
-#define BCM4318_D11A_ID 0x431a
-#define BCM4325_D11DUAL_ID 0x431b
-#define BCM4325_D11G_ID 0x431c
-#define BCM4325_D11A_ID 0x431d
-#define BCM4306_D11G_ID 0x4320
-#define BCM4306_D11A_ID 0x4321
-#define BCM4306_UART_ID 0x4322
-#define BCM4306_V90_ID 0x4323
-#define BCM4306_D11DUAL_ID 0x4324
-#define BCM4306_D11G_ID2 0x4325
-#define BCM4321_D11N_ID 0x4328
-#define BCM4321_D11N2G_ID 0x4329
-#define BCM4321_D11N5G_ID 0x432a
-#define BCM4322_D11N_ID 0x432b
-#define BCM4322_D11N2G_ID 0x432c
-#define BCM4322_D11N5G_ID 0x432d
-#define BCM4329_D11N_ID 0x432e
-#define BCM4329_D11N2G_ID 0x432f
-#define BCM4329_D11N5G_ID 0x4330
-#define BCM4315_D11DUAL_ID 0x4334
-#define BCM4315_D11G_ID 0x4335
-#define BCM4315_D11A_ID 0x4336
-#define BCM4319_D11N_ID 0x4337
-#define BCM4319_D11N2G_ID 0x4338
-#define BCM4319_D11N5G_ID 0x4339
-#define BCM43231_D11N2G_ID 0x4340
-#define BCM43221_D11N2G_ID 0x4341
-#define BCM43222_D11N_ID 0x4350
-#define BCM43222_D11N2G_ID 0x4351
-#define BCM43222_D11N5G_ID 0x4352
-#define BCM43224_D11N_ID 0x4353
-#define BCM43224_D11N_ID_VEN1 0x0576
-#define BCM43226_D11N_ID 0x4354
-#define BCM43236_D11N_ID 0x4346
-#define BCM43236_D11N2G_ID 0x4347
-#define BCM43236_D11N5G_ID 0x4348
-#define BCM43225_D11N2G_ID 0x4357
-#define BCM43421_D11N_ID 0xA99D
-#define BCM4313_D11N2G_ID 0x4727
-#define BCM4330_D11N_ID 0x4360
-#define BCM4330_D11N2G_ID 0x4361
-#define BCM4330_D11N5G_ID 0x4362
-#define BCM4336_D11N_ID 0x4343
-#define BCM6362_D11N_ID 0x435f
-#define BCM6362_D11N2G_ID 0x433f
-#define BCM6362_D11N5G_ID 0x434f
-#define BCM4331_D11N_ID 0x4331
-#define BCM4331_D11N2G_ID 0x4332
-#define BCM4331_D11N5G_ID 0x4333
-#define BCM43237_D11N_ID 0x4355
-#define BCM43237_D11N5G_ID 0x4356
-#define BCM43227_D11N2G_ID 0x4358
-#define BCM43228_D11N_ID 0x4359
-#define BCM43228_D11N5G_ID 0x435a
-#define BCM43362_D11N_ID 0x4363
-#define BCM43239_D11N_ID 0x4370
-#define BCM4324_D11N_ID 0x4374
-#define BCM43217_D11N2G_ID 0x43a9
-#define BCM43131_D11N2G_ID 0x43aa
-#define BCM4314_D11N2G_ID 0x4364
-#define BCM43142_D11N2G_ID 0x4365
-#define BCM43143_D11N2G_ID 0x4366
-#define BCM4334_D11N_ID 0x4380
-#define BCM4334_D11N2G_ID 0x4381
-#define BCM4334_D11N5G_ID 0x4382
-#define BCM43342_D11N_ID 0x4383
-#define BCM43342_D11N2G_ID 0x4384
-#define BCM43342_D11N5G_ID 0x4385
-#define BCM43341_D11N_ID 0x4386
-#define BCM43341_D11N2G_ID 0x4387
-#define BCM43341_D11N5G_ID 0x4388
+#define BCM4303_D11B_ID 0x4303 /* 4303 802.11b */
+#define BCM4311_D11G_ID 0x4311 /* 4311 802.11b/g id */
+#define BCM4311_D11DUAL_ID 0x4312 /* 4311 802.11a/b/g id */
+#define BCM4311_D11A_ID 0x4313 /* 4311 802.11a id */
+#define BCM4328_D11DUAL_ID 0x4314 /* 4328/4312 802.11a/g id */
+#define BCM4328_D11G_ID 0x4315 /* 4328/4312 802.11g id */
+#define BCM4328_D11A_ID 0x4316 /* 4328/4312 802.11a id */
+#define BCM4318_D11G_ID 0x4318 /* 4318 802.11b/g id */
+#define BCM4318_D11DUAL_ID 0x4319 /* 4318 802.11a/b/g id */
+#define BCM4318_D11A_ID 0x431a /* 4318 802.11a id */
+#define BCM4325_D11DUAL_ID 0x431b /* 4325 802.11a/g id */
+#define BCM4325_D11G_ID 0x431c /* 4325 802.11g id */
+#define BCM4325_D11A_ID 0x431d /* 4325 802.11a id */
+#define BCM4306_D11G_ID 0x4320 /* 4306 802.11g */
+#define BCM4306_D11A_ID 0x4321 /* 4306 802.11a */
+#define BCM4306_UART_ID 0x4322 /* 4306 uart */
+#define BCM4306_V90_ID 0x4323 /* 4306 v90 codec */
+#define BCM4306_D11DUAL_ID 0x4324 /* 4306 dual A+B */
+#define BCM4306_D11G_ID2 0x4325 /* BCM4306_D11G_ID; INF w/loose binding war */
+#define BCM4321_D11N_ID 0x4328 /* 4321 802.11n dualband id */
+#define BCM4321_D11N2G_ID 0x4329 /* 4321 802.11n 2.4Ghz band id */
+#define BCM4321_D11N5G_ID 0x432a /* 4321 802.11n 5Ghz band id */
+#define BCM4322_D11N_ID 0x432b /* 4322 802.11n dualband device */
+#define BCM4322_D11N2G_ID 0x432c /* 4322 802.11n 2.4GHz device */
+#define BCM4322_D11N5G_ID 0x432d /* 4322 802.11n 5GHz device */
+#define BCM4329_D11N_ID 0x432e /* 4329 802.11n dualband device */
+#define BCM4329_D11N2G_ID 0x432f /* 4329 802.11n 2.4G device */
+#define BCM4329_D11N5G_ID 0x4330 /* 4329 802.11n 5G device */
+#define BCM4315_D11DUAL_ID 0x4334 /* 4315 802.11a/g id */
+#define BCM4315_D11G_ID 0x4335 /* 4315 802.11g id */
+#define BCM4315_D11A_ID 0x4336 /* 4315 802.11a id */
+#define BCM4319_D11N_ID 0x4337 /* 4319 802.11n dualband device */
+#define BCM4319_D11N2G_ID 0x4338 /* 4319 802.11n 2.4G device */
+#define BCM4319_D11N5G_ID 0x4339 /* 4319 802.11n 5G device */
+#define BCM43231_D11N2G_ID 0x4340 /* 43231 802.11n 2.4GHz device */
+#define BCM43221_D11N2G_ID 0x4341 /* 43221 802.11n 2.4GHz device */
+#define BCM43222_D11N_ID 0x4350 /* 43222 802.11n dualband device */
+#define BCM43222_D11N2G_ID 0x4351 /* 43222 802.11n 2.4GHz device */
+#define BCM43222_D11N5G_ID 0x4352 /* 43222 802.11n 5GHz device */
+#define BCM43224_D11N_ID 0x4353 /* 43224 802.11n dualband device */
+#define BCM43224_D11N_ID_VEN1 0x0576 /* Vendor specific 43224 802.11n db device */
+#define BCM43226_D11N_ID 0x4354 /* 43226 802.11n dualband device */
+#define BCM43236_D11N_ID 0x4346 /* 43236 802.11n dualband device */
+#define BCM43236_D11N2G_ID 0x4347 /* 43236 802.11n 2.4GHz device */
+#define BCM43236_D11N5G_ID 0x4348 /* 43236 802.11n 5GHz device */
+#define BCM43225_D11N2G_ID 0x4357 /* 43225 802.11n 2.4GHz device */
+#define BCM43421_D11N_ID 0xA99D /* 43421 802.11n dualband device */
+#define BCM4313_D11N2G_ID 0x4727 /* 4313 802.11n 2.4G device */
+#define BCM4330_D11N_ID 0x4360 /* 4330 802.11n dualband device */
+#define BCM4330_D11N2G_ID 0x4361 /* 4330 802.11n 2.4G device */
+#define BCM4330_D11N5G_ID 0x4362 /* 4330 802.11n 5G device */
+#define BCM4336_D11N_ID 0x4343 /* 4336 802.11n 2.4GHz device */
+#define BCM6362_D11N_ID 0x435f /* 6362 802.11n dualband device */
+#define BCM6362_D11N2G_ID 0x433f /* 6362 802.11n 2.4Ghz band id */
+#define BCM6362_D11N5G_ID 0x434f /* 6362 802.11n 5Ghz band id */
+#define BCM4331_D11N_ID 0x4331 /* 4331 802.11n dualband id */
+#define BCM4331_D11N2G_ID 0x4332 /* 4331 802.11n 2.4Ghz band id */
+#define BCM4331_D11N5G_ID 0x4333 /* 4331 802.11n 5Ghz band id */
+#define BCM43237_D11N_ID 0x4355 /* 43237 802.11n dualband device */
+#define BCM43237_D11N5G_ID 0x4356 /* 43237 802.11n 5GHz device */
+#define BCM43227_D11N2G_ID 0x4358 /* 43227 802.11n 2.4GHz device */
+#define BCM43228_D11N_ID 0x4359 /* 43228 802.11n DualBand device */
+#define BCM43228_D11N5G_ID 0x435a /* 43228 802.11n 5GHz device */
+#define BCM43362_D11N_ID 0x4363 /* 43362 802.11n 2.4GHz device */
+#define BCM43239_D11N_ID 0x4370 /* 43239 802.11n dualband device */
+#define BCM4324_D11N_ID 0x4374 /* 4324 802.11n dualband device */
+#define BCM43217_D11N2G_ID 0x43a9 /* 43217 802.11n 2.4GHz device */
+#define BCM43131_D11N2G_ID 0x43aa /* 43131 802.11n 2.4GHz device */
+#define BCM4314_D11N2G_ID 0x4364 /* 4314 802.11n 2.4G device */
+#define BCM43142_D11N2G_ID 0x4365 /* 43142 802.11n 2.4G device */
+#define BCM43143_D11N2G_ID 0x4366 /* 43143 802.11n 2.4G device */
+#define BCM4334_D11N_ID 0x4380 /* 4334 802.11n dualband device */
+#define BCM4334_D11N2G_ID 0x4381 /* 4334 802.11n 2.4G device */
+#define BCM4334_D11N5G_ID 0x4382 /* 4334 802.11n 5G device */
+#define BCM43342_D11N_ID 0x4383 /* 43342 802.11n dualband device */
+#define BCM43342_D11N2G_ID 0x4384 /* 43342 802.11n 2.4G device */
+#define BCM43342_D11N5G_ID 0x4385 /* 43342 802.11n 5G device */
+#define BCM43341_D11N_ID 0x4386 /* 43341 802.11n dualband device */
+#define BCM43341_D11N2G_ID 0x4387 /* 43341 802.11n 2.4G device */
+#define BCM43341_D11N5G_ID 0x4388 /* 43341 802.11n 5G device */
#define BCM4360_D11AC_ID 0x43a0
#define BCM4360_D11AC2G_ID 0x43a1
#define BCM4360_D11AC5G_ID 0x43a2
+#define BCM4345_D11AC_ID 0x43ab /* 4345 802.11ac dualband device */
+#define BCM4345_D11AC2G_ID 0x43ac /* 4345 802.11ac 2.4G device */
+#define BCM4345_D11AC5G_ID 0x43ad /* 4345 802.11ac 5G device */
#define BCM4335_D11AC_ID 0x43ae
#define BCM4335_D11AC2G_ID 0x43af
#define BCM4335_D11AC5G_ID 0x43b0
-#define BCM4352_D11AC_ID 0x43b1
-#define BCM4352_D11AC2G_ID 0x43b2
-#define BCM4352_D11AC5G_ID 0x43b3
-
+#define BCM4352_D11AC_ID 0x43b1 /* 4352 802.11ac dualband device */
+#define BCM4352_D11AC2G_ID 0x43b2 /* 4352 802.11ac 2.4G device */
+#define BCM4352_D11AC5G_ID 0x43b3 /* 4352 802.11ac 5G device */
+#define BCM43602_D11AC_ID 0x43ba /* ac dualband PCI devid SPROM programmed */
+#define BCM43602_D11AC2G_ID 0x43bb /* 43602 802.11ac 2.4G device */
+#define BCM43602_D11AC5G_ID 0x43bc /* 43602 802.11ac 5G device */
+#define BCM4349_D11AC_ID 0x4349 /* 4349 802.11ac dualband device */
+#define BCM4349_D11AC2G_ID 0x43dd /* 4349 802.11ac 2.4G device */
+#define BCM4349_D11AC5G_ID 0x43de /* 4349 802.11ac 5G device */
+#define BCM4355_D11AC_ID 0x43d3 /* 4355 802.11ac dualband device */
+#define BCM4355_D11AC2G_ID 0x43d4 /* 4355 802.11ac 2.4G device */
+#define BCM4355_D11AC5G_ID 0x43d5 /* 4355 802.11ac 5G device */
+#define BCM4359_D11AC_ID 0x43d6 /* 4359 802.11ac dualband device */
+#define BCM4359_D11AC2G_ID 0x43d7 /* 4359 802.11ac 2.4G device */
+#define BCM4359_D11AC5G_ID 0x43d8 /* 4359 802.11ac 5G device */
+/* PCI Subsystem ID */
#define BCM943228HMB_SSID_VEN1 0x0607
#define BCM94313HMGBL_SSID_VEN1 0x0608
#define BCM94313HMG_SSID_VEN1 0x0609
#define BCM943142HM_SSID_VEN1 0x0611
-#define BCM43143_D11N2G_ID 0x4366
+#define BCM43143_D11N2G_ID 0x4366 /* 43143 802.11n 2.4G device */
-#define BCM43242_D11N_ID 0x4367
-#define BCM43242_D11N2G_ID 0x4368
-#define BCM43242_D11N5G_ID 0x4369
+#define BCM43242_D11N_ID 0x4367 /* 43242 802.11n dualband device */
+#define BCM43242_D11N2G_ID 0x4368 /* 43242 802.11n 2.4G device */
+#define BCM43242_D11N5G_ID 0x4369 /* 43242 802.11n 5G device */
#define BCM4350_D11AC_ID 0x43a3
#define BCM4350_D11AC2G_ID 0x43a4
#define BCM4350_D11AC5G_ID 0x43a5
+#define BCM43556_D11AC_ID 0x43b7
+#define BCM43556_D11AC2G_ID 0x43b8
+#define BCM43556_D11AC5G_ID 0x43b9
+
+#define BCM43558_D11AC_ID 0x43c0
+#define BCM43558_D11AC2G_ID 0x43c1
+#define BCM43558_D11AC5G_ID 0x43c2
+
+#define BCM43566_D11AC_ID 0x43d3
+#define BCM43566_D11AC2G_ID 0x43d4
+#define BCM43566_D11AC5G_ID 0x43d5
+
+#define BCM43568_D11AC_ID 0x43d6
+#define BCM43568_D11AC2G_ID 0x43d7
+#define BCM43568_D11AC5G_ID 0x43d8
+
+#define BCM43569_D11AC_ID 0x43d9
+#define BCM43569_D11AC2G_ID 0x43da
+#define BCM43569_D11AC5G_ID 0x43db
+
+#define BCM43570_D11AC_ID 0x43d9
+#define BCM43570_D11AC2G_ID 0x43da
+#define BCM43570_D11AC5G_ID 0x43db
+
+#define BCM4354_D11AC_ID 0x43df /* 4354 802.11ac dualband device */
+#define BCM4354_D11AC2G_ID 0x43e0 /* 4354 802.11ac 2.4G device */
+#define BCM4354_D11AC5G_ID 0x43e1 /* 4354 802.11ac 5G device */
+#define BCM43430_D11N2G_ID 0x43e2 /* 43430 802.11n 2.4G device */
+
+
+#define BCM43349_D11N_ID 0x43e6 /* 43349 802.11n dualband id */
+#define BCM43349_D11N2G_ID 0x43e7 /* 43349 802.11n 2.4Ghz band id */
+#define BCM43349_D11N5G_ID 0x43e8 /* 43349 802.11n 5Ghz band id */
+
+#define BCM4358_D11AC_ID 0x43e9 /* 4358 802.11ac dualband device */
+#define BCM4358_D11AC2G_ID 0x43ea /* 4358 802.11ac 2.4G device */
+#define BCM4358_D11AC5G_ID 0x43eb /* 4358 802.11ac 5G device */
-#define BCMGPRS_UART_ID 0x4333
-#define BCMGPRS2_UART_ID 0x4344
-#define FPGA_JTAGM_ID 0x43f0
-#define BCM_JTAGM_ID 0x43f1
-#define SDIOH_FPGA_ID 0x43f2
-#define BCM_SDIOH_ID 0x43f3
-#define SDIOD_FPGA_ID 0x43f4
-#define SPIH_FPGA_ID 0x43f5
-#define BCM_SPIH_ID 0x43f6
-#define MIMO_FPGA_ID 0x43f8
-#define BCM_JTAGM2_ID 0x43f9
-#define SDHCI_FPGA_ID 0x43fa
-#define BCM4402_ENET_ID 0x4402
-#define BCM4402_V90_ID 0x4403
-#define BCM4410_DEVICE_ID 0x4410
-#define BCM4412_DEVICE_ID 0x4412
-#define BCM4430_DEVICE_ID 0x4430
-#define BCM4432_DEVICE_ID 0x4432
-#define BCM4704_ENET_ID 0x4706
-#define BCM4710_DEVICE_ID 0x4710
-#define BCM47XX_AUDIO_ID 0x4711
-#define BCM47XX_V90_ID 0x4712
-#define BCM47XX_ENET_ID 0x4713
-#define BCM47XX_EXT_ID 0x4714
-#define BCM47XX_GMAC_ID 0x4715
-#define BCM47XX_USBH_ID 0x4716
-#define BCM47XX_USBD_ID 0x4717
-#define BCM47XX_IPSEC_ID 0x4718
-#define BCM47XX_ROBO_ID 0x4719
-#define BCM47XX_USB20H_ID 0x471a
-#define BCM47XX_USB20D_ID 0x471b
-#define BCM47XX_ATA100_ID 0x471d
-#define BCM47XX_SATAXOR_ID 0x471e
-#define BCM47XX_GIGETH_ID 0x471f
-#define BCM4712_MIPS_ID 0x4720
-#define BCM4716_DEVICE_ID 0x4722
-#define BCM47XX_USB30H_ID 0x472a
-#define BCM47XX_USB30D_ID 0x472b
-#define BCM47XX_SMBUS_EMU_ID 0x47fe
-#define BCM47XX_XOR_EMU_ID 0x47ff
-#define EPI41210_DEVICE_ID 0xa0fa
-#define EPI41230_DEVICE_ID 0xa10e
-#define JINVANI_SDIOH_ID 0x4743
-#define BCM27XX_SDIOH_ID 0x2702
-#define PCIXX21_FLASHMEDIA_ID 0x803b
-#define PCIXX21_SDIOH_ID 0x803c
-#define R5C822_SDIOH_ID 0x0822
-#define JMICRON_SDIOH_ID 0x2381
-
-
-#define BCM4306_CHIP_ID 0x4306
-#define BCM4311_CHIP_ID 0x4311
-#define BCM43111_CHIP_ID 43111
-#define BCM43112_CHIP_ID 43112
-#define BCM4312_CHIP_ID 0x4312
-#define BCM4313_CHIP_ID 0x4313
-#define BCM43131_CHIP_ID 43131
-#define BCM4315_CHIP_ID 0x4315
-#define BCM4318_CHIP_ID 0x4318
-#define BCM4319_CHIP_ID 0x4319
-#define BCM4320_CHIP_ID 0x4320
-#define BCM4321_CHIP_ID 0x4321
-#define BCM43217_CHIP_ID 43217
-#define BCM4322_CHIP_ID 0x4322
-#define BCM43221_CHIP_ID 43221
-#define BCM43222_CHIP_ID 43222
-#define BCM43224_CHIP_ID 43224
-#define BCM43225_CHIP_ID 43225
-#define BCM43227_CHIP_ID 43227
-#define BCM43228_CHIP_ID 43228
-#define BCM43226_CHIP_ID 43226
-#define BCM43231_CHIP_ID 43231
-#define BCM43234_CHIP_ID 43234
-#define BCM43235_CHIP_ID 43235
-#define BCM43236_CHIP_ID 43236
-#define BCM43237_CHIP_ID 43237
-#define BCM43238_CHIP_ID 43238
-#define BCM43239_CHIP_ID 43239
-#define BCM43420_CHIP_ID 43420
-#define BCM43421_CHIP_ID 43421
-#define BCM43428_CHIP_ID 43428
-#define BCM43431_CHIP_ID 43431
-#define BCM43460_CHIP_ID 43460
-#define BCM4325_CHIP_ID 0x4325
-#define BCM4328_CHIP_ID 0x4328
-#define BCM4329_CHIP_ID 0x4329
-#define BCM4331_CHIP_ID 0x4331
-#define BCM4336_CHIP_ID 0x4336
-#define BCM43362_CHIP_ID 43362
-#define BCM4330_CHIP_ID 0x4330
-#define BCM6362_CHIP_ID 0x6362
-#define BCM4314_CHIP_ID 0x4314
-#define BCM43142_CHIP_ID 43142
-#define BCM43143_CHIP_ID 43143
-#define BCM4324_CHIP_ID 0x4324
-#define BCM43242_CHIP_ID 43242
-#define BCM43243_CHIP_ID 43243
-#define BCM4334_CHIP_ID 0x4334
-#define BCM4335_CHIP_ID 0x4335
-#define BCM4339_CHIP_ID 0x4339
-#define BCM4360_CHIP_ID 0x4360
-#define BCM4352_CHIP_ID 0x4352
+#define BCM4356_D11AC_ID 0x43ec /* 4356 802.11ac dualband device */
+#define BCM4356_D11AC2G_ID 0x43ed /* 4356 802.11ac 2.4G device */
+#define BCM4356_D11AC5G_ID 0x43ee /* 4356 802.11ac 5G device */
+
+#define BCMGPRS_UART_ID 0x4333 /* Uart id used by 4306/gprs card */
+#define BCMGPRS2_UART_ID 0x4344 /* Uart id used by 4306/gprs card */
+#define FPGA_JTAGM_ID 0x43f0 /* FPGA jtagm device id */
+#define BCM_JTAGM_ID 0x43f1 /* BCM jtagm device id */
+#define SDIOH_FPGA_ID 0x43f2 /* sdio host fpga */
+#define BCM_SDIOH_ID 0x43f3 /* BCM sdio host id */
+#define SDIOD_FPGA_ID 0x43f4 /* sdio device fpga */
+#define SPIH_FPGA_ID 0x43f5 /* PCI SPI Host Controller FPGA */
+#define BCM_SPIH_ID 0x43f6 /* Synopsis SPI Host Controller */
+#define MIMO_FPGA_ID 0x43f8 /* FPGA mimo minimacphy device id */
+#define BCM_JTAGM2_ID 0x43f9 /* BCM alternate jtagm device id */
+#define SDHCI_FPGA_ID 0x43fa /* Standard SDIO Host Controller FPGA */
+#define BCM4402_ENET_ID 0x4402 /* 4402 enet */
+#define BCM4402_V90_ID 0x4403 /* 4402 v90 codec */
+#define BCM4410_DEVICE_ID 0x4410 /* bcm44xx family pci iline */
+#define BCM4412_DEVICE_ID 0x4412 /* bcm44xx family pci enet */
+#define BCM4430_DEVICE_ID 0x4430 /* bcm44xx family cardbus iline */
+#define BCM4432_DEVICE_ID 0x4432 /* bcm44xx family cardbus enet */
+#define BCM4704_ENET_ID 0x4706 /* 4704 enet (Use 47XX_ENET_ID instead!) */
+#define BCM4710_DEVICE_ID 0x4710 /* 4710 primary function 0 */
+#define BCM47XX_AUDIO_ID 0x4711 /* 47xx audio codec */
+#define BCM47XX_V90_ID 0x4712 /* 47xx v90 codec */
+#define BCM47XX_ENET_ID 0x4713 /* 47xx enet */
+#define BCM47XX_EXT_ID 0x4714 /* 47xx external i/f */
+#define BCM47XX_GMAC_ID 0x4715 /* 47xx Unimac based GbE */
+#define BCM47XX_USBH_ID 0x4716 /* 47xx usb host */
+#define BCM47XX_USBD_ID 0x4717 /* 47xx usb device */
+#define BCM47XX_IPSEC_ID 0x4718 /* 47xx ipsec */
+#define BCM47XX_ROBO_ID 0x4719 /* 47xx/53xx roboswitch core */
+#define BCM47XX_USB20H_ID 0x471a /* 47xx usb 2.0 host */
+#define BCM47XX_USB20D_ID 0x471b /* 47xx usb 2.0 device */
+#define BCM47XX_ATA100_ID 0x471d /* 47xx parallel ATA */
+#define BCM47XX_SATAXOR_ID 0x471e /* 47xx serial ATA & XOR DMA */
+#define BCM47XX_GIGETH_ID 0x471f /* 47xx GbE (5700) */
+#define BCM4712_MIPS_ID 0x4720 /* 4712 base devid */
+#define BCM4716_DEVICE_ID 0x4722 /* 4716 base devid */
+#define BCM47XX_USB30H_ID 0x472a /* 47xx usb 3.0 host */
+#define BCM47XX_USB30D_ID 0x472b /* 47xx usb 3.0 device */
+#define BCM47XX_SMBUS_EMU_ID 0x47fe /* 47xx emulated SMBus device */
+#define BCM47XX_XOR_EMU_ID 0x47ff /* 47xx emulated XOR engine */
+#define EPI41210_DEVICE_ID 0xa0fa /* bcm4210 */
+#define EPI41230_DEVICE_ID 0xa10e /* bcm4230 */
+#define JINVANI_SDIOH_ID 0x4743 /* Jinvani SDIO Gold Host */
+#define BCM27XX_SDIOH_ID 0x2702 /* BCM27xx Standard SDIO Host */
+#define PCIXX21_FLASHMEDIA_ID 0x803b /* TI PCI xx21 Standard Host Controller */
+#define PCIXX21_SDIOH_ID 0x803c /* TI PCI xx21 Standard Host Controller */
+#define R5C822_SDIOH_ID 0x0822 /* Ricoh Co Ltd R5C822 SD/SDIO/MMC/MS/MSPro Host */
+#define JMICRON_SDIOH_ID 0x2381 /* JMicron Standard SDIO Host Controller */
+
+/* Chip IDs */
+#define BCM4306_CHIP_ID 0x4306 /* 4306 chipcommon chipid */
+#define BCM4311_CHIP_ID 0x4311 /* 4311 PCIe 802.11a/b/g */
+#define BCM43111_CHIP_ID 43111 /* 43111 chipcommon chipid (OTP chipid) */
+#define BCM43112_CHIP_ID 43112 /* 43112 chipcommon chipid (OTP chipid) */
+#define BCM4312_CHIP_ID 0x4312 /* 4312 chipcommon chipid */
+#define BCM4313_CHIP_ID 0x4313 /* 4313 chip id */
+#define BCM43131_CHIP_ID 43131 /* 43131 chip id (OTP chipid) */
+#define BCM4315_CHIP_ID 0x4315 /* 4315 chip id */
+#define BCM4318_CHIP_ID 0x4318 /* 4318 chipcommon chipid */
+#define BCM4319_CHIP_ID 0x4319 /* 4319 chip id */
+#define BCM4320_CHIP_ID 0x4320 /* 4320 chipcommon chipid */
+#define BCM4321_CHIP_ID 0x4321 /* 4321 chipcommon chipid */
+#define BCM43217_CHIP_ID 43217 /* 43217 chip id (OTP chipid) */
+#define BCM4322_CHIP_ID 0x4322 /* 4322 chipcommon chipid */
+#define BCM43221_CHIP_ID 43221 /* 43221 chipcommon chipid (OTP chipid) */
+#define BCM43222_CHIP_ID 43222 /* 43222 chipcommon chipid */
+#define BCM43224_CHIP_ID 43224 /* 43224 chipcommon chipid */
+#define BCM43225_CHIP_ID 43225 /* 43225 chipcommon chipid */
+#define BCM43227_CHIP_ID 43227 /* 43227 chipcommon chipid */
+#define BCM43228_CHIP_ID 43228 /* 43228 chipcommon chipid */
+#define BCM43226_CHIP_ID 43226 /* 43226 chipcommon chipid */
+#define BCM43231_CHIP_ID 43231 /* 43231 chipcommon chipid (OTP chipid) */
+#define BCM43234_CHIP_ID 43234 /* 43234 chipcommon chipid */
+#define BCM43235_CHIP_ID 43235 /* 43235 chipcommon chipid */
+#define BCM43236_CHIP_ID 43236 /* 43236 chipcommon chipid */
+#define BCM43237_CHIP_ID 43237 /* 43237 chipcommon chipid */
+#define BCM43238_CHIP_ID 43238 /* 43238 chipcommon chipid */
+#define BCM43239_CHIP_ID 43239 /* 43239 chipcommon chipid */
+#define BCM43420_CHIP_ID 43420 /* 43222 chipcommon chipid (OTP, RBBU) */
+#define BCM43421_CHIP_ID 43421 /* 43224 chipcommon chipid (OTP, RBBU) */
+#define BCM43428_CHIP_ID 43428 /* 43228 chipcommon chipid (OTP, RBBU) */
+#define BCM43431_CHIP_ID 43431 /* 4331 chipcommon chipid (OTP, RBBU) */
+#define BCM43460_CHIP_ID 43460 /* 4360 chipcommon chipid (OTP, RBBU) */
+#define BCM4325_CHIP_ID 0x4325 /* 4325 chip id */
+#define BCM4328_CHIP_ID 0x4328 /* 4328 chip id */
+#define BCM4329_CHIP_ID 0x4329 /* 4329 chipcommon chipid */
+#define BCM4331_CHIP_ID 0x4331 /* 4331 chipcommon chipid */
+#define BCM4336_CHIP_ID 0x4336 /* 4336 chipcommon chipid */
+#define BCM43362_CHIP_ID 43362 /* 43362 chipcommon chipid */
+#define BCM4330_CHIP_ID 0x4330 /* 4330 chipcommon chipid */
+#define BCM6362_CHIP_ID 0x6362 /* 6362 chipcommon chipid */
+#define BCM4314_CHIP_ID 0x4314 /* 4314 chipcommon chipid */
+#define BCM43142_CHIP_ID 43142 /* 43142 chipcommon chipid */
+#define BCM43143_CHIP_ID 43143 /* 43143 chipcommon chipid */
+#define BCM4324_CHIP_ID 0x4324 /* 4324 chipcommon chipid */
+#define BCM43242_CHIP_ID 43242 /* 43242 chipcommon chipid */
+#define BCM43243_CHIP_ID 43243 /* 43243 chipcommon chipid */
+#define BCM4334_CHIP_ID 0x4334 /* 4334 chipcommon chipid */
+#define BCM4335_CHIP_ID 0x4335 /* 4335 chipcommon chipid */
+#define BCM4339_CHIP_ID 0x4339 /* 4339 chipcommon chipid */
+#define BCM43349_CHIP_ID 43349 /* 43349(0xA955) chipcommon chipid */
+#define BCM4360_CHIP_ID 0x4360 /* 4360 chipcommon chipid */
+#define BCM4352_CHIP_ID 0x4352 /* 4352 chipcommon chipid */
#define BCM43526_CHIP_ID 0xAA06
-#define BCM43340_CHIP_ID 43340
-#define BCM43341_CHIP_ID 43341
-#define BCM43342_CHIP_ID 43342
-#define BCM4350_CHIP_ID 0x4350
-
-#define BCM4342_CHIP_ID 4342
-#define BCM4402_CHIP_ID 0x4402
-#define BCM4704_CHIP_ID 0x4704
-#define BCM4706_CHIP_ID 0x5300
-#define BCM4707_CHIP_ID 53010
-#define BCM53018_CHIP_ID 53018
+#define BCM43340_CHIP_ID 43340 /* 43340 chipcommon chipid */
+#define BCM43341_CHIP_ID 43341 /* 43341 chipcommon chipid */
+#define BCM43342_CHIP_ID 43342 /* 43342 chipcommon chipid */
+#define BCM4350_CHIP_ID 0x4350 /* 4350 chipcommon chipid */
+#define BCM4354_CHIP_ID 0x4354 /* 4354 chipcommon chipid */
+#define BCM4356_CHIP_ID 0x4356 /* 4356 chipcommon chipid */
+#define BCM43556_CHIP_ID 0xAA24 /* 43556 chipcommon chipid */
+#define BCM43558_CHIP_ID 0xAA26 /* 43558 chipcommon chipid */
+#define BCM43566_CHIP_ID 0xAA2E /* 43566 chipcommon chipid */
+#define BCM43567_CHIP_ID 0xAA2F /* 43567 chipcommon chipid */
+#define BCM43568_CHIP_ID 0xAA30 /* 43568 chipcommon chipid */
+#define BCM43569_CHIP_ID 0xAA31 /* 43569 chipcommon chipid */
+#define BCM43570_CHIP_ID 0xAA32 /* 43570 chipcommon chipid */
+#define BCM4358_CHIP_ID 0x4358 /* 4358 chipcommon chipid */
+#define BCM4350_CHIP(chipid) ((CHIPID(chipid) == BCM4350_CHIP_ID) || \
+ (CHIPID(chipid) == BCM4354_CHIP_ID) || \
+ (CHIPID(chipid) == BCM4356_CHIP_ID) || \
+ (CHIPID(chipid) == BCM43556_CHIP_ID) || \
+ (CHIPID(chipid) == BCM43558_CHIP_ID) || \
+ (CHIPID(chipid) == BCM43566_CHIP_ID) || \
+ (CHIPID(chipid) == BCM43567_CHIP_ID) || \
+ (CHIPID(chipid) == BCM43568_CHIP_ID) || \
+ (CHIPID(chipid) == BCM43569_CHIP_ID) || \
+ (CHIPID(chipid) == BCM43570_CHIP_ID) || \
+ (CHIPID(chipid) == BCM4358_CHIP_ID)) /* 4350 variations */
+#define BCM4345_CHIP_ID 0x4345 /* 4345 chipcommon chipid */
+#define BCM43430_CHIP_ID 43430 /* 43430 chipcommon chipid */
+#define BCM4349_CHIP_ID 0x4349 /* 4349 chipcommon chipid */
+#define BCM4355_CHIP_ID 0x4355 /* 4355 chipcommon chipid */
+#define BCM4359_CHIP_ID 0x4359 /* 4359 chipcommon chipid */
+#define BCM4349_CHIP(chipid) ((CHIPID(chipid) == BCM4349_CHIP_ID) || \
+ (CHIPID(chipid) == BCM4355_CHIP_ID) || \
+ (CHIPID(chipid) == BCM4359_CHIP_ID))
+#define BCM4349_CHIP_GRPID BCM4349_CHIP_ID: \
+ case BCM4355_CHIP_ID: \
+ case BCM4359_CHIP_ID
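/* Illustrative sketch (not part of the patch): BCM4350_CHIP()/BCM4349_CHIP() are
 * meant for if-style tests, while BCM4349_CHIP_GRPID is written so that a single
 * "case BCM4349_CHIP_GRPID:" expands into case labels for every member of the
 * family. chip_family() is a hypothetical helper.
 */
static int chip_family(uint32 chipid)
{
	if (BCM4350_CHIP(chipid))
		return 4350;

	switch (CHIPID(chipid)) {
	case BCM4349_CHIP_GRPID:	/* expands to the 4349/4355/4359 case labels */
		return 4349;
	default:
		return 0;
	}
}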
+
+#define BCM43602_CHIP_ID 0xaa52 /* 43602 chipcommon chipid */
+#define BCM43462_CHIP_ID 0xa9c6 /* 43462 chipcommon chipid */
+
+#define BCM4342_CHIP_ID 4342 /* 4342 chipcommon chipid (OTP, RBBU) */
+#define BCM4402_CHIP_ID 0x4402 /* 4402 chipid */
+#define BCM4704_CHIP_ID 0x4704 /* 4704 chipcommon chipid */
+#define BCM4706_CHIP_ID 0x5300 /* 4706 chipcommon chipid */
+#define BCM4707_CHIP_ID 53010 /* 4707 chipcommon chipid */
+#define BCM53018_CHIP_ID 53018 /* 53018 chipcommon chipid */
#define BCM4707_CHIP(chipid) (((chipid) == BCM4707_CHIP_ID) || ((chipid) == BCM53018_CHIP_ID))
-#define BCM4710_CHIP_ID 0x4710
-#define BCM4712_CHIP_ID 0x4712
-#define BCM4716_CHIP_ID 0x4716
-#define BCM47162_CHIP_ID 47162
-#define BCM4748_CHIP_ID 0x4748
-#define BCM4749_CHIP_ID 0x4749
-#define BCM4785_CHIP_ID 0x4785
-#define BCM5350_CHIP_ID 0x5350
-#define BCM5352_CHIP_ID 0x5352
-#define BCM5354_CHIP_ID 0x5354
-#define BCM5365_CHIP_ID 0x5365
-#define BCM5356_CHIP_ID 0x5356
-#define BCM5357_CHIP_ID 0x5357
-#define BCM53572_CHIP_ID 53572
-
-
-#define BCM4303_PKG_ID 2
-#define BCM4309_PKG_ID 1
-#define BCM4712LARGE_PKG_ID 0
-#define BCM4712SMALL_PKG_ID 1
-#define BCM4712MID_PKG_ID 2
-#define BCM4328USBD11G_PKG_ID 2
-#define BCM4328USBDUAL_PKG_ID 3
-#define BCM4328SDIOD11G_PKG_ID 4
-#define BCM4328SDIODUAL_PKG_ID 5
-#define BCM4329_289PIN_PKG_ID 0
-#define BCM4329_182PIN_PKG_ID 1
-#define BCM5354E_PKG_ID 1
-#define BCM4716_PKG_ID 8
-#define BCM4717_PKG_ID 9
-#define BCM4718_PKG_ID 10
-#define BCM5356_PKG_NONMODE 1
-#define BCM5358U_PKG_ID 8
-#define BCM5358_PKG_ID 9
-#define BCM47186_PKG_ID 10
-#define BCM5357_PKG_ID 11
-#define BCM5356U_PKG_ID 12
-#define BCM53572_PKG_ID 8
-#define BCM5357C0_PKG_ID 8
-#define BCM47188_PKG_ID 9
-#define BCM5358C0_PKG_ID 0xa
-#define BCM5356C0_PKG_ID 0xb
-#define BCM4331TT_PKG_ID 8
-#define BCM4331TN_PKG_ID 9
-#define BCM4331TNA0_PKG_ID 0xb
-#define BCM4706L_PKG_ID 1
-
-#define HDLSIM5350_PKG_ID 1
-#define HDLSIM_PKG_ID 14
-#define HWSIM_PKG_ID 15
-#define BCM43224_FAB_CSM 0x8
-#define BCM43224_FAB_SMIC 0xa
+#define BCM4710_CHIP_ID 0x4710 /* 4710 chipid */
+#define BCM4712_CHIP_ID 0x4712 /* 4712 chipcommon chipid */
+#define BCM4716_CHIP_ID 0x4716 /* 4716 chipcommon chipid */
+#define BCM47162_CHIP_ID 47162 /* 47162 chipcommon chipid */
+#define BCM4748_CHIP_ID 0x4748 /* 4716 chipcommon chipid (OTP, RBBU) */
+#define BCM4749_CHIP_ID 0x4749 /* 5357 chipcommon chipid (OTP, RBBU) */
+#define BCM4785_CHIP_ID 0x4785 /* 4785 chipcommon chipid */
+#define BCM5350_CHIP_ID 0x5350 /* 5350 chipcommon chipid */
+#define BCM5352_CHIP_ID 0x5352 /* 5352 chipcommon chipid */
+#define BCM5354_CHIP_ID 0x5354 /* 5354 chipcommon chipid */
+#define BCM5365_CHIP_ID 0x5365 /* 5365 chipcommon chipid */
+#define BCM5356_CHIP_ID 0x5356 /* 5356 chipcommon chipid */
+#define BCM5357_CHIP_ID 0x5357 /* 5357 chipcommon chipid */
+#define BCM53572_CHIP_ID 53572 /* 53572 chipcommon chipid */
+
+/* Package IDs */
+#define BCM4303_PKG_ID 2 /* 4303 package id */
+#define BCM4309_PKG_ID 1 /* 4309 package id */
+#define BCM4712LARGE_PKG_ID 0 /* 340pin 4712 package id */
+#define BCM4712SMALL_PKG_ID 1 /* 200pin 4712 package id */
+#define BCM4712MID_PKG_ID 2 /* 225pin 4712 package id */
+#define BCM4328USBD11G_PKG_ID 2 /* 4328 802.11g USB package id */
+#define BCM4328USBDUAL_PKG_ID 3 /* 4328 802.11a/g USB package id */
+#define BCM4328SDIOD11G_PKG_ID 4 /* 4328 802.11g SDIO package id */
+#define BCM4328SDIODUAL_PKG_ID 5 /* 4328 802.11a/g SDIO package id */
+#define BCM4329_289PIN_PKG_ID 0 /* 4329 289-pin package id */
+#define BCM4329_182PIN_PKG_ID 1 /* 4329N 182-pin package id */
+#define BCM5354E_PKG_ID 1 /* 5354E package id */
+#define BCM4716_PKG_ID 8 /* 4716 package id */
+#define BCM4717_PKG_ID 9 /* 4717 package id */
+#define BCM4718_PKG_ID 10 /* 4718 package id */
+#define BCM5356_PKG_NONMODE 1 /* 5356 package without nmode support */
+#define BCM5358U_PKG_ID 8 /* 5358U package id */
+#define BCM5358_PKG_ID 9 /* 5358 package id */
+#define BCM47186_PKG_ID 10 /* 47186 package id */
+#define BCM5357_PKG_ID 11 /* 5357 package id */
+#define BCM5356U_PKG_ID 12 /* 5356U package id */
+#define BCM53572_PKG_ID 8 /* 53572 package id */
+#define BCM5357C0_PKG_ID 8 /* 5357c0 package id (the same as 53572) */
+#define BCM47188_PKG_ID 9 /* 47188 package id */
+#define BCM5358C0_PKG_ID 0xa /* 5358c0 package id */
+#define BCM5356C0_PKG_ID 0xb /* 5356c0 package id */
+#define BCM4331TT_PKG_ID 8 /* 4331 12x12 package id */
+#define BCM4331TN_PKG_ID 9 /* 4331 12x9 package id */
+#define BCM4331TNA0_PKG_ID 0xb /* 4331 12x9 package id */
+#define BCM4706L_PKG_ID 1 /* 4706L package id */
+
+#define HDLSIM5350_PKG_ID 1 /* HDL simulator package id for a 5350 */
+#define HDLSIM_PKG_ID 14 /* HDL simulator package id */
+#define HWSIM_PKG_ID 15 /* Hardware simulator package id */
+#define BCM43224_FAB_CSM 0x8 /* the chip is manufactured by CSM */
+#define BCM43224_FAB_SMIC 0xa /* the chip is manufactured by SMIC */
#define BCM4336_WLBGA_PKG_ID 0x8
#define BCM4330_WLBGA_PKG_ID 0x0
-#define BCM4314PCIE_ARM_PKG_ID (8 | 0)
-#define BCM4314SDIO_PKG_ID (8 | 1)
-#define BCM4314PCIE_PKG_ID (8 | 2)
-#define BCM4314SDIO_ARM_PKG_ID (8 | 3)
-#define BCM4314SDIO_FPBGA_PKG_ID (8 | 4)
-#define BCM4314DEV_PKG_ID (8 | 6)
-
-#define BCM4707_PKG_ID 1
-#define BCM4708_PKG_ID 2
-#define BCM4709_PKG_ID 0
-
-#define PCIXX21_FLASHMEDIA0_ID 0x8033
-#define PCIXX21_SDIOH0_ID 0x8034
-
-#define BCM4335_WLCSP_PKG_ID (0x0)
-#define BCM4335_FCBGA_PKG_ID (0x1)
-#define BCM4335_WLBGA_PKG_ID (0x2)
-#define BCM4335_FCBGAD_PKG_ID (0x3)
-#define BCM4335_PKG_MASK (0x3)
+#define BCM4314PCIE_ARM_PKG_ID (8 | 0) /* 4314 QFN PCI package id, bit 3 tie high */
+#define BCM4314SDIO_PKG_ID (8 | 1) /* 4314 QFN SDIO package id */
+#define BCM4314PCIE_PKG_ID (8 | 2) /* 4314 QFN PCI (ARM-less) package id */
+#define BCM4314SDIO_ARM_PKG_ID (8 | 3) /* 4314 QFN SDIO (ARM-less) package id */
+#define BCM4314SDIO_FPBGA_PKG_ID (8 | 4) /* 4314 FpBGA SDIO package id */
+#define BCM4314DEV_PKG_ID (8 | 6) /* 4314 Development package id */
+
+#define BCM4707_PKG_ID 1 /* 4707 package id */
+#define BCM4708_PKG_ID 2 /* 4708 package id */
+#define BCM4709_PKG_ID 0 /* 4709 package id */
+#define PCIXX21_FLASHMEDIA0_ID 0x8033 /* TI PCI xx21 Standard Host Controller */
+#define PCIXX21_SDIOH0_ID 0x8034 /* TI PCI xx21 Standard Host Controller */
-#define BFL_BTC2WIRE 0x00000001
-#define BFL_BTCOEX 0x00000001
-#define BFL_PACTRL 0x00000002
-#define BFL_AIRLINEMODE 0x00000004
-#define BFL_ADCDIV 0x00000008
+#define BCM4335_WLCSP_PKG_ID (0x0) /* WLCSP Module/Mobile SDIO/HSIC. */
+#define BCM4335_FCBGA_PKG_ID (0x1) /* FCBGA PC/Embedded/Media PCIE/SDIO */
+#define BCM4335_WLBGA_PKG_ID (0x2) /* WLBGA COB/Mobile SDIO/HSIC. */
+#define BCM4335_FCBGAD_PKG_ID (0x3) /* FCBGA Debug/Dev, all I/Fs. */
+#define BCM4335_PKG_MASK (0x3)
+
+/* boardflags */
+#define BFL_BTC2WIRE 0x00000001 /* old 2wire Bluetooth coexistence, OBSOLETE */
+#define BFL_BTCOEX 0x00000001 /* Board supports BTCOEX */
+#define BFL_PACTRL 0x00000002 /* Board has gpio 9 controlling the PA */
+#define BFL_AIRLINEMODE 0x00000004 /* Board implements gpio 13 radio disable indication, UNUSED */
+#define BFL_ADCDIV 0x00000008 /* Board has the rssi ADC divider */
#define BFL_DIS_256QAM 0x00000008
-#define BFL_ENETROBO 0x00000010
-#define BFL_NOPLLDOWN 0x00000020
-#define BFL_CCKHIPWR 0x00000040
-#define BFL_ENETADM 0x00000080
-#define BFL_ENETVLAN 0x00000100
-#define BFL_LTECOEX 0x00000200
-#define BFL_NOPCI 0x00000400
-#define BFL_FEM 0x00000800
-#define BFL_EXTLNA 0x00001000
-#define BFL_HGPA 0x00002000
-#define BFL_BTC2WIRE_ALTGPIO 0x00004000
-
-#define BFL_ALTIQ 0x00008000
-#define BFL_NOPA 0x00010000
-#define BFL_RSSIINV 0x00020000
-#define BFL_PAREF 0x00040000
-#define BFL_3TSWITCH 0x00080000
-#define BFL_PHASESHIFT 0x00100000
-#define BFL_BUCKBOOST 0x00200000
-#define BFL_FEM_BT 0x00400000
-#define BFL_RXCHAIN_OFF_BT 0x00400000
-#define BFL_NOCBUCK 0x00800000
-#define BFL_CCKFAVOREVM 0x01000000
-#define BFL_PALDO 0x02000000
-#define BFL_LNLDO2_2P5 0x04000000
+#define BFL_ENETROBO 0x00000010 /* Board has robo switch or core */
+#define BFL_TSSIAVG 0x00000010 /* TSSI averaging for ACPHY chips */
+#define BFL_NOPLLDOWN 0x00000020 /* Not ok to power down the chip pll and oscillator */
+#define BFL_CCKHIPWR 0x00000040 /* Can do high-power CCK transmission */
+#define BFL_ENETADM 0x00000080 /* Board has ADMtek switch */
+#define BFL_ENETVLAN 0x00000100 /* Board has VLAN capability */
+#define BFL_LTECOEX 0x00000200 /* LTE Coex enabled */
+#define BFL_NOPCI 0x00000400 /* Board leaves PCI floating */
+#define BFL_FEM 0x00000800 /* Board supports the Front End Module */
+#define BFL_EXTLNA 0x00001000 /* Board has an external LNA in 2.4GHz band */
+#define BFL_HGPA 0x00002000 /* Board has a high gain PA */
+#define BFL_BTC2WIRE_ALTGPIO 0x00004000 /* Board's BTC 2wire is in the alternate gpios */
+#define BFL_ALTIQ 0x00008000 /* Alternate I/Q settings */
+#define BFL_NOPA 0x00010000 /* Board has no PA */
+#define BFL_RSSIINV 0x00020000 /* Board's RSSI uses positive slope (not TSSI) */
+#define BFL_PAREF 0x00040000 /* Board uses the PARef LDO */
+#define BFL_3TSWITCH 0x00080000 /* Board uses a triple throw switch shared with BT */
+#define BFL_PHASESHIFT 0x00100000 /* Board can support phase shifter */
+#define BFL_BUCKBOOST 0x00200000 /* Power topology uses BUCKBOOST */
+#define BFL_FEM_BT 0x00400000 /* Board has FEM and switch to share antenna w/ BT */
+#define BFL_NOCBUCK 0x00800000 /* Power topology doesn't use CBUCK */
+#define BFL_CCKFAVOREVM 0x01000000 /* Favor CCK EVM over spectral mask */
+#define BFL_PALDO 0x02000000 /* Power topology uses PALDO */
+#define BFL_LNLDO2_2P5 0x04000000 /* Select 2.5V as LNLDO2 output voltage */
#define BFL_FASTPWR 0x08000000
-#define BFL_UCPWRCTL_MININDX 0x08000000
-#define BFL_EXTLNA_5GHz 0x10000000
-#define BFL_TRSW_1by2 0x20000000
-#define BFL_GAINBOOSTA01 0x20000000
-#define BFL_LO_TRSW_R_5GHz 0x40000000
-#define BFL_ELNA_GAINDEF 0x80000000
-#define BFL_EXTLNA_TX 0x20000000
-
-
-#define BFL2_RXBB_INT_REG_DIS 0x00000001
-#define BFL2_APLL_WAR 0x00000002
-#define BFL2_TXPWRCTRL_EN 0x00000004
-#define BFL2_2X4_DIV 0x00000008
-#define BFL2_5G_PWRGAIN 0x00000010
-#define BFL2_PCIEWAR_OVR 0x00000020
-#define BFL2_CAESERS_BRD 0x00000040
-#define BFL2_BTC3WIRE 0x00000080
-#define BFL2_BTCLEGACY 0x00000080
-#define BFL2_SKWRKFEM_BRD 0x00000100
-#define BFL2_SPUR_WAR 0x00000200
-#define BFL2_GPLL_WAR 0x00000400
-#define BFL2_TRISTATE_LED 0x00000800
-#define BFL2_SINGLEANT_CCK 0x00001000
-#define BFL2_2G_SPUR_WAR 0x00002000
-#define BFL2_BPHY_ALL_TXCORES 0x00004000
-#define BFL2_FCC_BANDEDGE_WAR 0x00008000
-#define BFL2_GPLL_WAR2 0x00010000
+#define BFL_UCPWRCTL_MININDX 0x08000000 /* Enforce min power index to avoid FEM damage */
+#define BFL_EXTLNA_5GHz 0x10000000 /* Board has an external LNA in 5GHz band */
+#define BFL_TRSW_1by2 0x20000000 /* Board has 2 TRSW's in 1by2 designs */
+#define BFL_GAINBOOSTA01 0x20000000 /* 5g Gainboost for core0 and core1 */
+#define BFL_LO_TRSW_R_5GHz 0x40000000 /* In 5G do not throw TRSW to T for clipLO gain */
+#define BFL_ELNA_GAINDEF 0x80000000 /* Backoff InitGain based on elna_2g/5g field
+ * when this flag is set
+ */
+#define BFL_EXTLNA_TX 0x20000000 /* Temp boardflag to indicate to */
+
+/* boardflags2 */
+#define BFL2_RXBB_INT_REG_DIS 0x00000001 /* Board has an external rxbb regulator */
+#define BFL2_APLL_WAR 0x00000002 /* Flag to implement alternative A-band PLL settings */
+#define BFL2_TXPWRCTRL_EN 0x00000004 /* Board permits enabling TX Power Control */
+#define BFL2_2X4_DIV 0x00000008 /* Board supports the 2X4 diversity switch */
+#define BFL2_5G_PWRGAIN 0x00000010 /* Board supports 5G band power gain */
+#define BFL2_PCIEWAR_OVR 0x00000020 /* Board overrides ASPM and Clkreq settings */
+#define BFL2_CAESERS_BRD 0x00000040 /* Board is Caesers brd (unused by sw) */
+#define BFL2_BTC3WIRE 0x00000080 /* Board supports legacy 3 wire or 4 wire */
+#define BFL2_BTCLEGACY 0x00000080 /* Board supports legacy 3/4 wire, to replace
+ * BFL2_BTC3WIRE
+ */
+#define BFL2_SKWRKFEM_BRD 0x00000100 /* 4321mcm93 board uses Skyworks FEM */
+#define BFL2_SPUR_WAR 0x00000200 /* Board has a WAR for clock-harmonic spurs */
+#define BFL2_GPLL_WAR 0x00000400 /* Flag to narrow G-band PLL loop b/w */
+#define BFL2_TRISTATE_LED 0x00000800 /* Tri-state the LED */
+#define BFL2_SINGLEANT_CCK 0x00001000 /* Tx CCK pkts on Ant 0 only */
+#define BFL2_2G_SPUR_WAR 0x00002000 /* WAR to reduce and avoid clock-harmonic spurs in 2G */
+#define BFL2_BPHY_ALL_TXCORES 0x00004000 /* Transmit bphy frames using all tx cores */
+#define BFL2_FCC_BANDEDGE_WAR 0x00008000 /* Activates WAR to improve FCC bandedge performance */
+#define BFL2_DAC_SPUR_IMPROVEMENT 0x00008000 /* Reducing DAC Spurs */
+#define BFL2_GPLL_WAR2 0x00010000 /* Flag to widen G-band PLL loop b/w */
+#define BFL2_REDUCED_PA_TURNONTIME 0x00010000 /* Flag to reduce PA turn on Time */
#define BFL2_IPALVLSHIFT_3P3 0x00020000
-#define BFL2_INTERNDET_TXIQCAL 0x00040000
-#define BFL2_XTALBUFOUTEN 0x00080000
-
-
-
-#define BFL2_ANAPACTRL_2G 0x00100000
-#define BFL2_ANAPACTRL_5G 0x00200000
-#define BFL2_ELNACTRL_TRSW_2G 0x00400000
-#define BFL2_BT_SHARE_ANT0 0x00800000
-#define BFL2_BT_SHARE_BM_BIT0 0x00800000
-#define BFL2_TEMPSENSE_HIGHER 0x01000000
-#define BFL2_BTC3WIREONLY 0x02000000
-#define BFL2_PWR_NOMINAL 0x04000000
-#define BFL2_EXTLNA_PWRSAVE 0x08000000
-
+#define BFL2_INTERNDET_TXIQCAL 0x00040000 /* Use internal envelope detector for TX IQCAL */
+#define BFL2_XTALBUFOUTEN 0x00080000 /* Keep the buffered Xtal output from radio on */
+ /* Most drivers will turn it off without this flag */
+ /* to save power. */
+
+#define BFL2_ANAPACTRL_2G 0x00100000 /* 2G ext PAs are controlled by analog PA ctrl lines */
+#define BFL2_ANAPACTRL_5G 0x00200000 /* 5G ext PAs are controlled by analog PA ctrl lines */
+#define BFL2_ELNACTRL_TRSW_2G 0x00400000 /* AZW4329: 2G gmode_elna_gain controls TR Switch */
+#define BFL2_BT_SHARE_ANT0 0x00800000 /* share core0 antenna with BT */
+#define BFL2_TEMPSENSE_HIGHER 0x01000000 /* The tempsense threshold can sustain a higher value
+ * than programmed. The exact delta is decided by
+ * driver per chip/boardtype. This can be used
+ * when tempsense qualification happens after shipment
+ */
+#define BFL2_BTC3WIREONLY 0x02000000 /* standard 3 wire btc only. 4 wire not supported */
+#define BFL2_PWR_NOMINAL 0x04000000 /* 0: power reduction on, 1: no power reduction */
+#define BFL2_EXTLNA_PWRSAVE 0x08000000 /* boardflag to enable ucode to apply power save */
+ /* ucode control of eLNA during Tx */
#define BFL2_4313_RADIOREG 0x10000000
-
-#define BFL2_DYNAMIC_VMID 0x10000000
-#define BFL2_SDR_EN 0x20000000
-#define BFL2_LNA1BYPFORTR2G 0x40000000
-#define BFL2_LNA1BYPFORTR5G 0x80000000
-
-
-#define BFL_SROM11_BTCOEX 0x00000001
-#define BFL_SROM11_WLAN_BT_SH_XTL 0x00000002
-#define BFL_SROM11_EXTLNA 0x00001000
-#define BFL_SROM11_EXTLNA_5GHz 0x10000000
-#define BFL_SROM11_GAINBOOSTA01 0x20000000
-#define BFL2_SROM11_APLL_WAR 0x00000002
-#define BFL2_SROM11_ANAPACTRL_2G 0x00100000
-#define BFL2_SROM11_ANAPACTRL_5G 0x00200000
-
-
-#define BFL3_FEMCTRL_SUB 0x00000007
-#define BFL3_RCAL_WAR 0x00000008
-#define BFL3_TXGAINTBLID 0x00000070
-#define BFL3_TXGAINTBLID_SHIFT 0x4
-#define BFL3_TSSI_DIV_WAR 0x00000080
-#define BFL3_TSSI_DIV_WAR_SHIFT 0x7
-#define BFL3_FEMTBL_FROM_NVRAM 0x00000100
-#define BFL3_FEMTBL_FROM_NVRAM_SHIFT 0x8
-#define BFL3_AGC_CFG_2G 0x00000200
-#define BFL3_AGC_CFG_5G 0x00000400
-#define BFL3_PPR_BIT_EXT 0x00000800
-#define BFL3_PPR_BIT_EXT_SHIFT 11
-#define BFL3_BBPLL_SPR_MODE_DIS 0x00001000
-#define BFL3_RCAL_OTP_VAL_EN 0x00002000
-#define BFL3_2GTXGAINTBL_BLANK 0x00004000
-#define BFL3_2GTXGAINTBL_BLANK_SHIFT 14
-#define BFL3_5GTXGAINTBL_BLANK 0x00008000
-#define BFL3_5GTXGAINTBL_BLANK_SHIFT 15
-#define BFL3_BT_SHARE_BM_BIT1 0x40000000
-#define BFL3_PHASETRACK_MAX_ALPHABETA 0x00010000
-#define BFL3_PHASETRACK_MAX_ALPHABETA_SHIFT 16
-#define BFL3_BT_SHARE_BM_BIT1 0x40000000
-#define BFL3_EN_NONBRCM_TXBF 0x10000000
-#define BFL3_EN_P2PLINK_TXBF 0x20000000
-
-
-#define BOARD_GPIO_BTC3W_IN 0x850
-#define BOARD_GPIO_BTC3W_OUT 0x020
-#define BOARD_GPIO_BTCMOD_IN 0x010
-#define BOARD_GPIO_BTCMOD_OUT 0x020
-#define BOARD_GPIO_BTC_IN 0x080
-#define BOARD_GPIO_BTC_OUT 0x100
-#define BOARD_GPIO_PACTRL 0x200
-#define BOARD_GPIO_12 0x1000
-#define BOARD_GPIO_13 0x2000
-#define BOARD_GPIO_BTC4_IN 0x0800
-#define BOARD_GPIO_BTC4_BT 0x2000
-#define BOARD_GPIO_BTC4_STAT 0x4000
-#define BOARD_GPIO_BTC4_WLAN 0x8000
-#define BOARD_GPIO_1_WLAN_PWR 0x02
-#define BOARD_GPIO_3_WLAN_PWR 0x08
-#define BOARD_GPIO_4_WLAN_PWR 0x10
-
-#define GPIO_BTC4W_OUT_4312 0x010
-#define GPIO_BTC4W_OUT_43224 0x020
-#define GPIO_BTC4W_OUT_43224_SHARED 0x0e0
-#define GPIO_BTC4W_OUT_43225 0x0e0
-#define GPIO_BTC4W_OUT_43421 0x020
-#define GPIO_BTC4W_OUT_4313 0x060
-#define GPIO_BTC4W_OUT_4331_SHARED 0x010
-
-#define PCI_CFG_GPIO_SCS 0x10
-#define PCI_CFG_GPIO_HWRAD 0x20
-#define PCI_CFG_GPIO_XTAL 0x40
-#define PCI_CFG_GPIO_PLL 0x80
-
-
-#define PLL_DELAY 150
-#define FREF_DELAY 200
-#define MIN_SLOW_CLK 32
-#define XTAL_ON_DELAY 1000
+ /* board rework */
+#define BFL2_DYNAMIC_VMID 0x10000000 /* enable dynamic Vmid in idle TSSI CAL for 4331 */
+#define BFL2_SDR_EN 0x20000000 /* SDR enabled or disabled */
+#define BFL2_DYNAMIC_VMID 0x10000000 /* boardflag to enable dynamic Vmid idle TSSI CAL */
+#define BFL2_LNA1BYPFORTR2G 0x40000000 /* acphy, enable lna1 bypass for clip gain, 2g */
+#define BFL2_LNA1BYPFORTR5G 0x80000000 /* acphy, enable lna1 bypass for clip gain, 5g */
+/* SROM 11 - 11ac boardflag definitions */
+#define BFL_SROM11_BTCOEX 0x00000001 /* Board supports BTCOEX */
+#define BFL_SROM11_WLAN_BT_SH_XTL 0x00000002 /* bluetooth and wlan share same crystal */
+#define BFL_SROM11_EXTLNA 0x00001000 /* Board has an external LNA in 2.4GHz band */
+#define BFL_SROM11_EPA_TURNON_TIME 0x00018000 /* 2 bits for different PA turn on times */
+#define BFL_SROM11_EPA_TURNON_TIME_SHIFT 15
+#define BFL_SROM11_EXTLNA_5GHz 0x10000000 /* Board has an external LNA in 5GHz band */
+#define BFL_SROM11_GAINBOOSTA01 0x20000000 /* 5g Gainboost for core0 and core1 */
+#define BFL2_SROM11_APLL_WAR 0x00000002 /* Flag to implement alternative A-band PLL settings */
+#define BFL2_SROM11_ANAPACTRL_2G 0x00100000 /* 2G ext PAs are ctrl-ed by analog PA ctrl lines */
+#define BFL2_SROM11_ANAPACTRL_5G 0x00200000 /* 5G ext PAs are ctrl-ed by analog PA ctrl lines */
+#define BFL2_SROM11_SINGLEANT_CCK 0x00001000 /* Tx CCK pkts on Ant 0 only */
-#define BCM943341WLABGS_SSID 0x062d
+/* boardflags3 */
+#define BFL3_FEMCTRL_SUB 0x00000007 /* acphy, subrevs of femctrl on top of srom_femctrl */
+#define BFL3_RCAL_WAR 0x00000008 /* acphy, rcal war active on this board (4335a0) */
+#define BFL3_TXGAINTBLID 0x00000070 /* acphy, txgain table id */
+#define BFL3_TXGAINTBLID_SHIFT 0x4 /* acphy, txgain table id shift bit */
+#define BFL3_TSSI_DIV_WAR 0x00000080 /* acphy, Separate paparam for 20/40/80 */
+#define BFL3_TSSI_DIV_WAR_SHIFT 0x7 /* acphy, Separate paparam for 20/40/80 shift bit */
+#define BFL3_FEMTBL_FROM_NVRAM 0x00000100 /* acphy, femctrl table is read from nvram */
+#define BFL3_FEMTBL_FROM_NVRAM_SHIFT 0x8 /* acphy, femctrl table is read from nvram */
+#define BFL3_AGC_CFG_2G 0x00000200 /* acphy, gain control configuration for 2G */
+#define BFL3_AGC_CFG_5G 0x00000400 /* acphy, gain control configuration for 5G */
+#define BFL3_PPR_BIT_EXT 0x00000800 /* acphy, bit position for 1bit extension for ppr */
+#define BFL3_PPR_BIT_EXT_SHIFT 11 /* acphy, bit shift for 1bit extension for ppr */
+#define BFL3_BBPLL_SPR_MODE_DIS 0x00001000 /* acphy, disables bbpll spur modes */
+#define BFL3_RCAL_OTP_VAL_EN 0x00002000 /* acphy, to read rcal_trim value from otp */
+#define BFL3_2GTXGAINTBL_BLANK 0x00004000 /* acphy, blank the first X ticks of 2g gaintbl */
+#define BFL3_2GTXGAINTBL_BLANK_SHIFT 14 /* acphy, blank the first X ticks of 2g gaintbl */
+#define BFL3_5GTXGAINTBL_BLANK 0x00008000 /* acphy, blank the first X ticks of 5g gaintbl */
+#define BFL3_5GTXGAINTBL_BLANK_SHIFT 15 /* acphy, blank the first X ticks of 5g gaintbl */
+#define BFL3_PHASETRACK_MAX_ALPHABETA 0x00010000 /* acphy, to max out alpha,beta to 511 */
+#define BFL3_PHASETRACK_MAX_ALPHABETA_SHIFT 16 /* acphy, to max out alpha,beta to 511 */
+/* acphy, to use backed off gaintbl for lte-coex */
+#define BFL3_LTECOEX_GAINTBL_EN 0x00060000
+/* acphy, to use backed off gaintbl for lte-coex */
+#define BFL3_LTECOEX_GAINTBL_EN_SHIFT 17
+#define BFL3_5G_SPUR_WAR 0x00080000 /* acphy, enable spur WAR in 5G band */
+#define BFL3_1X1_RSDB_ANT 0x01000000 /* to find if 2-ant RSDB board or 1-ant RSDB board */
+#define BFL3_1X1_RSDB_ANT_SHIFT 24
+
+/* acphy: lpmode2g and lpmode_5g related boardflags */
+#define BFL3_ACPHY_LPMODE_2G 0x00300000 /* bits 20:21 for lpmode_2g choice */
+#define BFL3_ACPHY_LPMODE_2G_SHIFT 20
+
+#define BFL3_ACPHY_LPMODE_5G 0x00C00000 /* bits 22:23 for lpmode_5g choice */
+#define BFL3_ACPHY_LPMODE_5G_SHIFT 22
+
+#define BFL3_EXT_LPO_ISCLOCK 0x02000000 /* External LPO is clock, not x-tal */
+#define BFL3_FORCE_INT_LPO_SEL 0x04000000 /* Force internal lpo */
+#define BFL3_FORCE_EXT_LPO_SEL 0x08000000 /* Force external lpo */
+
+#define BFL3_EN_BRCM_IMPBF 0x10000000 /* acphy, Allow BRCM Implicit TxBF */
+#define BFL3_AVVMID_FROM_NVRAM 0x40000000 /* Read Av Vmid from NVRAM */
+#define BFL3_VLIN_EN_FROM_NVRAM 0x80000000 /* Read Vlin En from NVRAM */
+
+#define BFL3_AVVMID_FROM_NVRAM_SHIFT 30 /* Read Av Vmid from NVRAM */
+#define BFL3_VLIN_EN_FROM_NVRAM_SHIFT 31 /* Enable Vlin from NVRAM */
+
+
+/* board specific GPIO assignment, gpio 0-3 are also customer-configurable led */
+#define BOARD_GPIO_BTC3W_IN 0x850 /* bit 4 is RF_ACTIVE, bit 6 is STATUS, bit 11 is PRI */
+#define BOARD_GPIO_BTC3W_OUT 0x020 /* bit 5 is TX_CONF */
+#define BOARD_GPIO_BTCMOD_IN 0x010 /* bit 4 is the alternate BT Coexistence Input */
+#define BOARD_GPIO_BTCMOD_OUT 0x020 /* bit 5 is the alternate BT Coexistence Out */
+#define BOARD_GPIO_BTC_IN 0x080 /* bit 7 is BT Coexistence Input */
+#define BOARD_GPIO_BTC_OUT 0x100 /* bit 8 is BT Coexistence Out */
+#define BOARD_GPIO_PACTRL 0x200 /* bit 9 controls the PA on new 4306 boards */
+#define BOARD_GPIO_12 0x1000 /* gpio 12 */
+#define BOARD_GPIO_13 0x2000 /* gpio 13 */
+#define BOARD_GPIO_BTC4_IN 0x0800 /* gpio 11, coex4, in */
+#define BOARD_GPIO_BTC4_BT 0x2000 /* gpio 12, coex4, bt active */
+#define BOARD_GPIO_BTC4_STAT 0x4000 /* gpio 14, coex4, status */
+#define BOARD_GPIO_BTC4_WLAN 0x8000 /* gpio 15, coex4, wlan active */
+#define BOARD_GPIO_1_WLAN_PWR 0x02 /* throttle WLAN power on X21 board */
+#define BOARD_GPIO_2_WLAN_PWR 0x04 /* throttle WLAN power on X29C board */
+#define BOARD_GPIO_3_WLAN_PWR 0x08 /* throttle WLAN power on X28 board */
+#define BOARD_GPIO_4_WLAN_PWR 0x10 /* throttle WLAN power on X19 board */
+
+#define GPIO_BTC4W_OUT_4312 0x010 /* bit 4 is BT_IODISABLE */
+#define GPIO_BTC4W_OUT_43224 0x020 /* bit 5 is BT_IODISABLE */
+#define GPIO_BTC4W_OUT_43224_SHARED 0x0e0 /* bit 5 is BT_IODISABLE */
+#define GPIO_BTC4W_OUT_43225 0x0e0 /* bit 5 BT_IODISABLE, bit 6 SW_BT, bit 7 SW_WL */
+#define GPIO_BTC4W_OUT_43421 0x020 /* bit 5 is BT_IODISABLE */
+#define GPIO_BTC4W_OUT_4313 0x060 /* bit 5 SW_BT, bit 6 SW_WL */
+#define GPIO_BTC4W_OUT_4331_SHARED 0x010 /* GPIO 4 */
+
+#define PCI_CFG_GPIO_SCS 0x10 /* PCI config space bit 4 for 4306c0 slow clock source */
+#define PCI_CFG_GPIO_HWRAD 0x20 /* PCI config space GPIO 13 for hw radio disable */
+#define PCI_CFG_GPIO_XTAL 0x40 /* PCI config space GPIO 14 for Xtal power-up */
+#define PCI_CFG_GPIO_PLL 0x80 /* PCI config space GPIO 15 for PLL power-down */
+
+/* power control defines */
+#define PLL_DELAY 150 /* us pll on delay */
+#define FREF_DELAY 200 /* us fref change delay */
+#define MIN_SLOW_CLK 32 /* us Slow clock period */
+#define XTAL_ON_DELAY 1000 /* us crystal power-on delay */
+
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+/* Reference Board Types */
+#define BU4710_BOARD 0x0400
+#define VSIM4710_BOARD 0x0401
+#define QT4710_BOARD 0x0402
+
+#define BU4309_BOARD 0x040a
+#define BCM94309CB_BOARD 0x040b
+#define BCM94309MP_BOARD 0x040c
+#define BCM4309AP_BOARD 0x040d
+
+#define BCM94302MP_BOARD 0x040e
+
+#define BU4306_BOARD 0x0416
+#define BCM94306CB_BOARD 0x0417
+#define BCM94306MP_BOARD 0x0418
+
+#define BCM94710D_BOARD 0x041a
+#define BCM94710R1_BOARD 0x041b
+#define BCM94710R4_BOARD 0x041c
+#define BCM94710AP_BOARD 0x041d
+
+#define BU2050_BOARD 0x041f
+
+#define BCM94306P50_BOARD 0x0420
+
+#define BCM94309G_BOARD 0x0421
+
+#define BU4704_BOARD 0x0423
+#define BU4702_BOARD 0x0424
+
+#define BCM94306PC_BOARD 0x0425 /* pcmcia 3.3v 4306 card */
+
+#define MPSG4306_BOARD 0x0427
+
+#define BCM94702MN_BOARD 0x0428
+
+/* BCM4702 1U CompactPCI Board */
+#define BCM94702CPCI_BOARD 0x0429
+
+/* BCM4702 with BCM95380 VLAN Router */
+#define BCM95380RR_BOARD 0x042a
+
+/* cb4306 with SiGe PA */
+#define BCM94306CBSG_BOARD 0x042b
+
+/* cb4306 with SiGe PA */
+#define PCSG94306_BOARD 0x042d
+
+/* bu4704 with sdram */
+#define BU4704SD_BOARD 0x042e
+
+/* Dual 11a/11g Router */
+#define BCM94704AGR_BOARD 0x042f
+
+/* 11a-only minipci */
+#define BCM94308MP_BOARD 0x0430
+
+/* 4306/gprs combo */
+#define BCM94306GPRS_BOARD 0x0432
+
+/* BCM5365/BCM4704 FPGA Bringup Board */
+#define BU5365_FPGA_BOARD 0x0433
+
+#define BU4712_BOARD 0x0444
+#define BU4712SD_BOARD 0x045d
+#define BU4712L_BOARD 0x045f
+/* BCM4712 boards */
+#define BCM94712AP_BOARD 0x0445
+#define BCM94712P_BOARD 0x0446
+/* BCM4318 boards */
+#define BU4318_BOARD 0x0447
+#define CB4318_BOARD 0x0448
+#define MPG4318_BOARD 0x0449
+#define MP4318_BOARD 0x044a
+#define SD4318_BOARD 0x044b
+
+/* BCM4313 boards */
+#define BCM94313BU_BOARD 0x050f
+#define BCM94313HM_BOARD 0x0510
+#define BCM94313EPA_BOARD 0x0511
+#define BCM94313HMG_BOARD 0x051C
+
+/* BCM63XX boards */
+#define BCM96338_BOARD 0x6338
+#define BCM96348_BOARD 0x6348
+#define BCM96358_BOARD 0x6358
+#define BCM96368_BOARD 0x6368
+
+/* Another mp4306 with SiGe */
+#define BCM94306P_BOARD 0x044c
+
+/* mp4303 */
+#define BCM94303MP_BOARD 0x044e
+
+/* mpsgh4306 */
+#define BCM94306MPSGH_BOARD 0x044f
+
+/* BRCM 4306 w/ Front End Modules */
+#define BCM94306MPM 0x0450
+#define BCM94306MPL 0x0453
+
+/* 4712agr */
+#define BCM94712AGR_BOARD 0x0451
+
+/* pcmcia 4303 */
+#define PC4303_BOARD 0x0454
+
+/* 5350K */
+#define BCM95350K_BOARD 0x0455
+
+/* 5350R */
+#define BCM95350R_BOARD 0x0456
+
+/* 4306mplna */
+#define BCM94306MPLNA_BOARD 0x0457
+
+/* 4320 boards */
+#define BU4320_BOARD 0x0458
+#define BU4320S_BOARD 0x0459
+#define BCM94320PH_BOARD 0x045a
+
+/* 4306mph */
+#define BCM94306MPH_BOARD 0x045b
+
+/* 4306pciv */
+#define BCM94306PCIV_BOARD 0x045c
+
+#define BU4712SD_BOARD 0x045d
+
+#define BCM94320PFLSH_BOARD 0x045e
+
+#define BU4712L_BOARD 0x045f
+#define BCM94712LGR_BOARD 0x0460
+#define BCM94320R_BOARD 0x0461
+
+#define BU5352_BOARD 0x0462
+
+#define BCM94318MPGH_BOARD 0x0463
+
+#define BU4311_BOARD 0x0464
+#define BCM94311MC_BOARD 0x0465
+#define BCM94311MCAG_BOARD 0x0466
+
+#define BCM95352GR_BOARD 0x0467
+
+/* bcm95351agr */
+#define BCM95351AGR_BOARD 0x0470
+
+/* bcm94704mpcb */
+#define BCM94704MPCB_BOARD 0x0472
+
+/* 4785 boards */
+#define BU4785_BOARD 0x0478
+
+/* 4321 boards */
+#define BU4321_BOARD 0x046b
+#define BU4321E_BOARD 0x047c
+#define MP4321_BOARD 0x046c
+#define CB2_4321_BOARD 0x046d
+#define CB2_4321_AG_BOARD 0x0066
+#define MC4321_BOARD 0x046e
+
+/* 4328 boards */
+#define BU4328_BOARD 0x0481
+#define BCM4328SDG_BOARD 0x0482
+#define BCM4328SDAG_BOARD 0x0483
+#define BCM4328UG_BOARD 0x0484
+#define BCM4328UAG_BOARD 0x0485
+#define BCM4328PC_BOARD 0x0486
+#define BCM4328CF_BOARD 0x0487
+
+/* 4325 boards */
+#define BCM94325DEVBU_BOARD 0x0490
+#define BCM94325BGABU_BOARD 0x0491
+
+#define BCM94325SDGWB_BOARD 0x0492
+
+#define BCM94325SDGMDL_BOARD 0x04aa
+#define BCM94325SDGMDL2_BOARD 0x04c6
+#define BCM94325SDGMDL3_BOARD 0x04c9
+
+#define BCM94325SDABGWBA_BOARD 0x04e1
+
+/* 4322 boards */
+#define BCM94322MC_SSID 0x04a4
+#define BCM94322USB_SSID 0x04a8 /* dualband */
+#define BCM94322HM_SSID 0x04b0
+#define BCM94322USB2D_SSID 0x04bf /* single band discrete front end */
+
+/* 4312 boards */
+#define BCM4312MCGSG_BOARD 0x04b5
+
+/* 4315 boards */
+#define BCM94315DEVBU_SSID 0x04c2
+#define BCM94315USBGP_SSID 0x04c7
+#define BCM94315BGABU_SSID 0x04ca
+#define BCM94315USBGP41_SSID 0x04cb
+
+/* 4319 boards */
+#define BCM94319DEVBU_SSID 0X04e5
+#define BCM94319USB_SSID 0X04e6
+#define BCM94319SD_SSID 0X04e7
+
+/* 4716 boards */
+#define BCM94716NR2_SSID 0x04cd
+
+/* 4319 boards */
+#define BCM94319DEVBU_SSID 0X04e5
+#define BCM94319USBNP4L_SSID 0X04e6
+#define BCM94319WLUSBN4L_SSID 0X04e7
+#define BCM94319SDG_SSID 0X04ea
+#define BCM94319LCUSBSDN4L_SSID 0X04eb
+#define BCM94319USBB_SSID 0x04ee
+#define BCM94319LCSDN4L_SSID 0X0507
+#define BCM94319LSUSBN4L_SSID 0X0508
+#define BCM94319SDNA4L_SSID 0X0517
+#define BCM94319SDELNA4L_SSID 0X0518
+#define BCM94319SDELNA6L_SSID 0X0539
+#define BCM94319ARCADYAN_SSID 0X0546
+#define BCM94319WINDSOR_SSID 0x0561
+#define BCM94319MLAP_SSID 0x0562
+#define BCM94319SDNA_SSID 0x058b
+#define BCM94319BHEMU3_SSID 0x0563
+#define BCM94319SDHMB_SSID 0x058c
+#define BCM94319SDBREF_SSID 0x05a1
+#define BCM94319USBSDB_SSID 0x05a2
+
+
+/* 4329 boards */
+#define BCM94329AGB_SSID 0X04b9
+#define BCM94329TDKMDL1_SSID 0X04ba
+#define BCM94329TDKMDL11_SSID 0X04fc
+#define BCM94329OLYMPICN18_SSID 0X04fd
+#define BCM94329OLYMPICN90_SSID 0X04fe
+#define BCM94329OLYMPICN90U_SSID 0X050c
+#define BCM94329OLYMPICN90M_SSID 0X050b
+#define BCM94329AGBF_SSID 0X04ff
+#define BCM94329OLYMPICX17_SSID 0X0504
+#define BCM94329OLYMPICX17M_SSID 0X050a
+#define BCM94329OLYMPICX17U_SSID 0X0509
+#define BCM94329OLYMPICUNO_SSID 0X0564
+#define BCM94329MOTOROLA_SSID 0X0565
+#define BCM94329OLYMPICLOCO_SSID 0X0568
+/* 4336 SDIO board types */
+#define BCM94336SD_WLBGABU_SSID 0x0511
+#define BCM94336SD_WLBGAREF_SSID 0x0519
+#define BCM94336SDGP_SSID 0x0538
+#define BCM94336SDG_SSID 0x0519
+#define BCM94336SDGN_SSID 0x0538
+#define BCM94336SDGFC_SSID 0x056B
+
+/* 4330 SDIO board types */
+#define BCM94330SDG_SSID 0x0528
+#define BCM94330SD_FCBGABU_SSID 0x052e
+#define BCM94330SD_WLBGABU_SSID 0x052f
+#define BCM94330SD_FCBGA_SSID 0x0530
+#define BCM94330FCSDAGB_SSID 0x0532
+#define BCM94330OLYMPICAMG_SSID 0x0549
+#define BCM94330OLYMPICAMGEPA_SSID 0x054F
+#define BCM94330OLYMPICUNO3_SSID 0x0551
+#define BCM94330WLSDAGB_SSID 0x0547
+#define BCM94330CSPSDAGBB_SSID 0x054A
+
+/* 43224 boards */
+#define BCM943224X21 0x056e
+#define BCM943224X21_FCC 0x00d1
+#define BCM943224X21B 0x00e9
+#define BCM943224M93 0x008b
+#define BCM943224M93A 0x0090
+#define BCM943224X16 0x0093
+#define BCM94322X9 0x008d
+#define BCM94322M35e 0x008e
+
+/* 43228 Boards */
+#define BCM943228BU8_SSID 0x0540
+#define BCM943228BU9_SSID 0x0541
+#define BCM943228BU_SSID 0x0542
+#define BCM943227HM4L_SSID 0x0543
+#define BCM943227HMB_SSID 0x0544
+#define BCM943228HM4L_SSID 0x0545
+#define BCM943228SD_SSID 0x0573
+
+/* 43239 Boards */
+#define BCM943239MOD_SSID 0x05ac
+#define BCM943239REF_SSID 0x05aa
+
+/* 4331 boards */
+#define BCM94331X19 0x00D6 /* X19B */
+#define BCM94331X28 0x00E4 /* X28 */
+#define BCM94331X28B 0x010E /* X28B */
+#define BCM94331PCIEBT3Ax_SSID BCM94331X28
+#define BCM94331X12_2G_SSID 0x00EC /* X12 2G */
+#define BCM94331X12_5G_SSID 0x00ED /* X12 5G */
+#define BCM94331X29B 0x00EF /* X29B */
+#define BCM94331X29D 0x010F /* X29D */
+#define BCM94331CSAX_SSID BCM94331X29B
+#define BCM94331X19C 0x00F5 /* X19C */
+#define BCM94331X33 0x00F4 /* X33 */
+#define BCM94331BU_SSID 0x0523
+#define BCM94331S9BU_SSID 0x0524
+#define BCM94331MC_SSID 0x0525
+#define BCM94331MCI_SSID 0x0526
+#define BCM94331PCIEBT4_SSID 0x0527
+#define BCM94331HM_SSID 0x0574
+#define BCM94331PCIEDUAL_SSID 0x059B
+#define BCM94331MCH5_SSID 0x05A9
+#define BCM94331CS_SSID 0x05C6
+#define BCM94331CD_SSID 0x05DA
+
+/* 4314 Boards */
+#define BCM94314BU_SSID 0x05b1
+
+/* 53572 Boards */
+#define BCM953572BU_SSID 0x058D
+#define BCM953572NR2_SSID 0x058E
+#define BCM947188NR2_SSID 0x058F
+#define BCM953572SDRNR2_SSID 0x0590
+
+/* 43236 boards */
+#define BCM943236OLYMPICSULLEY_SSID 0x594
+#define BCM943236PREPROTOBLU2O3_SSID 0x5b9
+#define BCM943236USBELNA_SSID 0x5f8
+
+/* 4314 Boards */
+#define BCM94314BUSDIO_SSID 0x05c8
+#define BCM94314BGABU_SSID 0x05c9
+#define BCM94314HMEPA_SSID 0x05ca
+#define BCM94314HMEPABK_SSID 0x05cb
+#define BCM94314SUHMEPA_SSID 0x05cc
+#define BCM94314SUHM_SSID 0x05cd
+#define BCM94314HM_SSID 0x05d1
+
+/* 4334 Boards */
+#define BCM94334FCAGBI_SSID 0x05df
+#define BCM94334WLAGBI_SSID 0x05dd
+
+/* 4335 Boards */
+#define BCM94335X52 0x0114
+
+/* 4345 Boards */
+#define BCM94345_SSID 0x0687
+
+/* 4360 Boards */
+#define BCM94360X52C 0X0117
+#define BCM94360X52D 0X0137
+#define BCM94360X29C 0X0112
+#define BCM94360X29CP2 0X0134
+#define BCM94360X29CP3 0X013B
+#define BCM94360X51 0x0111
+#define BCM94360X51P2 0x0129
+#define BCM94360X51P3 0x0142
+#define BCM94360X51A 0x0135
+#define BCM94360X51B 0x0136
+#define BCM94360CS 0x061B
+#define BCM94360J28_D11AC2G 0x0c00
+#define BCM94360J28_D11AC5G 0x0c01
+#define BCM94360USBH5_D11AC5G 0x06aa
+#define BCM94360MCM5 0x06d8
+
+/* 4350 Boards */
+#define BCM94350X52B 0X0116
+#define BCM94350X14 0X0131
+
+/* 43217 Boards */
+#define BCM943217BU_SSID 0x05d5
+#define BCM943217HM2L_SSID 0x05d6
+#define BCM943217HMITR2L_SSID 0x05d7
+
+/* 43142 Boards */
+#define BCM943142HM_SSID 0x05e0
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+
+/* 43341 Boards */
+#define BCM943341WLABGS_SSID 0x062d
+
+/* 43342 Boards */
#define BCM943342FCAGBI_SSID 0x0641
+/* 43602 Boards, unclear yet what boards will be created. */
+#define BCM943602RSVD1_SSID 0x06a5
+#define BCM943602RSVD2_SSID 0x06a6
+#define BCM943602X87 0X0133
+#define BCM943602X238 0X0132
+/* # of GPIO pins */
#define GPIO_NUMPINS 32
-
+/* These values are used by the dhd host driver. */
#define RDL_RAM_BASE_4319 0x60000000
#define RDL_RAM_BASE_4329 0x60000000
#define RDL_RAM_SIZE_4319 0x48000
#define RDL_RAM_SIZE_4350 0xC0000
#define RDL_RAM_BASE_4350 0x180800
-
+/* Generic defs for nvram "muxenab" bits.
+ * Note: these differ for 4335a0; refer to bcmchipc.h for the specific mux options.
+ */
#define MUXENAB_UART 0x00000001
#define MUXENAB_GPIO 0x00000002
-#define MUXENAB_ERCX 0x00000004
+#define MUXENAB_ERCX 0x00000004 /* External Radio BT coex */
#define MUXENAB_JTAG 0x00000008
-#define MUXENAB_HOST_WAKE 0x00000010
+#define MUXENAB_HOST_WAKE 0x00000010 /* configure GPIO for SDIO host_wake */
#define MUXENAB_I2S_EN 0x00000020
#define MUXENAB_I2S_MASTER 0x00000040
#define MUXENAB_I2S_FULL 0x00000080
#define MUXENAB_RFSWCTRL2 0x00000800
#define MUXENAB_SECI 0x00001000
#define MUXENAB_BT_LEGACY 0x00002000
-#define MUXENAB_HOST_WAKE1 0x00004000
-
+#define MUXENAB_HOST_WAKE1 0x00004000 /* configure alternative GPIO for SDIO host_wake */
+/* Boot flags */
#define FLASH_KERNEL_NFLASH 0x00000001
#define FLASH_BOOT_NFLASH 0x00000002
-#endif
+#endif /* _BCMDEVS_H */
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: bcmendian.h 241182 2011-02-17 21:50:03Z $
+ * $Id: bcmendian.h 402715 2013-05-16 18:50:09Z $
*
* This file by default provides proper behavior on little-endian architectures.
* On big-endian architectures, IL_BIGENDIAN should be defined.
#include <typedefs.h>
-
+/* Reverse the bytes in a 16-bit value */
#define BCMSWAP16(val) \
((uint16)((((uint16)(val) & (uint16)0x00ffU) << 8) | \
(((uint16)(val) & (uint16)0xff00U) >> 8)))
-
+/* Reverse the bytes in a 32-bit value */
#define BCMSWAP32(val) \
((uint32)((((uint32)(val) & (uint32)0x000000ffU) << 24) | \
(((uint32)(val) & (uint32)0x0000ff00U) << 8) | \
(((uint32)(val) & (uint32)0x00ff0000U) >> 8) | \
(((uint32)(val) & (uint32)0xff000000U) >> 24)))
-
+/* Reverse the two 16-bit halves of a 32-bit value */
#define BCMSWAP32BY16(val) \
((uint32)((((uint32)(val) & (uint32)0x0000ffffU) << 16) | \
(((uint32)(val) & (uint32)0xffff0000U) >> 16)))
-
+/* Reverse the bytes in a 64-bit value */
+#define BCMSWAP64(val) \
+ ((uint64)((((uint64)(val) & 0x00000000000000ffULL) << 56) | \
+ (((uint64)(val) & 0x000000000000ff00ULL) << 40) | \
+ (((uint64)(val) & 0x0000000000ff0000ULL) << 24) | \
+ (((uint64)(val) & 0x00000000ff000000ULL) << 8) | \
+ (((uint64)(val) & 0x000000ff00000000ULL) >> 8) | \
+ (((uint64)(val) & 0x0000ff0000000000ULL) >> 24) | \
+ (((uint64)(val) & 0x00ff000000000000ULL) >> 40) | \
+ (((uint64)(val) & 0xff00000000000000ULL) >> 56)))
+
+/* Reverse the two 32-bit halves of a 64-bit value */
+#define BCMSWAP64BY32(val) \
+ ((uint64)((((uint64)(val) & 0x00000000ffffffffULL) << 32) | \
+ (((uint64)(val) & 0xffffffff00000000ULL) >> 32)))
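For reference, a minimal sketch (not part of the patch) of what the value-reversal macros above produce; it assumes the driver's <typedefs.h> and this header are on the include path:

#include <typedefs.h>
#include <bcmendian.h>

static void swap_examples(void)
{
	uint16 a = BCMSWAP16(0x1234);                     /* 0x3412 */
	uint32 b = BCMSWAP32(0x11223344);                 /* 0x44332211 */
	uint32 c = BCMSWAP32BY16(0x11223344);             /* 0x33441122 */
	uint64 d = BCMSWAP64BY32(0x1122334455667788ULL);  /* 0x5566778811223344 */
	(void)a; (void)b; (void)c; (void)d;               /* results hold on any host */
}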
+
+
+/* Byte swapping macros
+ * Host <=> Network (Big Endian) for 16- and 32-bit values
+ * Host <=> Little-Endian for 16- and 32-bit values
+ */
#ifndef hton16
#define HTON16(i) BCMSWAP16(i)
#define hton16(i) bcmswap16(i)
#define htol16(i) (i)
#define HTOL32(i) (i)
#define htol32(i) (i)
-#endif
+#define HTOL64(i) (i)
+#define htol64(i) (i)
+#endif /* hton16 */
#define ltoh16_buf(buf, i)
#define htol16_buf(buf, i)
-
+/* Unaligned loads and stores in host byte order */
#define load32_ua(a) ltoh32_ua(a)
#define store32_ua(a, v) htol32_ua_store(v, a)
#define load16_ua(a) ltoh16_ua(a)
#ifdef __GNUC__
-
+/* GNU macro versions avoid referencing the argument multiple times, while also
+ * avoiding the -fno-inline used in ROM builds.
+ */
#define bcmswap16(val) ({ \
uint16 _val = (val); \
	BCMSWAP16(_val); \
})
+#define bcmswap64(val) ({ \
+ uint64 _val = (val); \
+ BCMSWAP64(_val); \
+})
+
#define bcmswap32by16(val) ({ \
uint32 _val = (val); \
BCMSWAP32BY16(_val); \
_NTOH32_UA(_bytes); \
})
-#else
-
+#else /* !__GNUC__ */
+/* Inline versions avoid referencing the argument multiple times */
static INLINE uint16
bcmswap16(uint16 val)
{
	return BCMSWAP16(val);
}
+static INLINE uint64
+bcmswap64(uint64 val)
+{
+ return BCMSWAP64(val);
+}
+
static INLINE uint32
bcmswap32by16(uint32 val)
{
return BCMSWAP32BY16(val);
}
-
-
-
+/* Reverse pairs of bytes in a buffer (not for high-performance use) */
+/* buf - start of buffer of shorts to swap */
+/* len - byte length of buffer */
static INLINE void
bcmswap16_buf(uint16 *buf, uint len)
{
}
}
-
+/*
+ * Store 16-bit value to unaligned little-endian byte array.
+ */
static INLINE void
htol16_ua_store(uint16 val, uint8 *bytes)
{
bytes[1] = val >> 8;
}
-
+/*
+ * Store 32-bit value to unaligned little-endian byte array.
+ */
static INLINE void
htol32_ua_store(uint32 val, uint8 *bytes)
{
bytes[3] = val >> 24;
}
-
+/*
+ * Store 16-bit value to unaligned network-(big-)endian byte array.
+ */
static INLINE void
hton16_ua_store(uint16 val, uint8 *bytes)
{
bytes[1] = val & 0xff;
}
-
+/*
+ * Store 32-bit value to unaligned network-(big-)endian byte array.
+ */
static INLINE void
hton32_ua_store(uint32 val, uint8 *bytes)
{
bytes[3] = val & 0xff;
}
-
+/*
+ * Load 16-bit value from unaligned little-endian byte array.
+ */
static INLINE uint16
ltoh16_ua(const void *bytes)
{
return _LTOH16_UA((const uint8 *)bytes);
}
-
+/*
+ * Load 32-bit value from unaligned little-endian byte array.
+ */
static INLINE uint32
ltoh32_ua(const void *bytes)
{
return _LTOH32_UA((const uint8 *)bytes);
}
-
+/*
+ * Load 16-bit value from unaligned big-(network-)endian byte array.
+ */
static INLINE uint16
ntoh16_ua(const void *bytes)
{
return _NTOH16_UA((const uint8 *)bytes);
}
-
+/*
+ * Load 32-bit value from unaligned big-(network-)endian byte array.
+ */
static INLINE uint32
ntoh32_ua(const void *bytes)
{
return _NTOH32_UA((const uint8 *)bytes);
}
-#endif
-#endif
+#endif /* !__GNUC__ */
+#endif /* !_BCMENDIAN_H_ */
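A companion sketch (same assumptions as the previous one) showing how the unaligned little-endian store/load helpers pair up; the byte layout written by the store is fixed regardless of host endianness:

static void unaligned_examples(void)
{
	uint8 buf[4];
	uint32 v;

	htol32_ua_store(0x11223344, buf);   /* buf[] = { 0x44, 0x33, 0x22, 0x11 } */
	v = ltoh32_ua(buf);                 /* round-trips to 0x11223344 */
	(void)v;
}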
--- /dev/null
+/*
+ * MSGBUF network driver ioctl/indication encoding
+ * Broadcom 802.11abg Networking Device Driver
+ *
+ * Definitions subject to change without notice.
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: bcmmsgbuf.h 490808 2014-07-12 00:33:13Z $
+ */
+#ifndef _bcmmsgbuf_h_
+#define _bcmmsgbuf_h_
+#include <proto/ethernet.h>
+#include <wlioctl.h>
+#include <bcmpcie.h>
+
+#define MSGBUF_MAX_MSG_SIZE ETHER_MAX_LEN
+
+#define D2H_EPOCH_MODULO 253 /* sequence number wrap */
+#define D2H_EPOCH_INIT_VAL (D2H_EPOCH_MODULO + 1)
+
+#define H2DRING_TXPOST_ITEMSIZE 48
+#define H2DRING_RXPOST_ITEMSIZE 32
+#define H2DRING_CTRL_SUB_ITEMSIZE 40
+#define D2HRING_TXCMPLT_ITEMSIZE 16
+#define D2HRING_RXCMPLT_ITEMSIZE 32
+#define D2HRING_CTRL_CMPLT_ITEMSIZE 24
+
+#define H2DRING_TXPOST_MAX_ITEM 512
+#define H2DRING_RXPOST_MAX_ITEM 256
+#define H2DRING_CTRL_SUB_MAX_ITEM 20
+#define D2HRING_TXCMPLT_MAX_ITEM 1024
+#define D2HRING_RXCMPLT_MAX_ITEM 256
+#define D2HRING_CTRL_CMPLT_MAX_ITEM 20
+enum {
+ DNGL_TO_HOST_MSGBUF,
+ HOST_TO_DNGL_MSGBUF
+};
+
+enum {
+ HOST_TO_DNGL_TXP_DATA,
+ HOST_TO_DNGL_RXP_DATA,
+ HOST_TO_DNGL_CTRL,
+ DNGL_TO_HOST_DATA,
+ DNGL_TO_HOST_CTRL
+};
+
+#define MESSAGE_PAYLOAD(a) (a & MSG_TYPE_INTERNAL_USE_START) ? TRUE : FALSE
+
+#ifdef PCIE_API_REV1
+
+#define BCMMSGBUF_DUMMY_REF(a, b) do {BCM_REFERENCE((a));BCM_REFERENCE((b));} while (0)
+
+#define BCMMSGBUF_API_IFIDX(a) 0
+#define BCMMSGBUF_API_SEQNUM(a) 0
+#define BCMMSGBUF_IOCTL_XTID(a) 0
+#define BCMMSGBUF_IOCTL_PKTID(a) ((a)->cmd_id)
+
+#define BCMMSGBUF_SET_API_IFIDX(a, b) BCMMSGBUF_DUMMY_REF(a, b)
+#define BCMMSGBUF_SET_API_SEQNUM(a, b) BCMMSGBUF_DUMMY_REF(a, b)
+#define BCMMSGBUF_IOCTL_SET_PKTID(a, b) (BCMMSGBUF_IOCTL_PKTID(a) = (b))
+#define BCMMSGBUF_IOCTL_SET_XTID(a, b) BCMMSGBUF_DUMMY_REF(a, b)
+
+#else /* PCIE_API_REV1 */
+
+#define BCMMSGBUF_API_IFIDX(a) ((a)->if_id)
+#define BCMMSGBUF_IOCTL_PKTID(a) ((a)->pkt_id)
+#define BCMMSGBUF_API_SEQNUM(a) ((a)->u.seq.seq_no)
+#define BCMMSGBUF_IOCTL_XTID(a) ((a)->xt_id)
+
+#define BCMMSGBUF_SET_API_IFIDX(a, b) (BCMMSGBUF_API_IFIDX((a)) = (b))
+#define BCMMSGBUF_SET_API_SEQNUM(a, b) (BCMMSGBUF_API_SEQNUM((a)) = (b))
+#define BCMMSGBUF_IOCTL_SET_PKTID(a, b) (BCMMSGBUF_IOCTL_PKTID((a)) = (b))
+#define BCMMSGBUF_IOCTL_SET_XTID(a, b) (BCMMSGBUF_IOCTL_XTID((a)) = (b))
+
+#endif /* PCIE_API_REV1 */
+
+/* utility data structures */
+union addr64 {
+ struct {
+ uint32 low;
+ uint32 high;
+ };
+ struct {
+ uint32 low_addr;
+ uint32 high_addr;
+ };
+ uint64 u64;
+} DECLSPEC_ALIGN(8);
+
+typedef union addr64 addr64_t;
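The addr64 union carries 64-bit host (DMA) addresses in the messages below; a minimal sketch of splitting a host physical address into its low/high words (the address value is illustrative only):

static void addr64_example(void)
{
	uint64 dma_pa = 0x0000000123456000ULL;              /* illustrative address */
	addr64_t haddr;

	haddr.low_addr  = (uint32)(dma_pa & 0xffffffffULL); /* 0x23456000 */
	haddr.high_addr = (uint32)(dma_pa >> 32);           /* 0x00000001 */
	(void)haddr;
}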
+
+/* IOCTL req Hdr */
+/* cmn Msg Hdr */
+typedef struct cmn_msg_hdr {
+ /* message type */
+ uint8 msg_type;
+ /* interface index this is valid for */
+ uint8 if_id;
+ /* flags */
+ uint8 flags;
+ /* sequence number */
+ uint8 epoch;
+ /* packet Identifier for the associated host buffer */
+ uint32 request_id;
+} cmn_msg_hdr_t;
+
+/* message type */
+typedef enum bcmpcie_msgtype {
+ MSG_TYPE_GEN_STATUS = 0x1,
+ MSG_TYPE_RING_STATUS = 0x2,
+ MSG_TYPE_FLOW_RING_CREATE = 0x3,
+ MSG_TYPE_FLOW_RING_CREATE_CMPLT = 0x4,
+ MSG_TYPE_FLOW_RING_DELETE = 0x5,
+ MSG_TYPE_FLOW_RING_DELETE_CMPLT = 0x6,
+ MSG_TYPE_FLOW_RING_FLUSH = 0x7,
+ MSG_TYPE_FLOW_RING_FLUSH_CMPLT = 0x8,
+ MSG_TYPE_IOCTLPTR_REQ = 0x9,
+ MSG_TYPE_IOCTLPTR_REQ_ACK = 0xA,
+ MSG_TYPE_IOCTLRESP_BUF_POST = 0xB,
+ MSG_TYPE_IOCTL_CMPLT = 0xC,
+ MSG_TYPE_EVENT_BUF_POST = 0xD,
+ MSG_TYPE_WL_EVENT = 0xE,
+ MSG_TYPE_TX_POST = 0xF,
+ MSG_TYPE_TX_STATUS = 0x10,
+ MSG_TYPE_RXBUF_POST = 0x11,
+ MSG_TYPE_RX_CMPLT = 0x12,
+ MSG_TYPE_LPBK_DMAXFER = 0x13,
+ MSG_TYPE_LPBK_DMAXFER_CMPLT = 0x14,
+ MSG_TYPE_API_MAX_RSVD = 0x3F
+} bcmpcie_msg_type_t;
+
+typedef enum bcmpcie_msgtype_int {
+ MSG_TYPE_INTERNAL_USE_START = 0x40,
+ MSG_TYPE_EVENT_PYLD = 0x41,
+ MSG_TYPE_IOCT_PYLD = 0x42,
+ MSG_TYPE_RX_PYLD = 0x43,
+ MSG_TYPE_HOST_FETCH = 0x44,
+ MSG_TYPE_LPBK_DMAXFER_PYLD = 0x45,
+ MSG_TYPE_TXMETADATA_PYLD = 0x46,
+ MSG_TYPE_HOSTDMA_PTRS = 0x47
+} bcmpcie_msgtype_int_t;
+
+typedef enum bcmpcie_msgtype_u {
+ MSG_TYPE_TX_BATCH_POST = 0x80,
+ MSG_TYPE_IOCTL_REQ = 0x81,
+ MSG_TYPE_HOST_EVNT = 0x82,
+ MSG_TYPE_LOOPBACK = 0x83
+} bcmpcie_msgtype_u_t;
+
+
+/* if_id */
+#define BCMPCIE_CMNHDR_IFIDX_PHYINTF_SHFT 5
+#define BCMPCIE_CMNHDR_IFIDX_PHYINTF_MAX 0x7
+#define BCMPCIE_CMNHDR_IFIDX_PHYINTF_MASK \
+ (BCMPCIE_CMNHDR_IFIDX_PHYINTF_MAX << BCMPCIE_CMNHDR_IFIDX_PHYINTF_SHFT)
+#define BCMPCIE_CMNHDR_IFIDX_VIRTINTF_SHFT 0
+#define BCMPCIE_CMNHDR_IFIDX_VIRTINTF_MAX 0x1F
+#define BCMPCIE_CMNHDR_IFIDX_VIRTINTF_MASK \
+ (BCMPCIE_CMNHDR_IFIDX_VIRTINTF_MAX << BCMPCIE_CMNHDR_IFIDX_VIRTINTF_SHFT)
+
+/* flags */
+#define BCMPCIE_CMNHDR_FLAGS_DMA_R_IDX 0x1
+#define BCMPCIE_CMNHDR_FLAGS_DMA_R_IDX_INTR 0x2
+#define BCMPCIE_CMNHDR_FLAGS_PHASE_BIT 0x80
+
+
+/* IOCTL request message */
+typedef struct ioctl_req_msg {
+ /* common message header */
+ cmn_msg_hdr_t cmn_hdr;
+
+ /* ioctl command type */
+ uint32 cmd;
+ /* ioctl transaction ID, to pair with an ioctl response */
+ uint16 trans_id;
+ /* input arguments buffer len */
+ uint16 input_buf_len;
+ /* expected output len */
+ uint16 output_buf_len;
+ /* to align the host address on 8 byte boundary */
+ uint16 rsvd[3];
+ /* always align on 8 byte boundary */
+ addr64_t host_input_buf_addr;
+ /* rsvd */
+ uint32 rsvd1[2];
+} ioctl_req_msg_t;
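A hedged sketch of how a host driver might fill this work item before placing it on the control submit ring; only the struct fields and the MSG_TYPE_IOCTLPTR_REQ constant come from this header, the ring-write call is hypothetical, and memset is assumed to be available:

static void post_ioctl_req_sketch(uint32 cmd, uint16 trans_id,
	addr64_t input_buf, uint16 input_len, uint16 output_len)
{
	ioctl_req_msg_t req;

	memset(&req, 0, sizeof(req));
	req.cmn_hdr.msg_type = MSG_TYPE_IOCTLPTR_REQ;
	req.cmn_hdr.if_id = 0;                  /* primary interface */
	req.cmd = cmd;
	req.trans_id = trans_id;
	req.input_buf_len = input_len;
	req.output_buf_len = output_len;
	req.host_input_buf_addr = input_buf;

	/* hypothetical_ctrl_ring_write(&req, sizeof(req)); -- not part of this header */
}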
+
+/* buffer post messages for device to use to return IOCTL responses, Events */
+typedef struct ioctl_resp_evt_buf_post_msg {
+ /* common message header */
+ cmn_msg_hdr_t cmn_hdr;
+ /* length of the host buffer supplied */
+ uint16 host_buf_len;
+ /* to align the host address on 8 byte boundary */
+ uint16 reserved[3];
+ /* always align on 8 byte boundary */
+ addr64_t host_buf_addr;
+ uint32 rsvd[4];
+} ioctl_resp_evt_buf_post_msg_t;
+
+
+typedef struct pcie_dma_xfer_params {
+ /* common message header */
+ cmn_msg_hdr_t cmn_hdr;
+
+ /* always align on 8 byte boundary */
+ addr64_t host_input_buf_addr;
+
+ /* always align on 8 byte boundary */
+ addr64_t host_ouput_buf_addr;
+
+ /* length of transfer */
+ uint32 xfer_len;
+ /* delay before doing the src txfer */
+ uint32 srcdelay;
+ /* delay before doing the dest txfer */
+ uint32 destdelay;
+ uint32 rsvd;
+} pcie_dma_xfer_params_t;
+
+/* Complete msgbuf hdr for flow ring update from host to dongle */
+typedef struct tx_flowring_create_request {
+ cmn_msg_hdr_t msg;
+ uint8 da[ETHER_ADDR_LEN];
+ uint8 sa[ETHER_ADDR_LEN];
+ uint8 tid;
+ uint8 if_flags;
+ uint16 flow_ring_id;
+ uint8 tc;
+ uint8 priority;
+ uint16 int_vector;
+ uint16 max_items;
+ uint16 len_item;
+ addr64_t flow_ring_ptr;
+} tx_flowring_create_request_t;
+
+typedef struct tx_flowring_delete_request {
+ cmn_msg_hdr_t msg;
+ uint16 flow_ring_id;
+ uint16 reason;
+ uint32 rsvd[7];
+} tx_flowring_delete_request_t;
+
+typedef struct tx_flowring_flush_request {
+ cmn_msg_hdr_t msg;
+ uint16 flow_ring_id;
+ uint16 reason;
+ uint32 rsvd[7];
+} tx_flowring_flush_request_t;
+
+typedef union ctrl_submit_item {
+ ioctl_req_msg_t ioctl_req;
+ ioctl_resp_evt_buf_post_msg_t resp_buf_post;
+ pcie_dma_xfer_params_t dma_xfer;
+ tx_flowring_create_request_t flow_create;
+ tx_flowring_delete_request_t flow_delete;
+ tx_flowring_flush_request_t flow_flush;
+ unsigned char check[H2DRING_CTRL_SUB_ITEMSIZE];
+} ctrl_submit_item_t;
+
+/* Control Completion messages (20 bytes) */
+typedef struct compl_msg_hdr {
+ /* status for the completion */
+ int16 status;
+ /* submission flow ring id which generated this status */
+ uint16 flow_ring_id;
+} compl_msg_hdr_t;
+
+/* XOR checksum or a magic number to audit DMA done */
+typedef uint32 dma_done_t;
+
+/* completion header status codes */
+#define BCMPCIE_SUCCESS 0
+#define BCMPCIE_NOTFOUND 1
+#define BCMPCIE_NOMEM 2
+#define BCMPCIE_BADOPTION 3
+#define BCMPCIE_RING_IN_USE 4
+#define BCMPCIE_RING_ID_INVALID 5
+#define BCMPCIE_PKT_FLUSH 6
+#define BCMPCIE_NO_EVENT_BUF 7
+#define BCMPCIE_NO_RX_BUF 8
+#define BCMPCIE_NO_IOCTLRESP_BUF 9
+#define BCMPCIE_MAX_IOCTLRESP_BUF 10
+#define BCMPCIE_MAX_EVENT_BUF 11
+
+/* IOCTL completion response */
+typedef struct ioctl_compl_resp_msg {
+ /* common message header */
+ cmn_msg_hdr_t cmn_hdr;
+ /* completion message header */
+ compl_msg_hdr_t compl_hdr;
+ /* response buffer len where a host buffer is involved */
+ uint16 resp_len;
+ /* transaction id to pair with a request */
+ uint16 trans_id;
+ /* cmd id */
+ uint32 cmd;
+ /* XOR checksum or a magic number to audit DMA done */
+ dma_done_t marker;
+} ioctl_comp_resp_msg_t;
+
+/* IOCTL request acknowledgement */
+typedef struct ioctl_req_ack_msg {
+ /* common message header */
+ cmn_msg_hdr_t cmn_hdr;
+ /* completion message header */
+ compl_msg_hdr_t compl_hdr;
+ /* cmd id */
+ uint32 cmd;
+ uint32 rsvd[1];
+ /* XOR checksum or a magic number to audit DMA done */
+ dma_done_t marker;
+} ioctl_req_ack_msg_t;
+
+/* WL event message: send from device to host */
+typedef struct wlevent_req_msg {
+ /* common message header */
+ cmn_msg_hdr_t cmn_hdr;
+ /* completion message header */
+ compl_msg_hdr_t compl_hdr;
+ /* event data len valid with the event buffer */
+ uint16 event_data_len;
+ /* sequence number */
+ uint16 seqnum;
+ /* rsvd */
+ uint32 rsvd;
+ /* XOR checksum or a magic number to audit DMA done */
+ dma_done_t marker;
+} wlevent_req_msg_t;
+
+/* dma xfer complete message */
+typedef struct pcie_dmaxfer_cmplt {
+ /* common message header */
+ cmn_msg_hdr_t cmn_hdr;
+ /* completion message header */
+ compl_msg_hdr_t compl_hdr;
+ uint32 rsvd[2];
+ /* XOR checksum or a magic number to audit DMA done */
+ dma_done_t marker;
+} pcie_dmaxfer_cmplt_t;
+
+/* general status message */
+typedef struct pcie_gen_status {
+ /* common message header */
+ cmn_msg_hdr_t cmn_hdr;
+ /* completion message header */
+ compl_msg_hdr_t compl_hdr;
+ uint32 rsvd[2];
+ /* XOR checksum or a magic number to audit DMA done */
+ dma_done_t marker;
+} pcie_gen_status_t;
+
+/* ring status message */
+typedef struct pcie_ring_status {
+ /* common message header */
+ cmn_msg_hdr_t cmn_hdr;
+ /* completion message header */
+ compl_msg_hdr_t compl_hdr;
+ /* message which firmware couldn't decode */
+ uint16 write_idx;
+ uint16 rsvd[3];
+ /* XOR checksum or a magic number to audit DMA done */
+ dma_done_t marker;
+} pcie_ring_status_t;
+
+typedef struct tx_flowring_create_response {
+ cmn_msg_hdr_t msg;
+ compl_msg_hdr_t cmplt;
+ uint32 rsvd[2];
+ /* XOR checksum or a magic number to audit DMA done */
+ dma_done_t marker;
+} tx_flowring_create_response_t;
+typedef struct tx_flowring_delete_response {
+ cmn_msg_hdr_t msg;
+ compl_msg_hdr_t cmplt;
+ uint32 rsvd[2];
+ /* XOR checksum or a magic number to audit DMA done */
+ dma_done_t marker;
+} tx_flowring_delete_response_t;
+
+typedef struct tx_flowring_flush_response {
+ cmn_msg_hdr_t msg;
+ compl_msg_hdr_t cmplt;
+ uint32 rsvd[2];
+ /* XOR checksum or a magic number to audit DMA done */
+ dma_done_t marker;
+} tx_flowring_flush_response_t;
+
+/* Common layout of all d2h control messages */
+typedef struct ctrl_compl_msg {
+ /* common message header */
+ cmn_msg_hdr_t cmn_hdr;
+ /* completion message header */
+ compl_msg_hdr_t compl_hdr;
+ uint32 rsvd[2];
+ /* XOR checksum or a magic number to audit DMA done */
+ dma_done_t marker;
+} ctrl_compl_msg_t;
+
+typedef union ctrl_completion_item {
+ ioctl_comp_resp_msg_t ioctl_resp;
+ wlevent_req_msg_t event;
+ ioctl_req_ack_msg_t ioct_ack;
+ pcie_dmaxfer_cmplt_t pcie_xfer_cmplt;
+ pcie_gen_status_t pcie_gen_status;
+ pcie_ring_status_t pcie_ring_status;
+ tx_flowring_create_response_t txfl_create_resp;
+ tx_flowring_delete_response_t txfl_delete_resp;
+ tx_flowring_flush_response_t txfl_flush_resp;
+ ctrl_compl_msg_t ctrl_compl;
+ unsigned char check[D2HRING_CTRL_CMPLT_ITEMSIZE];
+} ctrl_completion_item_t;
+
+/* H2D Rxpost ring work items */
+typedef struct host_rxbuf_post {
+ /* common message header */
+ cmn_msg_hdr_t cmn_hdr;
+ /* provided meta data buffer len */
+ uint16 metadata_buf_len;
+ /* provided data buffer len to receive data */
+ uint16 data_buf_len;
+ /* alignment to make the host buffers start on 8 byte boundary */
+ uint32 rsvd;
+ /* provided meta data buffer */
+ addr64_t metadata_buf_addr;
+ /* provided data buffer to receive data */
+ addr64_t data_buf_addr;
+} host_rxbuf_post_t;
+
+typedef union rxbuf_submit_item {
+ host_rxbuf_post_t rxpost;
+ unsigned char check[H2DRING_RXPOST_ITEMSIZE];
+} rxbuf_submit_item_t;
+
+
+/* D2H Rxcompletion ring work items */
+typedef struct host_rxbuf_cmpl {
+ /* common message header */
+ cmn_msg_hdr_t cmn_hdr;
+ /* completion message header */
+ compl_msg_hdr_t compl_hdr;
+ /* filled up meta data len */
+ uint16 metadata_len;
+ /* filled up buffer len to receive data */
+ uint16 data_len;
+ /* offset in the host rx buffer where the data starts */
+ uint16 data_offset;
+ /* rx flags */
+ uint16 flags;
+ /* rx status */
+ uint32 rx_status_0;
+ uint32 rx_status_1;
+ /* XOR checksum or a magic number to audit DMA done */
+ dma_done_t marker;
+} host_rxbuf_cmpl_t;
+
+typedef union rxbuf_complete_item {
+ host_rxbuf_cmpl_t rxcmpl;
+ unsigned char check[D2HRING_RXCMPLT_ITEMSIZE];
+} rxbuf_complete_item_t;
+
+
+typedef struct host_txbuf_post {
+ /* common message header */
+ cmn_msg_hdr_t cmn_hdr;
+ /* eth header */
+ uint8 txhdr[ETHER_HDR_LEN];
+ /* flags */
+ uint8 flags;
+ /* number of segments */
+ uint8 seg_cnt;
+
+ /* provided meta data buffer for txstatus */
+ addr64_t metadata_buf_addr;
+ /* provided data buffer holding the packet data to transmit */
+ addr64_t data_buf_addr;
+ /* provided meta data buffer len */
+ uint16 metadata_buf_len;
+ /* length of the packet data in the provided data buffer */
+ uint16 data_len;
+ uint32 rsvd;
+} host_txbuf_post_t;
+
+#define BCMPCIE_PKT_FLAGS_FRAME_802_3 0x01
+#define BCMPCIE_PKT_FLAGS_FRAME_802_11 0x02
+
+#define BCMPCIE_PKT_FLAGS_FRAME_EXEMPT_MASK 0x03 /* Exempt uses 2 bits */
+#define BCMPCIE_PKT_FLAGS_FRAME_EXEMPT_SHIFT 0x02 /* needs to be shifted past other bits */
+
+
+#define BCMPCIE_PKT_FLAGS_PRIO_SHIFT 5
+#define BCMPCIE_PKT_FLAGS_PRIO_MASK (7 << BCMPCIE_PKT_FLAGS_PRIO_SHIFT)
+
+/* These are added to fix up the compile issues */
+#define BCMPCIE_TXPOST_FLAGS_FRAME_802_3 BCMPCIE_PKT_FLAGS_FRAME_802_3
+#define BCMPCIE_TXPOST_FLAGS_FRAME_802_11 BCMPCIE_PKT_FLAGS_FRAME_802_11
+#define BCMPCIE_TXPOST_FLAGS_PRIO_SHIFT BCMPCIE_PKT_FLAGS_PRIO_SHIFT
+#define BCMPCIE_TXPOST_FLAGS_PRIO_MASK BCMPCIE_PKT_FLAGS_PRIO_MASK
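A small sketch of packing the frame type and an 802.1d priority (0..7) into the txpost flags byte with the masks above:

static uint8 txpost_flags_sketch(uint8 prio)
{
	uint8 flags = BCMPCIE_PKT_FLAGS_FRAME_802_3;

	flags |= (uint8)((prio << BCMPCIE_PKT_FLAGS_PRIO_SHIFT) &
	                 BCMPCIE_PKT_FLAGS_PRIO_MASK);
	return flags;
}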
+
+/* H2D Txpost ring work items */
+typedef union txbuf_submit_item {
+ host_txbuf_post_t txpost;
+ unsigned char check[H2DRING_TXPOST_ITEMSIZE];
+} txbuf_submit_item_t;
+
+/* D2H Txcompletion ring work items */
+typedef struct host_txbuf_cmpl {
+ /* common message header */
+ cmn_msg_hdr_t cmn_hdr;
+ /* completion message header */
+ compl_msg_hdr_t compl_hdr;
+ union {
+ struct {
+ /* provided meta data len */
+ uint16 metadata_len;
+ /* WLAN side txstatus */
+ uint16 tx_status;
+ };
+ /* XOR checksum or a magic number to audit DMA done */
+ dma_done_t marker;
+ };
+} host_txbuf_cmpl_t;
+
+typedef union txbuf_complete_item {
+ host_txbuf_cmpl_t txcmpl;
+ unsigned char check[D2HRING_TXCMPLT_ITEMSIZE];
+} txbuf_complete_item_t;
+
+#define BCMPCIE_D2H_METADATA_HDRLEN 4
+#define BCMPCIE_D2H_METADATA_MINLEN (BCMPCIE_D2H_METADATA_HDRLEN + 4)
+
+/* ret buf struct */
+typedef struct ret_buf_ptr {
+ uint32 low_addr;
+ uint32 high_addr;
+} ret_buf_t;
+
+#ifdef PCIE_API_REV1
+/* ioctl specific hdr */
+typedef struct ioctl_hdr {
+ uint16 cmd;
+ uint16 retbuf_len;
+ uint32 cmd_id;
+} ioctl_hdr_t;
+typedef struct ioctlptr_hdr {
+ uint16 cmd;
+ uint16 retbuf_len;
+ uint16 buflen;
+ uint16 rsvd;
+ uint32 cmd_id;
+} ioctlptr_hdr_t;
+#else /* PCIE_API_REV1 */
+typedef struct ioctl_req_hdr {
+ uint32 pkt_id; /* Packet ID */
+ uint32 cmd; /* IOCTL ID */
+ uint16 retbuf_len;
+ uint16 buflen;
+ uint16 xt_id; /* transaction ID */
+ uint16 rsvd[1];
+} ioctl_req_hdr_t;
+#endif /* PCIE_API_REV1 */
+
+
+/* Complete msgbuf hdr for ioctl from host to dongle */
+typedef struct ioct_reqst_hdr {
+ cmn_msg_hdr_t msg;
+#ifdef PCIE_API_REV1
+ ioctl_hdr_t ioct_hdr;
+#else
+ ioctl_req_hdr_t ioct_hdr;
+#endif
+ ret_buf_t ret_buf;
+} ioct_reqst_hdr_t;
+typedef struct ioctptr_reqst_hdr {
+ cmn_msg_hdr_t msg;
+#ifdef PCIE_API_REV1
+ ioctlptr_hdr_t ioct_hdr;
+#else
+ ioctl_req_hdr_t ioct_hdr;
+#endif
+ ret_buf_t ret_buf;
+ ret_buf_t ioct_buf;
+} ioctptr_reqst_hdr_t;
+
+/* ioctl response header */
+typedef struct ioct_resp_hdr {
+ cmn_msg_hdr_t msg;
+#ifdef PCIE_API_REV1
+ uint32 cmd_id;
+#else
+ uint32 pkt_id;
+#endif
+ uint32 status;
+ uint32 ret_len;
+ uint32 inline_data;
+#ifdef PCIE_API_REV1
+#else
+ uint16 xt_id; /* transaction ID */
+ uint16 rsvd[1];
+#endif
+} ioct_resp_hdr_t;
+
+/* ioct resp header used in dongle */
+/* ret buf hdr will be stripped off inside dongle itself */
+typedef struct msgbuf_ioctl_resp {
+ ioct_resp_hdr_t ioct_hdr;
+ ret_buf_t ret_buf; /* ret buf pointers */
+} msgbuf_ioct_resp_t;
+
+/* WL event hdr info */
+typedef struct wl_event_hdr {
+ cmn_msg_hdr_t msg;
+ uint16 event;
+ uint8 flags;
+ uint8 rsvd;
+ uint16 retbuf_len;
+ uint16 rsvd1;
+ uint32 rxbufid;
+} wl_event_hdr_t;
+
+#define TXDESCR_FLOWID_PCIELPBK_1 0xFF
+#define TXDESCR_FLOWID_PCIELPBK_2 0xFE
+
+typedef struct txbatch_lenptr_tup {
+ uint32 pktid;
+ uint16 pktlen;
+ uint16 rsvd;
+ ret_buf_t ret_buf; /* ret buf pointers */
+} txbatch_lenptr_tup_t;
+
+typedef struct txbatch_cmn_msghdr {
+ cmn_msg_hdr_t msg;
+ uint8 priority;
+ uint8 hdrlen;
+ uint8 pktcnt;
+ uint8 flowid;
+ uint8 txhdr[ETHER_HDR_LEN];
+ uint16 rsvd;
+} txbatch_cmn_msghdr_t;
+
+typedef struct txbatch_msghdr {
+ txbatch_cmn_msghdr_t txcmn;
+ txbatch_lenptr_tup_t tx_tup[0]; /* Based on packet count */
+} txbatch_msghdr_t;
+
+/* TX desc posting header */
+typedef struct tx_lenptr_tup {
+ uint16 pktlen;
+ uint16 rsvd;
+ ret_buf_t ret_buf; /* ret buf pointers */
+} tx_lenptr_tup_t;
+
+typedef struct txdescr_cmn_msghdr {
+ cmn_msg_hdr_t msg;
+ uint8 priority;
+ uint8 hdrlen;
+ uint8 descrcnt;
+ uint8 flowid;
+ uint32 pktid;
+} txdescr_cmn_msghdr_t;
+
+typedef struct txdescr_msghdr {
+ txdescr_cmn_msghdr_t txcmn;
+ uint8 txhdr[ETHER_HDR_LEN];
+ uint16 rsvd;
+ tx_lenptr_tup_t tx_tup[0]; /* Based on descriptor count */
+} txdescr_msghdr_t;
+
+/* Tx status header info */
+typedef struct txstatus_hdr {
+ cmn_msg_hdr_t msg;
+ uint32 pktid;
+} txstatus_hdr_t;
+/* RX bufid-len-ptr tuple */
+typedef struct rx_lenptr_tup {
+ uint32 rxbufid;
+ uint16 len;
+ uint16 rsvd2;
+ ret_buf_t ret_buf; /* ret buf pointers */
+} rx_lenptr_tup_t;
+/* Rx descr Post hdr info */
+typedef struct rxdesc_msghdr {
+ cmn_msg_hdr_t msg;
+ uint16 rsvd0;
+ uint8 rsvd1;
+ uint8 descnt;
+ rx_lenptr_tup_t rx_tup[0];
+} rxdesc_msghdr_t;
+
+/* RX complete tuples */
+typedef struct rxcmplt_tup {
+ uint16 retbuf_len;
+ uint16 data_offset;
+ uint32 rxstatus0;
+ uint32 rxstatus1;
+ uint32 rxbufid;
+} rxcmplt_tup_t;
+/* RX complete message hdr */
+typedef struct rxcmplt_hdr {
+ cmn_msg_hdr_t msg;
+ uint16 rsvd0;
+ uint16 rxcmpltcnt;
+ rxcmplt_tup_t rx_tup[0];
+} rxcmplt_hdr_t;
+typedef struct hostevent_hdr {
+ cmn_msg_hdr_t msg;
+ uint32 evnt_pyld;
+} hostevent_hdr_t;
+
+typedef struct dma_xfer_params {
+ uint32 src_physaddr_hi;
+ uint32 src_physaddr_lo;
+ uint32 dest_physaddr_hi;
+ uint32 dest_physaddr_lo;
+ uint32 len;
+ uint32 srcdelay;
+ uint32 destdelay;
+} dma_xfer_params_t;
+
+enum {
+ HOST_EVENT_CONS_CMD = 1
+};
+
+/* defines for flags */
+#define MSGBUF_IOC_ACTION_MASK 0x1
+
+#endif /* _bcmmsgbuf_h_ */
--- /dev/null
+/*
+ * Broadcom PCIE
+ * Software-specific definitions shared between device and host side
+ * Explains the shared area between host and dongle
+ * $Copyright Open 2005 Broadcom Corporation$
+ *
+ * $Id: bcmpcie.h 490808 2014-07-12 00:33:13Z $
+ */
+
+#ifndef _bcmpcie_h_
+#define _bcmpcie_h_
+
+#include <bcmutils.h>
+
+#define ADDR_64(x) (x.addr)
+#define HIGH_ADDR_32(x) ((uint32) (((sh_addr_t) x).high_addr))
+#define LOW_ADDR_32(x) ((uint32) (((sh_addr_t) x).low_addr))
+
+typedef struct {
+ uint32 low_addr;
+ uint32 high_addr;
+} sh_addr_t;
+
+
+
+#ifdef BCMPCIE_SUPPORT_TX_PUSH_RING
+#define BCMPCIE_PUSH_TX_RING 1
+#else
+#define BCMPCIE_PUSH_TX_RING 0
+#endif /* BCMPCIE_SUPPORT_TX_PUSH_RING */
+
+/* May be overridden by 43xxxxx-roml.mk */
+#if !defined(BCMPCIE_MAX_TX_FLOWS)
+#define BCMPCIE_MAX_TX_FLOWS 40
+#endif /* ! BCMPCIE_MAX_TX_FLOWS */
+
+#define PCIE_SHARED_VERSION 0x00005
+#define PCIE_SHARED_VERSION_MASK 0x000FF
+#define PCIE_SHARED_ASSERT_BUILT 0x00100
+#define PCIE_SHARED_ASSERT 0x00200
+#define PCIE_SHARED_TRAP 0x00400
+#define PCIE_SHARED_IN_BRPT 0x00800
+#define PCIE_SHARED_SET_BRPT 0x01000
+#define PCIE_SHARED_PENDING_BRPT 0x02000
+#define PCIE_SHARED_TXPUSH_SPRT 0x04000
+#define PCIE_SHARED_EVT_SEQNUM 0x08000
+#define PCIE_SHARED_DMA_INDEX 0x10000
+
+#define BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT 0
+#define BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT 1
+#define BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE 2
+#define BCMPCIE_D2H_MSGRING_TX_COMPLETE 3
+#define BCMPCIE_D2H_MSGRING_RX_COMPLETE 4
+#define BCMPCIE_COMMON_MSGRING_MAX_ID 4
+
+/* Added only for single tx ring */
+#define BCMPCIE_H2D_TXFLOWRINGID 5
+
+#define BCMPCIE_H2D_COMMON_MSGRINGS 2
+#define BCMPCIE_D2H_COMMON_MSGRINGS 3
+#define BCMPCIE_COMMON_MSGRINGS 5
+
+enum h2dring_idx {
+ BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT_IDX = 0,
+ BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT_IDX = 1,
+ BCMPCIE_H2D_MSGRING_TXFLOW_IDX_START = 2
+};
+
+enum d2hring_idx {
+ BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE_IDX = 0,
+ BCMPCIE_D2H_MSGRING_TX_COMPLETE_IDX = 1,
+ BCMPCIE_D2H_MSGRING_RX_COMPLETE_IDX = 2
+};
+
+typedef struct ring_mem {
+ uint16 idx;
+ uint8 type;
+ uint8 rsvd;
+ uint16 max_item;
+ uint16 len_items;
+ sh_addr_t base_addr;
+} ring_mem_t;
+
+#define RINGSTATE_INITED 1
+
+typedef struct ring_state {
+ uint8 idx;
+ uint8 state;
+ uint16 r_offset;
+ uint16 w_offset;
+ uint16 e_offset;
+} ring_state_t;
+
+
+
+typedef struct ring_info {
+ /* locations in the TCM where the ringmem and ringstate are defined */
+ uint32 ringmem_ptr; /* ring mem location in TCM */
+ uint32 h2d_w_idx_ptr;
+
+ uint32 h2d_r_idx_ptr;
+ uint32 d2h_w_idx_ptr;
+
+ uint32 d2h_r_idx_ptr;
+ /* host memory locations to which the read/write indices are DMA'd */
+ sh_addr_t h2d_w_idx_hostaddr;
+ sh_addr_t h2d_r_idx_hostaddr;
+ sh_addr_t d2h_w_idx_hostaddr;
+ sh_addr_t d2h_r_idx_hostaddr;
+ uint16 max_sub_queues;
+ uint16 rsvd;
+} ring_info_t;
+
+typedef struct {
+ /* shared area version captured at flags 7:0 */
+ uint32 flags;
+
+ uint32 trap_addr;
+ uint32 assert_exp_addr;
+ uint32 assert_file_addr;
+ uint32 assert_line;
+ uint32 console_addr; /* Address of hnd_cons_t */
+
+ uint32 msgtrace_addr;
+
+ uint32 fwid;
+
+ /* Used for debug/flow control */
+ uint16 total_lfrag_pkt_cnt;
+ uint16 max_host_rxbufs; /* rsvd in spec */
+
+ uint32 dma_rxoffset; /* rsvd in spec */
+
+ /* these will be used for sleep request/ack, d3 req/ack */
+ uint32 h2d_mb_data_ptr;
+ uint32 d2h_mb_data_ptr;
+
+ /* information pertinent to host IPC/msgbuf channels */
+ /* location in the TCM memory which has the ring_info */
+ uint32 rings_info_ptr;
+
+ /* block of host memory for the scratch buffer */
+ uint32 host_dma_scratch_buffer_len;
+ sh_addr_t host_dma_scratch_buffer;
+
+ /* block of host memory for the dongle to push the status into */
+ uint32 device_rings_stsblk_len;
+ sh_addr_t device_rings_stsblk;
+#ifdef BCM_BUZZZ
+ uint32 buzzz; /* BUZZZ state format strings and trace buffer */
+#endif
+} pciedev_shared_t;
+
+
+/* H2D mail box Data */
+#define H2D_HOST_D3_INFORM 0x00000001
+#define H2D_HOST_DS_ACK 0x00000002
+#define H2D_HOST_CONS_INT 0x80000000 /* h2d int for console cmds */
+
+/* D2H mail box Data */
+#define D2H_DEV_D3_ACK 0x00000001
+#define D2H_DEV_DS_ENTER_REQ 0x00000002
+#define D2H_DEV_DS_EXIT_NOTE 0x00000004
+#define D2H_DEV_FWHALT 0x10000000
+
+
+extern pciedev_shared_t pciedev_shared;
+#define NEXTTXP(i, d) ((((i)+1) >= (d)) ? 0 : ((i)+1))
+#define NTXPACTIVE(r, w, d) (((r) <= (w)) ? ((w)-(r)) : ((d)-(r)+(w)))
+#define NTXPAVAIL(r, w, d) (((d) - NTXPACTIVE((r), (w), (d))) > 1)
+
+/* Items available to read from a circular ring, without wrapping past the ring end */
+#define READ_AVAIL_SPACE(w, r, d) \
+ ((w >= r) ? (w - r) : (d - r))
+
+#define WRT_PEND(x) ((x)->wr_pending)
+#define DNGL_RING_WPTR(msgbuf) (*((msgbuf)->tcm_rs_w_ptr))
+#define BCMMSGBUF_RING_SET_W_PTR(msgbuf, a) (DNGL_RING_WPTR(msgbuf) = (a))
+
+#define DNGL_RING_RPTR(msgbuf) (*((msgbuf)->tcm_rs_r_ptr))
+#define BCMMSGBUF_RING_SET_R_PTR(msgbuf, a) (DNGL_RING_RPTR(msgbuf) = (a))
+
+#define RING_READ_PTR(x) ((x)->ringstate->r_offset)
+#define RING_WRITE_PTR(x) ((x)->ringstate->w_offset)
+#define RING_START_PTR(x) ((x)->ringmem->base_addr.low_addr)
+#define RING_MAX_ITEM(x) ((x)->ringmem->max_item)
+#define RING_LEN_ITEMS(x) ((x)->ringmem->len_items)
+#define HOST_RING_BASE(x) ((x)->ring_base.va)
+#define HOST_RING_END(x) ((uint8 *)HOST_RING_BASE((x)) + \
+ ((RING_MAX_ITEM((x))-1)*RING_LEN_ITEMS((x))))
+
+#define WRITE_SPACE_AVAIL_CONTINUOUS(r, w, d) ((w >= r) ? (d - w) : (r - w))
+#define WRITE_SPACE_AVAIL(r, w, d) (d - (NTXPACTIVE(r, w, d)) - 1)
+#define CHECK_WRITE_SPACE(r, w, d) \
+ MIN(WRITE_SPACE_AVAIL(r, w, d), WRITE_SPACE_AVAIL_CONTINUOUS(r, w, d))
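A worked example of the ring index arithmetic above, with illustrative values (d is the ring depth in items, r/w the read/write indices):

/* With d = 8, r = 6, w = 2 (the writer has already wrapped):
 *   NEXTTXP(7, 8)                          -> 0   (wraps at the ring end)
 *   NTXPACTIVE(6, 2, 8)                    -> 8 - 6 + 2 = 4 items in flight
 *   NTXPAVAIL(6, 2, 8)                     -> (8 - 4) > 1, i.e. TRUE
 *   READ_AVAIL_SPACE(2, 6, 8)              -> 8 - 6 = 2 items readable before the wrap
 *   WRITE_SPACE_AVAIL(6, 2, 8)             -> 8 - 4 - 1 = 3 (one slot is kept empty)
 *   WRITE_SPACE_AVAIL_CONTINUOUS(6, 2, 8)  -> 6 - 2 = 4 contiguous free slots
 *   CHECK_WRITE_SPACE(6, 2, 8)             -> MIN(3, 4) = 3
 */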
+
+#endif /* _bcmpcie_h_ */
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: bcmsdbus.h 408155 2013-06-17 21:52:27Z $
+ * $Id: bcmsdbus.h 408158 2013-06-17 22:15:35Z $
*/
#ifndef _sdio_api_h_
#define SDIOH_DATA_PIO 0 /* PIO mode */
#define SDIOH_DATA_DMA 1 /* DMA mode */
-#ifdef BCMSDIOH_TXGLOM
/* Max number of glommed pkts */
#ifdef CUSTOM_MAX_TXGLOM_SIZE
#define SDPCM_MAXGLOM_SIZE CUSTOM_MAX_TXGLOM_SIZE
#else
-#define SDPCM_MAXGLOM_SIZE 10
+#define SDPCM_MAXGLOM_SIZE 40
#endif /* CUSTOM_MAX_TXGLOM_SIZE */
#define SDPCM_TXGLOM_CPY 0 /* SDIO 2.0 should use copy mode */
#define SDPCM_TXGLOM_MDESC 1 /* SDIO 3.0 should use multi-desc mode */
-#ifdef BCMSDIOH_TXGLOM_HIGHSPEED
-#define SDPCM_DEFGLOM_MODE SDPCM_TXGLOM_MDESC
#ifdef CUSTOM_DEF_TXGLOM_SIZE
#define SDPCM_DEFGLOM_SIZE CUSTOM_DEF_TXGLOM_SIZE
#else
-#define SDPCM_DEFGLOM_SIZE 10
+#define SDPCM_DEFGLOM_SIZE SDPCM_MAXGLOM_SIZE
#endif /* CUSTOM_DEF_TXGLOM_SIZE */
-#else
-#define SDPCM_DEFGLOM_MODE SDPCM_TXGLOM_CPY
-#define SDPCM_DEFGLOM_SIZE 3
-#endif /* BCMSDIOH_TXGLOM_HIGHSPEED */
#if SDPCM_DEFGLOM_SIZE > SDPCM_MAXGLOM_SIZE
#warning "SDPCM_DEFGLOM_SIZE cannot be higher than SDPCM_MAXGLOM_SIZE!!"
#undef SDPCM_DEFGLOM_SIZE
#define SDPCM_DEFGLOM_SIZE SDPCM_MAXGLOM_SIZE
#endif
-#endif /* BCMSDIOH_TXGLOM */
typedef int SDIOH_API_RC;
/* callback function, taking one arg */
typedef void (*sdioh_cb_fn_t)(void *);
-/* attach, return handler on success, NULL if failed.
- * The handler shall be provided by all subsequent calls. No local cache
- * cfghdl points to the starting address of pci device mapped memory
- */
-extern sdioh_info_t * sdioh_attach(osl_t *osh, void *cfghdl, uint irq);
-extern SDIOH_API_RC sdioh_detach(osl_t *osh, sdioh_info_t *si);
extern SDIOH_API_RC sdioh_interrupt_register(sdioh_info_t *si, sdioh_cb_fn_t fn, void *argh);
extern SDIOH_API_RC sdioh_interrupt_deregister(sdioh_info_t *si);
uint rw, uint fnc_num, uint32 addr, uint regwidth, uint32 buflen, uint8 *buffer,
void *pkt);
-#ifdef BCMSDIOH_TXGLOM
-extern void sdioh_glom_post(sdioh_info_t *sd, uint8 *frame, void *pkt, uint len);
-extern void sdioh_glom_clear(sdioh_info_t *sd);
-extern uint sdioh_set_mode(sdioh_info_t *sd, uint mode);
-extern bool sdioh_glom_enabled(void);
-#else
-#define sdioh_glom_post(a, b, c, d)
-#define sdioh_glom_clear(a)
-#define sdioh_set_mode(a) (0)
-#define sdioh_glom_enabled() (FALSE)
-#endif
-
/* get cis data */
extern SDIOH_API_RC sdioh_cis_read(sdioh_info_t *si, uint fuc, uint8 *cis, uint32 length);
/* Reset and re-initialize the device */
extern int sdioh_sdio_reset(sdioh_info_t *si);
-/* Helper function */
-void *bcmsdh_get_sdioh(bcmsdh_info_t *sdh);
-
#if defined(BCMSDIOH_STD)
*
* $ Copyright Open License Broadcom Corporation $
*
- * $Id: bcmsdh.h 414953 2013-07-26 17:36:27Z $
+ * $Id: bcmsdh.h 450676 2014-01-22 22:45:13Z $
*/
/**
#define BCMSDH_ERROR(x)
#define BCMSDH_INFO(x)
-#if (defined(BCMSDIOH_STD) || defined(BCMSDIOH_BCM) || defined(BCMSDIOH_SPI))
+#if defined(BCMSDIO) && (defined(BCMSDIOH_STD) || defined(BCMSDIOH_BCM) || \
+ defined(BCMSDIOH_SPI))
#define BCMSDH_ADAPTER
#endif /* BCMSDIO && (BCMSDIOH_STD || BCMSDIOH_BCM || BCMSDIOH_SPI) */
typedef struct bcmsdh_info bcmsdh_info_t;
typedef void (*bcmsdh_cb_fn_t)(void *);
-/* Attach and build an interface to the underlying SD host driver.
- * - Allocates resources (structs, arrays, mem, OS handles, etc) needed by bcmsdh.
- * - Returns the bcmsdh handle and virtual address base for register access.
- * The returned handle should be used in all subsequent calls, but the bcmsh
- * implementation may maintain a single "default" handle (e.g. the first or
- * most recent one) to enable single-instance implementations to pass NULL.
+#if 0 && (NDISVER >= 0x0630) && 1
+extern bcmsdh_info_t *bcmsdh_attach(osl_t *osh, void *cfghdl,
+ void **regsva, uint irq, shared_info_t *sh);
+#else
+extern bcmsdh_info_t *bcmsdh_attach(osl_t *osh, void *sdioh, ulong *regsva);
+/**
+ * BCMSDH API context
*/
-extern bcmsdh_info_t *bcmsdh_attach(osl_t *osh, void *cfghdl, void **regsva, uint irq);
+struct bcmsdh_info
+{
+ bool init_success; /* underlying driver successfully attached */
+ void *sdioh; /* handler for sdioh */
+ uint32 vendevid; /* Target Vendor and Device ID on SD bus */
+ osl_t *osh;
+ bool regfail; /* Save status of last reg_read/reg_write call */
+ uint32 sbwad; /* Save backplane window address */
+ void *os_cxt; /* Pointer to per-OS private data */
+};
+#endif
/* Detach - freeup resources allocated in attach */
extern int bcmsdh_detach(osl_t *osh, void *sdh);
/* helper functions */
-extern void *bcmsdh_get_sdioh(bcmsdh_info_t *sdh);
-
/* callback functions */
typedef struct {
- /* attach to device */
- void *(*attach)(uint16 vend_id, uint16 dev_id, uint16 bus, uint16 slot,
+ /* probe the device */
+ void *(*probe)(uint16 vend_id, uint16 dev_id, uint16 bus, uint16 slot,
uint16 func, uint bustype, void * regsva, osl_t * osh,
void * param);
- /* detach from device */
- void (*detach)(void *ch);
+ /* remove the device */
+ void (*remove)(void *context);
+ /* can we suspend now */
+ int (*suspend)(void *context);
+ /* resume from suspend */
+ int (*resume)(void *context);
} bcmsdh_driver_t;
/* platform specific/high level functions */
extern int bcmsdh_reg_sdio_notify(void* semaphore);
extern void bcmsdh_unreg_sdio_notify(void);
-extern int bcmsdh_set_drvdata(void * dhdp);
-
#if defined(OOB_INTR_ONLY)
-extern int bcmsdh_register_oob_intr(void * dhdp);
-extern void bcmsdh_unregister_oob_intr(void);
-extern void bcmsdh_oob_intr_set(bool enable);
-extern bool bcmsdh_is_oob_intr_registered(void);
-#endif
+extern int bcmsdh_oob_intr_register(bcmsdh_info_t *bcmsdh, bcmsdh_cb_fn_t oob_irq_handler,
+ void* oob_irq_handler_context);
+extern void bcmsdh_oob_intr_unregister(bcmsdh_info_t *sdh);
+extern void bcmsdh_oob_intr_set(bcmsdh_info_t *sdh, bool enable);
+#endif
+extern void bcmsdh_dev_pm_stay_awake(bcmsdh_info_t *sdh);
+extern void bcmsdh_dev_relax(bcmsdh_info_t *sdh);
+extern bool bcmsdh_dev_pm_enabled(bcmsdh_info_t *sdh);
+
+int bcmsdh_suspend(bcmsdh_info_t *bcmsdh);
+int bcmsdh_resume(bcmsdh_info_t *bcmsdh);
/* Function to pass device-status bits to DHD. */
extern uint32 bcmsdh_get_dstatus(void *sdh);
/*
* BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
*
- * Copyright (C) 1999-2013, Broadcom Corporation
+ * Copyright (C) 1999-2014, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
- * $Id: bcmsdh_sdmmc.h 396592 2013-04-13 16:14:38Z $
+ * $Id: bcmsdh_sdmmc.h 408158 2013-06-17 22:15:35Z $
*/
#ifndef __BCMSDH_SDMMC_H__
#define sd_ack_intr(sd)
#define sd_wakeup(sd);
-/* Allocate/init/free per-OS private data */
-extern int sdioh_sdmmc_osinit(sdioh_info_t *sd);
-extern void sdioh_sdmmc_osfree(sdioh_info_t *sd);
-
#define sd_log(x)
#define SDIOH_ASSERT(exp) \
/* private bus modes */
#define SDIOH_MODE_SD4 2
#define CLIENT_INTR 0x100 /* Get rid of this! */
-
-#ifdef BCMSDIOH_TXGLOM
-
-typedef struct glom_buf {
- void *glom_pkt_head;
- void *glom_pkt_tail;
- uint32 count; /* Total number of pkts queued */
-} glom_buf_t;
-#endif /* BCMSDIOH_TXGLOM */
+#define SDIOH_SDMMC_MAX_SG_ENTRIES 32
struct sdioh_info {
osl_t *osh; /* osh handler */
+ void *bcmsdh; /* upper layer handle */
	bool client_intr_enabled; /* interrupt connected flag */
bool intr_handler_valid; /* client driver interrupt handler valid */
sdioh_cb_fn_t intr_handler; /* registered interrupt handler */
void *intr_handler_arg; /* argument to call interrupt handler */
uint16 intmask; /* Current active interrupts */
- void *sdos_info; /* Pointer to per-OS private data */
-
- uint irq; /* Client irq */
- int intrcount; /* Client interrupts */
+ int intrcount; /* Client interrupts */
bool sd_use_dma; /* DMA on CMD53 */
bool sd_blockmode; /* sd_blockmode == FALSE => 64 Byte Cmd 53s. */
/* Must be on for sd_multiblock to be effective */
- bool use_client_ints; /* If this is false, make sure to restore */
- int sd_mode; /* SD1/SD4/SPI */
- int client_block_size[SDIOD_MAX_IOFUNCS]; /* Blocksize */
- uint8 num_funcs; /* Supported funcs on client */
- uint32 com_cis_ptr;
+ bool use_client_ints; /* If this is false, make sure to restore */
+ int sd_mode; /* SD1/SD4/SPI */
+ int client_block_size[SDIOD_MAX_IOFUNCS]; /* Blocksize */
+ uint8 num_funcs; /* Supported funcs on client */
+ uint32 com_cis_ptr;
uint32 func_cis_ptr[SDIOD_MAX_IOFUNCS];
-
-#define SDIOH_SDMMC_MAX_SG_ENTRIES 32
- struct scatterlist sg_list[SDIOH_SDMMC_MAX_SG_ENTRIES];
bool use_rxchain;
+ struct scatterlist sg_list[SDIOH_SDMMC_MAX_SG_ENTRIES];
+ struct sdio_func fake_func0;
+ struct sdio_func *func[SDIOD_MAX_IOFUNCS];
-#ifdef BCMSDIOH_TXGLOM
- glom_buf_t glom_info; /* pkt information used for glomming */
- uint txglom_mode; /* Txglom mode: 0 - copy, 1 - multi-descriptor */
-#endif
};
/************************************************************
extern int sdioh_sdmmc_register_irq(sdioh_info_t *sd, uint irq);
extern void sdioh_sdmmc_free_irq(uint irq, sdioh_info_t *sd);
+extern sdioh_info_t *sdioh_attach(osl_t *osh, struct sdio_func *func);
+extern SDIOH_API_RC sdioh_detach(osl_t *osh, sdioh_info_t *sd);
+
typedef struct _BCMSDH_SDMMC_INSTANCE {
sdioh_info_t *sd;
struct sdio_func *func[SDIOD_MAX_IOFUNCS];
} BCMSDH_SDMMC_INSTANCE, *PBCMSDH_SDMMC_INSTANCE;
-
#endif /* __BCMSDH_SDMMC_H__ */
*
* $Copyright Open 2005 Broadcom Corporation$
*
- * $Id: bcmsdpcm.h 364353 2012-10-23 20:31:46Z $
+ * $Id: bcmsdpcm.h 472405 2014-04-23 23:46:55Z $
*/
#ifndef _bcmsdpcm_h_
uint32 assert_exp_addr;
uint32 assert_file_addr;
uint32 assert_line;
- uint32 console_addr; /* Address of hndrte_cons_t */
+ uint32 console_addr; /* Address of hnd_cons_t */
uint32 msgtrace_addr;
- uint32 brpt_addr;
+ uint32 fwid;
} sdpcm_shared_t;
extern sdpcm_shared_t sdpcm_shared;
-/* Function can be used to notify host of FW halt */
-extern void sdpcmd_fwhalt(void);
-
#endif /* _bcmsdpcm_h_ */
*
* $ Copyright Open Broadcom Corporation $
*
- * $Id: bcmsdstd.h 343301 2012-07-06 13:07:32Z $
+ * $Id: bcmsdstd.h 455390 2014-02-13 22:14:56Z $
*/
#ifndef _BCM_SD_STD_H
#define _BCM_SD_STD_H
#ifdef BCMSDIOH_TXGLOM
-/* Setting the MAX limit to 10 */
-#define SDIOH_MAXGLOM_SIZE 10
+/* Total glom pkt can not exceed 64K
+ * need one more slot for glom padding packet
+ */
+#define SDIOH_MAXGLOM_SIZE (40+1)
typedef struct glom_buf {
uint32 count; /* Total number of pkts queued */
uint target_dev; /* Target device ID */
uint16 intmask; /* Current active interrupts */
void *sdos_info; /* Pointer to per-OS private data */
+ void *bcmsdh; /* handler to upper layer stack (bcmsdh) */
uint32 controller_type; /* Host controller type */
uint8 version; /* Host Controller Spec Compliance Version */
- uint irq; /* Client irq */
- int intrcount; /* Client interrupts */
- int local_intrcount; /* Controller interrupts */
- bool host_init_done; /* Controller initted */
- bool card_init_done; /* Client SDIO interface initted */
- bool polled_mode; /* polling for command completion */
-
- bool sd_blockmode; /* sd_blockmode == FALSE => 64 Byte Cmd 53s. */
+ uint irq; /* Client irq */
+ int intrcount; /* Client interrupts */
+ int local_intrcount; /* Controller interrupts */
+ bool host_init_done; /* Controller initted */
+ bool card_init_done; /* Client SDIO interface initted */
+ bool polled_mode; /* polling for command completion */
+
+ bool sd_blockmode; /* sd_blockmode == FALSE => 64 Byte Cmd 53s. */
/* Must be on for sd_multiblock to be effective */
- bool use_client_ints; /* If this is false, make sure to restore */
+ bool use_client_ints; /* If this is false, make sure to restore */
/* polling hack in wl_linux.c:wl_timer() */
- int adapter_slot; /* Maybe dealing with multiple slots/controllers */
- int sd_mode; /* SD1/SD4/SPI */
- int client_block_size[SDIOD_MAX_IOFUNCS]; /* Blocksize */
- uint32 data_xfer_count; /* Current transfer */
- uint16 card_rca; /* Current Address */
+ int adapter_slot; /* Maybe dealing with multiple slots/controllers */
+ int sd_mode; /* SD1/SD4/SPI */
+ int client_block_size[SDIOD_MAX_IOFUNCS]; /* Blocksize */
+ uint32 data_xfer_count; /* Current transfer */
+ uint16 card_rca; /* Current Address */
int8 sd_dma_mode; /* DMA Mode (PIO, SDMA, ... ADMA2) on CMD53 */
- uint8 num_funcs; /* Supported funcs on client */
- uint32 com_cis_ptr;
- uint32 func_cis_ptr[SDIOD_MAX_IOFUNCS];
+ uint8 num_funcs; /* Supported funcs on client */
+ uint32 com_cis_ptr;
+ uint32 func_cis_ptr[SDIOD_MAX_IOFUNCS];
void *dma_buf; /* DMA Buffer virtual address */
ulong dma_phys; /* DMA Buffer physical address */
void *adma2_dscr_buf; /* ADMA2 Descriptor Buffer virtual address */
#define CHECK_TUNING_PRE_DATA 1
#define CHECK_TUNING_POST_DATA 2
+
+#ifdef DHD_DEBUG
+#define SD_DHD_DISABLE_PERIODIC_TUNING 0x01
+#define SD_DHD_ENABLE_PERIODIC_TUNING 0x00
+#endif
+
+
/************************************************************
* Internal interfaces: per-port references into bcmsdstd.c
*/
*/
/* Register mapping routines */
-extern uint32 *sdstd_reg_map(osl_t *osh, int32 addr, int size);
-extern void sdstd_reg_unmap(osl_t *osh, int32 addr, int size);
+extern uint32 *sdstd_reg_map(osl_t *osh, ulong addr, int size);
+extern void sdstd_reg_unmap(osl_t *osh, ulong addr, int size);
/* Interrupt (de)registration routines */
extern int sdstd_register_irq(sdioh_info_t *sd, uint irq);
extern void sdstd_unlock(sdioh_info_t *sd);
extern void sdstd_waitlockfree(sdioh_info_t *sd);
+/* OS-specific wrappers for safe concurrent register access */
+extern void sdstd_os_lock_irqsave(sdioh_info_t *sd, ulong* flags);
+extern void sdstd_os_unlock_irqrestore(sdioh_info_t *sd, ulong* flags);
+
/* OS-specific wait-for-interrupt-or-status */
extern int sdstd_waitbits(sdioh_info_t *sd, uint16 norm, uint16 err, bool yield, uint16 *bits);
extern void sdstd_3_osinit_tuning(sdioh_info_t *sd);
extern void sdstd_3_osclean_tuning(sdioh_info_t *sd);
+extern void sdstd_enable_disable_periodic_timer(sdioh_info_t * sd, uint val);
+
+extern sdioh_info_t *sdioh_attach(osl_t *osh, void *bar0, uint irq);
+extern SDIOH_API_RC sdioh_detach(osl_t *osh, sdioh_info_t *sd);
#endif /* _BCM_SD_STD_H */
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: bcmutils.h 412804 2013-07-16 16:26:39Z $
+ * $Id: bcmutils.h 490808 2014-07-12 00:33:13Z $
*/
#ifndef _bcmutils_h_
extern "C" {
#endif
+
#ifdef PKTQ_LOG
#include <wlioctl.h>
#endif
-
-#define _BCM_U 0x01
-#define _BCM_L 0x02
-#define _BCM_D 0x04
-#define _BCM_C 0x08
-#define _BCM_P 0x10
-#define _BCM_S 0x20
-#define _BCM_X 0x40
-#define _BCM_SP 0x80
+/* ctype replacement */
+#define _BCM_U 0x01 /* upper */
+#define _BCM_L 0x02 /* lower */
+#define _BCM_D 0x04 /* digit */
+#define _BCM_C 0x08 /* cntrl */
+#define _BCM_P 0x10 /* punct */
+#define _BCM_S 0x20 /* white space (space/lf/tab) */
+#define _BCM_X 0x40 /* hex digit */
+#define _BCM_SP 0x80 /* hard space (0x20) */
extern const unsigned char bcm_ctype[];
#define bcm_ismask(x) (bcm_ctype[(int)(unsigned char)(x)])
#define bcm_tolower(c) (bcm_isupper((c)) ? ((c) + 'a' - 'A') : (c))
#define bcm_toupper(c) (bcm_islower((c)) ? ((c) + 'A' - 'a') : (c))
+#define CIRCULAR_ARRAY_FULL(rd_idx, wr_idx, max) ((wr_idx + 1)%max == rd_idx)
+/* Buffer structure for collecting string-formatted data
+* using bcm_bprintf() API.
+* Use bcm_binit() to initialize before use
+*/
struct bcmstrbuf {
- char *buf;
- unsigned int size;
- char *origbuf;
- unsigned int origsize;
+ char *buf; /* pointer to current position in origbuf */
+ unsigned int size; /* current (residual) size in bytes */
+	char *origbuf;		/* unmodified pointer to original buffer */
+	unsigned int origsize;	/* unmodified original buffer size in bytes */
};
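+/*
+ * Illustrative usage sketch (not part of the original header; the buffer name
+ * and counters are hypothetical):
+ *
+ *	char dump_buf[256];
+ *	struct bcmstrbuf b;
+ *	bcm_binit(&b, dump_buf, sizeof(dump_buf));
+ *	bcm_bprintf(&b, "rx %u tx %u\n", rx_cnt, tx_cnt);
+ */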
-
+/* ** driver-only section ** */
#ifdef BCMDRIVER
#include <osl.h>
+#include <hnd_pktq.h>
+#include <hnd_pktpool.h>
-#define GPIO_PIN_NOTDEFINED 0x20
+#define GPIO_PIN_NOTDEFINED 0x20 /* Pin not defined */
+/*
+ * Spin at most 'us' microseconds while 'exp' is true.
+ * Caller should explicitly test 'exp' when this completes
+ * and take appropriate error action if 'exp' is still true.
+ */
+#ifndef SPINWAIT_POLL_PERIOD
+#define SPINWAIT_POLL_PERIOD 10
+#endif
#define SPINWAIT(exp, us) { \
- uint countdown = (us) + 9; \
- while ((exp) && (countdown >= 10)) {\
- OSL_DELAY(10); \
- countdown -= 10; \
+ uint countdown = (us) + (SPINWAIT_POLL_PERIOD - 1); \
+ while ((exp) && (countdown >= SPINWAIT_POLL_PERIOD)) { \
+ OSL_DELAY(SPINWAIT_POLL_PERIOD); \
+ countdown -= SPINWAIT_POLL_PERIOD; \
} \
}
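+/*
+ * Illustrative usage sketch (assumption; my_hw_busy() and dev are hypothetical):
+ * poll a busy flag for up to 100 us, then re-test it as required above.
+ *
+ *	SPINWAIT(my_hw_busy(dev), 100);
+ *	if (my_hw_busy(dev))
+ *		err = BCME_NOTREADY;
+ */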
-
-#ifndef PKTQ_LEN_DEFAULT
-#define PKTQ_LEN_DEFAULT 128
-#endif
-#ifndef PKTQ_MAX_PREC
-#define PKTQ_MAX_PREC 16
-#endif
-
-typedef struct pktq_prec {
- void *head;
- void *tail;
- uint16 len;
- uint16 max;
-} pktq_prec_t;
-
-#ifdef PKTQ_LOG
-typedef struct {
- uint32 requested;
- uint32 stored;
- uint32 saved;
- uint32 selfsaved;
- uint32 full_dropped;
- uint32 dropped;
- uint32 sacrificed;
- uint32 busy;
- uint32 retry;
- uint32 ps_retry;
- uint32 retry_drop;
- uint32 max_avail;
- uint32 max_used;
- uint32 queue_capacity;
- uint32 rtsfail;
- uint32 acked;
-} pktq_counters_t;
-#endif
-
-
-#define PKTQ_COMMON \
- uint16 num_prec; \
- uint16 hi_prec; \
- uint16 max; \
- uint16 len;
-
-
-struct pktq {
- PKTQ_COMMON
-
- struct pktq_prec q[PKTQ_MAX_PREC];
-#ifdef PKTQ_LOG
- pktq_counters_t _prec_cnt[PKTQ_MAX_PREC];
- pktq_counters_t _prec_bytes[PKTQ_MAX_PREC];
- uint32 _logtime;
-#endif
-};
-
-
-struct spktq {
- PKTQ_COMMON
-
- struct pktq_prec q[1];
-};
-
-#define PKTQ_PREC_ITER(pq, prec) for (prec = (pq)->num_prec - 1; prec >= 0; prec--)
-
-
-typedef bool (*ifpkt_cb_t)(void*, int);
-
-#ifdef BCMPKTPOOL
-#define POOL_ENAB(pool) ((pool) && (pool)->inited)
-#define SHARED_POOL (pktpool_shared)
-#else
-#define POOL_ENAB(bus) 0
-#define SHARED_POOL ((struct pktpool *)NULL)
-#endif
-
-#ifndef PKTPOOL_LEN_MAX
-#define PKTPOOL_LEN_MAX 40
-#endif
-#define PKTPOOL_CB_MAX 3
-
-struct pktpool;
-typedef void (*pktpool_cb_t)(struct pktpool *pool, void *arg);
-typedef struct {
- pktpool_cb_t cb;
- void *arg;
-} pktpool_cbinfo_t;
-
-#ifdef BCMDBG_POOL
-
-#define POOL_IDLE 0
-#define POOL_RXFILL 1
-#define POOL_RXDH 2
-#define POOL_RXD11 3
-#define POOL_TXDH 4
-#define POOL_TXD11 5
-#define POOL_AMPDU 6
-#define POOL_TXENQ 7
-
-typedef struct {
- void *p;
- uint32 cycles;
- uint32 dur;
-} pktpool_dbg_t;
-
-typedef struct {
- uint8 txdh;
- uint8 txd11;
- uint8 enq;
- uint8 rxdh;
- uint8 rxd11;
- uint8 rxfill;
- uint8 idle;
-} pktpool_stats_t;
-#endif
-
-typedef struct pktpool {
- bool inited;
- uint16 r;
- uint16 w;
- uint16 len;
- uint16 maxlen;
- uint16 plen;
- bool istx;
- bool empty;
- uint8 cbtoggle;
- uint8 cbcnt;
- uint8 ecbcnt;
- bool emptycb_disable;
- pktpool_cbinfo_t *availcb_excl;
- pktpool_cbinfo_t cbs[PKTPOOL_CB_MAX];
- pktpool_cbinfo_t ecbs[PKTPOOL_CB_MAX];
- void *q[PKTPOOL_LEN_MAX + 1];
-
-#ifdef BCMDBG_POOL
- uint8 dbg_cbcnt;
- pktpool_cbinfo_t dbg_cbs[PKTPOOL_CB_MAX];
- uint16 dbg_qlen;
- pktpool_dbg_t dbg_q[PKTPOOL_LEN_MAX + 1];
-#endif
-} pktpool_t;
-
-extern pktpool_t *pktpool_shared;
-
-extern int pktpool_init(osl_t *osh, pktpool_t *pktp, int *pktplen, int plen, bool istx);
-extern int pktpool_deinit(osl_t *osh, pktpool_t *pktp);
-extern int pktpool_fill(osl_t *osh, pktpool_t *pktp, bool minimal);
-extern void* pktpool_get(pktpool_t *pktp);
-extern void pktpool_free(pktpool_t *pktp, void *p);
-extern int pktpool_add(pktpool_t *pktp, void *p);
-extern uint16 pktpool_avail(pktpool_t *pktp);
-extern int pktpool_avail_notify_normal(osl_t *osh, pktpool_t *pktp);
-extern int pktpool_avail_notify_exclusive(osl_t *osh, pktpool_t *pktp, pktpool_cb_t cb);
-extern int pktpool_avail_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg);
-extern int pktpool_empty_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg);
-extern int pktpool_setmaxlen(pktpool_t *pktp, uint16 maxlen);
-extern int pktpool_setmaxlen_strict(osl_t *osh, pktpool_t *pktp, uint16 maxlen);
-extern void pktpool_emptycb_disable(pktpool_t *pktp, bool disable);
-extern bool pktpool_emptycb_disabled(pktpool_t *pktp);
-
-#define POOLPTR(pp) ((pktpool_t *)(pp))
-#define pktpool_len(pp) (POOLPTR(pp)->len - 1)
-#define pktpool_plen(pp) (POOLPTR(pp)->plen)
-#define pktpool_maxlen(pp) (POOLPTR(pp)->maxlen)
-
-#ifdef BCMDBG_POOL
-extern int pktpool_dbg_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg);
-extern int pktpool_start_trigger(pktpool_t *pktp, void *p);
-extern int pktpool_dbg_dump(pktpool_t *pktp);
-extern int pktpool_dbg_notify(pktpool_t *pktp);
-extern int pktpool_stats_dump(pktpool_t *pktp, pktpool_stats_t *stats);
-#endif
-
-
+/* forward definition of ether_addr structure used by some function prototypes */
struct ether_addr;
extern int ether_isbcast(const void *ea);
extern int ether_isnulladdr(const void *ea);
+#define BCM_MAC_RXCPL_IDX_BITS 12
+#define BCM_MAX_RXCPL_IDX_INVALID 0
+#define BCM_MAC_RXCPL_IFIDX_BITS 3
+#define BCM_MAC_RXCPL_DOT11_BITS 1
+#define BCM_MAX_RXCPL_IFIDX ((1 << BCM_MAC_RXCPL_IFIDX_BITS) - 1)
+#define BCM_MAC_RXCPL_FLAG_BITS 4
+#define BCM_RXCPL_FLAGS_IN_TRANSIT 0x1
+#define BCM_RXCPL_FLAGS_FIRST_IN_FLUSHLIST 0x2
+#define BCM_RXCPL_FLAGS_RXCPLVALID 0x4
+#define BCM_RXCPL_FLAGS_RSVD 0x8
+
+#define BCM_RXCPL_SET_IN_TRANSIT(a) ((a)->rxcpl_id.flags |= BCM_RXCPL_FLAGS_IN_TRANSIT)
+#define BCM_RXCPL_CLR_IN_TRANSIT(a) ((a)->rxcpl_id.flags &= ~BCM_RXCPL_FLAGS_IN_TRANSIT)
+#define BCM_RXCPL_IN_TRANSIT(a) ((a)->rxcpl_id.flags & BCM_RXCPL_FLAGS_IN_TRANSIT)
+
+#define BCM_RXCPL_SET_FRST_IN_FLUSH(a) ((a)->rxcpl_id.flags |= BCM_RXCPL_FLAGS_FIRST_IN_FLUSHLIST)
+#define BCM_RXCPL_CLR_FRST_IN_FLUSH(a) ((a)->rxcpl_id.flags &= ~BCM_RXCPL_FLAGS_FIRST_IN_FLUSHLIST)
+#define BCM_RXCPL_FRST_IN_FLUSH(a) ((a)->rxcpl_id.flags & BCM_RXCPL_FLAGS_FIRST_IN_FLUSHLIST)
+
+#define BCM_RXCPL_SET_VALID_INFO(a) ((a)->rxcpl_id.flags |= BCM_RXCPL_FLAGS_RXCPLVALID)
+#define BCM_RXCPL_CLR_VALID_INFO(a) ((a)->rxcpl_id.flags &= ~BCM_RXCPL_FLAGS_RXCPLVALID)
+#define BCM_RXCPL_VALID_INFO(a) (((a)->rxcpl_id.flags & BCM_RXCPL_FLAGS_RXCPLVALID) ? TRUE : FALSE)
+
+
+struct reorder_rxcpl_id_list {
+ uint16 head;
+ uint16 tail;
+ uint32 cnt;
+};
-
-#define pktq_psetmax(pq, prec, _max) ((pq)->q[prec].max = (_max))
-#define pktq_pmax(pq, prec) ((pq)->q[prec].max)
-#define pktq_plen(pq, prec) ((pq)->q[prec].len)
-#define pktq_pavail(pq, prec) ((pq)->q[prec].max - (pq)->q[prec].len)
-#define pktq_pfull(pq, prec) ((pq)->q[prec].len >= (pq)->q[prec].max)
-#define pktq_pempty(pq, prec) ((pq)->q[prec].len == 0)
-
-#define pktq_ppeek(pq, prec) ((pq)->q[prec].head)
-#define pktq_ppeek_tail(pq, prec) ((pq)->q[prec].tail)
-
-extern void *pktq_penq(struct pktq *pq, int prec, void *p);
-extern void *pktq_penq_head(struct pktq *pq, int prec, void *p);
-extern void *pktq_pdeq(struct pktq *pq, int prec);
-extern void *pktq_pdeq_prev(struct pktq *pq, int prec, void *prev_p);
-extern void *pktq_pdeq_with_fn(struct pktq *pq, int prec, ifpkt_cb_t fn, int arg);
-extern void *pktq_pdeq_tail(struct pktq *pq, int prec);
-
-extern void pktq_pflush(osl_t *osh, struct pktq *pq, int prec, bool dir,
- ifpkt_cb_t fn, int arg);
-
-extern bool pktq_pdel(struct pktq *pq, void *p, int prec);
-
-
-
-extern int pktq_mlen(struct pktq *pq, uint prec_bmp);
-extern void *pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out);
-extern void *pktq_mpeek(struct pktq *pq, uint prec_bmp, int *prec_out);
-
-
-
-#define pktq_len(pq) ((int)(pq)->len)
-#define pktq_max(pq) ((int)(pq)->max)
-#define pktq_avail(pq) ((int)((pq)->max - (pq)->len))
-#define pktq_full(pq) ((pq)->len >= (pq)->max)
-#define pktq_empty(pq) ((pq)->len == 0)
-
-
-#define pktenq(pq, p) pktq_penq(((struct pktq *)(void *)pq), 0, (p))
-#define pktenq_head(pq, p) pktq_penq_head(((struct pktq *)(void *)pq), 0, (p))
-#define pktdeq(pq) pktq_pdeq(((struct pktq *)(void *)pq), 0)
-#define pktdeq_tail(pq) pktq_pdeq_tail(((struct pktq *)(void *)pq), 0)
-#define pktqinit(pq, len) pktq_init(((struct pktq *)(void *)pq), 1, len)
-
-extern void pktq_init(struct pktq *pq, int num_prec, int max_len);
-extern void pktq_set_max_plen(struct pktq *pq, int prec, int max_len);
-
-
-extern void *pktq_deq(struct pktq *pq, int *prec_out);
-extern void *pktq_deq_tail(struct pktq *pq, int *prec_out);
-extern void *pktq_peek(struct pktq *pq, int *prec_out);
-extern void *pktq_peek_tail(struct pktq *pq, int *prec_out);
-extern void pktq_flush(osl_t *osh, struct pktq *pq, bool dir, ifpkt_cb_t fn, int arg);
-
-
-
+typedef struct rxcpl_id {
+ uint32 idx : BCM_MAC_RXCPL_IDX_BITS;
+ uint32 next_idx : BCM_MAC_RXCPL_IDX_BITS;
+ uint32 ifidx : BCM_MAC_RXCPL_IFIDX_BITS;
+ uint32 dot11 : BCM_MAC_RXCPL_DOT11_BITS;
+ uint32 flags : BCM_MAC_RXCPL_FLAG_BITS;
+} rxcpl_idx_id_t;
+
+typedef struct rxcpl_data_len {
+ uint32 metadata_len_w : 6;
+ uint32 dataoffset: 10;
+ uint32 datalen : 16;
+} rxcpl_data_len_t;
+
+typedef struct rxcpl_info {
+ rxcpl_idx_id_t rxcpl_id;
+ uint32 host_pktref;
+ union {
+ rxcpl_data_len_t rxcpl_len;
+ struct rxcpl_info *free_next;
+ };
+} rxcpl_info_t;
+
+/* rx completion list */
+typedef struct bcm_rxcplid_list {
+ uint32 max;
+ uint32 avail;
+ rxcpl_info_t *rxcpl_ptr;
+ rxcpl_info_t *free_list;
+} bcm_rxcplid_list_t;
+
+extern bool bcm_alloc_rxcplid_list(osl_t *osh, uint32 max);
+extern rxcpl_info_t * bcm_alloc_rxcplinfo(void);
+extern void bcm_free_rxcplinfo(rxcpl_info_t *ptr);
+extern void bcm_chain_rxcplid(uint16 first, uint16 next);
+extern rxcpl_info_t *bcm_id2rxcplinfo(uint16 id);
+extern uint16 bcm_rxcplinfo2id(rxcpl_info_t *ptr);
+extern rxcpl_info_t *bcm_rxcpllist_end(rxcpl_info_t *ptr, uint32 *count);
+
+/* externs */
+/* packet */
extern uint pktcopy(osl_t *osh, void *p, uint offset, int len, uchar *buf);
extern uint pktfrombuf(osl_t *osh, void *p, uint offset, int len, uchar *buf);
extern uint pkttotlen(osl_t *osh, void *p);
extern uint8 *pktdataoffset(osl_t *osh, void *p, uint offset);
extern void *pktoffset(osl_t *osh, void *p, uint offset);
+/* Get priority from a packet and pass it back in scb (or equiv) */
+#define PKTPRIO_VDSCP 0x100 /* DSCP prio found after VLAN tag */
+#define PKTPRIO_VLAN 0x200 /* VLAN prio found */
+#define PKTPRIO_UPD 0x400 /* DSCP used to update VLAN prio */
+#define PKTPRIO_DSCP 0x800 /* DSCP prio found */
-#define PKTPRIO_VDSCP 0x100
-#define PKTPRIO_VLAN 0x200
-#define PKTPRIO_UPD 0x400
-#define PKTPRIO_DSCP 0x800
-
-
-
+/* DSCP type definitions (RFC4594) */
+/* AF1x: High-Throughput Data (RFC2597) */
#define DSCP_AF11 0x0A
#define DSCP_AF12 0x0C
#define DSCP_AF13 0x0E
-
+/* AF2x: Low-Latency Data (RFC2597) */
#define DSCP_AF21 0x12
#define DSCP_AF22 0x14
#define DSCP_AF23 0x16
-
+/* AF3x: Multimedia Streaming (RFC2597) */
#define DSCP_AF31 0x1A
#define DSCP_AF32 0x1C
#define DSCP_AF33 0x1E
-
+/* EF: Telephony (RFC3246) */
#define DSCP_EF 0x2E
extern uint pktsetprio(void *pkt, bool update_vtag);
+extern bool pktgetdscp(uint8 *pktdata, uint pktlen, uint8 *dscp);
-
+/* string */
extern int bcm_atoi(const char *s);
extern ulong bcm_strtoul(const char *cp, char **endp, uint base);
extern char *bcmstrstr(const char *haystack, const char *needle);
+extern char *bcmstrnstr(const char *s, uint s_len, const char *substr, uint substr_len);
extern char *bcmstrcat(char *dest, const char *src);
extern char *bcmstrncat(char *dest, const char *src, uint size);
extern ulong wchar2ascii(char *abuf, ushort *wbuf, ushort wbuflen, ulong abuflen);
int bcmstrnicmp(const char* s1, const char* s2, int cnt);
-
+/* ethernet address */
extern char *bcm_ether_ntoa(const struct ether_addr *ea, char *buf);
extern int bcm_ether_atoe(const char *p, struct ether_addr *ea);
-
+/* ip address */
struct ipv4_addr;
extern char *bcm_ip_ntoa(struct ipv4_addr *ia, char *buf);
+extern char *bcm_ipv6_ntoa(void *ipv6, char *buf);
+extern int bcm_atoipv4(const char *p, struct ipv4_addr *ip);
-
+/* delay */
extern void bcm_mdelay(uint ms);
-
+/* variable access */
#define NVRAM_RECLAIM_CHECK(name)
extern char *getvar(char *vars, const char *name);
#define bcmdumplog(buf, size) *buf = '\0'
#define bcmdumplogent(buf, idx) -1
+#define TSF_TICKS_PER_MS 1000
+#define TS_ENTER 0xdeadbeef /* Timestamp profiling enter */
+#define TS_EXIT 0xbeefcafe /* Timestamp profiling exit */
+
#define bcmtslog(tstamp, fmt, a1, a2)
#define bcmprinttslogs()
#define bcmprinttstamp(us)
extern char *bcm_nvram_vars(uint *length);
extern int bcm_nvram_cache(void *sih);
+/* Support for sharing code across in-driver iovar implementations.
+ * The intent is that a driver use this structure to map iovar names
+ * to its (private) iovar identifiers, and the lookup function to
+ * find the entry. Macros are provided to map ids and get/set actions
+ * into a single number space for a switch statement.
+ */
-
-
+/* iovar structure */
typedef struct bcm_iovar {
- const char *name;
- uint16 varid;
- uint16 flags;
- uint16 type;
- uint16 minlen;
+ const char *name; /* name for lookup and display */
+ uint16 varid; /* id for switch */
+ uint16 flags; /* driver-specific flag bits */
+ uint16 type; /* base type of argument */
+ uint16 minlen; /* min length for buffer vars */
} bcm_iovar_t;
+/* varid definitions are per-driver, may use these get/set bits */
+/* IOVar action bits for id mapping */
+#define IOV_GET 0 /* Get an iovar */
+#define IOV_SET 1 /* Set an iovar */
-
-#define IOV_GET 0
-#define IOV_SET 1
-
-
+/* Varid to actionid mapping */
#define IOV_GVAL(id) ((id) * 2)
#define IOV_SVAL(id) ((id) * 2 + IOV_SET)
#define IOV_ISSET(actionid) ((actionid & IOV_SET) == IOV_SET)
#define IOV_ID(actionid) (actionid >> 1)
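+/*
+ * Illustrative sketch (assumption; IOV_FOO, foo_get/foo_set, ctx and int_val
+ * are hypothetical): a driver's iovar handler typically switches on the
+ * actionid built from its private varid.
+ *
+ *	switch (actionid) {
+ *	case IOV_GVAL(IOV_FOO):
+ *		int_val = foo_get(ctx);
+ *		bcopy(&int_val, arg, sizeof(int_val));
+ *		break;
+ *	case IOV_SVAL(IOV_FOO):
+ *		err = foo_set(ctx, int_val);
+ *		break;
+ *	}
+ */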
-
+/* flags are per-driver based on driver attributes */
extern const bcm_iovar_t *bcm_iovar_lookup(const bcm_iovar_t *table, const char *name);
extern int bcm_iovar_lencheck(const bcm_iovar_t *table, void *arg, int len, bool set);
defined(WLMSG_PRPKT) || defined(WLMSG_WSEC)
extern int bcm_format_ssid(char* buf, const uchar ssid[], uint ssid_len);
#endif
-#endif
-
-
-#define IOVT_VOID 0
-#define IOVT_BOOL 1
-#define IOVT_INT8 2
-#define IOVT_UINT8 3
-#define IOVT_INT16 4
-#define IOVT_UINT16 5
-#define IOVT_INT32 6
-#define IOVT_UINT32 7
-#define IOVT_BUFFER 8
+#endif /* BCMDRIVER */
+
+/* Base type definitions */
+#define IOVT_VOID	0	/* no value (implicitly set only) */
+#define IOVT_BOOL 1 /* any value ok (zero/nonzero) */
+#define IOVT_INT8 2 /* integer values are range-checked */
+#define IOVT_UINT8 3 /* unsigned int 8 bits */
+#define IOVT_INT16 4 /* int 16 bits */
+#define IOVT_UINT16 5 /* unsigned int 16 bits */
+#define IOVT_INT32 6 /* int 32 bits */
+#define IOVT_UINT32 7 /* unsigned int 32 bits */
+#define IOVT_BUFFER 8 /* buffer is size-checked as per minlen */
#define BCM_IOVT_VALID(type) (((unsigned int)(type)) <= IOVT_BUFFER)
-
+/* Initializer for IOV type strings */
#define BCM_IOV_TYPE_INIT { \
"void", \
"bool", \
(type == IOVT_INT32) || \
(type == IOVT_UINT32))
+/* ** driver/apps-shared section ** */
-
-#define BCME_STRLEN 64
+#define BCME_STRLEN 64 /* Max string length for BCM errors */
#define VALID_BCMERROR(e) ((e <= 0) && (e >= BCME_LAST))
-
-
-#define BCME_OK 0
-#define BCME_ERROR -1
-#define BCME_BADARG -2
-#define BCME_BADOPTION -3
-#define BCME_NOTUP -4
-#define BCME_NOTDOWN -5
-#define BCME_NOTAP -6
-#define BCME_NOTSTA -7
-#define BCME_BADKEYIDX -8
-#define BCME_RADIOOFF -9
-#define BCME_NOTBANDLOCKED -10
-#define BCME_NOCLK -11
-#define BCME_BADRATESET -12
-#define BCME_BADBAND -13
-#define BCME_BUFTOOSHORT -14
-#define BCME_BUFTOOLONG -15
-#define BCME_BUSY -16
-#define BCME_NOTASSOCIATED -17
-#define BCME_BADSSIDLEN -18
-#define BCME_OUTOFRANGECHAN -19
-#define BCME_BADCHAN -20
-#define BCME_BADADDR -21
-#define BCME_NORESOURCE -22
-#define BCME_UNSUPPORTED -23
-#define BCME_BADLEN -24
-#define BCME_NOTREADY -25
-#define BCME_EPERM -26
-#define BCME_NOMEM -27
-#define BCME_ASSOCIATED -28
-#define BCME_RANGE -29
-#define BCME_NOTFOUND -30
-#define BCME_WME_NOT_ENABLED -31
-#define BCME_TSPEC_NOTFOUND -32
-#define BCME_ACM_NOTSUPPORTED -33
-#define BCME_NOT_WME_ASSOCIATION -34
-#define BCME_SDIO_ERROR -35
-#define BCME_DONGLE_DOWN -36
-#define BCME_VERSION -37
-#define BCME_TXFAIL -38
-#define BCME_RXFAIL -39
-#define BCME_NODEVICE -40
-#define BCME_NMODE_DISABLED -41
-#define BCME_NONRESIDENT -42
-#define BCME_SCANREJECT -43
-#define BCME_USAGE_ERROR -44
-#define BCME_IOCTL_ERROR -45
-#define BCME_SERIAL_PORT_ERR -46
-#define BCME_LAST BCME_SERIAL_PORT_ERR
-
-
+/*
+ * Error codes may be added, but the defined ones should not be changed or deleted;
+ * these error codes are exposed to user code.
+ * Whenever a new error code is added to this list, please update the
+ * error string table below with the related error string and update the
+ * OSL files with the OS-specific error code map.
+*/
+
+#define BCME_OK 0 /* Success */
+#define BCME_ERROR -1 /* Error generic */
+#define BCME_BADARG -2 /* Bad Argument */
+#define BCME_BADOPTION -3 /* Bad option */
+#define BCME_NOTUP -4 /* Not up */
+#define BCME_NOTDOWN -5 /* Not down */
+#define BCME_NOTAP -6 /* Not AP */
+#define BCME_NOTSTA -7 /* Not STA */
+#define BCME_BADKEYIDX -8 /* BAD Key Index */
+#define BCME_RADIOOFF -9 /* Radio Off */
+#define BCME_NOTBANDLOCKED -10 /* Not band locked */
+#define BCME_NOCLK -11 /* No Clock */
+#define BCME_BADRATESET -12 /* BAD Rate valueset */
+#define BCME_BADBAND -13 /* BAD Band */
+#define BCME_BUFTOOSHORT -14 /* Buffer too short */
+#define BCME_BUFTOOLONG -15 /* Buffer too long */
+#define BCME_BUSY -16 /* Busy */
+#define BCME_NOTASSOCIATED -17 /* Not Associated */
+#define BCME_BADSSIDLEN -18 /* Bad SSID len */
+#define BCME_OUTOFRANGECHAN -19 /* Out of Range Channel */
+#define BCME_BADCHAN -20 /* Bad Channel */
+#define BCME_BADADDR -21 /* Bad Address */
+#define BCME_NORESOURCE -22 /* Not Enough Resources */
+#define BCME_UNSUPPORTED -23 /* Unsupported */
+#define BCME_BADLEN -24 /* Bad length */
+#define BCME_NOTREADY -25 /* Not Ready */
+#define BCME_EPERM -26 /* Not Permitted */
+#define BCME_NOMEM -27 /* No Memory */
+#define BCME_ASSOCIATED -28 /* Associated */
+#define BCME_RANGE -29 /* Not In Range */
+#define BCME_NOTFOUND -30 /* Not Found */
+#define BCME_WME_NOT_ENABLED -31 /* WME Not Enabled */
+#define BCME_TSPEC_NOTFOUND -32 /* TSPEC Not Found */
+#define BCME_ACM_NOTSUPPORTED -33 /* ACM Not Supported */
+#define BCME_NOT_WME_ASSOCIATION -34 /* Not WME Association */
+#define BCME_SDIO_ERROR -35 /* SDIO Bus Error */
+#define BCME_DONGLE_DOWN -36 /* Dongle Not Accessible */
+#define BCME_VERSION -37 /* Incorrect version */
+#define BCME_TXFAIL -38 /* TX failure */
+#define BCME_RXFAIL -39 /* RX failure */
+#define BCME_NODEVICE -40 /* Device not present */
+#define BCME_NMODE_DISABLED -41 /* NMODE disabled */
+#define BCME_NONRESIDENT -42 /* access to nonresident overlay */
+#define BCME_SCANREJECT -43 /* reject scan request */
+#define BCME_USAGE_ERROR -44 /* WLCMD usage error */
+#define BCME_IOCTL_ERROR -45 /* WLCMD ioctl error */
+#define BCME_SERIAL_PORT_ERR -46 /* RWL serial port error */
+#define BCME_DISABLED -47 /* Disabled in this build */
+#define BCME_DECERR -48 /* Decrypt error */
+#define BCME_ENCERR -49 /* Encrypt error */
+#define BCME_MICERR -50 /* Integrity/MIC error */
+#define BCME_REPLAY -51 /* Replay */
+#define BCME_IE_NOTFOUND -52 /* IE not found */
+#define BCME_LAST BCME_IE_NOTFOUND
+
+#define BCME_NOTENABLED BCME_DISABLED
+
+/* These are collection of BCME Error strings */
#define BCMERRSTRINGTABLE { \
"OK", \
"Undefined error", \
"WLCMD usage error", \
"WLCMD ioctl error", \
"RWL serial port error", \
+ "Disabled", \
+ "Decrypt error", \
+ "Encrypt error", \
+ "MIC error", \
+ "Replay", \
+ "IE not found", \
}
#ifndef ABS
#define ABS(a) (((a) < 0) ? -(a) : (a))
-#endif
+#endif /* ABS */
#ifndef MIN
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
-#endif
+#endif /* MIN */
#ifndef MAX
#define MAX(a, b) (((a) > (b)) ? (a) : (b))
-#endif
-
+#endif /* MAX */
+/* limit to [min, max] */
#ifndef LIMIT_TO_RANGE
#define LIMIT_TO_RANGE(x, min, max) \
((x) < (min) ? (min) : ((x) > (max) ? (max) : (x)))
-#endif
-
+#endif /* LIMIT_TO_RANGE */
+/* limit to max */
#ifndef LIMIT_TO_MAX
#define LIMIT_TO_MAX(x, max) \
(((x) > (max) ? (max) : (x)))
-#endif
-
+#endif /* LIMIT_TO_MAX */
+/* limit to min */
#ifndef LIMIT_TO_MIN
#define LIMIT_TO_MIN(x, min) \
(((x) < (min) ? (min) : (x)))
-#endif
+#endif /* LIMIT_TO_MIN */
+#define DELTA(curr, prev) ((curr) > (prev) ? ((curr) - (prev)) : \
+ (0xffffffff - (prev) + (curr) + 1))
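+/* Worked example (illustrative): DELTA(5, 0xfffffffe) == 7 across a 32-bit counter wrap. */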
#define CEIL(x, y) (((x) + ((y) - 1)) / (y))
-#define ROUNDUP(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
+#define ROUNDUP(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
+#define ROUNDDN(p, align) ((p) & ~((align) - 1))
#define ISALIGNED(a, x) (((uintptr)(a) & ((x) - 1)) == 0)
#define ALIGN_ADDR(addr, boundary) (void *)(((uintptr)(addr) + (boundary) - 1) \
& ~((boundary) - 1))
#ifndef OFFSETOF
#ifdef __ARMCC_VERSION
-
+/*
+ * The ARM RVCT compiler complains when using OFFSETOF where a constant
+ * expression is expected, such as an initializer for a static object.
+ * offsetof from the runtime library doesn't have that problem.
+ */
#include <stddef.h>
#define OFFSETOF(type, member) offsetof(type, member)
#else
-#define OFFSETOF(type, member) ((uint)(uintptr)&((type *)0)->member)
-#endif
-#endif
+# if ((__GNUC__ >= 4) && (__GNUC_MINOR__ >= 8))
+/* GCC 4.8+ complains when using our OFFSETOF macro in array length declarations. */
+# define OFFSETOF(type, member) __builtin_offsetof(type, member)
+# else
+# define OFFSETOF(type, member) ((uint)(uintptr)&((type *)0)->member)
+# endif /* GCC 4.8 or newer */
+#endif /* __ARMCC_VERSION */
+#endif /* OFFSETOF */
#ifndef ARRAYSIZE
#define ARRAYSIZE(a) (sizeof(a) / sizeof(a[0]))
#endif
-#ifndef ARRAYLAST
+#ifndef ARRAYLAST /* returns pointer to last array element */
#define ARRAYLAST(a) (&a[ARRAYSIZE(a)-1])
#endif
-
+/* Reference a function; used to prevent a static function from being optimized out */
extern void *_bcmutils_dummy_fn;
#define REFERENCE_FUNCTION(f) (_bcmutils_dummy_fn = (void *)(f))
-
+/* bit map related macros */
#ifndef setbit
-#ifndef NBBY
-#define NBBY 8
-#endif
+#ifndef NBBY /* the BSD family defines NBBY */
+#define NBBY 8 /* 8 bits per byte */
+#endif /* #ifndef NBBY */
#ifdef BCMUTILS_BIT_MACROS_USE_FUNCS
extern void setbit(void *array, uint bit);
extern void clrbit(void *array, uint bit);
#define isset(a, i) (((const uint8 *)a)[(i) / NBBY] & (1 << ((i) % NBBY)))
#define isclr(a, i) ((((const uint8 *)a)[(i) / NBBY] & (1 << ((i) % NBBY))) == 0)
#endif
-#endif
+#endif /* setbit */
+extern void set_bitrange(void *array, uint start, uint end, uint maxbit);
#define isbitset(a, i) (((a) & (1 << (i))) != 0)
#define NBITMASK(nbits) MAXBITVAL(nbits)
#define MAXNBVAL(nbyte) MAXBITVAL((nbyte) * 8)
+extern void bcm_bitprint32(const uint32 u32);
-#define MUX(pred, true, false) ((pred) ? (true) : (false))
+/*
+ * ----------------------------------------------------------------------------
+ * Multiword map of 2bits, nibbles
+ * setbit2 setbit4 (void *ptr, uint32 ix, uint32 val)
+ * getbit2 getbit4 (void *ptr, uint32 ix)
+ * ----------------------------------------------------------------------------
+ */
+
+#define DECLARE_MAP_API(NB, RSH, LSH, OFF, MSK) \
+static INLINE void setbit##NB(void *ptr, uint32 ix, uint32 val) \
+{ \
+ uint32 *addr = (uint32 *)ptr; \
+ uint32 *a = addr + (ix >> RSH); /* (ix / 2^RSH) */ \
+ uint32 pos = (ix & OFF) << LSH; /* (ix % 2^RSH) * 2^LSH */ \
+ uint32 mask = (MSK << pos); \
+ uint32 tmp = *a & ~mask; \
+ *a = tmp | (val << pos); \
+} \
+static INLINE uint32 getbit##NB(void *ptr, uint32 ix) \
+{ \
+ uint32 *addr = (uint32 *)ptr; \
+ uint32 *a = addr + (ix >> RSH); \
+ uint32 pos = (ix & OFF) << LSH; \
+ return ((*a >> pos) & MSK); \
+}
+
+DECLARE_MAP_API(2, 4, 1, 15U, 0x0003) /* setbit2() and getbit2() */
+DECLARE_MAP_API(4, 3, 2, 7U, 0x000F) /* setbit4() and getbit4() */
+DECLARE_MAP_API(8, 2, 3, 3U, 0x00FF) /* setbit8() and getbit8() */
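+/*
+ * Illustrative sketch (assumption; state_map is hypothetical): a packed 2-bit
+ * map holds 16 entries per uint32 word.
+ *
+ *	uint32 state_map[4];		(64 two-bit entries)
+ *	setbit2(state_map, 10, 0x3);	(set entry 10 to 3)
+ *	if (getbit2(state_map, 10) == 0x3)
+ *		...
+ */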
+/* basic mux operation - can be optimized on several architectures */
+#define MUX(pred, true, false) ((pred) ? (true) : (false))
+/* modulo inc/dec - assumes x E [0, bound - 1] */
#define MODDEC(x, bound) MUX((x) == 0, (bound) - 1, (x) - 1)
#define MODINC(x, bound) MUX((x) == (bound) - 1, 0, (x) + 1)
-
+/* modulo inc/dec, bound = 2^k */
#define MODDEC_POW2(x, bound) (((x) - 1) & ((bound) - 1))
#define MODINC_POW2(x, bound) (((x) + 1) & ((bound) - 1))
-
+/* modulo add/sub - assumes x, y E [0, bound - 1] */
#define MODADD(x, y, bound) \
MUX((x) + (y) >= (bound), (x) + (y) - (bound), (x) + (y))
#define MODSUB(x, y, bound) \
MUX(((int)(x)) - ((int)(y)) < 0, (x) - (y) + (bound), (x) - (y))
-
+/* modulo add/sub, bound = 2^k */
#define MODADD_POW2(x, y, bound) (((x) + (y)) & ((bound) - 1))
#define MODSUB_POW2(x, y, bound) (((x) - (y)) & ((bound) - 1))
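+/*
+ * Illustrative sketch (assumption; wr_idx and the RING_* bounds are hypothetical):
+ * advance a ring index with wraparound, using the _POW2 form when the bound is 2^k.
+ *
+ *	wr_idx = MODINC(wr_idx, RING_DEPTH);
+ *	wr_idx = MODINC_POW2(wr_idx, RING_DEPTH_P2);
+ */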
+/* crc defines */
+#define CRC8_INIT_VALUE 0xff /* Initial CRC8 checksum value */
+#define CRC8_GOOD_VALUE 0x9f /* Good final CRC8 checksum value */
+#define CRC16_INIT_VALUE 0xffff /* Initial CRC16 checksum value */
+#define CRC16_GOOD_VALUE 0xf0b8 /* Good final CRC16 checksum value */
+#define CRC32_INIT_VALUE 0xffffffff /* Initial CRC32 checksum value */
+#define CRC32_GOOD_VALUE 0xdebb20e3 /* Good final CRC32 checksum value */
-#define CRC8_INIT_VALUE 0xff
-#define CRC8_GOOD_VALUE 0x9f
-#define CRC16_INIT_VALUE 0xffff
-#define CRC16_GOOD_VALUE 0xf0b8
-#define CRC32_INIT_VALUE 0xffffffff
-#define CRC32_GOOD_VALUE 0xdebb20e3
-
-
+/* use for direct output of MAC address in printf etc */
#define MACF "%02x:%02x:%02x:%02x:%02x:%02x"
#define ETHERP_TO_MACF(ea) ((struct ether_addr *) (ea))->octet[0], \
((struct ether_addr *) (ea))->octet[1], \
#else
#define MACDBG "%02x:%02x:%02x"
#define MAC2STRDBG(ea) (ea)[0], (ea)[4], (ea)[5]
-#endif
-
+#endif /* SIMPLE_MAC_PRINT */
+/* bcm_format_flags() bit description structure */
typedef struct bcm_bit_desc {
uint32 bit;
const char* name;
} bcm_bit_desc_t;
-
+/* bcm_format_field */
typedef struct bcm_bit_desc_ex {
uint32 mask;
const bcm_bit_desc_t *bitfield;
} bcm_bit_desc_ex_t;
+/* buffer length for ethernet address from bcm_ether_ntoa() */
+#define ETHER_ADDR_STR_LEN 18 /* 18-bytes of Ethernet address buffer length */
-
-typedef struct bcm_tlv {
- uint8 id;
- uint8 len;
- uint8 data[1];
-} bcm_tlv_t;
-
-
-#define bcm_valid_tlv(elt, buflen) ((buflen) >= 2 && (int)(buflen) >= (int)(2 + (elt)->len))
-
-
-#define ETHER_ADDR_STR_LEN 18
-
-
-
+/* crypto utility function */
+/* 128-bit xor: *dst = *src1 xor *src2. dst, src1 and src2 may have any alignment */
static INLINE void
xor_128bit_block(const uint8 *src1, const uint8 *src2, uint8 *dst)
{
1 ||
#endif
(((uintptr)src1 | (uintptr)src2 | (uintptr)dst) & 3) == 0) {
-
-
+ /* ARM CM3 rel time: 1229 (727 if alignment check could be omitted) */
+ /* x86 supports unaligned. This version runs 6x-9x faster on x86. */
((uint32 *)dst)[0] = ((const uint32 *)src1)[0] ^ ((const uint32 *)src2)[0];
((uint32 *)dst)[1] = ((const uint32 *)src1)[1] ^ ((const uint32 *)src2)[1];
((uint32 *)dst)[2] = ((const uint32 *)src1)[2] ^ ((const uint32 *)src2)[2];
((uint32 *)dst)[3] = ((const uint32 *)src1)[3] ^ ((const uint32 *)src2)[3];
} else {
-
+ /* ARM CM3 rel time: 4668 (4191 if alignment check could be omitted) */
int k;
for (k = 0; k < 16; k++)
dst[k] = src1[k] ^ src2[k];
}
}
-
-
+/* externs */
+/* crc */
extern uint8 hndcrc8(uint8 *p, uint nbytes, uint8 crc);
extern uint16 hndcrc16(uint8 *p, uint nbytes, uint16 crc);
extern uint32 hndcrc32(uint8 *p, uint nbytes, uint32 crc);
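+/*
+ * Illustrative sketch (assumption; body/body_len are hypothetical): CRCs are
+ * seeded with the *_INIT_VALUE defines above, and a buffer that carries its
+ * own trailing CRC checks out against the matching *_GOOD_VALUE.
+ *
+ *	uint16 crc = hndcrc16(body, body_len, CRC16_INIT_VALUE);
+ */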
-
+/* format/print */
#if defined(DHD_DEBUG) || defined(WLMSG_PRHDRS) || defined(WLMSG_PRPKT) || \
defined(WLMSG_ASSOC)
-
+/* print out the value a field has: fields may have 1-32 bits and may hold any value */
extern int bcm_format_field(const bcm_bit_desc_ex_t *bd, uint32 field, char* buf, int len);
-
+/* print out which bits in flags are set */
extern int bcm_format_flags(const bcm_bit_desc_t *bd, uint32 flags, char* buf, int len);
#endif
-#if defined(DHD_DEBUG) || defined(WLMSG_PRHDRS) || defined(WLMSG_PRPKT) || \
- defined(WLMSG_ASSOC) || defined(WLMEDIA_PEAKRATE)
extern int bcm_format_hex(char *str, const void *bytes, int len);
-#endif
extern const char *bcm_crypto_algo_name(uint algo);
extern char *bcm_chipname(uint chipid, char *buf, uint len);
extern void printbig(char *buf);
extern void prhex(const char *msg, uchar *buf, uint len);
+/* IE parsing */
+
+/* tag_ID/length/value_buffer tuple */
+typedef struct bcm_tlv {
+ uint8 id;
+ uint8 len;
+ uint8 data[1];
+} bcm_tlv_t;
+
+/* bcm tlv w/ 16 bit id/len */
+typedef struct bcm_xtlv {
+ uint16 id;
+ uint16 len;
+ uint8 data[1];
+} bcm_xtlv_t;
+
+/* descriptor of xtlv data src or dst */
+typedef struct {
+ uint16 type;
+ uint16 len;
+ void *ptr; /* ptr to memory location */
+} xtlv_desc_t;
+
+/* set a var from xtlv buffer */
+typedef int
+(bcm_set_var_from_tlv_cbfn_t)(void *ctx, void **tlv_buf, uint16 type, uint16 len);
+
+struct bcm_tlvbuf {
+ uint16 size;
+ uint8 *head; /* point to head of buffer */
+ uint8 *buf; /* current position of buffer */
+ /* followed by the allocated buffer */
+};
+
+#define BCM_TLV_MAX_DATA_SIZE (255)
+#define BCM_XTLV_MAX_DATA_SIZE (65535)
+#define BCM_TLV_HDR_SIZE (OFFSETOF(bcm_tlv_t, data))
+
+#define BCM_XTLV_HDR_SIZE (OFFSETOF(bcm_xtlv_t, data))
+#define BCM_XTLV_LEN(elt) ltoh16_ua(&(elt->len))
+#define BCM_XTLV_ID(elt) ltoh16_ua(&(elt->id))
+#define BCM_XTLV_SIZE(elt) (BCM_XTLV_HDR_SIZE + BCM_XTLV_LEN(elt))
+
+/* Check that bcm_tlv_t fits into the given buflen */
+#define bcm_valid_tlv(elt, buflen) (\
+ ((int)(buflen) >= (int)BCM_TLV_HDR_SIZE) && \
+ ((int)(buflen) >= (int)(BCM_TLV_HDR_SIZE + (elt)->len)))
+
+#define bcm_valid_xtlv(elt, buflen) (\
+ ((int)(buflen) >= (int)BCM_XTLV_HDR_SIZE) && \
+ ((int)(buflen) >= (int)BCM_XTLV_SIZE(elt)))
extern bcm_tlv_t *bcm_next_tlv(bcm_tlv_t *elt, int *buflen);
extern bcm_tlv_t *bcm_parse_tlvs(void *buf, int buflen, uint key);
-extern bcm_tlv_t *bcm_parse_ordered_tlvs(void *buf, int buflen, uint key);
+extern bcm_tlv_t *bcm_parse_tlvs_min_bodylen(void *buf, int buflen, uint key, int min_bodylen);
+extern bcm_tlv_t *bcm_parse_ordered_tlvs(void *buf, int buflen, uint key);
+extern bcm_tlv_t *bcm_find_vendor_ie(void *tlvs, int tlvs_len, const char *voui, uint8 *type,
+ int type_len);
+
+extern uint8 *bcm_write_tlv(int type, const void *data, int datalen, uint8 *dst);
+extern uint8 *bcm_write_tlv_safe(int type, const void *data, int datalen, uint8 *dst,
+ int dst_maxlen);
+
+extern uint8 *bcm_copy_tlv(const void *src, uint8 *dst);
+extern uint8 *bcm_copy_tlv_safe(const void *src, uint8 *dst, int dst_maxlen);
+
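+/*
+ * Illustrative sketch (assumption; ies/ies_len/ssid are hypothetical and
+ * DOT11_MNG_SSID_ID is assumed to come from 802.11.h): find an SSID IE and
+ * copy it out with a length check.
+ *
+ *	bcm_tlv_t *ie = bcm_parse_tlvs(ies, ies_len, DOT11_MNG_SSID_ID);
+ *	if (ie != NULL && ie->len <= sizeof(ssid))
+ *		bcopy(ie->data, ssid, ie->len);
+ */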
+/* xtlv */
+extern bcm_xtlv_t *bcm_next_xtlv(bcm_xtlv_t *elt, int *buflen);
+extern struct bcm_tlvbuf *bcm_xtlv_buf_alloc(void *osh, uint16 len);
+extern void bcm_xtlv_buf_free(void *osh, struct bcm_tlvbuf *tbuf);
+extern uint16 bcm_xtlv_buf_len(struct bcm_tlvbuf *tbuf);
+extern uint16 bcm_xtlv_buf_rlen(struct bcm_tlvbuf *tbuf);
+extern uint8 *bcm_xtlv_buf(struct bcm_tlvbuf *tbuf);
+extern uint8 *bcm_xtlv_head(struct bcm_tlvbuf *tbuf);
+extern int bcm_xtlv_put_data(struct bcm_tlvbuf *tbuf, uint16 type, const void *data, uint16 dlen);
+extern int bcm_xtlv_put_8(struct bcm_tlvbuf *tbuf, uint16 type, const int8 data);
+extern int bcm_xtlv_put_16(struct bcm_tlvbuf *tbuf, uint16 type, const int16 data);
+extern int bcm_xtlv_put_32(struct bcm_tlvbuf *tbuf, uint16 type, const int32 data);
+extern int bcm_unpack_xtlv_entry(void **tlv_buf, uint16 xpct_type, uint16 xpct_len, void *dst);
+extern int bcm_skip_xtlv(void **tlv_buf);
+extern int bcm_pack_xtlv_entry(void **tlv_buf, uint16 *buflen, uint16 type, uint16 len, void *src);
+extern int bcm_unpack_xtlv_buf(void *ctx,
+ void *tlv_buf, uint16 buflen, bcm_set_var_from_tlv_cbfn_t *cbfn);
+extern int
+bcm_unpack_xtlv_buf_to_mem(void *tlv_buf, int *buflen, xtlv_desc_t *items);
+extern int
+bcm_pack_xtlv_buf_from_mem(void **tlv_buf, uint16 *buflen, xtlv_desc_t *items);
+extern int
+bcm_pack_xtlv_entry_from_hex_string(void **tlv_buf, uint16 *buflen, uint16 type, char *hex);
+
+/* bcmerror */
extern const char *bcmerrorstr(int bcmerror);
-
-
+/* multi-bool data type: set of bools, mbool is true if any is set */
typedef uint32 mbool;
-#define mboolset(mb, bit) ((mb) |= (bit))
-#define mboolclr(mb, bit) ((mb) &= ~(bit))
-#define mboolisset(mb, bit) (((mb) & (bit)) != 0)
+#define mboolset(mb, bit) ((mb) |= (bit)) /* set one bool */
+#define mboolclr(mb, bit) ((mb) &= ~(bit)) /* clear one bool */
+#define mboolisset(mb, bit) (((mb) & (bit)) != 0) /* TRUE if one bool is set */
#define mboolmaskset(mb, mask, val) ((mb) = (((mb) & ~(mask)) | (val)))
-
+/* generic datastruct to help dump routines */
struct fielddesc {
const char *nameandfmt;
uint32 offset;
extern int bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...);
-
+/* power conversion */
extern uint16 bcm_qdbm_to_mw(uint8 qdbm);
extern uint8 bcm_mw_to_qdbm(uint16 mw);
extern uint bcm_mkiovar(char *name, char *data, uint datalen, char *buf, uint len);
unsigned int process_nvram_vars(char *varbuf, unsigned int len);
-extern bcm_tlv_t *find_vendor_ie(void *tlvs, int tlvs_len,
- const char *voui, uint8 *type, int type_len);
-
+/* calculate a * b + c */
extern void bcm_uint64_multiple_add(uint32* r_high, uint32* r_low, uint32 a, uint32 b, uint32 c);
-
+/* calculate a / b */
extern void bcm_uint64_divide(uint32* r, uint32 a_high, uint32 a_low, uint32 b);
+
+/* Public domain bit twiddling hacks/utilities: Sean Eron Anderson */
+
+/* Table driven count set bits. */
+static const uint8 /* Table only for use by bcm_cntsetbits */
+_CSBTBL[256] =
+{
+# define B2(n) n, n + 1, n + 1, n + 2
+# define B4(n) B2(n), B2(n + 1), B2(n + 1), B2(n + 2)
+# define B6(n) B4(n), B4(n + 1), B4(n + 1), B4(n + 2)
+ B6(0), B6(0 + 1), B6(0 + 1), B6(0 + 2)
+};
+
+static INLINE uint32 /* Uses table _CSBTBL for fast counting of 1's in a u32 */
+bcm_cntsetbits(const uint32 u32)
+{
+ /* function local scope declaration of const _CSBTBL[] */
+ const uint8 * p = (const uint8 *)&u32;
+ return (_CSBTBL[p[0]] + _CSBTBL[p[1]] + _CSBTBL[p[2]] + _CSBTBL[p[3]]);
+}
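+/* Worked example (illustrative): bcm_cntsetbits(0x000000F1) returns 5. */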
+
+
+static INLINE int /* C equivalent count of leading 0's in a u32 */
+C_bcm_count_leading_zeros(uint32 u32)
+{
+ int shifts = 0;
+ while (u32) {
+ shifts++; u32 >>= 1;
+ }
+ return (32U - shifts);
+}
+
+#ifdef BCMDRIVER
+/*
+ * Assembly instructions: Count Leading Zeros
+ * "clz" : MIPS, ARM
+ * "cntlzw" : PowerPC
+ * "BSF" : x86
+ * "lzcnt" : AMD, SPARC
+ */
+
+#if defined(__arm__)
+
+#if defined(__ARM_ARCH_7M__) /* Cortex M3 */
+#define __USE_ASM_CLZ__
+#endif /* __ARM_ARCH_7M__ */
+
+#if defined(__ARM_ARCH_7R__) /* Cortex R4 */
+#define __USE_ASM_CLZ__
+#endif /* __ARM_ARCH_7R__ */
+
+#endif /* __arm__ */
+
+static INLINE int
+bcm_count_leading_zeros(uint32 u32)
+{
+#if defined(__USE_ASM_CLZ__)
+ int zeros;
+ __asm__ volatile("clz %0, %1 \n" : "=r" (zeros) : "r" (u32));
+ return zeros;
+#else /* C equivalent */
+ return C_bcm_count_leading_zeros(u32);
+#endif /* C equivalent */
+}
+
+/* INTERFACE: Multiword bitmap based small id allocator. */
+struct bcm_mwbmap; /* forward declaration for use as an opaque mwbmap handle */
+
+#define BCM_MWBMAP_INVALID_HDL ((struct bcm_mwbmap *)NULL)
+#define BCM_MWBMAP_INVALID_IDX ((uint32)(~0U))
+
+/* Incarnate a multiword bitmap based small index allocator */
+extern struct bcm_mwbmap * bcm_mwbmap_init(osl_t * osh, uint32 items_max);
+
+/* Free up the multiword bitmap index allocator */
+extern void bcm_mwbmap_fini(osl_t * osh, struct bcm_mwbmap * mwbmap_hdl);
+
+/* Allocate a unique small index using a multiword bitmap index allocator */
+extern uint32 bcm_mwbmap_alloc(struct bcm_mwbmap * mwbmap_hdl);
+
+/* Force an index at a specified position to be in use */
+extern void bcm_mwbmap_force(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix);
+
+/* Free a previously allocated index back into the multiword bitmap allocator */
+extern void bcm_mwbmap_free(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix);
+
+/* Fetch the total number of free indices in the multiword bitmap allocator */
+extern uint32 bcm_mwbmap_free_cnt(struct bcm_mwbmap * mwbmap_hdl);
+
+/* Determine whether an index is inuse or free */
+extern bool bcm_mwbmap_isfree(struct bcm_mwbmap * mwbmap_hdl, uint32 bitix);
+
+/* Debug dump a multiword bitmap allocator */
+extern void bcm_mwbmap_show(struct bcm_mwbmap * mwbmap_hdl);
+
+extern void bcm_mwbmap_audit(struct bcm_mwbmap * mwbmap_hdl);
+/* End - Multiword bitmap based small Id allocator. */
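+/*
+ * Illustrative sketch (assumption):
+ *
+ *	struct bcm_mwbmap *map = bcm_mwbmap_init(osh, 256);
+ *	uint32 idx = bcm_mwbmap_alloc(map);
+ *	if (idx != BCM_MWBMAP_INVALID_IDX)
+ *		bcm_mwbmap_free(map, idx);
+ *	bcm_mwbmap_fini(osh, map);
+ */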
+
+
+/* INTERFACE: Simple unique 16bit Id Allocator using a stack implementation. */
+
+#define ID16_INVALID ((uint16)(~0))
+
+/*
+ * Construct a 16bit id allocator, managing 16bit ids in the range:
+ * [start_val16 .. start_val16+total_ids)
+ * Note: start_val16 is inclusive.
+ * Returns an opaque handle to the 16bit id allocator.
+ */
+extern void * id16_map_init(osl_t *osh, uint16 total_ids, uint16 start_val16);
+extern void * id16_map_fini(osl_t *osh, void * id16_map_hndl);
+
+/* Allocate a unique 16bit id */
+extern uint16 id16_map_alloc(void * id16_map_hndl);
+
+/* Free a 16bit id value into the id16 allocator */
+extern void id16_map_free(void * id16_map_hndl, uint16 val16);
+
+/* Get the number of failures encountered during id allocation. */
+extern uint32 id16_map_failures(void * id16_map_hndl);
+
+/* Audit the 16bit id allocator state. */
+extern bool id16_map_audit(void * id16_map_hndl);
+/* End - Simple 16bit Id Allocator. */
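+/*
+ * Illustrative sketch (assumption): allocate ids 1..64 from the stack-based
+ * allocator and release one.
+ *
+ *	void *map = id16_map_init(osh, 64, 1);
+ *	uint16 id = id16_map_alloc(map);
+ *	if (id != ID16_INVALID)
+ *		id16_map_free(map, id);
+ *	id16_map_fini(osh, map);
+ */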
+
+#endif /* BCMDRIVER */
+
+extern void bcm_uint64_right_shift(uint32* r, uint32 a_high, uint32 a_low, uint32 b);
+
+void bcm_add_64(uint32* r_hi, uint32* r_lo, uint32 offset);
+void bcm_sub_64(uint32* r_hi, uint32* r_lo, uint32 offset);
+
+/* calculate checksum for ip header, tcp / udp header / data */
+uint16 bcm_ip_cksum(uint8 *buf, uint32 len, uint32 sum);
+
+#ifndef _dll_t_
+#define _dll_t_
+/*
+ * -----------------------------------------------------------------------------
+ * Double Linked List Macros
+ * -----------------------------------------------------------------------------
+ *
+ * All dll operations must be performed on a pre-initialized node.
+ * Inserting an uninitialized node into a list effectively initializes it.
+ *
+ * When a node is deleted from a list, you may initialize it to avoid corruption
+ * incurred by double deletion. You may skip initialization if the node is
+ * immediately inserted into another list.
+ *
+ * By placing a dll_t element at the start of a struct, you may cast a dll_t *
+ * to the struct or vice versa.
+ *
+ * Example of declaring and initializing someList and inserting nodeA, nodeB
+ *
+ * typedef struct item {
+ * dll_t node;
+ * int someData;
+ * } Item_t;
+ * Item_t nodeA, nodeB, nodeC;
+ * nodeA.someData = 11111, nodeB.someData = 22222, nodeC.someData = 33333;
+ *
+ * dll_t someList;
+ * dll_init(&someList);
+ *
+ * dll_append(&someList, (dll_t *) &nodeA);
+ * dll_prepend(&someList, &nodeB.node);
+ * dll_insert((dll_t *)&nodeC, &nodeA.node);
+ *
+ * dll_delete((dll_t *) &nodeB);
+ *
+ * Example of a for loop to walk someList of node_p
+ *
+ * extern void mydisplay(Item_t * item_p);
+ *
+ * dll_t * item_p, * next_p;
+ * for (item_p = dll_head_p(&someList); ! dll_end(&someList, item_p);
+ * item_p = next_p)
+ * {
+ * next_p = dll_next_p(item_p);
+ * ... use item_p at will, including removing it from list ...
+ * mydisplay((PItem_t)item_p);
+ * }
+ *
+ * -----------------------------------------------------------------------------
+ */
+typedef struct dll {
+ struct dll * next_p;
+ struct dll * prev_p;
+} dll_t;
+
+static INLINE void
+dll_init(dll_t *node_p)
+{
+ node_p->next_p = node_p;
+ node_p->prev_p = node_p;
+}
+/* dll macros returning a pointer to dll_t */
+
+static INLINE dll_t *
+dll_head_p(dll_t *list_p)
+{
+ return list_p->next_p;
+}
+
+
+static INLINE dll_t *
+dll_tail_p(dll_t *list_p)
+{
+ return (list_p)->prev_p;
+}
+
+
+static INLINE dll_t *
+dll_next_p(dll_t *node_p)
+{
+ return (node_p)->next_p;
+}
+
+
+static INLINE dll_t *
+dll_prev_p(dll_t *node_p)
+{
+	return (node_p)->prev_p;
+}
+
+
+static INLINE bool
+dll_empty(dll_t *list_p)
+{
+ return ((list_p)->next_p == (list_p));
+}
+
+
+static INLINE bool
+dll_end(dll_t *list_p, dll_t * node_p)
+{
+ return (list_p == node_p);
+}
+
+
+/* inserts the node new_p "after" the node at_p */
+static INLINE void
+dll_insert(dll_t *new_p, dll_t * at_p)
+{
+ new_p->next_p = at_p->next_p;
+ new_p->prev_p = at_p;
+ at_p->next_p = new_p;
+ (new_p->next_p)->prev_p = new_p;
+}
+
+static INLINE void
+dll_append(dll_t *list_p, dll_t *node_p)
+{
+ dll_insert(node_p, dll_tail_p(list_p));
+}
+
+static INLINE void
+dll_prepend(dll_t *list_p, dll_t *node_p)
+{
+ dll_insert(node_p, list_p);
+}
+
+
+/* deletes a node from any list that it "may" be in, if at all. */
+static INLINE void
+dll_delete(dll_t *node_p)
+{
+ node_p->prev_p->next_p = node_p->next_p;
+ node_p->next_p->prev_p = node_p->prev_p;
+}
+#endif /* ! defined(_dll_t_) */
+
+/* Elements managed in a double linked list */
+
+typedef struct dll_pool {
+ dll_t free_list;
+ uint16 free_count;
+ uint16 elems_max;
+ uint16 elem_size;
+ dll_t elements[1];
+} dll_pool_t;
+
+dll_pool_t * dll_pool_init(void * osh, uint16 elems_max, uint16 elem_size);
+void * dll_pool_alloc(dll_pool_t * dll_pool_p);
+void dll_pool_free(dll_pool_t * dll_pool_p, void * elem_p);
+void dll_pool_free_tail(dll_pool_t * dll_pool_p, void * elem_p);
+typedef void (* dll_elem_dump)(void * elem_p);
+void dll_pool_detach(void * osh, dll_pool_t * pool, uint16 elems_max, uint16 elem_size);
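+/*
+ * Illustrative sketch (assumption; my_elem_t is hypothetical): carve a pool of
+ * 32 fixed-size elements and cycle one through alloc/free.
+ *
+ *	dll_pool_t *pool = dll_pool_init(osh, 32, sizeof(my_elem_t));
+ *	my_elem_t *e = (my_elem_t *)dll_pool_alloc(pool);
+ *	if (e != NULL)
+ *		dll_pool_free(pool, e);
+ */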
+
#ifdef __cplusplus
}
#endif
-#endif
+/* #define DEBUG_COUNTER */
+#ifdef DEBUG_COUNTER
+#define CNTR_TBL_MAX 10
+typedef struct _counter_tbl_t {
+ char name[16]; /* name of this counter table */
+ uint32 prev_log_print; /* Internal use. Timestamp of the previous log print */
+ uint log_print_interval; /* Desired interval to print logs in ms */
+ uint needed_cnt; /* How many counters need to be used */
+ uint32 cnt[CNTR_TBL_MAX]; /* Counting entries to increase at desired places */
+ bool enabled; /* Whether to enable printing log */
+} counter_tbl_t;
+
+
+void counter_printlog(counter_tbl_t *ctr_tbl);
+#endif /* DEBUG_COUNTER */
+
+#endif /* _bcmutils_h_ */
#define _bcmwifi_channels_h_
-
+/* A chanspec holds the channel number, band, bandwidth and control sideband */
typedef uint16 chanspec_t;
-
+/* channel defines */
#define CH_UPPER_SB 0x01
#define CH_LOWER_SB 0x02
#define CH_EWA_VALID 0x04
#define CH_40MHZ_APART 8
#define CH_20MHZ_APART 4
#define CH_10MHZ_APART 2
-#define CH_5MHZ_APART 1
-#define CH_MAX_2G_CHANNEL 14
-#define MAXCHANNEL 224
+#define CH_5MHZ_APART 1 /* 2G band channels are 5 Mhz apart */
+#define CH_MAX_2G_CHANNEL 14 /* Max channel in 2G band */
+#define MAXCHANNEL 224 /* max # supported channels. The max channel no is above,
+ * this is that + 1 rounded up to a multiple of NBBY (8).
+ * DO NOT MAKE it > 255: channels are uint8's all over
+ */
+#define MAXCHANNEL_NUM (MAXCHANNEL - 1) /* max channel number */
+
+/* make sure channel num is within valid range */
+#define CH_NUM_VALID_RANGE(ch_num) ((ch_num) > 0 && (ch_num) <= MAXCHANNEL_NUM)
+
#define CHSPEC_CTLOVLP(sp1, sp2, sep) (ABS(wf_chspec_ctlchan(sp1) - wf_chspec_ctlchan(sp2)) < \
(sep))
-
+/* All builds use the new 11ac ratespec/chanspec */
#undef D11AC_IOTYPES
#define D11AC_IOTYPES
-#ifndef D11AC_IOTYPES
-
-#define WL_CHANSPEC_CHAN_MASK 0x00ff
-#define WL_CHANSPEC_CHAN_SHIFT 0
-
-#define WL_CHANSPEC_CTL_SB_MASK 0x0300
-#define WL_CHANSPEC_CTL_SB_SHIFT 8
-#define WL_CHANSPEC_CTL_SB_LOWER 0x0100
-#define WL_CHANSPEC_CTL_SB_UPPER 0x0200
-#define WL_CHANSPEC_CTL_SB_NONE 0x0300
-
-#define WL_CHANSPEC_BW_MASK 0x0C00
-#define WL_CHANSPEC_BW_SHIFT 10
-#define WL_CHANSPEC_BW_10 0x0400
-#define WL_CHANSPEC_BW_20 0x0800
-#define WL_CHANSPEC_BW_40 0x0C00
-
-#define WL_CHANSPEC_BAND_MASK 0xf000
-#define WL_CHANSPEC_BAND_SHIFT 12
-#ifdef WL_CHANSPEC_BAND_5G
-#undef WL_CHANSPEC_BAND_5G
-#endif
-#ifdef WL_CHANSPEC_BAND_2G
-#undef WL_CHANSPEC_BAND_2G
-#endif
-#define WL_CHANSPEC_BAND_5G 0x1000
-#define WL_CHANSPEC_BAND_2G 0x2000
-#define INVCHANSPEC 255
-
-
-#define LOWER_20_SB(channel) (((channel) > CH_10MHZ_APART) ? ((channel) - CH_10MHZ_APART) : 0)
-#define UPPER_20_SB(channel) (((channel) < (MAXCHANNEL - CH_10MHZ_APART)) ? \
- ((channel) + CH_10MHZ_APART) : 0)
-
-#define LL_20_SB(channel) (((channel) > 3 * CH_10MHZ_APART) ? ((channel) - 3 * CH_10MHZ_APART) : 0)
-#define UU_20_SB(channel) (((channel) < (MAXCHANNEL - 3 * CH_10MHZ_APART)) ? \
- ((channel) + 3 * CH_10MHZ_APART) : 0)
-#define LU_20_SB(channel) LOWER_20_SB(channel)
-#define UL_20_SB(channel) UPPER_20_SB(channel)
-
-#define CHSPEC_WLCBANDUNIT(chspec) (CHSPEC_IS5G(chspec) ? BAND_5G_INDEX : BAND_2G_INDEX)
-#define CH20MHZ_CHSPEC(channel) (chanspec_t)((chanspec_t)(channel) | WL_CHANSPEC_BW_20 | \
- WL_CHANSPEC_CTL_SB_NONE | (((channel) <= CH_MAX_2G_CHANNEL) ? \
- WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G))
-#define NEXT_20MHZ_CHAN(channel) (((channel) < (MAXCHANNEL - CH_20MHZ_APART)) ? \
- ((channel) + CH_20MHZ_APART) : 0)
-#define CH40MHZ_CHSPEC(channel, ctlsb) (chanspec_t) \
- ((channel) | (ctlsb) | WL_CHANSPEC_BW_40 | \
- ((channel) <= CH_MAX_2G_CHANNEL ? WL_CHANSPEC_BAND_2G : \
- WL_CHANSPEC_BAND_5G))
-#define CHSPEC_CHANNEL(chspec) ((uint8)((chspec) & WL_CHANSPEC_CHAN_MASK))
-#define CHSPEC_BAND(chspec) ((chspec) & WL_CHANSPEC_BAND_MASK)
-
-
-#define CHSPEC_CTL_SB(chspec) ((chspec) & WL_CHANSPEC_CTL_SB_MASK)
-#define CHSPEC_BW(chspec) ((chspec) & WL_CHANSPEC_BW_MASK)
-
-#ifdef WL11N_20MHZONLY
-
-#define CHSPEC_IS10(chspec) 0
-#define CHSPEC_IS20(chspec) 1
-#ifndef CHSPEC_IS40
-#define CHSPEC_IS40(chspec) 0
-#endif
-
-#else
-
-#define CHSPEC_IS10(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_10)
-#define CHSPEC_IS20(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_20)
-#ifndef CHSPEC_IS40
-#define CHSPEC_IS40(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_40)
-#endif
-
-#endif
-
-#define CHSPEC_IS5G(chspec) (((chspec) & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_5G)
-#define CHSPEC_IS2G(chspec) (((chspec) & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_2G)
-#define CHSPEC_SB_NONE(chspec) (((chspec) & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_NONE)
-#define CHSPEC_SB_UPPER(chspec) (((chspec) & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_UPPER)
-#define CHSPEC_SB_LOWER(chspec) (((chspec) & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_LOWER)
-#define CHSPEC_CTL_CHAN(chspec) ((CHSPEC_SB_LOWER(chspec)) ? \
- (LOWER_20_SB(((chspec) & WL_CHANSPEC_CHAN_MASK))) : \
- (UPPER_20_SB(((chspec) & WL_CHANSPEC_CHAN_MASK))))
-#define CHSPEC2WLC_BAND(chspec) (CHSPEC_IS5G(chspec) ? WLC_BAND_5G : WLC_BAND_2G)
-
-#define CHANSPEC_STR_LEN 8
-
-#else
-
#define WL_CHANSPEC_CHAN_MASK 0x00ff
#define WL_CHANSPEC_CHAN_SHIFT 0
#define WL_CHANSPEC_CHAN1_MASK 0x000f
#define WL_CHANSPEC_CTL_SB_UU WL_CHANSPEC_CTL_SB_LUU
#define WL_CHANSPEC_CTL_SB_L WL_CHANSPEC_CTL_SB_LLL
#define WL_CHANSPEC_CTL_SB_U WL_CHANSPEC_CTL_SB_LLU
-#define WL_CHANSPEC_CTL_SB_LOWER WL_CHANSPEC_CTL_SB_LLL
+#define WL_CHANSPEC_CTL_SB_LOWER WL_CHANSPEC_CTL_SB_LLL
#define WL_CHANSPEC_CTL_SB_UPPER WL_CHANSPEC_CTL_SB_LLU
+#define WL_CHANSPEC_CTL_SB_NONE WL_CHANSPEC_CTL_SB_LLL
#define WL_CHANSPEC_BW_MASK 0x3800
#define WL_CHANSPEC_BW_SHIFT 11
#define WL_CHANSPEC_BAND_5G 0xc000
#define INVCHANSPEC 255
-
+/* channel defines */
#define LOWER_20_SB(channel) (((channel) > CH_10MHZ_APART) ? \
((channel) - CH_10MHZ_APART) : 0)
#define UPPER_20_SB(channel) (((channel) < (MAXCHANNEL - CH_10MHZ_APART)) ? \
((channel) | (ctlsb) | \
WL_CHANSPEC_BW_160 | WL_CHANSPEC_BAND_5G)
-
-#define CHSPEC_CHANNEL(chspec) ((uint8)((chspec) & WL_CHANSPEC_CHAN_MASK))
-#define CHSPEC_CHAN1(chspec) ((chspec) & WL_CHANSPEC_CHAN1_MASK)
-#define CHSPEC_CHAN2(chspec) ((chspec) & WL_CHANSPEC_CHAN2_MASK)
+/* simple MACROs to get different fields of chanspec */
+#ifdef WL11AC_80P80
+#define CHSPEC_CHANNEL(chspec) wf_chspec_channel(chspec)
+#else
+#define CHSPEC_CHANNEL(chspec) ((uint8)((chspec) & WL_CHANSPEC_CHAN_MASK))
+#endif
+#define CHSPEC_CHAN1(chspec)	(((chspec) & WL_CHANSPEC_CHAN1_MASK) >> WL_CHANSPEC_CHAN1_SHIFT)
+#define CHSPEC_CHAN2(chspec)	(((chspec) & WL_CHANSPEC_CHAN2_MASK) >> WL_CHANSPEC_CHAN2_SHIFT)
#define CHSPEC_BAND(chspec) ((chspec) & WL_CHANSPEC_BAND_MASK)
-#define CHSPEC_CTL_SB(chspec) ((chspec) & WL_CHANSPEC_CTL_SB_MASK)
+#define CHSPEC_CTL_SB(chspec) ((chspec) & WL_CHANSPEC_CTL_SB_MASK)
#define CHSPEC_BW(chspec) ((chspec) & WL_CHANSPEC_BW_MASK)
#ifdef WL11N_20MHZONLY
#define CHSPEC_IS8080(chspec) 0
#endif
-#else
+#else /* !WL11N_20MHZONLY */
#define CHSPEC_IS10(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_10)
#define CHSPEC_IS20(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_20)
#define CHSPEC_IS8080(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_8080)
#endif
-#endif
+#endif /* !WL11N_20MHZONLY */
#define CHSPEC_IS5G(chspec) (((chspec) & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_5G)
#define CHSPEC_IS2G(chspec) (((chspec) & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_2G)
(((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_40))
#define CHSPEC2WLC_BAND(chspec) (CHSPEC_IS5G(chspec) ? WLC_BAND_5G : WLC_BAND_2G)
-
+/**
+ * Number of chars needed for wf_chspec_ntoa() destination character buffer.
+ */
#define CHANSPEC_STR_LEN 20
-
+#define CHSPEC_IS_BW_160_WIDE(chspec) (CHSPEC_BW(chspec) == WL_CHANSPEC_BW_160 ||\
+ CHSPEC_BW(chspec) == WL_CHANSPEC_BW_8080)
+
+/* BW inequality comparisons, LE (<=), GE (>=), LT (<), GT (>): comparisons can be made
+* as simple numeric comparisons, with the exception that 160 is the same BW as 80+80
+* but the two have different numeric values (WL_CHANSPEC_BW_160 < WL_CHANSPEC_BW_8080).
+*
+* The LT/LE/GT/GE macros therefore first check whether both the chspec bandwidth and bw
+* are 160 wide; if not, a simple numeric comparison is made.
+*/
+#define CHSPEC_BW_GE(chspec, bw) \
+ ((CHSPEC_IS_BW_160_WIDE(chspec) &&\
+ (bw == WL_CHANSPEC_BW_160 || bw == WL_CHANSPEC_BW_8080)) ||\
+ (CHSPEC_BW(chspec) >= bw))
+
+#define CHSPEC_BW_LE(chspec, bw) \
+ ((CHSPEC_IS_BW_160_WIDE(chspec) &&\
+ (bw == WL_CHANSPEC_BW_160 || bw == WL_CHANSPEC_BW_8080)) ||\
+ (CHSPEC_BW(chspec) <= bw))
+
+#define CHSPEC_BW_GT(chspec, bw) \
+ (!(CHSPEC_IS_BW_160_WIDE(chspec) &&\
+ (bw == WL_CHANSPEC_BW_160 || bw == WL_CHANSPEC_BW_8080)) &&\
+ (CHSPEC_BW(chspec) > bw))
+
+#define CHSPEC_BW_LT(chspec, bw) \
+ (!(CHSPEC_IS_BW_160_WIDE(chspec) &&\
+ (bw == WL_CHANSPEC_BW_160 || bw == WL_CHANSPEC_BW_8080)) &&\
+ (CHSPEC_BW(chspec) < bw))
+
+/* Legacy Chanspec defines
+ * These are the defines for the previous format of the chanspec_t
+ */
#define WL_LCHANSPEC_CHAN_MASK 0x00ff
#define WL_LCHANSPEC_CHAN_SHIFT 0
#define LCHSPEC_IS5G(chspec) (((chspec) & WL_LCHANSPEC_BAND_MASK) == WL_LCHANSPEC_BAND_5G)
#define LCHSPEC_IS2G(chspec) (((chspec) & WL_LCHANSPEC_BAND_MASK) == WL_LCHANSPEC_BAND_2G)
+#define LCHSPEC_SB_UPPER(chspec) \
+ ((((chspec) & WL_LCHANSPEC_CTL_SB_MASK) == WL_LCHANSPEC_CTL_SB_UPPER) && \
+ (((chspec) & WL_LCHANSPEC_BW_MASK) == WL_LCHANSPEC_BW_40))
+#define LCHSPEC_SB_LOWER(chspec) \
+ ((((chspec) & WL_LCHANSPEC_CTL_SB_MASK) == WL_LCHANSPEC_CTL_SB_LOWER) && \
+ (((chspec) & WL_LCHANSPEC_BW_MASK) == WL_LCHANSPEC_BW_40))
+
#define LCHSPEC_CREATE(chan, band, bw, sb) ((uint16)((chan) | (sb) | (bw) | (band)))
#define CH20MHZ_LCHSPEC(channel) \
WL_LCHANSPEC_CTL_SB_NONE | (((channel) <= CH_MAX_2G_CHANNEL) ? \
WL_LCHANSPEC_BAND_2G : WL_LCHANSPEC_BAND_5G))
-#endif
-
-
-
-
-#define WF_CHAN_FACTOR_2_4_G 4814
+/*
+ * WF_CHAN_FACTOR_* constants are used to calculate the channel frequency
+ * given a channel number:
+ *   chan_freq (MHz) = chan_factor * 500 kHz + chan_number * 5 MHz
+ * (a worked example follows the factor defines below)
+ */
+/**
+ * Channel Factor for the starting frequency of 2.4 GHz channels.
+ * The value corresponds to 2407 MHz.
+ */
+#define WF_CHAN_FACTOR_2_4_G 4814 /* 2.4 GHz band, 2407 MHz */
-#define WF_CHAN_FACTOR_5_G 10000
+/**
+ * Channel Factor for the starting frequency of 5 GHz channels.
+ * The value corresponds to 5000 MHz.
+ */
+#define WF_CHAN_FACTOR_5_G 10000 /* 5 GHz band, 5000 MHz */
-#define WF_CHAN_FACTOR_4_G 8000
+/**
+ * Channel Factor for the starting frequency of 4.9 GHz channels.
+ * The value corresponds to 4000 MHz.
+ */
+#define WF_CHAN_FACTOR_4_G 8000 /* 4.9 GHz band for Japan */
-#define WLC_2G_25MHZ_OFFSET 5
+#define WLC_2G_25MHZ_OFFSET 5 /* 2.4GHz band channel offset */
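To make the factor arithmetic above concrete, here is a minimal stand-alone sketch (editorial only, user-space C for readability); the driver's own helper for this conversion is wf_channel2mhz(), declared later in this header.

#include <stdio.h>

/* chan_freq (MHz) = chan_factor * 500 kHz + chan_number * 5 MHz */
static unsigned int chan_to_mhz(unsigned int chan_factor, unsigned int channel)
{
	return chan_factor / 2 + channel * 5;
}

int main(void)
{
	printf("%u\n", chan_to_mhz(4814, 6));	/* 2407 + 30  = 2437 MHz (2.4 GHz channel 6) */
	printf("%u\n", chan_to_mhz(10000, 36));	/* 5000 + 180 = 5180 MHz (5 GHz channel 36)  */
	return 0;
}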
+/**
+ * Number of sub-bands in a chanspec of the specified bandwidth
+ */
+#define WF_NUM_SIDEBANDS_40MHZ 2
+#define WF_NUM_SIDEBANDS_80MHZ 4
+#define WF_NUM_SIDEBANDS_8080MHZ 4
+#define WF_NUM_SIDEBANDS_160MHZ 8
+/**
+ * Convert chanspec to ascii string
+ *
+ * @param chspec chanspec format
+ * @param buf ascii string of chanspec
+ *
+ * @return pointer to buf with room for at least CHANSPEC_STR_LEN bytes
+ * Original chanspec in case of error
+ *
+ * @see CHANSPEC_STR_LEN
+ */
+extern char * wf_chspec_ntoa_ex(chanspec_t chspec, char *buf);
+/**
+ * Convert chanspec to ascii string
+ *
+ * @param chspec chanspec format
+ * @param buf ascii string of chanspec
+ *
+ * @return pointer to buf with room for at least CHANSPEC_STR_LEN bytes
+ * NULL in case of error
+ *
+ * @see CHANSPEC_STR_LEN
+ */
extern char * wf_chspec_ntoa(chanspec_t chspec, char *buf);
-
+/**
+ * Convert ascii string to chanspec
+ *
+ * @param a pointer to input string
+ *
+ * @return a valid chanspec if successful, 0 otherwise
+ */
extern chanspec_t wf_chspec_aton(const char *a);
-
+/**
+ * Verify the chanspec fields are valid.
+ *
+ * Verify the chanspec is using a legal set of field values, i.e. that the chanspec
+ * specified a band, bw, ctl_sb and channel and that the combination could be
+ * legal given some set of circumstances.
+ *
+ * @param chanspec input chanspec to verify
+ *
+ * @return TRUE if the chanspec is malformed, FALSE if it looks good.
+ */
extern bool wf_chspec_malformed(chanspec_t chanspec);
-
+/**
+ * Verify the chanspec specifies a valid channel according to 802.11.
+ *
+ * @param chanspec input chanspec to verify
+ *
+ * @return TRUE if the chanspec is a valid 802.11 channel
+ */
extern bool wf_chspec_valid(chanspec_t chanspec);
-
+/**
+ * Return the primary (control) channel.
+ *
+ * This function returns the channel number of the primary 20MHz channel. For
+ * 20MHz channels this is just the channel number. For 40MHz or wider channels
+ * it is the primary 20MHz channel specified by the chanspec.
+ *
+ * @param chspec input chanspec
+ *
+ * @return Returns the channel number of the primary 20MHz channel
+ */
extern uint8 wf_chspec_ctlchan(chanspec_t chspec);
+/**
+ * Return the bandwidth string.
+ *
+ * This function returns the bandwidth string for the passed chanspec.
+ *
+ * @param chspec input chanspec
+ *
+ * @return Returns the bandwidth string
+ */
+extern char * wf_chspec_to_bw_str(chanspec_t chspec);
+/**
+ * Return the primary (control) chanspec.
+ *
+ * This function returns the chanspec of the primary 20MHz channel. For 20MHz
+ * channels this is just the chanspec. For 40MHz or wider channels it is the
+ * chanspec of the primary 20MHZ channel specified by the chanspec.
+ *
+ * @param chspec input chanspec
+ *
+ * @return Returns the chanspec of the primary 20MHz channel
+ */
extern chanspec_t wf_chspec_ctlchspec(chanspec_t chspec);
-
+/**
+ * Return the primary 40MHz chanspec of an 80MHz chanspec.
+ *
+ * This function returns the chanspec for the primary 40MHz of an 80MHz channel.
+ * The control sideband specifies the same 20MHz channel that the 80MHz channel is using
+ * as the primary 20MHz channel.
+ */
extern chanspec_t wf_chspec_primary40_chspec(chanspec_t chspec);
-
+/*
+ * Return the channel number for a given frequency and base frequency.
+ * The returned channel number is relative to the given base frequency.
+ * If the given base frequency is zero, a base frequency of 5 GHz is assumed for
+ * frequencies from 5 - 6 GHz, and 2.407 GHz is assumed for 2.4 - 2.5 GHz.
+ *
+ * Frequency is specified in MHz.
+ * The base frequency is specified as (start_factor * 500 kHz).
+ * Constants WF_CHAN_FACTOR_2_4_G, WF_CHAN_FACTOR_5_G are defined for
+ * 2.4 GHz and 5 GHz bands.
+ *
+ * The returned channel will be in the range [1, 14] in the 2.4 GHz band
+ * and [0, 200] otherwise.
+ * -1 is returned if the start_factor is WF_CHAN_FACTOR_2_4_G and the
+ * frequency is not a 2.4 GHz channel, or if the frequency is not an even
+ * multiple of 5 MHz from the base frequency to the base plus 1 GHz.
+ *
+ * Reference 802.11 REVma, section 17.3.8.3, and 802.11B section 18.4.6.2
+ *
+ * @param freq frequency in MHz
+ * @param start_factor base frequency in 500 kHz units, e.g. 10000 for 5 GHz
+ *
+ * @return Returns a channel number
+ *
+ * @see WF_CHAN_FACTOR_2_4_G
+ * @see WF_CHAN_FACTOR_5_G
+ */
extern int wf_mhz2channel(uint freq, uint start_factor);
-
+/**
+ * Return the center frequency in MHz of the given channel and base frequency.
+ *
+ * The channel number is interpreted relative to the given base frequency.
+ *
+ * The valid channel range is [1, 14] in the 2.4 GHz band and [0, 200] otherwise.
+ * The base frequency is specified as (start_factor * 500 kHz).
+ * Constants WF_CHAN_FACTOR_2_4_G, WF_CHAN_FACTOR_5_G are defined for
+ * 2.4 GHz and 5 GHz bands.
+ * The channel range of [1, 14] is only checked for a start_factor of
+ * WF_CHAN_FACTOR_2_4_G (4814).
+ * Odd start_factors produce channels on .5 MHz boundaries, in which case
+ * the answer is rounded down to an integral MHz.
+ * -1 is returned for an out of range channel.
+ *
+ * Reference 802.11 REVma, section 17.3.8.3, and 802.11B section 18.4.6.2
+ *
+ * @param channel input channel number
+ * @param start_factor base frequency in 500 kHz units, e.g. 10000 for 5 GHz
+ *
+ * @return Returns a frequency in MHz
+ *
+ * @see WF_CHAN_FACTOR_2_4_G
+ * @see WF_CHAN_FACTOR_5_G
+ */
extern int wf_channel2mhz(uint channel, uint start_factor);
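A hedged usage sketch for the two conversion helpers declared above; the expected values follow from the factor arithmetic documented earlier and are editorial, not driver output.

#include <bcmwifi_channels.h>	/* assumed include name for this header */

static void freq_channel_examples(void)
{
	int ch  = wf_mhz2channel(2437, WF_CHAN_FACTOR_2_4_G);	/* expected: 6 */
	int mhz = wf_channel2mhz(36, WF_CHAN_FACTOR_5_G);	/* expected: 5180 */
	int bad = wf_mhz2channel(5180, WF_CHAN_FACTOR_2_4_G);	/* expected: -1, not a 2.4 GHz channel */

	(void)ch; (void)mhz; (void)bad;
}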
+/**
+ * Returns the 80MHz chanspec corresponding to the following input
+ * parameters
+ *
+ * primary_channel - primary 20MHz channel
+ * center_channel - center channel number of the 80MHz channel
+ *
+ * The center_channel can be one of {42, 58, 106, 122, 138, 155}
+ *
+ * returns INVCHANSPEC in case of error
+ */
+extern chanspec_t wf_chspec_80(uint8 center_channel, uint8 primary_channel);
+/**
+ * Convert ctl chan and bw to chanspec
+ *
+ * @param ctl_ch channel
+ * @param bw bandwidth
+ *
+ * @return > 0 if successful or 0 otherwise
+ *
+ */
extern uint16 wf_channel2chspec(uint ctl_ch, uint bw);
extern uint wf_channel2freq(uint channel);
extern uint wf_freq2channel(uint freq);
+/*
+ * Returns the 80+80 MHz chanspec corresponding to the following input parameters
+ *
+ * primary_20mhz - Primary 20 MHz channel
+ * chan0_80MHz - center channel number of one frequency segment
+ * chan1_80MHz - center channel number of the other frequency segment
+ *
+ * Parameters chan0_80MHz and chan1_80MHz are channel numbers in {42, 58, 106, 122, 138, 155}.
+ * The primary channel must be contained in one of the 80MHz channels. This routine
+ * will determine which frequency segment is the primary 80 MHz segment.
+ *
+ * Returns INVCHANSPEC in case of error.
+ *
+ * Refer to IEEE802.11ac section 22.3.14 "Channelization".
+ */
+extern chanspec_t wf_chspec_get8080_chspec(uint8 primary_20mhz,
+ uint8 chan0_80Mhz, uint8 chan1_80Mhz);
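An illustrative call of the helper just declared, using channel numbers taken from its comment; this is a sketch only, not an addition to the driver.

#include <bcmwifi_channels.h>	/* assumed include name for this header */

static chanspec_t make_8080_example(void)
{
	/* Primary 20 MHz channel 36 lies inside the 80 MHz segment centred on 42,
	 * so 42 becomes the primary frequency segment and 155 the secondary one.
	 */
	chanspec_t cs = wf_chspec_get8080_chspec(36, 42, 155);

	return (cs == INVCHANSPEC) ? 0 : cs;
}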
+
+/*
+ * Returns the primary 80 Mhz channel for the provided chanspec
+ *
+ * chanspec - Input chanspec for which the 80MHz primary channel has to be retrieved
+ *
+ * returns -1 in case the provided channel is 20/40 Mhz chanspec
+ */
+extern uint8 wf_chspec_primary80_channel(chanspec_t chanspec);
-#endif
+/*
+ * Returns the secondary 80 Mhz channel for the provided chanspec
+ *
+ * chanspec - Input chanspec for which the 80MHz secondary channel has to be retrieved
+ *
+ * returns -1 in case the provided channel is 20/40 Mhz chanspec
+ */
+extern uint8 wf_chspec_secondary80_channel(chanspec_t chanspec);
+
+/*
+ * This function returns the chanspec for the primary 80MHz of a 160MHz or 80+80 channel.
+ */
+extern chanspec_t wf_chspec_primary80_chspec(chanspec_t chspec);
+
+#ifdef WL11AC_80P80
+/*
+ * This function returns the centre channel for the given chanspec.
+ * In case of an 80+80 chanspec it returns the centre channel of the primary 80 MHz segment.
+ */
+extern uint8 wf_chspec_channel(chanspec_t chspec);
+#endif
+#endif /* _bcmwifi_channels_h_ */
#ifdef __cplusplus
extern "C" {
-#endif
+#endif /* __cplusplus */
#define WL_RATESET_SZ_DSSS 4
#define WL_RATESET_SZ_OFDM 8
-#define WL_RATESET_SZ_HT_MCS 8
#define WL_RATESET_SZ_VHT_MCS 10
-#define WL_TX_CHAINS_MAX 3
+#if defined(WLPROPRIETARY_11N_RATES)
+#define WL_RATESET_SZ_HT_MCS WL_RATESET_SZ_VHT_MCS
+#else
+#define WL_RATESET_SZ_HT_MCS 8
+#endif
-#define WL_RATE_DISABLED (-128)
+#define WL_RATESET_SZ_HT_IOCTL 8 /* MAC histogram, compatibility with wl utility */
+#define WL_TX_CHAINS_MAX 3
+
+#define WL_RATE_DISABLED (-128) /* Power value corresponding to unsupported rate */
+/* Transmit channel bandwidths */
typedef enum wl_tx_bw {
WL_TX_BW_20,
WL_TX_BW_40,
WL_TX_BW_80,
WL_TX_BW_20IN40,
WL_TX_BW_20IN80,
- WL_TX_BW_40IN80
+ WL_TX_BW_40IN80,
+ WL_TX_BW_160,
+ WL_TX_BW_20IN160,
+ WL_TX_BW_40IN160,
+ WL_TX_BW_80IN160,
+ WL_TX_BW_ALL,
+ WL_TX_BW_8080,
+ WL_TX_BW_8080CHAN2,
+ WL_TX_BW_20IN8080,
+ WL_TX_BW_40IN8080,
+ WL_TX_BW_80IN8080
} wl_tx_bw_t;
-
+/*
+ * Transmit modes.
+ * Not all modes are listed here, only those required for disambiguation. e.g. SPEXP is not listed
+ */
typedef enum wl_tx_mode {
WL_TX_MODE_NONE,
WL_TX_MODE_STBC,
} wl_tx_mode_t;
-
+/* Number of transmit chains */
typedef enum wl_tx_chains {
WL_TX_CHAINS_1 = 1,
WL_TX_CHAINS_2,
} wl_tx_chains_t;
-
+/* Number of transmit streams */
typedef enum wl_tx_nss {
WL_TX_NSS_1 = 1,
WL_TX_NSS_2,
typedef enum clm_rates {
-
+ /************
+ * 1 chain *
+ ************
+ */
-
+ /* 1 Stream */
WL_RATE_1X1_DSSS_1 = 0,
WL_RATE_1X1_DSSS_2 = 1,
WL_RATE_1X1_DSSS_5_5 = 2,
WL_RATE_1X1_VHT9SS1 = 21,
-
+ /************
+ * 2 chains *
+ ************
+ */
-
+ /* 1 Stream expanded + 1 */
WL_RATE_1X2_DSSS_1 = 22,
WL_RATE_1X2_DSSS_2 = 23,
WL_RATE_1X2_DSSS_5_5 = 24,
WL_RATE_1X2_VHT8SS1 = 42,
WL_RATE_1X2_VHT9SS1 = 43,
-
+ /* 2 Streams */
WL_RATE_2X2_STBC_MCS0 = 44,
WL_RATE_2X2_STBC_MCS1 = 45,
WL_RATE_2X2_STBC_MCS2 = 46,
WL_RATE_2X2_VHT8SS2 = 62,
WL_RATE_2X2_VHT9SS2 = 63,
-
+ /************
+ * 3 chains *
+ ************
+ */
-
+ /* 1 Stream expanded + 2 */
WL_RATE_1X3_DSSS_1 = 64,
WL_RATE_1X3_DSSS_2 = 65,
WL_RATE_1X3_DSSS_5_5 = 66,
WL_RATE_1X3_VHT8SS1 = 84,
WL_RATE_1X3_VHT9SS1 = 85,
-
+ /* 2 Streams expanded + 1 */
WL_RATE_2X3_STBC_MCS0 = 86,
WL_RATE_2X3_STBC_MCS1 = 87,
WL_RATE_2X3_STBC_MCS2 = 88,
WL_RATE_2X3_VHT8SS2 = 104,
WL_RATE_2X3_VHT9SS2 = 105,
-
+ /* 3 Streams */
WL_RATE_3X3_SDM_MCS16 = 106,
WL_RATE_3X3_SDM_MCS17 = 107,
WL_RATE_3X3_SDM_MCS18 = 108,
WL_RATE_3X3_VHT9SS3 = 115,
-
+ /****************************
+ * TX Beamforming, 2 chains *
+ ****************************
+ */
-
+ /* 1 Stream expanded + 1 */
WL_RATE_1X2_TXBF_OFDM_6 = 116,
WL_RATE_1X2_TXBF_OFDM_9 = 117,
WL_RATE_1X2_TXBF_VHT8SS1 = 132,
WL_RATE_1X2_TXBF_VHT9SS1 = 133,
-
+ /* 2 Streams */
WL_RATE_2X2_TXBF_SDM_MCS8 = 134,
WL_RATE_2X2_TXBF_SDM_MCS9 = 135,
WL_RATE_2X2_TXBF_VHT7SS2 = 141,
-
+ /****************************
+ * TX Beamforming, 3 chains *
+ ****************************
+ */
-
+ /* 1 Stream expanded + 2 */
WL_RATE_1X3_TXBF_OFDM_6 = 142,
WL_RATE_1X3_TXBF_OFDM_9 = 143,
WL_RATE_1X3_TXBF_VHT8SS1 = 158,
WL_RATE_1X3_TXBF_VHT9SS1 = 159,
-
+ /* 2 Streams expanded + 1 */
WL_RATE_2X3_TXBF_SDM_MCS8 = 160,
WL_RATE_2X3_TXBF_SDM_MCS9 = 161,
WL_RATE_2X3_TXBF_VHT8SS2 = 168,
WL_RATE_2X3_TXBF_VHT9SS2 = 169,
-
+ /* 3 Streams */
WL_RATE_3X3_TXBF_SDM_MCS16 = 170,
WL_RATE_3X3_TXBF_SDM_MCS17 = 171,
WL_RATE_3X3_TXBF_VHT7SS3 = 177
} clm_rates_t;
-
+/* Number of rate codes */
#define WL_NUMRATES 178
#ifdef __cplusplus
}
-#endif
+#endif /* __cplusplus */
-#endif
+#endif /* _bcmwifi_rates_h_ */
--- /dev/null
+/*
+ * Definitions for nl80211 vendor command/event access to host driver
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: brcm_nl80211.h 487126 2014-06-24 23:06:12Z $
+ *
+ */
+
+#ifndef _brcm_nl80211_h_
+#define _brcm_nl80211_h_
+
+#define OUI_BRCM 0x001018
+
+enum wl_vendor_subcmd {
+ BRCM_VENDOR_SCMD_UNSPEC,
+ BRCM_VENDOR_SCMD_PRIV_STR
+};
+
+struct bcm_nlmsg_hdr {
+ uint cmd; /* common ioctl definition */
+ uint len; /* expected return buffer length */
+ uint offset; /* user buffer offset */
+ uint set; /* get or set request optional */
+ uint magic; /* magic number for verification */
+};
+
+enum bcmnl_attrs {
+ BCM_NLATTR_UNSPEC,
+
+ BCM_NLATTR_LEN,
+ BCM_NLATTR_DATA,
+
+ __BCM_NLATTR_AFTER_LAST,
+ BCM_NLATTR_MAX = __BCM_NLATTR_AFTER_LAST - 1
+};
+
+struct nl_prv_data {
+ int err; /* return result */
+ void *data; /* ioctl return buffer pointer */
+ uint len; /* ioctl return buffer length */
+ struct bcm_nlmsg_hdr *nlioc; /* bcm_nlmsg_hdr header pointer */
+};
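A hedged sketch of how a caller might fill bcm_nlmsg_hdr ahead of a private command payload. Field meanings follow the struct comments above; the offset convention, the magic value, and the surrounding netlink/ioctl plumbing are assumptions and are not shown.

#include <typedefs.h>		/* uint */
#include <brcm_nl80211.h>	/* this header */

static void fill_nlmsg_hdr(struct bcm_nlmsg_hdr *hdr, uint cmd,
	uint ret_len, uint is_set, uint magic)
{
	hdr->cmd    = cmd;		/* common ioctl definition, e.g. a WLC_* code */
	hdr->len    = ret_len;		/* expected return buffer length */
	hdr->offset = sizeof(*hdr);	/* assumption: payload immediately follows the header */
	hdr->set    = is_set;		/* get (0) or set (non-zero) request */
	hdr->magic  = magic;		/* verification magic expected by the driver (value not shown here) */
}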
+
+#endif /* _brcm_nl80211_h_ */
--- /dev/null
+/*
+ * Definitions and support routines for the circular message buffer (circularbuf).
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: circularbuf.h 452258 2014-01-29 19:17:57Z $
+ */
+
+#ifndef __CIRCULARBUF_H_INCLUDED__
+#define __CIRCULARBUF_H_INCLUDED__
+
+#include <osl.h>
+#include <typedefs.h>
+#include <bcmendian.h>
+
+/* Enumerations of return values provided by MsgBuf implementation */
+typedef enum {
+ CIRCULARBUF_FAILURE = -1,
+ CIRCULARBUF_SUCCESS
+} circularbuf_ret_t;
+
+/* Core circularbuf circular buffer structure */
+typedef struct circularbuf_s
+{
+ uint16 depth; /* Depth of circular buffer */
+ uint16 r_ptr; /* Read Ptr */
+ uint16 w_ptr; /* Write Ptr */
+ uint16 e_ptr; /* End Ptr */
+	uint16 wp_ptr;	/* write-pending pointer: scheduled for DMA but not yet complete */
+	uint16 rp_ptr;	/* read-pending pointer: scheduled for DMA but not yet complete */
+
+ uint8 *buf_addr;
+ void *mb_ctx;
+ void (*mb_ring_bell)(void *ctx);
+} circularbuf_t;
+
+#define CBUF_ERROR_VAL 0x00000001 /* Error level tracing */
+#define CBUF_TRACE_VAL 0x00000002 /* Function level tracing */
+#define CBUF_INFORM_VAL 0x00000004 /* debug level tracing */
+
+extern int cbuf_msg_level;
+
+#define CBUF_ERROR(args) do {if (cbuf_msg_level & CBUF_ERROR_VAL) printf args;} while (0)
+#define CBUF_TRACE(args) do {if (cbuf_msg_level & CBUF_TRACE_VAL) printf args;} while (0)
+#define CBUF_INFO(args) do {if (cbuf_msg_level & CBUF_INFORM_VAL) printf args;} while (0)
+
+#define CIRCULARBUF_START(x) ((x)->buf_addr)
+#define CIRCULARBUF_WRITE_PTR(x) ((x)->w_ptr)
+#define CIRCULARBUF_READ_PTR(x) ((x)->r_ptr)
+#define CIRCULARBUF_END_PTR(x) ((x)->e_ptr)
+
+#define circularbuf_debug_print(handle) \
+ CBUF_INFO(("%s:%d:\t%p rp=%4d r=%4d wp=%4d w=%4d e=%4d\n", \
+ __FUNCTION__, __LINE__, \
+ (void *) CIRCULARBUF_START(handle), \
+ (int) (handle)->rp_ptr, (int) (handle)->r_ptr, \
+ (int) (handle)->wp_ptr, (int) (handle)->w_ptr, \
+ (int) (handle)->e_ptr));
+
+
+/* Callback registered by application/mail-box with the circularbuf implementation.
+ * It is invoked by the circularbuf implementation when a write is complete and
+ * the peer needs to be informed.
+ */
+typedef void (*mb_ring_t)(void *ctx);
+
+
+/* Public Functions exposed by circularbuf */
+void
+circularbuf_init(circularbuf_t *handle, void *buf_base_addr, uint16 total_buf_len);
+void
+circularbuf_register_cb(circularbuf_t *handle, mb_ring_t mb_ring_func, void *ctx);
+
+/* Write Functions */
+void *
+circularbuf_reserve_for_write(circularbuf_t *handle, uint16 size);
+void
+circularbuf_write_complete(circularbuf_t *handle, uint16 bytes_written);
+
+/* Read Functions */
+void *
+circularbuf_get_read_ptr(circularbuf_t *handle, uint16 *avail_len);
+circularbuf_ret_t
+circularbuf_read_complete(circularbuf_t *handle, uint16 bytes_read);
+
+/*
+ * circularbuf_get_read_ptr() updates rp_ptr by the amount that the consumer
+ * is supposed to read. The consumer may not read the entire amount.
+ * In such a case, circularbuf_revert_rp_ptr() call follows a corresponding
+ * circularbuf_get_read_ptr() call to revert the rp_ptr back to
+ * the point till which data has actually been processed.
+ * It is not valid if it is preceded by multiple get_read_ptr() calls
+ */
+circularbuf_ret_t
+circularbuf_revert_rp_ptr(circularbuf_t *handle, uint16 bytes);
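A usage sketch of the API above, showing one write and one read on the ring; error handling, the doorbell body, and the record contents are placeholders, and the include name is an assumption.

#include <circularbuf.h>	/* this header */

static void example_ring_bell(void *ctx)
{
	/* notify the peer that new data is available, e.g. via a mailbox register */
}

static void circularbuf_example(circularbuf_t *cb, void *ring_mem, uint16 ring_len)
{
	void *slot;
	uint16 avail;

	circularbuf_init(cb, ring_mem, ring_len);
	circularbuf_register_cb(cb, example_ring_bell, NULL);

	/* Producer: reserve space, fill it, then mark the write complete. */
	slot = circularbuf_reserve_for_write(cb, 64);
	if (slot != NULL) {
		/* ... copy a 64-byte record into slot ... */
		circularbuf_write_complete(cb, 64);
	}

	/* Consumer: fetch the readable region, process it, then retire it. */
	slot = circularbuf_get_read_ptr(cb, &avail);
	if (slot != NULL)
		circularbuf_read_complete(cb, avail);
}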
+
+#endif /* __CIRCULARBUF_H_INCLUDED__ */
--- /dev/null
+/*
+ * Custom OID/ioctl definitions for
+ * Broadcom 802.11abg Networking Device Driver
+ *
+ * Definitions subject to change without notice.
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: wlioctl_defs.h 403826 2013-05-22 16:40:55Z $
+ */
+
+
+#ifndef wlioctl_defs_h
+#define wlioctl_defs_h
+
+
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+
+
+
+/* All builds use the new 11ac ratespec/chanspec */
+#undef D11AC_IOTYPES
+#define D11AC_IOTYPES
+
+/* WL_RSPEC defines for rate information */
+#define WL_RSPEC_RATE_MASK 0x000000FF /* rate or HT MCS value */
+#define WL_RSPEC_VHT_MCS_MASK 0x0000000F /* VHT MCS value */
+#define WL_RSPEC_VHT_NSS_MASK 0x000000F0 /* VHT Nss value */
+#define WL_RSPEC_VHT_NSS_SHIFT 4 /* VHT Nss value shift */
+#define WL_RSPEC_TXEXP_MASK 0x00000300
+#define WL_RSPEC_TXEXP_SHIFT 8
+#define WL_RSPEC_BW_MASK 0x00070000 /* bandwidth mask */
+#define WL_RSPEC_BW_SHIFT 16 /* bandwidth shift */
+#define WL_RSPEC_STBC 0x00100000 /* STBC encoding, Nsts = 2 x Nss */
+#define WL_RSPEC_TXBF 0x00200000 /* bit indicates TXBF mode */
+#define WL_RSPEC_LDPC 0x00400000 /* bit indicates adv coding in use */
+#define WL_RSPEC_SGI 0x00800000 /* Short GI mode */
+#define WL_RSPEC_ENCODING_MASK 0x03000000 /* Encoding of Rate/MCS field */
+#define WL_RSPEC_OVERRIDE_RATE 0x40000000 /* bit indicate to override mcs only */
+#define WL_RSPEC_OVERRIDE_MODE 0x80000000 /* bit indicates override both rate & mode */
+
+/* WL_RSPEC_ENCODING field defs */
+#define WL_RSPEC_ENCODE_RATE 0x00000000 /* Legacy rate is stored in RSPEC_RATE_MASK */
+#define WL_RSPEC_ENCODE_HT 0x01000000 /* HT MCS is stored in RSPEC_RATE_MASK */
+#define WL_RSPEC_ENCODE_VHT 0x02000000 /* VHT MCS and Nss is stored in RSPEC_RATE_MASK */
+
+/* WL_RSPEC_BW field defs */
+#define WL_RSPEC_BW_UNSPECIFIED 0
+#define WL_RSPEC_BW_20MHZ 0x00010000
+#define WL_RSPEC_BW_40MHZ 0x00020000
+#define WL_RSPEC_BW_80MHZ 0x00030000
+#define WL_RSPEC_BW_160MHZ 0x00040000
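To show how the mask/shift pairs above compose, a hedged decode of one ratespec value follows; the sample constant is constructed purely from the defines in this block and is editorial, not hardware output.

#include <typedefs.h>	/* uint, uint32, bool */

/* 0x02830027 == WL_RSPEC_ENCODE_VHT | WL_RSPEC_SGI | WL_RSPEC_BW_80MHZ |
 *               (2 << WL_RSPEC_VHT_NSS_SHIFT) | 7
 * i.e. VHT MCS 7, Nss 2, 80 MHz, short GI.
 */
static void decode_rspec_example(uint32 rspec)
{
	if ((rspec & WL_RSPEC_ENCODING_MASK) == WL_RSPEC_ENCODE_VHT) {
		uint mcs = rspec & WL_RSPEC_VHT_MCS_MASK;
		uint nss = (rspec & WL_RSPEC_VHT_NSS_MASK) >> WL_RSPEC_VHT_NSS_SHIFT;
		uint bw  = rspec & WL_RSPEC_BW_MASK;	/* compare against WL_RSPEC_BW_*MHZ */
		bool sgi = (rspec & WL_RSPEC_SGI) != 0;

		(void)mcs; (void)nss; (void)bw; (void)sgi;
	}
}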
+
+/* Legacy defines for the nrate iovar */
+#define OLD_NRATE_MCS_INUSE 0x00000080 /* MCS in use, indicates b0-6 holds an MCS */
+#define OLD_NRATE_RATE_MASK 0x0000007f /* rate/mcs value */
+#define OLD_NRATE_STF_MASK 0x0000ff00 /* stf mode mask: siso, cdd, stbc, sdm */
+#define OLD_NRATE_STF_SHIFT 8 /* stf mode shift */
+#define OLD_NRATE_OVERRIDE 0x80000000 /* bit indicates override both rate & mode */
+#define OLD_NRATE_OVERRIDE_MCS_ONLY 0x40000000 /* bit indicate to override mcs only */
+#define OLD_NRATE_SGI 0x00800000 /* sgi mode */
+#define OLD_NRATE_LDPC_CODING 0x00400000 /* bit indicates adv coding in use */
+
+#define OLD_NRATE_STF_SISO 0 /* stf mode SISO */
+#define OLD_NRATE_STF_CDD 1 /* stf mode CDD */
+#define OLD_NRATE_STF_STBC 2 /* stf mode STBC */
+#define OLD_NRATE_STF_SDM 3 /* stf mode SDM */
+
+#define HIGHEST_SINGLE_STREAM_MCS 7 /* MCS values greater than this enable multiple streams */
+
+/* given a proprietary MCS, get number of spatial streams */
+#define GET_PROPRIETARY_11N_MCS_NSS(mcs) (1 + ((mcs) - 85) / 8)
+
+#define GET_11N_MCS_NSS(mcs) ((mcs) < 32 ? (1 + ((mcs) / 8)) \
+ : ((mcs) == 32 ? 1 : GET_PROPRIETARY_11N_MCS_NSS(mcs)))
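Worked values for the NSS helpers above, written as a compile-checkable editorial sketch; nothing here runs in the driver.

#include <assert.h>

static void nss_macro_examples(void)
{
	assert(GET_11N_MCS_NSS(7)  == 1);	/* MCS 0-7:   1 spatial stream  */
	assert(GET_11N_MCS_NSS(15) == 2);	/* MCS 8-15:  2 spatial streams */
	assert(GET_11N_MCS_NSS(23) == 3);	/* MCS 16-23: 3 spatial streams */
	assert(GET_11N_MCS_NSS(32) == 1);	/* MCS 32: single-stream duplicate format */
}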
+
+#define MAX_CCA_CHANNELS 38 /* Max number of 20 Mhz wide channels */
+#define MAX_CCA_SECS 60 /* CCA keeps this many seconds history */
+
+#define IBSS_MED 15 /* Medium in-bss congestion percentage */
+#define IBSS_HI 25 /* High in-bss congestion percentage */
+#define OBSS_MED 12
+#define OBSS_HI 25
+#define INTERFER_MED 5
+#define INTERFER_HI 10
+
+#define CCA_FLAG_2G_ONLY 0x01 /* Return a channel from 2.4 Ghz band */
+#define CCA_FLAG_5G_ONLY 0x02 /* Return a channel from 5 GHz band */
+#define CCA_FLAG_IGNORE_DURATION 0x04 /* Ignore dwell time for each channel */
+#define CCA_FLAGS_PREFER_1_6_11 0x10
+#define CCA_FLAG_IGNORE_INTERFER 0x20 /* do not exclude channel based on interference level */
+
+#define CCA_ERRNO_BAND 1 /* After filtering for band pref, no choices left */
+#define CCA_ERRNO_DURATION 2 /* After filtering for duration, no choices left */
+#define CCA_ERRNO_PREF_CHAN 3 /* After filtering for chan pref, no choices left */
+#define CCA_ERRNO_INTERFER 4 /* After filtering for interference, no choices left */
+#define CCA_ERRNO_TOO_FEW 5 /* Only 1 channel was input */
+
+#define WL_STA_AID(a) ((a) &~ 0xc000)
+
+/* Flags for sta_info_t indicating properties of STA */
+#define WL_STA_BRCM 0x00000001 /* Running a Broadcom driver */
+#define WL_STA_WME 0x00000002 /* WMM association */
+#define WL_STA_NONERP 0x00000004 /* No ERP */
+#define WL_STA_AUTHE 0x00000008 /* Authenticated */
+#define WL_STA_ASSOC 0x00000010 /* Associated */
+#define WL_STA_AUTHO 0x00000020 /* Authorized */
+#define WL_STA_WDS 0x00000040 /* Wireless Distribution System */
+#define WL_STA_WDS_LINKUP 0x00000080 /* WDS traffic/probes flowing properly */
+#define WL_STA_PS 0x00000100 /* STA is in power save mode from AP's viewpoint */
+#define WL_STA_APSD_BE 0x00000200 /* APSD delv/trigger for AC_BE is default enabled */
+#define WL_STA_APSD_BK 0x00000400 /* APSD delv/trigger for AC_BK is default enabled */
+#define WL_STA_APSD_VI 0x00000800 /* APSD delv/trigger for AC_VI is default enabled */
+#define WL_STA_APSD_VO 0x00001000 /* APSD delv/trigger for AC_VO is default enabled */
+#define WL_STA_N_CAP 0x00002000 /* STA 802.11n capable */
+#define WL_STA_SCBSTATS 0x00004000 /* Per STA debug stats */
+#define WL_STA_AMPDU_CAP 0x00008000 /* STA AMPDU capable */
+#define WL_STA_AMSDU_CAP 0x00010000 /* STA AMSDU capable */
+#define WL_STA_MIMO_PS 0x00020000 /* mimo ps mode is enabled */
+#define WL_STA_MIMO_RTS 0x00040000 /* send rts in mimo ps mode */
+#define WL_STA_RIFS_CAP 0x00080000 /* rifs enabled */
+#define WL_STA_VHT_CAP 0x00100000 /* STA VHT(11ac) capable */
+#define WL_STA_WPS 0x00200000 /* WPS state */
+
+#define WL_WDS_LINKUP WL_STA_WDS_LINKUP /* deprecated */
+
+/* STA HT cap fields */
+#define WL_STA_CAP_LDPC_CODING 0x0001 /* Support for rx of LDPC coded pkts */
+#define WL_STA_CAP_40MHZ 0x0002 /* FALSE:20Mhz, TRUE:20/40MHZ supported */
+#define WL_STA_CAP_MIMO_PS_MASK 0x000C /* Mimo PS mask */
+#define WL_STA_CAP_MIMO_PS_SHIFT 0x0002 /* Mimo PS shift */
+#define WL_STA_CAP_MIMO_PS_OFF 0x0003 /* Mimo PS, no restriction */
+#define WL_STA_CAP_MIMO_PS_RTS 0x0001 /* Mimo PS, send RTS/CTS around MIMO frames */
+#define WL_STA_CAP_MIMO_PS_ON 0x0000 /* Mimo PS, MIMO disallowed */
+#define WL_STA_CAP_GF 0x0010 /* Greenfield preamble support */
+#define WL_STA_CAP_SHORT_GI_20 0x0020 /* 20MHZ short guard interval support */
+#define WL_STA_CAP_SHORT_GI_40 0x0040 /* 40Mhz short guard interval support */
+#define WL_STA_CAP_TX_STBC 0x0080 /* Tx STBC support */
+#define WL_STA_CAP_RX_STBC_MASK 0x0300 /* Rx STBC mask */
+#define WL_STA_CAP_RX_STBC_SHIFT 8 /* Rx STBC shift */
+#define WL_STA_CAP_DELAYED_BA 0x0400 /* delayed BA support */
+#define WL_STA_CAP_MAX_AMSDU 0x0800 /* Max AMSDU size in bytes , 0=3839, 1=7935 */
+#define WL_STA_CAP_DSSS_CCK 0x1000 /* DSSS/CCK supported by the BSS */
+#define WL_STA_CAP_PSMP 0x2000 /* Power Save Multi Poll support */
+#define WL_STA_CAP_40MHZ_INTOLERANT 0x4000 /* 40MHz Intolerant */
+#define WL_STA_CAP_LSIG_TXOP 0x8000 /* L-SIG TXOP protection support */
+
+#define WL_STA_CAP_RX_STBC_NO 0x0 /* no rx STBC support */
+#define WL_STA_CAP_RX_STBC_ONE_STREAM 0x1 /* rx STBC support of 1 spatial stream */
+#define WL_STA_CAP_RX_STBC_TWO_STREAM 0x2 /* rx STBC support of 1-2 spatial streams */
+#define WL_STA_CAP_RX_STBC_THREE_STREAM 0x3 /* rx STBC support of 1-3 spatial streams */
+
+/* scb vht flags */
+#define WL_STA_VHT_LDPCCAP 0x0001
+#define WL_STA_SGI80 0x0002
+#define WL_STA_SGI160 0x0004
+#define WL_STA_VHT_TX_STBCCAP 0x0008
+#define WL_STA_VHT_RX_STBCCAP 0x0010
+#define WL_STA_SU_BEAMFORMER 0x0020
+#define WL_STA_SU_BEAMFORMEE 0x0040
+#define WL_STA_MU_BEAMFORMER 0x0080
+#define WL_STA_MU_BEAMFORMEE 0x0100
+#define WL_STA_VHT_TXOP_PS 0x0200
+#define WL_STA_HTC_VHT_CAP 0x0400
+
+/* Values for TX Filter override mode */
+#define WLC_TXFILTER_OVERRIDE_DISABLED 0
+#define WLC_TXFILTER_OVERRIDE_ENABLED 1
+
+#define WL_IOCTL_ACTION_GET 0x0
+#define WL_IOCTL_ACTION_SET 0x1
+#define WL_IOCTL_ACTION_OVL_IDX_MASK 0x1e
+#define WL_IOCTL_ACTION_OVL_RSV 0x20
+#define WL_IOCTL_ACTION_OVL 0x40
+#define WL_IOCTL_ACTION_MASK 0x7e
+#define WL_IOCTL_ACTION_OVL_SHIFT 1
+
+#define WL_BSSTYPE_INFRA 1
+#define WL_BSSTYPE_INDEP 0
+#define WL_BSSTYPE_ANY 2
+
+/* Bitmask for scan_type */
+#define WL_SCANFLAGS_PASSIVE 0x01 /* force passive scan */
+#define WL_SCANFLAGS_RESERVED 0x02 /* Reserved */
+#define WL_SCANFLAGS_PROHIBITED 0x04 /* allow scanning prohibited channels */
+#define WL_SCANFLAGS_OFFCHAN 0x08 /* allow scanning/reporting off-channel APs */
+#define WL_SCANFLAGS_HOTSPOT 0x10 /* automatic ANQP to hotspot APs */
+#define WL_SCANFLAGS_SWTCHAN 0x20 /* Force channel switch for different bandwidth */
+
+/* wl_iscan_results status values */
+#define WL_SCAN_RESULTS_SUCCESS 0
+#define WL_SCAN_RESULTS_PARTIAL 1
+#define WL_SCAN_RESULTS_PENDING 2
+#define WL_SCAN_RESULTS_ABORTED 3
+#define WL_SCAN_RESULTS_NO_MEM 4
+
+#define SCANOL_ENABLED (1 << 0)
+#define SCANOL_BCAST_SSID (1 << 1)
+#define SCANOL_NOTIFY_BCAST_SSID (1 << 2)
+#define SCANOL_RESULTS_PER_CYCLE (1 << 3)
+
+/* scan times in milliseconds */
+#define SCANOL_HOME_TIME 45 /* for home channel processing */
+#define SCANOL_ASSOC_TIME 20 /* dwell on a channel while associated */
+#define SCANOL_UNASSOC_TIME 40 /* dwell on a channel while unassociated */
+#define SCANOL_PASSIVE_TIME 110 /* listen on a channel for passive scan */
+#define SCANOL_AWAY_LIMIT 100 /* max time to be away from home channel */
+#define SCANOL_IDLE_REST_TIME 40
+#define SCANOL_IDLE_REST_MULTIPLIER 0
+#define SCANOL_ACTIVE_REST_TIME 20
+#define SCANOL_ACTIVE_REST_MULTIPLIER 0
+#define SCANOL_CYCLE_IDLE_REST_TIME 300000 /* Idle Rest Time between Scan Cycle (msec) */
+#define SCANOL_CYCLE_IDLE_REST_MULTIPLIER 0 /* Idle Rest Time Multiplier */
+#define SCANOL_CYCLE_ACTIVE_REST_TIME 200
+#define SCANOL_CYCLE_ACTIVE_REST_MULTIPLIER 0
+#define SCANOL_MAX_REST_TIME 3600000 /* max rest time between scan cycle (msec) */
+#define SCANOL_CYCLE_DEFAULT 0 /* default for Max Scan Cycle, 0 = forever */
+#define SCANOL_CYCLE_MAX 864000 /* Max Scan Cycle */
+ /* 10 sec/scan cycle => 100 days */
+#define SCANOL_NPROBES 2 /* for Active scan; send n probes on each channel */
+#define SCANOL_NPROBES_MAX 5 /* for Active scan; send n probes on each channel */
+#define SCANOL_SCAN_START_DLY 10 /* delay start of offload scan (sec) */
+#define SCANOL_SCAN_START_DLY_MAX 240 /* delay start of offload scan (sec) */
+#define SCANOL_MULTIPLIER_MAX 10 /* Max Multiplier */
+#define SCANOL_UNASSOC_TIME_MAX 100 /* max dwell on a channel while unassociated */
+#define SCANOL_PASSIVE_TIME_MAX 500 /* max listen on a channel for passive scan */
+#define SCANOL_SSID_MAX 16 /* max supported preferred SSID */
+
+/* masks for channel and ssid count */
+#define WL_SCAN_PARAMS_COUNT_MASK 0x0000ffff
+#define WL_SCAN_PARAMS_NSSID_SHIFT 16
+
+#define WL_SCAN_ACTION_START 1
+#define WL_SCAN_ACTION_CONTINUE 2
+#define WL_SCAN_ACTION_ABORT 3
+
+
+#define ANTENNA_NUM_1 1 /* total number of antennas to be used */
+#define ANTENNA_NUM_2 2
+#define ANTENNA_NUM_3 3
+#define ANTENNA_NUM_4 4
+
+#define ANT_SELCFG_AUTO 0x80 /* bit indicates antenna sel AUTO */
+#define ANT_SELCFG_MASK 0x33 /* antenna configuration mask */
+#define ANT_SELCFG_TX_UNICAST 0 /* unicast tx antenna configuration */
+#define ANT_SELCFG_RX_UNICAST 1 /* unicast rx antenna configuration */
+#define ANT_SELCFG_TX_DEF 2 /* default tx antenna configuration */
+#define ANT_SELCFG_RX_DEF 3 /* default rx antenna configuration */
+
+/* interference source detection and identification mode */
+#define ITFR_MODE_DISABLE 0 /* disable feature */
+#define ITFR_MODE_MANUAL_ENABLE 1 /* enable manual detection */
+#define ITFR_MODE_AUTO_ENABLE 2 /* enable auto detection */
+
+/* bit definitions for flags in interference source report */
+#define ITFR_INTERFERENCED 1 /* interference detected */
+#define ITFR_HOME_CHANNEL 2 /* home channel has interference */
+#define ITFR_NOISY_ENVIRONMENT 4 /* noisy environment so feature stopped */
+
+#define WL_NUM_RPI_BINS 8
+#define WL_RM_TYPE_BASIC 1
+#define WL_RM_TYPE_CCA 2
+#define WL_RM_TYPE_RPI 3
+#define WL_RM_TYPE_ABORT -1 /* ABORT any in-progress RM request */
+
+#define WL_RM_FLAG_PARALLEL (1<<0)
+
+#define WL_RM_FLAG_LATE (1<<1)
+#define WL_RM_FLAG_INCAPABLE (1<<2)
+#define WL_RM_FLAG_REFUSED (1<<3)
+
+/* flags */
+#define WLC_ASSOC_REQ_IS_REASSOC 0x01 /* assoc req was actually a reassoc */
+
+#define WLC_CIS_DEFAULT 0 /* built-in default */
+#define WLC_CIS_SROM 1 /* source is sprom */
+#define WLC_CIS_OTP 2 /* source is otp */
+
+/* PCL - Power Control Loop */
+/* current gain setting is replaced by user input */
+#define WL_ATTEN_APP_INPUT_PCL_OFF 0 /* turn off PCL, apply supplied input */
+#define WL_ATTEN_PCL_ON 1 /* turn on PCL */
+/* current gain setting is maintained */
+#define WL_ATTEN_PCL_OFF 2 /* turn off PCL. */
+
+#define PLC_CMD_FAILOVER 1
+#define PLC_CMD_MAC_COST 2
+#define PLC_CMD_LINK_COST 3
+#define PLC_CMD_NODE_LIST 4
+
+#define NODE_TYPE_UNKNOWN 0 /* Unknown link */
+#define NODE_TYPE_WIFI_ONLY 1 /* Pure Wireless STA node */
+#define NODE_TYPE_PLC_ONLY 2 /* Pure PLC only node */
+#define NODE_TYPE_WIFI_PLC 3 /* WiFi PLC capable node */
+
+/* defines used by poweridx iovar - it controls power in a-band */
+/* current gain setting is maintained */
+#define WL_PWRIDX_PCL_OFF -2 /* turn off PCL. */
+#define WL_PWRIDX_PCL_ON -1 /* turn on PCL */
+#define WL_PWRIDX_LOWER_LIMIT -2 /* lower limit */
+#define WL_PWRIDX_UPPER_LIMIT 63 /* upper limit */
+/* value >= 0 causes
+ * - input to be set to that value
+ * - PCL to be off
+ */
+
+#define BCM_MAC_STATUS_INDICATION (0x40010200L)
+
+/* Values for TX Filter override mode */
+#define WLC_TXFILTER_OVERRIDE_DISABLED 0
+#define WLC_TXFILTER_OVERRIDE_ENABLED 1
+
+/* magic pattern used to detect a mismatch between driver and wl utility */
+#define WL_TXFIFO_SZ_MAGIC 0xa5a5
+
+/* check this magic number */
+#define WLC_IOCTL_MAGIC 0x14e46c77
+
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+
+/* bss_info_cap_t flags */
+#define WL_BSS_FLAGS_FROM_BEACON 0x01 /* bss_info derived from beacon */
+#define WL_BSS_FLAGS_FROM_CACHE 0x02 /* bss_info collected from cache */
+#define WL_BSS_FLAGS_RSSI_ONCHANNEL 0x04 /* rssi info received on channel (vs offchannel) */
+#define WL_BSS_FLAGS_HS20 0x08 /* hotspot 2.0 capable */
+#define WL_BSS_FLAGS_RSSI_INVALID 0x10 /* BSS contains invalid RSSI */
+#define WL_BSS_FLAGS_RSSI_INACCURATE 0x20 /* BSS contains inaccurate RSSI */
+#define WL_BSS_FLAGS_SNR_INVALID 0x40 /* BSS contains invalid SNR */
+#define WL_BSS_FLAGS_NF_INVALID 0x80 /* BSS contains invalid noise floor */
+
+/* bssinfo flag for nbss_cap */
+#define VHT_BI_SGI_80MHZ 0x00000100
+#define VHT_BI_80MHZ 0x00000200
+#define VHT_BI_160MHZ 0x00000400
+#define VHT_BI_8080MHZ 0x00000800
+
+/* reference to wl_ioctl_t struct used by usermode driver */
+#define ioctl_subtype set /* subtype param */
+#define ioctl_pid used /* pid param */
+#define ioctl_status needed /* status param */
+
+
+/* Enumerate crypto algorithms */
+#define CRYPTO_ALGO_OFF 0
+#define CRYPTO_ALGO_WEP1 1
+#define CRYPTO_ALGO_TKIP 2
+#define CRYPTO_ALGO_WEP128 3
+#define CRYPTO_ALGO_AES_CCM 4
+#define CRYPTO_ALGO_AES_OCB_MSDU 5
+#define CRYPTO_ALGO_AES_OCB_MPDU 6
+#if !defined(BCMCCX) && !defined(BCMEXTCCX)
+#define CRYPTO_ALGO_NALG 7
+#else
+#define CRYPTO_ALGO_CKIP 7
+#define CRYPTO_ALGO_CKIP_MMH 8
+#define CRYPTO_ALGO_WEP_MMH 9
+#define CRYPTO_ALGO_NALG 10
+#endif /* !BCMCCX && !BCMEXTCCX */
+
+#define CRYPTO_ALGO_SMS4 11
+#define CRYPTO_ALGO_PMK 12 /* for 802.1x supp to set PMK before 4-way */
+#define CRYPTO_ALGO_BIP 13 /* 802.11w BIP (aes cmac) */
+
+#define CRYPTO_ALGO_AES_GCM 14 /* 128 bit GCM */
+#define CRYPTO_ALGO_AES_CCM256 15 /* 256 bit CCM */
+#define CRYPTO_ALGO_AES_GCM256 16 /* 256 bit GCM */
+#define CRYPTO_ALGO_BIP_CMAC256 17 /* 256 bit BIP CMAC */
+#define CRYPTO_ALGO_BIP_GMAC 18 /* 128 bit BIP GMAC */
+#define CRYPTO_ALGO_BIP_GMAC256 19 /* 256 bit BIP GMAC */
+
+#define CRYPTO_ALGO_NONE CRYPTO_ALGO_OFF
+
+#define WSEC_GEN_MIC_ERROR 0x0001
+#define WSEC_GEN_REPLAY 0x0002
+#define WSEC_GEN_ICV_ERROR 0x0004
+#define WSEC_GEN_MFP_ACT_ERROR 0x0008
+#define WSEC_GEN_MFP_DISASSOC_ERROR 0x0010
+#define WSEC_GEN_MFP_DEAUTH_ERROR 0x0020
+
+#define WL_SOFT_KEY (1 << 0) /* Indicates this key is using soft encrypt */
+#define WL_PRIMARY_KEY (1 << 1) /* Indicates this key is the primary (ie tx) key */
+#if defined(BCMCCX) || defined(BCMEXTCCX)
+#define WL_CKIP_KP (1 << 4) /* CMIC */
+#define WL_CKIP_MMH (1 << 5) /* CKIP */
+#else
+#define WL_KF_RES_4 (1 << 4) /* Reserved for backward compat */
+#define WL_KF_RES_5 (1 << 5) /* Reserved for backward compat */
+#endif /* BCMCCX || BCMEXTCCX */
+#define WL_IBSS_PEER_GROUP_KEY (1 << 6) /* Indicates a group key for a IBSS PEER */
+
+/* wireless security bitvec */
+#define WEP_ENABLED 0x0001
+#define TKIP_ENABLED 0x0002
+#define AES_ENABLED 0x0004
+#define WSEC_SWFLAG 0x0008
+#ifdef BCMCCX
+#define CKIP_KP_ENABLED 0x0010
+#define CKIP_MIC_ENABLED 0x0020
+#endif /* BCMCCX */
+#define SES_OW_ENABLED 0x0040 /* to go into transition mode without setting wep */
+#ifdef BCMWAPI_WPI
+#define SMS4_ENABLED 0x0100
+#endif /* BCMWAPI_WPI */
+
+/* wsec macros for operating on the above definitions */
+#define WSEC_WEP_ENABLED(wsec) ((wsec) & WEP_ENABLED)
+#define WSEC_TKIP_ENABLED(wsec) ((wsec) & TKIP_ENABLED)
+#define WSEC_AES_ENABLED(wsec) ((wsec) & AES_ENABLED)
+
+#ifdef BCMCCX
+#define WSEC_CKIP_KP_ENABLED(wsec) ((wsec) & CKIP_KP_ENABLED)
+#define WSEC_CKIP_MIC_ENABLED(wsec) ((wsec) & CKIP_MIC_ENABLED)
+#define WSEC_CKIP_ENABLED(wsec) ((wsec) & (CKIP_KP_ENABLED|CKIP_MIC_ENABLED))
+
+#ifdef BCMWAPI_WPI
+#define WSEC_ENABLED(wsec) \
+ ((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED | CKIP_KP_ENABLED | \
+ CKIP_MIC_ENABLED | SMS4_ENABLED))
+#else /* BCMWAPI_WPI */
+#define WSEC_ENABLED(wsec) \
+ ((wsec) & \
+ (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED | CKIP_KP_ENABLED | CKIP_MIC_ENABLED))
+#endif /* BCMWAPI_WPI */
+#else /* defined BCMCCX */
+#ifdef BCMWAPI_WPI
+#define WSEC_ENABLED(wsec) ((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED | SMS4_ENABLED))
+#else /* BCMWAPI_WPI */
+#define WSEC_ENABLED(wsec) ((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED))
+#endif /* BCMWAPI_WPI */
+#endif /* BCMCCX */
+#define WSEC_SES_OW_ENABLED(wsec) ((wsec) & SES_OW_ENABLED)
+#ifdef BCMWAPI_WAI
+#define WSEC_SMS4_ENABLED(wsec) ((wsec) & SMS4_ENABLED)
+#endif /* BCMWAPI_WAI */
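A minimal illustration of the wsec bit tests above (editorial sketch; the wsec value would normally come from the WLC_GET_WSEC ioctl defined later in this file).

static int cipher_summary(int wsec)
{
	if (!WSEC_ENABLED(wsec))
		return 0;	/* no pairwise cipher configured */
	if (WSEC_AES_ENABLED(wsec))
		return 2;	/* e.g. wsec == AES_ENABLED (0x0004) */
	return 1;		/* WEP and/or TKIP only */
}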
+
+#define MFP_CAPABLE 0x0200
+#define MFP_REQUIRED 0x0400
+#define MFP_SHA256 0x0800 /* a special configuration for STA for WIFI test tool */
+
+/* WPA authentication mode bitvec */
+#define WPA_AUTH_DISABLED 0x0000 /* Legacy (i.e., non-WPA) */
+#define WPA_AUTH_NONE 0x0001 /* none (IBSS) */
+#define WPA_AUTH_UNSPECIFIED 0x0002 /* over 802.1x */
+#define WPA_AUTH_PSK 0x0004 /* Pre-shared key */
+#if defined(BCMCCX) || defined(BCMEXTCCX)
+#define WPA_AUTH_CCKM 0x0008 /* CCKM */
+#define WPA2_AUTH_CCKM 0x0010 /* CCKM2 */
+#endif /* BCMCCX || BCMEXTCCX */
+/* #define WPA_AUTH_8021X 0x0020 */ /* 802.1x, reserved */
+#define WPA2_AUTH_UNSPECIFIED 0x0040 /* over 802.1x */
+#define WPA2_AUTH_PSK 0x0080 /* Pre-shared key */
+#define BRCM_AUTH_PSK 0x0100 /* BRCM specific PSK */
+#define BRCM_AUTH_DPT 0x0200 /* DPT PSK without group keys */
+#if defined(BCMWAPI_WAI) || defined(BCMWAPI_WPI)
+#define WPA_AUTH_WAPI 0x0400
+#define WAPI_AUTH_NONE WPA_AUTH_NONE /* none (IBSS) */
+#define WAPI_AUTH_UNSPECIFIED 0x0400 /* over AS */
+#define WAPI_AUTH_PSK 0x0800 /* Pre-shared key */
+#endif /* BCMWAPI_WAI || BCMWAPI_WPI */
+#define WPA2_AUTH_MFP 0x1000 /* MFP (11w) in contrast to CCX */
+#define WPA2_AUTH_TPK 0x2000 /* TDLS Peer Key */
+#define WPA2_AUTH_FT 0x4000 /* Fast Transition. */
+#define WPA_AUTH_PFN_ANY 0xffffffff /* for PFN, match only ssid */
+
+/* pmkid */
+#define MAXPMKID 16
+
+#ifdef SROM12
+#define WLC_IOCTL_MAXLEN 10000 /* max length ioctl buffer required */
+#else
+#define WLC_IOCTL_MAXLEN 8192 /* max length ioctl buffer required */
+#endif /* SROM12 */
+
+#define WLC_IOCTL_SMLEN 256 /* "small" length ioctl buffer required */
+#define WLC_IOCTL_MEDLEN 1536 /* "med" length ioctl buffer required */
+#if defined(LCNCONF) || defined(LCN40CONF)
+#define WLC_SAMPLECOLLECT_MAXLEN 1024 /* Max Sample Collect buffer */
+#else
+#define WLC_SAMPLECOLLECT_MAXLEN 10240 /* Max Sample Collect buffer for two cores */
+#endif
+#define WLC_SAMPLECOLLECT_MAXLEN_LCN40 8192
+
+/* common ioctl definitions */
+#define WLC_GET_MAGIC 0
+#define WLC_GET_VERSION 1
+#define WLC_UP 2
+#define WLC_DOWN 3
+#define WLC_GET_LOOP 4
+#define WLC_SET_LOOP 5
+#define WLC_DUMP 6
+#define WLC_GET_MSGLEVEL 7
+#define WLC_SET_MSGLEVEL 8
+#define WLC_GET_PROMISC 9
+#define WLC_SET_PROMISC 10
+/* #define WLC_OVERLAY_IOCTL 11 */ /* not supported */
+#define WLC_GET_RATE 12
+#define WLC_GET_MAX_RATE 13
+#define WLC_GET_INSTANCE 14
+/* #define WLC_GET_FRAG 15 */ /* no longer supported */
+/* #define WLC_SET_FRAG 16 */ /* no longer supported */
+/* #define WLC_GET_RTS 17 */ /* no longer supported */
+/* #define WLC_SET_RTS 18 */ /* no longer supported */
+#define WLC_GET_INFRA 19
+#define WLC_SET_INFRA 20
+#define WLC_GET_AUTH 21
+#define WLC_SET_AUTH 22
+#define WLC_GET_BSSID 23
+#define WLC_SET_BSSID 24
+#define WLC_GET_SSID 25
+#define WLC_SET_SSID 26
+#define WLC_RESTART 27
+#define WLC_TERMINATED 28
+/* #define WLC_DUMP_SCB 28 */ /* no longer supported */
+#define WLC_GET_CHANNEL 29
+#define WLC_SET_CHANNEL 30
+#define WLC_GET_SRL 31
+#define WLC_SET_SRL 32
+#define WLC_GET_LRL 33
+#define WLC_SET_LRL 34
+#define WLC_GET_PLCPHDR 35
+#define WLC_SET_PLCPHDR 36
+#define WLC_GET_RADIO 37
+#define WLC_SET_RADIO 38
+#define WLC_GET_PHYTYPE 39
+#define WLC_DUMP_RATE 40
+#define WLC_SET_RATE_PARAMS 41
+#define WLC_GET_FIXRATE 42
+#define WLC_SET_FIXRATE 43
+/* #define WLC_GET_WEP 42 */ /* no longer supported */
+/* #define WLC_SET_WEP 43 */ /* no longer supported */
+#define WLC_GET_KEY 44
+#define WLC_SET_KEY 45
+#define WLC_GET_REGULATORY 46
+#define WLC_SET_REGULATORY 47
+#define WLC_GET_PASSIVE_SCAN 48
+#define WLC_SET_PASSIVE_SCAN 49
+#define WLC_SCAN 50
+#define WLC_SCAN_RESULTS 51
+#define WLC_DISASSOC 52
+#define WLC_REASSOC 53
+#define WLC_GET_ROAM_TRIGGER 54
+#define WLC_SET_ROAM_TRIGGER 55
+#define WLC_GET_ROAM_DELTA 56
+#define WLC_SET_ROAM_DELTA 57
+#define WLC_GET_ROAM_SCAN_PERIOD 58
+#define WLC_SET_ROAM_SCAN_PERIOD 59
+#define WLC_EVM 60 /* diag */
+#define WLC_GET_TXANT 61
+#define WLC_SET_TXANT 62
+#define WLC_GET_ANTDIV 63
+#define WLC_SET_ANTDIV 64
+/* #define WLC_GET_TXPWR 65 */ /* no longer supported */
+/* #define WLC_SET_TXPWR 66 */ /* no longer supported */
+#define WLC_GET_CLOSED 67
+#define WLC_SET_CLOSED 68
+#define WLC_GET_MACLIST 69
+#define WLC_SET_MACLIST 70
+#define WLC_GET_RATESET 71
+#define WLC_SET_RATESET 72
+/* #define WLC_GET_LOCALE 73 */ /* no longer supported */
+#define WLC_LONGTRAIN 74
+#define WLC_GET_BCNPRD 75
+#define WLC_SET_BCNPRD 76
+#define WLC_GET_DTIMPRD 77
+#define WLC_SET_DTIMPRD 78
+#define WLC_GET_SROM 79
+#define WLC_SET_SROM 80
+#define WLC_GET_WEP_RESTRICT 81
+#define WLC_SET_WEP_RESTRICT 82
+#define WLC_GET_COUNTRY 83
+#define WLC_SET_COUNTRY 84
+#define WLC_GET_PM 85
+#define WLC_SET_PM 86
+#define WLC_GET_WAKE 87
+#define WLC_SET_WAKE 88
+/* #define WLC_GET_D11CNTS 89 */ /* -> "counters" iovar */
+#define WLC_GET_FORCELINK 90 /* ndis only */
+#define WLC_SET_FORCELINK 91 /* ndis only */
+#define WLC_FREQ_ACCURACY 92 /* diag */
+#define WLC_CARRIER_SUPPRESS 93 /* diag */
+#define WLC_GET_PHYREG 94
+#define WLC_SET_PHYREG 95
+#define WLC_GET_RADIOREG 96
+#define WLC_SET_RADIOREG 97
+#define WLC_GET_REVINFO 98
+#define WLC_GET_UCANTDIV 99
+#define WLC_SET_UCANTDIV 100
+#define WLC_R_REG 101
+#define WLC_W_REG 102
+/* #define WLC_DIAG_LOOPBACK 103 old tray diag */
+/* #define WLC_RESET_D11CNTS 104 */ /* -> "reset_d11cnts" iovar */
+#define WLC_GET_MACMODE 105
+#define WLC_SET_MACMODE 106
+#define WLC_GET_MONITOR 107
+#define WLC_SET_MONITOR 108
+#define WLC_GET_GMODE 109
+#define WLC_SET_GMODE 110
+#define WLC_GET_LEGACY_ERP 111
+#define WLC_SET_LEGACY_ERP 112
+#define WLC_GET_RX_ANT 113
+#define WLC_GET_CURR_RATESET 114 /* current rateset */
+#define WLC_GET_SCANSUPPRESS 115
+#define WLC_SET_SCANSUPPRESS 116
+#define WLC_GET_AP 117
+#define WLC_SET_AP 118
+#define WLC_GET_EAP_RESTRICT 119
+#define WLC_SET_EAP_RESTRICT 120
+#define WLC_SCB_AUTHORIZE 121
+#define WLC_SCB_DEAUTHORIZE 122
+#define WLC_GET_WDSLIST 123
+#define WLC_SET_WDSLIST 124
+#define WLC_GET_ATIM 125
+#define WLC_SET_ATIM 126
+#define WLC_GET_RSSI 127
+#define WLC_GET_PHYANTDIV 128
+#define WLC_SET_PHYANTDIV 129
+#define WLC_AP_RX_ONLY 130
+#define WLC_GET_TX_PATH_PWR 131
+#define WLC_SET_TX_PATH_PWR 132
+#define WLC_GET_WSEC 133
+#define WLC_SET_WSEC 134
+#define WLC_GET_PHY_NOISE 135
+#define WLC_GET_BSS_INFO 136
+#define WLC_GET_PKTCNTS 137
+#define WLC_GET_LAZYWDS 138
+#define WLC_SET_LAZYWDS 139
+#define WLC_GET_BANDLIST 140
+
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+#define WLC_GET_BAND 141
+#define WLC_SET_BAND 142
+#define WLC_SCB_DEAUTHENTICATE 143
+#define WLC_GET_SHORTSLOT 144
+#define WLC_GET_SHORTSLOT_OVERRIDE 145
+#define WLC_SET_SHORTSLOT_OVERRIDE 146
+#define WLC_GET_SHORTSLOT_RESTRICT 147
+#define WLC_SET_SHORTSLOT_RESTRICT 148
+#define WLC_GET_GMODE_PROTECTION 149
+#define WLC_GET_GMODE_PROTECTION_OVERRIDE 150
+#define WLC_SET_GMODE_PROTECTION_OVERRIDE 151
+#define WLC_UPGRADE 152
+/* #define WLC_GET_MRATE 153 */ /* no longer supported */
+/* #define WLC_SET_MRATE 154 */ /* no longer supported */
+#define WLC_GET_IGNORE_BCNS 155
+#define WLC_SET_IGNORE_BCNS 156
+#define WLC_GET_SCB_TIMEOUT 157
+#define WLC_SET_SCB_TIMEOUT 158
+#define WLC_GET_ASSOCLIST 159
+#define WLC_GET_CLK 160
+#define WLC_SET_CLK 161
+#define WLC_GET_UP 162
+#define WLC_OUT 163
+#define WLC_GET_WPA_AUTH 164
+#define WLC_SET_WPA_AUTH 165
+#define WLC_GET_UCFLAGS 166
+#define WLC_SET_UCFLAGS 167
+#define WLC_GET_PWRIDX 168
+#define WLC_SET_PWRIDX 169
+#define WLC_GET_TSSI 170
+#define WLC_GET_SUP_RATESET_OVERRIDE 171
+#define WLC_SET_SUP_RATESET_OVERRIDE 172
+/* #define WLC_SET_FAST_TIMER 173 */ /* no longer supported */
+/* #define WLC_GET_FAST_TIMER 174 */ /* no longer supported */
+/* #define WLC_SET_SLOW_TIMER 175 */ /* no longer supported */
+/* #define WLC_GET_SLOW_TIMER 176 */ /* no longer supported */
+/* #define WLC_DUMP_PHYREGS 177 */ /* no longer supported */
+#define WLC_GET_PROTECTION_CONTROL 178
+#define WLC_SET_PROTECTION_CONTROL 179
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+#define WLC_GET_PHYLIST 180
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+#define WLC_ENCRYPT_STRENGTH 181 /* ndis only */
+#define WLC_DECRYPT_STATUS 182 /* ndis only */
+#define WLC_GET_KEY_SEQ 183
+#define WLC_GET_SCAN_CHANNEL_TIME 184
+#define WLC_SET_SCAN_CHANNEL_TIME 185
+#define WLC_GET_SCAN_UNASSOC_TIME 186
+#define WLC_SET_SCAN_UNASSOC_TIME 187
+#define WLC_GET_SCAN_HOME_TIME 188
+#define WLC_SET_SCAN_HOME_TIME 189
+#define WLC_GET_SCAN_NPROBES 190
+#define WLC_SET_SCAN_NPROBES 191
+#define WLC_GET_PRB_RESP_TIMEOUT 192
+#define WLC_SET_PRB_RESP_TIMEOUT 193
+#define WLC_GET_ATTEN 194
+#define WLC_SET_ATTEN 195
+#define WLC_GET_SHMEM 196 /* diag */
+#define WLC_SET_SHMEM 197 /* diag */
+/* #define WLC_GET_GMODE_PROTECTION_CTS 198 */ /* no longer supported */
+/* #define WLC_SET_GMODE_PROTECTION_CTS 199 */ /* no longer supported */
+#define WLC_SET_WSEC_TEST 200
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+#define WLC_SCB_DEAUTHENTICATE_FOR_REASON 201
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+#define WLC_TKIP_COUNTERMEASURES 202
+#define WLC_GET_PIOMODE 203
+#define WLC_SET_PIOMODE 204
+#define WLC_SET_ASSOC_PREFER 205
+#define WLC_GET_ASSOC_PREFER 206
+#define WLC_SET_ROAM_PREFER 207
+#define WLC_GET_ROAM_PREFER 208
+#define WLC_SET_LED 209
+#define WLC_GET_LED 210
+#define WLC_GET_INTERFERENCE_MODE 211
+#define WLC_SET_INTERFERENCE_MODE 212
+#define WLC_GET_CHANNEL_QA 213
+#define WLC_START_CHANNEL_QA 214
+#define WLC_GET_CHANNEL_SEL 215
+#define WLC_START_CHANNEL_SEL 216
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+#define WLC_GET_VALID_CHANNELS 217
+#define WLC_GET_FAKEFRAG 218
+#define WLC_SET_FAKEFRAG 219
+#define WLC_GET_PWROUT_PERCENTAGE 220
+#define WLC_SET_PWROUT_PERCENTAGE 221
+#define WLC_SET_BAD_FRAME_PREEMPT 222
+#define WLC_GET_BAD_FRAME_PREEMPT 223
+#define WLC_SET_LEAP_LIST 224
+#define WLC_GET_LEAP_LIST 225
+#define WLC_GET_CWMIN 226
+#define WLC_SET_CWMIN 227
+#define WLC_GET_CWMAX 228
+#define WLC_SET_CWMAX 229
+#define WLC_GET_WET 230
+#define WLC_SET_WET 231
+#define WLC_GET_PUB 232
+/* #define WLC_SET_GLACIAL_TIMER 233 */ /* no longer supported */
+/* #define WLC_GET_GLACIAL_TIMER 234 */ /* no longer supported */
+#define WLC_GET_KEY_PRIMARY 235
+#define WLC_SET_KEY_PRIMARY 236
+
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+
+/* #define WLC_DUMP_RADIOREGS 237 */ /* no longer supported */
+#define WLC_GET_ACI_ARGS 238
+#define WLC_SET_ACI_ARGS 239
+#define WLC_UNSET_CALLBACK 240
+#define WLC_SET_CALLBACK 241
+#define WLC_GET_RADAR 242
+#define WLC_SET_RADAR 243
+#define WLC_SET_SPECT_MANAGMENT 244
+#define WLC_GET_SPECT_MANAGMENT 245
+#define WLC_WDS_GET_REMOTE_HWADDR 246 /* handled in wl_linux.c/wl_vx.c */
+#define WLC_WDS_GET_WPA_SUP 247
+#define WLC_SET_CS_SCAN_TIMER 248
+#define WLC_GET_CS_SCAN_TIMER 249
+#define WLC_MEASURE_REQUEST 250
+#define WLC_INIT 251
+#define WLC_SEND_QUIET 252
+#define WLC_KEEPALIVE 253
+#define WLC_SEND_PWR_CONSTRAINT 254
+#define WLC_UPGRADE_STATUS 255
+#define WLC_CURRENT_PWR 256
+#define WLC_GET_SCAN_PASSIVE_TIME 257
+#define WLC_SET_SCAN_PASSIVE_TIME 258
+#define WLC_LEGACY_LINK_BEHAVIOR 259
+#define WLC_GET_CHANNELS_IN_COUNTRY 260
+#define WLC_GET_COUNTRY_LIST 261
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+#define WLC_GET_VAR 262 /* get value of named variable (iovar; see the packing sketch after WLC_LAST) */
+#define WLC_SET_VAR 263 /* set named variable to value */
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+#define WLC_NVRAM_GET 264 /* deprecated */
+#define WLC_NVRAM_SET 265
+#define WLC_NVRAM_DUMP 266
+#define WLC_REBOOT 267
+#endif /* !LINUX_POSTMOGRIFY_REMOVAL */
+#define WLC_SET_WSEC_PMK 268
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+#define WLC_GET_AUTH_MODE 269
+#define WLC_SET_AUTH_MODE 270
+#define WLC_GET_WAKEENTRY 271
+#define WLC_SET_WAKEENTRY 272
+#define WLC_NDCONFIG_ITEM 273 /* currently handled in wl_oid.c */
+#define WLC_NVOTPW 274
+#define WLC_OTPW 275
+#define WLC_IOV_BLOCK_GET 276
+#define WLC_IOV_MODULES_GET 277
+#define WLC_SOFT_RESET 278
+#define WLC_GET_ALLOW_MODE 279
+#define WLC_SET_ALLOW_MODE 280
+#define WLC_GET_DESIRED_BSSID 281
+#define WLC_SET_DESIRED_BSSID 282
+#define WLC_DISASSOC_MYAP 283
+#define WLC_GET_NBANDS 284 /* for Dongle EXT_STA support */
+#define WLC_GET_BANDSTATES 285 /* for Dongle EXT_STA support */
+#define WLC_GET_WLC_BSS_INFO 286 /* for Dongle EXT_STA support */
+#define WLC_GET_ASSOC_INFO 287 /* for Dongle EXT_STA support */
+#define WLC_GET_OID_PHY 288 /* for Dongle EXT_STA support */
+#define WLC_SET_OID_PHY 289 /* for Dongle EXT_STA support */
+#define WLC_SET_ASSOC_TIME 290 /* for Dongle EXT_STA support */
+#define WLC_GET_DESIRED_SSID 291 /* for Dongle EXT_STA support */
+#define WLC_GET_CHANSPEC 292 /* for Dongle EXT_STA support */
+#define WLC_GET_ASSOC_STATE 293 /* for Dongle EXT_STA support */
+#define WLC_SET_PHY_STATE 294 /* for Dongle EXT_STA support */
+#define WLC_GET_SCAN_PENDING 295 /* for Dongle EXT_STA support */
+#define WLC_GET_SCANREQ_PENDING 296 /* for Dongle EXT_STA support */
+#define WLC_GET_PREV_ROAM_REASON 297 /* for Dongle EXT_STA support */
+#define WLC_SET_PREV_ROAM_REASON 298 /* for Dongle EXT_STA support */
+#define WLC_GET_BANDSTATES_PI 299 /* for Dongle EXT_STA support */
+#define WLC_GET_PHY_STATE 300 /* for Dongle EXT_STA support */
+#define WLC_GET_BSS_WPA_RSN 301 /* for Dongle EXT_STA support */
+#define WLC_GET_BSS_WPA2_RSN 302 /* for Dongle EXT_STA support */
+#define WLC_GET_BSS_BCN_TS 303 /* for Dongle EXT_STA support */
+#define WLC_GET_INT_DISASSOC 304 /* for Dongle EXT_STA support */
+#define WLC_SET_NUM_PEERS 305 /* for Dongle EXT_STA support */
+#define WLC_GET_NUM_BSS 306 /* for Dongle EXT_STA support */
+#define WLC_PHY_SAMPLE_COLLECT 307 /* phy sample collect mode */
+/* #define WLC_UM_PRIV 308 */ /* Deprecated: usermode driver */
+#define WLC_GET_CMD 309
+/* #define WLC_LAST 310 */ /* Never used - can be reused */
+#define WLC_SET_INTERFERENCE_OVERRIDE_MODE 311 /* set inter mode override */
+#define WLC_GET_INTERFERENCE_OVERRIDE_MODE 312 /* get inter mode override */
+/* #define WLC_GET_WAI_RESTRICT 313 */ /* for WAPI, deprecated use iovar instead */
+/* #define WLC_SET_WAI_RESTRICT 314 */ /* for WAPI, deprecated use iovar instead */
+/* #define WLC_SET_WAI_REKEY 315 */ /* for WAPI, deprecated use iovar instead */
+#define WLC_SET_NAT_CONFIG 316 /* for configuring NAT filter driver */
+#define WLC_GET_NAT_STATE 317
+#define WLC_GET_TXBF_RATESET 318
+#define WLC_SET_TXBF_RATESET 319
+#define WLC_SCAN_CQ 320
+#define WLC_GET_RSSI_QDB 321 /* qdB portion of the RSSI */
+#define WLC_DUMP_RATESET 322
+#define WLC_ECHO 323
+#define WLC_LAST 324
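WLC_GET_VAR/WLC_SET_VAR above carry named variables ("iovars"). A common convention in this driver family is to pack the NUL-terminated iovar name followed by any parameter bytes into the ioctl buffer; the sketch below mirrors that convention and is illustrative only, not the driver's own packing helper.

#include <string.h>

/* Pack "name\0<params>" into buf; returns the total length to hand to
 * WLC_GET_VAR / WLC_SET_VAR, or 0 if the buffer is too small.
 */
static unsigned int pack_iovar(char *buf, unsigned int buflen,
	const char *name, const void *param, unsigned int plen)
{
	unsigned int namelen = (unsigned int)strlen(name) + 1;	/* include the NUL */

	if (namelen + plen > buflen)
		return 0;
	memcpy(buf, name, namelen);
	if (plen != 0)
		memcpy(buf + namelen, param, plen);
	return namelen + plen;
}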
+#ifndef EPICTRL_COOKIE
+#define EPICTRL_COOKIE 0xABADCEDE
+#endif
+
+/* vx wlc ioctl's offset */
+#define CMN_IOCTL_OFF 0x180
+
+/*
+ * custom OID support
+ *
+ * 0xFF - implementation specific OID
+ * 0xE4 - first byte of Broadcom PCI vendor ID
+ * 0x14 - second byte of Broadcom PCI vendor ID
+ * 0xXX - the custom OID number
+ */
+
+/* begin 0x1f values beyond the start of the ET driver range. */
+#define WL_OID_BASE 0xFFE41420
+
+/* NDIS overrides */
+#define OID_WL_GETINSTANCE (WL_OID_BASE + WLC_GET_INSTANCE)
+#define OID_WL_GET_FORCELINK (WL_OID_BASE + WLC_GET_FORCELINK)
+#define OID_WL_SET_FORCELINK (WL_OID_BASE + WLC_SET_FORCELINK)
+#define OID_WL_ENCRYPT_STRENGTH (WL_OID_BASE + WLC_ENCRYPT_STRENGTH)
+#define OID_WL_DECRYPT_STATUS (WL_OID_BASE + WLC_DECRYPT_STATUS)
+#define OID_LEGACY_LINK_BEHAVIOR (WL_OID_BASE + WLC_LEGACY_LINK_BEHAVIOR)
+#define OID_WL_NDCONFIG_ITEM (WL_OID_BASE + WLC_NDCONFIG_ITEM)
+
+/* EXT_STA Dongle support */
+#define OID_STA_CHANSPEC (WL_OID_BASE + WLC_GET_CHANSPEC)
+#define OID_STA_NBANDS (WL_OID_BASE + WLC_GET_NBANDS)
+#define OID_STA_GET_PHY (WL_OID_BASE + WLC_GET_OID_PHY)
+#define OID_STA_SET_PHY (WL_OID_BASE + WLC_SET_OID_PHY)
+#define OID_STA_ASSOC_TIME (WL_OID_BASE + WLC_SET_ASSOC_TIME)
+#define OID_STA_DESIRED_SSID (WL_OID_BASE + WLC_GET_DESIRED_SSID)
+#define OID_STA_SET_PHY_STATE (WL_OID_BASE + WLC_SET_PHY_STATE)
+#define OID_STA_SCAN_PENDING (WL_OID_BASE + WLC_GET_SCAN_PENDING)
+#define OID_STA_SCANREQ_PENDING (WL_OID_BASE + WLC_GET_SCANREQ_PENDING)
+#define OID_STA_GET_ROAM_REASON (WL_OID_BASE + WLC_GET_PREV_ROAM_REASON)
+#define OID_STA_SET_ROAM_REASON (WL_OID_BASE + WLC_SET_PREV_ROAM_REASON)
+#define OID_STA_GET_PHY_STATE (WL_OID_BASE + WLC_GET_PHY_STATE)
+#define OID_STA_INT_DISASSOC (WL_OID_BASE + WLC_GET_INT_DISASSOC)
+#define OID_STA_SET_NUM_PEERS (WL_OID_BASE + WLC_SET_NUM_PEERS)
+#define OID_STA_GET_NUM_BSS (WL_OID_BASE + WLC_GET_NUM_BSS)
+
+/* NAT filter driver support */
+#define OID_NAT_SET_CONFIG (WL_OID_BASE + WLC_SET_NAT_CONFIG)
+#define OID_NAT_GET_STATE (WL_OID_BASE + WLC_GET_NAT_STATE)
+
+#define WL_DECRYPT_STATUS_SUCCESS 1
+#define WL_DECRYPT_STATUS_FAILURE 2
+#define WL_DECRYPT_STATUS_UNKNOWN 3
+
+/* allows user-mode app to poll the status of USB image upgrade */
+#define WLC_UPGRADE_SUCCESS 0
+#define WLC_UPGRADE_PENDING 1
+
+/* WLC_GET_AUTH, WLC_SET_AUTH values */
+#define WL_AUTH_OPEN_SYSTEM 0 /* d11 open authentication */
+#define WL_AUTH_SHARED_KEY 1 /* d11 shared authentication */
+#define WL_AUTH_OPEN_SHARED 2 /* try open, then shared if open failed w/rc 13 */
+
+/* a large TX Power as an init value to factor out of MIN() calculations,
+ * keep low enough to fit in an int8, units are .25 dBm
+ */
+#define WLC_TXPWR_MAX (127) /* ~32 dBm = 1,500 mW */
+
+/* "diag" iovar argument and error code */
+#define WL_DIAG_INTERRUPT 1 /* d11 loopback interrupt test */
+#define WL_DIAG_LOOPBACK 2 /* d11 loopback data test */
+#define WL_DIAG_MEMORY 3 /* d11 memory test */
+#define WL_DIAG_LED 4 /* LED test */
+#define WL_DIAG_REG 5 /* d11/phy register test */
+#define WL_DIAG_SROM 6 /* srom read/crc test */
+#define WL_DIAG_DMA 7 /* DMA test */
+#define WL_DIAG_LOOPBACK_EXT 8 /* enhanced d11 loopback data test */
+
+#define WL_DIAGERR_SUCCESS 0
+#define WL_DIAGERR_FAIL_TO_RUN 1 /* unable to run requested diag */
+#define WL_DIAGERR_NOT_SUPPORTED 2 /* diag requested is not supported */
+#define WL_DIAGERR_INTERRUPT_FAIL 3 /* loopback interrupt test failed */
+#define WL_DIAGERR_LOOPBACK_FAIL 4 /* loopback data test failed */
+#define WL_DIAGERR_SROM_FAIL 5 /* srom read failed */
+#define WL_DIAGERR_SROM_BADCRC 6 /* srom crc failed */
+#define WL_DIAGERR_REG_FAIL 7 /* d11/phy register test failed */
+#define WL_DIAGERR_MEMORY_FAIL 8 /* d11 memory test failed */
+#define WL_DIAGERR_NOMEM 9 /* diag test failed due to no memory */
+#define WL_DIAGERR_DMA_FAIL 10 /* DMA test failed */
+
+#define WL_DIAGERR_MEMORY_TIMEOUT 11 /* d11 memory test didn't finish in time */
+#define WL_DIAGERR_MEMORY_BADPATTERN 12 /* d11 memory test result in bad pattern */
+
+/* band types */
+#define WLC_BAND_AUTO 0 /* auto-select */
+#define WLC_BAND_5G 1 /* 5 Ghz */
+#define WLC_BAND_2G 2 /* 2.4 Ghz */
+#define WLC_BAND_ALL 3 /* all bands */
+
+/* band range returned by band_range iovar */
+#define WL_CHAN_FREQ_RANGE_2G 0
+#define WL_CHAN_FREQ_RANGE_5GL 1
+#define WL_CHAN_FREQ_RANGE_5GM 2
+#define WL_CHAN_FREQ_RANGE_5GH 3
+
+#define WL_CHAN_FREQ_RANGE_5GLL_5BAND 4
+#define WL_CHAN_FREQ_RANGE_5GLH_5BAND 5
+#define WL_CHAN_FREQ_RANGE_5GML_5BAND 6
+#define WL_CHAN_FREQ_RANGE_5GMH_5BAND 7
+#define WL_CHAN_FREQ_RANGE_5GH_5BAND 8
+
+#define WL_CHAN_FREQ_RANGE_5G_BAND0 1
+#define WL_CHAN_FREQ_RANGE_5G_BAND1 2
+#define WL_CHAN_FREQ_RANGE_5G_BAND2 3
+#define WL_CHAN_FREQ_RANGE_5G_BAND3 4
+
+#ifdef SROM12
+#define WL_CHAN_FREQ_RANGE_5G_BAND4 5
+#define WL_CHAN_FREQ_RANGE_2G_40 6
+#define WL_CHAN_FREQ_RANGE_5G_BAND0_40 7
+#define WL_CHAN_FREQ_RANGE_5G_BAND1_40 8
+#define WL_CHAN_FREQ_RANGE_5G_BAND2_40 9
+#define WL_CHAN_FREQ_RANGE_5G_BAND3_40 10
+#define WL_CHAN_FREQ_RANGE_5G_BAND4_40 11
+#define WL_CHAN_FREQ_RANGE_5G_BAND0_80 12
+#define WL_CHAN_FREQ_RANGE_5G_BAND1_80 13
+#define WL_CHAN_FREQ_RANGE_5G_BAND2_80 14
+#define WL_CHAN_FREQ_RANGE_5G_BAND3_80 15
+#define WL_CHAN_FREQ_RANGE_5G_BAND4_80 16
+
+#define WL_CHAN_FREQ_RANGE_5G_4BAND 17
+#define WL_CHAN_FREQ_RANGE_5G_5BAND 18
+#define WL_CHAN_FREQ_RANGE_5G_5BAND_40 19
+#define WL_CHAN_FREQ_RANGE_5G_5BAND_80 20
+#else
+#define WL_CHAN_FREQ_RANGE_5G_4BAND 5
+#endif /* SROM12 */
+/* MAC list modes */
+#define WLC_MACMODE_DISABLED 0 /* MAC list disabled */
+#define WLC_MACMODE_DENY 1 /* Deny specified (i.e. allow unspecified) */
+#define WLC_MACMODE_ALLOW 2 /* Allow specified (i.e. deny unspecified) */
+
+/*
+ * 54g modes (basic bits may still be overridden)
+ *
+ * GMODE_LEGACY_B Rateset: 1b, 2b, 5.5, 11
+ * Preamble: Long
+ * Shortslot: Off
+ * GMODE_AUTO Rateset: 1b, 2b, 5.5b, 11b, 18, 24, 36, 54
+ * Extended Rateset: 6, 9, 12, 48
+ * Preamble: Long
+ * Shortslot: Auto
+ * GMODE_ONLY Rateset: 1b, 2b, 5.5b, 11b, 18, 24b, 36, 54
+ * Extended Rateset: 6b, 9, 12b, 48
+ * Preamble: Short required
+ * Shortslot: Auto
+ * GMODE_B_DEFERRED Rateset: 1b, 2b, 5.5b, 11b, 18, 24, 36, 54
+ * Extended Rateset: 6, 9, 12, 48
+ * Preamble: Long
+ * Shortslot: On
+ * GMODE_PERFORMANCE Rateset: 1b, 2b, 5.5b, 6b, 9, 11b, 12b, 18, 24b, 36, 48, 54
+ * Preamble: Short required
+ * Shortslot: On and required
+ * GMODE_LRS Rateset: 1b, 2b, 5.5b, 11b
+ * Extended Rateset: 6, 9, 12, 18, 24, 36, 48, 54
+ * Preamble: Long
+ * Shortslot: Auto
+ */
+#define GMODE_LEGACY_B 0
+#define GMODE_AUTO 1
+#define GMODE_ONLY 2
+#define GMODE_B_DEFERRED 3
+#define GMODE_PERFORMANCE 4
+#define GMODE_LRS 5
+#define GMODE_MAX 6
+
+/* values for PLCPHdr_override */
+#define WLC_PLCP_AUTO -1
+#define WLC_PLCP_SHORT 0
+#define WLC_PLCP_LONG 1
+
+/* values for g_protection_override and n_protection_override */
+#define WLC_PROTECTION_AUTO -1
+#define WLC_PROTECTION_OFF 0
+#define WLC_PROTECTION_ON 1
+#define WLC_PROTECTION_MMHDR_ONLY 2
+#define WLC_PROTECTION_CTS_ONLY 3
+
+/* values for g_protection_control and n_protection_control */
+#define WLC_PROTECTION_CTL_OFF 0
+#define WLC_PROTECTION_CTL_LOCAL 1
+#define WLC_PROTECTION_CTL_OVERLAP 2
+
+/* values for n_protection */
+#define WLC_N_PROTECTION_OFF 0
+#define WLC_N_PROTECTION_OPTIONAL 1
+#define WLC_N_PROTECTION_20IN40 2
+#define WLC_N_PROTECTION_MIXEDMODE 3
+
+/* values for n_preamble_type */
+#define WLC_N_PREAMBLE_MIXEDMODE 0
+#define WLC_N_PREAMBLE_GF 1
+#define WLC_N_PREAMBLE_GF_BRCM 2
+
+/* values for band specific 40MHz capabilities (deprecated) */
+#define WLC_N_BW_20ALL 0
+#define WLC_N_BW_40ALL 1
+#define WLC_N_BW_20IN2G_40IN5G 2
+
+#define WLC_BW_20MHZ_BIT (1<<0)
+#define WLC_BW_40MHZ_BIT (1<<1)
+#define WLC_BW_80MHZ_BIT (1<<2)
+#define WLC_BW_160MHZ_BIT (1<<3)
+
+/* Bandwidth capabilities */
+#define WLC_BW_CAP_20MHZ (WLC_BW_20MHZ_BIT)
+#define WLC_BW_CAP_40MHZ (WLC_BW_40MHZ_BIT|WLC_BW_20MHZ_BIT)
+#define WLC_BW_CAP_80MHZ (WLC_BW_80MHZ_BIT|WLC_BW_40MHZ_BIT|WLC_BW_20MHZ_BIT)
+#define WLC_BW_CAP_160MHZ (WLC_BW_160MHZ_BIT|WLC_BW_80MHZ_BIT| \
+ WLC_BW_40MHZ_BIT|WLC_BW_20MHZ_BIT)
+#define WLC_BW_CAP_UNRESTRICTED 0xFF
+
+#define WL_BW_CAP_20MHZ(bw_cap) (((bw_cap) & WLC_BW_20MHZ_BIT) ? TRUE : FALSE)
+#define WL_BW_CAP_40MHZ(bw_cap) (((bw_cap) & WLC_BW_40MHZ_BIT) ? TRUE : FALSE)
+#define WL_BW_CAP_80MHZ(bw_cap) (((bw_cap) & WLC_BW_80MHZ_BIT) ? TRUE : FALSE)
+#define WL_BW_CAP_160MHZ(bw_cap)(((bw_cap) & WLC_BW_160MHZ_BIT) ? TRUE : FALSE)
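+
+/* Usage sketch (illustrative only; printf and the bw_cap source are assumed,
+ * only the macros above come from this header):
+ *
+ *	uint8 bw_cap = WLC_BW_CAP_80MHZ;	// 20, 40 and 80 MHz bits set
+ *	if (WL_BW_CAP_80MHZ(bw_cap))
+ *		printf("80 MHz operation supported\n");
+ *	if (!WL_BW_CAP_160MHZ(bw_cap))
+ *		printf("160 MHz operation not supported\n");
+ */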
+
+/* values to force tx/rx chain */
+#define WLC_N_TXRX_CHAIN0 0
+#define WLC_N_TXRX_CHAIN1 1
+
+/* bitflags for SGI support (sgi_rx iovar) */
+#define WLC_N_SGI_20 0x01
+#define WLC_N_SGI_40 0x02
+#define WLC_VHT_SGI_80 0x04
+
+/* when sgi_tx==WLC_SGI_ALL, bypass rate selection, enable sgi for all mcs */
+#define WLC_SGI_ALL 0x02
+
+#define LISTEN_INTERVAL 10
+/* interference mitigation options */
+#define INTERFERE_OVRRIDE_OFF -1 /* interference override off */
+#define INTERFERE_NONE 0 /* off */
+#define NON_WLAN 1 /* foreign/non 802.11 interference, no auto detect */
+#define WLAN_MANUAL 2 /* ACI: no auto detection */
+#define WLAN_AUTO 3 /* ACI: auto detect */
+#define WLAN_AUTO_W_NOISE 4 /* ACI: auto - detect and non 802.11 interference */
+#define AUTO_ACTIVE (1 << 7) /* Auto is currently active */
+
+/* interference mode bit-masks (ACPHY) */
+#define ACPHY_ACI_GLITCHBASED_DESENSE 1 /* bit 0 */
+#define ACPHY_ACI_HWACI_PKTGAINLMT 2 /* bit 1 */
+#define ACPHY_ACI_W2NB_PKTGAINLMT 4 /* bit 2 */
+#define ACPHY_ACI_PREEMPTION 8 /* bit 3 */
+#define ACPHY_HWACI_MITIGATION 16 /* bit 4 */
+#define ACPHY_ACI_MAX_MODE 31
+
+/* AP environment */
+#define AP_ENV_DETECT_NOT_USED 0 /* We aren't using AP environment detection */
+#define AP_ENV_DENSE 1 /* "Corporate" or other AP dense environment */
+#define AP_ENV_SPARSE 2 /* "Home" or other sparse environment */
+#define AP_ENV_INDETERMINATE 3 /* AP environment hasn't been identified */
+
+#define TRIGGER_NOW 0
+#define TRIGGER_CRS 0x01
+#define TRIGGER_CRSDEASSERT 0x02
+#define TRIGGER_GOODFCS 0x04
+#define TRIGGER_BADFCS 0x08
+#define TRIGGER_BADPLCP 0x10
+#define TRIGGER_CRSGLITCH 0x20
+
+#define WL_SAMPLEDATA_HEADER_TYPE 1
+#define WL_SAMPLEDATA_HEADER_SIZE 80 /* sample collect header size (bytes) */
+#define WL_SAMPLEDATA_TYPE 2
+#define WL_SAMPLEDATA_SEQ 0xff /* sequence # */
+#define WL_SAMPLEDATA_MORE_DATA 0x100 /* more data mask */
+
+/* WL_OTA START */
+#define WL_OTA_ARG_PARSE_BLK_SIZE 1200
+#define WL_OTA_TEST_MAX_NUM_RATE 30
+#define WL_OTA_TEST_MAX_NUM_SEQ 100
+
+#define WL_THRESHOLD_LO_BAND 70 /* range from 5250MHz - 5350MHz */
+
+/* radar iovar SET defines */
+#define WL_RADAR_DETECTOR_OFF 0 /* radar detector off */
+#define WL_RADAR_DETECTOR_ON 1 /* radar detector on */
+#define WL_RADAR_SIMULATED 2 /* force radar detector to declare
+ * detection once
+ */
+#define WL_RSSI_ANT_VERSION 1 /* current version of wl_rssi_ant_t */
+#define WL_ANT_RX_MAX 2 /* max 2 receive antennas */
+#define WL_ANT_HT_RX_MAX 3 /* max 3 receive antennas/cores */
+#define WL_ANT_IDX_1 0 /* antenna index 1 */
+#define WL_ANT_IDX_2 1 /* antenna index 2 */
+
+#ifndef WL_RSSI_ANT_MAX
+#define WL_RSSI_ANT_MAX 4 /* max possible rx antennas */
+#elif WL_RSSI_ANT_MAX != 4
+#error "WL_RSSI_ANT_MAX does not match"
+#endif
+
+/* dfs_status iovar-related defines */
+
+/* cac - channel availability check,
+ * ism - in-service monitoring
+ * csa - channel switching announcement
+ */
+
+/* cac state values */
+#define WL_DFS_CACSTATE_IDLE 0 /* state for operating in non-radar channel */
+#define WL_DFS_CACSTATE_PREISM_CAC 1 /* CAC in progress */
+#define WL_DFS_CACSTATE_ISM 2 /* ISM in progress */
+#define WL_DFS_CACSTATE_CSA 3 /* csa */
+#define WL_DFS_CACSTATE_POSTISM_CAC 4 /* ISM CAC */
+#define WL_DFS_CACSTATE_PREISM_OOC 5 /* PREISM OOC */
+#define WL_DFS_CACSTATE_POSTISM_OOC 6 /* POSTISM OOC */
+#define WL_DFS_CACSTATES 7 /* this many states exist */
+
+/* Defines used with channel_bandwidth for curpower */
+#define WL_BW_20MHZ 0
+#define WL_BW_40MHZ 1
+#define WL_BW_80MHZ 2
+#define WL_BW_160MHZ 3
+#define WL_BW_8080MHZ 4
+
+/* tx_power_t.flags bits */
+#define WL_TX_POWER_F_ENABLED 1
+#define WL_TX_POWER_F_HW 2
+#define WL_TX_POWER_F_MIMO 4
+#define WL_TX_POWER_F_SISO 8
+#define WL_TX_POWER_F_HT 0x10
+#define WL_TX_POWER_F_VHT 0x20
+#define WL_TX_POWER_F_OPENLOOP 0x40
+
+/* Message levels */
+#define WL_ERROR_VAL 0x00000001
+#define WL_TRACE_VAL 0x00000002
+#define WL_PRHDRS_VAL 0x00000004
+#define WL_PRPKT_VAL 0x00000008
+#define WL_INFORM_VAL 0x00000010
+#define WL_TMP_VAL 0x00000020
+#define WL_OID_VAL 0x00000040
+#define WL_RATE_VAL 0x00000080
+#define WL_ASSOC_VAL 0x00000100
+#define WL_PRUSR_VAL 0x00000200
+#define WL_PS_VAL 0x00000400
+#define WL_TXPWR_VAL 0x00000800 /* retired in TOT on 6/10/2009 */
+#define WL_MODE_SWITCH_VAL 0x00000800 /* Using retired TXPWR val */
+#define WL_PORT_VAL 0x00001000
+#define WL_DUAL_VAL 0x00002000
+#define WL_WSEC_VAL 0x00004000
+#define WL_WSEC_DUMP_VAL 0x00008000
+#define WL_LOG_VAL 0x00010000
+#define WL_NRSSI_VAL 0x00020000 /* retired in TOT on 6/10/2009 */
+#define WL_LOFT_VAL 0x00040000 /* retired in TOT on 6/10/2009 */
+#define WL_REGULATORY_VAL 0x00080000
+#define WL_TAF_VAL 0x00100000
+#define WL_RADAR_VAL 0x00200000 /* retired in TOT on 6/10/2009 */
+#define WL_MPC_VAL 0x00400000
+#define WL_APSTA_VAL 0x00800000
+#define WL_DFS_VAL 0x01000000
+#define WL_BA_VAL 0x02000000 /* retired in TOT on 6/14/2010 */
+#define WL_ACI_VAL 0x04000000
+#define WL_PRMAC_VAL 0x04000000
+#define WL_MBSS_VAL 0x04000000
+#define WL_CAC_VAL 0x08000000
+#define WL_AMSDU_VAL 0x10000000
+#define WL_AMPDU_VAL 0x20000000
+#define WL_FFPLD_VAL 0x40000000
+
+/* wl_msg_level is full. For new bits take the next one and AND with
+ * wl_msg_level2 in wl_dbg.h
+ */
+#define WL_DPT_VAL 0x00000001
+#define WL_SCAN_VAL 0x00000002
+#define WL_WOWL_VAL 0x00000004
+#define WL_COEX_VAL 0x00000008
+#define WL_RTDC_VAL 0x00000010
+#define WL_PROTO_VAL 0x00000020
+#define WL_BTA_VAL 0x00000040
+#define WL_CHANINT_VAL 0x00000080
+#define WL_WMF_VAL 0x00000100
+#define WL_P2P_VAL 0x00000200
+#define WL_ITFR_VAL 0x00000400
+#define WL_MCHAN_VAL 0x00000800
+#define WL_TDLS_VAL 0x00001000
+#define WL_MCNX_VAL 0x00002000
+#define WL_PROT_VAL 0x00004000
+#define WL_PSTA_VAL 0x00008000
+#define WL_TSO_VAL 0x00010000
+#define WL_TRF_MGMT_VAL 0x00020000
+#define WL_LPC_VAL 0x00040000
+#define WL_L2FILTER_VAL 0x00080000
+#define WL_TXBF_VAL 0x00100000
+#define WL_P2PO_VAL 0x00200000
+#define WL_TBTT_VAL 0x00400000
+#define WL_MQ_VAL 0x01000000
+
+/* This level is currently used in Phoenix2 only */
+#define WL_SRSCAN_VAL 0x02000000
+
+#define WL_WNM_VAL 0x04000000
+#define WL_PWRSEL_VAL 0x10000000
+#define WL_NET_DETECT_VAL 0x20000000
+#define WL_PCIE_VAL 0x40000000
+
+/* use top-bit for WL_TIME_STAMP_VAL because this is a modifier
+ * rather than a message-type of its own
+ */
+#define WL_TIMESTAMP_VAL 0x80000000
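+
+/* Usage sketch (illustrative only): message-level bits are OR'ed into one
+ * 32-bit mask, and WL_TIMESTAMP_VAL only modifies the output of whatever
+ * other bits are set; it enables nothing by itself.
+ *
+ *	uint32 msglevel = WL_ERROR_VAL | WL_ASSOC_VAL;	// errors + assoc trace
+ *	msglevel |= WL_TIMESTAMP_VAL;			// timestamp each message
+ */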
+
+/* max # of leds supported by GPIO (gpio pin# == led index#) */
+#define WL_LED_NUMGPIO 32 /* gpio 0-31 */
+
+/* led per-pin behaviors */
+#define WL_LED_OFF 0 /* always off */
+#define WL_LED_ON 1 /* always on */
+#define WL_LED_ACTIVITY 2 /* activity */
+#define WL_LED_RADIO 3 /* radio enabled */
+#define WL_LED_ARADIO 4 /* 5 GHz radio enabled */
+#define WL_LED_BRADIO 5 /* 2.4 GHz radio enabled */
+#define WL_LED_BGMODE 6 /* on if gmode, off if bmode */
+#define WL_LED_WI1 7
+#define WL_LED_WI2 8
+#define WL_LED_WI3 9
+#define WL_LED_ASSOC 10 /* associated state indicator */
+#define WL_LED_INACTIVE 11 /* null behavior (clears default behavior) */
+#define WL_LED_ASSOCACT 12 /* on when associated; blink fast for activity */
+#define WL_LED_WI4 13
+#define WL_LED_WI5 14
+#define WL_LED_BLINKSLOW 15 /* blink slow */
+#define WL_LED_BLINKMED 16 /* blink med */
+#define WL_LED_BLINKFAST 17 /* blink fast */
+#define WL_LED_BLINKCUSTOM 18 /* blink custom */
+#define WL_LED_BLINKPERIODIC 19 /* blink periodic (custom 1000ms / off 400ms) */
+#define WL_LED_ASSOC_WITH_SEC 20 /* when connected with security */
+ /* keep on for 300 sec */
+#define WL_LED_START_OFF 21 /* off upon boot, could be turned on later */
+#define WL_LED_WI6 22
+#define WL_LED_WI7 23
+#define WL_LED_WI8 24
+#define WL_LED_NUMBEHAVIOR 25
+
+/* led behavior numeric value format */
+#define WL_LED_BEH_MASK 0x7f /* behavior mask */
+#define WL_LED_AL_MASK 0x80 /* activelow (polarity) bit */
+
+/* number of bytes needed to define a proper bit mask for MAC event reporting */
+#define BCMIO_ROUNDUP(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
+#define BCMIO_NBBY 8
+#define WL_EVENTING_MASK_LEN 16
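+
+/* Worked example (illustrative): with BCMIO_NBBY bits per byte, a mask that
+ * covers N event codes needs BCMIO_ROUNDUP(N, BCMIO_NBBY) / BCMIO_NBBY bytes;
+ * for 128 event codes that is ((128 + 7) / 8) * 8 / 8 = 16 bytes, matching
+ * WL_EVENTING_MASK_LEN above.
+ */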
+
+
+/* join preference types */
+#define WL_JOIN_PREF_RSSI 1 /* by RSSI */
+#define WL_JOIN_PREF_WPA 2 /* by akm and ciphers */
+#define WL_JOIN_PREF_BAND 3 /* by 802.11 band */
+#define WL_JOIN_PREF_RSSI_DELTA 4 /* by 802.11 band only if RSSI delta condition matches */
+#define WL_JOIN_PREF_TRANS_PREF 5 /* defined by requesting AP */
+
+/* band preference */
+#define WLJP_BAND_ASSOC_PREF 255 /* use what WLC_SET_ASSOC_PREFER ioctl specifies */
+
+/* any multicast cipher suite */
+#define WL_WPA_ACP_MCS_ANY "\x00\x00\x00\x00"
+
+/* 802.11h measurement types */
+#define WLC_MEASURE_TPC 1
+#define WLC_MEASURE_CHANNEL_BASIC 2
+#define WLC_MEASURE_CHANNEL_CCA 3
+#define WLC_MEASURE_CHANNEL_RPI 4
+
+/* regulatory enforcement levels */
+#define SPECT_MNGMT_OFF 0 /* both 11h and 11d disabled */
+#define SPECT_MNGMT_LOOSE_11H 1 /* allow non-11h APs in scan lists */
+#define SPECT_MNGMT_STRICT_11H 2 /* prune out non-11h APs from scan list */
+#define SPECT_MNGMT_STRICT_11D 3 /* switch to 802.11D mode */
+/* SPECT_MNGMT_LOOSE_11H_D - same as SPECT_MNGMT_LOOSE with the exception that Country IE
+ * adoption is done regardless of capability spectrum_management
+ */
+#define SPECT_MNGMT_LOOSE_11H_D 4 /* operation defined above */
+
+#define WL_CHAN_VALID_HW (1 << 0) /* valid with current HW */
+#define WL_CHAN_VALID_SW (1 << 1) /* valid with current country setting */
+#define WL_CHAN_BAND_5G (1 << 2) /* 5GHz-band channel */
+#define WL_CHAN_RADAR (1 << 3) /* radar sensitive channel */
+#define WL_CHAN_INACTIVE (1 << 4) /* temporarily inactive due to radar */
+#define WL_CHAN_PASSIVE (1 << 5) /* channel is in passive mode */
+#define WL_CHAN_RESTRICTED (1 << 6) /* restricted use channel */
+
+/* BTC mode used by "btc_mode" iovar */
+#define WL_BTC_DISABLE 0 /* disable BT coexistence */
+#define WL_BTC_FULLTDM 1 /* full TDM COEX */
+#define WL_BTC_ENABLE 1 /* full TDM COEX to maintain backward compatibility */
+#define WL_BTC_PREMPT 2 /* full TDM COEX with preemption */
+#define WL_BTC_LITE 3 /* light weight coex for large isolation platform */
+#define WL_BTC_PARALLEL 4 /* BT and WLAN run in parallel with separate antenna */
+#define WL_BTC_HYBRID 5 /* hybrid coex, only ack is allowed to transmit in BT slot */
+#define WL_BTC_DEFAULT 8 /* set the default mode for the device */
+#define WL_INF_BTC_DISABLE 0
+#define WL_INF_BTC_ENABLE 1
+#define WL_INF_BTC_AUTO 3
+
+/* BTC wire used by "btc_wire" iovar */
+#define WL_BTC_DEFWIRE 0 /* use default wire setting */
+#define WL_BTC_2WIRE 2 /* use 2-wire BTC */
+#define WL_BTC_3WIRE 3 /* use 3-wire BTC */
+#define WL_BTC_4WIRE 4 /* use 4-wire BTC */
+
+/* BTC flags: BTC configuration that can be set by host */
+#define WL_BTC_FLAG_PREMPT (1 << 0)
+#define WL_BTC_FLAG_BT_DEF (1 << 1)
+#define WL_BTC_FLAG_ACTIVE_PROT (1 << 2)
+#define WL_BTC_FLAG_SIM_RSP (1 << 3)
+#define WL_BTC_FLAG_PS_PROTECT (1 << 4)
+#define WL_BTC_FLAG_SIM_TX_LP (1 << 5)
+#define WL_BTC_FLAG_ECI (1 << 6)
+#define WL_BTC_FLAG_LIGHT (1 << 7)
+#define WL_BTC_FLAG_PARALLEL (1 << 8)
+
+/* maximum channels returned by the get valid channels iovar */
+#define WL_NUMCHANNELS 64
+
+/* max number of chanspecs (used by the iovar to calc. buf space) */
+#ifdef WL11AC_80P80
+#define WL_NUMCHANSPECS 206
+#else
+#define WL_NUMCHANSPECS 110
+#endif
+
+
+/* WDS link local endpoint WPA role */
+#define WL_WDS_WPA_ROLE_AUTH 0 /* authenticator */
+#define WL_WDS_WPA_ROLE_SUP 1 /* supplicant */
+#define WL_WDS_WPA_ROLE_AUTO 255 /* auto, based on mac addr value */
+
+/* Base offset values */
+#define WL_PKT_FILTER_BASE_PKT 0
+#define WL_PKT_FILTER_BASE_END 1
+#define WL_PKT_FILTER_BASE_D11_H 2 /* May be removed */
+#define WL_PKT_FILTER_BASE_D11_D 3 /* May be removed */
+#define WL_PKT_FILTER_BASE_ETH_H 4
+#define WL_PKT_FILTER_BASE_ETH_D 5
+#define WL_PKT_FILTER_BASE_ARP_H 6
+#define WL_PKT_FILTER_BASE_ARP_D 7 /* May be removed */
+#define WL_PKT_FILTER_BASE_IP4_H 8
+#define WL_PKT_FILTER_BASE_IP4_D 9
+#define WL_PKT_FILTER_BASE_IP6_H 10
+#define WL_PKT_FILTER_BASE_IP6_D 11
+#define WL_PKT_FILTER_BASE_TCP_H 12
+#define WL_PKT_FILTER_BASE_TCP_D 13 /* May be removed */
+#define WL_PKT_FILTER_BASE_UDP_H 14
+#define WL_PKT_FILTER_BASE_UDP_D 15
+#define WL_PKT_FILTER_BASE_IP6_P 16
+#define WL_PKT_FILTER_BASE_COUNT 17 /* May be removed */
+
+/* String mapping for bases that may be used by applications or debug */
+#define WL_PKT_FILTER_BASE_NAMES \
+ { "START", WL_PKT_FILTER_BASE_PKT }, \
+ { "END", WL_PKT_FILTER_BASE_END }, \
+ { "ETH_H", WL_PKT_FILTER_BASE_ETH_H }, \
+ { "ETH_D", WL_PKT_FILTER_BASE_ETH_D }, \
+ { "D11_H", WL_PKT_FILTER_BASE_D11_H }, \
+ { "D11_D", WL_PKT_FILTER_BASE_D11_D }, \
+ { "ARP_H", WL_PKT_FILTER_BASE_ARP_H }, \
+ { "IP4_H", WL_PKT_FILTER_BASE_IP4_H }, \
+ { "IP4_D", WL_PKT_FILTER_BASE_IP4_D }, \
+ { "IP6_H", WL_PKT_FILTER_BASE_IP6_H }, \
+ { "IP6_D", WL_PKT_FILTER_BASE_IP6_D }, \
+ { "IP6_P", WL_PKT_FILTER_BASE_IP6_P }, \
+ { "TCP_H", WL_PKT_FILTER_BASE_TCP_H }, \
+ { "TCP_D", WL_PKT_FILTER_BASE_TCP_D }, \
+ { "UDP_H", WL_PKT_FILTER_BASE_UDP_H }, \
+ { "UDP_D", WL_PKT_FILTER_BASE_UDP_D }
+
+/* Flags for a pattern list element */
+#define WL_PKT_FILTER_MFLAG_NEG 0x0001
+
+/*
+ * Packet engine interface
+ */
+
+#define WL_PKTENG_PER_TX_START 0x01
+#define WL_PKTENG_PER_TX_STOP 0x02
+#define WL_PKTENG_PER_RX_START 0x04
+#define WL_PKTENG_PER_RX_WITH_ACK_START 0x05
+#define WL_PKTENG_PER_TX_WITH_ACK_START 0x06
+#define WL_PKTENG_PER_RX_STOP 0x08
+#define WL_PKTENG_PER_MASK 0xff
+
+#define WL_PKTENG_SYNCHRONOUS 0x100 /* synchronous flag */
+
+#define WL_PKTENG_MAXPKTSZ 16384 /* max pktsz limit for pkteng */
+
+#define NUM_80211b_RATES 4
+#define NUM_80211ag_RATES 8
+#define NUM_80211n_RATES 32
+#define NUM_80211_RATES (NUM_80211b_RATES+NUM_80211ag_RATES+NUM_80211n_RATES)
+
+/*
+ * WOWL capability/override settings
+ */
+#define WL_WOWL_MAGIC (1 << 0) /* Wakeup on Magic packet */
+#define WL_WOWL_NET (1 << 1) /* Wakeup on Netpattern */
+#define WL_WOWL_DIS (1 << 2) /* Wakeup on loss-of-link due to Disassoc/Deauth */
+#define WL_WOWL_RETR (1 << 3) /* Wakeup on retrograde TSF */
+#define WL_WOWL_BCN (1 << 4) /* Wakeup on loss of beacon */
+#define WL_WOWL_TST (1 << 5) /* Wakeup after test */
+#define WL_WOWL_M1 (1 << 6) /* Wakeup after PTK refresh */
+#define WL_WOWL_EAPID (1 << 7) /* Wakeup after receipt of EAP-Identity Req */
+#define WL_WOWL_PME_GPIO (1 << 8) /* Wakeind via PME(0) or GPIO(1) */
+#define WL_WOWL_NEEDTKIP1 (1 << 9) /* need tkip phase 1 key to be updated by the driver */
+#define WL_WOWL_GTK_FAILURE (1 << 10) /* enable wakeup if GTK fails */
+#define WL_WOWL_EXTMAGPAT (1 << 11) /* support extended magic packets */
+#define WL_WOWL_ARPOFFLOAD (1 << 12) /* support ARP/NS/keepalive offloading */
+#define WL_WOWL_WPA2 (1 << 13) /* read protocol version for EAPOL frames */
+#define WL_WOWL_KEYROT (1 << 14) /* If the bit is set, use key rotation */
+#define WL_WOWL_BCAST (1 << 15) /* If the bit is set, frm received was bcast frame */
+#define WL_WOWL_SCANOL (1 << 16) /* If the bit is set, scan offload is enabled */
+#define WL_WOWL_TCPKEEP_TIME (1 << 17) /* Wakeup on tcpkeep alive timeout */
+#define WL_WOWL_MDNS_CONFLICT (1 << 18) /* Wakeup on mDNS Conflict Resolution */
+#define WL_WOWL_MDNS_SERVICE (1 << 19) /* Wakeup on mDNS Service Connect */
+#define WL_WOWL_TCPKEEP_DATA (1 << 20) /* tcp keepalive got data */
+#define WL_WOWL_FW_HALT (1 << 21) /* Firmware died in wowl mode */
+#define WL_WOWL_ENAB_HWRADIO (1 << 22) /* Enable detection of radio button changes */
+#define WL_WOWL_MIC_FAIL (1 << 23) /* Offloads detected MIC failure(s) */
+#define WL_WOWL_UNASSOC (1 << 24) /* Wakeup in Unassociated state (Net/Magic Pattern) */
+#define WL_WOWL_SECURE (1 << 25) /* Wakeup if received matched secured pattern */
+#define WL_WOWL_LINKDOWN (1 << 31) /* Link Down indication in WoWL mode */
+
+#define WL_WOWL_TCPKEEP (1 << 20) /* temp copy to satisfy automerger */
+#define MAGIC_PKT_MINLEN 102 /* Magic pkt min length: 6 sync bytes of 0xFF + 16 * ETHER_ADDR_LEN */
+
+#define WOWL_PATTEN_TYPE_ARP (1 << 0) /* ARP offload Pattern */
+#define WOWL_PATTEN_TYPE_NA (1 << 1) /* NA offload Pattern */
+
+#define MAGIC_PKT_NUM_MAC_ADDRS 16
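+
+/* Worked out for 6-byte Ethernet addresses (illustrative): a magic packet is
+ * 6 sync bytes of 0xFF followed by the station MAC repeated
+ * MAGIC_PKT_NUM_MAC_ADDRS times, i.e. 6 + 16 * 6 = 102 bytes = MAGIC_PKT_MINLEN.
+ */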
+
+
+/* Overlap BSS Scan parameters default, minimum, maximum */
+#define WLC_OBSS_SCAN_PASSIVE_DWELL_DEFAULT 20 /* unit TU */
+#define WLC_OBSS_SCAN_PASSIVE_DWELL_MIN 5 /* unit TU */
+#define WLC_OBSS_SCAN_PASSIVE_DWELL_MAX 1000 /* unit TU */
+#define WLC_OBSS_SCAN_ACTIVE_DWELL_DEFAULT 10 /* unit TU */
+#define WLC_OBSS_SCAN_ACTIVE_DWELL_MIN 10 /* unit TU */
+#define WLC_OBSS_SCAN_ACTIVE_DWELL_MAX 1000 /* unit TU */
+#define WLC_OBSS_SCAN_WIDTHSCAN_INTERVAL_DEFAULT 300 /* unit Sec */
+#define WLC_OBSS_SCAN_WIDTHSCAN_INTERVAL_MIN 10 /* unit Sec */
+#define WLC_OBSS_SCAN_WIDTHSCAN_INTERVAL_MAX 900 /* unit Sec */
+#define WLC_OBSS_SCAN_CHANWIDTH_TRANSITION_DLY_DEFAULT 5
+#define WLC_OBSS_SCAN_CHANWIDTH_TRANSITION_DLY_MIN 5
+#define WLC_OBSS_SCAN_CHANWIDTH_TRANSITION_DLY_MAX 100
+#define WLC_OBSS_SCAN_PASSIVE_TOTAL_PER_CHANNEL_DEFAULT 200 /* unit TU */
+#define WLC_OBSS_SCAN_PASSIVE_TOTAL_PER_CHANNEL_MIN 200 /* unit TU */
+#define WLC_OBSS_SCAN_PASSIVE_TOTAL_PER_CHANNEL_MAX 10000 /* unit TU */
+#define WLC_OBSS_SCAN_ACTIVE_TOTAL_PER_CHANNEL_DEFAULT 20 /* unit TU */
+#define WLC_OBSS_SCAN_ACTIVE_TOTAL_PER_CHANNEL_MIN 20 /* unit TU */
+#define WLC_OBSS_SCAN_ACTIVE_TOTAL_PER_CHANNEL_MAX 10000 /* unit TU */
+#define WLC_OBSS_SCAN_ACTIVITY_THRESHOLD_DEFAULT 25 /* unit percent */
+#define WLC_OBSS_SCAN_ACTIVITY_THRESHOLD_MIN 0 /* unit percent */
+#define WLC_OBSS_SCAN_ACTIVITY_THRESHOLD_MAX 100 /* unit percent */
+
+#define WL_MIN_NUM_OBSS_SCAN_ARG 7 /* minimum number of arguments required for OBSS Scan */
+
+#define WL_COEX_INFO_MASK 0x07
+#define WL_COEX_INFO_REQ 0x01
+#define WL_COEX_40MHZ_INTOLERANT 0x02
+#define WL_COEX_WIDTH20 0x04
+
+#define WLC_RSSI_INVALID 0 /* invalid RSSI value */
+
+#define MAX_RSSI_LEVELS 8
+
+/* **** EXTLOG **** */
+#define EXTLOG_CUR_VER 0x0100
+
+#define MAX_ARGSTR_LEN 18 /* At least big enough for storing ETHER_ADDR_STR_LEN */
+
+/* log modules (bitmap) */
+#define LOG_MODULE_COMMON 0x0001
+#define LOG_MODULE_ASSOC 0x0002
+#define LOG_MODULE_EVENT 0x0004
+#define LOG_MODULE_MAX 3 /* Update when adding module */
+
+/* log levels */
+#define WL_LOG_LEVEL_DISABLE 0
+#define WL_LOG_LEVEL_ERR 1
+#define WL_LOG_LEVEL_WARN 2
+#define WL_LOG_LEVEL_INFO 3
+#define WL_LOG_LEVEL_MAX WL_LOG_LEVEL_INFO /* Update when adding level */
+
+/* flag */
+#define LOG_FLAG_EVENT 1
+
+/* log arg_type */
+#define LOG_ARGTYPE_NULL 0
+#define LOG_ARGTYPE_STR 1 /* %s */
+#define LOG_ARGTYPE_INT 2 /* %d */
+#define LOG_ARGTYPE_INT_STR 3 /* %d...%s */
+#define LOG_ARGTYPE_STR_INT 4 /* %s...%d */
+
+/* 802.11 Mgmt Packet flags */
+#define VNDR_IE_BEACON_FLAG 0x1
+#define VNDR_IE_PRBRSP_FLAG 0x2
+#define VNDR_IE_ASSOCRSP_FLAG 0x4
+#define VNDR_IE_AUTHRSP_FLAG 0x8
+#define VNDR_IE_PRBREQ_FLAG 0x10
+#define VNDR_IE_ASSOCREQ_FLAG 0x20
+#define VNDR_IE_IWAPID_FLAG 0x40 /* vendor IE in IW advertisement protocol ID field */
+#define VNDR_IE_CUSTOM_FLAG 0x100 /* allow custom IE id */
+
+#if defined(WLP2P)
+/* P2P Action Frames flags (spec ordered) */
+#define VNDR_IE_GONREQ_FLAG 0x001000
+#define VNDR_IE_GONRSP_FLAG 0x002000
+#define VNDR_IE_GONCFM_FLAG 0x004000
+#define VNDR_IE_INVREQ_FLAG 0x008000
+#define VNDR_IE_INVRSP_FLAG 0x010000
+#define VNDR_IE_DISREQ_FLAG 0x020000
+#define VNDR_IE_DISRSP_FLAG 0x040000
+#define VNDR_IE_PRDREQ_FLAG 0x080000
+#define VNDR_IE_PRDRSP_FLAG 0x100000
+
+#define VNDR_IE_P2PAF_SHIFT 12
+#endif /* WLP2P */
+
+/* channel interference measurement (chanim) related defines */
+
+/* chanim mode */
+#define CHANIM_DISABLE 0 /* disabled */
+#define CHANIM_DETECT 1 /* detection only */
+#define CHANIM_EXT 2 /* external state machine */
+#define CHANIM_ACT 3 /* full internal state machine, detect + act */
+#define CHANIM_MODE_MAX 4
+
+/* define for apcs reason code */
+#define APCS_INIT 0
+#define APCS_IOCTL 1
+#define APCS_CHANIM 2
+#define APCS_CSTIMER 3
+#define APCS_BTA 4
+#define APCS_TXDLY 5
+#define APCS_NONACSD 6
+#define APCS_DFS_REENTRY 7
+#define APCS_TXFAIL 8
+#define APCS_MAX 9
+
+/* number of ACS record entries */
+#define CHANIM_ACS_RECORD 10
+
+/* CHANIM */
+#define CCASTATS_TXDUR 0
+#define CCASTATS_INBSS 1
+#define CCASTATS_OBSS 2
+#define CCASTATS_NOCTG 3
+#define CCASTATS_NOPKT 4
+#define CCASTATS_DOZE 5
+#define CCASTATS_TXOP 6
+#define CCASTATS_GDTXDUR 7
+#define CCASTATS_BDTXDUR 8
+#define CCASTATS_MAX 9
+
+#define WL_CHANIM_COUNT_ALL 0xff
+#define WL_CHANIM_COUNT_ONE 0x1
+
+/* ap tpc modes */
+#define AP_TPC_OFF 0
+#define AP_TPC_BSS_PWR 1 /* BSS power control */
+#define AP_TPC_AP_PWR 2 /* AP power control */
+#define AP_TPC_AP_BSS_PWR 3 /* Both AP and BSS power control */
+#define AP_TPC_MAX_LINK_MARGIN 127
+
+/* state */
+#define WL_P2P_DISC_ST_SCAN 0
+#define WL_P2P_DISC_ST_LISTEN 1
+#define WL_P2P_DISC_ST_SEARCH 2
+
+/* i/f type */
+#define WL_P2P_IF_CLIENT 0
+#define WL_P2P_IF_GO 1
+#define WL_P2P_IF_DYNBCN_GO 2
+#define WL_P2P_IF_DEV 3
+
+/* count */
+#define WL_P2P_SCHED_RSVD 0
+#define WL_P2P_SCHED_REPEAT 255 /* anything > 255 will be treated as 255 */
+
+#define WL_P2P_SCHED_FIXED_LEN 3
+
+/* schedule type */
+#define WL_P2P_SCHED_TYPE_ABS 0 /* Scheduled Absence */
+#define WL_P2P_SCHED_TYPE_REQ_ABS 1 /* Requested Absence */
+
+/* schedule action during absence periods (for WL_P2P_SCHED_ABS type) */
+#define WL_P2P_SCHED_ACTION_NONE 0 /* no action */
+#define WL_P2P_SCHED_ACTION_DOZE 1 /* doze */
+/* schedule option - WL_P2P_SCHED_TYPE_REQ_ABS */
+#define WL_P2P_SCHED_ACTION_GOOFF 2 /* turn off GO beacon/prbrsp functions */
+/* schedule option - WL_P2P_SCHED_TYPE_XXX */
+#define WL_P2P_SCHED_ACTION_RESET 255 /* reset */
+
+/* schedule option - WL_P2P_SCHED_TYPE_ABS */
+#define WL_P2P_SCHED_OPTION_NORMAL 0 /* normal start/interval/duration/count */
+#define WL_P2P_SCHED_OPTION_BCNPCT 1 /* percentage of beacon interval */
+/* schedule option - WL_P2P_SCHED_TYPE_REQ_ABS */
+#define WL_P2P_SCHED_OPTION_TSFOFS 2 /* normal start/interval/duration/count with
+ * start being an offset of the 'current' TSF
+ */
+
+/* feature flags */
+#define WL_P2P_FEAT_GO_CSA (1 << 0) /* GO moves with the STA using CSA method */
+#define WL_P2P_FEAT_GO_NOLEGACY (1 << 1) /* GO does not send probe responses to non-p2p
+ * probe requests
+ */
+#define WL_P2P_FEAT_RESTRICT_DEV_RESP (1 << 2) /* Restrict p2p dev interface from responding */
+
+/* n-mode support capability */
+/* 2x2 includes both 1x1 & 2x2 devices;
+ * the value 2 is reserved for the future, when 1x1 & 2x2 may be separated
+ * and controlled independently
+ */
+#define WL_11N_2x2 1
+#define WL_11N_3x3 3
+#define WL_11N_4x4 4
+
+/* define 11n feature disable flags */
+#define WLFEATURE_DISABLE_11N 0x00000001
+#define WLFEATURE_DISABLE_11N_STBC_TX 0x00000002
+#define WLFEATURE_DISABLE_11N_STBC_RX 0x00000004
+#define WLFEATURE_DISABLE_11N_SGI_TX 0x00000008
+#define WLFEATURE_DISABLE_11N_SGI_RX 0x00000010
+#define WLFEATURE_DISABLE_11N_AMPDU_TX 0x00000020
+#define WLFEATURE_DISABLE_11N_AMPDU_RX 0x00000040
+#define WLFEATURE_DISABLE_11N_GF 0x00000080
+
+/* Proxy STA modes */
+#define PSTA_MODE_DISABLED 0
+#define PSTA_MODE_PROXY 1
+#define PSTA_MODE_REPEATER 2
+
+/* op code in nat_cfg */
+#define NAT_OP_ENABLE 1 /* enable NAT on given interface */
+#define NAT_OP_DISABLE 2 /* disable NAT on given interface */
+#define NAT_OP_DISABLE_ALL 3 /* disable NAT on all interfaces */
+
+/* NAT state */
+#define NAT_STATE_ENABLED 1 /* NAT is enabled */
+#define NAT_STATE_DISABLED 2 /* NAT is disabled */
+
+#define CHANNEL_5G_LOW_START 36 /* 5G low (36..48) CDD enable/disable bit mask */
+#define CHANNEL_5G_MID_START 52 /* 5G mid (52..64) CDD enable/disable bit mask */
+#define CHANNEL_5G_HIGH_START 100 /* 5G high (100..140) CDD enable/disable bit mask */
+#define CHANNEL_5G_UPPER_START 149 /* 5G upper (149..161) CDD enable/disable bit mask */
+
+/* D0 Coalescing */
+#define IPV4_ARP_FILTER 0x0001
+#define IPV4_NETBT_FILTER 0x0002
+#define IPV4_LLMNR_FILTER 0x0004
+#define IPV4_SSDP_FILTER 0x0008
+#define IPV4_WSD_FILTER 0x0010
+#define IPV6_NETBT_FILTER 0x0200
+#define IPV6_LLMNR_FILTER 0x0400
+#define IPV6_SSDP_FILTER 0x0800
+#define IPV6_WSD_FILTER 0x1000
+
+/* Network Offload Engine */
+#define NWOE_OL_ENABLE 0x00000001
+
+/*
+ * Traffic management structures/defines.
+ */
+
+/* Traffic management bandwidth parameters */
+#define TRF_MGMT_MAX_PRIORITIES 3
+
+#define TRF_MGMT_FLAG_ADD_DSCP 0x0001 /* Add DSCP to IP TOS field */
+#define TRF_MGMT_FLAG_DISABLE_SHAPING 0x0002 /* Don't shape traffic */
+#define TRF_MGMT_FLAG_MANAGE_LOCAL_TRAFFIC 0x0008 /* Manage traffic over our local subnet */
+#define TRF_MGMT_FLAG_FILTER_ON_MACADDR 0x0010 /* filter on MAC address */
+#define TRF_MGMT_FLAG_NO_RX 0x0020 /* do not apply filters to rx packets */
+
+#define TRF_FILTER_MAC_ADDR 0x0001 /* L2 filter use dst mac address for filtering */
+#define TRF_FILTER_IP_ADDR 0x0002 /* L3 filter use ip address for filtering */
+#define TRF_FILTER_L4 0x0004 /* L4 filter use tcp/udp for filtering */
+#define TRF_FILTER_DWM 0x0008 /* L3 filter use DSCP for filtering */
+#define TRF_FILTER_FAVORED 0x0010 /* Tag the packet FAVORED */
+
+/* WNM/NPS subfeatures mask */
+#define WL_WNM_BSSTRANS 0x00000001
+#define WL_WNM_PROXYARP 0x00000002
+#define WL_WNM_MAXIDLE 0x00000004
+#define WL_WNM_TIMBC 0x00000008
+#define WL_WNM_TFS 0x00000010
+#define WL_WNM_SLEEP 0x00000020
+#define WL_WNM_DMS 0x00000040
+#define WL_WNM_FMS 0x00000080
+#define WL_WNM_NOTIF 0x00000100
+#define WL_WNM_MAX 0x00000200
+
+#ifndef ETHER_MAX_DATA
+#define ETHER_MAX_DATA 1500
+#endif /* ETHER_MAX_DATA */
+
+/* Different discovery modes for dpt */
+#define DPT_DISCOVERY_MANUAL 0x01 /* manual discovery mode */
+#define DPT_DISCOVERY_AUTO 0x02 /* auto discovery mode */
+#define DPT_DISCOVERY_SCAN 0x04 /* scan-based discovery mode */
+
+/* different path selection values */
+#define DPT_PATHSEL_AUTO 0 /* auto mode for path selection */
+#define DPT_PATHSEL_DIRECT 1 /* always use direct DPT path */
+#define DPT_PATHSEL_APPATH 2 /* always use AP path */
+
+/* different ops for deny list */
+#define DPT_DENY_LIST_ADD 1 /* add to dpt deny list */
+#define DPT_DENY_LIST_REMOVE 2 /* remove from dpt deny list */
+
+/* different ops for manual end point */
+#define DPT_MANUAL_EP_CREATE 1 /* create manual dpt endpoint */
+#define DPT_MANUAL_EP_MODIFY 2 /* modify manual dpt endpoint */
+#define DPT_MANUAL_EP_DELETE 3 /* delete manual dpt endpoint */
+
+/* flags to indicate DPT status */
+#define DPT_STATUS_ACTIVE 0x01 /* link active (though may be suspended) */
+#define DPT_STATUS_AES 0x02 /* link secured through AES encryption */
+#define DPT_STATUS_FAILED 0x04 /* DPT link failed */
+
+#ifdef WLTDLS
+/* different ops for manual end point */
+#define TDLS_MANUAL_EP_CREATE 1 /* create manual dpt endpoint */
+#define TDLS_MANUAL_EP_MODIFY 2 /* modify manual dpt endpoint */
+#define TDLS_MANUAL_EP_DELETE 3 /* delete manual dpt endpoint */
+#define TDLS_MANUAL_EP_PM 4 /* put dpt endpoint in PM mode */
+#define TDLS_MANUAL_EP_WAKE 5 /* wake up dpt endpoint from PM */
+#define TDLS_MANUAL_EP_DISCOVERY 6 /* discover if endpoint is TDLS capable */
+#define TDLS_MANUAL_EP_CHSW 7 /* channel switch */
+#define TDLS_MANUAL_EP_WFD_TPQ 8 /* WiFi-Display Tunneled Probe reQuest */
+
+/* modes */
+#define TDLS_WFD_IE_TX 0
+#define TDLS_WFD_IE_RX 1
+#define TDLS_WFD_PROBE_IE_TX 2
+#define TDLS_WFD_PROBE_IE_RX 3
+#endif /* WLTDLS */
+
+/* define for flag */
+#define TSPEC_PENDING 0 /* TSPEC pending */
+#define TSPEC_ACCEPTED 1 /* TSPEC accepted */
+#define TSPEC_REJECTED 2 /* TSPEC rejected */
+#define TSPEC_UNKNOWN 3 /* TSPEC unknown */
+#define TSPEC_STATUS_MASK 7 /* TSPEC status mask */
+
+#ifdef BCMCCX
+/* "wlan_reason" iovar interface */
+#define WL_WLAN_ASSOC_REASON_NORMAL_NETWORK 0 /* normal WLAN network setup */
+#define WL_WLAN_ASSOC_REASON_ROAM_FROM_CELLULAR_NETWORK 1 /* roam from Cellular network */
+#define WL_WLAN_ASSOC_REASON_ROAM_FROM_LAN 2 /* roam from LAN */
+#define WL_WLAN_ASSOC_REASON_MAX 2 /* largest value allowed */
+#endif /* BCMCCX */
+
+/* Software feature flag defines used by wlfeatureflag */
+#ifdef WLAFTERBURNER
+#define WL_SWFL_ABBFL 0x0001 /* Allow Afterburner on systems w/o hardware BFL */
+#define WL_SWFL_ABENCORE 0x0002 /* Allow AB on non-4318E chips */
+#endif /* WLAFTERBURNER */
+#define WL_SWFL_NOHWRADIO 0x0004
+#define WL_SWFL_FLOWCONTROL 0x0008 /* Enable backpressure to OS stack */
+#define WL_SWFL_WLBSSSORT 0x0010 /* Per-port supports sorting of BSS */
+
+#define WL_LIFETIME_MAX 0xFFFF /* Max value in ms */
+
+#define CSA_BROADCAST_ACTION_FRAME 0 /* csa broadcast action frame */
+#define CSA_UNICAST_ACTION_FRAME 1 /* csa unicast action frame */
+
+/* Roaming trigger definitions for WLC_SET_ROAM_TRIGGER.
+ *
+ * (-100 < value < 0) value is used directly as a roaming trigger in dBm
+ * (0 <= value) value specifies a logical roaming trigger level from
+ * the list below
+ *
+ * WLC_GET_ROAM_TRIGGER always returns roaming trigger value in dBm, never
+ * the logical roam trigger value.
+ */
+#define WLC_ROAM_TRIGGER_DEFAULT 0 /* default roaming trigger */
+#define WLC_ROAM_TRIGGER_BANDWIDTH 1 /* optimize for bandwidth roaming trigger */
+#define WLC_ROAM_TRIGGER_DISTANCE 2 /* optimize for distance roaming trigger */
+#define WLC_ROAM_TRIGGER_AUTO 3 /* auto-detect environment */
+#define WLC_ROAM_TRIGGER_MAX_VALUE 3 /* max. valid value */
+
+#define WLC_ROAM_NEVER_ROAM_TRIGGER (-100) /* Avoid Roaming by setting a large value */
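+
+/* Usage sketch (illustrative only; how the value is passed to the ioctl is an
+ * assumption, only the value semantics above come from this header):
+ *
+ *	int trigger = -75;				// used directly: roam below -75 dBm
+ *	int trigger_auto = WLC_ROAM_TRIGGER_AUTO;	// logical level: driver picks the dBm value
+ *	// per the comment above, WLC_GET_ROAM_TRIGGER reports the threshold in dBm either way
+ */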
+
+/* Preferred Network Offload (PNO, formerly PFN) defines */
+#define WPA_AUTH_PFN_ANY 0xffffffff /* for PFN, match only ssid */
+
+#define SORT_CRITERIA_BIT 0
+#define AUTO_NET_SWITCH_BIT 1
+#define ENABLE_BKGRD_SCAN_BIT 2
+#define IMMEDIATE_SCAN_BIT 3
+#define AUTO_CONNECT_BIT 4
+#define ENABLE_BD_SCAN_BIT 5
+#define ENABLE_ADAPTSCAN_BIT 6
+#define IMMEDIATE_EVENT_BIT 8
+#define SUPPRESS_SSID_BIT 9
+#define ENABLE_NET_OFFLOAD_BIT 10
+/* report found/lost events for SSID and BSSID networks separately */
+#define REPORT_SEPERATELY_BIT 11
+#define BESTN_BSSID_ONLY_BIT 12
+
+#define SORT_CRITERIA_MASK 0x0001
+#define AUTO_NET_SWITCH_MASK 0x0002
+#define ENABLE_BKGRD_SCAN_MASK 0x0004
+#define IMMEDIATE_SCAN_MASK 0x0008
+#define AUTO_CONNECT_MASK 0x0010
+
+#define ENABLE_BD_SCAN_MASK 0x0020
+#define ENABLE_ADAPTSCAN_MASK 0x00c0
+#define IMMEDIATE_EVENT_MASK 0x0100
+#define SUPPRESS_SSID_MASK 0x0200
+#define ENABLE_NET_OFFLOAD_MASK 0x0400
+/* report found/lost events for SSID and BSSID networks separately */
+#define REPORT_SEPERATELY_MASK 0x0800
+#define BESTN_BSSID_ONLY_MASK 0x1000
+
+#define PFN_VERSION 2
+#define PFN_SCANRESULT_VERSION 1
+#define MAX_PFN_LIST_COUNT 16
+
+#define PFN_COMPLETE 1
+#define PFN_INCOMPLETE 0
+
+#define DEFAULT_BESTN 2
+#define DEFAULT_MSCAN 0
+#define DEFAULT_REPEAT 10
+#define DEFAULT_EXP 2
+
+#define PFN_PARTIAL_SCAN_BIT 0
+#define PFN_PARTIAL_SCAN_MASK 1
+
+#define WL_PFN_SUPPRESSFOUND_MASK 0x08
+#define WL_PFN_SUPPRESSLOST_MASK 0x10
+#define WL_PFN_RSSI_MASK 0xff00
+#define WL_PFN_RSSI_SHIFT 8
+
+#define WL_PFN_REPORT_ALLNET 0
+#define WL_PFN_REPORT_SSIDNET 1
+#define WL_PFN_REPORT_BSSIDNET 2
+
+#define WL_PFN_CFG_FLAGS_PROHIBITED 0x00000001 /* Accept and use prohibited channels */
+#define WL_PFN_CFG_FLAGS_HISTORY_OFF 0x00000002 /* Scan history suppressed */
+
+#define WL_PFN_HIDDEN_BIT 2
+#define PNO_SCAN_MAX_FW (508 * 1000) /* max scan time in msec */
+#define PNO_SCAN_MAX_FW_SEC (PNO_SCAN_MAX_FW / 1000) /* max scan time in sec */
+#define PNO_SCAN_MIN_FW_SEC 10 /* min scan time in sec */
+#define WL_PFN_HIDDEN_MASK 0x4
+
+#ifndef BESTN_MAX
+#define BESTN_MAX 8
+#endif
+
+#ifndef MSCAN_MAX
+#define MSCAN_MAX 32
+#endif
+
+/* TCP Checksum Offload error injection for testing */
+#define TOE_ERRTEST_TX_CSUM 0x00000001
+#define TOE_ERRTEST_RX_CSUM 0x00000002
+#define TOE_ERRTEST_RX_CSUM2 0x00000004
+
+/* ARP Offload feature flags for arp_ol iovar */
+#define ARP_OL_AGENT 0x00000001
+#define ARP_OL_SNOOP 0x00000002
+#define ARP_OL_HOST_AUTO_REPLY 0x00000004
+#define ARP_OL_PEER_AUTO_REPLY 0x00000008
+
+/* ARP Offload error injection */
+#define ARP_ERRTEST_REPLY_PEER 0x1
+#define ARP_ERRTEST_REPLY_HOST 0x2
+
+#define ARP_MULTIHOMING_MAX 8 /* Maximum local host IP addresses */
+#define ND_MULTIHOMING_MAX 10 /* Maximum local host IP addresses */
+#define ND_REQUEST_MAX 5 /* Max set of offload params */
+
+
+/* AOAC wake event flag */
+#define WAKE_EVENT_NLO_DISCOVERY_BIT 1
+#define WAKE_EVENT_AP_ASSOCIATION_LOST_BIT 2
+#define WAKE_EVENT_GTK_HANDSHAKE_ERROR_BIT 4
+#define WAKE_EVENT_4WAY_HANDSHAKE_REQUEST_BIT 8
+
+
+#define MAX_NUM_WOL_PATTERN 22 /* LOGO requirements min 22 */
+
+
+/* Packet filter operation mode */
+/* True: 1; False: 0 */
+#define PKT_FILTER_MODE_FORWARD_ON_MATCH 1
+/* Enable and disable pkt_filter as a whole */
+#define PKT_FILTER_MODE_DISABLE 2
+/* Cache first matched rx pkt(be queried by host later) */
+#define PKT_FILTER_MODE_PKT_CACHE_ON_MATCH 4
+/* If pkt_filter is enabled and no filter is set, don't forward anything */
+#define PKT_FILTER_MODE_PKT_FORWARD_OFF_DEFAULT 8
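+
+/* Usage sketch (illustrative only; whether these particular flags are meant to
+ * be combined is an assumption, the header only defines the bit values):
+ *
+ *	uint32 mode = PKT_FILTER_MODE_FORWARD_ON_MATCH |
+ *	              PKT_FILTER_MODE_PKT_FORWARD_OFF_DEFAULT;
+ *	// forward only packets that match an installed filter, drop the rest
+ */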
+
+#ifdef DONGLEOVERLAYS
+#define OVERLAY_IDX_MASK 0x000000ff
+#define OVERLAY_IDX_SHIFT 0
+#define OVERLAY_FLAGS_MASK 0xffffff00
+#define OVERLAY_FLAGS_SHIFT 8
+/* overlay written to device memory immediately after loading the base image */
+#define OVERLAY_FLAG_POSTLOAD 0x100
+/* defer overlay download until the device responds w/WLC_E_OVL_DOWNLOAD event */
+#define OVERLAY_FLAG_DEFER_DL 0x200
+/* overlay downloaded prior to the host going to sleep */
+#define OVERLAY_FLAG_PRESLEEP 0x400
+#define OVERLAY_DOWNLOAD_CHUNKSIZE 1024
+#endif /* DONGLEOVERLAYS */
+
+/* reuse two numbers in the sc/rc space */
+#define SMFS_CODE_MALFORMED 0xFFFE
+#define SMFS_CODE_IGNORED 0xFFFD
+
+/* RFAWARE def */
+#define BCM_ACTION_RFAWARE 0x77
+#define BCM_ACTION_RFAWARE_DCS 0x01
+
+/* DCS reason code define */
+#define BCM_DCS_IOVAR 0x1
+#define BCM_DCS_UNKNOWN 0xFF
+
+
+#ifdef PROP_TXSTATUS
+/* Bit definitions for tlv iovar */
+/*
+ * enable RSSI signals:
+ * WLFC_CTL_TYPE_RSSI
+ */
+#define WLFC_FLAGS_RSSI_SIGNALS 0x0001
+
+/* enable (if/mac_open, if/mac_close, mac_add, mac_del) signals:
+ *
+ * WLFC_CTL_TYPE_MAC_OPEN
+ * WLFC_CTL_TYPE_MAC_CLOSE
+ *
+ * WLFC_CTL_TYPE_INTERFACE_OPEN
+ * WLFC_CTL_TYPE_INTERFACE_CLOSE
+ *
+ * WLFC_CTL_TYPE_MACDESC_ADD
+ * WLFC_CTL_TYPE_MACDESC_DEL
+ *
+ */
+#define WLFC_FLAGS_XONXOFF_SIGNALS 0x0002
+
+/* enable (status, fifo_credit, mac_credit) signals
+ * WLFC_CTL_TYPE_MAC_REQUEST_CREDIT
+ * WLFC_CTL_TYPE_TXSTATUS
+ * WLFC_CTL_TYPE_FIFO_CREDITBACK
+ */
+#define WLFC_FLAGS_CREDIT_STATUS_SIGNALS 0x0004
+
+#define WLFC_FLAGS_HOST_PROPTXSTATUS_ACTIVE 0x0008
+#define WLFC_FLAGS_PSQ_GENERATIONFSM_ENABLE 0x0010
+#define WLFC_FLAGS_PSQ_ZERO_BUFFER_ENABLE 0x0020
+#define WLFC_FLAGS_HOST_RXRERODER_ACTIVE 0x0040
+#define WLFC_FLAGS_PKT_STAMP_SIGNALS 0x0080
+
+#endif /* PROP_TXSTATUS */
+
+#define WL_TIMBC_STATUS_AP_UNKNOWN 255 /* AP status for internal use only */
+
+#define WL_DFRTS_LOGIC_OFF 0 /* Feature is disabled */
+#define WL_DFRTS_LOGIC_OR 1 /* OR all non-zero threshold conditions */
+#define WL_DFRTS_LOGIC_AND 2 /* AND all non-zero threshold conditions */
+
+/* Definitions for Reliable Multicast */
+#define WL_RELMCAST_MAX_CLIENT 32
+#define WL_RELMCAST_FLAG_INBLACKLIST 1
+#define WL_RELMCAST_FLAG_ACTIVEACKER 2
+#define WL_RELMCAST_FLAG_RELMCAST 4
+
+/* structures for proximity detection device role */
+#define WL_PROXD_MODE_DISABLE 0
+#define WL_PROXD_MODE_NEUTRAL 1
+#define WL_PROXD_MODE_INITIATOR 2
+#define WL_PROXD_MODE_TARGET 3
+#define WL_PROXD_RANDOM_WAKEUP 0x8000
+
+
+#ifdef NET_DETECT
+#define NET_DETECT_MAX_WAKE_DATA_SIZE 2048
+#define NET_DETECT_MAX_PROFILES 16
+#define NET_DETECT_MAX_CHANNELS 50
+#endif /* NET_DETECT */
+
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+
+/* Bit masks for radio disabled status - returned by WL_GET_RADIO */
+#define WL_RADIO_SW_DISABLE (1<<0)
+#define WL_RADIO_HW_DISABLE (1<<1)
+#define WL_RADIO_MPC_DISABLE (1<<2)
+#define WL_RADIO_COUNTRY_DISABLE (1<<3) /* some countries don't support any channel */
+
+#define WL_SPURAVOID_OFF 0
+#define WL_SPURAVOID_ON1 1
+#define WL_SPURAVOID_ON2 2
+
+
+#define WL_4335_SPURAVOID_ON1 1
+#define WL_4335_SPURAVOID_ON2 2
+#define WL_4335_SPURAVOID_ON3 3
+#define WL_4335_SPURAVOID_ON4 4
+#define WL_4335_SPURAVOID_ON5 5
+#define WL_4335_SPURAVOID_ON6 6
+#define WL_4335_SPURAVOID_ON7 7
+#define WL_4335_SPURAVOID_ON8 8
+#define WL_4335_SPURAVOID_ON9 9
+
+/* Override bit for WLC_SET_TXPWR. if set, ignore other level limits */
+#define WL_TXPWR_OVERRIDE (1U<<31)
+#define WL_TXPWR_NEG (1U<<30)
+
+
+/* phy types (returned by WLC_GET_PHYTPE) */
+#define WLC_PHY_TYPE_A 0
+#define WLC_PHY_TYPE_B 1
+#define WLC_PHY_TYPE_G 2
+#define WLC_PHY_TYPE_N 4
+#define WLC_PHY_TYPE_LP 5
+#define WLC_PHY_TYPE_SSN 6
+#define WLC_PHY_TYPE_HT 7
+#define WLC_PHY_TYPE_LCN 8
+#define WLC_PHY_TYPE_LCN40 10
+#define WLC_PHY_TYPE_AC 11
+#define WLC_PHY_TYPE_NULL 0xf
+
+/* Values for PM */
+#define PM_OFF 0
+#define PM_MAX 1
+#define PM_FAST 2
+#define PM_FORCE_OFF 3 /* use this bit to force PM off even when BT is active */
+
+#define WL_WME_CNT_VERSION 1 /* current version of wl_wme_cnt_t */
+
+/* fbt_cap: FBT assoc / reassoc modes. */
+#define WLC_FBT_CAP_DRV_4WAY_AND_REASSOC 1 /* Driver 4-way handshake & reassoc (WLFBT). */
+
+/* monitor_promisc_level bits */
+#define WL_MONPROMISC_PROMISC 0x0001
+#define WL_MONPROMISC_CTRL 0x0002
+#define WL_MONPROMISC_FCS 0x0004
+
+/* TCP Checksum Offload defines */
+#define TOE_TX_CSUM_OL 0x00000001
+#define TOE_RX_CSUM_OL 0x00000002
+
+/* Wi-Fi Display Services (WFDS) */
+#define WL_P2P_SOCIAL_CHANNELS_MAX WL_NUMCHANNELS
+#define MAX_WFDS_SEEK_SVC 4 /* Max # of wfds services to seek */
+#define MAX_WFDS_ADVERT_SVC 4 /* Max # of wfds services to advertise */
+#define MAX_WFDS_SVC_NAME_LEN 200 /* maximum service_name length */
+#define MAX_WFDS_ADV_SVC_INFO_LEN 65000 /* maximum adv service_info length */
+#define P2P_WFDS_HASH_LEN 6 /* Length of a WFDS service hash */
+#define MAX_WFDS_SEEK_SVC_INFO_LEN 255 /* maximum seek service_info req length */
+#define MAX_WFDS_SEEK_SVC_NAME_LEN 200 /* maximum service_name length */
+
+/* ap_isolate bitmaps */
+#define AP_ISOLATE_DISABLED 0x0
+#define AP_ISOLATE_SENDUP_ALL 0x01
+#define AP_ISOLATE_SENDUP_MCAST 0x02
+
+#endif /* wlioctl_defs_h */
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: dhdioctl.h 419132 2013-08-19 21:33:05Z $
+ * $Id: dhdioctl.h 438755 2013-11-22 23:20:40Z $
*/
#ifndef _dhdioctl_h_
/* Underlying BUS definition */
enum {
BUS_TYPE_USB = 0, /* for USB dongles */
- BUS_TYPE_SDIO /* for SDIO dongles */
+ BUS_TYPE_SDIO, /* for SDIO dongles */
+ BUS_TYPE_PCIE /* for PCIE dongles */
};
/* per-driver magic numbers */
#define DHD_GLOM_VAL 0x0400
#define DHD_EVENT_VAL 0x0800
#define DHD_BTA_VAL 0x1000
+#if 0 && (NDISVER >= 0x0630) && 1
+#define DHD_SCAN_VAL 0x2000
+#else
#define DHD_ISCAN_VAL 0x2000
+#endif
#define DHD_ARPOE_VAL 0x4000
#define DHD_REORDER_VAL 0x8000
#define DHD_NOCHECKDIED_VAL 0x20000 /* UTF WAR */
#define EPI_MAJOR_VERSION 1
-#define EPI_MINOR_VERSION 88
+#define EPI_MINOR_VERSION 201
-#define EPI_RC_NUMBER 45
+#define EPI_RC_NUMBER 34
#define EPI_INCREMENTAL_NUMBER 0
#define EPI_BUILD_NUMBER 0
-#define EPI_VERSION 1, 88, 45, 0
+#define EPI_VERSION 1, 201, 34, 0
-#define EPI_VERSION_NUM 0x01582d00
+#define EPI_VERSION_NUM 0x01c92200
-#define EPI_VERSION_DEV 1.88.45
+#define EPI_VERSION_DEV 1.201.34
-#define EPI_VERSION_STR "1.88.45.3 (r420671)"
+/* Driver Version String, ASCII, 32 chars max */
+#define EPI_VERSION_STR "1.201.34.1 (r491657)"
-
-#endif
+#endif /* _epivers_h_ */
#define EPI_VERSION_DEV @EPI_VERSION_DEV@
-
+/* Driver Version String, ASCII, 32 chars max */
#define EPI_VERSION_STR "@EPI_VERSION_STR@@EPI_VERSION_TYPE@ (@VC_VERSION_NUM@)"
-#endif
+#endif /* _epivers_h_ */
#
# Epivers.h version support svn/sparse/gclient workspaces
#
-# $Id: epivers.sh 363310 2012-10-17 03:37:57Z $
+# $Id: epivers.sh 389103 2013-03-05 17:24:49Z $
#
# Version generation works off of svn property HeadURL, if
# not set it keys its versions from current svn workspace or
else # epivers.h doesn't exist
- NULL="/dev/null"
- svncmd="svn --non-interactive"
+ SVNCMD=${SVNCMD:-"svn --non-interactive"}
SRCBASE=${SRCBASE:-..}
+ NULL=/dev/null
+ [ -z "$VERBOSE" ] || NULL=/dev/stderr
# Check for the in file, if not there we're in the wrong directory
if [ ! -f epivers.h.in ]; then
fi
# Following SVNURL should be expanded on checkout
- SVNURL='$HeadURL: http://svn.sj.broadcom.com/svn/wlansvn/proj/tags/DHD/DHD_REL_1_88_45/src/include/epivers.sh $'
+ SVNURL='$HeadURL: http://svn.sj.broadcom.com/svn/wlansvn/proj/tags/DHD/DHD_REL_1_201_34/src/include/epivers.sh $'
# .gclient_info is created by gclient checkout/sync steps
# and contains "DEPS='<deps-url1> <deps-url2> ..." entry
if echo "$SVNURL" | egrep -vq 'HeadURL.*epivers.sh.*|http://.*/DEPS'; then
[ -n "$VERBOSE" ] && \
echo "DBG: SVN URL ($SVNURL) wasn't expanded. Getting it from svn info"
- SVNURL=$($svncmd info epivers.sh 2> $NULL | egrep "^URL:")
+ SVNURL=$($SVNCMD info epivers.sh 2> $NULL | egrep "^URL:")
fi
if echo "${TAG}" | grep -q "_BRANCH_\|_TWIG_"; then
[ -n "$VERBOSE" ] && \
echo "DBG: Fetching $GETCOMPVER from trunk"
- $svncmd export -q \
+ $SVNCMD export -q \
^/proj/trunk/src/tools/build/${GETCOMPVER} \
${GETCOMPVER} 2> $NULL
fi
# Finally get version control revision number of <SRCBASE> (if any)
- vc_version_num=$($svncmd info ${SRCBASE} 2> $NULL | awk -F': ' '/^Last Changed Rev: /{printf "%s", $2}')
+ vc_version_num=$($SVNCMD info ${SRCBASE} 2> $NULL | awk -F': ' '/^Last Changed Rev: /{printf "%s", $2}')
# OK, go do it
echo "maj=${maj}, min=${min}, rc=${rcnum}, inc=${incremental}, build=${build}"
--- /dev/null
+/*
+ * HND arm trap handling.
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: hnd_armtrap.h 470663 2014-04-16 00:24:43Z $
+ */
+
+#ifndef _hnd_armtrap_h_
+#define _hnd_armtrap_h_
+
+
+/* ARM trap handling */
+
+/* Trap types defined by ARM (see arminc.h) */
+
+/* Trap locations in lo memory */
+#define TRAP_STRIDE 4
+#define FIRST_TRAP TR_RST
+#define LAST_TRAP (TR_FIQ * TRAP_STRIDE)
+
+#if defined(__ARM_ARCH_4T__)
+#define MAX_TRAP_TYPE (TR_FIQ + 1)
+#elif defined(__ARM_ARCH_7M__)
+#define MAX_TRAP_TYPE (TR_ISR + ARMCM3_NUMINTS)
+#endif /* __ARM_ARCH_7M__ */
+
+/* The trap structure is defined here as offsets for assembly */
+#define TR_TYPE 0x00
+#define TR_EPC 0x04
+#define TR_CPSR 0x08
+#define TR_SPSR 0x0c
+#define TR_REGS 0x10
+#define TR_REG(n) (TR_REGS + (n) * 4)
+#define TR_SP TR_REG(13)
+#define TR_LR TR_REG(14)
+#define TR_PC TR_REG(15)
+
+#define TRAP_T_SIZE 80
+
+#ifndef _LANGUAGE_ASSEMBLY
+
+#include <typedefs.h>
+
+typedef struct _trap_struct {
+ uint32 type;
+ uint32 epc;
+ uint32 cpsr;
+ uint32 spsr;
+ uint32 r0; /* a1 */
+ uint32 r1; /* a2 */
+ uint32 r2; /* a3 */
+ uint32 r3; /* a4 */
+ uint32 r4; /* v1 */
+ uint32 r5; /* v2 */
+ uint32 r6; /* v3 */
+ uint32 r7; /* v4 */
+ uint32 r8; /* v5 */
+ uint32 r9; /* sb/v6 */
+ uint32 r10; /* sl/v7 */
+ uint32 r11; /* fp/v8 */
+ uint32 r12; /* ip */
+ uint32 r13; /* sp */
+ uint32 r14; /* lr */
+ uint32 pc; /* r15 */
+} trap_t;
+
+#endif /* !_LANGUAGE_ASSEMBLY */
+
+#endif /* _hnd_armtrap_h_ */
--- /dev/null
+/*
+ * Console support for RTE - for host use only.
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: hnd_cons.h 473343 2014-04-29 01:45:22Z $
+ */
+#ifndef _hnd_cons_h_
+#define _hnd_cons_h_
+
+#include <typedefs.h>
+#include <siutils.h>
+
+#define CBUF_LEN (128)
+
+#define LOG_BUF_LEN 1024
+
+#ifdef BOOTLOADER_CONSOLE_OUTPUT
+#undef RWL_MAX_DATA_LEN
+#undef CBUF_LEN
+#undef LOG_BUF_LEN
+#define RWL_MAX_DATA_LEN (4 * 1024 + 8)
+#define CBUF_LEN (RWL_MAX_DATA_LEN + 64)
+#define LOG_BUF_LEN (16 * 1024)
+#endif
+
+typedef struct {
+ uint32 buf; /* Can't be pointer on (64-bit) hosts */
+ uint buf_size;
+ uint idx;
+ uint out_idx; /* output index */
+} hnd_log_t;
+
+typedef struct {
+ /* Virtual UART
+ * When there is no UART (e.g. Quickturn), the host should write a complete
+ * input line directly into cbuf and then write the length into vcons_in.
+ * This may also be used when there is a real UART (at risk of conflicting with
+ * the real UART). vcons_out is currently unused.
+ */
+ volatile uint vcons_in;
+ volatile uint vcons_out;
+
+ /* Output (logging) buffer
+ * Console output is written to a ring buffer log_buf at index log_idx.
+ * The host may read the output when it sees log_idx advance.
+ * Output will be lost if the output wraps around faster than the host polls.
+ */
+ hnd_log_t log;
+
+ /* Console input line buffer
+ * Characters are read one at a time into cbuf until <CR> is received, then
+ * the buffer is processed as a command line. Also used for virtual UART.
+ */
+ uint cbuf_idx;
+ char cbuf[CBUF_LEN];
+} hnd_cons_t;
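+
+/* Polling sketch (illustrative only; read_shared(), cons_addr and last_idx are
+ * hypothetical host-side helpers, only the hnd_cons_t and hnd_log_t layouts
+ * come from this file):
+ *
+ *	hnd_cons_t cons;
+ *	read_shared(dev, cons_addr, &cons, sizeof(cons));	// fetch console struct
+ *	while (last_idx != cons.log.idx) {			// host sees log idx advance
+ *		char c;
+ *		read_shared(dev, cons.log.buf + last_idx, &c, 1);
+ *		putchar(c);
+ *		last_idx = (last_idx + 1) % cons.log.buf_size;	// ring buffer wrap
+ *	}
+ */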
+
+#endif /* _hnd_cons_h_ */
--- /dev/null
+/*
+ * HND generic packet pool operation primitives
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: $
+ */
+
+#ifndef _hnd_pktpool_h_
+#define _hnd_pktpool_h_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef BCMPKTPOOL
+#define POOL_ENAB(pool) ((pool) && (pool)->inited)
+#define SHARED_POOL (pktpool_shared)
+#else /* BCMPKTPOOL */
+#define POOL_ENAB(bus) 0
+#define SHARED_POOL ((struct pktpool *)NULL)
+#endif /* BCMPKTPOOL */
+
+#ifdef BCMFRAGPOOL
+#define SHARED_FRAG_POOL (pktpool_shared_lfrag)
+#endif
+#define SHARED_RXFRAG_POOL (pktpool_shared_rxlfrag)
+
+
+#ifndef PKTPOOL_LEN_MAX
+#define PKTPOOL_LEN_MAX 40
+#endif /* PKTPOOL_LEN_MAX */
+#define PKTPOOL_CB_MAX 3
+
+/* forward declaration */
+struct pktpool;
+
+typedef void (*pktpool_cb_t)(struct pktpool *pool, void *arg);
+typedef struct {
+ pktpool_cb_t cb;
+ void *arg;
+} pktpool_cbinfo_t;
+/* callback fn extension to populate host address in pool pkt */
+typedef int (*pktpool_cb_extn_t)(struct pktpool *pool, void *arg1, void* pkt, bool arg2);
+typedef struct {
+ pktpool_cb_extn_t cb;
+ void *arg;
+} pktpool_cbextn_info_t;
+
+
+#ifdef BCMDBG_POOL
+/* pkt pool debug states */
+#define POOL_IDLE 0
+#define POOL_RXFILL 1
+#define POOL_RXDH 2
+#define POOL_RXD11 3
+#define POOL_TXDH 4
+#define POOL_TXD11 5
+#define POOL_AMPDU 6
+#define POOL_TXENQ 7
+
+typedef struct {
+ void *p;
+ uint32 cycles;
+ uint32 dur;
+} pktpool_dbg_t;
+
+typedef struct {
+ uint8 txdh; /* tx to host */
+ uint8 txd11; /* tx to d11 */
+ uint8 enq; /* waiting in q */
+ uint8 rxdh; /* rx from host */
+ uint8 rxd11; /* rx from d11 */
+ uint8 rxfill; /* dma_rxfill */
+ uint8 idle; /* avail in pool */
+} pktpool_stats_t;
+#endif /* BCMDBG_POOL */
+
+typedef struct pktpool {
+ bool inited; /* pktpool_init was successful */
+ uint8 type; /* type of lbuf: basic, frag, etc */
+ uint8 id; /* pktpool ID: index in registry */
+ bool istx; /* direction: transmit or receive data path */
+
+ void * freelist; /* free list: see PKTNEXTFREE(), PKTSETNEXTFREE() */
+ uint16 avail; /* number of packets in pool's free list */
+ uint16 len; /* number of packets managed by pool */
+ uint16 maxlen; /* maximum size of pool <= PKTPOOL_LEN_MAX */
+ uint16 plen; /* size of pkt buffer, excluding lbuf|lbuf_frag */
+
+ bool empty;
+ uint8 cbtoggle;
+ uint8 cbcnt;
+ uint8 ecbcnt;
+ bool emptycb_disable;
+ pktpool_cbinfo_t *availcb_excl;
+ pktpool_cbinfo_t cbs[PKTPOOL_CB_MAX];
+ pktpool_cbinfo_t ecbs[PKTPOOL_CB_MAX];
+ pktpool_cbextn_info_t cbext;
+ pktpool_cbextn_info_t rxcplidfn;
+#ifdef BCMDBG_POOL
+ uint8 dbg_cbcnt;
+ pktpool_cbinfo_t dbg_cbs[PKTPOOL_CB_MAX];
+ uint16 dbg_qlen;
+ pktpool_dbg_t dbg_q[PKTPOOL_LEN_MAX + 1];
+#endif
+ pktpool_cbinfo_t dmarxfill;
+} pktpool_t;
+
+extern pktpool_t *pktpool_shared;
+#ifdef BCMFRAGPOOL
+extern pktpool_t *pktpool_shared_lfrag;
+#endif
+extern pktpool_t *pktpool_shared_rxlfrag;
+
+/* Incarnate a pktpool registry. On success returns total_pools. */
+extern int pktpool_attach(osl_t *osh, uint32 total_pools);
+extern int pktpool_dettach(osl_t *osh); /* Relinquish registry */
+
+extern int pktpool_init(osl_t *osh, pktpool_t *pktp, int *pktplen, int plen, bool istx, uint8 type);
+extern int pktpool_deinit(osl_t *osh, pktpool_t *pktp);
+extern int pktpool_fill(osl_t *osh, pktpool_t *pktp, bool minimal);
+extern void* pktpool_get(pktpool_t *pktp);
+extern void pktpool_free(pktpool_t *pktp, void *p);
+extern int pktpool_add(pktpool_t *pktp, void *p);
+extern int pktpool_avail_notify_normal(osl_t *osh, pktpool_t *pktp);
+extern int pktpool_avail_notify_exclusive(osl_t *osh, pktpool_t *pktp, pktpool_cb_t cb);
+extern int pktpool_avail_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg);
+extern int pktpool_empty_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg);
+extern int pktpool_setmaxlen(pktpool_t *pktp, uint16 maxlen);
+extern int pktpool_setmaxlen_strict(osl_t *osh, pktpool_t *pktp, uint16 maxlen);
+extern void pktpool_emptycb_disable(pktpool_t *pktp, bool disable);
+extern bool pktpool_emptycb_disabled(pktpool_t *pktp);
+extern int pktpool_hostaddr_fill_register(pktpool_t *pktp, pktpool_cb_extn_t cb, void *arg1);
+extern int pktpool_rxcplid_fill_register(pktpool_t *pktp, pktpool_cb_extn_t cb, void *arg);
+extern void pktpool_invoke_dmarxfill(pktpool_t *pktp);
+extern int pkpool_haddr_avail_register_cb(pktpool_t *pktp, pktpool_cb_t cb, void *arg);
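+
+/* Lifetime sketch (illustrative only; error handling omitted, the buffer size
+ * and lbuf type value are assumptions):
+ *
+ *	pktpool_t pool;
+ *	int n = 16;					// request 16 packets
+ *	pktpool_attach(osh, 1);				// registry sized for one pool
+ *	pktpool_init(osh, &pool, &n, 2048, FALSE, 0);	// 2 KB rx buffers
+ *	void *p = pktpool_get(&pool);			// borrow a packet from the free list
+ *	pktpool_free(&pool, p);				// return it
+ *	pktpool_deinit(osh, &pool);
+ */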
+
+#define POOLPTR(pp) ((pktpool_t *)(pp))
+#define POOLID(pp) (POOLPTR(pp)->id)
+
+#define POOLSETID(pp, ppid) (POOLPTR(pp)->id = (ppid))
+
+#define pktpool_len(pp) (POOLPTR(pp)->len)
+#define pktpool_avail(pp) (POOLPTR(pp)->avail)
+#define pktpool_plen(pp) (POOLPTR(pp)->plen)
+#define pktpool_maxlen(pp) (POOLPTR(pp)->maxlen)
+
+
+/*
+ * ----------------------------------------------------------------------------
+ * A pool ID is assigned with a pkt pool during pool initialization. This is
+ * done by maintaining a registry of all initialized pools, and the registry
+ * index at which the pool is registered is used as the pool's unique ID.
+ * ID 0 is reserved and is used to signify an invalid pool ID.
+ * All packets henceforth allocated from a pool will be tagged with the pool's
+ * unique ID. Packets allocated from the heap will use the reserved ID = 0.
+ * Packets with non-zero pool id signify that they were allocated from a pool.
+ * A maximum of 15 pools are supported, allowing a 4bit pool ID to be used
+ * in place of a 32bit pool pointer in each packet.
+ * ----------------------------------------------------------------------------
+ */
+#define PKTPOOL_INVALID_ID (0)
+#define PKTPOOL_MAXIMUM_ID (15)
+
+/* Registry of pktpool(s) */
+extern pktpool_t *pktpools_registry[PKTPOOL_MAXIMUM_ID + 1];
+
+/* Pool ID to/from Pool Pointer converters */
+#define PKTPOOL_ID2PTR(id) (pktpools_registry[id])
+#define PKTPOOL_PTR2ID(pp) (POOLID(pp))
+
+
+#ifdef BCMDBG_POOL
+extern int pktpool_dbg_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg);
+extern int pktpool_start_trigger(pktpool_t *pktp, void *p);
+extern int pktpool_dbg_dump(pktpool_t *pktp);
+extern int pktpool_dbg_notify(pktpool_t *pktp);
+extern int pktpool_stats_dump(pktpool_t *pktp, pktpool_stats_t *stats);
+#endif /* BCMDBG_POOL */
+
+#ifdef __cplusplus
+ }
+#endif
+
+#endif /* _hnd_pktpool_h_ */
--- /dev/null
+/*
+ * HND generic pktq operation primitives
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: $
+ */
+
+#ifndef _hnd_pktq_h_
+#define _hnd_pktq_h_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* osl multi-precedence packet queue */
+#define PKTQ_LEN_MAX 0xFFFF /* Max uint16 65535 packets */
+#ifndef PKTQ_LEN_DEFAULT
+#define PKTQ_LEN_DEFAULT 128 /* Max 128 packets */
+#endif
+#ifndef PKTQ_MAX_PREC
+#define PKTQ_MAX_PREC 16 /* Maximum precedence levels */
+#endif
+
+typedef struct pktq_prec {
+ void *head; /* first packet to dequeue */
+ void *tail; /* last packet to dequeue */
+ uint16 len; /* number of queued packets */
+ uint16 max; /* maximum number of queued packets */
+} pktq_prec_t;
+
+#ifdef PKTQ_LOG
+typedef struct {
+ uint32 requested; /* packets requested to be stored */
+ uint32 stored; /* packets stored */
+ uint32 saved; /* packets saved,
+ because a lower priority queue has given away one packet
+ */
+ uint32 selfsaved; /* packets saved,
+ because an older packet from the same queue has been dropped
+ */
+ uint32 full_dropped; /* packets dropped,
+ because pktq is full with higher precedence packets
+ */
+ uint32 dropped; /* packets dropped because pktq per that precedence is full */
+ uint32 sacrificed; /* packets dropped,
+ in order to save one from a higher priority queue
+ */
+ uint32 busy; /* packets dropped because of hardware/transmission error */
+ uint32 retry; /* packets re-sent because they were not received */
+ uint32 ps_retry; /* packets retried again prior to entering power save mode */
+ uint32 suppress; /* packets which were suppressed and not transmitted */
+ uint32 retry_drop; /* packets finally dropped after retry limit */
+ uint32 max_avail; /* the high-water mark of the queue capacity for packets -
+ goes to zero as queue fills
+ */
+ uint32 max_used; /* the high-water mark of the queue utilisation for packets -
+ increases with use ('inverse' of max_avail)
+ */
+ uint32 queue_capacity; /* the maximum capacity of the queue */
+ uint32 rtsfail; /* count of rts attempts that failed to receive cts */
+ uint32 acked; /* count of packets sent (acked) successfully */
+ uint32 txrate_succ; /* running total of phy rate of packets sent successfully */
+ uint32 txrate_main; /* running total of primary phy rate of all packets */
+ uint32 throughput; /* actual data transferred successfully */
+ uint32 airtime; /* cumulative total medium access delay in useconds */
+ uint32 _logtime; /* timestamp of last counter clear */
+} pktq_counters_t;
+
+typedef struct {
+ uint32 _prec_log;
+ pktq_counters_t* _prec_cnt[PKTQ_MAX_PREC]; /* Counters per queue */
+} pktq_log_t;
+#endif /* PKTQ_LOG */
+
+
+#define PKTQ_COMMON \
+ uint16 num_prec; /* number of precedences in use */ \
+ uint16 hi_prec; /* rapid dequeue hint (>= highest non-empty prec) */ \
+ uint16 max; /* total max packets */ \
+ uint16 len; /* total number of packets */
+
+/* multi-priority pkt queue */
+struct pktq {
+ PKTQ_COMMON
+ /* q array must be last since # of elements can be either PKTQ_MAX_PREC or 1 */
+ struct pktq_prec q[PKTQ_MAX_PREC];
+#ifdef PKTQ_LOG
+ pktq_log_t* pktqlog;
+#endif
+};
+
+/* simple, non-priority pkt queue */
+struct spktq {
+ PKTQ_COMMON
+ /* q array must be last since # of elements can be either PKTQ_MAX_PREC or 1 */
+ struct pktq_prec q[1];
+};
+
+#define PKTQ_PREC_ITER(pq, prec) for (prec = (pq)->num_prec - 1; prec >= 0; prec--)
+
+/* fn(pkt, arg). return true if pkt belongs to the interface (if) */
+typedef bool (*ifpkt_cb_t)(void*, int);
+
+/* operations on a specific precedence in packet queue */
+
+#define pktq_psetmax(pq, prec, _max) ((pq)->q[prec].max = (_max))
+#define pktq_pmax(pq, prec) ((pq)->q[prec].max)
+#define pktq_plen(pq, prec) ((pq)->q[prec].len)
+#define pktq_pavail(pq, prec) ((pq)->q[prec].max - (pq)->q[prec].len)
+#define pktq_pfull(pq, prec) ((pq)->q[prec].len >= (pq)->q[prec].max)
+#define pktq_pempty(pq, prec) ((pq)->q[prec].len == 0)
+
+#define pktq_ppeek(pq, prec) ((pq)->q[prec].head)
+#define pktq_ppeek_tail(pq, prec) ((pq)->q[prec].tail)
+
+extern void pktq_append(struct pktq *pq, int prec, struct spktq *list);
+extern void pktq_prepend(struct pktq *pq, int prec, struct spktq *list);
+
+extern void *pktq_penq(struct pktq *pq, int prec, void *p);
+extern void *pktq_penq_head(struct pktq *pq, int prec, void *p);
+extern void *pktq_pdeq(struct pktq *pq, int prec);
+extern void *pktq_pdeq_prev(struct pktq *pq, int prec, void *prev_p);
+extern void *pktq_pdeq_with_fn(struct pktq *pq, int prec, ifpkt_cb_t fn, int arg);
+extern void *pktq_pdeq_tail(struct pktq *pq, int prec);
+/* Empty the queue at particular precedence level */
+extern void pktq_pflush(osl_t *osh, struct pktq *pq, int prec, bool dir,
+ ifpkt_cb_t fn, int arg);
+/* Remove a specified packet from its queue */
+extern bool pktq_pdel(struct pktq *pq, void *p, int prec);
+
+/* operations on a set of precedences in packet queue */
+
+extern int pktq_mlen(struct pktq *pq, uint prec_bmp);
+extern void *pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out);
+extern void *pktq_mpeek(struct pktq *pq, uint prec_bmp, int *prec_out);
+
+/* operations on packet queue as a whole */
+
+#define pktq_len(pq) ((int)(pq)->len)
+#define pktq_max(pq) ((int)(pq)->max)
+#define pktq_avail(pq) ((int)((pq)->max - (pq)->len))
+#define pktq_full(pq) ((pq)->len >= (pq)->max)
+#define pktq_empty(pq) ((pq)->len == 0)
+
+/* operations for single precedence queues */
+#define pktenq(pq, p) pktq_penq(((struct pktq *)(void *)pq), 0, (p))
+#define pktenq_head(pq, p) pktq_penq_head(((struct pktq *)(void *)pq), 0, (p))
+#define pktdeq(pq) pktq_pdeq(((struct pktq *)(void *)pq), 0)
+#define pktdeq_tail(pq) pktq_pdeq_tail(((struct pktq *)(void *)pq), 0)
+#define pktqflush(osh, pq) pktq_flush(osh, ((struct pktq *)(void *)pq), TRUE, NULL, 0)
+#define pktqinit(pq, len) pktq_init(((struct pktq *)(void *)pq), 1, len)
+
+extern void pktq_init(struct pktq *pq, int num_prec, int max_len);
+extern void pktq_set_max_plen(struct pktq *pq, int prec, int max_len);
+
+/* prec_out may be NULL if caller is not interested in return value */
+extern void *pktq_deq(struct pktq *pq, int *prec_out);
+extern void *pktq_deq_tail(struct pktq *pq, int *prec_out);
+extern void *pktq_peek(struct pktq *pq, int *prec_out);
+extern void *pktq_peek_tail(struct pktq *pq, int *prec_out);
+extern void pktq_flush(osl_t *osh, struct pktq *pq, bool dir, ifpkt_cb_t fn, int arg);
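+
+/*
+ * Illustrative usage sketch (not part of the original sources; 'osh' and
+ * 'pkt' are assumed to come from the caller):
+ *
+ *   struct pktq q;
+ *   void *p;
+ *   int prec;
+ *
+ *   pktq_init(&q, PKTQ_MAX_PREC, PKTQ_LEN_DEFAULT);
+ *   pktq_penq(&q, 3, pkt);                     // enqueue at precedence 3
+ *   PKTQ_PREC_ITER(&q, prec) {                 // walk precedences, highest first
+ *       while ((p = pktq_pdeq(&q, prec)) != NULL)
+ *           PKTFREE(osh, p, TRUE);             // PKTFREE comes from the OSL
+ *   }
+ *   pktq_flush(osh, &q, TRUE, NULL, 0);        // drop anything left over
+ */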
+
+#ifdef __cplusplus
+ }
+#endif
+
+#endif /* _hnd_pktq_h_ */
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: hndpmu.h 385540 2013-02-15 23:14:50Z $
+ * $Id: hndpmu.h 471127 2014-04-17 23:24:23Z $
*/
#ifndef _hndpmu_h_
#define _hndpmu_h_
+#include <typedefs.h>
+#include <osl_decl.h>
+#include <siutils.h>
-extern void si_pmu_otp_power(si_t *sih, osl_t *osh, bool on);
+
+extern void si_pmu_otp_power(si_t *sih, osl_t *osh, bool on, uint32* min_res_mask);
extern void si_sdiod_drive_strength_init(si_t *sih, osl_t *osh, uint32 drivestrength);
extern void si_pmu_minresmask_htavail_set(si_t *sih, osl_t *osh, bool set_clear);
+extern void si_pmu_slow_clk_reinit(si_t *sih, osl_t *osh);
#endif /* _hndpmu_h_ */
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: hndsoc.h 365041 2012-10-26 09:10:35Z $
+ * $Id: hndsoc.h 473238 2014-04-28 19:14:56Z $
*/
#ifndef _HNDSOC_H
#define SI_WRAP_BASE 0x18100000 /* Wrapper space base */
#define SI_CORE_SIZE 0x1000 /* each core gets 4Kbytes for registers */
+#ifndef SI_MAXCORES
#define SI_MAXCORES 32 /* NorthStar has more cores */
+#endif /* SI_MAXCORES */
#define SI_FASTRAM 0x19000000 /* On-chip RAM on chips that also have DDR */
#define SI_FASTRAM_SWAPPED 0x19800000
#define SI_ARM_FLASH1 0xffff0000 /* ARM Flash Region 1 */
#define SI_ARM_FLASH1_SZ 0x00010000 /* ARM Size of Flash Region 1 */
+#define SI_SFLASH 0x14000000
#define SI_PCI_DMA 0x40000000 /* Client Mode sb2pcitranslation2 (1 GB) */
#define SI_PCI_DMA2 0x80000000 /* Client Mode sb2pcitranslation2 (1 GB) */
#define SI_PCI_DMA_SZ 0x40000000 /* Client Mode sb2pcitranslation2 size in bytes */
#define SI_PCIE_DMA_H32 0x80000000 /* PCIE Client Mode sb2pcitranslation2
* (2 ZettaBytes), high 32 bits
*/
-
/* core codes */
#define NODEV_CORE_ID 0x700 /* Invalid coreid */
#define CC_CORE_ID 0x800 /* chipcommon core */
#define PCIE2_CORE_ID 0x83c /* pci express Gen2 core */
#define USB30D_CORE_ID 0x83d /* usb 3.0 device core */
#define ARMCR4_CORE_ID 0x83e /* ARM CR4 CPU */
+#define GCI_CORE_ID 0x840 /* GCI Core */
+#define M2MDMA_CORE_ID 0x844 /* memory to memory dma */
#define APB_BRIDGE_CORE_ID 0x135 /* APB bridge core ID */
#define AXI_CORE_ID 0x301 /* AXI/GPV core ID */
#define EROM_CORE_ID 0x366 /* EROM core ID */
#define CC_4706B0_CORE_REV 0x8000001f /* chipcommon core */
#define SOCRAM_4706B0_CORE_REV 0x80000005 /* internal memory core */
#define GMAC_4706B0_CORE_REV 0x80000000 /* Gigabit MAC core */
+#define NS_PCIEG2_CORE_REV_B0 0x7 /* NS-B0 PCIE Gen 2 core rev */
/* There are TWO constants on all HND chips: SI_ENUM_BASE above,
* and chipcommon being the first core:
*/
#define SI_CC_IDX 0
-
/* SOC Interconnect types (aka chip types) */
#define SOCI_SB 0
#define SOCI_AI 1
* communicate w/PMU regarding clock control.
*/
#define SI_CLK_CTL_ST 0x1e0 /* clock control and status */
+#define SI_PWR_CTL_ST 0x1e8 /* For memory clock gating */
/* clk_ctl_st register */
#define CCS_FORCEALP 0x00000001 /* force ALP request */
#define CCS_FORCEHWREQOFF 0x00000020 /* Force HW Clock Request Off */
#define CCS_HQCLKREQ 0x00000040 /* HQ Clock Required */
#define CCS_USBCLKREQ 0x00000100 /* USB Clock Req */
+#define CCS_SECICLKREQ 0x00000100 /* SECI Clock Req */
+#define CCS_ARMFASTCLOCKREQ 0x00000100 /* ARM CR4 fast clock request */
+#define CCS_AVBCLKREQ 0x00000400 /* AVB Clock enable request */
#define CCS_ERSRC_REQ_MASK 0x00000700 /* external resource requests */
#define CCS_ERSRC_REQ_SHIFT 8
#define CCS_ALPAVAIL 0x00010000 /* ALP is available */
#define CCS_HTAVAIL 0x00020000 /* HT is available */
#define CCS_BP_ON_APL 0x00040000 /* RO: Backplane is running on ALP clock */
#define CCS_BP_ON_HT 0x00080000 /* RO: Backplane is running on HT clock */
+#define CCS_ARMFASTCLOCKSTATUS 0x01000000 /* Fast CPU clock is running */
#define CCS_ERSRC_STS_MASK 0x07000000 /* external resource status */
#define CCS_ERSRC_STS_SHIFT 24
#define SOC_KNLDEV_NORFLASH 0x00000002
#define SOC_KNLDEV_NANDFLASH 0x00000004
-#ifndef _LANGUAGE_ASSEMBLY
+#if !defined(_LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLY__)
int soc_boot_dev(void *sih);
int soc_knl_dev(void *sih);
-#endif /* _LANGUAGE_ASSEMBLY */
+#endif /* !defined(_LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLY__) */
#endif /* _HNDSOC_H */
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: linux_osl.h 411126 2013-07-05 01:22:09Z $
+ * $Id: linux_osl.h 491170 2014-07-15 06:23:58Z $
*/
#ifndef _linux_osl_h_
#define _linux_osl_h_
#include <typedefs.h>
+#define DECLSPEC_ALIGN(x) __attribute__ ((aligned(x)))
-
+/* Linux Kernel: File Operations: start */
extern void * osl_os_open_image(char * filename);
extern int osl_os_get_image_block(char * buf, int len, void * image);
extern void osl_os_close_image(void * image);
extern int osl_os_image_size(void *image);
-
+/* Linux Kernel: File Operations: end */
#ifdef BCMDRIVER
-
+/* OSL initialization */
+#ifdef SHARED_OSL_CMN
+extern osl_t *osl_attach(void *pdev, uint bustype, bool pkttag, void **osh_cmn);
+#else
extern osl_t *osl_attach(void *pdev, uint bustype, bool pkttag);
-extern void osl_detach(osl_t *osh);
-
+#endif /* SHARED_OSL_CMN */
+extern void osl_detach(osl_t *osh);
+extern int osl_static_mem_init(osl_t *osh, void *adapter);
+extern int osl_static_mem_deinit(osl_t *osh, void *adapter);
+extern void osl_set_bus_handle(osl_t *osh, void *bus_handle);
+extern void* osl_get_bus_handle(osl_t *osh);
+#ifdef EXYNOS5433_PCIE_WAR
+extern void exynos_pcie_set_l1_exit(void);
+extern void exynos_pcie_clear_l1_exit(void);
+#endif /* EXYNOS5433_PCIE_WAR */
+
+/* Global ASSERT type */
extern uint32 g_assert_type;
-
+/* ASSERT */
#if defined(BCMASSERT_LOG)
#define ASSERT(exp) \
do { if (!(exp)) osl_assert(#exp, __FILE__, __LINE__); } while (0)
#if GCC_VERSION > 30100
#define ASSERT(exp) do {} while (0)
#else
-
+ /* ASSERT could cause segmentation fault on GCC3.1, use empty instead */
#define ASSERT(exp)
- #endif
- #endif
+ #endif /* GCC_VERSION > 30100 */
+ #endif /* __GNUC__ */
#endif
+/* bcm_prefetch_32B */
+static inline void bcm_prefetch_32B(const uint8 *addr, const int cachelines_32B)
+{
+#if defined(BCM47XX_CA9) && (__LINUX_ARM_ARCH__ >= 5)
+ switch (cachelines_32B) {
+ case 4: __asm__ __volatile__("pld\t%a0" :: "p"(addr + 96) : "cc");
+ case 3: __asm__ __volatile__("pld\t%a0" :: "p"(addr + 64) : "cc");
+ case 2: __asm__ __volatile__("pld\t%a0" :: "p"(addr + 32) : "cc");
+ case 1: __asm__ __volatile__("pld\t%a0" :: "p"(addr + 0) : "cc");
+ }
+#endif
+}
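+/*
+ * Sketch (not part of the original sources): prefetch the first two 32-byte
+ * cache lines of a receive buffer before parsing headers; this compiles to
+ * nothing unless BCM47XX_CA9 is defined.
+ *
+ *   bcm_prefetch_32B(PKTDATA(osh, skb), 2);
+ */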
+/* microsecond delay */
#define OSL_DELAY(usec) osl_delay(usec)
extern void osl_delay(uint usec);
extern void osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size);
extern void osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size);
-
+/* PCI configuration space access macros */
#define OSL_PCI_READ_CONFIG(osh, offset, size) \
osl_pci_read_config((osh), (offset), (size))
#define OSL_PCI_WRITE_CONFIG(osh, offset, size, val) \
extern uint32 osl_pci_read_config(osl_t *osh, uint offset, uint size);
extern void osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val);
-
+/* PCI device bus # and slot # */
#define OSL_PCI_BUS(osh) osl_pci_bus(osh)
#define OSL_PCI_SLOT(osh) osl_pci_slot(osh)
+#define OSL_PCIE_DOMAIN(osh) osl_pcie_domain(osh)
+#define OSL_PCIE_BUS(osh) osl_pcie_bus(osh)
extern uint osl_pci_bus(osl_t *osh);
extern uint osl_pci_slot(osl_t *osh);
+extern uint osl_pcie_domain(osl_t *osh);
+extern uint osl_pcie_bus(osl_t *osh);
extern struct pci_dev *osl_pci_device(osl_t *osh);
+/* Pkttag flag should be part of public information */
typedef struct {
bool pkttag;
- bool mmbus;
- pktfree_cb_fn_t tx_fn;
- void *tx_ctx;
+ bool mmbus; /* Bus supports memory-mapped register accesses */
+ pktfree_cb_fn_t tx_fn; /* Callback function for PKTFREE */
+ void *tx_ctx; /* Context to the callback function */
void *unused[3];
} osl_pubinfo_t;
+extern void osl_flag_set(osl_t *osh, uint32 mask);
+extern bool osl_is_flag_set(osl_t *osh, uint32 mask);
+
#define PKTFREESETCB(osh, _tx_fn, _tx_ctx) \
do { \
((osl_pubinfo_t*)osh)->tx_fn = _tx_fn; \
} while (0)
-
+/* host/bus architecture-specific byte swap */
#define BUS_SWAP32(v) (v)
-
#define MALLOC(osh, size) osl_malloc((osh), (size))
+ #define MALLOCZ(osh, size) osl_mallocz((osh), (size))
#define MFREE(osh, addr, size) osl_mfree((osh), (addr), (size))
#define MALLOCED(osh) osl_malloced((osh))
+ #define MEMORY_LEFTOVER(osh) osl_check_memleak(osh)
extern void *osl_malloc(osl_t *osh, uint size);
+ extern void *osl_mallocz(osl_t *osh, uint size);
extern void osl_mfree(osl_t *osh, void *addr, uint size);
extern uint osl_malloced(osl_t *osh);
+ extern uint osl_check_memleak(osl_t *osh);
-#define NATIVE_MALLOC(osh, size) kmalloc(size, GFP_ATOMIC)
-#define NATIVE_MFREE(osh, addr, size) kfree(addr)
#define MALLOC_FAILED(osh) osl_malloc_failed((osh))
extern uint osl_malloc_failed(osl_t *osh);
-
+/* allocate/free shared (dma-able) consistent memory */
#define DMA_CONSISTENT_ALIGN osl_dma_consistent_align()
#define DMA_ALLOC_CONSISTENT(osh, size, align, tot, pap, dmah) \
osl_dma_alloc_consistent((osh), (size), (align), (tot), (pap))
#define DMA_FREE_CONSISTENT(osh, va, size, pa, dmah) \
osl_dma_free_consistent((osh), (void*)(va), (size), (pa))
-extern uint osl_dma_consistent_align(void);
-extern void *osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align, uint *tot, ulong *pap);
-extern void osl_dma_free_consistent(osl_t *osh, void *va, uint size, ulong pa);
+#define DMA_ALLOC_CONSISTENT_FORCE32(osh, size, align, tot, pap, dmah) \
+ osl_dma_alloc_consistent((osh), (size), (align), (tot), (pap))
+#define DMA_FREE_CONSISTENT_FORCE32(osh, va, size, pa, dmah) \
+ osl_dma_free_consistent((osh), (void*)(va), (size), (pa))
-#define DMA_TX 1
-#define DMA_RX 2
+extern uint osl_dma_consistent_align(void);
+extern void *osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align,
+ uint *tot, dmaaddr_t *pap);
+extern void osl_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa);
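+/*
+ * Illustrative pairing of the consistent-memory macros (sketch, not from the
+ * original sources; 'ringsz' and 'dmah' are placeholders supplied by the
+ * caller):
+ *
+ *   uint alloced;
+ *   dmaaddr_t pa;
+ *   void *va = DMA_ALLOC_CONSISTENT(osh, ringsz, DMA_CONSISTENT_ALIGN,
+ *                                   &alloced, &pa, &dmah);
+ *   if (va != NULL) {
+ *       ... use the buffer ...
+ *       DMA_FREE_CONSISTENT(osh, va, alloced, pa, &dmah);
+ *   }
+ */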
+/* map/unmap direction */
+#define DMA_TX 1 /* TX direction for DMA */
+#define DMA_RX 2 /* RX direction for DMA */
+/* map/unmap shared (dma-able) memory */
#define DMA_UNMAP(osh, pa, size, direction, p, dmah) \
osl_dma_unmap((osh), (pa), (size), (direction))
-extern uint osl_dma_map(osl_t *osh, void *va, uint size, int direction, void *p,
+extern dmaaddr_t osl_dma_map(osl_t *osh, void *va, uint size, int direction, void *p,
hnddma_seg_map_t *txp_dmah);
extern void osl_dma_unmap(osl_t *osh, uint pa, uint size, int direction);
+/* API for DMA addressing capability */
+#define OSL_DMADDRWIDTH(osh, addrwidth) ({BCM_REFERENCE(osh); BCM_REFERENCE(addrwidth);})
+
+#if (defined(BCM47XX_CA9) && defined(__ARM_ARCH_7A__))
+ extern void osl_cache_flush(void *va, uint size);
+ extern void osl_cache_inv(void *va, uint size);
+ extern void osl_prefetch(const void *ptr);
+ #define OSL_CACHE_FLUSH(va, len) osl_cache_flush((void *) va, len)
+ #define OSL_CACHE_INV(va, len) osl_cache_inv((void *) va, len)
+ #define OSL_PREFETCH(ptr) osl_prefetch(ptr)
+#ifdef __ARM_ARCH_7A__
+ extern int osl_arch_is_coherent(void);
+ #define OSL_ARCH_IS_COHERENT() osl_arch_is_coherent()
+#else
+ #define OSL_ARCH_IS_COHERENT() NULL
+#endif /* __ARM_ARCH_7A__ */
+#else
+ #define OSL_CACHE_FLUSH(va, len) BCM_REFERENCE(va)
+ #define OSL_CACHE_INV(va, len) BCM_REFERENCE(va)
+ #define OSL_PREFETCH(ptr) BCM_REFERENCE(ptr)
-#define OSL_DMADDRWIDTH(osh, addrwidth) do {} while (0)
-
+ #define OSL_ARCH_IS_COHERENT() NULL
+#endif
+/* register access macros */
+#if defined(BCMSDIO)
#include <bcmsdh.h>
- #define OSL_WRITE_REG(osh, r, v) (bcmsdh_reg_write(NULL, (uintptr)(r), sizeof(*(r)), (v)))
- #define OSL_READ_REG(osh, r) (bcmsdh_reg_read(NULL, (uintptr)(r), sizeof(*(r))))
+ #define OSL_WRITE_REG(osh, r, v) (bcmsdh_reg_write(osl_get_bus_handle(osh), \
+ (uintptr)(r), sizeof(*(r)), (v)))
+ #define OSL_READ_REG(osh, r) (bcmsdh_reg_read(osl_get_bus_handle(osh), \
+ (uintptr)(r), sizeof(*(r))))
+#elif defined(BCM47XX_ACP_WAR)
+extern void osl_pcie_rreg(osl_t *osh, ulong addr, void *v, uint size);
+
+#define OSL_READ_REG(osh, r) \
+ ({\
+ __typeof(*(r)) __osl_v; \
+ osl_pcie_rreg(osh, (uintptr)(r), (void *)&__osl_v, sizeof(*(r))); \
+ __osl_v; \
+ })
+#endif
+
+#if defined(BCM47XX_ACP_WAR)
+ #define SELECT_BUS_WRITE(osh, mmap_op, bus_op) ({BCM_REFERENCE(osh); mmap_op;})
+ #define SELECT_BUS_READ(osh, mmap_op, bus_op) ({BCM_REFERENCE(osh); bus_op;})
+#else
+#if defined(BCMSDIO)
#define SELECT_BUS_WRITE(osh, mmap_op, bus_op) if (((osl_pubinfo_t*)(osh))->mmbus) \
mmap_op else bus_op
#define SELECT_BUS_READ(osh, mmap_op, bus_op) (((osl_pubinfo_t*)(osh))->mmbus) ? \
mmap_op : bus_op
+#else
+ #define SELECT_BUS_WRITE(osh, mmap_op, bus_op) ({BCM_REFERENCE(osh); mmap_op;})
+ #define SELECT_BUS_READ(osh, mmap_op, bus_op) ({BCM_REFERENCE(osh); mmap_op;})
+#endif
+#endif /* BCM47XX_ACP_WAR */
#define OSL_ERROR(bcmerror) osl_error(bcmerror)
extern int osl_error(int bcmerror);
+/* the largest reasonable packet buffer the driver uses for Ethernet MTU, in bytes */
+#define PKTBUFSZ 2048 /* largest reasonable packet buffer, driver uses for ethernet MTU */
-#define PKTBUFSZ 2048
-
+#define OSH_NULL NULL
-#include <linuxver.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
+/*
+ * BINOSL selects the slightly slower function-call-based binary compatible osl.
+ * Macros expand to calls to functions defined in linux_osl.c.
+ */
+#include <linuxver.h> /* use current 2.4.x calling conventions */
+#include <linux/kernel.h> /* for vsn/printf's */
+#include <linux/string.h> /* for mem*, str* */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 29)
#define OSL_SYSUPTIME() ((uint32)jiffies_to_msecs(jiffies))
#else
#define OSL_SYSUPTIME() ((uint32)jiffies * (1000 / HZ))
-#endif
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 29) */
#define printf(fmt, args...) printk(fmt , ## args)
-#include <linux/kernel.h>
-#include <linux/string.h>
-
+#include <linux/kernel.h> /* for vsn/printf's */
+#include <linux/string.h> /* for mem*, str* */
+/* bcopy's: Linux kernel doesn't provide these (anymore) */
#define bcopy(src, dst, len) memcpy((dst), (src), (len))
#define bcmp(b1, b2, len) memcmp((b1), (b2), (len))
#define bzero(b, len) memset((b), '\0', (len))
+/* register access macros */
-
+#ifdef EXYNOS5433_PCIE_WAR
#define R_REG(osh, r) (\
SELECT_BUS_READ(osh, \
({ \
__typeof(*(r)) __osl_v; \
- BCM_REFERENCE(osh); \
+ exynos_pcie_set_l1_exit(); \
switch (sizeof(*(r))) { \
case sizeof(uint8): __osl_v = \
readb((volatile uint8*)(r)); break; \
case sizeof(uint32): __osl_v = \
readl((volatile uint32*)(r)); break; \
} \
+ exynos_pcie_clear_l1_exit(); \
__osl_v; \
}), \
OSL_READ_REG(osh, r)) \
)
+#else
+#define R_REG(osh, r) (\
+ SELECT_BUS_READ(osh, \
+ ({ \
+ __typeof(*(r)) __osl_v; \
+ switch (sizeof(*(r))) { \
+ case sizeof(uint8): __osl_v = \
+ readb((volatile uint8*)(r)); break; \
+ case sizeof(uint16): __osl_v = \
+ readw((volatile uint16*)(r)); break; \
+ case sizeof(uint32): __osl_v = \
+ readl((volatile uint32*)(r)); break; \
+ } \
+ __osl_v; \
+ }), \
+ OSL_READ_REG(osh, r)) \
+)
+#endif /* EXYNOS5433_PCIE_WAR */
+#ifdef EXYNOS5433_PCIE_WAR
+#define W_REG(osh, r, v) do { \
+ exynos_pcie_set_l1_exit(); \
+ SELECT_BUS_WRITE(osh, \
+ switch (sizeof(*(r))) { \
+ case sizeof(uint8): writeb((uint8)(v), (volatile uint8*)(r)); break; \
+ case sizeof(uint16): writew((uint16)(v), (volatile uint16*)(r)); break; \
+ case sizeof(uint32): writel((uint32)(v), (volatile uint32*)(r)); break; \
+ }, \
+ (OSL_WRITE_REG(osh, r, v))); \
+ exynos_pcie_clear_l1_exit(); \
+ } while (0)
+#else
#define W_REG(osh, r, v) do { \
- BCM_REFERENCE(osh); \
SELECT_BUS_WRITE(osh, \
switch (sizeof(*(r))) { \
case sizeof(uint8): writeb((uint8)(v), (volatile uint8*)(r)); break; \
}, \
(OSL_WRITE_REG(osh, r, v))); \
} while (0)
+#endif /* EXYNOS5433_PCIE_WAR */
#define AND_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) & (v))
#define OR_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) | (v))
-
+/* bcopy, bcmp, and bzero functions */
#define bcopy(src, dst, len) memcpy((dst), (src), (len))
#define bcmp(b1, b2, len) memcmp((b1), (b2), (len))
#define bzero(b, len) memset((b), '\0', (len))
-
+/* uncached/cached virtual address */
#define OSL_UNCACHED(va) ((void *)va)
#define OSL_CACHED(va) ((void *)va)
+#define OSL_PREF_RANGE_LD(va, sz) BCM_REFERENCE(va)
+#define OSL_PREF_RANGE_ST(va, sz) BCM_REFERENCE(va)
-#define OSL_CACHE_FLUSH(va, len)
-
-#define OSL_PREF_RANGE_LD(va, sz)
-#define OSL_PREF_RANGE_ST(va, sz)
-
-
+/* get processor cycle count */
#if defined(__i386__)
#define OSL_GETCYCLES(x) rdtscl((x))
#else
#define OSL_GETCYCLES(x) ((x) = 0)
#endif
-
+/* dereference an address that may cause a bus exception */
#define BUSPROBE(val, addr) ({ (val) = R_REG(NULL, (addr)); 0; })
-
+/* map/unmap physical to virtual I/O */
#if !defined(CONFIG_MMC_MSM7X00A)
#define REG_MAP(pa, size) ioremap_nocache((unsigned long)(pa), (unsigned long)(size))
#else
#define REG_MAP(pa, size) (void *)(0)
-#endif
+#endif /* !defined(CONFIG_MMC_MSM7X00A) */
#define REG_UNMAP(va) iounmap((va))
-
+/* shared (dma-able) memory access macros */
#define R_SM(r) *(r)
#define W_SM(r, v) (*(r) = (v))
#define BZERO_SM(r, len) memset((r), '\0', (len))
+/* Because the non-BINOSL implementation of the PKT OSL routines is macro-based (for
+ * performance reasons), we need the Linux headers.
+ */
+#include <linuxver.h> /* use current 2.4.x calling conventions */
-#include <linuxver.h>
-
-
+/* packet primitives */
#ifdef BCMDBG_CTRACE
#define PKTGET(osh, len, send) osl_pktget((osh), (len), __LINE__, __FILE__)
#define PKTDUP(osh, skb) osl_pktdup((osh), (skb), __LINE__, __FILE__)
#else
#define PKTGET(osh, len, send) osl_pktget((osh), (len))
#define PKTDUP(osh, skb) osl_pktdup((osh), (skb))
-#endif
-#define PKTLIST_DUMP(osh, buf)
-#define PKTDBG_TRACE(osh, pkt, bit)
+#endif /* BCMDBG_CTRACE */
+#define PKTLIST_DUMP(osh, buf) BCM_REFERENCE(osh)
+#define PKTDBG_TRACE(osh, pkt, bit) BCM_REFERENCE(osh)
#define PKTFREE(osh, skb, send) osl_pktfree((osh), (skb), (send))
#ifdef CONFIG_DHD_USE_STATIC_BUF
#define PKTGET_STATIC(osh, len, send) osl_pktget_static((osh), (len))
#else
#define PKTGET_STATIC PKTGET
#define PKTFREE_STATIC PKTFREE
-#endif
-#define PKTDATA(osh, skb) (((struct sk_buff*)(skb))->data)
-#define PKTLEN(osh, skb) (((struct sk_buff*)(skb))->len)
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+#define PKTDATA(osh, skb) ({BCM_REFERENCE(osh); (((struct sk_buff*)(skb))->data);})
+#define PKTLEN(osh, skb) ({BCM_REFERENCE(osh); (((struct sk_buff*)(skb))->len);})
#define PKTHEADROOM(osh, skb) (PKTDATA(osh, skb)-(((struct sk_buff*)(skb))->head))
-#define PKTTAILROOM(osh, skb) skb_tailroom((struct sk_buff*)(skb))
-#define PKTPADTAILROOM(osh, skb, padlen) osh_pktpadtailroom((osh), (skb), (padlen))
-#define PKTNEXT(osh, skb) (((struct sk_buff*)(skb))->next)
-#define PKTSETNEXT(osh, skb, x) (((struct sk_buff*)(skb))->next = (struct sk_buff*)(x))
-#define PKTSETLEN(osh, skb, len) __skb_trim((struct sk_buff*)(skb), (len))
-#define PKTPUSH(osh, skb, bytes) skb_push((struct sk_buff*)(skb), (bytes))
-#define PKTPULL(osh, skb, bytes) skb_pull((struct sk_buff*)(skb), (bytes))
+#define PKTEXPHEADROOM(osh, skb, b) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ skb_realloc_headroom((struct sk_buff*)(skb), (b)); \
+ })
+#define PKTTAILROOM(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ skb_tailroom((struct sk_buff*)(skb)); \
+ })
+#define PKTPADTAILROOM(osh, skb, padlen) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ skb_pad((struct sk_buff*)(skb), (padlen)); \
+ })
+#define PKTNEXT(osh, skb) ({BCM_REFERENCE(osh); (((struct sk_buff*)(skb))->next);})
+#define PKTSETNEXT(osh, skb, x) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ (((struct sk_buff*)(skb))->next = (struct sk_buff*)(x)); \
+ })
+#define PKTSETLEN(osh, skb, len) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ __skb_trim((struct sk_buff*)(skb), (len)); \
+ })
+#define PKTPUSH(osh, skb, bytes) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ skb_push((struct sk_buff*)(skb), (bytes)); \
+ })
+#define PKTPULL(osh, skb, bytes) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ skb_pull((struct sk_buff*)(skb), (bytes)); \
+ })
#define PKTTAG(skb) ((void*)(((struct sk_buff*)(skb))->cb))
-#define PKTSETPOOL(osh, skb, x, y) do {} while (0)
-#define PKTPOOL(osh, skb) FALSE
-#define PKTSHRINK(osh, m) (m)
+#define PKTSETPOOL(osh, skb, x, y) BCM_REFERENCE(osh)
+#define PKTPOOL(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb); FALSE;})
+#define PKTFREELIST(skb) PKTLINK(skb)
+#define PKTSETFREELIST(skb, x) PKTSETLINK((skb), (x))
+#define PKTPTR(skb) (skb)
+#define PKTID(skb) ({BCM_REFERENCE(skb); 0;})
+#define PKTSETID(skb, id) ({BCM_REFERENCE(skb); BCM_REFERENCE(id);})
+#define PKTSHRINK(osh, m) ({BCM_REFERENCE(osh); m;})
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)
+#define PKTORPHAN(skb) skb_orphan(skb)
+#else
+#define PKTORPHAN(skb) ({BCM_REFERENCE(skb); 0;})
+#endif /* LINUX VERSION >= 3.6 */
+
#ifdef BCMDBG_CTRACE
#define DEL_CTRACE(zosh, zskb) { \
}
#define PKTCALLER(zskb) UPDATE_CTRACE((struct sk_buff *)zskb, (char *)__FUNCTION__, __LINE__)
-#endif
+#endif /* BCMDBG_CTRACE */
#ifdef CTFPOOL
#define CTFPOOL_REFILL_THRESH 3
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
#define FASTBUF (1 << 0)
-#define PKTSETFAST(osh, skb) ((((struct sk_buff*)(skb))->pktc_flags) |= FASTBUF)
-#define PKTCLRFAST(osh, skb) ((((struct sk_buff*)(skb))->pktc_flags) &= (~FASTBUF))
-#define PKTISFAST(osh, skb) ((((struct sk_buff*)(skb))->pktc_flags) & FASTBUF)
+#define PKTSETFAST(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ ((((struct sk_buff*)(skb))->pktc_flags) |= FASTBUF); \
+ })
+#define PKTCLRFAST(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ ((((struct sk_buff*)(skb))->pktc_flags) &= (~FASTBUF)); \
+ })
+#define PKTISFAST(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ ((((struct sk_buff*)(skb))->pktc_flags) & FASTBUF); \
+ })
#define PKTFAST(osh, skb) (((struct sk_buff*)(skb))->pktc_flags)
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
#define FASTBUF (1 << 16)
-#define PKTSETFAST(osh, skb) ((((struct sk_buff*)(skb))->mac_len) |= FASTBUF)
-#define PKTCLRFAST(osh, skb) ((((struct sk_buff*)(skb))->mac_len) &= (~FASTBUF))
-#define PKTISFAST(osh, skb) ((((struct sk_buff*)(skb))->mac_len) & FASTBUF)
+#define PKTSETFAST(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ ((((struct sk_buff*)(skb))->mac_len) |= FASTBUF); \
+ })
+#define PKTCLRFAST(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ ((((struct sk_buff*)(skb))->mac_len) &= (~FASTBUF)); \
+ })
+#define PKTISFAST(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ ((((struct sk_buff*)(skb))->mac_len) & FASTBUF); \
+ })
#define PKTFAST(osh, skb) (((struct sk_buff*)(skb))->mac_len)
#else
#define FASTBUF (1 << 0)
-#define PKTSETFAST(osh, skb) ((((struct sk_buff*)(skb))->__unused) |= FASTBUF)
-#define PKTCLRFAST(osh, skb) ((((struct sk_buff*)(skb))->__unused) &= (~FASTBUF))
-#define PKTISFAST(osh, skb) ((((struct sk_buff*)(skb))->__unused) & FASTBUF)
+#define PKTSETFAST(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ ((((struct sk_buff*)(skb))->__unused) |= FASTBUF); \
+ })
+#define PKTCLRFAST(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ ((((struct sk_buff*)(skb))->__unused) &= (~FASTBUF)); \
+ })
+#define PKTISFAST(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ ((((struct sk_buff*)(skb))->__unused) & FASTBUF); \
+ })
#define PKTFAST(osh, skb) (((struct sk_buff*)(skb))->__unused)
-#endif
+#endif /* 2.6.22 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
#define CTFPOOLPTR(osh, skb) (((struct sk_buff*)(skb))->ctfpool)
extern int32 osl_ctfpool_init(osl_t *osh, uint numobj, uint size);
extern void osl_ctfpool_cleanup(osl_t *osh);
extern void osl_ctfpool_stats(osl_t *osh, void *b);
-#else
-#define PKTSETFAST(osh, skb)
-#define PKTCLRFAST(osh, skb)
-#define PKTISFAST(osh, skb) (FALSE)
-#endif
+#else /* CTFPOOL */
+#define PKTSETFAST(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define PKTCLRFAST(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define PKTISFAST(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb); FALSE;})
+#endif /* CTFPOOL */
-#define PKTSETCTF(osh, skb)
-#define PKTCLRCTF(osh, skb)
-#define PKTISCTF(osh, skb) (FALSE)
+#define PKTSETCTF(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define PKTCLRCTF(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define PKTISCTF(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb); FALSE;})
#ifdef HNDCTF
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
#define SKIPCT (1 << 2)
#define CHAINED (1 << 3)
-#define PKTSETSKIPCT(osh, skb) (((struct sk_buff*)(skb))->pktc_flags |= SKIPCT)
-#define PKTCLRSKIPCT(osh, skb) (((struct sk_buff*)(skb))->pktc_flags &= (~SKIPCT))
-#define PKTSKIPCT(osh, skb) (((struct sk_buff*)(skb))->pktc_flags & SKIPCT)
-#define PKTSETCHAINED(osh, skb) (((struct sk_buff*)(skb))->pktc_flags |= CHAINED)
-#define PKTCLRCHAINED(osh, skb) (((struct sk_buff*)(skb))->pktc_flags &= (~CHAINED))
+#define PKTSETSKIPCT(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ (((struct sk_buff*)(skb))->pktc_flags |= SKIPCT); \
+ })
+#define PKTCLRSKIPCT(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ (((struct sk_buff*)(skb))->pktc_flags &= (~SKIPCT)); \
+ })
+#define PKTSKIPCT(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ (((struct sk_buff*)(skb))->pktc_flags & SKIPCT); \
+ })
+#define PKTSETCHAINED(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ (((struct sk_buff*)(skb))->pktc_flags |= CHAINED); \
+ })
+#define PKTCLRCHAINED(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ (((struct sk_buff*)(skb))->pktc_flags &= (~CHAINED)); \
+ })
#define PKTISCHAINED(skb) (((struct sk_buff*)(skb))->pktc_flags & CHAINED)
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
#define SKIPCT (1 << 18)
#define CHAINED (1 << 19)
-#define PKTSETSKIPCT(osh, skb) (((struct sk_buff*)(skb))->mac_len |= SKIPCT)
-#define PKTCLRSKIPCT(osh, skb) (((struct sk_buff*)(skb))->mac_len &= (~SKIPCT))
-#define PKTSKIPCT(osh, skb) (((struct sk_buff*)(skb))->mac_len & SKIPCT)
-#define PKTSETCHAINED(osh, skb) (((struct sk_buff*)(skb))->mac_len |= CHAINED)
-#define PKTCLRCHAINED(osh, skb) (((struct sk_buff*)(skb))->mac_len &= (~CHAINED))
+#define PKTSETSKIPCT(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ (((struct sk_buff*)(skb))->mac_len |= SKIPCT); \
+ })
+#define PKTCLRSKIPCT(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ (((struct sk_buff*)(skb))->mac_len &= (~SKIPCT)); \
+ })
+#define PKTSKIPCT(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ (((struct sk_buff*)(skb))->mac_len & SKIPCT); \
+ })
+#define PKTSETCHAINED(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ (((struct sk_buff*)(skb))->mac_len |= CHAINED); \
+ })
+#define PKTCLRCHAINED(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ (((struct sk_buff*)(skb))->mac_len &= (~CHAINED)); \
+ })
#define PKTISCHAINED(skb) (((struct sk_buff*)(skb))->mac_len & CHAINED)
-#else
+#else /* 2.6.22 */
#define SKIPCT (1 << 2)
#define CHAINED (1 << 3)
-#define PKTSETSKIPCT(osh, skb) (((struct sk_buff*)(skb))->__unused |= SKIPCT)
-#define PKTCLRSKIPCT(osh, skb) (((struct sk_buff*)(skb))->__unused &= (~SKIPCT))
-#define PKTSKIPCT(osh, skb) (((struct sk_buff*)(skb))->__unused & SKIPCT)
-#define PKTSETCHAINED(osh, skb) (((struct sk_buff*)(skb))->__unused |= CHAINED)
-#define PKTCLRCHAINED(osh, skb) (((struct sk_buff*)(skb))->__unused &= (~CHAINED))
+#define PKTSETSKIPCT(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ (((struct sk_buff*)(skb))->__unused |= SKIPCT); \
+ })
+#define PKTCLRSKIPCT(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ (((struct sk_buff*)(skb))->__unused &= (~SKIPCT)); \
+ })
+#define PKTSKIPCT(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ (((struct sk_buff*)(skb))->__unused & SKIPCT); \
+ })
+#define PKTSETCHAINED(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ (((struct sk_buff*)(skb))->__unused |= CHAINED); \
+ })
+#define PKTCLRCHAINED(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ (((struct sk_buff*)(skb))->__unused &= (~CHAINED)); \
+ })
#define PKTISCHAINED(skb) (((struct sk_buff*)(skb))->__unused & CHAINED)
-#endif
+#endif /* 2.6.22 */
typedef struct ctf_mark {
uint32 value;
} ctf_mark_t;
#define CTF_MARK(m) (m.value)
-#else
-#define PKTSETSKIPCT(osh, skb)
-#define PKTCLRSKIPCT(osh, skb)
-#define PKTSKIPCT(osh, skb)
-#define CTF_MARK(m) 0
+#else /* HNDCTF */
+#define PKTSETSKIPCT(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define PKTCLRSKIPCT(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define PKTSKIPCT(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define CTF_MARK(m) ({BCM_REFERENCE(m); 0;})
+#endif /* HNDCTF */
+
+#if defined(BCM_GMAC3)
+
+/** pktalloced accounting in devices using GMAC Bulk Forwarding to DHD */
+
+/* Account for packets delivered to downstream forwarder by GMAC interface. */
+extern void osl_pkt_tofwder(osl_t *osh, void *skbs, int skb_cnt);
+#define PKTTOFWDER(osh, skbs, skb_cnt) \
+ osl_pkt_tofwder(((osl_t *)osh), (void *)(skbs), (skb_cnt))
+
+/* Account for packets received from downstream forwarder. */
+#if defined(BCMDBG_CTRACE) /* pkt logging */
+extern void osl_pkt_frmfwder(osl_t *osh, void *skbs, int skb_cnt,
+ int line, char *file);
+#define PKTFRMFWDER(osh, skbs, skb_cnt) \
+ osl_pkt_frmfwder(((osl_t *)osh), (void *)(skbs), (skb_cnt), \
+ __LINE__, __FILE__)
+#else /* ! BCMDBG_CTRACE */
+extern void osl_pkt_frmfwder(osl_t *osh, void *skbs, int skb_cnt);
+#define PKTFRMFWDER(osh, skbs, skb_cnt) \
+ osl_pkt_frmfwder(((osl_t *)osh), (void *)(skbs), (skb_cnt))
#endif
+
+/** GMAC Forwarded packet tagging for reduced cache flush/invalidate.
+ * In a FWDERBUF-tagged packet, only FWDER_PKTMAPSZ bytes of data would have
+ * been accessed in the GMAC forwarder. This may be used to limit the number of
+ * cachelines that need to be flushed or invalidated.
+ * Packets sent to the DHD from a GMAC forwarder will be tagged w/ FWDERBUF.
+ * DHD may clear the FWDERBUF tag, if more than FWDER_PKTMAPSZ was accessed.
+ * Likewise, a debug print of a packet payload in say the ethernet driver needs
+ * to be accompanied with a clear of the FWDERBUF tag.
+ */
+
+/** Forwarded packets, have a HWRXOFF sized rx header (etc.h) */
+#define FWDER_HWRXOFF (30)
+
+/** Maximum amount of a packet's data that a downstream forwarder (GMAC) may have
+ * read into the L1 cache (not dirty). This may be used in reduced cache ops.
+ *
+ * Max 56: ET HWRXOFF[30] + BRCMHdr[4] + EtherHdr[14] + VlanHdr[4] + IP[4]
+ */
+#define FWDER_PKTMAPSZ (FWDER_HWRXOFF + 4 + 14 + 4 + 4)
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
+
+#define FWDERBUF (1 << 4)
+#define PKTSETFWDERBUF(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ (((struct sk_buff*)(skb))->pktc_flags |= FWDERBUF); \
+ })
+#define PKTCLRFWDERBUF(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ (((struct sk_buff*)(skb))->pktc_flags &= (~FWDERBUF)); \
+ })
+#define PKTISFWDERBUF(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ (((struct sk_buff*)(skb))->pktc_flags & FWDERBUF); \
+ })
+
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
+
+#define FWDERBUF (1 << 20)
+#define PKTSETFWDERBUF(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ (((struct sk_buff*)(skb))->mac_len |= FWDERBUF); \
+ })
+#define PKTCLRFWDERBUF(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ (((struct sk_buff*)(skb))->mac_len &= (~FWDERBUF)); \
+ })
+#define PKTISFWDERBUF(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ (((struct sk_buff*)(skb))->mac_len & FWDERBUF); \
+ })
+
+#else /* 2.6.22 */
+
+#define FWDERBUF (1 << 4)
+#define PKTSETFWDERBUF(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ (((struct sk_buff*)(skb))->__unused |= FWDERBUF); \
+ })
+#define PKTCLRFWDERBUF(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ (((struct sk_buff*)(skb))->__unused &= (~FWDERBUF)); \
+ })
+#define PKTISFWDERBUF(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ (((struct sk_buff*)(skb))->__unused & FWDERBUF); \
+ })
+
+#endif /* 2.6.22 */
+
+#else /* ! BCM_GMAC3 */
+
+#define PKTSETFWDERBUF(osh, skb) ({ BCM_REFERENCE(osh); BCM_REFERENCE(skb); })
+#define PKTCLRFWDERBUF(osh, skb) ({ BCM_REFERENCE(osh); BCM_REFERENCE(skb); })
+#define PKTISFWDERBUF(osh, skb) ({ BCM_REFERENCE(osh); BCM_REFERENCE(skb); FALSE;})
+
+#endif /* ! BCM_GMAC3 */
+
+
+#ifdef HNDCTF
+/* For broadstream iqos */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
+#define TOBR (1 << 5)
+#define PKTSETTOBR(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ (((struct sk_buff*)(skb))->pktc_flags |= TOBR); \
+ })
+#define PKTCLRTOBR(osh, skb) \
+ ({ \
+ BCM_REFERENCE(osh); \
+ (((struct sk_buff*)(skb))->pktc_flags &= (~TOBR)); \
+ })
+#define PKTISTOBR(skb) (((struct sk_buff*)(skb))->pktc_flags & TOBR)
+#define PKTSETCTFIPCTXIF(skb, ifp) (((struct sk_buff*)(skb))->ctf_ipc_txif = ifp)
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
+#define PKTSETTOBR(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define PKTCLRTOBR(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define PKTISTOBR(skb) ({BCM_REFERENCE(skb); FALSE;})
+#define PKTSETCTFIPCTXIF(skb, ifp) ({BCM_REFERENCE(skb); BCM_REFERENCE(ifp);})
+#else /* 2.6.22 */
+#define PKTSETTOBR(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define PKTCLRTOBR(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define PKTISTOBR(skb) ({BCM_REFERENCE(skb); FALSE;})
+#define PKTSETCTFIPCTXIF(skb, ifp) ({BCM_REFERENCE(skb); BCM_REFERENCE(ifp);})
+#endif /* 2.6.22 */
+#else /* HNDCTF */
+#define PKTSETTOBR(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define PKTCLRTOBR(osh, skb) ({BCM_REFERENCE(osh); BCM_REFERENCE(skb);})
+#define PKTISTOBR(skb) ({BCM_REFERENCE(skb); FALSE;})
+#endif /* HNDCTF */
+
+
+#ifdef BCMFA
+#ifdef BCMFA_HW_HASH
+#define PKTSETFAHIDX(skb, idx) (((struct sk_buff*)(skb))->napt_idx = idx)
+#else
+#define PKTSETFAHIDX(skb, idx) ({BCM_REFERENCE(skb); BCM_REFERENCE(idx);})
+#endif /* BCMFA_HW_HASH */
+#define PKTGETFAHIDX(skb) (((struct sk_buff*)(skb))->napt_idx)
+#define PKTSETFADEV(skb, imp) (((struct sk_buff*)(skb))->dev = imp)
+#define PKTSETRXDEV(skb) (((struct sk_buff*)(skb))->rxdev = ((struct sk_buff*)(skb))->dev)
+
+#define AUX_TCP_FIN_RST (1 << 0)
+#define AUX_FREED (1 << 1)
+#define PKTSETFAAUX(skb) (((struct sk_buff*)(skb))->napt_flags |= AUX_TCP_FIN_RST)
+#define PKTCLRFAAUX(skb) (((struct sk_buff*)(skb))->napt_flags &= (~AUX_TCP_FIN_RST))
+#define PKTISFAAUX(skb) (((struct sk_buff*)(skb))->napt_flags & AUX_TCP_FIN_RST)
+#define PKTSETFAFREED(skb) (((struct sk_buff*)(skb))->napt_flags |= AUX_FREED)
+#define PKTCLRFAFREED(skb) (((struct sk_buff*)(skb))->napt_flags &= (~AUX_FREED))
+#define PKTISFAFREED(skb) (((struct sk_buff*)(skb))->napt_flags & AUX_FREED)
+#define PKTISFABRIDGED(skb) PKTISFAAUX(skb)
+#else
+#define PKTISFAAUX(skb) ({BCM_REFERENCE(skb); FALSE;})
+#define PKTISFABRIDGED(skb) ({BCM_REFERENCE(skb); FALSE;})
+#define PKTISFAFREED(skb) ({BCM_REFERENCE(skb); FALSE;})
+
+#define PKTCLRFAAUX(skb) BCM_REFERENCE(skb)
+#define PKTSETFAFREED(skb) BCM_REFERENCE(skb)
+#define PKTCLRFAFREED(skb) BCM_REFERENCE(skb)
+#endif /* BCMFA */
+
extern void osl_pktfree(osl_t *osh, void *skb, bool send);
extern void *osl_pktget_static(osl_t *osh, uint len);
extern void osl_pktfree_static(osl_t *osh, void *skb, bool send);
-extern int osh_pktpadtailroom(osl_t *osh, void* skb, int pad);
+extern void osl_pktclone(osl_t *osh, void **pkt);
#ifdef BCMDBG_CTRACE
#define PKT_CTRACE_DUMP(osh, b) osl_ctrace_dump((osh), (b))
extern void *osl_pkt_frmnative(osl_t *osh, void *skb);
extern void *osl_pktget(osl_t *osh, uint len);
extern void *osl_pktdup(osl_t *osh, void *skb);
-#endif
+#endif /* BCMDBG_CTRACE */
extern struct sk_buff *osl_pkt_tonative(osl_t *osh, void *pkt);
#ifdef BCMDBG_CTRACE
#define PKTFRMNATIVE(osh, skb) osl_pkt_frmnative(((osl_t *)osh), \
#define PKTISFRMNATIVE(osh, skb) osl_pkt_is_frmnative((osl_t *)(osh), (struct sk_buff *)(skb))
#else
#define PKTFRMNATIVE(osh, skb) osl_pkt_frmnative(((osl_t *)osh), (struct sk_buff*)(skb))
-#endif
+#endif /* BCMDBG_CTRACE */
#define PKTTONATIVE(osh, pkt) osl_pkt_tonative((osl_t *)(osh), (pkt))
#define PKTLINK(skb) (((struct sk_buff*)(skb))->prev)
#define PKTSUMNEEDED(skb) (((struct sk_buff*)(skb))->ip_summed == CHECKSUM_HW)
#define PKTSETSUMGOOD(skb, x) (((struct sk_buff*)(skb))->ip_summed = \
((x) ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE))
-
+/* PKTSETSUMNEEDED and PKTSUMGOOD are not possible because skb->ip_summed is overloaded */
#define PKTSHARED(skb) (((struct sk_buff*)(skb))->cloned)
#ifdef CONFIG_NF_CONNTRACK_MARK
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
#define PKTMARK(p) (((struct sk_buff *)(p))->mark)
#define PKTSETMARK(p, m) ((struct sk_buff *)(p))->mark = (m)
-#else
+#else /* !2.6.0 */
#define PKTMARK(p) (((struct sk_buff *)(p))->nfmark)
#define PKTSETMARK(p, m) ((struct sk_buff *)(p))->nfmark = (m)
-#endif
-#else
+#endif /* 2.6.0 */
+#else /* CONFIG_NF_CONNTRACK_MARK */
#define PKTMARK(p) 0
#define PKTSETMARK(p, m)
-#endif
+#endif /* CONFIG_NF_CONNTRACK_MARK */
#define PKTALLOCED(osh) osl_pktalloced(osh)
extern uint osl_pktalloced(osl_t *osh);
+#define OSL_RAND() osl_rand()
+extern uint32 osl_rand(void);
+
#define DMA_MAP(osh, va, size, direction, p, dmah) \
osl_dma_map((osh), (va), (size), (direction), (p), (dmah))
#ifdef PKTC
-
+/* Use 8 bytes of the skb tstamp field to store the info below */
struct chain_node {
struct sk_buff *link;
unsigned int flags:3, pkts:9, bytes:20;
(t) = (p); \
} \
} while (0)
-#endif
-
-#else
+#endif /* PKTC */
+#else /* ! BCMDRIVER */
+/* ASSERT */
#define ASSERT(exp) do {} while (0)
-
+/* MALLOC and MFREE */
#define MALLOC(o, l) malloc(l)
#define MFREE(o, p, l) free(p)
#include <stdlib.h>
-
+/* str* and mem* functions */
#include <string.h>
-
+/* *printf functions */
#include <stdio.h>
-
+/* bcopy, bcmp, and bzero */
extern void bcopy(const void *src, void *dst, size_t len);
extern int bcmp(const void *b1, const void *b2, size_t len);
extern void bzero(void *b, size_t len);
-#endif
+#endif /* ! BCMDRIVER */
-#endif
+#endif /* _linux_osl_h_ */
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: linuxver.h 417757 2013-08-12 12:24:45Z $
+ * $Id: linuxver.h 431983 2013-10-25 06:53:27Z $
*/
#ifndef _linuxver_h_
#else
#include <linux/autoconf.h>
#endif
-#endif
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */
#include <linux/module.h>
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0))
+#include <linux/kconfig.h>
+#endif
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0))
+/* __NO_VERSION__ must be defined for all linkables except one in 2.2 */
#ifdef __UNDEF_NO_VERSION__
#undef __NO_VERSION__
#else
#define __NO_VERSION__
#endif
-#endif
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0) */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
#define module_param(_name_, _type_, _perm_) MODULE_PARM(_name_, "i")
MODULE_PARM(_string_, "c" __MODULE_STRING(_size_))
#endif
-
+/* linux/malloc.h is deprecated, use linux/slab.h instead. */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 9))
#include <linux/malloc.h>
#else
#include <linux/semaphore.h>
#else
#include <asm/semaphore.h>
-#endif
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28))
#undef IP_TOS
-#endif
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)) */
#include <asm/io.h>
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41))
#ifndef flush_scheduled_work
#define flush_scheduled_work() flush_scheduled_tasks()
#endif
-#endif
+#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41) */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
-#define DAEMONIZE(a)
+#define DAEMONIZE(a) do { \
+ allow_signal(SIGKILL); \
+ allow_signal(SIGTERM); \
+ } while (0)
#elif ((LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) && \
(LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)))
#define DAEMONIZE(a) daemonize(a); \
allow_signal(SIGKILL); \
allow_signal(SIGTERM);
-#else
+#else /* Linux 2.4 (w/o preemption patch) */
#define RAISE_RX_SOFTIRQ() \
cpu_raise_softirq(smp_processor_id(), NET_RX_SOFTIRQ)
#define DAEMONIZE(a) daemonize(); \
do { if (a) \
strncpy(current->comm, a, MIN(sizeof(current->comm), (strlen(a)))); \
} while (0);
-#endif
+#endif /* LINUX_VERSION_CODE */
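+/*
+ * Illustrative thread prologue (sketch, not from the original sources;
+ * 'wl_worker' is a made-up name): on >= 3.8 DAEMONIZE() only re-enables
+ * SIGKILL/SIGTERM since kernel threads already start daemonized, while
+ * older kernels also need the daemonize() call.
+ *
+ *   static int wl_worker(void *data)
+ *   {
+ *       DAEMONIZE("wl_worker");
+ *       ... wait on a semaphore / do work until asked to stop ...
+ *       return 0;
+ *   }
+ */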
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
#define MY_INIT_WORK(_work, _func) INIT_WORK(_work, _func)
#define MY_INIT_WORK(_work, _func) INIT_WORK(_work, _func, _work)
#if !(LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18) && defined(RHEL_MAJOR) && \
(RHEL_MAJOR == 5))
-
+/* Exclude RHEL 5 */
typedef void (*work_func_t)(void *work);
#endif
-#endif
+#endif /* >= 2.6.20 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
-
+/* Some distributions have their own 2.6.x compatibility layers */
#ifndef IRQ_NONE
typedef void irqreturn_t;
#define IRQ_NONE
#endif
#else
typedef irqreturn_t(*FN_ISR) (int irq, void *dev_id, struct pt_regs *ptregs);
-#endif
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
#define IRQF_SHARED SA_SHIRQ
-#endif
+#endif /* < 2.6.18 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17)
#ifdef CONFIG_NET_RADIO
#define CONFIG_WIRELESS_EXT
#endif
-#endif
+#endif /* < 2.6.17 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 67)
#define MOD_INC_USE_COUNT
#define MOD_DEC_USE_COUNT
-#endif
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 67) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
#include <linux/sched.h>
-#endif
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0))
#include <linux/sched/rt.h>
-#endif
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
#include <net/lib80211.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
#include <net/ieee80211.h>
#endif
-#endif
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30) */
#ifndef __exit
#define __devexit
#endif
#ifndef __devinit
-#define __devinit __init
-#endif
+# if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+# define __devinit __init
+# else
+/* All devices are hotpluggable since Linux 3.8.0 */
+# define __devinit
+# endif
+#endif /* !__devinit */
#ifndef __devinitdata
#define __devinitdata
#endif
#define pci_get_drvdata(dev) (dev)->sysdata
#define pci_set_drvdata(dev, value) (dev)->sysdata = (value)
-
+/*
+ * New-style (2.4.x) PCI/hot-pluggable PCI/CardBus registration
+ */
struct pci_device_id {
- unsigned int vendor, device;
- unsigned int subvendor, subdevice;
- unsigned int class, class_mask;
- unsigned long driver_data;
+ unsigned int vendor, device; /* Vendor and device ID or PCI_ANY_ID */
+ unsigned int subvendor, subdevice; /* Subsystem ID's or PCI_ANY_ID */
+ unsigned int class, class_mask; /* (class,subclass,prog-if) triplet */
+ unsigned long driver_data; /* Data private to the driver */
};
struct pci_driver {
struct list_head node;
char *name;
- const struct pci_device_id *id_table;
+ const struct pci_device_id *id_table; /* NULL if wants all devices */
int (*probe)(struct pci_dev *dev,
- const struct pci_device_id *id);
- void (*remove)(struct pci_dev *dev);
- void (*suspend)(struct pci_dev *dev);
- void (*resume)(struct pci_dev *dev);
+ const struct pci_device_id *id); /* New device inserted */
+ void (*remove)(struct pci_dev *dev); /* Device removed (NULL if not a hot-plug
+ * capable driver)
+ */
+ void (*suspend)(struct pci_dev *dev); /* Device suspended */
+ void (*resume)(struct pci_dev *dev); /* Device woken up */
};
#define MODULE_DEVICE_TABLE(type, name)
#define PCI_ANY_ID (~0)
-
+/* compatpci.c */
#define pci_module_init pci_register_driver
extern int pci_register_driver(struct pci_driver *drv);
extern void pci_unregister_driver(struct pci_driver *drv);
-#endif
+#endif /* PCI registration */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18))
#define pci_module_init pci_register_driver
#define module_init(x) __initcall(x);
#define module_exit(x) __exitcall(x);
#endif
-#endif
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)
#define WL_USE_NETDEV_OPS
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 42))
-
+/*
+ * DMA mapping
+ *
+ * See linux/Documentation/DMA-mapping.txt
+ */
#ifndef PCI_DMA_TODEVICE
#define PCI_DMA_TODEVICE 1
typedef u32 dma_addr_t;
-
+/* Pure 2^n version of get_order */
static inline int get_order(unsigned long size)
{
int order;
#define pci_map_single(cookie, address, size, dir) virt_to_bus(address)
#define pci_unmap_single(cookie, address, size, dir)
-#endif
+#endif /* DMA mapping */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 43))
#define dev_kfree_skb_any(a) dev_kfree_skb(a)
#define netif_down(dev) do { (dev)->start = 0; } while (0)
-
+/* pcmcia-cs provides its own netdevice compatibility layer */
#ifndef _COMPAT_NETDEVICE_H
-
+/*
+ * SoftNet
+ *
+ * For pre-softnet kernels we need to tell the upper layer not to
+ * re-enter start_xmit() while we are in there. However softnet
+ * guarantees not to enter while we are in there so there is no need
+ * to do the netif_stop_queue() dance unless the transmit queue really
+ * gets stuck. This should also improve performance according to tests
+ * done by Aman Singla.
+ */
#define dev_kfree_skb_irq(a) dev_kfree_skb(a)
#define netif_wake_queue(dev) \
#define netif_queue_stopped(dev) (dev)->tbusy
#define netif_running(dev) (dev)->start
-#endif
+#endif /* _COMPAT_NETDEVICE_H */
#define netif_device_attach(dev) netif_start_queue(dev)
#define netif_device_detach(dev) netif_stop_queue(dev)
-
+/* 2.4.x renamed bottom halves to tasklets */
#define tasklet_struct tq_struct
static inline void tasklet_schedule(struct tasklet_struct *tasklet)
{
}
#define tasklet_kill(tasklet) { do {} while (0); }
-
+/* 2.4.x introduced del_timer_sync() */
#define del_timer_sync(timer) del_timer(timer)
#else
#define netif_down(dev)
-#endif
+#endif /* SoftNet */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3))
-
+/*
+ * Emit code to initialise a tq_struct's routine and data pointers
+ */
#define PREPARE_TQUEUE(_tq, _routine, _data) \
do { \
(_tq)->routine = _routine; \
(_tq)->data = _data; \
} while (0)
-
+/*
+ * Emit code to initialise all of a tq_struct
+ */
#define INIT_TQUEUE(_tq, _routine, _data) \
do { \
INIT_LIST_HEAD(&(_tq)->list); \
PREPARE_TQUEUE((_tq), (_routine), (_data)); \
} while (0)
-#endif
-
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3) */
+/* Power management related macros & routines */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 9)
#define PCI_SAVE_STATE(a, b) pci_save_state(a)
#define PCI_RESTORE_STATE(a, b) pci_restore_state(a)
for (i = 0; i < 16; i++)
pci_write_config_dword(dev, i * 4, buffer[i]);
}
-
+ /*
+ * otherwise, write the context information we know from bootup.
+ * This works around a problem where warm-booting from Windows
+ * combined with a D3(hot)->D0 transition causes PCI config
+ * header data to be forgotten.
+ */
else {
for (i = 0; i < 6; i ++)
pci_write_config_dword(dev,
}
return 0;
}
-#endif
-
+#endif /* PCI power management */
+/* Old cp0 access macros deprecated in 2.4.19 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 19))
#define read_c0_count() read_32bit_cp0_register(CP0_COUNT)
#endif
-
+/* Module refcount handled internally in 2.6.x */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
#ifndef SET_MODULE_OWNER
#define SET_MODULE_OWNER(dev) do {} while (0)
#define OLD_MOD_INC_USE_COUNT do {} while (0)
#define OLD_MOD_DEC_USE_COUNT do {} while (0)
#endif
-#else
+#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24) */
#ifndef SET_MODULE_OWNER
#define SET_MODULE_OWNER(dev) do {} while (0)
#endif
#endif
#define OLD_MOD_INC_USE_COUNT MOD_INC_USE_COUNT
#define OLD_MOD_DEC_USE_COUNT MOD_DEC_USE_COUNT
-#endif
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24) */
#ifndef SET_NETDEV_DEV
#define SET_NETDEV_DEV(net, pdev) do {} while (0)
#ifndef HAVE_FREE_NETDEV
#define free_netdev(dev) kfree(dev)
#endif
-#endif
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0) */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
-
+/* struct packet_type redefined in 2.6.x */
#define af_packet_priv data
#endif
-
+/* suspend args */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
#define DRV_SUSPEND_STATE_TYPE pm_message_t
#else
#endif
typedef struct {
- void *parent;
+	void *parent;  /* some external entity that the thread is supposed to work for */
char *proc_name;
struct task_struct *p_task;
long thr_pid;
- int prio;
+ int prio; /* priority */
struct semaphore sema;
int terminated;
struct completion completed;
} tsk_ctl_t;
-
-
+/* requires a tsk_ctl_t tsk argument; the caller's private data is passed in the owner pointer */
+/* note: this macro assumes only one context may be waiting on the thread's completion */
#ifdef DHD_DEBUG
#define DBG_THR(x) printk x
#else
DBG_THR(("dhd_dpc_thread: Unexpected up_cnt %d\n", tsk->up_cnt));
}
spin_unlock_irqrestore(&tsk->spinlock, flags);
- return FALSE;
+ return false;
} else
- return TRUE;
+ return true;
}
static inline bool binary_sema_up(tsk_ctl_t *tsk)
{
- bool sem_up = FALSE;
+ bool sem_up = false;
unsigned long flags = 0;
spin_lock_irqsave(&tsk->spinlock, flags);
if (tsk->up_cnt == 0) {
tsk->up_cnt++;
- sem_up = TRUE;
+ sem_up = true;
} else if (tsk->up_cnt == 1) {
-
+		/* dhd_sched_dpc: dpc is already up! */
} else
DBG_THR(("dhd_sched_dpc: unexpected up cnt %d!\n", tsk->up_cnt));
(tsk_ctl)->thr_pid = -1; \
}
-
+/* ----------------------- */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31))
#define KILL_PROC(nr, sig) \
kill_proc(pid, sig, 1); \
}
#endif
-#endif
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31) */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0))
#include <linux/time.h>
__ret; \
})
-#endif
-
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */
+/*
+For < 2.6.24, wl creates its own netdev but doesn't
+align the priv area like the genuine alloc_netdev().
+Since netdev_priv() always gives us the aligned address, it will
+not match our unaligned address for < 2.6.24
+*/
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
#define DEV_PRIV(dev) (dev->priv)
#else
#define WL_ISR(i, d, p) wl_isr((i), (d))
#else
#define WL_ISR(i, d, p) wl_isr((i), (d), (p))
-#endif
+#endif /* < 2.6.20 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0))
#define netdev_priv(dev) dev->priv
-#endif
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
+#define CAN_SLEEP() ((!in_atomic() && !irqs_disabled()))
+#else
+#define CAN_SLEEP() (FALSE)
+#endif
+
+#define KMALLOC_FLAG (CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC)
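+/*
+ * Sketch (not from the original sources): CAN_SLEEP() lets allocations pick
+ * the blocking flag only when the caller is in process context with IRQs
+ * enabled, e.g.
+ *
+ *   buf = kmalloc(len, KMALLOC_FLAG);   // GFP_KERNEL if we may sleep,
+ *                                       // GFP_ATOMIC otherwise
+ */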
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
#define RANDOM32 prandom_u32
#else
#define RANDOM32 random32
-#endif
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
#define SRANDOM32(entropy) prandom_seed(entropy)
#else
#define SRANDOM32(entropy) srandom32(entropy)
-#endif
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) */
-#endif
+/*
+ * Override the newer kfifo functions with
+ * older versions so the code builds on older kernels
+ */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) && !defined(WL_COMPAT_WIRELESS)
+#define kfifo_in_spinlocked(a, b, c, d) kfifo_put(a, (u8 *)b, c)
+#define kfifo_out_spinlocked(a, b, c, d) kfifo_get(a, (u8 *)b, c)
+#define kfifo_esize(a) 1
+#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 32)) && \
+ (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)) && !defined(WL_COMPAT_WIRELESS)
+#define kfifo_in_spinlocked(a, b, c, d) kfifo_in_locked(a, b, c, d)
+#define kfifo_out_spinlocked(a, b, c, d) kfifo_out_locked(a, b, c, d)
+#define kfifo_esize(a) 1
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) */
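+/*
+ * Sketch (not from the original sources): with the remapping above, code
+ * written against the newer kfifo names, e.g.
+ *
+ *   kfifo_in_spinlocked(fifo, &evt, sizeof(evt), &fifo_lock);
+ *   kfifo_out_spinlocked(fifo, &evt, sizeof(evt), &fifo_lock);
+ *
+ * still compiles on pre-2.6.36 kernels; 'fifo', 'evt' and 'fifo_lock' are
+ * placeholders for whatever handles the caller already owns.
+ */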
+
+#endif /* _linuxver_h_ */
* Command line options parser.
*
* $Copyright Open Broadcom Corporation$
- * $Id: miniopt.h 241182 2011-02-17 21:50:03Z $
+ * $Id: miniopt.h 484281 2014-06-12 22:42:26Z $
*/
#endif
/* ---- Include Files ---------------------------------------------------- */
+
+
/* ---- Constants and Types ---------------------------------------------- */
#define MINIOPT_MAXKEY 128 /* Max options */
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: msgtrace.h 369735 2012-11-19 22:50:22Z $
+ * $Id: msgtrace.h 439681 2013-11-27 15:39:50Z $
*/
#ifndef _MSGTRACE_H
/* This marks the start of a packed structure section. */
#include <packed_section_start.h>
-
+/* for osl_t */
+#include <osl_decl.h>
#define MSGTRACE_VERSION 1
/* Message trace header */
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: osl.h 370064 2012-11-20 21:00:25Z $
+ * $Id: osl.h 474639 2014-05-01 23:52:31Z $
*/
#ifndef _osl_h_
#define _osl_h_
+#include <osl_decl.h>
-typedef struct osl_info osl_t;
-typedef struct osl_dmainfo osldma_t;
-
-#define OSL_PKTTAG_SZ 32
-
+#define OSL_PKTTAG_SZ 32 /* Size of PktTag */
+/* Drivers use PKTFREESETCB to register a callback function when a packet is freed by OSL */
typedef void (*pktfree_cb_fn_t)(void *ctx, void *pkt, unsigned int status);
-
+/* Drivers use REGOPSSET() to register register read/write functions */
typedef unsigned int (*osl_rreg_fn_t)(void *ctx, volatile void *reg, unsigned int size);
typedef void (*osl_wreg_fn_t)(void *ctx, volatile void *reg, unsigned int val, unsigned int size);
+
#include <linux_osl.h>
#ifndef PKTDBG_TRACE
-#define PKTDBG_TRACE(osh, pkt, bit)
+#define PKTDBG_TRACE(osh, pkt, bit) BCM_REFERENCE(osh)
#endif
-#define PKTCTFMAP(osh, p)
-
+#define PKTCTFMAP(osh, p) BCM_REFERENCE(osh)
+/* --------------------------------------------------------------------------
+** Register manipulation macros.
+*/
#define SET_REG(osh, r, mask, val) W_REG((osh), (r), ((R_REG((osh), r) & ~(mask)) | (val)))
#ifndef AND_REG
#define AND_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) & (v))
-#endif
+#endif /* !AND_REG */
#ifndef OR_REG
#define OR_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) | (v))
-#endif
+#endif /* !OR_REG */
#if !defined(OSL_SYSUPTIME)
#define OSL_SYSUPTIME() (0)
#define OSL_SYSUPTIME_SUPPORT FALSE
#else
#define OSL_SYSUPTIME_SUPPORT TRUE
-#endif
+#endif /* OSL_SYSUPTIME */
-#if !defined(PKTC)
-#define PKTCGETATTR(s) (0)
-#define PKTCSETATTR(skb, f, p, b)
-#define PKTCCLRATTR(skb)
+#if !defined(PKTC) && !defined(PKTC_DONGLE)
+#define PKTCGETATTR(skb) (0)
+#define PKTCSETATTR(skb, f, p, b) BCM_REFERENCE(skb)
+#define PKTCCLRATTR(skb) BCM_REFERENCE(skb)
#define PKTCCNT(skb) (1)
#define PKTCLEN(skb) PKTLEN(NULL, skb)
#define PKTCGETFLAGS(skb) (0)
-#define PKTCSETFLAGS(skb, f)
-#define PKTCCLRFLAGS(skb)
+#define PKTCSETFLAGS(skb, f) BCM_REFERENCE(skb)
+#define PKTCCLRFLAGS(skb) BCM_REFERENCE(skb)
#define PKTCFLAGS(skb) (0)
-#define PKTCSETCNT(skb, c)
-#define PKTCINCRCNT(skb)
-#define PKTCADDCNT(skb, c)
-#define PKTCSETLEN(skb, l)
-#define PKTCADDLEN(skb, l)
-#define PKTCSETFLAG(skb, fb)
-#define PKTCCLRFLAG(skb, fb)
+#define PKTCSETCNT(skb, c) BCM_REFERENCE(skb)
+#define PKTCINCRCNT(skb) BCM_REFERENCE(skb)
+#define PKTCADDCNT(skb, c) BCM_REFERENCE(skb)
+#define PKTCSETLEN(skb, l) BCM_REFERENCE(skb)
+#define PKTCADDLEN(skb, l) BCM_REFERENCE(skb)
+#define PKTCSETFLAG(skb, fb) BCM_REFERENCE(skb)
+#define PKTCCLRFLAG(skb, fb) BCM_REFERENCE(skb)
#define PKTCLINK(skb) NULL
-#define PKTSETCLINK(skb, x)
+#define PKTSETCLINK(skb, x) BCM_REFERENCE(skb)
#define FOREACH_CHAINED_PKT(skb, nskb) \
for ((nskb) = NULL; (skb) != NULL; (skb) = (nskb))
#define PKTCFREE PKTFREE
-#endif
-
-#ifndef HNDCTF
-#define PKTSETCHAINED(osh, skb)
-#define PKTCLRCHAINED(osh, skb)
-#define PKTISCHAINED(skb) (FALSE)
+#define PKTCENQTAIL(h, t, p) \
+do { \
+ if ((t) == NULL) { \
+ (h) = (t) = (p); \
+ } \
+} while (0)
+#endif /* !linux || !PKTC */
+
+#if !defined(HNDCTF) && !defined(PKTC_TX_DONGLE)
+#define PKTSETCHAINED(osh, skb) BCM_REFERENCE(osh)
+#define PKTCLRCHAINED(osh, skb) BCM_REFERENCE(osh)
+#define PKTISCHAINED(skb) FALSE
#endif
-#endif
+/* Lbuf with fraglist */
+#define PKTFRAGPKTID(osh, lb) (0)
+#define PKTSETFRAGPKTID(osh, lb, id) BCM_REFERENCE(osh)
+#define PKTFRAGTOTNUM(osh, lb) (0)
+#define PKTSETFRAGTOTNUM(osh, lb, tot) BCM_REFERENCE(osh)
+#define PKTFRAGTOTLEN(osh, lb) (0)
+#define PKTSETFRAGTOTLEN(osh, lb, len) BCM_REFERENCE(osh)
+#define PKTIFINDEX(osh, lb) (0)
+#define PKTSETIFINDEX(osh, lb, idx) BCM_REFERENCE(osh)
+#define PKTGETLF(osh, len, send, lbuf_type) (0)
+
+/* in rx path, reuse totlen as used len */
+#define PKTFRAGUSEDLEN(osh, lb) (0)
+#define PKTSETFRAGUSEDLEN(osh, lb, len) BCM_REFERENCE(osh)
+
+#define PKTFRAGLEN(osh, lb, ix) (0)
+#define PKTSETFRAGLEN(osh, lb, ix, len) BCM_REFERENCE(osh)
+#define PKTFRAGDATA_LO(osh, lb, ix) (0)
+#define PKTSETFRAGDATA_LO(osh, lb, ix, addr) BCM_REFERENCE(osh)
+#define PKTFRAGDATA_HI(osh, lb, ix) (0)
+#define PKTSETFRAGDATA_HI(osh, lb, ix, addr) BCM_REFERENCE(osh)
+
+/* RX FRAG */
+#define PKTISRXFRAG(osh, lb) (0)
+#define PKTSETRXFRAG(osh, lb) BCM_REFERENCE(osh)
+#define PKTRESETRXFRAG(osh, lb) BCM_REFERENCE(osh)
+
+/* TX FRAG */
+#define PKTISTXFRAG(osh, lb) (0)
+#define PKTSETTXFRAG(osh, lb) BCM_REFERENCE(osh)
+
+/* Need Rx completion used for AMPDU reordering */
+#define PKTNEEDRXCPL(osh, lb) (TRUE)
+#define PKTSETNORXCPL(osh, lb) BCM_REFERENCE(osh)
+#define PKTRESETNORXCPL(osh, lb) BCM_REFERENCE(osh)
+
+#define PKTISFRAG(osh, lb) (0)
+#define PKTFRAGISCHAINED(osh, i) (0)
+/* TRIM Tail bytes from lfrag */
+#define PKTFRAG_TRIM_TAILBYTES(osh, p, len) PKTSETLEN(osh, p, PKTLEN(osh, p) - len)
+
+#endif /* _osl_h_ */
--- /dev/null
+/*
+ * osl forward declarations
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id$
+ */
+
+#ifndef _osl_decl_h_
+#define _osl_decl_h_
+
+/* osl handle type forward declaration */
+typedef struct osl_info osl_t;
+typedef struct osl_dmainfo osldma_t;
+
+#endif
*
*
* $Copyright Open Broadcom Corporation$
- * $Id: packed_section_end.h 241182 2011-02-17 21:50:03Z $
+ * $Id: packed_section_end.h 437241 2013-11-18 07:39:24Z $
*/
-
+/* Error check - BWL_PACKED_SECTION is defined in packed_section_start.h
+ * and undefined in packed_section_end.h. If it is NOT defined at this
+ * point, then there is a missing include of packed_section_start.h.
+ */
#ifdef BWL_PACKED_SECTION
#undef BWL_PACKED_SECTION
#else
-
+/* Compiler-specific directives for structure packing are declared in
+ * packed_section_start.h. This marks the end of the structure packing section,
+ * so, undef them here.
+ */
#undef BWL_PRE_PACKED_STRUCT
#undef BWL_POST_PACKED_STRUCT
*
*
* $Copyright Open Broadcom Corporation$
- * $Id: packed_section_start.h 286783 2011-09-29 06:18:57Z $
+ * $Id: packed_section_start.h 437241 2013-11-18 07:39:24Z $
*/
-
+/* Error check - BWL_PACKED_SECTION is defined in packed_section_start.h
+ * and undefined in packed_section_end.h. If it is already defined at this
+ * point, then there is a missing include of packed_section_end.h.
+ */
#ifdef BWL_PACKED_SECTION
#error "BWL_PACKED_SECTION is already defined!"
#else
-
+/* Declare compiler-specific directives for structure packing. */
#if defined(__GNUC__) || defined(__lint)
#define BWL_PRE_PACKED_STRUCT
#define BWL_POST_PACKED_STRUCT __attribute__ ((packed))
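/*
 * Illustrative sketch, not part of the patch: a wire-format structure is
 * bracketed by the packed-section includes so the directives above apply to
 * it and no padding is inserted between members. example_hdr is hypothetical.
 */
#include <packed_section_start.h>
BWL_PRE_PACKED_STRUCT struct example_hdr {
	uint8	type;
	uint16	len;	/* stays at offset 1 because of the packing attribute */
} BWL_POST_PACKED_STRUCT;
#include <packed_section_end.h>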
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: pcicfg.h 346935 2012-07-25 00:24:55Z $
+ * $Id: pcicfg.h 465082 2014-03-26 17:37:28Z $
*/
#ifndef _h_pcicfg_
#define _h_pcicfg_
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+/* The following are inside ifndef's so we don't collide with NTDDK.H */
+#ifndef PCI_MAX_BUS
+#define PCI_MAX_BUS 0x100
+#endif
+#ifndef PCI_MAX_DEVICES
+#define PCI_MAX_DEVICES 0x20
+#endif
+#ifndef PCI_MAX_FUNCTION
+#define PCI_MAX_FUNCTION 0x8
+#endif
+#ifndef PCI_INVALID_VENDORID
+#define PCI_INVALID_VENDORID 0xffff
+#endif
+#ifndef PCI_INVALID_DEVICEID
+#define PCI_INVALID_DEVICEID 0xffff
+#endif
+
+
+/* Convert between bus-slot-function-register and config addresses */
+
+#define PCICFG_BUS_SHIFT 16 /* Bus shift */
+#define PCICFG_SLOT_SHIFT 11 /* Slot shift */
+#define PCICFG_FUN_SHIFT 8 /* Function shift */
+#define PCICFG_OFF_SHIFT 0 /* Register shift */
+
+#define PCICFG_BUS_MASK 0xff /* Bus mask */
+#define PCICFG_SLOT_MASK 0x1f /* Slot mask */
+#define PCICFG_FUN_MASK 7 /* Function mask */
+#define PCICFG_OFF_MASK 0xff /* Register mask */
+
+#define PCI_CONFIG_ADDR(b, s, f, o) \
+ ((((b) & PCICFG_BUS_MASK) << PCICFG_BUS_SHIFT) \
+ | (((s) & PCICFG_SLOT_MASK) << PCICFG_SLOT_SHIFT) \
+ | (((f) & PCICFG_FUN_MASK) << PCICFG_FUN_SHIFT) \
+ | (((o) & PCICFG_OFF_MASK) << PCICFG_OFF_SHIFT))
+
+#define PCI_CONFIG_BUS(a) (((a) >> PCICFG_BUS_SHIFT) & PCICFG_BUS_MASK)
+#define PCI_CONFIG_SLOT(a) (((a) >> PCICFG_SLOT_SHIFT) & PCICFG_SLOT_MASK)
+#define PCI_CONFIG_FUN(a) (((a) >> PCICFG_FUN_SHIFT) & PCICFG_FUN_MASK)
+#define PCI_CONFIG_OFF(a) (((a) >> PCICFG_OFF_SHIFT) & PCICFG_OFF_MASK)
+
+/* PCIE Config space accessing MACROS */
+
+#define PCIECFG_BUS_SHIFT 24 /* Bus shift */
+#define PCIECFG_SLOT_SHIFT 19 /* Slot/Device shift */
+#define PCIECFG_FUN_SHIFT 16 /* Function shift */
+#define PCIECFG_OFF_SHIFT 0 /* Register shift */
+
+#define PCIECFG_BUS_MASK 0xff /* Bus mask */
+#define PCIECFG_SLOT_MASK 0x1f /* Slot/Device mask */
+#define PCIECFG_FUN_MASK 7 /* Function mask */
+#define PCIECFG_OFF_MASK 0xfff /* Register mask */
+
+#define PCIE_CONFIG_ADDR(b, s, f, o) \
+ ((((b) & PCIECFG_BUS_MASK) << PCIECFG_BUS_SHIFT) \
+ | (((s) & PCIECFG_SLOT_MASK) << PCIECFG_SLOT_SHIFT) \
+ | (((f) & PCIECFG_FUN_MASK) << PCIECFG_FUN_SHIFT) \
+ | (((o) & PCIECFG_OFF_MASK) << PCIECFG_OFF_SHIFT))
+
+#define PCIE_CONFIG_BUS(a) (((a) >> PCIECFG_BUS_SHIFT) & PCIECFG_BUS_MASK)
+#define PCIE_CONFIG_SLOT(a) (((a) >> PCIECFG_SLOT_SHIFT) & PCIECFG_SLOT_MASK)
+#define PCIE_CONFIG_FUN(a) (((a) >> PCIECFG_FUN_SHIFT) & PCIECFG_FUN_MASK)
+#define PCIE_CONFIG_OFF(a) (((a) >> PCIECFG_OFF_SHIFT) & PCIECFG_OFF_MASK)
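/*
 * Illustrative sketch, not part of the patch: packing a bus/slot/function/
 * register tuple into a config address and pulling the fields back out with
 * the macros above. The values are arbitrary examples.
 */
static void example_cfg_addr(void)
{
	uint32 cfg = PCI_CONFIG_ADDR(1, 0x1f, 0, 0x3c);	/* 0x0001f83c */
	uint32 bus = PCI_CONFIG_BUS(cfg);			/* 1 */
	uint32 off = PCI_CONFIG_OFF(cfg);			/* 0x3c */

	(void)bus; (void)off;
}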
+
+/* The actual config space */
+
+#define PCI_BAR_MAX 6
+
+#define PCI_ROM_BAR 8
+
+#define PCR_RSVDA_MAX 2
+
+/* Bits in PCI bars' flags */
+
+#define PCIBAR_FLAGS 0xf
+#define PCIBAR_IO 0x1
+#define PCIBAR_MEM1M 0x2
+#define PCIBAR_MEM64 0x4
+#define PCIBAR_PREFETCH 0x8
+#define PCIBAR_MEM32_MASK 0xFFFFFF80
+
+/* pci config status reg has a bit to indicate that capability ptr is present */
+
+#define PCI_CAPPTR_PRESENT 0x0010
+
+typedef struct _pci_config_regs {
+ uint16 vendor;
+ uint16 device;
+ uint16 command;
+ uint16 status;
+ uint8 rev_id;
+ uint8 prog_if;
+ uint8 sub_class;
+ uint8 base_class;
+ uint8 cache_line_size;
+ uint8 latency_timer;
+ uint8 header_type;
+ uint8 bist;
+ uint32 base[PCI_BAR_MAX];
+ uint32 cardbus_cis;
+ uint16 subsys_vendor;
+ uint16 subsys_id;
+ uint32 baserom;
+ uint32 rsvd_a[PCR_RSVDA_MAX];
+ uint8 int_line;
+ uint8 int_pin;
+ uint8 min_gnt;
+ uint8 max_lat;
+ uint8 dev_dep[192];
+} pci_config_regs;
+
+#define SZPCR (sizeof (pci_config_regs))
+#define MINSZPCR 64 /* offsetof(dev_dep[0]) */
+
+#endif /* !LINUX_POSTMOGRIFY_REMOVAL */
+/* A structure for the config registers is nice, but in most
+ * systems the config space is not memory mapped, so we need
+ * field offsets. :-(
+ */
#define PCI_CFG_VID 0
#define PCI_CFG_DID 2
#define PCI_CFG_CMD 4
#define PCI_CFG_MINGNT 0x3e
#define PCI_CFG_MAXLAT 0x3f
#define PCI_CFG_DEVCTRL 0xd8
-#define PCI_BAR0_WIN 0x80
-#define PCI_BAR1_WIN 0x84
-#define PCI_SPROM_CONTROL 0x88
-#define PCI_BAR1_CONTROL 0x8c
-#define PCI_INT_STATUS 0x90
-#define PCI_INT_MASK 0x94
-#define PCI_TO_SB_MB 0x98
-#define PCI_BACKPLANE_ADDR 0xa0
-#define PCI_BACKPLANE_DATA 0xa4
-#define PCI_CLK_CTL_ST 0xa8
-#define PCI_BAR0_WIN2 0xac
-#define PCI_GPIO_IN 0xb0
-#define PCI_GPIO_OUT 0xb4
-#define PCI_GPIO_OUTEN 0xb8
-
-#define PCI_BAR0_SHADOW_OFFSET (2 * 1024)
-#define PCI_BAR0_SPROM_OFFSET (4 * 1024)
-#define PCI_BAR0_PCIREGS_OFFSET (6 * 1024)
-#define PCI_BAR0_PCISBR_OFFSET (4 * 1024)
-
-#define PCIE2_BAR0_WIN2 0x70
-#define PCIE2_BAR0_CORE2_WIN 0x74
-#define PCIE2_BAR0_CORE2_WIN2 0x78
-
-#define PCI_BAR0_WINSZ (16 * 1024)
-
-#define PCI_16KB0_PCIREGS_OFFSET (8 * 1024)
-#define PCI_16KB0_CCREGS_OFFSET (12 * 1024)
-#define PCI_16KBB0_WINSZ (16 * 1024)
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+
+
+
+/* Classes and subclasses */
+
+typedef enum {
+ PCI_CLASS_OLD = 0,
+ PCI_CLASS_DASDI,
+ PCI_CLASS_NET,
+ PCI_CLASS_DISPLAY,
+ PCI_CLASS_MMEDIA,
+ PCI_CLASS_MEMORY,
+ PCI_CLASS_BRIDGE,
+ PCI_CLASS_COMM,
+ PCI_CLASS_BASE,
+ PCI_CLASS_INPUT,
+ PCI_CLASS_DOCK,
+ PCI_CLASS_CPU,
+ PCI_CLASS_SERIAL,
+ PCI_CLASS_INTELLIGENT = 0xe,
+ PCI_CLASS_SATELLITE,
+ PCI_CLASS_CRYPT,
+ PCI_CLASS_DSP,
+ PCI_CLASS_XOR = 0xfe
+} pci_classes;
+
+typedef enum {
+ PCI_DASDI_SCSI,
+ PCI_DASDI_IDE,
+ PCI_DASDI_FLOPPY,
+ PCI_DASDI_IPI,
+ PCI_DASDI_RAID,
+ PCI_DASDI_OTHER = 0x80
+} pci_dasdi_subclasses;
+
+typedef enum {
+ PCI_NET_ETHER,
+ PCI_NET_TOKEN,
+ PCI_NET_FDDI,
+ PCI_NET_ATM,
+ PCI_NET_OTHER = 0x80
+} pci_net_subclasses;
+
+typedef enum {
+ PCI_DISPLAY_VGA,
+ PCI_DISPLAY_XGA,
+ PCI_DISPLAY_3D,
+ PCI_DISPLAY_OTHER = 0x80
+} pci_display_subclasses;
+
+typedef enum {
+ PCI_MMEDIA_VIDEO,
+ PCI_MMEDIA_AUDIO,
+ PCI_MMEDIA_PHONE,
+ PCI_MEDIA_OTHER = 0x80
+} pci_mmedia_subclasses;
+
+typedef enum {
+ PCI_MEMORY_RAM,
+ PCI_MEMORY_FLASH,
+ PCI_MEMORY_OTHER = 0x80
+} pci_memory_subclasses;
+
+typedef enum {
+ PCI_BRIDGE_HOST,
+ PCI_BRIDGE_ISA,
+ PCI_BRIDGE_EISA,
+ PCI_BRIDGE_MC,
+ PCI_BRIDGE_PCI,
+ PCI_BRIDGE_PCMCIA,
+ PCI_BRIDGE_NUBUS,
+ PCI_BRIDGE_CARDBUS,
+ PCI_BRIDGE_RACEWAY,
+ PCI_BRIDGE_OTHER = 0x80
+} pci_bridge_subclasses;
+
+typedef enum {
+ PCI_COMM_UART,
+ PCI_COMM_PARALLEL,
+ PCI_COMM_MULTIUART,
+ PCI_COMM_MODEM,
+ PCI_COMM_OTHER = 0x80
+} pci_comm_subclasses;
+
+typedef enum {
+ PCI_BASE_PIC,
+ PCI_BASE_DMA,
+ PCI_BASE_TIMER,
+ PCI_BASE_RTC,
+ PCI_BASE_PCI_HOTPLUG,
+ PCI_BASE_OTHER = 0x80
+} pci_base_subclasses;
+
+typedef enum {
+ PCI_INPUT_KBD,
+ PCI_INPUT_PEN,
+ PCI_INPUT_MOUSE,
+ PCI_INPUT_SCANNER,
+ PCI_INPUT_GAMEPORT,
+ PCI_INPUT_OTHER = 0x80
+} pci_input_subclasses;
+
+typedef enum {
+ PCI_DOCK_GENERIC,
+ PCI_DOCK_OTHER = 0x80
+} pci_dock_subclasses;
+
+typedef enum {
+ PCI_CPU_386,
+ PCI_CPU_486,
+ PCI_CPU_PENTIUM,
+ PCI_CPU_ALPHA = 0x10,
+ PCI_CPU_POWERPC = 0x20,
+ PCI_CPU_MIPS = 0x30,
+ PCI_CPU_COPROC = 0x40,
+ PCI_CPU_OTHER = 0x80
+} pci_cpu_subclasses;
+
+typedef enum {
+ PCI_SERIAL_IEEE1394,
+ PCI_SERIAL_ACCESS,
+ PCI_SERIAL_SSA,
+ PCI_SERIAL_USB,
+ PCI_SERIAL_FIBER,
+ PCI_SERIAL_SMBUS,
+ PCI_SERIAL_OTHER = 0x80
+} pci_serial_subclasses;
+
+typedef enum {
+ PCI_INTELLIGENT_I2O
+} pci_intelligent_subclasses;
+
+typedef enum {
+ PCI_SATELLITE_TV,
+ PCI_SATELLITE_AUDIO,
+ PCI_SATELLITE_VOICE,
+ PCI_SATELLITE_DATA,
+ PCI_SATELLITE_OTHER = 0x80
+} pci_satellite_subclasses;
+
+typedef enum {
+ PCI_CRYPT_NETWORK,
+ PCI_CRYPT_ENTERTAINMENT,
+ PCI_CRYPT_OTHER = 0x80
+} pci_crypt_subclasses;
+
+typedef enum {
+ PCI_DSP_DPIO,
+ PCI_DSP_OTHER = 0x80
+} pci_dsp_subclasses;
+
+typedef enum {
+ PCI_XOR_QDMA,
+ PCI_XOR_OTHER = 0x80
+} pci_xor_subclasses;
+
+/* Header types */
+#define PCI_HEADER_MULTI 0x80
+#define PCI_HEADER_MASK 0x7f
+typedef enum {
+ PCI_HEADER_NORMAL,
+ PCI_HEADER_BRIDGE,
+ PCI_HEADER_CARDBUS
+} pci_header_types;
+
+
+/* Overlay for a PCI-to-PCI bridge */
+
+#define PPB_RSVDA_MAX 2
+#define PPB_RSVDD_MAX 8
+
+typedef struct _ppb_config_regs {
+ uint16 vendor;
+ uint16 device;
+ uint16 command;
+ uint16 status;
+ uint8 rev_id;
+ uint8 prog_if;
+ uint8 sub_class;
+ uint8 base_class;
+ uint8 cache_line_size;
+ uint8 latency_timer;
+ uint8 header_type;
+ uint8 bist;
+ uint32 rsvd_a[PPB_RSVDA_MAX];
+ uint8 prim_bus;
+ uint8 sec_bus;
+ uint8 sub_bus;
+ uint8 sec_lat;
+ uint8 io_base;
+ uint8 io_lim;
+ uint16 sec_status;
+ uint16 mem_base;
+ uint16 mem_lim;
+ uint16 pf_mem_base;
+ uint16 pf_mem_lim;
+ uint32 pf_mem_base_hi;
+ uint32 pf_mem_lim_hi;
+ uint16 io_base_hi;
+ uint16 io_lim_hi;
+ uint16 subsys_vendor;
+ uint16 subsys_id;
+ uint32 rsvd_b;
+ uint8 rsvd_c;
+ uint8 int_pin;
+ uint16 bridge_ctrl;
+ uint8 chip_ctrl;
+ uint8 diag_ctrl;
+ uint16 arb_ctrl;
+ uint32 rsvd_d[PPB_RSVDD_MAX];
+ uint8 dev_dep[192];
+} ppb_config_regs;
+
+
+/* PCI CAPABILITY DEFINES */
+#define PCI_CAP_POWERMGMTCAP_ID 0x01
+#define PCI_CAP_MSICAP_ID 0x05
+#define PCI_CAP_VENDSPEC_ID 0x09
+#define PCI_CAP_PCIECAP_ID 0x10
+
+/* Data structure to define the Message Signalled Interrupt facility
+ * Valid for PCI and PCIE configurations
+ */
+typedef struct _pciconfig_cap_msi {
+ uint8 capID;
+ uint8 nextptr;
+ uint16 msgctrl;
+ uint32 msgaddr;
+} pciconfig_cap_msi;
+#define MSI_ENABLE 0x1 /* bit 0 of msgctrl */
+
+/* Data structure to define the Power management facility
+ * Valid for PCI and PCIE configurations
+ */
+typedef struct _pciconfig_cap_pwrmgmt {
+ uint8 capID;
+ uint8 nextptr;
+ uint16 pme_cap;
+ uint16 pme_sts_ctrl;
+ uint8 pme_bridge_ext;
+ uint8 data;
+} pciconfig_cap_pwrmgmt;
+
+#define PME_CAP_PM_STATES (0x1f << 27) /* Bits 31:27 states that can generate PME */
+#define PME_CSR_OFFSET 0x4 /* 4-bytes offset */
+#define PME_CSR_PME_EN (1 << 8) /* Bit 8 Enable generating of PME */
+#define PME_CSR_PME_STAT (1 << 15) /* Bit 15 PME got asserted */
+
+/* Data structure to define the PCIE capability */
+typedef struct _pciconfig_cap_pcie {
+ uint8 capID;
+ uint8 nextptr;
+ uint16 pcie_cap;
+ uint32 dev_cap;
+ uint16 dev_ctrl;
+ uint16 dev_status;
+ uint32 link_cap;
+ uint16 link_ctrl;
+ uint16 link_status;
+ uint32 slot_cap;
+ uint16 slot_ctrl;
+ uint16 slot_status;
+ uint16 root_ctrl;
+ uint16 root_cap;
+ uint32 root_status;
+} pciconfig_cap_pcie;
+
+/* PCIE Enhanced CAPABILITY DEFINES */
+#define PCIE_EXTCFG_OFFSET 0x100
+#define PCIE_ADVERRREP_CAPID 0x0001
+#define PCIE_VC_CAPID 0x0002
+#define PCIE_DEVSNUM_CAPID 0x0003
+#define PCIE_PWRBUDGET_CAPID 0x0004
+
+/* PCIE Extended configuration */
+#define PCIE_ADV_CORR_ERR_MASK 0x114
+#define CORR_ERR_RE (1 << 0) /* Receiver */
+#define CORR_ERR_BT (1 << 6) /* Bad TLP */
+#define CORR_ERR_BD (1 << 7) /* Bad DLLP */
+#define CORR_ERR_RR (1 << 8) /* REPLAY_NUM rollover */
+#define CORR_ERR_RT (1 << 12) /* Reply timer timeout */
+#define ALL_CORR_ERRORS (CORR_ERR_RE | CORR_ERR_BT | CORR_ERR_BD | \
+ CORR_ERR_RR | CORR_ERR_RT)
+
+/* PCIE Root Control Register bits (Host mode only) */
+#define PCIE_RC_CORR_SERR_EN 0x0001
+#define PCIE_RC_NONFATAL_SERR_EN 0x0002
+#define PCIE_RC_FATAL_SERR_EN 0x0004
+#define PCIE_RC_PME_INT_EN 0x0008
+#define PCIE_RC_CRS_EN 0x0010
+
+/* PCIE Root Capability Register bits (Host mode only) */
+#define PCIE_RC_CRS_VISIBILITY 0x0001
+
+/* Header to define the PCIE specific capabilities in the extended config space */
+typedef struct _pcie_enhanced_caphdr {
+ uint16 capID;
+ uint16 cap_ver : 4;
+ uint16 next_ptr : 12;
+} pcie_enhanced_caphdr;
+
+
+/* Everything below is BRCM HND proprietary */
+
+
+/* Brcm PCI configuration registers */
+#define cap_list rsvd_a[0]
+#define bar0_window dev_dep[0x80 - 0x40]
+#define bar1_window dev_dep[0x84 - 0x40]
+#define sprom_control dev_dep[0x88 - 0x40]
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+#define PCI_BAR0_WIN 0x80 /* backplane address space accessed by BAR0 */
+#define PCI_BAR1_WIN 0x84 /* backplane address space accessed by BAR1 */
+#define PCI_SPROM_CONTROL 0x88 /* sprom property control */
+#define PCI_BAR1_CONTROL 0x8c /* BAR1 region burst control */
+#define PCI_INT_STATUS 0x90 /* PCI and other cores interrupts */
+#define PCI_INT_MASK 0x94 /* mask of PCI and other cores interrupts */
+#define PCI_TO_SB_MB 0x98 /* signal backplane interrupts */
+#define PCI_BACKPLANE_ADDR 0xa0 /* address an arbitrary location on the system backplane */
+#define PCI_BACKPLANE_DATA 0xa4 /* data at the location specified by above address */
+#define PCI_CLK_CTL_ST 0xa8 /* pci config space clock control/status (>=rev14) */
+#define PCI_BAR0_WIN2 0xac /* backplane address space accessed by second 4KB of BAR0 */
+#define PCI_GPIO_IN 0xb0 /* pci config space gpio input (>=rev3) */
+#define PCI_GPIO_OUT 0xb4 /* pci config space gpio output (>=rev3) */
+#define PCI_GPIO_OUTEN 0xb8 /* pci config space gpio output enable (>=rev3) */
+#define PCI_L1SS_CTRL2 0x24c /* The L1 PM Substates Control register */
+
+/* Private Registers */
+#define PCI_STAT_CTRL 0xa80
+#define PCI_L0_EVENTCNT 0xa84
+#define PCI_L0_STATETMR 0xa88
+#define PCI_L1_EVENTCNT 0xa8c
+#define PCI_L1_STATETMR 0xa90
+#define PCI_L1_1_EVENTCNT 0xa94
+#define PCI_L1_1_STATETMR 0xa98
+#define PCI_L1_2_EVENTCNT 0xa9c
+#define PCI_L1_2_STATETMR 0xaa0
+#define PCI_L2_EVENTCNT 0xaa4
+#define PCI_L2_STATETMR 0xaa8
+
+#define PCI_PMCR_REFUP 0x1814 /* Trefup time */
+#define PCI_PMCR_REFUP_EXT 0x1818 /* Trefup extend Max */
+#define PCI_TPOWER_SCALE_MASK 0x3
+#define PCI_TPOWER_SCALE_SHIFT 3 /* 0:1 is scale and 2 is rsvd */
+
+
+#define PCI_BAR0_SHADOW_OFFSET (2 * 1024) /* bar0 + 2K accesses sprom shadow (in pci core) */
+#define PCI_BAR0_SPROM_OFFSET (4 * 1024) /* bar0 + 4K accesses external sprom */
+#define PCI_BAR0_PCIREGS_OFFSET (6 * 1024) /* bar0 + 6K accesses pci core registers */
+#define PCI_BAR0_PCISBR_OFFSET (4 * 1024) /* pci core SB registers are at the end of the
+ * 8KB window, so their address is the "regular"
+ * address plus 4K
+ */
+/*
+ * PCIE GEN2 changed some of the above locations for
+ * Bar0WrapperBase, SecondaryBAR0Window and SecondaryBAR0WrapperBase
+ * BAR0 maps 32K of register space
+*/
+#define PCIE2_BAR0_WIN2 0x70 /* backplane address space accessed by second 4KB of BAR0 */
+#define PCIE2_BAR0_CORE2_WIN 0x74 /* backplane address space accessed by second 4KB of BAR0 */
+#define PCIE2_BAR0_CORE2_WIN2 0x78 /* backplane address space accessed by second 4KB of BAR0 */
+
+#define PCI_BAR0_WINSZ (16 * 1024) /* bar0 window size Match with corerev 13 */
+/* On pci corerev >= 13 and all pcie, the bar0 is now 16KB and it maps: */
+#define PCI_16KB0_PCIREGS_OFFSET (8 * 1024) /* bar0 + 8K accesses pci/pcie core registers */
+#define PCI_16KB0_CCREGS_OFFSET (12 * 1024) /* bar0 + 12K accesses chipc core registers */
+#define PCI_16KBB0_WINSZ (16 * 1024) /* bar0 window size */
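/*
 * Illustrative sketch, not part of the patch: with the 16KB BAR0 layout above,
 * the pci/pcie core registers and the chipcommon registers sit at fixed
 * offsets from the mapped window. bar0_va is a hypothetical ioremap()ed base.
 */
static void example_bar0_layout(volatile uint8 *bar0_va)
{
	volatile uint8 *pciregs = bar0_va + PCI_16KB0_PCIREGS_OFFSET;	/* bar0 + 8KB */
	volatile uint8 *ccregs = bar0_va + PCI_16KB0_CCREGS_OFFSET;	/* bar0 + 12KB */

	(void)pciregs; (void)ccregs;
}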
+
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+/* On AI chips we have a second window where the DMP regs are mapped: */
+#define PCI_16KB0_WIN2_OFFSET (4 * 1024) /* bar0 + 4K is "Window 2" */
+
+/* PCI_INT_STATUS */
+#define PCI_SBIM_STATUS_SERR 0x4 /* backplane SBErr interrupt status */
+
+/* PCI_INT_MASK */
+#define PCI_SBIM_SHIFT 8 /* backplane core interrupt mask bits offset */
+#define PCI_SBIM_MASK 0xff00 /* backplane core interrupt mask */
+#define PCI_SBIM_MASK_SERR 0x4 /* backplane SBErr interrupt mask */
+
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+/* PCI_SPROM_CONTROL */
+#define SPROM_SZ_MSK 0x02 /* SPROM Size Mask */
+#define SPROM_LOCKED 0x08 /* SPROM Locked */
+#define SPROM_BLANK 0x04 /* indicating a blank SPROM */
+#define SPROM_WRITEEN 0x10 /* SPROM write enable */
+#define SPROM_BOOTROM_WE 0x20 /* external bootrom write enable */
+#define SPROM_BACKPLANE_EN 0x40 /* Enable indirect backplane access */
+#define SPROM_OTPIN_USE 0x80 /* device OTP In use */
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+/* Bits in PCI command and status regs */
+#define PCI_CMD_IO 0x00000001 /* I/O enable */
+#define PCI_CMD_MEMORY 0x00000002 /* Memory enable */
+#define PCI_CMD_MASTER 0x00000004 /* Master enable */
+#define PCI_CMD_SPECIAL 0x00000008 /* Special cycles enable */
+#define PCI_CMD_INVALIDATE 0x00000010 /* Invalidate? */
+#define PCI_CMD_VGA_PAL 0x00000040 /* VGA Palette */
+#define PCI_STAT_TA 0x08000000 /* target abort status */
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
#define PCI_CONFIG_SPACE_SIZE 256
-#endif
+#endif /* _h_pcicfg_ */
--- /dev/null
+/*
+ * BCM43XX PCIE core hardware definitions.
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: pcie_core.h 483003 2014-06-05 19:57:46Z $
+ */
+#ifndef _PCIE_CORE_H
+#define _PCIE_CORE_H
+
+#include <sbhnddma.h>
+#include <siutils.h>
+
+/* cpp contortions to concatenate w/arg prescan */
+#ifndef PAD
+#define _PADLINE(line) pad ## line
+#define _XSTR(line) _PADLINE(line)
+#define PAD _XSTR(__LINE__)
+#endif
+
+/* PCIE Enumeration space offsets */
+#define PCIE_CORE_CONFIG_OFFSET 0x0
+#define PCIE_FUNC0_CONFIG_OFFSET 0x400
+#define PCIE_FUNC1_CONFIG_OFFSET 0x500
+#define PCIE_FUNC2_CONFIG_OFFSET 0x600
+#define PCIE_FUNC3_CONFIG_OFFSET 0x700
+#define PCIE_SPROM_SHADOW_OFFSET 0x800
+#define PCIE_SBCONFIG_OFFSET 0xE00
+
+
+#define PCIEDEV_MAX_DMAS 4
+
+/* PCIE Bar0 Address Mapping. Each function maps 16KB config space */
+#define PCIE_DEV_BAR0_SIZE 0x4000
+#define PCIE_BAR0_WINMAPCORE_OFFSET 0x0
+#define PCIE_BAR0_EXTSPROM_OFFSET 0x1000
+#define PCIE_BAR0_PCIECORE_OFFSET 0x2000
+#define PCIE_BAR0_CCCOREREG_OFFSET 0x3000
+
+/* different register spaces to access through pcie indirect access */
+#define PCIE_CONFIGREGS 1 /* Access to config space */
+#define PCIE_PCIEREGS 2 /* Access to pcie registers */
+
+/* dma regs to control the flow between host2dev and dev2host */
+typedef struct pcie_devdmaregs {
+ dma64regs_t tx;
+ uint32 PAD[2];
+ dma64regs_t rx;
+ uint32 PAD[2];
+} pcie_devdmaregs_t;
+
+#define PCIE_DB_HOST2DEV_0 0x1
+#define PCIE_DB_HOST2DEV_1 0x2
+#define PCIE_DB_DEV2HOST_0 0x3
+#define PCIE_DB_DEV2HOST_1 0x4
+
+/* door bell register sets */
+typedef struct pcie_doorbell {
+ uint32 host2dev_0;
+ uint32 host2dev_1;
+ uint32 dev2host_0;
+ uint32 dev2host_1;
+} pcie_doorbell_t;
+
+/* SB side: PCIE core and host control registers */
+typedef struct sbpcieregs {
+ uint32 control; /* host mode only */
+ uint32 iocstatus; /* PCIE2: iostatus */
+ uint32 PAD[1];
+ uint32 biststatus; /* bist Status: 0x00C */
+ uint32 gpiosel; /* PCIE gpio sel: 0x010 */
+ uint32 gpioouten; /* PCIE gpio outen: 0x14 */
+ uint32 PAD[2];
+ uint32 intstatus; /* Interrupt status: 0x20 */
+ uint32 intmask; /* Interrupt mask: 0x24 */
+ uint32 sbtopcimailbox; /* sb to pcie mailbox: 0x028 */
+ uint32 obffcontrol; /* PCIE2: 0x2C */
+ uint32 obffintstatus; /* PCIE2: 0x30 */
+ uint32 obffdatastatus; /* PCIE2: 0x34 */
+ uint32 PAD[2];
+ uint32 errlog; /* PCIE2: 0x40 */
+ uint32 errlogaddr; /* PCIE2: 0x44 */
+ uint32 mailboxint; /* PCIE2: 0x48 */
+ uint32 mailboxintmsk; /* PCIE2: 0x4c */
+ uint32 ltrspacing; /* PCIE2: 0x50 */
+ uint32 ltrhysteresiscnt; /* PCIE2: 0x54 */
+ uint32 PAD[42];
+
+ uint32 sbtopcie0; /* sb to pcie translation 0: 0x100 */
+ uint32 sbtopcie1; /* sb to pcie translation 1: 0x104 */
+ uint32 sbtopcie2; /* sb to pcie translation 2: 0x108 */
+ uint32 PAD[5];
+
+ /* pcie core supports in direct access to config space */
+ uint32 configaddr; /* pcie config space access: Address field: 0x120 */
+ uint32 configdata; /* pcie config space access: Data field: 0x124 */
+ union {
+ struct {
+ /* mdio access to serdes */
+ uint32 mdiocontrol; /* controls the mdio access: 0x128 */
+ uint32 mdiodata; /* Data to the mdio access: 0x12c */
+ /* pcie protocol phy/dllp/tlp register indirect access mechanism */
+ uint32 pcieindaddr; /* indirect access to the internal register: 0x130 */
+ uint32 pcieinddata; /* Data to/from the internal register: 0x134 */
+ uint32 clkreqenctrl; /* >= rev 6, Clkreq rdma control : 0x138 */
+ uint32 PAD[177];
+ } pcie1;
+ struct {
+ /* mdio access to serdes */
+ uint32 mdiocontrol; /* controls the mdio access: 0x128 */
+ uint32 mdiowrdata; /* write data to mdio 0x12C */
+ uint32 mdiorddata; /* read data to mdio 0x130 */
+ uint32 PAD[3]; /* 0x134-0x138-0x13c */
+ /* door bell registers available from gen2 rev5 onwards */
+ pcie_doorbell_t dbls[PCIEDEV_MAX_DMAS]; /* 0x140 - 0x17F */
+ uint32 dataintf; /* 0x180 */
+ uint32 PAD[1]; /* 0x184 */
+ uint32 d2h_intrlazy_0; /* 0x188 */
+ uint32 h2d_intrlazy_0; /* 0x18c */
+ uint32 h2d_intstat_0; /* 0x190 */
+ uint32 h2d_intmask_0; /* 0x194 */
+ uint32 d2h_intstat_0; /* 0x198 */
+ uint32 d2h_intmask_0; /* 0x19c */
+ uint32 ltr_state; /* 0x1A0 */
+ uint32 pwr_int_status; /* 0x1A4 */
+ uint32 pwr_int_mask; /* 0x1A8 */
+ uint32 PAD[21]; /* 0x1AC - 0x200 */
+ pcie_devdmaregs_t h2d0_dmaregs; /* 0x200 - 0x23c */
+ pcie_devdmaregs_t d2h0_dmaregs; /* 0x240 - 0x27c */
+ pcie_devdmaregs_t h2d1_dmaregs; /* 0x280 - 0x2bc */
+ pcie_devdmaregs_t d2h1_dmaregs; /* 0x2c0 - 0x2fc */
+ pcie_devdmaregs_t h2d2_dmaregs; /* 0x300 - 0x33c */
+ pcie_devdmaregs_t d2h2_dmaregs; /* 0x340 - 0x37c */
+ pcie_devdmaregs_t h2d3_dmaregs; /* 0x380 - 0x3bc */
+ pcie_devdmaregs_t d2h3_dmaregs; /* 0x3c0 - 0x3fc */
+ } pcie2;
+ } u;
+ uint32 pciecfg[4][64]; /* 0x400 - 0x7FF, PCIE Cfg Space */
+ uint16 sprom[64]; /* SPROM shadow Area */
+} sbpcieregs_t;
+
+/* PCI control */
+#define PCIE_RST_OE 0x01 /* When set, drives PCI_RESET out to pin */
+#define PCIE_RST 0x02 /* Value driven out to pin */
+#define PCIE_SPERST 0x04 /* SurvivePeRst */
+#define PCIE_DISABLE_L1CLK_GATING 0x10
+#define PCIE_DLYPERST 0x100 /* Delay PeRst to CoE Core */
+#define PCIE_DISSPROMLD 0x200 /* DisableSpromLoadOnPerst */
+#define PCIE_WakeModeL2 0x1000 /* Wake on L2 */
+
+#define PCIE_CFGADDR 0x120 /* offsetof(configaddr) */
+#define PCIE_CFGDATA 0x124 /* offsetof(configdata) */
+
+/* Interrupt status/mask */
+#define PCIE_INTA 0x01 /* PCIE INTA message is received */
+#define PCIE_INTB 0x02 /* PCIE INTB message is received */
+#define PCIE_INTFATAL 0x04 /* PCIE INTFATAL message is received */
+#define PCIE_INTNFATAL 0x08 /* PCIE INTNONFATAL message is received */
+#define PCIE_INTCORR 0x10 /* PCIE INTCORR message is received */
+#define PCIE_INTPME 0x20 /* PCIE INTPME message is received */
+#define PCIE_PERST 0x40 /* PCIE Reset Interrupt */
+
+#define PCIE_INT_MB_FN0_0 0x0100 /* PCIE to SB Mailbox int Fn0.0 is received */
+#define PCIE_INT_MB_FN0_1 0x0200 /* PCIE to SB Mailbox int Fn0.1 is received */
+#define PCIE_INT_MB_FN1_0 0x0400 /* PCIE to SB Mailbox int Fn1.0 is received */
+#define PCIE_INT_MB_FN1_1 0x0800 /* PCIE to SB Mailbox int Fn1.1 is received */
+#define PCIE_INT_MB_FN2_0 0x1000 /* PCIE to SB Mailbox int Fn2.0 is received */
+#define PCIE_INT_MB_FN2_1 0x2000 /* PCIE to SB Mailbox int Fn2.1 is received */
+#define PCIE_INT_MB_FN3_0 0x4000 /* PCIE to SB Mailbox int Fn3.0 is received */
+#define PCIE_INT_MB_FN3_1 0x8000 /* PCIE to SB Mailbox int Fn3.1 is received */
+
+/* PCIE MailboxInt/MailboxIntMask register */
+#define PCIE_MB_TOSB_FN0_0 0x0001 /* write to assert PCIEtoSB Mailbox interrupt */
+#define PCIE_MB_TOSB_FN0_1 0x0002
+#define PCIE_MB_TOSB_FN1_0 0x0004
+#define PCIE_MB_TOSB_FN1_1 0x0008
+#define PCIE_MB_TOSB_FN2_0 0x0010
+#define PCIE_MB_TOSB_FN2_1 0x0020
+#define PCIE_MB_TOSB_FN3_0 0x0040
+#define PCIE_MB_TOSB_FN3_1 0x0080
+#define PCIE_MB_TOPCIE_FN0_0 0x0100 /* int status/mask for SBtoPCIE Mailbox interrupts */
+#define PCIE_MB_TOPCIE_FN0_1 0x0200
+#define PCIE_MB_TOPCIE_FN1_0 0x0400
+#define PCIE_MB_TOPCIE_FN1_1 0x0800
+#define PCIE_MB_TOPCIE_FN2_0 0x1000
+#define PCIE_MB_TOPCIE_FN2_1 0x2000
+#define PCIE_MB_TOPCIE_FN3_0 0x4000
+#define PCIE_MB_TOPCIE_FN3_1 0x8000
+#define PCIE_MB_TOPCIE_D2H0_DB0 0x10000
+#define PCIE_MB_TOPCIE_D2H0_DB1 0x20000
+#define PCIE_MB_TOPCIE_D2H1_DB0 0x40000
+#define PCIE_MB_TOPCIE_D2H1_DB1 0x80000
+#define PCIE_MB_TOPCIE_D2H2_DB0 0x100000
+#define PCIE_MB_TOPCIE_D2H2_DB1 0x200000
+#define PCIE_MB_TOPCIE_D2H3_DB0 0x400000
+#define PCIE_MB_TOPCIE_D2H3_DB1 0x800000
+
+#define PCIE_MB_D2H_MB_MASK \
+ (PCIE_MB_TOPCIE_D2H0_DB0 | PCIE_MB_TOPCIE_D2H0_DB1 | \
+ PCIE_MB_TOPCIE_D2H1_DB0 | PCIE_MB_TOPCIE_D2H1_DB1 | \
+ PCIE_MB_TOPCIE_D2H2_DB0 | PCIE_MB_TOPCIE_D2H2_DB1 | \
+ PCIE_MB_TOPCIE_D2H3_DB0 | PCIE_MB_TOPCIE_D2H3_DB1)
+
+/* SB to PCIE translation masks */
+#define SBTOPCIE0_MASK 0xfc000000
+#define SBTOPCIE1_MASK 0xfc000000
+#define SBTOPCIE2_MASK 0xc0000000
+
+/* Access type bits (0:1) */
+#define SBTOPCIE_MEM 0
+#define SBTOPCIE_IO 1
+#define SBTOPCIE_CFG0 2
+#define SBTOPCIE_CFG1 3
+
+/* Prefetch enable bit 2 */
+#define SBTOPCIE_PF 4
+
+/* Write Burst enable for memory write bit 3 */
+#define SBTOPCIE_WR_BURST 8
+
+/* config access */
+#define CONFIGADDR_FUNC_MASK 0x7000
+#define CONFIGADDR_FUNC_SHF 12
+#define CONFIGADDR_REG_MASK 0x0FFF
+#define CONFIGADDR_REG_SHF 0
+
+#define PCIE_CONFIG_INDADDR(f, r) ((((f) & CONFIGADDR_FUNC_MASK) << CONFIGADDR_FUNC_SHF) | \
+ (((r) & CONFIGADDR_REG_MASK) << CONFIGADDR_REG_SHF))
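/*
 * Illustrative sketch, not part of the patch: an indirect read of function 0's
 * command/status word through the configaddr/configdata pair of sbpcieregs_t
 * above, with the address encoded by PCIE_CONFIG_INDADDR(). osh and regs are
 * hypothetical handles; W_REG()/R_REG() come from osl.h, PCI_CFG_CMD from
 * pcicfg.h.
 */
static uint32 example_ind_cfg_read(osl_t *osh, sbpcieregs_t *regs)
{
	W_REG(osh, &regs->configaddr, PCIE_CONFIG_INDADDR(0, PCI_CFG_CMD));
	return R_REG(osh, &regs->configdata);
}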
+
+/* PCIE protocol regs Indirect Address */
+#define PCIEADDR_PROT_MASK 0x300
+#define PCIEADDR_PROT_SHF 8
+#define PCIEADDR_PL_TLP 0
+#define PCIEADDR_PL_DLLP 1
+#define PCIEADDR_PL_PLP 2
+
+/* PCIE protocol PHY diagnostic registers */
+#define PCIE_PLP_MODEREG 0x200 /* Mode */
+#define PCIE_PLP_STATUSREG 0x204 /* Status */
+#define PCIE_PLP_LTSSMCTRLREG 0x208 /* LTSSM control */
+#define PCIE_PLP_LTLINKNUMREG 0x20c /* Link Training Link number */
+#define PCIE_PLP_LTLANENUMREG 0x210 /* Link Training Lane number */
+#define PCIE_PLP_LTNFTSREG 0x214 /* Link Training N_FTS */
+#define PCIE_PLP_ATTNREG 0x218 /* Attention */
+#define PCIE_PLP_ATTNMASKREG 0x21C /* Attention Mask */
+#define PCIE_PLP_RXERRCTR 0x220 /* Rx Error */
+#define PCIE_PLP_RXFRMERRCTR 0x224 /* Rx Framing Error */
+#define PCIE_PLP_RXERRTHRESHREG 0x228 /* Rx Error threshold */
+#define PCIE_PLP_TESTCTRLREG 0x22C /* Test Control reg */
+#define PCIE_PLP_SERDESCTRLOVRDREG 0x230 /* SERDES Control Override */
+#define PCIE_PLP_TIMINGOVRDREG 0x234 /* Timing param override */
+#define PCIE_PLP_RXTXSMDIAGREG 0x238 /* RXTX State Machine Diag */
+#define PCIE_PLP_LTSSMDIAGREG 0x23C /* LTSSM State Machine Diag */
+
+/* PCIE protocol DLLP diagnostic registers */
+#define PCIE_DLLP_LCREG 0x100 /* Link Control */
+#define PCIE_DLLP_LSREG 0x104 /* Link Status */
+#define PCIE_DLLP_LAREG 0x108 /* Link Attention */
+#define PCIE_DLLP_LAMASKREG 0x10C /* Link Attention Mask */
+#define PCIE_DLLP_NEXTTXSEQNUMREG 0x110 /* Next Tx Seq Num */
+#define PCIE_DLLP_ACKEDTXSEQNUMREG 0x114 /* Acked Tx Seq Num */
+#define PCIE_DLLP_PURGEDTXSEQNUMREG 0x118 /* Purged Tx Seq Num */
+#define PCIE_DLLP_RXSEQNUMREG 0x11C /* Rx Sequence Number */
+#define PCIE_DLLP_LRREG 0x120 /* Link Replay */
+#define PCIE_DLLP_LACKTOREG 0x124 /* Link Ack Timeout */
+#define PCIE_DLLP_PMTHRESHREG 0x128 /* Power Management Threshold */
+#define PCIE_DLLP_RTRYWPREG 0x12C /* Retry buffer write ptr */
+#define PCIE_DLLP_RTRYRPREG 0x130 /* Retry buffer Read ptr */
+#define PCIE_DLLP_RTRYPPREG 0x134 /* Retry buffer Purged ptr */
+#define PCIE_DLLP_RTRRWREG 0x138 /* Retry buffer Read/Write */
+#define PCIE_DLLP_ECTHRESHREG 0x13C /* Error Count Threshold */
+#define PCIE_DLLP_TLPERRCTRREG 0x140 /* TLP Error Counter */
+#define PCIE_DLLP_ERRCTRREG 0x144 /* Error Counter */
+#define PCIE_DLLP_NAKRXCTRREG 0x148 /* NAK Received Counter */
+#define PCIE_DLLP_TESTREG 0x14C /* Test */
+#define PCIE_DLLP_PKTBIST 0x150 /* Packet BIST */
+#define PCIE_DLLP_PCIE11 0x154 /* DLLP PCIE 1.1 reg */
+
+#define PCIE_DLLP_LSREG_LINKUP (1 << 16)
+
+/* PCIE protocol TLP diagnostic registers */
+#define PCIE_TLP_CONFIGREG 0x000 /* Configuration */
+#define PCIE_TLP_WORKAROUNDSREG 0x004 /* TLP Workarounds */
+#define PCIE_TLP_WRDMAUPPER 0x010 /* Write DMA Upper Address */
+#define PCIE_TLP_WRDMALOWER 0x014 /* Write DMA Lower Address */
+#define PCIE_TLP_WRDMAREQ_LBEREG 0x018 /* Write DMA Len/ByteEn Req */
+#define PCIE_TLP_RDDMAUPPER 0x01C /* Read DMA Upper Address */
+#define PCIE_TLP_RDDMALOWER 0x020 /* Read DMA Lower Address */
+#define PCIE_TLP_RDDMALENREG 0x024 /* Read DMA Len Req */
+#define PCIE_TLP_MSIDMAUPPER 0x028 /* MSI DMA Upper Address */
+#define PCIE_TLP_MSIDMALOWER 0x02C /* MSI DMA Lower Address */
+#define PCIE_TLP_MSIDMALENREG 0x030 /* MSI DMA Len Req */
+#define PCIE_TLP_SLVREQLENREG 0x034 /* Slave Request Len */
+#define PCIE_TLP_FCINPUTSREQ 0x038 /* Flow Control Inputs */
+#define PCIE_TLP_TXSMGRSREQ 0x03C /* Tx StateMachine and Gated Req */
+#define PCIE_TLP_ADRACKCNTARBLEN 0x040 /* Address Ack XferCnt and ARB Len */
+#define PCIE_TLP_DMACPLHDR0 0x044 /* DMA Completion Hdr 0 */
+#define PCIE_TLP_DMACPLHDR1 0x048 /* DMA Completion Hdr 1 */
+#define PCIE_TLP_DMACPLHDR2 0x04C /* DMA Completion Hdr 2 */
+#define PCIE_TLP_DMACPLMISC0 0x050 /* DMA Completion Misc0 */
+#define PCIE_TLP_DMACPLMISC1 0x054 /* DMA Completion Misc1 */
+#define PCIE_TLP_DMACPLMISC2 0x058 /* DMA Completion Misc2 */
+#define PCIE_TLP_SPTCTRLLEN 0x05C /* Split Controller Req len */
+#define PCIE_TLP_SPTCTRLMSIC0 0x060 /* Split Controller Misc 0 */
+#define PCIE_TLP_SPTCTRLMSIC1 0x064 /* Split Controller Misc 1 */
+#define PCIE_TLP_BUSDEVFUNC 0x068 /* Bus/Device/Func */
+#define PCIE_TLP_RESETCTR 0x06C /* Reset Counter */
+#define PCIE_TLP_RTRYBUF 0x070 /* Retry Buffer value */
+#define PCIE_TLP_TGTDEBUG1 0x074 /* Target Debug Reg1 */
+#define PCIE_TLP_TGTDEBUG2 0x078 /* Target Debug Reg2 */
+#define PCIE_TLP_TGTDEBUG3 0x07C /* Target Debug Reg3 */
+#define PCIE_TLP_TGTDEBUG4 0x080 /* Target Debug Reg4 */
+
+/* PCIE2 MDIO register offsets */
+#define PCIE2_MDIO_CONTROL 0x128
+#define PCIE2_MDIO_WR_DATA 0x12C
+#define PCIE2_MDIO_RD_DATA 0x130
+
+
+/* MDIO control */
+#define MDIOCTL_DIVISOR_MASK 0x7f /* clock to be used on MDIO */
+#define MDIOCTL_DIVISOR_VAL 0x2
+#define MDIOCTL_PREAM_EN 0x80 /* Enable preamble sequence */
+#define MDIOCTL_ACCESS_DONE 0x100 /* Transaction complete */
+
+/* MDIO Data */
+#define MDIODATA_MASK 0x0000ffff /* data 2 bytes */
+#define MDIODATA_TA 0x00020000 /* Turnaround */
+#define MDIODATA_REGADDR_SHF_OLD 18 /* Regaddr shift (rev < 10) */
+#define MDIODATA_REGADDR_MASK_OLD 0x003c0000 /* Regaddr Mask (rev < 10) */
+#define MDIODATA_DEVADDR_SHF_OLD 22 /* Physmedia devaddr shift (rev < 10) */
+#define MDIODATA_DEVADDR_MASK_OLD 0x0fc00000 /* Physmedia devaddr Mask (rev < 10) */
+#define MDIODATA_REGADDR_SHF 18 /* Regaddr shift */
+#define MDIODATA_REGADDR_MASK 0x007c0000 /* Regaddr Mask */
+#define MDIODATA_DEVADDR_SHF 23 /* Physmedia devaddr shift */
+#define MDIODATA_DEVADDR_MASK 0x0f800000 /* Physmedia devaddr Mask */
+#define MDIODATA_WRITE 0x10000000 /* write Transaction */
+#define MDIODATA_READ 0x20000000 /* Read Transaction */
+#define MDIODATA_START 0x40000000 /* start of Transaction */
+
+#define MDIODATA_DEV_ADDR 0x0 /* dev address for serdes */
+#define MDIODATA_BLK_ADDR 0x1F /* blk address for serdes */
+
+/* MDIO control/wrData/rdData register defines for PCIE Gen 2 */
+#define MDIOCTL2_DIVISOR_MASK 0x7f /* clock to be used on MDIO */
+#define MDIOCTL2_DIVISOR_VAL 0x2
+#define MDIOCTL2_REGADDR_SHF 8 /* Regaddr shift */
+#define MDIOCTL2_REGADDR_MASK 0x00FFFF00 /* Regaddr Mask */
+#define MDIOCTL2_DEVADDR_SHF 24 /* Physmedia devaddr shift */
+#define MDIOCTL2_DEVADDR_MASK 0x0f000000 /* Physmedia devaddr Mask */
+#define MDIOCTL2_SLAVE_BYPASS 0x10000000 /* IP slave bypass */
+#define MDIOCTL2_READ 0x20000000 /* Read transaction */
+
+#define MDIODATA2_DONE 0x80000000 /* rd/wr transaction done */
+#define MDIODATA2_MASK 0x7FFFFFFF /* rd/wr transaction data */
+#define MDIODATA2_DEVADDR_SHF 4 /* Physmedia devaddr shift */
+
+
+/* MDIO devices (SERDES modules)
+ * Unlike old pcie cores (rev < 10), the rev10 pcie serdes organizes registers into a few blocks.
+ * A two-layer mapping (blockidx, register offset) is required.
+ */
+#define MDIO_DEV_IEEE0 0x000
+#define MDIO_DEV_IEEE1 0x001
+#define MDIO_DEV_BLK0 0x800
+#define MDIO_DEV_BLK1 0x801
+#define MDIO_DEV_BLK2 0x802
+#define MDIO_DEV_BLK3 0x803
+#define MDIO_DEV_BLK4 0x804
+#define MDIO_DEV_TXPLL 0x808 /* TXPLL register block idx */
+#define MDIO_DEV_TXCTRL0 0x820
+#define MDIO_DEV_SERDESID 0x831
+#define MDIO_DEV_RXCTRL0 0x840
+
+
+/* XgxsBlk1_A Register Offsets */
+#define BLK1_PWR_MGMT0 0x16
+#define BLK1_PWR_MGMT1 0x17
+#define BLK1_PWR_MGMT2 0x18
+#define BLK1_PWR_MGMT3 0x19
+#define BLK1_PWR_MGMT4 0x1A
+
+/* serdes regs (rev < 10) */
+#define MDIODATA_DEV_PLL 0x1d /* SERDES PLL Dev */
+#define MDIODATA_DEV_TX 0x1e /* SERDES TX Dev */
+#define MDIODATA_DEV_RX 0x1f /* SERDES RX Dev */
+ /* SERDES RX registers */
+#define SERDES_RX_CTRL 1 /* Rx cntrl */
+#define SERDES_RX_TIMER1 2 /* Rx Timer1 */
+#define SERDES_RX_CDR 6 /* CDR */
+#define SERDES_RX_CDRBW 7 /* CDR BW */
+
+ /* SERDES RX control register */
+#define SERDES_RX_CTRL_FORCE 0x80 /* rxpolarity_force */
+#define SERDES_RX_CTRL_POLARITY 0x40 /* rxpolarity_value */
+
+ /* SERDES PLL registers */
+#define SERDES_PLL_CTRL 1 /* PLL control reg */
+#define PLL_CTRL_FREQDET_EN 0x4000 /* bit 14 is FREQDET on */
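/*
 * Illustrative sketch, not part of the patch: composing one pre-rev10 MDIO
 * write word for the serdes RX control register from the field defines above.
 * val is an arbitrary example value.
 */
static uint32 example_mdio_write_word(uint16 val)
{
	return MDIODATA_START | MDIODATA_WRITE | MDIODATA_TA |
	       (SERDES_RX_CTRL << MDIODATA_REGADDR_SHF_OLD) |
	       (MDIODATA_DEV_RX << MDIODATA_DEVADDR_SHF_OLD) |
	       (val & MDIODATA_MASK);
}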
+
+/* Power management threshold */
+#define PCIE_L0THRESHOLDTIME_MASK 0xFF00 /* bits 0 - 7 */
+#define PCIE_L1THRESHOLDTIME_MASK 0xFF00 /* bits 8 - 15 */
+#define PCIE_L1THRESHOLDTIME_SHIFT 8 /* PCIE_L1THRESHOLDTIME_SHIFT */
+#define PCIE_L1THRESHOLD_WARVAL 0x72 /* WAR value */
+#define PCIE_ASPMTIMER_EXTEND 0x01000000 /* > rev7: enable extend ASPM timer */
+
+/* SPROM offsets */
+#define SRSH_ASPM_OFFSET 4 /* word 4 */
+#define SRSH_ASPM_ENB 0x18 /* bit 3, 4 */
+#define SRSH_ASPM_L1_ENB 0x10 /* bit 4 */
+#define SRSH_ASPM_L0s_ENB 0x8 /* bit 3 */
+#define SRSH_PCIE_MISC_CONFIG 5 /* word 5 */
+#define SRSH_L23READY_EXIT_NOPERST 0x8000 /* bit 15 */
+#define SRSH_CLKREQ_OFFSET_REV5 20 /* word 20 for srom rev <= 5 */
+#define SRSH_CLKREQ_OFFSET_REV8 52 /* word 52 for srom rev 8 */
+#define SRSH_CLKREQ_ENB 0x0800 /* bit 11 */
+#define SRSH_BD_OFFSET 6 /* word 6 */
+#define SRSH_AUTOINIT_OFFSET 18 /* auto initialization enable */
+
+/* Linkcontrol reg offset in PCIE Cap */
+#define PCIE_CAP_LINKCTRL_OFFSET 16 /* linkctrl offset in pcie cap */
+#define PCIE_CAP_LCREG_ASPML0s 0x01 /* ASPM L0s in linkctrl */
+#define PCIE_CAP_LCREG_ASPML1 0x02 /* ASPM L1 in linkctrl */
+#define PCIE_CLKREQ_ENAB 0x100 /* CLKREQ Enab in linkctrl */
+#define PCIE_LINKSPEED_MASK 0xF0000 /* bits 0 - 3 of high word */
+#define PCIE_LINKSPEED_SHIFT 16 /* PCIE_LINKSPEED_SHIFT */
+
+/* Devcontrol reg offset in PCIE Cap */
+#define PCIE_CAP_DEVCTRL_OFFSET 8 /* devctrl offset in pcie cap */
+#define PCIE_CAP_DEVCTRL_MRRS_MASK 0x7000 /* Max read request size mask */
+#define PCIE_CAP_DEVCTRL_MRRS_SHIFT 12 /* Max read request size shift */
+#define PCIE_CAP_DEVCTRL_MRRS_128B 0 /* 128 Byte */
+#define PCIE_CAP_DEVCTRL_MRRS_256B 1 /* 256 Byte */
+#define PCIE_CAP_DEVCTRL_MRRS_512B 2 /* 512 Byte */
+#define PCIE_CAP_DEVCTRL_MRRS_1024B 3 /* 1024 Byte */
+#define PCIE_CAP_DEVCTRL_MPS_MASK 0x00e0 /* Max payload size mask */
+#define PCIE_CAP_DEVCTRL_MPS_SHIFT 5 /* Max payload size shift */
+#define PCIE_CAP_DEVCTRL_MPS_128B 0 /* 128 Byte */
+#define PCIE_CAP_DEVCTRL_MPS_256B 1 /* 256 Byte */
+#define PCIE_CAP_DEVCTRL_MPS_512B 2 /* 512 Byte */
+#define PCIE_CAP_DEVCTRL_MPS_1024B 3 /* 1024 Byte */
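/*
 * Illustrative sketch, not part of the patch: decoding the Max Read Request
 * Size and Max Payload Size encodings from a PCIe Device Control register
 * value using the masks/shifts above. devctrl is a hypothetical value.
 */
static void example_devctrl_decode(uint16 devctrl)
{
	uint16 mrrs = (devctrl & PCIE_CAP_DEVCTRL_MRRS_MASK) >> PCIE_CAP_DEVCTRL_MRRS_SHIFT;
	uint16 mps = (devctrl & PCIE_CAP_DEVCTRL_MPS_MASK) >> PCIE_CAP_DEVCTRL_MPS_SHIFT;

	/* both encodings map 0..3 to 128/256/512/1024 bytes, i.e. 128 << code */
	(void)(128 << mrrs); (void)(128 << mps);
}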
+
+#define PCIE_ASPM_ENAB 3 /* ASPM L0s & L1 in linkctrl */
+#define PCIE_ASPM_L1_ENAB 2 /* ASPM L0s & L1 in linkctrl */
+#define PCIE_ASPM_L0s_ENAB 1 /* ASPM L0s & L1 in linkctrl */
+#define PCIE_ASPM_DISAB 0 /* ASPM L0s & L1 in linkctrl */
+
+#define PCIE_ASPM_L11_ENAB 8 /* ASPM L1.1 in PML1_sub_control2 */
+#define PCIE_ASPM_L12_ENAB 4 /* ASPM L1.2 in PML1_sub_control2 */
+
+/* Devcontrol2 reg offset in PCIE Cap */
+#define PCIE_CAP_DEVCTRL2_OFFSET 0x28 /* devctrl2 offset in pcie cap */
+#define PCIE_CAP_DEVCTRL2_LTR_ENAB_MASK 0x400 /* Latency Tolerance Reporting Enable */
+#define PCIE_CAP_DEVCTRL2_OBFF_ENAB_SHIFT 13 /* Enable OBFF mechanism, select signaling method */
+#define PCIE_CAP_DEVCTRL2_OBFF_ENAB_MASK 0x6000 /* Enable OBFF mechanism, select signaling method */
+
+/* LTR registers in PCIE Cap */
+#define PCIE_LTR0_REG_OFFSET 0x844 /* ltr0_reg offset in pcie cap */
+#define PCIE_LTR1_REG_OFFSET 0x848 /* ltr1_reg offset in pcie cap */
+#define PCIE_LTR2_REG_OFFSET 0x84c /* ltr2_reg offset in pcie cap */
+#define PCIE_LTR0_REG_DEFAULT_60 0x883c883c /* active latency default to 60usec */
+#define PCIE_LTR0_REG_DEFAULT_150 0x88968896 /* active latency default to 150usec */
+#define PCIE_LTR1_REG_DEFAULT 0x88648864 /* idle latency default to 100usec */
+#define PCIE_LTR2_REG_DEFAULT 0x90039003 /* sleep latency default to 3msec */
+
+/* Status reg PCIE_PLP_STATUSREG */
+#define PCIE_PLP_POLARITYINV_STAT 0x10
+
+
+/* PCIE BRCM Vendor CAP REVID reg bits */
+#define BRCMCAP_PCIEREV_CT_MASK 0xF00
+#define BRCMCAP_PCIEREV_CT_SHIFT 8
+#define BRCMCAP_PCIEREV_REVID_MASK 0xFF
+#define BRCMCAP_PCIEREV_REVID_SHIFT 0
+
+#define PCIE_REVREG_CT_PCIE1 0
+#define PCIE_REVREG_CT_PCIE2 1
+
+/* PCIE GEN2 specific defines */
+/* PCIE BRCM Vendor Cap offsets w.r.t to vendor cap ptr */
+#define PCIE2R0_BRCMCAP_REVID_OFFSET 4
+#define PCIE2R0_BRCMCAP_BAR0_WIN0_WRAP_OFFSET 8
+#define PCIE2R0_BRCMCAP_BAR0_WIN2_OFFSET 12
+#define PCIE2R0_BRCMCAP_BAR0_WIN2_WRAP_OFFSET 16
+#define PCIE2R0_BRCMCAP_BAR0_WIN_OFFSET 20
+#define PCIE2R0_BRCMCAP_BAR1_WIN_OFFSET 24
+#define PCIE2R0_BRCMCAP_SPROM_CTRL_OFFSET 28
+#define PCIE2R0_BRCMCAP_BAR2_WIN_OFFSET 32
+#define PCIE2R0_BRCMCAP_INTSTATUS_OFFSET 36
+#define PCIE2R0_BRCMCAP_INTMASK_OFFSET 40
+#define PCIE2R0_BRCMCAP_PCIE2SB_MB_OFFSET 44
+#define PCIE2R0_BRCMCAP_BPADDR_OFFSET 48
+#define PCIE2R0_BRCMCAP_BPDATA_OFFSET 52
+#define PCIE2R0_BRCMCAP_CLKCTLSTS_OFFSET 56
+
+/* definition of configuration space registers of PCIe gen2
+ * http://hwnbu-twiki.sj.broadcom.com/twiki/pub/Mwgroup/CurrentPcieGen2ProgramGuide/pcie_ep.htm
+ */
+#define PCIECFGREG_STATUS_CMD 0x4
+#define PCIECFGREG_PM_CSR 0x4C
+#define PCIECFGREG_MSI_CAP 0x58
+#define PCIECFGREG_MSI_ADDR_L 0x5C
+#define PCIECFGREG_MSI_ADDR_H 0x60
+#define PCIECFGREG_MSI_DATA 0x64
+#define PCIECFGREG_LINK_STATUS_CTRL 0xBC
+#define PCIECFGREG_LINK_STATUS_CTRL2 0xDC
+#define PCIECFGREG_RBAR_CTRL 0x228
+#define PCIECFGREG_PML1_SUB_CTRL1 0x248
+#define PCIECFGREG_REG_BAR2_CONFIG 0x4E0
+#define PCIECFGREG_REG_BAR3_CONFIG 0x4F4
+#define PCIECFGREG_PDL_CTRL1 0x1004
+#define PCIECFGREG_PDL_IDDQ 0x1814
+#define PCIECFGREG_REG_PHY_CTL7 0x181c
+
+/* PCIECFGREG_PML1_SUB_CTRL1 Bit Definition */
+#define PCI_PM_L1_2_ENA_MASK 0x00000001 /* PCI-PM L1.2 Enabled */
+#define PCI_PM_L1_1_ENA_MASK 0x00000002 /* PCI-PM L1.1 Enabled */
+#define ASPM_L1_2_ENA_MASK 0x00000004 /* ASPM L1.2 Enabled */
+#define ASPM_L1_1_ENA_MASK 0x00000008 /* ASPM L1.1 Enabled */
+
+/* PCIe gen2 mailbox interrupt masks */
+#define I_MB 0x3
+#define I_BIT0 0x1
+#define I_BIT1 0x2
+
+/* PCIE gen2 config regs */
+#define PCIIntstatus 0x090
+#define PCIIntmask 0x094
+#define PCISBMbx 0x98
+
+/* enumeration Core regs */
+#define PCIH2D_MailBox 0x140
+#define PCIH2D_DB1 0x144
+#define PCID2H_MailBox 0x148
+#define PCIMailBoxInt 0x48
+#define PCIMailBoxMask 0x4C
+
+#define I_F0_B0 (0x1 << 8) /* Mail box interrupt Function 0 interrupt, bit 0 */
+#define I_F0_B1 (0x1 << 9) /* Mail box interrupt Function 0 interrupt, bit 1 */
+
+#define PCIECFGREG_DEVCONTROL 0xB4
+
+/* SROM hardware region */
+#define SROM_OFFSET_BAR1_CTRL 52
+
+#define BAR1_ENC_SIZE_MASK 0x000e
+#define BAR1_ENC_SIZE_SHIFT 1
+
+#define BAR1_ENC_SIZE_1M 0
+#define BAR1_ENC_SIZE_2M 1
+#define BAR1_ENC_SIZE_4M 2
+
+#define PCIEGEN2_CAP_DEVSTSCTRL2_OFFSET 0xD4
+#define PCIEGEN2_CAP_DEVSTSCTRL2_LTRENAB 0x400
+
+/*
+ * Latency Tolerance Reporting (LTR) states
+ * Active has the least tolerant latency requirement
+ * Sleep is most tolerant
+ */
+#define LTR_ACTIVE 2
+#define LTR_ACTIVE_IDLE 1
+#define LTR_SLEEP 0
+#define LTR_FINAL_MASK 0x300
+#define LTR_FINAL_SHIFT 8
+
+/* pwrinstatus, pwrintmask regs */
+#define PCIEGEN2_PWRINT_D0_STATE_SHIFT 0
+#define PCIEGEN2_PWRINT_D1_STATE_SHIFT 1
+#define PCIEGEN2_PWRINT_D2_STATE_SHIFT 2
+#define PCIEGEN2_PWRINT_D3_STATE_SHIFT 3
+#define PCIEGEN2_PWRINT_L0_LINK_SHIFT 4
+#define PCIEGEN2_PWRINT_L0s_LINK_SHIFT 5
+#define PCIEGEN2_PWRINT_L1_LINK_SHIFT 6
+#define PCIEGEN2_PWRINT_L2_L3_LINK_SHIFT 7
+#define PCIEGEN2_PWRINT_OBFF_CHANGE_SHIFT 8
+
+#define PCIEGEN2_PWRINT_D0_STATE_MASK (1 << PCIEGEN2_PWRINT_D0_STATE_SHIFT)
+#define PCIEGEN2_PWRINT_D1_STATE_MASK (1 << PCIEGEN2_PWRINT_D1_STATE_SHIFT)
+#define PCIEGEN2_PWRINT_D2_STATE_MASK (1 << PCIEGEN2_PWRINT_D2_STATE_SHIFT)
+#define PCIEGEN2_PWRINT_D3_STATE_MASK (1 << PCIEGEN2_PWRINT_D3_STATE_SHIFT)
+#define PCIEGEN2_PWRINT_L0_LINK_MASK (1 << PCIEGEN2_PWRINT_L0_LINK_SHIFT)
+#define PCIEGEN2_PWRINT_L0s_LINK_MASK (1 << PCIEGEN2_PWRINT_L0s_LINK_SHIFT)
+#define PCIEGEN2_PWRINT_L1_LINK_MASK (1 << PCIEGEN2_PWRINT_L1_LINK_SHIFT)
+#define PCIEGEN2_PWRINT_L2_L3_LINK_MASK (1 << PCIEGEN2_PWRINT_L2_L3_LINK_SHIFT)
+#define PCIEGEN2_PWRINT_OBFF_CHANGE_MASK (1 << PCIEGEN2_PWRINT_OBFF_CHANGE_SHIFT)
+
+/* sbtopcie mail box */
+#define SBTOPCIE_MB_FUNC0_SHIFT 8
+#define SBTOPCIE_MB_FUNC1_SHIFT 10
+#define SBTOPCIE_MB_FUNC2_SHIFT 12
+#define SBTOPCIE_MB_FUNC3_SHIFT 14
+
+/* pcieiocstatus */
+#define PCIEGEN2_IOC_D0_STATE_SHIFT 8
+#define PCIEGEN2_IOC_D1_STATE_SHIFT 9
+#define PCIEGEN2_IOC_D2_STATE_SHIFT 10
+#define PCIEGEN2_IOC_D3_STATE_SHIFT 11
+#define PCIEGEN2_IOC_L0_LINK_SHIFT 12
+#define PCIEGEN2_IOC_L1_LINK_SHIFT 13
+#define PCIEGEN2_IOC_L1L2_LINK_SHIFT 14
+#define PCIEGEN2_IOC_L2_L3_LINK_SHIFT 15
+
+#define PCIEGEN2_IOC_D0_STATE_MASK (1 << PCIEGEN2_IOC_D0_STATE_SHIFT)
+#define PCIEGEN2_IOC_D1_STATE_MASK (1 << PCIEGEN2_IOC_D1_STATE_SHIFT)
+#define PCIEGEN2_IOC_D2_STATE_MASK (1 << PCIEGEN2_IOC_D2_STATE_SHIFT)
+#define PCIEGEN2_IOC_D3_STATE_MASK (1 << PCIEGEN2_IOC_D3_STATE_SHIFT)
+#define PCIEGEN2_IOC_L0_LINK_MASK (1 << PCIEGEN2_IOC_L0_LINK_SHIFT)
+#define PCIEGEN2_IOC_L1_LINK_MASK (1 << PCIEGEN2_IOC_L1_LINK_SHIFT)
+#define PCIEGEN2_IOC_L1L2_LINK_MASK (1 << PCIEGEN2_IOC_L1L2_LINK_SHIFT)
+#define PCIEGEN2_IOC_L2_L3_LINK_MASK (1 << PCIEGEN2_IOC_L2_L3_LINK_SHIFT)
+
+/* stat_ctrl */
+#define PCIE_STAT_CTRL_RESET 0x1
+#define PCIE_STAT_CTRL_ENABLE 0x2
+#define PCIE_STAT_CTRL_INTENABLE 0x4
+#define PCIE_STAT_CTRL_INTSTATUS 0x8
+
+#ifdef BCMDRIVER
+void pcie_watchdog_reset(osl_t *osh, si_t *sih, sbpcieregs_t *sbpcieregs);
+#endif /* BCMDRIVER */
+
+#endif /* _PCIE_CORE_H */
*
* Fundamental types and constants relating to 802.11
*
- * $Id: 802.11.h 386067 2013-02-19 15:24:20Z $
+ * $Id: 802.11.h 469158 2014-04-09 21:31:31Z $
*/
#ifndef _802_11_H_
#include <proto/wpa.h>
-
+/* This marks the start of a packed structure section. */
#include <packed_section_start.h>
-#define DOT11_TU_TO_US 1024
-
-
-#define DOT11_A3_HDR_LEN 24
-#define DOT11_A4_HDR_LEN 30
-#define DOT11_MAC_HDR_LEN DOT11_A3_HDR_LEN
-#define DOT11_FCS_LEN 4
-#define DOT11_ICV_LEN 4
-#define DOT11_ICV_AES_LEN 8
-#define DOT11_QOS_LEN 2
-#define DOT11_HTC_LEN 4
-
-#define DOT11_KEY_INDEX_SHIFT 6
-#define DOT11_IV_LEN 4
-#define DOT11_IV_TKIP_LEN 8
-#define DOT11_IV_AES_OCB_LEN 4
-#define DOT11_IV_AES_CCM_LEN 8
-#define DOT11_IV_MAX_LEN 8
-
-
-#define DOT11_MAX_MPDU_BODY_LEN 2304
-
+#define DOT11_TU_TO_US 1024 /* 802.11 Time Unit is 1024 microseconds */
+
+/* Generic 802.11 frame constants */
+#define DOT11_A3_HDR_LEN 24 /* d11 header length with A3 */
+#define DOT11_A4_HDR_LEN 30 /* d11 header length with A4 */
+#define DOT11_MAC_HDR_LEN DOT11_A3_HDR_LEN /* MAC header length */
+#define DOT11_FCS_LEN 4 /* d11 FCS length */
+#define DOT11_ICV_LEN 4 /* d11 ICV length */
+#define DOT11_ICV_AES_LEN 8 /* d11 ICV/AES length */
+#define DOT11_QOS_LEN 2 /* d11 QoS length */
+#define DOT11_HTC_LEN 4 /* d11 HT Control field length */
+
+#define DOT11_KEY_INDEX_SHIFT 6 /* d11 key index shift */
+#define DOT11_IV_LEN 4 /* d11 IV length */
+#define DOT11_IV_TKIP_LEN 8 /* d11 IV TKIP length */
+#define DOT11_IV_AES_OCB_LEN 4 /* d11 IV/AES/OCB length */
+#define DOT11_IV_AES_CCM_LEN 8 /* d11 IV/AES/CCM length */
+#define DOT11_IV_MAX_LEN 8 /* maximum iv len for any encryption */
+
+/* Includes MIC */
+#define DOT11_MAX_MPDU_BODY_LEN 2304 /* max MPDU body length */
+/* A4 header + QoS + CCMP + PDU + ICV + FCS = 2352 */
#define DOT11_MAX_MPDU_LEN (DOT11_A4_HDR_LEN + \
DOT11_QOS_LEN + \
DOT11_IV_AES_CCM_LEN + \
DOT11_MAX_MPDU_BODY_LEN + \
DOT11_ICV_LEN + \
- DOT11_FCS_LEN)
+ DOT11_FCS_LEN) /* d11 max MPDU length */
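/*
 * Worked check, not part of the patch: DOT11_A4_HDR_LEN(30) + DOT11_QOS_LEN(2)
 * + DOT11_IV_AES_CCM_LEN(8) + DOT11_MAX_MPDU_BODY_LEN(2304) + DOT11_ICV_LEN(4)
 * + DOT11_FCS_LEN(4) = 2352 bytes, matching the "= 2352" comment above.
 */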
-#define DOT11_MAX_SSID_LEN 32
+#define DOT11_MAX_SSID_LEN 32 /* d11 max ssid length */
+/* dot11RTSThreshold */
+#define DOT11_DEFAULT_RTS_LEN 2347 /* d11 default RTS length */
+#define DOT11_MAX_RTS_LEN 2347 /* d11 max RTS length */
-#define DOT11_DEFAULT_RTS_LEN 2347
-#define DOT11_MAX_RTS_LEN 2347
+/* dot11FragmentationThreshold */
+#define DOT11_MIN_FRAG_LEN 256 /* d11 min fragmentation length */
+#define DOT11_MAX_FRAG_LEN 2346 /* Max frag is also limited by aMPDUMaxLength
+ * of the attached PHY
+ */
+#define DOT11_DEFAULT_FRAG_LEN 2346 /* d11 default fragmentation length */
+/* dot11BeaconPeriod */
+#define DOT11_MIN_BEACON_PERIOD 1 /* d11 min beacon period */
+#define DOT11_MAX_BEACON_PERIOD 0xFFFF /* d11 max beacon period */
-#define DOT11_MIN_FRAG_LEN 256
-#define DOT11_MAX_FRAG_LEN 2346
-#define DOT11_DEFAULT_FRAG_LEN 2346
+/* dot11DTIMPeriod */
+#define DOT11_MIN_DTIM_PERIOD 1 /* d11 min DTIM period */
+#define DOT11_MAX_DTIM_PERIOD 0xFF /* d11 max DTIM period */
-
-#define DOT11_MIN_BEACON_PERIOD 1
-#define DOT11_MAX_BEACON_PERIOD 0xFFFF
-
-
-#define DOT11_MIN_DTIM_PERIOD 1
-#define DOT11_MAX_DTIM_PERIOD 0xFF
-
-
-#define DOT11_LLC_SNAP_HDR_LEN 8
-#define DOT11_OUI_LEN 3
+/** 802.2 LLC/SNAP header used by 802.11 per 802.1H */
+#define DOT11_LLC_SNAP_HDR_LEN 8 /* d11 LLC/SNAP header length */
+#define DOT11_OUI_LEN 3 /* d11 OUI length */
BWL_PRE_PACKED_STRUCT struct dot11_llc_snap_header {
- uint8 dsap;
- uint8 ssap;
- uint8 ctl;
- uint8 oui[DOT11_OUI_LEN];
- uint16 type;
-} BWL_POST_PACKED_STRUCT;
-
-
-#define RFC1042_HDR_LEN (ETHER_HDR_LEN + DOT11_LLC_SNAP_HDR_LEN)
-
-
-
+ uint8 dsap; /* always 0xAA */
+ uint8 ssap; /* always 0xAA */
+ uint8 ctl; /* always 0x03 */
+ uint8 oui[DOT11_OUI_LEN]; /* RFC1042: 0x00 0x00 0x00
+ * Bridge-Tunnel: 0x00 0x00 0xF8
+ */
+ uint16 type; /* ethertype */
+} BWL_POST_PACKED_STRUCT;
+
+/* RFC1042 header used by 802.11 per 802.1H */
+#define RFC1042_HDR_LEN (ETHER_HDR_LEN + DOT11_LLC_SNAP_HDR_LEN) /* RFC1042 header length */
+
+/* Generic 802.11 MAC header */
+/**
+ * N.B.: This struct reflects the full 4 address 802.11 MAC header.
+ * The fields are defined such that the shorter 1, 2, and 3
+ * address headers just use the first k fields.
+ */
BWL_PRE_PACKED_STRUCT struct dot11_header {
- uint16 fc;
- uint16 durid;
- struct ether_addr a1;
- struct ether_addr a2;
- struct ether_addr a3;
- uint16 seq;
- struct ether_addr a4;
+ uint16 fc; /* frame control */
+ uint16 durid; /* duration/ID */
+ struct ether_addr a1; /* address 1 */
+ struct ether_addr a2; /* address 2 */
+ struct ether_addr a3; /* address 3 */
+ uint16 seq; /* sequence control */
+ struct ether_addr a4; /* address 4 */
} BWL_POST_PACKED_STRUCT;
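/*
 * Worked check, not part of the patch: fc(2) + durid(2) + a1(6) + a2(6) +
 * a3(6) + seq(2) = 24 bytes = DOT11_A3_HDR_LEN, and adding a4(6) gives
 * 30 bytes = DOT11_A4_HDR_LEN, so the shorter headers really are prefixes
 * of this 4-address layout.
 */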
-
+/* Control frames */
BWL_PRE_PACKED_STRUCT struct dot11_rts_frame {
- uint16 fc;
- uint16 durid;
- struct ether_addr ra;
- struct ether_addr ta;
+ uint16 fc; /* frame control */
+ uint16 durid; /* duration/ID */
+ struct ether_addr ra; /* receiver address */
+ struct ether_addr ta; /* transmitter address */
} BWL_POST_PACKED_STRUCT;
-#define DOT11_RTS_LEN 16
+#define DOT11_RTS_LEN 16 /* d11 RTS frame length */
BWL_PRE_PACKED_STRUCT struct dot11_cts_frame {
- uint16 fc;
- uint16 durid;
- struct ether_addr ra;
+ uint16 fc; /* frame control */
+ uint16 durid; /* duration/ID */
+ struct ether_addr ra; /* receiver address */
} BWL_POST_PACKED_STRUCT;
-#define DOT11_CTS_LEN 10
+#define DOT11_CTS_LEN 10 /* d11 CTS frame length */
BWL_PRE_PACKED_STRUCT struct dot11_ack_frame {
- uint16 fc;
- uint16 durid;
- struct ether_addr ra;
+ uint16 fc; /* frame control */
+ uint16 durid; /* duration/ID */
+ struct ether_addr ra; /* receiver address */
} BWL_POST_PACKED_STRUCT;
-#define DOT11_ACK_LEN 10
+#define DOT11_ACK_LEN 10 /* d11 ACK frame length */
BWL_PRE_PACKED_STRUCT struct dot11_ps_poll_frame {
- uint16 fc;
- uint16 durid;
- struct ether_addr bssid;
- struct ether_addr ta;
+ uint16 fc; /* frame control */
+ uint16 durid; /* AID */
+ struct ether_addr bssid; /* receiver address, STA in AP */
+ struct ether_addr ta; /* transmitter address */
} BWL_POST_PACKED_STRUCT;
-#define DOT11_PS_POLL_LEN 16
+#define DOT11_PS_POLL_LEN 16 /* d11 PS poll frame length */
BWL_PRE_PACKED_STRUCT struct dot11_cf_end_frame {
- uint16 fc;
- uint16 durid;
- struct ether_addr ra;
- struct ether_addr bssid;
+ uint16 fc; /* frame control */
+ uint16 durid; /* duration/ID */
+ struct ether_addr ra; /* receiver address */
+ struct ether_addr bssid; /* transmitter address, STA in AP */
} BWL_POST_PACKED_STRUCT;
-#define DOT11_CS_END_LEN 16
-
+#define DOT11_CS_END_LEN 16 /* d11 CF-END frame length */
+/**
+ * RWL wifi protocol: The Vendor Specific Action frame is defined for vendor-specific signaling
+ * category+OUI+vendor specific content (this can be variable)
+ */
BWL_PRE_PACKED_STRUCT struct dot11_action_wifi_vendor_specific {
uint8 category;
uint8 OUI[3];
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_action_wifi_vendor_specific dot11_action_wifi_vendor_specific_t;
-
+/** generic vendor specific action frame with variable length */
BWL_PRE_PACKED_STRUCT struct dot11_action_vs_frmhdr {
uint8 category;
uint8 OUI[3];
uint8 data[1];
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_action_vs_frmhdr dot11_action_vs_frmhdr_t;
+
#define DOT11_ACTION_VS_HDR_LEN 6
#define BCM_ACTION_OUI_BYTE0 0x00
#define BCM_ACTION_OUI_BYTE1 0x90
#define BCM_ACTION_OUI_BYTE2 0x4c
+/* BA/BAR Control parameters */
+#define DOT11_BA_CTL_POLICY_NORMAL 0x0000 /* normal ack */
+#define DOT11_BA_CTL_POLICY_NOACK 0x0001 /* no ack */
+#define DOT11_BA_CTL_POLICY_MASK 0x0001 /* ack policy mask */
-#define DOT11_BA_CTL_POLICY_NORMAL 0x0000
-#define DOT11_BA_CTL_POLICY_NOACK 0x0001
-#define DOT11_BA_CTL_POLICY_MASK 0x0001
+#define DOT11_BA_CTL_MTID 0x0002 /* multi tid BA */
+#define DOT11_BA_CTL_COMPRESSED 0x0004 /* compressed bitmap */
-#define DOT11_BA_CTL_MTID 0x0002
-#define DOT11_BA_CTL_COMPRESSED 0x0004
-
-#define DOT11_BA_CTL_NUMMSDU_MASK 0x0FC0
-#define DOT11_BA_CTL_NUMMSDU_SHIFT 6
-
-#define DOT11_BA_CTL_TID_MASK 0xF000
-#define DOT11_BA_CTL_TID_SHIFT 12
+#define DOT11_BA_CTL_NUMMSDU_MASK 0x0FC0 /* num msdu in bitmap mask */
+#define DOT11_BA_CTL_NUMMSDU_SHIFT 6 /* num msdu in bitmap shift */
+#define DOT11_BA_CTL_TID_MASK 0xF000 /* tid mask */
+#define DOT11_BA_CTL_TID_SHIFT 12 /* tid shift */
+/** control frame header (BA/BAR) */
BWL_PRE_PACKED_STRUCT struct dot11_ctl_header {
- uint16 fc;
- uint16 durid;
- struct ether_addr ra;
- struct ether_addr ta;
+ uint16 fc; /* frame control */
+ uint16 durid; /* duration/ID */
+ struct ether_addr ra; /* receiver address */
+ struct ether_addr ta; /* transmitter address */
} BWL_POST_PACKED_STRUCT;
-#define DOT11_CTL_HDR_LEN 16
-
+#define DOT11_CTL_HDR_LEN 16 /* control frame hdr len */
+/** BAR frame payload */
BWL_PRE_PACKED_STRUCT struct dot11_bar {
- uint16 bar_control;
- uint16 seqnum;
+ uint16 bar_control; /* BAR Control */
+ uint16 seqnum; /* Starting Sequence control */
} BWL_POST_PACKED_STRUCT;
-#define DOT11_BAR_LEN 4
-
-#define DOT11_BA_BITMAP_LEN 128
-#define DOT11_BA_CMP_BITMAP_LEN 8
+#define DOT11_BAR_LEN 4 /* BAR frame payload length */
+#define DOT11_BA_BITMAP_LEN 128 /* bitmap length */
+#define DOT11_BA_CMP_BITMAP_LEN 8 /* compressed bitmap length */
+/** BA frame payload */
BWL_PRE_PACKED_STRUCT struct dot11_ba {
- uint16 ba_control;
- uint16 seqnum;
- uint8 bitmap[DOT11_BA_BITMAP_LEN];
+ uint16 ba_control; /* BA Control */
+ uint16 seqnum; /* Starting Sequence control */
+ uint8 bitmap[DOT11_BA_BITMAP_LEN]; /* Block Ack Bitmap */
} BWL_POST_PACKED_STRUCT;
-#define DOT11_BA_LEN 4
-
+#define DOT11_BA_LEN 4 /* BA frame payload len (wo bitmap) */
+/** Management frame header */
BWL_PRE_PACKED_STRUCT struct dot11_management_header {
- uint16 fc;
- uint16 durid;
- struct ether_addr da;
- struct ether_addr sa;
- struct ether_addr bssid;
- uint16 seq;
+ uint16 fc; /* frame control */
+ uint16 durid; /* duration/ID */
+ struct ether_addr da; /* receiver address */
+ struct ether_addr sa; /* transmitter address */
+ struct ether_addr bssid; /* BSS ID */
+ uint16 seq; /* sequence control */
} BWL_POST_PACKED_STRUCT;
-#define DOT11_MGMT_HDR_LEN 24
-
+#define DOT11_MGMT_HDR_LEN 24 /* d11 management header length */
+/* Management frame payloads */
BWL_PRE_PACKED_STRUCT struct dot11_bcn_prb {
uint32 timestamp[2];
uint16 beacon_interval;
uint16 capability;
} BWL_POST_PACKED_STRUCT;
-#define DOT11_BCN_PRB_LEN 12
-#define DOT11_BCN_PRB_FIXED_LEN 12
+#define DOT11_BCN_PRB_LEN 12 /* 802.11 beacon/probe frame fixed length */
+#define DOT11_BCN_PRB_FIXED_LEN 12 /* 802.11 beacon/probe frame fixed parameters length */
BWL_PRE_PACKED_STRUCT struct dot11_auth {
- uint16 alg;
- uint16 seq;
- uint16 status;
+ uint16 alg; /* algorithm */
+ uint16 seq; /* sequence control */
+ uint16 status; /* status code */
} BWL_POST_PACKED_STRUCT;
-#define DOT11_AUTH_FIXED_LEN 6
+#define DOT11_AUTH_FIXED_LEN 6 /* length of auth frame without challenge IE */
BWL_PRE_PACKED_STRUCT struct dot11_assoc_req {
- uint16 capability;
- uint16 listen;
+ uint16 capability; /* capability information */
+ uint16 listen; /* listen interval */
} BWL_POST_PACKED_STRUCT;
-#define DOT11_ASSOC_REQ_FIXED_LEN 4
+#define DOT11_ASSOC_REQ_FIXED_LEN 4 /* length of assoc frame without info elts */
BWL_PRE_PACKED_STRUCT struct dot11_reassoc_req {
- uint16 capability;
- uint16 listen;
- struct ether_addr ap;
+ uint16 capability; /* capability information */
+ uint16 listen; /* listen interval */
+ struct ether_addr ap; /* Current AP address */
} BWL_POST_PACKED_STRUCT;
-#define DOT11_REASSOC_REQ_FIXED_LEN 10
+#define DOT11_REASSOC_REQ_FIXED_LEN 10 /* length of reassoc frame without info elts */
BWL_PRE_PACKED_STRUCT struct dot11_assoc_resp {
- uint16 capability;
- uint16 status;
- uint16 aid;
+ uint16 capability; /* capability information */
+ uint16 status; /* status code */
+ uint16 aid; /* association ID */
} BWL_POST_PACKED_STRUCT;
-#define DOT11_ASSOC_RESP_FIXED_LEN 6
+#define DOT11_ASSOC_RESP_FIXED_LEN 6 /* length of assoc resp frame without info elts */
BWL_PRE_PACKED_STRUCT struct dot11_action_measure {
uint8 category;
uint8 token;
uint8 data[1];
} BWL_POST_PACKED_STRUCT;
-#define DOT11_ACTION_MEASURE_LEN 3
+#define DOT11_ACTION_MEASURE_LEN 3 /* d11 action measurement header length */
BWL_PRE_PACKED_STRUCT struct dot11_action_ht_ch_width {
uint8 category;
#define SM_PWRSAVE_ENABLE 1
#define SM_PWRSAVE_MODE 2
-
+/* ************* 802.11h related definitions. ************* */
BWL_PRE_PACKED_STRUCT struct dot11_power_cnst {
uint8 id;
uint8 len;
typedef struct dot11_power_cnst dot11_power_cnst_t;
BWL_PRE_PACKED_STRUCT struct dot11_power_cap {
- uint8 min;
- uint8 max;
+ int8 min;
+ int8 max;
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_power_cap dot11_power_cap_t;
uint8 margin;
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_tpc_rep dot11_tpc_rep_t;
-#define DOT11_MNG_IE_TPC_REPORT_LEN 2
+#define DOT11_MNG_IE_TPC_REPORT_LEN 2 /* length of IE data, not including 2 byte header */
BWL_PRE_PACKED_STRUCT struct dot11_supp_channels {
uint8 id;
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_supp_channels dot11_supp_channels_t;
-
+/**
+ * Extension Channel Offset IE: 802.11n-D1.0 spec. added sideband
+ * offset for 40MHz operation. The possible 3 values are:
+ * 1 = above control channel
+ * 3 = below control channel
+ * 0 = no extension channel
+ */
BWL_PRE_PACKED_STRUCT struct dot11_extch {
- uint8 id;
- uint8 len;
+ uint8 id; /* IE ID, 62, DOT11_MNG_EXT_CHANNEL_OFFSET */
+ uint8 len; /* IE length */
uint8 extch;
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_extch dot11_extch_ie_t;
BWL_PRE_PACKED_STRUCT struct dot11_brcm_extch {
- uint8 id;
- uint8 len;
- uint8 oui[3];
- uint8 type;
+ uint8 id; /* IE ID, 221, DOT11_MNG_PROPR_ID */
+ uint8 len; /* IE length */
+ uint8 oui[3];
+ uint8 type; /* type indicates what follows */
uint8 extch;
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_brcm_extch dot11_brcm_extch_ie_t;
#define BRCM_EXTCH_IE_LEN 5
-#define BRCM_EXTCH_IE_TYPE 53
+#define BRCM_EXTCH_IE_TYPE 53 /* 802.11n ID not yet assigned */
#define DOT11_EXTCH_IE_LEN 1
-#define DOT11_EXT_CH_MASK 0x03
-#define DOT11_EXT_CH_UPPER 0x01
-#define DOT11_EXT_CH_LOWER 0x03
-#define DOT11_EXT_CH_NONE 0x00
+#define DOT11_EXT_CH_MASK 0x03 /* extension channel mask */
+#define DOT11_EXT_CH_UPPER 0x01 /* ext. ch. on upper sb */
+#define DOT11_EXT_CH_LOWER 0x03 /* ext. ch. on lower sb */
+#define DOT11_EXT_CH_NONE 0x00 /* no extension ch. */
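+/* Example: for a 40 MHz BSS whose extension channel sits above the control channel,
+ * (extch & DOT11_EXT_CH_MASK) == DOT11_EXT_CH_UPPER; DOT11_EXT_CH_NONE indicates a
+ * plain 20 MHz BSS with no extension channel.
+ */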
BWL_PRE_PACKED_STRUCT struct dot11_action_frmhdr {
uint8 category;
} BWL_POST_PACKED_STRUCT;
#define DOT11_ACTION_FRMHDR_LEN 2
-
+/** CSA IE data structure */
BWL_PRE_PACKED_STRUCT struct dot11_channel_switch {
- uint8 id;
- uint8 len;
- uint8 mode;
- uint8 channel;
- uint8 count;
+ uint8 id; /* id DOT11_MNG_CHANNEL_SWITCH_ID */
+ uint8 len; /* length of IE */
+ uint8 mode; /* mode 0 or 1 */
+ uint8 channel; /* channel switch to */
+ uint8 count; /* number of beacons before switching */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_channel_switch dot11_chan_switch_ie_t;
-#define DOT11_SWITCH_IE_LEN 3
-
-#define DOT11_CSA_MODE_ADVISORY 0
-#define DOT11_CSA_MODE_NO_TX 1
+#define DOT11_SWITCH_IE_LEN 3 /* length of IE data, not including 2 byte header */
+/* CSA mode - 802.11h-2003 $7.3.2.20 */
+#define DOT11_CSA_MODE_ADVISORY 0 /* no DOT11_CSA_MODE_NO_TX restriction imposed */
+#define DOT11_CSA_MODE_NO_TX 1 /* no transmission upon receiving CSA frame. */
BWL_PRE_PACKED_STRUCT struct dot11_action_switch_channel {
uint8 category;
uint8 action;
- dot11_chan_switch_ie_t chan_switch_ie;
- dot11_brcm_extch_ie_t extch_ie;
+ dot11_chan_switch_ie_t chan_switch_ie; /* for switch IE */
+ dot11_brcm_extch_ie_t extch_ie; /* extension channel offset */
} BWL_POST_PACKED_STRUCT;
BWL_PRE_PACKED_STRUCT struct dot11_csa_body {
- uint8 mode;
- uint8 reg;
- uint8 channel;
- uint8 count;
+ uint8 mode; /* mode 0 or 1 */
+ uint8 reg; /* regulatory class */
+ uint8 channel; /* channel switch to */
+ uint8 count; /* number of beacons before switching */
} BWL_POST_PACKED_STRUCT;
-
+/** 11n Extended Channel Switch IE data structure */
BWL_PRE_PACKED_STRUCT struct dot11_ext_csa {
- uint8 id;
- uint8 len;
- struct dot11_csa_body b;
+ uint8 id; /* id DOT11_MNG_EXT_CSA_ID */
+ uint8 len; /* length of IE */
+ struct dot11_csa_body b; /* body of the ie */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_ext_csa dot11_ext_csa_ie_t;
-#define DOT11_EXT_CSA_IE_LEN 4
+#define DOT11_EXT_CSA_IE_LEN 4 /* length of extended channel switch IE body */
BWL_PRE_PACKED_STRUCT struct dot11_action_ext_csa {
uint8 category;
uint8 action;
- dot11_ext_csa_ie_t chan_switch_ie;
+ dot11_ext_csa_ie_t chan_switch_ie; /* for switch IE */
} BWL_POST_PACKED_STRUCT;
BWL_PRE_PACKED_STRUCT struct dot11y_action_ext_csa {
uint8 category;
uint8 action;
- struct dot11_csa_body b;
+ struct dot11_csa_body b; /* body of the ie */
} BWL_POST_PACKED_STRUCT;
-
+/** Wide Bandwidth Channel Switch IE data structure */
BWL_PRE_PACKED_STRUCT struct dot11_wide_bw_channel_switch {
- uint8 id;
- uint8 len;
- uint8 channel_width;
- uint8 center_frequency_segment_0;
- uint8 center_frequency_segment_1;
+ uint8 id; /* id DOT11_MNG_WIDE_BW_CHANNEL_SWITCH_ID */
+ uint8 len; /* length of IE */
+ uint8 channel_width; /* new channel width */
+ uint8 center_frequency_segment_0; /* center frequency segment 0 */
+ uint8 center_frequency_segment_1; /* center frequency segment 1 */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_wide_bw_channel_switch dot11_wide_bw_chan_switch_ie_t;
-#define DOT11_WIDE_BW_SWITCH_IE_LEN 3
-
+#define DOT11_WIDE_BW_SWITCH_IE_LEN 3 /* length of IE data, not including 2 byte header */
+/** Channel Switch Wrapper IE data structure */
BWL_PRE_PACKED_STRUCT struct dot11_channel_switch_wrapper {
- uint8 id;
- uint8 len;
+ uint8 id; /* id DOT11_MNG_CHANNEL_SWITCH_WRAPPER_ID */
+ uint8 len; /* length of IE */
dot11_wide_bw_chan_switch_ie_t wb_chan_switch_ie;
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_channel_switch_wrapper dot11_chan_switch_wrapper_ie_t;
-
+/** VHT Transmit Power Envelope IE data structure */
BWL_PRE_PACKED_STRUCT struct dot11_vht_transmit_power_envelope {
- uint8 id;
- uint8 len;
+ uint8 id; /* id DOT11_MNG_VHT_TRANSMIT_POWER_ENVELOPE_ID */
+ uint8 len; /* length of IE */
uint8 transmit_power_info;
uint8 local_max_transmit_power_20;
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_vht_transmit_power_envelope dot11_vht_transmit_power_envelope_ie_t;
+/* vht transmit power envelope IE length depends on channel width */
+#define DOT11_VHT_TRANSMIT_PWR_ENVELOPE_IE_LEN_40MHZ 1
+#define DOT11_VHT_TRANSMIT_PWR_ENVELOPE_IE_LEN_80MHZ 2
+#define DOT11_VHT_TRANSMIT_PWR_ENVELOPE_IE_LEN_160MHZ 3
BWL_PRE_PACKED_STRUCT struct dot11_obss_coex {
uint8 id;
uint8 info;
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_obss_coex dot11_obss_coex_t;
-#define DOT11_OBSS_COEXINFO_LEN 1
+#define DOT11_OBSS_COEXINFO_LEN 1 /* length of OBSS Coexistence INFO IE */
#define DOT11_OBSS_COEX_INFO_REQ 0x01
#define DOT11_OBSS_COEX_40MHZ_INTOLERANT 0x02
uint8 chanlist[1];
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_obss_chanlist dot11_obss_chanlist_t;
-#define DOT11_OBSS_CHANLIST_FIXED_LEN 1
+#define DOT11_OBSS_CHANLIST_FIXED_LEN 1 /* fixed length of regclass */
BWL_PRE_PACKED_STRUCT struct dot11_extcap_ie {
uint8 id;
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_extcap dot11_extcap_t;
-
-#define DOT11_TDLS_CAP_TDLS 37
-#define DOT11_TDLS_CAP_PU_BUFFER_STA 28
-#define DOT11_TDLS_CAP_PEER_PSM 20
-#define DOT11_TDLS_CAP_CH_SW 30
-#define DOT11_TDLS_CAP_PROH 38
-#define DOT11_TDLS_CAP_CH_SW_PROH 39
-#define DOT11_TDLS_CAP_TDLS_WIDER_BW 61
-
-#define TDLS_CAP_MAX_BIT 39
-
-
-
-#define DOT11_MEASURE_TYPE_BASIC 0
-#define DOT11_MEASURE_TYPE_CCA 1
-#define DOT11_MEASURE_TYPE_RPI 2
-#define DOT11_MEASURE_TYPE_CHLOAD 3
-#define DOT11_MEASURE_TYPE_NOISE 4
-#define DOT11_MEASURE_TYPE_BEACON 5
-#define DOT11_MEASURE_TYPE_FRAME 6
-#define DOT11_MEASURE_TYPE_STAT 7
-#define DOT11_MEASURE_TYPE_LCI 8
-#define DOT11_MEASURE_TYPE_TXSTREAM 9
-#define DOT11_MEASURE_TYPE_PAUSE 255
-
-
-#define DOT11_MEASURE_MODE_PARALLEL (1<<0)
-#define DOT11_MEASURE_MODE_ENABLE (1<<1)
-#define DOT11_MEASURE_MODE_REQUEST (1<<2)
-#define DOT11_MEASURE_MODE_REPORT (1<<3)
-#define DOT11_MEASURE_MODE_DUR (1<<4)
-
-#define DOT11_MEASURE_MODE_LATE (1<<0)
-#define DOT11_MEASURE_MODE_INCAPABLE (1<<1)
-#define DOT11_MEASURE_MODE_REFUSED (1<<2)
-
-#define DOT11_MEASURE_BASIC_MAP_BSS ((uint8)(1<<0))
-#define DOT11_MEASURE_BASIC_MAP_OFDM ((uint8)(1<<1))
-#define DOT11_MEASURE_BASIC_MAP_UKNOWN ((uint8)(1<<2))
-#define DOT11_MEASURE_BASIC_MAP_RADAR ((uint8)(1<<3))
-#define DOT11_MEASURE_BASIC_MAP_UNMEAS ((uint8)(1<<4))
+/* TDLS Capabilities */
+#define DOT11_TDLS_CAP_TDLS 37 /* TDLS support */
+#define DOT11_TDLS_CAP_PU_BUFFER_STA 28 /* TDLS Peer U-APSD buffer STA support */
+#define DOT11_TDLS_CAP_PEER_PSM 20 /* TDLS Peer PSM support */
+#define DOT11_TDLS_CAP_CH_SW 30 /* TDLS Channel switch */
+#define DOT11_TDLS_CAP_PROH 38 /* TDLS prohibited */
+#define DOT11_TDLS_CAP_CH_SW_PROH 39 /* TDLS Channel switch prohibited */
+#define DOT11_TDLS_CAP_TDLS_WIDER_BW 61 /* TDLS Wider Band-Width */
+
+#define TDLS_CAP_MAX_BIT 39 /* TDLS max bit defined in ext cap */
+
+/* 802.11h/802.11k Measurement Request/Report IEs */
+/* Measurement Type field */
+#define DOT11_MEASURE_TYPE_BASIC 0 /* d11 measurement basic type */
+#define DOT11_MEASURE_TYPE_CCA 1 /* d11 measurement CCA type */
+#define DOT11_MEASURE_TYPE_RPI 2 /* d11 measurement RPI type */
+#define DOT11_MEASURE_TYPE_CHLOAD 3 /* d11 measurement Channel Load type */
+#define DOT11_MEASURE_TYPE_NOISE 4 /* d11 measurement Noise Histogram type */
+#define DOT11_MEASURE_TYPE_BEACON 5 /* d11 measurement Beacon type */
+#define DOT11_MEASURE_TYPE_FRAME 6 /* d11 measurement Frame type */
+#define DOT11_MEASURE_TYPE_STAT 7 /* d11 measurement STA Statistics type */
+#define DOT11_MEASURE_TYPE_LCI 8 /* d11 measurement LCI type */
+#define DOT11_MEASURE_TYPE_TXSTREAM 9 /* d11 measurement TX Stream type */
+#define DOT11_MEASURE_TYPE_PAUSE 255 /* d11 measurement pause type */
+
+/* Measurement Request Modes */
+#define DOT11_MEASURE_MODE_PARALLEL (1<<0) /* d11 measurement parallel */
+#define DOT11_MEASURE_MODE_ENABLE (1<<1) /* d11 measurement enable */
+#define DOT11_MEASURE_MODE_REQUEST (1<<2) /* d11 measurement request */
+#define DOT11_MEASURE_MODE_REPORT (1<<3) /* d11 measurement report */
+#define DOT11_MEASURE_MODE_DUR (1<<4) /* d11 measurement dur mandatory */
+/* Measurement Report Modes */
+#define DOT11_MEASURE_MODE_LATE (1<<0) /* d11 measurement late */
+#define DOT11_MEASURE_MODE_INCAPABLE (1<<1) /* d11 measurement incapable */
+#define DOT11_MEASURE_MODE_REFUSED (1<<2) /* d11 measurement refuse */
+/* Basic Measurement Map bits */
+#define DOT11_MEASURE_BASIC_MAP_BSS ((uint8)(1<<0)) /* d11 measurement basic map BSS */
+#define DOT11_MEASURE_BASIC_MAP_OFDM ((uint8)(1<<1)) /* d11 measurement map OFDM */
+#define DOT11_MEASURE_BASIC_MAP_UKNOWN ((uint8)(1<<2)) /* d11 measurement map unknown */
+#define DOT11_MEASURE_BASIC_MAP_RADAR ((uint8)(1<<3)) /* d11 measurement map radar */
+#define DOT11_MEASURE_BASIC_MAP_UNMEAS ((uint8)(1<<4)) /* d11 measurement map unmeasured */
BWL_PRE_PACKED_STRUCT struct dot11_meas_req {
uint8 id;
uint16 duration;
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_meas_req dot11_meas_req_t;
-#define DOT11_MNG_IE_MREQ_LEN 14
-
-#define DOT11_MNG_IE_MREQ_FIXED_LEN 3
+#define DOT11_MNG_IE_MREQ_LEN 14 /* d11 measurement request IE length */
+/* length of Measure Request IE data not including variable len */
+#define DOT11_MNG_IE_MREQ_FIXED_LEN 3 /* d11 measurement request IE fixed length */
BWL_PRE_PACKED_STRUCT struct dot11_meas_rep {
uint8 id;
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_meas_rep dot11_meas_rep_t;
-
-#define DOT11_MNG_IE_MREP_FIXED_LEN 3
+/* length of Measure Report IE data not including variable len */
+#define DOT11_MNG_IE_MREP_FIXED_LEN 3 /* d11 measurement response IE fixed length */
BWL_PRE_PACKED_STRUCT struct dot11_meas_rep_basic {
uint8 channel;
uint8 map;
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_meas_rep_basic dot11_meas_rep_basic_t;
-#define DOT11_MEASURE_BASIC_REP_LEN 12
+#define DOT11_MEASURE_BASIC_REP_LEN 12 /* d11 measurement basic report length */
BWL_PRE_PACKED_STRUCT struct dot11_quiet {
uint8 id;
uint8 len;
- uint8 count;
- uint8 period;
- uint16 duration;
- uint16 offset;
+ uint8 count; /* TBTTs until beacon interval in quiet starts */
+ uint8 period; /* Beacon intervals between periodic quiet periods */
+ uint16 duration; /* Length of quiet period, in TU's */
+ uint16 offset; /* TU's offset from TBTT in Count field */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_quiet dot11_quiet_t;
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_ibss_dfs dot11_ibss_dfs_t;
-
-#define WME_OUI "\x00\x50\xf2"
+/* WME Elements */
+#define WME_OUI "\x00\x50\xf2" /* WME OUI */
#define WME_OUI_LEN 3
-#define WME_OUI_TYPE 2
-#define WME_TYPE 2
-#define WME_SUBTYPE_IE 0
-#define WME_SUBTYPE_PARAM_IE 1
-#define WME_SUBTYPE_TSPEC 2
-#define WME_VER 1
-
-
-#define AC_BE 0
-#define AC_BK 1
-#define AC_VI 2
-#define AC_VO 3
-#define AC_COUNT 4
-
-typedef uint8 ac_bitmap_t;
-
-#define AC_BITMAP_NONE 0x0
-#define AC_BITMAP_ALL 0xf
+#define WME_OUI_TYPE 2 /* WME type */
+#define WME_TYPE 2 /* WME type, deprecated */
+#define WME_SUBTYPE_IE 0 /* Information Element */
+#define WME_SUBTYPE_PARAM_IE 1 /* Parameter Element */
+#define WME_SUBTYPE_TSPEC 2 /* Traffic Specification */
+#define WME_VER 1 /* WME version */
+
+/* WME Access Category Indices (ACIs) */
+#define AC_BE 0 /* Best Effort */
+#define AC_BK 1 /* Background */
+#define AC_VI 2 /* Video */
+#define AC_VO 3 /* Voice */
+#define AC_COUNT 4 /* number of ACs */
+
+typedef uint8 ac_bitmap_t; /* AC bitmap of (1 << AC_xx) */
+
+#define AC_BITMAP_NONE 0x0 /* No ACs */
+#define AC_BITMAP_ALL 0xf /* All ACs */
#define AC_BITMAP_TST(ab, ac) (((ab) & (1 << (ac))) != 0)
#define AC_BITMAP_SET(ab, ac) (((ab) |= (1 << (ac))))
#define AC_BITMAP_RESET(ab, ac) (((ab) &= ~(1 << (ac))))
-
+/** WME Information Element (IE) */
BWL_PRE_PACKED_STRUCT struct wme_ie {
uint8 oui[3];
uint8 type;
uint8 qosinfo;
} BWL_POST_PACKED_STRUCT;
typedef struct wme_ie wme_ie_t;
-#define WME_IE_LEN 7
+#define WME_IE_LEN 7 /* WME IE length */
BWL_PRE_PACKED_STRUCT struct edcf_acparam {
uint8 ACI;
uint8 ECW;
- uint16 TXOP;
+ uint16 TXOP; /* stored ls octet first (little endian) */
} BWL_POST_PACKED_STRUCT;
typedef struct edcf_acparam edcf_acparam_t;
-
+/** WME Parameter Element (PE) */
BWL_PRE_PACKED_STRUCT struct wme_param_ie {
uint8 oui[3];
uint8 type;
edcf_acparam_t acparam[AC_COUNT];
} BWL_POST_PACKED_STRUCT;
typedef struct wme_param_ie wme_param_ie_t;
-#define WME_PARAM_IE_LEN 24
-
-
-#define WME_QI_AP_APSD_MASK 0x80
-#define WME_QI_AP_APSD_SHIFT 7
-#define WME_QI_AP_COUNT_MASK 0x0f
-#define WME_QI_AP_COUNT_SHIFT 0
-
-
-#define WME_QI_STA_MAXSPLEN_MASK 0x60
-#define WME_QI_STA_MAXSPLEN_SHIFT 5
-#define WME_QI_STA_APSD_ALL_MASK 0xf
-#define WME_QI_STA_APSD_ALL_SHIFT 0
-#define WME_QI_STA_APSD_BE_MASK 0x8
-#define WME_QI_STA_APSD_BE_SHIFT 3
-#define WME_QI_STA_APSD_BK_MASK 0x4
-#define WME_QI_STA_APSD_BK_SHIFT 2
-#define WME_QI_STA_APSD_VI_MASK 0x2
-#define WME_QI_STA_APSD_VI_SHIFT 1
-#define WME_QI_STA_APSD_VO_MASK 0x1
-#define WME_QI_STA_APSD_VO_SHIFT 0
-
-
-#define EDCF_AIFSN_MIN 1
-#define EDCF_AIFSN_MAX 15
-#define EDCF_AIFSN_MASK 0x0f
-#define EDCF_ACM_MASK 0x10
-#define EDCF_ACI_MASK 0x60
-#define EDCF_ACI_SHIFT 5
-#define EDCF_AIFSN_SHIFT 12
-
-
-#define EDCF_ECW_MIN 0
-#define EDCF_ECW_MAX 15
+#define WME_PARAM_IE_LEN 24 /* WME Parameter IE length */
+
+/* QoS Info field for IE as sent from AP */
+#define WME_QI_AP_APSD_MASK 0x80 /* U-APSD Supported mask */
+#define WME_QI_AP_APSD_SHIFT 7 /* U-APSD Supported shift */
+#define WME_QI_AP_COUNT_MASK 0x0f /* Parameter set count mask */
+#define WME_QI_AP_COUNT_SHIFT 0 /* Parameter set count shift */
+
+/* QoS Info field for IE as sent from STA */
+#define WME_QI_STA_MAXSPLEN_MASK 0x60 /* Max Service Period Length mask */
+#define WME_QI_STA_MAXSPLEN_SHIFT 5 /* Max Service Period Length shift */
+#define WME_QI_STA_APSD_ALL_MASK 0xf /* APSD all AC bits mask */
+#define WME_QI_STA_APSD_ALL_SHIFT 0 /* APSD all AC bits shift */
+#define WME_QI_STA_APSD_BE_MASK 0x8 /* APSD AC_BE mask */
+#define WME_QI_STA_APSD_BE_SHIFT 3 /* APSD AC_BE shift */
+#define WME_QI_STA_APSD_BK_MASK 0x4 /* APSD AC_BK mask */
+#define WME_QI_STA_APSD_BK_SHIFT 2 /* APSD AC_BK shift */
+#define WME_QI_STA_APSD_VI_MASK 0x2 /* APSD AC_VI mask */
+#define WME_QI_STA_APSD_VI_SHIFT 1 /* APSD AC_VI shift */
+#define WME_QI_STA_APSD_VO_MASK 0x1 /* APSD AC_VO mask */
+#define WME_QI_STA_APSD_VO_SHIFT 0 /* APSD AC_VO shift */
+
+/* ACI */
+#define EDCF_AIFSN_MIN 1 /* AIFSN minimum value */
+#define EDCF_AIFSN_MAX 15 /* AIFSN maximum value */
+#define EDCF_AIFSN_MASK 0x0f /* AIFSN mask */
+#define EDCF_ACM_MASK 0x10 /* ACM mask */
+#define EDCF_ACI_MASK 0x60 /* ACI mask */
+#define EDCF_ACI_SHIFT 5 /* ACI shift */
+#define EDCF_AIFSN_SHIFT 12 /* 4 MSB(0xFFF) in ifs_ctl for AC idx */
+
+/* ECW */
+#define EDCF_ECW_MIN 0 /* cwmin/cwmax exponent minimum value */
+#define EDCF_ECW_MAX 15 /* cwmin/cwmax exponent maximum value */
#define EDCF_ECW2CW(exp) ((1 << (exp)) - 1)
-#define EDCF_ECWMIN_MASK 0x0f
-#define EDCF_ECWMAX_MASK 0xf0
-#define EDCF_ECWMAX_SHIFT 4
-
+#define EDCF_ECWMIN_MASK 0x0f /* cwmin exponent form mask */
+#define EDCF_ECWMAX_MASK 0xf0 /* cwmax exponent form mask */
+#define EDCF_ECWMAX_SHIFT 4 /* cwmax exponent form shift */
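+/* Example: the default STA best-effort ECW byte 0xA4 (EDCF_AC_BE_ECW_STA below) decodes as
+ * ECWmin = 0xA4 & EDCF_ECWMIN_MASK = 4 -> CWmin = EDCF_ECW2CW(4) = 15, and
+ * ECWmax = (0xA4 & EDCF_ECWMAX_MASK) >> EDCF_ECWMAX_SHIFT = 10 -> CWmax = 1023.
+ */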
-#define EDCF_TXOP_MIN 0
-#define EDCF_TXOP_MAX 65535
+/* TXOP */
+#define EDCF_TXOP_MIN 0 /* TXOP minimum value */
+#define EDCF_TXOP_MAX 65535 /* TXOP maximum value */
#define EDCF_TXOP2USEC(txop) ((txop) << 5)
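+/* Example: the TXOP field is in units of 32 us, so the default video TXOP value
+ * 0x005e (EDCF_AC_VI_TXOP_STA below) gives EDCF_TXOP2USEC(0x5e) = 94 << 5 = 3008 us.
+ */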
-
+/* Default BE ACI value for non-WME connection STA */
#define NON_EDCF_AC_BE_ACI_STA 0x02
-
-#define EDCF_AC_BE_ACI_STA 0x03
-#define EDCF_AC_BE_ECW_STA 0xA4
-#define EDCF_AC_BE_TXOP_STA 0x0000
-#define EDCF_AC_BK_ACI_STA 0x27
-#define EDCF_AC_BK_ECW_STA 0xA4
-#define EDCF_AC_BK_TXOP_STA 0x0000
-#define EDCF_AC_VI_ACI_STA 0x42
-#define EDCF_AC_VI_ECW_STA 0x43
-#define EDCF_AC_VI_TXOP_STA 0x005e
-#define EDCF_AC_VO_ACI_STA 0x62
-#define EDCF_AC_VO_ECW_STA 0x32
-#define EDCF_AC_VO_TXOP_STA 0x002f
-
-
-#define EDCF_AC_BE_ACI_AP 0x03
-#define EDCF_AC_BE_ECW_AP 0x64
-#define EDCF_AC_BE_TXOP_AP 0x0000
-#define EDCF_AC_BK_ACI_AP 0x27
-#define EDCF_AC_BK_ECW_AP 0xA4
-#define EDCF_AC_BK_TXOP_AP 0x0000
-#define EDCF_AC_VI_ACI_AP 0x41
-#define EDCF_AC_VI_ECW_AP 0x43
-#define EDCF_AC_VI_TXOP_AP 0x005e
-#define EDCF_AC_VO_ACI_AP 0x61
-#define EDCF_AC_VO_ECW_AP 0x32
-#define EDCF_AC_VO_TXOP_AP 0x002f
-
-
+/* Default EDCF parameters that AP advertises for STA to use; WMM draft Table 12 */
+#define EDCF_AC_BE_ACI_STA 0x03 /* STA ACI value for best effort AC */
+#define EDCF_AC_BE_ECW_STA 0xA4 /* STA ECW value for best effort AC */
+#define EDCF_AC_BE_TXOP_STA 0x0000 /* STA TXOP value for best effort AC */
+#define EDCF_AC_BK_ACI_STA 0x27 /* STA ACI value for background AC */
+#define EDCF_AC_BK_ECW_STA 0xA4 /* STA ECW value for background AC */
+#define EDCF_AC_BK_TXOP_STA 0x0000 /* STA TXOP value for background AC */
+#define EDCF_AC_VI_ACI_STA 0x42 /* STA ACI value for video AC */
+#define EDCF_AC_VI_ECW_STA 0x43 /* STA ECW value for video AC */
+#define EDCF_AC_VI_TXOP_STA 0x005e /* STA TXOP value for video AC */
+#define EDCF_AC_VO_ACI_STA 0x62 /* STA ACI value for audio AC */
+#define EDCF_AC_VO_ECW_STA 0x32 /* STA ECW value for audio AC */
+#define EDCF_AC_VO_TXOP_STA 0x002f /* STA TXOP value for audio AC */
+
+/* Default EDCF parameters that AP uses; WMM draft Table 14 */
+#define EDCF_AC_BE_ACI_AP 0x03 /* AP ACI value for best effort AC */
+#define EDCF_AC_BE_ECW_AP 0x64 /* AP ECW value for best effort AC */
+#define EDCF_AC_BE_TXOP_AP 0x0000 /* AP TXOP value for best effort AC */
+#define EDCF_AC_BK_ACI_AP 0x27 /* AP ACI value for background AC */
+#define EDCF_AC_BK_ECW_AP 0xA4 /* AP ECW value for background AC */
+#define EDCF_AC_BK_TXOP_AP 0x0000 /* AP TXOP value for background AC */
+#define EDCF_AC_VI_ACI_AP 0x41 /* AP ACI value for video AC */
+#define EDCF_AC_VI_ECW_AP 0x43 /* AP ECW value for video AC */
+#define EDCF_AC_VI_TXOP_AP 0x005e /* AP TXOP value for video AC */
+#define EDCF_AC_VO_ACI_AP 0x61 /* AP ACI value for audio AC */
+#define EDCF_AC_VO_ECW_AP 0x32 /* AP ECW value for audio AC */
+#define EDCF_AC_VO_TXOP_AP 0x002f /* AP TXOP value for audio AC */
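+/* Example: the default STA video ACI byte 0x42 (EDCF_AC_VI_ACI_STA above) decodes with
+ * the ACI masks as AIFSN = 0x42 & EDCF_AIFSN_MASK = 2, ACM = 0 (admission control not
+ * mandatory), and ACI = (0x42 & EDCF_ACI_MASK) >> EDCF_ACI_SHIFT = 2 = AC_VI.
+ */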
+
+/** EDCA Parameter IE */
BWL_PRE_PACKED_STRUCT struct edca_param_ie {
uint8 qosinfo;
uint8 rsvd;
edcf_acparam_t acparam[AC_COUNT];
} BWL_POST_PACKED_STRUCT;
typedef struct edca_param_ie edca_param_ie_t;
-#define EDCA_PARAM_IE_LEN 18
-
+#define EDCA_PARAM_IE_LEN 18 /* EDCA Parameter IE length */
+/** QoS Capability IE */
BWL_PRE_PACKED_STRUCT struct qos_cap_ie {
uint8 qosinfo;
} BWL_POST_PACKED_STRUCT;
typedef struct qos_cap_ie qos_cap_ie_t;
BWL_PRE_PACKED_STRUCT struct dot11_qbss_load_ie {
- uint8 id;
+ uint8 id; /* 11, DOT11_MNG_QBSS_LOAD_ID */
uint8 length;
- uint16 station_count;
- uint8 channel_utilization;
- uint16 aac;
+ uint16 station_count; /* total number of STAs associated */
+ uint8 channel_utilization; /* % of time, normalized to 255, QAP sensed medium busy */
+ uint16 aac; /* available admission capacity */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_qbss_load_ie dot11_qbss_load_ie_t;
-#define BSS_LOAD_IE_SIZE 7
-
-
-#define FIXED_MSDU_SIZE 0x8000
-#define MSDU_SIZE_MASK 0x7fff
+#define BSS_LOAD_IE_SIZE 7 /* BSS load IE size */
+#define WLC_QBSS_LOAD_CHAN_FREE_MAX 0xff /* max for channel free score */
+/* nom_msdu_size */
+#define FIXED_MSDU_SIZE 0x8000 /* MSDU size is fixed */
+#define MSDU_SIZE_MASK 0x7fff /* (Nominal or fixed) MSDU size */
-#define INTEGER_SHIFT 13
-#define FRACTION_MASK 0x1FFF
-
+/* surplus_bandwidth */
+/* Represented as 3 bits of integer, binary point, 13 bits fraction */
+#define INTEGER_SHIFT 13 /* integer shift */
+#define FRACTION_MASK 0x1FFF /* fraction mask */
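+/* Example: a surplus bandwidth allowance of 1.5 is encoded as
+ * (1 << INTEGER_SHIFT) | (uint16)(0.5 * (FRACTION_MASK + 1)) = 0x2000 | 0x1000 = 0x3000.
+ */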
+/** Management Notification Frame */
BWL_PRE_PACKED_STRUCT struct dot11_management_notification {
- uint8 category;
+ uint8 category; /* DOT11_ACTION_NOTIFICATION */
uint8 action;
uint8 token;
uint8 status;
- uint8 data[1];
+ uint8 data[1]; /* Elements */
} BWL_POST_PACKED_STRUCT;
-#define DOT11_MGMT_NOTIFICATION_LEN 4
-
+#define DOT11_MGMT_NOTIFICATION_LEN 4 /* Fixed length */
+/** Timeout Interval IE */
BWL_PRE_PACKED_STRUCT struct ti_ie {
uint8 ti_type;
uint32 ti_val;
#define TI_TYPE_REASSOC_DEADLINE 1
#define TI_TYPE_KEY_LIFETIME 2
+/* WME Action Codes */
+#define WME_ADDTS_REQUEST 0 /* WME ADDTS request */
+#define WME_ADDTS_RESPONSE 1 /* WME ADDTS response */
+#define WME_DELTS_REQUEST 2 /* WME DELTS request */
-#define WME_ADDTS_REQUEST 0
-#define WME_ADDTS_RESPONSE 1
-#define WME_DELTS_REQUEST 2
-
-
-#define WME_ADMISSION_ACCEPTED 0
-#define WME_INVALID_PARAMETERS 1
-#define WME_ADMISSION_REFUSED 3
-
+/* WME Setup Response Status Codes */
+#define WME_ADMISSION_ACCEPTED 0 /* WME admission accepted */
+#define WME_INVALID_PARAMETERS 1 /* WME invalid parameters */
+#define WME_ADMISSION_REFUSED 3 /* WME admission refused */
+/* Macro to take a pointer to a beacon or probe response
+ * body and return the char* pointer to the SSID info element
+ */
#define BCN_PRB_SSID(body) ((char*)(body) + DOT11_BCN_PRB_LEN)
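+/* Example (illustrative): the first IE in a beacon body starts right after the 12
+ * fixed bytes, so when the SSID element leads the IE list,
+ * ((uint8 *)BCN_PRB_SSID(body))[0] is DOT11_MNG_SSID_ID and byte [1] is the SSID length.
+ */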
-
-#define DOT11_OPEN_SYSTEM 0
-#define DOT11_SHARED_KEY 1
-#define DOT11_FAST_BSS 2
-#define DOT11_CHALLENGE_LEN 128
-
-
-#define FC_PVER_MASK 0x3
-#define FC_PVER_SHIFT 0
-#define FC_TYPE_MASK 0xC
-#define FC_TYPE_SHIFT 2
-#define FC_SUBTYPE_MASK 0xF0
-#define FC_SUBTYPE_SHIFT 4
-#define FC_TODS 0x100
-#define FC_TODS_SHIFT 8
-#define FC_FROMDS 0x200
-#define FC_FROMDS_SHIFT 9
-#define FC_MOREFRAG 0x400
-#define FC_MOREFRAG_SHIFT 10
-#define FC_RETRY 0x800
-#define FC_RETRY_SHIFT 11
-#define FC_PM 0x1000
-#define FC_PM_SHIFT 12
-#define FC_MOREDATA 0x2000
-#define FC_MOREDATA_SHIFT 13
-#define FC_WEP 0x4000
-#define FC_WEP_SHIFT 14
-#define FC_ORDER 0x8000
-#define FC_ORDER_SHIFT 15
-
-
-#define SEQNUM_SHIFT 4
-#define SEQNUM_MAX 0x1000
-#define FRAGNUM_MASK 0xF
-
-
-
-
-#define FC_TYPE_MNG 0
-#define FC_TYPE_CTL 1
-#define FC_TYPE_DATA 2
-
-
-#define FC_SUBTYPE_ASSOC_REQ 0
-#define FC_SUBTYPE_ASSOC_RESP 1
-#define FC_SUBTYPE_REASSOC_REQ 2
-#define FC_SUBTYPE_REASSOC_RESP 3
-#define FC_SUBTYPE_PROBE_REQ 4
-#define FC_SUBTYPE_PROBE_RESP 5
-#define FC_SUBTYPE_BEACON 8
-#define FC_SUBTYPE_ATIM 9
-#define FC_SUBTYPE_DISASSOC 10
-#define FC_SUBTYPE_AUTH 11
-#define FC_SUBTYPE_DEAUTH 12
-#define FC_SUBTYPE_ACTION 13
-#define FC_SUBTYPE_ACTION_NOACK 14
-
-
-#define FC_SUBTYPE_CTL_WRAPPER 7
-#define FC_SUBTYPE_BLOCKACK_REQ 8
-#define FC_SUBTYPE_BLOCKACK 9
-#define FC_SUBTYPE_PS_POLL 10
-#define FC_SUBTYPE_RTS 11
-#define FC_SUBTYPE_CTS 12
-#define FC_SUBTYPE_ACK 13
-#define FC_SUBTYPE_CF_END 14
-#define FC_SUBTYPE_CF_END_ACK 15
-
-
-#define FC_SUBTYPE_DATA 0
-#define FC_SUBTYPE_DATA_CF_ACK 1
-#define FC_SUBTYPE_DATA_CF_POLL 2
-#define FC_SUBTYPE_DATA_CF_ACK_POLL 3
-#define FC_SUBTYPE_NULL 4
-#define FC_SUBTYPE_CF_ACK 5
-#define FC_SUBTYPE_CF_POLL 6
-#define FC_SUBTYPE_CF_ACK_POLL 7
-#define FC_SUBTYPE_QOS_DATA 8
-#define FC_SUBTYPE_QOS_DATA_CF_ACK 9
-#define FC_SUBTYPE_QOS_DATA_CF_POLL 10
-#define FC_SUBTYPE_QOS_DATA_CF_ACK_POLL 11
-#define FC_SUBTYPE_QOS_NULL 12
-#define FC_SUBTYPE_QOS_CF_POLL 14
-#define FC_SUBTYPE_QOS_CF_ACK_POLL 15
-
-
+/* Authentication frame payload constants */
+#define DOT11_OPEN_SYSTEM 0 /* d11 open authentication */
+#define DOT11_SHARED_KEY 1 /* d11 shared authentication */
+#define DOT11_FAST_BSS 2 /* d11 fast bss authentication */
+#define DOT11_CHALLENGE_LEN 128 /* d11 challenge text length */
+
+/* Frame control macros */
+#define FC_PVER_MASK 0x3 /* PVER mask */
+#define FC_PVER_SHIFT 0 /* PVER shift */
+#define FC_TYPE_MASK 0xC /* type mask */
+#define FC_TYPE_SHIFT 2 /* type shift */
+#define FC_SUBTYPE_MASK 0xF0 /* subtype mask */
+#define FC_SUBTYPE_SHIFT 4 /* subtype shift */
+#define FC_TODS 0x100 /* to DS */
+#define FC_TODS_SHIFT 8 /* to DS shift */
+#define FC_FROMDS 0x200 /* from DS */
+#define FC_FROMDS_SHIFT 9 /* from DS shift */
+#define FC_MOREFRAG 0x400 /* more frag. */
+#define FC_MOREFRAG_SHIFT 10 /* more frag. shift */
+#define FC_RETRY 0x800 /* retry */
+#define FC_RETRY_SHIFT 11 /* retry shift */
+#define FC_PM 0x1000 /* PM */
+#define FC_PM_SHIFT 12 /* PM shift */
+#define FC_MOREDATA 0x2000 /* more data */
+#define FC_MOREDATA_SHIFT 13 /* more data shift */
+#define FC_WEP 0x4000 /* WEP */
+#define FC_WEP_SHIFT 14 /* WEP shift */
+#define FC_ORDER 0x8000 /* order */
+#define FC_ORDER_SHIFT 15 /* order shift */
+
+/* sequence control macros */
+#define SEQNUM_SHIFT 4 /* seq. number shift */
+#define SEQNUM_MAX 0x1000 /* max seqnum + 1 */
+#define FRAGNUM_MASK 0xF /* frag. number mask */
+
+/* Frame Control type/subtype defs */
+
+/* FC Types */
+#define FC_TYPE_MNG 0 /* management type */
+#define FC_TYPE_CTL 1 /* control type */
+#define FC_TYPE_DATA 2 /* data type */
+
+/* Management Subtypes */
+#define FC_SUBTYPE_ASSOC_REQ 0 /* assoc. request */
+#define FC_SUBTYPE_ASSOC_RESP 1 /* assoc. response */
+#define FC_SUBTYPE_REASSOC_REQ 2 /* reassoc. request */
+#define FC_SUBTYPE_REASSOC_RESP 3 /* reassoc. response */
+#define FC_SUBTYPE_PROBE_REQ 4 /* probe request */
+#define FC_SUBTYPE_PROBE_RESP 5 /* probe response */
+#define FC_SUBTYPE_BEACON 8 /* beacon */
+#define FC_SUBTYPE_ATIM 9 /* ATIM */
+#define FC_SUBTYPE_DISASSOC 10 /* disassoc. */
+#define FC_SUBTYPE_AUTH 11 /* authentication */
+#define FC_SUBTYPE_DEAUTH 12 /* de-authentication */
+#define FC_SUBTYPE_ACTION 13 /* action */
+#define FC_SUBTYPE_ACTION_NOACK 14 /* action no-ack */
+
+/* Control Subtypes */
+#define FC_SUBTYPE_CTL_WRAPPER 7 /* Control Wrapper */
+#define FC_SUBTYPE_BLOCKACK_REQ 8 /* Block Ack Req */
+#define FC_SUBTYPE_BLOCKACK 9 /* Block Ack */
+#define FC_SUBTYPE_PS_POLL 10 /* PS poll */
+#define FC_SUBTYPE_RTS 11 /* RTS */
+#define FC_SUBTYPE_CTS 12 /* CTS */
+#define FC_SUBTYPE_ACK 13 /* ACK */
+#define FC_SUBTYPE_CF_END 14 /* CF-END */
+#define FC_SUBTYPE_CF_END_ACK 15 /* CF-END ACK */
+
+/* Data Subtypes */
+#define FC_SUBTYPE_DATA 0 /* Data */
+#define FC_SUBTYPE_DATA_CF_ACK 1 /* Data + CF-ACK */
+#define FC_SUBTYPE_DATA_CF_POLL 2 /* Data + CF-Poll */
+#define FC_SUBTYPE_DATA_CF_ACK_POLL 3 /* Data + CF-Ack + CF-Poll */
+#define FC_SUBTYPE_NULL 4 /* Null */
+#define FC_SUBTYPE_CF_ACK 5 /* CF-Ack */
+#define FC_SUBTYPE_CF_POLL 6 /* CF-Poll */
+#define FC_SUBTYPE_CF_ACK_POLL 7 /* CF-Ack + CF-Poll */
+#define FC_SUBTYPE_QOS_DATA 8 /* QoS Data */
+#define FC_SUBTYPE_QOS_DATA_CF_ACK 9 /* QoS Data + CF-Ack */
+#define FC_SUBTYPE_QOS_DATA_CF_POLL 10 /* QoS Data + CF-Poll */
+#define FC_SUBTYPE_QOS_DATA_CF_ACK_POLL 11 /* QoS Data + CF-Ack + CF-Poll */
+#define FC_SUBTYPE_QOS_NULL 12 /* QoS Null */
+#define FC_SUBTYPE_QOS_CF_POLL 14 /* QoS CF-Poll */
+#define FC_SUBTYPE_QOS_CF_ACK_POLL 15 /* QoS CF-Ack + CF-Poll */
+
+/* Data Subtype Groups */
#define FC_SUBTYPE_ANY_QOS(s) (((s) & 8) != 0)
#define FC_SUBTYPE_ANY_NULL(s) (((s) & 4) != 0)
#define FC_SUBTYPE_ANY_CF_POLL(s) (((s) & 2) != 0)
#define FC_SUBTYPE_ANY_CF_ACK(s) (((s) & 1) != 0)
#define FC_SUBTYPE_ANY_PSPOLL(s) (((s) & 10) != 0)
-
-#define FC_KIND_MASK (FC_TYPE_MASK | FC_SUBTYPE_MASK)
-
-#define FC_KIND(t, s) (((t) << FC_TYPE_SHIFT) | ((s) << FC_SUBTYPE_SHIFT))
-
-#define FC_SUBTYPE(fc) (((fc) & FC_SUBTYPE_MASK) >> FC_SUBTYPE_SHIFT)
-#define FC_TYPE(fc) (((fc) & FC_TYPE_MASK) >> FC_TYPE_SHIFT)
-
-#define FC_ASSOC_REQ FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ASSOC_REQ)
-#define FC_ASSOC_RESP FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ASSOC_RESP)
-#define FC_REASSOC_REQ FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_REASSOC_REQ)
-#define FC_REASSOC_RESP FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_REASSOC_RESP)
-#define FC_PROBE_REQ FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_PROBE_REQ)
-#define FC_PROBE_RESP FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_PROBE_RESP)
-#define FC_BEACON FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_BEACON)
-#define FC_DISASSOC FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_DISASSOC)
-#define FC_AUTH FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_AUTH)
-#define FC_DEAUTH FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_DEAUTH)
-#define FC_ACTION FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ACTION)
-#define FC_ACTION_NOACK FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ACTION_NOACK)
-
-#define FC_CTL_WRAPPER FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CTL_WRAPPER)
-#define FC_BLOCKACK_REQ FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_BLOCKACK_REQ)
-#define FC_BLOCKACK FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_BLOCKACK)
-#define FC_PS_POLL FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_PS_POLL)
-#define FC_RTS FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_RTS)
-#define FC_CTS FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CTS)
-#define FC_ACK FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_ACK)
-#define FC_CF_END FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CF_END)
-#define FC_CF_END_ACK FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CF_END_ACK)
-
-#define FC_DATA FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_DATA)
-#define FC_NULL_DATA FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_NULL)
-#define FC_DATA_CF_ACK FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_DATA_CF_ACK)
-#define FC_QOS_DATA FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_QOS_DATA)
-#define FC_QOS_NULL FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_QOS_NULL)
-
-
-
-
-#define QOS_PRIO_SHIFT 0
-#define QOS_PRIO_MASK 0x0007
-#define QOS_PRIO(qos) (((qos) & QOS_PRIO_MASK) >> QOS_PRIO_SHIFT)
-
-
-#define QOS_TID_SHIFT 0
-#define QOS_TID_MASK 0x000f
-#define QOS_TID(qos) (((qos) & QOS_TID_MASK) >> QOS_TID_SHIFT)
-
-
-#define QOS_EOSP_SHIFT 4
-#define QOS_EOSP_MASK 0x0010
-#define QOS_EOSP(qos) (((qos) & QOS_EOSP_MASK) >> QOS_EOSP_SHIFT)
-
-
-#define QOS_ACK_NORMAL_ACK 0
-#define QOS_ACK_NO_ACK 1
-#define QOS_ACK_NO_EXP_ACK 2
-#define QOS_ACK_BLOCK_ACK 3
-#define QOS_ACK_SHIFT 5
-#define QOS_ACK_MASK 0x0060
-#define QOS_ACK(qos) (((qos) & QOS_ACK_MASK) >> QOS_ACK_SHIFT)
-
-
-#define QOS_AMSDU_SHIFT 7
-#define QOS_AMSDU_MASK 0x0080
-
-
-
-
-
-
-#define DOT11_MNG_AUTH_ALGO_LEN 2
-#define DOT11_MNG_AUTH_SEQ_LEN 2
-#define DOT11_MNG_BEACON_INT_LEN 2
-#define DOT11_MNG_CAP_LEN 2
-#define DOT11_MNG_AP_ADDR_LEN 6
-#define DOT11_MNG_LISTEN_INT_LEN 2
-#define DOT11_MNG_REASON_LEN 2
-#define DOT11_MNG_AID_LEN 2
-#define DOT11_MNG_STATUS_LEN 2
-#define DOT11_MNG_TIMESTAMP_LEN 8
-
-
-#define DOT11_AID_MASK 0x3fff
-
-
-#define DOT11_RC_RESERVED 0
-#define DOT11_RC_UNSPECIFIED 1
-#define DOT11_RC_AUTH_INVAL 2
-#define DOT11_RC_DEAUTH_LEAVING 3
-#define DOT11_RC_INACTIVITY 4
-#define DOT11_RC_BUSY 5
-#define DOT11_RC_INVAL_CLASS_2 6
-#define DOT11_RC_INVAL_CLASS_3 7
-#define DOT11_RC_DISASSOC_LEAVING 8
-#define DOT11_RC_NOT_AUTH 9
-#define DOT11_RC_BAD_PC 10
-#define DOT11_RC_BAD_CHANNELS 11
-
-
-
-#define DOT11_RC_UNSPECIFIED_QOS 32
-#define DOT11_RC_INSUFFCIENT_BW 33
-#define DOT11_RC_EXCESSIVE_FRAMES 34
-#define DOT11_RC_TX_OUTSIDE_TXOP 35
-#define DOT11_RC_LEAVING_QBSS 36
-#define DOT11_RC_BAD_MECHANISM 37
-#define DOT11_RC_SETUP_NEEDED 38
-#define DOT11_RC_TIMEOUT 39
-
-#define DOT11_RC_MAX 23
+/* Type/Subtype Combos */
+#define FC_KIND_MASK (FC_TYPE_MASK | FC_SUBTYPE_MASK) /* FC kind mask */
+
+#define FC_KIND(t, s) (((t) << FC_TYPE_SHIFT) | ((s) << FC_SUBTYPE_SHIFT)) /* FC kind */
+
+#define FC_SUBTYPE(fc) (((fc) & FC_SUBTYPE_MASK) >> FC_SUBTYPE_SHIFT) /* Subtype from FC */
+#define FC_TYPE(fc) (((fc) & FC_TYPE_MASK) >> FC_TYPE_SHIFT) /* Type from FC */
+
+#define FC_ASSOC_REQ FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ASSOC_REQ) /* assoc. request */
+#define FC_ASSOC_RESP FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ASSOC_RESP) /* assoc. response */
+#define FC_REASSOC_REQ FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_REASSOC_REQ) /* reassoc. request */
+#define FC_REASSOC_RESP FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_REASSOC_RESP) /* reassoc. response */
+#define FC_PROBE_REQ FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_PROBE_REQ) /* probe request */
+#define FC_PROBE_RESP FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_PROBE_RESP) /* probe response */
+#define FC_BEACON FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_BEACON) /* beacon */
+#define FC_ATIM FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ATIM) /* ATIM */
+#define FC_DISASSOC FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_DISASSOC) /* disassoc */
+#define FC_AUTH FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_AUTH) /* authentication */
+#define FC_DEAUTH FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_DEAUTH) /* deauthentication */
+#define FC_ACTION FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ACTION) /* action */
+#define FC_ACTION_NOACK FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ACTION_NOACK) /* action no-ack */
+
+#define FC_CTL_WRAPPER FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CTL_WRAPPER) /* Control Wrapper */
+#define FC_BLOCKACK_REQ FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_BLOCKACK_REQ) /* Block Ack Req */
+#define FC_BLOCKACK FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_BLOCKACK) /* Block Ack */
+#define FC_PS_POLL FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_PS_POLL) /* PS poll */
+#define FC_RTS FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_RTS) /* RTS */
+#define FC_CTS FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CTS) /* CTS */
+#define FC_ACK FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_ACK) /* ACK */
+#define FC_CF_END FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CF_END) /* CF-END */
+#define FC_CF_END_ACK FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CF_END_ACK) /* CF-END ACK */
+
+#define FC_DATA FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_DATA) /* data */
+#define FC_NULL_DATA FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_NULL) /* null data */
+#define FC_DATA_CF_ACK FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_DATA_CF_ACK) /* data CF ACK */
+#define FC_QOS_DATA FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_QOS_DATA) /* QoS data */
+#define FC_QOS_NULL FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_QOS_NULL) /* QoS null */
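+/* Example: FC_BEACON = FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_BEACON) = (0 << 2) | (8 << 4)
+ * = 0x0080, and FC_QOS_DATA = (2 << 2) | (8 << 4) = 0x0088; conversely
+ * FC_TYPE(0x0088) = 2 (data) and FC_SUBTYPE(0x0088) = 8 (QoS data).
+ */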
+
+/* QoS Control Field */
+
+/* 802.1D Priority */
+#define QOS_PRIO_SHIFT 0 /* QoS priority shift */
+#define QOS_PRIO_MASK 0x0007 /* QoS priority mask */
+#define QOS_PRIO(qos) (((qos) & QOS_PRIO_MASK) >> QOS_PRIO_SHIFT) /* QoS priority */
+
+/* Traffic Identifier */
+#define QOS_TID_SHIFT 0 /* QoS TID shift */
+#define QOS_TID_MASK 0x000f /* QoS TID mask */
+#define QOS_TID(qos) (((qos) & QOS_TID_MASK) >> QOS_TID_SHIFT) /* QoS TID */
+
+/* End of Service Period (U-APSD) */
+#define QOS_EOSP_SHIFT 4 /* QoS End of Service Period shift */
+#define QOS_EOSP_MASK 0x0010 /* QoS End of Service Period mask */
+#define QOS_EOSP(qos) (((qos) & QOS_EOSP_MASK) >> QOS_EOSP_SHIFT) /* QoS EOSP */
+
+/* Ack Policy */
+#define QOS_ACK_NORMAL_ACK 0 /* Normal Ack */
+#define QOS_ACK_NO_ACK 1 /* No Ack (eg mcast) */
+#define QOS_ACK_NO_EXP_ACK 2 /* No Explicit Ack */
+#define QOS_ACK_BLOCK_ACK 3 /* Block Ack */
+#define QOS_ACK_SHIFT 5 /* QoS ACK shift */
+#define QOS_ACK_MASK 0x0060 /* QoS ACK mask */
+#define QOS_ACK(qos) (((qos) & QOS_ACK_MASK) >> QOS_ACK_SHIFT) /* QoS ACK */
+
+/* A-MSDU flag */
+#define QOS_AMSDU_SHIFT 7 /* AMSDU shift */
+#define QOS_AMSDU_MASK 0x0080 /* AMSDU mask */
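+/* Example (hypothetical QoS control value 0x0067): QOS_TID(0x0067) = 7,
+ * QOS_EOSP(0x0067) = 0, QOS_ACK(0x0067) = 3 (QOS_ACK_BLOCK_ACK), and the
+ * A-MSDU bit (0x0067 & QOS_AMSDU_MASK) is clear.
+ */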
+
+/* Management Frames */
+
+/* Management Frame Constants */
+
+/* Fixed fields */
+#define DOT11_MNG_AUTH_ALGO_LEN 2 /* d11 management auth. algo. length */
+#define DOT11_MNG_AUTH_SEQ_LEN 2 /* d11 management auth. seq. length */
+#define DOT11_MNG_BEACON_INT_LEN 2 /* d11 management beacon interval length */
+#define DOT11_MNG_CAP_LEN 2 /* d11 management cap. length */
+#define DOT11_MNG_AP_ADDR_LEN 6 /* d11 management AP address length */
+#define DOT11_MNG_LISTEN_INT_LEN 2 /* d11 management listen interval length */
+#define DOT11_MNG_REASON_LEN 2 /* d11 management reason length */
+#define DOT11_MNG_AID_LEN 2 /* d11 management AID length */
+#define DOT11_MNG_STATUS_LEN 2 /* d11 management status length */
+#define DOT11_MNG_TIMESTAMP_LEN 8 /* d11 management timestamp length */
+
+/* DUR/ID field in assoc resp is 0xc000 | AID */
+#define DOT11_AID_MASK 0x3fff /* d11 AID mask */
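+/* Example: an association response carrying AID 5 has a DUR/ID field of
+ * 0xc000 | 5 = 0xc005, and the receiver recovers it as 0xc005 & DOT11_AID_MASK = 5.
+ */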
+
+/* Reason Codes */
+#define DOT11_RC_RESERVED 0 /* d11 RC reserved */
+#define DOT11_RC_UNSPECIFIED 1 /* Unspecified reason */
+#define DOT11_RC_AUTH_INVAL 2 /* Previous authentication no longer valid */
+#define DOT11_RC_DEAUTH_LEAVING 3 /* Deauthenticated because sending station
+ * is leaving (or has left) IBSS or ESS
+ */
+#define DOT11_RC_INACTIVITY 4 /* Disassociated due to inactivity */
+#define DOT11_RC_BUSY 5 /* Disassociated because AP is unable to handle
+ * all currently associated stations
+ */
+#define DOT11_RC_INVAL_CLASS_2 6 /* Class 2 frame received from
+ * nonauthenticated station
+ */
+#define DOT11_RC_INVAL_CLASS_3 7 /* Class 3 frame received from
+ * nonassociated station
+ */
+#define DOT11_RC_DISASSOC_LEAVING 8 /* Disassociated because sending station is
+ * leaving (or has left) BSS
+ */
+#define DOT11_RC_NOT_AUTH 9 /* Station requesting (re)association is not
+ * authenticated with responding station
+ */
+#define DOT11_RC_BAD_PC 10 /* Unacceptable power capability element */
+#define DOT11_RC_BAD_CHANNELS 11 /* Unacceptable supported channels element */
+/* 12 is unused */
+
+/* 32-39 are QSTA specific reasons added in 11e */
+#define DOT11_RC_UNSPECIFIED_QOS 32 /* unspecified QoS-related reason */
+#define DOT11_RC_INSUFFCIENT_BW 33 /* QAP lacks sufficient bandwidth */
+#define DOT11_RC_EXCESSIVE_FRAMES 34 /* excessive number of frames need ack */
+#define DOT11_RC_TX_OUTSIDE_TXOP 35 /* transmitting outside the limits of txop */
+#define DOT11_RC_LEAVING_QBSS 36 /* QSTA is leaving the QBSS (or resetting) */
+#define DOT11_RC_BAD_MECHANISM 37 /* does not want to use the mechanism */
+#define DOT11_RC_SETUP_NEEDED 38 /* mechanism needs a setup */
+#define DOT11_RC_TIMEOUT 39 /* timeout */
+
+#define DOT11_RC_MAX 23 /* Reason codes > 23 are reserved */
#define DOT11_RC_TDLS_PEER_UNREACH 25
#define DOT11_RC_TDLS_DOWN_UNSPECIFIED 26
+/* Status Codes */
+#define DOT11_SC_SUCCESS 0 /* Successful */
+#define DOT11_SC_FAILURE 1 /* Unspecified failure */
+#define DOT11_SC_TDLS_WAKEUP_SCH_ALT 2 /* TDLS wakeup schedule rejected but alternative */
+ /* schedule provided */
+#define DOT11_SC_TDLS_WAKEUP_SCH_REJ 3 /* TDLS wakeup schedule rejected */
+#define DOT11_SC_TDLS_SEC_DISABLED 5 /* TDLS Security disabled */
+#define DOT11_SC_LIFETIME_REJ 6 /* Unacceptable lifetime */
+#define DOT11_SC_NOT_SAME_BSS 7 /* Not in same BSS */
+#define DOT11_SC_CAP_MISMATCH 10 /* Cannot support all requested
+ * capabilities in the Capability
+ * Information field
+ */
+#define DOT11_SC_REASSOC_FAIL 11 /* Reassociation denied due to inability
+ * to confirm that association exists
+ */
+#define DOT11_SC_ASSOC_FAIL 12 /* Association denied due to reason
+ * outside the scope of this standard
+ */
+#define DOT11_SC_AUTH_MISMATCH 13 /* Responding station does not support
+ * the specified authentication
+ * algorithm
+ */
+#define DOT11_SC_AUTH_SEQ 14 /* Received an Authentication frame
+ * with authentication transaction
+ * sequence number out of expected
+ * sequence
+ */
+#define DOT11_SC_AUTH_CHALLENGE_FAIL 15 /* Authentication rejected because of
+ * challenge failure
+ */
+#define DOT11_SC_AUTH_TIMEOUT 16 /* Authentication rejected due to timeout
+ * waiting for next frame in sequence
+ */
+#define DOT11_SC_ASSOC_BUSY_FAIL 17 /* Association denied because AP is
+ * unable to handle additional
+ * associated stations
+ */
+#define DOT11_SC_ASSOC_RATE_MISMATCH 18 /* Association denied due to requesting
+ * station not supporting all of the
+ * data rates in the BSSBasicRateSet
+ * parameter
+ */
+#define DOT11_SC_ASSOC_SHORT_REQUIRED 19 /* Association denied due to requesting
+ * station not supporting the Short
+ * Preamble option
+ */
+#define DOT11_SC_ASSOC_PBCC_REQUIRED 20 /* Association denied due to requesting
+ * station not supporting the PBCC
+ * Modulation option
+ */
+#define DOT11_SC_ASSOC_AGILITY_REQUIRED 21 /* Association denied due to requesting
+ * station not supporting the Channel
+ * Agility option
+ */
+#define DOT11_SC_ASSOC_SPECTRUM_REQUIRED 22 /* Association denied because Spectrum
+ * Management capability is required.
+ */
+#define DOT11_SC_ASSOC_BAD_POWER_CAP 23 /* Association denied because the info
+ * in the Power Cap element is
+ * unacceptable.
+ */
+#define DOT11_SC_ASSOC_BAD_SUP_CHANNELS 24 /* Association denied because the info
+ * in the Supported Channel element is
+ * unacceptable
+ */
+#define DOT11_SC_ASSOC_SHORTSLOT_REQUIRED 25 /* Association denied due to requesting
+ * station not supporting the Short Slot
+ * Time option
+ */
+#define DOT11_SC_ASSOC_DSSSOFDM_REQUIRED 26 /* Association denied because requesting station
+ * does not support the DSSS-OFDM option
+ */
+#define DOT11_SC_ASSOC_HT_REQUIRED 27 /* Association denied because the requesting
+ * station does not support HT features
+ */
+#define DOT11_SC_ASSOC_R0KH_UNREACHABLE 28 /* Association denied due to AP
+ * being unable to reach the R0 Key Holder
+ */
+#define DOT11_SC_ASSOC_TRY_LATER 30 /* Association denied temporarily, try again later
+ */
+#define DOT11_SC_ASSOC_MFP_VIOLATION 31 /* Association denied due to Robust Management
+ * frame policy violation
+ */
+
+#define DOT11_SC_DECLINED 37 /* request declined */
+#define DOT11_SC_INVALID_PARAMS 38 /* One or more params have invalid values */
+#define DOT11_SC_INVALID_PAIRWISE_CIPHER 42 /* invalid pairwise cipher */
+#define DOT11_SC_INVALID_AKMP 43 /* Association denied due to invalid AKMP */
+#define DOT11_SC_INVALID_RSNIE_CAP 45 /* invalid RSN IE capabilities */
+#define DOT11_SC_DLS_NOT_ALLOWED 48 /* DLS is not allowed in the BSS by policy */
+#define DOT11_SC_INVALID_PMKID 53 /* Association denied due to invalid PMKID */
+#define DOT11_SC_INVALID_MDID 54 /* Association denied due to invalid MDID */
+#define DOT11_SC_INVALID_FTIE 55 /* Association denied due to invalid FTIE */
+
+#define DOT11_SC_ADV_PROTO_NOT_SUPPORTED 59 /* advertisement protocol not supported */
+#define DOT11_SC_NO_OUTSTAND_REQ 60 /* no outstanding req */
+#define DOT11_SC_RSP_NOT_RX_FROM_SERVER 61 /* no response from server */
+#define DOT11_SC_TIMEOUT 62 /* timeout */
+#define DOT11_SC_QUERY_RSP_TOO_LARGE 63 /* query rsp too large */
+#define DOT11_SC_SERVER_UNREACHABLE 65 /* server unreachable */
+
+#define DOT11_SC_UNEXP_MSG 70 /* Unexpected message */
+#define DOT11_SC_INVALID_SNONCE 71 /* Invalid SNonce */
+#define DOT11_SC_INVALID_RSNIE 72 /* Invalid contents of RSNIE */
+#define DOT11_SC_ASSOC_VHT_REQUIRED 104 /* Association denied because the requesting
+ * station does not support VHT features.
+ */
+
+#define DOT11_SC_TRANSMIT_FAILURE 79 /* transmission failure */
+
+/* Info Elts, length of INFORMATION portion of Info Elts */
+#define DOT11_MNG_DS_PARAM_LEN 1 /* d11 management DS parameter length */
+#define DOT11_MNG_IBSS_PARAM_LEN 2 /* d11 management IBSS parameter length */
+
+/* TIM Info element has 3 bytes fixed info in INFORMATION field,
+ * followed by 1 to 251 bytes of Partial Virtual Bitmap
+ */
+#define DOT11_MNG_TIM_FIXED_LEN 3 /* d11 management TIM fixed length */
+#define DOT11_MNG_TIM_DTIM_COUNT 0 /* d11 management DTIM count */
+#define DOT11_MNG_TIM_DTIM_PERIOD 1 /* d11 management DTIM period */
+#define DOT11_MNG_TIM_BITMAP_CTL 2 /* d11 management TIM BITMAP control */
+#define DOT11_MNG_TIM_PVB 3 /* d11 management TIM PVB */
+
+/* TLV defines */
+#define TLV_TAG_OFF 0 /* tag offset */
+#define TLV_LEN_OFF 1 /* length offset */
+#define TLV_HDR_LEN 2 /* header length */
+#define TLV_BODY_OFF 2 /* body offset */
+#define TLV_BODY_LEN_MAX 255 /* max body length */
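+/* Example sketch (illustrative only, not part of this header): walking the TLV-encoded
+ * IEs in a frame body using the offsets above:
+ *   while (len >= TLV_HDR_LEN && ie[TLV_LEN_OFF] + TLV_HDR_LEN <= len) {
+ *       uint8 ielen = ie[TLV_LEN_OFF];
+ *       if (ie[TLV_TAG_OFF] == DOT11_MNG_SSID_ID)
+ *           handle_ssid(ie + TLV_BODY_OFF, ielen);   // hypothetical handler
+ *       len -= TLV_HDR_LEN + ielen;
+ *       ie  += TLV_HDR_LEN + ielen;
+ *   }
+ */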
+
+/* Management Frame Information Element IDs */
+#define DOT11_MNG_SSID_ID 0 /* d11 management SSID id */
+#define DOT11_MNG_RATES_ID 1 /* d11 management rates id */
+#define DOT11_MNG_FH_PARMS_ID 2 /* d11 management FH parameter id */
+#define DOT11_MNG_DS_PARMS_ID 3 /* d11 management DS parameter id */
+#define DOT11_MNG_CF_PARMS_ID 4 /* d11 management CF parameter id */
+#define DOT11_MNG_TIM_ID 5 /* d11 management TIM id */
+#define DOT11_MNG_IBSS_PARMS_ID 6 /* d11 management IBSS parameter id */
+#define DOT11_MNG_COUNTRY_ID 7 /* d11 management country id */
+#define DOT11_MNG_HOPPING_PARMS_ID 8 /* d11 management hopping parameter id */
+#define DOT11_MNG_HOPPING_TABLE_ID 9 /* d11 management hopping table id */
+#define DOT11_MNG_REQUEST_ID 10 /* d11 management request id */
+#define DOT11_MNG_QBSS_LOAD_ID 11 /* d11 management QBSS Load id */
+#define DOT11_MNG_EDCA_PARAM_ID 12 /* 11E EDCA Parameter id */
+#define DOT11_MNG_TSPEC_ID 13 /* d11 management TSPEC id */
+#define DOT11_MNG_TCLAS_ID 14 /* d11 management TCLAS id */
+#define DOT11_MNG_CHALLENGE_ID 16 /* d11 management challenge id */
+#define DOT11_MNG_PWR_CONSTRAINT_ID 32 /* 11H PowerConstraint */
+#define DOT11_MNG_PWR_CAP_ID 33 /* 11H PowerCapability */
+#define DOT11_MNG_TPC_REQUEST_ID 34 /* 11H TPC Request */
+#define DOT11_MNG_TPC_REPORT_ID 35 /* 11H TPC Report */
+#define DOT11_MNG_SUPP_CHANNELS_ID 36 /* 11H Supported Channels */
+#define DOT11_MNG_CHANNEL_SWITCH_ID 37 /* 11H ChannelSwitch Announcement */
+#define DOT11_MNG_MEASURE_REQUEST_ID 38 /* 11H MeasurementRequest */
+#define DOT11_MNG_MEASURE_REPORT_ID 39 /* 11H MeasurementReport */
+#define DOT11_MNG_QUIET_ID 40 /* 11H Quiet */
+#define DOT11_MNG_IBSS_DFS_ID 41 /* 11H IBSS_DFS */
+#define DOT11_MNG_ERP_ID 42 /* d11 management ERP id */
+#define DOT11_MNG_TS_DELAY_ID 43 /* d11 management TS Delay id */
+#define DOT11_MNG_TCLAS_PROC_ID 44 /* d11 management TCLAS processing id */
+#define DOT11_MNG_HT_CAP 45 /* d11 mgmt HT cap id */
+#define DOT11_MNG_QOS_CAP_ID 46 /* 11E QoS Capability id */
+#define DOT11_MNG_NONERP_ID 47 /* d11 management NON-ERP id */
+#define DOT11_MNG_RSN_ID 48 /* d11 management RSN id */
+#define DOT11_MNG_EXT_RATES_ID 50 /* d11 management ext. rates id */
+#define DOT11_MNG_AP_CHREP_ID 51 /* 11k AP Channel report id */
+#define DOT11_MNG_NEIGHBOR_REP_ID 52 /* 11k & 11v Neighbor report id */
+#define DOT11_MNG_RCPI_ID 53 /* 11k RCPI */
+#define DOT11_MNG_MDIE_ID 54 /* 11r Mobility domain id */
+#define DOT11_MNG_FTIE_ID 55 /* 11r Fast Bss Transition id */
+#define DOT11_MNG_FT_TI_ID 56 /* 11r Timeout Interval id */
+#define DOT11_MNG_RDE_ID 57 /* 11r RIC Data Element id */
+#define DOT11_MNG_REGCLASS_ID 59 /* d11 management regulatory class id */
+#define DOT11_MNG_EXT_CSA_ID 60 /* d11 Extended CSA */
+#define DOT11_MNG_HT_ADD 61 /* d11 mgmt additional HT info */
+#define DOT11_MNG_EXT_CHANNEL_OFFSET 62 /* d11 mgmt ext channel offset */
+#define DOT11_MNG_BSS_AVR_ACCESS_DELAY_ID 63 /* 11k bss average access delay */
+#define DOT11_MNG_ANTENNA_ID 64 /* 11k antenna id */
+#define DOT11_MNG_RSNI_ID 65 /* 11k RSNI id */
+#define DOT11_MNG_MEASUREMENT_PILOT_TX_ID 66 /* 11k measurement pilot tx info id */
+#define DOT11_MNG_BSS_AVAL_ADMISSION_CAP_ID 67 /* 11k bss aval admission cap id */
+#define DOT11_MNG_BSS_AC_ACCESS_DELAY_ID 68 /* 11k bss AC access delay id */
+#define DOT11_MNG_WAPI_ID 68 /* d11 management WAPI id */
+#define DOT11_MNG_TIME_ADVERTISE_ID 69 /* 11p time advertisement */
+#define DOT11_MNG_RRM_CAP_ID 70 /* 11k radio measurement capability */
+#define DOT11_MNG_MULTIPLE_BSSID_ID 71 /* 11k multiple BSSID id */
+#define DOT11_MNG_HT_BSS_COEXINFO_ID 72 /* d11 mgmt OBSS Coexistence INFO */
+#define DOT11_MNG_HT_BSS_CHANNEL_REPORT_ID 73 /* d11 mgmt OBSS Intolerant Channel list */
+#define DOT11_MNG_HT_OBSS_ID 74 /* d11 mgmt OBSS HT info */
+#define DOT11_MNG_MMIE_ID 76 /* d11 mgmt MIC IE */
+#define DOT11_MNG_FMS_DESCR_ID 86 /* 11v FMS descriptor */
+#define DOT11_MNG_FMS_REQ_ID 87 /* 11v FMS request id */
+#define DOT11_MNG_FMS_RESP_ID 88 /* 11v FMS response id */
+#define DOT11_MNG_BSS_MAX_IDLE_PERIOD_ID 90 /* 11v bss max idle id */
+#define DOT11_MNG_TFS_REQUEST_ID 91 /* 11v tfs request id */
+#define DOT11_MNG_TFS_RESPONSE_ID 92 /* 11v tfs response id */
+#define DOT11_MNG_WNM_SLEEP_MODE_ID 93 /* 11v wnm-sleep mode id */
+#define DOT11_MNG_TIMBC_REQ_ID 94 /* 11v TIM broadcast request id */
+#define DOT11_MNG_TIMBC_RESP_ID 95 /* 11v TIM broadcast response id */
+#define DOT11_MNG_CHANNEL_USAGE 97 /* 11v channel usage */
+#define DOT11_MNG_TIME_ZONE_ID 98 /* 11v time zone */
+#define DOT11_MNG_DMS_REQUEST_ID 99 /* 11v dms request id */
+#define DOT11_MNG_DMS_RESPONSE_ID 100 /* 11v dms response id */
+#define DOT11_MNG_LINK_IDENTIFIER_ID 101 /* 11z TDLS Link Identifier IE */
+#define DOT11_MNG_WAKEUP_SCHEDULE_ID 102 /* 11z TDLS Wakeup Schedule IE */
+#define DOT11_MNG_CHANNEL_SWITCH_TIMING_ID 104 /* 11z TDLS Channel Switch Timing IE */
+#define DOT11_MNG_PTI_CONTROL_ID 105 /* 11z TDLS PTI Control IE */
+#define DOT11_MNG_PU_BUFFER_STATUS_ID 106 /* 11z TDLS PU Buffer Status IE */
+#define DOT11_MNG_INTERWORKING_ID 107 /* 11u interworking */
+#define DOT11_MNG_ADVERTISEMENT_ID 108 /* 11u advertisement protocol */
+#define DOT11_MNG_EXP_BW_REQ_ID 109 /* 11u expedited bandwidth request */
+#define DOT11_MNG_QOS_MAP_ID 110 /* 11u QoS map set */
+#define DOT11_MNG_ROAM_CONSORT_ID 111 /* 11u roaming consortium */
+#define DOT11_MNG_EMERGCY_ALERT_ID 112 /* 11u emergency alert identifier */
+#define DOT11_MNG_EXT_CAP_ID 127 /* d11 mgmt ext capability */
+#define DOT11_MNG_VHT_CAP_ID 191 /* d11 mgmt VHT cap id */
+#define DOT11_MNG_VHT_OPERATION_ID 192 /* d11 mgmt VHT op id */
+#define DOT11_MNG_WIDE_BW_CHANNEL_SWITCH_ID 194 /* Wide BW Channel Switch IE */
+#define DOT11_MNG_VHT_TRANSMIT_POWER_ENVELOPE_ID 195 /* VHT transmit Power Envelope IE */
+#define DOT11_MNG_CHANNEL_SWITCH_WRAPPER_ID 196 /* Channel Switch Wrapper IE */
+#define DOT11_MNG_AID_ID 197 /* Association ID IE */
+#define DOT11_MNG_OPER_MODE_NOTIF_ID 199 /* d11 mgmt VHT oper mode notif */
+
+
+#define DOT11_MNG_WPA_ID 221 /* d11 management WPA id */
+#define DOT11_MNG_PROPR_ID 221
+/* should start using this one instead of above two */
+#define DOT11_MNG_VS_ID 221 /* d11 management Vendor Specific IE */
+
+/* Rate Defines */
+
+/* Valid rates for the Supported Rates and Extended Supported Rates IEs.
+ * Encoding is the rate in 500kbps units, rounding up for fractional values.
+ * The DATA_RATE parameter in 802.11-2012 section 6.5.5.2 enumerates all the values.
+ * The rate values cover DSSS, HR/DSSS, ERP, and OFDM phy rates.
+ * The defines below do not cover the rates specific to 10MHz, {3, 4.5, 27},
+ * and 5MHz, {1.5, 2.25, 3, 4.5, 13.5}, which are not supported by Broadcom devices.
+ */
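+/* Example: with the 500 kbps encoding, 1 Mbps is 2, 5.5 Mbps is 11, 6 Mbps is 12
+ * and 54 Mbps is 108, matching the DOT11_RATE_* values (e.g. DOT11_RATE_5M5 = 11).
+ */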
-#define DOT11_SC_SUCCESS 0
-#define DOT11_SC_FAILURE 1
-#define DOT11_SC_TDLS_WAKEUP_SCH_ALT 2
-
-#define DOT11_SC_TDLS_WAKEUP_SCH_REJ 3
-#define DOT11_SC_TDLS_SEC_DISABLED 5
-#define DOT11_SC_LIFETIME_REJ 6
-#define DOT11_SC_NOT_SAME_BSS 7
-#define DOT11_SC_CAP_MISMATCH 10
-#define DOT11_SC_REASSOC_FAIL 11
-#define DOT11_SC_ASSOC_FAIL 12
-#define DOT11_SC_AUTH_MISMATCH 13
-#define DOT11_SC_AUTH_SEQ 14
-#define DOT11_SC_AUTH_CHALLENGE_FAIL 15
-#define DOT11_SC_AUTH_TIMEOUT 16
-#define DOT11_SC_ASSOC_BUSY_FAIL 17
-#define DOT11_SC_ASSOC_RATE_MISMATCH 18
-#define DOT11_SC_ASSOC_SHORT_REQUIRED 19
-#define DOT11_SC_ASSOC_PBCC_REQUIRED 20
-#define DOT11_SC_ASSOC_AGILITY_REQUIRED 21
-#define DOT11_SC_ASSOC_SPECTRUM_REQUIRED 22
-#define DOT11_SC_ASSOC_BAD_POWER_CAP 23
-#define DOT11_SC_ASSOC_BAD_SUP_CHANNELS 24
-#define DOT11_SC_ASSOC_SHORTSLOT_REQUIRED 25
-#define DOT11_SC_ASSOC_ERPBCC_REQUIRED 26
-#define DOT11_SC_ASSOC_DSSSOFDM_REQUIRED 27
-#define DOT11_SC_ASSOC_R0KH_UNREACHABLE 28
-#define DOT11_SC_ASSOC_TRY_LATER 30
-#define DOT11_SC_ASSOC_MFP_VIOLATION 31
-#define DOT11_SC_ASSOC_HT_REQUIRED 32
-
-#define DOT11_SC_DECLINED 37
-#define DOT11_SC_INVALID_PARAMS 38
-#define DOT11_SC_INVALID_PAIRWISE_CIPHER 42
-#define DOT11_SC_INVALID_AKMP 43
-#define DOT11_SC_INVALID_RSNIE_CAP 45
-#define DOT11_SC_DLS_NOT_ALLOWED 48
-#define DOT11_SC_INVALID_PMKID 53
-#define DOT11_SC_INVALID_MDID 54
-#define DOT11_SC_INVALID_FTIE 55
-
-#define DOT11_SC_ADV_PROTO_NOT_SUPPORTED 59
-#define DOT11_SC_NO_OUTSTAND_REQ 60
-#define DOT11_SC_RSP_NOT_RX_FROM_SERVER 61
-#define DOT11_SC_TIMEOUT 62
-#define DOT11_SC_QUERY_RSP_TOO_LARGE 63
-#define DOT11_SC_SERVER_UNREACHABLE 65
-
-#define DOT11_SC_UNEXP_MSG 70
-#define DOT11_SC_INVALID_SNONCE 71
-#define DOT11_SC_INVALID_RSNIE 72
-#define DOT11_SC_ASSOC_VHT_REQUIRED 104
-
-#define DOT11_SC_TRANSMIT_FAILURE 79
-
-
-#define DOT11_MNG_DS_PARAM_LEN 1
-#define DOT11_MNG_IBSS_PARAM_LEN 2
-
-
-#define DOT11_MNG_TIM_FIXED_LEN 3
-#define DOT11_MNG_TIM_DTIM_COUNT 0
-#define DOT11_MNG_TIM_DTIM_PERIOD 1
-#define DOT11_MNG_TIM_BITMAP_CTL 2
-#define DOT11_MNG_TIM_PVB 3
-
-
-#define TLV_TAG_OFF 0
-#define TLV_LEN_OFF 1
-#define TLV_HDR_LEN 2
-#define TLV_BODY_OFF 2
-
-
-#define DOT11_MNG_SSID_ID 0
-#define DOT11_MNG_RATES_ID 1
-#define DOT11_MNG_FH_PARMS_ID 2
-#define DOT11_MNG_DS_PARMS_ID 3
-#define DOT11_MNG_CF_PARMS_ID 4
-#define DOT11_MNG_TIM_ID 5
-#define DOT11_MNG_IBSS_PARMS_ID 6
-#define DOT11_MNG_COUNTRY_ID 7
-#define DOT11_MNG_HOPPING_PARMS_ID 8
-#define DOT11_MNG_HOPPING_TABLE_ID 9
-#define DOT11_MNG_REQUEST_ID 10
-#define DOT11_MNG_QBSS_LOAD_ID 11
-#define DOT11_MNG_EDCA_PARAM_ID 12
-#define DOT11_MNG_TSPEC_ID 13
-#define DOT11_MNG_TCLAS_ID 14
-#define DOT11_MNG_CHALLENGE_ID 16
-#define DOT11_MNG_PWR_CONSTRAINT_ID 32
-#define DOT11_MNG_PWR_CAP_ID 33
-#define DOT11_MNG_TPC_REQUEST_ID 34
-#define DOT11_MNG_TPC_REPORT_ID 35
-#define DOT11_MNG_SUPP_CHANNELS_ID 36
-#define DOT11_MNG_CHANNEL_SWITCH_ID 37
-#define DOT11_MNG_MEASURE_REQUEST_ID 38
-#define DOT11_MNG_MEASURE_REPORT_ID 39
-#define DOT11_MNG_QUIET_ID 40
-#define DOT11_MNG_IBSS_DFS_ID 41
-#define DOT11_MNG_ERP_ID 42
-#define DOT11_MNG_TS_DELAY_ID 43
-#define DOT11_MNG_TCLAS_PROC_ID 44
-#define DOT11_MNG_HT_CAP 45
-#define DOT11_MNG_QOS_CAP_ID 46
-#define DOT11_MNG_NONERP_ID 47
-#define DOT11_MNG_RSN_ID 48
-#define DOT11_MNG_EXT_RATES_ID 50
-#define DOT11_MNG_AP_CHREP_ID 51
-#define DOT11_MNG_NEIGHBOR_REP_ID 52
-#define DOT11_MNG_RCPI_ID 53
-#define DOT11_MNG_MDIE_ID 54
-#define DOT11_MNG_FTIE_ID 55
-#define DOT11_MNG_FT_TI_ID 56
-#define DOT11_MNG_RDE_ID 57
-#define DOT11_MNG_REGCLASS_ID 59
-#define DOT11_MNG_EXT_CSA_ID 60
-#define DOT11_MNG_HT_ADD 61
-#define DOT11_MNG_EXT_CHANNEL_OFFSET 62
-#define DOT11_MNG_BSS_AVR_ACCESS_DELAY_ID 63
-#define DOT11_MNG_ANTENNA_ID 64
-#define DOT11_MNG_RSNI_ID 65
-#define DOT11_MNG_MEASUREMENT_PILOT_TX_ID 66
-#define DOT11_MNG_BSS_AVAL_ADMISSION_CAP_ID 67
-#define DOT11_MNG_BSS_AC_ACCESS_DELAY_ID 68
-#define DOT11_MNG_WAPI_ID 68
-#define DOT11_MNG_TIME_ADVERTISE_ID 69
-#define DOT11_MNG_RRM_CAP_ID 70
-#define DOT11_MNG_MULTIPLE_BSSID_ID 71
-#define DOT11_MNG_HT_BSS_COEXINFO_ID 72
-#define DOT11_MNG_HT_BSS_CHANNEL_REPORT_ID 73
-#define DOT11_MNG_HT_OBSS_ID 74
-#define DOT11_MNG_MMIE_ID 76
-#define DOT11_MNG_BSS_MAX_IDLE_PERIOD_ID 90
-#define DOT11_MNG_TFS_REQUEST_ID 91
-#define DOT11_MNG_TFS_RESPONSE_ID 92
-#define DOT11_MNG_WNM_SLEEP_MODE_ID 93
-#define DOT11_MNG_TIMBC_REQ_ID 94
-#define DOT11_MNG_TIMBC_RESP_ID 95
-#define DOT11_MNG_CHANNEL_USAGE 97
-#define DOT11_MNG_TIME_ZONE_ID 98
-#define DOT11_MNG_DMS_REQUEST_ID 99
-#define DOT11_MNG_DMS_RESPONSE_ID 100
-#define DOT11_MNG_LINK_IDENTIFIER_ID 101
-#define DOT11_MNG_WAKEUP_SCHEDULE_ID 102
-#define DOT11_MNG_CHANNEL_SWITCH_TIMING_ID 104
-#define DOT11_MNG_PTI_CONTROL_ID 105
-#define DOT11_MNG_PU_BUFFER_STATUS_ID 106
-#define DOT11_MNG_INTERWORKING_ID 107
-#define DOT11_MNG_ADVERTISEMENT_ID 108
-#define DOT11_MNG_EXP_BW_REQ_ID 109
-#define DOT11_MNG_QOS_MAP_ID 110
-#define DOT11_MNG_ROAM_CONSORT_ID 111
-#define DOT11_MNG_EMERGCY_ALERT_ID 112
-#define DOT11_MNG_EXT_CAP_ID 127
-#define DOT11_MNG_VHT_CAP_ID 191
-#define DOT11_MNG_VHT_OPERATION_ID 192
-#define DOT11_MNG_WIDE_BW_CHANNEL_SWITCH_ID 194
-#define DOT11_MNG_VHT_TRANSMIT_POWER_ENVELOPE_ID 195
-#define DOT11_MNG_CHANNEL_SWITCH_WRAPPER_ID 196
-#define DOT11_MNG_AID_ID 197
-#define DOT11_MNG_OPER_MODE_NOTIF_ID 199
-
-
-#define DOT11_MNG_WPA_ID 221
-#define DOT11_MNG_PROPR_ID 221
-
-#define DOT11_MNG_VS_ID 221
-
-
-
-
-
-#define DOT11_RATE_1M 2
-#define DOT11_RATE_2M 4
-#define DOT11_RATE_5M5 11
-#define DOT11_RATE_11M 22
-#define DOT11_RATE_6M 12
-#define DOT11_RATE_9M 18
-#define DOT11_RATE_12M 24
-#define DOT11_RATE_18M 36
-#define DOT11_RATE_24M 48
-#define DOT11_RATE_36M 72
-#define DOT11_RATE_48M 96
-#define DOT11_RATE_54M 108
-#define DOT11_RATE_MAX 108
-
-
-#define DOT11_RATE_BASIC 0x80
-#define DOT11_RATE_MASK 0x7F
-
-
-#define DOT11_BSS_MEMBERSHIP_HT 0xFF
-#define DOT11_BSS_MEMBERSHIP_VHT 0xFE
-
-
-#define DOT11_MNG_ERP_LEN 1
-#define DOT11_MNG_NONERP_PRESENT 0x01
-#define DOT11_MNG_USE_PROTECTION 0x02
-#define DOT11_MNG_BARKER_PREAMBLE 0x04
-
-#define DOT11_MGN_TS_DELAY_LEN 4
-#define TS_DELAY_FIELD_SIZE 4
-
-
-#define DOT11_CAP_ESS 0x0001
-#define DOT11_CAP_IBSS 0x0002
-#define DOT11_CAP_POLLABLE 0x0004
-#define DOT11_CAP_POLL_RQ 0x0008
-#define DOT11_CAP_PRIVACY 0x0010
-#define DOT11_CAP_SHORT 0x0020
-#define DOT11_CAP_PBCC 0x0040
-#define DOT11_CAP_AGILITY 0x0080
-#define DOT11_CAP_SPECTRUM 0x0100
-#define DOT11_CAP_QOS 0x0200
-#define DOT11_CAP_SHORTSLOT 0x0400
-#define DOT11_CAP_APSD 0x0800
-#define DOT11_CAP_RRM 0x1000
-#define DOT11_CAP_CCK_OFDM 0x2000
-#define DOT11_CAP_DELAY_BA 0x4000
-#define DOT11_CAP_IMMEDIATE_BA 0x8000
-
-
-
+#define DOT11_RATE_1M 2 /* 1 Mbps in 500kbps units */
+#define DOT11_RATE_2M 4 /* 2 Mbps in 500kbps units */
+#define DOT11_RATE_5M5 11 /* 5.5 Mbps in 500kbps units */
+#define DOT11_RATE_11M 22 /* 11 Mbps in 500kbps units */
+#define DOT11_RATE_6M 12 /* 6 Mbps in 500kbps units */
+#define DOT11_RATE_9M 18 /* 9 Mbps in 500kbps units */
+#define DOT11_RATE_12M 24 /* 12 Mbps in 500kbps units */
+#define DOT11_RATE_18M 36 /* 18 Mbps in 500kbps units */
+#define DOT11_RATE_24M 48 /* 24 Mbps in 500kbps units */
+#define DOT11_RATE_36M 72 /* 36 Mbps in 500kbps units */
+#define DOT11_RATE_48M 96 /* 48 Mbps in 500kbps units */
+#define DOT11_RATE_54M 108 /* 54 Mbps in 500kbps units */
+#define DOT11_RATE_MAX 108 /* highest rate (54 Mbps) in 500kbps units */
+
+/* Supported Rates and Extended Supported Rates IEs
+ * The supported rates octets are defined with the MSB indicating a Basic Rate
+ * and bits 0-6 as the rate value.
+ */
+#define DOT11_RATE_BASIC 0x80 /* flag for a Basic Rate */
+#define DOT11_RATE_MASK 0x7F /* mask for numeric part of rate */
+
+/* BSS Membership Selector parameters
+ * 802.11-2012 and 802.11ac_D4.0 sec 8.4.2.3
+ * These selector values are advertised in Supported Rates and Extended Supported Rates IEs
+ * in the supported rates list with the Basic rate bit set.
+ * Constants below include the basic bit.
+ */
+#define DOT11_BSS_MEMBERSHIP_HT 0xFF /* Basic 0x80 + 127, HT Required to join */
+#define DOT11_BSS_MEMBERSHIP_VHT 0xFE /* Basic 0x80 + 126, VHT Required to join */
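+
+/* Illustrative sketch, not part of the original header: decode one octet of a
+ * Supported Rates / Extended Supported Rates IE with the macros above, skipping
+ * the HT/VHT BSS membership selectors. The helper name is hypothetical.
+ */
+static int
+dot11_rate_octet_decode(uint8 octet, uint8 *rate_500k, int *is_basic)
+{
+	if (octet == DOT11_BSS_MEMBERSHIP_HT || octet == DOT11_BSS_MEMBERSHIP_VHT)
+		return 0;	/* membership selector, not a data rate */
+	*rate_500k = (uint8)(octet & DOT11_RATE_MASK);	/* e.g. 11 -> 5.5 Mbps */
+	*is_basic = (octet & DOT11_RATE_BASIC) != 0;
+	return 1;
+}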
+
+/* ERP info element bit values */
+#define DOT11_MNG_ERP_LEN 1 /* ERP is currently 1 byte long */
+#define DOT11_MNG_NONERP_PRESENT 0x01 /* NonERP (802.11b) STAs are present
+ * in the BSS
+ */
+#define DOT11_MNG_USE_PROTECTION 0x02 /* Use protection mechanisms for
+ * ERP-OFDM frames
+ */
+#define DOT11_MNG_BARKER_PREAMBLE 0x04 /* Short Preambles: 0 == allowed,
+ * 1 == not allowed
+ */
+/* TS Delay element offset & size */
+#define DOT11_MGN_TS_DELAY_LEN 4 /* length of TS DELAY IE */
+#define TS_DELAY_FIELD_SIZE 4 /* TS DELAY field size */
+
+/* Capability Information Field */
+#define DOT11_CAP_ESS 0x0001 /* d11 cap. ESS */
+#define DOT11_CAP_IBSS 0x0002 /* d11 cap. IBSS */
+#define DOT11_CAP_POLLABLE 0x0004 /* d11 cap. pollable */
+#define DOT11_CAP_POLL_RQ 0x0008 /* d11 cap. poll request */
+#define DOT11_CAP_PRIVACY 0x0010 /* d11 cap. privacy */
+#define DOT11_CAP_SHORT 0x0020 /* d11 cap. short */
+#define DOT11_CAP_PBCC 0x0040 /* d11 cap. PBCC */
+#define DOT11_CAP_AGILITY 0x0080 /* d11 cap. agility */
+#define DOT11_CAP_SPECTRUM 0x0100 /* d11 cap. spectrum */
+#define DOT11_CAP_QOS 0x0200 /* d11 cap. qos */
+#define DOT11_CAP_SHORTSLOT 0x0400 /* d11 cap. shortslot */
+#define DOT11_CAP_APSD 0x0800 /* d11 cap. apsd */
+#define DOT11_CAP_RRM 0x1000 /* d11 cap. 11k radio measurement */
+#define DOT11_CAP_CCK_OFDM 0x2000 /* d11 cap. CCK/OFDM */
+#define DOT11_CAP_DELAY_BA 0x4000 /* d11 cap. delayed block ack */
+#define DOT11_CAP_IMMEDIATE_BA 0x8000 /* d11 cap. immediate block ack */
+
+/* Extended capabilities IE bitfields */
+/* 20/40 BSS Coexistence Management support bit position */
#define DOT11_EXT_CAP_OBSS_COEX_MGMT 0
-
+/* Extended Channel Switching support bit position */
+#define DOT11_EXT_CAP_EXT_CHAN_SWITCHING 2
+/* scheduled PSMP support bit position */
#define DOT11_EXT_CAP_SPSMP 6
-
+/* Flexible Multicast Service */
#define DOT11_EXT_CAP_FMS 11
-
+/* proxy ARP service support bit position */
#define DOT11_EXT_CAP_PROXY_ARP 12
-
+/* Traffic Filter Service */
#define DOT11_EXT_CAP_TFS 16
-
+/* WNM-Sleep Mode */
#define DOT11_EXT_CAP_WNM_SLEEP 17
-
+/* TIM Broadcast service */
#define DOT11_EXT_CAP_TIMBC 18
-
+/* BSS Transition Management support bit position */
#define DOT11_EXT_CAP_BSSTRANS_MGMT 19
-
+/* Direct Multicast Service */
#define DOT11_EXT_CAP_DMS 26
-
+/* Interworking support bit position */
#define DOT11_EXT_CAP_IW 31
-
+/* QoS map support bit position */
+#define DOT11_EXT_CAP_QOS_MAP 32
+/* service Interval granularity bit position and mask */
#define DOT11_EXT_CAP_SI 41
#define DOT11_EXT_CAP_SI_MASK 0x0E
-
+/* WNM notification */
#define DOT11_EXT_CAP_WNM_NOTIF 46
-
+/* Operating mode notification - VHT (11ac D3.0 - 8.4.2.29) */
#define DOT11_EXT_CAP_OPER_MODE_NOTIF 62
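+
+/* Illustrative sketch, not part of the original header: the extended capability
+ * constants above are bit positions into the variable-length Extended Capabilities
+ * IE body, e.g. DOT11_EXT_CAP_BSSTRANS_MGMT (19) is bit 3 of octet 2. The helper
+ * name and the data/len parameters are hypothetical.
+ */
+static int
+dot11_ext_cap_isset(const uint8 *data, uint len, uint bit)
+{
+	if ((bit >> 3) >= len)
+		return 0;	/* bit lies beyond the advertised IE body */
+	return (data[bit >> 3] & (1 << (bit & 7))) != 0;
+}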
-
+/* VHT Operating mode bit fields - (11ac D3.0 - 8.4.1.50) */
#define DOT11_OPER_MODE_CHANNEL_WIDTH_SHIFT 0
#define DOT11_OPER_MODE_CHANNEL_WIDTH_MASK 0x3
#define DOT11_OPER_MODE_RXNSS_SHIFT 4
#define DOT11_OPER_MODE_CHANNEL_WIDTH_8080MHZ(mode) (\
((mode) & DOT11_OPER_MODE_CHANNEL_WIDTH_MASK) == DOT11_OPER_MODE_8080MHZ)
-
+/* Operating mode information element 802.11ac D3.0 - 8.4.2.168 */
BWL_PRE_PACKED_STRUCT struct dot11_oper_mode_notif_ie {
uint8 mode;
} BWL_POST_PACKED_STRUCT;
#define DOT11_OPER_MODE_NOTIF_IE_LEN 1
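+
+/* Illustrative sketch, not part of the original header: read the channel width
+ * and RX NSS out of the operating mode field above. Only the width mask/shift
+ * macros come from this header; the 0x70 RX NSS mask (bits 4-6, 11ac D3.0
+ * 8.4.1.50) is a local assumption, and the helper name is hypothetical.
+ */
+static void
+dot11_oper_mode_decode(uint8 mode, uint8 *chan_width, uint8 *rx_nss)
+{
+	*chan_width = (mode & DOT11_OPER_MODE_CHANNEL_WIDTH_MASK) >>
+	        DOT11_OPER_MODE_CHANNEL_WIDTH_SHIFT;
+	*rx_nss = ((mode & 0x70) >> DOT11_OPER_MODE_RXNSS_SHIFT) + 1;	/* field holds NSS - 1 */
+}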
+/* Extended Capability Information Field */
+#define DOT11_OBSS_COEX_MNG_SUPPORT 0x01 /* 20/40 BSS Coexistence Management support */
-#define DOT11_OBSS_COEX_MNG_SUPPORT 0x01
-
-
-#define DOT11_ACTION_HDR_LEN 2
-#define DOT11_ACTION_CAT_OFF 0
-#define DOT11_ACTION_ACT_OFF 1
-
-
-#define DOT11_ACTION_CAT_ERR_MASK 0x80
-#define DOT11_ACTION_CAT_MASK 0x7F
-#define DOT11_ACTION_CAT_SPECT_MNG 0
-#define DOT11_ACTION_CAT_QOS 1
-#define DOT11_ACTION_CAT_DLS 2
-#define DOT11_ACTION_CAT_BLOCKACK 3
-#define DOT11_ACTION_CAT_PUBLIC 4
-#define DOT11_ACTION_CAT_RRM 5
-#define DOT11_ACTION_CAT_FBT 6
-#define DOT11_ACTION_CAT_HT 7
-#define DOT11_ACTION_CAT_SA_QUERY 8
-#define DOT11_ACTION_CAT_PDPA 9
-#define DOT11_ACTION_CAT_WNM 10
-#define DOT11_ACTION_CAT_UWNM 11
+/*
+ * Action Frame Constants
+ */
+#define DOT11_ACTION_HDR_LEN 2 /* action frame category + action field */
+#define DOT11_ACTION_CAT_OFF 0 /* category offset */
+#define DOT11_ACTION_ACT_OFF 1 /* action offset */
+
+/* Action Category field (sec 8.4.1.11) */
+#define DOT11_ACTION_CAT_ERR_MASK 0x80 /* category error mask */
+#define DOT11_ACTION_CAT_MASK 0x7F /* category mask */
+#define DOT11_ACTION_CAT_SPECT_MNG 0 /* category spectrum management */
+#define DOT11_ACTION_CAT_QOS 1 /* category QoS */
+#define DOT11_ACTION_CAT_DLS 2 /* category DLS */
+#define DOT11_ACTION_CAT_BLOCKACK 3 /* category block ack */
+#define DOT11_ACTION_CAT_PUBLIC 4 /* category public */
+#define DOT11_ACTION_CAT_RRM 5 /* category radio measurements */
+#define DOT11_ACTION_CAT_FBT 6 /* category fast bss transition */
+#define DOT11_ACTION_CAT_HT 7 /* category for HT */
+#define DOT11_ACTION_CAT_SA_QUERY 8 /* security association query */
+#define DOT11_ACTION_CAT_PDPA 9 /* protected dual of public action */
+#define DOT11_ACTION_CAT_WNM 10 /* category for WNM */
+#define DOT11_ACTION_CAT_UWNM 11 /* category for Unprotected WNM */
#define DOT11_ACTION_NOTIFICATION 17
-#define DOT11_ACTION_CAT_VHT 21
-#define DOT11_ACTION_CAT_VSP 126
-#define DOT11_ACTION_CAT_VS 127
-
-
-#define DOT11_SM_ACTION_M_REQ 0
-#define DOT11_SM_ACTION_M_REP 1
-#define DOT11_SM_ACTION_TPC_REQ 2
-#define DOT11_SM_ACTION_TPC_REP 3
-#define DOT11_SM_ACTION_CHANNEL_SWITCH 4
-#define DOT11_SM_ACTION_EXT_CSA 5
-
-
-#define DOT11_ACTION_ID_HT_CH_WIDTH 0
-#define DOT11_ACTION_ID_HT_MIMO_PS 1
-
-
-#define DOT11_PUB_ACTION_BSS_COEX_MNG 0
-#define DOT11_PUB_ACTION_CHANNEL_SWITCH 4
-
-
-#define DOT11_BA_ACTION_ADDBA_REQ 0
-#define DOT11_BA_ACTION_ADDBA_RESP 1
-#define DOT11_BA_ACTION_DELBA 2
-
-
-#define DOT11_ADDBA_PARAM_AMSDU_SUP 0x0001
-#define DOT11_ADDBA_PARAM_POLICY_MASK 0x0002
-#define DOT11_ADDBA_PARAM_POLICY_SHIFT 1
-#define DOT11_ADDBA_PARAM_TID_MASK 0x003c
-#define DOT11_ADDBA_PARAM_TID_SHIFT 2
-#define DOT11_ADDBA_PARAM_BSIZE_MASK 0xffc0
-#define DOT11_ADDBA_PARAM_BSIZE_SHIFT 6
-
-#define DOT11_ADDBA_POLICY_DELAYED 0
-#define DOT11_ADDBA_POLICY_IMMEDIATE 1
-
-
+#define DOT11_ACTION_CAT_VHT 21 /* VHT action */
+#define DOT11_ACTION_CAT_VSP 126 /* protected vendor specific */
+#define DOT11_ACTION_CAT_VS 127 /* category Vendor Specific */
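+
+/* Illustrative sketch, not part of the original header: classify a received
+ * action frame body with the offsets and category constants above. The helper
+ * name and the body/len parameters are hypothetical.
+ */
+static int
+dot11_action_is_vendor_specific(const uint8 *body, uint len)
+{
+	if (len < DOT11_ACTION_HDR_LEN)
+		return 0;
+	return (body[DOT11_ACTION_CAT_OFF] & DOT11_ACTION_CAT_MASK) == DOT11_ACTION_CAT_VS;
+}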
+
+/* Spectrum Management Action IDs (sec 7.4.1) */
+#define DOT11_SM_ACTION_M_REQ 0 /* d11 action measurement request */
+#define DOT11_SM_ACTION_M_REP 1 /* d11 action measurement response */
+#define DOT11_SM_ACTION_TPC_REQ 2 /* d11 action TPC request */
+#define DOT11_SM_ACTION_TPC_REP 3 /* d11 action TPC response */
+#define DOT11_SM_ACTION_CHANNEL_SWITCH 4 /* d11 action channel switch */
+#define DOT11_SM_ACTION_EXT_CSA 5 /* d11 extended CSA for 11n */
+
+/* QoS action ids */
+#define DOT11_QOS_ACTION_ADDTS_REQ 0 /* d11 action ADDTS request */
+#define DOT11_QOS_ACTION_ADDTS_RESP 1 /* d11 action ADDTS response */
+#define DOT11_QOS_ACTION_DELTS 2 /* d11 action DELTS */
+#define DOT11_QOS_ACTION_SCHEDULE 3 /* d11 action schedule */
+#define DOT11_QOS_ACTION_QOS_MAP 4 /* d11 action QOS map */
+
+/* HT action ids */
+#define DOT11_ACTION_ID_HT_CH_WIDTH 0 /* notify channel width action id */
+#define DOT11_ACTION_ID_HT_MIMO_PS 1 /* mimo ps action id */
+
+/* Public action ids */
+#define DOT11_PUB_ACTION_BSS_COEX_MNG 0 /* 20/40 Coexistence Management action id */
+#define DOT11_PUB_ACTION_CHANNEL_SWITCH 4 /* d11 action channel switch */
+#define DOT11_PUB_ACTION_GAS_CB_REQ 12 /* GAS Comeback Request */
+
+/* Block Ack action types */
+#define DOT11_BA_ACTION_ADDBA_REQ 0 /* ADDBA Req action frame type */
+#define DOT11_BA_ACTION_ADDBA_RESP 1 /* ADDBA Resp action frame type */
+#define DOT11_BA_ACTION_DELBA 2 /* DELBA action frame type */
+
+/* ADDBA action parameters */
+#define DOT11_ADDBA_PARAM_AMSDU_SUP 0x0001 /* AMSDU supported under BA */
+#define DOT11_ADDBA_PARAM_POLICY_MASK 0x0002 /* policy mask(ack vs delayed) */
+#define DOT11_ADDBA_PARAM_POLICY_SHIFT 1 /* policy shift */
+#define DOT11_ADDBA_PARAM_TID_MASK 0x003c /* tid mask */
+#define DOT11_ADDBA_PARAM_TID_SHIFT 2 /* tid shift */
+#define DOT11_ADDBA_PARAM_BSIZE_MASK 0xffc0 /* buffer size mask */
+#define DOT11_ADDBA_PARAM_BSIZE_SHIFT 6 /* buffer size shift */
+
+#define DOT11_ADDBA_POLICY_DELAYED 0 /* delayed BA policy */
+#define DOT11_ADDBA_POLICY_IMMEDIATE 1 /* immediate BA policy */
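+
+/* Illustrative sketch, not part of the original header: unpack the 16-bit ADDBA
+ * parameter set with the masks/shifts above. param_set is assumed already in
+ * host byte order; the helper name is hypothetical.
+ */
+static void
+dot11_addba_param_decode(uint16 param_set, uint8 *tid, uint16 *bufsize, int *immediate)
+{
+	*tid = (param_set & DOT11_ADDBA_PARAM_TID_MASK) >> DOT11_ADDBA_PARAM_TID_SHIFT;
+	*bufsize = (param_set & DOT11_ADDBA_PARAM_BSIZE_MASK) >> DOT11_ADDBA_PARAM_BSIZE_SHIFT;
+	*immediate = ((param_set & DOT11_ADDBA_PARAM_POLICY_MASK) >>
+	        DOT11_ADDBA_PARAM_POLICY_SHIFT) == DOT11_ADDBA_POLICY_IMMEDIATE;
+}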
+
+/* Fast Transition action types */
#define DOT11_FT_ACTION_FT_RESERVED 0
-#define DOT11_FT_ACTION_FT_REQ 1
-#define DOT11_FT_ACTION_FT_RES 2
-#define DOT11_FT_ACTION_FT_CON 3
-#define DOT11_FT_ACTION_FT_ACK 4
-
-
-#define DOT11_DLS_ACTION_REQ 0
-#define DOT11_DLS_ACTION_RESP 1
-#define DOT11_DLS_ACTION_TD 2
+#define DOT11_FT_ACTION_FT_REQ 1 /* FBT request - for over-the-DS FBT */
+#define DOT11_FT_ACTION_FT_RES 2 /* FBT response - for over-the-DS FBT */
+#define DOT11_FT_ACTION_FT_CON 3 /* FBT confirm - for OTDS with RRP */
+#define DOT11_FT_ACTION_FT_ACK 4 /* FBT ack */
+/* DLS action types */
+#define DOT11_DLS_ACTION_REQ 0 /* DLS Request */
+#define DOT11_DLS_ACTION_RESP 1 /* DLS Response */
+#define DOT11_DLS_ACTION_TD 2 /* DLS Teardown */
+/* Wireless Network Management (WNM) action types */
#define DOT11_WNM_ACTION_EVENT_REQ 0
#define DOT11_WNM_ACTION_EVENT_REP 1
#define DOT11_WNM_ACTION_DIAG_REQ 2
#define DOT11_WNM_ACTION_COL_INTRFRNCE_REP 12
#define DOT11_WNM_ACTION_TFS_REQ 13
#define DOT11_WNM_ACTION_TFS_RESP 14
-#define DOT11_WNM_ACTION_TFS_NOTIFY 15
+#define DOT11_WNM_ACTION_TFS_NOTIFY_REQ 15
#define DOT11_WNM_ACTION_WNM_SLEEP_REQ 16
#define DOT11_WNM_ACTION_WNM_SLEEP_RESP 17
#define DOT11_WNM_ACTION_TIMBC_REQ 18
#define DOT11_WNM_ACTION_TMNG_MEASUR_REQ 25
#define DOT11_WNM_ACTION_NOTFCTN_REQ 26
#define DOT11_WNM_ACTION_NOTFCTN_RESP 27
+#define DOT11_WNM_ACTION_TFS_NOTIFY_RESP 28
-
+/* Unprotected Wireless Network Management (WNM) action types */
#define DOT11_UWNM_ACTION_TIM 0
#define DOT11_UWNM_ACTION_TIMING_MEASUREMENT 1
#define DOT11_MNG_COUNTRY_ID_LEN 3
+/* VHT category action types - 802.11ac D3.0 - 8.5.23.1 */
+#define DOT11_VHT_ACTION_CBF 0 /* Compressed Beamforming */
+#define DOT11_VHT_ACTION_GID_MGMT 1 /* Group ID Management */
+#define DOT11_VHT_ACTION_OPER_MODE_NOTIF 2 /* Operating mode notif'n */
-#define DOT11_VHT_ACTION_CBF 0
-#define DOT11_VHT_ACTION_GID_MGMT 1
-#define DOT11_VHT_ACTION_OPER_MODE_NOTIF 2
-
-
+/** DLS Request frame header */
BWL_PRE_PACKED_STRUCT struct dot11_dls_req {
- uint8 category;
- uint8 action;
- struct ether_addr da;
- struct ether_addr sa;
- uint16 cap;
- uint16 timeout;
- uint8 data[1];
+ uint8 category; /* category of action frame (2) */
+ uint8 action; /* DLS action: req (0) */
+ struct ether_addr da; /* destination address */
+ struct ether_addr sa; /* source address */
+ uint16 cap; /* capability */
+ uint16 timeout; /* timeout value */
+ uint8 data[1]; /* IE:support rate, extend support rate, HT cap */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_dls_req dot11_dls_req_t;
-#define DOT11_DLS_REQ_LEN 18
-
+#define DOT11_DLS_REQ_LEN 18 /* Fixed length */
+/** DLS response frame header */
BWL_PRE_PACKED_STRUCT struct dot11_dls_resp {
- uint8 category;
- uint8 action;
- uint16 status;
- struct ether_addr da;
- struct ether_addr sa;
- uint8 data[1];
+ uint8 category; /* category of action frame (2) */
+ uint8 action; /* DLS action: resp (1) */
+ uint16 status; /* status code field */
+ struct ether_addr da; /* destination address */
+ struct ether_addr sa; /* source address */
+ uint8 data[1]; /* optional: capability, rate ... */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_dls_resp dot11_dls_resp_t;
-#define DOT11_DLS_RESP_LEN 16
-
-
+#define DOT11_DLS_RESP_LEN 16 /* Fixed length */
+/* ************* 802.11v related definitions. ************* */
+/** BSS Management Transition Query frame header */
BWL_PRE_PACKED_STRUCT struct dot11_bsstrans_query {
- uint8 category;
- uint8 action;
- uint8 token;
- uint8 reason;
- uint8 data[1];
+ uint8 category; /* category of action frame (10) */
+ uint8 action; /* WNM action: trans_query (6) */
+ uint8 token; /* dialog token */
+ uint8 reason; /* transition query reason */
+ uint8 data[1]; /* Elements */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_bsstrans_query dot11_bsstrans_query_t;
-#define DOT11_BSSTRANS_QUERY_LEN 4
-
+#define DOT11_BSSTRANS_QUERY_LEN 4 /* Fixed length */
+/** BSS Management Transition Request frame header */
BWL_PRE_PACKED_STRUCT struct dot11_bsstrans_req {
- uint8 category;
- uint8 action;
- uint8 token;
- uint8 reqmode;
- uint16 disassoc_tmr;
- uint8 validity_intrvl;
- uint8 data[1];
-
+ uint8 category; /* category of action frame (10) */
+ uint8 action; /* WNM action: trans_req (7) */
+ uint8 token; /* dialog token */
+ uint8 reqmode; /* transition request mode */
+ uint16 disassoc_tmr; /* disassociation timer */
+ uint8 validity_intrvl; /* validity interval */
+ uint8 data[1]; /* optional: BSS term duration, ... */
+ /* ...session info URL, candidate list */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_bsstrans_req dot11_bsstrans_req_t;
-#define DOT11_BSSTRANS_REQ_LEN 7
-
+#define DOT11_BSSTRANS_REQ_LEN 7 /* Fixed length */
+/* BSS Mgmt Transition Request Mode Field - 802.11v */
#define DOT11_BSSTRANS_REQMODE_PREF_LIST_INCL 0x01
#define DOT11_BSSTRANS_REQMODE_ABRIDGED 0x02
#define DOT11_BSSTRANS_REQMODE_DISASSOC_IMMINENT 0x04
#define DOT11_BSSTRANS_REQMODE_BSS_TERM_INCL 0x08
#define DOT11_BSSTRANS_REQMODE_ESS_DISASSOC_IMNT 0x10
-
+/** BSS Management transition response frame header */
BWL_PRE_PACKED_STRUCT struct dot11_bsstrans_resp {
- uint8 category;
- uint8 action;
- uint8 token;
- uint8 status;
- uint8 term_delay;
- uint8 data[1];
+ uint8 category; /* category of action frame (10) */
+ uint8 action; /* WNM action: trans_resp (8) */
+ uint8 token; /* dialog token */
+ uint8 status; /* transition status */
+ uint8 term_delay; /* BSS termination delay */
+ uint8 data[1]; /* optional: BSSID target, candidate list */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_bsstrans_resp dot11_bsstrans_resp_t;
-#define DOT11_BSSTRANS_RESP_LEN 5
-
+#define DOT11_BSSTRANS_RESP_LEN 5 /* Fixed length */
+/* BSS Mgmt Transition Response Status Field */
#define DOT11_BSSTRANS_RESP_STATUS_ACCEPT 0
#define DOT11_BSSTRANS_RESP_STATUS_REJECT 1
#define DOT11_BSSTRANS_RESP_STATUS_REJ_INSUFF_BCN 2
#define DOT11_BSSTRANS_RESP_STATUS_REJ_LEAVING_ESS 8
-
+/** BSS Max Idle Period element */
BWL_PRE_PACKED_STRUCT struct dot11_bss_max_idle_period_ie {
- uint8 id;
+ uint8 id; /* 90, DOT11_MNG_BSS_MAX_IDLE_PERIOD_ID */
uint8 len;
- uint16 max_idle_period;
+ uint16 max_idle_period; /* in unit of 1000 TUs */
uint8 idle_opt;
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_bss_max_idle_period_ie dot11_bss_max_idle_period_ie_t;
-#define DOT11_BSS_MAX_IDLE_PERIOD_IE_LEN 3
-#define DOT11_BSS_MAX_IDLE_PERIOD_OPT_PROTECTED 1
-
+#define DOT11_BSS_MAX_IDLE_PERIOD_IE_LEN 3 /* bss max idle period IE size */
+#define DOT11_BSS_MAX_IDLE_PERIOD_OPT_PROTECTED 1 /* BSS max idle option */
+/** TIM Broadcast request element */
BWL_PRE_PACKED_STRUCT struct dot11_timbc_req_ie {
- uint8 id;
+ uint8 id; /* 94, DOT11_MNG_TIMBC_REQ_ID */
uint8 len;
- uint8 interval;
+ uint8 interval; /* in unit of beacon interval */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_timbc_req_ie dot11_timbc_req_ie_t;
-#define DOT11_TIMBC_REQ_IE_LEN 1
-
+#define DOT11_TIMBC_REQ_IE_LEN 1 /* Fixed length */
+/** TIM Broadcast request frame header */
BWL_PRE_PACKED_STRUCT struct dot11_timbc_req {
- uint8 category;
- uint8 action;
- uint8 token;
- uint8 data[1];
+ uint8 category; /* category of action frame (10) */
+ uint8 action; /* WNM action: DOT11_WNM_ACTION_TIMBC_REQ(18) */
+ uint8 token; /* dialog token */
+ uint8 data[1]; /* TIM broadcast request element */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_timbc_req dot11_timbc_req_t;
-#define DOT11_TIMBC_REQ_LEN 3
-
+#define DOT11_TIMBC_REQ_LEN 3 /* Fixed length */
+/** TIM Broadcast response element */
BWL_PRE_PACKED_STRUCT struct dot11_timbc_resp_ie {
- uint8 id;
+ uint8 id; /* 95, DOT11_MNG_TIM_BROADCAST_RESP_ID */
uint8 len;
- uint8 status;
- uint8 interval;
- int32 offset;
- uint16 high_rate;
- uint16 low_rate;
+ uint8 status; /* status of add request */
+ uint8 interval; /* in unit of beacon interval */
+ int32 offset; /* in unit of ms */
+ uint16 high_rate; /* in unit of 0.5 Mb/s */
+ uint16 low_rate; /* in unit of 0.5 Mb/s */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_timbc_resp_ie dot11_timbc_resp_ie_t;
-#define DOT11_TIMBC_DENY_RESP_IE_LEN 1
-#define DOT11_TIMBC_ACCEPT_RESP_IE_LEN 10
+#define DOT11_TIMBC_DENY_RESP_IE_LEN 1 /* Deny. Fixed length */
+#define DOT11_TIMBC_ACCEPT_RESP_IE_LEN 10 /* Accept. Fixed length */
#define DOT11_TIMBC_STATUS_ACCEPT 0
#define DOT11_TIMBC_STATUS_ACCEPT_TSTAMP 1
#define DOT11_TIMBC_STATUS_DENY 2
#define DOT11_TIMBC_STATUS_OVERRIDDEN 3
+#define DOT11_TIMBC_STATUS_RESERVED 4
-
+/** TIM Broadcast response frame header */
BWL_PRE_PACKED_STRUCT struct dot11_timbc_resp {
- uint8 category;
- uint8 action;
- uint8 token;
- uint8 data[1];
+ uint8 category; /* category of action frame (10) */
+ uint8 action; /* action: DOT11_WNM_ACTION_TIMBC_RESP(19) */
+ uint8 token; /* dialog token */
+ uint8 data[1]; /* TIM broadcast response element */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_timbc_resp dot11_timbc_resp_t;
-#define DOT11_TIMBC_RESP_LEN 3
-
+#define DOT11_TIMBC_RESP_LEN 3 /* Fixed length */
+/** TIM element */
BWL_PRE_PACKED_STRUCT struct dot11_tim_ie {
- uint8 id;
- uint8 len;
- uint8 dtim_count;
- uint8 dtim_period;
- uint8 bitmap_control;
- uint8 pvb[1];
+ uint8 id; /* 5, DOT11_MNG_TIM_ID */
+ uint8 len; /* 4 - 255 */
+ uint8 dtim_count; /* DTIM decrementing counter */
+ uint8 dtim_period; /* DTIM period */
+ uint8 bitmap_control; /* AID 0 + bitmap offset */
+ uint8 pvb[1]; /* Partial Virtual Bitmap, variable length */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_tim_ie dot11_tim_ie_t;
-#define DOT11_TIM_IE_FIXED_LEN 3
-#define DOT11_TIM_IE_FIXED_TOTAL_LEN 5
-
+#define DOT11_TIM_IE_FIXED_LEN 3 /* Fixed length, without id and len */
+#define DOT11_TIM_IE_FIXED_TOTAL_LEN 5 /* Fixed length, with id and len */
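+
+/* Illustrative sketch, not part of the original header: check whether traffic is
+ * buffered for a given AID using the TIM element above. Bits 1-7 of
+ * bitmap_control carry the bitmap offset (first PVB byte, N1), so the byte that
+ * holds this AID must fall inside the advertised Partial Virtual Bitmap. The
+ * helper name is hypothetical.
+ */
+static int
+dot11_tim_aid_set(const dot11_tim_ie_t *tim, uint16 aid)
+{
+	uint n1 = tim->bitmap_control & 0xFE;			/* first PVB byte offset */
+	uint pvb_len = tim->len - DOT11_TIM_IE_FIXED_LEN;	/* PVB bytes present */
+	uint byte_idx = aid >> 3;
+
+	if (byte_idx < n1 || byte_idx >= n1 + pvb_len)
+		return 0;	/* AID outside the advertised bitmap */
+	return (tim->pvb[byte_idx - n1] & (1 << (aid & 7))) != 0;
+}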
+/** TIM Broadcast frame header */
BWL_PRE_PACKED_STRUCT struct dot11_timbc {
- uint8 category;
- uint8 action;
- uint8 check_beacon;
- uint8 tsf[8];
- dot11_tim_ie_t tim_ie;
+ uint8 category; /* category of action frame (11) */
+ uint8 action; /* action: TIM (0) */
+ uint8 check_beacon; /* need to check-beacon */
+ uint8 tsf[8]; /* Time Synchronization Function */
+ dot11_tim_ie_t tim_ie; /* TIM element */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_timbc dot11_timbc_t;
#define DOT11_TIMBC_HDR_LEN (sizeof(dot11_timbc_t) - sizeof(dot11_tim_ie_t))
-#define DOT11_TIMBC_FIXED_LEN (sizeof(dot11_timbc_t) - 1)
-#define DOT11_TIMBC_LEN 11
-
+#define DOT11_TIMBC_FIXED_LEN (sizeof(dot11_timbc_t) - 1) /* Fixed length */
+#define DOT11_TIMBC_LEN 11 /* Fixed length */
+/** TCLAS frame classifier type */
BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_hdr {
uint8 type;
uint8 mask;
uint8 data[1];
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_tclas_fc_hdr dot11_tclas_fc_hdr_t;
-#define DOT11_TCLAS_FC_HDR_LEN 2
+#define DOT11_TCLAS_FC_HDR_LEN 2 /* Fixed length */
#define DOT11_TCLAS_MASK_0 0x1
#define DOT11_TCLAS_MASK_1 0x2
#define DOT11_TCLAS_FC_4_IP_HIGHER 4
#define DOT11_TCLAS_FC_5_8021D 5
-
+/** TCLAS frame classifier type 0 parameters for Ethernet */
BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_0_eth {
uint8 type;
uint8 mask;
typedef struct dot11_tclas_fc_0_eth dot11_tclas_fc_0_eth_t;
#define DOT11_TCLAS_FC_0_ETH_LEN 16
-
+/** TCLAS frame classifier type 1 parameters for IPV4 */
BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_1_ipv4 {
uint8 type;
uint8 mask;
typedef struct dot11_tclas_fc_1_ipv4 dot11_tclas_fc_1_ipv4_t;
#define DOT11_TCLAS_FC_1_IPV4_LEN 18
-
+/** TCLAS frame classifier type 2 parameters for 802.1Q */
BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_2_8021q {
uint8 type;
uint8 mask;
typedef struct dot11_tclas_fc_2_8021q dot11_tclas_fc_2_8021q_t;
#define DOT11_TCLAS_FC_2_8021Q_LEN 4
-
+/** TCLAS frame classifier type 3 parameters for filter offset */
BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_3_filter {
uint8 type;
uint8 mask;
typedef struct dot11_tclas_fc_3_filter dot11_tclas_fc_3_filter_t;
#define DOT11_TCLAS_FC_3_FILTER_LEN 4
-
+/** TCLAS frame classifier type 4 parameters for IPV4 are the same as TCLAS type 1 */
typedef struct dot11_tclas_fc_1_ipv4 dot11_tclas_fc_4_ipv4_t;
#define DOT11_TCLAS_FC_4_IPV4_LEN DOT11_TCLAS_FC_1_IPV4_LEN
-
+/** TCLAS frame classifier type 4 parameters for IPV6 */
BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_4_ipv6 {
uint8 type;
uint8 mask;
typedef struct dot11_tclas_fc_4_ipv6 dot11_tclas_fc_4_ipv6_t;
#define DOT11_TCLAS_FC_4_IPV6_LEN 44
-
+/** TCLAS frame classifier type 5 parameters for 802.1D */
BWL_PRE_PACKED_STRUCT struct dot11_tclas_fc_5_8021d {
uint8 type;
uint8 mask;
typedef struct dot11_tclas_fc_5_8021d dot11_tclas_fc_5_8021d_t;
#define DOT11_TCLAS_FC_5_8021D_LEN 6
-
+/** TCLAS frame classifier type parameters */
BWL_PRE_PACKED_STRUCT union dot11_tclas_fc {
uint8 data[1];
dot11_tclas_fc_hdr_t hdr;
} BWL_POST_PACKED_STRUCT;
typedef union dot11_tclas_fc dot11_tclas_fc_t;
-#define DOT11_TCLAS_FC_MIN_LEN 4
+#define DOT11_TCLAS_FC_MIN_LEN 4 /* Classifier Type 2 has the min size */
#define DOT11_TCLAS_FC_MAX_LEN 254
-
+/** TCLAS element */
BWL_PRE_PACKED_STRUCT struct dot11_tclas_ie {
- uint8 id;
+ uint8 id; /* 14, DOT11_MNG_TCLAS_ID */
uint8 len;
uint8 user_priority;
dot11_tclas_fc_t fc;
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_tclas_ie dot11_tclas_ie_t;
-#define DOT11_TCLAS_IE_LEN 3
-
+#define DOT11_TCLAS_IE_LEN 3 /* Fixed length, include id and len */
+/** TCLAS processing element */
BWL_PRE_PACKED_STRUCT struct dot11_tclas_proc_ie {
- uint8 id;
+ uint8 id; /* 44, DOT11_MNG_TCLAS_PROC_ID */
uint8 len;
uint8 process;
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_tclas_proc_ie dot11_tclas_proc_ie_t;
-#define DOT11_TCLAS_PROC_IE_LEN 3
+#define DOT11_TCLAS_PROC_IE_LEN 3 /* Fixed length, include id and len */
-#define DOT11_TCLAS_PROC_MATCHALL 0
-#define DOT11_TCLAS_PROC_MATCHONE 1
-#define DOT11_TCLAS_PROC_NONMATCH 2
+#define DOT11_TCLAS_PROC_MATCHALL 0 /* All higher-layer elements need to match */
+#define DOT11_TCLAS_PROC_MATCHONE 1 /* One higher-layer element needs to match */
+#define DOT11_TCLAS_PROC_NONMATCH 2 /* No match to any higher-layer element */
+/* TSPEC element defined in 802.11 std section 8.4.2.32 - Not supported */
+#define DOT11_TSPEC_IE_LEN 57 /* Fixed length */
-#define DOT11_TSPEC_IE_LEN 57
-
-
+/** TFS request element */
BWL_PRE_PACKED_STRUCT struct dot11_tfs_req_ie {
- uint8 id;
+ uint8 id; /* 91, DOT11_MNG_TFS_REQUEST_ID */
uint8 len;
uint8 tfs_id;
- uint8 tfs_actcode;
+ uint8 actcode;
uint8 data[1];
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_tfs_req_ie dot11_tfs_req_ie_t;
-#define DOT11_TFS_REQ_IE_LEN 4
+#define DOT11_TFS_REQ_IE_LEN 2 /* Fixed length, without id and len */
+/** TFS request action codes (bitfield) */
#define DOT11_TFS_ACTCODE_DELETE 1
-#define DOT11_TFS_ACTCODE_MODIFY 2
+#define DOT11_TFS_ACTCODE_NOTIFY 2
+/** TFS request subelement IDs */
+#define DOT11_TFS_REQ_TFS_SE_ID 1
+#define DOT11_TFS_REQ_VENDOR_SE_ID 221
-BWL_PRE_PACKED_STRUCT struct dot11_tfs_req_se {
- uint8 sub_id;
- uint8 length;
- uint8 data[1];
-} BWL_POST_PACKED_STRUCT;
-typedef struct dot11_tfs_req_se dot11_tfs_req_se_t;
-
+/** TFS subelement */
BWL_PRE_PACKED_STRUCT struct dot11_tfs_se {
uint8 sub_id;
uint8 len;
- uint8 data[1];
+ uint8 data[1]; /* TCLAS element(s) + optional TCLAS proc */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_tfs_se dot11_tfs_se_t;
-#define DOT11_TFS_REQ_SUBELEM_LEN 2
-
-#define DOT11_TFS_SUBELEM_ID_TFS 1
-#define DOT11_TFS_SUBELEM_ID_VENDOR 221
+/** TFS response element */
BWL_PRE_PACKED_STRUCT struct dot11_tfs_resp_ie {
- uint8 id;
+ uint8 id; /* 92, DOT11_MNG_TFS_RESPONSE_ID */
uint8 len;
+ uint8 tfs_id;
uint8 data[1];
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_tfs_resp_ie dot11_tfs_resp_ie_t;
-#define DOT11_TFS_RESP_IE_LEN 2
+#define DOT11_TFS_RESP_IE_LEN 1 /* Fixed length, without id and len */
+/** TFS response subelement IDs (same subelements, but different IDs than in TFS request) */
+#define DOT11_TFS_RESP_TFS_STATUS_SE_ID 1
+#define DOT11_TFS_RESP_TFS_SE_ID 2
+#define DOT11_TFS_RESP_VENDOR_SE_ID 221
+/** TFS status subelement */
BWL_PRE_PACKED_STRUCT struct dot11_tfs_status_se {
- uint8 id;
+ uint8 sub_id; /* 1, DOT11_TFS_RESP_TFS_STATUS_SE_ID */
uint8 len;
uint8 resp_st;
- uint8 tfs_id;
+ uint8 data[1]; /* Potential dot11_tfs_se_t included */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_tfs_status_se dot11_tfs_status_se_t;
-
-#define DOT11_TFS_STATUS_SE_LEN 4
-#define DOT11_TFS_STATUS_SE_DATA_LEN 2
-
-#define DOT11_TFS_STATUS_SE_ID_TFS_ST 1
-#define DOT11_TFS_STATUS_SE_ID_TFS 2
-#define DOT11_TFS_STATUS_SE_ID_VENDOR 221
-
-#define DOT11_TFS_RESP_ST_ACCEPT 0
-#define DOT11_TFS_RESP_ST_DENY_FORMAT 1
-#define DOT11_TFS_RESP_ST_DENY_RESOURCE 2
-#define DOT11_TFS_RESP_ST_DENY_POLICY 4
-#define DOT11_TFS_RESP_ST_PREFERRED_AP_INCAP 14
-
-
-
+#define DOT11_TFS_STATUS_SE_LEN 1 /* Fixed length, without id and len */
+
+/* The following definitions should be merged into the FMS_TFS macros below */
+/* TFS Response status code. Identical to FMS Element status, without N/A */
+#define DOT11_TFS_STATUS_ACCEPT 0
+#define DOT11_TFS_STATUS_DENY_FORMAT 1
+#define DOT11_TFS_STATUS_DENY_RESOURCE 2
+#define DOT11_TFS_STATUS_DENY_POLICY 4
+#define DOT11_TFS_STATUS_DENY_UNSPECIFIED 5
+#define DOT11_TFS_STATUS_ALTPREF_POLICY 7
+#define DOT11_TFS_STATUS_ALTPREF_TCLAS_UNSUPP 14
+
+/* FMS Element Status and TFS Response Status Definition */
+#define DOT11_FMS_TFS_STATUS_ACCEPT 0
+#define DOT11_FMS_TFS_STATUS_DENY_FORMAT 1
+#define DOT11_FMS_TFS_STATUS_DENY_RESOURCE 2
+#define DOT11_FMS_TFS_STATUS_DENY_MULTIPLE_DI 3
+#define DOT11_FMS_TFS_STATUS_DENY_POLICY 4
+#define DOT11_FMS_TFS_STATUS_DENY_UNSPECIFIED 5
+#define DOT11_FMS_TFS_STATUS_ALT_DIFF_DI 6
+#define DOT11_FMS_TFS_STATUS_ALT_POLICY 7
+#define DOT11_FMS_TFS_STATUS_ALT_CHANGE_DI 8
+#define DOT11_FMS_TFS_STATUS_ALT_MCRATE 9
+#define DOT11_FMS_TFS_STATUS_TERM_POLICY 10
+#define DOT11_FMS_TFS_STATUS_TERM_RESOURCE 11
+#define DOT11_FMS_TFS_STATUS_TERM_HIGHER_PRIO 12
+#define DOT11_FMS_TFS_STATUS_ALT_CHANGE_MDI 13
+#define DOT11_FMS_TFS_STATUS_ALT_TCLAS_UNSUPP 14
+
+/** TFS Management Request frame header */
BWL_PRE_PACKED_STRUCT struct dot11_tfs_req {
- uint8 category;
- uint8 action;
- uint8 token;
- uint8 data[1];
+ uint8 category; /* category of action frame (10) */
+ uint8 action; /* WNM action: TFS request (13) */
+ uint8 token; /* dialog token */
+ uint8 data[1]; /* Elements */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_tfs_req dot11_tfs_req_t;
-#define DOT11_TFS_REQ_LEN 3
-
+#define DOT11_TFS_REQ_LEN 3 /* Fixed length */
+/** TFS Management Response frame header */
BWL_PRE_PACKED_STRUCT struct dot11_tfs_resp {
- uint8 category;
- uint8 action;
- uint8 token;
- uint8 data[1];
+ uint8 category; /* category of action frame (10) */
+ uint8 action; /* WNM action: TFS response (14) */
+ uint8 token; /* dialog token */
+ uint8 data[1]; /* Elements */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_tfs_resp dot11_tfs_resp_t;
-#define DOT11_TFS_RESP_LEN 3
-
+#define DOT11_TFS_RESP_LEN 3 /* Fixed length */
-BWL_PRE_PACKED_STRUCT struct dot11_tfs_notify {
- uint8 category;
- uint8 action;
- uint8 num_tfs_id;
- uint8 data[1];
+/** TFS Management Notify frame request header */
+BWL_PRE_PACKED_STRUCT struct dot11_tfs_notify_req {
+ uint8 category; /* category of action frame (10) */
+ uint8 action; /* WNM action: TFS notify request (15) */
+ uint8 tfs_id_cnt; /* TFS IDs count */
+ uint8 tfs_id[1]; /* Array of TFS IDs */
} BWL_POST_PACKED_STRUCT;
-typedef struct dot11_tfs_notify dot11_tfs_notify_t;
-#define DOT11_TFS_NOTIFY_LEN 3
+typedef struct dot11_tfs_notify_req dot11_tfs_notify_req_t;
+#define DOT11_TFS_NOTIFY_REQ_LEN 3 /* Fixed length */
-#define DOT11_TFS_NOTIFY_ACT_DEL 1
-#define DOT11_TFS_NOTIFY_ACT_NOTIFY 2
+/** TFS Management Notify frame response header */
+BWL_PRE_PACKED_STRUCT struct dot11_tfs_notify_resp {
+ uint8 category; /* category of action frame (10) */
+ uint8 action; /* WNM action: TFS notify response (28) */
+ uint8 tfs_id_cnt; /* TFS IDs count */
+ uint8 tfs_id[1]; /* Array of TFS IDs */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tfs_notify_resp dot11_tfs_notify_resp_t;
+#define DOT11_TFS_NOTIFY_RESP_LEN 3 /* Fixed length */
+/** WNM-Sleep Management Request frame header */
BWL_PRE_PACKED_STRUCT struct dot11_wnm_sleep_req {
- uint8 category;
- uint8 action;
- uint8 token;
- uint8 data[1];
+ uint8 category; /* category of action frame (10) */
+ uint8 action; /* WNM action: wnm-sleep request (16) */
+ uint8 token; /* dialog token */
+ uint8 data[1]; /* Elements */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_wnm_sleep_req dot11_wnm_sleep_req_t;
-#define DOT11_WNM_SLEEP_REQ_LEN 3
-
+#define DOT11_WNM_SLEEP_REQ_LEN 3 /* Fixed length */
+/** WNM-Sleep Management Response frame header */
BWL_PRE_PACKED_STRUCT struct dot11_wnm_sleep_resp {
- uint8 category;
- uint8 action;
- uint8 token;
- uint16 key_len;
- uint8 data[1];
+ uint8 category; /* category of action frame (10) */
+ uint8 action; /* WNM action: wnm-sleep response (17) */
+ uint8 token; /* dialog token */
+ uint16 key_len; /* key data length */
+ uint8 data[1]; /* Elements */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_wnm_sleep_resp dot11_wnm_sleep_resp_t;
-#define DOT11_WNM_SLEEP_RESP_LEN 5
+#define DOT11_WNM_SLEEP_RESP_LEN 5 /* Fixed length */
#define DOT11_WNM_SLEEP_SUBELEM_ID_GTK 0
#define DOT11_WNM_SLEEP_SUBELEM_ID_IGTK 1
uint8 key[1];
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_wnm_sleep_subelem_gtk dot11_wnm_sleep_subelem_gtk_t;
-#define DOT11_WNM_SLEEP_SUBELEM_GTK_FIXED_LEN 11
-#define DOT11_WNM_SLEEP_SUBELEM_GTK_MAX_LEN 43
+#define DOT11_WNM_SLEEP_SUBELEM_GTK_FIXED_LEN 11 /* without sub_id, len, and key */
+#define DOT11_WNM_SLEEP_SUBELEM_GTK_MAX_LEN 43 /* without sub_id and len */
BWL_PRE_PACKED_STRUCT struct dot11_wnm_sleep_subelem_igtk {
uint8 sub_id;
uint8 key[16];
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_wnm_sleep_subelem_igtk dot11_wnm_sleep_subelem_igtk_t;
-#define DOT11_WNM_SLEEP_SUBELEM_IGTK_LEN 24
+#define DOT11_WNM_SLEEP_SUBELEM_IGTK_LEN 24 /* Fixed length */
BWL_PRE_PACKED_STRUCT struct dot11_wnm_sleep_ie {
- uint8 id;
+ uint8 id; /* 93, DOT11_MNG_WNM_SLEEP_MODE_ID */
uint8 len;
uint8 act_type;
uint8 resp_status;
uint16 interval;
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_wnm_sleep_ie dot11_wnm_sleep_ie_t;
-#define DOT11_WNM_SLEEP_IE_LEN 4
+#define DOT11_WNM_SLEEP_IE_LEN 4 /* Fixed length */
#define DOT11_WNM_SLEEP_ACT_TYPE_ENTER 0
#define DOT11_WNM_SLEEP_ACT_TYPE_EXIT 1
#define DOT11_WNM_SLEEP_RESP_DENY_INUSE 5
#define DOT11_WNM_SLEEP_RESP_LAST 6
-
+/** DMS Management Request frame header */
BWL_PRE_PACKED_STRUCT struct dot11_dms_req {
- uint8 category;
- uint8 action;
- uint8 token;
- uint8 data[1];
+ uint8 category; /* category of action frame (10) */
+ uint8 action; /* WNM action: dms request (23) */
+ uint8 token; /* dialog token */
+ uint8 data[1]; /* Elements */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_dms_req dot11_dms_req_t;
-#define DOT11_DMS_REQ_LEN 3
-
+#define DOT11_DMS_REQ_LEN 3 /* Fixed length */
+/** DMS Management Response frame header */
BWL_PRE_PACKED_STRUCT struct dot11_dms_resp {
- uint8 category;
- uint8 action;
- uint8 token;
- uint8 data[1];
+ uint8 category; /* category of action frame (10) */
+ uint8 action; /* WNM action: dms response (24) */
+ uint8 token; /* dialog token */
+ uint8 data[1]; /* Elements */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_dms_resp dot11_dms_resp_t;
-#define DOT11_DMS_RESP_LEN 3
-
+#define DOT11_DMS_RESP_LEN 3 /* Fixed length */
+/** DMS request element */
BWL_PRE_PACKED_STRUCT struct dot11_dms_req_ie {
- uint8 id;
+ uint8 id; /* 99, DOT11_MNG_DMS_REQUEST_ID */
uint8 len;
uint8 data[1];
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_dms_req_ie dot11_dms_req_ie_t;
-#define DOT11_DMS_REQ_IE_LEN 2
-
+#define DOT11_DMS_REQ_IE_LEN 2 /* Fixed length */
+/** DMS response element */
BWL_PRE_PACKED_STRUCT struct dot11_dms_resp_ie {
- uint8 id;
+ uint8 id; /* 100, DOT11_MNG_DMS_RESPONSE_ID */
uint8 len;
uint8 data[1];
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_dms_resp_ie dot11_dms_resp_ie_t;
-#define DOT11_DMS_RESP_IE_LEN 2
-
+#define DOT11_DMS_RESP_IE_LEN 2 /* Fixed length */
+/** DMS request descriptor */
BWL_PRE_PACKED_STRUCT struct dot11_dms_req_desc {
uint8 dms_id;
uint8 len;
uint8 data[1];
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_dms_req_desc dot11_dms_req_desc_t;
-#define DOT11_DMS_REQ_DESC_LEN 3
+#define DOT11_DMS_REQ_DESC_LEN 3 /* Fixed length */
#define DOT11_DMS_REQ_TYPE_ADD 0
#define DOT11_DMS_REQ_TYPE_REMOVE 1
#define DOT11_DMS_REQ_TYPE_CHANGE 2
-
+/** DMS response status */
BWL_PRE_PACKED_STRUCT struct dot11_dms_resp_st {
uint8 dms_id;
uint8 len;
uint8 data[1];
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_dms_resp_st dot11_dms_resp_st_t;
-#define DOT11_DMS_RESP_STATUS_LEN 5
+#define DOT11_DMS_RESP_STATUS_LEN 5 /* Fixed length */
#define DOT11_DMS_RESP_TYPE_ACCEPT 0
#define DOT11_DMS_RESP_TYPE_DENY 1
#define DOT11_DMS_RESP_LSC_UNSUPPORTED 0xFFFF
+/** FMS Management Request frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_fms_req {
+ uint8 category; /* category of action frame (10) */
+ uint8 action; /* WNM action: fms request (9) */
+ uint8 token; /* dialog token */
+ uint8 data[1]; /* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_fms_req dot11_fms_req_t;
+#define DOT11_FMS_REQ_LEN 3 /* Fixed length */
+
+/** FMS Management Response frame header */
+BWL_PRE_PACKED_STRUCT struct dot11_fms_resp {
+ uint8 category; /* category of action frame (10) */
+ uint8 action; /* WNM action: fms response (10) */
+ uint8 token; /* dialog token */
+ uint8 data[1]; /* Elements */
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_fms_resp dot11_fms_resp_t;
+#define DOT11_FMS_RESP_LEN 3 /* Fixed length */
+
+/** FMS Descriptor element */
+BWL_PRE_PACKED_STRUCT struct dot11_fms_desc {
+ uint8 id;
+ uint8 len;
+ uint8 num_fms_cnt;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_fms_desc dot11_fms_desc_t;
+#define DOT11_FMS_DESC_LEN 1 /* Fixed length */
+
+#define DOT11_FMS_CNTR_MAX 0x8
+#define DOT11_FMS_CNTR_ID_MASK 0x7
+#define DOT11_FMS_CNTR_ID_SHIFT 0x0
+#define DOT11_FMS_CNTR_COUNT_MASK 0xf1
+#define DOT11_FMS_CNTR_SHIFT 0x3
+
+/** FMS request element */
+BWL_PRE_PACKED_STRUCT struct dot11_fms_req_ie {
+ uint8 id;
+ uint8 len;
+ uint8 fms_token; /* token used to identify fms stream set */
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_fms_req_ie dot11_fms_req_ie_t;
+#define DOT11_FMS_REQ_IE_FIX_LEN 1 /* Fixed length */
+
+BWL_PRE_PACKED_STRUCT struct dot11_rate_id_field {
+ uint8 mask;
+ uint8 mcs_idx;
+ uint16 rate;
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_rate_id_field dot11_rate_id_field_t;
+#define DOT11_RATE_ID_FIELD_MCS_SEL_MASK 0x7
+#define DOT11_RATE_ID_FIELD_MCS_SEL_OFFSET 0
+#define DOT11_RATE_ID_FIELD_RATETYPE_MASK 0x18
+#define DOT11_RATE_ID_FIELD_RATETYPE_OFFSET 3
+#define DOT11_RATE_ID_FIELD_LEN sizeof(dot11_rate_id_field_t)
+
+/** FMS request subelements */
+BWL_PRE_PACKED_STRUCT struct dot11_fms_se {
+ uint8 sub_id;
+ uint8 len;
+ uint8 interval;
+ uint8 max_interval;
+ dot11_rate_id_field_t rate;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_fms_se dot11_fms_se_t;
+#define DOT11_FMS_REQ_SE_LEN 6 /* Fixed length */
+
+#define DOT11_FMS_REQ_SE_ID_FMS 1 /* FMS subelement */
+#define DOT11_FMS_REQ_SE_ID_VS 221 /* Vendor Specific subelement */
+
+/** FMS response element */
+BWL_PRE_PACKED_STRUCT struct dot11_fms_resp_ie {
+ uint8 id;
+ uint8 len;
+ uint8 fms_token;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_fms_resp_ie dot11_fms_resp_ie_t;
+#define DOT11_FMS_RESP_IE_FIX_LEN 1 /* Fixed length */
+
+/* FMS status subelements */
+#define DOT11_FMS_STATUS_SE_ID_FMS 1 /* FMS Status */
+#define DOT11_FMS_STATUS_SE_ID_TCLAS 2 /* TCLAS Status */
+#define DOT11_FMS_STATUS_SE_ID_VS 221 /* Vendor Specific subelement */
+
+/** FMS status subelement */
+BWL_PRE_PACKED_STRUCT struct dot11_fms_status_se {
+ uint8 sub_id;
+ uint8 len;
+ uint8 status;
+ uint8 interval;
+ uint8 max_interval;
+ uint8 fmsid;
+ uint8 counter;
+ dot11_rate_id_field_t rate;
+ uint8 mcast_addr[ETHER_ADDR_LEN];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_fms_status_se dot11_fms_status_se_t;
+#define DOT11_FMS_STATUS_SE_LEN 15 /* Fixed length */
+
+/** TCLAS status subelement */
+BWL_PRE_PACKED_STRUCT struct dot11_tclas_status_se {
+ uint8 sub_id;
+ uint8 len;
+ uint8 fmsid;
+ uint8 data[1];
+} BWL_POST_PACKED_STRUCT;
+typedef struct dot11_tclas_status_se dot11_tclas_status_se_t;
+#define DOT11_TCLAS_STATUS_SE_LEN 1 /* Fixed length */
+
BWL_PRE_PACKED_STRUCT struct dot11_addba_req {
- uint8 category;
- uint8 action;
- uint8 token;
- uint16 addba_param_set;
- uint16 timeout;
- uint16 start_seqnum;
+ uint8 category; /* category of action frame (3) */
+ uint8 action; /* action: addba req */
+ uint8 token; /* identifier */
+ uint16 addba_param_set; /* parameter set */
+ uint16 timeout; /* timeout in TUs */
+ uint16 start_seqnum; /* starting sequence number */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_addba_req dot11_addba_req_t;
-#define DOT11_ADDBA_REQ_LEN 9
+#define DOT11_ADDBA_REQ_LEN 9 /* length of addba req frame */
BWL_PRE_PACKED_STRUCT struct dot11_addba_resp {
- uint8 category;
- uint8 action;
- uint8 token;
- uint16 status;
- uint16 addba_param_set;
- uint16 timeout;
+ uint8 category; /* category of action frame (3) */
+ uint8 action; /* action: addba resp */
+ uint8 token; /* identifier */
+ uint16 status; /* status of add request */
+ uint16 addba_param_set; /* negotiated parameter set */
+ uint16 timeout; /* negotiated timeout in TUs */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_addba_resp dot11_addba_resp_t;
-#define DOT11_ADDBA_RESP_LEN 9
-
+#define DOT11_ADDBA_RESP_LEN 9 /* length of addba resp frame */
-#define DOT11_DELBA_PARAM_INIT_MASK 0x0800
-#define DOT11_DELBA_PARAM_INIT_SHIFT 11
-#define DOT11_DELBA_PARAM_TID_MASK 0xf000
-#define DOT11_DELBA_PARAM_TID_SHIFT 12
+/* DELBA action parameters */
+#define DOT11_DELBA_PARAM_INIT_MASK 0x0800 /* initiator mask */
+#define DOT11_DELBA_PARAM_INIT_SHIFT 11 /* initiator shift */
+#define DOT11_DELBA_PARAM_TID_MASK 0xf000 /* tid mask */
+#define DOT11_DELBA_PARAM_TID_SHIFT 12 /* tid shift */
BWL_PRE_PACKED_STRUCT struct dot11_delba {
- uint8 category;
- uint8 action;
- uint16 delba_param_set;
- uint16 reason;
+ uint8 category; /* category of action frame (3) */
+ uint8 action; /* action: delba */
+ uint16 delba_param_set; /* parameter set */
+ uint16 reason; /* reason for delba */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_delba dot11_delba_t;
-#define DOT11_DELBA_LEN 6
-
+#define DOT11_DELBA_LEN 6 /* length of delba frame */
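+
+/* Illustrative sketch, not part of the original header: compose the DELBA
+ * parameter set from a TID and the initiator flag with the masks/shifts above.
+ * Byte-order conversion is left to the caller; the helper name is hypothetical.
+ */
+static uint16
+dot11_delba_param_build(uint8 tid, int initiator)
+{
+	uint16 param = (uint16)(tid << DOT11_DELBA_PARAM_TID_SHIFT) &
+	        DOT11_DELBA_PARAM_TID_MASK;
+	if (initiator)
+		param |= DOT11_DELBA_PARAM_INIT_MASK;
+	return param;
+}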
+/* SA Query action field value */
#define SA_QUERY_REQUEST 0
#define SA_QUERY_RESPONSE 1
+/* ************* 802.11r related definitions. ************* */
-
-
+/** Over-the-DS Fast Transition Request frame header */
BWL_PRE_PACKED_STRUCT struct dot11_ft_req {
- uint8 category;
- uint8 action;
+ uint8 category; /* category of action frame (6) */
+ uint8 action; /* action: ft req */
uint8 sta_addr[ETHER_ADDR_LEN];
uint8 tgt_ap_addr[ETHER_ADDR_LEN];
- uint8 data[1];
+ uint8 data[1]; /* Elements */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_ft_req dot11_ft_req_t;
#define DOT11_FT_REQ_FIXED_LEN 14
-
+/** Over-the-DS Fast Transition Response frame header */
BWL_PRE_PACKED_STRUCT struct dot11_ft_res {
- uint8 category;
- uint8 action;
+ uint8 category; /* category of action frame (6) */
+ uint8 action; /* action: ft resp */
uint8 sta_addr[ETHER_ADDR_LEN];
uint8 tgt_ap_addr[ETHER_ADDR_LEN];
- uint16 status;
- uint8 data[1];
+ uint16 status; /* status code */
+ uint8 data[1]; /* Elements */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_ft_res dot11_ft_res_t;
#define DOT11_FT_RES_FIXED_LEN 16
-
+/** RDE RIC Data Element. */
BWL_PRE_PACKED_STRUCT struct dot11_rde_ie {
- uint8 id;
+ uint8 id; /* 11r, DOT11_MNG_RDE_ID */
uint8 length;
- uint8 rde_id;
- uint8 rd_count;
- uint16 status;
+ uint8 rde_id; /* RDE identifier. */
+ uint8 rd_count; /* Resource Descriptor Count. */
+ uint16 status; /* Status Code. */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_rde_ie dot11_rde_ie_t;
-
+/* 11r - Size of the RDE (RIC Data Element) IE, including TLV header. */
#define DOT11_MNG_RDE_IE_LEN sizeof(dot11_rde_ie_t)
+/* ************* 802.11k related definitions. ************* */
-
-
-
-#define DOT11_RRM_CAP_LEN 5
+/* Radio measurements enabled capability ie */
+#define DOT11_RRM_CAP_LEN 5 /* length of rrm cap bitmap */
#define RCPI_IE_LEN 1
#define RSNI_IE_LEN 1
BWL_PRE_PACKED_STRUCT struct dot11_rrm_cap_ie {
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_rrm_cap_ie dot11_rrm_cap_ie_t;
-
-#define DOT11_RRM_CAP_LINK 0
+/* Bitmap definitions for cap ie */
+#define DOT11_RRM_CAP_LINK 0
#define DOT11_RRM_CAP_NEIGHBOR_REPORT 1
#define DOT11_RRM_CAP_PARALLEL 2
#define DOT11_RRM_CAP_REPEATED 3
#define DOT11_RRM_CAP_BCN_ACTIVE 5
#define DOT11_RRM_CAP_BCN_TABLE 6
#define DOT11_RRM_CAP_BCN_REP_COND 7
+#define DOT11_RRM_CAP_FM 8
+#define DOT11_RRM_CAP_CLM 9
+#define DOT11_RRM_CAP_NHM 10
+#define DOT11_RRM_CAP_SM 11
+#define DOT11_RRM_CAP_LCIM 12
+#define DOT11_RRM_CAP_LCIA 13
+#define DOT11_RRM_CAP_TSCM 14
+#define DOT11_RRM_CAP_TTSCM 15
#define DOT11_RRM_CAP_AP_CHANREP 16
-
-
-
+#define DOT11_RRM_CAP_RMMIB 17
+/* bit18-bit26, not used for RRM_IOVAR */
+#define DOT11_RRM_CAP_MPTI 27
+#define DOT11_RRM_CAP_NBRTSFO 28
+#define DOT11_RRM_CAP_RCPI 29
+#define DOT11_RRM_CAP_RSNI 30
+#define DOT11_RRM_CAP_BSSAAD 31
+#define DOT11_RRM_CAP_BSSAAC 32
+#define DOT11_RRM_CAP_AI 33
+
+/* Operating Class (formerly "Regulatory Class") definitions */
#define DOT11_OP_CLASS_NONE 255
BWL_PRE_PACKED_STRUCT struct do11_ap_chrep {
} BWL_POST_PACKED_STRUCT;
typedef struct do11_ap_chrep dot11_ap_chrep_t;
+/* Radio Measurements action ids */
+#define DOT11_RM_ACTION_RM_REQ 0 /* Radio measurement request */
+#define DOT11_RM_ACTION_RM_REP 1 /* Radio measurement report */
+#define DOT11_RM_ACTION_LM_REQ 2 /* Link measurement request */
+#define DOT11_RM_ACTION_LM_REP 3 /* Link measurement report */
+#define DOT11_RM_ACTION_NR_REQ 4 /* Neighbor report request */
+#define DOT11_RM_ACTION_NR_REP 5 /* Neighbor report response */
-#define DOT11_RM_ACTION_RM_REQ 0
-#define DOT11_RM_ACTION_RM_REP 1
-#define DOT11_RM_ACTION_LM_REQ 2
-#define DOT11_RM_ACTION_LM_REP 3
-#define DOT11_RM_ACTION_NR_REQ 4
-#define DOT11_RM_ACTION_NR_REP 5
-
-
+/** Generic radio measurement action frame header */
BWL_PRE_PACKED_STRUCT struct dot11_rm_action {
- uint8 category;
- uint8 action;
- uint8 token;
+ uint8 category; /* category of action frame (5) */
+ uint8 action; /* radio measurement action */
+ uint8 token; /* dialog token */
uint8 data[1];
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_rm_action dot11_rm_action_t;
#define DOT11_RM_ACTION_LEN 3
BWL_PRE_PACKED_STRUCT struct dot11_rmreq {
- uint8 category;
- uint8 action;
- uint8 token;
- uint16 reps;
+ uint8 category; /* category of action frame (5) */
+ uint8 action; /* radio measurement action */
+ uint8 token; /* dialog token */
+ uint16 reps; /* no. of repetitions */
uint8 data[1];
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_rmreq dot11_rmreq_t;
typedef struct dot11_rm_ie dot11_rm_ie_t;
#define DOT11_RM_IE_LEN 5
-
+/* Definitions for "mode" bits in rm req */
#define DOT11_RMREQ_MODE_PARALLEL 1
#define DOT11_RMREQ_MODE_ENABLE 2
#define DOT11_RMREQ_MODE_REQUEST 4
#define DOT11_RMREQ_MODE_REPORT 8
-#define DOT11_RMREQ_MODE_DURMAND 0x10
-
+#define DOT11_RMREQ_MODE_DURMAND 0x10 /* Duration Mandatory */
+/* Definitions for "mode" bits in rm rep */
#define DOT11_RMREP_MODE_LATE 1
#define DOT11_RMREP_MODE_INCAPABLE 2
#define DOT11_RMREP_MODE_REFUSED 4
typedef struct dot11_rmrep_bcn dot11_rmrep_bcn_t;
#define DOT11_RMREP_BCN_LEN 26
-
+/* Beacon request measurement mode */
#define DOT11_RMREQ_BCN_PASSIVE 0
#define DOT11_RMREQ_BCN_ACTIVE 1
#define DOT11_RMREQ_BCN_TABLE 2
-
+/* Sub-element IDs for Beacon Request */
#define DOT11_RMREQ_BCN_SSID_ID 0
#define DOT11_RMREQ_BCN_REPINFO_ID 1
#define DOT11_RMREQ_BCN_REPDET_ID 2
#define DOT11_RMREQ_BCN_REQUEST_ID 10
#define DOT11_RMREQ_BCN_APCHREP_ID DOT11_MNG_AP_CHREP_ID
+/* Reporting Detail element definition */
+#define DOT11_RMREQ_BCN_REPDET_FIXED 0 /* Fixed length fields only */
+#define DOT11_RMREQ_BCN_REPDET_REQUEST 1 /* + requested information elems */
+#define DOT11_RMREQ_BCN_REPDET_ALL 2 /* All fields */
-#define DOT11_RMREQ_BCN_REPDET_FIXED 0
-#define DOT11_RMREQ_BCN_REPDET_REQUEST 1
-#define DOT11_RMREQ_BCN_REPDET_ALL 2
-
-
+/* Sub-element IDs for Beacon Report */
#define DOT11_RMREP_BCN_FRM_BODY 1
-
+/* Sub-element IDs for Frame Report */
#define DOT11_RMREP_FRAME_COUNT_REPORT 1
-
+/** Channel load request */
BWL_PRE_PACKED_STRUCT struct dot11_rmreq_chanload {
uint8 id;
uint8 len;
typedef struct dot11_rmreq_chanload dot11_rmreq_chanload_t;
#define DOT11_RMREQ_CHANLOAD_LEN 11
-
+/** Channel load report */
BWL_PRE_PACKED_STRUCT struct dot11_rmrep_chanload {
uint8 reg;
uint8 channel;
typedef struct dot11_rmrep_chanload dot11_rmrep_chanload_t;
#define DOT11_RMREP_CHANLOAD_LEN 13
-
+/** Noise histogram request */
BWL_PRE_PACKED_STRUCT struct dot11_rmreq_noise {
uint8 id;
uint8 len;
typedef struct dot11_rmreq_noise dot11_rmreq_noise_t;
#define DOT11_RMREQ_NOISE_LEN 11
-
+/** Noise histogram report */
BWL_PRE_PACKED_STRUCT struct dot11_rmrep_noise {
uint8 reg;
uint8 channel;
typedef struct dot11_rmrep_noise dot11_rmrep_noise_t;
#define DOT11_RMREP_NOISE_LEN 25
-
+/** Frame request */
BWL_PRE_PACKED_STRUCT struct dot11_rmreq_frame {
uint8 id;
uint8 len;
typedef struct dot11_rmreq_frame dot11_rmreq_frame_t;
#define DOT11_RMREQ_FRAME_LEN 18
-
+/** Frame report */
BWL_PRE_PACKED_STRUCT struct dot11_rmrep_frame {
uint8 reg;
uint8 channel;
typedef struct dot11_rmrep_frame dot11_rmrep_frame_t;
#define DOT11_RMREP_FRAME_LEN 12
-
+/** Frame report entry */
BWL_PRE_PACKED_STRUCT struct dot11_rmrep_frmentry {
struct ether_addr ta;
struct ether_addr bssid;
typedef struct dot11_rmrep_frmentry dot11_rmrep_frmentry_t;
#define DOT11_RMREP_FRMENTRY_LEN 19
-
+/** STA statistics request */
BWL_PRE_PACKED_STRUCT struct dot11_rmreq_stat {
uint8 id;
uint8 len;
typedef struct dot11_rmreq_stat dot11_rmreq_stat_t;
#define DOT11_RMREQ_STAT_LEN 16
-
+/** STA statistics report */
BWL_PRE_PACKED_STRUCT struct dot11_rmrep_stat {
uint16 duration;
uint8 group_id;
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_rmrep_stat dot11_rmrep_stat_t;
-
+/** Transmit stream/category measurement request */
BWL_PRE_PACKED_STRUCT struct dot11_rmreq_tx_stream {
uint8 id;
uint8 len;
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_rmreq_tx_stream dot11_rmreq_tx_stream_t;
-
+/** Transmit stream/category measurement report */
BWL_PRE_PACKED_STRUCT struct dot11_rmrep_tx_stream {
uint32 starttime[2];
uint16 duration;
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_rmrep_tx_stream dot11_rmrep_tx_stream_t;
-
+/** Measurement pause request */
BWL_PRE_PACKED_STRUCT struct dot11_rmreq_pause_time {
uint8 id;
uint8 len;
typedef struct dot11_rmreq_pause_time dot11_rmreq_pause_time_t;
-
+/* Neighbor Report subelements ID (11k & 11v) */
#define DOT11_NGBR_TSF_INFO_SE_ID 1
#define DOT11_NGBR_CCS_SE_ID 2
#define DOT11_NGBR_BSSTRANS_PREF_SE_ID 3
#define DOT11_NGBR_BSS_TERM_DUR_SE_ID 4
#define DOT11_NGBR_BEARING_SE_ID 5
-
+/** Neighbor Report, BSS Transition Candidate Preference subelement */
BWL_PRE_PACKED_STRUCT struct dot11_ngbr_bsstrans_pref_se {
uint8 sub_id;
uint8 len;
typedef struct dot11_ngbr_bsstrans_pref_se dot11_ngbr_bsstrans_pref_se_t;
#define DOT11_NGBR_BSSTRANS_PREF_SE_LEN 1
-
+/** Neighbor Report, BSS Termination Duration subelement */
BWL_PRE_PACKED_STRUCT struct dot11_ngbr_bss_term_dur_se {
uint8 sub_id;
uint8 len;
typedef struct dot11_ngbr_bss_term_dur_se dot11_ngbr_bss_term_dur_se_t;
#define DOT11_NGBR_BSS_TERM_DUR_SE_LEN 10
-
+/* Neighbor Report BSSID Information Field */
#define DOT11_NGBR_BI_REACHABILTY_UNKN 0x0002
#define DOT11_NGBR_BI_REACHABILTY 0x0003
#define DOT11_NGBR_BI_SEC 0x0004
#define DOT11_NGBR_BI_MOBILITY 0x0400
#define DOT11_NGBR_BI_HT 0x0800
-
+/** Neighbor Report element (11k & 11v) */
BWL_PRE_PACKED_STRUCT struct dot11_neighbor_rep_ie {
uint8 id;
uint8 len;
struct ether_addr bssid;
uint32 bssid_info;
- uint8 reg;
+ uint8 reg; /* Operating class */
uint8 channel;
uint8 phytype;
- uint8 data[1];
+ uint8 data[1]; /* Variable size subelements */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_neighbor_rep_ie dot11_neighbor_rep_ie_t;
#define DOT11_NEIGHBOR_REP_IE_FIXED_LEN 13
+/* MLME Enumerations */
+#define DOT11_BSSTYPE_INFRASTRUCTURE 0 /* d11 infrastructure */
+#define DOT11_BSSTYPE_INDEPENDENT 1 /* d11 independent */
+#define DOT11_BSSTYPE_ANY 2 /* d11 any BSS type */
+#define DOT11_SCANTYPE_ACTIVE 0 /* d11 scan active */
+#define DOT11_SCANTYPE_PASSIVE 1 /* d11 scan passive */
-#define DOT11_BSSTYPE_INFRASTRUCTURE 0
-#define DOT11_BSSTYPE_INDEPENDENT 1
-#define DOT11_BSSTYPE_ANY 2
-#define DOT11_SCANTYPE_ACTIVE 0
-#define DOT11_SCANTYPE_PASSIVE 1
-
-
+/** Link Measurement */
BWL_PRE_PACKED_STRUCT struct dot11_lmreq {
- uint8 category;
- uint8 action;
- uint8 token;
- uint8 txpwr;
- uint8 maxtxpwr;
+ uint8 category; /* category of action frame (5) */
+ uint8 action; /* radio measurement action */
+ uint8 token; /* dialog token */
+ uint8 txpwr; /* Transmit Power Used */
+ uint8 maxtxpwr; /* Max Transmit Power */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_lmreq dot11_lmreq_t;
#define DOT11_LMREQ_LEN 5
BWL_PRE_PACKED_STRUCT struct dot11_lmrep {
- uint8 category;
- uint8 action;
- uint8 token;
- dot11_tpc_rep_t tpc;
- uint8 rxant;
- uint8 txant;
- uint8 rcpi;
- uint8 rsni;
+ uint8 category; /* category of action frame (5) */
+ uint8 action; /* radio measurement action */
+ uint8 token; /* dialog token */
+ dot11_tpc_rep_t tpc; /* TPC element */
+ uint8 rxant; /* Receive Antenna ID */
+ uint8 txant; /* Transmit Antenna ID */
+ uint8 rcpi; /* RCPI */
+ uint8 rsni; /* RSNI */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_lmrep dot11_lmrep_t;
#define DOT11_LMREP_LEN 11
+/* 802.11 BRCM "Compromise" Pre N constants */
+#define PREN_PREAMBLE 24 /* green field preamble time */
+#define PREN_MM_EXT 12 /* extra mixed mode preamble time */
+#define PREN_PREAMBLE_EXT 4 /* extra preamble (multiply by unique_streams-1) */
-#define PREN_PREAMBLE 24
-#define PREN_MM_EXT 12
-#define PREN_PREAMBLE_EXT 4
-
-
-#define RIFS_11N_TIME 2
-
-
+/* 802.11N PHY constants */
+#define RIFS_11N_TIME 2 /* NPHY RIFS time */
+/* 802.11 HT PLCP format 802.11n-2009, sec 20.3.9.4.3
+ * HT-SIG is composed of two 24 bit parts, HT-SIG1 and HT-SIG2
+ */
+/* HT-SIG1 */
#define HT_SIG1_MCS_MASK 0x00007F
#define HT_SIG1_CBW 0x000080
#define HT_SIG1_HT_LENGTH 0xFFFF00
-
+/* HT-SIG2 */
#define HT_SIG2_SMOOTHING 0x000001
#define HT_SIG2_NOT_SOUNDING 0x000002
#define HT_SIG2_RESERVED 0x000004
#define HT_SIG2_CRC 0x03FC00
#define HT_SIG2_TAIL 0x1C0000
-
+/* HT Timing-related parameters (802.11-2012, sec 20.3.6) */
#define HT_T_LEG_PREAMBLE 16
#define HT_T_L_SIG 4
#define HT_T_SIG 8
#define HT_T_GF_STF 8
#define HT_T_SYML 4
-#define HT_N_SERVICE 16
-#define HT_N_TAIL 6
-
-
-#define APHY_SLOT_TIME 9
-#define APHY_SIFS_TIME 16
-#define APHY_DIFS_TIME (APHY_SIFS_TIME + (2 * APHY_SLOT_TIME))
-#define APHY_PREAMBLE_TIME 16
-#define APHY_SIGNAL_TIME 4
-#define APHY_SYMBOL_TIME 4
-#define APHY_SERVICE_NBITS 16
-#define APHY_TAIL_NBITS 6
-#define APHY_CWMIN 15
+#define HT_N_SERVICE 16 /* bits in SERVICE field */
+#define HT_N_TAIL 6 /* tail bits per BCC encoder */
+/* 802.11 A PHY constants */
+#define APHY_SLOT_TIME 9 /* APHY slot time */
+#define APHY_SIFS_TIME 16 /* APHY SIFS time */
+#define APHY_DIFS_TIME (APHY_SIFS_TIME + (2 * APHY_SLOT_TIME)) /* APHY DIFS time */
+#define APHY_PREAMBLE_TIME 16 /* APHY preamble time */
+#define APHY_SIGNAL_TIME 4 /* APHY signal time */
+#define APHY_SYMBOL_TIME 4 /* APHY symbol time */
+#define APHY_SERVICE_NBITS 16 /* APHY service nbits */
+#define APHY_TAIL_NBITS 6 /* APHY tail nbits */
+#define APHY_CWMIN 15 /* APHY cwmin */
-#define BPHY_SLOT_TIME 20
-#define BPHY_SIFS_TIME 10
-#define BPHY_DIFS_TIME 50
-#define BPHY_PLCP_TIME 192
-#define BPHY_PLCP_SHORT_TIME 96
-#define BPHY_CWMIN 31
+/* 802.11 B PHY constants */
+#define BPHY_SLOT_TIME 20 /* BPHY slot time */
+#define BPHY_SIFS_TIME 10 /* BPHY SIFS time */
+#define BPHY_DIFS_TIME 50 /* BPHY DIFS time */
+#define BPHY_PLCP_TIME 192 /* BPHY PLCP time */
+#define BPHY_PLCP_SHORT_TIME 96 /* BPHY PLCP short time */
+#define BPHY_CWMIN 31 /* BPHY cwmin */
+/* 802.11 G constants */
+#define DOT11_OFDM_SIGNAL_EXTENSION 6 /* d11 OFDM signal extension */
-#define DOT11_OFDM_SIGNAL_EXTENSION 6
-
-#define PHY_CWMAX 1023
-
-#define DOT11_MAXNUMFRAGS 16
+#define PHY_CWMAX 1023 /* PHY cwmax */
+#define DOT11_MAXNUMFRAGS 16 /* max # fragments per MSDU */
+/* 802.11 VHT constants */
typedef int vht_group_id_t;
-
-
+/* for VHT-A1 */
+/* SIG-A1 reserved bits */
#define VHT_SIGA1_CONST_MASK 0x800004
#define VHT_SIGA1_BW_MASK 0x000003
#define VHT_SIGA1_TXOP_PS_NOT_ALLOWED 0x400000
-
+/* for VHT-A2 */
#define VHT_SIGA2_GI_NONE 0x000000
#define VHT_SIGA2_GI_SHORT 0x000001
#define VHT_SIGA2_GI_W_MOD10 0x000002
#define VHT_SIGA2_TAIL_MASK 0xfc0000
#define VHT_SIGA2_TAIL_VALUE 0x000000
-
+/* VHT Timing-related parameters (802.11ac D4.0, sec 22.3.6) */
#define VHT_T_LEG_PREAMBLE 16
#define VHT_T_L_SIG 4
#define VHT_T_SIG_A 8
#define VHT_T_SIG_B 4
#define VHT_T_SYML 4
-#define VHT_N_SERVICE 16
-#define VHT_N_TAIL 6
-
+#define VHT_N_SERVICE 16 /* bits in SERVICE field */
+#define VHT_N_TAIL 6 /* tail bits per BCC encoder */
+/** dot11Counters Table - 802.11 spec., Annex D */
typedef struct d11cnt {
- uint32 txfrag;
- uint32 txmulti;
- uint32 txfail;
- uint32 txretry;
- uint32 txretrie;
- uint32 rxdup;
- uint32 txrts;
- uint32 txnocts;
- uint32 txnoack;
- uint32 rxfrag;
- uint32 rxmulti;
- uint32 rxcrc;
- uint32 txfrmsnt;
- uint32 rxundec;
+ uint32 txfrag; /* dot11TransmittedFragmentCount */
+ uint32 txmulti; /* dot11MulticastTransmittedFrameCount */
+ uint32 txfail; /* dot11FailedCount */
+ uint32 txretry; /* dot11RetryCount */
+ uint32 txretrie; /* dot11MultipleRetryCount */
+ uint32 rxdup; /* dot11FrameduplicateCount */
+ uint32 txrts; /* dot11RTSSuccessCount */
+ uint32 txnocts; /* dot11RTSFailureCount */
+ uint32 txnoack; /* dot11ACKFailureCount */
+ uint32 rxfrag; /* dot11ReceivedFragmentCount */
+ uint32 rxmulti; /* dot11MulticastReceivedFrameCount */
+ uint32 rxcrc; /* dot11FCSErrorCount */
+ uint32 txfrmsnt; /* dot11TransmittedFrameCount */
+ uint32 rxundec; /* dot11WEPUndecryptableCount */
} d11cnt_t;
+#define BRCM_PROP_OUI "\x00\x90\x4C"
-#define BRCM_PROP_OUI "\x00\x90\x4C"
+/* Action frame type for RWL */
+#define RWL_WIFI_DEFAULT 0
+#define RWL_WIFI_FIND_MY_PEER 9 /* Used while finding server */
+#define RWL_WIFI_FOUND_PEER 10 /* Server response to the client */
+#define RWL_ACTION_WIFI_FRAG_TYPE 85 /* Fragment indicator for receiver */
+#define PROXD_AF_TYPE 11 /* Wifi proximity action frame type */
+#define BRCM_RELMACST_AF_TYPE 12 /* RMC action frame type */
-#define BRCM_SYSCAP_WET_TUNNEL 0x0100
-
-
-#define BRCM_OUI "\x00\x10\x18"
-
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+/*
+ * These BRCM_PROP_OUI types are intended for use in events to embed additional
+ * data, and would not be expected to appear on the air -- but the IE format
+ * allows extra data to be carried in events while still permitting flexible
+ * IE-style parsing.
+ */
+#define BRCM_EVT_WL_BSS_INFO 64
-BWL_PRE_PACKED_STRUCT struct brcm_ie {
- uint8 id;
- uint8 len;
- uint8 oui[3];
- uint8 ver;
- uint8 assoc;
- uint8 flags;
- uint8 flags1;
- uint16 amsdu_mtu_pref;
+/**
+ * Following is the generic structure for brcm_prop_ie (uses BRCM_PROP_OUI).
+ * DPT uses this format with type set to DPT_IE_TYPE
+ */
+BWL_PRE_PACKED_STRUCT struct brcm_prop_ie_s {
+ uint8 id; /* IE ID, 221, DOT11_MNG_PROPR_ID */
+ uint8 len; /* IE length */
+ uint8 oui[3]; /* Proprietary OUI, BRCM_PROP_OUI */
+ uint8 type; /* type of this IE */
+ uint16 cap; /* DPT capabilities */
} BWL_POST_PACKED_STRUCT;
-typedef struct brcm_ie brcm_ie_t;
-#define BRCM_IE_LEN 11
-#define BRCM_IE_VER 2
-#define BRCM_IE_LEGACY_AES_VER 1
+typedef struct brcm_prop_ie_s brcm_prop_ie_t;
+#define BRCM_PROP_IE_LEN 6 /* len of fixed part of brcm_prop ie */
-#define BRF_LZWDS 0x4
-#define BRF_BLOCKACK 0x8
+#define DPT_IE_TYPE 2
-#define BRF1_AMSDU 0x1
-#define BRF1_WMEPS 0x4
-#define BRF1_PSOFIX 0x8
-#define BRF1_RX_LARGE_AGG 0x10
-#define BRF1_RFAWARE_DCS 0x20
-#define BRF1_SOFTAP 0x40
-#define BRF1_DWDS 0x80
+#define BRCM_SYSCAP_IE_TYPE 3
+#define WET_TUNNEL_IE_TYPE 3
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+/* brcm syscap_ie cap */
+#define BRCM_SYSCAP_WET_TUNNEL 0x0100 /* Device with WET_TUNNEL support */
+#define BRCM_OUI "\x00\x10\x18" /* Broadcom OUI */
+
+/** BRCM info element */
+BWL_PRE_PACKED_STRUCT struct brcm_ie {
+ uint8 id; /* IE ID, 221, DOT11_MNG_PROPR_ID */
+ uint8 len; /* IE length */
+ uint8 oui[3];
+ uint8 ver; /* type/ver of this IE */
+ uint8 assoc; /* # of assoc STAs */
+ uint8 flags; /* misc flags */
+ uint8 flags1; /* misc flags */
+ uint16 amsdu_mtu_pref; /* preferred A-MSDU MTU */
+} BWL_POST_PACKED_STRUCT;
+typedef struct brcm_ie brcm_ie_t;
+#define BRCM_IE_LEN 11 /* BRCM IE length */
+#define BRCM_IE_VER 2 /* BRCM IE version */
+#define BRCM_IE_LEGACY_AES_VER 1 /* BRCM IE legacy AES version */
+
+/* brcm_ie flags */
+#define BRF_ABCAP 0x1 /* afterburner is obsolete, defined for backward compat */
+#define BRF_ABRQRD 0x2 /* afterburner is obsolete, defined for backward compat */
+#define BRF_LZWDS 0x4 /* lazy wds enabled */
+#define BRF_BLOCKACK 0x8 /* BlockACK capable */
+#define BRF_ABCOUNTER_MASK 0xf0 /* afterburner is obsolete, defined for backward compat */
+#define BRF_PROP_11N_MCS 0x10 /* re-use afterburner bit */
+
+/**
+ * Support for Broadcom proprietary HT MCS rates. Re-uses afterburner bits since afterburner is not
+ * used anymore. Checks for BRF_ABCAP to stay compliant with 'old' images in the field.
+ */
+#define GET_BRF_PROP_11N_MCS(brcm_ie) \
+ (!((brcm_ie)->flags & BRF_ABCAP) && ((brcm_ie)->flags & BRF_PROP_11N_MCS))
+
+/* brcm_ie flags1 */
+#define BRF1_AMSDU 0x1 /* A-MSDU capable */
+#define BRF1_WMEPS 0x4 /* AP is capable of handling WME + PS w/o APSD */
+#define BRF1_PSOFIX 0x8 /* AP has fixed PS mode out-of-order packets */
+#define BRF1_RX_LARGE_AGG 0x10 /* device can rx large aggregates */
+#define BRF1_RFAWARE_DCS 0x20 /* RFAWARE dynamic channel selection (DCS) */
+#define BRF1_SOFTAP 0x40 /* Configure as Broadcom SOFTAP */
+#define BRF1_DWDS 0x80 /* DWDS capable */
+
+/** Vendor IE structure */
BWL_PRE_PACKED_STRUCT struct vndr_ie {
uchar id;
uchar len;
uchar oui [3];
- uchar data [1];
+ uchar data [1]; /* Variable size data */
} BWL_POST_PACKED_STRUCT;
typedef struct vndr_ie vndr_ie_t;
-#define VNDR_IE_HDR_LEN 2
-#define VNDR_IE_MIN_LEN 3
+#define VNDR_IE_HDR_LEN 2 /* id + len field */
+#define VNDR_IE_MIN_LEN 3 /* size of the oui field */
#define VNDR_IE_FIXED_LEN (VNDR_IE_HDR_LEN + VNDR_IE_MIN_LEN)
-#define VNDR_IE_MAX_LEN 255
-
+#define VNDR_IE_MAX_LEN 255 /* vendor IE max length, without ID and len */
+/** BRCM PROP DEVICE PRIMARY MAC ADDRESS IE */
BWL_PRE_PACKED_STRUCT struct member_of_brcm_prop_ie {
uchar id;
uchar len;
uchar oui[3];
- uint8 type;
- struct ether_addr ea;
+ uint8 type; /* type indicates what follows */
+ struct ether_addr ea; /* Device Primary MAC Address */
} BWL_POST_PACKED_STRUCT;
typedef struct member_of_brcm_prop_ie member_of_brcm_prop_ie_t;
-#define MEMBER_OF_BRCM_PROP_IE_LEN 10
+#define MEMBER_OF_BRCM_PROP_IE_LEN 10 /* IE max length */
+#define MEMBER_OF_BRCM_PROP_IE_HDRLEN (sizeof(member_of_brcm_prop_ie_t))
#define MEMBER_OF_BRCM_PROP_IE_TYPE 54
-
+/** BRCM Reliable Multicast IE */
BWL_PRE_PACKED_STRUCT struct relmcast_brcm_prop_ie {
- uchar id;
- uchar len;
- uchar oui[3];
- uint8 type;
- struct ether_addr ea;
- struct ether_addr mcast_ea;
- uint8 updtmo;
+ uint8 id;
+ uint8 len;
+ uint8 oui[3];
+ uint8 type; /* type indicates what follows */
+ struct ether_addr ea; /* The ack sender's MAC Address */
+ struct ether_addr mcast_ea; /* The multicast MAC address */
+ uint8 updtmo; /* time interval (seconds) for the client to send a null packet to report its RSSI */
} BWL_POST_PACKED_STRUCT;
typedef struct relmcast_brcm_prop_ie relmcast_brcm_prop_ie_t;
-#define RELMCAST_BRCM_PROP_IE_LEN (sizeof(relmcast_brcm_prop_ie_t)-2)
-#define RELMCAST_BRCM_PROP_IE_TYPE 55
+/* IE length */
+/* BRCM_PROP_IE_LEN = sizeof(relmcast_brcm_prop_ie_t)-((sizeof (id) + sizeof (len)))? */
+#define RELMCAST_BRCM_PROP_IE_LEN (sizeof(relmcast_brcm_prop_ie_t)-(2*sizeof(uint8)))
+#define RELMCAST_BRCM_PROP_IE_TYPE 55
-#define MCSSET_LEN 16
-#define MAX_MCS_NUM (128)
+/* ************* HT definitions. ************* */
+#define MCSSET_LEN 16 /* 16 octets (8 bits each) give a 128-bit bitmap of MCS indexes */
+#define MAX_MCS_NUM (128) /* max mcs number = 128 */
BWL_PRE_PACKED_STRUCT struct ht_cap_ie {
uint16 cap;
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_ht_cap_ie dot11_ht_cap_ie_t;
-
-
+/* CAP IE: the HT 1.0 spec simply stole an 802.11 IE; we use our prop. IE until this is resolved */
+/* the capability IE is primarily used to convey this node's abilities */
BWL_PRE_PACKED_STRUCT struct ht_prop_cap_ie {
- uint8 id;
- uint8 len;
- uint8 oui[3];
- uint8 type;
+ uint8 id; /* IE ID, 221, DOT11_MNG_PROPR_ID */
+ uint8 len; /* IE length */
+ uint8 oui[3];
+ uint8 type; /* type indicates what follows */
ht_cap_ie_t cap_ie;
} BWL_POST_PACKED_STRUCT;
typedef struct ht_prop_cap_ie ht_prop_cap_ie_t;
-#define HT_PROP_IE_OVERHEAD 4
-#define HT_CAP_IE_LEN 26
+#define HT_PROP_IE_OVERHEAD 4 /* overhead bytes for prop oui ie */
+#define HT_CAP_IE_LEN 26 /* HT capability len (based on .11n d2.0) */
#define HT_CAP_IE_TYPE 51
-#define HT_CAP_LDPC_CODING 0x0001
-#define HT_CAP_40MHZ 0x0002
-#define HT_CAP_MIMO_PS_MASK 0x000C
-#define HT_CAP_MIMO_PS_SHIFT 0x0002
-#define HT_CAP_MIMO_PS_OFF 0x0003
-#define HT_CAP_MIMO_PS_RTS 0x0001
-#define HT_CAP_MIMO_PS_ON 0x0000
-#define HT_CAP_GF 0x0010
-#define HT_CAP_SHORT_GI_20 0x0020
-#define HT_CAP_SHORT_GI_40 0x0040
-#define HT_CAP_TX_STBC 0x0080
-#define HT_CAP_RX_STBC_MASK 0x0300
-#define HT_CAP_RX_STBC_SHIFT 8
-#define HT_CAP_DELAYED_BA 0x0400
-#define HT_CAP_MAX_AMSDU 0x0800
-
-#define HT_CAP_DSSS_CCK 0x1000
-#define HT_CAP_PSMP 0x2000
-#define HT_CAP_40MHZ_INTOLERANT 0x4000
-#define HT_CAP_LSIG_TXOP 0x8000
-
-#define HT_CAP_RX_STBC_NO 0x0
-#define HT_CAP_RX_STBC_ONE_STREAM 0x1
-#define HT_CAP_RX_STBC_TWO_STREAM 0x2
-#define HT_CAP_RX_STBC_THREE_STREAM 0x3
+#define HT_CAP_LDPC_CODING 0x0001 /* Support for rx of LDPC coded pkts */
+#define HT_CAP_40MHZ 0x0002 /* FALSE:20Mhz, TRUE:20/40MHZ supported */
+#define HT_CAP_MIMO_PS_MASK 0x000C /* Mimo PS mask */
+#define HT_CAP_MIMO_PS_SHIFT 0x0002 /* Mimo PS shift */
+#define HT_CAP_MIMO_PS_OFF 0x0003 /* Mimo PS, no restriction */
+#define HT_CAP_MIMO_PS_RTS 0x0001 /* Mimo PS, send RTS/CTS around MIMO frames */
+#define HT_CAP_MIMO_PS_ON 0x0000 /* Mimo PS, MIMO disallowed */
+#define HT_CAP_GF 0x0010 /* Greenfield preamble support */
+#define HT_CAP_SHORT_GI_20 0x0020 /* 20MHZ short guard interval support */
+#define HT_CAP_SHORT_GI_40 0x0040 /* 40Mhz short guard interval support */
+#define HT_CAP_TX_STBC 0x0080 /* Tx STBC support */
+#define HT_CAP_RX_STBC_MASK 0x0300 /* Rx STBC mask */
+#define HT_CAP_RX_STBC_SHIFT 8 /* Rx STBC shift */
+#define HT_CAP_DELAYED_BA 0x0400 /* delayed BA support */
+#define HT_CAP_MAX_AMSDU 0x0800 /* Max AMSDU size in bytes , 0=3839, 1=7935 */
+
+#define HT_CAP_DSSS_CCK 0x1000 /* DSSS/CCK supported by the BSS */
+#define HT_CAP_PSMP 0x2000 /* Power Save Multi Poll support */
+#define HT_CAP_40MHZ_INTOLERANT 0x4000 /* 40MHz Intolerant */
+#define HT_CAP_LSIG_TXOP 0x8000 /* L-SIG TXOP protection support */
+
+#define HT_CAP_RX_STBC_NO 0x0 /* no rx STBC support */
+#define HT_CAP_RX_STBC_ONE_STREAM 0x1 /* rx STBC support of 1 spatial stream */
+#define HT_CAP_RX_STBC_TWO_STREAM 0x2 /* rx STBC support of 1-2 spatial streams */
+#define HT_CAP_RX_STBC_THREE_STREAM 0x3 /* rx STBC support of 1-3 spatial streams */
#define HT_CAP_TXBF_CAP_IMPLICIT_TXBF_RX 0x1
-#define HT_CAP_TXBF_CAP_NDP_TX 0x8
-#define HT_CAP_TXBF_CAP_NDP_RX 0x10
+#define HT_CAP_TXBF_CAP_NDP_RX 0x8
+#define HT_CAP_TXBF_CAP_NDP_TX 0x10
#define HT_CAP_TXBF_CAP_EXPLICIT_CSI 0x100
#define HT_CAP_TXBF_CAP_EXPLICIT_NC_STEERING 0x200
#define HT_CAP_TXBF_CAP_EXPLICIT_C_STEERING 0x400
#define HT_CAP_TX_BF_CAP_EXPLICIT_COMPRESSED_FB_MASK 0x18000
#define HT_CAP_TX_BF_CAP_EXPLICIT_COMPRESSED_FB_SHIFT 15
-#define VHT_MAX_MPDU 11454
-#define VHT_MPDU_MSDU_DELTA 56
-
+#define VHT_MAX_MPDU 11454 /* max mpdu size for now (bytes) */
+#define VHT_MPDU_MSDU_DELTA 56 /* Difference in spec - vht mpdu, amsdu len */
+/* Max AMSDU len - per spec */
#define VHT_MAX_AMSDU (VHT_MAX_MPDU - VHT_MPDU_MSDU_DELTA)
-#define HT_MAX_AMSDU 7935
-#define HT_MIN_AMSDU 3835
-
-#define HT_PARAMS_RX_FACTOR_MASK 0x03
-#define HT_PARAMS_DENSITY_MASK 0x1C
-#define HT_PARAMS_DENSITY_SHIFT 2
-
-
-#define AMPDU_MAX_MPDU_DENSITY 7
-#define AMPDU_DENSITY_NONE 0
-#define AMPDU_DENSITY_1over4_US 1
-#define AMPDU_DENSITY_1over2_US 2
-#define AMPDU_DENSITY_1_US 3
-#define AMPDU_DENSITY_2_US 4
-#define AMPDU_DENSITY_4_US 5
-#define AMPDU_DENSITY_8_US 6
-#define AMPDU_DENSITY_16_US 7
-#define AMPDU_RX_FACTOR_8K 0
-#define AMPDU_RX_FACTOR_16K 1
-#define AMPDU_RX_FACTOR_32K 2
-#define AMPDU_RX_FACTOR_64K 3
-#define AMPDU_RX_FACTOR_BASE 8*1024
-
-#define AMPDU_DELIMITER_LEN 4
-#define AMPDU_DELIMITER_LEN_MAX 63
+#define HT_MAX_AMSDU 7935 /* max amsdu size (bytes) per the HT spec */
+#define HT_MIN_AMSDU 3835 /* min amsdu size (bytes) per the HT spec */
+
+#define HT_PARAMS_RX_FACTOR_MASK 0x03 /* ampdu rcv factor mask */
+#define HT_PARAMS_DENSITY_MASK 0x1C /* ampdu density mask */
+#define HT_PARAMS_DENSITY_SHIFT 2 /* ampdu density shift */
+
+/* HT/AMPDU specific define */
+#define AMPDU_MAX_MPDU_DENSITY 7 /* max mpdu density; in 1/4 usec units */
+#define AMPDU_DENSITY_NONE 0 /* No density requirement */
+#define AMPDU_DENSITY_1over4_US 1 /* 1/4 us density */
+#define AMPDU_DENSITY_1over2_US 2 /* 1/2 us density */
+#define AMPDU_DENSITY_1_US 3 /* 1 us density */
+#define AMPDU_DENSITY_2_US 4 /* 2 us density */
+#define AMPDU_DENSITY_4_US 5 /* 4 us density */
+#define AMPDU_DENSITY_8_US 6 /* 8 us density */
+#define AMPDU_DENSITY_16_US 7 /* 16 us density */
+#define AMPDU_RX_FACTOR_8K 0 /* max rcv ampdu len (8kb) */
+#define AMPDU_RX_FACTOR_16K 1 /* max rcv ampdu len (16kb) */
+#define AMPDU_RX_FACTOR_32K 2 /* max rcv ampdu len (32kb) */
+#define AMPDU_RX_FACTOR_64K 3 /* max rcv ampdu len (64kb) */
+
+/* AMPDU RX factors for VHT rates */
+#define AMPDU_RX_FACTOR_128K 4 /* max rcv ampdu len (128kb) */
+#define AMPDU_RX_FACTOR_256K 5 /* max rcv ampdu len (256kb) */
+#define AMPDU_RX_FACTOR_512K 6 /* max rcv ampdu len (512kb) */
+#define AMPDU_RX_FACTOR_1024K 7 /* max rcv ampdu len (1024kb) */
+
+#define AMPDU_RX_FACTOR_BASE 8*1024 /* ampdu factor base for rx len */
+#define AMPDU_RX_FACTOR_BASE_PWR 13 /* ampdu factor base for rx len in power of 2 */
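+/*
+ * Illustrative note (not in the original header): per the comments above, an
+ * advertised RX factor selects a maximum receive A-MPDU length of
+ * AMPDU_RX_FACTOR_BASE << factor, e.g. AMPDU_RX_FACTOR_64K (3) gives
+ * 8*1024 << 3 = 64KB and AMPDU_RX_FACTOR_1024K (7) gives 1024KB for VHT rates.
+ */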
+
+#define AMPDU_DELIMITER_LEN 4 /* length of ampdu delimiter */
+#define AMPDU_DELIMITER_LEN_MAX 63 /* max length of ampdu delimiter(enforced in HW) */
#define HT_CAP_EXT_PCO 0x0001
#define HT_CAP_EXT_PCO_TTIME_MASK 0x0006
#define HT_CAP_EXT_HTC 0x0400
#define HT_CAP_EXT_RD_RESP 0x0800
+/** 'ht_add' is called 'HT Operation' information element in the 802.11 standard */
BWL_PRE_PACKED_STRUCT struct ht_add_ie {
- uint8 ctl_ch;
- uint8 byte1;
- uint16 opmode;
- uint16 misc_bits;
- uint8 basic_mcs[MCSSET_LEN];
+ uint8 ctl_ch; /* control channel number */
+ uint8 byte1; /* ext ch,rec. ch. width, RIFS support */
+ uint16 opmode; /* operation mode */
+ uint16 misc_bits; /* misc bits */
+ uint8 basic_mcs[MCSSET_LEN]; /* required MCS set */
} BWL_POST_PACKED_STRUCT;
typedef struct ht_add_ie ht_add_ie_t;
-
-
+/* ADD IE: the HT 1.0 spec simply stole an 802.11 IE; we use our prop. IE until this is resolved */
+/* the additional IE is primarily used to convey the current BSS configuration */
BWL_PRE_PACKED_STRUCT struct ht_prop_add_ie {
- uint8 id;
- uint8 len;
- uint8 oui[3];
- uint8 type;
+ uint8 id; /* IE ID, 221, DOT11_MNG_PROPR_ID */
+ uint8 len; /* IE length */
+ uint8 oui[3];
+ uint8 type; /* indicates what follows */
ht_add_ie_t add_ie;
} BWL_POST_PACKED_STRUCT;
typedef struct ht_prop_add_ie ht_prop_add_ie_t;
#define HT_ADD_IE_LEN 22
#define HT_ADD_IE_TYPE 52
-
-#define HT_BW_ANY 0x04
-#define HT_RIFS_PERMITTED 0x08
-
-
-#define HT_OPMODE_MASK 0x0003
-#define HT_OPMODE_SHIFT 0
-#define HT_OPMODE_PURE 0x0000
-#define HT_OPMODE_OPTIONAL 0x0001
-#define HT_OPMODE_HT20IN40 0x0002
-#define HT_OPMODE_MIXED 0x0003
-#define HT_OPMODE_NONGF 0x0004
-#define DOT11N_TXBURST 0x0008
-#define DOT11N_OBSS_NONHT 0x0010
-
-
-#define HT_BASIC_STBC_MCS 0x007f
-#define HT_DUAL_STBC_PROT 0x0080
-#define HT_SECOND_BCN 0x0100
-#define HT_LSIG_TXOP 0x0200
-#define HT_PCO_ACTIVE 0x0400
-#define HT_PCO_PHASE 0x0800
-#define HT_DUALCTS_PROTECTION 0x0080
-
-
-#define DOT11N_2G_TXBURST_LIMIT 6160
-#define DOT11N_5G_TXBURST_LIMIT 3080
-
-
+/* byte1 defn's */
+#define HT_BW_ANY 0x04 /* set, STA can use 20 or 40MHz */
+#define HT_RIFS_PERMITTED 0x08 /* RIFS allowed */
+
+/* opmode defn's */
+#define HT_OPMODE_MASK 0x0003 /* protection mode mask */
+#define HT_OPMODE_SHIFT 0 /* protection mode shift */
+#define HT_OPMODE_PURE 0x0000 /* protection mode PURE */
+#define HT_OPMODE_OPTIONAL 0x0001 /* protection mode optional */
+#define HT_OPMODE_HT20IN40 0x0002 /* protection mode 20MHz HT in 40MHz BSS */
+#define HT_OPMODE_MIXED 0x0003 /* protection mode Mixed Mode */
+#define HT_OPMODE_NONGF 0x0004 /* protection mode non-GF */
+#define DOT11N_TXBURST 0x0008 /* Tx burst limit */
+#define DOT11N_OBSS_NONHT 0x0010 /* OBSS Non-HT STA present */
+
+/* misc_bits defn's */
+#define HT_BASIC_STBC_MCS 0x007f /* basic STBC MCS */
+#define HT_DUAL_STBC_PROT 0x0080 /* Dual STBC Protection */
+#define HT_SECOND_BCN 0x0100 /* Secondary beacon support */
+#define HT_LSIG_TXOP 0x0200 /* L-SIG TXOP Protection full support */
+#define HT_PCO_ACTIVE 0x0400 /* PCO active */
+#define HT_PCO_PHASE 0x0800 /* PCO phase */
+#define HT_DUALCTS_PROTECTION 0x0080 /* DUAL CTS protection needed */
+
+/* Tx Burst Limits */
+#define DOT11N_2G_TXBURST_LIMIT 6160 /* 2G band Tx burst limit per 802.11n Draft 1.10 (usec) */
+#define DOT11N_5G_TXBURST_LIMIT 3080 /* 5G band Tx burst limit per 802.11n Draft 1.10 (usec) */
+
+/* Macros for opmode */
#define GET_HT_OPMODE(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \
>> HT_OPMODE_SHIFT)
#define HT_MIXEDMODE_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \
- == HT_OPMODE_MIXED)
+ == HT_OPMODE_MIXED) /* mixed mode present */
#define HT_HT20_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \
- == HT_OPMODE_HT20IN40)
+ == HT_OPMODE_HT20IN40) /* 20MHz HT present */
#define HT_OPTIONAL_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \
- == HT_OPMODE_OPTIONAL)
+ == HT_OPMODE_OPTIONAL) /* Optional protection present */
#define HT_USE_PROTECTION(add_ie) (HT_HT20_PRESENT((add_ie)) || \
- HT_MIXEDMODE_PRESENT((add_ie)))
+ HT_MIXEDMODE_PRESENT((add_ie))) /* use protection */
#define HT_NONGF_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_NONGF) \
- == HT_OPMODE_NONGF)
+ == HT_OPMODE_NONGF) /* non-GF present */
#define DOT11N_TXBURST_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & DOT11N_TXBURST) \
- == DOT11N_TXBURST)
+ == DOT11N_TXBURST) /* Tx Burst present */
#define DOT11N_OBSS_NONHT_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & DOT11N_OBSS_NONHT) \
- == DOT11N_OBSS_NONHT)
+ == DOT11N_OBSS_NONHT) /* OBSS Non-HT present */
BWL_PRE_PACKED_STRUCT struct obss_params {
uint16 passive_dwell;
obss_params_t obss_params;
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_obss_ie dot11_obss_ie_t;
-#define DOT11_OBSS_SCAN_IE_LEN sizeof(obss_params_t)
-
+#define DOT11_OBSS_SCAN_IE_LEN sizeof(obss_params_t) /* HT OBSS len (based on 802.11n d3.0) */
-#define HT_CTRL_LA_TRQ 0x00000002
-#define HT_CTRL_LA_MAI 0x0000003C
+/* HT control field */
+#define HT_CTRL_LA_TRQ 0x00000002 /* sounding request */
+#define HT_CTRL_LA_MAI 0x0000003C /* MCS request or antenna selection indication */
#define HT_CTRL_LA_MAI_SHIFT 2
-#define HT_CTRL_LA_MAI_MRQ 0x00000004
-#define HT_CTRL_LA_MAI_MSI 0x00000038
-#define HT_CTRL_LA_MFSI 0x000001C0
+#define HT_CTRL_LA_MAI_MRQ 0x00000004 /* MCS request */
+#define HT_CTRL_LA_MAI_MSI 0x00000038 /* MCS request sequence identifier */
+#define HT_CTRL_LA_MFSI 0x000001C0 /* MFB sequence identifier */
#define HT_CTRL_LA_MFSI_SHIFT 6
-#define HT_CTRL_LA_MFB_ASELC 0x0000FE00
+#define HT_CTRL_LA_MFB_ASELC 0x0000FE00 /* MCS feedback, antenna selection command/data */
#define HT_CTRL_LA_MFB_ASELC_SH 9
-#define HT_CTRL_LA_ASELC_CMD 0x00000C00
-#define HT_CTRL_LA_ASELC_DATA 0x0000F000
-#define HT_CTRL_CAL_POS 0x00030000
-#define HT_CTRL_CAL_SEQ 0x000C0000
-#define HT_CTRL_CSI_STEERING 0x00C00000
+#define HT_CTRL_LA_ASELC_CMD 0x00000C00 /* ASEL command */
+#define HT_CTRL_LA_ASELC_DATA 0x0000F000 /* ASEL data */
+#define HT_CTRL_CAL_POS 0x00030000 /* Calibration position */
+#define HT_CTRL_CAL_SEQ 0x000C0000 /* Calibration sequence */
+#define HT_CTRL_CSI_STEERING 0x00C00000 /* CSI/Steering */
#define HT_CTRL_CSI_STEER_SHIFT 22
-#define HT_CTRL_CSI_STEER_NFB 0
-#define HT_CTRL_CSI_STEER_CSI 1
-#define HT_CTRL_CSI_STEER_NCOM 2
-#define HT_CTRL_CSI_STEER_COM 3
-#define HT_CTRL_NDP_ANNOUNCE 0x01000000
-#define HT_CTRL_AC_CONSTRAINT 0x40000000
-#define HT_CTRL_RDG_MOREPPDU 0x80000000
-
-#define HT_OPMODE_OPTIONAL 0x0001
-#define HT_OPMODE_HT20IN40 0x0002
-#define HT_OPMODE_MIXED 0x0003
-#define HT_OPMODE_NONGF 0x0004
-#define DOT11N_TXBURST 0x0008
-#define DOT11N_OBSS_NONHT 0x0010
-
-
-
-
+#define HT_CTRL_CSI_STEER_NFB 0 /* no feedback required */
+#define HT_CTRL_CSI_STEER_CSI 1 /* CSI, H matrix */
+#define HT_CTRL_CSI_STEER_NCOM 2 /* non-compressed beamforming */
+#define HT_CTRL_CSI_STEER_COM 3 /* compressed beamforming */
+#define HT_CTRL_NDP_ANNOUNCE 0x01000000 /* NDP announcement */
+#define HT_CTRL_AC_CONSTRAINT 0x40000000 /* AC Constraint */
+#define HT_CTRL_RDG_MOREPPDU 0x80000000 /* RDG/More PPDU */
+
+/* ************* VHT definitions. ************* */
+
+/**
+ * VHT Capabilites IE (sec 8.4.2.160)
+ */
BWL_PRE_PACKED_STRUCT struct vht_cap_ie {
uint32 vht_cap_info;
-
+ /* supported MCS set - 64 bit field */
uint16 rx_mcs_map;
uint16 rx_max_rate;
uint16 tx_mcs_map;
} BWL_POST_PACKED_STRUCT;
typedef struct vht_cap_ie vht_cap_ie_t;
-
+/* 4B cap_info + 8B supp_mcs */
#define VHT_CAP_IE_LEN 12
-
+/* VHT Capabilities Info field - 32bit - in VHT Cap IE */
#define VHT_CAP_INFO_MAX_MPDU_LEN_MASK 0x00000003
#define VHT_CAP_INFO_SUPP_CHAN_WIDTH_MASK 0x0000000c
#define VHT_CAP_INFO_LDPC 0x00000010
#define VHT_CAP_INFO_LINK_ADAPT_CAP_MASK 0x0c000000
#define VHT_CAP_INFO_LINK_ADAPT_CAP_SHIFT 26
-
+/* VHT Supported MCS Set - 64-bit - in VHT Cap IE */
#define VHT_CAP_SUPP_MCS_RX_HIGHEST_RATE_MASK 0x1fff
#define VHT_CAP_SUPP_MCS_RX_HIGHEST_RATE_SHIFT 0
#define VHT_CAP_MCS_MAP_0_8 1
#define VHT_CAP_MCS_MAP_0_9 2
#define VHT_CAP_MCS_MAP_NONE 3
-#define VHT_CAP_MCS_MAP_S 2
-#define VHT_CAP_MCS_MAP_M 0x3
-
+#define VHT_CAP_MCS_MAP_S 2 /* num bits for 1-stream */
+#define VHT_CAP_MCS_MAP_M 0x3 /* mask for 1-stream */
+/* assumes VHT_CAP_MCS_MAP_NONE is 3 and 2 bits are used for encoding */
#define VHT_CAP_MCS_MAP_NONE_ALL 0xffff
-
+/* mcsmap with MCS0-9 for Nss = 3 */
#define VHT_CAP_MCS_MAP_0_9_NSS3 \
((VHT_CAP_MCS_MAP_0_9 << VHT_MCS_MAP_GET_SS_IDX(1)) | \
(VHT_CAP_MCS_MAP_0_9 << VHT_MCS_MAP_GET_SS_IDX(2)) | \
#define VHT_CAP_MCS_MAP_NSS_MAX 8
-
+/* get mcsmap with given mcs for given nss streams */
#define VHT_CAP_MCS_MAP_CREATE(mcsmap, nss, mcs) \
do { \
int i; \
} \
} while (0)
-
+/* Map the mcs code to mcs bit map */
#define VHT_MCS_CODE_TO_MCS_MAP(mcs_code) \
((mcs_code == VHT_CAP_MCS_MAP_0_7) ? 0xff : \
(mcs_code == VHT_CAP_MCS_MAP_0_8) ? 0x1ff : \
(mcs_code == VHT_CAP_MCS_MAP_0_9) ? 0x3ff : 0)
-
+/* Map the mcs bit map to mcs code */
#define VHT_MCS_MAP_TO_MCS_CODE(mcs_map) \
((mcs_map == 0xff) ? VHT_CAP_MCS_MAP_0_7 : \
(mcs_map == 0x1ff) ? VHT_CAP_MCS_MAP_0_8 : \
(mcs_map == 0x3ff) ? VHT_CAP_MCS_MAP_0_9 : VHT_CAP_MCS_MAP_NONE)
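+/*
+ * Worked example (illustration only, not part of the original header):
+ * VHT_MCS_CODE_TO_MCS_MAP(VHT_CAP_MCS_MAP_0_9) expands to 0x3ff (MCS 0-9 set),
+ * VHT_MCS_MAP_TO_MCS_CODE(0x3ff) maps back to VHT_CAP_MCS_MAP_0_9, and any
+ * other bitmap value falls through to VHT_CAP_MCS_MAP_NONE.
+ */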
-
+/** VHT Capabilities Supported Channel Width */
typedef enum vht_cap_chan_width {
VHT_CAP_CHAN_WIDTH_SUPPORT_MANDATORY = 0x00,
VHT_CAP_CHAN_WIDTH_SUPPORT_160 = 0x04,
VHT_CAP_CHAN_WIDTH_SUPPORT_160_8080 = 0x08
} vht_cap_chan_width_t;
-
+/** VHT Capabilities Supported max MPDU LEN (sec 8.4.2.160.2) */
typedef enum vht_cap_max_mpdu_len {
VHT_CAP_MPDU_MAX_4K = 0x00,
VHT_CAP_MPDU_MAX_8K = 0x01,
VHT_CAP_MPDU_MAX_11K = 0x02
} vht_cap_max_mpdu_len_t;
-
+/* Maximum MPDU Length byte counts for the VHT Capabilities advertised limits */
#define VHT_MPDU_LIMIT_4K 3895
#define VHT_MPDU_LIMIT_8K 7991
#define VHT_MPDU_LIMIT_11K 11454
-
+/**
+ * VHT Operation IE (sec 8.4.2.161)
+ */
BWL_PRE_PACKED_STRUCT struct vht_op_ie {
uint8 chan_width;
uint8 chan1;
uint8 chan2;
- uint16 supp_mcs;
+ uint16 supp_mcs; /* same def as above in vht cap */
} BWL_POST_PACKED_STRUCT;
typedef struct vht_op_ie vht_op_ie_t;
-
+/* 3B VHT Op info + 2B Basic MCS */
#define VHT_OP_IE_LEN 5
typedef enum vht_op_chan_width {
VHT_OP_CHAN_WIDTH_80_80 = 3
} vht_op_chan_width_t;
-
+/* AID length */
#define AID_IE_LEN 2
-
+/**
+ * BRCM vht features IE header
+ * The header is the fixed part of the IE.
+ * On the 5GHz band this is the entire IE;
+ * on 2.4GHz the VHT IEs as defined in the 802.11ac
+ * specification follow.
+ *
+ *
+ * VHT features rates bitmap.
+ * Bit0: 5G MCS 0-9 BW 160MHz
+ * Bit1: 5G MCS 0-9 support BW 80MHz
+ * Bit2: 5G MCS 0-9 support BW 20MHz
+ * Bit3: 2.4G MCS 0-9 support BW 20MHz
+ * Bits:4-7 Reserved for future use
+ *
+ */
#define VHT_FEATURES_IE_TYPE 0x4
BWL_PRE_PACKED_STRUCT struct vht_features_ie_hdr {
- uint8 oui[3];
- uint8 type;
- uint8 rate_mask;
+ uint8 oui[3];
+ uint8 type; /* type of this IE = 4 */
+ uint8 rate_mask; /* VHT rate mask */
} BWL_POST_PACKED_STRUCT;
typedef struct vht_features_ie_hdr vht_features_ie_hdr_t;
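+/*
+ * Illustration only (not part of the original header): per the rate_mask bit
+ * assignment above, a rate_mask of 0x06 (Bit1 | Bit2) would advertise
+ * 5G MCS 0-9 support at both 80MHz and 20MHz.
+ */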
-
+/* Defs for rx & tx basic mcs maps - each spatial stream number has 2 bits of info */
#define VHT_MCS_MAP_GET_SS_IDX(nss) (((nss)-1) * VHT_CAP_MCS_MAP_S)
#define VHT_MCS_MAP_GET_MCS_PER_SS(nss, mcsMap) \
(((mcsMap) >> VHT_MCS_MAP_GET_SS_IDX(nss)) & VHT_CAP_MCS_MAP_M)
(VHT_MCS_MAP_GET_MCS_PER_SS((nss), (mcsMap)) != VHT_CAP_MCS_MAP_NONE)
-
-#define WPA_OUI "\x00\x50\xF2"
-#define WPA_OUI_LEN 3
+/* ************* WPA definitions. ************* */
+#define WPA_OUI "\x00\x50\xF2" /* WPA OUI */
+#define WPA_OUI_LEN 3 /* WPA OUI length */
#define WPA_OUI_TYPE 1
-#define WPA_VERSION 1
-#define WPA2_OUI "\x00\x0F\xAC"
-#define WPA2_OUI_LEN 3
-#define WPA2_VERSION 1
-#define WPA2_VERSION_LEN 2
-
-
-#define WPS_OUI "\x00\x50\xF2"
-#define WPS_OUI_LEN 3
+#define WPA_VERSION 1 /* WPA version */
+#define WPA2_OUI "\x00\x0F\xAC" /* WPA2 OUI */
+#define WPA2_OUI_LEN 3 /* WPA2 OUI length */
+#define WPA2_VERSION 1 /* WPA2 version */
+#define WPA2_VERSION_LEN 2 /* WPA2 version length */
+
+/* ************* WPS definitions. ************* */
+#define WPS_OUI "\x00\x50\xF2" /* WPS OUI */
+#define WPS_OUI_LEN 3 /* WPS OUI length */
#define WPS_OUI_TYPE 4
-
+/* ************* WFA definitions. ************* */
#ifdef P2P_IE_OVRD
#define WFA_OUI MAC_OUI
#else
-#define WFA_OUI "\x50\x6F\x9A"
-#endif
-#define WFA_OUI_LEN 3
+#define WFA_OUI "\x50\x6F\x9A" /* WFA OUI */
+#endif /* P2P_IE_OVRD */
+#define WFA_OUI_LEN 3 /* WFA OUI length */
#ifdef P2P_IE_OVRD
#define WFA_OUI_TYPE_P2P MAC_OUI_TYPE_P2P
#else
#define WFA_OUI_TYPE_TPC 8
#ifdef WLTDLS
-#define WFA_OUI_TYPE_TPQ 4
-#define WFA_OUI_TYPE_TPS 5
+#define WFA_OUI_TYPE_TPQ 4 /* WFD Tunneled Probe ReQuest */
+#define WFA_OUI_TYPE_TPS 5 /* WFD Tunneled Probe ReSponse */
#define WFA_OUI_TYPE_WFD 10
-#endif
+#endif /* WLTDLS */
#define WFA_OUI_TYPE_HS20 0x10
-
-
-#define RSN_AKM_NONE 0
-#define RSN_AKM_UNSPECIFIED 1
-#define RSN_AKM_PSK 2
-#define RSN_AKM_FBT_1X 3
-#define RSN_AKM_FBT_PSK 4
-#define RSN_AKM_MFP_1X 5
-#define RSN_AKM_MFP_PSK 6
-#define RSN_AKM_TPK 7
-
-
-#define DOT11_MAX_DEFAULT_KEYS 4
-#define DOT11_MAX_KEY_SIZE 32
-#define DOT11_MAX_IV_SIZE 16
-#define DOT11_EXT_IV_FLAG (1<<5)
-#define DOT11_WPA_KEY_RSC_LEN 8
-
-#define WEP1_KEY_SIZE 5
-#define WEP1_KEY_HEX_SIZE 10
-#define WEP128_KEY_SIZE 13
-#define WEP128_KEY_HEX_SIZE 26
-#define TKIP_MIC_SIZE 8
-#define TKIP_EOM_SIZE 7
-#define TKIP_EOM_FLAG 0x5a
-#define TKIP_KEY_SIZE 32
-#define TKIP_MIC_AUTH_TX 16
-#define TKIP_MIC_AUTH_RX 24
-#define TKIP_MIC_SUP_RX TKIP_MIC_AUTH_TX
-#define TKIP_MIC_SUP_TX TKIP_MIC_AUTH_RX
-#define AES_KEY_SIZE 16
-#define AES_MIC_SIZE 8
-#define BIP_KEY_SIZE 16
-#define BIP_MIC_SIZE 8
-
-
-#define WCN_OUI "\x00\x50\xf2"
-#define WCN_TYPE 4
+#define WFA_OUI_TYPE_OSEN 0x12
+#define WFA_OUI_TYPE_NAN 0x13
+
+/* RSN authenticated key management suite */
+#define RSN_AKM_NONE 0 /* None (IBSS) */
+#define RSN_AKM_UNSPECIFIED 1 /* Over 802.1x */
+#define RSN_AKM_PSK 2 /* Pre-shared Key */
+#define RSN_AKM_FBT_1X 3 /* Fast Bss transition using 802.1X */
+#define RSN_AKM_FBT_PSK 4 /* Fast Bss transition using Pre-shared Key */
+#define RSN_AKM_MFP_1X 5 /* SHA256 key derivation, using 802.1X */
+#define RSN_AKM_MFP_PSK 6 /* SHA256 key derivation, using Pre-shared Key */
+#define RSN_AKM_TPK 7 /* TPK(TDLS Peer Key) handshake */
+
+/* OSEN authenticated key management suite */
+#define OSEN_AKM_UNSPECIFIED RSN_AKM_UNSPECIFIED /* Over 802.1x */
+
+/* Key related defines */
+#define DOT11_MAX_DEFAULT_KEYS 4 /* number of default keys */
+#define DOT11_MAX_IGTK_KEYS 2
+#define DOT11_MAX_KEY_SIZE 32 /* max size of any key */
+#define DOT11_MAX_IV_SIZE 16 /* max size of any IV */
+#define DOT11_EXT_IV_FLAG (1<<5) /* flag to indicate IV is > 4 bytes */
+#define DOT11_WPA_KEY_RSC_LEN 8 /* WPA RSC key len */
+
+#define WEP1_KEY_SIZE 5 /* max size of any WEP key */
+#define WEP1_KEY_HEX_SIZE 10 /* size of WEP key in hex. */
+#define WEP128_KEY_SIZE 13 /* max size of any WEP key */
+#define WEP128_KEY_HEX_SIZE 26 /* size of WEP key in hex. */
+#define TKIP_MIC_SIZE 8 /* size of TKIP MIC */
+#define TKIP_EOM_SIZE 7 /* max size of TKIP EOM */
+#define TKIP_EOM_FLAG 0x5a /* TKIP EOM flag byte */
+#define TKIP_KEY_SIZE 32 /* size of any TKIP key, includes MIC keys */
+#define TKIP_TK_SIZE 16
+#define TKIP_MIC_KEY_SIZE 8
+#define TKIP_MIC_AUTH_TX 16 /* offset to Authenticator MIC TX key */
+#define TKIP_MIC_AUTH_RX 24 /* offset to Authenticator MIC RX key */
+#define TKIP_MIC_SUP_RX TKIP_MIC_AUTH_TX /* offset to Supplicant MIC RX key */
+#define TKIP_MIC_SUP_TX TKIP_MIC_AUTH_RX /* offset to Supplicant MIC TX key */
+#define AES_KEY_SIZE 16 /* size of AES key */
+#define AES_MIC_SIZE 8 /* size of AES MIC */
+#define BIP_KEY_SIZE 16 /* size of BIP key */
+#define BIP_MIC_SIZE 8 /* size of BIP MIC */
+
+#define AES_GCM_MIC_SIZE 16 /* size of MIC for 128-bit GCM - .11adD9 */
+
+#define AES256_KEY_SIZE 32 /* size of AES 256 key - .11acD5 */
+#define AES256_MIC_SIZE 16 /* size of MIC for 256 bit keys, incl BIP */
+
+/* WCN */
+#define WCN_OUI "\x00\x50\xf2" /* WCN OUI */
+#define WCN_TYPE 4 /* WCN type */
#ifdef BCMWAPI_WPI
#define SMS4_KEY_LEN 16
#define SMS4_WPI_CBC_MAC_LEN 16
#endif
+/* 802.11r protocol definitions */
-
-
+/** Mobility Domain IE */
BWL_PRE_PACKED_STRUCT struct dot11_mdid_ie {
uint8 id;
uint8 len;
- uint16 mdid;
+ uint16 mdid; /* Mobility Domain Id */
uint8 cap;
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_mdid_ie dot11_mdid_ie_t;
-#define FBT_MDID_CAP_OVERDS 0x01
-#define FBT_MDID_CAP_RRP 0x02
-
+#define FBT_MDID_CAP_OVERDS 0x01 /* Fast Bss transition over the DS support */
+#define FBT_MDID_CAP_RRP 0x02 /* Resource request protocol support */
+/** Fast Bss Transition IE */
BWL_PRE_PACKED_STRUCT struct dot11_ft_ie {
uint8 id;
uint8 len;
- uint16 mic_control;
+ uint16 mic_control; /* Mic Control */
uint8 mic[16];
uint8 anonce[32];
uint8 snonce[32];
BWL_PRE_PACKED_STRUCT struct dot11_timeout_ie {
uint8 id;
uint8 len;
- uint8 type;
- uint32 value;
+ uint8 type; /* timeout interval type */
+ uint32 value; /* timeout interval value */
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_timeout_ie dot11_timeout_ie_t;
-
+/** GTK ie */
BWL_PRE_PACKED_STRUCT struct dot11_gtk_ie {
uint8 id;
uint8 len;
} BWL_POST_PACKED_STRUCT;
typedef struct dot11_gtk_ie dot11_gtk_ie_t;
-
+/** Management MIC ie */
BWL_PRE_PACKED_STRUCT struct mmic_ie {
- uint8 id;
- uint8 len;
- uint16 key_id;
- uint8 ipn[6];
- uint8 mic[BIP_MIC_SIZE];
+ uint8 id; /* IE ID: DOT11_MNG_MMIE_ID */
+ uint8 len; /* IE length */
+ uint16 key_id; /* key id */
+ uint8 ipn[6]; /* ipn */
+ uint8 mic[16]; /* mic */
} BWL_POST_PACKED_STRUCT;
typedef struct mmic_ie mmic_ie_t;
#define BSSID_BROADCAST "\xFF\xFF\xFF\xFF\xFF\xFF"
#ifdef BCMWAPI_WAI
-#define WAPI_IE_MIN_LEN 20
-#define WAPI_VERSION 1
-#define WAPI_VERSION_LEN 2
-#define WAPI_OUI "\x00\x14\x72"
-#define WAPI_OUI_LEN DOT11_OUI_LEN
-#endif
-
-
-#define WMM_OUI "\x00\x50\xF2"
-#define WMM_OUI_LEN 3
-#define WMM_OUI_TYPE 2
+#define WAPI_IE_MIN_LEN 20 /* WAPI IE min length */
+#define WAPI_VERSION 1 /* WAPI version */
+#define WAPI_VERSION_LEN 2 /* WAPI version length */
+#define WAPI_OUI "\x00\x14\x72" /* WAPI OUI */
+#define WAPI_OUI_LEN DOT11_OUI_LEN /* WAPI OUI length */
+#endif /* BCMWAPI_WAI */
+
+/* ************* WMM Parameter definitions. ************* */
+#define WMM_OUI "\x00\x50\xF2" /* WMM OUI */
+#define WMM_OUI_LEN 3 /* WMM OUI length */
+#define WMM_OUI_TYPE 2 /* WMM OUI type */
#define WMM_VERSION 1
#define WMM_VERSION_LEN 1
-
+/* WMM OUI subtype */
#define WMM_OUI_SUBTYPE_PARAMETER 1
#define WMM_PARAMETER_IE_LEN 24
-
+/** Link Identifier Element */
BWL_PRE_PACKED_STRUCT struct link_id_ie {
uint8 id;
uint8 len;
typedef struct link_id_ie link_id_ie_t;
#define TDLS_LINK_ID_IE_LEN 18
-
+/** Link Wakeup Schedule Element */
BWL_PRE_PACKED_STRUCT struct wakeup_sch_ie {
uint8 id;
uint8 len;
- uint32 offset;
- uint32 interval;
- uint32 awake_win_slots;
- uint32 max_wake_win;
- uint16 idle_cnt;
+ uint32 offset; /* in ms between TSF0 and start of 1st Awake Window */
+ uint32 interval; /* in ms between the start of 2 Awake Windows */
+ uint32 awake_win_slots; /* in backoff slots, duration of Awake Window */
+ uint32 max_wake_win; /* in ms, max duration of Awake Window */
+ uint16 idle_cnt; /* number of consecutive Awake Windows */
} BWL_POST_PACKED_STRUCT;
typedef struct wakeup_sch_ie wakeup_sch_ie_t;
#define TDLS_WAKEUP_SCH_IE_LEN 18
-
+/** Channel Switch Timing Element */
BWL_PRE_PACKED_STRUCT struct channel_switch_timing_ie {
uint8 id;
uint8 len;
- uint16 switch_time;
- uint16 switch_timeout;
+ uint16 switch_time; /* in ms, time to switch channels */
+ uint16 switch_timeout; /* in ms */
} BWL_POST_PACKED_STRUCT;
typedef struct channel_switch_timing_ie channel_switch_timing_ie_t;
#define TDLS_CHANNEL_SWITCH_TIMING_IE_LEN 4
-
+/** PTI Control Element */
BWL_PRE_PACKED_STRUCT struct pti_control_ie {
uint8 id;
uint8 len;
typedef struct pti_control_ie pti_control_ie_t;
#define TDLS_PTI_CONTROL_IE_LEN 3
-
+/** PU Buffer Status Element */
BWL_PRE_PACKED_STRUCT struct pu_buffer_status_ie {
uint8 id;
uint8 len;
#define TDLS_PU_BUFFER_STATUS_AC_VI 4
#define TDLS_PU_BUFFER_STATUS_AC_VO 8
-
+/* TDLS Action Field Values */
+#define TDLS_SETUP_REQ 0
+#define TDLS_SETUP_RESP 1
+#define TDLS_SETUP_CONFIRM 2
+#define TDLS_TEARDOWN 3
+#define TDLS_PEER_TRAFFIC_IND 4
+#define TDLS_CHANNEL_SWITCH_REQ 5
+#define TDLS_CHANNEL_SWITCH_RESP 6
+#define TDLS_PEER_PSM_REQ 7
+#define TDLS_PEER_PSM_RESP 8
+#define TDLS_PEER_TRAFFIC_RESP 9
+#define TDLS_DISCOVERY_REQ 10
+
+/* 802.11z TDLS Public Action Frame action field */
+#define TDLS_DISCOVERY_RESP 14
+
+/* 802.11u GAS action frames */
#define GAS_REQUEST_ACTION_FRAME 10
#define GAS_RESPONSE_ACTION_FRAME 11
#define GAS_COMEBACK_REQUEST_ACTION_FRAME 12
#define GAS_COMEBACK_RESPONSE_ACTION_FRAME 13
-
+/* 802.11u interworking access network options */
#define IW_ANT_MASK 0x0f
#define IW_INTERNET_MASK 0x10
#define IW_ASRA_MASK 0x20
#define IW_ESR_MASK 0x40
#define IW_UESA_MASK 0x80
-
+/* 802.11u interworking access network type */
#define IW_ANT_PRIVATE_NETWORK 0
#define IW_ANT_PRIVATE_NETWORK_WITH_GUEST 1
#define IW_ANT_CHARGEABLE_PUBLIC_NETWORK 2
#define IW_ANT_TEST_NETWORK 14
#define IW_ANT_WILDCARD_NETWORK 15
-
+/* 802.11u advertisement protocol */
#define ADVP_ANQP_PROTOCOL_ID 0
-
+/* 802.11u advertisement protocol masks */
#define ADVP_QRL_MASK 0x7f
#define ADVP_PAME_BI_MASK 0x80
-
+/* 802.11u advertisement protocol values */
#define ADVP_QRL_REQUEST 0x00
#define ADVP_QRL_RESPONSE 0x7f
#define ADVP_PAME_BI_DEPENDENT 0x00
#define ADVP_PAME_BI_INDEPENDENT ADVP_PAME_BI_MASK
-
+/* 802.11u ANQP information ID */
#define ANQP_ID_QUERY_LIST 256
#define ANQP_ID_CAPABILITY_LIST 257
#define ANQP_ID_VENUE_NAME_INFO 258
#define ANQP_ID_EMERGENCY_NAI 271
#define ANQP_ID_VENDOR_SPECIFIC_LIST 56797
-
+/* 802.11u ANQP OUI */
#define ANQP_OUI_SUBTYPE 9
-
+/* 802.11u venue name */
#define VENUE_LANGUAGE_CODE_SIZE 3
#define VENUE_NAME_SIZE 255
-
+/* 802.11u venue groups */
#define VENUE_UNSPECIFIED 0
#define VENUE_ASSEMBLY 1
#define VENUE_BUSINESS 2
#define VENUE_VEHICULAR 10
#define VENUE_OUTDOOR 11
-
+/* 802.11u network authentication type indicator */
+#define NATI_UNSPECIFIED -1
#define NATI_ACCEPTANCE_OF_TERMS_CONDITIONS 0
#define NATI_ONLINE_ENROLLMENT_SUPPORTED 1
#define NATI_HTTP_HTTPS_REDIRECTION 2
#define NATI_DNS_REDIRECTION 3
-
+/* 802.11u IP address type availability - IPv6 */
#define IPA_IPV6_SHIFT 0
#define IPA_IPV6_MASK (0x03 << IPA_IPV6_SHIFT)
#define IPA_IPV6_NOT_AVAILABLE 0x00
#define IPA_IPV6_AVAILABLE 0x01
#define IPA_IPV6_UNKNOWN_AVAILABILITY 0x02
-
+/* 802.11u IP address type availability - IPv4 */
#define IPA_IPV4_SHIFT 2
#define IPA_IPV4_MASK (0x3f << IPA_IPV4_SHIFT)
#define IPA_IPV4_NOT_AVAILABLE 0x00
#define IPA_IPV4_PORT_RESTRICT_DOUBLE_NAT 0x06
#define IPA_IPV4_UNKNOWN_AVAILABILITY 0x07
-
+/* 802.11u NAI realm encoding */
#define REALM_ENCODING_RFC4282 0
#define REALM_ENCODING_UTF8 1
-
+/* 802.11u IANA EAP method type numbers */
#define REALM_EAP_TLS 13
+#define REALM_EAP_LEAP 17
#define REALM_EAP_SIM 18
#define REALM_EAP_TTLS 21
#define REALM_EAP_AKA 23
+#define REALM_EAP_PEAP 25
+#define REALM_EAP_FAST 43
#define REALM_EAP_PSK 47
#define REALM_EAP_AKAP 50
+#define REALM_EAP_EXPANDED 254
-
+/* 802.11u authentication ID */
#define REALM_EXPANDED_EAP 1
#define REALM_NON_EAP_INNER_AUTHENTICATION 2
#define REALM_INNER_AUTHENTICATION_EAP 3
#define REALM_TUNNELED_EAP_CREDENTIAL 6
#define REALM_VENDOR_SPECIFIC_EAP 221
-
+/* 802.11u non-EAP inner authentication type */
+#define REALM_RESERVED_AUTH 0
#define REALM_PAP 1
#define REALM_CHAP 2
#define REALM_MSCHAP 3
#define REALM_MSCHAPV2 4
-
+/* 802.11u credential type */
#define REALM_SIM 1
#define REALM_USIM 2
#define REALM_NFC 3
#define REALM_CERTIFICATE 6
#define REALM_USERNAME_PASSWORD 7
#define REALM_SERVER_SIDE 8
+#define REALM_RESERVED_CRED 9
+#define REALM_VENDOR_SPECIFIC_CRED 10
-
+/* 802.11u 3GPP PLMN */
#define G3PP_GUD_VERSION 0
#define G3PP_PLMN_LIST_IE 0
-
+/** hotspot2.0 indication element (vendor specific) */
BWL_PRE_PACKED_STRUCT struct hs20_ie {
uint8 oui[3];
uint8 type;
uint8 config;
} BWL_POST_PACKED_STRUCT;
typedef struct hs20_ie hs20_ie_t;
-#define HS20_IE_LEN 5
-
-
+#define HS20_IE_LEN 5 /* HS20 IE length */
+
+/** IEEE 802.11 Annex E */
+typedef enum {
+ DOT11_2GHZ_20MHZ_CLASS_12 = 81, /* Ch 1-11 */
+ DOT11_5GHZ_20MHZ_CLASS_1 = 115, /* Ch 36-48 */
+ DOT11_5GHZ_20MHZ_CLASS_2_DFS = 118, /* Ch 52-64 */
+ DOT11_5GHZ_20MHZ_CLASS_3 = 124, /* Ch 149-161 */
+ DOT11_5GHZ_20MHZ_CLASS_4_DFS = 121, /* Ch 100-140 */
+ DOT11_5GHZ_20MHZ_CLASS_5 = 125, /* Ch 149-165 */
+ DOT11_5GHZ_40MHZ_CLASS_22 = 116, /* Ch 36-44, lower */
+ DOT11_5GHZ_40MHZ_CLASS_23_DFS = 119, /* Ch 52-60, lower */
+ DOT11_5GHZ_40MHZ_CLASS_24_DFS = 122, /* Ch 100-132, lower */
+ DOT11_5GHZ_40MHZ_CLASS_25 = 126, /* Ch 149-157, lower */
+ DOT11_5GHZ_40MHZ_CLASS_27 = 117, /* Ch 40-48, upper */
+ DOT11_5GHZ_40MHZ_CLASS_28_DFS = 120, /* Ch 56-64, upper */
+ DOT11_5GHZ_40MHZ_CLASS_29_DFS = 123, /* Ch 104-136, upper */
+ DOT11_5GHZ_40MHZ_CLASS_30 = 127, /* Ch 153-161, upper */
+ DOT11_2GHZ_40MHZ_CLASS_32 = 83, /* Ch 1-7, lower */
+ DOT11_2GHZ_40MHZ_CLASS_33 = 84, /* Ch 5-11, upper */
+} dot11_op_class_t;
+
+/* QoS map */
+#define QOS_MAP_FIXED_LENGTH (8 * 2) /* DSCP ranges fixed with 8 entries */
+
+/* BCM proprietary IE type for AIBSS */
+#define BCM_AIBSS_IE_TYPE 56
+
+/* This marks the end of a packed structure section. */
#include <packed_section_end.h>
-#endif
+#endif /* _802_11_H_ */
#ifndef _802_1_D_
#define _802_1_D_
-
-#define PRIO_8021D_NONE 2
-#define PRIO_8021D_BK 1
-#define PRIO_8021D_BE 0
-#define PRIO_8021D_EE 3
-#define PRIO_8021D_CL 4
-#define PRIO_8021D_VI 5
-#define PRIO_8021D_VO 6
-#define PRIO_8021D_NC 7
-#define MAXPRIO 7
+/* 802.1D priority defines */
+#define PRIO_8021D_NONE 2 /* None = - */
+#define PRIO_8021D_BK 1 /* BK - Background */
+#define PRIO_8021D_BE 0 /* BE - Best-effort */
+#define PRIO_8021D_EE 3 /* EE - Excellent-effort */
+#define PRIO_8021D_CL 4 /* CL - Controlled Load */
+#define PRIO_8021D_VI 5 /* Vi - Video */
+#define PRIO_8021D_VO 6 /* Vo - Voice */
+#define PRIO_8021D_NC 7 /* NC - Network Control */
+#define MAXPRIO 7 /* 0-7 */
#define NUMPRIO (MAXPRIO + 1)
-#define ALLPRIO -1
-
+#define ALLPRIO -1 /* All priorities */
+/* Converts prio to precedence since the numerical values of
+ * PRIO_8021D_BE and PRIO_8021D_NONE are swapped.
+ */
#define PRIO2PREC(prio) \
(((prio) == PRIO_8021D_NONE || (prio) == PRIO_8021D_BE) ? ((prio^2)) : (prio))
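+/*
+ * Worked example (illustration only): PRIO2PREC(PRIO_8021D_BE) yields 2 and
+ * PRIO2PREC(PRIO_8021D_NONE) yields 0 (the XOR with 2 swaps them), while any
+ * other priority, e.g. PRIO_8021D_VO (6), maps to itself.
+ */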
-#endif
+#endif /* _802_1_D_ */
*
* Fundamental constants relating to 802.3
*
- * $Id: 802.3.h 417942 2013-08-13 07:53:57Z $
+ * $Id: 802.3.h 417943 2013-08-13 07:54:04Z $
*/
#ifndef _802_3_h_
--- /dev/null
+/*
+ * Copyright (C) 2014, Broadcom Corporation
+ * All Rights Reserved.
+ *
+ * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom Corporation;
+ * the contents of this file may not be disclosed to third parties, copied
+ * or duplicated in any form, in whole or in part, without the prior
+ * written permission of Broadcom Corporation.
+ *
+ * Fundamental constants relating to DHCP Protocol
+ *
+ * $Id: bcmdhcp.h 382883 2013-02-04 23:26:09Z $
+ */
+
+#ifndef _bcmdhcp_h_
+#define _bcmdhcp_h_
+
+/* DHCP params */
+#define DHCP_TYPE_OFFSET 0 /* DHCP type (request|reply) offset */
+#define DHCP_TID_OFFSET 4 /* DHCP transaction id offset */
+#define DHCP_FLAGS_OFFSET 10 /* DHCP flags offset */
+#define DHCP_CIADDR_OFFSET 12 /* DHCP client IP address offset */
+#define DHCP_YIADDR_OFFSET 16 /* DHCP your IP address offset */
+#define DHCP_GIADDR_OFFSET 24 /* DHCP relay agent IP address offset */
+#define DHCP_CHADDR_OFFSET 28 /* DHCP client h/w address offset */
+#define DHCP_OPT_OFFSET 236 /* DHCP options offset */
+
+#define DHCP_OPT_MSGTYPE 53 /* DHCP message type */
+#define DHCP_OPT_MSGTYPE_REQ 3 /* DHCP message type - request */
+#define DHCP_OPT_MSGTYPE_ACK 5 /* DHCP message type - ACK */
+
+#define DHCP_OPT_CODE_OFFSET 0 /* Option identifier */
+#define DHCP_OPT_LEN_OFFSET 1 /* Option data length */
+#define DHCP_OPT_DATA_OFFSET 2 /* Option data */
+
+#define DHCP_OPT_CODE_CLIENTID 61 /* Option identifier */
+
+#define DHCP_TYPE_REQUEST 1 /* DHCP request (discover|request) */
+#define DHCP_TYPE_REPLY 2 /* DHCP reply (offer|ack) */
+
+#define DHCP_PORT_SERVER 67 /* DHCP server UDP port */
+#define DHCP_PORT_CLIENT 68 /* DHCP client UDP port */
+
+#define DHCP_FLAG_BCAST 0x8000 /* DHCP broadcast flag */
+
+#define DHCP_FLAGS_LEN 2 /* DHCP flags field length */
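+
+/*
+ * Minimal usage sketch (hypothetical helpers, not part of the original header):
+ * classify a DHCPv4 payload using the offsets above. 'dhcp' points at the
+ * start of the BOOTP payload and 'opt' at a single option TLV; the caller is
+ * assumed to have already validated the UDP ports and payload length.
+ */
+#define DHCP_IS_REPLY(dhcp) \
+	(((const unsigned char *)(dhcp))[DHCP_TYPE_OFFSET] == DHCP_TYPE_REPLY)
+#define DHCP_MSGTYPE_IS_ACK(opt) \
+	((((const unsigned char *)(opt))[DHCP_OPT_CODE_OFFSET] == DHCP_OPT_MSGTYPE) && \
+	 (((const unsigned char *)(opt))[DHCP_OPT_DATA_OFFSET] == DHCP_OPT_MSGTYPE_ACK))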
+
+#define DHCP6_TYPE_SOLICIT 1 /* DHCP6 solicit */
+#define DHCP6_TYPE_ADVERTISE 2 /* DHCP6 advertise */
+#define DHCP6_TYPE_REQUEST 3 /* DHCP6 request */
+#define DHCP6_TYPE_CONFIRM 4 /* DHCP6 confirm */
+#define DHCP6_TYPE_RENEW 5 /* DHCP6 renew */
+#define DHCP6_TYPE_REBIND 6 /* DHCP6 rebind */
+#define DHCP6_TYPE_REPLY 7 /* DHCP6 reply */
+#define DHCP6_TYPE_RELEASE 8 /* DHCP6 release */
+#define DHCP6_TYPE_DECLINE 9 /* DHCP6 decline */
+#define DHCP6_TYPE_RECONFIGURE 10 /* DHCP6 reconfigure */
+#define DHCP6_TYPE_INFOREQ 11 /* DHCP6 information request */
+#define DHCP6_TYPE_RELAYFWD 12 /* DHCP6 relay forward */
+#define DHCP6_TYPE_RELAYREPLY 13 /* DHCP6 relay reply */
+
+#define DHCP6_TYPE_OFFSET 0 /* DHCP6 type offset */
+
+#define DHCP6_MSG_OPT_OFFSET 4 /* Offset of options in client server messages */
+#define DHCP6_RELAY_OPT_OFFSET 34 /* Offset of options in relay messages */
+
+#define DHCP6_OPT_CODE_OFFSET 0 /* Option identifier */
+#define DHCP6_OPT_LEN_OFFSET 2 /* Option data length */
+#define DHCP6_OPT_DATA_OFFSET 4 /* Option data */
+
+#define DHCP6_OPT_CODE_CLIENTID 1 /* DHCP6 CLIENTID option */
+#define DHCP6_OPT_CODE_SERVERID 2 /* DHCP6 SERVERID option */
+
+#define DHCP6_PORT_SERVER 547 /* DHCP6 server UDP port */
+#define DHCP6_PORT_CLIENT 546 /* DHCP6 client UDP port */
+
+#endif /* #ifndef _bcmdhcp_h_ */
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: bcmeth.h 382882 2013-02-04 23:24:31Z $
+ * $Id: bcmeth.h 445746 2013-12-30 12:57:26Z $
*/
-
+/*
+ * Broadcom Ethernet protocol defines
+ */
#ifndef _BCMETH_H_
#define _BCMETH_H_
#include <typedefs.h>
#endif
-
+/* This marks the start of a packed structure section. */
#include <packed_section_start.h>
+/* ETHER_TYPE_BRCM is defined in ethernet.h */
+/*
+ * Following the 2byte BRCM ether_type is a 16bit BRCM subtype field
+ * in one of two formats: (only subtypes 32768-65535 are in use now)
+ *
+ * subtypes 0-32767:
+ * 8 bit subtype (0-127)
+ * 8 bit length in bytes (0-255)
+ *
+ * subtypes 32768-65535:
+ * 16 bit big-endian subtype
+ * 16 bit big-endian length in bytes (0-65535)
+ *
+ * length is the number of additional bytes beyond the 4 or 6 byte header
+ *
+ * Reserved values:
+ * 0 reserved
+ * 5-15 reserved for iLine protocol assignments
+ * 17-126 reserved, assignable
+ * 127 reserved
+ * 32768 reserved
+ * 32769-65534 reserved, assignable
+ * 65535 reserved
+ */
-
-
-
+/*
+ * While adding the subtypes and their specific processing code make sure
+ * bcmeth_bcm_hdr_t is the first data structure in the user specific data structure definition
+ */
#define BCMILCP_SUBTYPE_RATE 1
#define BCMILCP_SUBTYPE_LINK 2
#define BCMILCP_BCM_SUBTYPE_RESERVED 0
#define BCMILCP_BCM_SUBTYPE_EVENT 1
#define BCMILCP_BCM_SUBTYPE_SES 2
-
-
-#define BCMILCP_BCM_SUBTYPE_DPT 4
+/*
+ * The EAPOL type is not used anymore. Instead EAPOL messages are now embedded
+ * within BCMILCP_BCM_SUBTYPE_EVENT type messages
+ */
+/* #define BCMILCP_BCM_SUBTYPE_EAPOL 3 */
+#define BCMILCP_BCM_SUBTYPE_DPT 4
#define BCMILCP_BCM_SUBTYPEHDR_MINLENGTH 8
#define BCMILCP_BCM_SUBTYPEHDR_VERSION 0
-
+/* These fields are stored in network order */
typedef BWL_PRE_PACKED_STRUCT struct bcmeth_hdr
{
- uint16 subtype;
+ uint16 subtype; /* Vendor specific subtype (32769) */
uint16 length;
- uint8 version;
- uint8 oui[3];
-
+ uint8 version; /* Version is 0 */
+ uint8 oui[3]; /* Broadcom OUI */
+ /* user specific Data */
uint16 usr_subtype;
} BWL_POST_PACKED_STRUCT bcmeth_hdr_t;
-
+/* This marks the end of a packed structure section. */
#include <packed_section_end.h>
-#endif
+#endif /* _BCMETH_H_ */
*
* Dependencies: proto/bcmeth.h
*
- * $Id: bcmevent.h 419132 2013-08-19 21:33:05Z $
+ * $Id: bcmevent.h 490387 2014-07-10 15:12:52Z $
*
*/
-
+/*
+ * Broadcom Ethernet Events protocol defines
+ *
+ */
#ifndef _BCMEVENT_H_
#define _BCMEVENT_H_
#ifndef _TYPEDEFS_H_
#include <typedefs.h>
#endif
-
+/* #include <ethernet.h> -- TODO: req., excluded due to overwhelming coupling (break up ethernet.h) */
#include <proto/bcmeth.h>
-
+/* This marks the start of a packed structure section. */
#include <packed_section_start.h>
-#define BCM_EVENT_MSG_VERSION 2
-#define BCM_MSG_IFNAME_MAX 16
-
-
-#define WLC_EVENT_MSG_LINK 0x01
-#define WLC_EVENT_MSG_FLUSHTXQ 0x02
-#define WLC_EVENT_MSG_GROUP 0x04
-#define WLC_EVENT_MSG_UNKBSS 0x08
-#define WLC_EVENT_MSG_UNKIF 0x10
-
+#define BCM_EVENT_MSG_VERSION 2 /* wl_event_msg_t struct version */
+#define BCM_MSG_IFNAME_MAX 16 /* max length of interface name */
+/* flags */
+#define WLC_EVENT_MSG_LINK 0x01 /* link is up */
+#define WLC_EVENT_MSG_FLUSHTXQ 0x02 /* flush tx queue on MIC error */
+#define WLC_EVENT_MSG_GROUP 0x04 /* group MIC error */
+#define WLC_EVENT_MSG_UNKBSS 0x08 /* unknown source bsscfg */
+#define WLC_EVENT_MSG_UNKIF 0x10 /* unknown source OS i/f */
+/* these fields are stored in network order */
+/* version 1 */
typedef BWL_PRE_PACKED_STRUCT struct
{
uint16 version;
- uint16 flags;
- uint32 event_type;
- uint32 status;
- uint32 reason;
- uint32 auth_type;
- uint32 datalen;
- struct ether_addr addr;
- char ifname[BCM_MSG_IFNAME_MAX];
+ uint16 flags; /* see flags below */
+ uint32 event_type; /* Message (see below) */
+ uint32 status; /* Status code (see below) */
+ uint32 reason; /* Reason code (if applicable) */
+ uint32 auth_type; /* WLC_E_AUTH */
+ uint32 datalen; /* length of data buffer */
+ struct ether_addr addr; /* Station address (if applicable) */
+ char ifname[BCM_MSG_IFNAME_MAX]; /* name of the packet incoming interface */
} BWL_POST_PACKED_STRUCT wl_event_msg_v1_t;
-
+/* the current version */
typedef BWL_PRE_PACKED_STRUCT struct
{
uint16 version;
- uint16 flags;
- uint32 event_type;
- uint32 status;
- uint32 reason;
- uint32 auth_type;
- uint32 datalen;
- struct ether_addr addr;
- char ifname[BCM_MSG_IFNAME_MAX];
- uint8 ifidx;
- uint8 bsscfgidx;
+ uint16 flags; /* see flags below */
+ uint32 event_type; /* Message (see below) */
+ uint32 status; /* Status code (see below) */
+ uint32 reason; /* Reason code (if applicable) */
+ uint32 auth_type; /* WLC_E_AUTH */
+ uint32 datalen; /* length of data buffer */
+ struct ether_addr addr; /* Station address (if applicable) */
+ char ifname[BCM_MSG_IFNAME_MAX]; /* name of the packet incoming interface */
+ uint8 ifidx; /* destination OS i/f index */
+ uint8 bsscfgidx; /* source bsscfg index */
} BWL_POST_PACKED_STRUCT wl_event_msg_t;
-
+/* used by driver msgs */
typedef BWL_PRE_PACKED_STRUCT struct bcm_event {
struct ether_header eth;
bcmeth_hdr_t bcm_hdr;
wl_event_msg_t event;
-
+ /* data portion follows */
} BWL_POST_PACKED_STRUCT bcm_event_t;
#define BCM_MSG_LEN (sizeof(bcm_event_t) - sizeof(bcmeth_hdr_t) - sizeof(struct ether_header))
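+
+/*
+ * Minimal sketch (hypothetical helper, not part of the original header): the
+ * event payload of 'datalen' bytes immediately follows the fixed
+ * wl_event_msg_t inside a bcm_event_t; header fields are in network order.
+ */
+#define BCM_EVENT_DATA(ev) \
+	((void *)((uint8 *)&((bcm_event_t *)(ev))->event + sizeof(wl_event_msg_t)))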
-
-#define WLC_E_SET_SSID 0
-#define WLC_E_JOIN 1
-#define WLC_E_START 2
-#define WLC_E_AUTH 3
-#define WLC_E_AUTH_IND 4
-#define WLC_E_DEAUTH 5
-#define WLC_E_DEAUTH_IND 6
-#define WLC_E_ASSOC 7
-#define WLC_E_ASSOC_IND 8
-#define WLC_E_REASSOC 9
-#define WLC_E_REASSOC_IND 10
-#define WLC_E_DISASSOC 11
-#define WLC_E_DISASSOC_IND 12
-#define WLC_E_QUIET_START 13
-#define WLC_E_QUIET_END 14
-#define WLC_E_BEACON_RX 15
-#define WLC_E_LINK 16
-#define WLC_E_MIC_ERROR 17
-#define WLC_E_NDIS_LINK 18
-#define WLC_E_ROAM 19
-#define WLC_E_TXFAIL 20
-#define WLC_E_PMKID_CACHE 21
-#define WLC_E_RETROGRADE_TSF 22
-#define WLC_E_PRUNE 23
-#define WLC_E_AUTOAUTH 24
-#define WLC_E_EAPOL_MSG 25
-#define WLC_E_SCAN_COMPLETE 26
-#define WLC_E_ADDTS_IND 27
-#define WLC_E_DELTS_IND 28
-#define WLC_E_BCNSENT_IND 29
-#define WLC_E_BCNRX_MSG 30
-#define WLC_E_BCNLOST_MSG 31
-#define WLC_E_ROAM_PREP 32
-#define WLC_E_PFN_NET_FOUND 33
-#define WLC_E_PFN_NET_LOST 34
+/* Event messages */
+#define WLC_E_SET_SSID 0 /* indicates status of set SSID */
+#define WLC_E_JOIN 1 /* differentiates join IBSS from found (WLC_E_START) IBSS */
+#define WLC_E_START 2 /* STA formed an IBSS or AP started a BSS */
+#define WLC_E_AUTH 3 /* 802.11 AUTH request */
+#define WLC_E_AUTH_IND 4 /* 802.11 AUTH indication */
+#define WLC_E_DEAUTH 5 /* 802.11 DEAUTH request */
+#define WLC_E_DEAUTH_IND 6 /* 802.11 DEAUTH indication */
+#define WLC_E_ASSOC 7 /* 802.11 ASSOC request */
+#define WLC_E_ASSOC_IND 8 /* 802.11 ASSOC indication */
+#define WLC_E_REASSOC 9 /* 802.11 REASSOC request */
+#define WLC_E_REASSOC_IND 10 /* 802.11 REASSOC indication */
+#define WLC_E_DISASSOC 11 /* 802.11 DISASSOC request */
+#define WLC_E_DISASSOC_IND 12 /* 802.11 DISASSOC indication */
+#define WLC_E_QUIET_START 13 /* 802.11h Quiet period started */
+#define WLC_E_QUIET_END 14 /* 802.11h Quiet period ended */
+#define WLC_E_BEACON_RX 15 /* BEACONS received/lost indication */
+#define WLC_E_LINK 16 /* generic link indication */
+#define WLC_E_MIC_ERROR 17 /* TKIP MIC error occurred */
+#define WLC_E_NDIS_LINK 18 /* NDIS style link indication */
+#define WLC_E_ROAM 19 /* roam attempt occurred: indicate status & reason */
+#define WLC_E_TXFAIL 20 /* change in dot11FailedCount (txfail) */
+#define WLC_E_PMKID_CACHE 21 /* WPA2 pmkid cache indication */
+#define WLC_E_RETROGRADE_TSF 22 /* current AP's TSF value went backward */
+#define WLC_E_PRUNE 23 /* AP was pruned from join list for reason */
+#define WLC_E_AUTOAUTH 24 /* report AutoAuth table entry match for join attempt */
+#define WLC_E_EAPOL_MSG 25 /* Event encapsulating an EAPOL message */
+#define WLC_E_SCAN_COMPLETE 26 /* Scan results are ready or scan was aborted */
+#define WLC_E_ADDTS_IND 27 /* indicate to host addts fail/success */
+#define WLC_E_DELTS_IND 28 /* indicate to host delts fail/success */
+#define WLC_E_BCNSENT_IND 29 /* indicate to host of beacon transmit */
+#define WLC_E_BCNRX_MSG 30 /* Send the received beacon up to the host */
+#define WLC_E_BCNLOST_MSG 31 /* indicate to host loss of beacon */
+#define WLC_E_ROAM_PREP 32 /* before attempting to roam */
+#define WLC_E_PFN_NET_FOUND 33 /* PFN network found event */
+#define WLC_E_PFN_NET_LOST 34 /* PFN network lost event */
#define WLC_E_RESET_COMPLETE 35
#define WLC_E_JOIN_START 36
#define WLC_E_ROAM_START 37
#define WLC_E_ASSOC_START 38
#define WLC_E_IBSS_ASSOC 39
#define WLC_E_RADIO 40
-#define WLC_E_PSM_WATCHDOG 41
+#define WLC_E_PSM_WATCHDOG 41 /* PSM microcode watchdog fired */
#if defined(BCMCCX) && defined(CCX_SDK)
-#define WLC_E_CCX_ASSOC_START 42
-#define WLC_E_CCX_ASSOC_ABORT 43
-#endif
-#define WLC_E_PROBREQ_MSG 44
+#define WLC_E_CCX_ASSOC_START 42 /* CCX association start */
+#define WLC_E_CCX_ASSOC_ABORT 43 /* CCX association abort */
+#endif /* BCMCCX && CCX_SDK */
+#define WLC_E_PROBREQ_MSG 44 /* probe request received */
#define WLC_E_SCAN_CONFIRM_IND 45
-#define WLC_E_PSK_SUP 46
+#define WLC_E_PSK_SUP 46 /* WPA Handshake fail */
#define WLC_E_COUNTRY_CODE_CHANGED 47
-#define WLC_E_EXCEEDED_MEDIUM_TIME 48
-#define WLC_E_ICV_ERROR 49
-#define WLC_E_UNICAST_DECODE_ERROR 50
-#define WLC_E_MULTICAST_DECODE_ERROR 51
+#define WLC_E_EXCEEDED_MEDIUM_TIME 48 /* WMMAC exceeded medium time */
+#define WLC_E_ICV_ERROR 49 /* WEP ICV error occurred */
+#define WLC_E_UNICAST_DECODE_ERROR 50 /* Unsupported unicast encrypted frame */
+#define WLC_E_MULTICAST_DECODE_ERROR 51 /* Unsupported multicast encrypted frame */
#define WLC_E_TRACE 52
#ifdef WLBTAMP
-#define WLC_E_BTA_HCI_EVENT 53
+#define WLC_E_BTA_HCI_EVENT 53 /* BT-AMP HCI event */
#endif
-#define WLC_E_IF 54
-#define WLC_E_P2P_DISC_LISTEN_COMPLETE 55
-#define WLC_E_RSSI 56
-#define WLC_E_PFN_BEST_BATCHING 57
-#define WLC_E_PFN_SCAN_COMPLETE 57
+#define WLC_E_IF 54 /* I/F change (for dongle host notification) */
+#define WLC_E_P2P_DISC_LISTEN_COMPLETE 55 /* listen state expires */
+#define WLC_E_RSSI 56 /* indicate RSSI change based on configured levels */
+#define WLC_E_PFN_SCAN_COMPLETE 57 /* PFN completed scan of network list */
+/* PFN best network batching event, re-use obsolete WLC_E_PFN_SCAN_COMPLETE */
+#define WLC_E_PFN_BEST_BATCHING 57
#define WLC_E_EXTLOG_MSG 58
-#define WLC_E_ACTION_FRAME 59
-#define WLC_E_ACTION_FRAME_COMPLETE 60
-#define WLC_E_PRE_ASSOC_IND 61
-#define WLC_E_PRE_REASSOC_IND 62
+#define WLC_E_ACTION_FRAME 59 /* Action frame Rx */
+#define WLC_E_ACTION_FRAME_COMPLETE 60 /* Action frame Tx complete */
+#define WLC_E_PRE_ASSOC_IND 61 /* assoc request received */
+#define WLC_E_PRE_REASSOC_IND 62 /* re-assoc request received */
#define WLC_E_CHANNEL_ADOPTED 63
-#define WLC_E_AP_STARTED 64
-#define WLC_E_DFS_AP_STOP 65
-#define WLC_E_DFS_AP_RESUME 66
-#define WLC_E_WAI_STA_EVENT 67
-#define WLC_E_WAI_MSG 68
-#define WLC_E_ESCAN_RESULT 69
-#define WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE 70
-#define WLC_E_PROBRESP_MSG 71
-#define WLC_E_P2P_PROBREQ_MSG 72
+#define WLC_E_AP_STARTED 64 /* AP started */
+#define WLC_E_DFS_AP_STOP 65 /* AP stopped due to DFS */
+#define WLC_E_DFS_AP_RESUME 66 /* AP resumed due to DFS */
+#define WLC_E_WAI_STA_EVENT 67 /* WAI stations event */
+#define WLC_E_WAI_MSG 68 /* event encapsulating an WAI message */
+#define WLC_E_ESCAN_RESULT 69 /* escan result event */
+#define WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE 70 /* action frame off channel complete */
+#define WLC_E_PROBRESP_MSG 71 /* probe response received */
+#define WLC_E_P2P_PROBREQ_MSG 72 /* P2P Probe request received */
#define WLC_E_DCS_REQUEST 73
-#define WLC_E_FIFO_CREDIT_MAP 74
-#define WLC_E_ACTION_FRAME_RX 75
-#define WLC_E_WAKE_EVENT 76
-#define WLC_E_RM_COMPLETE 77
-#define WLC_E_HTSFSYNC 78
-#define WLC_E_OVERLAY_REQ 79
-#define WLC_E_CSA_COMPLETE_IND 80
-#define WLC_E_EXCESS_PM_WAKE_EVENT 81
-#define WLC_E_PFN_BSSID_NET_FOUND 82
-#define WLC_E_PFN_SCAN_NONE 82
-#define WLC_E_PFN_BSSID_NET_LOST 83
-
-#define WLC_E_PFN_SCAN_ALLGONE 83
+#define WLC_E_FIFO_CREDIT_MAP 74 /* credits for D11 FIFOs. [AC0,AC1,AC2,AC3,BC_MC,ATIM] */
+#define WLC_E_ACTION_FRAME_RX 75 /* Received action frame event WITH
+ * wl_event_rx_frame_data_t header
+ */
+#define WLC_E_WAKE_EVENT 76 /* Wake Event timer fired, used for wake WLAN test mode */
+#define WLC_E_RM_COMPLETE 77 /* Radio measurement complete */
+#define WLC_E_HTSFSYNC 78 /* Synchronize TSF with the host */
+#define WLC_E_OVERLAY_REQ 79 /* request an overlay IOCTL/iovar from the host */
+#define WLC_E_CSA_COMPLETE_IND 80 /* 802.11 CHANNEL SWITCH ACTION completed */
+#define WLC_E_EXCESS_PM_WAKE_EVENT 81 /* excess PM Wake Event to inform host */
+#define WLC_E_PFN_SCAN_NONE 82 /* no PFN networks around */
+/* PFN BSSID network found event, conflict/share with WLC_E_PFN_SCAN_NONE */
+#define WLC_E_PFN_BSSID_NET_FOUND 82
+#define WLC_E_PFN_SCAN_ALLGONE 83 /* last found PFN network gets lost */
+/* PFN BSSID network lost event, conflict/share with WLC_E_PFN_SCAN_ALLGONE */
+#define WLC_E_PFN_BSSID_NET_LOST 83
#define WLC_E_GTK_PLUMBED 84
-#define WLC_E_ASSOC_IND_NDIS 85
-#define WLC_E_REASSOC_IND_NDIS 86
+#define WLC_E_ASSOC_IND_NDIS 85 /* 802.11 ASSOC indication for NDIS only */
+#define WLC_E_REASSOC_IND_NDIS 86 /* 802.11 REASSOC indication for NDIS only */
#define WLC_E_ASSOC_REQ_IE 87
#define WLC_E_ASSOC_RESP_IE 88
-#define WLC_E_ASSOC_RECREATED 89
-#define WLC_E_ACTION_FRAME_RX_NDIS 90
-#define WLC_E_AUTH_REQ 91
-#define WLC_E_TDLS_PEER_EVENT 92
-#define WLC_E_SPEEDY_RECREATE_FAIL 93
-#define WLC_E_NATIVE 94
-#define WLC_E_PKTDELAY_IND 95
-#define WLC_E_AWDL_AW 96
-#define WLC_E_AWDL_ROLE 97
-#define WLC_E_AWDL_EVENT 98
-#ifdef WLNIC
-#define WLC_E_NIC_AF_TXS 99
-#define WLC_E_NIC_NIC_REPORT 100
-#endif
+#define WLC_E_ASSOC_RECREATED 89 /* association recreated on resume */
+#define WLC_E_ACTION_FRAME_RX_NDIS 90 /* rx action frame event for NDIS only */
+#define WLC_E_AUTH_REQ 91 /* authentication request received */
+#define WLC_E_TDLS_PEER_EVENT 92 /* discovered peer, connected/disconnected peer */
+#define WLC_E_SPEEDY_RECREATE_FAIL 93 /* fast assoc recreation failed */
+#define WLC_E_NATIVE 94 /* port-specific event and payload (e.g. NDIS) */
+#define WLC_E_PKTDELAY_IND 95 /* event for a sudden jump in tx pkt delay */
+#define WLC_E_PSTA_PRIMARY_INTF_IND 99 /* psta primary interface indication */
+#define WLC_E_NAN 100 /* NAN event */
#define WLC_E_BEACON_FRAME_RX 101
-#define WLC_E_SERVICE_FOUND 102
-#define WLC_E_GAS_FRAGMENT_RX 103
-#define WLC_E_GAS_COMPLETE 104
-#define WLC_E_P2PO_ADD_DEVICE 105
-#define WLC_E_P2PO_DEL_DEVICE 106
-#define WLC_E_WNM_STA_SLEEP 107
-#define WLC_E_NONE 108
-#define WLC_E_PROXD 109
-#define WLC_E_IBSS_COALESCE 110
-#define WLC_E_AWDL_AW_EXT_END 111
-#define WLC_E_AWDL_AW_EXT_START 112
-#define WLC_E_AWDL_AW_START 113
-#define WLC_E_AWDL_RADIO_OFF 114
-#define WLC_E_AWDL_PEER_STATE 115
-#define WLC_E_AWDL_SYNC_STATE_CHANGED 116
-#define WLC_E_AWDL_CHIP_RESET 117
-#define WLC_E_AWDL_INTERLEAVED_SCAN_START 118
-#define WLC_E_AWDL_INTERLEAVED_SCAN_STOP 119
-#define WLC_E_AWDL_PEER_CACHE_CONTROL 120
+#define WLC_E_SERVICE_FOUND 102 /* desired service found */
+#define WLC_E_GAS_FRAGMENT_RX 103 /* GAS fragment received */
+#define WLC_E_GAS_COMPLETE 104 /* GAS sessions all complete */
+#define WLC_E_P2PO_ADD_DEVICE 105 /* New device found by p2p offload */
+#define WLC_E_P2PO_DEL_DEVICE 106 /* device has been removed by p2p offload */
+#define WLC_E_WNM_STA_SLEEP 107 /* WNM event to notify STA enter sleep mode */
+#define WLC_E_TXFAIL_THRESH 108 /* Indication of MAC tx failures (exhaustion of
+ * 802.11 retries) exceeding threshold(s)
+ */
+#define WLC_E_PROXD 109 /* Proximity Detection event */
+#define WLC_E_IBSS_COALESCE 110 /* IBSS Coalescing */
+#define WLC_E_AIBSS_TXFAIL 110 /* TXFAIL event for AIBSS, re-using event 110 */
+#define WLC_E_BSS_LOAD 114 /* Inform host of beacon bss load */
#define WLC_E_CSA_START_IND 121
#define WLC_E_CSA_DONE_IND 122
#define WLC_E_CSA_FAILURE_IND 123
-#define WLC_E_CCA_CHAN_QUAL 124
-#define WLC_E_LAST 125
-
-
-
-typedef struct {
- uint event;
- const char *name;
-} bcmevent_name_t;
-
-extern const bcmevent_name_t bcmevent_names[];
-extern const int bcmevent_names_size;
-
-
-#define WLC_E_STATUS_SUCCESS 0
-#define WLC_E_STATUS_FAIL 1
-#define WLC_E_STATUS_TIMEOUT 2
-#define WLC_E_STATUS_NO_NETWORKS 3
-#define WLC_E_STATUS_ABORT 4
-#define WLC_E_STATUS_NO_ACK 5
-#define WLC_E_STATUS_UNSOLICITED 6
-#define WLC_E_STATUS_ATTEMPT 7
-#define WLC_E_STATUS_PARTIAL 8
-#define WLC_E_STATUS_NEWSCAN 9
-#define WLC_E_STATUS_NEWASSOC 10
-#define WLC_E_STATUS_11HQUIET 11
-#define WLC_E_STATUS_SUPPRESS 12
-#define WLC_E_STATUS_NOCHANS 13
+#define WLC_E_CCA_CHAN_QUAL 124 /* CCA based channel quality report */
+#define WLC_E_BSSID 125 /* to report change in BSSID while roaming */
+#define WLC_E_TX_STAT_ERROR 126 /* tx error indication */
+#define WLC_E_BCMC_CREDIT_SUPPORT 127 /* credit check for BCMC supported */
+#define WLC_E_BT_WIFI_HANDOVER_REQ 130 /* Handover Request Initiated */
+#define WLC_E_SPW_TXINHIBIT 131 /* Southpaw TxInhibit notification */
+#define WLC_E_FBT_AUTH_REQ_IND 132 /* FBT Authentication Request Indication */
+#define WLC_E_RSSI_LQM 133 /* Enhancement addition for WLC_E_RSSI */
+#define WLC_E_PFN_GSCAN_FULL_RESULT 134 /* Full probe/beacon (IEs etc) results */
+#define WLC_E_PFN_SWC 135 /* Significant change in rssi of bssids being tracked */
+#define WLC_E_RMC_EVENT 139 /* RMC event */
+#define WLC_E_LAST 140 /* highest val + 1 for range checking */
+
+#if (WLC_E_LAST > 140)
+#error "WLC_E_LAST: Invalid value for last event; must be <= 140."
+#endif /* WLC_E_LAST */
+
+/* define an API for getting the string name of an event */
+extern const char *bcmevent_get_name(uint event_type);
+
+
+
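As a hedged illustration (not part of this patch), a host-side debug path could call bcmevent_get_name() when tracing firmware events. The wrapper below and its use of printf are assumptions made for the sketch; a kernel caller would normally use its own log macro, and the driver's typedefs.h and proto/bcmevent.h are assumed to be on the include path.

/* Minimal sketch: only bcmevent_get_name() comes from this header. */
#include <stdio.h>

static void example_trace_event(unsigned int event_type, unsigned int status)
{
	printf("wl event %u (%s), status %u\n",
	       event_type, bcmevent_get_name(event_type), status);
}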
+/* Event status codes */
+#define WLC_E_STATUS_SUCCESS 0 /* operation was successful */
+#define WLC_E_STATUS_FAIL 1 /* operation failed */
+#define WLC_E_STATUS_TIMEOUT 2 /* operation timed out */
+#define WLC_E_STATUS_NO_NETWORKS 3 /* failed due to no matching network found */
+#define WLC_E_STATUS_ABORT 4 /* operation was aborted */
+#define WLC_E_STATUS_NO_ACK 5 /* protocol failure: packet not ack'd */
+#define WLC_E_STATUS_UNSOLICITED 6 /* AUTH or ASSOC packet was unsolicited */
+#define WLC_E_STATUS_ATTEMPT 7 /* attempt to assoc to an auto auth configuration */
+#define WLC_E_STATUS_PARTIAL 8 /* scan results are incomplete */
+#define WLC_E_STATUS_NEWSCAN 9 /* scan aborted by another scan */
+#define WLC_E_STATUS_NEWASSOC 10 /* scan aborted due to assoc in progress */
+#define WLC_E_STATUS_11HQUIET 11 /* 802.11h quiet period started */
+#define WLC_E_STATUS_SUPPRESS 12 /* user disabled scanning (WLC_SET_SCANSUPPRESS) */
+#define WLC_E_STATUS_NOCHANS 13 /* no allowable channels to scan */
#ifdef BCMCCX
-#define WLC_E_STATUS_CCXFASTRM 14
-#endif
-#define WLC_E_STATUS_CS_ABORT 15
-#define WLC_E_STATUS_ERROR 16
-
-
-#define WLC_E_REASON_INITIAL_ASSOC 0
-#define WLC_E_REASON_LOW_RSSI 1
-#define WLC_E_REASON_DEAUTH 2
-#define WLC_E_REASON_DISASSOC 3
-#define WLC_E_REASON_BCNS_LOST 4
-
-
-#define WLC_E_REASON_FAST_ROAM_FAILED 5
-#define WLC_E_REASON_DIRECTED_ROAM 6
-#define WLC_E_REASON_TSPEC_REJECTED 7
-#define WLC_E_REASON_BETTER_AP 8
-#define WLC_E_REASON_MINTXRATE 9
-#define WLC_E_REASON_TXFAIL 10
-
-#define WLC_E_REASON_REQUESTED_ROAM 11
-
-
-
-#define WLC_E_PRUNE_ENCR_MISMATCH 1
-#define WLC_E_PRUNE_BCAST_BSSID 2
-#define WLC_E_PRUNE_MAC_DENY 3
-#define WLC_E_PRUNE_MAC_NA 4
-#define WLC_E_PRUNE_REG_PASSV 5
-#define WLC_E_PRUNE_SPCT_MGMT 6
-#define WLC_E_PRUNE_RADAR 7
-#define WLC_E_RSN_MISMATCH 8
-#define WLC_E_PRUNE_NO_COMMON_RATES 9
-#define WLC_E_PRUNE_BASIC_RATES 10
+#define WLC_E_STATUS_CCXFASTRM 14 /* scan aborted due to CCX fast roam */
+#endif /* BCMCCX */
+#define WLC_E_STATUS_CS_ABORT 15 /* abort channel select */
+#define WLC_E_STATUS_ERROR 16 /* request failed due to error */
+#define WLC_E_STATUS_INVALID 0xff /* Invalid status code to init variables. */
+
+
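For illustration only, a scan-completion handler might treat a few of these status codes as retryable. The policy below is a made-up example, not driver behaviour; only the WLC_E_STATUS_* values come from this header.

/* Hypothetical retry decision keyed on the event status. */
static int example_scan_should_retry(unsigned int status)
{
	switch (status) {
	case WLC_E_STATUS_NEWSCAN:	/* pre-empted by another scan */
	case WLC_E_STATUS_NEWASSOC:	/* pre-empted by an association */
	case WLC_E_STATUS_ABORT:	/* aborted; caller may reissue */
		return 1;
	default:			/* success, suppression, errors, ... */
		return 0;
	}
}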
+/* roam reason codes */
+#define WLC_E_REASON_INITIAL_ASSOC 0 /* initial assoc */
+#define WLC_E_REASON_LOW_RSSI 1 /* roamed due to low RSSI */
+#define WLC_E_REASON_DEAUTH 2 /* roamed due to DEAUTH indication */
+#define WLC_E_REASON_DISASSOC 3 /* roamed due to DISASSOC indication */
+#define WLC_E_REASON_BCNS_LOST 4 /* roamed due to lost beacons */
+
+/* Roam codes used primarily by CCX */
+#define WLC_E_REASON_FAST_ROAM_FAILED 5 /* roamed due to fast roam failure */
+#define WLC_E_REASON_DIRECTED_ROAM 6 /* roamed due to request by AP */
+#define WLC_E_REASON_TSPEC_REJECTED 7 /* roamed due to TSPEC rejection */
+#define WLC_E_REASON_BETTER_AP 8 /* roamed due to finding better AP */
+#define WLC_E_REASON_MINTXRATE 9 /* roamed because at mintxrate for too long */
+#define WLC_E_REASON_TXFAIL 10 /* We can hear AP, but AP can't hear us */
+/* retained for precommit auto-merging errors; remove once all branches are synced */
+#define WLC_E_REASON_REQUESTED_ROAM 11
+#define WLC_E_REASON_BSSTRANS_REQ 11 /* roamed due to BSS Transition request by AP */
+
+/* prune reason codes */
+#define WLC_E_PRUNE_ENCR_MISMATCH 1 /* encryption mismatch */
+#define WLC_E_PRUNE_BCAST_BSSID 2 /* AP uses a broadcast BSSID */
+#define WLC_E_PRUNE_MAC_DENY 3 /* STA's MAC addr is in AP's MAC deny list */
+#define WLC_E_PRUNE_MAC_NA 4 /* STA's MAC addr is not in AP's MAC allow list */
+#define WLC_E_PRUNE_REG_PASSV 5 /* AP not allowed due to regulatory restriction */
+#define WLC_E_PRUNE_SPCT_MGMT 6 /* AP does not support STA locale spectrum mgmt */
+#define WLC_E_PRUNE_RADAR 7 /* AP is on a radar channel of STA locale */
+#define WLC_E_RSN_MISMATCH 8 /* STA does not support AP's RSN */
+#define WLC_E_PRUNE_NO_COMMON_RATES 9 /* No rates in common with AP */
+#define WLC_E_PRUNE_BASIC_RATES 10 /* STA does not support all basic rates of BSS */
#ifdef BCMCCX
-#define WLC_E_PRUNE_CCXFAST_PREVAP 11
-#endif
-#define WLC_E_PRUNE_CIPHER_NA 12
-#define WLC_E_PRUNE_KNOWN_STA 13
+#define WLC_E_PRUNE_CCXFAST_PREVAP 11 /* CCX FAST ROAM: prune previous AP */
+#endif /* def BCMCCX */
+#define WLC_E_PRUNE_CIPHER_NA 12 /* BSS's cipher not supported */
+#define WLC_E_PRUNE_KNOWN_STA 13 /* AP is already known to us as a STA */
#ifdef BCMCCX
-#define WLC_E_PRUNE_CCXFAST_DROAM 14
-#endif
-#define WLC_E_PRUNE_WDS_PEER 15
-#define WLC_E_PRUNE_QBSS_LOAD 16
-#define WLC_E_PRUNE_HOME_AP 17
+#define WLC_E_PRUNE_CCXFAST_DROAM 14 /* CCX FAST ROAM: prune unqualified AP */
+#endif /* def BCMCCX */
+#define WLC_E_PRUNE_WDS_PEER 15 /* AP is already known to us as a WDS peer */
+#define WLC_E_PRUNE_QBSS_LOAD 16 /* QBSS LOAD - AAC is too low */
+#define WLC_E_PRUNE_HOME_AP 17 /* prune home AP */
#ifdef BCMCCX
-#define WLC_E_PRUNE_AP_BLOCKED 18
-#define WLC_E_PRUNE_NO_DIAG_SUPPORT 19
-#endif
-
-
-#define WLC_E_SUP_OTHER 0
-#define WLC_E_SUP_DECRYPT_KEY_DATA 1
-#define WLC_E_SUP_BAD_UCAST_WEP128 2
-#define WLC_E_SUP_BAD_UCAST_WEP40 3
-#define WLC_E_SUP_UNSUP_KEY_LEN 4
-#define WLC_E_SUP_PW_KEY_CIPHER 5
-#define WLC_E_SUP_MSG3_TOO_MANY_IE 6
-#define WLC_E_SUP_MSG3_IE_MISMATCH 7
-#define WLC_E_SUP_NO_INSTALL_FLAG 8
-#define WLC_E_SUP_MSG3_NO_GTK 9
-#define WLC_E_SUP_GRP_KEY_CIPHER 10
-#define WLC_E_SUP_GRP_MSG1_NO_GTK 11
-#define WLC_E_SUP_GTK_DECRYPT_FAIL 12
-#define WLC_E_SUP_SEND_FAIL 13
-#define WLC_E_SUP_DEAUTH 14
-#define WLC_E_SUP_WPA_PSK_TMO 15
-
-
-
-#ifdef WLAWDL
-#define WLC_E_AWDL_SCAN_START 1
-#define WLC_E_AWDL_SCAN_DONE 0
-
-#define WLC_E_AWDL_RX_ACT_FRAME 1
-#define WLC_E_AWDL_RX_PRB_RESP 2
-
-#endif
+#define WLC_E_PRUNE_AP_BLOCKED 18 /* prune blocked AP */
+#define WLC_E_PRUNE_NO_DIAG_SUPPORT 19 /* prune due to diagnostic mode not supported */
+#endif /* BCMCCX */
+
+/* WPA failure reason codes carried in the WLC_E_PSK_SUP event */
+#define WLC_E_SUP_OTHER 0 /* Other reason */
+#define WLC_E_SUP_DECRYPT_KEY_DATA 1 /* Decryption of key data failed */
+#define WLC_E_SUP_BAD_UCAST_WEP128 2 /* Illegal use of ucast WEP128 */
+#define WLC_E_SUP_BAD_UCAST_WEP40 3 /* Illegal use of ucast WEP40 */
+#define WLC_E_SUP_UNSUP_KEY_LEN 4 /* Unsupported key length */
+#define WLC_E_SUP_PW_KEY_CIPHER 5 /* Unicast cipher mismatch in pairwise key */
+#define WLC_E_SUP_MSG3_TOO_MANY_IE 6 /* WPA IE contains > 1 RSN IE in key msg 3 */
+#define WLC_E_SUP_MSG3_IE_MISMATCH 7 /* WPA IE mismatch in key message 3 */
+#define WLC_E_SUP_NO_INSTALL_FLAG 8 /* INSTALL flag unset in 4-way msg */
+#define WLC_E_SUP_MSG3_NO_GTK 9 /* encapsulated GTK missing from msg 3 */
+#define WLC_E_SUP_GRP_KEY_CIPHER 10 /* Multicast cipher mismatch in group key */
+#define WLC_E_SUP_GRP_MSG1_NO_GTK 11 /* encapsulated GTK missing from group msg 1 */
+#define WLC_E_SUP_GTK_DECRYPT_FAIL 12 /* GTK decrypt failure */
+#define WLC_E_SUP_SEND_FAIL 13 /* message send failure */
+#define WLC_E_SUP_DEAUTH 14 /* received FC_DEAUTH */
+#define WLC_E_SUP_WPA_PSK_TMO 15 /* WPA PSK 4-way handshake timeout */
+
+/* Event data for events that include frames received over the air */
+/* WLC_E_PROBRESP_MSG
+ * WLC_E_P2P_PROBREQ_MSG
+ * WLC_E_ACTION_FRAME_RX
+ */
typedef BWL_PRE_PACKED_STRUCT struct wl_event_rx_frame_data {
uint16 version;
- uint16 channel;
+ uint16 channel; /* Matches chanspec_t format from bcmwifi_channels.h */
int32 rssi;
uint32 mactime;
uint32 rate;
#define BCM_RX_FRAME_DATA_VERSION 1
-
+/* WLC_E_IF event data */
typedef struct wl_event_data_if {
- uint8 ifidx;
- uint8 opcode;
- uint8 reserved;
- uint8 bssidx;
- uint8 role;
+ uint8 ifidx; /* RTE virtual device index (for dongle) */
+ uint8 opcode; /* see I/F opcode */
+ uint8 reserved; /* bit mask (WLC_E_IF_FLAGS_XXX ) */
+ uint8 bssidx; /* bsscfg index */
+ uint8 role; /* see I/F role */
} wl_event_data_if_t;
-
-#define WLC_E_IF_ADD 1
-#define WLC_E_IF_DEL 2
-#define WLC_E_IF_CHANGE 3
-
-
-#define WLC_E_IF_ROLE_STA 0
-#define WLC_E_IF_ROLE_AP 1
-#define WLC_E_IF_ROLE_WDS 2
-#define WLC_E_IF_ROLE_P2P_GO 3
-#define WLC_E_IF_ROLE_P2P_CLIENT 4
+/* opcode in WLC_E_IF event */
+#define WLC_E_IF_ADD 1 /* bsscfg add */
+#define WLC_E_IF_DEL 2 /* bsscfg delete */
+#define WLC_E_IF_CHANGE 3 /* bsscfg role change */
+
+/* I/F role code in WLC_E_IF event */
+#define WLC_E_IF_ROLE_STA 0 /* Infra STA */
+#define WLC_E_IF_ROLE_AP 1 /* Access Point */
+#define WLC_E_IF_ROLE_WDS 2 /* WDS link */
+#define WLC_E_IF_ROLE_P2P_GO 3 /* P2P Group Owner */
+#define WLC_E_IF_ROLE_P2P_CLIENT 4 /* P2P Client */
#ifdef WLBTAMP
-#define WLC_E_IF_ROLE_BTA_CREATOR 5
-#define WLC_E_IF_ROLE_BTA_ACCEPTOR 6
+#define WLC_E_IF_ROLE_BTA_CREATOR 5 /* BT-AMP Creator */
+#define WLC_E_IF_ROLE_BTA_ACCEPTOR 6 /* BT-AMP Acceptor */
#endif
-
+/* WLC_E_RSSI event data */
typedef struct wl_event_data_rssi {
int32 rssi;
int32 snr;
int32 noise;
} wl_event_data_rssi_t;
+/* WLC_E_IF flag */
+#define WLC_E_IF_FLAGS_BSSCFG_NOIF 0x1 /* no host I/F creation needed */
-#define WLC_E_IF_FLAGS_BSSCFG_NOIF 0x1
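A hedged sketch of interpreting a WLC_E_IF event using the struct, opcodes, roles, and flag defined above; the "create a host netdev" policy and the function name are illustrative assumptions, not something this header mandates.

static int example_if_event_needs_host_netdev(const wl_event_data_if_t *e)
{
	if (e->opcode != WLC_E_IF_ADD)
		return 0;				/* only additions create an I/F */
	if (e->reserved & WLC_E_IF_FLAGS_BSSCFG_NOIF)
		return 0;				/* dongle asked for no host I/F */
	return e->role == WLC_E_IF_ROLE_STA ||
	       e->role == WLC_E_IF_ROLE_AP ||
	       e->role == WLC_E_IF_ROLE_P2P_GO ||
	       e->role == WLC_E_IF_ROLE_P2P_CLIENT;	/* roles handled in this sketch */
}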
-
-
-#define WLC_E_LINK_BCN_LOSS 1
-#define WLC_E_LINK_DISASSOC 2
-#define WLC_E_LINK_ASSOC_REC 3
-#define WLC_E_LINK_BSSCFG_DIS 4
-
-
-#define WLC_E_OVL_DOWNLOAD 0
-#define WLC_E_OVL_UPDATE_IND 1
+/* Reason codes for LINK */
+#define WLC_E_LINK_BCN_LOSS 1 /* Link down because of beacon loss */
+#define WLC_E_LINK_DISASSOC 2 /* Link down because of disassoc */
+#define WLC_E_LINK_ASSOC_REC 3 /* Link down because assoc recreate failed */
+#define WLC_E_LINK_BSSCFG_DIS 4 /* Link down due to bsscfg down */
+/* reason codes for WLC_E_OVERLAY_REQ event */
+#define WLC_E_OVL_DOWNLOAD 0 /* overlay download request */
+#define WLC_E_OVL_UPDATE_IND 1 /* device indication of host overlay update */
-#define WLC_E_TDLS_PEER_DISCOVERED 0
+/* reason codes for WLC_E_TDLS_PEER_EVENT event */
+#define WLC_E_TDLS_PEER_DISCOVERED 0 /* peer is ready to establish TDLS */
#define WLC_E_TDLS_PEER_CONNECTED 1
#define WLC_E_TDLS_PEER_DISCONNECTED 2
-#ifdef WLAWDL
-
-#define WLC_E_AWDL_SCAN_STATUS 0
-#define WLC_E_AWDL_RX_ACT_FRAME 1
-#define WLC_E_AWDL_RX_PRB_RESP 2
-#define WLC_E_AWDL_PHYCAL_STATUS 3
-#define WLC_E_AWDL_WOWL_NULLPKT 4
-#define WLC_E_AWDL_OOB_AF_STATUS 5
-
-
-#define WLC_E_AWDL_SCAN_START 1
-#define WLC_E_AWDL_SCAN_DONE 0
-#define WLC_E_AWDL_PHYCAL_START 1
-#define WLC_E_AWDL_PHYCAL_DONE 0
-#endif
+/* reason codes for WLC_E_RMC_EVENT event */
+#define WLC_E_REASON_RMC_NONE 0
+#define WLC_E_REASON_RMC_AR_LOST 1
+#define WLC_E_REASON_RMC_AR_NO_ACK 2
+/* GAS event data */
typedef BWL_PRE_PACKED_STRUCT struct wl_event_gas {
- uint16 channel;
- uint8 dialog_token;
- uint8 fragment_id;
- uint16 status_code;
- uint16 data_len;
- uint8 data[1];
+ uint16 channel; /* channel of GAS protocol */
+ uint8 dialog_token; /* GAS dialog token */
+ uint8 fragment_id; /* fragment id */
+ uint16 status_code; /* status code on GAS completion */
+ uint16 data_len; /* length of data to follow */
+ uint8 data[1]; /* variable length specified by data_len */
} BWL_POST_PACKED_STRUCT wl_event_gas_t;
-
+/* service discovery TLV */
typedef BWL_PRE_PACKED_STRUCT struct wl_sd_tlv {
- uint16 length;
- uint8 protocol;
- uint8 transaction_id;
- uint8 status_code;
- uint8 data[1];
+ uint16 length; /* length of response_data */
+ uint8 protocol; /* service protocol type */
+ uint8 transaction_id; /* service transaction id */
+ uint8 status_code; /* status code */
+ uint8 data[1]; /* response data */
} BWL_POST_PACKED_STRUCT wl_sd_tlv_t;
-
+/* service discovery event data */
typedef BWL_PRE_PACKED_STRUCT struct wl_event_sd {
- uint16 channel;
- uint8 count;
- wl_sd_tlv_t tlv[1];
+ uint16 channel; /* channel */
+ uint8 count; /* number of tlvs */
+ wl_sd_tlv_t tlv[1]; /* service discovery TLV */
} BWL_POST_PACKED_STRUCT wl_event_sd_t;
-
-#define WLC_E_PROXD_FOUND 1
-#define WLC_E_PROXD_GONE 2
-
-
-typedef BWL_PRE_PACKED_STRUCT struct awdl_aws_event_data {
- uint32 fw_time;
- struct ether_addr current_master;
- uint16 aw_counter;
- uint8 aw_ext_count;
- uint8 aw_role;
- uint8 flags;
- uint16 aw_chan;
-} BWL_POST_PACKED_STRUCT awdl_aws_event_data_t;
-
-
-#define AWDL_AW_LAST_EXT 0x01
-
-
+/* Reason codes for WLC_E_PROXD */
+#define WLC_E_PROXD_FOUND 1 /* Found a proximity device */
+#define WLC_E_PROXD_GONE 2 /* Lost a proximity device */
+#define WLC_E_PROXD_START 3 /* used by: target */
+#define WLC_E_PROXD_STOP 4 /* used by: target */
+#define WLC_E_PROXD_COMPLETED 5 /* used by: initiator completed */
+#define WLC_E_PROXD_ERROR 6 /* used by both initiator and target */
+#define WLC_E_PROXD_COLLECT_START 7 /* used by: target & initiator */
+#define WLC_E_PROXD_COLLECT_STOP 8 /* used by: target */
+#define WLC_E_PROXD_COLLECT_COMPLETED 9 /* used by: initiator completed */
+#define WLC_E_PROXD_COLLECT_ERROR 10 /* used by both initiator and target */
+#define WLC_E_PROXD_NAN_EVENT 11 /* used by both initiator and target */
+
+/* proxd_event data */
+typedef struct ftm_sample {
+ uint32 value; /* RTT in ns */
+ int8 rssi; /* RSSI */
+} ftm_sample_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct proxd_event_data {
+ uint16 ver; /* version */
+ uint16 mode; /* mode: target/initiator */
+ uint16 method; /* method: rssi/TOF/AOA */
+ uint8 err_code; /* error classification */
+ uint8 TOF_type; /* one way or two way TOF */
+ uint8 OFDM_frame_type; /* legacy or VHT */
+ uint8 bandwidth; /* Bandwidth: 20, 40 or 80 MHz */
+ struct ether_addr peer_mac; /* peer MAC address (e.g. for the target: the initiator's) */
+ uint32 distance; /* distance to the target, in meters */
+ uint32 meanrtt; /* mean delta */
+ uint32 modertt; /* Mode delta */
+ uint32 medianrtt; /* median RTT */
+ uint32 sdrtt; /* Standard deviation of RTT */
+ int gdcalcresult; /* Software or Hardware; somewhat redundant, but if */
+ /* the frame type is VHT, it should be done in hardware */
+ int16 avg_rssi; /* avg RSSI across the FTM frames */
+ int16 validfrmcnt; /* Firmware's valid frame counts */
+ char *peer_router_info; /* Peer router information if available in TLV, */
+ /* We will add this field later */
+ int32 var1; /* average of group delay */
+ int32 var2; /* average of threshold crossing */
+ int32 var3; /* difference between group delay and threshold crossing */
+ /* raw Fine Time Measurements (ftm) data */
+ uint16 ftm_unit; /* FTM count resolution in picoseconds, 6250 ps by default */
+ uint16 ftm_cnt; /* number of RTD measurements/length of the ftm buffer */
+ ftm_sample_t ftm_buff[1]; /* 1 ... ftm_cnt */
+} BWL_POST_PACKED_STRUCT wl_proxd_event_data_t;
+
+
+/* Video Traffic Interference Monitor Event */
+#define INTFER_EVENT_VERSION 1
+#define INTFER_STREAM_TYPE_NONTCP 1
+#define INTFER_STREAM_TYPE_TCP 2
+#define WLINTFER_STATS_NSMPLS 4
+typedef struct wl_intfer_event {
+ uint16 version; /* version */
+ uint16 status; /* status */
+ uint8 txfail_histo[WLINTFER_STATS_NSMPLS]; /* txfail histo */
+} wl_intfer_event_t;
+
+/* WLC_E_PSTA_PRIMARY_INTF_IND event data */
+typedef struct wl_psta_primary_intf_event {
+ struct ether_addr prim_ea; /* primary intf ether addr */
+} wl_psta_primary_intf_event_t;
+
+
+/* ********** NAN protocol events/subevents ********** */
+#define NAN_EVENT_BUFFER_SIZE 512 /* max size */
+/* nan application events to the host driver */
+enum nan_app_events {
+ WL_NAN_EVENT_START = 1, /* NAN cluster started */
+ WL_NAN_EVENT_JOIN = 2, /* Joined to a NAN cluster */
+ WL_NAN_EVENT_ROLE = 3, /* Role or State changed */
+ WL_NAN_EVENT_SCAN_COMPLETE = 4,
+ WL_NAN_EVENT_DISCOVERY_RESULT = 5,
+ WL_NAN_EVENT_REPLIED = 6,
+ WL_NAN_EVENT_TERMINATED = 7, /* the instance ID will be present in the ev data */
+ WL_NAN_EVENT_RECEIVE = 8,
+ WL_NAN_EVENT_STATUS_CHG = 9, /* generated on any change in nan_mac status */
+ WL_NAN_EVENT_MERGE = 10, /* Merged to a NAN cluster */
+ WL_NAN_EVENT_STOP = 11, /* NAN stopped */
+ WL_NAN_EVENT_INVALID = 12, /* delimiter for max value */
+};
+#define IS_NAN_EVT_ON(var, evt) ((var & (1 << (evt-1))) != 0)
+/* ******************* end of NAN section *************** */
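IS_NAN_EVT_ON() tests bit (evt - 1) of a host-maintained mask. A minimal sketch follows, in which nan_event_mask and the chosen default bits are assumptions for illustration; only the enum values and the macro come from this header.

static uint32 nan_event_mask = (1u << (WL_NAN_EVENT_START - 1)) |
                               (1u << (WL_NAN_EVENT_DISCOVERY_RESULT - 1));

static int example_nan_event_enabled(enum nan_app_events evt)
{
	return IS_NAN_EVT_ON(nan_event_mask, evt);	/* non-zero if bit (evt-1) is set */
}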
+
+/* This marks the end of a packed structure section. */
#include <packed_section_end.h>
-#endif
+#endif /* _BCMEVENT_H_ */
*
* Fundamental constants relating to IP Protocol
*
- * $Id: bcmip.h 384540 2013-02-12 04:28:58Z $
+ * $Id: bcmip.h 458522 2014-02-27 02:26:15Z $
*/
#ifndef _bcmip_h_
#include <typedefs.h>
#endif
-
+/* This marks the start of a packed structure section. */
#include <packed_section_start.h>
-
-#define IP_VER_OFFSET 0x0
-#define IP_VER_MASK 0xf0
-#define IP_VER_SHIFT 4
-#define IP_VER_4 4
-#define IP_VER_6 6
+/* IPV4 and IPV6 common */
+#define IP_VER_OFFSET 0x0 /* offset to version field */
+#define IP_VER_MASK 0xf0 /* version mask */
+#define IP_VER_SHIFT 4 /* version shift */
+#define IP_VER_4 4 /* version number for IPV4 */
+#define IP_VER_6 6 /* version number for IPV6 */
#define IP_VER(ip_body) \
((((uint8 *)(ip_body))[IP_VER_OFFSET] & IP_VER_MASK) >> IP_VER_SHIFT)
-#define IP_PROT_ICMP 0x1
-#define IP_PROT_IGMP 0x2
-#define IP_PROT_TCP 0x6
-#define IP_PROT_UDP 0x11
-#define IP_PROT_ICMP6 0x3a
-
-
-#define IPV4_VER_HL_OFFSET 0
-#define IPV4_TOS_OFFSET 1
-#define IPV4_PKTLEN_OFFSET 2
-#define IPV4_PKTFLAG_OFFSET 6
-#define IPV4_PROT_OFFSET 9
-#define IPV4_CHKSUM_OFFSET 10
-#define IPV4_SRC_IP_OFFSET 12
-#define IPV4_DEST_IP_OFFSET 16
-#define IPV4_OPTIONS_OFFSET 20
-#define IPV4_MIN_HEADER_LEN 20
-
-
-#define IPV4_VER_MASK 0xf0
-#define IPV4_VER_SHIFT 4
-
-#define IPV4_HLEN_MASK 0x0f
+#define IP_PROT_ICMP 0x1 /* ICMP protocol */
+#define IP_PROT_IGMP 0x2 /* IGMP protocol */
+#define IP_PROT_TCP 0x6 /* TCP protocol */
+#define IP_PROT_UDP 0x11 /* UDP protocol type */
+#define IP_PROT_ICMP6 0x3a /* ICMPv6 protocol type */
+
+/* IPV4 field offsets */
+#define IPV4_VER_HL_OFFSET 0 /* version and ihl byte offset */
+#define IPV4_TOS_OFFSET 1 /* type of service offset */
+#define IPV4_PKTLEN_OFFSET 2 /* packet length offset */
+#define IPV4_PKTFLAG_OFFSET 6 /* more-frag,dont-frag flag offset */
+#define IPV4_PROT_OFFSET 9 /* protocol type offset */
+#define IPV4_CHKSUM_OFFSET 10 /* IP header checksum offset */
+#define IPV4_SRC_IP_OFFSET 12 /* src IP addr offset */
+#define IPV4_DEST_IP_OFFSET 16 /* dest IP addr offset */
+#define IPV4_OPTIONS_OFFSET 20 /* IP options offset */
+#define IPV4_MIN_HEADER_LEN 20 /* Minimum size for an IP header (no options) */
+
+/* IPV4 field decodes */
+#define IPV4_VER_MASK 0xf0 /* IPV4 version mask */
+#define IPV4_VER_SHIFT 4 /* IPV4 version shift */
+
+#define IPV4_HLEN_MASK 0x0f /* IPV4 header length mask */
#define IPV4_HLEN(ipv4_body) (4 * (((uint8 *)(ipv4_body))[IPV4_VER_HL_OFFSET] & IPV4_HLEN_MASK))
-#define IPV4_ADDR_LEN 4
+#define IPV4_ADDR_LEN 4 /* IPV4 address length */
#define IPV4_ADDR_NULL(a) ((((uint8 *)(a))[0] | ((uint8 *)(a))[1] | \
((uint8 *)(a))[2] | ((uint8 *)(a))[3]) == 0)
#define IPV4_ADDR_BCAST(a) ((((uint8 *)(a))[0] & ((uint8 *)(a))[1] & \
((uint8 *)(a))[2] & ((uint8 *)(a))[3]) == 0xff)
-#define IPV4_TOS_DSCP_MASK 0xfc
-#define IPV4_TOS_DSCP_SHIFT 2
+#define IPV4_TOS_DSCP_MASK 0xfc /* DiffServ codepoint mask */
+#define IPV4_TOS_DSCP_SHIFT 2 /* DiffServ codepoint shift */
#define IPV4_TOS(ipv4_body) (((uint8 *)(ipv4_body))[IPV4_TOS_OFFSET])
-#define IPV4_TOS_PREC_MASK 0xe0
-#define IPV4_TOS_PREC_SHIFT 5
+#define IPV4_TOS_PREC_MASK 0xe0 /* Historical precedence mask */
+#define IPV4_TOS_PREC_SHIFT 5 /* Historical precedence shift */
-#define IPV4_TOS_LOWDELAY 0x10
-#define IPV4_TOS_THROUGHPUT 0x8
-#define IPV4_TOS_RELIABILITY 0x4
+#define IPV4_TOS_LOWDELAY 0x10 /* Lowest delay requested */
+#define IPV4_TOS_THROUGHPUT 0x8 /* Best throughput requested */
+#define IPV4_TOS_RELIABILITY 0x4 /* Most reliable delivery requested */
-#define IPV4_PROT(ipv4_body) (((uint8 *)(ipv4_body))[IPV4_PROT_OFFSET])
+#define IPV4_TOS_ROUTINE 0
+#define IPV4_TOS_PRIORITY 1
+#define IPV4_TOS_IMMEDIATE 2
+#define IPV4_TOS_FLASH 3
+#define IPV4_TOS_FLASHOVERRIDE 4
+#define IPV4_TOS_CRITICAL 5
+#define IPV4_TOS_INETWORK_CTRL 6
+#define IPV4_TOS_NETWORK_CTRL 7
-#define IPV4_FRAG_RESV 0x8000
-#define IPV4_FRAG_DONT 0x4000
-#define IPV4_FRAG_MORE 0x2000
-#define IPV4_FRAG_OFFSET_MASK 0x1fff
+#define IPV4_PROT(ipv4_body) (((uint8 *)(ipv4_body))[IPV4_PROT_OFFSET])
-#define IPV4_ADDR_STR_LEN 16
+#define IPV4_FRAG_RESV 0x8000 /* Reserved */
+#define IPV4_FRAG_DONT 0x4000 /* Don't fragment */
+#define IPV4_FRAG_MORE 0x2000 /* More fragments */
+#define IPV4_FRAG_OFFSET_MASK 0x1fff /* Fragment offset */
+#define IPV4_ADDR_STR_LEN 16 /* Max IP address length in string format */
+/* IPV4 packet formats */
BWL_PRE_PACKED_STRUCT struct ipv4_addr {
uint8 addr[IPV4_ADDR_LEN];
} BWL_POST_PACKED_STRUCT;
BWL_PRE_PACKED_STRUCT struct ipv4_hdr {
- uint8 version_ihl;
- uint8 tos;
- uint16 tot_len;
+ uint8 version_ihl; /* Version and Internet Header Length */
+ uint8 tos; /* Type Of Service */
+ uint16 tot_len; /* Number of bytes in packet (max 65535) */
uint16 id;
- uint16 frag;
- uint8 ttl;
- uint8 prot;
- uint16 hdr_chksum;
- uint8 src_ip[IPV4_ADDR_LEN];
- uint8 dst_ip[IPV4_ADDR_LEN];
+ uint16 frag; /* 3 flag bits and fragment offset */
+ uint8 ttl; /* Time To Live */
+ uint8 prot; /* Protocol */
+ uint16 hdr_chksum; /* IP header checksum */
+ uint8 src_ip[IPV4_ADDR_LEN]; /* Source IP Address */
+ uint8 dst_ip[IPV4_ADDR_LEN]; /* Destination IP Address */
} BWL_POST_PACKED_STRUCT;
+/* IPV6 field offsets */
+#define IPV6_PAYLOAD_LEN_OFFSET 4 /* payload length offset */
+#define IPV6_NEXT_HDR_OFFSET 6 /* next header/protocol offset */
+#define IPV6_HOP_LIMIT_OFFSET 7 /* hop limit offset */
+#define IPV6_SRC_IP_OFFSET 8 /* src IP addr offset */
+#define IPV6_DEST_IP_OFFSET 24 /* dst IP addr offset */
-#define IPV6_PAYLOAD_LEN_OFFSET 4
-#define IPV6_NEXT_HDR_OFFSET 6
-#define IPV6_HOP_LIMIT_OFFSET 7
-#define IPV6_SRC_IP_OFFSET 8
-#define IPV6_DEST_IP_OFFSET 24
-
-
+/* IPV6 field decodes */
#define IPV6_TRAFFIC_CLASS(ipv6_body) \
(((((uint8 *)(ipv6_body))[0] & 0x0f) << 4) | \
((((uint8 *)(ipv6_body))[1] & 0xf0) >> 4))
#define IPV6_PROT(ipv6_body) IPV6_NEXT_HDR(ipv6_body)
-#define IPV6_ADDR_LEN 16
-
+#define IPV6_ADDR_LEN 16 /* IPV6 address length */
+/* IPV4 TOS or IPV6 Traffic Classifier or 0 */
#define IP_TOS46(ip_body) \
(IP_VER(ip_body) == IP_VER_4 ? IPV4_TOS(ip_body) : \
IP_VER(ip_body) == IP_VER_6 ? IPV6_TRAFFIC_CLASS(ip_body) : 0)
+#define IP_DSCP46(ip_body) (IP_TOS46(ip_body) >> IPV4_TOS_DSCP_SHIFT)
+/* IPV4 or IPV6 Protocol Classifier or 0 */
+#define IP_PROT46(ip_body) \
+ (IP_VER(ip_body) == IP_VER_4 ? IPV4_PROT(ip_body) : \
+ IP_VER(ip_body) == IP_VER_6 ? IPV6_PROT(ip_body) : 0)
+
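As a hedged example of how the version and TOS macros compose, the helper below mirrors IP_DSCP46 but guards against non-IP input; it assumes pkt points at the first byte of an IPv4 or IPv6 header, and the function name is invented for the sketch.

static int example_ip_dscp(void *pkt)
{
	if (IP_VER(pkt) != IP_VER_4 && IP_VER(pkt) != IP_VER_6)
		return -1;				/* not an IP header */
	/* DSCP is the top six bits of the TOS / Traffic Class byte */
	return (IP_TOS46(pkt) & IPV4_TOS_DSCP_MASK) >> IPV4_TOS_DSCP_SHIFT;
}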
+/* IPV6 extension headers (options) */
#define IPV6_EXTHDR_HOP 0
#define IPV6_EXTHDR_ROUTING 43
#define IPV6_EXTHDR_FRAGMENT 44
#define IPV4_ISMULTI(a) (((a) & 0xf0000000) == 0xe0000000)
+#define IPV4_MCAST_TO_ETHER_MCAST(ipv4, ether) \
+{ \
+ ether[0] = 0x01; \
+ ether[1] = 0x00; \
+ ether[2] = 0x5E; \
+ ether[3] = (ipv4 & 0x7f0000) >> 16; \
+ ether[4] = (ipv4 & 0xff00) >> 8; \
+ ether[5] = (ipv4 & 0xff); \
+}
+/* This marks the end of a packed structure section. */
#include <packed_section_end.h>
-#endif
+#define IPV4_ADDR_STR "%d.%d.%d.%d"
+#define IPV4_ADDR_TO_STR(addr) ((uint32)addr & 0xff000000) >> 24, \
+ ((uint32)addr & 0x00ff0000) >> 16, \
+ ((uint32)addr & 0x0000ff00) >> 8, \
+ ((uint32)addr & 0x000000ff)
+
+#endif /* _bcmip_h_ */
--- /dev/null
+/*
+ * $Copyright Open Broadcom Corporation$
+ *
+ * Fundamental constants relating to Neighbor Discovery Protocol
+ *
+ * $Id: bcmipv6.h 439574 2013-11-27 06:37:37Z $
+ */
+
+#ifndef _bcmipv6_h_
+#define _bcmipv6_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+/* Extension headers */
+#define IPV6_EXT_HOP 0
+#define IPV6_EXT_ROUTE 43
+#define IPV6_EXT_FRAG 44
+#define IPV6_EXT_DEST 60
+#define IPV6_EXT_ESEC 50
+#define IPV6_EXT_AUTH 51
+
+/* Minimum size (extension header "word" length) */
+#define IPV6_EXT_WORD 8
+
+/* Offsets for most extension headers */
+#define IPV6_EXT_NEXTHDR 0
+#define IPV6_EXT_HDRLEN 1
+
+/* Constants specific to fragmentation header */
+#define IPV6_FRAG_MORE_MASK 0x0001
+#define IPV6_FRAG_MORE_SHIFT 0
+#define IPV6_FRAG_OFFS_MASK 0xfff8
+#define IPV6_FRAG_OFFS_SHIFT 3
+
+/* For icmpv6 */
+#define ICMPV6_HEADER_TYPE 0x3A
+#define ICMPV6_PKT_TYPE_RA 134
+#define ICMPV6_PKT_TYPE_NS 135
+#define ICMPV6_PKT_TYPE_NA 136
+
+#define ICMPV6_ND_OPT_TYPE_TARGET_MAC 2
+#define ICMPV6_ND_OPT_TYPE_SRC_MAC 1
+
+#define ICMPV6_ND_OPT_LEN_LINKADDR 1
+
+#define IPV6_VERSION 6
+#define IPV6_HOP_LIMIT 255
+
+#define IPV6_ADDR_NULL(a) ((a[0] | a[1] | a[2] | a[3] | a[4] | \
+ a[5] | a[6] | a[7] | a[8] | a[9] | \
+ a[10] | a[11] | a[12] | a[13] | \
+ a[14] | a[15]) == 0)
+
+#define IPV6_ADDR_LOCAL(a) (((a[0] == 0xfe) && (a[1] & 0x80))? TRUE: FALSE)
+
+/* IPV6 address */
+BWL_PRE_PACKED_STRUCT struct ipv6_addr {
+ uint8 addr[16];
+} BWL_POST_PACKED_STRUCT;
+
+
+/* ICMPV6 Header */
+BWL_PRE_PACKED_STRUCT struct icmp6_hdr {
+ uint8 icmp6_type;
+ uint8 icmp6_code;
+ uint16 icmp6_cksum;
+ BWL_PRE_PACKED_STRUCT union {
+ uint32 reserved;
+ BWL_PRE_PACKED_STRUCT struct nd_advt {
+ uint32 reserved1:5,
+ override:1,
+ solicited:1,
+ router:1,
+ reserved2:24;
+ } BWL_POST_PACKED_STRUCT nd_advt;
+ } BWL_POST_PACKED_STRUCT opt;
+} BWL_POST_PACKED_STRUCT;
+
+/* Ipv6 Header Format */
+BWL_PRE_PACKED_STRUCT struct ipv6_hdr {
+ uint8 priority:4,
+ version:4;
+ uint8 flow_lbl[3];
+ uint16 payload_len;
+ uint8 nexthdr;
+ uint8 hop_limit;
+ struct ipv6_addr saddr;
+ struct ipv6_addr daddr;
+} BWL_POST_PACKED_STRUCT;
+
+/* Neighbor Advertisement/Solicitation Packet Structure */
+BWL_PRE_PACKED_STRUCT struct nd_msg {
+ struct icmp6_hdr icmph;
+ struct ipv6_addr target;
+} BWL_POST_PACKED_STRUCT;
+
+
+/* Neighbor Solicitation/Advertisement Option Structure */
+BWL_PRE_PACKED_STRUCT struct nd_msg_opt {
+ uint8 type;
+ uint8 len;
+ uint8 mac_addr[ETHER_ADDR_LEN];
+} BWL_POST_PACKED_STRUCT;
+
+/* Ipv6 Fragmentation Header */
+BWL_PRE_PACKED_STRUCT struct ipv6_frag {
+ uint8 nexthdr;
+ uint8 reserved;
+ uint16 frag_offset;
+ uint32 ident;
+} BWL_POST_PACKED_STRUCT;
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+static const struct ipv6_addr all_node_ipv6_maddr = {
+ { 0xff, 0x2, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 1
+ }};
+
+#define IPV6_ISMULTI(a) (a[0] == 0xff)
+
+#define IPV6_MCAST_TO_ETHER_MCAST(ipv6, ether) \
+{ \
+ ether[0] = 0x33; \
+ ether[1] = 0x33; \
+ ether[2] = ipv6[12]; \
+ ether[3] = ipv6[13]; \
+ ether[4] = ipv6[14]; \
+ ether[5] = ipv6[15]; \
+}
+
+#endif /* !defined(_bcmipv6_h_) */
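A minimal sketch, assuming ip6 points at a 16-byte IPv6 address and ether at a 6-byte buffer; it simply combines IPV6_ISMULTI with IPV6_MCAST_TO_ETHER_MCAST from the header above, and the function name is illustrative.

static int example_ipv6_mcast_dest(const uint8 *ip6, uint8 *ether)
{
	if (!IPV6_ISMULTI(ip6))
		return -1;			/* not a multicast group address */
	IPV6_MCAST_TO_ETHER_MCAST(ip6, ether);	/* 33:33:xx:xx:xx:xx mapping */
	return 0;
}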
--- /dev/null
+/*
+ * Fundamental constants relating to TCP Protocol
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: bcmtcp.h 458522 2014-02-27 02:26:15Z $
+ */
+
+#ifndef _bcmtcp_h_
+#define _bcmtcp_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+
+#define TCP_SRC_PORT_OFFSET 0 /* TCP source port offset */
+#define TCP_DEST_PORT_OFFSET 2 /* TCP dest port offset */
+#define TCP_SEQ_NUM_OFFSET 4 /* TCP sequence number offset */
+#define TCP_ACK_NUM_OFFSET 8 /* TCP acknowledgement number offset */
+#define TCP_HLEN_OFFSET 12 /* HLEN and reserved bits offset */
+#define TCP_FLAGS_OFFSET 13 /* FLAGS and reserved bits offset */
+#define TCP_CHKSUM_OFFSET 16 /* TCP body checksum offset */
+
+#define TCP_PORT_LEN 2 /* TCP port field length */
+
+/* 8bit TCP flag field */
+#define TCP_FLAG_URG 0x20
+#define TCP_FLAG_ACK 0x10
+#define TCP_FLAG_PSH 0x08
+#define TCP_FLAG_RST 0x04
+#define TCP_FLAG_SYN 0x02
+#define TCP_FLAG_FIN 0x01
+
+#define TCP_HLEN_MASK 0xf000
+#define TCP_HLEN_SHIFT 12
+
+/* These fields are stored in network order */
+BWL_PRE_PACKED_STRUCT struct bcmtcp_hdr
+{
+ uint16 src_port; /* Source Port Address */
+ uint16 dst_port; /* Destination Port Address */
+ uint32 seq_num; /* TCP Sequence Number */
+ uint32 ack_num; /* TCP Acknowledgement Number */
+ uint16 hdrlen_rsvd_flags; /* Header length, reserved bits and flags */
+ uint16 tcpwin; /* TCP window */
+ uint16 chksum; /* Segment checksum with pseudoheader */
+ uint16 urg_ptr; /* Points to seq-num of byte following urg data */
+} BWL_POST_PACKED_STRUCT;
+
+#define TCP_MIN_HEADER_LEN 20
+
+#define TCP_HDRLEN_MASK 0xf0
+#define TCP_HDRLEN_SHIFT 4
+#define TCP_HDRLEN(hdrlen) (((hdrlen) & TCP_HDRLEN_MASK) >> TCP_HDRLEN_SHIFT)
+
+#define TCP_FLAGS_MASK 0x1f
+#define TCP_FLAGS(hdrlen) ((hdrlen) & TCP_FLAGS_MASK)
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+/* Sequence number comparisons that tolerate 32-bit wrap-around. */
+#define IS_TCPSEQ_GE(a, b) ((a - b) < NBITVAL(31)) /* a >= b */
+#define IS_TCPSEQ_LE(a, b) ((b - a) < NBITVAL(31)) /* a <= b */
+#define IS_TCPSEQ_GT(a, b) !IS_TCPSEQ_LE(a, b) /* a > b */
+#define IS_TCPSEQ_LT(a, b) !IS_TCPSEQ_GE(a, b) /* a < b */
+
+#endif /* #ifndef _bcmtcp_h_ */
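As a hedged illustration of the offsets and masks above, the parser below assumes tcp points at the first byte of a TCP header in network byte order; the output parameters and names are invented for the sketch.

static void example_parse_tcp(const uint8 *tcp, uint16 *dst_port,
                              unsigned int *hdr_bytes, int *is_syn)
{
	*dst_port = (uint16)((tcp[TCP_DEST_PORT_OFFSET] << 8) |
	                     tcp[TCP_DEST_PORT_OFFSET + 1]);
	/* data offset is in 32-bit words in the upper nibble of byte 12 */
	*hdr_bytes = TCP_HDRLEN(tcp[TCP_HLEN_OFFSET]) * 4;
	*is_syn = (tcp[TCP_FLAGS_OFFSET] & TCP_FLAG_SYN) != 0;
}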
--- /dev/null
+/*
+ * Copyright (C) 2014, Broadcom Corporation
+ * All Rights Reserved.
+ *
+ * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom Corporation;
+ * the contents of this file may not be disclosed to third parties, copied
+ * or duplicated in any form, in whole or in part, without the prior
+ * written permission of Broadcom Corporation.
+ *
+ * Fundamental constants relating to UDP Protocol
+ *
+ * $Id: bcmudp.h 382882 2013-02-04 23:24:31Z $
+ */
+
+#ifndef _bcmudp_h_
+#define _bcmudp_h_
+
+#ifndef _TYPEDEFS_H_
+#include <typedefs.h>
+#endif
+
+/* This marks the start of a packed structure section. */
+#include <packed_section_start.h>
+
+
+/* UDP header */
+#define UDP_DEST_PORT_OFFSET 2 /* UDP dest port offset */
+#define UDP_LEN_OFFSET 4 /* UDP length offset */
+#define UDP_CHKSUM_OFFSET 6 /* UDP body checksum offset */
+
+#define UDP_HDR_LEN 8 /* UDP header length */
+#define UDP_PORT_LEN 2 /* UDP port length */
+
+/* These fields are stored in network order */
+BWL_PRE_PACKED_STRUCT struct bcmudp_hdr
+{
+ uint16 src_port; /* Source Port Address */
+ uint16 dst_port; /* Destination Port Address */
+ uint16 len; /* Number of bytes in datagram including header */
+ uint16 chksum; /* entire datagram checksum with pseudoheader */
+} BWL_POST_PACKED_STRUCT;
+
+/* This marks the end of a packed structure section. */
+#include <packed_section_end.h>
+
+#endif /* #ifndef _bcmudp_h_ */
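A small hedged sketch using the offsets above; udp is assumed to point at the start of a UDP header, with fields in network byte order, and the helper name is illustrative.

static uint16 example_udp_dest_port(const uint8 *udp)
{
	return (uint16)((udp[UDP_DEST_PORT_OFFSET] << 8) |
	                udp[UDP_DEST_PORT_OFFSET + 1]);
}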
* IEEE Std 802.1X-2001
* IEEE 802.1X RADIUS Usage Guidelines
*
- * Copyright (C) 2002 Broadcom Corporation
+ * Copyright Open Broadcom Corporation
*
- * $Id: eapol.h 382882 2013-02-04 23:24:31Z $
+ * $Id: eapol.h 452703 2014-01-31 20:33:06Z $
*/
#ifndef _eapol_h_
#define EAPOL_WPA_KEY_LEN 95
/* WPA/802.11i/WPA2 KEY KEY_INFO bits */
+#define WPA_KEY_DESC_OSEN 0x0
#define WPA_KEY_DESC_V1 0x01
#define WPA_KEY_DESC_V2 0x02
#define WPA_KEY_DESC_V3 0x03
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: ethernet.h 384540 2013-02-12 04:28:58Z $
+ * $Id: ethernet.h 473238 2014-04-28 19:14:56Z $
*/
-#ifndef _NET_ETHERNET_H_
+#ifndef _NET_ETHERNET_H_ /* use native BSD ethernet.h when available */
#define _NET_ETHERNET_H_
#ifndef _TYPEDEFS_H_
#include "typedefs.h"
#endif
-
+/* This marks the start of a packed structure section. */
#include <packed_section_start.h>
-
+/*
+ * The number of bytes in an ethernet (MAC) address.
+ */
#define ETHER_ADDR_LEN 6
-
+/*
+ * The number of bytes in the type field.
+ */
#define ETHER_TYPE_LEN 2
-
+/*
+ * The number of bytes in the trailing CRC field.
+ */
#define ETHER_CRC_LEN 4
-
+/*
+ * The length of the combined header.
+ */
#define ETHER_HDR_LEN (ETHER_ADDR_LEN * 2 + ETHER_TYPE_LEN)
-
+/*
+ * The minimum packet length.
+ */
#define ETHER_MIN_LEN 64
-
+/*
+ * The minimum packet user data length.
+ */
#define ETHER_MIN_DATA 46
-
+/*
+ * The maximum packet length.
+ */
#define ETHER_MAX_LEN 1518
-
+/*
+ * The maximum packet user data length.
+ */
#define ETHER_MAX_DATA 1500
-
-#define ETHER_TYPE_MIN 0x0600
-#define ETHER_TYPE_IP 0x0800
-#define ETHER_TYPE_ARP 0x0806
-#define ETHER_TYPE_8021Q 0x8100
-#define ETHER_TYPE_IPV6 0x86dd
-#define ETHER_TYPE_BRCM 0x886c
-#define ETHER_TYPE_802_1X 0x888e
+/* ether types */
+#define ETHER_TYPE_MIN 0x0600 /* Anything less than MIN is a length */
+#define ETHER_TYPE_IP 0x0800 /* IP */
+#define ETHER_TYPE_ARP 0x0806 /* ARP */
+#define ETHER_TYPE_8021Q 0x8100 /* 802.1Q */
+#define ETHER_TYPE_IPV6 0x86dd /* IPv6 */
+#define ETHER_TYPE_BRCM 0x886c /* Broadcom Corp. */
+#define ETHER_TYPE_802_1X 0x888e /* 802.1x */
#ifdef PLC
-#define ETHER_TYPE_88E1 0x88e1
-#define ETHER_TYPE_8912 0x8912
-#define ETHER_TYPE_GIGLED 0xffff
-#endif
-#define ETHER_TYPE_802_1X_PREAUTH 0x88c7
-#define ETHER_TYPE_WAI 0x88b4
-#define ETHER_TYPE_89_0D 0x890d
-
-#define ETHER_TYPE_PPP_SES 0x8864
-
+#define ETHER_TYPE_88E1 0x88e1 /* GIGLE */
+#define ETHER_TYPE_8912 0x8912 /* GIGLE */
+#define ETHER_TYPE_GIGLED 0xffff /* GIGLE */
+#endif /* PLC */
+#define ETHER_TYPE_802_1X_PREAUTH 0x88c7 /* 802.1x preauthentication */
+#define ETHER_TYPE_WAI 0x88b4 /* WAI */
+#define ETHER_TYPE_89_0D 0x890d /* 89-0d frame for TDLS */
-#define ETHER_BRCM_SUBTYPE_LEN 4
+#define ETHER_TYPE_PPP_SES 0x8864 /* PPPoE Session */
+#define ETHER_TYPE_IAPP_L2_UPDATE 0x6 /* IAPP L2 update frame */
-#define ETHER_DEST_OFFSET (0 * ETHER_ADDR_LEN)
-#define ETHER_SRC_OFFSET (1 * ETHER_ADDR_LEN)
-#define ETHER_TYPE_OFFSET (2 * ETHER_ADDR_LEN)
+/* Broadcom subtype follows ethertype; First 2 bytes are reserved; Next 2 are subtype; */
+#define ETHER_BRCM_SUBTYPE_LEN 4 /* Broadcom 4 byte subtype */
+/* ether header */
+#define ETHER_DEST_OFFSET (0 * ETHER_ADDR_LEN) /* dest address offset */
+#define ETHER_SRC_OFFSET (1 * ETHER_ADDR_LEN) /* src address offset */
+#define ETHER_TYPE_OFFSET (2 * ETHER_ADDR_LEN) /* ether type offset */
+/*
+ * A macro to validate an Ethernet frame length.
+ */
#define ETHER_IS_VALID_LEN(foo) \
((foo) >= ETHER_MIN_LEN && (foo) <= ETHER_MAX_LEN)
((uint8 *)ea)[5] = ((mgrp_ip) >> 0) & 0xff; \
}
-#ifndef __INCif_etherh
-
+#ifndef __INCif_etherh /* Quick and ugly hack for VxWorks */
+/*
+ * Structure of a 10Mb/s Ethernet header.
+ */
BWL_PRE_PACKED_STRUCT struct ether_header {
uint8 ether_dhost[ETHER_ADDR_LEN];
uint8 ether_shost[ETHER_ADDR_LEN];
uint16 ether_type;
} BWL_POST_PACKED_STRUCT;
-
+/*
+ * Structure of a 48-bit Ethernet address.
+ */
BWL_PRE_PACKED_STRUCT struct ether_addr {
uint8 octet[ETHER_ADDR_LEN];
} BWL_POST_PACKED_STRUCT;
-#endif
-
+#endif /* !__INCif_etherh Quick and ugly hack for VxWorks */
+/*
+ * Macros that take a pointer and set, test, clear, or toggle the locally administered
+ * address bit in the 48-bit Ethernet address.
+ */
#define ETHER_SET_LOCALADDR(ea) (((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] | 2))
#define ETHER_IS_LOCALADDR(ea) (((uint8 *)(ea))[0] & 2)
#define ETHER_CLR_LOCALADDR(ea) (((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] & 0xfd))
#define ETHER_TOGGLE_LOCALADDR(ea) (((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] ^ 2))
-
+/* Takes a pointer, marks unicast address bit in the MAC address */
#define ETHER_SET_UNICAST(ea) (((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] & ~1))
-
+/*
+ * Takes a pointer, returns true if a 48-bit multicast address
+ * (including broadcast, since it is all ones)
+ */
#define ETHER_ISMULTI(ea) (((const uint8 *)(ea))[0] & 1)
-
+/* compare two ethernet addresses - assumes the pointers can be referenced as shorts */
#define eacmp(a, b) ((((const uint16 *)(a))[0] ^ ((const uint16 *)(b))[0]) | \
(((const uint16 *)(a))[1] ^ ((const uint16 *)(b))[1]) | \
(((const uint16 *)(a))[2] ^ ((const uint16 *)(b))[2]))
#define ether_cmp(a, b) eacmp(a, b)
-
+/* copy an ethernet address - assumes the pointers can be referenced as shorts */
#define eacopy(s, d) \
do { \
((uint16 *)(d))[0] = ((const uint16 *)(s))[0]; \
#define ether_copy(s, d) eacopy(s, d)
-
+/* Copy an ethernet address in reverse order */
#define ether_rcopy(s, d) \
do { \
((uint16 *)(d))[2] = ((uint16 *)(s))[2]; \
((uint16 *)(d))[0] = ((uint16 *)(s))[0]; \
} while (0)
+/* Copy 14B ethernet header: 32bit aligned source and destination. */
+#define ehcopy32(s, d) \
+do { \
+ ((uint32 *)(d))[0] = ((const uint32 *)(s))[0]; \
+ ((uint32 *)(d))[1] = ((const uint32 *)(s))[1]; \
+ ((uint32 *)(d))[2] = ((const uint32 *)(s))[2]; \
+ ((uint16 *)(d))[6] = ((const uint16 *)(s))[6]; \
+} while (0)
static const struct ether_addr ether_bcast = {{255, 255, 255, 255, 255, 255}};
#define ETHER_ISUCAST(ea) ((((uint8 *)(ea))[0] & 0x01) == 0)
-
+/* This marks the end of a packed structure section. */
#include <packed_section_end.h>
-#endif
+#endif /* _NET_ETHERNET_H_ */
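For illustration (not part of this patch), a receive filter might combine ETHER_ISMULTI with eacmp. Here frame is assumed to point at the Ethernet header and our_addr at the local MAC, the accept/drop policy is a made-up example, and eacmp carries its own caveat that the pointers must be referenceable as shorts.

static int example_accept_frame(const uint8 *frame, const struct ether_addr *our_addr)
{
	const uint8 *dst = frame + ETHER_DEST_OFFSET;

	if (ETHER_ISMULTI(dst))
		return 1;				/* broadcast and multicast */
	return eacmp(dst, our_addr->octet) == 0;	/* exactly our unicast MAC */
}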
*
* Fundamental types and constants relating to WFA P2P (aka WiFi Direct)
*
- * $Id: p2p.h 384536 2013-02-12 04:13:09Z $
+ * $Id: p2p.h 457033 2014-02-20 19:39:45Z $
*/
#ifndef _P2P_H_
#include <wlioctl.h>
#include <proto/802.11.h>
-
+/* This marks the start of a packed structure section. */
#include <packed_section_start.h>
+/* WiFi P2P OUI values */
+#define P2P_OUI WFA_OUI /* WiFi P2P OUI */
+#define P2P_VER WFA_OUI_TYPE_P2P /* P2P version: 9=WiFi P2P v1.0 */
-#define P2P_OUI WFA_OUI
-#define P2P_VER WFA_OUI_TYPE_P2P
-
-#define P2P_IE_ID 0xdd
-
+#define P2P_IE_ID 0xdd /* P2P IE element ID */
+/* WiFi P2P IE */
BWL_PRE_PACKED_STRUCT struct wifi_p2p_ie {
- uint8 id;
- uint8 len;
- uint8 OUI[3];
- uint8 oui_type;
- uint8 subelts[1];
+ uint8 id; /* IE ID: 0xDD */
+ uint8 len; /* IE length */
+ uint8 OUI[3]; /* WiFi P2P specific OUI: P2P_OUI */
+ uint8 oui_type; /* Identifies P2P version: P2P_VER */
+ uint8 subelts[1]; /* variable length subelements */
} BWL_POST_PACKED_STRUCT;
typedef struct wifi_p2p_ie wifi_p2p_ie_t;
#define P2P_ATTR_LEN_OFF 1
#define P2P_ATTR_DATA_OFF 3
-#define P2P_ATTR_ID_LEN 1
-#define P2P_ATTR_LEN_LEN 2
-#define P2P_ATTR_HDR_LEN 3
-
-
-#define P2P_SEID_STATUS 0
-#define P2P_SEID_MINOR_RC 1
-#define P2P_SEID_P2P_INFO 2
-#define P2P_SEID_DEV_ID 3
-#define P2P_SEID_INTENT 4
-#define P2P_SEID_CFG_TIMEOUT 5
-#define P2P_SEID_CHANNEL 6
-#define P2P_SEID_GRP_BSSID 7
-#define P2P_SEID_XT_TIMING 8
-#define P2P_SEID_INTINTADDR 9
-#define P2P_SEID_P2P_MGBTY 10
-#define P2P_SEID_CHAN_LIST 11
-#define P2P_SEID_ABSENCE 12
-#define P2P_SEID_DEV_INFO 13
-#define P2P_SEID_GROUP_INFO 14
-#define P2P_SEID_GROUP_ID 15
-#define P2P_SEID_P2P_IF 16
-#define P2P_SEID_OP_CHANNEL 17
-#define P2P_SEID_INVITE_FLAGS 18
-#define P2P_SEID_VNDR 221
-
-#define P2P_SE_VS_ID_SERVICES 0x1b
-
-
-
+#define P2P_ATTR_ID_LEN 1 /* ID field length */
+#define P2P_ATTR_LEN_LEN 2 /* length field length */
+#define P2P_ATTR_HDR_LEN 3 /* ID + 2-byte length field spec 1.02 */
+
+#define P2P_WFDS_HASH_LEN 6
+#define P2P_WFDS_MAX_SVC_NAME_LEN 32
+
+/* P2P IE Subelement IDs from WiFi P2P Technical Spec 1.00 */
+#define P2P_SEID_STATUS 0 /* Status */
+#define P2P_SEID_MINOR_RC 1 /* Minor Reason Code */
+#define P2P_SEID_P2P_INFO 2 /* P2P Capability (capabilities info) */
+#define P2P_SEID_DEV_ID 3 /* P2P Device ID */
+#define P2P_SEID_INTENT 4 /* Group Owner Intent */
+#define P2P_SEID_CFG_TIMEOUT 5 /* Configuration Timeout */
+#define P2P_SEID_CHANNEL 6 /* Listen channel */
+#define P2P_SEID_GRP_BSSID 7 /* P2P Group BSSID */
+#define P2P_SEID_XT_TIMING 8 /* Extended Listen Timing */
+#define P2P_SEID_INTINTADDR 9 /* Intended P2P Interface Address */
+#define P2P_SEID_P2P_MGBTY 10 /* P2P Manageability */
+#define P2P_SEID_CHAN_LIST 11 /* Channel List */
+#define P2P_SEID_ABSENCE 12 /* Notice of Absence */
+#define P2P_SEID_DEV_INFO 13 /* Device Info */
+#define P2P_SEID_GROUP_INFO 14 /* Group Info */
+#define P2P_SEID_GROUP_ID 15 /* Group ID */
+#define P2P_SEID_P2P_IF 16 /* P2P Interface */
+#define P2P_SEID_OP_CHANNEL 17 /* Operating Channel */
+#define P2P_SEID_INVITE_FLAGS 18 /* Invitation Flags */
+#define P2P_SEID_SERVICE_HASH 21 /* Service hash */
+#define P2P_SEID_SESSION 22 /* Session information */
+#define P2P_SEID_CONNECT_CAP 23 /* Connection capability */
+#define P2P_SEID_ADVERTISE_ID 24 /* Advertisement ID */
+#define P2P_SEID_ADVERTISE_SERVICE 25 /* Advertised service */
+#define P2P_SEID_SESSION_ID 26 /* Session ID */
+#define P2P_SEID_FEATURE_CAP 27 /* Feature capability */
+#define P2P_SEID_PERSISTENT_GROUP 28 /* Persistent group */
+#define P2P_SEID_SESSION_INFO_RESP 29 /* Session Information Response */
+#define P2P_SEID_VNDR 221 /* Vendor-specific subelement */
+
+#define P2P_SE_VS_ID_SERVICES 0x1b
+
+
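The subelements inside a wifi_p2p_ie_t are TLVs: a 1-byte ID (P2P_ATTR_ID_LEN) followed by a 2-byte length (P2P_ATTR_LEN_LEN) and the data. A hedged walker is sketched below, assuming the 2-byte length is little-endian as in the P2P specification; len and the function name are illustrative, with len meaning the number of subelement bytes (IE length minus OUI and type).

static const uint8 *example_find_p2p_subelt(const uint8 *subelts, int len, uint8 want_id)
{
	while (len >= P2P_ATTR_HDR_LEN) {
		uint8 id = subelts[0];
		uint16 se_len = subelts[P2P_ATTR_LEN_OFF] |
		                (subelts[P2P_ATTR_LEN_OFF + 1] << 8);

		if (P2P_ATTR_HDR_LEN + se_len > len)
			break;			/* malformed: length overruns the IE */
		if (id == want_id)
			return subelts;		/* points at the matching subelement */
		subelts += P2P_ATTR_HDR_LEN + se_len;
		len -= P2P_ATTR_HDR_LEN + se_len;
	}
	return NULL;				/* not found */
}

A caller looking for, say, the Device Info subelement would pass P2P_SEID_DEV_INFO as want_id.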
+/* WiFi P2P IE subelement: P2P Capability (capabilities info) */
BWL_PRE_PACKED_STRUCT struct wifi_p2p_info_se_s {
- uint8 eltId;
- uint8 len[2];
- uint8 dev;
- uint8 group;
+ uint8 eltId; /* SE ID: P2P_SEID_P2P_INFO */
+ uint8 len[2]; /* SE length not including eltId, len fields */
+ uint8 dev; /* Device Capability Bitmap */
+ uint8 group; /* Group Capability Bitmap */
} BWL_POST_PACKED_STRUCT;
typedef struct wifi_p2p_info_se_s wifi_p2p_info_se_t;
-
-#define P2P_CAPSE_DEV_SERVICE_DIS 0x1
-#define P2P_CAPSE_DEV_CLIENT_DIS 0x2
-#define P2P_CAPSE_DEV_CONCURRENT 0x4
-#define P2P_CAPSE_DEV_INFRA_MAN 0x8
-#define P2P_CAPSE_DEV_LIMIT 0x10
-#define P2P_CAPSE_INVITE_PROC 0x20
-
-
-#define P2P_CAPSE_GRP_OWNER 0x1
-#define P2P_CAPSE_PERSIST_GRP 0x2
-#define P2P_CAPSE_GRP_LIMIT 0x4
-#define P2P_CAPSE_GRP_INTRA_BSS 0x8
-#define P2P_CAPSE_GRP_X_CONNECT 0x10
-#define P2P_CAPSE_GRP_PERSISTENT 0x20
-#define P2P_CAPSE_GRP_FORMATION 0x40
-
-
-
+/* P2P Capability subelement's Device Capability Bitmap bit values */
+#define P2P_CAPSE_DEV_SERVICE_DIS 0x1 /* Service Discovery */
+#define P2P_CAPSE_DEV_CLIENT_DIS 0x2 /* Client Discoverability */
+#define P2P_CAPSE_DEV_CONCURRENT 0x4 /* Concurrent Operation */
+#define P2P_CAPSE_DEV_INFRA_MAN 0x8 /* P2P Infrastructure Managed */
+#define P2P_CAPSE_DEV_LIMIT 0x10 /* P2P Device Limit */
+#define P2P_CAPSE_INVITE_PROC 0x20 /* P2P Invitation Procedure */
+
+/* P2P Capability subelement's Group Capability Bitmap bit values */
+#define P2P_CAPSE_GRP_OWNER 0x1 /* P2P Group Owner */
+#define P2P_CAPSE_PERSIST_GRP 0x2 /* Persistent P2P Group */
+#define P2P_CAPSE_GRP_LIMIT 0x4 /* P2P Group Limit */
+#define P2P_CAPSE_GRP_INTRA_BSS 0x8 /* Intra-BSS Distribution */
+#define P2P_CAPSE_GRP_X_CONNECT 0x10 /* Cross Connection */
+#define P2P_CAPSE_GRP_PERSISTENT 0x20 /* Persistent Reconnect */
+#define P2P_CAPSE_GRP_FORMATION 0x40 /* Group Formation */
+
+
+/* WiFi P2P IE subelement: Group Owner Intent */
BWL_PRE_PACKED_STRUCT struct wifi_p2p_intent_se_s {
- uint8 eltId;
- uint8 len[2];
- uint8 intent;
+ uint8 eltId; /* SE ID: P2P_SEID_INTENT */
+ uint8 len[2]; /* SE length not including eltId, len fields */
+ uint8 intent; /* Intent Value 0...15 (0=legacy 15=master only) */
} BWL_POST_PACKED_STRUCT;
typedef struct wifi_p2p_intent_se_s wifi_p2p_intent_se_t;
-
+/* WiFi P2P IE subelement: Configuration Timeout */
BWL_PRE_PACKED_STRUCT struct wifi_p2p_cfg_tmo_se_s {
- uint8 eltId;
- uint8 len[2];
- uint8 go_tmo;
- uint8 client_tmo;
+ uint8 eltId; /* SE ID: P2P_SEID_CFG_TIMEOUT */
+ uint8 len[2]; /* SE length not including eltId, len fields */
+ uint8 go_tmo; /* GO config timeout in units of 10 ms */
+ uint8 client_tmo; /* Client config timeout in units of 10 ms */
} BWL_POST_PACKED_STRUCT;
typedef struct wifi_p2p_cfg_tmo_se_s wifi_p2p_cfg_tmo_se_t;
-
+/* WiFi P2P IE subelement: Listen Channel */
BWL_PRE_PACKED_STRUCT struct wifi_p2p_listen_channel_se_s {
- uint8 eltId;
- uint8 len[2];
- uint8 country[3];
- uint8 op_class;
- uint8 channel;
+ uint8 eltId; /* SE ID: P2P_SEID_CHANNEL */
+ uint8 len[2]; /* SE length not including eltId, len fields */
+ uint8 country[3]; /* Country String */
+ uint8 op_class; /* Operating Class */
+ uint8 channel; /* Channel */
} BWL_POST_PACKED_STRUCT;
typedef struct wifi_p2p_listen_channel_se_s wifi_p2p_listen_channel_se_t;
-
+/* WiFi P2P IE subelement: P2P Group BSSID */
BWL_PRE_PACKED_STRUCT struct wifi_p2p_grp_bssid_se_s {
- uint8 eltId;
- uint8 len[2];
- uint8 mac[6];
+ uint8 eltId; /* SE ID: P2P_SEID_GRP_BSSID */
+ uint8 len[2]; /* SE length not including eltId, len fields */
+ uint8 mac[6]; /* P2P group bssid */
} BWL_POST_PACKED_STRUCT;
typedef struct wifi_p2p_grp_bssid_se_s wifi_p2p_grp_bssid_se_t;
-
+/* WiFi P2P IE subelement: P2P Group ID */
BWL_PRE_PACKED_STRUCT struct wifi_p2p_grp_id_se_s {
- uint8 eltId;
- uint8 len[2];
- uint8 mac[6];
- uint8 ssid[1];
+ uint8 eltId; /* SE ID: P2P_SEID_GROUP_ID */
+ uint8 len[2]; /* SE length not including eltId, len fields */
+ uint8 mac[6]; /* P2P device address */
+ uint8 ssid[1]; /* ssid. device id. variable length */
} BWL_POST_PACKED_STRUCT;
typedef struct wifi_p2p_grp_id_se_s wifi_p2p_grp_id_se_t;
-
+/* WiFi P2P IE subelement: P2P Interface */
BWL_PRE_PACKED_STRUCT struct wifi_p2p_intf_se_s {
- uint8 eltId;
- uint8 len[2];
- uint8 mac[6];
- uint8 ifaddrs;
- uint8 ifaddr[1][6];
+ uint8 eltId; /* SE ID: P2P_SEID_P2P_IF */
+ uint8 len[2]; /* SE length not including eltId, len fields */
+ uint8 mac[6]; /* P2P device address */
+ uint8 ifaddrs; /* P2P Interface Address count */
+ uint8 ifaddr[1][6]; /* P2P Interface Address list */
} BWL_POST_PACKED_STRUCT;
typedef struct wifi_p2p_intf_se_s wifi_p2p_intf_se_t;
-
+/* WiFi P2P IE subelement: Status */
BWL_PRE_PACKED_STRUCT struct wifi_p2p_status_se_s {
- uint8 eltId;
- uint8 len[2];
- uint8 status;
+ uint8 eltId; /* SE ID: P2P_SEID_STATUS */
+ uint8 len[2]; /* SE length not including eltId, len fields */
+ uint8 status; /* Status Code: P2P_STATSE_* */
} BWL_POST_PACKED_STRUCT;
typedef struct wifi_p2p_status_se_s wifi_p2p_status_se_t;
-
+/* Status subelement Status Code definitions */
#define P2P_STATSE_SUCCESS 0
-
+ /* Success */
#define P2P_STATSE_FAIL_INFO_CURR_UNAVAIL 1
-
+ /* Failed, information currently unavailable */
#define P2P_STATSE_PASSED_UP P2P_STATSE_FAIL_INFO_CURR_UNAVAIL
-
+ /* Old name for above in P2P spec 1.08 and older */
#define P2P_STATSE_FAIL_INCOMPAT_PARAMS 2
-
+ /* Failed, incompatible parameters */
#define P2P_STATSE_FAIL_LIMIT_REACHED 3
-
+ /* Failed, limit reached */
#define P2P_STATSE_FAIL_INVALID_PARAMS 4
-
+ /* Failed, invalid parameters */
#define P2P_STATSE_FAIL_UNABLE_TO_ACCOM 5
-
+ /* Failed, unable to accomodate request */
#define P2P_STATSE_FAIL_PROTO_ERROR 6
-
+ /* Failed, previous protocol error or disruptive behaviour */
#define P2P_STATSE_FAIL_NO_COMMON_CHAN 7
-
+ /* Failed, no common channels */
#define P2P_STATSE_FAIL_UNKNOWN_GROUP 8
-
+ /* Failed, unknown P2P Group */
#define P2P_STATSE_FAIL_INTENT 9
-
+ /* Failed, both peers indicated Intent 15 in GO Negotiation */
#define P2P_STATSE_FAIL_INCOMPAT_PROVIS 10
-
+ /* Failed, incompatible provisioning method */
#define P2P_STATSE_FAIL_USER_REJECT 11
-
-
+ /* Failed, rejected by user */
+#define P2P_STATSE_SUCCESS_USER_ACCEPT 12
+ /* Success, accepted by user */
+/* WiFi P2P IE attribute: Extended Listen Timing */
BWL_PRE_PACKED_STRUCT struct wifi_p2p_ext_se_s {
- uint8 eltId;
- uint8 len[2];
- uint8 avail[2];
- uint8 interval[2];
+ uint8 eltId; /* SE ID: P2P_SEID_XT_TIMING */
+ uint8 len[2]; /* length not including eltId, len fields */
+ uint8 avail[2]; /* availability period */
+ uint8 interval[2]; /* availability interval */
} BWL_POST_PACKED_STRUCT;
typedef struct wifi_p2p_ext_se_s wifi_p2p_ext_se_t;
-#define P2P_EXT_MIN 10
-
+#define P2P_EXT_MIN 10 /* minimum 10ms */
+/* WiFi P2P IE subelement: Intended P2P Interface Address */
BWL_PRE_PACKED_STRUCT struct wifi_p2p_intintad_se_s {
- uint8 eltId;
- uint8 len[2];
- uint8 mac[6];
+ uint8 eltId; /* SE ID: P2P_SEID_INTINTADDR */
+ uint8 len[2]; /* SE length not including eltId, len fields */
+ uint8 mac[6]; /* intended P2P interface MAC address */
} BWL_POST_PACKED_STRUCT;
typedef struct wifi_p2p_intintad_se_s wifi_p2p_intintad_se_t;
-
+/* WiFi P2P IE subelement: Channel */
BWL_PRE_PACKED_STRUCT struct wifi_p2p_channel_se_s {
- uint8 eltId;
- uint8 len[2];
- uint8 band;
- uint8 channel;
+ uint8 eltId; /* SE ID: P2P_SEID_CHANNEL */
+ uint8 len[2]; /* SE length not including eltId, len fields */
+ uint8 band; /* Regulatory Class (band) */
+ uint8 channel; /* Channel */
} BWL_POST_PACKED_STRUCT;
typedef struct wifi_p2p_channel_se_s wifi_p2p_channel_se_t;
-
+/* Channel Entry structure within the Channel List SE */
BWL_PRE_PACKED_STRUCT struct wifi_p2p_chanlist_entry_s {
- uint8 band;
- uint8 num_channels;
- uint8 channels[WL_NUMCHANNELS];
+ uint8 band; /* Regulatory Class (band) */
+ uint8 num_channels; /* # of channels in the channel list */
+ uint8 channels[WL_NUMCHANNELS]; /* Channel List */
} BWL_POST_PACKED_STRUCT;
typedef struct wifi_p2p_chanlist_entry_s wifi_p2p_chanlist_entry_t;
#define WIFI_P2P_CHANLIST_SE_MAX_ENTRIES 2
-
+/* WiFi P2P IE subelement: Channel List */
BWL_PRE_PACKED_STRUCT struct wifi_p2p_chanlist_se_s {
- uint8 eltId;
- uint8 len[2];
- uint8 country[3];
- uint8 num_entries;
+ uint8 eltId; /* SE ID: P2P_SEID_CHAN_LIST */
+ uint8 len[2]; /* SE length not including eltId, len fields */
+ uint8 country[3]; /* Country String */
+ uint8 num_entries; /* # of channel entries */
wifi_p2p_chanlist_entry_t entries[WIFI_P2P_CHANLIST_SE_MAX_ENTRIES];
-
+ /* Channel Entry List */
} BWL_POST_PACKED_STRUCT;
typedef struct wifi_p2p_chanlist_se_s wifi_p2p_chanlist_se_t;
-
+/* WiFi Primary Device Type structure */
BWL_PRE_PACKED_STRUCT struct wifi_p2p_pri_devtype_s {
- uint16 cat_id;
- uint8 OUI[3];
- uint8 oui_type;
- uint16 sub_cat_id;
+ uint16 cat_id; /* Category ID */
+ uint8 OUI[3]; /* WFA OUI: 0x0050F2 */
+ uint8 oui_type; /* WPS_OUI_TYPE */
+ uint16 sub_cat_id; /* Sub Category ID */
} BWL_POST_PACKED_STRUCT;
typedef struct wifi_p2p_pri_devtype_s wifi_p2p_pri_devtype_t;
-
+/* WiFi P2P Device Info Sub Element Primary Device Type Sub Category
+ * maximum values for each category
+ */
+#define P2P_DISE_SUBCATEGORY_MINVAL 1
+#define P2P_DISE_CATEGORY_COMPUTER 1
+#define P2P_DISE_SUBCATEGORY_COMPUTER_MAXVAL 8
+#define P2P_DISE_CATEGORY_INPUT_DEVICE 2
+#define P2P_DISE_SUBCATEGORY_INPUT_DEVICE_MAXVAL 9
+#define P2P_DISE_CATEGORY_PRINTER 3
+#define P2P_DISE_SUBCATEGORY_PRINTER_MAXVAL 5
+#define P2P_DISE_CATEGORY_CAMERA 4
+#define P2P_DISE_SUBCATEGORY_CAMERA_MAXVAL 4
+#define P2P_DISE_CATEGORY_STORAGE 5
+#define P2P_DISE_SUBCATEGORY_STORAGE_MAXVAL 1
+#define P2P_DISE_CATEGORY_NETWORK_INFRA 6
+#define P2P_DISE_SUBCATEGORY_NETWORK_INFRA_MAXVAL 4
+#define P2P_DISE_CATEGORY_DISPLAY 7
+#define P2P_DISE_SUBCATEGORY_DISPLAY_MAXVAL 4
+#define P2P_DISE_CATEGORY_MULTIMEDIA 8
+#define P2P_DISE_SUBCATEGORY_MULTIMEDIA_MAXVAL 6
+#define P2P_DISE_CATEGORY_GAMING 9
+#define P2P_DISE_SUBCATEGORY_GAMING_MAXVAL 5
+#define P2P_DISE_CATEGORY_TELEPHONE 10
+#define P2P_DISE_SUBCATEGORY_TELEPHONE_MAXVAL 5
+#define P2P_DISE_CATEGORY_AUDIO 11
+#define P2P_DISE_SUBCATEGORY_AUDIO_MAXVAL 6
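/* Editor's note: a minimal sketch (not part of the original header) showing one way the
 * P2P_DISE_* bounds above could be used to sanity-check a Primary Device Type
 * category/sub-category pair advertised by a peer. Assumes only the defines above.
 */
static int p2p_dise_subcat_valid(uint16 cat_id, uint16 sub_cat_id)
{
	uint16 maxval;

	switch (cat_id) {
	case P2P_DISE_CATEGORY_COMPUTER:      maxval = P2P_DISE_SUBCATEGORY_COMPUTER_MAXVAL; break;
	case P2P_DISE_CATEGORY_INPUT_DEVICE:  maxval = P2P_DISE_SUBCATEGORY_INPUT_DEVICE_MAXVAL; break;
	case P2P_DISE_CATEGORY_PRINTER:       maxval = P2P_DISE_SUBCATEGORY_PRINTER_MAXVAL; break;
	case P2P_DISE_CATEGORY_CAMERA:        maxval = P2P_DISE_SUBCATEGORY_CAMERA_MAXVAL; break;
	case P2P_DISE_CATEGORY_STORAGE:       maxval = P2P_DISE_SUBCATEGORY_STORAGE_MAXVAL; break;
	case P2P_DISE_CATEGORY_NETWORK_INFRA: maxval = P2P_DISE_SUBCATEGORY_NETWORK_INFRA_MAXVAL; break;
	case P2P_DISE_CATEGORY_DISPLAY:       maxval = P2P_DISE_SUBCATEGORY_DISPLAY_MAXVAL; break;
	case P2P_DISE_CATEGORY_MULTIMEDIA:    maxval = P2P_DISE_SUBCATEGORY_MULTIMEDIA_MAXVAL; break;
	case P2P_DISE_CATEGORY_GAMING:        maxval = P2P_DISE_SUBCATEGORY_GAMING_MAXVAL; break;
	case P2P_DISE_CATEGORY_TELEPHONE:     maxval = P2P_DISE_SUBCATEGORY_TELEPHONE_MAXVAL; break;
	case P2P_DISE_CATEGORY_AUDIO:         maxval = P2P_DISE_SUBCATEGORY_AUDIO_MAXVAL; break;
	default: return 0; /* unknown category */
	}
	return (sub_cat_id >= P2P_DISE_SUBCATEGORY_MINVAL && sub_cat_id <= maxval);
}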
+
+/* WiFi P2P IE's Device Info subelement */
BWL_PRE_PACKED_STRUCT struct wifi_p2p_devinfo_se_s {
- uint8 eltId;
- uint8 len[2];
- uint8 mac[6];
- uint16 wps_cfg_meths;
- uint8 pri_devtype[8];
+ uint8 eltId; /* SE ID: P2P_SEID_DEVINFO */
+ uint8 len[2]; /* SE length not including eltId, len fields */
+ uint8 mac[6]; /* P2P Device MAC address */
+ uint16 wps_cfg_meths; /* Config Methods: reg_prototlv.h WPS_CONFMET_* */
+ uint8 pri_devtype[8]; /* Primary Device Type */
} BWL_POST_PACKED_STRUCT;
typedef struct wifi_p2p_devinfo_se_s wifi_p2p_devinfo_se_t;
#define P2P_DEV_TYPE_LEN 8
-
+/* WiFi P2P IE's Group Info subelement Client Info Descriptor */
BWL_PRE_PACKED_STRUCT struct wifi_p2p_cid_fixed_s {
uint8 len;
- uint8 devaddr[ETHER_ADDR_LEN];
- uint8 ifaddr[ETHER_ADDR_LEN];
- uint8 devcap;
- uint8 cfg_meths[2];
- uint8 pridt[P2P_DEV_TYPE_LEN];
- uint8 secdts;
+ uint8 devaddr[ETHER_ADDR_LEN]; /* P2P Device Address */
+ uint8 ifaddr[ETHER_ADDR_LEN]; /* P2P Interface Address */
+ uint8 devcap; /* Device Capability */
+ uint8 cfg_meths[2]; /* Config Methods: reg_prototlv.h WPS_CONFMET_* */
+ uint8 pridt[P2P_DEV_TYPE_LEN]; /* Primary Device Type */
+ uint8 secdts; /* Number of Secondary Device Types */
} BWL_POST_PACKED_STRUCT;
typedef struct wifi_p2p_cid_fixed_s wifi_p2p_cid_fixed_t;
-
+/* WiFi P2P IE's Device ID subelement */
BWL_PRE_PACKED_STRUCT struct wifi_p2p_devid_se_s {
uint8 eltId;
uint8 len[2];
- struct ether_addr addr;
+ struct ether_addr addr; /* P2P Device MAC address */
} BWL_POST_PACKED_STRUCT;
typedef struct wifi_p2p_devid_se_s wifi_p2p_devid_se_t;
-
+/* WiFi P2P IE subelement: P2P Manageability */
BWL_PRE_PACKED_STRUCT struct wifi_p2p_mgbt_se_s {
- uint8 eltId;
- uint8 len[2];
- uint8 mg_bitmap;
+ uint8 eltId; /* SE ID: P2P_SEID_P2P_MGBTY */
+ uint8 len[2]; /* SE length not including eltId, len fields */
+ uint8 mg_bitmap; /* manageability bitmap */
} BWL_POST_PACKED_STRUCT;
typedef struct wifi_p2p_mgbt_se_s wifi_p2p_mgbt_se_t;
+/* mg_bitmap field bit values */
+#define P2P_MGBTSE_P2PDEVMGMT_FLAG 0x1 /* AP supports Managed P2P Device */
-#define P2P_MGBTSE_P2PDEVMGMT_FLAG 0x1
-
-
+/* WiFi P2P IE subelement: Group Info */
BWL_PRE_PACKED_STRUCT struct wifi_p2p_grpinfo_se_s {
- uint8 eltId;
- uint8 len[2];
+ uint8 eltId; /* SE ID: P2P_SEID_GROUP_INFO */
+ uint8 len[2]; /* SE length not including eltId, len fields */
} BWL_POST_PACKED_STRUCT;
typedef struct wifi_p2p_grpinfo_se_s wifi_p2p_grpinfo_se_t;
-
+/* WiFi IE subelement: Operating Channel */
BWL_PRE_PACKED_STRUCT struct wifi_p2p_op_channel_se_s {
- uint8 eltId;
- uint8 len[2];
- uint8 country[3];
- uint8 op_class;
- uint8 channel;
+ uint8 eltId; /* SE ID: P2P_SEID_OP_CHANNEL */
+ uint8 len[2]; /* SE length not including eltId, len fields */
+ uint8 country[3]; /* Country String */
+ uint8 op_class; /* Operating Class */
+ uint8 channel; /* Channel */
} BWL_POST_PACKED_STRUCT;
typedef struct wifi_p2p_op_channel_se_s wifi_p2p_op_channel_se_t;
-
+/* WiFi IE subelement: INVITATION FLAGS */
BWL_PRE_PACKED_STRUCT struct wifi_p2p_invite_flags_se_s {
- uint8 eltId;
- uint8 len[2];
- uint8 flags;
+ uint8 eltId; /* SE ID: P2P_SEID_INVITE_FLAGS */
+ uint8 len[2]; /* SE length not including eltId, len fields */
+ uint8 flags; /* Flags */
} BWL_POST_PACKED_STRUCT;
typedef struct wifi_p2p_invite_flags_se_s wifi_p2p_invite_flags_se_t;
+/* WiFi P2P IE subelement: Service Hash */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_serv_hash_se_s {
+ uint8 eltId; /* SE ID: P2P_SEID_SERVICE_HASH */
+ uint8 len[2]; /* SE length not including eltId, len fields
+ * in multiple of 6 Bytes
+ */
+ uint8 hash[1]; /* Variable length - SHA256 hash of
+ * service names (can be more than one hashes)
+ */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_serv_hash_se_s wifi_p2p_serv_hash_se_t;
+
+/* WiFi P2P IE subelement: Service Instance Data */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_serv_inst_data_se_s {
+ uint8 eltId; /* SE ID: P2P_SEID_SESSION */
+ uint8 len[2]; /* SE length not including eltId, len */
+ uint8 ssn_info[1]; /* Variable length - Session information as specified by
+ * the service layer, type matches serv. name
+ */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_serv_inst_data_se_s wifi_p2p_serv_inst_data_se_t;
+
+/* WiFi P2P IE subelement: Connection capability */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_conn_cap_data_se_s {
+ uint8 eltId; /* SE ID: P2P_SEID_CONNECT_CAP */
+ uint8 len[2]; /* SE length not including eltId, len */
+ uint8 conn_cap; /* 1byte capability as specified by the
+ * service layer, valid bitmask/values
+ */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_conn_cap_data_se_s wifi_p2p_conn_cap_data_se_t;
+
+
+/* WiFi P2P IE subelement: Advertisement ID */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_advt_id_se_s {
+ uint8 eltId; /* SE ID: P2P_SEID_ADVERTISE_ID */
+ uint8 len[2]; /* SE length not including eltId, len fixed 4 Bytes */
+ uint8 advt_id[4]; /* 4byte Advertisement ID of the peer device sent in
+ * PROV Disc in Network byte order
+ */
+ uint8 advt_mac[6]; /* P2P device address of the service advertiser */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_advt_id_se_s wifi_p2p_advt_id_se_t;
+
+
+/* WiFi P2P IE subelement: Advertise Service Hash */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_adv_serv_info_s {
+ uint8 advt_id[4]; /* SE Advertise ID for the service */
+ uint16 nw_cfg_method; /* SE Network Config method for the service */
+ uint8 serv_name_len; /* SE length of the service name */
+ uint8 serv_name[1]; /* Variable length service name field */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_adv_serv_info_s wifi_p2p_adv_serv_info_t;
+
+
+/* WiFi P2P IE subelement: Advertise Service Hash */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_advt_serv_se_s {
+ uint8 eltId; /* SE ID: P2P_SEID_ADVERTISE_SERVICE */
+ uint8 len[2]; /* SE length not including eltId, len fields; a multiple of
+ * the wifi_p2p_adv_serv_info_t entry lengths
+ */
+ wifi_p2p_adv_serv_info_t p_advt_serv_info[1]; /* Variable length:
+ * multiple instances of the
+ * advertise service info
+ */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_advt_serv_se_s wifi_p2p_advt_serv_se_t;
+
+
+/* WiFi P2P IE subelement: Session ID */
+BWL_PRE_PACKED_STRUCT struct wifi_p2p_ssn_id_se_s {
+ uint8 eltId; /* SE ID: P2P_SEID_SESSION_ID */
+ uint8 len[2]; /* SE length not including eltId, len fixed 4 Bytes */
+ uint8 ssn_id[4]; /* 4byte Session ID of the peer device sent in
+ * PROV Disc in Network byte order
+ */
+ uint8 ssn_mac[6]; /* P2P device address of the seeker - session mac */
+} BWL_POST_PACKED_STRUCT;
+typedef struct wifi_p2p_ssn_id_se_s wifi_p2p_ssn_id_se_t;
+
+
+#define P2P_ADVT_SERV_SE_FIXED_LEN 3 /* Includes only the element ID and len */
+#define P2P_ADVT_SERV_INFO_FIXED_LEN 7 /* Per ADV Service Instance advt_id +
+ * nw_config_method + serv_name_len
+ */
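/* Editor's note: illustrative sketch (added by the editor, not part of the original header)
 * of walking the variable-length wifi_p2p_adv_serv_info_t entries that follow the fixed part
 * of a P2P_SEID_ADVERTISE_SERVICE subelement: each entry is P2P_ADVT_SERV_INFO_FIXED_LEN
 * bytes (advt_id + nw_cfg_method + serv_name_len) followed by serv_name_len name bytes.
 */
static uint p2p_count_advt_services(const uint8 *body, uint body_len)
{
	uint count = 0;
	uint off = 0;

	while (off + P2P_ADVT_SERV_INFO_FIXED_LEN <= body_len) {
		const wifi_p2p_adv_serv_info_t *info =
			(const wifi_p2p_adv_serv_info_t *)(body + off);
		uint entry_len = P2P_ADVT_SERV_INFO_FIXED_LEN + info->serv_name_len;

		if (off + entry_len > body_len)
			break; /* truncated entry */
		count++;
		off += entry_len;
	}
	return count;
}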
+
+/* WiFi P2P Action Frame */
BWL_PRE_PACKED_STRUCT struct wifi_p2p_action_frame {
- uint8 category;
- uint8 OUI[3];
- uint8 type;
- uint8 subtype;
- uint8 dialog_token;
- uint8 elts[1];
+ uint8 category; /* P2P_AF_CATEGORY */
+ uint8 OUI[3]; /* OUI - P2P_OUI */
+ uint8 type; /* OUI Type - P2P_VER */
+ uint8 subtype; /* OUI Subtype - P2P_AF_* */
+ uint8 dialog_token; /* nonzero, identifies req/resp transaction */
+ uint8 elts[1]; /* Variable length information elements. Max size =
+ * ACTION_FRAME_SIZE - sizeof(this structure) - 1
+ */
} BWL_POST_PACKED_STRUCT;
typedef struct wifi_p2p_action_frame wifi_p2p_action_frame_t;
#define P2P_AF_CATEGORY 0x7f
#define P2P_AF_FIXED_LEN 7
-
-#define P2P_AF_NOTICE_OF_ABSENCE 0
-#define P2P_AF_PRESENCE_REQ 1
-#define P2P_AF_PRESENCE_RSP 2
-#define P2P_AF_GO_DISC_REQ 3
-
+/* WiFi P2P Action Frame OUI Subtypes */
+#define P2P_AF_NOTICE_OF_ABSENCE 0 /* Notice of Absence */
+#define P2P_AF_PRESENCE_REQ 1 /* P2P Presence Request */
+#define P2P_AF_PRESENCE_RSP 2 /* P2P Presence Response */
+#define P2P_AF_GO_DISC_REQ 3 /* GO Discoverability Request */
+/* WiFi P2P Public Action Frame */
BWL_PRE_PACKED_STRUCT struct wifi_p2p_pub_act_frame {
- uint8 category;
- uint8 action;
- uint8 oui[3];
- uint8 oui_type;
- uint8 subtype;
- uint8 dialog_token;
- uint8 elts[1];
+ uint8 category; /* P2P_PUB_AF_CATEGORY */
+ uint8 action; /* P2P_PUB_AF_ACTION */
+ uint8 oui[3]; /* P2P_OUI */
+ uint8 oui_type; /* OUI type - P2P_VER */
+ uint8 subtype; /* OUI subtype - P2P_TYPE_* */
+ uint8 dialog_token; /* nonzero, identifies req/rsp transaction */
+ uint8 elts[1]; /* Variable length information elements. Max size =
+ * ACTION_FRAME_SIZE - sizeof(this structure) - 1
+ */
} BWL_POST_PACKED_STRUCT;
typedef struct wifi_p2p_pub_act_frame wifi_p2p_pub_act_frame_t;
#define P2P_PUB_AF_FIXED_LEN 8
#define P2P_PUB_AF_CATEGORY 0x04
#define P2P_PUB_AF_ACTION 0x09
-
-#define P2P_PAF_GON_REQ 0
-#define P2P_PAF_GON_RSP 1
-#define P2P_PAF_GON_CONF 2
-#define P2P_PAF_INVITE_REQ 3
-#define P2P_PAF_INVITE_RSP 4
-#define P2P_PAF_DEVDIS_REQ 5
-#define P2P_PAF_DEVDIS_RSP 6
-#define P2P_PAF_PROVDIS_REQ 7
-#define P2P_PAF_PROVDIS_RSP 8
-#define P2P_PAF_SUBTYPE_INVALID 255
-
-
+/* WiFi P2P Public Action Frame OUI Subtypes */
+#define P2P_PAF_GON_REQ 0 /* Group Owner Negotiation Req */
+#define P2P_PAF_GON_RSP 1 /* Group Owner Negotiation Rsp */
+#define P2P_PAF_GON_CONF 2 /* Group Owner Negotiation Confirm */
+#define P2P_PAF_INVITE_REQ 3 /* P2P Invitation Request */
+#define P2P_PAF_INVITE_RSP 4 /* P2P Invitation Response */
+#define P2P_PAF_DEVDIS_REQ 5 /* Device Discoverability Request */
+#define P2P_PAF_DEVDIS_RSP 6 /* Device Discoverability Response */
+#define P2P_PAF_PROVDIS_REQ 7 /* Provision Discovery Request */
+#define P2P_PAF_PROVDIS_RSP 8 /* Provision Discovery Response */
+#define P2P_PAF_SUBTYPE_INVALID 255 /* Invalid Subtype */
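/* Editor's note: minimal sketch (not from the original source) of classifying a received
 * P2P public action frame using the layout and subtype codes above. P2P_OUI and P2P_VER
 * are assumed to be the WFA OUI/type constants defined elsewhere in this header, and
 * memcmp() is assumed to be available to the driver.
 */
static uint8 p2p_pub_af_subtype(const uint8 *frame, uint len)
{
	const wifi_p2p_pub_act_frame_t *af = (const wifi_p2p_pub_act_frame_t *)frame;

	if (len < P2P_PUB_AF_FIXED_LEN ||
	    af->category != P2P_PUB_AF_CATEGORY ||
	    af->action != P2P_PUB_AF_ACTION ||
	    memcmp(af->oui, P2P_OUI, sizeof(af->oui)) != 0 ||
	    af->oui_type != P2P_VER)
		return P2P_PAF_SUBTYPE_INVALID;

	return af->subtype; /* one of P2P_PAF_* */
}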
+
+/* TODO: Stop using these obsolete aliases for P2P_PAF_GON_* */
#define P2P_TYPE_MNREQ P2P_PAF_GON_REQ
#define P2P_TYPE_MNRSP P2P_PAF_GON_RSP
#define P2P_TYPE_MNCONF P2P_PAF_GON_CONF
-
+/* WiFi P2P IE subelement: Notice of Absence */
BWL_PRE_PACKED_STRUCT struct wifi_p2p_noa_desc {
- uint8 cnt_type;
- uint32 duration;
- uint32 interval;
- uint32 start;
+ uint8 cnt_type; /* Count/Type */
+ uint32 duration; /* Duration */
+ uint32 interval; /* Interval */
+ uint32 start; /* Start Time */
} BWL_POST_PACKED_STRUCT;
typedef struct wifi_p2p_noa_desc wifi_p2p_noa_desc_t;
BWL_PRE_PACKED_STRUCT struct wifi_p2p_noa_se {
- uint8 eltId;
- uint8 len[2];
- uint8 index;
- uint8 ops_ctw_parms;
- wifi_p2p_noa_desc_t desc[1];
+ uint8 eltId; /* Subelement ID */
+ uint8 len[2]; /* Length */
+ uint8 index; /* Index */
+ uint8 ops_ctw_parms; /* CTWindow and OppPS Parameters */
+ wifi_p2p_noa_desc_t desc[1]; /* Notice of Absence Descriptor(s) */
} BWL_POST_PACKED_STRUCT;
typedef struct wifi_p2p_noa_se wifi_p2p_noa_se_t;
#define P2P_NOA_SE_FIXED_LEN 5
-#define P2P_NOA_SE_MAX_DESC 2
-
-
-#define P2P_NOA_DESC_CNT_RESERVED 0
-#define P2P_NOA_DESC_CNT_REPEAT 255
-#define P2P_NOA_DESC_TYPE_PREFERRED 1
-#define P2P_NOA_DESC_TYPE_ACCEPTABLE 2
+#define P2P_NOA_SE_MAX_DESC 2 /* max NoA descriptors in presence request */
+/* cnt_type field values */
+#define P2P_NOA_DESC_CNT_RESERVED 0 /* reserved and should not be used */
+#define P2P_NOA_DESC_CNT_REPEAT 255 /* continuous schedule */
+#define P2P_NOA_DESC_TYPE_PREFERRED 1 /* preferred values */
+#define P2P_NOA_DESC_TYPE_ACCEPTABLE 2 /* acceptable limits */
+/* ctw_ops_parms field values */
#define P2P_NOA_CTW_MASK 0x7f
#define P2P_NOA_OPS_MASK 0x80
#define P2P_NOA_OPS_SHIFT 7
-#define P2P_CTW_MIN 10
-
+#define P2P_CTW_MIN 10 /* minimum 10TU */
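/* Editor's note: small sketch (added by the editor) of unpacking the ops_ctw_parms octet of
 * a Notice of Absence subelement into the OppPS flag and CTWindow value using the masks
 * above; CTWindow must be at least P2P_CTW_MIN TUs when OppPS is set.
 */
static int p2p_noa_parse_ops(uint8 ops_ctw_parms, uint8 *opps, uint8 *ctwindow)
{
	*opps = (ops_ctw_parms & P2P_NOA_OPS_MASK) >> P2P_NOA_OPS_SHIFT;
	*ctwindow = ops_ctw_parms & P2P_NOA_CTW_MASK;

	if (*opps && *ctwindow < P2P_CTW_MIN)
		return -1; /* invalid CTWindow */
	return 0;
}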
+/*
+ * P2P Service Discovery related
+ */
#define P2PSD_ACTION_CATEGORY 0x04
-
+ /* Public action frame */
#define P2PSD_ACTION_ID_GAS_IREQ 0x0a
-
+ /* Action value for GAS Initial Request AF */
#define P2PSD_ACTION_ID_GAS_IRESP 0x0b
-
+ /* Action value for GAS Initial Response AF */
#define P2PSD_ACTION_ID_GAS_CREQ 0x0c
-
+ /* Action value for GAS Comeback Request AF */
#define P2PSD_ACTION_ID_GAS_CRESP 0x0d
-
+ /* Action value for GAS Comback Response AF */
#define P2PSD_AD_EID 0x6c
-
+ /* Advertisement Protocol IE ID */
#define P2PSD_ADP_TUPLE_QLMT_PAMEBI 0x00
-
+ /* Query Response Length Limit 7 bits plus PAME-BI 1 bit */
#define P2PSD_ADP_PROTO_ID 0x00
-
+ /* Advertisement Protocol ID. Always 0 for P2P SD */
#define P2PSD_GAS_OUI P2P_OUI
-
+ /* WFA OUI */
#define P2PSD_GAS_OUI_SUBTYPE P2P_VER
-
+ /* OUI Subtype for GAS IE */
#define P2PSD_GAS_NQP_INFOID 0xDDDD
-
+ /* NQP Query Info ID: 56797 */
#define P2PSD_GAS_COMEBACKDEALY 0x00
-
-
+ /* Not used in the Native GAS protocol */
+/* Service Protocol Type */
typedef enum p2psd_svc_protype {
SVC_RPOTYPE_ALL = 0,
SVC_RPOTYPE_BONJOUR = 1,
SVC_RPOTYPE_UPNP = 2,
SVC_RPOTYPE_WSD = 3,
+ SVC_RPOTYPE_WFDS = 11,
SVC_RPOTYPE_VENDOR = 255
} p2psd_svc_protype_t;
-
+/* Service Discovery response status code */
typedef enum {
P2PSD_RESP_STATUS_SUCCESS = 0,
P2PSD_RESP_STATUS_PROTYPE_NA = 1,
P2PSD_RESP_STATUS_BAD_REQUEST = 3
} p2psd_resp_status_t;
-
+/* Advertisement Protocol IE tuple field */
BWL_PRE_PACKED_STRUCT struct wifi_p2psd_adp_tpl {
- uint8 llm_pamebi;
- uint8 adp_id;
+ uint8 llm_pamebi; /* Query Response Length Limit bit 0-6, set to 0 plus
+ * Pre-Associated Message Exchange BSSID Independent bit 7, set to 0
+ */
+ uint8 adp_id; /* Advertisement Protocol ID: 0 for NQP Native Query Protocol */
} BWL_POST_PACKED_STRUCT;
typedef struct wifi_p2psd_adp_tpl wifi_p2psd_adp_tpl_t;
-
+/* Advertisement Protocol IE */
BWL_PRE_PACKED_STRUCT struct wifi_p2psd_adp_ie {
- uint8 id;
- uint8 len;
- wifi_p2psd_adp_tpl_t adp_tpl;
+ uint8 id; /* IE ID: 0x6c - 108 */
+ uint8 len; /* IE length */
+ wifi_p2psd_adp_tpl_t adp_tpl; /* Advertisement Protocol Tuple field. Only one
+ * tuple is defined for P2P Service Discovery
+ */
} BWL_POST_PACKED_STRUCT;
typedef struct wifi_p2psd_adp_ie wifi_p2psd_adp_ie_t;
-
+/* NQP Vendor-specific Content */
BWL_PRE_PACKED_STRUCT struct wifi_p2psd_nqp_query_vsc {
- uint8 oui_subtype;
- uint16 svc_updi;
- uint8 svc_tlvs[1];
+ uint8 oui_subtype; /* OUI Subtype: 0x09 */
+ uint16 svc_updi; /* Service Update Indicator */
+ uint8 svc_tlvs[1]; /* wifi_p2psd_qreq_tlv_t type for service request,
+ * wifi_p2psd_qresp_tlv_t type for service response
+ */
} BWL_POST_PACKED_STRUCT;
typedef struct wifi_p2psd_nqp_query_vsc wifi_p2psd_nqp_query_vsc_t;
-
+/* Service Request TLV */
BWL_PRE_PACKED_STRUCT struct wifi_p2psd_qreq_tlv {
- uint16 len;
- uint8 svc_prot;
- uint8 svc_tscid;
- uint8 query_data[1];
+ uint16 len; /* Length: 5 plus size of Query Data */
+ uint8 svc_prot; /* Service Protocol Type */
+ uint8 svc_tscid; /* Service Transaction ID */
+ uint8 query_data[1]; /* Query Data, passed in from above Layer 2 */
} BWL_POST_PACKED_STRUCT;
typedef struct wifi_p2psd_qreq_tlv wifi_p2psd_qreq_tlv_t;
-
+/* Query Request Frame, defined in generic format, instead of NQP specific */
BWL_PRE_PACKED_STRUCT struct wifi_p2psd_qreq_frame {
- uint16 info_id;
- uint16 len;
- uint8 oui[3];
- uint8 qreq_vsc[1];
+ uint16 info_id; /* Info ID: 0xDDDD */
+ uint16 len; /* Length of service request TLV, 5 plus the size of request data */
+ uint8 oui[3]; /* WFA OUI: 0x0050F2 */
+ uint8 qreq_vsc[1]; /* Vendor-specific Content: wifi_p2psd_nqp_query_vsc_t type for NQP */
} BWL_POST_PACKED_STRUCT;
typedef struct wifi_p2psd_qreq_frame wifi_p2psd_qreq_frame_t;
-
+/* GAS Initial Request AF body, "elts" in wifi_p2p_pub_act_frame */
BWL_PRE_PACKED_STRUCT struct wifi_p2psd_gas_ireq_frame {
- wifi_p2psd_adp_ie_t adp_ie;
- uint16 qreq_len;
- uint8 qreq_frm[1];
+ wifi_p2psd_adp_ie_t adp_ie; /* Advertisement Protocol IE */
+ uint16 qreq_len; /* Query Request Length */
+ uint8 qreq_frm[1]; /* Query Request Frame wifi_p2psd_qreq_frame_t */
} BWL_POST_PACKED_STRUCT;
typedef struct wifi_p2psd_gas_ireq_frame wifi_p2psd_gas_ireq_frame_t;
-
+/* Service Response TLV */
BWL_PRE_PACKED_STRUCT struct wifi_p2psd_qresp_tlv {
- uint16 len;
- uint8 svc_prot;
- uint8 svc_tscid;
- uint8 status;
- uint8 query_data[1];
+ uint16 len; /* Length: 5 plus size of Query Data */
+ uint8 svc_prot; /* Service Protocol Type */
+ uint8 svc_tscid; /* Service Transaction ID */
+ uint8 status; /* Value defined in Table 57 of P2P spec. */
+ uint8 query_data[1]; /* Response Data, passed in from above Layer 2 */
} BWL_POST_PACKED_STRUCT;
typedef struct wifi_p2psd_qresp_tlv wifi_p2psd_qresp_tlv_t;
-
+/* Query Response Frame, defined in generic format, instead of NQP specific */
BWL_PRE_PACKED_STRUCT struct wifi_p2psd_qresp_frame {
- uint16 info_id;
- uint16 len;
- uint8 oui[3];
- uint8 qresp_vsc[1];
+ uint16 info_id; /* Info ID: 0xDDDD */
+ uint16 len; /* Length of service response TLV, 6 plus the size of resp data */
+ uint8 oui[3]; /* WFA OUI: 0x0050F2 */
+ uint8 qresp_vsc[1]; /* Vendor-specific Content: wifi_p2psd_qresp_tlv_t type for NQP */
} BWL_POST_PACKED_STRUCT;
typedef struct wifi_p2psd_qresp_frame wifi_p2psd_qresp_frame_t;
-
+/* GAS Initial Response AF body, "elts" in wifi_p2p_pub_act_frame */
BWL_PRE_PACKED_STRUCT struct wifi_p2psd_gas_iresp_frame {
- uint16 status;
- uint16 cb_delay;
- wifi_p2psd_adp_ie_t adp_ie;
- uint16 qresp_len;
- uint8 qresp_frm[1];
+ uint16 status; /* Value defined in Table 7-23 of IEEE P802.11u */
+ uint16 cb_delay; /* GAS Comeback Delay */
+ wifi_p2psd_adp_ie_t adp_ie; /* Advertisement Protocol IE */
+ uint16 qresp_len; /* Query Response Length */
+ uint8 qresp_frm[1]; /* Query Response Frame wifi_p2psd_qresp_frame_t */
} BWL_POST_PACKED_STRUCT;
typedef struct wifi_p2psd_gas_iresp_frame wifi_p2psd_gas_iresp_frame_t;
-
+/* GAS Comeback Response AF body, "elts" in wifi_p2p_pub_act_frame */
BWL_PRE_PACKED_STRUCT struct wifi_p2psd_gas_cresp_frame {
- uint16 status;
- uint8 fragment_id;
- uint16 cb_delay;
- wifi_p2psd_adp_ie_t adp_ie;
- uint16 qresp_len;
- uint8 qresp_frm[1];
+ uint16 status; /* Value defined in Table 7-23 of IEEE P802.11u */
+ uint8 fragment_id; /* Fragmentation ID */
+ uint16 cb_delay; /* GAS Comeback Delay */
+ wifi_p2psd_adp_ie_t adp_ie; /* Advertisement Protocol IE */
+ uint16 qresp_len; /* Query Response Length */
+ uint8 qresp_frm[1]; /* Query Response Frame wifi_p2psd_qresp_frame_t */
} BWL_POST_PACKED_STRUCT;
typedef struct wifi_p2psd_gas_cresp_frame wifi_p2psd_gas_cresp_frame_t;
-
+/* Wi-Fi GAS Public Action Frame */
BWL_PRE_PACKED_STRUCT struct wifi_p2psd_gas_pub_act_frame {
- uint8 category;
- uint8 action;
- uint8 dialog_token;
- uint8 query_data[1];
+ uint8 category; /* 0x04 Public Action Frame */
+ uint8 action; /* 0x6c Advertisement Protocol */
+ uint8 dialog_token; /* nonzero, identifies req/rsp transaction */
+ uint8 query_data[1]; /* Query Data. wifi_p2psd_gas_ireq_frame_t
+ * or wifi_p2psd_gas_iresp_frame_t format
+ */
} BWL_POST_PACKED_STRUCT;
typedef struct wifi_p2psd_gas_pub_act_frame wifi_p2psd_gas_pub_act_frame_t;
-
+/* This marks the end of a packed structure section. */
#include <packed_section_end.h>
-#endif
+#endif /* _P2P_H_ */
#include <typedefs.h>
#endif
-
+/* This marks the start of a packed structure section. */
#include <packed_section_start.h>
#ifndef VLAN_VID_MASK
-#define VLAN_VID_MASK 0xfff
+#define VLAN_VID_MASK 0xfff /* low 12 bits are vlan id */
#endif
-#define VLAN_CFI_SHIFT 12
-#define VLAN_PRI_SHIFT 13
+#define VLAN_CFI_SHIFT 12 /* canonical format indicator bit */
+#define VLAN_PRI_SHIFT 13 /* user priority */
-#define VLAN_PRI_MASK 7
+#define VLAN_PRI_MASK 7 /* 3 bits of priority */
-#define VLAN_TPID_OFFSET 12
-#define VLAN_TCI_OFFSET 14
+#define VLAN_TPID_OFFSET 12 /* offset of tag protocol id field */
+#define VLAN_TCI_OFFSET 14 /* offset of tag ctrl info field */
#define VLAN_TAG_LEN 4
-#define VLAN_TAG_OFFSET (2 * ETHER_ADDR_LEN)
+#define VLAN_TAG_OFFSET (2 * ETHER_ADDR_LEN) /* offset in Ethernet II packet only */
-#define VLAN_TPID 0x8100
+#define VLAN_TPID 0x8100 /* VLAN ethertype/Tag Protocol ID */
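/* Editor's note: illustrative helper (not part of the original header) showing how the
 * shift/mask values above decompose a host-order VLAN TCI into priority, CFI and VID.
 */
static void vlan_tci_unpack(uint16 tci, uint8 *prio, uint8 *cfi, uint16 *vid)
{
	*prio = (tci >> VLAN_PRI_SHIFT) & VLAN_PRI_MASK;
	*cfi = (tci >> VLAN_CFI_SHIFT) & 1;
	*vid = tci & VLAN_VID_MASK;
}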
struct vlan_header {
- uint16 vlan_type;
- uint16 vlan_tag;
+ uint16 vlan_type; /* 0x8100 */
+ uint16 vlan_tag; /* priority, cfi and vid */
};
struct ethervlan_header {
uint8 ether_dhost[ETHER_ADDR_LEN];
uint8 ether_shost[ETHER_ADDR_LEN];
- uint16 vlan_type;
- uint16 vlan_tag;
+ uint16 vlan_type; /* 0x8100 */
+ uint16 vlan_tag; /* priority, cfi and vid */
uint16 ether_type;
};
struct dot3_mac_llc_snapvlan_header {
- uint8 ether_dhost[ETHER_ADDR_LEN];
- uint8 ether_shost[ETHER_ADDR_LEN];
- uint16 length;
- uint8 dsap;
- uint8 ssap;
- uint8 ctl;
- uint8 oui[3];
- uint16 vlan_type;
- uint16 vlan_tag;
- uint16 ether_type;
+ uint8 ether_dhost[ETHER_ADDR_LEN]; /* dest mac */
+ uint8 ether_shost[ETHER_ADDR_LEN]; /* src mac */
+ uint16 length; /* frame length incl header */
+ uint8 dsap; /* always 0xAA */
+ uint8 ssap; /* always 0xAA */
+ uint8 ctl; /* always 0x03 */
+ uint8 oui[3]; /* RFC1042: 0x00 0x00 0x00
+ * Bridge-Tunnel: 0x00 0x00 0xF8
+ */
+ uint16 vlan_type; /* 0x8100 */
+ uint16 vlan_tag; /* priority, cfi and vid */
+ uint16 ether_type; /* ethertype */
};
#define ETHERVLAN_HDR_LEN (ETHER_HDR_LEN + VLAN_TAG_LEN)
-
+/* This marks the end of a packed structure section. */
#include <packed_section_end.h>
#define ETHERVLAN_MOVE_HDR(d, s) \
*(struct ethervlan_header *)(d) = t; \
} while (0)
-#endif
+#endif /* _vlan_h_ */
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: wpa.h 384536 2013-02-12 04:13:09Z $
+ * $Id: wpa.h 450928 2014-01-23 14:13:38Z $
*/
#ifndef _proto_wpa_h_
#include <proto/ethernet.h>
-
+/* This marks the start of a packed structure section. */
#include <packed_section_start.h>
-
-
-
-#define DOT11_RC_INVALID_WPA_IE 13
-#define DOT11_RC_MIC_FAILURE 14
-#define DOT11_RC_4WH_TIMEOUT 15
-#define DOT11_RC_GTK_UPDATE_TIMEOUT 16
-#define DOT11_RC_WPA_IE_MISMATCH 17
-#define DOT11_RC_INVALID_MC_CIPHER 18
-#define DOT11_RC_INVALID_UC_CIPHER 19
-#define DOT11_RC_INVALID_AKMP 20
-#define DOT11_RC_BAD_WPA_VERSION 21
-#define DOT11_RC_INVALID_WPA_CAP 22
-#define DOT11_RC_8021X_AUTH_FAIL 23
+/* Reason Codes */
+
+/* 13 through 23 taken from IEEE Std 802.11i-2004 */
+#define DOT11_RC_INVALID_WPA_IE 13 /* Invalid info. element */
+#define DOT11_RC_MIC_FAILURE 14 /* Michael failure */
+#define DOT11_RC_4WH_TIMEOUT 15 /* 4-way handshake timeout */
+#define DOT11_RC_GTK_UPDATE_TIMEOUT 16 /* Group key update timeout */
+#define DOT11_RC_WPA_IE_MISMATCH 17 /* WPA IE in 4-way handshake differs from
+ * (re-)assoc. request/probe response
+ */
+#define DOT11_RC_INVALID_MC_CIPHER 18 /* Invalid multicast cipher */
+#define DOT11_RC_INVALID_UC_CIPHER 19 /* Invalid unicast cipher */
+#define DOT11_RC_INVALID_AKMP 20 /* Invalid authenticated key management protocol */
+#define DOT11_RC_BAD_WPA_VERSION 21 /* Unsupported WPA version */
+#define DOT11_RC_INVALID_WPA_CAP 22 /* Invalid WPA IE capabilities */
+#define DOT11_RC_8021X_AUTH_FAIL 23 /* 802.1X authentication failure */
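/* Editor's note: a hedged example (added by the editor) of mapping the 802.11i reason codes
 * above to short strings for debug logging; the strings are descriptive only and are not
 * defined by the original header.
 */
static const char *wpa_rc_str(uint16 rc)
{
	switch (rc) {
	case DOT11_RC_INVALID_WPA_IE:     return "invalid WPA IE";
	case DOT11_RC_MIC_FAILURE:        return "MIC failure";
	case DOT11_RC_4WH_TIMEOUT:        return "4-way handshake timeout";
	case DOT11_RC_GTK_UPDATE_TIMEOUT: return "group key update timeout";
	case DOT11_RC_WPA_IE_MISMATCH:    return "WPA IE mismatch";
	case DOT11_RC_INVALID_MC_CIPHER:  return "invalid multicast cipher";
	case DOT11_RC_INVALID_UC_CIPHER:  return "invalid unicast cipher";
	case DOT11_RC_INVALID_AKMP:       return "invalid AKMP";
	case DOT11_RC_BAD_WPA_VERSION:    return "unsupported WPA version";
	case DOT11_RC_INVALID_WPA_CAP:    return "invalid WPA capabilities";
	case DOT11_RC_8021X_AUTH_FAIL:    return "802.1X auth failure";
	default:                          return "unknown";
	}
}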
#define WPA2_PMKID_LEN 16
-
+/* WPA IE fixed portion */
typedef BWL_PRE_PACKED_STRUCT struct
{
- uint8 tag;
- uint8 length;
- uint8 oui[3];
- uint8 oui_type;
+ uint8 tag; /* TAG */
+ uint8 length; /* TAG length */
+ uint8 oui[3]; /* IE OUI */
+ uint8 oui_type; /* OUI type */
BWL_PRE_PACKED_STRUCT struct {
uint8 low;
uint8 high;
- } BWL_POST_PACKED_STRUCT version;
+ } BWL_POST_PACKED_STRUCT version; /* IE version */
} BWL_POST_PACKED_STRUCT wpa_ie_fixed_t;
#define WPA_IE_OUITYPE_LEN 4
#define WPA_IE_FIXED_LEN 8
#define WPA_IE_TAG_FIXED_LEN 6
typedef BWL_PRE_PACKED_STRUCT struct {
- uint8 tag;
- uint8 length;
+ uint8 tag; /* TAG */
+ uint8 length; /* TAG length */
BWL_PRE_PACKED_STRUCT struct {
uint8 low;
uint8 high;
- } BWL_POST_PACKED_STRUCT version;
+ } BWL_POST_PACKED_STRUCT version; /* IE version */
} BWL_POST_PACKED_STRUCT wpa_rsn_ie_fixed_t;
#define WPA_RSN_IE_FIXED_LEN 4
#define WPA_RSN_IE_TAG_FIXED_LEN 2
typedef uint8 wpa_pmkid_t[WPA2_PMKID_LEN];
+#define WFA_OSEN_IE_FIXED_LEN 6
+/* WPA suite/multicast suite */
typedef BWL_PRE_PACKED_STRUCT struct
{
uint8 oui[3];
} BWL_POST_PACKED_STRUCT wpa_suite_t, wpa_suite_mcast_t;
#define WPA_SUITE_LEN 4
-
+/* WPA unicast suite list/key management suite list */
typedef BWL_PRE_PACKED_STRUCT struct
{
BWL_PRE_PACKED_STRUCT struct {
wpa_pmkid_t list[1];
} BWL_POST_PACKED_STRUCT wpa_pmkid_list_t;
-
-#define WPA_CIPHER_NONE 0
-#define WPA_CIPHER_WEP_40 1
-#define WPA_CIPHER_TKIP 2
-#define WPA_CIPHER_AES_OCB 3
-#define WPA_CIPHER_AES_CCM 4
-#define WPA_CIPHER_WEP_104 5
-#define WPA_CIPHER_BIP 6
-#define WPA_CIPHER_TPK 7
+/* WPA cipher suites */
+#define WPA_CIPHER_NONE 0 /* None */
+#define WPA_CIPHER_WEP_40 1 /* WEP (40-bit) */
+#define WPA_CIPHER_TKIP 2 /* TKIP: default for WPA */
+#define WPA_CIPHER_AES_OCB 3 /* AES (OCB) */
+#define WPA_CIPHER_AES_CCM 4 /* AES (CCM) */
+#define WPA_CIPHER_WEP_104 5 /* WEP (104-bit) */
+#define WPA_CIPHER_BIP 6 /* BIP (Broadcast Integrity Protocol) */
+#define WPA_CIPHER_TPK 7 /* Group addressed traffic not allowed */
#ifdef BCMCCX
-#define WPA_CIPHER_CKIP 8
-#define WPA_CIPHER_CKIP_MMH 9
-#define WPA_CIPHER_WEP_MMH 10
+#define WPA_CIPHER_CKIP 8 /* KP with no MIC */
+#define WPA_CIPHER_CKIP_MMH 9 /* KP with MIC ("CKIP/MMH", "CKIP+CMIC") */
+#define WPA_CIPHER_WEP_MMH 10 /* MIC with no KP ("WEP/MMH", "CMIC") */
#define IS_CCX_CIPHER(cipher) ((cipher) == WPA_CIPHER_CKIP || \
(cipher) == WPA_CIPHER_CKIP_MMH || \
#define WAPI_CIPHER_SMS4 11
#define WAPI_CSE_WPI_SMS4 1
-#endif
+#endif /* BCMWAPI_WAI */
#define IS_WPA_CIPHER(cipher) ((cipher) == WPA_CIPHER_NONE || \
(cipher) == WPA_CIPHER_WEP_40 || \
#define IS_WAPI_CIPHER(cipher) ((cipher) == WAPI_CIPHER_NONE || \
(cipher) == WAPI_CSE_WPI_SMS4)
-
+/* convert WAPI_CSE_WPI_XXX to WAPI_CIPHER_XXX */
#define WAPI_CSE_WPI_2_CIPHER(cse) ((cse) == WAPI_CSE_WPI_SMS4 ? \
WAPI_CIPHER_SMS4 : WAPI_CIPHER_NONE)
#define WAPI_CIPHER_2_CSE_WPI(cipher) ((cipher) == WAPI_CIPHER_SMS4 ? \
WAPI_CSE_WPI_SMS4 : WAPI_CIPHER_NONE)
-#endif
+#endif /* BCMWAPI_WAI */
+/* WPA TKIP countermeasures parameters */
+#define WPA_TKIP_CM_DETECT 60 /* multiple MIC failure window (seconds) */
+#define WPA_TKIP_CM_BLOCK 60 /* countermeasures active window (seconds) */
-#define WPA_TKIP_CM_DETECT 60
-#define WPA_TKIP_CM_BLOCK 60
-
-
-#define RSN_CAP_LEN 2
-
+/* RSN IE defines */
+#define RSN_CAP_LEN 2 /* Length of RSN capabilities field (2 octets) */
+/* RSN Capabilities defined in 802.11i */
#define RSN_CAP_PREAUTH 0x0001
#define RSN_CAP_NOPAIRWISE 0x0002
#define RSN_CAP_PTK_REPLAY_CNTR_MASK 0x000C
#define RSN_CAP_SPPC 0x0400
#define RSN_CAP_SPPR 0x0800
-
+/* WPA capabilities defined in 802.11i */
#define WPA_CAP_4_REPLAY_CNTRS RSN_CAP_4_REPLAY_CNTRS
#define WPA_CAP_16_REPLAY_CNTRS RSN_CAP_16_REPLAY_CNTRS
#define WPA_CAP_REPLAY_CNTR_SHIFT RSN_CAP_PTK_REPLAY_CNTR_SHIFT
#define WPA_CAP_REPLAY_CNTR_MASK RSN_CAP_PTK_REPLAY_CNTR_MASK
+/* WPA capabilities defined in 802.11zD9.0 */
+#define WPA_CAP_PEER_KEY_ENABLE (0x1 << 1) /* bit 9 */
-#define WPA_CAP_PEER_KEY_ENABLE (0x1 << 1)
-
-
-#define WPA_CAP_LEN RSN_CAP_LEN
-#define WPA_PMKID_CNT_LEN 2
+/* WPA Specific defines */
+#define WPA_CAP_LEN RSN_CAP_LEN /* Length of RSN capabilities in RSN IE (2 octets) */
+#define WPA_PMKID_CNT_LEN 2 /* Length of RSN PMKID count (2 octets) */
#define WPA_CAP_WPA2_PREAUTH RSN_CAP_PREAUTH
#ifdef BCMWAPI_WAI
#define WAPI_CAP_PREAUTH RSN_CAP_PREAUTH
-
+/* Other WAI definition */
#define WAPI_WAI_REQUEST 0x00F1
#define WAPI_UNICAST_REKEY 0x00F2
#define WAPI_STA_AGING 0x00F3
#define WAPI_MUTIL_REKEY 0x00F4
#define WAPI_STA_STATS 0x00F5
-#define WAPI_USK_REKEY_COUNT 0x4000000
-#define WAPI_MSK_REKEY_COUNT 0x4000000
-#endif
-
+#define WAPI_USK_REKEY_COUNT 0x4000000 /* 0xA00000 */
+#define WAPI_MSK_REKEY_COUNT 0x4000000 /* 0xA00000 */
+#endif /* BCMWAPI_WAI */
+/* This marks the end of a packed structure section. */
#include <packed_section_end.h>
-#endif
+#endif /* _proto_wpa_h_ */
* JTAG, 0/1/2 UARTs, clock frequency control, a watchdog interrupt timer,
* GPIO interface, extbus, and support for serial and parallel flashes.
*
- * $Id: sbchipc.h 385540 2013-02-15 23:14:50Z $
+ * $Id: sbchipc.h 474281 2014-04-30 18:24:55Z $
*
* $Copyright Open Broadcom Corporation$
*/
#ifndef _SBCHIPC_H
#define _SBCHIPC_H
-#ifndef _LANGUAGE_ASSEMBLY
-
+#if !defined(_LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLY__)
+/* cpp contortions to concatenate w/arg prescan */
#ifndef PAD
#define _PADLINE(line) pad ## line
#define _XSTR(line) _PADLINE(line)
#define PAD _XSTR(__LINE__)
-#endif
+#endif /* PAD */
+
+/**
+ * In chipcommon rev 49 the pmu registers have been moved from chipc to the pmu core if the
+ * 'AOBPresent' bit of 'CoreCapabilitiesExt' is set. If this field is set, the traditional chipc to
+ * [pmu|gci|sreng] register interface is deprecated and removed. These register blocks would instead
+ * be assigned their respective chipc-specific address space and connected to the Always On
+ * Backplane via the APB interface.
+ */
+typedef volatile struct {
+ uint32 PAD[384];
+ uint32 pmucontrol; /* 0x600 */
+ uint32 pmucapabilities;
+ uint32 pmustatus;
+ uint32 res_state;
+ uint32 res_pending;
+ uint32 pmutimer;
+ uint32 min_res_mask;
+ uint32 max_res_mask;
+ uint32 res_table_sel;
+ uint32 res_dep_mask;
+ uint32 res_updn_timer;
+ uint32 res_timer;
+ uint32 clkstretch;
+ uint32 pmuwatchdog;
+ uint32 gpiosel; /* 0x638, rev >= 1 */
+ uint32 gpioenable; /* 0x63c, rev >= 1 */
+ uint32 res_req_timer_sel;
+ uint32 res_req_timer;
+ uint32 res_req_mask;
+ uint32 PAD;
+ uint32 chipcontrol_addr; /* 0x650 */
+ uint32 chipcontrol_data; /* 0x654 */
+ uint32 regcontrol_addr;
+ uint32 regcontrol_data;
+ uint32 pllcontrol_addr;
+ uint32 pllcontrol_data;
+ uint32 pmustrapopt; /* 0x668, corerev >= 28 */
+ uint32 pmu_xtalfreq; /* 0x66C, pmurev >= 10 */
+ uint32 retention_ctl; /* 0x670 */
+ uint32 PAD[3];
+ uint32 retention_grpidx; /* 0x680 */
+ uint32 retention_grpctl; /* 0x684 */
+ uint32 PAD[20];
+ uint32 pmucontrol_ext; /* 0x6d8 */
+ uint32 slowclkperiod; /* 0x6dc */
+ uint32 PAD[8];
+ uint32 pmuintmask0; /* 0x700 */
+ uint32 pmuintmask1; /* 0x704 */
+ uint32 PAD[14];
+ uint32 pmuintstatus; /* 0x740 */
+} pmuregs_t;
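/* Editor's note: sketch (not from the original source) of the register-block selection the
 * comment above describes: when the chipcommon capabilities extension reports AOB, the PMU
 * registers live in the separate pmuregs_t block; otherwise the legacy PMU window inside
 * chipcregs_t is used. CC_CAP_EXT_AOB_PRESENT is defined further down in this header.
 */
static int cc_pmu_uses_aob(uint32 capabilities_ext)
{
	return (capabilities_ext & CC_CAP_EXT_AOB_PRESENT) != 0;
}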
typedef struct eci_prerev35 {
uint32 eci_output;
typedef struct flash_config {
uint32 PAD[19];
-
+ /* Flash struct configuration registers (0x18c) for BCM4706 (corerev = 31) */
uint32 flashstrconfig;
} flash_config_t;
typedef volatile struct {
- uint32 chipid;
+ uint32 chipid; /* 0x0 */
uint32 capabilities;
- uint32 corecontrol;
+ uint32 corecontrol; /* corerev >= 1 */
uint32 bist;
-
- uint32 otpstatus;
+ /* OTP */
+ uint32 otpstatus; /* 0x10, corerev >= 10 */
uint32 otpcontrol;
uint32 otpprog;
- uint32 otplayout;
+ uint32 otplayout; /* corerev >= 23 */
-
- uint32 intstatus;
+ /* Interrupt control */
+ uint32 intstatus; /* 0x20 */
uint32 intmask;
-
- uint32 chipcontrol;
- uint32 chipstatus;
+ /* Chip specific regs */
+ uint32 chipcontrol; /* 0x28, rev >= 11 */
+ uint32 chipstatus; /* 0x2c, rev >= 11 */
-
- uint32 jtagcmd;
+ /* Jtag Master */
+ uint32 jtagcmd; /* 0x30, rev >= 10 */
uint32 jtagir;
uint32 jtagdr;
uint32 jtagctrl;
-
- uint32 flashcontrol;
+ /* serial flash interface registers */
+ uint32 flashcontrol; /* 0x40 */
uint32 flashaddress;
uint32 flashdata;
- uint32 otplayoutextension;
+ uint32 otplayoutextension; /* rev >= 35 */
-
- uint32 broadcastaddress;
+ /* Silicon backplane configuration broadcast control */
+ uint32 broadcastaddress; /* 0x50 */
uint32 broadcastdata;
-
- uint32 gpiopullup;
- uint32 gpiopulldown;
- uint32 gpioin;
- uint32 gpioout;
- uint32 gpioouten;
- uint32 gpiocontrol;
- uint32 gpiointpolarity;
- uint32 gpiointmask;
-
-
+ /* gpio - cleared only by power-on-reset */
+ uint32 gpiopullup; /* 0x58, corerev >= 20 */
+ uint32 gpiopulldown; /* 0x5c, corerev >= 20 */
+ uint32 gpioin; /* 0x60 */
+ uint32 gpioout; /* 0x64 */
+ uint32 gpioouten; /* 0x68 */
+ uint32 gpiocontrol; /* 0x6C */
+ uint32 gpiointpolarity; /* 0x70 */
+ uint32 gpiointmask; /* 0x74 */
+
+ /* GPIO events corerev >= 11 */
uint32 gpioevent;
uint32 gpioeventintmask;
-
- uint32 watchdog;
+ /* Watchdog timer */
+ uint32 watchdog; /* 0x80 */
-
+ /* GPIO events corerev >= 11 */
uint32 gpioeventintpolarity;
-
- uint32 gpiotimerval;
+ /* GPIO based LED powersave registers corerev >= 16 */
+ uint32 gpiotimerval; /* 0x88 */
uint32 gpiotimeroutmask;
-
- uint32 clockcontrol_n;
- uint32 clockcontrol_sb;
- uint32 clockcontrol_pci;
- uint32 clockcontrol_m2;
- uint32 clockcontrol_m3;
- uint32 clkdiv;
- uint32 gpiodebugsel;
- uint32 capabilities_ext;
-
-
- uint32 pll_on_delay;
+ /* clock control */
+ uint32 clockcontrol_n; /* 0x90 */
+ uint32 clockcontrol_sb; /* aka m0 */
+ uint32 clockcontrol_pci; /* aka m1 */
+ uint32 clockcontrol_m2; /* mii/uart/mipsref */
+ uint32 clockcontrol_m3; /* cpu */
+ uint32 clkdiv; /* corerev >= 3 */
+ uint32 gpiodebugsel; /* corerev >= 28 */
+ uint32 capabilities_ext; /* 0xac */
+
+ /* pll delay registers (corerev >= 4) */
+ uint32 pll_on_delay; /* 0xb0 */
uint32 fref_sel_delay;
- uint32 slow_clk_ctl;
+ uint32 slow_clk_ctl; /* 5 < corerev < 10 */
uint32 PAD;
-
- uint32 system_clk_ctl;
+ /* Instaclock registers (corerev >= 10) */
+ uint32 system_clk_ctl; /* 0xc0 */
uint32 clkstatestretch;
uint32 PAD[2];
-
- uint32 bp_addrlow;
+ /* Indirect backplane access (corerev >= 22) */
+ uint32 bp_addrlow; /* 0xd0 */
uint32 bp_addrhigh;
uint32 bp_data;
uint32 PAD;
uint32 bp_indaccess;
-
+ /* SPI registers, corerev >= 37 */
uint32 gsioctrl;
uint32 gsioaddress;
uint32 gsiodata;
-
+ /* More clock dividers (corerev >= 32) */
uint32 clkdiv2;
-
+ /* FAB ID (corerev >= 40) */
uint32 otpcontrol1;
- uint32 fabid;
+ uint32 fabid; /* 0xf8 */
-
- uint32 eromptr;
+ /* In AI chips, pointer to erom */
+ uint32 eromptr; /* 0xfc */
-
- uint32 pcmcia_config;
+ /* ExtBus control registers (corerev >= 3) */
+ uint32 pcmcia_config; /* 0x100 */
uint32 pcmcia_memwait;
uint32 pcmcia_attrwait;
uint32 pcmcia_iowait;
uint32 prog_waitcount;
uint32 flash_config;
uint32 flash_waitcount;
- uint32 SECI_config;
+ uint32 SECI_config; /* 0x130 SECI configuration */
uint32 SECI_status;
uint32 SECI_statusmask;
uint32 SECI_rxnibchanged;
uint32 PAD[20];
-
- uint32 sromcontrol;
+ /* SROM interface (corerev >= 32) */
+ uint32 sromcontrol; /* 0x190 */
uint32 sromaddress;
uint32 sromdata;
- uint32 PAD[1];
-
- uint32 nflashctrl;
- uint32 nflashconf;
- uint32 nflashcoladdr;
- uint32 nflashrowaddr;
- uint32 nflashdata;
- uint32 nflashwaitcnt0;
- uint32 PAD[2];
-
- uint32 seci_uart_data;
+ uint32 PAD[1]; /* 0x19C */
+ /* NAND flash registers for BCM4706 (corerev = 31) */
+ uint32 nflashctrl; /* 0x1a0 */
+ uint32 nflashconf;
+ uint32 nflashcoladdr;
+ uint32 nflashrowaddr;
+ uint32 nflashdata;
+ uint32 nflashwaitcnt0; /* 0x1b4 */
+ uint32 PAD[2];
+
+ uint32 seci_uart_data; /* 0x1C0 */
uint32 seci_uart_bauddiv;
uint32 seci_uart_fcr;
uint32 seci_uart_lcr;
uint32 seci_uart_lsr;
uint32 seci_uart_msr;
uint32 seci_uart_baudadj;
-
- uint32 clk_ctl_st;
+ /* Clock control and hardware workarounds (corerev >= 20) */
+ uint32 clk_ctl_st; /* 0x1e0 */
uint32 hw_war;
uint32 PAD[70];
-
- uint8 uart0data;
+ /* UARTs */
+ uint8 uart0data; /* 0x300 */
uint8 uart0imr;
uint8 uart0fcr;
uint8 uart0lcr;
uint8 uart0lsr;
uint8 uart0msr;
uint8 uart0scratch;
- uint8 PAD[248];
+ uint8 PAD[248]; /* corerev >= 1 */
- uint8 uart1data;
+ uint8 uart1data; /* 0x400 */
uint8 uart1imr;
uint8 uart1fcr;
uint8 uart1lcr;
uint8 uart1mcr;
uint8 uart1lsr;
uint8 uart1msr;
- uint8 uart1scratch;
- uint32 PAD[126];
-
-
-
- uint32 pmucontrol;
+ uint8 uart1scratch; /* 0x407 */
+ uint32 PAD[62];
+
+ /* save/restore, corerev >= 48 */
+ uint32 sr_capability; /* 0x500 */
+ uint32 sr_control0; /* 0x504 */
+ uint32 sr_control1; /* 0x508 */
+ uint32 gpio_control; /* 0x50C */
+ uint32 PAD[60];
+
+ /* PMU registers (corerev >= 20) */
+ /* Note: all timers driven by ILP clock are updated asynchronously to HT/ALP.
+ * The CPU must read them twice, compare, and retry if different.
+ */
+ uint32 pmucontrol; /* 0x600 */
uint32 pmucapabilities;
uint32 pmustatus;
uint32 res_state;
uint32 res_timer;
uint32 clkstretch;
uint32 pmuwatchdog;
- uint32 gpiosel;
- uint32 gpioenable;
+ uint32 gpiosel; /* 0x638, rev >= 1 */
+ uint32 gpioenable; /* 0x63c, rev >= 1 */
uint32 res_req_timer_sel;
uint32 res_req_timer;
uint32 res_req_mask;
uint32 PAD;
- uint32 chipcontrol_addr;
- uint32 chipcontrol_data;
+ uint32 chipcontrol_addr; /* 0x650 */
+ uint32 chipcontrol_data; /* 0x654 */
uint32 regcontrol_addr;
uint32 regcontrol_data;
uint32 pllcontrol_addr;
uint32 pllcontrol_data;
- uint32 pmustrapopt;
- uint32 pmu_xtalfreq;
- uint32 retention_ctl;
+ uint32 pmustrapopt; /* 0x668, corerev >= 28 */
+ uint32 pmu_xtalfreq; /* 0x66C, pmurev >= 10 */
+ uint32 retention_ctl; /* 0x670 */
uint32 PAD[3];
- uint32 retention_grpidx;
- uint32 retention_grpctl;
- uint32 PAD[94];
- uint16 sromotp[512];
+ uint32 retention_grpidx; /* 0x680 */
+ uint32 retention_grpctl; /* 0x684 */
+ uint32 PAD[20];
+ uint32 pmucontrol_ext; /* 0x6d8 */
+ uint32 slowclkperiod; /* 0x6dc */
+ uint32 PAD[8];
+ uint32 pmuintmask0; /* 0x700 */
+ uint32 pmuintmask1; /* 0x704 */
+ uint32 PAD[14];
+ uint32 pmuintstatus; /* 0x740 */
+ uint32 PAD[47];
+ uint16 sromotp[512]; /* 0x800 */
#ifdef NFLASH_SUPPORT
-
- uint32 nand_revision;
+ /* Nand flash MLC controller registers (corerev >= 38) */
+ uint32 nand_revision; /* 0xC00 */
uint32 nand_cmd_start;
uint32 nand_cmd_addr_x;
uint32 nand_cmd_addr;
uint32 nand_cache_data;
uint32 nand_ctrl_config;
uint32 nand_ctrl_status;
-#endif
- uint32 gci_corecaps0;
+#endif /* NFLASH_SUPPORT */
+ uint32 gci_corecaps0; /* GCI starting at 0xC00 */
uint32 gci_corecaps1;
uint32 gci_corecaps2;
uint32 gci_corectrl;
- uint32 gci_corestat;
- uint32 gci_intstat;
- uint32 gci_intmask;
- uint32 gci_wakemask;
- uint32 gci_levelintstat;
- uint32 gci_eventintstat;
+ uint32 gci_corestat; /* 0xC10 */
+ uint32 gci_intstat; /* 0xC14 */
+ uint32 gci_intmask; /* 0xC18 */
+ uint32 gci_wakemask; /* 0xC1C */
+ uint32 gci_levelintstat; /* 0xC20 */
+ uint32 gci_eventintstat; /* 0xC24 */
uint32 PAD[6];
- uint32 gci_indirect_addr;
- uint32 gci_gpioctl;
- uint32 PAD;
- uint32 gci_gpiomask;
+ uint32 gci_indirect_addr; /* 0xC40 */
+ uint32 gci_gpioctl; /* 0xC44 */
+ uint32 gci_gpiostatus;
+ uint32 gci_gpiomask; /* 0xC4C */
uint32 PAD;
- uint32 gci_miscctl;
- uint32 PAD[2];
- uint32 gci_input[32];
- uint32 gci_event[32];
- uint32 gci_output[4];
- uint32 gci_control_0;
- uint32 gci_control_1;
- uint32 gci_level_polreg;
- uint32 gci_levelintmask;
- uint32 gci_eventintmask;
- uint32 PAD[3];
- uint32 gci_inbandlevelintmask;
- uint32 gci_inbandeventintmask;
- uint32 PAD[2];
- uint32 gci_seciauxtx;
- uint32 gci_seciauxrx;
- uint32 gci_secitx_datatag;
- uint32 gci_secirx_datatag;
- uint32 gci_secitx_datamask;
- uint32 gci_seciusef0tx_reg;
- uint32 gci_secif0tx_offset;
- uint32 gci_secif0rx_offset;
- uint32 gci_secif1tx_offset;
- uint32 PAD[3];
- uint32 gci_uartescval;
+ uint32 gci_miscctl; /* 0xC54 */
+ uint32 gci_gpiointmask;
+ uint32 gci_gpiowakemask;
+ uint32 gci_input[32]; /* C60 */
+ uint32 gci_event[32]; /* CE0 */
+ uint32 gci_output[4]; /* D60 */
+ uint32 gci_control_0; /* 0xD70 */
+ uint32 gci_control_1; /* 0xD74 */
+ uint32 gci_intpolreg; /* 0xD78 */
+ uint32 gci_levelintmask; /* 0xD7C */
+ uint32 gci_eventintmask; /* 0xD80 */
uint32 PAD[3];
- uint32 gci_secibauddiv;
- uint32 gci_secifcr;
- uint32 gci_secilcr;
- uint32 gci_secimcr;
+ uint32 gci_inbandlevelintmask; /* 0xD90 */
+ uint32 gci_inbandeventintmask; /* 0xD94 */
uint32 PAD[2];
- uint32 gci_baudadj;
+ uint32 gci_seciauxtx; /* 0xDA0 */
+ uint32 gci_seciauxrx; /* 0xDA4 */
+ uint32 gci_secitx_datatag; /* 0xDA8 */
+ uint32 gci_secirx_datatag; /* 0xDAC */
+ uint32 gci_secitx_datamask; /* 0xDB0 */
+ uint32 gci_seciusef0tx_reg; /* 0xDB4 */
+ uint32 gci_secif0tx_offset; /* 0xDB8 */
+ uint32 gci_secif0rx_offset; /* 0xDBC */
+ uint32 gci_secif1tx_offset; /* 0xDC0 */
+ uint32 gci_rxfifo_common_ctrl; /* 0xDC4 */
+ uint32 gci_rxfifoctrl; /* 0xDC8 */
+ uint32 gci_uartreadid; /* DCC */
+ uint32 gci_uartescval; /* DD0 */
+ uint32 PAD;
+ uint32 gci_secififolevel; /* DD8 */
+ uint32 gci_seciuartdata; /* DDC */
+ uint32 gci_secibauddiv; /* DE0 */
+ uint32 gci_secifcr; /* DE4 */
+ uint32 gci_secilcr; /* DE8 */
+ uint32 gci_secimcr; /* DEC */
+ uint32 gci_secilsr; /* DF0 */
+ uint32 gci_secimsr; /* DF4 */
+ uint32 gci_baudadj; /* DF8 */
uint32 PAD;
- uint32 gci_chipctrl;
- uint32 gci_chipsts;
+ uint32 gci_chipctrl; /* 0xE00 */
+ uint32 gci_chipsts; /* 0xE04 */
+ uint32 gci_gpioout; /* 0xE08 */
+ uint32 gci_gpioout_read; /* 0xE0C */
+ uint32 gci_mpwaketx; /* 0xE10 */
+ uint32 gci_mpwakedetect; /* 0xE14 */
+ uint32 gci_seciin_ctrl; /* 0xE18 */
+ uint32 gci_seciout_ctrl; /* 0xE1C */
+ uint32 gci_seciin_auxfifo_en; /* 0xE20 */
+ uint32 gci_seciout_txen_txbr; /* 0xE24 */
+ uint32 gci_seciin_rxbrstatus; /* 0xE28 */
+ uint32 gci_seciin_rxerrstatus; /* 0xE2C */
+ uint32 gci_seciin_fcstatus; /* 0xE30 */
+ uint32 gci_seciout_txstatus; /* 0xE34 */
+ uint32 gci_seciout_txbrstatus; /* 0xE38 */
} chipcregs_t;
-#endif
+#endif /* !_LANGUAGE_ASSEMBLY && !__ASSEMBLY__ */
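/* Editor's note: minimal sketch (added by the editor) of the read-twice-and-compare pattern
 * the PMU comment above calls for when sampling ILP-driven timers such as pmutimer, which
 * update asynchronously to the HT/ALP domain. R_REG is assumed to be the usual OSL register
 * read accessor used by this driver.
 */
static uint32 pmu_read_ilp_timer(osl_t *osh, volatile uint32 *reg)
{
	uint32 a, b;

	do {
		a = R_REG(osh, reg);
		b = R_REG(osh, reg);
	} while (a != b); /* retry until two consecutive reads agree */

	return a;
}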
#define CC_CHIPID 0
#define CC_CHIPST 0x2c
#define CC_EROMPTR 0xfc
-#define CC_OTPST 0x10
+#define CC_OTPST 0x10
+#define CC_INTSTATUS 0x20
+#define CC_INTMASK 0x24
#define CC_JTAGCMD 0x30
#define CC_JTAGIR 0x34
#define CC_JTAGDR 0x38
#define CC_GPIOCTRL 0x6c
#define CC_GPIOPOL 0x70
#define CC_GPIOINTM 0x74
+#define CC_GPIOEVENT 0x78
+#define CC_GPIOEVENTMASK 0x7c
#define CC_WATCHDOG 0x80
+#define CC_GPIOEVENTPOL 0x84
#define CC_CLKC_N 0x90
#define CC_CLKC_M0 0x94
#define CC_CLKC_M1 0x98
#define PMU_CAP 0x604
#define PMU_ST 0x608
#define PMU_RES_STATE 0x60c
+#define PMU_RES_PENDING 0x610
#define PMU_TIMER 0x614
#define PMU_MIN_RES_MASK 0x618
#define PMU_MAX_RES_MASK 0x61c
#define PMU_REG_CONTROL_DATA 0x65C
#define PMU_PLL_CONTROL_ADDR 0x660
#define PMU_PLL_CONTROL_DATA 0x664
-#define CC_SROM_OTP 0x800
+#define CC_SROM_CTRL 0x190
+#define CC_SROM_OTP 0x800 /* SROM/OTP address space */
#define CC_GCI_INDIRECT_ADDR_REG 0xC40
#define CC_GCI_CHIP_CTRL_REG 0xE00
#define CC_GCI_CC_OFFSET_2 2
#define CC_GCI_CC_OFFSET_5 5
+#define CC_SWD_CTRL 0x380
+#define CC_SWD_REQACK 0x384
+#define CC_SWD_DATA 0x388
+
+
+#define CHIPCTRLREG0 0x0
+#define CHIPCTRLREG1 0x1
+#define CHIPCTRLREG2 0x2
+#define CHIPCTRLREG3 0x3
+#define CHIPCTRLREG4 0x4
+#define CHIPCTRLREG5 0x5
+#define CHIPCTRLREG6 0x6
+#define REGCTRLREG4 0x4
+#define REGCTRLREG5 0x5
+#define REGCTRLREG6 0x6
+#define MINRESMASKREG 0x618
+#define MAXRESMASKREG 0x61c
+#define CHIPCTRLADDR 0x650
+#define CHIPCTRLDATA 0x654
+#define RSRCTABLEADDR 0x620
+#define PMU_RES_DEP_MASK 0x624
+#define RSRCUPDWNTIME 0x628
+#define PMUREG_RESREQ_MASK 0x68c
+#define EXT_LPO_AVAIL 0x100
+#define LPO_SEL (1 << 0)
+#define CC_EXT_LPO_PU 0x200000
+#define GC_EXT_LPO_PU 0x2
+#define CC_INT_LPO_PU 0x100000
+#define GC_INT_LPO_PU 0x1
+#define EXT_LPO_SEL 0x8
+#define INT_LPO_SEL 0x4
+#define ENABLE_FINE_CBUCK_CTRL (1 << 30)
+#define REGCTRL5_PWM_AUTO_CTRL_MASK 0x007e0000
+#define REGCTRL5_PWM_AUTO_CTRL_SHIFT 17
+#define REGCTRL6_PWM_AUTO_CTRL_MASK 0x3fff0000
+#define REGCTRL6_PWM_AUTO_CTRL_SHIFT 16
+
+#ifdef SR_DEBUG
+#define SUBCORE_POWER_ON 0x0001
+#define PHY_POWER_ON 0x0010
+#define VDDM_POWER_ON 0x0100
+#define MEMLPLDO_POWER_ON 0x1000
+#define SUBCORE_POWER_ON_CHK 0x00040000
+#define PHY_POWER_ON_CHK 0x00080000
+#define VDDM_POWER_ON_CHK 0x00100000
+#define MEMLPLDO_POWER_ON_CHK 0x00200000
+#endif /* SR_DEBUG */
#ifdef NFLASH_SUPPORT
-
+/* NAND flash support */
#define CC_NAND_REVISION 0xC00
#define CC_NAND_CMD_START 0xC04
#define CC_NAND_CMD_ADDR 0xC0C
#define CC_NAND_DEVID 0xC60
#define CC_NAND_DEVID_EXT 0xC64
#define CC_NAND_INTFC_STATUS 0xC6C
-#endif
-
-
-#define CID_ID_MASK 0x0000ffff
-#define CID_REV_MASK 0x000f0000
-#define CID_REV_SHIFT 16
-#define CID_PKG_MASK 0x00f00000
-#define CID_PKG_SHIFT 20
-#define CID_CC_MASK 0x0f000000
+#endif /* NFLASH_SUPPORT */
+
+/* chipid */
+#define CID_ID_MASK 0x0000ffff /* Chip Id mask */
+#define CID_REV_MASK 0x000f0000 /* Chip Revision mask */
+#define CID_REV_SHIFT 16 /* Chip Revision shift */
+#define CID_PKG_MASK 0x00f00000 /* Package Option mask */
+#define CID_PKG_SHIFT 20 /* Package Option shift */
+#define CID_CC_MASK 0x0f000000 /* CoreCount (corerev >= 4) */
#define CID_CC_SHIFT 24
-#define CID_TYPE_MASK 0xf0000000
+#define CID_TYPE_MASK 0xf0000000 /* Chip Type */
#define CID_TYPE_SHIFT 28
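/* Editor's note: illustrative decode (not part of the original header) of the chipid
 * register into chip id, revision and package option using the CID_* masks and shifts above.
 */
static void cc_decode_chipid(uint32 chipid, uint32 *chip, uint32 *rev, uint32 *pkg)
{
	*chip = chipid & CID_ID_MASK;
	*rev = (chipid & CID_REV_MASK) >> CID_REV_SHIFT;
	*pkg = (chipid & CID_PKG_MASK) >> CID_PKG_SHIFT;
}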
-
-#define CC_CAP_UARTS_MASK 0x00000003
-#define CC_CAP_MIPSEB 0x00000004
-#define CC_CAP_UCLKSEL 0x00000018
-#define CC_CAP_UINTCLK 0x00000008
-#define CC_CAP_UARTGPIO 0x00000020
-#define CC_CAP_EXTBUS_MASK 0x000000c0
-#define CC_CAP_EXTBUS_NONE 0x00000000
-#define CC_CAP_EXTBUS_FULL 0x00000040
-#define CC_CAP_EXTBUS_PROG 0x00000080
-#define CC_CAP_FLASH_MASK 0x00000700
-#define CC_CAP_PLL_MASK 0x00038000
-#define CC_CAP_PWR_CTL 0x00040000
-#define CC_CAP_OTPSIZE 0x00380000
-#define CC_CAP_OTPSIZE_SHIFT 19
-#define CC_CAP_OTPSIZE_BASE 5
-#define CC_CAP_JTAGP 0x00400000
-#define CC_CAP_ROM 0x00800000
-#define CC_CAP_BKPLN64 0x08000000
-#define CC_CAP_PMU 0x10000000
-#define CC_CAP_ECI 0x20000000
-#define CC_CAP_SROM 0x40000000
-#define CC_CAP_NFLASH 0x80000000
-
-#define CC_CAP2_SECI 0x00000001
-#define CC_CAP2_GSIO 0x00000002
-
-
-#define CC_CAP_EXT_SECI_PRESENT 0x00000001
-#define CC_CAP_EXT_GCI_PRESENT 0x00000004
-
-
+/* capabilities */
+#define CC_CAP_UARTS_MASK 0x00000003 /* Number of UARTs */
+#define CC_CAP_MIPSEB 0x00000004 /* MIPS is in big-endian mode */
+#define CC_CAP_UCLKSEL 0x00000018 /* UARTs clock select */
+#define CC_CAP_UINTCLK 0x00000008 /* UARTs are driven by internal divided clock */
+#define CC_CAP_UARTGPIO 0x00000020 /* UARTs own GPIOs 15:12 */
+#define CC_CAP_EXTBUS_MASK 0x000000c0 /* External bus mask */
+#define CC_CAP_EXTBUS_NONE 0x00000000 /* No ExtBus present */
+#define CC_CAP_EXTBUS_FULL 0x00000040 /* ExtBus: PCMCIA, IDE & Prog */
+#define CC_CAP_EXTBUS_PROG 0x00000080 /* ExtBus: ProgIf only */
+#define CC_CAP_FLASH_MASK 0x00000700 /* Type of flash */
+#define CC_CAP_PLL_MASK 0x00038000 /* Type of PLL */
+#define CC_CAP_PWR_CTL 0x00040000 /* Power control */
+#define CC_CAP_OTPSIZE 0x00380000 /* OTP Size (0 = none) */
+#define CC_CAP_OTPSIZE_SHIFT 19 /* OTP Size shift */
+#define CC_CAP_OTPSIZE_BASE 5 /* OTP Size base */
+#define CC_CAP_JTAGP 0x00400000 /* JTAG Master Present */
+#define CC_CAP_ROM 0x00800000 /* Internal boot rom active */
+#define CC_CAP_BKPLN64 0x08000000 /* 64-bit backplane */
+#define CC_CAP_PMU 0x10000000 /* PMU Present, rev >= 20 */
+#define CC_CAP_ECI 0x20000000 /* ECI Present, rev >= 21 */
+#define CC_CAP_SROM 0x40000000 /* Srom Present, rev >= 32 */
+#define CC_CAP_NFLASH 0x80000000 /* Nand flash present, rev >= 35 */
+
+#define CC_CAP2_SECI 0x00000001 /* SECI Present, rev >= 36 */
+#define CC_CAP2_GSIO 0x00000002 /* GSIO (spi/i2c) present, rev >= 37 */
+
+/* capabilities extension */
+#define CC_CAP_EXT_SECI_PRESENT 0x00000001 /* SECI present */
+#define CC_CAP_EXT_GSIO_PRESENT 0x00000002 /* GSIO present */
+#define CC_CAP_EXT_GCI_PRESENT 0x00000004 /* GCI present */
+#define CC_CAP_EXT_AOB_PRESENT 0x00000040 /* AOB present */
+
+/* WL Channel Info to BT via GCI - bits 40 - 47 */
#define GCI_WL_CHN_INFO_MASK (0xFF00)
-
+/* PLL type */
#define PLL_NONE 0x00000000
-#define PLL_TYPE1 0x00010000
-#define PLL_TYPE2 0x00020000
-#define PLL_TYPE3 0x00030000
-#define PLL_TYPE4 0x00008000
-#define PLL_TYPE5 0x00018000
-#define PLL_TYPE6 0x00028000
-#define PLL_TYPE7 0x00038000
-
-
+#define PLL_TYPE1 0x00010000 /* 48MHz base, 3 dividers */
+#define PLL_TYPE2 0x00020000 /* 48MHz, 4 dividers */
+#define PLL_TYPE3 0x00030000 /* 25MHz, 2 dividers */
+#define PLL_TYPE4 0x00008000 /* 48MHz, 4 dividers */
+#define PLL_TYPE5 0x00018000 /* 25MHz, 4 dividers */
+#define PLL_TYPE6 0x00028000 /* 100/200 or 120/240 only */
+#define PLL_TYPE7 0x00038000 /* 25MHz, 4 dividers */
+
+/* ILP clock */
#define ILP_CLOCK 32000
-
+/* ALP clock on pre-PMU chips */
#define ALP_CLOCK 20000000
#ifdef CFG_SIM
#define NS_FAST_MEM_CLOCK 800000000
#define NS_MEM_CLOCK 533000000
#define NS_SLOW_MEM_CLOCK 400000000
-#endif
-
+#endif /* CFG_SIM */
+/* HT clock */
#define HT_CLOCK 80000000
+/* corecontrol */
+#define CC_UARTCLKO 0x00000001 /* Drive UART with internal clock */
+#define CC_SE 0x00000002 /* sync clk out enable (corerev >= 3) */
+#define CC_ASYNCGPIO 0x00000004 /* 1=generate GPIO interrupt without backplane clock */
+#define CC_UARTCLKEN 0x00000008 /* enable UART Clock (corerev >= 21) */
-#define CC_UARTCLKO 0x00000001
-#define CC_SE 0x00000002
-#define CC_ASYNCGPIO 0x00000004
-#define CC_UARTCLKEN 0x00000008
-
-
+/* 4321 chipcontrol */
#define CHIPCTRL_4321A0_DEFAULT 0x3a4
#define CHIPCTRL_4321A1_DEFAULT 0x0a4
-#define CHIPCTRL_4321_PLL_DOWN 0x800000
-
+#define CHIPCTRL_4321_PLL_DOWN 0x800000 /* serdes PLL down override */
+/* Fields in the otpstatus register in rev >= 21 */
#define OTPS_OL_MASK 0x000000ff
-#define OTPS_OL_MFG 0x00000001
-#define OTPS_OL_OR1 0x00000002
-#define OTPS_OL_OR2 0x00000004
-#define OTPS_OL_GU 0x00000008
+#define OTPS_OL_MFG 0x00000001 /* manuf row is locked */
+#define OTPS_OL_OR1 0x00000002 /* otp redundancy row 1 is locked */
+#define OTPS_OL_OR2 0x00000004 /* otp redundancy row 2 is locked */
+#define OTPS_OL_GU 0x00000008 /* general use region is locked */
#define OTPS_GUP_MASK 0x00000f00
#define OTPS_GUP_SHIFT 8
-#define OTPS_GUP_HW 0x00000100
-#define OTPS_GUP_SW 0x00000200
-#define OTPS_GUP_CI 0x00000400
-#define OTPS_GUP_FUSE 0x00000800
+#define OTPS_GUP_HW 0x00000100 /* h/w subregion is programmed */
+#define OTPS_GUP_SW 0x00000200 /* s/w subregion is programmed */
+#define OTPS_GUP_CI 0x00000400 /* chipid/pkgopt subregion is programmed */
+#define OTPS_GUP_FUSE 0x00000800 /* fuse subregion is programmed */
#define OTPS_READY 0x00001000
-#define OTPS_RV(x) (1 << (16 + (x)))
+#define OTPS_RV(x) (1 << (16 + (x))) /* redundancy entry valid */
#define OTPS_RV_MASK 0x0fff0000
#define OTPS_PROGOK 0x40000000
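/* Editor's note: small sketch (added by the editor) of checking, via the otpstatus bits
 * above, that the OTP is ready and that the general-use hardware subregion has been
 * programmed before trusting its contents.
 */
static int otp_hw_region_programmed(uint32 otpstatus)
{
	return (otpstatus & OTPS_READY) && (otpstatus & OTPS_GUP_HW);
}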
-
+/* Fields in the otpcontrol register in rev >= 21 */
#define OTPC_PROGSEL 0x00000001
#define OTPC_PCOUNT_MASK 0x0000000e
#define OTPC_PCOUNT_SHIFT 1
#define OTPC_ODM 0x00000800
#define OTPC_PROGEN 0x80000000
-
+/* Fields in the 40nm otpcontrol register in rev >= 40 */
#define OTPC_40NM_PROGSEL_SHIFT 0
#define OTPC_40NM_PCOUNT_SHIFT 1
#define OTPC_40NM_PCOUNT_WR 0xA
#define OTPC1_TM_WR 0x84
#define OTPC1_TM_V1X 0x84
#define OTPC1_TM_R1X 0x4
+#define OTPC1_CLK_EN_MASK 0x00020000
+#define OTPC1_CLK_DIV_MASK 0x00FC0000
-
+/* Fields in otpprog in rev >= 21 and HND OTP */
#define OTPP_COL_MASK 0x000000ff
#define OTPP_COL_SHIFT 0
#define OTPP_ROW_MASK 0x0000ff00
+#define OTPP_ROW_MASK9 0x0001ff00 /* for ccrev >= 49 */
#define OTPP_ROW_SHIFT 8
#define OTPP_OC_MASK 0x0f000000
#define OTPP_OC_SHIFT 24
#define OTPP_VALUE_MASK 0x20000000
#define OTPP_VALUE_SHIFT 29
#define OTPP_START_BUSY 0x80000000
-#define OTPP_READ 0x40000000
-
+#define OTPP_READ 0x40000000 /* HND OTP */
+/* Fields in otplayout register */
#define OTPL_HWRGN_OFF_MASK 0x00000FFF
#define OTPL_HWRGN_OFF_SHIFT 0
#define OTPL_WRAP_REVID_MASK 0x00F80000
#define OTPL_WRAP_TYPE_SHIFT 16
#define OTPL_WRAP_TYPE_65NM 0
#define OTPL_WRAP_TYPE_40NM 1
+#define OTPL_ROW_SIZE_MASK 0x0000F000
+#define OTPL_ROW_SIZE_SHIFT 12
-
+/* otplayout reg corerev >= 36 */
#define OTP_CISFORMAT_NEW 0x80000000
-
+/* Opcodes for OTPP_OC field */
#define OTPPOC_READ 0
#define OTPPOC_BIT_PROG 1
#define OTPPOC_VERIFY 3
#define OTPPOC_ROW_LOCK 8
#define OTPPOC_PRESCN_TEST 9
-
+/* Opcodes for OTPP_OC field (40NM) */
#define OTPPOC_READ_40NM 0
#define OTPPOC_PROG_ENABLE_40NM 1
#define OTPPOC_PROG_DISABLE_40NM 2
#define OTPPOC_OVST_READ_40NM 14
#define OTPPOC_OVST_PROG_40NM 15
-
+/* Fields in otplayoutextension */
#define OTPLAYOUTEXT_FUSE_MASK 0x3FF
+/* Jtagm characteristics that appeared at a given corerev */
+#define JTAGM_CREV_OLD 10 /* Old command set, 16bit max IR */
+#define JTAGM_CREV_IRP 22 /* Able to do pause-ir */
+#define JTAGM_CREV_RTI 28 /* Able to do return-to-idle */
-#define JTAGM_CREV_OLD 10
-#define JTAGM_CREV_IRP 22
-#define JTAGM_CREV_RTI 28
-
-
+/* jtagcmd */
#define JCMD_START 0x80000000
#define JCMD_BUSY 0x80000000
#define JCMD_STATE_MASK 0x60000000
-#define JCMD_STATE_TLR 0x00000000
-#define JCMD_STATE_PIR 0x20000000
-#define JCMD_STATE_PDR 0x40000000
-#define JCMD_STATE_RTI 0x60000000
+#define JCMD_STATE_TLR 0x00000000 /* Test-logic-reset */
+#define JCMD_STATE_PIR 0x20000000 /* Pause IR */
+#define JCMD_STATE_PDR 0x40000000 /* Pause DR */
+#define JCMD_STATE_RTI 0x60000000 /* Run-test-idle */
#define JCMD0_ACC_MASK 0x0000f000
#define JCMD0_ACC_IRDR 0x00000000
#define JCMD0_ACC_DR 0x00001000
#define JCMD0_ACC_IRPDR 0x00004000
#define JCMD0_ACC_PDR 0x00005000
#define JCMD0_IRW_MASK 0x00000f00
-#define JCMD_ACC_MASK 0x000f0000
+#define JCMD_ACC_MASK 0x000f0000 /* Changes for corerev 11 */
#define JCMD_ACC_IRDR 0x00000000
#define JCMD_ACC_DR 0x00010000
#define JCMD_ACC_IR 0x00020000
#define JCMD_ACC_IRPDR 0x00040000
#define JCMD_ACC_PDR 0x00050000
#define JCMD_ACC_PIR 0x00060000
-#define JCMD_ACC_IRDR_I 0x00070000
-#define JCMD_ACC_DR_I 0x00080000
+#define JCMD_ACC_IRDR_I 0x00070000 /* rev 28: return to run-test-idle */
+#define JCMD_ACC_DR_I 0x00080000 /* rev 28: return to run-test-idle */
#define JCMD_IRW_MASK 0x00001f00
#define JCMD_IRW_SHIFT 8
#define JCMD_DRW_MASK 0x0000003f
+/* jtagctrl */
+#define JCTRL_FORCE_CLK 4 /* Force clock */
+#define JCTRL_EXT_EN 2 /* Enable external targets */
+#define JCTRL_EN 1 /* Enable Jtag master */
-#define JCTRL_FORCE_CLK 4
-#define JCTRL_EXT_EN 2
-#define JCTRL_EN 1
-
+#define JCTRL_TAPSEL_BIT 0x00000008 /* JtagMasterCtrl tap_sel bit */
+/* Fields in clkdiv */
#define CLKD_SFLASH 0x0f000000
#define CLKD_SFLASH_SHIFT 24
#define CLKD_OTP 0x000f0000
#define CLKD2_SROM 0x00000003
-
-#define CI_GPIO 0x00000001
-#define CI_EI 0x00000002
-#define CI_TEMP 0x00000004
-#define CI_SIRQ 0x00000008
-#define CI_ECI 0x00000010
-#define CI_PMU 0x00000020
-#define CI_UART 0x00000040
-#define CI_WDRESET 0x80000000
-
-
-#define SCC_SS_MASK 0x00000007
-#define SCC_SS_LPO 0x00000000
-#define SCC_SS_XTAL 0x00000001
-#define SCC_SS_PCI 0x00000002
-#define SCC_LF 0x00000200
-#define SCC_LP 0x00000400
-#define SCC_FS 0x00000800
-#define SCC_IP 0x00001000
-#define SCC_XC 0x00002000
-#define SCC_XP 0x00004000
-#define SCC_CD_MASK 0xffff0000
+/* intstatus/intmask */
+#define CI_GPIO 0x00000001 /* gpio intr */
+#define CI_EI 0x00000002 /* extif intr (corerev >= 3) */
+#define CI_TEMP 0x00000004 /* temp. ctrl intr (corerev >= 15) */
+#define CI_SIRQ 0x00000008 /* serial IRQ intr (corerev >= 15) */
+#define CI_ECI 0x00000010 /* eci intr (corerev >= 21) */
+#define CI_PMU 0x00000020 /* pmu intr (corerev >= 21) */
+#define CI_UART 0x00000040 /* uart intr (corerev >= 21) */
+#define CI_WDRESET 0x80000000 /* watchdog reset occurred */
+
+/* slow_clk_ctl */
+#define SCC_SS_MASK 0x00000007 /* slow clock source mask */
+#define SCC_SS_LPO 0x00000000 /* source of slow clock is LPO */
+#define SCC_SS_XTAL 0x00000001 /* source of slow clock is crystal */
+#define SCC_SS_PCI 0x00000002 /* source of slow clock is PCI */
+#define SCC_LF 0x00000200 /* LPOFreqSel, 1: 160KHz, 0: 32KHz */
+#define SCC_LP 0x00000400 /* LPOPowerDown, 1: LPO is disabled,
+ * 0: LPO is enabled
+ */
+#define SCC_FS 0x00000800 /* ForceSlowClk, 1: sb/cores running on slow clock,
+ * 0: power logic control
+ */
+#define SCC_IP 0x00001000 /* IgnorePllOffReq, 1/0: power logic ignores/honors
+ * PLL clock disable requests from core
+ */
+#define SCC_XC 0x00002000 /* XtalControlEn, 1/0: power logic does/doesn't
+ * disable crystal when appropriate
+ */
+#define SCC_XP 0x00004000 /* XtalPU (RO), 1/0: crystal running/disabled */
+#define SCC_CD_MASK 0xffff0000 /* ClockDivider (SlowClk = 1/(4+divisor)) */
#define SCC_CD_SHIFT 16
-
-#define SYCC_IE 0x00000001
-#define SYCC_AE 0x00000002
-#define SYCC_FP 0x00000004
-#define SYCC_AR 0x00000008
-#define SYCC_HR 0x00000010
-#define SYCC_CD_MASK 0xffff0000
+/* system_clk_ctl */
+#define SYCC_IE 0x00000001 /* ILPen: Enable Idle Low Power */
+#define SYCC_AE 0x00000002 /* ALPen: Enable Active Low Power */
+#define SYCC_FP 0x00000004 /* ForcePLLOn */
+#define SYCC_AR 0x00000008 /* Force ALP (or HT if ALPen is not set) */
+#define SYCC_HR 0x00000010 /* Force HT */
+#define SYCC_CD_MASK 0xffff0000 /* ClkDiv (ILP = 1/(4 * (divisor + 1))) */
#define SYCC_CD_SHIFT 16
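/*
 * Editorial sketch, not part of the patch: the two divider fields above are
 * decoded with their MASK/SHIFT pairs, and the resulting clock follows the
 * formulas quoted in the register comments (SlowClk = src/(4 + divisor),
 * ILP = src/(4 * (divisor + 1))). The helper names and the idea of passing
 * the raw register values in directly are illustrative assumptions only.
 */
static unsigned int scc_slowclk_hz(unsigned int slow_clk_ctl, unsigned int src_hz)
{
	unsigned int div = (slow_clk_ctl & SCC_CD_MASK) >> SCC_CD_SHIFT;

	return src_hz / (4 + div);		/* per the SCC_CD_MASK comment */
}

static unsigned int sycc_ilpclk_hz(unsigned int system_clk_ctl, unsigned int src_hz)
{
	unsigned int div = (system_clk_ctl & SYCC_CD_MASK) >> SYCC_CD_SHIFT;

	return src_hz / (4 * (div + 1));	/* per the SYCC_CD_MASK comment */
}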
-
+/* Indirect backplane access */
#define BPIA_BYTEEN 0x0000000f
#define BPIA_SZ1 0x00000001
#define BPIA_SZ2 0x00000003
#define BPIA_BUSY 0x00000200
#define BPIA_ERROR 0x00000400
-
-#define CF_EN 0x00000001
-#define CF_EM_MASK 0x0000000e
+/* pcmcia/prog/flash_config */
+#define CF_EN 0x00000001 /* enable */
+#define CF_EM_MASK 0x0000000e /* mode */
#define CF_EM_SHIFT 1
-#define CF_EM_FLASH 0
-#define CF_EM_SYNC 2
-#define CF_EM_PCMCIA 4
-#define CF_DS 0x00000010
-#define CF_BS 0x00000020
-#define CF_CD_MASK 0x000000c0
+#define CF_EM_FLASH 0 /* flash/asynchronous mode */
+#define CF_EM_SYNC 2 /* synchronous mode */
+#define CF_EM_PCMCIA 4 /* pcmcia mode */
+#define CF_DS 0x00000010 /* destsize: 0=8bit, 1=16bit */
+#define CF_BS 0x00000020 /* byteswap */
+#define CF_CD_MASK 0x000000c0 /* clock divider */
#define CF_CD_SHIFT 6
-#define CF_CD_DIV2 0x00000000
-#define CF_CD_DIV3 0x00000040
-#define CF_CD_DIV4 0x00000080
-#define CF_CE 0x00000100
-#define CF_SB 0x00000200
-
-
-#define PM_W0_MASK 0x0000003f
-#define PM_W1_MASK 0x00001f00
+#define CF_CD_DIV2 0x00000000 /* backplane/2 */
+#define CF_CD_DIV3 0x00000040 /* backplane/3 */
+#define CF_CD_DIV4 0x00000080 /* backplane/4 */
+#define CF_CE 0x00000100 /* clock enable */
+#define CF_SB 0x00000200 /* size/bytestrobe (synch only) */
+
+/* pcmcia_memwait */
+#define PM_W0_MASK 0x0000003f /* waitcount0 */
+#define PM_W1_MASK 0x00001f00 /* waitcount1 */
#define PM_W1_SHIFT 8
-#define PM_W2_MASK 0x001f0000
+#define PM_W2_MASK 0x001f0000 /* waitcount2 */
#define PM_W2_SHIFT 16
-#define PM_W3_MASK 0x1f000000
+#define PM_W3_MASK 0x1f000000 /* waitcount3 */
#define PM_W3_SHIFT 24
-
-#define PA_W0_MASK 0x0000003f
-#define PA_W1_MASK 0x00001f00
+/* pcmcia_attrwait */
+#define PA_W0_MASK 0x0000003f /* waitcount0 */
+#define PA_W1_MASK 0x00001f00 /* waitcount1 */
#define PA_W1_SHIFT 8
-#define PA_W2_MASK 0x001f0000
+#define PA_W2_MASK 0x001f0000 /* waitcount2 */
#define PA_W2_SHIFT 16
-#define PA_W3_MASK 0x1f000000
+#define PA_W3_MASK 0x1f000000 /* waitcount3 */
#define PA_W3_SHIFT 24
-
-#define PI_W0_MASK 0x0000003f
-#define PI_W1_MASK 0x00001f00
+/* pcmcia_iowait */
+#define PI_W0_MASK 0x0000003f /* waitcount0 */
+#define PI_W1_MASK 0x00001f00 /* waitcount1 */
#define PI_W1_SHIFT 8
-#define PI_W2_MASK 0x001f0000
+#define PI_W2_MASK 0x001f0000 /* waitcount2 */
#define PI_W2_SHIFT 16
-#define PI_W3_MASK 0x1f000000
+#define PI_W3_MASK 0x1f000000 /* waitcount3 */
#define PI_W3_SHIFT 24
-
-#define PW_W0_MASK 0x0000001f
-#define PW_W1_MASK 0x00001f00
+/* prog_waitcount */
+#define PW_W0_MASK 0x0000001f /* waitcount0 */
+#define PW_W1_MASK 0x00001f00 /* waitcount1 */
#define PW_W1_SHIFT 8
-#define PW_W2_MASK 0x001f0000
+#define PW_W2_MASK 0x001f0000 /* waitcount2 */
#define PW_W2_SHIFT 16
-#define PW_W3_MASK 0x1f000000
+#define PW_W3_MASK 0x1f000000 /* waitcount3 */
#define PW_W3_SHIFT 24
#define PW_W0 0x0000000c
#define PW_W2 0x00020000
#define PW_W3 0x01000000
-
-#define FW_W0_MASK 0x0000003f
-#define FW_W1_MASK 0x00001f00
+/* flash_waitcount */
+#define FW_W0_MASK 0x0000003f /* waitcount0 */
+#define FW_W1_MASK 0x00001f00 /* waitcount1 */
#define FW_W1_SHIFT 8
-#define FW_W2_MASK 0x001f0000
+#define FW_W2_MASK 0x001f0000 /* waitcount2 */
#define FW_W2_SHIFT 16
-#define FW_W3_MASK 0x1f000000
+#define FW_W3_MASK 0x1f000000 /* waitcount3 */
#define FW_W3_SHIFT 24
-
+/* When Srom support present, fields in sromcontrol */
#define SRC_START 0x80000000
#define SRC_BUSY 0x80000000
#define SRC_OPCODE 0x60000000
#define SRC_OP_WRDIS 0x40000000
#define SRC_OP_WREN 0x60000000
#define SRC_OTPSEL 0x00000010
+#define SRC_OTPPRESENT 0x00000020
#define SRC_LOCK 0x00000008
#define SRC_SIZE_MASK 0x00000006
#define SRC_SIZE_1K 0x00000000
#define SRC_SIZE_SHIFT 1
#define SRC_PRESENT 0x00000001
-
+/* Fields in pmucontrol */
#define PCTL_ILP_DIV_MASK 0xffff0000
#define PCTL_ILP_DIV_SHIFT 16
-#define PCTL_PLL_PLLCTL_UPD 0x00000400
-#define PCTL_NOILP_ON_WAIT 0x00000200
+#define PCTL_LQ_REQ_EN 0x00008000
+#define PCTL_PLL_PLLCTL_UPD 0x00000400 /* rev 2 */
+#define PCTL_NOILP_ON_WAIT 0x00000200 /* rev 1 */
#define PCTL_HT_REQ_EN 0x00000100
#define PCTL_ALP_REQ_EN 0x00000080
#define PCTL_XTALFREQ_MASK 0x0000007c
#define PCTL_ILP_DIV_EN 0x00000002
#define PCTL_LPO_SEL 0x00000001
-
+/* Retention Control */
#define PMU_RCTL_CLK_DIV_SHIFT 0
#define PMU_RCTL_CHAIN_LEN_SHIFT 12
#define PMU_RCTL_MACPHY_DISABLE_SHIFT 26
#define PMU_RCTL_MEMRETSLP_LOG_SHIFT 29
#define PMU_RCTL_MEMRETSLP_LOG_MASK (1 << 29)
-
+/* Retention Group Control */
#define PMU_RCTLGRP_CHAIN_LEN_SHIFT 0
#define PMU_RCTLGRP_RMODE_ENABLE_SHIFT 14
#define PMU_RCTLGRP_RMODE_ENABLE_MASK (1 << 14)
#define PMU_RCTLGRP_DFT_ENABLE_MASK (1 << 15)
#define PMU_RCTLGRP_NSRST_DISABLE_SHIFT 16
#define PMU_RCTLGRP_NSRST_DISABLE_MASK (1 << 16)
-
+/* Retention Group Control special for 4334 */
#define PMU4334_RCTLGRP_CHAIN_LEN_GRP0 338
#define PMU4334_RCTLGRP_CHAIN_LEN_GRP1 315
-
+/* Retention Group Control special for 43341 */
#define PMU43341_RCTLGRP_CHAIN_LEN_GRP0 366
#define PMU43341_RCTLGRP_CHAIN_LEN_GRP1 330
-
+/* Fields in clkstretch */
#define CSTRETCH_HT 0xffff0000
#define CSTRETCH_ALP 0x0000ffff
-
+/* gpiotimerval */
#define GPIO_ONTIME_SHIFT 16
-
-#define CN_N1_MASK 0x3f
-#define CN_N2_MASK 0x3f00
+/* clockcontrol_n */
+#define CN_N1_MASK 0x3f /* n1 control */
+#define CN_N2_MASK 0x3f00 /* n2 control */
#define CN_N2_SHIFT 8
-#define CN_PLLC_MASK 0xf0000
+#define CN_PLLC_MASK 0xf0000 /* pll control */
#define CN_PLLC_SHIFT 16
-
-#define CC_M1_MASK 0x3f
-#define CC_M2_MASK 0x3f00
+/* clockcontrol_sb/pci/uart */
+#define CC_M1_MASK 0x3f /* m1 control */
+#define CC_M2_MASK 0x3f00 /* m2 control */
#define CC_M2_SHIFT 8
-#define CC_M3_MASK 0x3f0000
+#define CC_M3_MASK 0x3f0000 /* m3 control */
#define CC_M3_SHIFT 16
-#define CC_MC_MASK 0x1f000000
+#define CC_MC_MASK 0x1f000000 /* mux control */
#define CC_MC_SHIFT 24
-
-#define CC_F6_2 0x02
-#define CC_F6_3 0x03
-#define CC_F6_4 0x05
+/* N3M Clock control magic field values */
+#define CC_F6_2 0x02 /* A factor of 2 in */
+#define CC_F6_3 0x03 /* 6-bit fields like */
+#define CC_F6_4 0x05 /* N1, M1 or M3 */
#define CC_F6_5 0x09
#define CC_F6_6 0x11
#define CC_F6_7 0x21
-#define CC_F5_BIAS 5
+#define CC_F5_BIAS 5 /* 5-bit fields get this added */
#define CC_MC_BYPASS 0x08
#define CC_MC_M1 0x04
#define CC_MC_M1M2M3 0x01
#define CC_MC_M1M3 0x11
-
-#define CC_T2_BIAS 2
-#define CC_T2M2_BIAS 3
+/* Type 2 Clock control magic field values */
+#define CC_T2_BIAS 2 /* n1, n2, m1 & m3 bias */
+#define CC_T2M2_BIAS 3 /* m2 bias */
#define CC_T2MC_M1BYP 1
#define CC_T2MC_M2BYP 2
#define CC_T2MC_M3BYP 4
-
-#define CC_T6_MMASK 1
-#define CC_T6_M0 120000000
-#define CC_T6_M1 100000000
+/* Type 6 Clock control magic field values */
+#define CC_T6_MMASK 1 /* bits of interest in m */
+#define CC_T6_M0 120000000 /* sb clock for m = 0 */
+#define CC_T6_M1 100000000 /* sb clock for m = 1 */
#define SB2MIPS_T6(sb) (2 * (sb))
+/* Common clock base */
+#define CC_CLOCK_BASE1 24000000 /* Half the clock freq */
+#define CC_CLOCK_BASE2 12500000 /* Alternate crystal on some PLLs */
-#define CC_CLOCK_BASE1 24000000
-#define CC_CLOCK_BASE2 12500000
-
-
+/* Clock control values for 200MHz in 5350 */
#define CLKC_5350_N 0x0311
#define CLKC_5350_M 0x04020009
-
-#define FLASH_NONE 0x000
-#define SFLASH_ST 0x100
-#define SFLASH_AT 0x200
+/* Flash types in the chipcommon capabilities register */
+#define FLASH_NONE 0x000 /* No flash */
+#define SFLASH_ST 0x100 /* ST serial flash */
+#define SFLASH_AT 0x200 /* Atmel serial flash */
#define NFLASH 0x300
-#define PFLASH 0x700
+#define PFLASH 0x700 /* Parallel flash */
#define QSPIFLASH_ST 0x800
#define QSPIFLASH_AT 0x900
-
-#define CC_CFG_EN 0x0001
-#define CC_CFG_EM_MASK 0x000e
-#define CC_CFG_EM_ASYNC 0x0000
-#define CC_CFG_EM_SYNC 0x0002
-#define CC_CFG_EM_PCMCIA 0x0004
-#define CC_CFG_EM_IDE 0x0006
-#define CC_CFG_DS 0x0010
-#define CC_CFG_CD_MASK 0x00e0
-#define CC_CFG_CE 0x0100
-#define CC_CFG_SB 0x0200
-#define CC_CFG_IS 0x0400
-
-
-#define CC_EB_BASE 0x1a000000
-#define CC_EB_PCMCIA_MEM 0x1a000000
-#define CC_EB_PCMCIA_IO 0x1a200000
-#define CC_EB_PCMCIA_CFG 0x1a400000
-#define CC_EB_IDE 0x1a800000
-#define CC_EB_PCMCIA1_MEM 0x1a800000
-#define CC_EB_PCMCIA1_IO 0x1aa00000
-#define CC_EB_PCMCIA1_CFG 0x1ac00000
-#define CC_EB_PROGIF 0x1b000000
-
-
-
+/* Bits in the ExtBus config registers */
+#define CC_CFG_EN 0x0001 /* Enable */
+#define CC_CFG_EM_MASK 0x000e /* Extif Mode */
+#define CC_CFG_EM_ASYNC 0x0000 /* Async/Parallel flash */
+#define CC_CFG_EM_SYNC 0x0002 /* Synchronous */
+#define CC_CFG_EM_PCMCIA 0x0004 /* PCMCIA */
+#define CC_CFG_EM_IDE 0x0006 /* IDE */
+#define CC_CFG_DS 0x0010 /* Data size, 0=8bit, 1=16bit */
+#define CC_CFG_CD_MASK 0x00e0 /* Sync: Clock divisor, rev >= 20 */
+#define CC_CFG_CE 0x0100 /* Sync: Clock enable, rev >= 20 */
+#define CC_CFG_SB 0x0200 /* Sync: Size/Bytestrobe, rev >= 20 */
+#define CC_CFG_IS 0x0400 /* Extif Sync Clk Select, rev >= 20 */
+
+/* ExtBus address space */
+#define CC_EB_BASE 0x1a000000 /* Chipc ExtBus base address */
+#define CC_EB_PCMCIA_MEM 0x1a000000 /* PCMCIA 0 memory base address */
+#define CC_EB_PCMCIA_IO 0x1a200000 /* PCMCIA 0 I/O base address */
+#define CC_EB_PCMCIA_CFG 0x1a400000 /* PCMCIA 0 config base address */
+#define CC_EB_IDE 0x1a800000 /* IDE memory base */
+#define CC_EB_PCMCIA1_MEM 0x1a800000 /* PCMCIA 1 memory base address */
+#define CC_EB_PCMCIA1_IO 0x1aa00000 /* PCMCIA 1 I/O base address */
+#define CC_EB_PCMCIA1_CFG 0x1ac00000 /* PCMCIA 1 config base address */
+#define CC_EB_PROGIF 0x1b000000 /* ProgIF Async/Sync base address */
+
+
+/* Start/busy bit in flashcontrol */
#define SFLASH_OPCODE 0x000000ff
#define SFLASH_ACTION 0x00000700
-#define SFLASH_CS_ACTIVE 0x00001000
+#define SFLASH_CS_ACTIVE 0x00001000 /* Chip Select Active, rev >= 20 */
#define SFLASH_START 0x80000000
#define SFLASH_BUSY SFLASH_START
-
-#define SFLASH_ACT_OPONLY 0x0000
-#define SFLASH_ACT_OP1D 0x0100
-#define SFLASH_ACT_OP3A 0x0200
-#define SFLASH_ACT_OP3A1D 0x0300
-#define SFLASH_ACT_OP3A4D 0x0400
-#define SFLASH_ACT_OP3A4X4D 0x0500
-#define SFLASH_ACT_OP3A1X4D 0x0700
-
-
-#define SFLASH_ST_WREN 0x0006
-#define SFLASH_ST_WRDIS 0x0004
-#define SFLASH_ST_RDSR 0x0105
-#define SFLASH_ST_WRSR 0x0101
-#define SFLASH_ST_READ 0x0303
-#define SFLASH_ST_PP 0x0302
-#define SFLASH_ST_SE 0x02d8
-#define SFLASH_ST_BE 0x00c7
-#define SFLASH_ST_DP 0x00b9
-#define SFLASH_ST_RES 0x03ab
-#define SFLASH_ST_CSA 0x1000
-#define SFLASH_ST_SSE 0x0220
-
-#define SFLASH_MXIC_RDID 0x0390
-#define SFLASH_MXIC_MFID 0xc2
-
-
-#define SFLASH_ST_WIP 0x01
-#define SFLASH_ST_WEL 0x02
-#define SFLASH_ST_BP_MASK 0x1c
+/* flashcontrol action codes */
+#define SFLASH_ACT_OPONLY 0x0000 /* Issue opcode only */
+#define SFLASH_ACT_OP1D 0x0100 /* opcode + 1 data byte */
+#define SFLASH_ACT_OP3A 0x0200 /* opcode + 3 addr bytes */
+#define SFLASH_ACT_OP3A1D 0x0300 /* opcode + 3 addr & 1 data bytes */
+#define SFLASH_ACT_OP3A4D 0x0400 /* opcode + 3 addr & 4 data bytes */
+#define SFLASH_ACT_OP3A4X4D 0x0500 /* opcode + 3 addr, 4 don't care & 4 data bytes */
+#define SFLASH_ACT_OP3A1X4D 0x0700 /* opcode + 3 addr, 1 don't care & 4 data bytes */
+
+/* flashcontrol action+opcodes for ST flashes */
+#define SFLASH_ST_WREN 0x0006 /* Write Enable */
+#define SFLASH_ST_WRDIS 0x0004 /* Write Disable */
+#define SFLASH_ST_RDSR 0x0105 /* Read Status Register */
+#define SFLASH_ST_WRSR 0x0101 /* Write Status Register */
+#define SFLASH_ST_READ 0x0303 /* Read Data Bytes */
+#define SFLASH_ST_PP 0x0302 /* Page Program */
+#define SFLASH_ST_SE 0x02d8 /* Sector Erase */
+#define SFLASH_ST_BE 0x00c7 /* Bulk Erase */
+#define SFLASH_ST_DP 0x00b9 /* Deep Power-down */
+#define SFLASH_ST_RES 0x03ab /* Read Electronic Signature */
+#define SFLASH_ST_CSA 0x1000 /* Keep chip select asserted */
+#define SFLASH_ST_SSE 0x0220 /* Sub-sector Erase */
+
+#define SFLASH_MXIC_RDID 0x0390 /* Read Manufacturer ID */
+#define SFLASH_MXIC_MFID 0xc2 /* MXIC Manufacturer ID */
+
+/* Status register bits for ST flashes */
+#define SFLASH_ST_WIP 0x01 /* Write In Progress */
+#define SFLASH_ST_WEL 0x02 /* Write Enable Latch */
+#define SFLASH_ST_BP_MASK 0x1c /* Block Protect */
#define SFLASH_ST_BP_SHIFT 2
-#define SFLASH_ST_SRWD 0x80
-
+#define SFLASH_ST_SRWD 0x80 /* Status Register Write Disable */
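/*
 * Editorial sketch, not part of the patch: each SFLASH_ST_* value above packs
 * an action code (the SFLASH_ACTION field, bits 10:8) together with the SPI
 * opcode (the SFLASH_OPCODE field, bits 7:0). A command is kicked off by also
 * setting SFLASH_START and then polling SFLASH_BUSY. Accessing the
 * flashcontrol register through a plain volatile pointer is an assumption
 * made here for brevity.
 */
static void sflash_cmd(volatile unsigned int *flashcontrol, unsigned int cmd)
{
	*flashcontrol = SFLASH_START | cmd;	/* e.g. cmd = SFLASH_ST_WREN */
	while (*flashcontrol & SFLASH_BUSY)
		;				/* wait for the core to finish */
}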
+/* flashcontrol action+opcodes for Atmel flashes */
#define SFLASH_AT_READ 0x07e8
#define SFLASH_AT_PAGE_READ 0x07d2
#define SFLASH_AT_BUF1_READ
#define SFLASH_AT_BUF1_REPROGRAM 0x0258
#define SFLASH_AT_BUF2_REPROGRAM 0x0259
-
+/* Status register bits for Atmel flashes */
#define SFLASH_AT_READY 0x80
#define SFLASH_AT_MISMATCH 0x40
#define SFLASH_AT_ID_MASK 0x38
#define SFLASH_AT_ID_SHIFT 3
-
+/* SPI register bits, corerev >= 37 */
#define GSIO_START 0x80000000
#define GSIO_BUSY GSIO_START
+/*
+ * These are the UART port assignments, expressed as offsets from the base
+ * register. These assignments should hold for any serial port based on
+ * a 8250, 16450, or 16550(A).
+ */
-
-#define UART_RX 0
-#define UART_TX 0
-#define UART_DLL 0
-#define UART_IER 1
-#define UART_DLM 1
-#define UART_IIR 2
-#define UART_FCR 2
-#define UART_LCR 3
-#define UART_MCR 4
-#define UART_LSR 5
-#define UART_MSR 6
-#define UART_SCR 7
-#define UART_LCR_DLAB 0x80
-#define UART_LCR_WLEN8 0x03
-#define UART_MCR_OUT2 0x08
-#define UART_MCR_LOOP 0x10
-#define UART_LSR_RX_FIFO 0x80
-#define UART_LSR_TDHR 0x40
-#define UART_LSR_THRE 0x20
-#define UART_LSR_BREAK 0x10
-#define UART_LSR_FRAMING 0x08
-#define UART_LSR_PARITY 0x04
-#define UART_LSR_OVERRUN 0x02
-#define UART_LSR_RXRDY 0x01
-#define UART_FCR_FIFO_ENABLE 1
-
-
-#define UART_IIR_FIFO_MASK 0xc0
-#define UART_IIR_INT_MASK 0xf
-#define UART_IIR_MDM_CHG 0x0
-#define UART_IIR_NOINT 0x1
-#define UART_IIR_THRE 0x2
-#define UART_IIR_RCVD_DATA 0x4
-#define UART_IIR_RCVR_STATUS 0x6
-#define UART_IIR_CHAR_TIME 0xc
-
-
-#define UART_IER_EDSSI 8
-#define UART_IER_ELSI 4
-#define UART_IER_ETBEI 2
-#define UART_IER_ERBFI 1
-
-
+#define UART_RX 0 /* In: Receive buffer (DLAB=0) */
+#define UART_TX 0 /* Out: Transmit buffer (DLAB=0) */
+#define UART_DLL 0 /* Out: Divisor Latch Low (DLAB=1) */
+#define UART_IER 1 /* In/Out: Interrupt Enable Register (DLAB=0) */
+#define UART_DLM 1 /* Out: Divisor Latch High (DLAB=1) */
+#define UART_IIR 2 /* In: Interrupt Identity Register */
+#define UART_FCR 2 /* Out: FIFO Control Register */
+#define UART_LCR 3 /* Out: Line Control Register */
+#define UART_MCR 4 /* Out: Modem Control Register */
+#define UART_LSR 5 /* In: Line Status Register */
+#define UART_MSR 6 /* In: Modem Status Register */
+#define UART_SCR 7 /* I/O: Scratch Register */
+#define UART_LCR_DLAB 0x80 /* Divisor latch access bit */
+#define UART_LCR_WLEN8 0x03 /* Word length: 8 bits */
+#define UART_MCR_OUT2 0x08 /* MCR GPIO out 2 */
+#define UART_MCR_LOOP 0x10 /* Enable loopback test mode */
+#define UART_LSR_RX_FIFO 0x80 /* Receive FIFO error */
+#define UART_LSR_TDHR 0x40 /* Data-hold-register empty */
+#define UART_LSR_THRE 0x20 /* Transmit-hold-register empty */
+#define UART_LSR_BREAK 0x10 /* Break interrupt */
+#define UART_LSR_FRAMING 0x08 /* Framing error */
+#define UART_LSR_PARITY 0x04 /* Parity error */
+#define UART_LSR_OVERRUN 0x02 /* Overrun error */
+#define UART_LSR_RXRDY 0x01 /* Receiver ready */
+#define UART_FCR_FIFO_ENABLE 1 /* FIFO control register bit controlling FIFO enable/disable */
+
+/* Interrupt Identity Register (IIR) bits */
+#define UART_IIR_FIFO_MASK 0xc0 /* IIR FIFO disable/enabled mask */
+#define UART_IIR_INT_MASK 0xf /* IIR interrupt ID source */
+#define UART_IIR_MDM_CHG 0x0 /* Modem status changed */
+#define UART_IIR_NOINT 0x1 /* No interrupt pending */
+#define UART_IIR_THRE 0x2 /* THR empty */
+#define UART_IIR_RCVD_DATA 0x4 /* Received data available */
+#define UART_IIR_RCVR_STATUS 0x6 /* Receiver status */
+#define UART_IIR_CHAR_TIME 0xc /* Character time */
+
+/* Interrupt Enable Register (IER) bits */
+#define UART_IER_PTIME 128 /* Programmable THRE Interrupt Mode Enable */
+#define UART_IER_EDSSI 8 /* enable modem status interrupt */
+#define UART_IER_ELSI 4 /* enable receiver line status interrupt */
+#define UART_IER_ETBEI 2 /* enable transmitter holding register empty interrupt */
+#define UART_IER_ERBFI 1 /* enable data available interrupt */
+
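/*
 * Editorial sketch, not part of the patch: the offsets above follow the usual
 * 8250/16550 layout, so the baud divisor is programmed by setting DLAB in the
 * LCR, writing DLL/DLM, and clearing DLAB again. Byte-wide, contiguous
 * registers reachable through a plain pointer are an assumption made for this
 * example; a real driver would go through its own register accessors.
 */
static void uart_set_divisor(volatile unsigned char *regs, unsigned int divisor)
{
	unsigned char lcr = regs[UART_LCR];

	regs[UART_LCR] = lcr | UART_LCR_DLAB;		/* expose divisor latch */
	regs[UART_DLL] = divisor & 0xff;		/* divisor low byte */
	regs[UART_DLM] = (divisor >> 8) & 0xff;		/* divisor high byte */
	regs[UART_LCR] = lcr & ~UART_LCR_DLAB;		/* back to RX/TX/IER */
}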
+/* pmustatus */
+#define PST_SLOW_WR_PENDING 0x0400
#define PST_EXTLPOAVAIL 0x0100
#define PST_WDRESET 0x0080
#define PST_INTPEND 0x0040
#define PST_HTAVAIL 0x0004
#define PST_RESINIT 0x0003
-
+/* pmucapabilities */
#define PCAP_REV_MASK 0x000000ff
#define PCAP_RC_MASK 0x00001f00
#define PCAP_RC_SHIFT 8
#define PCAP_VC_SHIFT 21
#define PCAP_CC_MASK 0x1e000000
#define PCAP_CC_SHIFT 25
-#define PCAP5_PC_MASK 0x003e0000
+#define PCAP5_PC_MASK 0x003e0000 /* PMU corerev >= 5 */
#define PCAP5_PC_SHIFT 17
#define PCAP5_VC_MASK 0x07c00000
#define PCAP5_VC_SHIFT 22
#define PCAP5_CC_MASK 0xf8000000
#define PCAP5_CC_SHIFT 27
-
-
+/* PMU Resource Request Timer registers */
+/* This is based on PmuRev0 */
#define PRRT_TIME_MASK 0x03ff
#define PRRT_INTEN 0x0400
#define PRRT_REQ_ACTIVE 0x0800
#define PRRT_HT_REQ 0x2000
#define PRRT_HQ_REQ 0x4000
+/* bit 0 of the PMU interrupt vector is asserted if this mask is enabled */
+#define RSRC_INTR_MASK_TIMER_INT_0 1
+/* PMU resource bit position */
#define PMURES_BIT(bit) (1 << (bit))
-
+/* PMU resource number limit */
#define PMURES_MAX_RESNUM 30
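/*
 * Editorial sketch, not part of the patch: PMURES_BIT() just maps a resource
 * number (0..PMURES_MAX_RESNUM) to its bit position in the PMU resource
 * bitmaps. The helper below and the idea of building a mask from a list of
 * chip-specific resource numbers are illustrative assumptions.
 */
static unsigned int pmu_res_mask(const unsigned char *res, int count)
{
	unsigned int mask = 0;
	int i;

	for (i = 0; i < count; i++)
		mask |= PMURES_BIT(res[i]);	/* one bit per resource number */

	return mask;
}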
-
+/* PMU chip control0 register */
#define PMU_CHIPCTL0 0
-#define PMU43143_CC0_SDIO_DRSTR_OVR (1 << 31)
-
+#define PMU43143_CC0_SDIO_DRSTR_OVR (1 << 31) /* sdio drive strength override enable */
+/* clock req types */
#define PMU_CC1_CLKREQ_TYPE_SHIFT 19
#define PMU_CC1_CLKREQ_TYPE_MASK (1 << PMU_CC1_CLKREQ_TYPE_SHIFT)
#define CLKREQ_TYPE_CONFIG_OPENDRAIN 0
#define CLKREQ_TYPE_CONFIG_PUSHPULL 1
-
+/* PMU chip control1 register */
#define PMU_CHIPCTL1 1
#define PMU_CC1_RXC_DLL_BYPASS 0x00010000
+#define PMU_CC1_ENABLE_BBPLL_PWR_DOWN 0x00000010
#define PMU_CC1_IF_TYPE_MASK 0x00000030
#define PMU_CC1_IF_TYPE_RMII 0x00000000
#define PMU_CC1_SW_TYPE_EPHYRMII 0x00000080
#define PMU_CC1_SW_TYPE_RGMII 0x000000c0
-
+/* PMU chip control2 register */
#define PMU_CHIPCTL2 2
+#define PMU_CC2_FORCE_SUBCORE_PWR_SWITCH_ON (1 << 18)
+#define PMU_CC2_FORCE_PHY_PWR_SWITCH_ON (1 << 19)
+#define PMU_CC2_FORCE_VDDM_PWR_SWITCH_ON (1 << 20)
+#define PMU_CC2_FORCE_MEMLPLDO_PWR_SWITCH_ON (1 << 21)
-
+/* PMU chip control3 register */
#define PMU_CHIPCTL3 3
-
#define PMU_CC3_ENABLE_SDIO_WAKEUP_SHIFT 19
#define PMU_CC3_ENABLE_RF_SHIFT 22
#define PMU_CC3_RF_DISABLE_IVALUE_SHIFT 23
-
+/* PMU chip control5 register */
#define PMU_CHIPCTL5 5
+/* PMU chip control6 register */
+#define PMU_CHIPCTL6 6
+#define PMU_CC6_ENABLE_CLKREQ_WAKEUP (1 << 4)
+#define PMU_CC6_ENABLE_PMU_WAKEUP_ALP (1 << 6)
+/* PMU chip control7 register */
+#define PMU_CHIPCTL7 7
+#define PMU_CC7_ENABLE_L2REFCLKPAD_PWRDWN (1 << 25)
+#define PMU_CC7_ENABLE_MDIO_RESET_WAR (1 << 27)
-
+/* PMU corerev and chip specific PLL controls.
+ * PMU<rev>_PLL<num>_XX where <rev> is PMU corerev and <num> is an arbitrary number
+ * to differentiate different PLLs controlled by the same PMU rev.
+ */
+/* pllcontrol registers */
+/* PDIV, div_phy, div_arm, div_adc, dith_sel, ioff, kpd_scale, lsb_sel, mash_sel, lf_c & lf_r */
#define PMU0_PLL0_PLLCTL0 0
#define PMU0_PLL0_PC0_PDIV_MASK 1
#define PMU0_PLL0_PC0_PDIV_FREQ 25000
#define PMU0_PLL0_PC0_DIV_ARM_SHIFT 3
#define PMU0_PLL0_PC0_DIV_ARM_BASE 8
-
+/* PC0_DIV_ARM for PLLOUT_ARM */
#define PMU0_PLL0_PC0_DIV_ARM_110MHZ 0
#define PMU0_PLL0_PC0_DIV_ARM_97_7MHZ 1
#define PMU0_PLL0_PC0_DIV_ARM_88MHZ 2
-#define PMU0_PLL0_PC0_DIV_ARM_80MHZ 3
+#define PMU0_PLL0_PC0_DIV_ARM_80MHZ 3 /* Default */
#define PMU0_PLL0_PC0_DIV_ARM_73_3MHZ 4
#define PMU0_PLL0_PC0_DIV_ARM_67_7MHZ 5
#define PMU0_PLL0_PC0_DIV_ARM_62_9MHZ 6
#define PMU0_PLL0_PC0_DIV_ARM_58_6MHZ 7
-
+/* Wildcard base, stop_mod, en_lf_tp, en_cal & lf_r2 */
#define PMU0_PLL0_PLLCTL1 1
#define PMU0_PLL0_PC1_WILD_INT_MASK 0xf0000000
#define PMU0_PLL0_PC1_WILD_INT_SHIFT 28
#define PMU0_PLL0_PC1_WILD_FRAC_SHIFT 8
#define PMU0_PLL0_PC1_STOP_MOD 0x00000040
-
+/* Wildcard base, vco_calvar, vco_swc, vco_var_selref, vso_ical & vco_sel_avdd */
#define PMU0_PLL0_PLLCTL2 2
#define PMU0_PLL0_PC2_WILD_INT_MASK 0xf
#define PMU0_PLL0_PC2_WILD_INT_SHIFT 4
-
-
+/* pllcontrol registers */
+/* ndiv_pwrdn, pwrdn_ch<x>, refcomp_pwrdn, dly_ch<x>, p1div, p2div, _bypass_sdmod */
#define PMU1_PLL0_PLLCTL0 0
#define PMU1_PLL0_PC0_P1DIV_MASK 0x00f00000
#define PMU1_PLL0_PC0_P1DIV_SHIFT 20
#define PMU1_PLL0_PC0_P2DIV_MASK 0x0f000000
#define PMU1_PLL0_PC0_P2DIV_SHIFT 24
-
+/* m<x>div */
#define PMU1_PLL0_PLLCTL1 1
#define PMU1_PLL0_PC1_M1DIV_MASK 0x000000ff
#define PMU1_PLL0_PC1_M1DIV_SHIFT 0
#define PMU1_PLL0_PC1_M4DIV_BY_9 9
#define PMU1_PLL0_PC1_M4DIV_BY_18 0x12
#define PMU1_PLL0_PC1_M4DIV_BY_36 0x24
+#define PMU1_PLL0_PC1_M4DIV_BY_60 0x3C
#define DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT 8
#define DOT11MAC_880MHZ_CLK_DIVISOR_MASK (0xFF << DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT)
#define DOT11MAC_880MHZ_CLK_DIVISOR_VAL (0xE << DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT)
-
+/* m<x>div, ndiv_dither_mfb, ndiv_mode, ndiv_int */
#define PMU1_PLL0_PLLCTL2 2
#define PMU1_PLL0_PC2_M5DIV_MASK 0x000000ff
#define PMU1_PLL0_PC2_M5DIV_SHIFT 0
#define PMU1_PLL0_PC2_NDIV_MODE_MASK 0x000e0000
#define PMU1_PLL0_PC2_NDIV_MODE_SHIFT 17
#define PMU1_PLL0_PC2_NDIV_MODE_MASH 1
-#define PMU1_PLL0_PC2_NDIV_MODE_MFB 2
+#define PMU1_PLL0_PC2_NDIV_MODE_MFB 2 /* recommended for 4319 */
#define PMU1_PLL0_PC2_NDIV_INT_MASK 0x1ff00000
#define PMU1_PLL0_PC2_NDIV_INT_SHIFT 20
-
+/* ndiv_frac */
#define PMU1_PLL0_PLLCTL3 3
#define PMU1_PLL0_PC3_NDIV_FRAC_MASK 0x00ffffff
#define PMU1_PLL0_PC3_NDIV_FRAC_SHIFT 0
-
+/* pll_ctrl */
#define PMU1_PLL0_PLLCTL4 4
-
+/* pll_ctrl, vco_rng, clkdrive_ch<x> */
#define PMU1_PLL0_PLLCTL5 5
#define PMU1_PLL0_PC5_CLK_DRV_MASK 0xffffff00
#define PMU1_PLL0_PC5_CLK_DRV_SHIFT 8
+#define PMU1_PLL0_PLLCTL6 6
+#define PMU1_PLL0_PLLCTL7 7
+#define PMU1_PLL0_PLLCTL8 8
+#define PMU1_PLLCTL8_OPENLOOP_MASK 0x2
+
+/* PMU rev 2 control words */
#define PMU2_PHY_PLL_PLLCTL 4
#define PMU2_SI_PLL_PLLCTL 10
-
-
-
+/* PMU rev 2 */
+/* pllcontrol registers */
+/* ndiv_pwrdn, pwrdn_ch<x>, refcomp_pwrdn, dly_ch<x>, p1div, p2div, _bypass_sdmod */
#define PMU2_PLL_PLLCTL0 0
#define PMU2_PLL_PC0_P1DIV_MASK 0x00f00000
#define PMU2_PLL_PC0_P1DIV_SHIFT 20
#define PMU2_PLL_PC0_P2DIV_MASK 0x0f000000
#define PMU2_PLL_PC0_P2DIV_SHIFT 24
-
+/* m<x>div */
#define PMU2_PLL_PLLCTL1 1
#define PMU2_PLL_PC1_M1DIV_MASK 0x000000ff
#define PMU2_PLL_PC1_M1DIV_SHIFT 0
#define PMU2_PLL_PC1_M4DIV_MASK 0xff000000
#define PMU2_PLL_PC1_M4DIV_SHIFT 24
-
+/* m<x>div, ndiv_dither_mfb, ndiv_mode, ndiv_int */
#define PMU2_PLL_PLLCTL2 2
#define PMU2_PLL_PC2_M5DIV_MASK 0x000000ff
#define PMU2_PLL_PC2_M5DIV_SHIFT 0
#define PMU2_PLL_PC2_NDIV_INT_MASK 0x1ff00000
#define PMU2_PLL_PC2_NDIV_INT_SHIFT 20
-
+/* ndiv_frac */
#define PMU2_PLL_PLLCTL3 3
#define PMU2_PLL_PC3_NDIV_FRAC_MASK 0x00ffffff
#define PMU2_PLL_PC3_NDIV_FRAC_SHIFT 0
-
+/* pll_ctrl */
#define PMU2_PLL_PLLCTL4 4
-
+/* pll_ctrl, vco_rng, clkdrive_ch<x> */
#define PMU2_PLL_PLLCTL5 5
#define PMU2_PLL_PC5_CLKDRIVE_CH1_MASK 0x00000f00
#define PMU2_PLL_PC5_CLKDRIVE_CH1_SHIFT 8
#define PMU2_PLL_PC5_CLKDRIVE_CH6_MASK 0xf0000000
#define PMU2_PLL_PC5_CLKDRIVE_CH6_SHIFT 28
-
+/* PMU rev 5 (& 6) */
#define PMU5_PLL_P1P2_OFF 0
#define PMU5_PLL_P1_MASK 0x0f000000
#define PMU5_PLL_P1_SHIFT 24
#define PMU5_PLL_PCHI_OFF 5
#define PMU5_PLL_PCHI_MASK 0x0000003f
-
+/* pmu XtalFreqRatio */
#define PMU_XTALFREQ_REG_ILPCTR_MASK 0x00001FFF
#define PMU_XTALFREQ_REG_MEASURE_MASK 0x80000000
#define PMU_XTALFREQ_REG_MEASURE_SHIFT 31
-
+/* Divider allocation in 4716/47162/5356/5357 */
#define PMU5_MAINPLL_CPU 1
#define PMU5_MAINPLL_MEM 2
#define PMU5_MAINPLL_SI 3
-
+/* 4706 PMU */
#define PMU4706_MAINPLL_PLL0 0
-#define PMU6_4706_PROCPLL_OFF 4
+#define PMU6_4706_PROCPLL_OFF 4 /* The CPU PLL */
#define PMU6_4706_PROC_P2DIV_MASK 0x000f0000
#define PMU6_4706_PROC_P2DIV_SHIFT 16
#define PMU6_4706_PROC_P1DIV_MASK 0x0000f000
#define PMU7_PLL_PLLCTL11_MASK 0xffffff00
#define PMU7_PLL_PLLCTL11_VAL 0x22222200
-
+/* PMU rev 15 */
#define PMU15_PLL_PLLCTL0 0
#define PMU15_PLL_PC0_CLKSEL_MASK 0x00000003
#define PMU15_PLL_PC0_CLKSEL_SHIFT 0
#define PMU15_FREQTGT_480_DEFAULT 0x19AB1
#define PMU15_FREQTGT_492_DEFAULT 0x1A4F5
-#define PMU15_ARM_96MHZ 96000000
-#define PMU15_ARM_98MHZ 98400000
-#define PMU15_ARM_97MHZ 97000000
+#define PMU15_ARM_96MHZ 96000000 /* 96 MHz */
+#define PMU15_ARM_98MHZ 98400000 /* 98.4 MHz */
+#define PMU15_ARM_97MHZ 97000000 /* 97 MHz */
#define PMU17_PLLCTL2_NDIVTYPE_MASK 0x00000070
#define PMU17_PLLCTL0_BBPLL_DRST 3
#define PMU17_PLLCTL0_BBPLL_DISBL_CLK 8
-
+/* PLL usage in 4716/47162 */
#define PMU4716_MAINPLL_PLL0 12
+/* PLL usage in 4335 */
+#define PMU4335_PLL0_PC2_P1DIV_MASK 0x000f0000
+#define PMU4335_PLL0_PC2_P1DIV_SHIFT 16
+#define PMU4335_PLL0_PC2_NDIV_INT_MASK 0xff800000
+#define PMU4335_PLL0_PC2_NDIV_INT_SHIFT 23
+#define PMU4335_PLL0_PC1_MDIV2_MASK 0x0000ff00
+#define PMU4335_PLL0_PC1_MDIV2_SHIFT 8
+
+/* PLL usage in 5356/5357 */
#define PMU5356_MAINPLL_PLL0 0
#define PMU5357_MAINPLL_PLL0 0
-
+/* 4716/47162 resources */
#define RES4716_PROC_PLL_ON 0x00000040
#define RES4716_PROC_HT_AVAIL 0x00000080
-
-#define CCTRL_471X_I2S_PINS_ENABLE 0x0080
-
-
-
-#define CCTRL_5357_I2S_PINS_ENABLE 0x00040000
-#define CCTRL_5357_I2CSPI_PINS_ENABLE 0x00080000
-
-
-#define RES5354_EXT_SWITCHER_PWM 0
-#define RES5354_BB_SWITCHER_PWM 1
-#define RES5354_BB_SWITCHER_BURST 2
-#define RES5354_BB_EXT_SWITCHER_BURST 3
-#define RES5354_ILP_REQUEST 4
-#define RES5354_RADIO_SWITCHER_PWM 5
-#define RES5354_RADIO_SWITCHER_BURST 6
-#define RES5354_ROM_SWITCH 7
-#define RES5354_PA_REF_LDO 8
-#define RES5354_RADIO_LDO 9
-#define RES5354_AFE_LDO 10
-#define RES5354_PLL_LDO 11
-#define RES5354_BG_FILTBYP 12
-#define RES5354_TX_FILTBYP 13
-#define RES5354_RX_FILTBYP 14
-#define RES5354_XTAL_PU 15
-#define RES5354_XTAL_EN 16
-#define RES5354_BB_PLL_FILTBYP 17
-#define RES5354_RF_PLL_FILTBYP 18
-#define RES5354_BB_PLL_PU 19
-
-
-#define CCTRL5357_EXTPA (1<<14)
-#define CCTRL5357_ANT_MUX_2o3 (1<<15)
-#define CCTRL5357_NFLASH (1<<16)
-
-
-#define CCTRL43217_EXTPA_C0 (1<<13)
-#define CCTRL43217_EXTPA_C1 (1<<8)
-
-
-#define CCTRL43228_EXTPA_C0 (1<<14)
-#define CCTRL43228_EXTPA_C1 (1<<9)
-
-
-#define RES4328_EXT_SWITCHER_PWM 0
-#define RES4328_BB_SWITCHER_PWM 1
-#define RES4328_BB_SWITCHER_BURST 2
-#define RES4328_BB_EXT_SWITCHER_BURST 3
-#define RES4328_ILP_REQUEST 4
-#define RES4328_RADIO_SWITCHER_PWM 5
-#define RES4328_RADIO_SWITCHER_BURST 6
-#define RES4328_ROM_SWITCH 7
-#define RES4328_PA_REF_LDO 8
-#define RES4328_RADIO_LDO 9
-#define RES4328_AFE_LDO 10
-#define RES4328_PLL_LDO 11
-#define RES4328_BG_FILTBYP 12
-#define RES4328_TX_FILTBYP 13
-#define RES4328_RX_FILTBYP 14
-#define RES4328_XTAL_PU 15
-#define RES4328_XTAL_EN 16
-#define RES4328_BB_PLL_FILTBYP 17
-#define RES4328_RF_PLL_FILTBYP 18
-#define RES4328_BB_PLL_PU 19
-
-
-#define RES4325_BUCK_BOOST_BURST 0
-#define RES4325_CBUCK_BURST 1
-#define RES4325_CBUCK_PWM 2
-#define RES4325_CLDO_CBUCK_BURST 3
-#define RES4325_CLDO_CBUCK_PWM 4
-#define RES4325_BUCK_BOOST_PWM 5
-#define RES4325_ILP_REQUEST 6
-#define RES4325_ABUCK_BURST 7
-#define RES4325_ABUCK_PWM 8
-#define RES4325_LNLDO1_PU 9
-#define RES4325_OTP_PU 10
-#define RES4325_LNLDO3_PU 11
-#define RES4325_LNLDO4_PU 12
-#define RES4325_XTAL_PU 13
-#define RES4325_ALP_AVAIL 14
-#define RES4325_RX_PWRSW_PU 15
-#define RES4325_TX_PWRSW_PU 16
-#define RES4325_RFPLL_PWRSW_PU 17
-#define RES4325_LOGEN_PWRSW_PU 18
-#define RES4325_AFE_PWRSW_PU 19
-#define RES4325_BBPLL_PWRSW_PU 20
-#define RES4325_HT_AVAIL 21
-
-
-#define RES4325B0_CBUCK_LPOM 1
-#define RES4325B0_CBUCK_BURST 2
-#define RES4325B0_CBUCK_PWM 3
-#define RES4325B0_CLDO_PU 4
-
-
-#define RES4325C1_LNLDO2_PU 12
-
-
+/* 4716/4717/4718 Chip specific ChipControl register bits */
+#define CCTRL_471X_I2S_PINS_ENABLE 0x0080 /* I2S pins off by default, shared w/ pflash */
+
+/* 5357 Chip specific ChipControl register bits */
+/* 2nd - 32-bit reg */
+#define CCTRL_5357_I2S_PINS_ENABLE 0x00040000 /* I2S pins enable */
+#define CCTRL_5357_I2CSPI_PINS_ENABLE 0x00080000 /* I2C/SPI pins enable */
+
+/* 5354 resources */
+#define RES5354_EXT_SWITCHER_PWM 0 /* 0x00001 */
+#define RES5354_BB_SWITCHER_PWM 1 /* 0x00002 */
+#define RES5354_BB_SWITCHER_BURST 2 /* 0x00004 */
+#define RES5354_BB_EXT_SWITCHER_BURST 3 /* 0x00008 */
+#define RES5354_ILP_REQUEST 4 /* 0x00010 */
+#define RES5354_RADIO_SWITCHER_PWM 5 /* 0x00020 */
+#define RES5354_RADIO_SWITCHER_BURST 6 /* 0x00040 */
+#define RES5354_ROM_SWITCH 7 /* 0x00080 */
+#define RES5354_PA_REF_LDO 8 /* 0x00100 */
+#define RES5354_RADIO_LDO 9 /* 0x00200 */
+#define RES5354_AFE_LDO 10 /* 0x00400 */
+#define RES5354_PLL_LDO 11 /* 0x00800 */
+#define RES5354_BG_FILTBYP 12 /* 0x01000 */
+#define RES5354_TX_FILTBYP 13 /* 0x02000 */
+#define RES5354_RX_FILTBYP 14 /* 0x04000 */
+#define RES5354_XTAL_PU 15 /* 0x08000 */
+#define RES5354_XTAL_EN 16 /* 0x10000 */
+#define RES5354_BB_PLL_FILTBYP 17 /* 0x20000 */
+#define RES5354_RF_PLL_FILTBYP 18 /* 0x40000 */
+#define RES5354_BB_PLL_PU 19 /* 0x80000 */
+
+/* 5357 Chip specific ChipControl register bits */
+#define CCTRL5357_EXTPA (1<<14) /* extPA in ChipControl 1, bit 14 */
+#define CCTRL5357_ANT_MUX_2o3 (1<<15) /* 2o3 in ChipControl 1, bit 15 */
+#define CCTRL5357_NFLASH (1<<16) /* Nandflash in ChipControl 1, bit 16 */
+
+/* 43217 Chip specific ChipControl register bits */
+#define CCTRL43217_EXTPA_C0 (1<<13) /* core0 extPA in ChipControl 1, bit 13 */
+#define CCTRL43217_EXTPA_C1 (1<<8) /* core1 extPA in ChipControl 1, bit 8 */
+
+/* 43228 Chip specific ChipControl register bits */
+#define CCTRL43228_EXTPA_C0 (1<<14) /* core1 extPA in ChipControl 1, bit 14 */
+#define CCTRL43228_EXTPA_C1 (1<<9) /* core0 extPA in ChipControl 1, bit 9 */
+
+/* 4328 resources */
+#define RES4328_EXT_SWITCHER_PWM 0 /* 0x00001 */
+#define RES4328_BB_SWITCHER_PWM 1 /* 0x00002 */
+#define RES4328_BB_SWITCHER_BURST 2 /* 0x00004 */
+#define RES4328_BB_EXT_SWITCHER_BURST 3 /* 0x00008 */
+#define RES4328_ILP_REQUEST 4 /* 0x00010 */
+#define RES4328_RADIO_SWITCHER_PWM 5 /* 0x00020 */
+#define RES4328_RADIO_SWITCHER_BURST 6 /* 0x00040 */
+#define RES4328_ROM_SWITCH 7 /* 0x00080 */
+#define RES4328_PA_REF_LDO 8 /* 0x00100 */
+#define RES4328_RADIO_LDO 9 /* 0x00200 */
+#define RES4328_AFE_LDO 10 /* 0x00400 */
+#define RES4328_PLL_LDO 11 /* 0x00800 */
+#define RES4328_BG_FILTBYP 12 /* 0x01000 */
+#define RES4328_TX_FILTBYP 13 /* 0x02000 */
+#define RES4328_RX_FILTBYP 14 /* 0x04000 */
+#define RES4328_XTAL_PU 15 /* 0x08000 */
+#define RES4328_XTAL_EN 16 /* 0x10000 */
+#define RES4328_BB_PLL_FILTBYP 17 /* 0x20000 */
+#define RES4328_RF_PLL_FILTBYP 18 /* 0x40000 */
+#define RES4328_BB_PLL_PU 19 /* 0x80000 */
+
+/* 4325 A0/A1 resources */
+#define RES4325_BUCK_BOOST_BURST 0 /* 0x00000001 */
+#define RES4325_CBUCK_BURST 1 /* 0x00000002 */
+#define RES4325_CBUCK_PWM 2 /* 0x00000004 */
+#define RES4325_CLDO_CBUCK_BURST 3 /* 0x00000008 */
+#define RES4325_CLDO_CBUCK_PWM 4 /* 0x00000010 */
+#define RES4325_BUCK_BOOST_PWM 5 /* 0x00000020 */
+#define RES4325_ILP_REQUEST 6 /* 0x00000040 */
+#define RES4325_ABUCK_BURST 7 /* 0x00000080 */
+#define RES4325_ABUCK_PWM 8 /* 0x00000100 */
+#define RES4325_LNLDO1_PU 9 /* 0x00000200 */
+#define RES4325_OTP_PU 10 /* 0x00000400 */
+#define RES4325_LNLDO3_PU 11 /* 0x00000800 */
+#define RES4325_LNLDO4_PU 12 /* 0x00001000 */
+#define RES4325_XTAL_PU 13 /* 0x00002000 */
+#define RES4325_ALP_AVAIL 14 /* 0x00004000 */
+#define RES4325_RX_PWRSW_PU 15 /* 0x00008000 */
+#define RES4325_TX_PWRSW_PU 16 /* 0x00010000 */
+#define RES4325_RFPLL_PWRSW_PU 17 /* 0x00020000 */
+#define RES4325_LOGEN_PWRSW_PU 18 /* 0x00040000 */
+#define RES4325_AFE_PWRSW_PU 19 /* 0x00080000 */
+#define RES4325_BBPLL_PWRSW_PU 20 /* 0x00100000 */
+#define RES4325_HT_AVAIL 21 /* 0x00200000 */
+
+/* 4325 B0/C0 resources */
+#define RES4325B0_CBUCK_LPOM 1 /* 0x00000002 */
+#define RES4325B0_CBUCK_BURST 2 /* 0x00000004 */
+#define RES4325B0_CBUCK_PWM 3 /* 0x00000008 */
+#define RES4325B0_CLDO_PU 4 /* 0x00000010 */
+
+/* 4325 C1 resources */
+#define RES4325C1_LNLDO2_PU 12 /* 0x00001000 */
+
+/* 4325 chip-specific ChipStatus register bits */
#define CST4325_SPROM_OTP_SEL_MASK 0x00000003
-#define CST4325_DEFCIS_SEL 0
-#define CST4325_SPROM_SEL 1
-#define CST4325_OTP_SEL 2
-#define CST4325_OTP_PWRDN 3
+#define CST4325_DEFCIS_SEL 0 /* OTP is powered up, use def. CIS, no SPROM */
+#define CST4325_SPROM_SEL 1 /* OTP is powered up, SPROM is present */
+#define CST4325_OTP_SEL 2 /* OTP is powered up, no SPROM */
+#define CST4325_OTP_PWRDN 3 /* OTP is powered down, SPROM is present */
#define CST4325_SDIO_USB_MODE_MASK 0x00000004
#define CST4325_SDIO_USB_MODE_SHIFT 2
#define CST4325_RCAL_VALID_MASK 0x00000008
#define CST4325_RCAL_VALID_SHIFT 3
#define CST4325_RCAL_VALUE_MASK 0x000001f0
#define CST4325_RCAL_VALUE_SHIFT 4
-#define CST4325_PMUTOP_2B_MASK 0x00000200
+#define CST4325_PMUTOP_2B_MASK 0x00000200 /* 1 for 2b, 0 for to 2a */
#define CST4325_PMUTOP_2B_SHIFT 9
-#define RES4329_RESERVED0 0
-#define RES4329_CBUCK_LPOM 1
-#define RES4329_CBUCK_BURST 2
-#define RES4329_CBUCK_PWM 3
-#define RES4329_CLDO_PU 4
-#define RES4329_PALDO_PU 5
-#define RES4329_ILP_REQUEST 6
-#define RES4329_RESERVED7 7
-#define RES4329_RESERVED8 8
-#define RES4329_LNLDO1_PU 9
-#define RES4329_OTP_PU 10
-#define RES4329_RESERVED11 11
-#define RES4329_LNLDO2_PU 12
-#define RES4329_XTAL_PU 13
-#define RES4329_ALP_AVAIL 14
-#define RES4329_RX_PWRSW_PU 15
-#define RES4329_TX_PWRSW_PU 16
-#define RES4329_RFPLL_PWRSW_PU 17
-#define RES4329_LOGEN_PWRSW_PU 18
-#define RES4329_AFE_PWRSW_PU 19
-#define RES4329_BBPLL_PWRSW_PU 20
-#define RES4329_HT_AVAIL 21
+#define RES4329_RESERVED0 0 /* 0x00000001 */
+#define RES4329_CBUCK_LPOM 1 /* 0x00000002 */
+#define RES4329_CBUCK_BURST 2 /* 0x00000004 */
+#define RES4329_CBUCK_PWM 3 /* 0x00000008 */
+#define RES4329_CLDO_PU 4 /* 0x00000010 */
+#define RES4329_PALDO_PU 5 /* 0x00000020 */
+#define RES4329_ILP_REQUEST 6 /* 0x00000040 */
+#define RES4329_RESERVED7 7 /* 0x00000080 */
+#define RES4329_RESERVED8 8 /* 0x00000100 */
+#define RES4329_LNLDO1_PU 9 /* 0x00000200 */
+#define RES4329_OTP_PU 10 /* 0x00000400 */
+#define RES4329_RESERVED11 11 /* 0x00000800 */
+#define RES4329_LNLDO2_PU 12 /* 0x00001000 */
+#define RES4329_XTAL_PU 13 /* 0x00002000 */
+#define RES4329_ALP_AVAIL 14 /* 0x00004000 */
+#define RES4329_RX_PWRSW_PU 15 /* 0x00008000 */
+#define RES4329_TX_PWRSW_PU 16 /* 0x00010000 */
+#define RES4329_RFPLL_PWRSW_PU 17 /* 0x00020000 */
+#define RES4329_LOGEN_PWRSW_PU 18 /* 0x00040000 */
+#define RES4329_AFE_PWRSW_PU 19 /* 0x00080000 */
+#define RES4329_BBPLL_PWRSW_PU 20 /* 0x00100000 */
+#define RES4329_HT_AVAIL 21 /* 0x00200000 */
#define CST4329_SPROM_OTP_SEL_MASK 0x00000003
-#define CST4329_DEFCIS_SEL 0
-#define CST4329_SPROM_SEL 1
-#define CST4329_OTP_SEL 2
-#define CST4329_OTP_PWRDN 3
+#define CST4329_DEFCIS_SEL 0 /* OTP is powered up, use def. CIS, no SPROM */
+#define CST4329_SPROM_SEL 1 /* OTP is powered up, SPROM is present */
+#define CST4329_OTP_SEL 2 /* OTP is powered up, no SPROM */
+#define CST4329_OTP_PWRDN 3 /* OTP is powered down, SPROM is present */
#define CST4329_SPI_SDIO_MODE_MASK 0x00000004
#define CST4329_SPI_SDIO_MODE_SHIFT 2
-
+/* 4312 chip-specific ChipStatus register bits */
#define CST4312_SPROM_OTP_SEL_MASK 0x00000003
-#define CST4312_DEFCIS_SEL 0
-#define CST4312_SPROM_SEL 1
-#define CST4312_OTP_SEL 2
-#define CST4312_OTP_BAD 3
-
-
-#define RES4312_SWITCHER_BURST 0
-#define RES4312_SWITCHER_PWM 1
-#define RES4312_PA_REF_LDO 2
-#define RES4312_CORE_LDO_BURST 3
-#define RES4312_CORE_LDO_PWM 4
-#define RES4312_RADIO_LDO 5
-#define RES4312_ILP_REQUEST 6
-#define RES4312_BG_FILTBYP 7
-#define RES4312_TX_FILTBYP 8
-#define RES4312_RX_FILTBYP 9
-#define RES4312_XTAL_PU 10
-#define RES4312_ALP_AVAIL 11
-#define RES4312_BB_PLL_FILTBYP 12
-#define RES4312_RF_PLL_FILTBYP 13
-#define RES4312_HT_AVAIL 14
-
-
+#define CST4312_DEFCIS_SEL 0 /* OTP is powered up, use def. CIS, no SPROM */
+#define CST4312_SPROM_SEL 1 /* OTP is powered up, SPROM is present */
+#define CST4312_OTP_SEL 2 /* OTP is powered up, no SPROM */
+#define CST4312_OTP_BAD 3 /* OTP is broken, SPROM is present */
+
+/* 4312 resources (all PMU chips with little memory constraint) */
+#define RES4312_SWITCHER_BURST 0 /* 0x00000001 */
+#define RES4312_SWITCHER_PWM 1 /* 0x00000002 */
+#define RES4312_PA_REF_LDO 2 /* 0x00000004 */
+#define RES4312_CORE_LDO_BURST 3 /* 0x00000008 */
+#define RES4312_CORE_LDO_PWM 4 /* 0x00000010 */
+#define RES4312_RADIO_LDO 5 /* 0x00000020 */
+#define RES4312_ILP_REQUEST 6 /* 0x00000040 */
+#define RES4312_BG_FILTBYP 7 /* 0x00000080 */
+#define RES4312_TX_FILTBYP 8 /* 0x00000100 */
+#define RES4312_RX_FILTBYP 9 /* 0x00000200 */
+#define RES4312_XTAL_PU 10 /* 0x00000400 */
+#define RES4312_ALP_AVAIL 11 /* 0x00000800 */
+#define RES4312_BB_PLL_FILTBYP 12 /* 0x00001000 */
+#define RES4312_RF_PLL_FILTBYP 13 /* 0x00002000 */
+#define RES4312_HT_AVAIL 14 /* 0x00004000 */
+
+/* 4322 resources */
#define RES4322_RF_LDO 0
#define RES4322_ILP_REQUEST 1
#define RES4322_XTAL_PU 2
#define RES4322_HT_PHY_AVAIL 7
#define RES4322_OTP_PU 8
-
+/* 4322 chip-specific ChipStatus register bits */
#define CST4322_XTAL_FREQ_20_40MHZ 0x00000020
#define CST4322_SPROM_OTP_SEL_MASK 0x000000c0
#define CST4322_SPROM_OTP_SEL_SHIFT 6
-#define CST4322_NO_SPROM_OTP 0
-#define CST4322_SPROM_PRESENT 1
-#define CST4322_OTP_PRESENT 2
+#define CST4322_NO_SPROM_OTP 0 /* no OTP, no SPROM */
+#define CST4322_SPROM_PRESENT 1 /* SPROM is present */
+#define CST4322_OTP_PRESENT 2 /* OTP is present */
#define CST4322_PCI_OR_USB 0x00000100
#define CST4322_BOOT_MASK 0x00000600
#define CST4322_BOOT_SHIFT 9
-#define CST4322_BOOT_FROM_SRAM 0
-#define CST4322_BOOT_FROM_ROM 1
-#define CST4322_BOOT_FROM_FLASH 2
+#define CST4322_BOOT_FROM_SRAM 0 /* boot from SRAM, ARM in reset */
+#define CST4322_BOOT_FROM_ROM 1 /* boot from ROM */
+#define CST4322_BOOT_FROM_FLASH 2 /* boot from FLASH */
#define CST4322_BOOT_FROM_INVALID 3
#define CST4322_ILP_DIV_EN 0x00000800
#define CST4322_FLASH_TYPE_MASK 0x00001000
#define CST4322_FLASH_TYPE_SHIFT 12
-#define CST4322_FLASH_TYPE_SHIFT_ST 0
-#define CST4322_FLASH_TYPE_SHIFT_ATMEL 1
+#define CST4322_FLASH_TYPE_SHIFT_ST 0 /* ST serial FLASH */
+#define CST4322_FLASH_TYPE_SHIFT_ATMEL 1 /* ATMEL flash */
#define CST4322_ARM_TAP_SEL 0x00002000
#define CST4322_RES_INIT_MODE_MASK 0x0000c000
#define CST4322_RES_INIT_MODE_SHIFT 14
-#define CST4322_RES_INIT_MODE_ILPAVAIL 0
-#define CST4322_RES_INIT_MODE_ILPREQ 1
-#define CST4322_RES_INIT_MODE_ALPAVAIL 2
-#define CST4322_RES_INIT_MODE_HTAVAIL 3
+#define CST4322_RES_INIT_MODE_ILPAVAIL 0 /* resinitmode: ILP available */
+#define CST4322_RES_INIT_MODE_ILPREQ 1 /* resinitmode: ILP request */
+#define CST4322_RES_INIT_MODE_ALPAVAIL 2 /* resinitmode: ALP available */
+#define CST4322_RES_INIT_MODE_HTAVAIL 3 /* resinitmode: HT available */
#define CST4322_PCIPLLCLK_GATING 0x00010000
#define CST4322_CLK_SWITCH_PCI_TO_ALP 0x00020000
#define CST4322_PCI_CARDBUS_MODE 0x00040000
+/* 43224 chip-specific ChipControl register bits */
+#define CCTRL43224_GPIO_TOGGLE 0x8000 /* gpio[3:0] pins as btcoex or s/w gpio */
+#define CCTRL_43224A0_12MA_LED_DRIVE 0x00F000F0 /* 12 mA drive strength */
+#define CCTRL_43224B0_12MA_LED_DRIVE 0xF0 /* 12 mA drive strength for later 43224s */
-#define CCTRL43224_GPIO_TOGGLE 0x8000
-#define CCTRL_43224A0_12MA_LED_DRIVE 0x00F000F0
-#define CCTRL_43224B0_12MA_LED_DRIVE 0xF0
-
-
+/* 43236 resources */
#define RES43236_REGULATOR 0
#define RES43236_ILP_REQUEST 1
#define RES43236_XTAL_PU 2
#define RES43236_SI_PLL_ON 4
#define RES43236_HT_SI_AVAIL 5
+/* 43236 chip-specific ChipControl register bits */
+#define CCTRL43236_BT_COEXIST (1<<0) /* 0 disable */
+#define CCTRL43236_SECI (1<<1) /* 0 SECI is disabled (JTAG functional) */
+#define CCTRL43236_EXT_LNA (1<<2) /* 0 disable */
+#define CCTRL43236_ANT_MUX_2o3 (1<<3) /* 2o3 mux, chipcontrol bit 3 */
+#define CCTRL43236_GSIO (1<<4) /* 0 disable */
-#define CCTRL43236_BT_COEXIST (1<<0)
-#define CCTRL43236_SECI (1<<1)
-#define CCTRL43236_EXT_LNA (1<<2)
-#define CCTRL43236_ANT_MUX_2o3 (1<<3)
-#define CCTRL43236_GSIO (1<<4)
-
-
+/* 43236 Chip specific ChipStatus register bits */
#define CST43236_SFLASH_MASK 0x00000040
#define CST43236_OTP_SEL_MASK 0x00000080
#define CST43236_OTP_SEL_SHIFT 7
-#define CST43236_HSIC_MASK 0x00000100
-#define CST43236_BP_CLK 0x00000200
+#define CST43236_HSIC_MASK 0x00000100 /* USB/HSIC */
+#define CST43236_BP_CLK 0x00000200 /* 120/96Mbps */
#define CST43236_BOOT_MASK 0x00001800
#define CST43236_BOOT_SHIFT 11
-#define CST43236_BOOT_FROM_SRAM 0
-#define CST43236_BOOT_FROM_ROM 1
-#define CST43236_BOOT_FROM_FLASH 2
+#define CST43236_BOOT_FROM_SRAM 0 /* boot from SRAM, ARM in reset */
+#define CST43236_BOOT_FROM_ROM 1 /* boot from ROM */
+#define CST43236_BOOT_FROM_FLASH 2 /* boot from FLASH */
#define CST43236_BOOT_FROM_INVALID 3
-
+/* 43237 resources */
#define RES43237_REGULATOR 0
#define RES43237_ILP_REQUEST 1
#define RES43237_XTAL_PU 2
#define RES43237_SI_PLL_ON 4
#define RES43237_HT_SI_AVAIL 5
+/* 43237 chip-specific ChipControl register bits */
+#define CCTRL43237_BT_COEXIST (1<<0) /* 0 disable */
+#define CCTRL43237_SECI (1<<1) /* 0 SECI is disabled (JTAG functional) */
+#define CCTRL43237_EXT_LNA (1<<2) /* 0 disable */
+#define CCTRL43237_ANT_MUX_2o3 (1<<3) /* 2o3 mux, chipcontrol bit 3 */
+#define CCTRL43237_GSIO (1<<4) /* 0 disable */
-#define CCTRL43237_BT_COEXIST (1<<0)
-#define CCTRL43237_SECI (1<<1)
-#define CCTRL43237_EXT_LNA (1<<2)
-#define CCTRL43237_ANT_MUX_2o3 (1<<3)
-#define CCTRL43237_GSIO (1<<4)
-
-
+/* 43237 Chip specific ChipStatus register bits */
#define CST43237_SFLASH_MASK 0x00000040
#define CST43237_OTP_SEL_MASK 0x00000080
#define CST43237_OTP_SEL_SHIFT 7
-#define CST43237_HSIC_MASK 0x00000100
-#define CST43237_BP_CLK 0x00000200
+#define CST43237_HSIC_MASK 0x00000100 /* USB/HSIC */
+#define CST43237_BP_CLK 0x00000200 /* 120/96Mbps */
#define CST43237_BOOT_MASK 0x00001800
#define CST43237_BOOT_SHIFT 11
-#define CST43237_BOOT_FROM_SRAM 0
-#define CST43237_BOOT_FROM_ROM 1
-#define CST43237_BOOT_FROM_FLASH 2
+#define CST43237_BOOT_FROM_SRAM 0 /* boot from SRAM, ARM in reset */
+#define CST43237_BOOT_FROM_ROM 1 /* boot from ROM */
+#define CST43237_BOOT_FROM_FLASH 2 /* boot from FLASH */
#define CST43237_BOOT_FROM_INVALID 3
-
+/* 43239 resources */
#define RES43239_OTP_PU 9
#define RES43239_MACPHY_CLKAVAIL 23
#define RES43239_HT_AVAIL 24
-
+/* 43239 Chip specific ChipStatus register bits */
#define CST43239_SPROM_MASK 0x00000002
#define CST43239_SFLASH_MASK 0x00000004
#define CST43239_RES_INIT_MODE_SHIFT 7
#define CST43239_RES_INIT_MODE_MASK 0x000001f0
-#define CST43239_CHIPMODE_SDIOD(cs) ((cs) & (1 << 15))
-#define CST43239_CHIPMODE_USB20D(cs) (~(cs) & (1 << 15))
-#define CST43239_CHIPMODE_SDIO(cs) (((cs) & (1 << 0)) == 0)
-#define CST43239_CHIPMODE_GSPI(cs) (((cs) & (1 << 0)) == (1 << 0))
-
-
+#define CST43239_CHIPMODE_SDIOD(cs) ((cs) & (1 << 15)) /* SDIO || gSPI */
+#define CST43239_CHIPMODE_USB20D(cs) (~(cs) & (1 << 15)) /* USB || USBDA */
+#define CST43239_CHIPMODE_SDIO(cs) (((cs) & (1 << 0)) == 0) /* SDIO */
+#define CST43239_CHIPMODE_GSPI(cs) (((cs) & (1 << 0)) == (1 << 0)) /* gSPI */
+/* 4324 resources */
+/* 43242 use same PMU as 4324 */
#define RES4324_LPLDO_PU 0
#define RES4324_RESET_PULLDN_DIS 1
#define RES4324_PMU_BG_PU 2
#define RES4324_HT_AVAIL 29
#define RES4324_MACPHY_CLKAVAIL 30
-
+/* 4324 Chip specific ChipStatus register bits */
#define CST4324_SPROM_MASK 0x00000080
#define CST4324_SFLASH_MASK 0x00400000
#define CST4324_RES_INIT_MODE_SHIFT 10
#define CST4324_RES_INIT_MODE_MASK 0x00000c00
#define CST4324_CHIPMODE_MASK 0x7
-#define CST4324_CHIPMODE_SDIOD(cs) ((~(cs)) & (1 << 2))
-#define CST4324_CHIPMODE_USB20D(cs) (((cs) & CST4324_CHIPMODE_MASK) == 0x6)
-
+#define CST4324_CHIPMODE_SDIOD(cs) ((~(cs)) & (1 << 2)) /* SDIO || gSPI */
+#define CST4324_CHIPMODE_USB20D(cs) (((cs) & CST4324_CHIPMODE_MASK) == 0x6) /* USB || USBDA */
+/* 43242 Chip specific ChipStatus register bits */
#define CST43242_SFLASH_MASK 0x00000008
+#define CST43242_SR_HALT (1<<25)
+#define CST43242_SR_CHIP_STATUS_2 27 /* bit 27 */
-
+/* 4331 resources */
#define RES4331_REGULATOR 0
#define RES4331_ILP_REQUEST 1
#define RES4331_XTAL_PU 2
#define RES4331_SI_PLL_ON 4
#define RES4331_HT_SI_AVAIL 5
-
-#define CCTRL4331_BT_COEXIST (1<<0)
-#define CCTRL4331_SECI (1<<1)
-#define CCTRL4331_EXT_LNA_G (1<<2)
-#define CCTRL4331_SPROM_GPIO13_15 (1<<3)
-#define CCTRL4331_EXTPA_EN (1<<4)
-#define CCTRL4331_GPIOCLK_ON_SPROMCS (1<<5)
-#define CCTRL4331_PCIE_MDIO_ON_SPROMCS (1<<6)
-#define CCTRL4331_EXTPA_ON_GPIO2_5 (1<<7)
-#define CCTRL4331_OVR_PIPEAUXCLKEN (1<<8)
-#define CCTRL4331_OVR_PIPEAUXPWRDOWN (1<<9)
-#define CCTRL4331_PCIE_AUXCLKEN (1<<10)
-#define CCTRL4331_PCIE_PIPE_PLLDOWN (1<<11)
-#define CCTRL4331_EXTPA_EN2 (1<<12)
-#define CCTRL4331_EXT_LNA_A (1<<13)
-#define CCTRL4331_BT_SHD0_ON_GPIO4 (1<<16)
-#define CCTRL4331_BT_SHD1_ON_GPIO5 (1<<17)
-#define CCTRL4331_EXTPA_ANA_EN (1<<24)
-
-
-#define CST4331_XTAL_FREQ 0x00000001
+/* 4331 chip-specific ChipControl register bits */
+#define CCTRL4331_BT_COEXIST (1<<0) /* 0 disable */
+#define CCTRL4331_SECI (1<<1) /* 0 SECI is disabled (JTAG functional) */
+#define CCTRL4331_EXT_LNA_G (1<<2) /* 0 disable */
+#define CCTRL4331_SPROM_GPIO13_15 (1<<3) /* sprom/gpio13-15 mux */
+#define CCTRL4331_EXTPA_EN (1<<4) /* 0 ext pa disable, 1 ext pa enabled */
+#define CCTRL4331_GPIOCLK_ON_SPROMCS (1<<5) /* set drive out GPIO_CLK on sprom_cs pin */
+#define CCTRL4331_PCIE_MDIO_ON_SPROMCS (1<<6) /* use sprom_cs pin as PCIE mdio interface */
+#define CCTRL4331_EXTPA_ON_GPIO2_5 (1<<7) /* aband extpa will be at gpio2/5 and sprom_dout */
+#define CCTRL4331_OVR_PIPEAUXCLKEN (1<<8) /* override core control on pipe_AuxClkEnable */
+#define CCTRL4331_OVR_PIPEAUXPWRDOWN (1<<9) /* override core control on pipe_AuxPowerDown */
+#define CCTRL4331_PCIE_AUXCLKEN (1<<10) /* pcie_auxclkenable */
+#define CCTRL4331_PCIE_PIPE_PLLDOWN (1<<11) /* pcie_pipe_pllpowerdown */
+#define CCTRL4331_EXTPA_EN2 (1<<12) /* 0 ext pa disable, 1 ext pa enabled */
+#define CCTRL4331_EXT_LNA_A (1<<13) /* 0 disable */
+#define CCTRL4331_BT_SHD0_ON_GPIO4 (1<<16) /* enable bt_shd0 at gpio4 */
+#define CCTRL4331_BT_SHD1_ON_GPIO5 (1<<17) /* enable bt_shd1 at gpio5 */
+#define CCTRL4331_EXTPA_ANA_EN (1<<24) /* 0 ext pa disable, 1 ext pa enabled */
+
+/* 4331 Chip specific ChipStatus register bits */
+#define CST4331_XTAL_FREQ 0x00000001 /* crystal frequency 20/40MHz */
#define CST4331_SPROM_OTP_SEL_MASK 0x00000006
#define CST4331_SPROM_OTP_SEL_SHIFT 1
#define CST4331_SPROM_PRESENT 0x00000002
#define CST4331_LDO_RF 0x00000008
#define CST4331_LDO_PAR 0x00000010
-
-#define RES4315_CBUCK_LPOM 1
-#define RES4315_CBUCK_BURST 2
-#define RES4315_CBUCK_PWM 3
-#define RES4315_CLDO_PU 4
-#define RES4315_PALDO_PU 5
-#define RES4315_ILP_REQUEST 6
-#define RES4315_LNLDO1_PU 9
-#define RES4315_OTP_PU 10
-#define RES4315_LNLDO2_PU 12
-#define RES4315_XTAL_PU 13
-#define RES4315_ALP_AVAIL 14
-#define RES4315_RX_PWRSW_PU 15
-#define RES4315_TX_PWRSW_PU 16
-#define RES4315_RFPLL_PWRSW_PU 17
-#define RES4315_LOGEN_PWRSW_PU 18
-#define RES4315_AFE_PWRSW_PU 19
-#define RES4315_BBPLL_PWRSW_PU 20
-#define RES4315_HT_AVAIL 21
-
-
-#define CST4315_SPROM_OTP_SEL_MASK 0x00000003
-#define CST4315_DEFCIS_SEL 0x00000000
-#define CST4315_SPROM_SEL 0x00000001
-#define CST4315_OTP_SEL 0x00000002
-#define CST4315_OTP_PWRDN 0x00000003
-#define CST4315_SDIO_MODE 0x00000004
+/* 4315 resource */
+#define RES4315_CBUCK_LPOM 1 /* 0x00000002 */
+#define RES4315_CBUCK_BURST 2 /* 0x00000004 */
+#define RES4315_CBUCK_PWM 3 /* 0x00000008 */
+#define RES4315_CLDO_PU 4 /* 0x00000010 */
+#define RES4315_PALDO_PU 5 /* 0x00000020 */
+#define RES4315_ILP_REQUEST 6 /* 0x00000040 */
+#define RES4315_LNLDO1_PU 9 /* 0x00000200 */
+#define RES4315_OTP_PU 10 /* 0x00000400 */
+#define RES4315_LNLDO2_PU 12 /* 0x00001000 */
+#define RES4315_XTAL_PU 13 /* 0x00002000 */
+#define RES4315_ALP_AVAIL 14 /* 0x00004000 */
+#define RES4315_RX_PWRSW_PU 15 /* 0x00008000 */
+#define RES4315_TX_PWRSW_PU 16 /* 0x00010000 */
+#define RES4315_RFPLL_PWRSW_PU 17 /* 0x00020000 */
+#define RES4315_LOGEN_PWRSW_PU 18 /* 0x00040000 */
+#define RES4315_AFE_PWRSW_PU 19 /* 0x00080000 */
+#define RES4315_BBPLL_PWRSW_PU 20 /* 0x00100000 */
+#define RES4315_HT_AVAIL 21 /* 0x00200000 */
+
+/* 4315 chip-specific ChipStatus register bits */
+#define CST4315_SPROM_OTP_SEL_MASK 0x00000003 /* gpio [7:6], SDIO CIS selection */
+#define CST4315_DEFCIS_SEL 0x00000000 /* use default CIS, OTP is powered up */
+#define CST4315_SPROM_SEL 0x00000001 /* use SPROM, OTP is powered up */
+#define CST4315_OTP_SEL 0x00000002 /* use OTP, OTP is powered up */
+#define CST4315_OTP_PWRDN 0x00000003 /* use SPROM, OTP is powered down */
+#define CST4315_SDIO_MODE 0x00000004 /* gpio [8], sdio/usb mode */
#define CST4315_RCAL_VALID 0x00000008
#define CST4315_RCAL_VALUE_MASK 0x000001f0
#define CST4315_RCAL_VALUE_SHIFT 4
-#define CST4315_PALDO_EXTPNP 0x00000200
+#define CST4315_PALDO_EXTPNP 0x00000200 /* PALDO is configured with external PNP */
#define CST4315_CBUCK_MODE_MASK 0x00000c00
#define CST4315_CBUCK_MODE_BURST 0x00000400
#define CST4315_CBUCK_MODE_LPBURST 0x00000c00
-
-#define RES4319_CBUCK_LPOM 1
-#define RES4319_CBUCK_BURST 2
-#define RES4319_CBUCK_PWM 3
-#define RES4319_CLDO_PU 4
-#define RES4319_PALDO_PU 5
-#define RES4319_ILP_REQUEST 6
-#define RES4319_LNLDO1_PU 9
-#define RES4319_OTP_PU 10
-#define RES4319_LNLDO2_PU 12
-#define RES4319_XTAL_PU 13
-#define RES4319_ALP_AVAIL 14
-#define RES4319_RX_PWRSW_PU 15
-#define RES4319_TX_PWRSW_PU 16
-#define RES4319_RFPLL_PWRSW_PU 17
-#define RES4319_LOGEN_PWRSW_PU 18
-#define RES4319_AFE_PWRSW_PU 19
-#define RES4319_BBPLL_PWRSW_PU 20
-#define RES4319_HT_AVAIL 21
-
-
+/* 4319 resources */
+#define RES4319_CBUCK_LPOM 1 /* 0x00000002 */
+#define RES4319_CBUCK_BURST 2 /* 0x00000004 */
+#define RES4319_CBUCK_PWM 3 /* 0x00000008 */
+#define RES4319_CLDO_PU 4 /* 0x00000010 */
+#define RES4319_PALDO_PU 5 /* 0x00000020 */
+#define RES4319_ILP_REQUEST 6 /* 0x00000040 */
+#define RES4319_LNLDO1_PU 9 /* 0x00000200 */
+#define RES4319_OTP_PU 10 /* 0x00000400 */
+#define RES4319_LNLDO2_PU 12 /* 0x00001000 */
+#define RES4319_XTAL_PU 13 /* 0x00002000 */
+#define RES4319_ALP_AVAIL 14 /* 0x00004000 */
+#define RES4319_RX_PWRSW_PU 15 /* 0x00008000 */
+#define RES4319_TX_PWRSW_PU 16 /* 0x00010000 */
+#define RES4319_RFPLL_PWRSW_PU 17 /* 0x00020000 */
+#define RES4319_LOGEN_PWRSW_PU 18 /* 0x00040000 */
+#define RES4319_AFE_PWRSW_PU 19 /* 0x00080000 */
+#define RES4319_BBPLL_PWRSW_PU 20 /* 0x00100000 */
+#define RES4319_HT_AVAIL 21 /* 0x00200000 */
+
+/* 4319 chip-specific ChipStatus register bits */
#define CST4319_SPI_CPULESSUSB 0x00000001
#define CST4319_SPI_CLK_POL 0x00000002
#define CST4319_SPI_CLK_PH 0x00000008
-#define CST4319_SPROM_OTP_SEL_MASK 0x000000c0
+#define CST4319_SPROM_OTP_SEL_MASK 0x000000c0 /* gpio [7:6], SDIO CIS selection */
#define CST4319_SPROM_OTP_SEL_SHIFT 6
-#define CST4319_DEFCIS_SEL 0x00000000
-#define CST4319_SPROM_SEL 0x00000040
-#define CST4319_OTP_SEL 0x00000080
-#define CST4319_OTP_PWRDN 0x000000c0
-#define CST4319_SDIO_USB_MODE 0x00000100
+#define CST4319_DEFCIS_SEL 0x00000000 /* use default CIS, OTP is powered up */
+#define CST4319_SPROM_SEL 0x00000040 /* use SPROM, OTP is powered up */
+#define CST4319_OTP_SEL 0x00000080 /* use OTP, OTP is powered up */
+#define CST4319_OTP_PWRDN 0x000000c0 /* use SPROM, OTP is powered down */
+#define CST4319_SDIO_USB_MODE 0x00000100 /* gpio [8], sdio/usb mode */
#define CST4319_REMAP_SEL_MASK 0x00000600
#define CST4319_ILPDIV_EN 0x00000800
#define CST4319_XTAL_PD_POL 0x00001000
#define CST4319_LPO_SEL 0x00002000
#define CST4319_RES_INIT_MODE 0x0000c000
-#define CST4319_PALDO_EXTPNP 0x00010000
+#define CST4319_PALDO_EXTPNP 0x00010000 /* PALDO is configured with external PNP */
#define CST4319_CBUCK_MODE_MASK 0x00060000
#define CST4319_CBUCK_MODE_BURST 0x00020000
#define CST4319_CBUCK_MODE_LPBURST 0x00060000
#define CCTL_4319USB_48MHZ_PLL_SEL 1
#define CCTL_4319USB_24MHZ_PLL_SEL 2
-
+/* PMU resources for 4336 */
#define RES4336_CBUCK_LPOM 0
#define RES4336_CBUCK_BURST 1
#define RES4336_CBUCK_LP_PWM 2
#define RES4336_HT_AVAIL 24
#define RES4336_RSVD 25
-
+/* 4336 chip-specific ChipStatus register bits */
#define CST4336_SPI_MODE_MASK 0x00000001
#define CST4336_SPROM_PRESENT 0x00000002
#define CST4336_OTP_PRESENT 0x00000004
#define CST4336_CBUCK_MODE_MASK 0x00000600
#define CST4336_CBUCK_MODE_SHIFT 9
-
+/* 4336 Chip specific PMU ChipControl register bits */
#define PCTL_4336_SERIAL_ENAB (1 << 24)
-
+/* 4330 resources */
#define RES4330_CBUCK_LPOM 0
#define RES4330_CBUCK_BURST 1
#define RES4330_CBUCK_LP_PWM 2
#define RES4330_5gTX_PWRSW_PU 26
#define RES4330_5g_LOGEN_PWRSW_PU 27
-
-#define CST4330_CHIPMODE_SDIOD(cs) (((cs) & 0x7) < 6)
-#define CST4330_CHIPMODE_USB20D(cs) (((cs) & 0x7) >= 6)
-#define CST4330_CHIPMODE_SDIO(cs) (((cs) & 0x4) == 0)
-#define CST4330_CHIPMODE_GSPI(cs) (((cs) & 0x6) == 4)
-#define CST4330_CHIPMODE_USB(cs) (((cs) & 0x7) == 6)
-#define CST4330_CHIPMODE_USBDA(cs) (((cs) & 0x7) == 7)
+/* 4330 chip-specific ChipStatus register bits */
+#define CST4330_CHIPMODE_SDIOD(cs) (((cs) & 0x7) < 6) /* SDIO || gSPI */
+#define CST4330_CHIPMODE_USB20D(cs) (((cs) & 0x7) >= 6) /* USB || USBDA */
+#define CST4330_CHIPMODE_SDIO(cs) (((cs) & 0x4) == 0) /* SDIO */
+#define CST4330_CHIPMODE_GSPI(cs) (((cs) & 0x6) == 4) /* gSPI */
+#define CST4330_CHIPMODE_USB(cs) (((cs) & 0x7) == 6) /* USB packet-oriented */
+#define CST4330_CHIPMODE_USBDA(cs) (((cs) & 0x7) == 7) /* USB Direct Access */
#define CST4330_OTP_PRESENT 0x00000010
#define CST4330_LPO_AUTODET_EN 0x00000020
#define CST4330_ARMREMAP_0 0x00000040
-#define CST4330_SPROM_PRESENT 0x00000080
+#define CST4330_SPROM_PRESENT 0x00000080 /* takes priority over OTP if both set */
#define CST4330_ILPDIV_EN 0x00000100
#define CST4330_LPO_SEL 0x00000200
#define CST4330_RES_INIT_MODE_SHIFT 10
#define SOCDEVRAM_BP_ADDR 0x1E000000
#define SOCDEVRAM_ARM_ADDR 0x00800000
-
+/* 4330 Chip specific PMU ChipControl register bits */
#define PCTL_4330_SERIAL_ENAB (1 << 24)
-
-#define CCTRL_4330_GPIO_SEL 0x00000001
-#define CCTRL_4330_ERCX_SEL 0x00000002
-#define CCTRL_4330_SDIO_HOST_WAKE 0x00000004
-#define CCTRL_4330_JTAG_DISABLE 0x00000008
+/* 4330 Chip specific ChipControl register bits */
+#define CCTRL_4330_GPIO_SEL 0x00000001 /* 1=select GPIOs to be muxed out */
+#define CCTRL_4330_ERCX_SEL 0x00000002 /* 1=select ERCX BT coex to be muxed out */
+#define CCTRL_4330_SDIO_HOST_WAKE 0x00000004 /* SDIO: 1=configure GPIO0 for host wake */
+#define CCTRL_4330_JTAG_DISABLE 0x00000008 /* 1=disable JTAG interface on mux'd pins */
#define PMU_VREG0_ADDR 0
#define PMU_VREG0_DISABLE_PULLD_BT_SHIFT 2
#define PMU_VREG4_LPLDO1_0p95V 6
#define PMU_VREG4_LPLDO1_0p90V 7
+/* 4350/4345 VREG4 settings */
+#define PMU4350_VREG4_LPLDO1_1p10V 0
+#define PMU4350_VREG4_LPLDO1_1p15V 1
+#define PMU4350_VREG4_LPLDO1_1p21V 2
+#define PMU4350_VREG4_LPLDO1_1p24V 3
+#define PMU4350_VREG4_LPLDO1_0p90V 4
+#define PMU4350_VREG4_LPLDO1_0p96V 5
+#define PMU4350_VREG4_LPLDO1_1p01V 6
+#define PMU4350_VREG4_LPLDO1_1p04V 7
+
#define PMU_VREG4_LPLDO2_LVM_SHIFT 18
#define PMU_VREG4_LPLDO2_LVM_MASK 0x7
#define PMU_VREG4_LPLDO2_HVM_SHIFT 21
#define PMU_VREG4_LPLDO2_1p15V 1
#define PMU_VREG4_LPLDO2_1p20V 2
#define PMU_VREG4_LPLDO2_1p10V 3
-#define PMU_VREG4_LPLDO2_0p90V 4
+#define PMU_VREG4_LPLDO2_0p90V 4 /* 4 - 7 is 0.90V */
#define PMU_VREG4_HSICLDO_BYPASS_SHIFT 27
#define PMU_VREG4_HSICLDO_BYPASS_MASK 0x1
#define PMU_VREG5_HSICDVDD_PD_SHIFT 11
#define PMU_VREG5_HSICDVDD_PD_MASK 0x1
-
+/* 4334 resources */
#define RES4334_LPLDO_PU 0
#define RES4334_RESET_PULLDN_DIS 1
#define RES4334_PMU_BG_PU 2
#define RES4334_HT_AVAIL 29
#define RES4334_MACPHY_CLK_AVAIL 30
-
+/* 4334 chip-specific ChipStatus register bits */
#define CST4334_CHIPMODE_MASK 7
#define CST4334_SDIO_MODE 0x00000000
#define CST4334_SPI_MODE 0x00000004
#define CST4334_RES_INIT_MODE_MASK 0x00000C00
#define CST4334_RES_INIT_MODE_SHIFT 10
-
+/* 4334 Chip specific PMU ChipControl register bits */
#define PCTL_4334_GPIO3_ENAB (1 << 3)
-
+/* 4334 Chip control */
#define CCTRL4334_PMU_WAKEUP_GPIO1 (1 << 0)
#define CCTRL4334_PMU_WAKEUP_HSIC (1 << 1)
#define CCTRL4334_PMU_WAKEUP_AOS (1 << 2)
#define CCTRL4334_HSIC_INBAND_GPIO1 (1 << 4)
#define CCTRL4334_HSIC_LDO_PU (1 << 23)
-
+/* 4334 Chip control 3 */
#define CCTRL4334_BLOCK_EXTRNL_WAKE (1 << 4)
#define CCTRL4334_SAVERESTORE_FIX (1 << 5)
-
+/* 43341 Chip control 3 */
#define CCTRL43341_BLOCK_EXTRNL_WAKE (1 << 13)
#define CCTRL43341_SAVERESTORE_FIX (1 << 14)
#define CCTRL43341_BT_ISO_SEL (1 << 16)
-
-#define CCTRL1_4334_GPIO_SEL (1 << 0)
-#define CCTRL1_4334_ERCX_SEL (1 << 1)
-#define CCTRL1_4334_SDIO_HOST_WAKE (1 << 2)
-#define CCTRL1_4334_JTAG_DISABLE (1 << 3)
-#define CCTRL1_4334_UART_ON_4_5 (1 << 28)
-
-
-#define CCTRL1_4324_GPIO_SEL (1 << 0)
-#define CCTRL1_4324_SDIO_HOST_WAKE (1 << 2)
-
-
-
-#define CST43143_REMAP_TO_ROM (3 << 0)
-#define CST43143_SDIO_EN (1 << 2)
-#define CST43143_SDIO_ISO (1 << 3)
-#define CST43143_USB_CPU_LESS (1 << 4)
-#define CST43143_CBUCK_MODE (3 << 6)
-#define CST43143_POK_CBUCK (1 << 8)
+/* 4334 Chip specific ChipControl1 register bits */
+#define CCTRL1_4334_GPIO_SEL (1 << 0) /* 1=select GPIOs to be muxed out */
+#define CCTRL1_4334_ERCX_SEL (1 << 1) /* 1=select ERCX BT coex to be muxed out */
+#define CCTRL1_4334_SDIO_HOST_WAKE (1 << 2) /* SDIO: 1=configure GPIO0 for host wake */
+#define CCTRL1_4334_JTAG_DISABLE (1 << 3) /* 1=disable JTAG interface on mux'd pins */
+#define CCTRL1_4334_UART_ON_4_5 (1 << 28) /* 1=UART_TX/UART_RX muxed on GPIO_4/5 (4334B0/1) */
+
+/* 4324 Chip specific ChipControl1 register bits */
+#define CCTRL1_4324_GPIO_SEL (1 << 0) /* 1=select GPIOs to be muxed out */
+#define CCTRL1_4324_SDIO_HOST_WAKE (1 << 2) /* SDIO: 1=configure GPIO0 for host wake */
+
+/* 43143 chip-specific ChipStatus register bits based on Confluence documentation */
+/* register contains strap values sampled during POR */
+#define CST43143_REMAP_TO_ROM (3 << 0) /* 00=Boot SRAM, 01=Boot ROM, 10=Boot SFLASH */
+#define CST43143_SDIO_EN (1 << 2) /* 0 = USB Enab, SDIO pins are GPIO or I2S */
+#define CST43143_SDIO_ISO (1 << 3) /* 1 = SDIO isolated */
+#define CST43143_USB_CPU_LESS (1 << 4) /* 1 = CPULess mode Enabled */
+#define CST43143_CBUCK_MODE (3 << 6) /* Indicates what controller mode CBUCK is in */
+#define CST43143_POK_CBUCK (1 << 8) /* 1 = 1.2V CBUCK voltage ready */
#define CST43143_PMU_OVRSPIKE (1 << 9)
#define CST43143_PMU_OVRTEMP (0xF << 10)
#define CST43143_SR_FLL_CAL_DONE (1 << 14)
#define CST43143_USB_PLL_LOCKDET (1 << 15)
#define CST43143_PMU_PLL_LOCKDET (1 << 16)
-#define CST43143_CHIPMODE_SDIOD(cs) (((cs) & CST43143_SDIO_EN) != 0)
-
-
+#define CST43143_CHIPMODE_SDIOD(cs) (((cs) & CST43143_SDIO_EN) != 0) /* SDIO */
+/* 43143 Chip specific ChipControl register bits */
+/* 00: SECI is disabled (JTAG functional), 01: 2 wire, 10: 4 wire */
#define CCTRL_43143_SECI (1<<0)
#define CCTRL_43143_BT_LEGACY (1<<1)
-#define CCTRL_43143_I2S_MODE (1<<2)
-#define CCTRL_43143_I2S_MASTER (1<<3)
-#define CCTRL_43143_I2S_FULL (1<<4)
-#define CCTRL_43143_GSIO (1<<5)
-#define CCTRL_43143_RF_SWCTRL_MASK (7<<6)
+#define CCTRL_43143_I2S_MODE (1<<2) /* 0: SDIO enabled */
+#define CCTRL_43143_I2S_MASTER (1<<3) /* 0: I2S MCLK input disabled */
+#define CCTRL_43143_I2S_FULL (1<<4) /* 0: I2S SDIN and SPDIF_TX inputs disabled */
+#define CCTRL_43143_GSIO (1<<5) /* 0: sFlash enabled */
+#define CCTRL_43143_RF_SWCTRL_MASK (7<<6) /* 0: disabled */
#define CCTRL_43143_RF_SWCTRL_0 (1<<6)
#define CCTRL_43143_RF_SWCTRL_1 (2<<6)
#define CCTRL_43143_RF_SWCTRL_2 (4<<6)
-#define CCTRL_43143_RF_XSWCTRL (1<<9)
-#define CCTRL_43143_HOST_WAKE0 (1<<11)
-#define CCTRL_43143_HOST_WAKE1 (1<<12)
-
-
-#define RES43143_EXT_SWITCHER_PWM 0
-#define RES43143_XTAL_PU 1
-#define RES43143_ILP_REQUEST 2
-#define RES43143_ALP_AVAIL 3
-#define RES43143_WL_CORE_READY 4
-#define RES43143_BBPLL_PWRSW_PU 5
-#define RES43143_HT_AVAIL 6
-#define RES43143_RADIO_PU 7
-#define RES43143_MACPHY_CLK_AVAIL 8
-#define RES43143_OTP_PU 9
-#define RES43143_LQ_AVAIL 10
+#define CCTRL_43143_RF_XSWCTRL (1<<9) /* 0: UART enabled */
+#define CCTRL_43143_HOST_WAKE0 (1<<11) /* 1: SDIO separate interrupt output from GPIO4 */
+#define CCTRL_43143_HOST_WAKE1 (1<<12) /* 1: SDIO separate interrupt output from GPIO16 */
+
+/* 43143 resources, based on pmu_params.xls V1.19 */
+#define RES43143_EXT_SWITCHER_PWM 0 /* 0x00001 */
+#define RES43143_XTAL_PU 1 /* 0x00002 */
+#define RES43143_ILP_REQUEST 2 /* 0x00004 */
+#define RES43143_ALP_AVAIL 3 /* 0x00008 */
+#define RES43143_WL_CORE_READY 4 /* 0x00010 */
+#define RES43143_BBPLL_PWRSW_PU 5 /* 0x00020 */
+#define RES43143_HT_AVAIL 6 /* 0x00040 */
+#define RES43143_RADIO_PU 7 /* 0x00080 */
+#define RES43143_MACPHY_CLK_AVAIL 8 /* 0x00100 */
+#define RES43143_OTP_PU 9 /* 0x00200 */
+#define RES43143_LQ_AVAIL 10 /* 0x00400 */
#define PMU43143_XTAL_CORE_SIZE_MASK 0x3F
-
+/* 4313 resources */
#define RES4313_BB_PU_RSRC 0
#define RES4313_ILP_REQ_RSRC 1
#define RES4313_XTAL_PU_RSRC 2
#define RES4313_HT_AVAIL_RSRC 14
#define RES4313_MACPHY_CLK_AVAIL_RSRC 15
-
+/* 4313 chip-specific ChipStatus register bits */
#define CST4313_SPROM_PRESENT 1
#define CST4313_OTP_PRESENT 2
#define CST4313_SPROM_OTP_SEL_MASK 0x00000002
#define CST4313_SPROM_OTP_SEL_SHIFT 0
-#define CCTRL_4313_12MA_LED_DRIVE 0x00000007
+/* 4313 Chip specific ChipControl register bits */
+#define CCTRL_4313_12MA_LED_DRIVE 0x00000007 /* 12 mA drive strength for later 4313 */
-
-
+/* PMU resources for 4314 */
#define RES4314_LPLDO_PU 0
#define RES4314_PMU_SLEEP_DIS 1
#define RES4314_PMU_BG_PU 2
#define RES4314_HT_AVAIL 28
#define RES4314_MACPHY_CLK_AVAIL 29
-
+/* 4314 chip-specific ChipStatus register bits */
#define CST4314_OTP_ENABLED 0x00200000
-
+/* 43228 resources */
#define RES43228_NOT_USED 0
#define RES43228_ILP_REQUEST 1
#define RES43228_XTAL_PU 2
#define RES43228_PLL_EN 4
#define RES43228_HT_PHY_AVAIL 5
-
+/* 43228 chipstatus reg bits */
#define CST43228_ILP_DIV_EN 0x1
#define CST43228_OTP_PRESENT 0x2
#define CST43228_SERDES_REFCLK_PADSEL 0x4
#define CST43228_SDIO_OTP_PRESENT 0x10
#define CST43228_SDIO_RESET 0x20
-#define CST4706_PKG_OPTION (1<<0)
-#define CST4706_SFLASH_PRESENT (1<<1)
-#define CST4706_SFLASH_TYPE (1<<2)
-#define CST4706_MIPS_BENDIAN (1<<3)
-#define CST4706_PCIE1_DISABLE (1<<5)
+/* 4706 chipstatus reg bits */
+#define CST4706_PKG_OPTION (1<<0) /* 0: full-featured package, 1: low-cost package */
+#define CST4706_SFLASH_PRESENT (1<<1) /* 0: parallel, 1: serial flash is present */
+#define CST4706_SFLASH_TYPE (1<<2) /* 0: 8b-p/ST-s flash, 1: 16b-p/Atmel-s flash */
+#define CST4706_MIPS_BENDIAN (1<<3) /* 0: little, 1: big endian */
+#define CST4706_PCIE1_DISABLE (1<<5) /* PCIE1 enable strap pin */
-
-
+/* 4706 flashstrconfig reg bits */
#define FLSTRCF4706_MASK 0x000000ff
-#define FLSTRCF4706_SF1 0x00000001
-#define FLSTRCF4706_PF1 0x00000002
-#define FLSTRCF4706_SF1_TYPE 0x00000004
-#define FLSTRCF4706_NF1 0x00000008
-#define FLSTRCF4706_1ST_MADDR_SEG_MASK 0x000000f0
-#define FLSTRCF4706_1ST_MADDR_SEG_4MB 0x00000010
-#define FLSTRCF4706_1ST_MADDR_SEG_8MB 0x00000020
-#define FLSTRCF4706_1ST_MADDR_SEG_16MB 0x00000030
-#define FLSTRCF4706_1ST_MADDR_SEG_32MB 0x00000040
-#define FLSTRCF4706_1ST_MADDR_SEG_64MB 0x00000050
-#define FLSTRCF4706_1ST_MADDR_SEG_128MB 0x00000060
-#define FLSTRCF4706_1ST_MADDR_SEG_256MB 0x00000070
-
-
+#define FLSTRCF4706_SF1 0x00000001 /* 2nd serial flash present */
+#define FLSTRCF4706_PF1 0x00000002 /* 2nd parallel flash present */
+#define FLSTRCF4706_SF1_TYPE 0x00000004 /* 2nd serial flash type : 0 : ST, 1 : Atmel */
+#define FLSTRCF4706_NF1 0x00000008 /* 2nd NAND flash present */
+#define FLSTRCF4706_1ST_MADDR_SEG_MASK 0x000000f0 /* Valid value mask */
+#define FLSTRCF4706_1ST_MADDR_SEG_4MB 0x00000010 /* 4MB */
+#define FLSTRCF4706_1ST_MADDR_SEG_8MB 0x00000020 /* 8MB */
+#define FLSTRCF4706_1ST_MADDR_SEG_16MB 0x00000030 /* 16MB */
+#define FLSTRCF4706_1ST_MADDR_SEG_32MB 0x00000040 /* 32MB */
+#define FLSTRCF4706_1ST_MADDR_SEG_64MB 0x00000050 /* 64MB */
+#define FLSTRCF4706_1ST_MADDR_SEG_128MB 0x00000060 /* 128MB */
+#define FLSTRCF4706_1ST_MADDR_SEG_256MB 0x00000070 /* 256MB */
+
+/* 4360 Chip specific ChipControl register bits */
#define CCTRL4360_I2C_MODE (1 << 0)
#define CCTRL4360_UART_MODE (1 << 1)
#define CCTRL4360_SECI_MODE (1 << 2)
#define CCTRL4360_CORE2FEMCTRL4_ON (1 << 21)
#define CCTRL4360_SECI_ON_GPIO01 (1 << 24)
-
+/* 4360 Chip specific Regulator Control register bits */
+#define RCTRL4360_RFLDO_PWR_DOWN (1 << 1)
+/* 4360 PMU resources and chip status bits */
#define RES4360_REGULATOR 0
#define RES4360_ILP_AVAIL 1
#define RES4360_ILP_REQ 2
#define CST4360_BBPLL_LOCK 0x00000800
#define CST4360_AVBBPLL_LOCK 0x00001000
#define CST4360_USBBBPLL_LOCK 0x00002000
+#define CST4360_RSRC_INIT_MODE(cs) ((cs & CST4360_RSRC_INIT_MODE_MASK) >> \
+ CST4360_RSRC_INIT_MODE_SHIFT)
#define CCTRL_4360_UART_SEL 0x2
-
-
+
+
+/* 43602 PMU resources based on pmu_params.xls version v0.95 */
+#define RES43602_LPLDO_PU 0
+#define RES43602_REGULATOR 1
+#define RES43602_PMU_SLEEP 2
+#define RES43602_RSVD_3 3
+#define RES43602_XTALLDO_PU 4
+#define RES43602_SERDES_PU 5
+#define RES43602_BBPLL_PWRSW_PU 6
+#define RES43602_SR_CLK_START 7
+#define RES43602_SR_PHY_PWRSW 8
+#define RES43602_SR_SUBCORE_PWRSW 9
+#define RES43602_XTAL_PU 10
+#define RES43602_PERST_OVR 11
+#define RES43602_SR_CLK_STABLE 12
+#define RES43602_SR_SAVE_RESTORE 13
+#define RES43602_SR_SLEEP 14
+#define RES43602_LQ_START 15
+#define RES43602_LQ_AVAIL 16
+#define RES43602_WL_CORE_RDY 17
+#define RES43602_ILP_REQ 18
+#define RES43602_ALP_AVAIL 19
+#define RES43602_RADIO_PU 20
+#define RES43602_RFLDO_PU 21
+#define RES43602_HT_START 22
+#define RES43602_HT_AVAIL 23
+#define RES43602_MACPHY_CLKAVAIL 24
+#define RES43602_PARLDO_PU 25
+#define RES43602_RSVD_26 26
+
+/* 43602 chip status bits */
+#define CST43602_SPROM_PRESENT (1<<1)
+#define CST43602_SPROM_SIZE (1<<10) /* 0 = 16K, 1 = 4K */
+#define CST43602_BBPLL_LOCK (1<<11)
+#define CST43602_RF_LDO_OUT_OK (1<<15) /* RF LDO output OK */
+
+#define PMU43602_CC1_GPIO12_OVRD (1<<28) /* GPIO12 override */
+
+#define PMU43602_CC2_PCIE_CLKREQ_L_WAKE_EN (1<<1) /* creates gated_pcie_wake, pmu_wakeup logic */
+#define PMU43602_CC2_PCIE_PERST_L_WAKE_EN (1<<2) /* creates gated_pcie_wake, pmu_wakeup logic */
+#define PMU43602_CC2_ENABLE_L2REFCLKPAD_PWRDWN (1<<3)
+#define PMU43602_CC2_PMU_WAKE_ALP_AVAIL_EN (1<<5) /* enable pmu_wakeup to request for ALP_AVAIL */
+#define PMU43602_CC2_PERST_L_EXTEND_EN (1<<9) /* extend perst_l until rsc PERST_OVR comes up */
+#define PMU43602_CC2_FORCE_EXT_LPO (1<<19) /* 1=ext LPO clock is the final LPO clock */
+#define PMU43602_CC2_XTAL32_SEL (1<<30) /* 0=ext_clock, 1=xtal */
+
+#define CC_SR1_43602_SR_ASM_ADDR (0x0)
+
+/* PLL CTL register values for open loop, used during S/R operation */
+#define PMU43602_PLL_CTL6_VAL 0x68000528
+#define PMU43602_PLL_CTL7_VAL 0x6
+
+#define PMU43602_CC3_ARMCR4_DBG_CLK (1 << 29)
+
+/* 4349 related */
+#define RES4349_LPLDO_PU 0
+#define RES4349_BG_PU 1
+#define RES4349_PMU_SLEEP 2
+#define RES4349_PALDO3P3_PU 3
+#define RES4349_CBUCK_LPOM_PU 4
+#define RES4349_CBUCK_PFM_PU 5
+#define RES4349_COLD_START_WAIT 6
+#define RES4349_RSVD_7 7
+#define RES4349_LNLDO_PU 8
+#define RES4349_XTALLDO_PU 9
+#define RES4349_LDO3P3_PU 10
+#define RES4349_OTP_PU 11
+#define RES4349_XTAL_PU 12
+#define RES4349_SR_CLK_START 13
+#define RES4349_LQ_AVAIL 14
+#define RES4349_LQ_START 15
+#define RES4349_PERST_OVR 16
+#define RES4349_WL_CORE_RDY 17
+#define RES4349_ILP_REQ 18
+#define RES4349_ALP_AVAIL 19
+#define RES4349_MINI_PMU 20
+#define RES4349_RADIO_PU 21
+#define RES4349_SR_CLK_STABLE 22
+#define RES4349_SR_SAVE_RESTORE 23
+#define RES4349_SR_PHY_PWRSW 24
+#define RES4349_SR_VDDM_PWRSW 25
+#define RES4349_SR_SUBCORE_PWRSW 26
+#define RES4349_SR_SLEEP 27
+#define RES4349_HT_START 28
+#define RES4349_HT_AVAIL 29
+#define RES4349_MACPHY_CLKAVAIL 30
+
+#define CR4_4349_RAM_BASE (0x180000)
+#define CC4_4349_SR_ASM_ADDR (0x48)
+
+#define CST4349_CHIPMODE_SDIOD(cs) (((cs) & (1 << 6)) != 0) /* SDIO */
+#define CST4349_CHIPMODE_PCIE(cs) (((cs) & (1 << 7)) != 0) /* PCIE */
+
+#define CST4349_SPROM_PRESENT 0x00000010
+
+
+/* 43430 PMU resources based on pmu_params.xls */
+#define RES43430_LPLDO_PU 0
+#define RES43430_BG_PU 1
+#define RES43430_PMU_SLEEP 2
+#define RES43430_RSVD_3 3
+#define RES43430_CBUCK_LPOM_PU 4
+#define RES43430_CBUCK_PFM_PU 5
+#define RES43430_COLD_START_WAIT 6
+#define RES43430_RSVD_7 7
+#define RES43430_LNLDO_PU 8
+#define RES43430_RSVD_9 9
+#define RES43430_LDO3P3_PU 10
+#define RES43430_OTP_PU 11
+#define RES43430_XTAL_PU 12
+#define RES43430_SR_CLK_START 13
+#define RES43430_LQ_AVAIL 14
+#define RES43430_LQ_START 15
+#define RES43430_RSVD_16 16
+#define RES43430_WL_CORE_RDY 17
+#define RES43430_ILP_REQ 18
+#define RES43430_ALP_AVAIL 19
+#define RES43430_MINI_PMU 20
+#define RES43430_RADIO_PU 21
+#define RES43430_SR_CLK_STABLE 22
+#define RES43430_SR_SAVE_RESTORE 23
+#define RES43430_SR_PHY_PWRSW 24
+#define RES43430_SR_VDDM_PWRSW 25
+#define RES43430_SR_SUBCORE_PWRSW 26
+#define RES43430_SR_SLEEP 27
+#define RES43430_HT_START 28
+#define RES43430_HT_AVAIL 29
+#define RES43430_MACPHY_CLK_AVAIL 30
+
+/* 43430 chip status bits */
+#define CST43430_SDIO_MODE 0x00000001
+#define CST43430_GSPI_MODE 0x00000002
+#define CST43430_RSRC_INIT_MODE_0 0x00000080
+#define CST43430_RSRC_INIT_MODE_1 0x00000100
+#define CST43430_SEL0_SDIO 0x00000200
+#define CST43430_SEL1_SDIO 0x00000400
+#define CST43430_SEL2_SDIO 0x00000800
+#define CST43430_BBPLL_LOCKED 0x00001000
+#define CST43430_DBG_INST_DETECT 0x00004000
+#define CST43430_CLB2WL_BT_READY 0x00020000
+#define CST43430_JTAG_MODE 0x00100000
+#define CST43430_HOST_IFACE 0x00400000
+#define CST43430_TRIM_EN 0x00800000
+#define CST43430_DIN_PACKAGE_OPTION 0x10000000
+
+/* defines to detect active host interface in use */
#define CHIP_HOSTIF_PCIEMODE 0x1
#define CHIP_HOSTIF_USBMODE 0x2
#define CHIP_HOSTIF_SDIOMODE 0x4
#define CHIP_HOSTIF_PCIE(sih) (si_chip_hostif(sih) == CHIP_HOSTIF_PCIEMODE)
+#define CHIP_HOSTIF_USB(sih) (si_chip_hostif(sih) == CHIP_HOSTIF_USBMODE)
#define CHIP_HOSTIF_SDIO(sih) (si_chip_hostif(sih) == CHIP_HOSTIF_SDIOMODE)
-
+/* 4335 resources */
#define RES4335_LPLDO_PO 0
#define RES4335_PMU_BG_PU 1
#define RES4335_PMU_SLEEP 2
#define RES4335_HT_AVAIL 29
#define RES4335_MACPHY_CLKAVAIL 30
-
+/* 4335 Chip specific ChipStatus register bits */
#define CST4335_SPROM_MASK 0x00000020
#define CST4335_SFLASH_MASK 0x00000040
#define CST4335_RES_INIT_MODE_SHIFT 7
#define CST4335_RES_INIT_MODE_MASK 0x00000180
#define CST4335_CHIPMODE_MASK 0xF
-#define CST4335_CHIPMODE_SDIOD(cs) (((cs) & (1 << 0)) != 0)
-#define CST4335_CHIPMODE_GSPI(cs) (((cs) & (1 << 1)) != 0)
-#define CST4335_CHIPMODE_USB20D(cs) (((cs) & (1 << 2)) != 0)
-#define CST4335_CHIPMODE_PCIE(cs) (((cs) & (1 << 3)) != 0)
-
-
-#define CCTRL1_4335_GPIO_SEL (1 << 0)
-#define CCTRL1_4335_SDIO_HOST_WAKE (1 << 2)
-
-#define CR4_4335_RAM_BASE (0x180000)
+#define CST4335_CHIPMODE_SDIOD(cs) (((cs) & (1 << 0)) != 0) /* SDIO */
+#define CST4335_CHIPMODE_GSPI(cs) (((cs) & (1 << 1)) != 0) /* gSPI */
+#define CST4335_CHIPMODE_USB20D(cs) (((cs) & (1 << 2)) != 0) /* HSIC || USBDA */
+#define CST4335_CHIPMODE_PCIE(cs) (((cs) & (1 << 3)) != 0) /* PCIE */
+
+/* 4335 Chip specific ChipControl1 register bits */
+#define CCTRL1_4335_GPIO_SEL (1 << 0) /* 1=select GPIOs to be muxed out */
+#define CCTRL1_4335_SDIO_HOST_WAKE (1 << 2) /* SDIO: 1=configure GPIO0 for host wake */
+
+/* 4335 Chip specific ChipControl2 register bits */
+#define CCTRL2_4335_AOSBLOCK (1 << 30)
+#define CCTRL2_4335_PMUWAKE (1 << 31)
#define PATCHTBL_SIZE (0x800)
+#define CR4_4335_RAM_BASE (0x180000)
+#define CR4_4345_RAM_BASE (0x1b0000)
+#define CR4_4349_RAM_BASE (0x180000)
#define CR4_4350_RAM_BASE (0x180000)
#define CR4_4360_RAM_BASE (0x0)
+#define CR4_43602_RAM_BASE (0x180000)
-
-
+/* 4335 chip OTP present & OTP select bits. */
#define SPROM4335_OTP_SELECT 0x00000010
#define SPROM4335_OTP_PRESENT 0x00000020
-
+/* 4335 GCI specific bits. */
#define CC4335_GCI_STRAP_OVERRIDE_SFLASH_PRESENT (1 << 24)
#define CC4335_GCI_STRAP_OVERRIDE_SFLASH_TYPE 25
#define CC4335_GCI_FUNC_SEL_PAD_SDIO 0x00707770
-
+/* SFLASH clkdiv specific bits. */
#define CC4335_SFLASH_CLKDIV_MASK 0x1F000000
#define CC4335_SFLASH_CLKDIV_SHIFT 25
-
+/* 4335 OTP bits for SFLASH. */
#define CC4335_SROM_OTP_SFLASH 40
#define CC4335_SROM_OTP_SFLASH_PRESENT 0x1
#define CC4335_SROM_OTP_SFLASH_TYPE 0x2
#define CC4335_SROM_OTP_SFLASH_CLKDIV_SHIFT 2
+#define CC4335_SROM_OTP_SFLASH_CLKDIV_MASK 0x003C
+
+/* 4335 resources--END */
+
+/* 4345 Chip specific ChipStatus register bits */
+#define CST4345_SPROM_MASK 0x00000020
+#define CST4345_SFLASH_MASK 0x00000040
+#define CST4345_RES_INIT_MODE_SHIFT 7
+#define CST4345_RES_INIT_MODE_MASK 0x00000180
+#define CST4345_CHIPMODE_MASK 0x4000F
+#define CST4345_CHIPMODE_SDIOD(cs) (((cs) & (1 << 0)) != 0) /* SDIO */
+#define CST4345_CHIPMODE_GSPI(cs) (((cs) & (1 << 1)) != 0) /* gSPI */
+#define CST4345_CHIPMODE_HSIC(cs) (((cs) & (1 << 2)) != 0) /* HSIC */
+#define CST4345_CHIPMODE_PCIE(cs) (((cs) & (1 << 3)) != 0) /* PCIE */
+#define CST4345_CHIPMODE_USB20D(cs) (((cs) & (1 << 18)) != 0) /* USBDA */
+
+/* 4350 Chipcommon ChipStatus bits */
#define CST4350_SDIO_MODE 0x00000001
#define CST4350_HSIC20D_MODE 0x00000002
#define CST4350_BP_ON_HSIC_CLK 0x00000004
#define CST4350_SDIO_PAD_VDDIO 0x04000000
#define CST4350_GSPI_MODE 0x08000000
#define CST4350_PACKAGE_OPTION 0xF0000000
+#define CST4350_PACKAGE_SHIFT 28
+
+/* package option for 4350 */
+#define CST4350_PACKAGE_WLCSP 0x0
+#define CST4350_PACKAGE_PCIE 0x1
+#define CST4350_PACKAGE_WLBGA 0x2
+#define CST4350_PACKAGE_DBG 0x3
+#define CST4350_PACKAGE_USB 0x4
+#define CST4350_PACKAGE_USB_HSIC 0x4
+#define CST4350_PKG_MODE(cs) ((cs & CST4350_PACKAGE_OPTION) >> CST4350_PACKAGE_SHIFT)
+#define CST4350_PKG_WLCSP(cs) (CST4350_PKG_MODE(cs) == (CST4350_PACKAGE_WLCSP))
+#define CST4350_PKG_PCIE(cs) (CST4350_PKG_MODE(cs) == (CST4350_PACKAGE_PCIE))
+#define CST4350_PKG_WLBGA(cs) (CST4350_PKG_MODE(cs) == (CST4350_PACKAGE_WLBGA))
+#define CST4350_PKG_USB(cs) (CST4350_PKG_MODE(cs) == (CST4350_PACKAGE_USB))
+#define CST4350_PKG_USB_HSIC(cs) (CST4350_PKG_MODE(cs) == (CST4350_PACKAGE_USB_HSIC))
+
+/* 4350C0 USB PACKAGE using raw_sprom_present to indicate 40MHz xtal */
+#define CST4350_PKG_USB_40M(cs) (cs & CST4350_RAW_SPROM_PRESENT)
+
+#define CST4350_CHIPMODE_SDIOD(cs) (CST4350_IFC_MODE(cs) == (CST4350_IFC_MODE_SDIOD))
+#define CST4350_CHIPMODE_USB20D(cs) ((CST4350_IFC_MODE(cs)) == (CST4350_IFC_MODE_USB20D))
+#define CST4350_CHIPMODE_HSIC20D(cs) (CST4350_IFC_MODE(cs) == (CST4350_IFC_MODE_HSIC20D))
+#define CST4350_CHIPMODE_HSIC30D(cs) (CST4350_IFC_MODE(cs) == (CST4350_IFC_MODE_HSIC30D))
+#define CST4350_CHIPMODE_USB30D(cs) (CST4350_IFC_MODE(cs) == (CST4350_IFC_MODE_USB30D))
+#define CST4350_CHIPMODE_USB30D_WL(cs) (CST4350_IFC_MODE(cs) == (CST4350_IFC_MODE_USB30D_WL))
+#define CST4350_CHIPMODE_PCIE(cs) (CST4350_IFC_MODE(cs) == (CST4350_IFC_MODE_PCIE))
+
+/* strap_host_ifc strap value */
#define CST4350_HOST_IFC_MASK 0x00700000
#define CST4350_HOST_IFC_SHIFT 20
-
+/* host_ifc raw mode */
#define CST4350_IFC_MODE_SDIOD 0x0
#define CST4350_IFC_MODE_HSIC20D 0x1
#define CST4350_IFC_MODE_HSIC30D 0x2
#define CST4350_IFC_MODE(cs) ((cs & CST4350_HOST_IFC_MASK) >> CST4350_HOST_IFC_SHIFT)
-#define CST4350_CHIPMODE_SDIOD(cs) (CST4350_IFC_MODE(cs) == (CST4350_IFC_MODE_SDIOD))
-#define CST4350_CHIPMODE_USB20D(cs) ((CST4350_IFC_MODE(cs)) == (CST4350_IFC_MODE_USB20D))
-#define CST4350_CHIPMODE_HSIC20D(cs) (CST4350_IFC_MODE(cs) == (CST4350_IFC_MODE_HSIC20D))
-#define CST4350_CHIPMODE_HSIC30D(cs) (CST4350_IFC_MODE(cs) == (CST4350_IFC_MODE_HSIC30D))
-#define CST4350_CHIPMODE_USB30D(cs) (CST4350_IFC_MODE(cs) == (CST4350_IFC_MODE_USB30D))
-#define CST4350_CHIPMODE_USB30D_WL(cs) (CST4350_IFC_MODE(cs) == (CST4350_IFC_MODE_USB30D_WL))
-#define CST4350_CHIPMODE_PCIE(cs) (CST4350_IFC_MODE(cs) == (CST4350_IFC_MODE_PCIE))
-
-
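As a minimal standalone sketch (illustrative only, not part of the patch) of how the CST4350_* helpers above fit together: the host-interface strap field is isolated with CST4350_HOST_IFC_MASK/SHIFT and compared against the raw mode codes. The chipstatus value below is invented.

/* Illustrative only -- standalone sketch, not part of the patch. */
#include <stdio.h>
typedef unsigned int uint32;

#define CST4350_HOST_IFC_MASK      0x00700000
#define CST4350_HOST_IFC_SHIFT     20
#define CST4350_IFC_MODE_SDIOD     0x0
#define CST4350_IFC_MODE(cs)       ((cs & CST4350_HOST_IFC_MASK) >> CST4350_HOST_IFC_SHIFT)
#define CST4350_CHIPMODE_SDIOD(cs) (CST4350_IFC_MODE(cs) == (CST4350_IFC_MODE_SDIOD))

int main(void)
{
	uint32 chipstatus = 0x0;	/* hypothetical value: host_ifc field == 0 (SDIO) */

	if (CST4350_CHIPMODE_SDIOD(chipstatus))
		printf("4350 strapped for SDIO device mode\n");
	return 0;
}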
+/* 4350 PMU resources */
#define RES4350_LPLDO_PU 0
#define RES4350_PMU_BG_PU 1
#define RES4350_PMU_SLEEP 2
#define RES4350_SR_CLK_START 13
#define RES4350_LQ_AVAIL 14
#define RES4350_LQ_START 15
-#define RES4350_RSVD_16 16
+#define RES4350_PERST_OVR 16
#define RES4350_WL_CORE_RDY 17
#define RES4350_ILP_REQ 18
#define RES4350_ALP_AVAIL 19
#define RES4350_MACPHY_CLKAVAIL 30
#define MUXENAB4350_UART_MASK (0x0000000f)
+#define MUXENAB4350_UART_SHIFT 0
+#define MUXENAB4350_HOSTWAKE_MASK (0x000000f0) /* configure GPIO for SDIO host_wake */
+#define MUXENAB4350_HOSTWAKE_SHIFT 4
+/* 4350 GCI function sel values */
#define CC4350_FNSEL_HWDEF (0)
#define CC4350_FNSEL_SAMEASPIN (1)
#define CC4350_FNSEL_UART (2)
#define CC4350_FNSEL_PDN (13)
#define CC4350_FNSEL_PUP (14)
#define CC4350_FNSEL_TRISTATE (15)
+#define CC4350C_FNSEL_UART (3)
+/* 4350 GPIO */
#define CC4350_PIN_GPIO_00 (0)
#define CC4350_PIN_GPIO_01 (1)
#define CC4350_PIN_GPIO_02 (2)
#define CC4350_PIN_GPIO_14 (14)
#define CC4350_PIN_GPIO_15 (15)
+#define CC4350_RSVD_16_SHIFT 16
+
+#define CC2_4350_PHY_PWRSW_UPTIME_MASK (0xf << 0)
+#define CC2_4350_PHY_PWRSW_UPTIME_SHIFT (0)
+#define CC2_4350_VDDM_PWRSW_UPDELAY_MASK (0xf << 4)
+#define CC2_4350_VDDM_PWRSW_UPDELAY_SHIFT (4)
+#define CC2_4350_VDDM_PWRSW_UPTIME_MASK (0xf << 8)
+#define CC2_4350_VDDM_PWRSW_UPTIME_SHIFT (8)
+#define CC2_4350_SBC_PWRSW_DNDELAY_MASK (0x3 << 12)
+#define CC2_4350_SBC_PWRSW_DNDELAY_SHIFT (12)
+#define CC2_4350_PHY_PWRSW_DNDELAY_MASK (0x3 << 14)
+#define CC2_4350_PHY_PWRSW_DNDELAY_SHIFT (14)
+#define CC2_4350_VDDM_PWRSW_DNDELAY_MASK (0x3 << 16)
+#define CC2_4350_VDDM_PWRSW_DNDELAY_SHIFT (16)
+#define CC2_4350_VDDM_PWRSW_EN_MASK (1 << 20)
+#define CC2_4350_VDDM_PWRSW_EN_SHIFT (20)
#define CC2_4350_MEMLPLDO_PWRSW_EN_MASK (1 << 21)
#define CC2_4350_MEMLPLDO_PWRSW_EN_SHIFT (21)
#define CC2_4350_SDIO_AOS_WAKEUP_MASK (1 << 24)
#define CC2_4350_SDIO_AOS_WAKEUP_SHIFT (24)
-
+/* Applies to 4335/4350/4345 */
#define CC3_SR_CLK_SR_MEM_MASK (1 << 0)
#define CC3_SR_CLK_SR_MEM_SHIFT (0)
#define CC3_SR_BIT1_TBD_MASK (1 << 1)
#define CC3_SR_GPIO_MUX_MASK (0xF << 28)
#define CC3_SR_GPIO_MUX_SHIFT (28)
-
+/* Applies to 4335/4350/4345 */
#define CC4_SR_INIT_ADDR_MASK (0x3FF0000)
#define CC4_4350_SR_ASM_ADDR (0x30)
+#define CC4_4350_C0_SR_ASM_ADDR (0x0)
#define CC4_4335_SR_ASM_ADDR (0x48)
+#define CC4_4345_SR_ASM_ADDR (0x48)
#define CC4_SR_INIT_ADDR_SHIFT (16)
#define CC4_4350_EN_SR_CLK_ALP_MASK (1 << 30)
#define VREG4_4350_MEMLPDO_PU_MASK (1 << 31)
#define VREG4_4350_MEMLPDO_PU_SHIFT 31
+#define VREG6_4350_SR_EXT_CLKDIR_MASK (1 << 20)
+#define VREG6_4350_SR_EXT_CLKDIR_SHIFT 20
+#define VREG6_4350_SR_EXT_CLKDIV_MASK (0x3 << 21)
+#define VREG6_4350_SR_EXT_CLKDIV_SHIFT 21
+#define VREG6_4350_SR_EXT_CLKEN_MASK (1 << 23)
+#define VREG6_4350_SR_EXT_CLKEN_SHIFT 23
+
+#define CC5_4350_PMU_EN_ASSERT_MASK (1 << 13)
+#define CC5_4350_PMU_EN_ASSERT_SHIFT (13)
+
#define CC6_4350_PCIE_CLKREQ_WAKEUP_MASK (1 << 4)
#define CC6_4350_PCIE_CLKREQ_WAKEUP_SHIFT (4)
#define CC6_4350_PMU_WAKEUP_ALPAVAIL_MASK (1 << 6)
#define CC6_4350_PMU_WAKEUP_ALPAVAIL_SHIFT (6)
-
-
+#define CC6_4350_PMU_EN_EXT_PERST_MASK (1 << 17)
+#define CC6_4350_PMU_EN_EXT_PERST_SHIFT (17)
+#define CC6_4350_PMU_EN_WAKEUP_MASK (1 << 18)
+#define CC6_4350_PMU_EN_WAKEUP_SHIFT (18)
+
+#define CC7_4350_PMU_EN_ASSERT_L2_MASK (1 << 26)
+#define CC7_4350_PMU_EN_ASSERT_L2_SHIFT (26)
+#define CC7_4350_PMU_EN_MDIO_MASK (1 << 27)
+#define CC7_4350_PMU_EN_MDIO_SHIFT (27)
+
+#define CC6_4345_PMU_EN_PERST_DEASSERT_MASK (1 << 13)
+#define CC6_4345_PMU_EN_PERST_DEASSERT_SHIF (13)
+#define CC6_4345_PMU_EN_L2_DEASSERT_MASK (1 << 14)
+#define CC6_4345_PMU_EN_L2_DEASSERT_SHIF (14)
+#define CC6_4345_PMU_EN_ASSERT_L2_MASK (1 << 15)
+#define CC6_4345_PMU_EN_ASSERT_L2_SHIFT (15)
+#define CC6_4345_PMU_EN_MDIO_MASK (1 << 24)
+#define CC6_4345_PMU_EN_MDIO_SHIFT (24)
+
+/* GCI chipcontrol register indices */
#define CC_GCI_CHIPCTRL_00 (0)
#define CC_GCI_CHIPCTRL_01 (1)
#define CC_GCI_CHIPCTRL_02 (2)
#define CC_GCI_CHIPCTRL_06 (6)
#define CC_GCI_CHIPCTRL_07 (7)
#define CC_GCI_CHIPCTRL_08 (8)
+#define CC_GCI_XTAL_BUFSTRG_NFC (0xff << 12)
#define CC_GCI_06_JTAG_SEL_SHIFT 4
#define CC_GCI_06_JTAG_SEL_MASK (1 << 4)
#define CC_GCI_NUMCHIPCTRLREGS(cap1) ((cap1 & 0xF00) >> 8)
-
+/* 4345 PMU resources */
+#define RES4345_LPLDO_PU 0
+#define RES4345_PMU_BG_PU 1
+#define RES4345_PMU_SLEEP 2
+#define RES4345_HSICLDO_PU 3
+#define RES4345_CBUCK_LPOM_PU 4
+#define RES4345_CBUCK_PFM_PU 5
+#define RES4345_COLD_START_WAIT 6
+#define RES4345_RSVD_7 7
+#define RES4345_LNLDO_PU 8
+#define RES4345_XTALLDO_PU 9
+#define RES4345_LDO3P3_PU 10
+#define RES4345_OTP_PU 11
+#define RES4345_XTAL_PU 12
+#define RES4345_SR_CLK_START 13
+#define RES4345_LQ_AVAIL 14
+#define RES4345_LQ_START 15
+#define RES4345_PERST_OVR 16
+#define RES4345_WL_CORE_RDY 17
+#define RES4345_ILP_REQ 18
+#define RES4345_ALP_AVAIL 19
+#define RES4345_MINI_PMU 20
+#define RES4345_RADIO_PU 21
+#define RES4345_SR_CLK_STABLE 22
+#define RES4345_SR_SAVE_RESTORE 23
+#define RES4345_SR_PHY_PWRSW 24
+#define RES4345_SR_VDDM_PWRSW 25
+#define RES4345_SR_SUBCORE_PWRSW 26
+#define RES4345_SR_SLEEP 27
+#define RES4345_HT_START 28
+#define RES4345_HT_AVAIL 29
+#define RES4345_MACPHY_CLK_AVAIL 30
+
+/* 4335 pins
+* note: only the values set as default/used are added here.
+*/
#define CC4335_PIN_GPIO_00 (0)
#define CC4335_PIN_GPIO_01 (1)
#define CC4335_PIN_GPIO_02 (2)
#define CC4335_PIN_SDIO_DATA1 (19)
#define CC4335_PIN_SDIO_DATA2 (20)
#define CC4335_PIN_SDIO_DATA3 (21)
-#define CC4335_PIN_RF_SW_CTRL_0 (22)
-#define CC4335_PIN_RF_SW_CTRL_1 (23)
-#define CC4335_PIN_RF_SW_CTRL_2 (24)
-#define CC4335_PIN_RF_SW_CTRL_3 (25)
-#define CC4335_PIN_RF_SW_CTRL_4 (26)
-#define CC4335_PIN_RF_SW_CTRL_5 (27)
-#define CC4335_PIN_RF_SW_CTRL_6 (28)
-#define CC4335_PIN_RF_SW_CTRL_7 (29)
-#define CC4335_PIN_RF_SW_CTRL_8 (30)
-#define CC4335_PIN_RF_SW_CTRL_9 (31)
-
-
+#define CC4335_PIN_RF_SW_CTRL_6 (22)
+#define CC4335_PIN_RF_SW_CTRL_7 (23)
+#define CC4335_PIN_RF_SW_CTRL_8 (24)
+#define CC4335_PIN_RF_SW_CTRL_9 (25)
+/* Last GPIO Pad */
+#define CC4335_PIN_GPIO_LAST (31)
+
+/* 4335 GCI function sel values
+*/
#define CC4335_FNSEL_HWDEF (0)
#define CC4335_FNSEL_SAMEASPIN (1)
#define CC4335_FNSEL_GPIO0 (2)
#define CC4335_FNSEL_PUP (14)
#define CC4335_FNSEL_TRI (15)
-
+/* GCI Core Control Reg */
+#define GCI_CORECTRL_SR_MASK (1 << 0) /* SECI block Reset */
+#define GCI_CORECTRL_RSL_MASK (1 << 1) /* ResetSECILogic */
+#define GCI_CORECTRL_ES_MASK (1 << 2) /* EnableSECI */
+#define GCI_CORECTRL_FSL_MASK (1 << 3) /* Force SECI Out Low */
+#define GCI_CORECTRL_SOM_MASK (7 << 4) /* SECI Op Mode */
+#define GCI_CORECTRL_US_MASK (1 << 7) /* Update SECI */
+#define GCI_CORECTRL_BOS_MASK (1 << 8) /* Break On Sleep */
+
+/* 4345 pins
+* note: only the values set as default/used are added here.
+*/
+#define CC4345_PIN_GPIO_00 (0)
+#define CC4345_PIN_GPIO_01 (1)
+#define CC4345_PIN_GPIO_02 (2)
+#define CC4345_PIN_GPIO_03 (3)
+#define CC4345_PIN_GPIO_04 (4)
+#define CC4345_PIN_GPIO_05 (5)
+#define CC4345_PIN_GPIO_06 (6)
+#define CC4345_PIN_GPIO_07 (7)
+#define CC4345_PIN_GPIO_08 (8)
+#define CC4345_PIN_GPIO_09 (9)
+#define CC4345_PIN_GPIO_10 (10)
+#define CC4345_PIN_GPIO_11 (11)
+#define CC4345_PIN_GPIO_12 (12)
+#define CC4345_PIN_GPIO_13 (13)
+#define CC4345_PIN_GPIO_14 (14)
+#define CC4345_PIN_GPIO_15 (15)
+#define CC4345_PIN_GPIO_16 (16)
+#define CC4345_PIN_SDIO_CLK (17)
+#define CC4345_PIN_SDIO_CMD (18)
+#define CC4345_PIN_SDIO_DATA0 (19)
+#define CC4345_PIN_SDIO_DATA1 (20)
+#define CC4345_PIN_SDIO_DATA2 (21)
+#define CC4345_PIN_SDIO_DATA3 (22)
+#define CC4345_PIN_RF_SW_CTRL_0 (23)
+#define CC4345_PIN_RF_SW_CTRL_1 (24)
+#define CC4345_PIN_RF_SW_CTRL_2 (25)
+#define CC4345_PIN_RF_SW_CTRL_3 (26)
+#define CC4345_PIN_RF_SW_CTRL_4 (27)
+#define CC4345_PIN_RF_SW_CTRL_5 (28)
+#define CC4345_PIN_RF_SW_CTRL_6 (29)
+#define CC4345_PIN_RF_SW_CTRL_7 (30)
+#define CC4345_PIN_RF_SW_CTRL_8 (31)
+#define CC4345_PIN_RF_SW_CTRL_9 (32)
+
+/* 4345 GCI function sel values
+*/
+#define CC4345_FNSEL_HWDEF (0)
+#define CC4345_FNSEL_SAMEASPIN (1)
+#define CC4345_FNSEL_GPIO0 (2)
+#define CC4345_FNSEL_GPIO1 (3)
+#define CC4345_FNSEL_GCI0 (4)
+#define CC4345_FNSEL_GCI1 (5)
+#define CC4345_FNSEL_UART (6)
+#define CC4345_FNSEL_SFLASH (7)
+#define CC4345_FNSEL_SPROM (8)
+#define CC4345_FNSEL_MISC0 (9)
+#define CC4345_FNSEL_MISC1 (10)
+#define CC4345_FNSEL_MISC2 (11)
+#define CC4345_FNSEL_IND (12)
+#define CC4345_FNSEL_PDN (13)
+#define CC4345_FNSEL_PUP (14)
+#define CC4345_FNSEL_TRI (15)
+
+#define MUXENAB4345_UART_MASK (0x0000000f)
+#define MUXENAB4345_UART_SHIFT 0
+#define MUXENAB4345_HOSTWAKE_MASK (0x000000f0)
+#define MUXENAB4345_HOSTWAKE_SHIFT 4
+
+/* 4349 Group (4349, 4355, 4359) GCI AVS function sel values */
+#define CC4349_GRP_GCI_AVS_CTRL_MASK (0xffe00000)
+#define CC4349_GRP_GCI_AVS_CTRL_SHIFT (21)
+#define CC4349_GRP_GCI_AVS_CTRL_ENAB (1 << 5)
+
+/* 4345 GCI AVS function sel values */
+#define CC4345_GCI_AVS_CTRL_MASK (0xfc)
+#define CC4345_GCI_AVS_CTRL_SHIFT (2)
+#define CC4345_GCI_AVS_CTRL_ENAB (1 << 5)
+
+/* GCI GPIO for function sel GCI-0/GCI-1 */
+#define CC_GCI_GPIO_0 (0)
+#define CC_GCI_GPIO_1 (1)
+#define CC_GCI_GPIO_2 (2)
+#define CC_GCI_GPIO_3 (3)
+#define CC_GCI_GPIO_4 (4)
+#define CC_GCI_GPIO_5 (5)
+#define CC_GCI_GPIO_6 (6)
+#define CC_GCI_GPIO_7 (7)
+#define CC_GCI_GPIO_8 (8)
+#define CC_GCI_GPIO_9 (9)
+#define CC_GCI_GPIO_10 (10)
+#define CC_GCI_GPIO_11 (11)
+#define CC_GCI_GPIO_12 (12)
+#define CC_GCI_GPIO_13 (13)
+#define CC_GCI_GPIO_14 (14)
+#define CC_GCI_GPIO_15 (15)
+
+
+/* indicates Invalid GPIO, e.g. when PAD GPIO doesn't map to GCI GPIO */
+#define CC_GCI_GPIO_INVALID 0xFF
+
+/* find the 4 bit mask given the bit position */
#define GCIMASK(pos) (((uint32)0xF) << pos)
-
-
+/* get the value which can be used to directly OR with chipcontrol reg */
#define GCIPOSVAL(val, pos) ((((uint32)val) << pos) & GCIMASK(pos))
-
-
+/* Extract nibble from a given position */
+#define GCIGETNBL(val, pos) ((val >> pos) & 0xF)
+
+
+/* find the 8 bit mask given the bit position */
+#define GCIMASK_8B(pos) (((uint32)0xFF) << pos)
+/* get the value which can be used to directly OR with chipcontrol reg */
+#define GCIPOSVAL_8B(val, pos) ((((uint32)val) << pos) & GCIMASK_8B(pos))
+/* Extract nibble from a given position */
+#define GCIGETNBL_8B(val, pos) ((val >> pos) & 0xFF)
+
+/* find the 4 bit mask given the bit position */
+#define GCIMASK_4B(pos) (((uint32)0xF) << pos)
+/* get the value which can be used to directly OR with chipcontrol reg */
+#define GCIPOSVAL_4B(val, pos) ((((uint32)val) << pos) & GCIMASK_4B(pos))
+/* Extract nibble from a given position */
+#define GCIGETNBL_4B(val, pos) ((val >> pos) & 0xF)
+
+
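A worked illustration of the nibble helpers above (standalone sketch, not part of the patch; the bit position and value are arbitrary): GCIPOSVAL_4B() shifts a value into a 4-bit field so it can be ORed straight into a chipcontrol word, and GCIGETNBL_4B() extracts it again.

/* Illustrative only -- standalone sketch, not part of the patch. */
#include <assert.h>
typedef unsigned int uint32;

#define GCIMASK_4B(pos)        (((uint32)0xF) << pos)
#define GCIPOSVAL_4B(val, pos) ((((uint32)val) << pos) & GCIMASK_4B(pos))
#define GCIGETNBL_4B(val, pos) ((val >> pos) & 0xF)

int main(void)
{
	uint32 chipctrl = 0;

	/* place function-select value 0x7 into the nibble at bit 8 */
	chipctrl |= GCIPOSVAL_4B(0x7, 8);
	assert(chipctrl == 0x700);

	/* read it back */
	assert(GCIGETNBL_4B(chipctrl, 8) == 0x7);
	return 0;
}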
+/* 4335 GCI Intstatus(Mask)/WakeMask Register bits. */
+#define GCI_INTSTATUS_RBI (1 << 0) /* Rx Break Interrupt */
+#define GCI_INTSTATUS_UB (1 << 1) /* UART Break Interrupt */
+#define GCI_INTSTATUS_SPE (1 << 2) /* SECI Parity Error Interrupt */
+#define GCI_INTSTATUS_SFE (1 << 3) /* SECI Framing Error Interrupt */
+#define GCI_INTSTATUS_SRITI (1 << 9) /* SECI Rx Idle Timer Interrupt */
+#define GCI_INTSTATUS_STFF (1 << 10) /* SECI Tx FIFO Full Interrupt */
+#define GCI_INTSTATUS_STFAE (1 << 11) /* SECI Tx FIFO Almost Empty Intr */
+#define GCI_INTSTATUS_SRFAF (1 << 12) /* SECI Rx FIFO Almost Full */
+#define GCI_INTSTATUS_SRFNE (1 << 14) /* SECI Rx FIFO Not Empty */
+#define GCI_INTSTATUS_SRFOF (1 << 15) /* SECI Rx FIFO Not Empty Timeout */
+#define GCI_INTSTATUS_GPIOINT (1 << 25) /* GCIGpioInt */
+#define GCI_INTSTATUS_GPIOWAKE (1 << 26) /* GCIGpioWake */
+
+/* 4335 GCI IntMask Register bits. */
+#define GCI_INTMASK_RBI (1 << 0) /* Rx Break Interrupt */
+#define GCI_INTMASK_UB (1 << 1) /* UART Break Interrupt */
+#define GCI_INTMASK_SPE (1 << 2) /* SECI Parity Error Interrupt */
+#define GCI_INTMASK_SFE (1 << 3) /* SECI Framing Error Interrupt */
+#define GCI_INTMASK_SRITI (1 << 9) /* SECI Rx Idle Timer Interrupt */
+#define GCI_INTMASK_STFF (1 << 10) /* SECI Tx FIFO Full Interrupt */
+#define GCI_INTMASK_STFAE (1 << 11) /* SECI Tx FIFO Almost Empty Intr */
+#define GCI_INTMASK_SRFAF (1 << 12) /* SECI Rx FIFO Almost Full */
+#define GCI_INTMASK_SRFNE (1 << 14) /* SECI Rx FIFO Not Empty */
+#define GCI_INTMASK_SRFOF (1 << 15) /* SECI Rx FIFO Not Empty Timeout */
+#define GCI_INTMASK_GPIOINT (1 << 25) /* GCIGpioInt */
+#define GCI_INTMASK_GPIOWAKE (1 << 26) /* GCIGpioWake */
+
+/* 4335 GCI WakeMask Register bits. */
+#define GCI_WAKEMASK_RBI (1 << 0) /* Rx Break Interrupt */
+#define GCI_WAKEMASK_UB (1 << 1) /* UART Break Interrupt */
+#define GCI_WAKEMASK_SPE (1 << 2) /* SECI Parity Error Interrupt */
+#define GCI_WAKEMASK_SFE (1 << 3) /* SECI Framing Error Interrupt */
+#define GCI_WAKE_SRITI (1 << 9) /* SECI Rx Idle Timer Interrupt */
+#define GCI_WAKEMASK_STFF (1 << 10) /* SECI Tx FIFO Full Interrupt */
+#define GCI_WAKEMASK_STFAE (1 << 11) /* SECI Tx FIFO Almost Empty Intr */
+#define GCI_WAKEMASK_SRFAF (1 << 12) /* SECI Rx FIFO Almost Full */
+#define GCI_WAKEMASK_SRFNE (1 << 14) /* SECI Rx FIFO Not Empty */
+#define GCI_WAKEMASK_SRFOF (1 << 15) /* SECI Rx FIFO Not Empty Timeout */
+#define GCI_WAKEMASK_GPIOINT (1 << 25) /* GCIGpioInt */
+#define GCI_WAKEMASK_GPIOWAKE (1 << 26) /* GCIGpioWake */
+
+#define GCI_WAKE_ON_GCI_GPIO1 1
+#define GCI_WAKE_ON_GCI_GPIO2 2
+#define GCI_WAKE_ON_GCI_GPIO3 3
+#define GCI_WAKE_ON_GCI_GPIO4 4
+#define GCI_WAKE_ON_GCI_GPIO5 5
+#define GCI_WAKE_ON_GCI_GPIO6 6
+#define GCI_WAKE_ON_GCI_GPIO7 7
+#define GCI_WAKE_ON_GCI_GPIO8 8
+#define GCI_WAKE_ON_GCI_SECI_IN 9
+
+/* 4335 MUX options. each nibble belongs to a setting. Non-zero value specifies a logic
+* for now only UART for bootloader.
+*/
#define MUXENAB4335_UART_MASK (0x0000000f)
#define MUXENAB4335_UART_SHIFT 0
-#define MUXENAB4335_HOSTWAKE_MASK (0x000000f0)
+#define MUXENAB4335_HOSTWAKE_MASK (0x000000f0) /* configure GPIO for SDIO host_wake */
#define MUXENAB4335_HOSTWAKE_SHIFT 4
#define MUXENAB4335_GETIX(val, name) \
((((val) & MUXENAB4335_ ## name ## _MASK) >> MUXENAB4335_ ## name ## _SHIFT) - 1)
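A small standalone sketch (not part of the patch) of the MUXENAB4335_GETIX() accessor above: each nibble appears to hold index+1 with 0 meaning "not configured", so the macro masks, shifts, and subtracts one. The muxenab value below is invented.

/* Illustrative only -- standalone sketch, not part of the patch. */
#include <stdio.h>
typedef unsigned int uint32;

#define MUXENAB4335_UART_MASK      (0x0000000f)
#define MUXENAB4335_UART_SHIFT     0
#define MUXENAB4335_HOSTWAKE_MASK  (0x000000f0)
#define MUXENAB4335_HOSTWAKE_SHIFT 4
#define MUXENAB4335_GETIX(val, name) \
	((((val) & MUXENAB4335_ ## name ## _MASK) >> MUXENAB4335_ ## name ## _SHIFT) - 1)

int main(void)
{
	uint32 muxenab = 0x21;	/* hypothetical: UART option 1, host_wake option 2 */

	printf("uart ix=%d hostwake ix=%d\n",
	       (int)MUXENAB4335_GETIX(muxenab, UART),
	       (int)MUXENAB4335_GETIX(muxenab, HOSTWAKE));
	/* prints: uart ix=0 hostwake ix=1 */
	return 0;
}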
-
-#define CHIP_HOSTIF_USB(sih) (si_chip_hostif(sih) & CST4360_MODE_USB)
-
-
+/*
+* Maximum delay for the PMU state transition in us.
+* This is an upper bound intended for spinwaits etc.
+*/
#define PMU_MAX_TRANSITION_DLY 15000
-
+/* PMU resource up transition time in ILP cycles */
#define PMURES_UP_TRANSITION 2
-
+/* SECI configuration */
#define SECI_MODE_UART 0x0
#define SECI_MODE_SECI 0x1
#define SECI_MODE_LEGACY_3WIRE_BT 0x2
#define SECI_ENAB_SECI_ECI (1 << 2)
#define SECI_ENAB_SECIOUT_DIS (1 << 3)
#define SECI_MODE_MASK 0x7
-#define SECI_MODE_SHIFT 4
+#define SECI_MODE_SHIFT 4 /* (bits 5, 6, 7) */
#define SECI_UPD_SECI (1 << 7)
-#define SECI_SIGNOFF_0 0xDB
+#define SECI_SLIP_ESC_CHAR 0xDB
+#define SECI_SIGNOFF_0 SECI_SLIP_ESC_CHAR
#define SECI_SIGNOFF_1 0
+#define SECI_REFRESH_REQ 0xDA
-
+/* seci clk_ctl_st bits */
#define CLKCTL_STS_SECI_CLK_REQ (1 << 8)
#define CLKCTL_STS_SECI_CLK_AVAIL (1 << 24)
#define SECI_UART_SECI_IN_STATE (1 << 2)
#define SECI_UART_SECI_IN2_STATE (1 << 3)
-
-#define SECI_UART_LCR_STOP_BITS (1 << 0)
+/* GCI RX FIFO Control Register */
+#define GCI_RXF_LVL_MASK (0xFF << 0)
+#define GCI_RXF_TIMEOUT_MASK (0xFF << 8)
+
+/* GCI UART Registers' Bit definitions */
+/* Seci Fifo Level Register */
+#define SECI_TXF_LVL_MASK (0x3F << 8)
+#define TXF_AE_LVL_DEFAULT 0x4
+#define SECI_RXF_LVL_FC_MASK (0x3F << 16)
+
+/* SeciUARTFCR Bit definitions */
+#define SECI_UART_FCR_RFR (1 << 0)
+#define SECI_UART_FCR_TFR (1 << 1)
+#define SECI_UART_FCR_SR (1 << 2)
+#define SECI_UART_FCR_THP (1 << 3)
+#define SECI_UART_FCR_AB (1 << 4)
+#define SECI_UART_FCR_ATOE (1 << 5)
+#define SECI_UART_FCR_ARTSOE (1 << 6)
+#define SECI_UART_FCR_ABV (1 << 7)
+#define SECI_UART_FCR_ALM (1 << 8)
+
+/* SECI UART LCR register bits */
+#define SECI_UART_LCR_STOP_BITS (1 << 0) /* 0 - 1bit, 1 - 2bits */
#define SECI_UART_LCR_PARITY_EN (1 << 1)
-#define SECI_UART_LCR_PARITY (1 << 2)
+#define SECI_UART_LCR_PARITY (1 << 2) /* 0 - odd, 1 - even */
#define SECI_UART_LCR_RX_EN (1 << 3)
-#define SECI_UART_LCR_LBRK_CTRL (1 << 4)
+#define SECI_UART_LCR_LBRK_CTRL (1 << 4) /* 1 => SECI_OUT held low */
#define SECI_UART_LCR_TXO_EN (1 << 5)
#define SECI_UART_LCR_RTSO_EN (1 << 6)
#define SECI_UART_LCR_SLIPMODE_EN (1 << 7)
#define SECI_UART_LCR_TXCRC_INV (1 << 9)
#define SECI_UART_LCR_TXCRC_LSBF (1 << 10)
#define SECI_UART_LCR_TXCRC_EN (1 << 11)
+#define SECI_UART_LCR_RXSYNC_EN (1 << 12)
#define SECI_UART_MCR_TX_EN (1 << 0)
#define SECI_UART_MCR_PRTS (1 << 1)
#define SECI_UART_MCR_BAUD_ADJ_EN (1 << 7)
#define SECI_UART_MCR_XONOFF_RPT (1 << 9)
-
-
-
+/* SeciUARTLSR Bit Mask */
+#define SECI_UART_LSR_RXOVR_MASK (1 << 0)
+#define SECI_UART_LSR_RFF_MASK (1 << 1)
+#define SECI_UART_LSR_TFNE_MASK (1 << 2)
+#define SECI_UART_LSR_TI_MASK (1 << 3)
+#define SECI_UART_LSR_TPR_MASK (1 << 4)
+#define SECI_UART_LSR_TXHALT_MASK (1 << 5)
+
+/* SeciUARTMSR Bit Mask */
+#define SECI_UART_MSR_CTSS_MASK (1 << 0)
+#define SECI_UART_MSR_RTSS_MASK (1 << 1)
+#define SECI_UART_MSR_SIS_MASK (1 << 2)
+#define SECI_UART_MSR_SIS2_MASK (1 << 3)
+
+/* SeciUARTData Bits */
+#define SECI_UART_DATA_RF_NOT_EMPTY_BIT (1 << 12)
+#define SECI_UART_DATA_RF_FULL_BIT (1 << 13)
+#define SECI_UART_DATA_RF_OVRFLOW_BIT (1 << 14)
+#define SECI_UART_DATA_FIFO_PTR_MASK 0xFF
+#define SECI_UART_DATA_RF_RD_PTR_SHIFT 16
+#define SECI_UART_DATA_RF_WR_PTR_SHIFT 24
+
+/* LTECX: ltecxmux */
+#define LTECX_EXTRACT_MUX(val, idx) (getbit4(&(val), (idx)))
+
+/* LTECX: ltecxmux MODE */
+#define LTECX_MUX_MODE_IDX 0
+#define LTECX_MUX_MODE_WCI2 0x0
+#define LTECX_MUX_MODE_GPIO 0x1
+
+
+/* LTECX GPIO Information Index */
+#define LTECX_NVRAM_FSYNC_IDX 0
+#define LTECX_NVRAM_LTERX_IDX 1
+#define LTECX_NVRAM_LTETX_IDX 2
+#define LTECX_NVRAM_WLPRIO_IDX 3
+
+/* LTECX WCI2 Information Index */
+#define LTECX_NVRAM_WCI2IN_IDX 0
+#define LTECX_NVRAM_WCI2OUT_IDX 1
+
+/* LTECX: Macros to get GPIO/FNSEL/GCIGPIO */
+#define LTECX_EXTRACT_PADNUM(val, idx) (getbit8(&(val), (idx)))
+#define LTECX_EXTRACT_FNSEL(val, idx) (getbit4(&(val), (idx)))
+#define LTECX_EXTRACT_GCIGPIO(val, idx) (getbit4(&(val), (idx)))
+
+/* WLAN channel numbers - used from wifi.h */
+
+/* WLAN BW */
#define ECI_BW_20 0x0
#define ECI_BW_25 0x1
#define ECI_BW_30 0x2
#define ECI_BW_50 0x6
#define ECI_BW_ALL 0x7
-
+/* WLAN - number of antenna */
#define WLAN_NUM_ANT1 TXANT_0
#define WLAN_NUM_ANT2 TXANT_1
-#endif
+/* otpctrl1 0xF4 */
+#define OTPC_FORCE_PWR_OFF 0x02000000
+/* chipcommon s/r registers introduced with cc rev >= 48 */
+#define CC_SR_CTL0_ENABLE_MASK 0x1
+#define CC_SR_CTL0_ENABLE_SHIFT 0
+#define CC_SR_CTL0_EN_SR_ENG_CLK_SHIFT 1 /* sr_clk to sr_memory enable */
+#define CC_SR_CTL0_RSRC_TRIGGER_SHIFT 2 /* Rising edge resource trigger 0 to sr_engine */
+#define CC_SR_CTL0_MIN_DIV_SHIFT 6 /* Min division value for fast clk in sr_engine */
+#define CC_SR_CTL0_EN_SBC_STBY_SHIFT 16 /* Allow Subcore mem StandBy? */
+#define CC_SR_CTL0_EN_SR_ALP_CLK_MASK_SHIFT 18
+#define CC_SR_CTL0_EN_SR_HT_CLK_SHIFT 19
+#define CC_SR_CTL0_ALLOW_PIC_SHIFT 20 /* Allow pic to separate power domains */
+#define CC_SR_CTL0_MAX_SR_LQ_CLK_CNT_SHIFT 25
+#define CC_SR_CTL0_EN_MEM_DISABLE_FOR_SLEEP 30
+
+#define CC_SR_CTL1_SR_INIT_MASK 0x3FF
+#define CC_SR_CTL1_SR_INIT_SHIFT 0
+
+#define ECI_INLO_PKTDUR_MASK 0x000000f0 /* [7:4] - 4 bits */
+#define ECI_INLO_PKTDUR_SHIFT 4
+
+/* gci chip control bits */
+#define GCI_GPIO_CHIPCTRL_ENAB_IN_BIT 0
+#define GCI_GPIO_CHIPCTRL_ENAB_OP_BIT 1
+#define GCI_GPIO_CHIPCTRL_INVERT_BIT 2
+#define GCI_GPIO_CHIPCTRL_PULLUP_BIT 3
+#define GCI_GPIO_CHIPCTRL_PULLDN_BIT 4
+#define GCI_GPIO_CHIPCTRL_ENAB_BTSIG_BIT 5
+#define GCI_GPIO_CHIPCTRL_ENAB_OD_OP_BIT 6
+#define GCI_GPIO_CHIPCTRL_ENAB_EXT_GPIO_BIT 7
+
+/* gci GPIO input status bits */
+#define GCI_GPIO_STS_VALUE_BIT 0
+#define GCI_GPIO_STS_POS_EDGE_BIT 1
+#define GCI_GPIO_STS_NEG_EDGE_BIT 2
+#define GCI_GPIO_STS_FAST_EDGE_BIT 3
+#define GCI_GPIO_STS_CLEAR 0xF
+
+#define GCI_GPIO_STS_VALUE (1 << GCI_GPIO_STS_VALUE_BIT)
+
+#endif /* _SBCHIPC_H */
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: sbconfig.h 241182 2011-02-17 21:50:03Z $
+ * $Id: sbconfig.h 456346 2014-02-18 16:48:52Z $
*/
#ifndef _SBCONFIG_H
#define _SBCONFIG_H
-
+/* cpp contortions to concatenate w/arg prescan */
#ifndef PAD
#define _PADLINE(line) pad ## line
#define _XSTR(line) _PADLINE(line)
#define PAD _XSTR(__LINE__)
#endif
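The "cpp contortions" comment refers to the extra _XSTR() level: a macro argument used with ## is not expanded first, so a direct pad ## __LINE__ would paste the literal token __LINE__; routing through a second macro lets __LINE__ expand before pasting. A minimal standalone sketch (not part of the patch; example_regs_t is a made-up type):

/* Illustrative only -- standalone sketch, not part of the patch. */
#ifndef PAD
#define _PADLINE(line)	pad ## line
#define _XSTR(line)	_PADLINE(line)
#define PAD		_XSTR(__LINE__)
#endif

/* Each PAD expands to a uniquely named member (pad<line-number>),
 * so reserved register slots can be skipped without inventing names.
 */
typedef volatile struct {
	unsigned int control;
	unsigned int PAD;	/* becomes e.g. pad15 -- one unused 32-bit slot */
	unsigned int status;
} example_regs_t;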
-
-#define SB_BUS_SIZE 0x10000
+/* enumeration in SB is based on the premise that cores are contiguous in the
+ * enumeration space.
+ */
+#define SB_BUS_SIZE 0x10000 /* Each bus gets 64Kbytes for cores */
#define SB_BUS_BASE(b) (SI_ENUM_BASE + (b) * SB_BUS_SIZE)
-#define SB_BUS_MAXCORES (SB_BUS_SIZE / SI_CORE_SIZE)
-
+#define SB_BUS_MAXCORES (SB_BUS_SIZE / SI_CORE_SIZE) /* Max cores per bus */
-#define SBCONFIGOFF 0xf00
-#define SBCONFIGSIZE 256
+/*
+ * Sonics Configuration Space Registers.
+ */
+#define SBCONFIGOFF 0xf00 /* core sbconfig regs are top 256bytes of regs */
+#define SBCONFIGSIZE 256 /* sizeof (sbconfig_t) */
#define SBIPSFLAG 0x08
#define SBTPSFLAG 0x18
-#define SBTMERRLOGA 0x48
-#define SBTMERRLOG 0x50
+#define SBTMERRLOGA 0x48 /* sonics >= 2.3 */
+#define SBTMERRLOG 0x50 /* sonics >= 2.3 */
#define SBADMATCH3 0x60
#define SBADMATCH2 0x68
#define SBADMATCH1 0x70
#define SBIDLOW 0xf8
#define SBIDHIGH 0xfc
-
+/* All the previous registers are above SBCONFIGOFF, but with Sonics 2.3, we have
+ * a few registers *below* that line. I think it would be very confusing to try
+ * and change the value of SBCONFIGOFF, so I'm defining them as absolute offsets here.
+ */
#define SBIMERRLOGA 0xea8
#define SBIMERRLOG 0xeb0
#define SBTMPORTCONNID0 0xed8
#define SBTMPORTLOCK0 0xef8
-#ifndef _LANGUAGE_ASSEMBLY
+#if !defined(_LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLY__)
typedef volatile struct _sbconfig {
uint32 PAD[2];
- uint32 sbipsflag;
+ uint32 sbipsflag; /* initiator port ocp slave flag */
uint32 PAD[3];
- uint32 sbtpsflag;
+ uint32 sbtpsflag; /* target port ocp slave flag */
uint32 PAD[11];
- uint32 sbtmerrloga;
+ uint32 sbtmerrloga; /* (sonics >= 2.3) */
uint32 PAD;
- uint32 sbtmerrlog;
+ uint32 sbtmerrlog; /* (sonics >= 2.3) */
uint32 PAD[3];
- uint32 sbadmatch3;
+ uint32 sbadmatch3; /* address match3 */
uint32 PAD;
- uint32 sbadmatch2;
+ uint32 sbadmatch2; /* address match2 */
uint32 PAD;
- uint32 sbadmatch1;
+ uint32 sbadmatch1; /* address match1 */
uint32 PAD[7];
- uint32 sbimstate;
- uint32 sbintvec;
- uint32 sbtmstatelow;
- uint32 sbtmstatehigh;
- uint32 sbbwa0;
+ uint32 sbimstate; /* initiator agent state */
+ uint32 sbintvec; /* interrupt mask */
+ uint32 sbtmstatelow; /* target state */
+ uint32 sbtmstatehigh; /* target state */
+ uint32 sbbwa0; /* bandwidth allocation table0 */
uint32 PAD;
- uint32 sbimconfiglow;
- uint32 sbimconfighigh;
- uint32 sbadmatch0;
+ uint32 sbimconfiglow; /* initiator configuration */
+ uint32 sbimconfighigh; /* initiator configuration */
+ uint32 sbadmatch0; /* address match0 */
uint32 PAD;
- uint32 sbtmconfiglow;
- uint32 sbtmconfighigh;
- uint32 sbbconfig;
+ uint32 sbtmconfiglow; /* target configuration */
+ uint32 sbtmconfighigh; /* target configuration */
+ uint32 sbbconfig; /* broadcast configuration */
uint32 PAD;
- uint32 sbbstate;
+ uint32 sbbstate; /* broadcast state */
uint32 PAD[3];
- uint32 sbactcnfg;
+ uint32 sbactcnfg; /* activate configuration */
uint32 PAD[3];
- uint32 sbflagst;
+ uint32 sbflagst; /* current sbflags */
uint32 PAD[3];
- uint32 sbidlow;
- uint32 sbidhigh;
+ uint32 sbidlow; /* identification */
+ uint32 sbidhigh; /* identification */
} sbconfig_t;
-#endif
+#endif /* !_LANGUAGE_ASSEMBLY && !__ASSEMBLY__ */
-
-#define SBIPS_INT1_MASK 0x3f
+/* sbipsflag */
+#define SBIPS_INT1_MASK 0x3f /* which sbflags get routed to mips interrupt 1 */
#define SBIPS_INT1_SHIFT 0
-#define SBIPS_INT2_MASK 0x3f00
+#define SBIPS_INT2_MASK 0x3f00 /* which sbflags get routed to mips interrupt 2 */
#define SBIPS_INT2_SHIFT 8
-#define SBIPS_INT3_MASK 0x3f0000
+#define SBIPS_INT3_MASK 0x3f0000 /* which sbflags get routed to mips interrupt 3 */
#define SBIPS_INT3_SHIFT 16
-#define SBIPS_INT4_MASK 0x3f000000
+#define SBIPS_INT4_MASK 0x3f000000 /* which sbflags get routed to mips interrupt 4 */
#define SBIPS_INT4_SHIFT 24
-
-#define SBTPS_NUM0_MASK 0x3f
-#define SBTPS_F0EN0 0x40
-
-
-#define SBTMEL_CM 0x00000007
-#define SBTMEL_CI 0x0000ff00
-#define SBTMEL_EC 0x0f000000
-#define SBTMEL_ME 0x80000000
-
-
-#define SBIM_PC 0xf
-#define SBIM_AP_MASK 0x30
-#define SBIM_AP_BOTH 0x00
-#define SBIM_AP_TS 0x10
-#define SBIM_AP_TK 0x20
-#define SBIM_AP_RSV 0x30
-#define SBIM_IBE 0x20000
-#define SBIM_TO 0x40000
-#define SBIM_BY 0x01800000
-#define SBIM_RJ 0x02000000
-
-
-#define SBTML_RESET 0x0001
-#define SBTML_REJ_MASK 0x0006
-#define SBTML_REJ 0x0002
-#define SBTML_TMPREJ 0x0004
-
-#define SBTML_SICF_SHIFT 16
-
-
-#define SBTMH_SERR 0x0001
-#define SBTMH_INT 0x0002
-#define SBTMH_BUSY 0x0004
-#define SBTMH_TO 0x0020
-
-#define SBTMH_SISF_SHIFT 16
-
-
-#define SBBWA_TAB0_MASK 0xffff
-#define SBBWA_TAB1_MASK 0xffff
+/* sbtpsflag */
+#define SBTPS_NUM0_MASK 0x3f /* interrupt sbFlag # generated by this core */
+#define SBTPS_F0EN0 0x40 /* interrupt is always sent on the backplane */
+
+/* sbtmerrlog */
+#define SBTMEL_CM 0x00000007 /* command */
+#define SBTMEL_CI 0x0000ff00 /* connection id */
+#define SBTMEL_EC 0x0f000000 /* error code */
+#define SBTMEL_ME 0x80000000 /* multiple error */
+
+/* sbimstate */
+#define SBIM_PC 0xf /* pipecount */
+#define SBIM_AP_MASK 0x30 /* arbitration policy */
+#define SBIM_AP_BOTH 0x00 /* use both timeslices and token */
+#define SBIM_AP_TS 0x10 /* use timeslices only */
+#define SBIM_AP_TK 0x20 /* use token only */
+#define SBIM_AP_RSV 0x30 /* reserved */
+#define SBIM_IBE 0x20000 /* inbanderror */
+#define SBIM_TO 0x40000 /* timeout */
+#define SBIM_BY 0x01800000 /* busy (sonics >= 2.3) */
+#define SBIM_RJ 0x02000000 /* reject (sonics >= 2.3) */
+
+/* sbtmstatelow */
+#define SBTML_RESET 0x0001 /* reset */
+#define SBTML_REJ_MASK 0x0006 /* reject field */
+#define SBTML_REJ 0x0002 /* reject */
+#define SBTML_TMPREJ 0x0004 /* temporary reject, for error recovery */
+
+#define SBTML_SICF_SHIFT 16 /* Shift to locate the SI control flags in sbtml */
+
+/* sbtmstatehigh */
+#define SBTMH_SERR 0x0001 /* serror */
+#define SBTMH_INT 0x0002 /* interrupt */
+#define SBTMH_BUSY 0x0004 /* busy */
+#define SBTMH_TO 0x0020 /* timeout (sonics >= 2.3) */
+
+#define SBTMH_SISF_SHIFT 16 /* Shift to locate the SI status flags in sbtmh */
+
+/* sbbwa0 */
+#define SBBWA_TAB0_MASK 0xffff /* lookup table 0 */
+#define SBBWA_TAB1_MASK 0xffff /* lookup table 1 */
#define SBBWA_TAB1_SHIFT 16
-
-#define SBIMCL_STO_MASK 0x7
-#define SBIMCL_RTO_MASK 0x70
+/* sbimconfiglow */
+#define SBIMCL_STO_MASK 0x7 /* service timeout */
+#define SBIMCL_RTO_MASK 0x70 /* request timeout */
#define SBIMCL_RTO_SHIFT 4
-#define SBIMCL_CID_MASK 0xff0000
+#define SBIMCL_CID_MASK 0xff0000 /* connection id */
#define SBIMCL_CID_SHIFT 16
-
-#define SBIMCH_IEM_MASK 0xc
-#define SBIMCH_TEM_MASK 0x30
+/* sbimconfighigh */
+#define SBIMCH_IEM_MASK 0xc /* inband error mode */
+#define SBIMCH_TEM_MASK 0x30 /* timeout error mode */
#define SBIMCH_TEM_SHIFT 4
-#define SBIMCH_BEM_MASK 0xc0
+#define SBIMCH_BEM_MASK 0xc0 /* bus error mode */
#define SBIMCH_BEM_SHIFT 6
-
-#define SBAM_TYPE_MASK 0x3
-#define SBAM_AD64 0x4
-#define SBAM_ADINT0_MASK 0xf8
+/* sbadmatch0 */
+#define SBAM_TYPE_MASK 0x3 /* address type */
+#define SBAM_AD64 0x4 /* reserved */
+#define SBAM_ADINT0_MASK 0xf8 /* type0 size */
#define SBAM_ADINT0_SHIFT 3
-#define SBAM_ADINT1_MASK 0x1f8
+#define SBAM_ADINT1_MASK 0x1f8 /* type1 size */
#define SBAM_ADINT1_SHIFT 3
-#define SBAM_ADINT2_MASK 0x1f8
+#define SBAM_ADINT2_MASK 0x1f8 /* type2 size */
#define SBAM_ADINT2_SHIFT 3
-#define SBAM_ADEN 0x400
-#define SBAM_ADNEG 0x800
-#define SBAM_BASE0_MASK 0xffffff00
+#define SBAM_ADEN 0x400 /* enable */
+#define SBAM_ADNEG 0x800 /* negative decode */
+#define SBAM_BASE0_MASK 0xffffff00 /* type0 base address */
#define SBAM_BASE0_SHIFT 8
-#define SBAM_BASE1_MASK 0xfffff000
+#define SBAM_BASE1_MASK 0xfffff000 /* type1 base address for the core */
#define SBAM_BASE1_SHIFT 12
-#define SBAM_BASE2_MASK 0xffff0000
+#define SBAM_BASE2_MASK 0xffff0000 /* type2 base address for the core */
#define SBAM_BASE2_SHIFT 16
-
-#define SBTMCL_CD_MASK 0xff
-#define SBTMCL_CO_MASK 0xf800
+/* sbtmconfiglow */
+#define SBTMCL_CD_MASK 0xff /* clock divide */
+#define SBTMCL_CO_MASK 0xf800 /* clock offset */
#define SBTMCL_CO_SHIFT 11
-#define SBTMCL_IF_MASK 0xfc0000
+#define SBTMCL_IF_MASK 0xfc0000 /* interrupt flags */
#define SBTMCL_IF_SHIFT 18
-#define SBTMCL_IM_MASK 0x3000000
+#define SBTMCL_IM_MASK 0x3000000 /* interrupt mode */
#define SBTMCL_IM_SHIFT 24
-
-#define SBTMCH_BM_MASK 0x3
-#define SBTMCH_RM_MASK 0x3
+/* sbtmconfighigh */
+#define SBTMCH_BM_MASK 0x3 /* busy mode */
+#define SBTMCH_RM_MASK 0x3 /* retry mode */
#define SBTMCH_RM_SHIFT 2
-#define SBTMCH_SM_MASK 0x30
+#define SBTMCH_SM_MASK 0x30 /* stop mode */
#define SBTMCH_SM_SHIFT 4
-#define SBTMCH_EM_MASK 0x300
+#define SBTMCH_EM_MASK 0x300 /* sb error mode */
#define SBTMCH_EM_SHIFT 8
-#define SBTMCH_IM_MASK 0xc00
+#define SBTMCH_IM_MASK 0xc00 /* int mode */
#define SBTMCH_IM_SHIFT 10
-
-#define SBBC_LAT_MASK 0x3
-#define SBBC_MAX0_MASK 0xf0000
+/* sbbconfig */
+#define SBBC_LAT_MASK 0x3 /* sb latency */
+#define SBBC_MAX0_MASK 0xf0000 /* maxccntr0 */
#define SBBC_MAX0_SHIFT 16
-#define SBBC_MAX1_MASK 0xf00000
+#define SBBC_MAX1_MASK 0xf00000 /* maxccntr1 */
#define SBBC_MAX1_SHIFT 20
-#define SBBS_SRD 0x1
-#define SBBS_HRD 0x2
+/* sbbstate */
+#define SBBS_SRD 0x1 /* st reg disable */
+#define SBBS_HRD 0x2 /* hold reg disable */
-
-
-#define SBIDL_CS_MASK 0x3
-#define SBIDL_AR_MASK 0x38
+/* sbidlow */
+#define SBIDL_CS_MASK 0x3 /* config space */
+#define SBIDL_AR_MASK 0x38 /* # address ranges supported */
#define SBIDL_AR_SHIFT 3
-#define SBIDL_SYNCH 0x40
-#define SBIDL_INIT 0x80
-#define SBIDL_MINLAT_MASK 0xf00
+#define SBIDL_SYNCH 0x40 /* sync */
+#define SBIDL_INIT 0x80 /* initiator */
+#define SBIDL_MINLAT_MASK 0xf00 /* minimum backplane latency */
#define SBIDL_MINLAT_SHIFT 8
-#define SBIDL_MAXLAT 0xf000
+#define SBIDL_MAXLAT 0xf000 /* maximum backplane latency */
#define SBIDL_MAXLAT_SHIFT 12
-#define SBIDL_FIRST 0x10000
-#define SBIDL_CW_MASK 0xc0000
+#define SBIDL_FIRST 0x10000 /* this initiator is first */
+#define SBIDL_CW_MASK 0xc0000 /* cycle counter width */
#define SBIDL_CW_SHIFT 18
-#define SBIDL_TP_MASK 0xf00000
+#define SBIDL_TP_MASK 0xf00000 /* target ports */
#define SBIDL_TP_SHIFT 20
-#define SBIDL_IP_MASK 0xf000000
+#define SBIDL_IP_MASK 0xf000000 /* initiator ports */
#define SBIDL_IP_SHIFT 24
-#define SBIDL_RV_MASK 0xf0000000
+#define SBIDL_RV_MASK 0xf0000000 /* sonics backplane revision code */
#define SBIDL_RV_SHIFT 28
-#define SBIDL_RV_2_2 0x00000000
-#define SBIDL_RV_2_3 0x10000000
+#define SBIDL_RV_2_2 0x00000000 /* version 2.2 or earlier */
+#define SBIDL_RV_2_3 0x10000000 /* version 2.3 */
-
-#define SBIDH_RC_MASK 0x000f
-#define SBIDH_RCE_MASK 0x7000
+/* sbidhigh */
+#define SBIDH_RC_MASK 0x000f /* revision code */
+#define SBIDH_RCE_MASK 0x7000 /* revision code extension field */
#define SBIDH_RCE_SHIFT 8
#define SBCOREREV(sbidh) \
((((sbidh) & SBIDH_RCE_MASK) >> SBIDH_RCE_SHIFT) | ((sbidh) & SBIDH_RC_MASK))
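As a quick worked example of SBCOREREV() above (standalone sketch, not part of the patch; the sbidhigh value is invented): the 4-bit revision code in bits 3:0 is combined with the extension field in bits 14:12, which the macro shifts down by 8 before ORing, i.e. corerev = (rce << 4) | rc.

/* Illustrative only -- standalone sketch, not part of the patch. */
#include <stdio.h>
typedef unsigned int uint32;

#define SBIDH_RC_MASK   0x000f
#define SBIDH_RCE_MASK  0x7000
#define SBIDH_RCE_SHIFT 8
#define SBCOREREV(sbidh) \
	((((sbidh) & SBIDH_RCE_MASK) >> SBIDH_RCE_SHIFT) | ((sbidh) & SBIDH_RC_MASK))

int main(void)
{
	uint32 sbidhigh = 0x2003;	/* hypothetical: rce=2, rc=3 */

	printf("core rev = %u\n", SBCOREREV(sbidhigh));	/* (2 << 4) | 3 = 35 */
	return 0;
}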
-#define SBIDH_CC_MASK 0x8ff0
+#define SBIDH_CC_MASK 0x8ff0 /* core code */
#define SBIDH_CC_SHIFT 4
-#define SBIDH_VC_MASK 0xffff0000
+#define SBIDH_VC_MASK 0xffff0000 /* vendor code */
#define SBIDH_VC_SHIFT 16
-#define SB_COMMIT 0xfd8
-
+#define SB_COMMIT 0xfd8 /* update buffered registers value */
-#define SB_VEND_BCM 0x4243
+/* vendor codes */
+#define SB_VEND_BCM 0x4243 /* Broadcom's SB vendor code */
-#endif
+#endif /* _SBCONFIG_H */
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: sbhnddma.h 373617 2012-12-07 23:03:08Z $
+ * $Id: sbhnddma.h 452424 2014-01-30 09:43:39Z $
*/
#ifndef _sbhnddma_h_
#define _sbhnddma_h_
+/* DMA structure:
+ * supports two DMA engines: 32-bit or 64-bit addressing
+ * the basic DMA register set is per channel (transmit or receive)
+ * a pair of channels is defined for convenience
+ */
+/* 32-bit addressing */
-
-
-
+/* dma registers per channel(xmt or rcv) */
typedef volatile struct {
- uint32 control;
- uint32 addr;
- uint32 ptr;
- uint32 status;
+ uint32 control; /* enable, et al */
+ uint32 addr; /* descriptor ring base address (4K aligned) */
+ uint32 ptr; /* last descriptor posted to chip */
+ uint32 status; /* current active descriptor, et al */
} dma32regs_t;
typedef volatile struct {
- dma32regs_t xmt;
- dma32regs_t rcv;
+ dma32regs_t xmt; /* dma tx channel */
+ dma32regs_t rcv; /* dma rx channel */
} dma32regp_t;
-typedef volatile struct {
- uint32 fifoaddr;
- uint32 fifodatalow;
- uint32 fifodatahigh;
- uint32 pad;
+typedef volatile struct { /* diag access */
+ uint32 fifoaddr; /* diag address */
+ uint32 fifodatalow; /* low 32bits of data */
+ uint32 fifodatahigh; /* high 32bits of data */
+ uint32 pad; /* reserved */
} dma32diag_t;
-
+/*
+ * DMA Descriptor
+ * Descriptors are only read by the hardware, never written back.
+ */
typedef volatile struct {
- uint32 ctrl;
- uint32 addr;
+ uint32 ctrl; /* misc control bits & bufcount */
+ uint32 addr; /* data buffer address */
} dma32dd_t;
-
+/*
+ * Each descriptor ring must be 4096byte aligned, and fit within a single 4096byte page.
+ */
#define D32RINGALIGN_BITS 12
#define D32MAXRINGSZ (1 << D32RINGALIGN_BITS)
#define D32RINGALIGN (1 << D32RINGALIGN_BITS)
#define D32MAXDD (D32MAXRINGSZ / sizeof (dma32dd_t))
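A small arithmetic check of the 32-bit ring constants above (standalone sketch, not part of the patch, assuming a typical target where uint32 is 4 bytes): a 4096-byte, 4096-byte-aligned ring of 8-byte descriptors gives D32MAXDD = 512 entries.

/* Illustrative only -- standalone sketch, not part of the patch. */
#include <assert.h>
typedef unsigned int uint32;

typedef volatile struct {
	uint32 ctrl;	/* misc control bits & bufcount */
	uint32 addr;	/* data buffer address */
} dma32dd_t;

#define D32RINGALIGN_BITS 12
#define D32MAXRINGSZ      (1 << D32RINGALIGN_BITS)
#define D32RINGALIGN      (1 << D32RINGALIGN_BITS)
#define D32MAXDD          (D32MAXRINGSZ / sizeof (dma32dd_t))

int main(void)
{
	/* 4096-byte ring / 8-byte descriptor = 512 descriptors max */
	assert(sizeof(dma32dd_t) == 8);
	assert(D32MAXDD == 512);
	return 0;
}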
-
-#define XC_XE ((uint32)1 << 0)
-#define XC_SE ((uint32)1 << 1)
-#define XC_LE ((uint32)1 << 2)
-#define XC_FL ((uint32)1 << 4)
-#define XC_MR_MASK 0x000000C0
+/* transmit channel control */
+#define XC_XE ((uint32)1 << 0) /* transmit enable */
+#define XC_SE ((uint32)1 << 1) /* transmit suspend request */
+#define XC_LE ((uint32)1 << 2) /* loopback enable */
+#define XC_FL ((uint32)1 << 4) /* flush request */
+#define XC_MR_MASK 0x000001C0 /* Multiple outstanding reads */
#define XC_MR_SHIFT 6
-#define XC_PD ((uint32)1 << 11)
-#define XC_AE ((uint32)3 << 16)
+#define XC_PD ((uint32)1 << 11) /* parity check disable */
+#define XC_AE ((uint32)3 << 16) /* address extension bits */
#define XC_AE_SHIFT 16
-#define XC_BL_MASK 0x001C0000
+#define XC_BL_MASK 0x001C0000 /* BurstLen bits */
#define XC_BL_SHIFT 18
-#define XC_PC_MASK 0x00E00000
+#define XC_PC_MASK 0x00E00000 /* Prefetch control */
#define XC_PC_SHIFT 21
-#define XC_PT_MASK 0x03000000
+#define XC_PT_MASK 0x03000000 /* Prefetch threshold */
#define XC_PT_SHIFT 24
-
+/* Multiple outstanding reads */
#define DMA_MR_1 0
#define DMA_MR_2 1
-
-
-
+#define DMA_MR_4 2
+#define DMA_MR_8 3
+#define DMA_MR_12 4
+#define DMA_MR_16 5
+#define DMA_MR_20 6
+#define DMA_MR_32 7
+
+/* DMA Burst Length in bytes */
#define DMA_BL_16 0
#define DMA_BL_32 1
#define DMA_BL_64 2
#define DMA_BL_512 5
#define DMA_BL_1024 6
-
+/* Prefetch control */
#define DMA_PC_0 0
#define DMA_PC_4 1
#define DMA_PC_8 2
#define DMA_PC_16 3
+/* others: reserved */
-
-
+/* Prefetch threshold */
#define DMA_PT_1 0
#define DMA_PT_2 1
#define DMA_PT_4 2
#define DMA_PT_8 3
-#define XP_LD_MASK 0xfff
+/* transmit descriptor table pointer */
+#define XP_LD_MASK 0xfff /* last valid descriptor */
-
-
-#define XS_CD_MASK 0x0fff
-#define XS_XS_MASK 0xf000
+/* transmit channel status */
+#define XS_CD_MASK 0x0fff /* current descriptor pointer */
+#define XS_XS_MASK 0xf000 /* transmit state */
#define XS_XS_SHIFT 12
-#define XS_XS_DISABLED 0x0000
-#define XS_XS_ACTIVE 0x1000
-#define XS_XS_IDLE 0x2000
-#define XS_XS_STOPPED 0x3000
-#define XS_XS_SUSP 0x4000
-#define XS_XE_MASK 0xf0000
+#define XS_XS_DISABLED 0x0000 /* disabled */
+#define XS_XS_ACTIVE 0x1000 /* active */
+#define XS_XS_IDLE 0x2000 /* idle wait */
+#define XS_XS_STOPPED 0x3000 /* stopped */
+#define XS_XS_SUSP 0x4000 /* suspend pending */
+#define XS_XE_MASK 0xf0000 /* transmit errors */
#define XS_XE_SHIFT 16
-#define XS_XE_NOERR 0x00000
-#define XS_XE_DPE 0x10000
-#define XS_XE_DFU 0x20000
-#define XS_XE_BEBR 0x30000
-#define XS_XE_BEDA 0x40000
-#define XS_AD_MASK 0xfff00000
+#define XS_XE_NOERR 0x00000 /* no error */
+#define XS_XE_DPE 0x10000 /* descriptor protocol error */
+#define XS_XE_DFU 0x20000 /* data fifo underrun */
+#define XS_XE_BEBR 0x30000 /* bus error on buffer read */
+#define XS_XE_BEDA 0x40000 /* bus error on descriptor access */
+#define XS_AD_MASK 0xfff00000 /* active descriptor */
#define XS_AD_SHIFT 20
-
-#define RC_RE ((uint32)1 << 0)
-#define RC_RO_MASK 0xfe
+/* receive channel control */
+#define RC_RE ((uint32)1 << 0) /* receive enable */
+#define RC_RO_MASK 0xfe /* receive frame offset */
#define RC_RO_SHIFT 1
-#define RC_FM ((uint32)1 << 8)
-#define RC_SH ((uint32)1 << 9)
-#define RC_OC ((uint32)1 << 10)
-#define RC_PD ((uint32)1 << 11)
-#define RC_AE ((uint32)3 << 16)
+#define RC_FM ((uint32)1 << 8) /* direct fifo receive (pio) mode */
+#define RC_SH ((uint32)1 << 9) /* separate rx header descriptor enable */
+#define RC_OC ((uint32)1 << 10) /* overflow continue */
+#define RC_PD ((uint32)1 << 11) /* parity check disable */
+#define RC_AE ((uint32)3 << 16) /* address extension bits */
#define RC_AE_SHIFT 16
-#define RC_BL_MASK 0x001C0000
+#define RC_BL_MASK 0x001C0000 /* BurstLen bits */
#define RC_BL_SHIFT 18
-#define RC_PC_MASK 0x00E00000
+#define RC_PC_MASK 0x00E00000 /* Prefetch control */
#define RC_PC_SHIFT 21
-#define RC_PT_MASK 0x03000000
+#define RC_PT_MASK 0x03000000 /* Prefetch threshold */
#define RC_PT_SHIFT 24
+/* receive descriptor table pointer */
+#define RP_LD_MASK 0xfff /* last valid descriptor */
-#define RP_LD_MASK 0xfff
-
-
-#define RS_CD_MASK 0x0fff
-#define RS_RS_MASK 0xf000
+/* receive channel status */
+#define RS_CD_MASK 0x0fff /* current descriptor pointer */
+#define RS_RS_MASK 0xf000 /* receive state */
#define RS_RS_SHIFT 12
-#define RS_RS_DISABLED 0x0000
-#define RS_RS_ACTIVE 0x1000
-#define RS_RS_IDLE 0x2000
-#define RS_RS_STOPPED 0x3000
-#define RS_RE_MASK 0xf0000
+#define RS_RS_DISABLED 0x0000 /* disabled */
+#define RS_RS_ACTIVE 0x1000 /* active */
+#define RS_RS_IDLE 0x2000 /* idle wait */
+#define RS_RS_STOPPED 0x3000 /* reserved */
+#define RS_RE_MASK 0xf0000 /* receive errors */
#define RS_RE_SHIFT 16
-#define RS_RE_NOERR 0x00000
-#define RS_RE_DPE 0x10000
-#define RS_RE_DFO 0x20000
-#define RS_RE_BEBW 0x30000
-#define RS_RE_BEDA 0x40000
-#define RS_AD_MASK 0xfff00000
+#define RS_RE_NOERR 0x00000 /* no error */
+#define RS_RE_DPE 0x10000 /* descriptor protocol error */
+#define RS_RE_DFO 0x20000 /* data fifo overflow */
+#define RS_RE_BEBW 0x30000 /* bus error on buffer write */
+#define RS_RE_BEDA 0x40000 /* bus error on descriptor access */
+#define RS_AD_MASK 0xfff00000 /* active descriptor */
#define RS_AD_SHIFT 20
-
-#define FA_OFF_MASK 0xffff
-#define FA_SEL_MASK 0xf0000
+/* fifoaddr */
+#define FA_OFF_MASK 0xffff /* offset */
+#define FA_SEL_MASK 0xf0000 /* select */
#define FA_SEL_SHIFT 16
-#define FA_SEL_XDD 0x00000
-#define FA_SEL_XDP 0x10000
-#define FA_SEL_RDD 0x40000
-#define FA_SEL_RDP 0x50000
-#define FA_SEL_XFD 0x80000
-#define FA_SEL_XFP 0x90000
-#define FA_SEL_RFD 0xc0000
-#define FA_SEL_RFP 0xd0000
-#define FA_SEL_RSD 0xe0000
-#define FA_SEL_RSP 0xf0000
-
-
-#define CTRL_BC_MASK 0x00001fff
-#define CTRL_AE ((uint32)3 << 16)
+#define FA_SEL_XDD 0x00000 /* transmit dma data */
+#define FA_SEL_XDP 0x10000 /* transmit dma pointers */
+#define FA_SEL_RDD 0x40000 /* receive dma data */
+#define FA_SEL_RDP 0x50000 /* receive dma pointers */
+#define FA_SEL_XFD 0x80000 /* transmit fifo data */
+#define FA_SEL_XFP 0x90000 /* transmit fifo pointers */
+#define FA_SEL_RFD 0xc0000 /* receive fifo data */
+#define FA_SEL_RFP 0xd0000 /* receive fifo pointers */
+#define FA_SEL_RSD 0xe0000 /* receive frame status data */
+#define FA_SEL_RSP 0xf0000 /* receive frame status pointers */
+
+/* descriptor control flags */
+#define CTRL_BC_MASK 0x00001fff /* buffer byte count; real data length must be <= 4KB */
+#define CTRL_AE ((uint32)3 << 16) /* address extension bits */
#define CTRL_AE_SHIFT 16
-#define CTRL_PARITY ((uint32)3 << 18)
-#define CTRL_EOT ((uint32)1 << 28)
-#define CTRL_IOC ((uint32)1 << 29)
-#define CTRL_EOF ((uint32)1 << 30)
-#define CTRL_SOF ((uint32)1 << 31)
-
+#define CTRL_PARITY ((uint32)3 << 18) /* parity bit */
+#define CTRL_EOT ((uint32)1 << 28) /* end of descriptor table */
+#define CTRL_IOC ((uint32)1 << 29) /* interrupt on completion */
+#define CTRL_EOF ((uint32)1 << 30) /* end of frame */
+#define CTRL_SOF ((uint32)1 << 31) /* start of frame */
+/* control flags in the range [27:20] are core-specific and not defined here */
#define CTRL_CORE_MASK 0x0ff00000
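For orientation, a hedged sketch of how the descriptor control flags above are commonly combined for a frame that fits in one buffer (illustrative, not the driver's actual descriptor setup):

/* Illustrative only: SOF+EOF mark a single-buffer frame, IOC requests an
 * interrupt on completion, EOT marks the last descriptor in the table.
 */
static inline uint32 d32_ctrl_single_buf(uint32 len, int last_in_ring)
{
	return CTRL_SOF | CTRL_EOF | CTRL_IOC |
	       (last_in_ring ? CTRL_EOT : 0) |
	       (len & CTRL_BC_MASK);
}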
+/* 64 bits addressing */
-
-
+/* dma registers per channel(xmt or rcv) */
typedef volatile struct {
- uint32 control;
- uint32 ptr;
- uint32 addrlow;
- uint32 addrhigh;
- uint32 status0;
- uint32 status1;
+ uint32 control; /* enable, et al */
+ uint32 ptr; /* last descriptor posted to chip */
+ uint32 addrlow; /* descriptor ring base address low 32-bits (8K aligned) */
+ uint32 addrhigh; /* descriptor ring base address bits 63:32 (8K aligned) */
+ uint32 status0; /* current descriptor, xmt state */
+ uint32 status1; /* active descriptor, xmt error */
} dma64regs_t;
typedef volatile struct {
- dma64regs_t tx;
- dma64regs_t rx;
+ dma64regs_t tx; /* dma64 tx channel */
+ dma64regs_t rx; /* dma64 rx channel */
} dma64regp_t;
-typedef volatile struct {
- uint32 fifoaddr;
- uint32 fifodatalow;
- uint32 fifodatahigh;
- uint32 pad;
+typedef volatile struct { /* diag access */
+ uint32 fifoaddr; /* diag address */
+ uint32 fifodatalow; /* low 32bits of data */
+ uint32 fifodatahigh; /* high 32bits of data */
+ uint32 pad; /* reserved */
} dma64diag_t;
-
+/*
+ * DMA Descriptor
+ * Descriptors are only read by the hardware, never written back.
+ */
typedef volatile struct {
- uint32 ctrl1;
- uint32 ctrl2;
- uint32 addrlow;
- uint32 addrhigh;
+ uint32 ctrl1; /* misc control bits */
+ uint32 ctrl2; /* buffer count and address extension */
+ uint32 addrlow; /* memory address of the data buffer, bits 31:0 */
+ uint32 addrhigh; /* memory address of the data buffer, bits 63:32 */
} dma64dd_t;
-
+/*
+ * Each descriptor ring must be 8kB aligned, and fit within a contiguous 8kB physical address range.
+ */
#define D64RINGALIGN_BITS 13
#define D64MAXRINGSZ (1 << D64RINGALIGN_BITS)
#define D64RINGBOUNDARY (1 << D64RINGALIGN_BITS)
#define D64MAXDD (D64MAXRINGSZ / sizeof (dma64dd_t))
-
+/* for cores with large descriptor ring support, descriptor ring size can be up to 4096 */
#define D64MAXDD_LARGE ((1 << 16) / sizeof (dma64dd_t))
-
+/* for cores with large descriptor ring support (4k descriptors), descriptor ring cannot cross
+ * 64K boundary
+ */
#define D64RINGBOUNDARY_LARGE (1 << 16)
-
+/*
+ * Default DMA Burstlen values for USBRev >= 12 and SDIORev >= 11.
+ * When this field contains the value N, the burst length is 2**(N + 4) bytes.
+ */
#define D64_DEF_USBBURSTLEN 2
#define D64_DEF_SDIOBURSTLEN 1
#define D64_SDIOBURSTLEN DMA_BL_32
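Per the comment above, a burstlen field value N encodes 2**(N + 4) bytes, so the defaults work out to 64 bytes for USB (N = 2) and 32 bytes for SDIO (N = 1); a one-line illustrative decode:

/* Illustrative only: decode a burstlen field value into bytes. */
static inline unsigned int dma_burstlen_bytes(unsigned int n)
{
	return 1u << (n + 4);	/* e.g. D64_DEF_USBBURSTLEN (2) -> 64 bytes */
}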
#endif
-
-#define D64_XC_XE 0x00000001
-#define D64_XC_SE 0x00000002
-#define D64_XC_LE 0x00000004
-#define D64_XC_FL 0x00000010
-#define D64_XC_MR_MASK 0x000000C0
+/* transmit channel control */
+#define D64_XC_XE 0x00000001 /* transmit enable */
+#define D64_XC_SE 0x00000002 /* transmit suspend request */
+#define D64_XC_LE 0x00000004 /* loopback enable */
+#define D64_XC_FL 0x00000010 /* flush request */
+#define D64_XC_MR_MASK 0x000001C0 /* Multiple outstanding reads */
#define D64_XC_MR_SHIFT 6
-#define D64_XC_PD 0x00000800
-#define D64_XC_AE 0x00030000
+#define D64_XC_PD 0x00000800 /* parity check disable */
+#define D64_XC_AE 0x00030000 /* address extension bits */
#define D64_XC_AE_SHIFT 16
-#define D64_XC_BL_MASK 0x001C0000
+#define D64_XC_BL_MASK 0x001C0000 /* BurstLen bits */
#define D64_XC_BL_SHIFT 18
-#define D64_XC_PC_MASK 0x00E00000
+#define D64_XC_PC_MASK 0x00E00000 /* Prefetch control */
#define D64_XC_PC_SHIFT 21
-#define D64_XC_PT_MASK 0x03000000
+#define D64_XC_PT_MASK 0x03000000 /* Prefetch threshold */
#define D64_XC_PT_SHIFT 24
+/* transmit descriptor table pointer */
+#define D64_XP_LD_MASK 0x00001fff /* last valid descriptor */
-#define D64_XP_LD_MASK 0x00001fff
-
-
-#define D64_XS0_CD_MASK (di->d64_xs0_cd_mask)
-#define D64_XS0_XS_MASK 0xf0000000
+/* transmit channel status */
+#define D64_XS0_CD_MASK (di->d64_xs0_cd_mask) /* current descriptor pointer */
+#define D64_XS0_XS_MASK 0xf0000000 /* transmit state */
#define D64_XS0_XS_SHIFT 28
-#define D64_XS0_XS_DISABLED 0x00000000
-#define D64_XS0_XS_ACTIVE 0x10000000
-#define D64_XS0_XS_IDLE 0x20000000
-#define D64_XS0_XS_STOPPED 0x30000000
-#define D64_XS0_XS_SUSP 0x40000000
-
-#define D64_XS1_AD_MASK (di->d64_xs1_ad_mask)
-#define D64_XS1_XE_MASK 0xf0000000
+#define D64_XS0_XS_DISABLED 0x00000000 /* disabled */
+#define D64_XS0_XS_ACTIVE 0x10000000 /* active */
+#define D64_XS0_XS_IDLE 0x20000000 /* idle wait */
+#define D64_XS0_XS_STOPPED 0x30000000 /* stopped */
+#define D64_XS0_XS_SUSP 0x40000000 /* suspend pending */
+
+#define D64_XS1_AD_MASK (di->d64_xs1_ad_mask) /* active descriptor */
+#define D64_XS1_XE_MASK 0xf0000000 /* transmit errors */
#define D64_XS1_XE_SHIFT 28
-#define D64_XS1_XE_NOERR 0x00000000
-#define D64_XS1_XE_DPE 0x10000000
-#define D64_XS1_XE_DFU 0x20000000
-#define D64_XS1_XE_DTE 0x30000000
-#define D64_XS1_XE_DESRE 0x40000000
-#define D64_XS1_XE_COREE 0x50000000
-
-
-#define D64_RC_RE 0x00000001
-#define D64_RC_RO_MASK 0x000000fe
+#define D64_XS1_XE_NOERR 0x00000000 /* no error */
+#define D64_XS1_XE_DPE 0x10000000 /* descriptor protocol error */
+#define D64_XS1_XE_DFU 0x20000000 /* data fifo underrun */
+#define D64_XS1_XE_DTE 0x30000000 /* data transfer error */
+#define D64_XS1_XE_DESRE 0x40000000 /* descriptor read error */
+#define D64_XS1_XE_COREE 0x50000000 /* core error */
+
+/* receive channel control */
+#define D64_RC_RE 0x00000001 /* receive enable */
+#define D64_RC_RO_MASK 0x000000fe /* receive frame offset */
#define D64_RC_RO_SHIFT 1
-#define D64_RC_FM 0x00000100
-#define D64_RC_SH 0x00000200
-#define D64_RC_OC 0x00000400
-#define D64_RC_PD 0x00000800
-#define D64_RC_GE 0x00004000
-#define D64_RC_AE 0x00030000
+#define D64_RC_FM 0x00000100 /* direct fifo receive (pio) mode */
+#define D64_RC_SH 0x00000200 /* separate rx header descriptor enable */
+#define D64_RC_SHIFT 9 /* separate rx header descriptor enable */
+#define D64_RC_OC 0x00000400 /* overflow continue */
+#define D64_RC_PD 0x00000800 /* parity check disable */
+#define D64_RC_GE 0x00004000 /* Glom enable */
+#define D64_RC_AE 0x00030000 /* address extension bits */
#define D64_RC_AE_SHIFT 16
-#define D64_RC_BL_MASK 0x001C0000
+#define D64_RC_BL_MASK 0x001C0000 /* BurstLen bits */
#define D64_RC_BL_SHIFT 18
-#define D64_RC_PC_MASK 0x00E00000
+#define D64_RC_PC_MASK 0x00E00000 /* Prefetch control */
#define D64_RC_PC_SHIFT 21
-#define D64_RC_PT_MASK 0x03000000
+#define D64_RC_PT_MASK 0x03000000 /* Prefetch threshold */
#define D64_RC_PT_SHIFT 24
-
-#define DMA_CTRL_PEN (1 << 0)
-#define DMA_CTRL_ROC (1 << 1)
-#define DMA_CTRL_RXMULTI (1 << 2)
-#define DMA_CTRL_UNFRAMED (1 << 3)
+/* flags for dma controller */
+#define DMA_CTRL_PEN (1 << 0) /* parity enable */
+#define DMA_CTRL_ROC (1 << 1) /* rx overflow continue */
+#define DMA_CTRL_RXMULTI (1 << 2) /* allow rx scatter to multiple descriptors */
+#define DMA_CTRL_UNFRAMED (1 << 3) /* Unframed Rx/Tx data */
#define DMA_CTRL_USB_BOUNDRY4KB_WAR (1 << 4)
-#define DMA_CTRL_DMA_AVOIDANCE_WAR (1 << 5)
+#define DMA_CTRL_DMA_AVOIDANCE_WAR (1 << 5) /* DMA avoidance WAR for 4331 */
+#define DMA_CTRL_RXSINGLE (1 << 6) /* always single buffer */
+#define DMA_CTRL_SDIO_RXGLOM (1 << 7) /* DMA Rx glom is enabled */
+/* receive descriptor table pointer */
+#define D64_RP_LD_MASK 0x00001fff /* last valid descriptor */
-#define D64_RP_LD_MASK 0x00001fff
-
-
-#define D64_RS0_CD_MASK (di->d64_rs0_cd_mask)
-#define D64_RS0_RS_MASK 0xf0000000
+/* receive channel status */
+#define D64_RS0_CD_MASK (di->d64_rs0_cd_mask) /* current descriptor pointer */
+#define D64_RS0_RS_MASK 0xf0000000 /* receive state */
#define D64_RS0_RS_SHIFT 28
-#define D64_RS0_RS_DISABLED 0x00000000
-#define D64_RS0_RS_ACTIVE 0x10000000
-#define D64_RS0_RS_IDLE 0x20000000
-#define D64_RS0_RS_STOPPED 0x30000000
-#define D64_RS0_RS_SUSP 0x40000000
-
-#define D64_RS1_AD_MASK 0x0001ffff
-#define D64_RS1_RE_MASK 0xf0000000
+#define D64_RS0_RS_DISABLED 0x00000000 /* disabled */
+#define D64_RS0_RS_ACTIVE 0x10000000 /* active */
+#define D64_RS0_RS_IDLE 0x20000000 /* idle wait */
+#define D64_RS0_RS_STOPPED 0x30000000 /* stopped */
+#define D64_RS0_RS_SUSP 0x40000000 /* suspend pending */
+
+#define D64_RS1_AD_MASK 0x0001ffff /* active descriptor */
+#define D64_RS1_RE_MASK 0xf0000000 /* receive errors */
#define D64_RS1_RE_SHIFT 28
-#define D64_RS1_RE_NOERR 0x00000000
-#define D64_RS1_RE_DPO 0x10000000
-#define D64_RS1_RE_DFU 0x20000000
-#define D64_RS1_RE_DTE 0x30000000
-#define D64_RS1_RE_DESRE 0x40000000
-#define D64_RS1_RE_COREE 0x50000000
-
-
-#define D64_FA_OFF_MASK 0xffff
-#define D64_FA_SEL_MASK 0xf0000
+#define D64_RS1_RE_NOERR 0x00000000 /* no error */
+#define D64_RS1_RE_DPO 0x10000000 /* descriptor protocol error */
+#define D64_RS1_RE_DFU 0x20000000 /* data fifo overflow */
+#define D64_RS1_RE_DTE 0x30000000 /* data transfer error */
+#define D64_RS1_RE_DESRE 0x40000000 /* descriptor read error */
+#define D64_RS1_RE_COREE 0x50000000 /* core error */
+
+/* fifoaddr */
+#define D64_FA_OFF_MASK 0xffff /* offset */
+#define D64_FA_SEL_MASK 0xf0000 /* select */
#define D64_FA_SEL_SHIFT 16
-#define D64_FA_SEL_XDD 0x00000
-#define D64_FA_SEL_XDP 0x10000
-#define D64_FA_SEL_RDD 0x40000
-#define D64_FA_SEL_RDP 0x50000
-#define D64_FA_SEL_XFD 0x80000
-#define D64_FA_SEL_XFP 0x90000
-#define D64_FA_SEL_RFD 0xc0000
-#define D64_FA_SEL_RFP 0xd0000
-#define D64_FA_SEL_RSD 0xe0000
-#define D64_FA_SEL_RSP 0xf0000
-
-
-#define D64_CTRL_COREFLAGS 0x0ff00000
-#define D64_CTRL1_EOT ((uint32)1 << 28)
-#define D64_CTRL1_IOC ((uint32)1 << 29)
-#define D64_CTRL1_EOF ((uint32)1 << 30)
-#define D64_CTRL1_SOF ((uint32)1 << 31)
-
-
-#define D64_CTRL2_BC_MASK 0x00007fff
-#define D64_CTRL2_AE 0x00030000
+#define D64_FA_SEL_XDD 0x00000 /* transmit dma data */
+#define D64_FA_SEL_XDP 0x10000 /* transmit dma pointers */
+#define D64_FA_SEL_RDD 0x40000 /* receive dma data */
+#define D64_FA_SEL_RDP 0x50000 /* receive dma pointers */
+#define D64_FA_SEL_XFD 0x80000 /* transmit fifo data */
+#define D64_FA_SEL_XFP 0x90000 /* transmit fifo pointers */
+#define D64_FA_SEL_RFD 0xc0000 /* receive fifo data */
+#define D64_FA_SEL_RFP 0xd0000 /* receive fifo pointers */
+#define D64_FA_SEL_RSD 0xe0000 /* receive frame status data */
+#define D64_FA_SEL_RSP 0xf0000 /* receive frame status pointers */
+
+/* descriptor control flags 1 */
+#define D64_CTRL_COREFLAGS 0x0ff00000 /* core specific flags */
+#define D64_CTRL1_NOTPCIE ((uint32)1 << 18) /* burst size control */
+#define D64_CTRL1_EOT ((uint32)1 << 28) /* end of descriptor table */
+#define D64_CTRL1_IOC ((uint32)1 << 29) /* interrupt on completion */
+#define D64_CTRL1_EOF ((uint32)1 << 30) /* end of frame */
+#define D64_CTRL1_SOF ((uint32)1 << 31) /* start of frame */
+
+/* descriptor control flags 2 */
+#define D64_CTRL2_BC_MASK 0x00007fff /* buffer byte count; real data length must be <= 16KB */
+#define D64_CTRL2_AE 0x00030000 /* address extension bits */
#define D64_CTRL2_AE_SHIFT 16
-#define D64_CTRL2_PARITY 0x00040000
-
+#define D64_CTRL2_PARITY 0x00040000 /* parity bit */
+/* control flags in the range [27:20] are core-specific and not defined here */
#define D64_CTRL_CORE_MASK 0x0ff00000
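A hedged sketch of how the ctrl1/ctrl2 flag groups above fill one dma64 descriptor for a frame that fits in a single buffer (illustrative, not the driver's posting path):

/* Illustrative only: byte count goes in ctrl2, frame/ring flags in ctrl1. */
static inline void d64_fill_dd_sketch(dma64dd_t *dd, uint32 pa_lo, uint32 pa_hi,
                                      uint32 len, int last_in_ring)
{
	dd->addrlow = pa_lo;
	dd->addrhigh = pa_hi;
	dd->ctrl2 = len & D64_CTRL2_BC_MASK;
	dd->ctrl1 = D64_CTRL1_SOF | D64_CTRL1_EOF | D64_CTRL1_IOC |
	            (last_in_ring ? D64_CTRL1_EOT : 0);
}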
-#define D64_RX_FRM_STS_LEN 0x0000ffff
-#define D64_RX_FRM_STS_OVFL 0x00800000
-#define D64_RX_FRM_STS_DSCRCNT 0x0f000000
-#define D64_RX_FRM_STS_DATATYPE 0xf0000000
-
+#define D64_RX_FRM_STS_LEN 0x0000ffff /* frame length mask */
+#define D64_RX_FRM_STS_OVFL 0x00800000 /* RxOverFlow */
+#define D64_RX_FRM_STS_DSCRCNT 0x0f000000 /* no. of descriptors used - 1, d11corerev >= 22 */
+#define D64_RX_FRM_STS_DATATYPE 0xf0000000 /* core-dependent data type */
+/* receive frame status */
typedef volatile struct {
uint16 len;
uint16 flags;
} dma_rxh_t;
-#endif
+#endif /* _sbhnddma_h_ */
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: sbpcmcia.h 381094 2013-01-25 04:45:06Z $
+ * $Id: sbpcmcia.h 446298 2014-01-03 11:30:17Z $
*/
#ifndef _SBPCMCIA_H
#define _SBPCMCIA_H
+/* All the addresses that are offsets in attribute space are divided
+ * by two to account for the fact that odd bytes are invalid in
+ * attribute space and our read/write routines make the space appear
+ * as if they didn't exist. Still we want to show the original numbers
+ * as documented in the hnd_pcmcia core manual.
+ */
-
-
+/* PCMCIA Function Configuration Registers */
#define PCMCIA_FCR (0x700 / 2)
#define FCR0_OFF 0
#define PCMCIA_FCR2 (0x780 / 2)
#define PCMCIA_FCR3 (0x7c0 / 2)
-
+/* Standard PCMCIA FCR registers */
#define PCMCIA_COR 0
#define F2_MEMOFF 0x3000
#define F3_MEMOFF 0x4000
-
+/* Memory base in the function fcr's */
#define MEM_ADDR0 (0x728 / 2)
#define MEM_ADDR1 (0x72a / 2)
#define MEM_ADDR2 (0x72c / 2)
-
+/* PCMCIA base plus Srom access in fcr0: */
#define PCMCIA_ADDR0 (0x072e / 2)
#define PCMCIA_ADDR1 (0x0730 / 2)
#define PCMCIA_ADDR2 (0x0732 / 2)
#define SROM_DATAH (0x073a / 2)
#define SROM_ADDRL (0x073c / 2)
#define SROM_ADDRH (0x073e / 2)
-#define SROM_INFO2 (0x0772 / 2)
-#define SROM_INFO (0x07be / 2)
-
+#define SROM_INFO2 (0x0772 / 2) /* Corerev >= 2 && <= 5 */
+#define SROM_INFO (0x07be / 2) /* Corerev >= 6 */
+/* Values for srom_cs: */
#define SROM_IDLE 0
#define SROM_WRITE 1
#define SROM_READ 2
#define SROM_WDS 7
#define SROM_DONE 8
-
+/* Fields in srom_info: */
#define SRI_SZ_MASK 0x03
#define SRI_BLANK 0x04
#define SRI_OTP 0x80
+#if !defined(LINUX_POSTMOGRIFY_REMOVAL)
+/* CIS stuff */
+
+/* The CIS stops where the FCRs start */
+#define CIS_SIZE PCMCIA_FCR
+#define CIS_SIZE_12K 1154 /* Maximum h/w + s/w sub region size for 12k OTP */
+
+/* CIS tuple length field max */
+#define CIS_TUPLE_LEN_MAX 0xff
+
+/* Standard tuples we know about */
+
+#define CISTPL_NULL 0x00
+#define CISTPL_VERS_1 0x15 /* CIS ver, manf, dev & ver strings */
+#define CISTPL_MANFID 0x20 /* Manufacturer and device id */
+#define CISTPL_FUNCID 0x21 /* Function identification */
+#define CISTPL_FUNCE 0x22 /* Function extensions */
+#define CISTPL_CFTABLE 0x1b /* Config table entry */
+#define CISTPL_END 0xff /* End of the CIS tuple chain */
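The standard tuples above chain as (type, length, data...) records; a minimal walk sketch, assuming the conventional CIS layout in which CISTPL_NULL carries no length byte:

/* Illustrative only: advance to the next CIS tuple, or NULL at end/error. */
static inline const unsigned char *cis_next_tuple(const unsigned char *p,
                                                  const unsigned char *end)
{
	if (p >= end || p[0] == CISTPL_END)
		return NULL;
	if (p[0] == CISTPL_NULL)
		return p + 1;			/* no length byte */
	if (p + 2 > end || p[1] > end - p - 2)
		return NULL;			/* truncated tuple */
	return p + 2 + p[1];			/* skip type, length, data */
}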
+
+/* Function identifier provides context for the function extensions tuple */
+#define CISTPL_FID_SDIO 0x0c /* Extensions defined by SDIO spec */
+
+/* Function extensions for LANs (assumed for extensions other than SDIO) */
+#define LAN_TECH 1 /* Technology type */
+#define LAN_SPEED 2 /* Raw bit rate */
+#define LAN_MEDIA 3 /* Transmission media */
+#define LAN_NID 4 /* Node identification (aka MAC addr) */
+#define LAN_CONN 5 /* Connector standard */
+
+
+/* CFTable */
+#define CFTABLE_REGWIN_2K 0x08 /* 2k reg window size */
+#define CFTABLE_REGWIN_4K 0x10 /* 4k reg window size */
+#define CFTABLE_REGWIN_8K 0x20 /* 8k reg window size */
+
+/* Vendor unique tuples are 0x80-0x8f. Within Broadcom we'll
+ * take one for HNBU, and use "extensions" (a la FUNCE) within it.
+ */
+
+#define CISTPL_BRCM_HNBU 0x80
+
+/* Subtypes of BRCM_HNBU: */
+
+#define HNBU_SROMREV 0x00 /* A byte with sromrev, 1 if not present */
+#define HNBU_CHIPID 0x01 /* Two 16bit values: PCI vendor & device id */
+#define HNBU_BOARDREV 0x02 /* One byte board revision */
+#define HNBU_PAPARMS 0x03 /* PA parameters: 8 (sromrev == 1)
+ * or 9 (sromrev > 1) bytes
+ */
+#define HNBU_OEM 0x04 /* Eight bytes OEM data (sromrev == 1) */
+#define HNBU_CC 0x05 /* Default country code (sromrev == 1) */
+#define HNBU_AA 0x06 /* Antennas available */
+#define HNBU_AG 0x07 /* Antenna gain */
+#define HNBU_BOARDFLAGS 0x08 /* board flags (2 or 4 bytes) */
+#define HNBU_LEDS 0x09 /* LED set */
+#define HNBU_CCODE 0x0a /* Country code (2 bytes ascii + 1 byte cctl)
+ * in rev 2
+ */
+#define HNBU_CCKPO 0x0b /* 2 byte cck power offsets in rev 3 */
+#define HNBU_OFDMPO 0x0c /* 4 byte 11g ofdm power offsets in rev 3 */
+#define HNBU_GPIOTIMER 0x0d /* 2 bytes with on/off values in rev 3 */
+#define HNBU_PAPARMS5G 0x0e /* 5G PA params */
+#define HNBU_ANT5G 0x0f /* 4328 5G antennas available/gain */
+#define HNBU_RDLID 0x10 /* 2 byte USB remote downloader (RDL) product Id */
+#define HNBU_RSSISMBXA2G 0x11 /* 4328 2G RSSI mid pt sel & board switch arch,
+ * 2 bytes, rev 3.
+ */
+#define HNBU_RSSISMBXA5G 0x12 /* 4328 5G RSSI mid pt sel & board switch arch,
+ * 2 bytes, rev 3.
+ */
+#define HNBU_XTALFREQ 0x13 /* 4 byte Crystal frequency in kilohertz */
+#define HNBU_TRI2G 0x14 /* 4328 2G TR isolation, 1 byte */
+#define HNBU_TRI5G 0x15 /* 4328 5G TR isolation, 3 bytes */
+#define HNBU_RXPO2G 0x16 /* 4328 2G RX power offset, 1 byte */
+#define HNBU_RXPO5G 0x17 /* 4328 5G RX power offset, 1 byte */
+#define HNBU_BOARDNUM 0x18 /* board serial number, independent of mac addr */
+#define HNBU_MACADDR 0x19 /* mac addr override for the standard CIS LAN_NID */
+#define HNBU_RDLSN 0x1a /* 2 bytes; serial # advertised in USB descriptor */
+#define HNBU_BOARDTYPE 0x1b /* 2 bytes; boardtype */
+#define HNBU_LEDDC 0x1c /* 2 bytes; LED duty cycle */
+#define HNBU_HNBUCIS 0x1d /* what follows is proprietary HNBU CIS format */
+#define HNBU_PAPARMS_SSLPNPHY 0x1e /* SSLPNPHY PA params */
+#define HNBU_RSSISMBXA2G_SSLPNPHY 0x1f /* SSLPNPHY RSSI mid pt sel & board switch arch */
+#define HNBU_RDLRNDIS 0x20 /* 1 byte; 1 = RDL advertises RNDIS config */
+#define HNBU_CHAINSWITCH 0x21 /* 2 byte; txchain, rxchain */
+#define HNBU_REGREV 0x22 /* 1 byte; */
+#define HNBU_FEM 0x23 /* 2 or 4 byte: 11n frontend specification */
+#define HNBU_PAPARMS_C0 0x24 /* 8 or 30 bytes: 11n pa parameter for chain 0 */
+#define HNBU_PAPARMS_C1 0x25 /* 8 or 30 bytes: 11n pa parameter for chain 1 */
+#define HNBU_PAPARMS_C2 0x26 /* 8 or 30 bytes: 11n pa parameter for chain 2 */
+#define HNBU_PAPARMS_C3 0x27 /* 8 or 30 bytes: 11n pa parameter for chain 3 */
+#define HNBU_PO_CCKOFDM 0x28 /* 6 or 18 bytes: cck2g/ofdm2g/ofdm5g power offset */
+#define HNBU_PO_MCS2G 0x29 /* 8 bytes: mcs2g power offset */
+#define HNBU_PO_MCS5GM 0x2a /* 8 bytes: mcs5g mid band power offset */
+#define HNBU_PO_MCS5GLH 0x2b /* 16 bytes: mcs5g low-high band power offset */
+#define HNBU_PO_CDD 0x2c /* 2 bytes: cdd2g/5g power offset */
+#define HNBU_PO_STBC 0x2d /* 2 bytes: stbc2g/5g power offset */
+#define HNBU_PO_40M 0x2e /* 2 bytes: 40Mhz channel 2g/5g power offset */
+#define HNBU_PO_40MDUP 0x2f /* 2 bytes: 40Mhz channel dup 2g/5g power offset */
+
+#define HNBU_RDLRWU 0x30 /* 1 byte; 1 = RDL advertises Remote Wake-up */
+#define HNBU_WPS 0x31 /* 1 byte; GPIO pin for WPS button */
+#define HNBU_USBFS 0x32 /* 1 byte; 1 = USB advertises FS mode only */
+#define HNBU_BRMIN 0x33 /* 4 byte bootloader min resource mask */
+#define HNBU_BRMAX 0x34 /* 4 byte bootloader max resource mask */
+#define HNBU_PATCH 0x35 /* bootloader patch addr(2b) & data(4b) pair */
+#define HNBU_CCKFILTTYPE 0x36 /* CCK digital filter selection options */
+#define HNBU_OFDMPO5G 0x37 /* 4 * 3 = 12 byte 11a ofdm power offsets in rev 3 */
+#define HNBU_ELNA2G 0x38
+#define HNBU_ELNA5G 0x39
+#define HNBU_TEMPTHRESH 0x3A /* 2 bytes
+ * byte1 tempthresh
+ * byte2 period(msb 4 bits) | hysteresis(lsb 4 bits)
+ */
+#define HNBU_UUID 0x3B /* 16 Bytes Hex */
+
+#define HNBU_USBEPNUM 0x40 /* USB endpoint numbers */
+
+/* POWER PER RATE for SROM V9 */
+#define HNBU_CCKBW202GPO 0x41 /* 2 bytes each
+ * CCK Power offsets for 20 MHz rates (11, 5.5, 2, 1Mbps)
+ * cckbw202gpo cckbw20ul2gpo
+ */
+
+#define HNBU_LEGOFDMBW202GPO 0x42 /* 4 bytes each
+ * OFDM power offsets for 20 MHz Legacy rates
+ * (54, 48, 36, 24, 18, 12, 9, 6 Mbps)
+ * legofdmbw202gpo legofdmbw20ul2gpo
+ */
+
+#define HNBU_LEGOFDMBW205GPO 0x43 /* 4 bytes each
+ * 5G band: OFDM power offsets for 20 MHz Legacy rates
+ * (54, 48, 36, 24, 18, 12, 9, 6 Mbps)
+ * low subband : legofdmbw205glpo legofdmbw20ul2glpo
+ * mid subband :legofdmbw205gmpo legofdmbw20ul2gmpo
+ * high subband :legofdmbw205ghpo legofdmbw20ul2ghpo
+ */
+
+#define HNBU_MCS2GPO 0x44 /* 4 bytes each
+ * mcs 0-7 power-offset. LSB nibble: m0, MSB nibble: m7
+ * mcsbw202gpo mcsbw20ul2gpo mcsbw402gpo
+ */
+#define HNBU_MCS5GLPO 0x45 /* 4 bytes each
+ * 5G low subband mcs 0-7 power-offset.
+ * LSB nibble: m0, MSB nibble: m7
+ * mcsbw205glpo mcsbw20ul5glpo mcsbw405glpo
+ */
+#define HNBU_MCS5GMPO 0x46 /* 4 bytes each
+ * 5G mid subband mcs 0-7 power-offset.
+ * LSB nibble: m0, MSB nibble: m7
+ * mcsbw205gmpo mcsbw20ul5gmpo mcsbw405gmpo
+ */
+#define HNBU_MCS5GHPO 0x47 /* 4 bytes each
+ * 5G high subband mcs 0-7 power-offset.
+ * LSB nibble: m0, MSB nibble: m7
+ * mcsbw205ghpo mcsbw20ul5ghpo mcsbw405ghpo
+ */
+#define HNBU_MCS32PO 0x48 /* 2 bytes total
+ * mcs-32 power offset for each band/subband.
+ * LSB nibble: 2G band, MSB nibble:
+ * mcs322ghpo, mcs325gmpo, mcs325glpo, mcs322gpo
+ */
+#define HNBU_LEG40DUPPO 0x49 /* 2 bytes total
+ * Additional power offset for Legacy Dup40 transmissions.
+ * Applied in addition to legofdmbw20ulXpo, X=2g, 5gl, 5gm, or 5gh.
+ * LSB nibble: 2G band, MSB nibble: 5G band high subband.
+ * leg40dup5ghpo, leg40dup5gmpo, leg40dup5glpo, leg40dup2gpo
+ */
+
+#define HNBU_PMUREGS 0x4a /* Variable length (5 bytes for each register)
+ * The setting of the ChipCtrl, PLL, RegulatorCtrl, Up/Down Timer and
+ * ResourceDependency Table registers.
+ */
+
+#define HNBU_PATCH2 0x4b /* bootloader TCAM patch addr(4b) & data(4b) pair .
+ * This is required for socram rev 15 onwards.
+ */
+
+#define HNBU_USBRDY 0x4c /* Variable length (up to 5 bytes)
+ * This is to indicate the USB/HSIC host controller
+ * that the device is ready for enumeration.
+ */
+
+#define HNBU_USBREGS 0x4d /* Variable length
+ * The setting of the devcontrol, HSICPhyCtrl1 and HSICPhyCtrl2
+ * registers during the USB initialization.
+ */
+
+#define HNBU_BLDR_TIMEOUT 0x4e /* 2 bytes used for HSIC bootloader to reset chip
+ * on connect timeout.
+ * The Delay after USBConnect for timeout till dongle receives
+ * get_descriptor request.
+ */
+#define HNBU_USBFLAGS 0x4f
+#define HNBU_PATCH_AUTOINC 0x50
+#define HNBU_MDIO_REGLIST 0x51
+#define HNBU_MDIOEX_REGLIST 0x52
+/* Unified OTP: tuple to embed USB manfid inside SDIO CIS */
+#define HNBU_UMANFID 0x53
+#define HNBU_PUBKEY 0x54 /* 128 byte; public key to validate downloaded FW */
+#define HNBU_WOWLGPIO 0x55 /* 1 byte bit 7 initial polarity, bit 6..0 gpio pin */
+#define HNBU_MUXENAB 0x56 /* 1 byte to enable mux options */
+#define HNBU_GCI_CCR 0x57 /* GCI Chip control register */
+
+#define HNBU_FEM_CFG 0x58 /* FEM config */
+#define HNBU_ACPA_C0 0x59 /* ACPHY PA parameters: chain 0 */
+#define HNBU_ACPA_C1 0x5a /* ACPHY PA parameters: chain 1 */
+#define HNBU_ACPA_C2 0x5b /* ACPHY PA parameters: chain 2 */
+#define HNBU_MEAS_PWR 0x5c
+#define HNBU_PDOFF 0x5d
+#define HNBU_ACPPR_2GPO 0x5e /* ACPHY Power-per-rate 2gpo */
+#define HNBU_ACPPR_5GPO 0x5f /* ACPHY Power-per-rate 5gpo */
+#define HNBU_ACPPR_SBPO 0x60 /* ACPHY Power-per-rate sbpo */
+#define HNBU_NOISELVL 0x61
+#define HNBU_RXGAIN_ERR 0x62
+#define HNBU_AGBGA 0x63
+#define HNBU_USBDESC_COMPOSITE 0x64 /* USB WLAN/BT composite descriptor */
+#define HNBU_PATCH_AUTOINC8 0x65 /* Auto increment patch entry for 8 byte patching */
+#define HNBU_PATCH8 0x66 /* Patch entry for 8 byte patching */
+#define HNBU_ACRXGAINS_C0 0x67 /* ACPHY rxgains: chain 0 */
+#define HNBU_ACRXGAINS_C1 0x68 /* ACPHY rxgains: chain 1 */
+#define HNBU_ACRXGAINS_C2 0x69 /* ACPHY rxgains: chain 2 */
+#define HNBU_TXDUTY 0x6a /* Tx duty cycle for ACPHY 5g 40/80 Mhz */
+#define HNBU_USBUTMI_CTL 0x6b /* 2 byte USB UTMI/LDO Control */
+#define HNBU_PDOFF_2G 0x6c
+#define HNBU_USBSSPHY_UTMI_CTL0 0x6d /* 4 byte USB SSPHY UTMI Control */
+#define HNBU_USBSSPHY_UTMI_CTL1 0x6e /* 4 byte USB SSPHY UTMI Control */
+#define HNBU_USBSSPHY_UTMI_CTL2 0x6f /* 4 byte USB SSPHY UTMI Control */
+#define HNBU_USBSSPHY_SLEEP0 0x70 /* 2 byte USB SSPHY sleep */
+#define HNBU_USBSSPHY_SLEEP1 0x71 /* 2 byte USB SSPHY sleep */
+#define HNBU_USBSSPHY_SLEEP2 0x72 /* 2 byte USB SSPHY sleep */
+#define HNBU_USBSSPHY_SLEEP3 0x73 /* 2 byte USB SSPHY sleep */
+#define HNBU_USBSSPHY_MDIO 0x74 /* USB SSPHY INIT regs setting */
+#define HNBU_USB30PHY_NOSS 0x75 /* USB30 NO Super Speed */
+#define HNBU_USB30PHY_U1U2 0x76 /* USB30 PHY U1U2 Enable */
+#define HNBU_USB30PHY_REGS 0x77 /* USB30 PHY REGs update */
+
+#define HNBU_SROM3SWRGN 0x80 /* 78 bytes; srom rev 3 s/w region without crc8
+ * plus extra info appended.
+ */
+#define HNBU_RESERVED 0x81 /* Reserved for non-BRCM post-mfg additions */
+#define HNBU_CUSTOM1 0x82 /* 4 byte; For non-BRCM post-mfg additions */
+#define HNBU_CUSTOM2 0x83 /* Reserved; For non-BRCM post-mfg additions */
+#define HNBU_ACPAPARAM 0x84 /* ACPHY PAPARAM */
+#define HNBU_ACPA_CCK 0x86 /* ACPHY PA trimming parameters: CCK */
+#define HNBU_ACPA_40 0x87 /* ACPHY PA trimming parameters: 40 */
+#define HNBU_ACPA_80 0x88 /* ACPHY PA trimming parameters: 80 */
+#define HNBU_ACPA_4080 0x89 /* ACPHY PA trimming parameters: 40/80 */
+#define HNBU_SUBBAND5GVER 0x8a /* subband5gver */
+#define HNBU_PAPARAMBWVER 0x8b /* paparambwver */
+
+#define HNBU_MCS5Gx1PO 0x8c
+#define HNBU_ACPPR_SB8080_PO 0x8d
-#define SBTML_INT_ACK 0x40000
-#define SBTML_INT_EN 0x20000
+#endif /* !defined(LINUX_POSTMOGRIFY_REMOVAL) */
+/* sbtmstatelow */
+#define SBTML_INT_ACK 0x40000 /* ack the sb interrupt */
+#define SBTML_INT_EN 0x20000 /* enable sb interrupt */
-#define SBTMH_INT_STATUS 0x40000
+/* sbtmstatehigh */
+#define SBTMH_INT_STATUS 0x40000 /* sb interrupt status */
-#endif
+#endif /* _SBPCMCIA_H */
*
* $Copyright Open 2005 Broadcom Corporation$
*
- * $Id: sbsdpcmdev.h 336848 2012-06-05 11:28:07Z $
+ * $Id: sbsdpcmdev.h 416730 2013-08-06 09:33:19Z $
*/
#ifndef _sbsdpcmdev_h_
/* HW frame tag */
#define SDPCM_FRAMETAG_LEN 4 /* HW frametag: 2 bytes len, 2 bytes check val */
+#if !defined(NDISVER) || (NDISVER < 0x0630)
#define SDPCM_HWEXT_LEN 8
+#else
+#define SDPCM_HWEXT_LEN 0
+#endif /* !defined(NDISVER) || (NDISVER < 0x0630) */
#endif /* _sbsdpcmdev_h_ */
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: sbsocram.h 271781 2011-07-13 20:00:06Z $
+ * $Id: sbsocram.h 481602 2014-05-29 22:43:34Z $
*/
#ifndef _SBSOCRAM_H
#ifndef _LANGUAGE_ASSEMBLY
-
+/* cpp contortions to concatenate w/arg prescan */
#ifndef PAD
#define _PADLINE(line) pad ## line
#define _XSTR(line) _PADLINE(line)
#define PAD _XSTR(__LINE__)
-#endif
-
+#endif /* PAD */
+/* Memcsocram core registers */
typedef volatile struct sbsocramregs {
uint32 coreinfo;
uint32 bwalloc;
uint32 bankidx;
uint32 standbyctrl;
- uint32 errlogstatus;
- uint32 errlogaddr;
-
+ uint32 errlogstatus; /* rev 6 */
+ uint32 errlogaddr; /* rev 6 */
+ /* used for patching rev 3 & 5 */
uint32 cambankidx;
uint32 cambankstandbyctrl;
uint32 cambankpatchctrl;
uint32 cambankdatareg;
uint32 cambankmaskreg;
uint32 PAD[1];
- uint32 bankinfo;
- uint32 PAD[15];
+ uint32 bankinfo; /* corev 8 */
+ uint32 bankpda;
+ uint32 PAD[14];
uint32 extmemconfig;
uint32 extmemparitycsr;
uint32 extmemparityerrdata;
uint32 extmemwrctrlandsize;
uint32 PAD[84];
uint32 workaround;
- uint32 pwrctl;
+ uint32 pwrctl; /* corerev >= 2 */
uint32 PAD[133];
- uint32 sr_control;
- uint32 sr_status;
- uint32 sr_address;
- uint32 sr_data;
+ uint32 sr_control; /* corerev >= 15 */
+ uint32 sr_status; /* corerev >= 15 */
+ uint32 sr_address; /* corerev >= 15 */
+ uint32 sr_data; /* corerev >= 15 */
} sbsocramregs_t;
-#endif
-
+#endif /* _LANGUAGE_ASSEMBLY */
+/* Register offsets */
#define SR_COREINFO 0x00
#define SR_BWALLOC 0x04
#define SR_BISTSTAT 0x0c
#define SR_BANKSTBYCTL 0x14
#define SR_PWRCTL 0x1e8
-
-#define SRCI_PT_MASK 0x00070000
+/* Coreinfo register */
+#define SRCI_PT_MASK 0x00070000 /* corerev >= 6; port type[18:16] */
#define SRCI_PT_SHIFT 16
-
+/* port types : SRCI_PT_<processorPT>_<backplanePT> */
#define SRCI_PT_OCP_OCP 0
#define SRCI_PT_AXI_OCP 1
#define SRCI_PT_ARM7AHB_OCP 2
#define SRCI_PT_CM3AHB_OCP 3
#define SRCI_PT_AXI_AXI 4
#define SRCI_PT_AHB_AXI 5
-
+/* corerev >= 3 */
#define SRCI_LSS_MASK 0x00f00000
#define SRCI_LSS_SHIFT 20
#define SRCI_LRS_MASK 0x0f000000
#define SRCI_LRS_SHIFT 24
-
+/* In corerev 0, the memory size is 2 to the power of
+ * (the base of 16, plus the contents of the memsize field, plus 1).
+ */
#define SRCI_MS0_MASK 0xf
#define SR_MS0_BASE 16
-
+/*
+ * In corerev 1 the bank size is 2 to the power of (the bank size field plus 14),
+ * and the memory size is the number of banks times the bank size.
+ * The same applies to rom size.
+ */
#define SRCI_ROMNB_MASK 0xf000
#define SRCI_ROMNB_SHIFT 12
#define SRCI_ROMBSZ_MASK 0xf00
#define SR_BSZ_BASE 14
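Following the corerev 1 rule in the comment above, a hedged sketch; the bank-count and bank-size field values are taken as parameters so no undocumented masks are assumed:

/* Illustrative only: corerev 1 bank size is 1 << (SR_BSZ_BASE + field),
 * and total memory is banks * banksize; rom size follows the same rule.
 */
static inline uint32 socram_rev1_memsize_sketch(uint32 nbanks, uint32 banksz_field)
{
	return nbanks * (1u << (SR_BSZ_BASE + banksz_field));
}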
-
+/* Standby control register */
#define SRSC_SBYOVR_MASK 0x80000000
#define SRSC_SBYOVR_SHIFT 31
#define SRSC_SBYOVRVAL_MASK 0x60000000
#define SRSC_SBYOVRVAL_SHIFT 29
-#define SRSC_SBYEN_MASK 0x01000000
+#define SRSC_SBYEN_MASK 0x01000000 /* rev >= 3 */
#define SRSC_SBYEN_SHIFT 24
-
-#define SRPC_PMU_STBYDIS_MASK 0x00000010
+/* Power control register */
+#define SRPC_PMU_STBYDIS_MASK 0x00000010 /* rev >= 3 */
#define SRPC_PMU_STBYDIS_SHIFT 4
#define SRPC_STBYOVRVAL_MASK 0x00000008
#define SRPC_STBYOVRVAL_SHIFT 3
#define SRPC_STBYOVR_MASK 0x00000007
#define SRPC_STBYOVR_SHIFT 0
-
+/* Extra core capability register */
#define SRECC_NUM_BANKS_MASK 0x000000F0
#define SRECC_NUM_BANKS_SHIFT 4
#define SRECC_BANKSIZE_MASK 0x0000000F
#define SRECC_BANKSIZE(value) (1 << (value))
-
+/* CAM bank patch control */
#define SRCBPC_PATCHENABLE 0x80000000
#define SRP_ADDRESS 0x0001FFFC
#define SRP_VALID 0x8000
-
+/* CAM bank command reg */
#define SRCMD_WRITE 0x00020000
#define SRCMD_READ 0x00010000
#define SRCMD_DONE 0x80000000
#define SRCMD_DONE_DLY 1000
-
+/* bankidx and bankinfo reg defines corerev >= 8 */
#define SOCRAM_BANKINFO_SZMASK 0x7f
#define SOCRAM_BANKIDX_ROM_MASK 0x100
#define SOCRAM_BANKIDX_MEMTYPE_SHIFT 8
-
+/* socram bankinfo memtype */
#define SOCRAM_MEMTYPE_RAM 0
#define SOCRAM_MEMTYPE_R0M 1
#define SOCRAM_MEMTYPE_DEVRAM 2
#define SOCRAM_BANKINFO_STDBY_MASK 0x400
#define SOCRAM_BANKINFO_STDBY_TIMER 0x800
-
+/* bankinfo rev >= 10 */
#define SOCRAM_BANKINFO_DEVRAMSEL_SHIFT 13
#define SOCRAM_BANKINFO_DEVRAMSEL_MASK 0x2000
#define SOCRAM_BANKINFO_DEVRAMPRO_SHIFT 14
#define SOCRAM_BANKINFO_DEVRAMREMAP_SHIFT 24
#define SOCRAM_BANKINFO_DEVRAMREMAP_MASK 0x01000000
-
+/* extracoreinfo register */
#define SOCRAM_DEVRAMBANK_MASK 0xF000
#define SOCRAM_DEVRAMBANK_SHIFT 12
-
+/* bank info to calculate bank size */
#define SOCRAM_BANKINFO_SZBASE 8192
-#define SOCRAM_BANKSIZE_SHIFT 13
+#define SOCRAM_BANKSIZE_SHIFT 13 /* SOCRAM_BANKINFO_SZBASE */
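A hedged sketch of reading a bank's size from the corerev >= 8 bankinfo register; the "+ 1" scaling in units of SOCRAM_BANKINFO_SZBASE is an assumption about the size-field encoding, not something stated by this header:

/* Illustrative only: bank bytes = (size field + 1) * 8 KB (assumed encoding). */
static inline uint32 socram_bank_bytes_sketch(uint32 bankinfo)
{
	return ((bankinfo & SOCRAM_BANKINFO_SZMASK) + 1) * SOCRAM_BANKINFO_SZBASE;
}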
-#endif
+#endif /* _SBSOCRAM_H */
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: sdio.h 308973 2012-01-18 04:19:34Z $
+ * $Id: sdio.h 416730 2013-08-06 09:33:19Z $
*/
#ifndef _SDIO_H
#define _SDIO_H
+#ifdef BCMSDIO
/* CCCR structure for function 0 */
typedef volatile struct {
#define SDIOD_CCCR_INTR_EXTN 0x16
/* Broadcom extensions (corerev >= 1) */
-#define SDIOD_CCCR_BRCM_CARDCAP 0xf0
+#define SDIOD_CCCR_BRCM_CARDCAP 0xf0
#define SDIOD_CCCR_BRCM_CARDCAP_CMD14_SUPPORT 0x02
#define SDIOD_CCCR_BRCM_CARDCAP_CMD14_EXT 0x04
#define SDIOD_CCCR_BRCM_CARDCAP_CMD_NODEC 0x08
/* cccr_sdio_rev */
#define SDIO_REV_SDIOID_MASK 0xf0 /* SDIO spec revision number */
#define SDIO_REV_CCCRID_MASK 0x0f /* CCCR format version number */
+#define SDIO_SPEC_VERSION_3_0 0x40 /* SDIO spec version 3.0 */
/* sd_rev */
#define SD_REV_PHY_MASK 0x0f /* SD format version number */
/* speed_control (control device entry into high-speed clocking mode) */
#define SDIO_SPEED_SHS 0x01 /* supports high-speed [clocking] mode (RO) */
#define SDIO_SPEED_EHS 0x02 /* enable high-speed [clocking] mode (RW) */
+#define SDIO_SPEED_UHSI_DDR50 0x08
/* for setting bus speed in card: 0x13h */
#define SDIO_BUS_SPEED_UHSISEL_M BITFIELD_MASK(3)
/* command issue options */
#define CMD_OPTION_DEFAULT 0
#define CMD_OPTION_TUNING 1
+
+#endif /* def BCMSDIO */
#endif /* _SDIO_H */
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: siutils.h 385510 2013-02-15 21:02:07Z $
+ * $Id: siutils.h 481602 2014-05-29 22:43:34Z $
*/
#ifndef _siutils_h_
#define _siutils_h_
+#ifdef SR_DEBUG
+#include "wlioctl.h"
+#endif /* SR_DEBUG */
+
+/*
+ * Data structure to export all chip specific common variables
+ * public (read-only) portion of siutils handle returned by si_attach()/si_kattach()
+ */
struct si_pub {
- uint socitype;
-
- uint bustype;
- uint buscoretype;
- uint buscorerev;
- uint buscoreidx;
- int ccrev;
- uint32 cccaps;
- uint32 cccaps_ext;
- int pmurev;
- uint32 pmucaps;
- uint boardtype;
- uint boardrev;
- uint boardvendor;
- uint boardflags;
- uint boardflags2;
- uint chip;
- uint chiprev;
- uint chippkg;
- uint32 chipst;
- bool issim;
- uint socirev;
+ uint socitype; /* SOCI_SB, SOCI_AI */
+
+ uint bustype; /* SI_BUS, PCI_BUS */
+ uint buscoretype; /* PCI_CORE_ID, PCIE_CORE_ID, PCMCIA_CORE_ID */
+ uint buscorerev; /* buscore rev */
+ uint buscoreidx; /* buscore index */
+ int ccrev; /* chip common core rev */
+ uint32 cccaps; /* chip common capabilities */
+ uint32 cccaps_ext; /* chip common capabilities extension */
+ int pmurev; /* pmu core rev */
+ uint32 pmucaps; /* pmu capabilities */
+ uint boardtype; /* board type */
+ uint boardrev; /* board rev */
+ uint boardvendor; /* board vendor */
+ uint boardflags; /* board flags */
+ uint boardflags2; /* board flags2 */
+ uint chip; /* chip number */
+ uint chiprev; /* chip revision */
+ uint chippkg; /* chip package option */
+ uint32 chipst; /* chip status */
+ bool issim; /* chip is in simulation or emulation */
+ uint socirev; /* SOC interconnect rev */
bool pci_pr32414;
};
-
+/* for the HIGH_ONLY driver, the si_t must be writable to allow state sync from BMAC to the HIGH driver;
+ * for the monolithic driver, it is read-only to prevent accidental change
+ */
typedef const struct si_pub si_t;
-
-
-#define SI_OSH NULL
+/*
+ * Many of the routines below take an 'sih' handle as their first arg.
+ * Allocate this by calling si_attach(). Free it by calling si_detach().
+ * At any one time, the sih is logically focused on one particular si core
+ * (the "current core").
+ * Use si_setcore() or si_setcoreidx() to change the association to another core.
+ */
+#define SI_OSH NULL /* Use for si_kattach when no osh is available */
#define BADIDX (SI_MAXCORES + 1)
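A hedged sketch of the handle lifecycle the comment above describes; SI_BUS is assumed to be one of the bustype values named in the si_pub comment, and the prototypes used here appear further down in this header:

/* Illustrative only: attach, focus on a core, then detach. */
static inline int si_lifecycle_sketch(osl_t *osh, void *regs, void *sdh,
                                      char **vars, uint *varsz)
{
	si_t *sih = si_attach(0, osh, regs, SI_BUS, sdh, vars, varsz);
	if (sih == NULL)
		return -1;
	(void)si_setcoreidx(sih, 0);	/* make core 0 the current core */
	si_detach(sih);
	return 0;
}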
+/* clkctl xtal what flags */
+#define XTAL 0x1 /* primary crystal oscillator (2050) */
+#define PLL 0x2 /* main chip pll */
-#define XTAL 0x1
-#define PLL 0x2
-
-
-#define CLK_FAST 0
-#define CLK_DYNAMIC 2
-
-
-#define GPIO_DRV_PRIORITY 0
-#define GPIO_APP_PRIORITY 1
-#define GPIO_HI_PRIORITY 2
+/* clkctl clk mode */
+#define CLK_FAST 0 /* force fast (pll) clock */
+#define CLK_DYNAMIC 2 /* enable dynamic clock control */
+/* GPIO usage priorities */
+#define GPIO_DRV_PRIORITY 0 /* Driver */
+#define GPIO_APP_PRIORITY 1 /* Application */
+#define GPIO_HI_PRIORITY 2 /* Highest priority. Ignore GPIO reservation */
+/* GPIO pull up/down */
#define GPIO_PULLUP 0
#define GPIO_PULLDN 1
+/* GPIO event regtype */
+#define GPIO_REGEVT 0 /* GPIO register event */
+#define GPIO_REGEVT_INTMSK 1 /* GPIO register event int mask */
+#define GPIO_REGEVT_INTPOL 2 /* GPIO register event int polarity */
-#define GPIO_REGEVT 0
-#define GPIO_REGEVT_INTMSK 1
-#define GPIO_REGEVT_INTPOL 2
-
-
-#define SI_DEVPATH_BUFSZ 16
-
+/* device path */
+#define SI_DEVPATH_BUFSZ 16 /* min buffer size in bytes */
+/* SI routine enumeration: to be used by update function with multiple hooks */
#define SI_DOATTACH 1
-#define SI_PCIDOWN 2
-#define SI_PCIUP 3
+#define SI_PCIDOWN 2 /* wireless interface is down */
+#define SI_PCIUP 3 /* wireless interface is up */
-#define ISSIM_ENAB(sih) 0
+#ifdef SR_DEBUG
+#define PMU_RES 31
+#endif /* SR_DEBUG */
+#define ISSIM_ENAB(sih) FALSE
+/* PMU clock/power control */
#if defined(BCMPMUCTL)
#define PMUCTL_ENAB(sih) (BCMPMUCTL)
#else
#define PMUCTL_ENAB(sih) ((sih)->cccaps & CC_CAP_PMU)
#endif
+#define AOB_ENAB(sih) ((sih)->ccrev >= 35 ? \
+ ((sih)->cccaps_ext & CC_CAP_EXT_AOB_PRESENT) : 0)
+/* chipcommon clock/power control (exclusive with PMU's) */
#if defined(BCMPMUCTL) && BCMPMUCTL
#define CCCTL_ENAB(sih) (0)
#define CCPLL_ENAB(sih) (0)
#endif
typedef void (*gpio_handler_t)(uint32 stat, void *arg);
-
+typedef void (*gci_gpio_handler_t)(uint32 stat, void *arg);
+/* External BT Coex enable mask */
#define CC_BTCOEX_EN_MASK 0x01
-
+/* External PA enable mask */
#define GPIO_CTRL_EPA_EN_MASK 0x40
-
+/* WL/BT control enable mask */
#define GPIO_CTRL_5_6_EN_MASK 0x60
#define GPIO_CTRL_7_6_EN_MASK 0xC0
#define GPIO_OUT_7_EN_MASK 0x80
-
+/* CR4 specific defines used by the host driver */
#define SI_CR4_CAP (0x04)
#define SI_CR4_BANKIDX (0x40)
#define SI_CR4_BANKINFO (0x44)
+#define SI_CR4_BANKPDA (0x4C)
#define ARMCR4_TCBBNB_MASK 0xf0
#define ARMCR4_TCBBNB_SHIFT 4
#define ARMCR4_BSZ_MASK 0x3f
#define ARMCR4_BSZ_MULT 8192
-
-
+#include <osl_decl.h>
+/* === exported functions === */
extern si_t *si_attach(uint pcidev, osl_t *osh, void *regs, uint bustype,
void *sdh, char **vars, uint *varsz);
extern si_t *si_kattach(osl_t *osh);
extern void si_detach(si_t *sih);
extern bool si_pci_war16165(si_t *sih);
-
+extern void *
+si_d11_switch_addrbase(si_t *sih, uint coreunit);
extern uint si_corelist(si_t *sih, uint coreid[]);
extern uint si_coreid(si_t *sih);
extern uint si_flag(si_t *sih);
extern void *si_osh(si_t *sih);
extern void si_setosh(si_t *sih, osl_t *osh);
extern uint si_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val);
+extern uint si_pmu_corereg(si_t *sih, uint32 idx, uint regoff, uint mask, uint val);
+extern uint32 *si_corereg_addr(si_t *sih, uint coreidx, uint regoff);
extern void *si_coreregs(si_t *sih);
extern uint si_wrapperreg(si_t *sih, uint32 offset, uint32 mask, uint32 val);
extern uint si_core_wrapperreg(si_t *sih, uint32 coreidx, uint32 offset, uint32 mask, uint32 val);
extern void si_core_cflags_wo(si_t *sih, uint32 mask, uint32 val);
extern uint32 si_core_sflags(si_t *sih, uint32 mask, uint32 val);
extern bool si_iscoreup(si_t *sih);
+extern uint si_numcoreunits(si_t *sih, uint coreid);
+extern uint si_numd11coreunits(si_t *sih);
extern uint si_findcoreidx(si_t *sih, uint coreid, uint coreunit);
extern void *si_setcoreidx(si_t *sih, uint coreidx);
extern void *si_setcore(si_t *sih, uint coreid, uint coreunit);
extern uint si_chip_hostif(si_t *sih);
extern bool si_read_pmu_autopll(si_t *sih);
extern uint32 si_clock(si_t *sih);
-extern uint32 si_alp_clock(si_t *sih);
-extern uint32 si_ilp_clock(si_t *sih);
+extern uint32 si_alp_clock(si_t *sih); /* returns [Hz] units */
+extern uint32 si_ilp_clock(si_t *sih); /* returns [Hz] units */
extern void si_pci_setup(si_t *sih, uint coremask);
extern void si_pcmcia_init(si_t *sih);
extern void si_setint(si_t *sih, int siflag);
extern uint32 si_socram_size(si_t *sih);
extern uint32 si_socdevram_size(si_t *sih);
extern uint32 si_socram_srmem_size(si_t *sih);
+extern void si_socram_set_bankpda(si_t *sih, uint32 bankidx, uint32 bankpda);
extern void si_socdevram(si_t *sih, bool set, uint8 *ennable, uint8 *protect, uint8 *remap);
extern bool si_socdevram_pkg(si_t *sih);
extern bool si_socdevram_remap_isenb(si_t *sih);
extern uint32 si_gpiopull(si_t *sih, bool updown, uint32 mask, uint32 val);
extern uint32 si_gpioevent(si_t *sih, uint regtype, uint32 mask, uint32 val);
extern uint32 si_gpio_int_enable(si_t *sih, bool enable);
+extern void si_gci_uart_init(si_t *sih, osl_t *osh, uint8 seci_mode);
+extern void si_gci_enable_gpio(si_t *sih, uint8 gpio, uint32 mask, uint32 value);
+extern uint8 si_gci_host_wake_gpio_init(si_t *sih);
+extern void si_gci_host_wake_gpio_enable(si_t *sih, uint8 gpio, bool state);
-
+/* GPIO event handlers */
extern void *si_gpio_handler_register(si_t *sih, uint32 e, bool lev, gpio_handler_t cb, void *arg);
extern void si_gpio_handler_unregister(si_t *sih, void* gpioh);
extern void si_gpio_handler_process(si_t *sih);
+/* GCI interrupt handlers */
+extern void si_gci_handler_process(si_t *sih);
+/* GCI GPIO event handlers */
+extern void *si_gci_gpioint_handler_register(si_t *sih, uint8 gpio, uint8 sts,
+ gci_gpio_handler_t cb, void *arg);
+extern void si_gci_gpioint_handler_unregister(si_t *sih, void* gci_i);
+extern uint8 si_gci_gpio_status(si_t *sih, uint8 gci_gpio, uint8 mask, uint8 value);
+
+/* Wake-on-wireless-LAN (WOWL) */
extern bool si_pci_pmecap(si_t *sih);
-struct osl_info;
extern bool si_pci_fastpmecap(struct osl_info *osh);
extern bool si_pci_pmestat(si_t *sih);
extern void si_pci_pmeclr(si_t *sih);
extern void si_pci_pmeen(si_t *sih);
extern void si_pci_pmestatclr(si_t *sih);
extern uint si_pcie_readreg(void *sih, uint addrtype, uint offset);
+extern uint si_pcie_writereg(void *sih, uint addrtype, uint offset, uint val);
+
+#ifdef BCMSDIO
extern void si_sdio_init(si_t *sih);
+#endif
extern uint16 si_d11_devid(si_t *sih);
extern int si_corepciid(si_t *sih, uint func, uint16 *pcivendor, uint16 *pcidevice,
#define si_seci(sih) 0
#define si_seci_upd(sih, a) do {} while (0)
static INLINE void * si_seci_init(si_t *sih, uint8 use_seci) {return NULL;}
+static INLINE void * si_gci_init(si_t *sih) {return NULL;}
#define si_seci_down(sih) do {} while (0)
#define si_gci(sih) 0
-static INLINE void * si_gci_init(si_t *sih) {return NULL;}
-
+/* OTP status */
extern bool si_is_otp_disabled(si_t *sih);
extern bool si_is_otp_powered(si_t *sih);
-extern void si_otp_power(si_t *sih, bool on);
-
+extern void si_otp_power(si_t *sih, bool on, uint32* min_res_mask);
+/* SPROM availability */
extern bool si_is_sprom_available(si_t *sih);
extern bool si_is_sprom_enabled(si_t *sih);
extern void si_sprom_enable(si_t *sih, bool enable);
-
+/* OTP/SROM CIS stuff */
extern int si_cis_source(si_t *sih);
#define CIS_DEFAULT 0
#define CIS_SROM 1
#define CIS_OTP 2
+/* Fab-id information */
+#define DEFAULT_FAB 0x0 /* Original/first fab used for this chip */
+#define CSM_FAB7 0x1 /* CSM Fab7 chip */
+#define TSMC_FAB12 0x2 /* TSMC Fab12/Fab14 chip */
+#define SMIC_FAB4 0x3 /* SMIC Fab4 chip */
-#define DEFAULT_FAB 0x0
-#define CSM_FAB7 0x1
-#define TSMC_FAB12 0x2
-#define SMIC_FAB4 0x3
extern int si_otp_fabid(si_t *sih, uint16 *fabid, bool rw);
extern uint16 si_fabid(si_t *sih);
+extern uint16 si_chipid(si_t *sih);
-
+/*
+ * Build device path. Path size must be >= SI_DEVPATH_BUFSZ.
+ * The returned path is NULL terminated and has trailing '/'.
+ * Return 0 on success, nonzero otherwise.
+ */
extern int si_devpath(si_t *sih, char *path, int size);
-
+extern int si_devpath_pcie(si_t *sih, char *path, int size);
+/* Read variable with prepending the devpath to the name */
extern char *si_getdevpathvar(si_t *sih, const char *name);
extern int si_getdevpathintvar(si_t *sih, const char *name);
extern char *si_coded_devpathvar(si_t *sih, char *varname, int var_len, const char *name);
extern uint8 si_pcieclkreq(si_t *sih, uint32 mask, uint32 val);
extern uint32 si_pcielcreg(si_t *sih, uint32 mask, uint32 val);
extern uint8 si_pcieltrenable(si_t *sih, uint32 mask, uint32 val);
+extern uint8 si_pcieobffenable(si_t *sih, uint32 mask, uint32 val);
+extern uint32 si_pcieltr_reg(si_t *sih, uint32 reg, uint32 mask, uint32 val);
+extern uint32 si_pcieltrspacing_reg(si_t *sih, uint32 mask, uint32 val);
+extern uint32 si_pcieltrhysteresiscnt_reg(si_t *sih, uint32 mask, uint32 val);
extern void si_pcie_set_error_injection(si_t *sih, uint32 mode);
+extern void si_pcie_set_L1substate(si_t *sih, uint32 substate);
+extern uint32 si_pcie_get_L1substate(si_t *sih);
extern void si_war42780_clkreq(si_t *sih, bool clkreq);
extern void si_pci_down(si_t *sih);
extern void si_pci_up(si_t *sih);
extern void si_chipcontrl_epa4331(si_t *sih, bool on);
extern void si_chipcontrl_epa4331_wowl(si_t *sih, bool enter_wowl);
extern void si_chipcontrl_srom4360(si_t *sih, bool on);
-
+/* Enable BT-COEX & Ex-PA for 4313 */
extern void si_epa_4313war(si_t *sih);
extern void si_btc_enable_chipcontrol(si_t *sih);
-
+/* BT/WL selection for 4313 bt combo >= P250 boards */
extern void si_btcombo_p250_4313_war(si_t *sih);
extern void si_btcombo_43228_war(si_t *sih);
extern void si_clk_pmu_htavail_set(si_t *sih, bool set_clear);
extern void si_pmu_synth_pwrsw_4313_war(si_t *sih);
extern uint si_pll_reset(si_t *sih);
-
+/* === debug routines === */
extern bool si_taclear(si_t *sih, bool details);
+#if defined(BCMDBG_PHYDUMP)
+extern void si_dumpregs(si_t *sih, struct bcmstrbuf *b);
+#endif
extern uint32 si_ccreg(si_t *sih, uint32 offset, uint32 mask, uint32 val);
extern uint32 si_pciereg(si_t *sih, uint32 offset, uint32 mask, uint32 val, uint type);
+#ifdef SR_DEBUG
+extern void si_dump_pmu(si_t *sih, void *pmu_var);
+extern void si_pmu_keep_on(si_t *sih, int32 int_val);
+extern uint32 si_pmu_keep_on_get(si_t *sih);
+extern uint32 si_power_island_set(si_t *sih, uint32 int_val);
+extern uint32 si_power_island_get(si_t *sih);
+#endif /* SR_DEBUG */
extern uint32 si_pcieserdesreg(si_t *sih, uint32 mdioslave, uint32 offset, uint32 mask, uint32 val);
extern void si_pcie_set_request_size(si_t *sih, uint16 size);
extern uint16 si_pcie_get_request_size(si_t *sih);
extern uint32 si_tcm_size(si_t *sih);
+extern bool si_has_flops(si_t *sih);
extern int si_set_sromctl(si_t *sih, uint32 value);
extern uint32 si_get_sromctl(si_t *sih);
extern uint32 si_gci_direct(si_t *sih, uint offset, uint32 mask, uint32 val);
+extern uint32 si_gci_indirect(si_t *sih, uint regidx, uint offset, uint32 mask, uint32 val);
+extern uint32 si_gci_output(si_t *sih, uint reg, uint32 mask, uint32 val);
+extern uint32 si_gci_input(si_t *sih, uint reg);
+extern uint32 si_gci_int_enable(si_t *sih, bool enable);
extern void si_gci_reset(si_t *sih);
+#ifdef BCMLTECOEX
+extern void si_gci_seci_init(si_t *sih);
+extern void si_ercx_init(si_t *sih, uint32 ltecx_mux, uint32 ltecx_padnum,
+ uint32 ltecx_fnsel, uint32 ltecx_gcigpio);
+extern void si_wci2_init(si_t *sih, uint8 baudrate, uint32 ltecx_mux, uint32 ltecx_padnum,
+ uint32 ltecx_fnsel, uint32 ltecx_gcigpio);
+#endif /* BCMLTECOEX */
extern void si_gci_set_functionsel(si_t *sih, uint32 pin, uint8 fnsel);
+extern uint32 si_gci_get_functionsel(si_t *sih, uint32 pin);
+extern void si_gci_clear_functionsel(si_t *sih, uint8 fnsel);
extern uint8 si_gci_get_chipctrlreg_idx(uint32 pin, uint32 *regidx, uint32 *pos);
extern uint32 si_gci_chipcontrol(si_t *sih, uint reg, uint32 mask, uint32 val);
-
+extern uint32 si_gci_chipstatus(si_t *sih, uint reg);
+extern uint16 si_cc_get_reg16(uint32 reg_offs);
+extern uint32 si_cc_get_reg32(uint32 reg_offs);
+extern uint32 si_cc_set_reg32(uint32 reg_offs, uint32 val);
+extern uint32 si_gci_preinit_upd_indirect(uint32 regidx, uint32 setval, uint32 mask);
+extern uint8 si_enable_device_wake(si_t *sih, uint8 *wake_status, uint8 *cur_status);
+extern void si_swdenable(si_t *sih, uint32 swdflag);
+
+#define CHIPCTRLREG1 0x1
#define CHIPCTRLREG2 0x2
#define CHIPCTRLREG3 0x3
#define CHIPCTRLREG4 0x4
+#define CHIPCTRLREG5 0x5
#define MINRESMASKREG 0x618
+#define MAXRESMASKREG 0x61c
#define CHIPCTRLADDR 0x650
#define CHIPCTRLDATA 0x654
#define RSRCTABLEADDR 0x620
#define RSRCUPDWNTIME 0x628
#define PMUREG_RESREQ_MASK 0x68c
-void
-si_update_masks(si_t *sih);
+void si_update_masks(si_t *sih);
+void si_force_islanding(si_t *sih, bool enable);
+extern uint32 si_pmu_res_req_timer_clr(si_t *sih);
+extern void si_pmu_rfldo(si_t *sih, bool on);
+extern void si_survive_perst_war(si_t *sih, bool reset, uint32 sperst_mask, uint32 spert_val);
+extern uint32 si_pcie_set_ctrlreg(si_t *sih, uint32 sperst_mask, uint32 spert_val);
+extern void si_pcie_ltr_war(si_t *sih);
+extern void si_pcie_hw_LTR_war(si_t *sih);
+extern void si_pcie_hw_L1SS_war(si_t *sih);
+extern void si_pciedev_crwlpciegen2(si_t *sih);
+extern void si_pcie_prep_D3(si_t *sih, bool enter_D3);
+extern void si_pciedev_reg_pm_clk_period(si_t *sih);
+
+#ifdef WLRSDB
+extern void si_d11rsdb_core_disable(si_t *sih, uint32 bits);
+extern void si_d11rsdb_core_reset(si_t *sih, uint32 bits, uint32 resetbits);
+#endif
-void
-si_force_islanding(si_t *sih, bool enable);
-#endif
+/* Macro to enable clock gating changes in different cores */
+#define MEM_CLK_GATE_BIT 5
+#define GCI_CLK_GATE_BIT 18
+
+#define USBAPP_CLK_BIT 0
+#define PCIE_CLK_BIT 3
+#define ARMCR4_DBG_CLK_BIT 4
+#define SAMPLE_SYNC_CLK_BIT 17
+#define PCIE_TL_CLK_BIT 18
+#define HQ_REQ_BIT 24
+#define PLL_DIV2_BIT_START 9
+#define PLL_DIV2_MASK (0x37 << PLL_DIV2_BIT_START)
+#define PLL_DIV2_DIS_OP (0x37 << PLL_DIV2_BIT_START)
+
+#define PMUREG(si, member) \
+ (AOB_ENAB(si) ? \
+ si_corereg_addr(si, si_findcoreidx(si, PMU_CORE_ID, 0), \
+ OFFSETOF(pmuregs_t, member)): \
+ si_corereg_addr(si, SI_CC_IDX, OFFSETOF(chipcregs_t, member)))
+
+#define pmu_corereg(si, cc_idx, member, mask, val) \
+ (AOB_ENAB(si) ? \
+ si_pmu_corereg(si, si_findcoreidx(sih, PMU_CORE_ID, 0), \
+ OFFSETOF(pmuregs_t, member), mask, val): \
+ si_pmu_corereg(si, cc_idx, OFFSETOF(chipcregs_t, member), mask, val))
+
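A hedged sketch of using the pmu_corereg() accessor above; min_res_mask is assumed to be a member of both pmuregs_t and chipcregs_t (the MINRESMASKREG offset above points at the chipcommon copy), SI_CC_IDX is assumed to be the chipcommon core-index define from this header, and a zero mask is assumed to perform a read-only access:

/* Illustrative only: read the PMU min_res_mask through the AOB-aware macro.
 * The local variable is named sih because the macro body references it.
 */
static inline uint32 pmu_min_res_mask_sketch(si_t *sih)
{
	return pmu_corereg(sih, SI_CC_IDX, min_res_mask, 0, 0);
}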
+/* GCI Macros */
+#define ALLONES_32 0xFFFFFFFF
+#define GCI_CCTL_SECIRST_OFFSET 0 /* SeciReset */
+#define GCI_CCTL_RSTSL_OFFSET 1 /* ResetSeciLogic */
+#define GCI_CCTL_SECIEN_OFFSET 2 /* EnableSeci */
+#define GCI_CCTL_FSL_OFFSET 3 /* ForceSeciOutLow */
+#define GCI_CCTL_SMODE_OFFSET 4 /* SeciOpMode, 6:4 */
+#define GCI_CCTL_US_OFFSET 7 /* UpdateSeci */
+#define GCI_CCTL_BRKONSLP_OFFSET 8 /* BreakOnSleep */
+#define GCI_CCTL_SILOWTOUT_OFFSET 9 /* SeciInLowTimeout, 10:9 */
+#define GCI_CCTL_RSTOCC_OFFSET 11 /* ResetOffChipCoex */
+#define GCI_CCTL_ARESEND_OFFSET 12 /* AutoBTSigResend */
+#define GCI_CCTL_FGCR_OFFSET 16 /* ForceGciClkReq */
+#define GCI_CCTL_FHCRO_OFFSET 17 /* ForceHWClockReqOff */
+#define GCI_CCTL_FREGCLK_OFFSET 18 /* ForceRegClk */
+#define GCI_CCTL_FSECICLK_OFFSET 19 /* ForceSeciClk */
+#define GCI_CCTL_FGCA_OFFSET 20 /* ForceGciClkAvail */
+#define GCI_CCTL_FGCAV_OFFSET 21 /* ForceGciClkAvailValue */
+#define GCI_CCTL_SCS_OFFSET 24 /* SeciClkStretch, 31:24 */
+
+#define GCI_MODE_UART 0x0
+#define GCI_MODE_SECI 0x1
+#define GCI_MODE_BTSIG 0x2
+#define GCI_MODE_GPIO 0x3
+#define GCI_MODE_MASK 0x7
+
+#define GCI_CCTL_LOWTOUT_DIS 0x0
+#define GCI_CCTL_LOWTOUT_10BIT 0x1
+#define GCI_CCTL_LOWTOUT_20BIT 0x2
+#define GCI_CCTL_LOWTOUT_30BIT 0x3
+#define GCI_CCTL_LOWTOUT_MASK 0x3
+
+#define GCI_CCTL_SCS_DEF 0x19
+#define GCI_CCTL_SCS_MASK 0xFF
+
+#define GCI_SECIIN_MODE_OFFSET 0
+#define GCI_SECIIN_GCIGPIO_OFFSET 4
+#define GCI_SECIIN_RXID2IP_OFFSET 8
+
+#define GCI_SECIOUT_MODE_OFFSET 0
+#define GCI_SECIOUT_GCIGPIO_OFFSET 4
+#define GCI_SECIOUT_SECIINRELATED_OFFSET 16
+
+#define GCI_SECIAUX_RXENABLE_OFFSET 0
+#define GCI_SECIFIFO_RXENABLE_OFFSET 16
+
+#define GCI_SECITX_ENABLE_OFFSET 0
+
+#define GCI_GPIOCTL_INEN_OFFSET 0
+#define GCI_GPIOCTL_OUTEN_OFFSET 1
+#define GCI_GPIOCTL_PDN_OFFSET 4
+
+#define GCI_GPIOIDX_OFFSET 16
+
+#define GCI_LTECX_SECI_ID 0 /* SECI port for LTECX */
+
+/* To access per GCI bit registers */
+#define GCI_REG_WIDTH 32
+
+/* GCI bit positions */
+/* GCI [127:000] = WLAN [127:0] */
+#define GCI_WLAN_IP_ID 0
+#define GCI_WLAN_BEGIN 0
+#define GCI_WLAN_PRIO_POS (GCI_WLAN_BEGIN + 4)
+
+/* GCI [639:512] = LTE [127:0] */
+#define GCI_LTE_IP_ID 4
+#define GCI_LTE_BEGIN 512
+#define GCI_LTE_FRAMESYNC_POS (GCI_LTE_BEGIN + 0)
+#define GCI_LTE_RX_POS (GCI_LTE_BEGIN + 1)
+#define GCI_LTE_TX_POS (GCI_LTE_BEGIN + 2)
+#define GCI_LTE_AUXRXDVALID_POS (GCI_LTE_BEGIN + 56)
+
+/* Reg Index corresponding to ECI bit no x of ECI space */
+#define GCI_REGIDX(x) ((x)/GCI_REG_WIDTH)
+/* Bit offset of ECI bit no x in 32-bit words */
+#define GCI_BITOFFSET(x) ((x)%GCI_REG_WIDTH)
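Worked through with the positions above (illustrative only): the LTE frame-sync bit sits at ECI position 512, which lands in 32-bit GCI register index 512 / 32 = 16 at bit offset 0.

/* Illustrative only: locate an ECI bit in the 32-bit GCI register file. */
static inline void gci_locate_sketch(unsigned int *regidx, unsigned int *bitpos)
{
	*regidx = GCI_REGIDX(GCI_LTE_FRAMESYNC_POS);	/* 16 */
	*bitpos = GCI_BITOFFSET(GCI_LTE_FRAMESYNC_POS);	/* 0 */
}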
+
+/* End - GCI Macros */
+
+#ifdef REROUTE_OOBINT
+#define CC_OOB 0x0
+#define M2MDMA_OOB 0x1
+#define PMU_OOB 0x2
+#define D11_OOB 0x3
+#define SDIOD_OOB 0x4
+#define PMU_OOB_BIT (0x10 | PMU_OOB)
+#endif /* REROUTE_OOBINT */
+
+
+#endif /* _siutils_h_ */
/*
* $Copyright Open Broadcom Corporation$
- * $Id: typedefs.h 397286 2013-04-18 01:42:19Z $
+ * $Id: typedefs.h 484281 2014-06-12 22:42:26Z $
*/
#ifndef _TYPEDEFS_H_
#ifdef SITE_TYPEDEFS
-
+/*
+ * Define SITE_TYPEDEFS in the compile to include a site-specific
+ * typedef file "site_typedefs.h".
+ *
+ * If SITE_TYPEDEFS is not defined, then the code section below makes
+ * inferences about the compile environment based on defined symbols and
+ * possibly compiler pragmas.
+ *
+ * Following these two sections is the Default Typedefs section.
+ * This section is only processed if USE_TYPEDEF_DEFAULTS is
+ * defined. This section has a default set of typedefs and a few
+ * preprocessor symbols (TRUE, FALSE, NULL, ...).
+ */
#include "site_typedefs.h"
#else
-
+/*
+ * Infer the compile environment based on preprocessor symbols and pragmas.
+ * Override type definitions as needed, and include configuration-dependent
+ * header files to define types.
+ */
#ifdef __cplusplus
#define TRUE true
#endif
-#else
+#else /* ! __cplusplus */
-#endif
+#endif /* ! __cplusplus */
-#if defined(__x86_64__)
+#if defined(__LP64__)
#define TYPEDEF_UINTPTR
typedef unsigned long long int uintptr;
#endif
#define TYPEDEF_ULONG
#endif
-
-
+/*
+ * If this is either a Linux hybrid build or the per-port code of a hybrid build
+ * then use the Linux header files to get some of the typedefs. Otherwise, define
+ * them entirely in this file. We can't always define the types because we get
+ * a duplicate typedef error; there is no way to "undefine" a typedef.
+ * We know when it's per-port code because each file defines LINUX_PORT at the top.
+ */
#if !defined(LINUX_HYBRID) || defined(LINUX_PORT)
#define TYPEDEF_UINT
#ifndef TARGETENV_android
#define TYPEDEF_USHORT
#define TYPEDEF_ULONG
-#endif
+#endif /* TARGETENV_android */
#ifdef __KERNEL__
#include <linux/version.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19))
#define TYPEDEF_BOOL
-#endif
-
+#endif /* >= 2.6.19 */
+/* special detection for 2.6.18-128.7.1.0.1.el5 */
#if (LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18))
#include <linux/compiler.h>
#ifdef noinline_for_stack
#define TYPEDEF_BOOL
#endif
-#endif
-#endif
-#endif
-
-
-
+#endif /* == 2.6.18 */
+#endif /* __KERNEL__ */
+#endif /* !defined(LINUX_HYBRID) || defined(LINUX_PORT) */
+/* Do not support the (u)int64 types with strict ansi for GNU C */
#if defined(__GNUC__) && defined(__STRICT_ANSI__)
#define TYPEDEF_INT64
#define TYPEDEF_UINT64
-#endif
-
+#endif /* defined(__GNUC__) && defined(__STRICT_ANSI__) */
+/* ICL accepts unsigned 64 bit type only, and complains in ANSI mode
+ * for signed or unsigned
+ */
#if defined(__ICL)
#define TYPEDEF_INT64
#define TYPEDEF_UINT64
#endif
-#endif
+#endif /* __ICL */
#if !defined(__DJGPP__)
-
+/* pick up ushort & uint from standard types.h */
#if defined(__KERNEL__)
-
+/* See note above */
#if !defined(LINUX_HYBRID) || defined(LINUX_PORT)
-#include <linux/types.h>
-#endif
+#include <linux/types.h> /* sys/types.h and linux/types.h are oil and water */
+#endif /* !defined(LINUX_HYBRID) || defined(LINUX_PORT) */
#else
-
#include <sys/types.h>
-#endif
+#endif /* linux && __KERNEL__ */
#endif
-
-
+/* use the default typedefs in the next section of this file */
#define USE_TYPEDEF_DEFAULTS
-#endif
-
+#endif /* SITE_TYPEDEFS */
+/*
+ * Default Typedefs
+ */
#ifdef USE_TYPEDEF_DEFAULTS
#undef USE_TYPEDEF_DEFAULTS
#ifndef TYPEDEF_BOOL
-typedef unsigned char bool;
-#endif
-
+typedef /* @abstract@ */ unsigned char bool;
+#endif /* endif TYPEDEF_BOOL */
+/* define uchar, ushort, uint, ulong */
#ifndef TYPEDEF_UCHAR
typedef unsigned char uchar;
typedef unsigned long ulong;
#endif
-
+/* define [u]int8/16/32/64, uintptr */
#ifndef TYPEDEF_UINT8
typedef unsigned char uint8;
typedef signed long long int64;
#endif
-
+/* define float32/64, float_t */
#ifndef TYPEDEF_FLOAT32
typedef float float32;
typedef double float64;
#endif
-
+/*
+ * abstracted floating point type allows for compile time selection of
+ * single or double precision arithmetic. Compiling with -DFLOAT32
+ * selects single precision; the default is double precision.
+ */
#ifndef TYPEDEF_FLOAT_T
#if defined(FLOAT32)
typedef float32 float_t;
-#else
+#else /* default to double precision floating point */
typedef float64 float_t;
#endif
-#endif
-
+#endif /* TYPEDEF_FLOAT_T */
+/* define macro values */
#ifndef FALSE
#define FALSE 0
#endif
#ifndef TRUE
-#define TRUE 1
+#define TRUE 1 /* TRUE */
#endif
#ifndef NULL
#endif
#ifndef ON
-#define ON 1
+#define ON 1 /* ON = 1 */
#endif
-#define AUTO (-1)
-
+#define AUTO (-1) /* Auto = -1 */
+/* define PTRSZ, INLINE */
#ifndef PTRSZ
#define PTRSZ sizeof(char*)
#endif
-
+/* Detect compiler type. */
#if defined(__GNUC__) || defined(__lint)
#define BWL_COMPILER_GNU
#elif defined(__CC_ARM) && __CC_ARM
#else
#define INLINE
#endif
-#endif
+#endif /* INLINE */
#undef TYPEDEF_BOOL
#undef TYPEDEF_UCHAR
#undef TYPEDEF_FLOAT64
#undef TYPEDEF_FLOAT_T
-#endif
-
+#endif /* USE_TYPEDEF_DEFAULTS */
+/* Suppress unused parameter warning */
#define UNUSED_PARAMETER(x) (void)(x)
-
+/* Avoid warning for discarded const or volatile qualifier in special cases (-Wcast-qual) */
#define DISCARD_QUAL(ptr, type) ((type *)(uintptr)(ptr))
-
+/*
+ * Include bcmdefs.h here to make sure everyone who includes typedefs.h
+ * gets it automatically.
+ */
#include <bcmdefs.h>
-#endif
+#endif /* _TYPEDEFS_H_ */
/*
* $Copyright Open 2009 Broadcom Corporation$
-* $Id: wlfc_proto.h 381382 2013-01-27 07:13:00Z $
+* $Id: wlfc_proto.h 455301 2014-02-13 12:42:13Z $
*
*/
#ifndef __wlfc_proto_definitions_h__
#define WLFC_CTL_TYPE_MAC_REQUEST_PACKET 13
#define WLFC_CTL_TYPE_HOST_REORDER_RXPKTS 14
+
#define WLFC_CTL_TYPE_TX_ENTRY_STAMP 15
#define WLFC_CTL_TYPE_RX_STAMP 16
#define WLFC_CTL_TYPE_TRANS_ID 18
#define WLFC_CTL_TYPE_COMP_TXSTATUS 19
+#define WLFC_CTL_TYPE_TID_OPEN 20
+#define WLFC_CTL_TYPE_TID_CLOSE 21
+
#define WLFC_CTL_TYPE_FILLER 255
#define WLFC_CTL_VALUE_LEN_TXSTATUS 4
#define WLFC_CTL_VALUE_LEN_PKTTAG 4
+#define WLFC_CTL_VALUE_LEN_SEQ 2
+
/* enough space to host all 4 ACs, bc/mc and atim fifo credit */
#define WLFC_CTL_VALUE_LEN_FIFO_CREDITBACK 6
#define WLFC_CTL_VALUE_LEN_REQUEST_PACKET 3 /* credit, MAC-handle, prec_bitmap */
-#define WLFC_PKTID_GEN_MASK 0x80000000
-#define WLFC_PKTID_GEN_SHIFT 31
-
-#define WLFC_PKTID_GEN(x) (((x) & WLFC_PKTID_GEN_MASK) >> WLFC_PKTID_GEN_SHIFT)
-#define WLFC_PKTID_SETGEN(x, gen) (x) = ((x) & ~WLFC_PKTID_GEN_MASK) | \
- (((gen) << WLFC_PKTID_GEN_SHIFT) & WLFC_PKTID_GEN_MASK)
-
#define WLFC_PKTFLAG_PKTFROMHOST 0x01
#define WLFC_PKTFLAG_PKT_REQUESTED 0x02
+#define WL_TXSTATUS_STATUS_MASK 0xff /* allow 8 bits */
+#define WL_TXSTATUS_STATUS_SHIFT 24
+
+#define WL_TXSTATUS_SET_STATUS(x, status) ((x) = \
+ ((x) & ~(WL_TXSTATUS_STATUS_MASK << WL_TXSTATUS_STATUS_SHIFT)) | \
+ (((status) & WL_TXSTATUS_STATUS_MASK) << WL_TXSTATUS_STATUS_SHIFT))
+#define WL_TXSTATUS_GET_STATUS(x) (((x) >> WL_TXSTATUS_STATUS_SHIFT) & \
+ WL_TXSTATUS_STATUS_MASK)
+
+#define WL_TXSTATUS_GENERATION_MASK 1 /* allow 1 bit */
+#define WL_TXSTATUS_GENERATION_SHIFT 31
+
+#define WL_TXSTATUS_SET_GENERATION(x, gen) ((x) = \
+ ((x) & ~(WL_TXSTATUS_GENERATION_MASK << WL_TXSTATUS_GENERATION_SHIFT)) | \
+ (((gen) & WL_TXSTATUS_GENERATION_MASK) << WL_TXSTATUS_GENERATION_SHIFT))
+
+#define WL_TXSTATUS_GET_GENERATION(x) (((x) >> WL_TXSTATUS_GENERATION_SHIFT) & \
+ WL_TXSTATUS_GENERATION_MASK)
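+/* Usage sketch (illustrative, not part of the original header): the pkttag is a
+ * single 32-bit word (WLFC_CTL_VALUE_LEN_PKTTAG) updated in place, e.g.
+ *   uint32 pkttag = 0;
+ *   WL_TXSTATUS_SET_STATUS(pkttag, status);
+ *   WL_TXSTATUS_SET_GENERATION(pkttag, gen);
+ *   gen = WL_TXSTATUS_GET_GENERATION(pkttag);
+ */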
+
#define WL_TXSTATUS_FLAGS_MASK 0xf /* allow 4 bits only */
#define WL_TXSTATUS_FLAGS_SHIFT 27
((x) & ~WL_TXSTATUS_PKTID_MASK) | (num))
#define WL_TXSTATUS_GET_PKTID(x) ((x) & WL_TXSTATUS_PKTID_MASK)
+#define WL_TXSTATUS_HSLOT_MASK 0xffff /* allow 16 bits */
+#define WL_TXSTATUS_HSLOT_SHIFT 8
+
+#define WL_TXSTATUS_SET_HSLOT(x, hslot) ((x) = \
+ ((x) & ~(WL_TXSTATUS_HSLOT_MASK << WL_TXSTATUS_HSLOT_SHIFT)) | \
+ (((hslot) & WL_TXSTATUS_HSLOT_MASK) << WL_TXSTATUS_HSLOT_SHIFT))
+#define WL_TXSTATUS_GET_HSLOT(x) (((x) >> WL_TXSTATUS_HSLOT_SHIFT) & \
+ WL_TXSTATUS_HSLOT_MASK)
+
+#define WL_TXSTATUS_FREERUNCTR_MASK 0xff /* allow 8 bits */
+
+#define WL_TXSTATUS_SET_FREERUNCTR(x, ctr) ((x) = \
+ ((x) & ~(WL_TXSTATUS_FREERUNCTR_MASK)) | \
+ ((ctr) & WL_TXSTATUS_FREERUNCTR_MASK))
+#define WL_TXSTATUS_GET_FREERUNCTR(x) ((x) & WL_TXSTATUS_FREERUNCTR_MASK)
+
+#define WL_SEQ_FROMFW_MASK 0x1 /* allow 1 bit */
+#define WL_SEQ_FROMFW_SHIFT 13
+#define WL_SEQ_SET_FROMFW(x, val) ((x) = \
+ ((x) & ~(WL_SEQ_FROMFW_MASK << WL_SEQ_FROMFW_SHIFT)) | \
+ (((val) & WL_SEQ_FROMFW_MASK) << WL_SEQ_FROMFW_SHIFT))
+#define WL_SEQ_GET_FROMFW(x) (((x) >> WL_SEQ_FROMFW_SHIFT) & \
+ WL_SEQ_FROMFW_MASK)
+
+#define WL_SEQ_FROMDRV_MASK 0x1 /* allow 1 bit */
+#define WL_SEQ_FROMDRV_SHIFT 12
+#define WL_SEQ_SET_FROMDRV(x, val) ((x) = \
+ ((x) & ~(WL_SEQ_FROMDRV_MASK << WL_SEQ_FROMDRV_SHIFT)) | \
+ (((val) & WL_SEQ_FROMDRV_MASK) << WL_SEQ_FROMDRV_SHIFT))
+#define WL_SEQ_GET_FROMDRV(x) (((x) >> WL_SEQ_FROMDRV_SHIFT) & \
+ WL_SEQ_FROMDRV_MASK)
+
+#define WL_SEQ_NUM_MASK 0xfff /* allow 12 bit */
+#define WL_SEQ_NUM_SHIFT 0
+#define WL_SEQ_SET_NUM(x, val) ((x) = \
+ ((x) & ~(WL_SEQ_NUM_MASK << WL_SEQ_NUM_SHIFT)) | \
+ (((val) & WL_SEQ_NUM_MASK) << WL_SEQ_NUM_SHIFT))
+#define WL_SEQ_GET_NUM(x) (((x) >> WL_SEQ_NUM_SHIFT) & \
+ WL_SEQ_NUM_MASK)
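+/* Layout note (derived from the masks/shifts above): the 16-bit sequence word
+ * (WLFC_CTL_VALUE_LEN_SEQ == 2) carries the sequence number in bits 11:0, the
+ * "from driver" flag in bit 12 and the "from firmware" flag in bit 13.
+ */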
+
/* 32 STA should be enough??, 6 bits; Must be power of 2 */
#define WLFC_MAC_DESC_TABLE_SIZE 32
#define WLFC_MAX_IFNUM 16
#define WLFC_PKTFLAG_CLR_PKTREQUESTED(x) (x) &= \
~(WLFC_PKTFLAG_PKT_REQUESTED << WL_TXSTATUS_FLAGS_SHIFT)
-#define WL_TXSTATUS_GENERATION_MASK 1
-#define WL_TXSTATUS_GENERATION_SHIFT 31
-
-#define WLFC_PKTFLAG_SET_GENERATION(x, gen) ((x) = \
- ((x) & ~(WL_TXSTATUS_GENERATION_MASK << WL_TXSTATUS_GENERATION_SHIFT)) | \
- (((gen) & WL_TXSTATUS_GENERATION_MASK) << WL_TXSTATUS_GENERATION_SHIFT))
-
-#define WLFC_PKTFLAG_GENERATION(x) (((x) >> WL_TXSTATUS_GENERATION_SHIFT) & \
- WL_TXSTATUS_GENERATION_MASK)
#define WLFC_MAX_PENDING_DATALEN 120
/* host is free to discard the packet */
-#define WLFC_CTL_PKTFLAG_DISCARD 0
+#define WLFC_CTL_PKTFLAG_DISCARD 0
/* D11 suppressed a packet */
#define WLFC_CTL_PKTFLAG_D11SUPPRESS 1
/* WL firmware suppressed a packet because MAC is
already in PSMode (short time window)
*/
-#define WLFC_CTL_PKTFLAG_WLSUPPRESS 2
+#define WLFC_CTL_PKTFLAG_WLSUPPRESS 2
/* Firmware tossed this packet */
#define WLFC_CTL_PKTFLAG_TOSSED_BYWLC 3
+/* Firmware tossed after retries */
+#define WLFC_CTL_PKTFLAG_DISCARD_NOACK 4
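+/* The macro below (as redefined in this change) maps a d11 tx status to one of
+ * the pktflags above: a suppress indication yields D11SUPPRESS, an acked frame
+ * yields DISCARD, and anything else yields DISCARD_NOACK.
+ */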
#define WLFC_D11_STATUS_INTERPRET(txs) \
- (((txs)->status.suppr_ind != 0) ? WLFC_CTL_PKTFLAG_D11SUPPRESS : WLFC_CTL_PKTFLAG_DISCARD)
+ (((txs)->status.suppr_ind != TX_STATUS_SUPR_NONE) ? \
+ WLFC_CTL_PKTFLAG_D11SUPPRESS : \
+ ((txs)->status.was_acked ? \
+ WLFC_CTL_PKTFLAG_DISCARD : WLFC_CTL_PKTFLAG_DISCARD_NOACK))
#ifdef PROP_TXSTATUS_DEBUG
#define WLFC_DBGMESG(x) printf x
/* transaction id data len byte 0: rsvd, byte 1: seq number, bytes 2-5 will be used for the timestamp */
#define WLFC_CTL_TRANS_ID_LEN 6
+#define WLFC_TYPE_TRANS_ID_LEN 6
+
+#define WLFC_MODE_HANGER 1 /* use hanger */
+#define WLFC_MODE_AFQ 2 /* use afq */
+#define WLFC_IS_OLD_DEF(x) ((x & 1) || (x & 2))
+
+#define WLFC_MODE_AFQ_SHIFT 2 /* afq bit */
+#define WLFC_SET_AFQ(x, val) ((x) = \
+ ((x) & ~(1 << WLFC_MODE_AFQ_SHIFT)) | \
+ (((val) & 1) << WLFC_MODE_AFQ_SHIFT))
+#define WLFC_GET_AFQ(x) (((x) >> WLFC_MODE_AFQ_SHIFT) & 1)
+
+#define WLFC_MODE_REUSESEQ_SHIFT 3 /* seq reuse bit */
+#define WLFC_SET_REUSESEQ(x, val) ((x) = \
+ ((x) & ~(1 << WLFC_MODE_REUSESEQ_SHIFT)) | \
+ (((val) & 1) << WLFC_MODE_REUSESEQ_SHIFT))
+#define WLFC_GET_REUSESEQ(x) (((x) >> WLFC_MODE_REUSESEQ_SHIFT) & 1)
+
+#define WLFC_MODE_REORDERSUPP_SHIFT 4 /* host reorder suppress pkt bit */
+#define WLFC_SET_REORDERSUPP(x, val) ((x) = \
+ ((x) & ~(1 << WLFC_MODE_REORDERSUPP_SHIFT)) | \
+ (((val) & 1) << WLFC_MODE_REORDERSUPP_SHIFT))
+#define WLFC_GET_REORDERSUPP(x) (((x) >> WLFC_MODE_REORDERSUPP_SHIFT) & 1)
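+/* Mode-bit summary (derived from the definitions above): bits 0/1 carry the
+ * legacy hanger/afq mode values, bit 2 selects AFQ, bit 3 enables sequence-number
+ * reuse and bit 4 enables host-reorder suppression; a host would typically test
+ * them as, e.g., if (WLFC_GET_AFQ(wlfc_mode)) { ... }.
+ */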
#endif /* __wlfc_proto_definitions_h__ */
*
* Definitions subject to change without notice.
*
- * Copyright (C) 1999-2013, Broadcom Corporation
- *
- * Unless you and Broadcom execute a separate written software license
- * agreement governing use of this software, this software is licensed to you
- * under the terms of the GNU General Public License version 2 (the "GPL"),
- * available at http://www.broadcom.com/licenses/GPLv2.php, with the
- * following added to such license:
- *
- * As a special exception, the copyright holders of this software give you
- * permission to link this software with independent modules, and to copy and
- * distribute the resulting executable under terms of your choice, provided that
- * you also meet, for each linked independent module, the terms and conditions of
- * the license of that module. An independent module is a module which is not
- * derived from this software. The special exception does not apply to any
- * modifications of the software.
- *
- * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a license
- * other than the GPL, without Broadcom's express prior written consent.
+ * $Copyright Open Broadcom Corporation$
*
- * $Id: wlioctl.h 419132 2013-08-19 21:33:05Z $
+ * $Id: wlioctl.h 490639 2014-07-11 08:31:53Z $
*/
#ifndef _wlioctl_h_
#include <typedefs.h>
#include <proto/ethernet.h>
+#include <proto/bcmip.h>
#include <proto/bcmeth.h>
#include <proto/bcmevent.h>
#include <proto/802.11.h>
+#include <proto/802.1d.h>
#include <bcmwifi_channels.h>
#include <bcmwifi_rates.h>
+#include <devctrl_if/wlioctl_defs.h>
+
+#if 0 && (NDISVER >= 0x0600)
+#include <proto/bcmipv6.h>
+#endif
#ifndef LINUX_POSTMOGRIFY_REMOVAL
#include <bcm_mpool_pub.h>
#include <bcmcdc.h>
#endif /* LINUX_POSTMOGRIFY_REMOVAL */
-/* LINUX_POSTMOGRIFY_REMOVAL: undefined during compile phase, so its
- * a no-op for most cases. For hybrid and other open source releases,
- * its defined during a second pass and mogrified out for distribution.
- */
+
+
#ifndef LINUX_POSTMOGRIFY_REMOVAL
/* Used to send ioctls over the transport pipe */
typedef struct remote_ioctl {
- cdc_ioctl_t msg;
- uint data_len;
- char intf_name[INTF_NAME_SIZ];
+ cdc_ioctl_t msg;
+ uint32 data_len;
+ char intf_name[INTF_NAME_SIZ];
} rem_ioctl_t;
#define REMOTE_SIZE sizeof(rem_ioctl_t)
-
typedef struct {
uint32 num;
chanspec_t list[1];
} chanspec_list_t;
+/* DFS Forced param */
+typedef struct wl_dfs_forced_params {
+ chanspec_t chspec;
+ uint16 version;
+ chanspec_list_t chspec_list;
+} wl_dfs_forced_t;
+
+#define DFS_PREFCHANLIST_VER 0x01
+#define WL_CHSPEC_LIST_FIXED_SIZE OFFSETOF(chanspec_list_t, list)
+#define WL_DFS_FORCED_PARAMS_FIXED_SIZE \
+ (WL_CHSPEC_LIST_FIXED_SIZE + OFFSETOF(wl_dfs_forced_t, chspec_list))
+#define WL_DFS_FORCED_PARAMS_MAX_SIZE \
+ WL_DFS_FORCED_PARAMS_FIXED_SIZE + (WL_NUMCHANNELS * sizeof(chanspec_t))
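+/* Sizing sketch (derived from the macros above): an iovar buffer carrying n forced
+ * chanspecs needs WL_DFS_FORCED_PARAMS_FIXED_SIZE + n * sizeof(chanspec_t) bytes,
+ * with WL_DFS_FORCED_PARAMS_MAX_SIZE as the n == WL_NUMCHANNELS upper bound.
+ */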
+
+/* association decision information */
+typedef struct {
+ bool assoc_approved; /* (re)association approved */
+ uint16 reject_reason; /* reason code for rejecting association */
+ struct ether_addr da;
+#if 0 && (NDISVER >= 0x0620)
+ LARGE_INTEGER sys_time; /* current system time */
+#else
+ int64 sys_time; /* current system time */
+#endif
+} assoc_decision_t;
+
#define ACTION_FRAME_SIZE 1800
typedef struct wl_action_frame {
#include <packed_section_start.h>
+/* Flags for OBSS IOVAR Parameters */
+#define WL_OBSS_DYN_BWSW_FLAG_ACTIVITY_PERIOD (0x01)
+#define WL_OBSS_DYN_BWSW_FLAG_NOACTIVITY_PERIOD (0x02)
+#define WL_OBSS_DYN_BWSW_FLAG_NOACTIVITY_INCR_PERIOD (0x04)
+#define WL_OBSS_DYN_BWSW_FLAG_PSEUDO_SENSE_PERIOD (0x08)
+#define WL_OBSS_DYN_BWSW_FLAG_RX_CRS_PERIOD (0x10)
+#define WL_OBSS_DYN_BWSW_FLAG_DUR_THRESHOLD (0x20)
+#define WL_OBSS_DYN_BWSW_FLAG_TXOP_PERIOD (0x40)
+
+/* OBSS IOVAR Version information */
+#define WL_PROT_OBSS_CONFIG_PARAMS_VERSION 1
+typedef BWL_PRE_PACKED_STRUCT struct {
+	uint8 obss_bwsw_activity_cfm_count_cfg; /* configurable count in
+		 * seconds before we confirm that an OBSS is present and
+		 * dynamically activate the bandwidth switch.
+		 */
+	uint8 obss_bwsw_no_activity_cfm_count_cfg; /* configurable count in
+		 * seconds before we confirm that the OBSS is gone and
+		 * dynamically start the pseudo upgrade. If the OBSS is seen again
+		 * during the pseudo sense time (i.e. the watchdog falsely detected
+		 * that the OBSS was gone), this count is incremented in steps of
+		 * obss_bwsw_no_activity_cfm_count_incr_cfg before the absence of
+		 * the OBSS is confirmed again. At present a maximum of 30 seconds
+		 * is allowed (OBSS_BWSW_NO_ACTIVITY_MAX_INCR_DEFAULT).
+		 */
+	uint8 obss_bwsw_no_activity_cfm_count_incr_cfg; /* increment step for the
+		 * count above.
+		 */
+	uint16 obss_bwsw_pseudo_sense_count_cfg; /* number of msecs/counts to stay
+		 * in the pseudo state; used to sense/measure the stats from lq
+		 * (link quality).
+		 */
+ uint8 obss_bwsw_rx_crs_threshold_cfg; /* RX CRS default threshold */
+ uint8 obss_bwsw_dur_thres; /* OBSS dyn bwsw trigger/RX CRS Sec */
+ uint8 obss_bwsw_txop_threshold_cfg; /* TXOP default threshold */
+} BWL_POST_PACKED_STRUCT wlc_prot_dynbwsw_config_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint32 version; /* version field */
+ uint32 config_mask;
+ uint32 reset_mask;
+ wlc_prot_dynbwsw_config_t config_params;
+} BWL_POST_PACKED_STRUCT obss_config_params_t;
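+/* Assumption (not stated in this header): config_mask/reset_mask are presumably
+ * bitmaps of the WL_OBSS_DYN_BWSW_FLAG_* values above, selecting which fields of
+ * config_params to apply or to reset to their defaults.
+ */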
+
+
+
#ifndef LINUX_POSTMOGRIFY_REMOVAL
/* Legacy structure to help keep backward compatible wl tool and tray app */
uint32 nbss_cap; /* 802.11N+AC BSS Capabilities */
uint8 ctl_ch; /* 802.11N BSS control channel number */
uint8 padding1[3]; /* explicit struct alignment padding */
- uint16 vht_rxmcsmap; /* VHT rx mcs map (802.11ac VHT_CAP_MCS_MAP_*) */
- uint16 vht_txmcsmap; /* VHT tx mcs map (802.11ac VHT_CAP_MCS_MAP_*) */
+ uint16 vht_rxmcsmap; /* VHT rx mcs map (802.11ac IE, VHT_CAP_MCS_MAP_*) */
+ uint16 vht_txmcsmap; /* VHT tx mcs map (802.11ac IE, VHT_CAP_MCS_MAP_*) */
uint8 flags; /* flags */
uint8 vht_cap; /* BSS is vht capable */
uint8 reserved[2]; /* Reserved for expansion of BSS properties */
/* variable length Information Elements */
} wl_bss_info_t;
-/* bss_info_cap_t flags */
-#define WL_BSS_FLAGS_FROM_BEACON 0x01 /* bss_info derived from beacon */
-#define WL_BSS_FLAGS_FROM_CACHE 0x02 /* bss_info collected from cache */
-#define WL_BSS_FLAGS_RSSI_ONCHANNEL 0x04 /* rssi info was received on channel (vs offchannel) */
-
-/* bssinfo flag for nbss_cap */
-#define VHT_BI_SGI_80MHZ 0x00000100
-
#ifndef LINUX_POSTMOGRIFY_REMOVAL
typedef struct wl_bsscfg {
- uint32 wsec;
- uint32 WPA_auth;
- uint32 wsec_index;
- uint32 associated;
- uint32 BSS;
- uint32 phytest_on;
- struct ether_addr prev_BSSID;
- struct ether_addr BSSID;
+ uint32 bsscfg_idx;
+ uint32 wsec;
+ uint32 WPA_auth;
+ uint32 wsec_index;
+ uint32 associated;
+ uint32 BSS;
+ uint32 phytest_on;
+ struct ether_addr prev_BSSID;
+ struct ether_addr BSSID;
uint32 targetbss_wpa2_flags;
uint32 assoc_type;
uint32 assoc_state;
} wl_bsscfg_t;
+typedef struct wl_if_add {
+ uint32 bsscfg_flags;
+ uint32 if_flags;
+ uint32 ap;
+ struct ether_addr mac_addr;
+} wl_if_add_t;
+
typedef struct wl_bss_config {
uint32 atim_window;
uint32 beacon_period;
uint32 chanspec;
} wl_bss_config_t;
+#define WL_BSS_USER_RADAR_CHAN_SELECT 0x1 /* User application will randomly select
+ * radar channel.
+ */
+
#define DLOAD_HANDLER_VER 1 /* Downloader version */
#define DLOAD_FLAG_VER_MASK 0xf000 /* Downloader version mask */
#define DLOAD_FLAG_VER_SHIFT 12 /* Downloader version shift */
#define WL_EXTDSCAN_PARAMS_FIXED_SIZE (sizeof(wl_extdscan_params_t) - sizeof(chan_scandata_t))
-#define WL_BSSTYPE_INFRA 1
-#define WL_BSSTYPE_INDEP 0
-#define WL_BSSTYPE_ANY 2
-
-/* Bitmask for scan_type */
-#define WL_SCANFLAGS_PASSIVE 0x01 /* force passive scan */
-#define WL_SCANFLAGS_RESERVED 0x02 /* Reserved */
-#define WL_SCANFLAGS_PROHIBITED 0x04 /* allow scanning prohibited channels */
-#define WL_SCANFLAGS_OFFCHAN 0x08 /* allow scanning/reporting off-channel APs */
-#define WL_SCANFLAGS_HOTSPOT 0x10 /* automatic ANQP to hotspot APs */
-
#define WL_SCAN_PARAMS_SSID_MAX 10
typedef struct wl_scan_params {
/* size of wl_scan_params not including variable length array */
#define WL_SCAN_PARAMS_FIXED_SIZE 64
-
-/* masks for channel and ssid count */
-#define WL_SCAN_PARAMS_COUNT_MASK 0x0000ffff
-#define WL_SCAN_PARAMS_NSSID_SHIFT 16
-
-#define WL_SCAN_ACTION_START 1
-#define WL_SCAN_ACTION_CONTINUE 2
-#define WL_SCAN_ACTION_ABORT 3
+#define WL_MAX_ROAMSCAN_DATSZ (WL_SCAN_PARAMS_FIXED_SIZE + (WL_NUMCHANNELS * sizeof(uint16)))
#define ISCAN_REQ_VERSION 1
/* size of wl_scan_results not including variable length array */
#define WL_SCAN_RESULTS_FIXED_SIZE (sizeof(wl_scan_results_t) - sizeof(wl_bss_info_t))
-/* wl_iscan_results status values */
-#define WL_SCAN_RESULTS_SUCCESS 0
-#define WL_SCAN_RESULTS_PARTIAL 1
-#define WL_SCAN_RESULTS_PENDING 2
-#define WL_SCAN_RESULTS_ABORTED 3
-#define WL_SCAN_RESULTS_NO_MEM 4
-
-/* Used in EXT_STA */
-#define DNGL_RXCTXT_SIZE 45
-
#define ESCAN_REQ_VERSION 1
#define WL_ISCAN_RESULTS_FIXED_SIZE \
(WL_SCAN_RESULTS_FIXED_SIZE + OFFSETOF(wl_iscan_results_t, results))
+#define SCANOL_PARAMS_VERSION 1
+
+typedef struct scanol_params {
+ uint32 version;
+ uint32 flags; /* offload scanning flags */
+ int32 active_time; /* -1 use default, dwell time per channel for active scanning */
+ int32 passive_time; /* -1 use default, dwell time per channel for passive scanning */
+ int32 idle_rest_time; /* -1 use default, time idle between scan cycle */
+ int32 idle_rest_time_multiplier;
+ int32 active_rest_time;
+ int32 active_rest_time_multiplier;
+ int32 scan_cycle_idle_rest_time;
+ int32 scan_cycle_idle_rest_multiplier;
+ int32 scan_cycle_active_rest_time;
+ int32 scan_cycle_active_rest_multiplier;
+ int32 max_rest_time;
+ int32 max_scan_cycles;
+ int32 nprobes; /* -1 use default, number of probes per channel */
+ int32 scan_start_delay;
+ uint32 nchannels;
+ uint32 ssid_count;
+ wlc_ssid_t ssidlist[1];
+} scanol_params_t;
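+/* Note (derived from the struct comments): the int32 timing fields use -1 to
+ * request the firmware default, and ssidlist[] is a variable-length tail with
+ * ssid_count entries, so the iovar buffer must be sized accordingly.
+ */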
+
typedef struct wl_probe_params {
wlc_ssid_t ssid;
struct ether_addr bssid;
uint16 vht_mcs[VHT_CAP_MCS_MAP_NSS_MAX]; /* supported mcs index bit map per nss */
} wl_rateset_args_t;
+#define TXBF_RATE_MCS_ALL 4
+#define TXBF_RATE_VHT_ALL 4
+#define TXBF_RATE_OFDM_ALL 8
+
+typedef struct wl_txbf_rateset {
+ uint8 txbf_rate_mcs[TXBF_RATE_MCS_ALL]; /* one for each stream */
+ uint8 txbf_rate_mcs_bcm[TXBF_RATE_MCS_ALL]; /* one for each stream */
+ uint16 txbf_rate_vht[TXBF_RATE_VHT_ALL]; /* one for each stream */
+ uint16 txbf_rate_vht_bcm[TXBF_RATE_VHT_ALL]; /* one for each stream */
+ uint8 txbf_rate_ofdm[TXBF_RATE_OFDM_ALL]; /* bitmap of ofdm rates that enables txbf */
+ uint8 txbf_rate_ofdm_bcm[TXBF_RATE_OFDM_ALL]; /* bitmap of ofdm rates that enables txbf */
+ uint8 txbf_rate_ofdm_cnt;
+ uint8 txbf_rate_ofdm_cnt_bcm;
+} wl_txbf_rateset_t;
+
+#define OFDM_RATE_MASK 0x0000007f
+typedef uint8 ofdm_rates_t;
+
+typedef struct wl_rates_info {
+ wl_rateset_t rs_tgt;
+ uint32 phy_type;
+ int32 bandtype;
+ uint8 cck_only;
+ uint8 rate_mask;
+ uint8 mcsallow;
+ uint8 bw;
+ uint8 txstreams;
+} wl_rates_info_t;
+
/* uint32 list */
typedef struct wl_uint32_list {
/* in - # of elements, out - # of entries */
typedef struct wl_assoc_params {
struct ether_addr bssid; /* 00:00:00:00:00:00: broadcast scan */
uint16 bssid_cnt; /* 0: use chanspec_num, and the single bssid,
- * otherwise count of chanspecs in chanspec_list
- * AND paired bssids following chanspec_list
- */
+ * otherwise count of chanspecs in chanspec_list
+ * AND paired bssids following chanspec_list
+ * also, chanspec_num has to be set to zero
+ * for bssid list to be used
+ */
int32 chanspec_num; /* 0: all available channels,
- * otherwise count of chanspecs in chanspec_list
- */
+ * otherwise count of chanspecs in chanspec_list
+ */
chanspec_t chanspec_list[1]; /* list of chanspecs */
} wl_assoc_params_t;
+
#define WL_ASSOC_PARAMS_FIXED_SIZE OFFSETOF(wl_assoc_params_t, chanspec_list)
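+/* Sizing sketch (illustrative): chanspec_list[] is the variable-length tail, so a
+ * request with n chanspecs (and no trailing bssid list) occupies
+ * WL_ASSOC_PARAMS_FIXED_SIZE + n * sizeof(chanspec_t) bytes.
+ */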
/* used for reassociation/roam to a specific BSSID and channel */
#define WL_EXTJOIN_PARAMS_FIXED_SIZE (OFFSETOF(wl_extjoin_params_t, assoc) + \
WL_JOIN_ASSOC_PARAMS_FIXED_SIZE)
-/* All builds use the new 11ac ratespec/chanspec */
-#undef D11AC_IOTYPES
-#define D11AC_IOTYPES
-
-#ifndef D11AC_IOTYPES
-
-/* defines used by the nrate iovar */
-#define NRATE_MCS_INUSE 0x00000080 /* MSC in use,indicates b0-6 holds an mcs */
-#define NRATE_RATE_MASK 0x0000007f /* rate/mcs value */
-#define NRATE_STF_MASK 0x0000ff00 /* stf mode mask: siso, cdd, stbc, sdm */
-#define NRATE_STF_SHIFT 8 /* stf mode shift */
-#define NRATE_OVERRIDE 0x80000000 /* bit indicates override both rate & mode */
-#define NRATE_OVERRIDE_MCS_ONLY 0x40000000 /* bit indicate to override mcs only */
-#define NRATE_SGI_MASK 0x00800000 /* sgi mode */
-#define NRATE_SGI_SHIFT 23 /* sgi mode */
-#define NRATE_LDPC_CODING 0x00400000 /* bit indicates adv coding in use */
-#define NRATE_LDPC_SHIFT 22 /* ldpc shift */
-
-#define NRATE_STF_SISO 0 /* stf mode SISO */
-#define NRATE_STF_CDD 1 /* stf mode CDD */
-#define NRATE_STF_STBC 2 /* stf mode STBC */
-#define NRATE_STF_SDM 3 /* stf mode SDM */
-
-#else /* D11AC_IOTYPES */
-
-/* WL_RSPEC defines for rate information */
-#define WL_RSPEC_RATE_MASK 0x000000FF /* rate or HT MCS value */
-#define WL_RSPEC_VHT_MCS_MASK 0x0000000F /* VHT MCS value */
-#define WL_RSPEC_VHT_NSS_MASK 0x000000F0 /* VHT Nss value */
-#define WL_RSPEC_VHT_NSS_SHIFT 4 /* VHT Nss value shift */
-#define WL_RSPEC_TXEXP_MASK 0x00000300
-#define WL_RSPEC_TXEXP_SHIFT 8
-#define WL_RSPEC_BW_MASK 0x00070000 /* bandwidth mask */
-#define WL_RSPEC_BW_SHIFT 16 /* bandwidth shift */
-#define WL_RSPEC_STBC 0x00100000 /* STBC encoding, Nsts = 2 x Nss */
-#define WL_RSPEC_TXBF 0x00200000 /* bit indicates TXBF mode */
-#define WL_RSPEC_LDPC 0x00400000 /* bit indicates adv coding in use */
-#define WL_RSPEC_SGI 0x00800000 /* Short GI mode */
-#define WL_RSPEC_ENCODING_MASK 0x03000000 /* Encoding of Rate/MCS field */
-#define WL_RSPEC_OVERRIDE_RATE 0x40000000 /* bit indicate to override mcs only */
-#define WL_RSPEC_OVERRIDE_MODE 0x80000000 /* bit indicates override both rate & mode */
-
-/* WL_RSPEC_ENCODING field defs */
-#define WL_RSPEC_ENCODE_RATE 0x00000000 /* Legacy rate is stored in RSPEC_RATE_MASK */
-#define WL_RSPEC_ENCODE_HT 0x01000000 /* HT MCS is stored in RSPEC_RATE_MASK */
-#define WL_RSPEC_ENCODE_VHT 0x02000000 /* VHT MCS and Nss is stored in RSPEC_RATE_MASK */
-
-/* WL_RSPEC_BW field defs */
-#define WL_RSPEC_BW_UNSPECIFIED 0
-#define WL_RSPEC_BW_20MHZ 0x00010000
-#define WL_RSPEC_BW_40MHZ 0x00020000
-#define WL_RSPEC_BW_80MHZ 0x00030000
-#define WL_RSPEC_BW_160MHZ 0x00040000
-
-/* Legacy defines for the nrate iovar */
-#define OLD_NRATE_MCS_INUSE 0x00000080 /* MSC in use,indicates b0-6 holds an mcs */
-#define OLD_NRATE_RATE_MASK 0x0000007f /* rate/mcs value */
-#define OLD_NRATE_STF_MASK 0x0000ff00 /* stf mode mask: siso, cdd, stbc, sdm */
-#define OLD_NRATE_STF_SHIFT 8 /* stf mode shift */
-#define OLD_NRATE_OVERRIDE 0x80000000 /* bit indicates override both rate & mode */
-#define OLD_NRATE_OVERRIDE_MCS_ONLY 0x40000000 /* bit indicate to override mcs only */
-#define OLD_NRATE_SGI 0x00800000 /* sgi mode */
-#define OLD_NRATE_LDPC_CODING 0x00400000 /* bit indicates adv coding in use */
-
-#define OLD_NRATE_STF_SISO 0 /* stf mode SISO */
-#define OLD_NRATE_STF_CDD 1 /* stf mode CDD */
-#define OLD_NRATE_STF_STBC 2 /* stf mode STBC */
-#define OLD_NRATE_STF_SDM 3 /* stf mode SDM */
-
-#endif /* D11AC_IOTYPES */
-
-#define ANTENNA_NUM_1 1 /* total number of antennas to be used */
-#define ANTENNA_NUM_2 2
-#define ANTENNA_NUM_3 3
-#define ANTENNA_NUM_4 4
-
-#define ANT_SELCFG_AUTO 0x80 /* bit indicates antenna sel AUTO */
-#define ANT_SELCFG_MASK 0x33 /* antenna configuration mask */
#define ANT_SELCFG_MAX 4 /* max number of antenna configurations */
-#define ANT_SELCFG_TX_UNICAST 0 /* unicast tx antenna configuration */
-#define ANT_SELCFG_RX_UNICAST 1 /* unicast rx antenna configuration */
-#define ANT_SELCFG_TX_DEF 2 /* default tx antenna configuration */
-#define ANT_SELCFG_RX_DEF 3 /* default rx antenna configuration */
-
#define MAX_STREAMS_SUPPORTED 4 /* max number of streams supported */
-
typedef struct {
uint8 ant_config[ANT_SELCFG_MAX]; /* antenna configuration */
uint8 num_antcfg; /* number of available antenna configurations */
} wlc_antselcfg_t;
-#define HIGHEST_SINGLE_STREAM_MCS 7 /* MCS values greater than this enable multiple streams */
-
-#define MAX_CCA_CHANNELS 38 /* Max number of 20 Mhz wide channels */
-#define MAX_CCA_SECS 60 /* CCA keeps this many seconds history */
-
-#define IBSS_MED 15 /* Mediom in-bss congestion percentage */
-#define IBSS_HI 25 /* Hi in-bss congestion percentage */
-#define OBSS_MED 12
-#define OBSS_HI 25
-#define INTERFER_MED 5
-#define INTERFER_HI 10
-
-#define CCA_FLAG_2G_ONLY 0x01 /* Return a channel from 2.4 Ghz band */
-#define CCA_FLAG_5G_ONLY 0x02 /* Return a channel from 2.4 Ghz band */
-#define CCA_FLAG_IGNORE_DURATION 0x04 /* Ignore dwell time for each channel */
-#define CCA_FLAGS_PREFER_1_6_11 0x10
-#define CCA_FLAG_IGNORE_INTERFER 0x20 /* do not exlude channel based on interfer level */
-
-#define CCA_ERRNO_BAND 1 /* After filtering for band pref, no choices left */
-#define CCA_ERRNO_DURATION 2 /* After filtering for duration, no choices left */
-#define CCA_ERRNO_PREF_CHAN 3 /* After filtering for chan pref, no choices left */
-#define CCA_ERRNO_INTERFER 4 /* After filtering for interference, no choices left */
-#define CCA_ERRNO_TOO_FEW 5 /* Only 1 channel was input */
-
typedef struct {
uint32 duration; /* millisecs spent sampling this channel */
uint32 congest_ibss; /* millisecs in our bss (presumably this traffic will */
cca_congest_t secs[1]; /* Data */
} cca_congest_channel_req_t;
-/* interference source detection and identification mode */
-#define ITFR_MODE_DISABLE 0 /* disable feature */
-#define ITFR_MODE_MANUAL_ENABLE 1 /* enable manual detection */
-#define ITFR_MODE_AUTO_ENABLE 2 /* enable auto detection */
/* interference sources */
enum interference_source {
uint32 source; /* last detected interference source */
uint32 timestamp; /* second timestamp on interferenced flag change */
} interference_source_rep_t;
-
-/* bit definitions for flags in interference source report */
-#define ITFR_INTERFERENCED 1 /* interference detected */
-#define ITFR_HOME_CHANNEL 2 /* home channel has interference */
-#define ITFR_NOISY_ENVIRONMENT 4 /* noisy environemnt so feature stopped */
-
#endif /* LINUX_POSTMOGRIFY_REMOVAL */
#define WLC_CNTRY_BUF_SZ 4 /* Country string is 3 bytes + NUL */
char country_abbrev[1];
} wl_country_list_t;
-#define WL_NUM_RPI_BINS 8
-#define WL_RM_TYPE_BASIC 1
-#define WL_RM_TYPE_CCA 2
-#define WL_RM_TYPE_RPI 3
-
-#define WL_RM_FLAG_PARALLEL (1<<0)
-
-#define WL_RM_FLAG_LATE (1<<1)
-#define WL_RM_FLAG_INCAPABLE (1<<2)
-#define WL_RM_FLAG_REFUSED (1<<3)
-
typedef struct wl_rm_req_elt {
int8 type;
int8 flags;
} sup_auth_status_t;
#endif /* LINUX_POSTMOGRIFY_REMOVAL */
-/* Enumerate crypto algorithms */
-#define CRYPTO_ALGO_OFF 0
-#define CRYPTO_ALGO_WEP1 1
-#define CRYPTO_ALGO_TKIP 2
-#define CRYPTO_ALGO_WEP128 3
-#define CRYPTO_ALGO_AES_CCM 4
-#define CRYPTO_ALGO_AES_OCB_MSDU 5
-#define CRYPTO_ALGO_AES_OCB_MPDU 6
-#if !defined(BCMCCX) && !defined(BCMEXTCCX)
-#define CRYPTO_ALGO_NALG 7
-#else
-#define CRYPTO_ALGO_CKIP 7
-#define CRYPTO_ALGO_CKIP_MMH 8
-#define CRYPTO_ALGO_WEP_MMH 9
-#define CRYPTO_ALGO_NALG 10
-#endif /* !BCMCCX && !BCMEXTCCX */
-#ifdef BCMWAPI_WPI
-#define CRYPTO_ALGO_SMS4 11
-#endif /* BCMWAPI_WPI */
-#define CRYPTO_ALGO_PMK 12 /* for 802.1x supp to set PMK before 4-way */
-#define CRYPTO_ALGO_BIP 13 /* 802.11w BIP (aes cmac) */
-
-#define WSEC_GEN_MIC_ERROR 0x0001
-#define WSEC_GEN_REPLAY 0x0002
-#define WSEC_GEN_ICV_ERROR 0x0004
-#define WSEC_GEN_MFP_ACT_ERROR 0x0008
-#define WSEC_GEN_MFP_DISASSOC_ERROR 0x0010
-#define WSEC_GEN_MFP_DEAUTH_ERROR 0x0020
-
-#define WL_SOFT_KEY (1 << 0) /* Indicates this key is using soft encrypt */
-#define WL_PRIMARY_KEY (1 << 1) /* Indicates this key is the primary (ie tx) key */
-#if defined(BCMCCX) || defined(BCMEXTCCX)
-#define WL_CKIP_KP (1 << 4) /* CMIC */
-#define WL_CKIP_MMH (1 << 5) /* CKIP */
-#else
-#define WL_KF_RES_4 (1 << 4) /* Reserved for backward compat */
-#define WL_KF_RES_5 (1 << 5) /* Reserved for backward compat */
-#endif /* BCMCCX || BCMEXTCCX */
-#define WL_IBSS_PEER_GROUP_KEY (1 << 6) /* Indicates a group key for a IBSS PEER */
-
typedef struct wl_wsec_key {
uint32 index; /* key index */
uint32 len; /* key length */
uint8 key[WSEC_MAX_PSK_LEN]; /* PMK material */
} wsec_pmk_t;
-/* wireless security bitvec */
-#define WEP_ENABLED 0x0001
-#define TKIP_ENABLED 0x0002
-#define AES_ENABLED 0x0004
-#define WSEC_SWFLAG 0x0008
-#ifdef BCMCCX
-#define CKIP_KP_ENABLED 0x0010
-#define CKIP_MIC_ENABLED 0x0020
-#endif /* BCMCCX */
-#define SES_OW_ENABLED 0x0040 /* to go into transition mode without setting wep */
-#ifdef BCMWAPI_WPI
-#define SMS4_ENABLED 0x0100
-#endif /* BCMWAPI_WPI */
-
-/* wsec macros for operating on the above definitions */
-#define WSEC_WEP_ENABLED(wsec) ((wsec) & WEP_ENABLED)
-#define WSEC_TKIP_ENABLED(wsec) ((wsec) & TKIP_ENABLED)
-#define WSEC_AES_ENABLED(wsec) ((wsec) & AES_ENABLED)
-
-#ifdef BCMCCX
-#define WSEC_CKIP_KP_ENABLED(wsec) ((wsec) & CKIP_KP_ENABLED)
-#define WSEC_CKIP_MIC_ENABLED(wsec) ((wsec) & CKIP_MIC_ENABLED)
-#define WSEC_CKIP_ENABLED(wsec) ((wsec) & (CKIP_KP_ENABLED|CKIP_MIC_ENABLED))
-
-#ifdef BCMWAPI_WPI
-#define WSEC_ENABLED(wsec) \
- ((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED | CKIP_KP_ENABLED | \
- CKIP_MIC_ENABLED | SMS4_ENABLED))
-#else /* BCMWAPI_WPI */
-#define WSEC_ENABLED(wsec) \
- ((wsec) & \
- (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED | CKIP_KP_ENABLED | CKIP_MIC_ENABLED))
-#endif /* BCMWAPI_WPI */
-#else /* defined BCMCCX */
-#ifdef BCMWAPI_WPI
-#define WSEC_ENABLED(wsec) ((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED | SMS4_ENABLED))
-#else /* BCMWAPI_WPI */
-#define WSEC_ENABLED(wsec) ((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED))
-#endif /* BCMWAPI_WPI */
-#endif /* BCMCCX */
-#define WSEC_SES_OW_ENABLED(wsec) ((wsec) & SES_OW_ENABLED)
-#ifdef BCMWAPI_WAI
-#define WSEC_SMS4_ENABLED(wsec) ((wsec) & SMS4_ENABLED)
-#endif /* BCMWAPI_WAI */
-
-#define MFP_CAPABLE 0x0200
-#define MFP_REQUIRED 0x0400
-#define MFP_SHA256 0x0800 /* a special configuration for STA for WIFI test tool */
-
-/* WPA authentication mode bitvec */
-#define WPA_AUTH_DISABLED 0x0000 /* Legacy (i.e., non-WPA) */
-#define WPA_AUTH_NONE 0x0001 /* none (IBSS) */
-#define WPA_AUTH_UNSPECIFIED 0x0002 /* over 802.1x */
-#define WPA_AUTH_PSK 0x0004 /* Pre-shared key */
-#if defined(BCMCCX) || defined(BCMEXTCCX)
-#define WPA_AUTH_CCKM 0x0008 /* CCKM */
-#define WPA2_AUTH_CCKM 0x0010 /* CCKM2 */
-#endif /* BCMCCX || BCMEXTCCX */
-/* #define WPA_AUTH_8021X 0x0020 */ /* 802.1x, reserved */
-#define WPA2_AUTH_UNSPECIFIED 0x0040 /* over 802.1x */
-#define WPA2_AUTH_PSK 0x0080 /* Pre-shared key */
-#define BRCM_AUTH_PSK 0x0100 /* BRCM specific PSK */
-#define BRCM_AUTH_DPT 0x0200 /* DPT PSK without group keys */
-#if defined(BCMWAPI_WAI) || defined(BCMWAPI_WPI)
-#define WPA_AUTH_WAPI 0x0400
-#define WAPI_AUTH_NONE WPA_AUTH_NONE /* none (IBSS) */
-#define WAPI_AUTH_UNSPECIFIED 0x0400 /* over AS */
-#define WAPI_AUTH_PSK 0x0800 /* Pre-shared key */
-#endif /* BCMWAPI_WAI || BCMWAPI_WPI */
-#define WPA2_AUTH_MFP 0x1000 /* MFP (11w) in contrast to CCX */
-#define WPA2_AUTH_TPK 0x2000 /* TDLS Peer Key */
-#define WPA2_AUTH_FT 0x4000 /* Fast Transition. */
-#define WPA_AUTH_PFN_ANY 0xffffffff /* for PFN, match only ssid */
-
-/* pmkid */
-#define MAXPMKID 16
-
typedef struct _pmkid {
struct ether_addr BSSID;
uint8 PMKID[WPA2_PMKID_LEN];
pmkid_cand_t pmkid_cand[1];
} pmkid_cand_list_t;
+#define WL_STA_ANT_MAX 4 /* max possible rx antennas */
+
#ifndef LINUX_POSTMOGRIFY_REMOVAL
typedef struct wl_assoc_info {
uint32 req_len;
struct dot11_assoc_resp resp;
} wl_assoc_info_t;
-/* flags */
-#define WLC_ASSOC_REQ_IS_REASSOC 0x01 /* assoc req was actually a reassoc */
-
typedef struct wl_led_info {
uint32 index; /* led index */
uint32 behavior;
uint16 buf[1];
} srom_rw_t;
+#define CISH_FLAG_PCIECIS (1 << 15) /* write CIS format bit for PCIe CIS */
/* similar cis (srom or otp) struct [iovar: may not be aligned] */
typedef struct {
- uint32 source; /* cis source */
+ uint16 source; /* cis source */
+ uint16 flags; /* flags */
uint32 byteoff; /* byte offset */
uint32 nbytes; /* number of bytes */
/* data follows here */
} cis_rw_t;
-#define WLC_CIS_DEFAULT 0 /* built-in default */
-#define WLC_CIS_SROM 1 /* source is sprom */
-#define WLC_CIS_OTP 2 /* source is otp */
-
/* R_REG and W_REG struct passed through ioctl */
typedef struct {
uint32 byteoff; /* byte offset of the field in d11regs_t */
/* Structure used by GET/SET_ATTEN ioctls - it controls power in b/g-band */
/* PCL - Power Control Loop */
-/* current gain setting is replaced by user input */
-#define WL_ATTEN_APP_INPUT_PCL_OFF 0 /* turn off PCL, apply supplied input */
-#define WL_ATTEN_PCL_ON 1 /* turn on PCL */
-/* current gain setting is maintained */
-#define WL_ATTEN_PCL_OFF 2 /* turn off PCL. */
-
typedef struct {
uint16 auto_ctrl; /* WL_ATTEN_XX */
uint16 bb; /* Baseband attenuation */
#define WL_WME_TX_PARAMS_IO_BYTES (sizeof(wme_tx_params_t) * AC_COUNT)
typedef struct wl_plc_nodelist {
- uint count; /* Number of nodes */
+ uint32 count; /* Number of nodes */
struct _node {
struct ether_addr ea; /* Node ether address */
uint32 node_type; /* Node type */
typedef struct wl_plc_params {
uint32 cmd; /* Command */
- bool plc_failover; /* PLC failover control/status */
+ uint8 plc_failover; /* PLC failover control/status */
struct ether_addr node_ea; /* Node ether address */
uint32 cost; /* Link cost or mac cost */
} wl_plc_params_t;
-#define PLC_CMD_FAILOVER 1
-#define PLC_CMD_MAC_COST 2
-#define PLC_CMD_LINK_COST 3
-#define PLC_CMD_NODE_LIST 4
-
-#define NODE_TYPE_UNKNOWN 0 /* Unknown link */
-#define NODE_TYPE_WIFI_ONLY 1 /* Pure Wireless STA node */
-#define NODE_TYPE_PLC_ONLY 2 /* Pure PLC only node */
-#define NODE_TYPE_WIFI_PLC 3 /* WiFi PLC capable node */
-
-/* defines used by poweridx iovar - it controls power in a-band */
-/* current gain setting is maintained */
-#define WL_PWRIDX_PCL_OFF -2 /* turn off PCL. */
-#define WL_PWRIDX_PCL_ON -1 /* turn on PCL */
-#define WL_PWRIDX_LOWER_LIMIT -2 /* lower limit */
-#define WL_PWRIDX_UPPER_LIMIT 63 /* upper limit */
-/* value >= 0 causes
- * - input to be set to that value
- * - PCL to be off
- */
-
/* Used to get specific link/ac parameters */
typedef struct {
- int ac;
+ int32 ac;
uint8 val;
struct ether_addr ea;
} link_val_t;
-#define BCM_MAC_STATUS_INDICATION (0x40010200L)
+
+#define WL_PM_MUTE_TX_VER 1
+
+typedef struct wl_pm_mute_tx {
+ uint16 version; /* version */
+ uint16 len; /* length */
+ uint16 deadline; /* deadline timer (in milliseconds) */
+ uint8 enable; /* set to 1 to enable mode; set to 0 to disable it */
+} wl_pm_mute_tx_t;
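+/* Usage assumption (not confirmed by this header): callers presumably fill
+ * version with WL_PM_MUTE_TX_VER and len with sizeof(wl_pm_mute_tx_t) before
+ * issuing the corresponding iovar.
+ */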
+
typedef struct {
uint16 ver; /* version of this struct */
wl_rateset_t rateset; /* rateset in use */
uint32 in; /* seconds elapsed since associated */
uint32 listen_interval_inms; /* Min Listen interval in ms for this STA */
- uint32 tx_pkts; /* # of packets transmitted */
- uint32 tx_failures; /* # of packets failed */
+ uint32 tx_pkts; /* # of user packets transmitted (unicast) */
+ uint32 tx_failures; /* # of user packets failed */
uint32 rx_ucast_pkts; /* # of unicast packets received */
uint32 rx_mcast_pkts; /* # of multicast packets received */
- uint32 tx_rate; /* Rate of last successful tx frame */
+ uint32 tx_rate; /* Rate used by last tx frame */
uint32 rx_rate; /* Rate of last successful rx frame */
uint32 rx_decrypt_succeeds; /* # of packet decrypted successfully */
uint32 rx_decrypt_failures; /* # of packet decrypted unsuccessfully */
+ uint32 tx_tot_pkts; /* # of user tx pkts (ucast + mcast) */
+ uint32 rx_tot_pkts; /* # of data packets recvd (uni + mcast) */
+ uint32 tx_mcast_pkts; /* # of mcast pkts txed */
+ uint64 tx_tot_bytes; /* data bytes txed (ucast + mcast) */
+ uint64 rx_tot_bytes; /* data bytes recvd (ucast + mcast) */
+ uint64 tx_ucast_bytes; /* data bytes txed (ucast) */
+ uint64 tx_mcast_bytes; /* # data bytes txed (mcast) */
+ uint64 rx_ucast_bytes; /* data bytes recvd (ucast) */
+ uint64 rx_mcast_bytes; /* data bytes recvd (mcast) */
+ int8 rssi[WL_STA_ANT_MAX]; /* average rssi per antenna
+ * of data frames
+ */
+ int8 nf[WL_STA_ANT_MAX]; /* per antenna noise floor */
+ uint16 aid; /* association ID */
+ uint16 ht_capabilities; /* advertised ht caps */
+ uint16 vht_flags; /* converted vht flags */
+ uint32 tx_pkts_retried; /* # of frames where a retry was
+ * necessary
+ */
+ uint32 tx_pkts_retry_exhausted; /* # of user frames where a retry
+ * was exhausted
+ */
+ int8 rx_lastpkt_rssi[WL_STA_ANT_MAX]; /* Per antenna RSSI of last
+ * received data frame.
+ */
+	/* TX WLAN retry/failure statistics:
+	 * separated into host-requested frames and frames generated locally by WLAN.
+	 * Only unicast frames, for which retries/failures can be counted, are included.
+	 */
+ uint32 tx_pkts_total; /* # user frames sent successfully */
+ uint32 tx_pkts_retries; /* # user frames retries */
+ uint32 tx_pkts_fw_total; /* # FW generated sent successfully */
+ uint32 tx_pkts_fw_retries; /* # retries for FW generated frames */
+ uint32 tx_pkts_fw_retry_exhausted; /* # FW generated where a retry
+ * was exhausted
+ */
+ uint32 rx_pkts_retried; /* # rx with retry bit set */
+ uint32 tx_rate_fallback; /* lowest fallback TX rate */
} sta_info_t;
-#define WL_OLD_STAINFO_SIZE OFFSETOF(sta_info_t, tx_pkts)
-
-#define WL_STA_VER 3
-
-/* Flags for sta_info_t indicating properties of STA */
-#define WL_STA_BRCM 0x1 /* Running a Broadcom driver */
-#define WL_STA_WME 0x2 /* WMM association */
-#define WL_STA_UNUSED 0x4
-#define WL_STA_AUTHE 0x8 /* Authenticated */
-#define WL_STA_ASSOC 0x10 /* Associated */
-#define WL_STA_AUTHO 0x20 /* Authorized */
-#define WL_STA_WDS 0x40 /* Wireless Distribution System */
-#define WL_STA_WDS_LINKUP 0x80 /* WDS traffic/probes flowing properly */
-#define WL_STA_PS 0x100 /* STA is in power save mode from AP's viewpoint */
-#define WL_STA_APSD_BE 0x200 /* APSD delv/trigger for AC_BE is default enabled */
-#define WL_STA_APSD_BK 0x400 /* APSD delv/trigger for AC_BK is default enabled */
-#define WL_STA_APSD_VI 0x800 /* APSD delv/trigger for AC_VI is default enabled */
-#define WL_STA_APSD_VO 0x1000 /* APSD delv/trigger for AC_VO is default enabled */
-#define WL_STA_N_CAP 0x2000 /* STA 802.11n capable */
-#define WL_STA_SCBSTATS 0x4000 /* Per STA debug stats */
-
-#define WL_WDS_LINKUP WL_STA_WDS_LINKUP /* deprecated */
-
-/* Values for TX Filter override mode */
-#define WLC_TXFILTER_OVERRIDE_DISABLED 0
-#define WLC_TXFILTER_OVERRIDE_ENABLED 1
+#define WL_OLD_STAINFO_SIZE OFFSETOF(sta_info_t, tx_tot_pkts)
+
+#define WL_STA_VER 4
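+/* Note: WL_OLD_STAINFO_SIZE now equals the offset of tx_tot_pkts, the first of
+ * the fields added in this change, i.e. the size of the pre-extension structure;
+ * WL_STA_VER is bumped to 4, presumably to match the extended layout.
+ */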
#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+#define WLC_NUMRATES 16 /* max # of rates in a rateset */
+
+typedef struct wlc_rateset {
+ uint32 count; /* number of rates in rates[] */
+ uint8 rates[WLC_NUMRATES]; /* rates in 500kbps units w/hi bit set if basic */
+ uint8 htphy_membership; /* HT PHY Membership */
+ uint8 mcs[MCSSET_LEN]; /* supported mcs index bit map */
+ uint16 vht_mcsmap; /* supported vht mcs nss bit map */
+} wlc_rateset_t;
+
/* Used to get specific STA parameters */
typedef struct {
uint32 val;
} channel_info_t;
/* For ioctls that take a list of MAC addresses */
-struct maclist {
+typedef struct maclist {
uint count; /* number of MAC addresses */
struct ether_addr ea[1]; /* variable length array of MAC addresses */
-};
+} maclist_t;
#ifndef LINUX_POSTMOGRIFY_REMOVAL
/* get pkt count struct passed through ioctl */
uint32 tsf_timer[2][2]; /* Start and End time for 8bytes value */
} wl_mac_ratehisto_res_t; /* MAC Specific Rate Histogram Response */
-/* Values for TX Filter override mode */
-#define WLC_TXFILTER_OVERRIDE_DISABLED 0
-#define WLC_TXFILTER_OVERRIDE_ENABLED 1
-
-#define WL_IOCTL_ACTION_GET 0x0
-#define WL_IOCTL_ACTION_SET 0x1
-#define WL_IOCTL_ACTION_OVL_IDX_MASK 0x1e
-#define WL_IOCTL_ACTION_OVL_RSV 0x20
-#define WL_IOCTL_ACTION_OVL 0x40
-#define WL_IOCTL_ACTION_MASK 0x7e
-#define WL_IOCTL_ACTION_OVL_SHIFT 1
-
#endif /* LINUX_POSTMOGRIFY_REMOVAL */
/* Linux network driver ioctl encoding */
uint needed; /* bytes needed (optional) */
} wl_ioctl_t;
-#ifndef LINUX_POSTMOGRIFY_REMOVAL
+#ifdef CONFIG_COMPAT
+typedef struct compat_wl_ioctl {
+ uint cmd; /* common ioctl definition */
+ uint32 buf; /* pointer to user buffer */
+ uint len; /* length of user buffer */
+ uint8 set; /* 1=set IOCTL; 0=query IOCTL */
+ uint used; /* bytes read or written (optional) */
+ uint needed; /* bytes needed (optional) */
+} compat_wl_ioctl_t;
+#endif /* CONFIG_COMPAT */
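+/* Note (assumption): compat_wl_ioctl_t mirrors wl_ioctl_t but carries the user
+ * buffer pointer as a 32-bit value, so a 64-bit kernel built with CONFIG_COMPAT
+ * can service ioctls issued by 32-bit user space.
+ */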
+
+#define WL_NUM_RATES_CCK 4 /* 1, 2, 5.5, 11 Mbps */
+#define WL_NUM_RATES_OFDM 8 /* 6, 9, 12, 18, 24, 36, 48, 54 Mbps SISO/CDD */
+#define WL_NUM_RATES_MCS_1STREAM 8 /* MCS 0-7 1-stream rates - SISO/CDD/STBC/MCS */
+#define WL_NUM_RATES_EXTRA_VHT 2 /* Additional VHT 11AC rates */
+#define WL_NUM_RATES_VHT 10
+#define WL_NUM_RATES_MCS32 1
-/* reference to wl_ioctl_t struct used by usermode driver */
-#define ioctl_subtype set /* subtype param */
-#define ioctl_pid used /* pid param */
-#define ioctl_status needed /* status param */
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
/*
* Structure for passing hardware and software
uint16 fifo;
uint16 size;
} wl_txfifo_sz_t;
-/* magic pattern used for mismatch driver and wl */
-#define WL_TXFIFO_SZ_MAGIC 0xa5a5
/* Transfer info about an IOVar from the driver */
/* Max supported IOV name size in bytes, + 1 for nul termination */
char name[WLC_IOV_NAME_LEN];
} wlc_iov_trx_t;
-/* check this magic number */
-#define WLC_IOCTL_MAGIC 0x14e46c77
-
/* bump this number if you change the ioctl interface */
-#ifdef D11AC_IOTYPES
#define WLC_IOCTL_VERSION 2
#define WLC_IOCTL_VERSION_LEGACY_IOTYPES 1
-#else
-#define WLC_IOCTL_VERSION 1
-#endif /* D11AC_IOTYPES */
-#endif /* LINUX_POSTMOGRIFY_REMOVAL */
-
-#define WLC_IOCTL_MAXLEN 8192 /* max length ioctl buffer required */
-#define WLC_IOCTL_SMLEN 256 /* "small" length ioctl buffer required */
-#define WLC_IOCTL_MEDLEN 1536 /* "med" length ioctl buffer required */
-#if defined(LCNCONF) || defined(LCN40CONF)
-#define WLC_SAMPLECOLLECT_MAXLEN 8192 /* Max Sample Collect buffer */
-#else
-#define WLC_SAMPLECOLLECT_MAXLEN 10240 /* Max Sample Collect buffer for two cores */
-#endif
-
-/* common ioctl definitions */
-#define WLC_GET_MAGIC 0
-#define WLC_GET_VERSION 1
-#define WLC_UP 2
-#define WLC_DOWN 3
-#define WLC_GET_LOOP 4
-#define WLC_SET_LOOP 5
-#define WLC_DUMP 6
-#define WLC_GET_MSGLEVEL 7
-#define WLC_SET_MSGLEVEL 8
-#define WLC_GET_PROMISC 9
-#define WLC_SET_PROMISC 10
-/* #define WLC_OVERLAY_IOCTL 11 */ /* not supported */
-#define WLC_GET_RATE 12
-#define WLC_GET_MAX_RATE 13
-#define WLC_GET_INSTANCE 14
-/* #define WLC_GET_FRAG 15 */ /* no longer supported */
-/* #define WLC_SET_FRAG 16 */ /* no longer supported */
-/* #define WLC_GET_RTS 17 */ /* no longer supported */
-/* #define WLC_SET_RTS 18 */ /* no longer supported */
-#define WLC_GET_INFRA 19
-#define WLC_SET_INFRA 20
-#define WLC_GET_AUTH 21
-#define WLC_SET_AUTH 22
-#define WLC_GET_BSSID 23
-#define WLC_SET_BSSID 24
-#define WLC_GET_SSID 25
-#define WLC_SET_SSID 26
-#define WLC_RESTART 27
-#define WLC_TERMINATED 28
-/* #define WLC_DUMP_SCB 28 */ /* no longer supported */
-#define WLC_GET_CHANNEL 29
-#define WLC_SET_CHANNEL 30
-#define WLC_GET_SRL 31
-#define WLC_SET_SRL 32
-#define WLC_GET_LRL 33
-#define WLC_SET_LRL 34
-#define WLC_GET_PLCPHDR 35
-#define WLC_SET_PLCPHDR 36
-#define WLC_GET_RADIO 37
-#define WLC_SET_RADIO 38
-#define WLC_GET_PHYTYPE 39
-#define WLC_DUMP_RATE 40
-#define WLC_SET_RATE_PARAMS 41
-#define WLC_GET_FIXRATE 42
-#define WLC_SET_FIXRATE 43
-/* #define WLC_GET_WEP 42 */ /* no longer supported */
-/* #define WLC_SET_WEP 43 */ /* no longer supported */
-#define WLC_GET_KEY 44
-#define WLC_SET_KEY 45
-#define WLC_GET_REGULATORY 46
-#define WLC_SET_REGULATORY 47
-#define WLC_GET_PASSIVE_SCAN 48
-#define WLC_SET_PASSIVE_SCAN 49
-#define WLC_SCAN 50
-#define WLC_SCAN_RESULTS 51
-#define WLC_DISASSOC 52
-#define WLC_REASSOC 53
-#define WLC_GET_ROAM_TRIGGER 54
-#define WLC_SET_ROAM_TRIGGER 55
-#define WLC_GET_ROAM_DELTA 56
-#define WLC_SET_ROAM_DELTA 57
-#define WLC_GET_ROAM_SCAN_PERIOD 58
-#define WLC_SET_ROAM_SCAN_PERIOD 59
-#define WLC_EVM 60 /* diag */
-#define WLC_GET_TXANT 61
-#define WLC_SET_TXANT 62
-#define WLC_GET_ANTDIV 63
-#define WLC_SET_ANTDIV 64
-/* #define WLC_GET_TXPWR 65 */ /* no longer supported */
-/* #define WLC_SET_TXPWR 66 */ /* no longer supported */
-#define WLC_GET_CLOSED 67
-#define WLC_SET_CLOSED 68
-#define WLC_GET_MACLIST 69
-#define WLC_SET_MACLIST 70
-#define WLC_GET_RATESET 71
-#define WLC_SET_RATESET 72
-/* #define WLC_GET_LOCALE 73 */ /* no longer supported */
-#define WLC_LONGTRAIN 74
-#define WLC_GET_BCNPRD 75
-#define WLC_SET_BCNPRD 76
-#define WLC_GET_DTIMPRD 77
-#define WLC_SET_DTIMPRD 78
-#define WLC_GET_SROM 79
-#define WLC_SET_SROM 80
-#define WLC_GET_WEP_RESTRICT 81
-#define WLC_SET_WEP_RESTRICT 82
-#define WLC_GET_COUNTRY 83
-#define WLC_SET_COUNTRY 84
-#define WLC_GET_PM 85
-#define WLC_SET_PM 86
-#define WLC_GET_WAKE 87
-#define WLC_SET_WAKE 88
-/* #define WLC_GET_D11CNTS 89 */ /* -> "counters" iovar */
-#define WLC_GET_FORCELINK 90 /* ndis only */
-#define WLC_SET_FORCELINK 91 /* ndis only */
-#define WLC_FREQ_ACCURACY 92 /* diag */
-#define WLC_CARRIER_SUPPRESS 93 /* diag */
-#define WLC_GET_PHYREG 94
-#define WLC_SET_PHYREG 95
-#define WLC_GET_RADIOREG 96
-#define WLC_SET_RADIOREG 97
-#define WLC_GET_REVINFO 98
-#define WLC_GET_UCANTDIV 99
-#define WLC_SET_UCANTDIV 100
-#define WLC_R_REG 101
-#define WLC_W_REG 102
-/* #define WLC_DIAG_LOOPBACK 103 old tray diag */
-/* #define WLC_RESET_D11CNTS 104 */ /* -> "reset_d11cnts" iovar */
-#define WLC_GET_MACMODE 105
-#define WLC_SET_MACMODE 106
-#define WLC_GET_MONITOR 107
-#define WLC_SET_MONITOR 108
-#define WLC_GET_GMODE 109
-#define WLC_SET_GMODE 110
-#define WLC_GET_LEGACY_ERP 111
-#define WLC_SET_LEGACY_ERP 112
-#define WLC_GET_RX_ANT 113
-#define WLC_GET_CURR_RATESET 114 /* current rateset */
-#define WLC_GET_SCANSUPPRESS 115
-#define WLC_SET_SCANSUPPRESS 116
-#define WLC_GET_AP 117
-#define WLC_SET_AP 118
-#define WLC_GET_EAP_RESTRICT 119
-#define WLC_SET_EAP_RESTRICT 120
-#define WLC_SCB_AUTHORIZE 121
-#define WLC_SCB_DEAUTHORIZE 122
-#define WLC_GET_WDSLIST 123
-#define WLC_SET_WDSLIST 124
-#define WLC_GET_ATIM 125
-#define WLC_SET_ATIM 126
-#define WLC_GET_RSSI 127
-#define WLC_GET_PHYANTDIV 128
-#define WLC_SET_PHYANTDIV 129
-#define WLC_AP_RX_ONLY 130
-#define WLC_GET_TX_PATH_PWR 131
-#define WLC_SET_TX_PATH_PWR 132
-#define WLC_GET_WSEC 133
-#define WLC_SET_WSEC 134
-#define WLC_GET_PHY_NOISE 135
-#define WLC_GET_BSS_INFO 136
-#define WLC_GET_PKTCNTS 137
-#define WLC_GET_LAZYWDS 138
-#define WLC_SET_LAZYWDS 139
-#define WLC_GET_BANDLIST 140
-
-#ifndef LINUX_POSTMOGRIFY_REMOVAL
-#define WLC_GET_BAND 141
-#define WLC_SET_BAND 142
-#define WLC_SCB_DEAUTHENTICATE 143
-#define WLC_GET_SHORTSLOT 144
-#define WLC_GET_SHORTSLOT_OVERRIDE 145
-#define WLC_SET_SHORTSLOT_OVERRIDE 146
-#define WLC_GET_SHORTSLOT_RESTRICT 147
-#define WLC_SET_SHORTSLOT_RESTRICT 148
-#define WLC_GET_GMODE_PROTECTION 149
-#define WLC_GET_GMODE_PROTECTION_OVERRIDE 150
-#define WLC_SET_GMODE_PROTECTION_OVERRIDE 151
-#define WLC_UPGRADE 152
-/* #define WLC_GET_MRATE 153 */ /* no longer supported */
-/* #define WLC_SET_MRATE 154 */ /* no longer supported */
-#define WLC_GET_IGNORE_BCNS 155
-#define WLC_SET_IGNORE_BCNS 156
-#define WLC_GET_SCB_TIMEOUT 157
-#define WLC_SET_SCB_TIMEOUT 158
-#define WLC_GET_ASSOCLIST 159
-#define WLC_GET_CLK 160
-#define WLC_SET_CLK 161
-#define WLC_GET_UP 162
-#define WLC_OUT 163
-#define WLC_GET_WPA_AUTH 164
-#define WLC_SET_WPA_AUTH 165
-#define WLC_GET_UCFLAGS 166
-#define WLC_SET_UCFLAGS 167
-#define WLC_GET_PWRIDX 168
-#define WLC_SET_PWRIDX 169
-#define WLC_GET_TSSI 170
-#define WLC_GET_SUP_RATESET_OVERRIDE 171
-#define WLC_SET_SUP_RATESET_OVERRIDE 172
-/* #define WLC_SET_FAST_TIMER 173 */ /* no longer supported */
-/* #define WLC_GET_FAST_TIMER 174 */ /* no longer supported */
-/* #define WLC_SET_SLOW_TIMER 175 */ /* no longer supported */
-/* #define WLC_GET_SLOW_TIMER 176 */ /* no longer supported */
-/* #define WLC_DUMP_PHYREGS 177 */ /* no longer supported */
-#define WLC_GET_PROTECTION_CONTROL 178
-#define WLC_SET_PROTECTION_CONTROL 179
-#endif /* LINUX_POSTMOGRIFY_REMOVAL */
-#define WLC_GET_PHYLIST 180
-#ifndef LINUX_POSTMOGRIFY_REMOVAL
-#define WLC_ENCRYPT_STRENGTH 181 /* ndis only */
-#define WLC_DECRYPT_STATUS 182 /* ndis only */
-#define WLC_GET_KEY_SEQ 183
-#define WLC_GET_SCAN_CHANNEL_TIME 184
-#define WLC_SET_SCAN_CHANNEL_TIME 185
-#define WLC_GET_SCAN_UNASSOC_TIME 186
-#define WLC_SET_SCAN_UNASSOC_TIME 187
-#define WLC_GET_SCAN_HOME_TIME 188
-#define WLC_SET_SCAN_HOME_TIME 189
-#define WLC_GET_SCAN_NPROBES 190
-#define WLC_SET_SCAN_NPROBES 191
-#define WLC_GET_PRB_RESP_TIMEOUT 192
-#define WLC_SET_PRB_RESP_TIMEOUT 193
-#define WLC_GET_ATTEN 194
-#define WLC_SET_ATTEN 195
-#define WLC_GET_SHMEM 196 /* diag */
-#define WLC_SET_SHMEM 197 /* diag */
-/* #define WLC_GET_GMODE_PROTECTION_CTS 198 */ /* no longer supported */
-/* #define WLC_SET_GMODE_PROTECTION_CTS 199 */ /* no longer supported */
-#define WLC_SET_WSEC_TEST 200
-#endif /* LINUX_POSTMOGRIFY_REMOVAL */
-#define WLC_SCB_DEAUTHENTICATE_FOR_REASON 201
-#ifndef LINUX_POSTMOGRIFY_REMOVAL
-#define WLC_TKIP_COUNTERMEASURES 202
-#define WLC_GET_PIOMODE 203
-#define WLC_SET_PIOMODE 204
-#define WLC_SET_ASSOC_PREFER 205
-#define WLC_GET_ASSOC_PREFER 206
-#define WLC_SET_ROAM_PREFER 207
-#define WLC_GET_ROAM_PREFER 208
-#define WLC_SET_LED 209
-#define WLC_GET_LED 210
-#define WLC_GET_INTERFERENCE_MODE 211
-#define WLC_SET_INTERFERENCE_MODE 212
-#define WLC_GET_CHANNEL_QA 213
-#define WLC_START_CHANNEL_QA 214
-#define WLC_GET_CHANNEL_SEL 215
-#define WLC_START_CHANNEL_SEL 216
-#endif /* LINUX_POSTMOGRIFY_REMOVAL */
-#define WLC_GET_VALID_CHANNELS 217
-#define WLC_GET_FAKEFRAG 218
-#define WLC_SET_FAKEFRAG 219
-#define WLC_GET_PWROUT_PERCENTAGE 220
-#define WLC_SET_PWROUT_PERCENTAGE 221
-#define WLC_SET_BAD_FRAME_PREEMPT 222
-#define WLC_GET_BAD_FRAME_PREEMPT 223
-#define WLC_SET_LEAP_LIST 224
-#define WLC_GET_LEAP_LIST 225
-#define WLC_GET_CWMIN 226
-#define WLC_SET_CWMIN 227
-#define WLC_GET_CWMAX 228
-#define WLC_SET_CWMAX 229
-#define WLC_GET_WET 230
-#define WLC_SET_WET 231
-#define WLC_GET_PUB 232
-/* #define WLC_SET_GLACIAL_TIMER 233 */ /* no longer supported */
-/* #define WLC_GET_GLACIAL_TIMER 234 */ /* no longer supported */
-#define WLC_GET_KEY_PRIMARY 235
-#define WLC_SET_KEY_PRIMARY 236
-
-#ifndef LINUX_POSTMOGRIFY_REMOVAL
-
-/* #define WLC_DUMP_RADIOREGS 237 */ /* no longer supported */
-#define WLC_GET_ACI_ARGS 238
-#define WLC_SET_ACI_ARGS 239
-#define WLC_UNSET_CALLBACK 240
-#define WLC_SET_CALLBACK 241
-#define WLC_GET_RADAR 242
-#define WLC_SET_RADAR 243
-#define WLC_SET_SPECT_MANAGMENT 244
-#define WLC_GET_SPECT_MANAGMENT 245
-#define WLC_WDS_GET_REMOTE_HWADDR 246 /* handled in wl_linux.c/wl_vx.c */
-#define WLC_WDS_GET_WPA_SUP 247
-#define WLC_SET_CS_SCAN_TIMER 248
-#define WLC_GET_CS_SCAN_TIMER 249
-#define WLC_MEASURE_REQUEST 250
-#define WLC_INIT 251
-#define WLC_SEND_QUIET 252
-#define WLC_KEEPALIVE 253
-#define WLC_SEND_PWR_CONSTRAINT 254
-#define WLC_UPGRADE_STATUS 255
-#define WLC_CURRENT_PWR 256
-#define WLC_GET_SCAN_PASSIVE_TIME 257
-#define WLC_SET_SCAN_PASSIVE_TIME 258
-#define WLC_LEGACY_LINK_BEHAVIOR 259
-#define WLC_GET_CHANNELS_IN_COUNTRY 260
-#define WLC_GET_COUNTRY_LIST 261
-#endif /* LINUX_POSTMOGRIFY_REMOVAL */
-#define WLC_GET_VAR 262 /* get value of named variable */
-#define WLC_SET_VAR 263 /* set named variable to value */
-#ifndef LINUX_POSTMOGRIFY_REMOVAL
-#define WLC_NVRAM_GET 264 /* deprecated */
-#define WLC_NVRAM_SET 265
-#define WLC_NVRAM_DUMP 266
-#define WLC_REBOOT 267
-#endif /* LINUX_POSTMOGRIFY_REMOVAL */
-#define WLC_SET_WSEC_PMK 268
-#ifndef LINUX_POSTMOGRIFY_REMOVAL
-#define WLC_GET_AUTH_MODE 269
-#define WLC_SET_AUTH_MODE 270
-#define WLC_GET_WAKEENTRY 271
-#define WLC_SET_WAKEENTRY 272
-#define WLC_NDCONFIG_ITEM 273 /* currently handled in wl_oid.c */
-#define WLC_NVOTPW 274
-#define WLC_OTPW 275
-#define WLC_IOV_BLOCK_GET 276
-#define WLC_IOV_MODULES_GET 277
-#define WLC_SOFT_RESET 278
-#define WLC_GET_ALLOW_MODE 279
-#define WLC_SET_ALLOW_MODE 280
-#define WLC_GET_DESIRED_BSSID 281
-#define WLC_SET_DESIRED_BSSID 282
-#define WLC_DISASSOC_MYAP 283
-#define WLC_GET_NBANDS 284 /* for Dongle EXT_STA support */
-#define WLC_GET_BANDSTATES 285 /* for Dongle EXT_STA support */
-#define WLC_GET_WLC_BSS_INFO 286 /* for Dongle EXT_STA support */
-#define WLC_GET_ASSOC_INFO 287 /* for Dongle EXT_STA support */
-#define WLC_GET_OID_PHY 288 /* for Dongle EXT_STA support */
-#define WLC_SET_OID_PHY 289 /* for Dongle EXT_STA support */
-#define WLC_SET_ASSOC_TIME 290 /* for Dongle EXT_STA support */
-#define WLC_GET_DESIRED_SSID 291 /* for Dongle EXT_STA support */
-#define WLC_GET_CHANSPEC 292 /* for Dongle EXT_STA support */
-#define WLC_GET_ASSOC_STATE 293 /* for Dongle EXT_STA support */
-#define WLC_SET_PHY_STATE 294 /* for Dongle EXT_STA support */
-#define WLC_GET_SCAN_PENDING 295 /* for Dongle EXT_STA support */
-#define WLC_GET_SCANREQ_PENDING 296 /* for Dongle EXT_STA support */
-#define WLC_GET_PREV_ROAM_REASON 297 /* for Dongle EXT_STA support */
-#define WLC_SET_PREV_ROAM_REASON 298 /* for Dongle EXT_STA support */
-#define WLC_GET_BANDSTATES_PI 299 /* for Dongle EXT_STA support */
-#define WLC_GET_PHY_STATE 300 /* for Dongle EXT_STA support */
-#define WLC_GET_BSS_WPA_RSN 301 /* for Dongle EXT_STA support */
-#define WLC_GET_BSS_WPA2_RSN 302 /* for Dongle EXT_STA support */
-#define WLC_GET_BSS_BCN_TS 303 /* for Dongle EXT_STA support */
-#define WLC_GET_INT_DISASSOC 304 /* for Dongle EXT_STA support */
-#define WLC_SET_NUM_PEERS 305 /* for Dongle EXT_STA support */
-#define WLC_GET_NUM_BSS 306 /* for Dongle EXT_STA support */
-#define WLC_PHY_SAMPLE_COLLECT 307 /* phy sample collect mode */
-/* #define WLC_UM_PRIV 308 */ /* Deprecated: usermode driver */
-#define WLC_GET_CMD 309
-/* #define WLC_LAST 310 */ /* Never used - can be reused */
-#define WLC_SET_INTERFERENCE_OVERRIDE_MODE 311 /* set inter mode override */
-#define WLC_GET_INTERFERENCE_OVERRIDE_MODE 312 /* get inter mode override */
-/* #define WLC_GET_WAI_RESTRICT 313 */ /* for WAPI, deprecated use iovar instead */
-/* #define WLC_SET_WAI_RESTRICT 314 */ /* for WAPI, deprecated use iovar instead */
-/* #define WLC_SET_WAI_REKEY 315 */ /* for WAPI, deprecated use iovar instead */
-#define WLC_SET_NAT_CONFIG 316 /* for configuring NAT filter driver */
-#define WLC_GET_NAT_STATE 317
-#define WLC_GET_RSSI_QDB 318 /* qdB portion of the RSSI */
-#define WLC_LAST 319
-
-#ifndef EPICTRL_COOKIE
-#define EPICTRL_COOKIE 0xABADCEDE
-#endif
-
-/* vx wlc ioctl's offset */
-#define CMN_IOCTL_OFF 0x180
-
-/*
- * custom OID support
- *
- * 0xFF - implementation specific OID
- * 0xE4 - first byte of Broadcom PCI vendor ID
- * 0x14 - second byte of Broadcom PCI vendor ID
- * 0xXX - the custom OID number
- */
-
-/* begin 0x1f values beyond the start of the ET driver range. */
-#define WL_OID_BASE 0xFFE41420
-
-/* NDIS overrides */
-#define OID_WL_GETINSTANCE (WL_OID_BASE + WLC_GET_INSTANCE)
-#define OID_WL_GET_FORCELINK (WL_OID_BASE + WLC_GET_FORCELINK)
-#define OID_WL_SET_FORCELINK (WL_OID_BASE + WLC_SET_FORCELINK)
-#define OID_WL_ENCRYPT_STRENGTH (WL_OID_BASE + WLC_ENCRYPT_STRENGTH)
-#define OID_WL_DECRYPT_STATUS (WL_OID_BASE + WLC_DECRYPT_STATUS)
-#define OID_LEGACY_LINK_BEHAVIOR (WL_OID_BASE + WLC_LEGACY_LINK_BEHAVIOR)
-#define OID_WL_NDCONFIG_ITEM (WL_OID_BASE + WLC_NDCONFIG_ITEM)
-
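/* Illustrative sketch (not part of the original header): the OID_* overrides above
 * are all formed as WL_OID_BASE plus a wlc ioctl number, so the ioctl can be
 * recovered by subtraction. The helper name below is hypothetical.
 */
static int wl_oid_to_ioctl(unsigned int oid)
{
	return (int)(oid - WL_OID_BASE);	/* e.g. OID_WL_GETINSTANCE -> WLC_GET_INSTANCE */
}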
-/* EXT_STA Dongle suuport */
-#define OID_STA_CHANSPEC (WL_OID_BASE + WLC_GET_CHANSPEC)
-#define OID_STA_NBANDS (WL_OID_BASE + WLC_GET_NBANDS)
-#define OID_STA_GET_PHY (WL_OID_BASE + WLC_GET_OID_PHY)
-#define OID_STA_SET_PHY (WL_OID_BASE + WLC_SET_OID_PHY)
-#define OID_STA_ASSOC_TIME (WL_OID_BASE + WLC_SET_ASSOC_TIME)
-#define OID_STA_DESIRED_SSID (WL_OID_BASE + WLC_GET_DESIRED_SSID)
-#define OID_STA_SET_PHY_STATE (WL_OID_BASE + WLC_SET_PHY_STATE)
-#define OID_STA_SCAN_PENDING (WL_OID_BASE + WLC_GET_SCAN_PENDING)
-#define OID_STA_SCANREQ_PENDING (WL_OID_BASE + WLC_GET_SCANREQ_PENDING)
-#define OID_STA_GET_ROAM_REASON (WL_OID_BASE + WLC_GET_PREV_ROAM_REASON)
-#define OID_STA_SET_ROAM_REASON (WL_OID_BASE + WLC_SET_PREV_ROAM_REASON)
-#define OID_STA_GET_PHY_STATE (WL_OID_BASE + WLC_GET_PHY_STATE)
-#define OID_STA_INT_DISASSOC (WL_OID_BASE + WLC_GET_INT_DISASSOC)
-#define OID_STA_SET_NUM_PEERS (WL_OID_BASE + WLC_SET_NUM_PEERS)
-#define OID_STA_GET_NUM_BSS (WL_OID_BASE + WLC_GET_NUM_BSS)
-
-/* NAT filter driver support */
-#define OID_NAT_SET_CONFIG (WL_OID_BASE + WLC_SET_NAT_CONFIG)
-#define OID_NAT_GET_STATE (WL_OID_BASE + WLC_GET_NAT_STATE)
-
-#define WL_DECRYPT_STATUS_SUCCESS 1
-#define WL_DECRYPT_STATUS_FAILURE 2
-#define WL_DECRYPT_STATUS_UNKNOWN 3
-
-/* allows user-mode app to poll the status of USB image upgrade */
-#define WLC_UPGRADE_SUCCESS 0
-#define WLC_UPGRADE_PENDING 1
#ifdef CONFIG_USBRNDIS_RETAIL
/* struct passed in for WLC_NDCONFIG_ITEM */
#endif
-/* WLC_GET_AUTH, WLC_SET_AUTH values */
-#define WL_AUTH_OPEN_SYSTEM 0 /* d11 open authentication */
-#define WL_AUTH_SHARED_KEY 1 /* d11 shared authentication */
-#define WL_AUTH_OPEN_SHARED 2 /* try open, then shared if open failed w/rc 13 */
-
-#endif /* LINUX_POSTMOGRIFY_REMOVAL */
-
-/* Bit masks for radio disabled status - returned by WL_GET_RADIO */
-#define WL_RADIO_SW_DISABLE (1<<0)
-#define WL_RADIO_HW_DISABLE (1<<1)
-#define WL_RADIO_MPC_DISABLE (1<<2)
-#define WL_RADIO_COUNTRY_DISABLE (1<<3) /* some countries don't support any channel */
-
-#define WL_SPURAVOID_OFF 0
-#define WL_SPURAVOID_ON1 1
-#define WL_SPURAVOID_ON2 2
-
-
-#define WL_4335_SPURAVOID_ON1 1
-#define WL_4335_SPURAVOID_ON2 2
-#define WL_4335_SPURAVOID_ON3 3
-#define WL_4335_SPURAVOID_ON4 4
-#define WL_4335_SPURAVOID_ON5 5
-#define WL_4335_SPURAVOID_ON6 6
-#define WL_4335_SPURAVOID_ON7 7
-#define WL_4335_SPURAVOID_ON8 8
-#define WL_4335_SPURAVOID_ON9 9
-
-/* Override bit for WLC_SET_TXPWR. if set, ignore other level limits */
-#define WL_TXPWR_OVERRIDE (1U<<31)
-#define WL_TXPWR_NEG (1U<<30)
-
-#ifndef LINUX_POSTMOGRIFY_REMOVAL
#define WL_PHY_PAVARS_LEN 32 /* Phy type, Band range, chain, a1[0], b0[0], b1[0] ... */
#define WL_PHY_PAVAR_VER 1 /* pavars version */
uint16 mcspo[8];
} wl_po_t;
-/* a large TX Power as an init value to factor out of MIN() calculations,
- * keep low enough to fit in an int8, units are .25 dBm
- */
-#define WLC_TXPWR_MAX (127) /* ~32 dBm = 1,500 mW */
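/* Illustrative sketch (not part of the original header): using WLC_TXPWR_MAX as the
 * neutral starting value for a MIN() scan over per-rate targets, as the comment above
 * describes (units of .25 dBm). The helper and its arguments are hypothetical.
 */
static int8 wl_min_target_pwr(const int8 *targets, int count)
{
	int8 min_pwr = WLC_TXPWR_MAX;	/* large init value factors out of the MIN() */
	int i;

	for (i = 0; i < count; i++)
		if (targets[i] < min_pwr)
			min_pwr = targets[i];
	return min_pwr;
}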
-
-/* "diag" iovar argument and error code */
-#define WL_DIAG_INTERRUPT 1 /* d11 loopback interrupt test */
-#define WL_DIAG_LOOPBACK 2 /* d11 loopback data test */
-#define WL_DIAG_MEMORY 3 /* d11 memory test */
-#define WL_DIAG_LED 4 /* LED test */
-#define WL_DIAG_REG 5 /* d11/phy register test */
-#define WL_DIAG_SROM 6 /* srom read/crc test */
-#define WL_DIAG_DMA 7 /* DMA test */
-#define WL_DIAG_LOOPBACK_EXT 8 /* enhenced d11 loopback data test */
-
-#define WL_DIAGERR_SUCCESS 0
-#define WL_DIAGERR_FAIL_TO_RUN 1 /* unable to run requested diag */
-#define WL_DIAGERR_NOT_SUPPORTED 2 /* diag requested is not supported */
-#define WL_DIAGERR_INTERRUPT_FAIL 3 /* loopback interrupt test failed */
-#define WL_DIAGERR_LOOPBACK_FAIL 4 /* loopback data test failed */
-#define WL_DIAGERR_SROM_FAIL 5 /* srom read failed */
-#define WL_DIAGERR_SROM_BADCRC 6 /* srom crc failed */
-#define WL_DIAGERR_REG_FAIL 7 /* d11/phy register test failed */
-#define WL_DIAGERR_MEMORY_FAIL 8 /* d11 memory test failed */
-#define WL_DIAGERR_NOMEM 9 /* diag test failed due to no memory */
-#define WL_DIAGERR_DMA_FAIL 10 /* DMA test failed */
-
-#define WL_DIAGERR_MEMORY_TIMEOUT 11 /* d11 memory test didn't finish in time */
-#define WL_DIAGERR_MEMORY_BADPATTERN 12 /* d11 memory test result in bad pattern */
-
-/* band types */
-#define WLC_BAND_AUTO 0 /* auto-select */
-#define WLC_BAND_5G 1 /* 5 Ghz */
-#define WLC_BAND_2G 2 /* 2.4 Ghz */
-#define WLC_BAND_ALL 3 /* all bands */
-
-/* band range returned by band_range iovar */
-#define WL_CHAN_FREQ_RANGE_2G 0
-#define WL_CHAN_FREQ_RANGE_5GL 1
-#define WL_CHAN_FREQ_RANGE_5GM 2
-#define WL_CHAN_FREQ_RANGE_5GH 3
-
-#define WL_CHAN_FREQ_RANGE_5GLL_5BAND 4
-#define WL_CHAN_FREQ_RANGE_5GLH_5BAND 5
-#define WL_CHAN_FREQ_RANGE_5GML_5BAND 6
-#define WL_CHAN_FREQ_RANGE_5GMH_5BAND 7
-#define WL_CHAN_FREQ_RANGE_5GH_5BAND 8
-
-#define WL_CHAN_FREQ_RANGE_5G_BAND0 1
-#define WL_CHAN_FREQ_RANGE_5G_BAND1 2
-#define WL_CHAN_FREQ_RANGE_5G_BAND2 3
-#define WL_CHAN_FREQ_RANGE_5G_BAND3 4
-
-#define WL_CHAN_FREQ_RANGE_5G_4BAND 5
-#endif /* LINUX_POSTMOGRIFY_REMOVAL */
-
-/* phy types (returned by WLC_GET_PHYTPE) */
-#define WLC_PHY_TYPE_A 0
-#define WLC_PHY_TYPE_B 1
-#define WLC_PHY_TYPE_G 2
-#define WLC_PHY_TYPE_N 4
-#define WLC_PHY_TYPE_LP 5
-#define WLC_PHY_TYPE_SSN 6
-#define WLC_PHY_TYPE_HT 7
-#define WLC_PHY_TYPE_LCN 8
-#define WLC_PHY_TYPE_LCN40 10
-#define WLC_PHY_TYPE_AC 11
-#define WLC_PHY_TYPE_NULL 0xf
-
-/* Values for PM */
-#define PM_OFF 0
-#define PM_MAX 1
-#define PM_FAST 2
-#define PM_FORCE_OFF 3 /* use this bit to force PM off even bt is active */
-
-#ifndef LINUX_POSTMOGRIFY_REMOVAL
-/* MAC list modes */
-#define WLC_MACMODE_DISABLED 0 /* MAC list disabled */
-#define WLC_MACMODE_DENY 1 /* Deny specified (i.e. allow unspecified) */
-#define WLC_MACMODE_ALLOW 2 /* Allow specified (i.e. deny unspecified) */
+#define WL_NUM_RPCALVARS 5 /* number of rpcal vars */
-/*
- * 54g modes (basic bits may still be overridden)
- *
- * GMODE_LEGACY_B Rateset: 1b, 2b, 5.5, 11
- * Preamble: Long
- * Shortslot: Off
- * GMODE_AUTO Rateset: 1b, 2b, 5.5b, 11b, 18, 24, 36, 54
- * Extended Rateset: 6, 9, 12, 48
- * Preamble: Long
- * Shortslot: Auto
- * GMODE_ONLY Rateset: 1b, 2b, 5.5b, 11b, 18, 24b, 36, 54
- * Extended Rateset: 6b, 9, 12b, 48
- * Preamble: Short required
- * Shortslot: Auto
- * GMODE_B_DEFERRED Rateset: 1b, 2b, 5.5b, 11b, 18, 24, 36, 54
- * Extended Rateset: 6, 9, 12, 48
- * Preamble: Long
- * Shortslot: On
- * GMODE_PERFORMANCE Rateset: 1b, 2b, 5.5b, 6b, 9, 11b, 12b, 18, 24b, 36, 48, 54
- * Preamble: Short required
- * Shortslot: On and required
- * GMODE_LRS Rateset: 1b, 2b, 5.5b, 11b
- * Extended Rateset: 6, 9, 12, 18, 24, 36, 48, 54
- * Preamble: Long
- * Shortslot: Auto
- */
-#define GMODE_LEGACY_B 0
-#define GMODE_AUTO 1
-#define GMODE_ONLY 2
-#define GMODE_B_DEFERRED 3
-#define GMODE_PERFORMANCE 4
-#define GMODE_LRS 5
-#define GMODE_MAX 6
-
-/* values for PLCPHdr_override */
-#define WLC_PLCP_AUTO -1
-#define WLC_PLCP_SHORT 0
-#define WLC_PLCP_LONG 1
-
-/* values for g_protection_override and n_protection_override */
-#define WLC_PROTECTION_AUTO -1
-#define WLC_PROTECTION_OFF 0
-#define WLC_PROTECTION_ON 1
-#define WLC_PROTECTION_MMHDR_ONLY 2
-#define WLC_PROTECTION_CTS_ONLY 3
-
-/* values for g_protection_control and n_protection_control */
-#define WLC_PROTECTION_CTL_OFF 0
-#define WLC_PROTECTION_CTL_LOCAL 1
-#define WLC_PROTECTION_CTL_OVERLAP 2
-
-/* values for n_protection */
-#define WLC_N_PROTECTION_OFF 0
-#define WLC_N_PROTECTION_OPTIONAL 1
-#define WLC_N_PROTECTION_20IN40 2
-#define WLC_N_PROTECTION_MIXEDMODE 3
-
-/* values for n_preamble_type */
-#define WLC_N_PREAMBLE_MIXEDMODE 0
-#define WLC_N_PREAMBLE_GF 1
-#define WLC_N_PREAMBLE_GF_BRCM 2
-
-/* values for band specific 40MHz capabilities (deprecated) */
-#define WLC_N_BW_20ALL 0
-#define WLC_N_BW_40ALL 1
-#define WLC_N_BW_20IN2G_40IN5G 2
-
-#define WLC_BW_20MHZ_BIT (1<<0)
-#define WLC_BW_40MHZ_BIT (1<<1)
-#define WLC_BW_80MHZ_BIT (1<<2)
-
-/* Bandwidth capabilities */
-#define WLC_BW_CAP_20MHZ (WLC_BW_20MHZ_BIT)
-#define WLC_BW_CAP_40MHZ (WLC_BW_40MHZ_BIT|WLC_BW_20MHZ_BIT)
-#define WLC_BW_CAP_80MHZ (WLC_BW_80MHZ_BIT|WLC_BW_40MHZ_BIT|WLC_BW_20MHZ_BIT)
-#define WLC_BW_CAP_UNRESTRICTED 0xFF
-
-#define WL_BW_CAP_20MHZ(bw_cap) (((bw_cap) & WLC_BW_20MHZ_BIT) ? TRUE : FALSE)
-#define WL_BW_CAP_40MHZ(bw_cap) (((bw_cap) & WLC_BW_40MHZ_BIT) ? TRUE : FALSE)
-#define WL_BW_CAP_80MHZ(bw_cap) (((bw_cap) & WLC_BW_80MHZ_BIT) ? TRUE : FALSE)
-
-/* values to force tx/rx chain */
-#define WLC_N_TXRX_CHAIN0 0
-#define WLC_N_TXRX_CHAIN1 1
-
-/* bitflags for SGI support (sgi_rx iovar) */
-#define WLC_N_SGI_20 0x01
-#define WLC_N_SGI_40 0x02
-#define WLC_VHT_SGI_80 0x04
-
-/* when sgi_tx==WLC_SGI_ALL, bypass rate selection, enable sgi for all mcs */
-#define WLC_SGI_ALL 0x02
-
-#define LISTEN_INTERVAL 10
-/* interference mitigation options */
-#define INTERFERE_OVRRIDE_OFF -1 /* interference override off */
-#define INTERFERE_NONE 0 /* off */
-#define NON_WLAN 1 /* foreign/non 802.11 interference, no auto detect */
-#define WLAN_MANUAL 2 /* ACI: no auto detection */
-#define WLAN_AUTO 3 /* ACI: auto detect */
-#define WLAN_AUTO_W_NOISE 4 /* ACI: auto - detect and non 802.11 interference */
-#define AUTO_ACTIVE (1 << 7) /* Auto is currently active */
-
-/* AP environment */
-#define AP_ENV_DETECT_NOT_USED 0 /* We aren't using AP environment detection */
-#define AP_ENV_DENSE 1 /* "Corporate" or other AP dense environment */
-#define AP_ENV_SPARSE 2 /* "Home" or other sparse environment */
-#define AP_ENV_INDETERMINATE 3 /* AP environment hasn't been identified */
+typedef struct wl_rpcal {
+ uint16 value;
+ uint16 update;
+} wl_rpcal_t;
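/* Illustrative sketch (not part of the original header): WL_NUM_RPCALVARS above
 * presumably sizes an array of these value/update pairs, one per rpcal variable.
 */
wl_rpcal_t rpcal_vals[WL_NUM_RPCALVARS];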
typedef struct wl_aci_args {
int enter_aci_thresh; /* Trigger level to start detecting ACI */
uint16 nphy_noise_crsidx_decr;
} wl_aci_args_t;
-#define TRIGGER_NOW 0
-#define TRIGGER_CRS 0x01
-#define TRIGGER_CRSDEASSERT 0x02
-#define TRIGGER_GOODFCS 0x04
-#define TRIGGER_BADFCS 0x08
-#define TRIGGER_BADPLCP 0x10
-#define TRIGGER_CRSGLITCH 0x20
#define WL_ACI_ARGS_LEGACY_LENGTH 16 /* bytes of pre NPHY aci args */
#define WL_SAMPLECOLLECT_T_VERSION 2 /* version of wl_samplecollect_args_t struct */
typedef struct wl_samplecollect_args {
uint32 pre_dur;
uint32 post_dur;
uint8 gpio_sel;
- bool downsamp;
- bool be_deaf;
- bool agc; /* loop from init gain and going down */
- bool filter; /* override high pass corners to lowest */
+ uint8 downsamp;
+ uint8 be_deaf;
+ uint8 agc; /* loop from init gain and going down */
+ uint8 filter; /* override high pass corners to lowest */
/* add'l version 2 fields */
uint8 trigger_state;
uint8 module_sel1;
uint32 gpioCapMask;
} wl_samplecollect_args_t;
-#define WL_SAMPLEDATA_HEADER_TYPE 1
-#define WL_SAMPLEDATA_HEADER_SIZE 80 /* sample collect header size (bytes) */
-#define WL_SAMPLEDATA_TYPE 2
-#define WL_SAMPLEDATA_SEQ 0xff /* sequence # */
-#define WL_SAMPLEDATA_MORE_DATA 0x100 /* more data mask */
 #define WL_SAMPLEDATA_T_VERSION	1	/* version of wl_sampledata_t struct */
/* version for unpacked sample data, int16 {(I,Q),Core(0..N)} */
#define WL_SAMPLEDATA_T_VERSION_SPEC_AN 2
uint32 flag; /* bit def */
} wl_sampledata_t;
-/* WL_OTA START */
-
-#define WL_OTA_ARG_PARSE_BLK_SIZE 1200
-#define WL_OTA_TEST_MAX_NUM_RATE 30
-#define WL_OTA_TEST_MAX_NUM_SEQ 100
+/* WL_OTA START */
/* OTA Test Status */
enum {
- WL_OTA_TEST_IDLE, /* Default Idle state */
- WL_OTA_TEST_ACTIVE, /* Test Running */
- WL_OTA_TEST_SUCCESS, /* Successfully Finished Test */
- WL_OTA_TEST_FAIL /* Test Failed in the Middle */
+ WL_OTA_TEST_IDLE = 0, /* Default Idle state */
+ WL_OTA_TEST_ACTIVE = 1, /* Test Running */
+ WL_OTA_TEST_SUCCESS = 2, /* Successfully Finished Test */
+ WL_OTA_TEST_FAIL = 3 /* Test Failed in the Middle */
};
/* OTA SYNC Status */
enum {
- WL_OTA_SYNC_IDLE, /* Idle state */
- WL_OTA_SYNC_ACTIVE, /* Waiting for Sync */
- WL_OTA_SYNC_FAIL /* Sync pkt not recieved */
+ WL_OTA_SYNC_IDLE = 0, /* Idle state */
+ WL_OTA_SYNC_ACTIVE = 1, /* Waiting for Sync */
+	WL_OTA_SYNC_FAIL = 2		/* Sync pkt not received */
};
/* Various error states dut can get stuck during test */
enum {
WL_OTA_SKIP_TEST_CAL_FAIL = 1, /* Phy calibration failed */
- WL_OTA_SKIP_TEST_SYNCH_FAIL, /* Sync Packet not recieved */
- WL_OTA_SKIP_TEST_FILE_DWNLD_FAIL, /* Cmd flow file download failed */
- WL_OTA_SKIP_TEST_NO_TEST_FOUND, /* No test found in Flow file */
- WL_OTA_SKIP_TEST_WL_NOT_UP, /* WL UP failed */
+	WL_OTA_SKIP_TEST_SYNCH_FAIL = 2,	/* Sync Packet not received */
+ WL_OTA_SKIP_TEST_FILE_DWNLD_FAIL = 3, /* Cmd flow file download failed */
+ WL_OTA_SKIP_TEST_NO_TEST_FOUND = 4, /* No test found in Flow file */
+ WL_OTA_SKIP_TEST_WL_NOT_UP = 5, /* WL UP failed */
WL_OTA_SKIP_TEST_UNKNOWN_CALL /* Unintentional scheduling on ota test */
};
/* Differentiator for ota_tx and ota_rx */
enum {
- WL_OTA_TEST_TX, /* ota_tx */
- WL_OTA_TEST_RX /* ota_rx */
+ WL_OTA_TEST_TX = 0, /* ota_tx */
+	WL_OTA_TEST_RX = 1		/* ota_rx */
};
/* Catch 3 modes of operation: 20Mhz, 40Mhz, 20 in 40 Mhz */
enum {
- WL_OTA_TEST_BW_20_IN_40MHZ, /* 20 in 40 operation */
- WL_OTA_TEST_BW_20MHZ, /* 20 Mhz operation */
- WL_OTA_TEST_BW_40MHZ /* full 40Mhz operation */
+ WL_OTA_TEST_BW_20_IN_40MHZ = 0, /* 20 in 40 operation */
+ WL_OTA_TEST_BW_20MHZ = 1, /* 20 Mhz operation */
+ WL_OTA_TEST_BW_40MHZ = 2 /* full 40Mhz operation */
};
typedef struct ota_rate_info {
uint8 rate_cnt; /* Total number of rates */
uint8 cur_test; /* test phase */
uint8 chan; /* channel */
uint8 bw; /* bandwidth */
- char control_band; /* control band */
+ uint8 control_band; /* control band */
uint8 stf_mode; /* stf mode */
ota_rate_info_t rt_info; /* Rate info */
ota_packetengine_t pkteng; /* packeteng info */
typedef struct wl_ota_test_vector {
wl_ota_test_args_t test_arg[WL_OTA_TEST_MAX_NUM_SEQ]; /* Test argument struct */
uint16 test_cnt; /* Total no of test */
- bool file_dwnld_valid; /* File successfully downloaded */
+ uint8 file_dwnld_valid; /* File successfully downloaded */
uint8 sync_timeout; /* sync packet timeout */
int8 sync_fail_action; /* sync fail action */
struct ether_addr sync_mac; /* macaddress for sync pkt */
 	int8 skip_test_reason;		/* skip test reason */
wl_ota_test_args_t test_arg; /* cur test arg details */
uint16 test_cnt; /* total no of test downloaded */
- bool file_dwnld_valid; /* file successfully downloaded ? */
+ uint8 file_dwnld_valid; /* file successfully downloaded ? */
uint8 sync_timeout; /* sync timeout */
int8 sync_fail_action; /* sync fail action */
struct ether_addr sync_mac; /* macaddress for sync pkt */
/* wl_radar_args_t */
typedef struct {
- int npulses; /* required number of pulses at n * t_int */
- int ncontig; /* required number of pulses at t_int */
- int min_pw; /* minimum pulse width (20 MHz clocks) */
- int max_pw; /* maximum pulse width (20 MHz clocks) */
+ int npulses; /* required number of pulses at n * t_int */
+ int ncontig; /* required number of pulses at t_int */
+ int min_pw; /* minimum pulse width (20 MHz clocks) */
+ int max_pw; /* maximum pulse width (20 MHz clocks) */
uint16 thresh0; /* Radar detection, thresh 0 */
uint16 thresh1; /* Radar detection, thresh 1 */
uint16 blank; /* Radar detection, blank control */
uint16 thresh1_40_lo; /* Radar detection, thresh 1 (range 5250-5350MHz) for BW 40MHz */
uint16 thresh0_80_lo; /* Radar detection, thresh 0 (range 5250-5350MHz) for BW 80MHz */
uint16 thresh1_80_lo; /* Radar detection, thresh 1 (range 5250-5350MHz) for BW 80MHz */
- uint16 thresh0_160_lo; /* Radar detection, thresh 0 (range 5250-5350MHz) for BW 160MHz */
- uint16 thresh1_160_lo; /* Radar detection, thresh 1 (range 5250-5350MHz) for BW 160MHz */
uint16 thresh0_20_hi; /* Radar detection, thresh 0 (range 5470-5725MHz) for BW 20MHz */
uint16 thresh1_20_hi; /* Radar detection, thresh 1 (range 5470-5725MHz) for BW 20MHz */
uint16 thresh0_40_hi; /* Radar detection, thresh 0 (range 5470-5725MHz) for BW 40MHz */
uint16 thresh1_40_hi; /* Radar detection, thresh 1 (range 5470-5725MHz) for BW 40MHz */
uint16 thresh0_80_hi; /* Radar detection, thresh 0 (range 5470-5725MHz) for BW 80MHz */
uint16 thresh1_80_hi; /* Radar detection, thresh 1 (range 5470-5725MHz) for BW 80MHz */
+#ifdef WL11AC160
+ uint16 thresh0_160_lo; /* Radar detection, thresh 0 (range 5250-5350MHz) for BW 160MHz */
+ uint16 thresh1_160_lo; /* Radar detection, thresh 1 (range 5250-5350MHz) for BW 160MHz */
uint16 thresh0_160_hi; /* Radar detection, thresh 0 (range 5470-5725MHz) for BW 160MHz */
uint16 thresh1_160_hi; /* Radar detection, thresh 1 (range 5470-5725MHz) for BW 160MHz */
+#endif /* WL11AC160 */
} wl_radar_thr_t;
#define WL_RADAR_THR_VERSION 2
-#define WL_THRESHOLD_LO_BAND 70 /* range from 5250MHz - 5350MHz */
-
-/* radar iovar SET defines */
-#define WL_RADAR_DETECTOR_OFF 0 /* radar detector off */
-#define WL_RADAR_DETECTOR_ON 1 /* radar detector on */
-#define WL_RADAR_SIMULATED 2 /* force radar detector to declare
- * detection once
- */
-#define WL_RSSI_ANT_VERSION 1 /* current version of wl_rssi_ant_t */
-#define WL_ANT_RX_MAX 2 /* max 2 receive antennas */
-#define WL_ANT_HT_RX_MAX 3 /* max 3 receive antennas/cores */
-#define WL_ANT_IDX_1 0 /* antenna index 1 */
-#define WL_ANT_IDX_2 1 /* antenna index 2 */
-
-#ifndef WL_RSSI_ANT_MAX
-#define WL_RSSI_ANT_MAX 4 /* max possible rx antennas */
-#elif WL_RSSI_ANT_MAX != 4
-#error "WL_RSSI_ANT_MAX does not match"
-#endif
/* RSSI per antenna */
typedef struct {
int8 rssi_ant[WL_RSSI_ANT_MAX]; /* rssi per antenna */
} wl_rssi_ant_t;
-/* dfs_status iovar-related defines */
-
-/* cac - channel availability check,
- * ism - in-service monitoring
- * csa - channel switching announcement
- */
-
-/* cac state values */
-#define WL_DFS_CACSTATE_IDLE 0 /* state for operating in non-radar channel */
-#define WL_DFS_CACSTATE_PREISM_CAC 1 /* CAC in progress */
-#define WL_DFS_CACSTATE_ISM 2 /* ISM in progress */
-#define WL_DFS_CACSTATE_CSA 3 /* csa */
-#define WL_DFS_CACSTATE_POSTISM_CAC 4 /* ISM CAC */
-#define WL_DFS_CACSTATE_PREISM_OOC 5 /* PREISM OOC */
-#define WL_DFS_CACSTATE_POSTISM_OOC 6 /* POSTISM OOC */
-#define WL_DFS_CACSTATES 7 /* this many states exist */
-
/* data structure used in 'dfs_status' wl interface, which is used to query dfs status */
typedef struct {
uint state; /* noted by WL_DFS_CACSTATE_XX. */
uint16 pad;
} wl_dfs_status_t;
+/* data structure used in 'radar_status' wl interface, which is used to query radar detection status */
+typedef struct {
+ bool detected;
+ int count;
+ bool pretended;
+ uint32 radartype;
+ uint32 timenow;
+ uint32 timefromL;
+ int lp_csect_single;
+ int detected_pulse_index;
+ int nconsecq_pulses;
+ chanspec_t ch;
+ int pw[10];
+ int intv[10];
+ int fm[10];
+} wl_radar_status_t;
+
#define NUM_PWRCTRL_RATES 12
typedef struct {
} tx_power_legacy2_t;
/* TX Power index defines */
-#define WL_NUM_RATES_CCK 4 /* 1, 2, 5.5, 11 Mbps */
-#define WL_NUM_RATES_OFDM 8 /* 6, 9, 12, 18, 24, 36, 48, 54 Mbps SISO/CDD */
-#define WL_NUM_RATES_MCS_1STREAM 8 /* MCS 0-7 1-stream rates - SISO/CDD/STBC/MCS */
-#define WL_NUM_RATES_EXTRA_VHT 2 /* Additional VHT 11AC rates */
-#define WL_NUM_RATES_VHT 10
-#define WL_NUM_RATES_MCS32 1
-
#define WLC_NUM_RATES_CCK WL_NUM_RATES_CCK
#define WLC_NUM_RATES_OFDM WL_NUM_RATES_OFDM
#define WLC_NUM_RATES_MCS_1_STREAM WL_NUM_RATES_MCS_1STREAM
#define WL_NUM_2x2_ELEMENTS 4
#define WL_NUM_3x3_ELEMENTS 6
-typedef struct txppr {
- /* start of 20MHz tx power limits */
- int8 b20_1x1dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */
- int8 b20_1x1ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM transmission */
- int8 b20_1x1mcs0[WL_NUM_RATES_MCS_1STREAM]; /* SISO MCS 0-7 */
-
- int8 b20_1x2dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */
- int8 b20_1x2cdd_ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM CDD transmission */
- int8 b20_1x2cdd_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* CDD MCS 0-7 */
- int8 b20_2x2stbc_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* STBC MCS 0-7 */
- int8 b20_2x2sdm_mcs8[WL_NUM_RATES_MCS_1STREAM]; /* MCS 8-15 */
-
- int8 b20_1x3dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */
- int8 b20_1x3cdd_ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM CDD transmission */
- int8 b20_1x3cdd_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* 1 Nsts to 3 Tx Chain */
- int8 b20_2x3stbc_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* STBC MCS 0-7 */
- int8 b20_2x3sdm_mcs8[WL_NUM_RATES_MCS_1STREAM]; /* 2 Nsts to 3 Tx Chain */
- int8 b20_3x3sdm_mcs16[WL_NUM_RATES_MCS_1STREAM]; /* 3 Nsts to 3 Tx Chain */
-
- int8 b20_1x1vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1 */
- int8 b20_1x2cdd_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_CDD1 */
- int8 b20_2x2stbc_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_STBC */
- int8 b20_2x2sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS2 */
- int8 b20_1x3cdd_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_CDD2 */
- int8 b20_2x3stbc_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_STBC_SPEXP1 */
- int8 b20_2x3sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS2_SPEXP1 */
- int8 b20_3x3sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS3 */
-
- /* start of 40MHz tx power limits */
- int8 b40_dummy1x1dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */
- int8 b40_1x1ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM transmission */
- int8 b40_1x1mcs0[WL_NUM_RATES_MCS_1STREAM]; /* SISO MCS 0-7 */
-
- int8 b40_dummy1x2dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */
- int8 b40_1x2cdd_ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM CDD transmission */
- int8 b40_1x2cdd_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* CDD MCS 0-7 */
- int8 b40_2x2stbc_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* STBC MCS 0-7 */
- int8 b40_2x2sdm_mcs8[WL_NUM_RATES_MCS_1STREAM]; /* MCS 8-15 */
-
- int8 b40_dummy1x3dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */
- int8 b40_1x3cdd_ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM CDD transmission */
- int8 b40_1x3cdd_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* 1 Nsts to 3 Tx Chain */
- int8 b40_2x3stbc_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* STBC MCS 0-7 */
- int8 b40_2x3sdm_mcs8[WL_NUM_RATES_MCS_1STREAM]; /* 2 Nsts to 3 Tx Chain */
- int8 b40_3x3sdm_mcs16[WL_NUM_RATES_MCS_1STREAM]; /* 3 Nsts to 3 Tx Chain */
-
- int8 b40_1x1vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1 */
- int8 b40_1x2cdd_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_CDD1 */
- int8 b40_2x2stbc_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_STBC */
- int8 b40_2x2sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS2 */
- int8 b40_1x3cdd_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_CDD2 */
- int8 b40_2x3stbc_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_STBC_SPEXP1 */
- int8 b40_2x3sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS2_SPEXP1 */
- int8 b40_3x3sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS3 */
-
- /* start of 20in40MHz tx power limits */
- int8 b20in40_1x1dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */
- int8 b20in40_1x1ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM transmission */
- int8 b20in40_1x1mcs0[WL_NUM_RATES_MCS_1STREAM]; /* SISO MCS 0-7 */
-
- int8 b20in40_1x2dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */
- int8 b20in40_1x2cdd_ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM CDD transmission */
- int8 b20in40_1x2cdd_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* CDD MCS 0-7 */
- int8 b20in40_2x2stbc_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* STBC MCS 0-7 */
- int8 b20in40_2x2sdm_mcs8[WL_NUM_RATES_MCS_1STREAM]; /* MCS 8-15 */
-
- int8 b20in40_1x3dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */
- int8 b20in40_1x3cdd_ofdm[WL_NUM_RATES_OFDM]; /* 20 in 40 MHz Legacy OFDM CDD */
- int8 b20in40_1x3cdd_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* 1 Nsts to 3 Tx Chain */
- int8 b20in40_2x3stbc_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* STBC MCS 0-7 */
- int8 b20in40_2x3sdm_mcs8[WL_NUM_RATES_MCS_1STREAM]; /* 2 Nsts to 3 Tx Chain */
- int8 b20in40_3x3sdm_mcs16[WL_NUM_RATES_MCS_1STREAM]; /* 3 Nsts to 3 Tx Chain */
-
- int8 b20in40_1x1vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1 */
- int8 b20in40_1x2cdd_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_CDD1 */
- int8 b20in40_2x2stbc_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_STBC */
- int8 b20in40_2x2sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS2 */
- int8 b20in40_1x3cdd_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_CDD2 */
- int8 b20in40_2x3stbc_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_STBC_SPEXP1 */
- int8 b20in40_2x3sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS2_SPEXP1 */
- int8 b20in40_3x3sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS3 */
-
- /* start of 80MHz tx power limits */
- int8 b80_dummy1x1dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */
- int8 b80_1x1ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM transmission */
- int8 b80_1x1mcs0[WL_NUM_RATES_MCS_1STREAM]; /* SISO MCS 0-7 */
-
- int8 b80_dummy1x2dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */
- int8 b80_1x2cdd_ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM CDD transmission */
- int8 b80_1x2cdd_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* CDD MCS 0-7 */
- int8 b80_2x2stbc_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* STBC MCS 0-7 */
- int8 b80_2x2sdm_mcs8[WL_NUM_RATES_MCS_1STREAM]; /* MCS 8-15 */
-
- int8 b80_dummy1x3dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */
- int8 b80_1x3cdd_ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM CDD transmission */
- int8 b80_1x3cdd_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* 1 Nsts to 3 Tx Chain */
- int8 b80_2x3stbc_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* STBC MCS 0-7 */
- int8 b80_2x3sdm_mcs8[WL_NUM_RATES_MCS_1STREAM]; /* 2 Nsts to 3 Tx Chain */
- int8 b80_3x3sdm_mcs16[WL_NUM_RATES_MCS_1STREAM]; /* 3 Nsts to 3 Tx Chain */
-
- int8 b80_1x1vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1 */
- int8 b80_1x2cdd_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_CDD1 */
- int8 b80_2x2stbc_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_STBC */
- int8 b80_2x2sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS2 */
- int8 b80_1x3cdd_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_CDD2 */
- int8 b80_2x3stbc_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_STBC_SPEXP1 */
- int8 b80_2x3sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS2_SPEXP1 */
- int8 b80_3x3sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS3 */
-
- /* start of 20in80MHz tx power limits */
- int8 b20in80_1x1dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */
- int8 b20in80_1x1ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM transmission */
- int8 b20in80_1x1mcs0[WL_NUM_RATES_MCS_1STREAM]; /* SISO MCS 0-7 */
-
- int8 b20in80_1x2dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */
- int8 b20in80_1x2cdd_ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM CDD transmission */
- int8 b20in80_1x2cdd_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* CDD MCS 0-7 */
- int8 b20in80_2x2stbc_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* STBC MCS 0-7 */
- int8 b20in80_2x2sdm_mcs8[WL_NUM_RATES_MCS_1STREAM]; /* MCS 8-15 */
-
- int8 b20in80_1x3dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */
- int8 b20in80_1x3cdd_ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM CDD transmission */
- int8 b20in80_1x3cdd_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* 1 Nsts to 3 Tx Chain */
- int8 b20in80_2x3stbc_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* STBC MCS 0-7 */
- int8 b20in80_2x3sdm_mcs8[WL_NUM_RATES_MCS_1STREAM]; /* 2 Nsts to 3 Tx Chain */
- int8 b20in80_3x3sdm_mcs16[WL_NUM_RATES_MCS_1STREAM]; /* 3 Nsts to 3 Tx Chain */
-
- int8 b20in80_1x1vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1 */
- int8 b20in80_1x2cdd_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_CDD1 */
- int8 b20in80_2x2stbc_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_STBC */
- int8 b20in80_2x2sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS2 */
- int8 b20in80_1x3cdd_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_CDD2 */
- int8 b20in80_2x3stbc_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_STBC_SPEXP1 */
- int8 b20in80_2x3sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS2_SPEXP1 */
- int8 b20in80_3x3sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS3 */
-
- /* start of 40in80MHz tx power limits */
- int8 b40in80_dummy1x1dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */
- int8 b40in80_1x1ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM transmission */
- int8 b40in80_1x1mcs0[WL_NUM_RATES_MCS_1STREAM]; /* SISO MCS 0-7 */
-
- int8 b40in80_dummy1x2dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */
- int8 b40in80_1x2cdd_ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM CDD transmission */
- int8 b40in80_1x2cdd_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* CDD MCS 0-7 */
- int8 b40in80_2x2stbc_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* STBC MCS 0-7 */
- int8 b40in80_2x2sdm_mcs8[WL_NUM_RATES_MCS_1STREAM]; /* MCS 8-15 */
-
- int8 b40in80_dummy1x3dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */
- int8 b40in80_1x3cdd_ofdm[WL_NUM_RATES_OFDM]; /* MHz Legacy OFDM CDD */
- int8 b40in80_1x3cdd_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* 1 Nsts to 3 Tx Chain */
- int8 b40in80_2x3stbc_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* STBC MCS 0-7 */
- int8 b40in80_2x3sdm_mcs8[WL_NUM_RATES_MCS_1STREAM]; /* 2 Nsts to 3 Tx Chain */
- int8 b40in80_3x3sdm_mcs16[WL_NUM_RATES_MCS_1STREAM]; /* 3 Nsts to 3 Tx Chain */
-
- int8 b40in80_1x1vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1 */
- int8 b40in80_1x2cdd_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_CDD1 */
- int8 b40in80_2x2stbc_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_STBC */
- int8 b40in80_2x2sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS2 */
- int8 b40in80_1x3cdd_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_CDD2 */
- int8 b40in80_2x3stbc_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_STBC_SPEXP1 */
- int8 b40in80_2x3sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS2_SPEXP1 */
- int8 b40in80_3x3sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS3 */
-
- int8 mcs32; /* C_CHECK - THIS NEEDS TO BE REMOVED THROUGHOUT THE CODE */
-} txppr_t;
-
-/* 20MHz */
-#define WL_TX_POWER_CCK_FIRST OFFSETOF(txppr_t, b20_1x1dsss)
-#define WL_TX_POWER_OFDM20_FIRST OFFSETOF(txppr_t, b20_1x1ofdm)
-#define WL_TX_POWER_MCS20_SISO_FIRST OFFSETOF(txppr_t, b20_1x1mcs0)
-#define WL_TX_POWER_20_S1x1_FIRST OFFSETOF(txppr_t, b20_1x1mcs0)
-
-#define WL_TX_POWER_CCK_CDD_S1x2_FIRST OFFSETOF(txppr_t, b20_1x2dsss)
-#define WL_TX_POWER_OFDM20_CDD_FIRST OFFSETOF(txppr_t, b20_1x2cdd_ofdm)
-#define WL_TX_POWER_MCS20_CDD_FIRST OFFSETOF(txppr_t, b20_1x2cdd_mcs0)
-#define WL_TX_POWER_20_S1x2_FIRST OFFSETOF(txppr_t, b20_1x2cdd_mcs0)
-#define WL_TX_POWER_MCS20_STBC_FIRST OFFSETOF(txppr_t, b20_2x2stbc_mcs0)
-#define WL_TX_POWER_MCS20_SDM_FIRST OFFSETOF(txppr_t, b20_2x2sdm_mcs8)
-#define WL_TX_POWER_20_S2x2_FIRST OFFSETOF(txppr_t, b20_2x2sdm_mcs8)
-
-#define WL_TX_POWER_CCK_CDD_S1x3_FIRST OFFSETOF(txppr_t, b20_1x3dsss)
-#define WL_TX_POWER_OFDM20_CDD_S1x3_FIRST OFFSETOF(txppr_t, b20_1x3cdd_ofdm)
-#define WL_TX_POWER_20_S1x3_FIRST OFFSETOF(txppr_t, b20_1x3cdd_mcs0)
-#define WL_TX_POWER_20_STBC_S2x3_FIRST OFFSETOF(txppr_t, b20_2x3stbc_mcs0)
-#define WL_TX_POWER_20_S2x3_FIRST OFFSETOF(txppr_t, b20_2x3sdm_mcs8)
-#define WL_TX_POWER_20_S3x3_FIRST OFFSETOF(txppr_t, b20_3x3sdm_mcs16)
-
-#define WL_TX_POWER_20_S1X1_VHT OFFSETOF(txppr_t, b20_1x1vht)
-#define WL_TX_POWER_20_S1X2_CDD_VHT OFFSETOF(txppr_t, b20_1x2cdd_vht)
-#define WL_TX_POWER_20_S2X2_STBC_VHT OFFSETOF(txppr_t, b20_2x2stbc_vht)
-#define WL_TX_POWER_20_S2X2_VHT OFFSETOF(txppr_t, b20_2x2sdm_vht)
-#define WL_TX_POWER_20_S1X3_CDD_VHT OFFSETOF(txppr_t, b20_1x3cdd_vht)
-#define WL_TX_POWER_20_S2X3_STBC_VHT OFFSETOF(txppr_t, b20_2x3stbc_vht)
-#define WL_TX_POWER_20_S2X3_VHT OFFSETOF(txppr_t, b20_2x3sdm_vht)
-#define WL_TX_POWER_20_S3X3_VHT OFFSETOF(txppr_t, b20_3x3sdm_vht)
-
-/* 40MHz */
-#define WL_TX_POWER_40_DUMMY_CCK_FIRST OFFSETOF(txppr_t, b40_dummy1x1dsss)
-#define WL_TX_POWER_OFDM40_FIRST OFFSETOF(txppr_t, b40_1x1ofdm)
-#define WL_TX_POWER_MCS40_SISO_FIRST OFFSETOF(txppr_t, b40_1x1mcs0)
-#define WL_TX_POWER_40_S1x1_FIRST OFFSETOF(txppr_t, b40_1x1mcs0)
-
-#define WL_TX_POWER_40_DUMMY_CCK_CDD_S1x2_FIRST OFFSETOF(txppr_t, b40_dummy1x2dsss)
-#define WL_TX_POWER_OFDM40_CDD_FIRST OFFSETOF(txppr_t, b40_1x2cdd_ofdm)
-#define WL_TX_POWER_MCS40_CDD_FIRST OFFSETOF(txppr_t, b40_1x2cdd_mcs0)
-#define WL_TX_POWER_40_S1x2_FIRST OFFSETOF(txppr_t, b40_1x2cdd_mcs0)
-#define WL_TX_POWER_MCS40_STBC_FIRST OFFSETOF(txppr_t, b40_2x2stbc_mcs0)
-#define WL_TX_POWER_MCS40_SDM_FIRST OFFSETOF(txppr_t, b40_2x2sdm_mcs8)
-#define WL_TX_POWER_40_S2x2_FIRST OFFSETOF(txppr_t, b40_2x2sdm_mcs8)
-
-#define WL_TX_POWER_40_DUMMY_CCK_CDD_S1x3_FIRST OFFSETOF(txppr_t, b40_dummy1x3dsss)
-#define WL_TX_POWER_OFDM40_CDD_S1x3_FIRST OFFSETOF(txppr_t, b40_1x3cdd_ofdm)
-#define WL_TX_POWER_40_S1x3_FIRST OFFSETOF(txppr_t, b40_1x3cdd_mcs0)
-#define WL_TX_POWER_40_STBC_S2x3_FIRST OFFSETOF(txppr_t, b40_2x3stbc_mcs0)
-#define WL_TX_POWER_40_S2x3_FIRST OFFSETOF(txppr_t, b40_2x3sdm_mcs8)
-#define WL_TX_POWER_40_S3x3_FIRST OFFSETOF(txppr_t, b40_3x3sdm_mcs16)
-
-#define WL_TX_POWER_40_S1X1_VHT OFFSETOF(txppr_t, b40_1x1vht)
-#define WL_TX_POWER_40_S1X2_CDD_VHT OFFSETOF(txppr_t, b40_1x2cdd_vht)
-#define WL_TX_POWER_40_S2X2_STBC_VHT OFFSETOF(txppr_t, b40_2x2stbc_vht)
-#define WL_TX_POWER_40_S2X2_VHT OFFSETOF(txppr_t, b40_2x2sdm_vht)
-#define WL_TX_POWER_40_S1X3_CDD_VHT OFFSETOF(txppr_t, b40_1x3cdd_vht)
-#define WL_TX_POWER_40_S2X3_STBC_VHT OFFSETOF(txppr_t, b40_2x3stbc_vht)
-#define WL_TX_POWER_40_S2X3_VHT OFFSETOF(txppr_t, b40_2x3sdm_vht)
-#define WL_TX_POWER_40_S3X3_VHT OFFSETOF(txppr_t, b40_3x3sdm_vht)
-
-/* 20 in 40MHz */
-#define WL_TX_POWER_20UL_CCK_FIRST OFFSETOF(txppr_t, b20in40_1x1dsss)
-#define WL_TX_POWER_20UL_OFDM_FIRST OFFSETOF(txppr_t, b20in40_1x1ofdm)
-#define WL_TX_POWER_20UL_S1x1_FIRST OFFSETOF(txppr_t, b20in40_1x1mcs0)
-
-#define WL_TX_POWER_CCK_20U_CDD_S1x2_FIRST OFFSETOF(txppr_t, b20in40_1x2dsss)
-#define WL_TX_POWER_20UL_OFDM_CDD_FIRST OFFSETOF(txppr_t, b20in40_1x2cdd_ofdm)
-#define WL_TX_POWER_20UL_S1x2_FIRST OFFSETOF(txppr_t, b20in40_1x2cdd_mcs0)
-#define WL_TX_POWER_20UL_STBC_S2x2_FIRST OFFSETOF(txppr_t, b20in40_2x2stbc_mcs0)
-#define WL_TX_POWER_20UL_S2x2_FIRST OFFSETOF(txppr_t, b20in40_2x2sdm_mcs8)
-
-#define WL_TX_POWER_CCK_20U_CDD_S1x3_FIRST OFFSETOF(txppr_t, b20in40_1x3dsss)
-#define WL_TX_POWER_20UL_OFDM_CDD_S1x3_FIRST OFFSETOF(txppr_t, b20in40_1x3cdd_ofdm)
-#define WL_TX_POWER_20UL_S1x3_FIRST OFFSETOF(txppr_t, b20in40_1x3cdd_mcs0)
-#define WL_TX_POWER_20UL_STBC_S2x3_FIRST OFFSETOF(txppr_t, b20in40_2x3stbc_mcs0)
-#define WL_TX_POWER_20UL_S2x3_FIRST OFFSETOF(txppr_t, b20in40_2x3sdm_mcs8)
-#define WL_TX_POWER_20UL_S3x3_FIRST OFFSETOF(txppr_t, b20in40_3x3sdm_mcs16)
-
-#define WL_TX_POWER_20UL_S1X1_VHT OFFSETOF(txppr_t, b20in40_1x1vht)
-#define WL_TX_POWER_20UL_S1X2_CDD_VHT OFFSETOF(txppr_t, b20in40_1x2cdd_vht)
-#define WL_TX_POWER_20UL_S2X2_STBC_VHT OFFSETOF(txppr_t, b20in40_2x2stbc_vht)
-#define WL_TX_POWER_20UL_S2X2_VHT OFFSETOF(txppr_t, b20in40_2x2sdm_vht)
-#define WL_TX_POWER_20UL_S1X3_CDD_VHT OFFSETOF(txppr_t, b20in40_1x3cdd_vht)
-#define WL_TX_POWER_20UL_S2X3_STBC_VHT OFFSETOF(txppr_t, b20in40_2x3stbc_vht)
-#define WL_TX_POWER_20UL_S2X3_VHT OFFSETOF(txppr_t, b20in40_2x3sdm_vht)
-#define WL_TX_POWER_20UL_S3X3_VHT OFFSETOF(txppr_t, b20in40_3x3sdm_vht)
-
-/* 80MHz */
-#define WL_TX_POWER_80_DUMMY_CCK_FIRST OFFSETOF(txppr_t, b80_dummy1x1dsss)
-#define WL_TX_POWER_OFDM80_FIRST OFFSETOF(txppr_t, b80_1x1ofdm)
-#define WL_TX_POWER_MCS80_SISO_FIRST OFFSETOF(txppr_t, b80_1x1mcs0)
-#define WL_TX_POWER_80_S1x1_FIRST OFFSETOF(txppr_t, b80_1x1mcs0)
-
-#define WL_TX_POWER_80_DUMMY_CCK_CDD_S1x2_FIRST OFFSETOF(txppr_t, b80_dummy1x2dsss)
-#define WL_TX_POWER_OFDM80_CDD_FIRST OFFSETOF(txppr_t, b80_1x2cdd_ofdm)
-#define WL_TX_POWER_MCS80_CDD_FIRST OFFSETOF(txppr_t, b80_1x2cdd_mcs0)
-#define WL_TX_POWER_80_S1x2_FIRST OFFSETOF(txppr_t, b80_1x2cdd_mcs0)
-#define WL_TX_POWER_MCS80_STBC_FIRST OFFSETOF(txppr_t, b80_2x2stbc_mcs0)
-#define WL_TX_POWER_MCS80_SDM_FIRST OFFSETOF(txppr_t, b80_2x2sdm_mcs8)
-#define WL_TX_POWER_80_S2x2_FIRST OFFSETOF(txppr_t, b80_2x2sdm_mcs8)
-
-#define WL_TX_POWER_80_DUMMY_CCK_CDD_S1x3_FIRST OFFSETOF(txppr_t, b80_dummy1x3dsss)
-#define WL_TX_POWER_OFDM80_CDD_S1x3_FIRST OFFSETOF(txppr_t, b80_1x3cdd_ofdm)
-#define WL_TX_POWER_80_S1x3_FIRST OFFSETOF(txppr_t, b80_1x3cdd_mcs0)
-#define WL_TX_POWER_80_STBC_S2x3_FIRST OFFSETOF(txppr_t, b80_2x3stbc_mcs0)
-#define WL_TX_POWER_80_S2x3_FIRST OFFSETOF(txppr_t, b80_2x3sdm_mcs8)
-#define WL_TX_POWER_80_S3x3_FIRST OFFSETOF(txppr_t, b80_3x3sdm_mcs16)
-
-#define WL_TX_POWER_80_S1X1_VHT OFFSETOF(txppr_t, b80_1x1vht)
-#define WL_TX_POWER_80_S1X2_CDD_VHT OFFSETOF(txppr_t, b80_1x2cdd_vht)
-#define WL_TX_POWER_80_S2X2_STBC_VHT OFFSETOF(txppr_t, b80_2x2stbc_vht)
-#define WL_TX_POWER_80_S2X2_VHT OFFSETOF(txppr_t, b80_2x2sdm_vht)
-#define WL_TX_POWER_80_S1X3_CDD_VHT OFFSETOF(txppr_t, b80_1x3cdd_vht)
-#define WL_TX_POWER_80_S2X3_STBC_VHT OFFSETOF(txppr_t, b80_2x3stbc_vht)
-#define WL_TX_POWER_80_S2X3_VHT OFFSETOF(txppr_t, b80_2x3sdm_vht)
-#define WL_TX_POWER_80_S3X3_VHT OFFSETOF(txppr_t, b80_3x3sdm_vht)
-
-/* 20 in 80MHz */
-#define WL_TX_POWER_20UUL_CCK_FIRST OFFSETOF(txppr_t, b20in80_1x1dsss)
-#define WL_TX_POWER_20UUL_OFDM_FIRST OFFSETOF(txppr_t, b20in80_1x1ofdm)
-#define WL_TX_POWER_20UUL_S1x1_FIRST OFFSETOF(txppr_t, b20in80_1x1mcs0)
-
-#define WL_TX_POWER_CCK_20UU_CDD_S1x2_FIRST OFFSETOF(txppr_t, b20in80_1x2dsss)
-#define WL_TX_POWER_20UUL_OFDM_CDD_FIRST OFFSETOF(txppr_t, b20in80_1x2cdd_ofdm)
-#define WL_TX_POWER_20UUL_S1x2_FIRST OFFSETOF(txppr_t, b20in80_1x2cdd_mcs0)
-#define WL_TX_POWER_20UUL_STBC_S2x2_FIRST OFFSETOF(txppr_t, b20in80_2x2stbc_mcs0)
-#define WL_TX_POWER_20UUL_S2x2_FIRST OFFSETOF(txppr_t, b20in80_2x2sdm_mcs8)
-
-#define WL_TX_POWER_CCK_20UU_CDD_S1x3_FIRST OFFSETOF(txppr_t, b20in80_1x3dsss)
-#define WL_TX_POWER_20UUL_OFDM_CDD_S1x3_FIRST OFFSETOF(txppr_t, b20in80_1x3cdd_ofdm)
-#define WL_TX_POWER_20UUL_S1x3_FIRST OFFSETOF(txppr_t, b20in80_1x3cdd_mcs0)
-#define WL_TX_POWER_20UUL_STBC_S2x3_FIRST OFFSETOF(txppr_t, b20in80_2x3stbc_mcs0)
-#define WL_TX_POWER_20UUL_S2x3_FIRST OFFSETOF(txppr_t, b20in80_2x3sdm_mcs8)
-#define WL_TX_POWER_20UUL_S3x3_FIRST OFFSETOF(txppr_t, b20in80_3x3sdm_mcs16)
-
-#define WL_TX_POWER_20UUL_S1X1_VHT OFFSETOF(txppr_t, b20in80_1x1vht)
-#define WL_TX_POWER_20UUL_S1X2_CDD_VHT OFFSETOF(txppr_t, b20in80_1x2cdd_vht)
-#define WL_TX_POWER_20UUL_S2X2_STBC_VHT OFFSETOF(txppr_t, b20in80_2x2stbc_vht)
-#define WL_TX_POWER_20UUL_S2X2_VHT OFFSETOF(txppr_t, b20in80_2x2sdm_vht)
-#define WL_TX_POWER_20UUL_S1X3_CDD_VHT OFFSETOF(txppr_t, b20in80_1x3cdd_vht)
-#define WL_TX_POWER_20UUL_S2X3_STBC_VHT OFFSETOF(txppr_t, b20in80_2x3stbc_vht)
-#define WL_TX_POWER_20UUL_S2X3_VHT OFFSETOF(txppr_t, b20in80_2x3sdm_vht)
-#define WL_TX_POWER_20UUL_S3X3_VHT OFFSETOF(txppr_t, b20in80_3x3sdm_vht)
-
-/* 40 in 80MHz */
-#define WL_TX_POWER_40UUL_DUMMY_CCK_FIRST OFFSETOF(txppr_t, b40in80_dummy1x1dsss)
-#define WL_TX_POWER_40UUL_OFDM_FIRST OFFSETOF(txppr_t, b40in80_1x1ofdm)
-#define WL_TX_POWER_40UUL_S1x1_FIRST OFFSETOF(txppr_t, b40in80_1x1mcs0)
-
-#define WL_TX_POWER_CCK_40UU_DUMMY_CDD_S1x2_FIRST OFFSETOF(txppr_t, b40in80_dummy1x2dsss)
-#define WL_TX_POWER_40UUL_OFDM_CDD_FIRST OFFSETOF(txppr_t, b40in80_1x2cdd_ofdm)
-#define WL_TX_POWER_40UUL_S1x2_FIRST OFFSETOF(txppr_t, b40in80_1x2cdd_mcs0)
-#define WL_TX_POWER_40UUL_STBC_S2x2_FIRST OFFSETOF(txppr_t, b40in80_2x2stbc_mcs0)
-#define WL_TX_POWER_40UUL_S2x2_FIRST OFFSETOF(txppr_t, b40in80_2x2sdm_mcs8)
-
-#define WL_TX_POWER_CCK_40UU_DUMMY_CDD_S1x3_FIRST OFFSETOF(txppr_t, b40in80_dummy1x3dsss)
-#define WL_TX_POWER_40UUL_OFDM_CDD_S1x3_FIRST OFFSETOF(txppr_t, b40in80_1x3cdd_ofdm)
-#define WL_TX_POWER_40UUL_S1x3_FIRST OFFSETOF(txppr_t, b40in80_1x3cdd_mcs0)
-#define WL_TX_POWER_40UUL_STBC_S2x3_FIRST OFFSETOF(txppr_t, b40in80_2x3stbc_mcs0)
-#define WL_TX_POWER_40UUL_S2x3_FIRST OFFSETOF(txppr_t, b40in80_2x3sdm_mcs8)
-#define WL_TX_POWER_40UUL_S3x3_FIRST OFFSETOF(txppr_t, b40in80_3x3sdm_mcs16)
-
-#define WL_TX_POWER_40UUL_S1X1_VHT OFFSETOF(txppr_t, b40in80_1x1vht)
-#define WL_TX_POWER_40UUL_S1X2_CDD_VHT OFFSETOF(txppr_t, b40in80_1x2cdd_vht)
-#define WL_TX_POWER_40UUL_S2X2_STBC_VHT OFFSETOF(txppr_t, b40in80_2x2stbc_vht)
-#define WL_TX_POWER_40UUL_S2X2_VHT OFFSETOF(txppr_t, b40in80_2x2sdm_vht)
-#define WL_TX_POWER_40UUL_S1X3_CDD_VHT OFFSETOF(txppr_t, b40in80_1x3cdd_vht)
-#define WL_TX_POWER_40UUL_S2X3_STBC_VHT OFFSETOF(txppr_t, b40in80_2x3stbc_vht)
-#define WL_TX_POWER_40UUL_S2X3_VHT OFFSETOF(txppr_t, b40in80_2x3sdm_vht)
-#define WL_TX_POWER_40UUL_S3X3_VHT OFFSETOF(txppr_t, b40in80_3x3sdm_vht)
-
-#define WL_TX_POWER_MCS_32 OFFSETOF(txppr_t, mcs32) /* C_CHECK remove later */
-
-#define WL_TX_POWER_RATES sizeof(struct txppr)
-
-/* sslpnphy specifics */
-#define WL_TX_POWER_MCS20_SISO_FIRST_SSN WL_TX_POWER_MCS20_SISO_FIRST
-#define WL_TX_POWER_MCS40_SISO_FIRST_SSN WL_TX_POWER_MCS40_SISO_FIRST
-
typedef struct {
uint16 ver; /* version of this struct */
uint16 len; /* length in bytes of this structure */
uint32 flags;
chanspec_t chanspec; /* txpwr report for this channel */
chanspec_t local_chanspec; /* channel on which we are associated */
- uint32 buflen; /* ppr buffer length */
+ uint32 buflen; /* ppr buffer length */
uint8 pprbuf[1]; /* Latest target power buffer */
} wl_txppr_t;
-#define WL_TXPPR_VERSION 0
+#define WL_TXPPR_VERSION 1
#define WL_TXPPR_LENGTH (sizeof(wl_txppr_t))
-#define TX_POWER_T_VERSION 44
-
-/* Defines used with channel_bandwidth for curpower */
-#define WL_BW_20MHZ 0
-#define WL_BW_40MHZ 1
-#define WL_BW_80MHZ 2
-#define WL_BW_160MHZ 3
-
-/* tx_power_t.flags bits */
-/* use for defined PPR_API */
-#define WL_TX_POWER_F_ENABLED 1
-#define WL_TX_POWER_F_HW 2
-#define WL_TX_POWER_F_MIMO 4
-#define WL_TX_POWER_F_SISO 8
-#define WL_TX_POWER_F_HT 0x10
-#define WL_TX_POWER_F_VHT 0x20
-
-typedef struct {
- uint32 flags;
- chanspec_t chanspec; /* txpwr report for this channel */
- chanspec_t local_chanspec; /* channel on which we are associated */
- uint8 local_max; /* local max according to the AP */
- uint8 local_constraint; /* local constraint according to the AP */
- int8 antgain[2]; /* Ant gain for each band - from SROM */
- uint8 rf_cores; /* count of RF Cores being reported */
- uint8 est_Pout[4]; /* Latest tx power out estimate per RF chain */
- uint8 est_Pout_act[4]; /* Latest tx power out estimate per RF chain w/o adjustment */
- uint8 est_Pout_cck; /* Latest CCK tx power out estimate */
- uint8 tx_power_max[4]; /* Maximum target power among all rates */
- uint tx_power_max_rate_ind[4]; /* Index of the rate with the max target power */
- uint8 user_limit[WL_TX_POWER_RATES]; /* User limit */
- int8 board_limit[WL_TX_POWER_RATES]; /* Max power board can support (SROM) */
- int8 target[WL_TX_POWER_RATES]; /* Latest target power */
- int8 clm_limits[WL_NUMRATES]; /* regulatory limits - 20, 40 or 80MHz */
- int8 clm_limits_subchan1[WL_NUMRATES]; /* regulatory limits - 20in40 or 40in80 */
- int8 clm_limits_subchan2[WL_NUMRATES]; /* regulatory limits - 20in80MHz */
- int8 sar; /* SAR limit for display by wl executable */
- int8 channel_bandwidth; /* 20, 40 or 80 MHz bandwidth? */
- uint8 version; /* Version of the data format wlu <--> driver */
- uint8 display_core; /* Displayed curpower core */
- int8 target_offsets[4]; /* Target power offsets for current rate per core */
- uint32 last_tx_ratespec; /* Ratespec for last transmition */
- int8 SARLIMIT[MAX_STREAMS_SUPPORTED];
-} tx_power_t;
+#define TX_POWER_T_VERSION 45
+/* number of ppr serialization buffers: regulatory, board and target limits */
+#define WL_TXPPR_SER_BUF_NUM (3)
+
+typedef struct chanspec_txpwr_max {
+ chanspec_t chanspec; /* chanspec */
+ uint8 txpwr_max; /* max txpwr in all the rates */
+ uint8 padding;
+} chanspec_txpwr_max_t;
+
+typedef struct wl_chanspec_txpwr_max {
+ uint16 ver; /* version of this struct */
+ uint16 len; /* length in bytes of this structure */
+	uint32 count;			/* number of (chanspec, txpwr_max) pairs in txpwr[] */
+ chanspec_txpwr_max_t txpwr[1]; /* array of (chanspec, max_txpwr) pair */
+} wl_chanspec_txpwr_max_t;
+
+#define WL_CHANSPEC_TXPWR_MAX_VER 1
+#define WL_CHANSPEC_TXPWR_MAX_LEN (sizeof(wl_chanspec_txpwr_max_t))
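/* Illustrative sketch (not part of the original header): txpwr[1] is a variable-length
 * tail, so a buffer holding 'n' (chanspec, txpwr_max) pairs would be sized as below.
 * The helper name is hypothetical.
 */
static uint wl_chanspec_txpwr_max_size(uint32 n)
{
	return WL_CHANSPEC_TXPWR_MAX_LEN +
		((n > 0 ? n - 1 : 0) * sizeof(chanspec_txpwr_max_t));
}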
typedef struct tx_inst_power {
uint8 txpwr_est_Pout[2]; /* Latest estimate for 2.4 and 5 Ghz */
uint8 txpwr_est_Pout_gofdm; /* Pwr estimate for 2.4 OFDM */
} tx_inst_power_t;
-
-typedef struct {
- uint32 flags;
- chanspec_t chanspec; /* txpwr report for this channel */
- chanspec_t local_chanspec; /* channel on which we are associated */
- uint8 local_max; /* local max according to the AP */
- uint8 local_constraint; /* local constraint according to the AP */
- int8 antgain[2]; /* Ant gain for each band - from SROM */
- uint8 rf_cores; /* count of RF Cores being reported */
- uint8 est_Pout[4]; /* Latest tx power out estimate per RF chain */
- uint8 est_Pout_act[4]; /* Latest tx power out estimate per RF chain
- * without adjustment
- */
- uint8 est_Pout_cck; /* Latest CCK tx power out estimate */
- uint8 tx_power_max[4]; /* Maximum target power among all rates */
- uint tx_power_max_rate_ind[4]; /* Index of the rate with the max target power */
- txppr_t user_limit; /* User limit */
- txppr_t reg_limit; /* Regulatory power limit */
- txppr_t board_limit; /* Max power board can support (SROM) */
- txppr_t target; /* Latest target power */
-} wl_txpwr_t;
-
#define WL_NUM_TXCHAIN_MAX 4
typedef struct wl_txchain_pwr_offsets {
int8 offset[WL_NUM_TXCHAIN_MAX]; /* quarter dBm signed offset for each chain */
} wl_txchain_pwr_offsets_t;
-
-/* 802.11h measurement types */
-#define WLC_MEASURE_TPC 1
-#define WLC_MEASURE_CHANNEL_BASIC 2
-#define WLC_MEASURE_CHANNEL_CCA 3
-#define WLC_MEASURE_CHANNEL_RPI 4
-
-/* regulatory enforcement levels */
-#define SPECT_MNGMT_OFF 0 /* both 11h and 11d disabled */
-#define SPECT_MNGMT_LOOSE_11H 1 /* allow non-11h APs in scan lists */
-#define SPECT_MNGMT_STRICT_11H 2 /* prune out non-11h APs from scan list */
-#define SPECT_MNGMT_STRICT_11D 3 /* switch to 802.11D mode */
-/* SPECT_MNGMT_LOOSE_11H_D - same as SPECT_MNGMT_LOOSE with the exception that Country IE
- * adoption is done regardless of capability spectrum_management
- */
-#define SPECT_MNGMT_LOOSE_11H_D 4 /* operation defined above */
-
-#define WL_CHAN_VALID_HW (1 << 0) /* valid with current HW */
-#define WL_CHAN_VALID_SW (1 << 1) /* valid with current country setting */
-#define WL_CHAN_BAND_5G (1 << 2) /* 5GHz-band channel */
-#define WL_CHAN_RADAR (1 << 3) /* radar sensitive channel */
-#define WL_CHAN_INACTIVE (1 << 4) /* temporarily inactive due to radar */
-#define WL_CHAN_PASSIVE (1 << 5) /* channel is in passive mode */
-#define WL_CHAN_RESTRICTED (1 << 6) /* restricted use channel */
-
-/* BTC mode used by "btc_mode" iovar */
-#define WL_BTC_DISABLE 0 /* disable BT coexistence */
-#define WL_BTC_FULLTDM 1 /* full TDM COEX */
-#define WL_BTC_ENABLE 1 /* full TDM COEX to maintain backward compatiblity */
-#define WL_BTC_PREMPT 2 /* full TDM COEX with preemption */
-#define WL_BTC_LITE 3 /* light weight coex for large isolation platform */
-#define WL_BTC_PARALLEL 4 /* BT and WLAN run in parallel with separate antenna */
-#define WL_BTC_HYBRID 5 /* hybrid coex, only ack is allowed to transmit in BT slot */
-#define WL_BTC_DEFAULT 8 /* set the default mode for the device */
-#define WL_INF_BTC_DISABLE 0
-#define WL_INF_BTC_ENABLE 1
-#define WL_INF_BTC_AUTO 3
-
-/* BTC wire used by "btc_wire" iovar */
-#define WL_BTC_DEFWIRE 0 /* use default wire setting */
-#define WL_BTC_2WIRE 2 /* use 2-wire BTC */
-#define WL_BTC_3WIRE 3 /* use 3-wire BTC */
-#define WL_BTC_4WIRE 4 /* use 4-wire BTC */
-
-/* BTC flags: BTC configuration that can be set by host */
-#define WL_BTC_FLAG_PREMPT (1 << 0)
-#define WL_BTC_FLAG_BT_DEF (1 << 1)
-#define WL_BTC_FLAG_ACTIVE_PROT (1 << 2)
-#define WL_BTC_FLAG_SIM_RSP (1 << 3)
-#define WL_BTC_FLAG_PS_PROTECT (1 << 4)
-#define WL_BTC_FLAG_SIM_TX_LP (1 << 5)
-#define WL_BTC_FLAG_ECI (1 << 6)
-#define WL_BTC_FLAG_LIGHT (1 << 7)
-#define WL_BTC_FLAG_PARALLEL (1 << 8)
-
-/* Message levels */
-#define WL_ERROR_VAL 0x00000001
-#define WL_TRACE_VAL 0x00000002
-#define WL_PRHDRS_VAL 0x00000004
-#define WL_PRPKT_VAL 0x00000008
-#define WL_INFORM_VAL 0x00000010
-#define WL_TMP_VAL 0x00000020
-#define WL_OID_VAL 0x00000040
-#define WL_RATE_VAL 0x00000080
-#define WL_ASSOC_VAL 0x00000100
-#define WL_PRUSR_VAL 0x00000200
-#define WL_PS_VAL 0x00000400
-#define WL_TXPWR_VAL 0x00000800 /* retired in TOT on 6/10/2009 */
-#define WL_PORT_VAL 0x00001000
-#define WL_DUAL_VAL 0x00002000
-#define WL_WSEC_VAL 0x00004000
-#define WL_WSEC_DUMP_VAL 0x00008000
-#define WL_LOG_VAL 0x00010000
-#define WL_NRSSI_VAL 0x00020000 /* retired in TOT on 6/10/2009 */
-#define WL_LOFT_VAL 0x00040000 /* retired in TOT on 6/10/2009 */
-#define WL_REGULATORY_VAL 0x00080000
-#define WL_PHYCAL_VAL 0x00100000 /* retired in TOT on 6/10/2009 */
-#define WL_RADAR_VAL 0x00200000 /* retired in TOT on 6/10/2009 */
-#define WL_MPC_VAL 0x00400000
-#define WL_APSTA_VAL 0x00800000
-#define WL_DFS_VAL 0x01000000
-#define WL_BA_VAL 0x02000000 /* retired in TOT on 6/14/2010 */
-#define WL_ACI_VAL 0x04000000
-#define WL_MBSS_VAL 0x04000000
-#define WL_CAC_VAL 0x08000000
-#define WL_AMSDU_VAL 0x10000000
-#define WL_AMPDU_VAL 0x20000000
-#define WL_FFPLD_VAL 0x40000000
-
-/* wl_msg_level is full. For new bits take the next one and AND with
- * wl_msg_level2 in wl_dbg.h
- */
-#define WL_DPT_VAL 0x00000001
-#define WL_SCAN_VAL 0x00000002
-#define WL_WOWL_VAL 0x00000004
-#define WL_COEX_VAL 0x00000008
-#define WL_RTDC_VAL 0x00000010
-#define WL_PROTO_VAL 0x00000020
-#define WL_BTA_VAL 0x00000040
-#define WL_CHANINT_VAL 0x00000080
-#define WL_THERMAL_VAL 0x00000100 /* retired in TOT on 6/10/2009 */
-#define WL_P2P_VAL 0x00000200
-#define WL_ITFR_VAL 0x00000400
-#define WL_MCHAN_VAL 0x00000800
-#define WL_TDLS_VAL 0x00001000
-#define WL_MCNX_VAL 0x00002000
-#define WL_PROT_VAL 0x00004000
-#define WL_PSTA_VAL 0x00008000
-#define WL_TBTT_VAL 0x00010000
-#define WL_NIC_VAL 0x00020000
-#define WL_PWRSEL_VAL 0x00040000
-#define WL_TRF_MGMT_VAL 0x00080000
-#define WL_L2FILTER_VAL 0x00100000
-#define WL_TSO_VAL 0x00200000
-#define WL_MQ_VAL 0x00400000
-/* These 3 levels are currently not used in trunk but in Aardvark and Phoenix2 with != values */
-#define WL_LPC_VAL 0x00800000
-#define WL_TXBF_VAL 0x01000000
-#define WL_P2PO_VAL 0x02000000
-/* This level is synchronized with other branches */
-#define WL_WNM_VAL 0x04000000
-/* This level is currently not used in trunk but used in Phoenix2 */
-#define WL_SRSCAN_VAL 0x08000000
-
-/* use top-bit for WL_TIME_STAMP_VAL because this is a modifier
- * rather than a message-type of its own
- */
-#define WL_TIMESTAMP_VAL 0x80000000
-
-/* max # of leds supported by GPIO (gpio pin# == led index#) */
-#define WL_LED_NUMGPIO 32 /* gpio 0-31 */
-
-/* led per-pin behaviors */
-#define WL_LED_OFF 0 /* always off */
-#define WL_LED_ON 1 /* always on */
-#define WL_LED_ACTIVITY 2 /* activity */
-#define WL_LED_RADIO 3 /* radio enabled */
-#define WL_LED_ARADIO 4 /* 5 Ghz radio enabled */
-#define WL_LED_BRADIO 5 /* 2.4Ghz radio enabled */
-#define WL_LED_BGMODE 6 /* on if gmode, off if bmode */
-#define WL_LED_WI1 7
-#define WL_LED_WI2 8
-#define WL_LED_WI3 9
-#define WL_LED_ASSOC 10 /* associated state indicator */
-#define WL_LED_INACTIVE 11 /* null behavior (clears default behavior) */
-#define WL_LED_ASSOCACT 12 /* on when associated; blink fast for activity */
-#define WL_LED_WI4 13
-#define WL_LED_WI5 14
-#define WL_LED_BLINKSLOW 15 /* blink slow */
-#define WL_LED_BLINKMED 16 /* blink med */
-#define WL_LED_BLINKFAST 17 /* blink fast */
-#define WL_LED_BLINKCUSTOM 18 /* blink custom */
-#define WL_LED_BLINKPERIODIC 19 /* blink periodic (custom 1000ms / off 400ms) */
-#define WL_LED_ASSOC_WITH_SEC 20 /* when connected with security */
- /* keep on for 300 sec */
-#define WL_LED_START_OFF 21 /* off upon boot, could be turned on later */
-#define WL_LED_W6 22 /* off upon boot, could be turned on later */
-#define WL_LED_WI7 23 /* off upon boot, could be turned on later */
-#define WL_LED_NUMBEHAVIOR 24
-
-/* led behavior numeric value format */
-#define WL_LED_BEH_MASK 0x7f /* behavior mask */
-#define WL_LED_AL_MASK 0x80 /* activelow (polarity) bit */
-
/* maximum channels returned by the get valid channels iovar */
#define WL_NUMCHANNELS 64
-/* max number of chanspecs (used by the iovar to calc. buf space) */
-#define WL_NUMCHANSPECS 110
-
-/* WDS link local endpoint WPA role */
-#define WL_WDS_WPA_ROLE_AUTH 0 /* authenticator */
-#define WL_WDS_WPA_ROLE_SUP 1 /* supplicant */
-#define WL_WDS_WPA_ROLE_AUTO 255 /* auto, based on mac addr value */
-
-/* number of bytes needed to define a 128-bit mask for MAC event reporting */
-#define WL_EVENTING_MASK_LEN 16
-
/*
* Join preference iovar value is an array of tuples. Each tuple has a one-byte type,
* a one-byte length, and a variable length value. RSSI type tuple must be present
* offset 1: +ve RSSI boost value in dB
*/
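/* Illustrative sketch (not part of the original header): packing one join preference
 * tuple (one-byte type, one-byte length, variable-length value) into an iovar buffer,
 * following the layout described in the comment above. The helper is hypothetical.
 */
static int wl_join_pref_pack(uint8 *buf, int buflen, uint8 type, const uint8 *val, uint8 len)
{
	int i;

	if (buflen < 2 + len)
		return -1;		/* not enough room for this tuple */
	buf[0] = type;			/* one-byte tuple type */
	buf[1] = len;			/* one-byte value length */
	for (i = 0; i < len; i++)
		buf[2 + i] = val[i];	/* variable-length value */
	return 2 + len;			/* bytes consumed */
}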
-/* join preference types */
-#define WL_JOIN_PREF_RSSI 1 /* by RSSI */
-#define WL_JOIN_PREF_WPA 2 /* by akm and ciphers */
-#define WL_JOIN_PREF_BAND 3 /* by 802.11 band */
-#define WL_JOIN_PREF_RSSI_DELTA 4 /* by 802.11 band only if RSSI delta condition matches */
-#define WL_JOIN_PREF_TRANS_PREF 5 /* defined by requesting AP */
-
-/* band preference */
-#define WLJP_BAND_ASSOC_PREF 255 /* use what WLC_SET_ASSOC_PREFER ioctl specifies */
-
-/* any multicast cipher suite */
-#define WL_WPA_ACP_MCS_ANY "\x00\x00\x00\x00"
-
struct tsinfo_arg {
uint8 octets[3];
};
#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+#define RATE_CCK_1MBPS 0
+#define RATE_CCK_2MBPS 1
+#define RATE_CCK_5_5MBPS 2
+#define RATE_CCK_11MBPS 3
+
+#define RATE_LEGACY_OFDM_6MBPS 0
+#define RATE_LEGACY_OFDM_9MBPS 1
+#define RATE_LEGACY_OFDM_12MBPS 2
+#define RATE_LEGACY_OFDM_18MBPS 3
+#define RATE_LEGACY_OFDM_24MBPS 4
+#define RATE_LEGACY_OFDM_36MBPS 5
+#define RATE_LEGACY_OFDM_48MBPS 6
+#define RATE_LEGACY_OFDM_54MBPS 7
+
+#define WL_BSSTRANS_RSSI_RATE_MAP_VERSION 1
+
+typedef struct wl_bsstrans_rssi {
+ int8 rssi_2g; /* RSSI in dbm for 2.4 G */
+ int8 rssi_5g; /* RSSI in dbm for 5G, unused for cck */
+} wl_bsstrans_rssi_t;
+
+#define RSSI_RATE_MAP_MAX_STREAMS 4 /* max streams supported */
+
+/* RSSI to rate mapping, all 20Mhz, no SGI */
+typedef struct wl_bsstrans_rssi_rate_map {
+ uint16 ver;
+ uint16 len; /* length of entire structure */
+ wl_bsstrans_rssi_t cck[WL_NUM_RATES_CCK]; /* 2.4G only */
+ wl_bsstrans_rssi_t ofdm[WL_NUM_RATES_OFDM]; /* 6 to 54mbps */
+ wl_bsstrans_rssi_t phy_n[RSSI_RATE_MAP_MAX_STREAMS][WL_NUM_RATES_MCS_1STREAM]; /* MCS0-7 */
+ wl_bsstrans_rssi_t phy_ac[RSSI_RATE_MAP_MAX_STREAMS][WL_NUM_RATES_VHT]; /* MCS0-9 */
+} wl_bsstrans_rssi_rate_map_t;
+
+#define WL_BSSTRANS_ROAMTHROTTLE_VERSION 1
+
+/* Configure number of scans allowed per throttle period */
+typedef struct wl_bsstrans_roamthrottle {
+ uint16 ver;
+ uint16 period;
+ uint16 scans_allowed;
+} wl_bsstrans_roamthrottle_t;
+
#define NFIFO 6 /* # tx/rx fifopairs */
+#define NREINITREASONCOUNT 8
+#define REINITREASONIDX(_x) (((_x) < NREINITREASONCOUNT) ? (_x) : 0)
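/* Illustrative sketch (not part of the original header): REINITREASONIDX() folds any
 * out-of-range reason code into counter slot 0, so it is always a safe index into a
 * reinitreason[NREINITREASONCOUNT] array (see wl_cnt_t below). Names are hypothetical.
 */
static uint32 reinit_counters[NREINITREASONCOUNT];

static void count_reinit(uint reason)
{
	reinit_counters[REINITREASONIDX(reason)]++;	/* reason >= NREINITREASONCOUNT lands in slot 0 */
}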
-#define WL_CNT_T_VERSION 8 /* current version of wl_cnt_t struct */
+#define WL_CNT_T_VERSION 10 /* current version of wl_cnt_t struct */
typedef struct {
uint16 version; /* see definition of WL_CNT_T_VERSION */
uint32 txackfrm; /* number of ACK frames sent out */
uint32 txdnlfrm; /* Not used */
uint32 txbcnfrm; /* beacons transmitted */
- uint32 txfunfl[8]; /* per-fifo tx underflows */
+ uint32 txfunfl[6]; /* per-fifo tx underflows */
+ uint32 rxtoolate; /* receive too late */
+ uint32 txfbw; /* transmit at fallback bw (dynamic bw) */
uint32 txtplunfl; /* Template underflows (mac was too slow to transmit ACK/CTS
* or BCN)
*/
*/
uint32 rxnack; /* obsolete */
uint32 frmscons; /* obsolete */
- uint32 txnack; /* obsolete */
- uint32 txglitch_nack; /* obsolete */
- uint32 txburst; /* obsolete */
+ uint32 txnack; /* obsolete */
+ uint32 rxback; /* blockack rxcnt */
+ uint32 txback; /* blockack txcnt */
/* 802.11 MIB counters, pp. 614 of 802.11 reaff doc. */
uint32 txfrag; /* dot11TransmittedFragmentCount */
uint32 rx36mbps; /* packets rx at 36Mbps */
uint32 rx48mbps; /* packets rx at 48Mbps */
uint32 rx54mbps; /* packets rx at 54Mbps */
- uint32 rx108mbps; /* packets rx at 108mbps */
+ uint32 rx108mbps; /* packets rx at 108mbps */
uint32 rx162mbps; /* packets rx at 162mbps */
uint32 rx216mbps; /* packets rx at 216 mbps */
uint32 rx270mbps; /* packets rx at 270 mbps */
uint32 rfdisable; /* count of radio disables */
uint32 bphy_rxcrsglitch; /* PHY count of bphy glitches */
+ uint32 bphy_badplcp;
uint32 txexptime; /* Tx frames suppressed due to timer expiration */
uint32 pstatxbcmc; /* count of bcmc frames transmitted on all psta */
uint32 cso_passthrough; /* hw cso required but passthrough */
+ uint32 cso_normal; /* hw cso hdr for normal process */
uint32 chained; /* number of frames chained */
uint32 chainedsz1; /* number of chain size 1 frames */
uint32 unchained; /* number of frames not chained */
uint32 maxchainsz; /* max chain size so far */
uint32 currchainsz; /* current chain size */
- uint32 cso_normal; /* hw cso hdr for normal process */
+ uint32 rxdrop20s; /* drop secondary cnt */
+ uint32 pciereset; /* Secondary Bus Reset issued by driver */
+ uint32 cfgrestore; /* configspace restore by driver */
+ uint32 reinitreason[NREINITREASONCOUNT]; /* reinitreason counters; 0: Unknown reason */
} wl_cnt_t;
#ifndef LINUX_POSTMOGRIFY_REMOVAL
uint32 txackfrm; /* number of ACK frames sent out */
uint32 txdnlfrm; /* Not used */
uint32 txbcnfrm; /* beacons transmitted */
- uint32 txfunfl[8]; /* per-fifo tx underflows */
+ uint32 txfunfl[6]; /* per-fifo tx underflows */
+ uint32 rxtoolate; /* receive too late */
+ uint32 txfbw; /* transmit at fallback bw (dynamic bw) */
uint32 txtplunfl; /* Template underflows (mac was too slow to transmit ACK/CTS
* or BCN)
*/
*/
uint32 rxnack;
uint32 frmscons;
- uint32 txnack;
- uint32 txglitch_nack; /* obsolete */
- uint32 txburst; /* obsolete */
+ uint32 txnack; /* obsolete */
+ uint32 rxback; /* blockack rxcnt */
+ uint32 txback; /* blockack txcnt */
/* 802.11 MIB counters, pp. 614 of 802.11 reaff doc. */
uint32 txfrag; /* dot11TransmittedFragmentCount */
uint32 rfdisable; /* count of radio disables */
uint32 bphy_rxcrsglitch; /* PHY count of bphy glitches */
+ uint32 bphy_badplcp;
uint32 txmpdu_sgi; /* count for sgi transmit */
uint32 rxmpdu_sgi; /* count for sgi received */
uint32 txmpdu_stbc; /* count for stbc transmit */
uint32 rxmpdu_stbc; /* count for stbc received */
+
+ uint32 rxdrop20s; /* drop secondary cnt */
+
} wl_cnt_ver_six_t;
-#define WL_DELTA_STATS_T_VERSION 1 /* current version of wl_delta_stats_t struct */
+#define WL_DELTA_STATS_T_VERSION 2 /* current version of wl_delta_stats_t struct */
typedef struct {
uint16 version; /* see definition of WL_DELTA_STATS_T_VERSION */
uint32 rx36mbps; /* packets rx at 36Mbps */
uint32 rx48mbps; /* packets rx at 48Mbps */
uint32 rx54mbps; /* packets rx at 54Mbps */
- uint32 rx108mbps; /* packets rx at 108mbps */
+ uint32 rx108mbps; /* packets rx at 108mbps */
uint32 rx162mbps; /* packets rx at 162mbps */
uint32 rx216mbps; /* packets rx at 216 mbps */
uint32 rx270mbps; /* packets rx at 270 mbps */
uint32 rx432mbps; /* packets rx at 432 mbps */
uint32 rx486mbps; /* packets rx at 486 mbps */
uint32 rx540mbps; /* packets rx at 540 mbps */
+
+ /* phy stats */
+ uint32 rxbadplcp;
+ uint32 rxcrsglitch;
+ uint32 bphy_rxcrsglitch;
+ uint32 bphy_badplcp;
+
} wl_delta_stats_t;
#endif /* LINUX_POSTMOGRIFY_REMOVAL */
-#define WL_WME_CNT_VERSION 1 /* current version of wl_wme_cnt_t */
-
typedef struct {
uint32 packets;
uint32 bytes;
#define WL_MKEEP_ALIVE_FIXED_LEN OFFSETOF(wl_mkeep_alive_pkt_t, data)
#define WL_MKEEP_ALIVE_PRECISION 500
+/* TCP Keep-Alive conn struct */
+typedef struct wl_mtcpkeep_alive_conn_pkt {
+ struct ether_addr saddr; /* src mac address */
+ struct ether_addr daddr; /* dst mac address */
+ struct ipv4_addr sipaddr; /* source IP addr */
+ struct ipv4_addr dipaddr; /* dest IP addr */
+ uint16 sport; /* src port */
+ uint16 dport; /* dest port */
+ uint32 seq; /* seq number */
+ uint32 ack; /* ACK number */
+ uint16 tcpwin; /* TCP window */
+} wl_mtcpkeep_alive_conn_pkt_t;
+
+/* TCP Keep-Alive interval struct */
+typedef struct wl_mtcpkeep_alive_timers_pkt {
+ uint16 interval; /* interval timer */
+ uint16 retry_interval; /* retry_interval timer */
+ uint16 retry_count; /* retry_count */
+} wl_mtcpkeep_alive_timers_pkt_t;
+
+typedef struct wake_info {
+ uint32 wake_reason;
+ uint32 wake_info_len; /* size of packet */
+ uchar packet[1];
+} wake_info_t;
+
+typedef struct wake_pkt {
+ uint32 wake_pkt_len; /* size of packet */
+ uchar packet[1];
+} wake_pkt_t;
+
+
+#define WL_MTCPKEEP_ALIVE_VERSION 1
+
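+/* Usage sketch (illustrative only; assumes the usual wlioctl typedefs such as
+ * struct ether_addr / struct ipv4_addr are in scope and that the values come
+ * from a TCP connection tracked on the host side):
+ *
+ *     wl_mtcpkeep_alive_conn_pkt_t ka;
+ *     memset(&ka, 0, sizeof(ka));
+ *     memcpy(&ka.saddr, own_mac, ETHER_ADDR_LEN);
+ *     memcpy(&ka.daddr, gw_mac, ETHER_ADDR_LEN);
+ *     memcpy(&ka.sipaddr, &own_ip, sizeof(ka.sipaddr));
+ *     memcpy(&ka.dipaddr, &peer_ip, sizeof(ka.dipaddr));
+ *     ka.sport = 49152;        local TCP port (host byte order assumed here)
+ *     ka.dport = 443;
+ *     ka.seq = next_seq;       sequence/ack numbers of the live connection
+ *     ka.ack = next_ack;
+ *     ka.tcpwin = 65535;
+ *
+ * own_mac, gw_mac, own_ip, peer_ip, next_seq and next_ack are hypothetical
+ * host-side variables, not defined in this header.
+ */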
#ifdef WLBA
#define WLC_BA_CNT_VERSION 1 /* current version of wlc_ba_cnt_t */
uint8 enable; /* enable/disable */
};
+/* struct for ampdu tx/rx aggregation control */
+struct ampdu_aggr {
+ int8 aggr_override; /* aggr overridden by dongle; not to be set by host */
+ uint16 conf_TID_bmap; /* bitmap of TIDs to configure */
+ uint16 enab_TID_bmap; /* enable/disable per TID */
+};
+
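+/* Usage sketch (illustrative only): conf_TID_bmap selects which TIDs the
+ * setting applies to and enab_TID_bmap carries the on/off value for each
+ * selected TID.  To disable aggregation for TIDs 4 and 5 while leaving the
+ * others untouched, a host could fill the struct as:
+ *
+ *     struct ampdu_aggr aggr;
+ *     memset(&aggr, 0, sizeof(aggr));
+ *     aggr.conf_TID_bmap = (1 << 4) | (1 << 5);   TIDs being configured
+ *     aggr.enab_TID_bmap = 0;                     0 bit = disabled for those TIDs
+ *
+ * aggr_override is reported by the dongle and is left untouched.
+ */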
/* structure for identifying ea/tid for sending addba/delba */
struct ampdu_ea_tid {
struct ether_addr ea; /* Station address */
uint8 tid; /* tid */
+ uint8 initiator; /* 0 is recipient, 1 is originator */
};
/* structure for identifying retry/tid for retry_limit_tid/rr_retry_limit_tid */
struct ampdu_retry_tid {
uint8 retry; /* retry value */
};
-/* Different discovery modes for dpt */
-#define DPT_DISCOVERY_MANUAL 0x01 /* manual discovery mode */
-#define DPT_DISCOVERY_AUTO 0x02 /* auto discovery mode */
-#define DPT_DISCOVERY_SCAN 0x04 /* scan-based discovery mode */
-
-/* different path selection values */
-#define DPT_PATHSEL_AUTO 0 /* auto mode for path selection */
-#define DPT_PATHSEL_DIRECT 1 /* always use direct DPT path */
-#define DPT_PATHSEL_APPATH 2 /* always use AP path */
-
-/* different ops for deny list */
-#define DPT_DENY_LIST_ADD 1 /* add to dpt deny list */
-#define DPT_DENY_LIST_REMOVE 2 /* remove from dpt deny list */
-
-/* different ops for manual end point */
-#define DPT_MANUAL_EP_CREATE 1 /* create manual dpt endpoint */
-#define DPT_MANUAL_EP_MODIFY 2 /* modify manual dpt endpoint */
-#define DPT_MANUAL_EP_DELETE 3 /* delete manual dpt endpoint */
-
-/* structure for dpt iovars */
-typedef struct dpt_iovar {
- struct ether_addr ea; /* Station address */
- uint8 mode; /* mode: depends on iovar */
- uint32 pad; /* future */
-} dpt_iovar_t;
-
-/* flags to indicate DPT status */
-#define DPT_STATUS_ACTIVE 0x01 /* link active (though may be suspended) */
-#define DPT_STATUS_AES 0x02 /* link secured through AES encryption */
-#define DPT_STATUS_FAILED 0x04 /* DPT link failed */
-
-#define DPT_FNAME_LEN 48 /* Max length of friendly name */
-
-typedef struct dpt_status {
- uint8 status; /* flags to indicate status */
- uint8 fnlen; /* length of friendly name */
- uchar name[DPT_FNAME_LEN]; /* friendly name */
- uint32 rssi; /* RSSI of the link */
- sta_info_t sta; /* sta info */
-} dpt_status_t;
-
-/* structure for dpt list */
-typedef struct dpt_list {
- uint32 num; /* number of entries in struct */
- dpt_status_t status[1]; /* per station info */
-} dpt_list_t;
-
-/* structure for dpt friendly name */
-typedef struct dpt_fname {
- uint8 len; /* length of friendly name */
- uchar name[DPT_FNAME_LEN]; /* friendly name */
-} dpt_fname_t;
-
#define BDD_FNAME_LEN 32 /* Max length of friendly name */
typedef struct bdd_fname {
uint8 len; /* length of friendly name */
};
#ifdef WLTDLS
-/* different ops for manual end point */
-#define TDLS_MANUAL_EP_CREATE 1 /* create manual dpt endpoint */
-#define TDLS_MANUAL_EP_MODIFY 2 /* modify manual dpt endpoint */
-#define TDLS_MANUAL_EP_DELETE 3 /* delete manual dpt endpoint */
-#define TDLS_MANUAL_EP_PM 4 /* put dpt endpoint in PM mode */
-#define TDLS_MANUAL_EP_WAKE 5 /* wake up dpt endpoint from PM */
-#define TDLS_MANUAL_EP_DISCOVERY 6 /* discover if endpoint is TDLS capable */
-#define TDLS_MANUAL_EP_CHSW 7 /* channel switch */
-#define TDLS_MANUAL_EP_WFD_TPQ 8 /* WiFi-Display Tunneled Probe reQuest */
-
/* structure for tdls iovars */
typedef struct tdls_iovar {
struct ether_addr ea; /* Station address */
uint32 pad; /* future */
} tdls_iovar_t;
-/* modes */
-#define TDLS_WFD_IE_TX 0
-#define TDLS_WFD_IE_RX 1
-#define TDLS_WFD_PROBE_IE_TX 2
-#define TDLS_WFD_PROBE_IE_RX 3
-#define TDLS_WFD_IE_SIZE 512
+#define TDLS_WFD_IE_SIZE 512
/* structure for tdls wfd ie */
typedef struct tdls_wfd_ie_iovar {
struct ether_addr ea; /* Station address */
#define WL_WOWL_KEEPALIVE_MAX_PACKET_SIZE 80
#define WLC_WOWL_MAX_KEEPALIVE 2
-/* define for flag */
-#define TSPEC_PENDING 0 /* TSPEC pending */
-#define TSPEC_ACCEPTED 1 /* TSPEC accepted */
-#define TSPEC_REJECTED 2 /* TSPEC rejected */
-#define TSPEC_UNKNOWN 3 /* TSPEC unknown */
-#define TSPEC_STATUS_MASK 7 /* TSPEC status mask */
-
-#ifdef BCMCCX
-/* "wlan_reason" iovar interface */
-#define WL_WLAN_ASSOC_REASON_NORMAL_NETWORK 0 /* normal WLAN network setup */
-#define WL_WLAN_ASSOC_REASON_ROAM_FROM_CELLULAR_NETWORK 1 /* roam from Cellular network */
-#define WL_WLAN_ASSOC_REASON_ROAM_FROM_LAN 2 /* roam from LAN */
-#define WL_WLAN_ASSOC_REASON_MAX 2 /* largest value allowed */
-#endif /* BCMCCX */
-
-/* Software feature flag defines used by wlfeatureflag */
-#ifdef WLAFTERBURNER
-#define WL_SWFL_ABBFL 0x0001 /* Allow Afterburner on systems w/o hardware BFL */
-#define WL_SWFL_ABENCORE 0x0002 /* Allow AB on non-4318E chips */
-#endif /* WLAFTERBURNER */
-#define WL_SWFL_NOHWRADIO 0x0004
-#define WL_SWFL_FLOWCONTROL 0x0008 /* Enable backpressure to OS stack */
-#define WL_SWFL_WLBSSSORT 0x0010 /* Per-port supports sorting of BSS */
-
-#define WL_LIFETIME_MAX 0xFFFF /* Max value in ms */
-
/* Packet lifetime configuration per ac */
typedef struct wl_lifetime {
uint32 ac; /* access class */
uint8 count; /* count # of beacons before switching */
chanspec_t chspec; /* chanspec */
uint8 reg; /* regulatory class */
+ uint8 frame_type; /* csa frame type, unicast or broadcast */
} wl_chan_switch_t;
-/* Roaming trigger definitions for WLC_SET_ROAM_TRIGGER.
- *
- * (-100 < value < 0) value is used directly as a roaming trigger in dBm
- * (0 <= value) value specifies a logical roaming trigger level from
- * the list below
- *
- * WLC_GET_ROAM_TRIGGER always returns roaming trigger value in dBm, never
- * the logical roam trigger value.
- */
-#define WLC_ROAM_TRIGGER_DEFAULT 0 /* default roaming trigger */
-#define WLC_ROAM_TRIGGER_BANDWIDTH 1 /* optimize for bandwidth roaming trigger */
-#define WLC_ROAM_TRIGGER_DISTANCE 2 /* optimize for distance roaming trigger */
-#define WLC_ROAM_TRIGGER_AUTO 3 /* auto-detect environment */
-#define WLC_ROAM_TRIGGER_MAX_VALUE 3 /* max. valid value */
-
-#define WLC_ROAM_NEVER_ROAM_TRIGGER (-100) /* Avoid Roaming by setting a large value */
-
-/* Preferred Network Offload (PNO, formerly PFN) defines */
-#define WPA_AUTH_PFN_ANY 0xffffffff /* for PFN, match only ssid */
-
enum {
PFN_LIST_ORDER,
PFN_RSSI
#define ENABLE_ADAPTSCAN_BIT 6
#define IMMEDIATE_EVENT_BIT 8
#define SUPPRESS_SSID_BIT 9
-#define ENABLE_NET_OFFLOAD_BIT 10
-#define REPORT_SEPERATELY_BIT 11
+#define ENABLE_NET_OFFLOAD_BIT 10
+/* report found/lost events for SSID and BSSID networks separately */
+#define REPORT_SEPERATELY_BIT 11
-#define SORT_CRITERIA_MASK 0x0001
-#define AUTO_NET_SWITCH_MASK 0x0002
-#define ENABLE_BKGRD_SCAN_MASK 0x0004
-#define IMMEDIATE_SCAN_MASK 0x0008
-#define AUTO_CONNECT_MASK 0x0010
+#define SORT_CRITERIA_MASK 0x0001
+#define AUTO_NET_SWITCH_MASK 0x0002
+#define ENABLE_BKGRD_SCAN_MASK 0x0004
+#define IMMEDIATE_SCAN_MASK 0x0008
+#define AUTO_CONNECT_MASK 0x0010
-#define ENABLE_BD_SCAN_MASK 0x0020
+#define ENABLE_BD_SCAN_MASK 0x0020
#define ENABLE_ADAPTSCAN_MASK 0x00c0
#define IMMEDIATE_EVENT_MASK 0x0100
#define SUPPRESS_SSID_MASK 0x0200
#define ENABLE_NET_OFFLOAD_MASK 0x0400
-#define REPORT_SEPERATELY_MASK 0x800
+/* report found/lost events for SSID and BSSID networks separately */
+#define REPORT_SEPERATELY_MASK 0x0800
-#define PFN_VERSION 2
-#define PFN_SCANRESULT_VERSION 1
-#define PFN_LSCANRESULT_VERSION 2
-#define MAX_PFN_LIST_COUNT 16
+#define PFN_VERSION 2
+#define PFN_SCANRESULT_VERSION 1
+#define MAX_PFN_LIST_COUNT 16
#define PFN_COMPLETE 1
#define PFN_INCOMPLETE 0
-#define PFN_OUTOFMEMORY 2
#define DEFAULT_BESTN 2
#define DEFAULT_MSCAN 0
#define DEFAULT_REPEAT 10
-#define DEFAULT_EXP 2
-#define DEFAULT_RTTN 0
+#define DEFAULT_EXP 2
+
+#define PFN_PARTIAL_SCAN_BIT 0
+#define PFN_PARTIAL_SCAN_MASK 1
/* PFN network info structure */
typedef struct wl_pfn_subnet_info {
} wl_pfn_net_info_t;
typedef struct wl_pfn_lnet_info {
- wl_pfn_subnet_info_t pfnsubnet;
- int32 RSSI;
- uint32 timestamp;
- uint16 rtt0;
- uint16 rtt1;
+ wl_pfn_subnet_info_t pfnsubnet; /* BSSID + channel + SSID len + SSID */
+ uint16 flags; /* partial scan, etc */
+ int16 RSSI; /* receive signal strength (in dBm) */
+ uint32 timestamp; /* age in milliseconds */
+ uint16 rtt0; /* estimated distance to this AP in centimeters */
+ uint16 rtt1; /* standard deviation of the distance to this AP in centimeters */
} wl_pfn_lnet_info_t;
typedef struct wl_pfn_lscanresults {
wl_pfn_lnet_info_t netinfo[1];
} wl_pfn_lscanresults_t;
+/* used to report one or more PFN scan results */
typedef struct wl_pfn_scanresults {
uint32 version;
uint32 status;
wl_pfn_net_info_t netinfo[1];
} wl_pfn_scanresults_t;
+/* used to report exactly one scan result */
+/* plus reports detailed scan info in bss_info */
+typedef struct wl_pfn_scanresult {
+ uint32 version;
+ uint32 status;
+ uint32 count;
+ wl_pfn_net_info_t netinfo;
+ wl_bss_info_t bss_info;
+} wl_pfn_scanresult_t;
+
/* PFN data structure */
typedef struct wl_pfn_param {
int32 version; /* PNO parameters version */
int32 scan_freq; /* Scan frequency */
int32 lost_network_timeout; /* Timeout in sec. to declare
- * discovered network as lost
- */
+ * discovered network as lost
+ */
int16 flags; /* Bit field to control features
- * of PFN such as sort criteria auto
- * enable switch and background scan
- */
+ * of PFN such as sort criteria auto
+ * enable switch and background scan
+ */
int16 rssi_margin; /* Margin to avoid jitter for choosing a
- * PFN based on RSSI sort criteria
- */
- uint8 bestn; /* number of best networks in each scan */
- uint8 mscan; /* number of scans recorded */
- uint8 repeat; /* Minimum number of scan intervals
- *before scan frequency changes in adaptive scan
- */
- uint8 exp; /* Exponent of 2 for maximum scan interval */
- int32 slow_freq; /* slow scan period */
- uint8 rttn;
+ * PFN based on RSSI sort criteria
+ */
+ uint8 bestn; /* number of best networks in each scan */
+ uint8 mscan; /* number of scans recorded */
+ uint8 repeat; /* Minimum number of scan intervals
+ *before scan frequency changes in adaptive scan
+ */
+ uint8 exp; /* Exponent of 2 for maximum scan interval */
+ int32 slow_freq; /* slow scan period */
} wl_pfn_param_t;
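+/* Usage sketch (illustrative only): a minimal PNO configuration that scans
+ * every 60 s, declares a network lost after 180 s, requests RSSI-based
+ * sorting and keeps the default best-network/adaptive-scan settings:
+ *
+ *     wl_pfn_param_t pfn_param;
+ *     memset(&pfn_param, 0, sizeof(pfn_param));
+ *     pfn_param.version = PFN_VERSION;
+ *     pfn_param.scan_freq = 60;                  seconds between scans
+ *     pfn_param.lost_network_timeout = 180;      seconds before a network is "lost"
+ *     pfn_param.flags = SORT_CRITERIA_MASK;      sort results by RSSI (PFN_RSSI)
+ *     pfn_param.rssi_margin = 30;
+ *     pfn_param.bestn = DEFAULT_BESTN;
+ *     pfn_param.mscan = DEFAULT_MSCAN;
+ *     pfn_param.repeat = DEFAULT_REPEAT;
+ *     pfn_param.exp = DEFAULT_EXP;
+ *
+ * Exact units and which fields the firmware honours depend on the PNO
+ * implementation; treat the values above as placeholders.
+ */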
typedef struct wl_pfn_bssid {
- struct ether_addr macaddr;
+ struct ether_addr macaddr;
/* Bit4: suppress_lost, Bit3: suppress_found */
- uint16 flags;
+ uint16 flags;
} wl_pfn_bssid_t;
#define WL_PFN_SUPPRESSFOUND_MASK 0x08
#define WL_PFN_SUPPRESSLOST_MASK 0x10
#define WL_PFN_RSSI_SHIFT 8
typedef struct wl_pfn_cfg {
- uint32 reporttype;
- int32 channel_num;
- uint16 channel_list[WL_NUMCHANNELS];
+ uint32 reporttype;
+ int32 channel_num;
+ uint16 channel_list[WL_NUMCHANNELS];
+ uint32 flags;
} wl_pfn_cfg_t;
#define WL_PFN_REPORT_ALLNET 0
#define WL_PFN_REPORT_SSIDNET 1
#define WL_PFN_REPORT_BSSIDNET 2
+#define WL_PFN_CFG_FLAGS_PROHIBITED 0x00000001 /* Accept and use prohibited channels */
+#define WL_PFN_CFG_FLAGS_RESERVED 0xfffffffe /* Remaining reserved for future use */
+
typedef struct wl_pfn {
- wlc_ssid_t ssid; /* ssid name and its length */
- int32 flags; /* bit2: hidden */
- int32 infra; /* BSS Vs IBSS */
- int32 auth; /* Open Vs Closed */
- int32 wpa_auth; /* WPA type */
- int32 wsec; /* wsec value */
+ wlc_ssid_t ssid; /* ssid name and its length */
+ int32 flags; /* bit2: hidden */
+ int32 infra; /* BSS Vs IBSS */
+ int32 auth; /* Open Vs Closed */
+ int32 wpa_auth; /* WPA type */
+ int32 wsec; /* wsec value */
} wl_pfn_t;
-#define WL_PFN_HIDDEN_BIT 2
-#define PNO_SCAN_MAX_FW 508*1000 /* max time scan time in msec */
-#define PNO_SCAN_MAX_FW_SEC PNO_SCAN_MAX_FW/1000 /* max time scan time in SEC */
-#define PNO_SCAN_MIN_FW_SEC 10 /* min time scan time in SEC */
-#define WL_PFN_HIDDEN_MASK 0x4
+
+typedef struct wl_pfn_list {
+ uint32 version;
+ uint32 enabled;
+ uint32 count;
+ wl_pfn_t pfn[1];
+} wl_pfn_list_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct pfn_olmsg_params_t {
+ wlc_ssid_t ssid;
+ uint32 cipher_type;
+ uint32 auth_type;
+ uint8 channels[4];
+} BWL_POST_PACKED_STRUCT pfn_olmsg_params;
+
+#define WL_PFN_HIDDEN_BIT 2
+#define WL_PFN_HIDDEN_MASK 0x4
+
+#ifndef BESTN_MAX
+#define BESTN_MAX 3
+#endif
+
+#ifndef MSCAN_MAX
+#define MSCAN_MAX 90
+#endif
#endif /* LINUX_POSTMOGRIFY_REMOVAL */
uint16 interval; /* extended listen interval */
} wl_p2po_listen_t;
-/* ANQP offload */
+/* GAS state machine tunable parameters. Structure field values of 0 means use the default. */
+typedef struct wl_gas_config {
+ uint16 max_retransmit; /* Max # of firmware/driver retransmits on no Ack
+ * from peer (on top of the ucode retries).
+ */
+ uint16 response_timeout; /* Max time to wait for a GAS-level response
+ * after sending a packet.
+ */
+ uint16 max_comeback_delay; /* Max GAS response comeback delay.
+ * Exceeding this fails the GAS exchange.
+ */
+ uint16 max_retries; /* Max # of GAS state machine retries on failure
+ * of a GAS frame exchange.
+ */
+} wl_gas_config_t;
+
+/* P2P Find Offload parameters */
+typedef BWL_PRE_PACKED_STRUCT struct wl_p2po_find_config {
+ uint16 version; /* Version of this struct */
+ uint16 length; /* sizeof(wl_p2po_find_config_t) */
+ int32 search_home_time; /* P2P search state home time when concurrent
+ * connection exists. -1 for default.
+ */
+ uint8 num_social_channels;
+ /* Number of social channels up to WL_P2P_SOCIAL_CHANNELS_MAX.
+ * 0 means use default social channels.
+ */
+ uint8 flags;
+ uint16 social_channels[1]; /* Variable length array of social channels */
+} BWL_POST_PACKED_STRUCT wl_p2po_find_config_t;
+#define WL_P2PO_FIND_CONFIG_VERSION 2 /* value for version field */
+
+/* wl_p2po_find_config_t flags */
+#define P2PO_FIND_FLAG_SCAN_ALL_APS 0x01 /* Whether to scan for all APs in the p2po_find
+ * periodic scans of all channels.
+ * 0 means scan for only P2P devices.
+ * 1 means scan for P2P devices plus non-P2P APs.
+ */
+
+
+/* For adding a WFDS service to seek */
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint32 seek_hdl; /* unique id chosen by host */
+ uint8 addr[6]; /* Seek service from a specific device with this
+ * MAC address, all 1's for any device.
+ */
+ uint8 service_hash[P2P_WFDS_HASH_LEN];
+ uint8 service_name_len;
+ uint8 service_name[MAX_WFDS_SEEK_SVC_NAME_LEN];
+ /* Service name to seek, not null terminated */
+ uint8 service_info_req_len;
+ uint8 service_info_req[1]; /* Service info request, not null terminated.
+ * Variable length specified by service_info_req_len.
+ * Maximum length is MAX_WFDS_SEEK_SVC_INFO_LEN.
+ */
+} BWL_POST_PACKED_STRUCT wl_p2po_wfds_seek_add_t;
+
+/* For deleting a WFDS service to seek */
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint32 seek_hdl; /* delete service specified by id */
+} BWL_POST_PACKED_STRUCT wl_p2po_wfds_seek_del_t;
+
+
+/* For adding a WFDS service to advertise */
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint32 advertise_hdl; /* unique id chosen by host */
+ uint8 service_hash[P2P_WFDS_HASH_LEN];
+ uint32 advertisement_id;
+ uint16 service_config_method;
+ uint8 service_name_len;
+ uint8 service_name[MAX_WFDS_SVC_NAME_LEN];
+ /* Service name, not null terminated */
+ uint8 service_status;
+ uint16 service_info_len;
+ uint8 service_info[1]; /* Service info, not null terminated.
+ * Variable length specified by service_info_len.
+ * Maximum length is MAX_WFDS_ADV_SVC_INFO_LEN.
+ */
+} BWL_POST_PACKED_STRUCT wl_p2po_wfds_advertise_add_t;
+
+/* For deleting a WFDS service to advertise */
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint32 advertise_hdl; /* delete service specified by hdl */
+} BWL_POST_PACKED_STRUCT wl_p2po_wfds_advertise_del_t;
+
+/* P2P Offload discovery mode for the p2po_state iovar */
+typedef enum {
+ WL_P2PO_DISC_STOP,
+ WL_P2PO_DISC_LISTEN,
+ WL_P2PO_DISC_DISCOVERY
+} disc_mode_t;
+
+/* ANQP offload */
#define ANQPO_MAX_QUERY_SIZE 256
typedef struct {
- uint16 max_retransmit; /* -1 use default, max retransmit on no ACK from peer */
- uint16 response_timeout; /* -1 use default, msec to wait for resp after tx packet */
- uint16 max_comeback_delay; /* -1 use default, max comeback delay in resp else fail */
- uint16 max_retries; /* -1 use default, max retries on failure */
+ uint16 max_retransmit; /* ~0 use default, max retransmit on no ACK from peer */
+ uint16 response_timeout; /* ~0 use default, msec to wait for resp after tx packet */
+ uint16 max_comeback_delay; /* ~0 use default, max comeback delay in resp else fail */
+ uint16 max_retries; /* ~0 use default, max retries on failure */
uint16 query_len; /* length of ANQP query */
uint8 query_data[1]; /* ANQP encoded query (max ANQPO_MAX_QUERY_SIZE) */
} wl_anqpo_set_t;
struct ether_addr bssid[1]; /* max ANQPO_MAX_IGNORE_BSSID */
} wl_anqpo_ignore_bssid_list_t;
-/* TCP Checksum Offload defines */
-#define TOE_TX_CSUM_OL 0x00000001
-#define TOE_RX_CSUM_OL 0x00000002
-
#ifndef LINUX_POSTMOGRIFY_REMOVAL
-/* TCP Checksum Offload error injection for testing */
-#define TOE_ERRTEST_TX_CSUM 0x00000001
-#define TOE_ERRTEST_RX_CSUM 0x00000002
-#define TOE_ERRTEST_RX_CSUM2 0x00000004
struct toe_ol_stats_t {
/* Num of tx packets that don't need to be checksummed */
uint32 rx_icmp_errinj;
};
-/* ARP Offload feature flags for arp_ol iovar */
-#define ARP_OL_AGENT 0x00000001
-#define ARP_OL_SNOOP 0x00000002
-#define ARP_OL_HOST_AUTO_REPLY 0x00000004
-#define ARP_OL_PEER_AUTO_REPLY 0x00000008
-
-/* ARP Offload error injection */
-#define ARP_ERRTEST_REPLY_PEER 0x1
-#define ARP_ERRTEST_REPLY_HOST 0x2
-
-#define ARP_MULTIHOMING_MAX 8 /* Maximum local host IP addresses */
-#define ND_MULTIHOMING_MAX 10 /* Maximum local host IP addresses */
-
/* Arp offload statistic counts */
struct arp_ol_stats_t {
uint32 host_ip_entries; /* Host IP table addresses (more than one if multihomed) */
#define WL_KEEP_ALIVE_FIXED_LEN OFFSETOF(wl_keep_alive_pkt_t, data)
+
/*
* Dongle pattern matching filter.
*/
+#define MAX_WAKE_PACKET_CACHE_BYTES 128 /* Maximum cached wake packet */
+
+#define MAX_WAKE_PACKET_BYTES (DOT11_A3_HDR_LEN + \
+ DOT11_QOS_LEN + \
+ sizeof(struct dot11_llc_snap_header) + \
+ ETHER_MAX_DATA)
+
+typedef struct pm_wake_packet {
+ uint32 status; /* Whether the wake reason is a packet (if so, all other fields are valid) */
+ uint32 pattern_id; /* Pattern ID that matched */
+ uint32 original_packet_size;
+ uint32 saved_packet_size;
+ uchar packet[MAX_WAKE_PACKET_CACHE_BYTES];
+} pm_wake_packet_t;
+
/* Packet filter types. Currently, only pattern matching is supported. */
typedef enum wl_pkt_filter_type {
- WL_PKT_FILTER_TYPE_PATTERN_MATCH /* Pattern matching filter */
+ WL_PKT_FILTER_TYPE_PATTERN_MATCH=0, /* Pattern matching filter */
+ WL_PKT_FILTER_TYPE_MAGIC_PATTERN_MATCH=1, /* Magic packet match */
+ WL_PKT_FILTER_TYPE_PATTERN_LIST_MATCH=2, /* A pattern list (match all to match filter) */
+ WL_PKT_FILTER_TYPE_ENCRYPTED_PATTERN_MATCH=3, /* SECURE WOWL magic / net pattern match */
} wl_pkt_filter_type_t;
#define WL_PKT_FILTER_TYPE wl_pkt_filter_type_t
+/* String mapping for types that may be used by applications or debug */
+#define WL_PKT_FILTER_TYPE_NAMES \
+ { "PATTERN", WL_PKT_FILTER_TYPE_PATTERN_MATCH }, \
+ { "MAGIC", WL_PKT_FILTER_TYPE_MAGIC_PATTERN_MATCH }, \
+ { "PATLIST", WL_PKT_FILTER_TYPE_PATTERN_LIST_MATCH }
+
+/* Secure WOWL packets arrive encrypted and must be decrypted before the filter match is checked */
+typedef struct wl_pkt_decrypter {
+ uint8* (*dec_cb)(void* dec_ctx, const void *sdu, int sending);
+ void* dec_ctx;
+} wl_pkt_decrypter_t;
+
/* Pattern matching filter. Specifies an offset within received packets to
* start matching, the pattern to match, the size of the pattern, and a bitmask
* that indicates which bits within the pattern should be matched.
*/
typedef struct wl_pkt_filter_pattern {
- uint32 offset; /* Offset within received packet to start pattern matching.
+ union {
+ uint32 offset; /* Offset within received packet to start pattern matching.
* Offset '0' is the first byte of the ethernet header.
*/
+ wl_pkt_decrypter_t* decrypt_ctx; /* Decrypt context */
+ };
uint32 size_bytes; /* Size of the pattern. Bitmask must be the same size. */
uint8 mask_and_pattern[1]; /* Variable length mask and pattern data. mask starts
* at offset 0. Pattern immediately follows mask.
*/
} wl_pkt_filter_pattern_t;
+/* A pattern list is a numerically specified list of modified pattern structures. */
+typedef struct wl_pkt_filter_pattern_listel {
+ uint16 rel_offs; /* Offset to begin match (relative to 'base' below) */
+ uint16 base_offs; /* Base for offset (defined below) */
+ uint16 size_bytes; /* Size of mask/pattern */
+ uint16 match_flags; /* Additional flags controlling the match */
+ uint8 mask_and_data[1]; /* Variable length mask followed by data, each size_bytes */
+} wl_pkt_filter_pattern_listel_t;
+
+typedef struct wl_pkt_filter_pattern_list {
+ uint8 list_cnt; /* Number of elements in the list */
+ uint8 PAD1[1]; /* Reserved (possible version: reserved) */
+ uint16 totsize; /* Total size of this pattern list (includes this struct) */
+ wl_pkt_filter_pattern_listel_t patterns[1]; /* Variable number of list elements */
+} wl_pkt_filter_pattern_list_t;
+
/* IOVAR "pkt_filter_add" parameter. Used to install packet filters. */
typedef struct wl_pkt_filter {
uint32 id; /* Unique filter id, specified by app. */
uint32 negate_match; /* Negate the result of filter matches */
union { /* Filter definitions */
wl_pkt_filter_pattern_t pattern; /* Pattern matching filter */
+ wl_pkt_filter_pattern_list_t patlist; /* List of patterns to match */
} u;
} wl_pkt_filter_t;
+/* IOVAR "tcp_keep_set" parameter. Used to install tcp keep_alive stuff. */
+typedef struct wl_tcp_keep_set {
+ uint32 val1;
+ uint32 val2;
+} wl_tcp_keep_set_t;
+
#define WL_PKT_FILTER_FIXED_LEN OFFSETOF(wl_pkt_filter_t, u)
#define WL_PKT_FILTER_PATTERN_FIXED_LEN OFFSETOF(wl_pkt_filter_pattern_t, mask_and_pattern)
+#define WL_PKT_FILTER_PATTERN_LIST_FIXED_LEN OFFSETOF(wl_pkt_filter_pattern_list_t, patterns)
+#define WL_PKT_FILTER_PATTERN_LISTEL_FIXED_LEN \
+ OFFSETOF(wl_pkt_filter_pattern_listel_t, mask_and_data)
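+/* Usage sketch (illustrative only): the pattern filter is a variable-length
+ * structure, so a host tool would size the buffer from the FIXED_LEN macros
+ * plus twice the pattern size (mask bytes followed by pattern bytes):
+ *
+ *     uint32 pattern_size = 4;                   e.g. match 4 bytes
+ *     uint32 buf_len = WL_PKT_FILTER_FIXED_LEN +
+ *                      WL_PKT_FILTER_PATTERN_FIXED_LEN +
+ *                      2 * pattern_size;
+ *     wl_pkt_filter_t *filter = (wl_pkt_filter_t *)malloc(buf_len);
+ *     memset(filter, 0, buf_len);
+ *     filter->id = 100;                          chosen by the host
+ *     filter->type = WL_PKT_FILTER_TYPE_PATTERN_MATCH;
+ *     filter->u.pattern.offset = 0;              start of the ethernet header
+ *     filter->u.pattern.size_bytes = pattern_size;
+ *     memcpy(filter->u.pattern.mask_and_pattern, mask, pattern_size);
+ *     memcpy(filter->u.pattern.mask_and_pattern + pattern_size, pattern, pattern_size);
+ *
+ * 'mask' and 'pattern' are hypothetical host-side byte arrays of pattern_size
+ * bytes each; the buffer is what would be handed to the "pkt_filter_add" IOVAR.
+ */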
/* IOVAR "pkt_filter_enable" parameter. */
typedef struct wl_pkt_filter_enable {
uint32 num_pkts_discarded; /* # packets discarded by dongle for all filters */
} wl_pkt_filter_stats_t;
+/* IOVAR "pkt_filter_ports" parameter. Configure TCP/UDP port filters. */
+typedef struct wl_pkt_filter_ports {
+ uint8 version; /* Be proper */
+ uint8 reserved; /* Be really proper */
+ uint16 count; /* Number of ports following */
+ /* End of fixed data */
+ uint16 ports[1]; /* Placeholder for ports[<count>] */
+} wl_pkt_filter_ports_t;
+
+#define WL_PKT_FILTER_PORTS_FIXED_LEN OFFSETOF(wl_pkt_filter_ports_t, ports)
+
+#define WL_PKT_FILTER_PORTS_VERSION 0
+#define WL_PKT_FILTER_PORTS_MAX 128
+
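+/* Usage sketch (illustrative only): the port list is also variable length,
+ * sized from WL_PKT_FILTER_PORTS_FIXED_LEN plus one uint16 per port:
+ *
+ *     uint16 ports[] = { 5001, 8080 };
+ *     uint16 count = sizeof(ports) / sizeof(ports[0]);
+ *     uint32 buf_len = WL_PKT_FILTER_PORTS_FIXED_LEN + count * sizeof(uint16);
+ *     wl_pkt_filter_ports_t *pfp = (wl_pkt_filter_ports_t *)malloc(buf_len);
+ *     pfp->version = WL_PKT_FILTER_PORTS_VERSION;
+ *     pfp->reserved = 0;
+ *     pfp->count = count;
+ *     memcpy(pfp->ports, ports, count * sizeof(uint16));
+ *
+ * count must not exceed WL_PKT_FILTER_PORTS_MAX; the resulting buffer is what
+ * the "pkt_filter_ports" IOVAR would consume.
+ */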
#define RSN_KCK_LENGTH 16
#define RSN_KEK_LENGTH 16
#define RSN_REPLAY_LEN 8
((cmd) == WLC_GET_AP) || \
((cmd) == WLC_GET_INSTANCE))
-/*
- * Packet engine interface
- */
-
-#define WL_PKTENG_PER_TX_START 0x01
-#define WL_PKTENG_PER_TX_STOP 0x02
-#define WL_PKTENG_PER_RX_START 0x04
-#define WL_PKTENG_PER_RX_WITH_ACK_START 0x05
-#define WL_PKTENG_PER_TX_WITH_ACK_START 0x06
-#define WL_PKTENG_PER_RX_STOP 0x08
-#define WL_PKTENG_PER_MASK 0xff
-
-#define WL_PKTENG_SYNCHRONOUS 0x100 /* synchronous flag */
-
-#define WL_PKTENG_MAXPKTSZ 16384 /* max pktsz limit for pkteng */
-
typedef struct wl_pkteng {
uint32 flags;
uint32 delay; /* Inter-packet delay */
struct ether_addr src; /* Source address */
} wl_pkteng_t;
-#define NUM_80211b_RATES 4
-#define NUM_80211ag_RATES 8
-#define NUM_80211n_RATES 32
-#define NUM_80211_RATES (NUM_80211b_RATES+NUM_80211ag_RATES+NUM_80211n_RATES)
typedef struct wl_pkteng_stats {
uint32 lostfrmcnt; /* RX PER test: no of frames lost (skip seqno) */
int32 rssi; /* RSSI */
int32 snr; /* signal to noise ratio */
uint16 rxpktcnt[NUM_80211_RATES+1];
- uint8 rssi_qdb; /* qdB portion of the computed rssi */
+ uint8 rssi_qdb; /* qdB portion of the computed rssi */
} wl_pkteng_stats_t;
+typedef struct wl_txcal_params {
+ wl_pkteng_t pkteng;
+ uint8 gidx_start;
+ int8 gidx_step;
+ uint8 gidx_stop;
+} wl_txcal_params_t;
-#define WL_WOWL_MAGIC (1 << 0) /* Wakeup on Magic packet */
-#define WL_WOWL_NET (1 << 1) /* Wakeup on Netpattern */
-#define WL_WOWL_DIS (1 << 2) /* Wakeup on loss-of-link due to Disassoc/Deauth */
-#define WL_WOWL_RETR (1 << 3) /* Wakeup on retrograde TSF */
-#define WL_WOWL_BCN (1 << 4) /* Wakeup on loss of beacon */
-#define WL_WOWL_TST (1 << 5) /* Wakeup after test */
-#define WL_WOWL_M1 (1 << 6) /* Wakeup after PTK refresh */
-#define WL_WOWL_EAPID (1 << 7) /* Wakeup after receipt of EAP-Identity Req */
-#define WL_WOWL_PME_GPIO (1 << 8) /* Wakeind via PME(0) or GPIO(1) */
-#define WL_WOWL_NEEDTKIP1 (1 << 9) /* need tkip phase 1 key to be updated by the driver */
-#define WL_WOWL_GTK_FAILURE (1 << 10) /* enable wakeup if GTK fails */
-#define WL_WOWL_EXTMAGPAT (1 << 11) /* support extended magic packets */
-#define WL_WOWL_ARPOFFLOAD (1 << 12) /* support ARP/NS/keepalive offloading */
-#define WL_WOWL_WPA2 (1 << 13) /* read protocol version for EAPOL frames */
-#define WL_WOWL_KEYROT (1 << 14) /* If the bit is set, use key rotaton */
-#define WL_WOWL_BCAST (1 << 15) /* If the bit is set, frm received was bcast frame */
-
-#define MAGIC_PKT_MINLEN 102 /* Magic pkt min length is 6 * 0xFF + 16 * ETHER_ADDR_LEN */
-
-#define WOWL_PATTEN_TYPE_ARP (1 << 0) /* ARP offload Pattern */
-#define WOWL_PATTEN_TYPE_NA (1 << 1) /* NA offload Pattern */
-typedef struct {
- uint32 masksize; /* Size of the mask in #of bytes */
- uint32 offset; /* Offset to start looking for the packet in # of bytes */
- uint32 patternoffset; /* Offset of start of pattern in the structure */
- uint32 patternsize; /* Size of the pattern itself in #of bytes */
- uint32 id; /* id */
- uint32 reasonsize; /* Size of the wakeup reason code */
- uint32 flags; /* Flags to tell the pattern type and other properties */
+typedef enum {
+ wowl_pattern_type_bitmap = 0,
+ wowl_pattern_type_arp,
+ wowl_pattern_type_na
+} wowl_pattern_type_t;
+
+typedef struct wl_wowl_pattern {
+ uint32 masksize; /* Size of the mask in #of bytes */
+ uint32 offset; /* Pattern byte offset in packet */
+ uint32 patternoffset; /* Offset of start of pattern in the structure */
+ uint32 patternsize; /* Size of the pattern itself in #of bytes */
+ uint32 id; /* id */
+ uint32 reasonsize; /* Size of the wakeup reason code */
+ wowl_pattern_type_t type; /* Type of pattern */
/* Mask follows the structure above */
 /* Pattern follows the mask and is located at 'patternoffset' from the start */
} wl_wowl_pattern_t;
-typedef struct {
+typedef struct wl_wowl_pattern_list {
uint count;
wl_wowl_pattern_t pattern[1];
} wl_wowl_pattern_list_t;
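+/* Layout sketch (illustrative only): for a single pattern entry the mask is
+ * expected to start right after the fixed header and the pattern follows the
+ * mask, so a plausible way to size and index the buffer is:
+ *
+ *     uint32 buf_len = sizeof(wl_wowl_pattern_t) + masksize + patternsize;
+ *     wl_wowl_pattern_t *wp = (wl_wowl_pattern_t *)malloc(buf_len);
+ *     wp->masksize = masksize;
+ *     wp->patternsize = patternsize;
+ *     wp->patternoffset = sizeof(wl_wowl_pattern_t) + masksize;
+ *     uint8 *mask_start = (uint8 *)wp + sizeof(wl_wowl_pattern_t);
+ *     uint8 *pattern_start = (uint8 *)wp + wp->patternoffset;
+ *
+ * masksize/patternsize are hypothetical host-side values; whether padding is
+ * required between mask and pattern is firmware-specific, so treat the offset
+ * arithmetic above as an assumption.
+ */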
-typedef struct {
+typedef struct wl_wowl_wakeind {
uint8 pci_wakeind; /* Whether PCI PMECSR PMEStatus bit was set */
- uint16 ucode_wakeind; /* What wakeup-event indication was set by ucode */
+ uint32 ucode_wakeind; /* What wakeup-event indication was set by ucode */
} wl_wowl_wakeind_t;
+typedef struct {
+ uint32 pktlen; /* size of packet */
+ void *sdu;
+} tcp_keepalive_wake_pkt_infop_t;
/* per AC rate control related data structure */
typedef struct wl_txrate_class {
uint8 max_rate;
} wl_txrate_class_t;
-
-
-/* Overlap BSS Scan parameters default, minimum, maximum */
-#define WLC_OBSS_SCAN_PASSIVE_DWELL_DEFAULT 20 /* unit TU */
-#define WLC_OBSS_SCAN_PASSIVE_DWELL_MIN 5 /* unit TU */
-#define WLC_OBSS_SCAN_PASSIVE_DWELL_MAX 1000 /* unit TU */
-#define WLC_OBSS_SCAN_ACTIVE_DWELL_DEFAULT 10 /* unit TU */
-#define WLC_OBSS_SCAN_ACTIVE_DWELL_MIN 10 /* unit TU */
-#define WLC_OBSS_SCAN_ACTIVE_DWELL_MAX 1000 /* unit TU */
-#define WLC_OBSS_SCAN_WIDTHSCAN_INTERVAL_DEFAULT 300 /* unit Sec */
-#define WLC_OBSS_SCAN_WIDTHSCAN_INTERVAL_MIN 10 /* unit Sec */
-#define WLC_OBSS_SCAN_WIDTHSCAN_INTERVAL_MAX 900 /* unit Sec */
-#define WLC_OBSS_SCAN_CHANWIDTH_TRANSITION_DLY_DEFAULT 5
-#define WLC_OBSS_SCAN_CHANWIDTH_TRANSITION_DLY_MIN 5
-#define WLC_OBSS_SCAN_CHANWIDTH_TRANSITION_DLY_MAX 100
-#define WLC_OBSS_SCAN_PASSIVE_TOTAL_PER_CHANNEL_DEFAULT 200 /* unit TU */
-#define WLC_OBSS_SCAN_PASSIVE_TOTAL_PER_CHANNEL_MIN 200 /* unit TU */
-#define WLC_OBSS_SCAN_PASSIVE_TOTAL_PER_CHANNEL_MAX 10000 /* unit TU */
-#define WLC_OBSS_SCAN_ACTIVE_TOTAL_PER_CHANNEL_DEFAULT 20 /* unit TU */
-#define WLC_OBSS_SCAN_ACTIVE_TOTAL_PER_CHANNEL_MIN 20 /* unit TU */
-#define WLC_OBSS_SCAN_ACTIVE_TOTAL_PER_CHANNEL_MAX 10000 /* unit TU */
-#define WLC_OBSS_SCAN_ACTIVITY_THRESHOLD_DEFAULT 25 /* unit percent */
-#define WLC_OBSS_SCAN_ACTIVITY_THRESHOLD_MIN 0 /* unit percent */
-#define WLC_OBSS_SCAN_ACTIVITY_THRESHOLD_MAX 100 /* unit percent */
-
/* structure for Overlap BSS scan arguments */
typedef struct wl_obss_scan_arg {
int16 passive_dwell;
} wl_obss_scan_arg_t;
#define WL_OBSS_SCAN_PARAM_LEN sizeof(wl_obss_scan_arg_t)
-#define WL_MIN_NUM_OBSS_SCAN_ARG 7 /* minimum number of arguments required for OBSS Scan */
-
-#define WL_COEX_INFO_MASK 0x07
-#define WL_COEX_INFO_REQ 0x01
-#define WL_COEX_40MHZ_INTOLERANT 0x02
-#define WL_COEX_WIDTH20 0x04
-
-#define WLC_RSSI_INVALID 0 /* invalid RSSI value */
-
-#define MAX_RSSI_LEVELS 8
/* RSSI event notification configuration. */
typedef struct wl_rssi_event {
#define WL_IOV_PKTQ_LOG_PRECS 16
-typedef struct {
+typedef BWL_PRE_PACKED_STRUCT struct {
uint32 num_addrs;
char addr_type[WL_IOV_MAC_PARAM_LEN];
struct ether_addr ea[WL_IOV_MAC_PARAM_LEN];
-} wl_iov_mac_params_t;
+} BWL_POST_PACKED_STRUCT wl_iov_mac_params_t;
+/* This is extra info that follows wl_iov_mac_params_t */
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint32 addr_info[WL_IOV_MAC_PARAM_LEN];
+} BWL_POST_PACKED_STRUCT wl_iov_mac_extra_params_t;
-/* Parameter block for PKTQ_LOG statistics */
+/* Combined structure */
typedef struct {
- uint32 requested; /* packets requested to be stored */
- uint32 stored; /* packets stored */
- uint32 saved; /* packets saved,
- because a lowest priority queue has given away one packet
- */
- uint32 selfsaved; /* packets saved,
- because an older packet from the same queue has been dropped
- */
- uint32 full_dropped; /* packets dropped,
- because pktq is full with higher precedence packets
- */
- uint32 dropped; /* packets dropped because pktq per that precedence is full */
- uint32 sacrificed; /* packets dropped,
- in order to save one from a queue of a highest priority
- */
- uint32 busy; /* packets droped because of hardware/transmission error */
- uint32 retry; /* packets re-sent because they were not received */
- uint32 ps_retry; /* packets retried again prior to moving power save mode */
- uint32 retry_drop; /* packets finally dropped after retry limit */
- uint32 max_avail; /* the high-water mark of the queue capacity for packets -
- goes to zero as queue fills
- */
- uint32 max_used; /* the high-water mark of the queue utilisation for packets -
- increases with use ('inverse' of max_avail)
- */
- uint32 queue_capacity; /* the maximum capacity of the queue */
-} pktq_log_counters_v01_t;
+ wl_iov_mac_params_t params;
+ wl_iov_mac_extra_params_t extra_params;
+} wl_iov_mac_full_params_t;
+
+/* Parameter block for PKTQ_LOG statistics */
+#define PKTQ_LOG_COUNTERS_V4 \
+ /* packets requested to be stored */ \
+ uint32 requested; \
+ /* packets stored */ \
+ uint32 stored; \
+ /* packets saved, because a lowest priority queue has given away one packet */ \
+ uint32 saved; \
+ /* packets saved, because an older packet from the same queue has been dropped */ \
+ uint32 selfsaved; \
+ /* packets dropped, because pktq is full with higher precedence packets */ \
+ uint32 full_dropped; \
+ /* packets dropped because pktq per that precedence is full */ \
+ uint32 dropped; \
+ /* packets dropped, in order to save one from a queue of a highest priority */ \
+ uint32 sacrificed; \
+ /* packets dropped because of hardware/transmission error */ \
+ uint32 busy; \
+ /* packets re-sent because they were not received */ \
+ uint32 retry; \
+ /* packets retried again (ps pretend) prior to moving into power save mode */ \
+ uint32 ps_retry; \
+ /* suppressed packet count */ \
+ uint32 suppress; \
+ /* packets finally dropped after retry limit */ \
+ uint32 retry_drop; \
+ /* the high-water mark of the queue capacity for packets - goes to zero as queue fills */ \
+ uint32 max_avail; \
+ /* the high-water mark of the queue utilisation for packets - ('inverse' of max_avail) */ \
+ uint32 max_used; \
+ /* the maximum capacity of the queue */ \
+ uint32 queue_capacity; \
+ /* count of rts attempts that failed to receive cts */ \
+ uint32 rtsfail; \
+ /* count of packets sent (acked) successfully */ \
+ uint32 acked; \
+ /* running total of phy rate of packets sent successfully */ \
+ uint32 txrate_succ; \
+ /* running total of phy 'main' rate */ \
+ uint32 txrate_main; \
+ /* actual data transferred successfully */ \
+ uint32 throughput; \
+ /* time difference since last pktq_stats */ \
+ uint32 time_delta;
typedef struct {
- uint32 requested; /* packets requested to be stored */
- uint32 stored; /* packets stored */
- uint32 saved; /* packets saved,
- because a lowest priority queue has given away one packet
- */
- uint32 selfsaved; /* packets saved,
- because an older packet from the same queue has been dropped
- */
- uint32 full_dropped; /* packets dropped,
- because pktq is full with higher precedence packets
- */
- uint32 dropped; /* packets dropped because pktq per that precedence is full */
- uint32 sacrificed; /* packets dropped,
- in order to save one from a queue of a highest priority
- */
- uint32 busy; /* packets droped because of hardware/transmission error */
- uint32 retry; /* packets re-sent because they were not received */
- uint32 ps_retry; /* packets retried again prior to moving power save mode */
- uint32 retry_drop; /* packets finally dropped after retry limit */
- uint32 max_avail; /* the high-water mark of the queue capacity for packets -
- goes to zero as queue fills
- */
- uint32 max_used; /* the high-water mark of the queue utilisation for packets -
- increases with use ('inverse' of max_avail)
- */
- uint32 queue_capacity; /* the maximum capacity of the queue */
- uint32 rtsfail; /* count of rts attempts that failed to receive cts */
- uint32 acked; /* count of packets sent (acked) successfully */
-} pktq_log_counters_v02_t;
+ PKTQ_LOG_COUNTERS_V4
+} pktq_log_counters_v04_t;
-#define sacrified sacrificed
+/* v5 is the same as V4 with extra parameter */
+typedef struct {
+ PKTQ_LOG_COUNTERS_V4
+ /* cumulative time to transmit */
+ uint32 airtime;
+} pktq_log_counters_v05_t;
typedef struct {
uint8 num_prec[WL_IOV_MAC_PARAM_LEN];
- pktq_log_counters_v01_t counters[WL_IOV_MAC_PARAM_LEN][WL_IOV_PKTQ_LOG_PRECS];
+ pktq_log_counters_v04_t counters[WL_IOV_MAC_PARAM_LEN][WL_IOV_PKTQ_LOG_PRECS];
+ uint32 counter_info[WL_IOV_MAC_PARAM_LEN];
+ uint32 pspretend_time_delta[WL_IOV_MAC_PARAM_LEN];
char headings[1];
-} pktq_log_format_v01_t;
+} pktq_log_format_v04_t;
typedef struct {
uint8 num_prec[WL_IOV_MAC_PARAM_LEN];
- pktq_log_counters_v02_t counters[WL_IOV_MAC_PARAM_LEN][WL_IOV_PKTQ_LOG_PRECS];
- uint32 throughput[WL_IOV_MAC_PARAM_LEN][WL_IOV_PKTQ_LOG_PRECS];
- uint32 time_delta;
+ pktq_log_counters_v05_t counters[WL_IOV_MAC_PARAM_LEN][WL_IOV_PKTQ_LOG_PRECS];
+ uint32 counter_info[WL_IOV_MAC_PARAM_LEN];
+ uint32 pspretend_time_delta[WL_IOV_MAC_PARAM_LEN];
char headings[1];
-} pktq_log_format_v02_t;
+} pktq_log_format_v05_t;
typedef struct {
uint32 version;
wl_iov_mac_params_t params;
union {
- pktq_log_format_v01_t v01;
- pktq_log_format_v02_t v02;
+ pktq_log_format_v04_t v04;
+ pktq_log_format_v05_t v05;
} pktq_log;
} wl_iov_pktq_log_t;
+/* PKTQ_LOG_AUTO, PKTQ_LOG_DEF_PREC flags introduced in v05, they are ignored by v04 */
+#define PKTQ_LOG_AUTO (1 << 31)
+#define PKTQ_LOG_DEF_PREC (1 << 30)
-/* **** EXTLOG **** */
-#define EXTLOG_CUR_VER 0x0100
-
-#define MAX_ARGSTR_LEN 18 /* At least big enough for storing ETHER_ADDR_STR_LEN */
-
-/* log modules (bitmap) */
-#define LOG_MODULE_COMMON 0x0001
-#define LOG_MODULE_ASSOC 0x0002
-#define LOG_MODULE_EVENT 0x0004
-#define LOG_MODULE_MAX 3 /* Update when adding module */
+/*
+ * SCB_BS_DATA iovar definitions start.
+ */
+#define SCB_BS_DATA_STRUCT_VERSION 1
-/* log levels */
-#define WL_LOG_LEVEL_DISABLE 0
-#define WL_LOG_LEVEL_ERR 1
-#define WL_LOG_LEVEL_WARN 2
-#define WL_LOG_LEVEL_INFO 3
-#define WL_LOG_LEVEL_MAX WL_LOG_LEVEL_INFO /* Update when adding level */
+/* The actual counters maintained for each station */
+typedef BWL_PRE_PACKED_STRUCT struct {
+ /* The following counters are a subset of what pktq_stats provides per precedence. */
+ uint32 retry; /* packets re-sent because they were not received */
+ uint32 retry_drop; /* packets finally dropped after retry limit */
+ uint32 rtsfail; /* count of rts attempts that failed to receive cts */
+ uint32 acked; /* count of packets sent (acked) successfully */
+ uint32 txrate_succ; /* running total of phy rate of packets sent successfully */
+ uint32 txrate_main; /* running total of phy 'main' rate */
+ uint32 throughput; /* actual data transferred successfully */
+ uint32 time_delta; /* time difference since last pktq_stats */
+ uint32 airtime; /* cumulative total medium access delay in usecs */
+} BWL_POST_PACKED_STRUCT iov_bs_data_counters_t;
+
+/* The structure for individual station information. */
+typedef BWL_PRE_PACKED_STRUCT struct {
+ struct ether_addr station_address; /* The station MAC address */
+ uint16 station_flags; /* Bit mask of flags, for future use. */
+ iov_bs_data_counters_t station_counters; /* The actual counter values */
+} BWL_POST_PACKED_STRUCT iov_bs_data_record_t;
-/* flag */
-#define LOG_FLAG_EVENT 1
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint16 structure_version; /* Structure version number (for wl/wlu matching) */
+ uint16 structure_count; /* Number of iov_bs_data_record_t records following */
+ iov_bs_data_record_t structure_record[1]; /* 0 - structure_count records */
+} BWL_POST_PACKED_STRUCT iov_bs_data_struct_t;
-/* log arg_type */
-#define LOG_ARGTYPE_NULL 0
-#define LOG_ARGTYPE_STR 1 /* %s */
-#define LOG_ARGTYPE_INT 2 /* %d */
-#define LOG_ARGTYPE_INT_STR 3 /* %d...%s */
-#define LOG_ARGTYPE_STR_INT 4 /* %s...%d */
+/* Bitmask of options that can be passed in to the iovar. */
+enum {
+ SCB_BS_DATA_FLAG_NO_RESET = (1<<0) /* Do not clear the counters after reading */
+};
+/*
+ * SCB_BS_DATA iovar definitions end.
+ */
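+/* Usage sketch (illustrative only): a host tool reading the SCB_BS_DATA iovar
+ * result would first check structure_version and then walk structure_count
+ * fixed-size records:
+ *
+ *     iov_bs_data_struct_t *bs = (iov_bs_data_struct_t *)iobuf;
+ *     if (bs->structure_version == SCB_BS_DATA_STRUCT_VERSION) {
+ *         int i;
+ *         for (i = 0; i < bs->structure_count; i++) {
+ *             iov_bs_data_record_t *rec = &bs->structure_record[i];
+ *             ... use rec->station_address, rec->station_counters.acked, etc. ...
+ *         }
+ *     }
+ *
+ * 'iobuf' is a hypothetical buffer returned by the iovar; byte-order and
+ * alignment handling are omitted for brevity.
+ */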
typedef struct wlc_extlog_cfg {
int max_number;
uint32 len; /* overlay code len */
/* overlay code follows this struct */
} wl_ioctl_overlay_t;
-
-#define OVERLAY_IDX_MASK 0x000000ff
-#define OVERLAY_IDX_SHIFT 0
-#define OVERLAY_FLAGS_MASK 0xffffff00
-#define OVERLAY_FLAGS_SHIFT 8
-/* overlay written to device memory immediately after loading the base image */
-#define OVERLAY_FLAG_POSTLOAD 0x100
-/* defer overlay download until the device responds w/WLC_E_OVL_DOWNLOAD event */
-#define OVERLAY_FLAG_DEFER_DL 0x200
-/* overlay downloaded prior to the host going to sleep */
-#define OVERLAY_FLAG_PRESLEEP 0x400
-
-#define OVERLAY_DOWNLOAD_CHUNKSIZE 1024
#endif /* DONGLEOVERLAYS */
#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+/* 11k Neighbor Report element */
+typedef struct nbr_element {
+ uint8 id;
+ uint8 len;
+ struct ether_addr bssid;
+ uint32 bssid_info;
+ uint8 reg;
+ uint8 channel;
+ uint8 phytype;
+ uint8 pad;
+} nbr_element_t;
+
+
+typedef enum event_msgs_ext_command {
+ EVENTMSGS_NONE = 0,
+ EVENTMSGS_SET_BIT = 1,
+ EVENTMSGS_RESET_BIT = 2,
+ EVENTMSGS_SET_MASK = 3
+} event_msgs_ext_command_t;
+
+#define EVENTMSGS_VER 1
+#define EVENTMSGS_EXT_STRUCT_SIZE OFFSETOF(eventmsgs_ext_t, mask[0])
+
+/* len - for SET, the mask size sent from the application to the firmware; */
+/* for GET, the actual firmware mask size */
+/* maxgetsize - used only for GET; indicates the max mask size that the */
+/* application can read from the firmware */
+typedef struct eventmsgs_ext
+{
+ uint8 ver;
+ uint8 command;
+ uint8 len;
+ uint8 maxgetsize;
+ uint8 mask[1];
+} eventmsgs_ext_t;
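+/* Usage sketch (illustrative only): to set one event bit with the extended
+ * interface, the variable-length mask follows the fixed header, so the buffer
+ * is sized as EVENTMSGS_EXT_STRUCT_SIZE plus the mask length:
+ *
+ *     uint8 mask_len = 16;                    bytes of event mask to send
+ *     int buf_len = EVENTMSGS_EXT_STRUCT_SIZE + mask_len;
+ *     eventmsgs_ext_t *em = (eventmsgs_ext_t *)malloc(buf_len);
+ *     memset(em, 0, buf_len);
+ *     em->ver = EVENTMSGS_VER;
+ *     em->command = EVENTMSGS_SET_BIT;
+ *     em->len = mask_len;
+ *     em->mask[event_id / 8] |= (1 << (event_id % 8));
+ *
+ * 'event_id' is a hypothetical WLC_E_* event number; the filled buffer would
+ * then be passed down via the event-mask iovar.
+ */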
+
+typedef BWL_PRE_PACKED_STRUCT struct pcie_bus_tput_params {
+ /* number of host DMA descriptors programmed by the firmware before a commit */
+ uint16 max_dma_descriptors;
+
+ uint16 host_buf_len; /* length of host buffer */
+ dmaaddr_t host_buf_addr; /* physical address for bus_throughput_buf */
+} BWL_POST_PACKED_STRUCT pcie_bus_tput_params_t;
+typedef BWL_PRE_PACKED_STRUCT struct pcie_bus_tput_stats {
+ uint16 time_taken; /* number of seconds the test is run */
+ uint16 nbytes_per_descriptor; /* number of bytes of data DMAed per descriptor */
+
+ /* number of descriptors for which DMA completed successfully within the test time */
+ uint32 count;
+} BWL_POST_PACKED_STRUCT pcie_bus_tput_stats_t;
+
/* no default structure packing */
#include <packed_section_end.h>
+typedef struct keepalives_max_idle {
+ uint16 keepalive_count; /* number of keepalives per bss_max_idle period */
+ uint8 mkeepalive_index; /* mkeepalive_index for keepalive frame to be used */
+ uint8 PAD; /* to align next field */
+ uint16 max_interval; /* seconds */
+} keepalives_max_idle_t;
+
+#define PM_IGNORE_BCMC_PROXY_ARP (1 << 0)
+#define PM_IGNORE_BCMC_ALL_DMS_ACCEPTED (1 << 1)
+
/* require strict packing */
#include <packed_section_start.h>
+/* ##### Power Stats section ##### */
+
+#define WL_PWRSTATS_VERSION 2
+
+/* Input structure for pwrstats IOVAR */
+typedef BWL_PRE_PACKED_STRUCT struct wl_pwrstats_query {
+ uint16 length; /* Number of entries in type array. */
+ uint16 type[1]; /* Types (tags) to retrieve.
+ * Length 0 (no types) means get all.
+ */
+} BWL_POST_PACKED_STRUCT wl_pwrstats_query_t;
+
+/* This structure is for version 2; version 1 will be deprecated by FW */
+typedef BWL_PRE_PACKED_STRUCT struct wl_pwrstats {
+ uint16 version; /* Version = 2 is TLV format */
+ uint16 length; /* Length of entire structure */
+ uint8 data[1]; /* TLV data, a series of structures,
+ * each starting with type and length.
+ *
+ * Padded as necessary so each section
+ * starts on a 4-byte boundary.
+ *
+ * Both type and len are uint16, but the
+ * upper nibble of length is reserved so
+ * valid len values are 0-4095.
+ */
+} BWL_POST_PACKED_STRUCT wl_pwrstats_t;
+#define WL_PWR_STATS_HDRLEN OFFSETOF(wl_pwrstats_t, data)
+
+/* Type values for the data section */
+#define WL_PWRSTATS_TYPE_PHY 0 /* struct wl_pwr_phy_stats */
+#define WL_PWRSTATS_TYPE_SCAN 1 /* struct wl_pwr_scan_stats */
+#define WL_PWRSTATS_TYPE_USB_HSIC 2 /* struct wl_pwr_usb_hsic_stats */
+#define WL_PWRSTATS_TYPE_PM_AWAKE 3 /* struct wl_pwr_pm_awake_stats */
+#define WL_PWRSTATS_TYPE_CONNECTION 4 /* struct wl_pwr_connect_stats; assoc and key-exch time */
+#define WL_PWRSTATS_TYPE_PCIE 6 /* struct wl_pwr_pcie_stats */
+
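+/* Parsing sketch (illustrative only): the returned buffer is a version/length
+ * header followed by a series of (type, len) sections, each padded to a
+ * 4-byte boundary, so a consumer could walk it roughly like this:
+ *
+ *     wl_pwrstats_t *ps = (wl_pwrstats_t *)iobuf;
+ *     uint8 *p = ps->data;
+ *     uint8 *end = (uint8 *)ps + ps->length;
+ *     while (p + 2 * sizeof(uint16) <= end) {
+ *         uint16 type = ((uint16 *)p)[0];
+ *         uint16 len  = ((uint16 *)p)[1] & 0x0fff;    upper nibble reserved
+ *         if (type == WL_PWRSTATS_TYPE_SCAN) { ... }
+ *         p += (len + 3) & ~3;                        advance, 4-byte aligned
+ *     }
+ *
+ * 'iobuf' is a hypothetical buffer returned by the pwrstats iovar; the sketch
+ * assumes 'len' covers the whole section including its type/len header, and
+ * byte-order/alignment handling is omitted for brevity.
+ */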
+/* Bits for wake reasons */
+#define WLC_PMD_WAKE_SET 0x1
+#define WLC_PMD_PM_AWAKE_BCN 0x2
+#define WLC_PMD_BTA_ACTIVE 0x4
+#define WLC_PMD_SCAN_IN_PROGRESS 0x8
+#define WLC_PMD_RM_IN_PROGRESS 0x10
+#define WLC_PMD_AS_IN_PROGRESS 0x20
+#define WLC_PMD_PM_PEND 0x40
+#define WLC_PMD_PS_POLL 0x80
+#define WLC_PMD_CHK_UNALIGN_TBTT 0x100
+#define WLC_PMD_APSD_STA_UP 0x200
+#define WLC_PMD_TX_PEND_WAR 0x400
+#define WLC_PMD_GPTIMER_STAY_AWAKE 0x800
+#define WLC_PMD_PM2_RADIO_SOFF_PEND 0x2000
+#define WLC_PMD_NON_PRIM_STA_UP 0x4000
+#define WLC_PMD_AP_UP 0x8000
+
+typedef BWL_PRE_PACKED_STRUCT struct wlc_pm_debug {
+ uint32 timestamp; /* timestamp in millisecond */
+ uint32 reason; /* reason(s) for staying awake */
+} BWL_POST_PACKED_STRUCT wlc_pm_debug_t;
+
+/* Data sent as part of pwrstats IOVAR */
+typedef BWL_PRE_PACKED_STRUCT struct pm_awake_data {
+ uint32 curr_time; /* ms */
+ uint32 hw_macc; /* HW maccontrol */
+ uint32 sw_macc; /* SW maccontrol */
+ uint32 pm_dur; /* Total sleep time in PM, usecs */
+ uint32 mpc_dur; /* Total sleep time in MPC, usecs */
+
+ /* int32 drifts = remote - local; +ve drift => local-clk slow */
+ int32 last_drift; /* Most recent TSF drift from beacon */
+ int32 min_drift; /* Min TSF drift from beacon in magnitude */
+ int32 max_drift; /* Max TSF drift from beacon in magnitude */
+
+ uint32 avg_drift; /* Avg TSF drift from beacon */
+
+ /* Wake history tracking */
+
+ /* pmstate array (type wlc_pm_debug_t) start offset */
+ uint16 pm_state_offset;
+ /* pmstate number of array entries */
+ uint16 pm_state_len;
+
+ /* array (type uint32) start offset */
+ uint16 pmd_event_wake_dur_offset;
+ /* pmd_event_wake_dur number of array entries */
+ uint16 pmd_event_wake_dur_len;
+
+ uint32 drift_cnt; /* Count of drift readings over which avg_drift was computed */
+ uint8 pmwake_idx; /* for stepping through pm_state */
+ uint8 pad[3];
+ uint32 frts_time; /* Cumulative ms spent in frts since driver load */
+ uint32 frts_end_cnt; /* No of times frts ended since driver load */
+} BWL_POST_PACKED_STRUCT pm_awake_data_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_pwr_pm_awake_stats {
+ uint16 type; /* WL_PWRSTATS_TYPE_PM_AWAKE */
+ uint16 len; /* Up to 4K-1, top 4 bits are reserved */
+
+ pm_awake_data_t awake_data;
+} BWL_POST_PACKED_STRUCT wl_pwr_pm_awake_stats_t;
+
+/* Original bus structure is for HSIC */
+typedef BWL_PRE_PACKED_STRUCT struct bus_metrics {
+ uint32 suspend_ct; /* suspend count */
+ uint32 resume_ct; /* resume count */
+ uint32 disconnect_ct; /* disconnect count */
+ uint32 reconnect_ct; /* reconnect count */
+ uint32 active_dur; /* msecs in bus, usecs for user */
+ uint32 suspend_dur; /* msecs in bus, usecs for user */
+ uint32 disconnect_dur; /* msecs in bus, usecs for user */
+} BWL_POST_PACKED_STRUCT bus_metrics_t;
+
+/* Bus interface info for USB/HSIC */
+typedef BWL_PRE_PACKED_STRUCT struct wl_pwr_usb_hsic_stats {
+ uint16 type; /* WL_PWRSTATS_TYPE_USB_HSIC */
+ uint16 len; /* Up to 4K-1, top 4 bits are reserved */
+
+ bus_metrics_t hsic; /* stats from hsic bus driver */
+} BWL_POST_PACKED_STRUCT wl_pwr_usb_hsic_stats_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct pcie_bus_metrics {
+ uint32 d3_suspend_ct; /* suspend count */
+ uint32 d0_resume_ct; /* resume count */
+ uint32 perst_assrt_ct; /* PERST# assert count */
+ uint32 perst_deassrt_ct; /* PERST# de-assert count */
+ uint32 active_dur; /* msecs */
+ uint32 d3_suspend_dur; /* msecs */
+ uint32 perst_dur; /* msecs */
+ uint32 l0_cnt; /* L0 entry count */
+ uint32 l0_usecs; /* L0 duration in usecs */
+ uint32 l1_cnt; /* L1 entry count */
+ uint32 l1_usecs; /* L1 duration in usecs */
+ uint32 l1_1_cnt; /* L1_1ss entry count */
+ uint32 l1_1_usecs; /* L1_1ss duration in usecs */
+ uint32 l1_2_cnt; /* L1_2ss entry count */
+ uint32 l1_2_usecs; /* L1_2ss duration in usecs */
+ uint32 l2_cnt; /* L2 entry count */
+ uint32 l2_usecs; /* L2 duration in usecs */
+} BWL_POST_PACKED_STRUCT pcie_bus_metrics_t;
+
+/* Bus interface info for PCIE */
+typedef BWL_PRE_PACKED_STRUCT struct wl_pwr_pcie_stats {
+ uint16 type; /* WL_PWRSTATS_TYPE_PCIE */
+ uint16 len; /* Up to 4K-1, top 4 bits are reserved */
+ pcie_bus_metrics_t pcie; /* stats from pcie bus driver */
+} BWL_POST_PACKED_STRUCT wl_pwr_pcie_stats_t;
+
+/* Scan information history per category */
+typedef BWL_PRE_PACKED_STRUCT struct scan_data {
+ uint32 count; /* Number of scans performed */
+ uint32 dur; /* Total time (in us) used */
+} BWL_POST_PACKED_STRUCT scan_data_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_pwr_scan_stats {
+ uint16 type; /* WL_PWRSTATS_TYPE_SCAN */
+ uint16 len; /* Up to 4K-1, top 4 bits are reserved */
+
+ /* Scan history */
+ scan_data_t user_scans; /* User-requested scans: (i/e/p)scan */
+ scan_data_t assoc_scans; /* Scans initiated by association requests */
+ scan_data_t roam_scans; /* Scans initiated by the roam engine */
+ scan_data_t pno_scans[8]; /* For future PNO bucketing (BSSID, SSID, etc) */
+ scan_data_t other_scans; /* Scan engine usage not assigned to the above */
+} BWL_POST_PACKED_STRUCT wl_pwr_scan_stats_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_pwr_connect_stats {
+ uint16 type; /* WL_PWRSTATS_TYPE_CONNECTION */
+ uint16 len; /* Up to 4K-1, top 4 bits are reserved */
+
+ /* Connection (Association + Key exchange) data */
+ uint32 count; /* Number of connections performed */
+ uint32 dur; /* Total time (in ms) used */
+} BWL_POST_PACKED_STRUCT wl_pwr_connect_stats_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_pwr_phy_stats {
+ uint16 type; /* WL_PWRSTATS_TYPE_PHY */
+ uint16 len; /* Up to 4K-1, top 4 bits are reserved */
+ uint32 tx_dur; /* TX Active duration in us */
+ uint32 rx_dur; /* RX Active duration in us */
+} BWL_POST_PACKED_STRUCT wl_pwr_phy_stats_t;
+
+
+/* ##### End of Power Stats section ##### */
+
+/* IPV4 Arp offloads for ndis context */
+BWL_PRE_PACKED_STRUCT struct hostip_id {
+ struct ipv4_addr ipa;
+ uint8 id;
+} BWL_POST_PACKED_STRUCT;
+
+#if 0 && (NDISVER >= 0x0600)
+/* Return values */
+#define ND_REPLY_PEER 0x1 /* Reply was sent to service NS request from peer */
+#define ND_REQ_SINK 0x2 /* Input packet should be discarded */
+#define ND_FORCE_FORWARD 0x3 /* For the dongle to forward req to HOST */
+
+
+/* Neighbor Solicitation Response Offload IOVAR param */
+typedef BWL_PRE_PACKED_STRUCT struct nd_param {
+ struct ipv6_addr host_ip[2];
+ struct ipv6_addr solicit_ip;
+ struct ipv6_addr remote_ip;
+ uint8 host_mac[ETHER_ADDR_LEN];
+ uint32 offload_id;
+} BWL_POST_PACKED_STRUCT nd_param_t;
+#endif
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_pfn_roam_thresh {
+ uint32 pfn_alert_thresh; /* time in ms */
+ uint32 roam_alert_thresh; /* time in ms */
+} BWL_POST_PACKED_STRUCT wl_pfn_roam_thresh_t;
+
+
+/* Reasons for wl_pmalert_t */
+#define PM_DUR_EXCEEDED (1<<0)
+#define MPC_DUR_EXCEEDED (1<<1)
+#define ROAM_ALERT_THRESH_EXCEEDED (1<<2)
+#define PFN_ALERT_THRESH_EXCEEDED (1<<3)
+#define CONST_AWAKE_DUR_ALERT (1<<4)
+#define CONST_AWAKE_DUR_RECOVERY (1<<5)
+
+#define MIN_PM_ALERT_LEN 9
+
+/* Data sent in EXCESS_PM_WAKE event */
+#define WL_PM_ALERT_VERSION 3
+
+#define MAX_P2P_BSS_DTIM_PRD 4
+
+/* This structure is for version 3; version 2 will be deprecated by FW */
+typedef BWL_PRE_PACKED_STRUCT struct wl_pmalert {
+ uint16 version; /* Version = 3 is TLV format */
+ uint16 length; /* Length of entire structure */
+ uint32 reasons; /* reason(s) for pm_alert */
+ uint8 data[1]; /* TLV data, a series of structures,
+ * each starting with type and length.
+ *
+ * Padded as necessary so each section
+ * starts on a 4-byte boundary.
+ *
+ * Both type and len are uint16, but the
+ * upper nibble of length is reserved so
+ * valid len values are 0-4095.
+ */
+} BWL_POST_PACKED_STRUCT wl_pmalert_t;
+
+/* Type values for the data section */
+#define WL_PMALERT_FIXED 0 /* struct wl_pmalert_fixed_t, fixed fields */
+#define WL_PMALERT_PMSTATE 1 /* struct wl_pmalert_pmstate_t, variable */
+#define WL_PMALERT_EVENT_DUR 2 /* struct wl_pmalert_event_dur_t, variable */
+#define WL_PMALERT_UCODE_DBG 3 /* struct wl_pmalert_ucode_dbg_t, variable */
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_pmalert_fixed {
+ uint16 type; /* WL_PMALERT_FIXED */
+ uint16 len; /* Up to 4K-1, top 4 bits are reserved */
+ uint32 prev_stats_time; /* msecs */
+ uint32 curr_time; /* ms */
+ uint32 prev_pm_dur; /* usecs */
+ uint32 pm_dur; /* Total sleep time in PM, usecs */
+ uint32 prev_mpc_dur; /* usecs */
+ uint32 mpc_dur; /* Total sleep time in MPC, usecs */
+ uint32 hw_macc; /* HW maccontrol */
+ uint32 sw_macc; /* SW maccontrol */
+
+ /* int32 drifts = remote - local; +ve drift -> local-clk slow */
+ int32 last_drift; /* Most recent TSF drift from beacon */
+ int32 min_drift; /* Min TSF drift from beacon in magnitude */
+ int32 max_drift; /* Max TSF drift from beacon in magnitude */
+
+ uint32 avg_drift; /* Avg TSF drift from beacon */
+ uint32 drift_cnt; /* Count of drift readings over which avg_drift was computed */
+ uint32 frts_time; /* Cumulative ms spent in frts since driver load */
+ uint32 frts_end_cnt; /* No of times frts ended since driver load */
+} BWL_POST_PACKED_STRUCT wl_pmalert_fixed_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_pmalert_pmstate {
+ uint16 type; /* WL_PMALERT_PMSTATE */
+ uint16 len; /* Up to 4K-1, top 4 bits are reserved */
+
+ uint8 pmwake_idx; /* for stepping through pm_state */
+ uint8 pad[3];
+ /* Array of pmstate; len of array is based on tlv len */
+ wlc_pm_debug_t pmstate[1];
+} BWL_POST_PACKED_STRUCT wl_pmalert_pmstate_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_pmalert_event_dur {
+ uint16 type; /* WL_PMALERT_EVENT_DUR */
+ uint16 len; /* Up to 4K-1, top 4 bits are reserved */
+
+ /* Array of event_dur, len of array is based on tlv len */
+ uint32 event_dur[1];
+} BWL_POST_PACKED_STRUCT wl_pmalert_event_dur_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_pmalert_ucode_dbg {
+ uint16 type; /* WL_PMALERT_UCODE_DBG */
+ uint16 len; /* Up to 4K-1, top 4 bits are reserved */
+ uint32 macctrl;
+ uint16 m_p2p_hps;
+ uint32 psm_brc;
+ uint32 ifsstat;
+ uint16 m_p2p_bss_dtim_prd[MAX_P2P_BSS_DTIM_PRD];
+ uint32 psmdebug[20];
+ uint32 phydebug[20];
+} BWL_POST_PACKED_STRUCT wl_pmalert_ucode_dbg_t;
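Editorial note (not part of this patch): the data[] comment above describes a TLV stream in which each record starts with uint16 type and len and each section is padded to a 4-byte boundary. A minimal host-side sketch of walking it follows; it assumes the patched wlioctl.h and Broadcom integer typedefs are in scope, a host byte order matching the dongle (little-endian), and that len counts only the bytes after the 4-byte record header.

#include <string.h>	/* memcpy; the wlioctl.h definitions above are assumed included */

static void walk_pmalert_tlvs(const wl_pmalert_t *alert, uint32 buflen)
{
	uint32 limit = (alert->length < buflen) ? alert->length : buflen;
	const uint8 *p = alert->data;
	const uint8 *end = (const uint8 *)alert + limit;

	while (p + 4 <= end) {
		uint16 type, len;

		memcpy(&type, p, sizeof(type));
		memcpy(&len, p + 2, sizeof(len));
		len &= 0x0fff;			/* top 4 bits of length are reserved */

		if (type == WL_PMALERT_FIXED) {
			const wl_pmalert_fixed_t *fixed = (const wl_pmalert_fixed_t *)p;
			(void)fixed;		/* e.g. fixed->pm_dur: usecs asleep in PM */
		}
		p += (4 + len + 3) & ~(uint32)3;	/* next section starts 4-byte aligned */
	}
}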
+
#ifndef LINUX_POSTMOGRIFY_REMOVAL
/* Structures and constants used for "vndr_ie" IOVar interface */
* "add", "del" (+ NUL)
*/
-/* 802.11 Mgmt Packet flags */
-#define VNDR_IE_BEACON_FLAG 0x1
-#define VNDR_IE_PRBRSP_FLAG 0x2
-#define VNDR_IE_ASSOCRSP_FLAG 0x4
-#define VNDR_IE_AUTHRSP_FLAG 0x8
-#define VNDR_IE_PRBREQ_FLAG 0x10
-#define VNDR_IE_ASSOCREQ_FLAG 0x20
-#define VNDR_IE_IWAPID_FLAG 0x40 /* vendor IE in IW advertisement protocol ID field */
-#define VNDR_IE_CUSTOM_FLAG 0x100 /* allow custom IE id */
-
-#if defined(WLP2P)
-/* P2P Action Frames flags (spec ordered) */
-#define VNDR_IE_GONREQ_FLAG 0x001000
-#define VNDR_IE_GONRSP_FLAG 0x002000
-#define VNDR_IE_GONCFM_FLAG 0x004000
-#define VNDR_IE_INVREQ_FLAG 0x008000
-#define VNDR_IE_INVRSP_FLAG 0x010000
-#define VNDR_IE_DISREQ_FLAG 0x020000
-#define VNDR_IE_DISRSP_FLAG 0x040000
-#define VNDR_IE_PRDREQ_FLAG 0x080000
-#define VNDR_IE_PRDRSP_FLAG 0x100000
-
-#define VNDR_IE_P2PAF_SHIFT 12
-#endif /* WLP2P */
-
#define VNDR_IE_INFO_HDR_LEN (sizeof(uint32))
typedef BWL_PRE_PACKED_STRUCT struct {
uint8 est_Pout_cck; /* Latest CCK tx power out estimate */
uint8 tx_power_max[4]; /* Maximum target power among all rates */
uint tx_power_max_rate_ind[4]; /* Index of the rate with the max target power */
- int8 clm_limits[WL_NUMRATES]; /* regulatory limits - 20, 40 or 80MHz */
- int8 clm_limits_subchan1[WL_NUMRATES]; /* regulatory limits - 20in40 or 40in80 */
- int8 clm_limits_subchan2[WL_NUMRATES]; /* regulatory limits - 20in80MHz */
int8 sar; /* SAR limit for display by wl executable */
int8 channel_bandwidth; /* 20, 40 or 80 MHz bandwidth? */
uint8 version; /* Version of the data format wlu <--> driver */
int8 target_offsets[4]; /* Target power offsets for current rate per core */
uint32 last_tx_ratespec; /* Ratespec for last transmition */
uint user_target; /* user limit */
- uint32 board_limit_len; /* length of board limit buffer */
- uint32 target_len; /* length of target power buffer */
+ uint32 ppr_len; /* length of each ppr serialization buffer */
int8 SARLIMIT[MAX_STREAMS_SUPPORTED];
uint8 pprdata[1]; /* ppr serialization buffer */
} BWL_POST_PACKED_STRUCT tx_pwr_rpt_t;
+typedef BWL_PRE_PACKED_STRUCT struct {
+ struct ipv4_addr ipv4_addr;
+ struct ether_addr nexthop;
+} BWL_POST_PACKED_STRUCT ibss_route_entry_t;
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint32 num_entry;
+ ibss_route_entry_t route_entry[1];
+} BWL_POST_PACKED_STRUCT ibss_route_tbl_t;
+
+#define MAX_IBSS_ROUTE_TBL_ENTRY 64
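Editorial sketch (not part of the patch): since ibss_route_tbl_t ends in a one-element flexible array, the allocation size is the offset of route_entry[] plus one ibss_route_entry_t per route. alloc_route_tbl below is a hypothetical host-side helper and assumes the OFFSETOF macro used elsewhere in this header is available.

#include <stdlib.h>	/* malloc; the wlioctl.h definitions above are assumed included */

static ibss_route_tbl_t *alloc_route_tbl(uint32 n)
{
	size_t sz;
	ibss_route_tbl_t *tbl;

	if (n == 0 || n > MAX_IBSS_ROUTE_TBL_ENTRY)
		return NULL;
	sz = OFFSETOF(ibss_route_tbl_t, route_entry) + n * sizeof(ibss_route_entry_t);
	tbl = (ibss_route_tbl_t *)malloc(sz);
	if (tbl != NULL)
		tbl->num_entry = n;	/* caller fills route_entry[0..n-1] */
	return tbl;
}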
#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+#define TXPWR_TARGET_VERSION 0
+typedef BWL_PRE_PACKED_STRUCT struct {
+ int32 version; /* version number */
+ chanspec_t chanspec; /* txpwr report for this channel */
+ int8 txpwr[WL_STA_ANT_MAX]; /* Max tx target power, in qdb */
+ uint8 rf_cores; /* count of RF Cores being reported */
+} BWL_POST_PACKED_STRUCT txpwr_target_max_t;
+
+#define BSS_PEER_INFO_PARAM_CUR_VER 0
+/* Input structure for IOV_BSS_PEER_INFO */
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint16 version;
+ struct ether_addr ea; /* peer MAC address */
+} BWL_POST_PACKED_STRUCT bss_peer_info_param_t;
+
+#define BSS_PEER_INFO_CUR_VER 0
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint16 version;
+ struct ether_addr ea;
+ int32 rssi;
+ uint32 tx_rate; /* current tx rate */
+ uint32 rx_rate; /* current rx rate */
+ wl_rateset_t rateset; /* rateset in use */
+ uint32 age; /* age in seconds */
+} BWL_POST_PACKED_STRUCT bss_peer_info_t;
+
+#define BSS_PEER_LIST_INFO_CUR_VER 0
+
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint16 version;
+ uint16 bss_peer_info_len; /* length of bss_peer_info_t */
+ uint32 count; /* number of peer info */
+ bss_peer_info_t peer_info[1]; /* peer info */
+} BWL_POST_PACKED_STRUCT bss_peer_list_info_t;
+
+#define BSS_PEER_LIST_INFO_FIXED_LEN OFFSETOF(bss_peer_list_info_t, peer_info)
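Editorial sketch (not part of the patch): BSS_PEER_LIST_INFO_FIXED_LEN is the header size ahead of the variable peer array, and bss_peer_info_len reports the per-record size used by the firmware. The hypothetical helper below iterates with that reported stride rather than sizeof(bss_peer_info_t), so a larger future record still walks correctly; it assumes a host build with stdio.

#include <stdio.h>	/* printf; the wlioctl.h definitions above are assumed included */

static void print_peer_rssi(const bss_peer_list_info_t *list)
{
	const uint8 *rec = (const uint8 *)list + BSS_PEER_LIST_INFO_FIXED_LEN;
	uint32 i;

	for (i = 0; i < list->count; i++) {
		const bss_peer_info_t *peer = (const bss_peer_info_t *)rec;

		printf("peer %u rssi %d\n", (unsigned int)i, (int)peer->rssi);
		rec += list->bss_peer_info_len;	/* stride as reported, not sizeof() */
	}
}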
+
+#define AIBSS_BCN_FORCE_CONFIG_VER_0 0
+
+/* structure used to configure AIBSS beacon force xmit */
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint16 version;
+ uint16 len;
+ uint32 initial_min_bcn_dur; /* dur in ms to check a bcn in bcn_flood period */
+ uint32 min_bcn_dur; /* dur in ms to check a bcn after bcn_flood period */
+ uint32 bcn_flood_dur; /* Initial bcn xmit period in ms */
+} BWL_POST_PACKED_STRUCT aibss_bcn_force_config_t;
+
+#define AIBSS_TXFAIL_CONFIG_VER_0 0
+
+/* structure used to configure aibss tx fail event */
+typedef BWL_PRE_PACKED_STRUCT struct {
+ uint16 version;
+ uint16 len;
+ uint32 bcn_timeout; /* dur in seconds to receive 1 bcn */
+ uint32 max_tx_retry; /* no of consecutive no acks to send txfail event */
+} BWL_POST_PACKED_STRUCT aibss_txfail_config_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_aibss_if {
+ uint16 version;
+ uint16 len;
+ uint32 flags;
+ struct ether_addr addr;
+ chanspec_t chspec;
+} BWL_POST_PACKED_STRUCT wl_aibss_if_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wlc_ipfo_route_entry {
+ struct ipv4_addr ip_addr;
+ struct ether_addr nexthop;
+} BWL_POST_PACKED_STRUCT wlc_ipfo_route_entry_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wlc_ipfo_route_tbl {
+ uint32 num_entry;
+ wlc_ipfo_route_entry_t route_entry[1];
+} BWL_POST_PACKED_STRUCT wlc_ipfo_route_tbl_t;
+
+#define WL_IPFO_ROUTE_TBL_FIXED_LEN 4
+#define WL_MAX_IPFO_ROUTE_TBL_ENTRY 64
+
/* no strict structure packing */
#include <packed_section_end.h>
#ifndef LINUX_POSTMOGRIFY_REMOVAL
-/* Global ASSERT Logging */
+ /* Global ASSERT Logging */
#define ASSERTLOG_CUR_VER 0x0100
#define MAX_ASSRTSTR_LEN 64
-typedef struct assert_record {
- uint32 time;
- uint8 seq_num;
- char str[MAX_ASSRTSTR_LEN];
-} assert_record_t;
+ typedef struct assert_record {
+ uint32 time;
+ uint8 seq_num;
+ char str[MAX_ASSRTSTR_LEN];
+ } assert_record_t;
-typedef struct assertlog_results {
- uint16 version;
- uint16 record_len;
- uint32 num;
- assert_record_t logs[1];
-} assertlog_results_t;
+ typedef struct assertlog_results {
+ uint16 version;
+ uint16 record_len;
+ uint32 num;
+ assert_record_t logs[1];
+ } assertlog_results_t;
#define LOGRRC_FIX_LEN 8
#define IOBUF_ALLOWED_NUM_OF_LOGREC(type, len) ((len - LOGRRC_FIX_LEN)/sizeof(type))
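Editorial sketch (not part of the patch): LOGRRC_FIX_LEN matches the 8-byte fixed header of assertlog_results_t (version, record_len, num), so the macro above yields how many records fit in a given ioctl buffer. The 2048-byte buffer size below is an arbitrary example.

enum { ASSERTLOG_IOBUF_LEN = 2048 };	/* example ioctl buffer size */

static unsigned int assertlog_capacity(void)
{
	/* number of assert_record_t entries that fit after the fixed header */
	return (unsigned int)IOBUF_ALLOWED_NUM_OF_LOGREC(assert_record_t, ASSERTLOG_IOBUF_LEN);
}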
#ifdef BCMWAPI_WAI
#define IV_LEN 16
-struct wapi_sta_msg_t
-{
- uint16 msg_type;
- uint16 datalen;
- uint8 vap_mac[6];
- uint8 reserve_data1[2];
- uint8 sta_mac[6];
- uint8 reserve_data2[2];
- uint8 gsn[IV_LEN];
- uint8 wie[256];
-};
+ struct wapi_sta_msg_t
+ {
+ uint16 msg_type;
+ uint16 datalen;
+ uint8 vap_mac[6];
+ uint8 reserve_data1[2];
+ uint8 sta_mac[6];
+ uint8 reserve_data2[2];
+ uint8 gsn[IV_LEN];
+ uint8 wie[256];
+ };
#endif /* BCMWAPI_WAI */
-/* channel interference measurement (chanim) related defines */
-
-/* chanim mode */
-#define CHANIM_DISABLE 0 /* disabled */
-#define CHANIM_DETECT 1 /* detection only */
-#define CHANIM_EXT 2 /* external state machine */
-#define CHANIM_ACT 3 /* full internal state machine, detect + act */
-#define CHANIM_MODE_MAX 4
-
-/* define for apcs reason code */
-#define APCS_INIT 0
-#define APCS_IOCTL 1
-#define APCS_CHANIM 2
-#define APCS_CSTIMER 3
-#define APCS_BTA 4
-#define APCS_TXDLY 5
-#define APCS_NONACSD 6
-
-/* number of ACS record entries */
-#define CHANIM_ACS_RECORD 10
-
-/* CHANIM */
-#define CCASTATS_TXDUR 0
-#define CCASTATS_INBSS 1
-#define CCASTATS_OBSS 2
-#define CCASTATS_NOCTG 3
-#define CCASTATS_NOPKT 4
-#define CCASTATS_DOZE 5
-#define CCASTATS_TXOP 6
-#define CCASTATS_GDTXDUR 7
-#define CCASTATS_BDTXDUR 8
-#define CCASTATS_MAX 9
-
-/* chanim acs record */
-typedef struct {
- bool valid;
- uint8 trigger;
- chanspec_t selected_chspc;
- int8 bgnoise;
- uint32 glitch_cnt;
- uint8 ccastats;
- uint timestamp;
-} chanim_acs_record_t;
-
-typedef struct {
- chanim_acs_record_t acs_record[CHANIM_ACS_RECORD];
- uint8 count;
- uint timestamp;
-} wl_acs_record_t;
-
-typedef struct chanim_stats {
- uint32 glitchcnt; /* normalized as per second count */
- uint32 badplcp; /* normalized as per second count */
- uint8 ccastats[CCASTATS_MAX]; /* normalized as 0-255 */
- int8 bgnoise; /* background noise level (in dBm) */
- chanspec_t chanspec;
- uint32 timestamp;
-} chanim_stats_t;
-
-#define WL_CHANIM_STATS_VERSION 1
-#define WL_CHANIM_COUNT_ALL 0xff
-#define WL_CHANIM_COUNT_ONE 0x1
+ /* chanim acs record */
+ typedef struct {
+ bool valid;
+ uint8 trigger;
+ chanspec_t selected_chspc;
+ int8 bgnoise;
+ uint32 glitch_cnt;
+ uint8 ccastats;
+ uint timestamp;
+ } chanim_acs_record_t;
+
+ typedef struct {
+ chanim_acs_record_t acs_record[CHANIM_ACS_RECORD];
+ uint8 count;
+ uint timestamp;
+ } wl_acs_record_t;
+
+ typedef struct chanim_stats {
+ uint32 glitchcnt; /* normalized as per second count */
+ uint32 badplcp; /* normalized as per second count */
+ uint8 ccastats[CCASTATS_MAX]; /* normalized as 0-255 */
+ int8 bgnoise; /* background noise level (in dBm) */
+ chanspec_t chanspec;
+ uint32 timestamp;
+ uint32 bphy_glitchcnt; /* normalized as per second count */
+ uint32 bphy_badplcp; /* normalized as per second count */
+ uint8 chan_idle; /* normalized as 0~255 */
+ } chanim_stats_t;
+
+#define WL_CHANIM_STATS_VERSION 2
typedef struct {
uint32 buflen;
uint32 scb_max_probe;
} wl_scb_probe_t;
-/* ap tpc modes */
-#define AP_TPC_OFF 0
-#define AP_TPC_BSS_PWR 1 /* BSS power control */
-#define AP_TPC_AP_PWR 2 /* AP power control */
-#define AP_TPC_AP_BSS_PWR 3 /* Both AP and BSS power control */
-#define AP_TPC_MAX_LINK_MARGIN 127
-
-/* ap tpc modes */
-#define AP_TPC_OFF 0
-#define AP_TPC_BSS_PWR 1 /* BSS power control */
-#define AP_TPC_AP_PWR 2 /* AP power control */
-#define AP_TPC_AP_BSS_PWR 3 /* Both AP and BSS power control */
-#define AP_TPC_MAX_LINK_MARGIN 127
-
/* structure/defines for selective mgmt frame (smf) stats support */
#define SMFS_VERSION 1
SMFS_CODETYPE_RC
};
-/* reuse two number in the sc/rc space */
-#define SMFS_CODE_MALFORMED 0xFFFE
-#define SMFS_CODE_IGNORED 0xFFFD
-
typedef enum smfs_type {
SMFS_TYPE_AUTH,
SMFS_TYPE_ASSOC,
uint16 dwell; /* valid in listen state, in ms */
} wl_p2p_disc_st_t;
-/* state */
-#define WL_P2P_DISC_ST_SCAN 0
-#define WL_P2P_DISC_ST_LISTEN 1
-#define WL_P2P_DISC_ST_SEARCH 2
-
/* scan request */
typedef struct wl_p2p_scan {
uint8 type; /* 'S' for WLC_SCAN, 'E' for "escan" */
chanspec_t chspec; /* for p2p_ifadd GO */
} wl_p2p_if_t;
-/* i/f type */
-#define WL_P2P_IF_CLIENT 0
-#define WL_P2P_IF_GO 1
-#define WL_P2P_IF_DYNBCN_GO 2
-#define WL_P2P_IF_DEV 3
-
/* i/f query */
typedef struct wl_p2p_ifq {
uint bsscfgidx;
uint32 count; /* see count */
} wl_p2p_sched_desc_t;
-/* count */
-#define WL_P2P_SCHED_RSVD 0
-#define WL_P2P_SCHED_REPEAT 255 /* anything > 255 will be treated as 255 */
-
typedef struct wl_p2p_sched {
uint8 type; /* see schedule type */
uint8 action; /* see schedule action */
uint8 option; /* see schedule option */
wl_p2p_sched_desc_t desc[1];
} wl_p2p_sched_t;
-#define WL_P2P_SCHED_FIXED_LEN 3
-
-/* schedule type */
-#define WL_P2P_SCHED_TYPE_ABS 0 /* Scheduled Absence */
-#define WL_P2P_SCHED_TYPE_REQ_ABS 1 /* Requested Absence */
-
-/* schedule action during absence periods (for WL_P2P_SCHED_ABS type) */
-#define WL_P2P_SCHED_ACTION_NONE 0 /* no action */
-#define WL_P2P_SCHED_ACTION_DOZE 1 /* doze */
-/* schedule option - WL_P2P_SCHED_TYPE_REQ_ABS */
-#define WL_P2P_SCHED_ACTION_GOOFF 2 /* turn off GO beacon/prbrsp functions */
-/* schedule option - WL_P2P_SCHED_TYPE_XXX */
-#define WL_P2P_SCHED_ACTION_RESET 255 /* reset */
-
-/* schedule option - WL_P2P_SCHED_TYPE_ABS */
-#define WL_P2P_SCHED_OPTION_NORMAL 0 /* normal start/interval/duration/count */
-#define WL_P2P_SCHED_OPTION_BCNPCT 1 /* percentage of beacon interval */
-/* schedule option - WL_P2P_SCHED_TYPE_REQ_ABS */
-#define WL_P2P_SCHED_OPTION_TSFOFS 2 /* normal start/internal/duration/count with
- * start being an offset of the 'current' TSF
- */
-
-/* feature flags */
-#define WL_P2P_FEAT_GO_CSA (1 << 0) /* GO moves with the STA using CSA method */
-#define WL_P2P_FEAT_GO_NOLEGACY (1 << 1) /* GO does not probe respond to non-p2p probe
- * requests
- */
-#define WL_P2P_FEAT_RESTRICT_DEV_RESP (1 << 2) /* Restrict p2p dev interface from responding */
-
-#ifdef WLNIC
-/* nic_cnx iovar */
-typedef struct wl_nic_cnx {
- uint8 opcode;
- struct ether_addr addr;
- /* the following are valid for WL_NIC_CNX_CONN */
- uint8 SSID_len;
- uint8 SSID[32];
- struct ether_addr abssid;
- uint16 beacon_interval;
- uint16 sync_threshold;
- uint16 beacon_wait_time;
-} wl_nic_cnx_t;
-
-/* opcode */
-#define WL_NIC_CNX_ADD 0 /* add NIC connection */
-#define WL_NIC_CNX_DEL 1 /* delete NIC connection */
-#define WL_NIC_CNX_IDX 2 /* query NIC connection index */
-#define WL_NIC_CNX_CONN 3 /* join/create network */
-#define WL_NIC_CNX_DIS 4 /* disconnect from network */
-
-/* nic_cfg iovar */
-typedef struct wl_nic_cfg {
- uint8 version;
- uint8 beacon_mode;
- uint8 diluted_beacon_period;
- uint8 beacon_probability;
- uint8 num_awake_window_params;
- struct {
- uint8 channel_number;
- uint8 awake_window_length;
- uint8 repeat_EQC;
- } awake_window_params[3];
- uint8 scan_length;
- uint8 scan_interval;
- uint8 scan_probability;
- uint8 ASID;
- uint8 channel_usage_mode;
- uint8 CWmin_af;
- uint8 NIC_priority;
- uint8 NIC_data_ind;
- uint8 allowed_wakeup_delay;
-} wl_nic_cfg_t;
-
-/* version */
-#define WL_NIC_CFG_VER 1
-
-/* beacon_mode */
-#define WL_NIC_BCN_NORM 0
-#define WL_NIC_BCN_DILUTED 1
-
-/* channel_usage_mode */
-#define WL_NIC_CHAN_STATIC 0
-#define WL_NIC_CHAN_CYCLE 1
-
-/* nic_cfg iovar */
-typedef struct wl_nic_frm {
- uint8 type;
- struct ether_addr da;
- uint8 body[1];
-} wl_nic_frm_t;
-/* type */
-#define WL_NIC_FRM_ACTION 2
-
-/* i/f query */
-typedef struct wl_nic_ifq {
- uint bsscfgidx;
- char ifname[BCM_MSG_IFNAME_MAX];
-} wl_nic_ifq_t;
-
-/* data mode */
-/* nic_dm iovar */
-typedef struct wl_nic_dm {
- uint8 enab;
- uint8 rsvd;
- /* the following fields are valid when enabling... */
- chanspec_t chspec;
- uint8 DATA_priority;
- uint8 NIC_priority;
-} wl_nic_dm_t;
-
-/* immediate scan request */
-typedef struct wl_nic_isq {
- uint8 scan_length;
-} wl_nic_isq_t;
-#endif /* WLNIC */
-
-/* RFAWARE def */
-#define BCM_ACTION_RFAWARE 0x77
-#define BCM_ACTION_RFAWARE_DCS 0x01
-
-/* DCS reason code define */
-#define BCM_DCS_IOVAR 0x1
-#define BCM_DCS_UNKNOWN 0xFF
+typedef struct wl_p2p_wfds_hash {
+ uint32 advt_id;
+ uint16 nw_cfg_method;
+ uint8 wfds_hash[6];
+ uint8 name_len;
+ uint8 service_name[MAX_WFDS_SVC_NAME_LEN];
+} wl_p2p_wfds_hash_t;
typedef struct wl_bcmdcs_data {
uint reason;
chanspec_t chspec;
} wl_bcmdcs_data_t;
-/* n-mode support capability */
-/* 2x2 includes both 1x1 & 2x2 devices
- * reserved #define 2 for future when we want to separate 1x1 & 2x2 and
- * control it independently
- */
-#define WL_11N_2x2 1
-#define WL_11N_3x3 3
-#define WL_11N_4x4 4
-
-/* define 11n feature disable flags */
-#define WLFEATURE_DISABLE_11N 0x00000001
-#define WLFEATURE_DISABLE_11N_STBC_TX 0x00000002
-#define WLFEATURE_DISABLE_11N_STBC_RX 0x00000004
-#define WLFEATURE_DISABLE_11N_SGI_TX 0x00000008
-#define WLFEATURE_DISABLE_11N_SGI_RX 0x00000010
-#define WLFEATURE_DISABLE_11N_AMPDU_TX 0x00000020
-#define WLFEATURE_DISABLE_11N_AMPDU_RX 0x00000040
-#define WLFEATURE_DISABLE_11N_GF 0x00000080
-
-/* Proxy STA modes */
-#define PSTA_MODE_DISABLED 0
-#define PSTA_MODE_PROXY 1
-#define PSTA_MODE_REPEATER 2
-
/* NAT configuration */
typedef struct {
nat_if_info_t if_info; /* interface info */
} nat_cfg_t;
-/* op code in nat_cfg */
-#define NAT_OP_ENABLE 1 /* enable NAT on given interface */
-#define NAT_OP_DISABLE 2 /* disable NAT on given interface */
-#define NAT_OP_DISABLE_ALL 3 /* disable NAT on all interfaces */
-
-/* NAT state */
-#define NAT_STATE_ENABLED 1 /* NAT is enabled */
-#define NAT_STATE_DISABLED 2 /* NAT is disabled */
-
typedef struct {
int state; /* NAT state returned */
} nat_state_t;
-#ifdef PROP_TXSTATUS
-/* Bit definitions for tlv iovar */
-/*
- * enable RSSI signals:
- * WLFC_CTL_TYPE_RSSI
- */
-#define WLFC_FLAGS_RSSI_SIGNALS 0x0001
-
-/* enable (if/mac_open, if/mac_close,, mac_add, mac_del) signals:
- *
- * WLFC_CTL_TYPE_MAC_OPEN
- * WLFC_CTL_TYPE_MAC_CLOSE
- *
- * WLFC_CTL_TYPE_INTERFACE_OPEN
- * WLFC_CTL_TYPE_INTERFACE_CLOSE
- *
- * WLFC_CTL_TYPE_MACDESC_ADD
- * WLFC_CTL_TYPE_MACDESC_DEL
- *
- */
-#define WLFC_FLAGS_XONXOFF_SIGNALS 0x0002
-
-/* enable (status, fifo_credit, mac_credit) signals
- * WLFC_CTL_TYPE_MAC_REQUEST_CREDIT
- * WLFC_CTL_TYPE_TXSTATUS
- * WLFC_CTL_TYPE_FIFO_CREDITBACK
- */
-#define WLFC_FLAGS_CREDIT_STATUS_SIGNALS 0x0004
-
-#define WLFC_FLAGS_HOST_PROPTXSTATUS_ACTIVE 0x0008
-#define WLFC_FLAGS_PSQ_GENERATIONFSM_ENABLE 0x0010
-#define WLFC_FLAGS_PSQ_ZERO_BUFFER_ENABLE 0x0020
-#define WLFC_FLAGS_HOST_RXRERODER_ACTIVE 0x0040
-#endif /* PROP_TXSTATUS */
#define BTA_STATE_LOG_SZ 64
struct ether_addr ea;
} flush_txfifo_t;
-#define CHANNEL_5G_LOW_START 36 /* 5G low (36..48) CDD enable/disable bit mask */
-#define CHANNEL_5G_MID_START 52 /* 5G mid (52..64) CDD enable/disable bit mask */
-#define CHANNEL_5G_HIGH_START 100 /* 5G high (100..140) CDD enable/disable bit mask */
-#define CHANNEL_5G_UPPER_START 149 /* 5G upper (149..161) CDD enable/disable bit mask */
-
enum {
SPATIAL_MODE_2G_IDX = 0,
SPATIAL_MODE_5G_LOW_IDX,
uint8 band5g[WLC_SUBBAND_MAX][WLC_TXCORE_MAX];
} sar_limit_t;
+#define WLC_TXCAL_CORE_MAX 2 /* max number of txcore supports for txcal */
+#define MAX_NUM_TXCAL_MEAS 128
+
+typedef struct wl_txcal_meas {
+ uint8 tssi[WLC_TXCAL_CORE_MAX][MAX_NUM_TXCAL_MEAS];
+ int16 pwr[WLC_TXCAL_CORE_MAX][MAX_NUM_TXCAL_MEAS];
+ uint8 valid_cnt;
+} wl_txcal_meas_t;
+
+typedef struct wl_txcal_power_tssi {
+ uint8 set_core;
+ uint8 channel;
+ int16 pwr_start[WLC_TXCAL_CORE_MAX];
+ uint8 num_entries[WLC_TXCAL_CORE_MAX];
+ uint8 tssi[WLC_TXCAL_CORE_MAX][MAX_NUM_TXCAL_MEAS];
+ bool gen_tbl;
+} wl_txcal_power_tssi_t;
+
/* IOVAR "mempool" parameter. Used to retrieve a list of memory pool statistics. */
typedef struct wl_mempool_stats {
int num; /* Number of memory pools */
bcm_mp_stats_t s[1]; /* Variable array of memory pool stats. */
} wl_mempool_stats_t;
-
-/* D0 Coalescing */
-#define IPV4_ARP_FILTER 0x0001
-#define IPV4_NETBT_FILTER 0x0002
-#define IPV4_LLMNR_FILTER 0x0004
-#define IPV4_SSDP_FILTER 0x0008
-#define IPV4_WSD_FILTER 0x0010
-#define IPV6_NETBT_FILTER 0x0200
-#define IPV6_LLMNR_FILTER 0x0400
-#define IPV6_SSDP_FILTER 0x0800
-#define IPV6_WSD_FILTER 0x1000
-
-/* Network Offload Engine */
-#define NWOE_OL_ENABLE 0x00000001
-
typedef struct {
uint32 ipaddr;
uint32 ipaddr_netmask;
uint32 ipaddr_gateway;
} nwoe_ifconfig_t;
-/*
- * Traffic management structures/defines.
- */
-
-/* Traffic management bandwidth parameters */
-#define TRF_MGMT_MAX_PRIORITIES 3
-
-#define TRF_MGMT_FLAG_ADD_DSCP 0x0001 /* Add DSCP to IP TOS field */
-#define TRF_MGMT_FLAG_DISABLE_SHAPING 0x0002 /* Don't shape traffic */
-#define TRF_MGMT_FLAG_MANAGE_LOCAL_TRAFFIC 0x0008 /* Manage traffic over our local subnet */
-#define TRF_MGMT_FLAG_FILTER_ON_MACADDR 0x0010 /* filter on MAC address */
-#define TRF_MGMT_FLAG_NO_RX 0x0020 /* do not apply fiters to rx packets */
-
-#define TRF_FILTER_MAC_ADDR 0x0001 /* L2 filter use dst mac address for filtering */
-#define TRF_FILTER_IP_ADDR 0x0002 /* L3 filter use ip ddress for filtering */
-#define TRF_FILTER_L4 0x0004 /* L4 filter use tcp/udp for filtering */
-#define TRF_FILTER_FAVORED 0x0010 /* Tag the packet FAVORED */
-
/* Traffic management priority classes */
typedef enum trf_mgmt_priority_class {
trf_mgmt_priority_low = 0, /* Maps to 802.1p BK */
uint16 prot; /* L4 protocol (only TCP or UDP) */
uint16 flags; /* TBD. For now, this must be zero. */
trf_mgmt_priority_class_t priority; /* Priority for filtered packets */
+ uint32 dscp; /* DSCP */
} trf_mgmt_filter_t;
/* Traffic management filter list (variable length) */
} trf_mgmt_stats_t;
/* Traffic management statisics array */
-typedef struct trf_mgmt_stats_array {
+typedef struct trf_mgmt_stats_array {
trf_mgmt_stats_t tx_queue_stats[TRF_MGMT_MAX_PRIORITIES];
trf_mgmt_stats_t rx_queue_stats[TRF_MGMT_MAX_PRIORITIES];
} trf_mgmt_stats_array_t;
uint8 pwr_sel_exp_time; /* Time lapse for expiry of database */
} powersel_params_t;
+typedef struct lpc_params {
+ /* LPC Params exposed via IOVAR */
+ uint8 rate_stab_thresh; /* Thresh for rate stability based on nupd */
+ uint8 pwr_stab_thresh; /* Number of successes before power step down */
+ uint8 lpc_exp_time; /* Time lapse for expiry of database */
+ uint8 pwrup_slow_step; /* Step size for slow step up */
+ uint8 pwrup_fast_step; /* Step size for fast step up */
+ uint8 pwrdn_slow_step; /* Step size for slow step down */
+} lpc_params_t;
+
/* tx pkt delay statistics */
#define SCB_RETRY_SHORT_DEF 7 /* Default Short retry Limit */
#define WLPKTDLY_HIST_NBINS 16 /* number of bins used in the Delay histogram */
uint8 tune; /* Debug */
} txdelay_params_t;
-#define WL_RELMCAST_MAX_CLIENT 32
-#define WL_RELMCAST_FLAG_INBLACKLIST 1
-#define WL_RELMCAST_FLAG_ACTIVEACKER 2
-#define WL_RELMCAST_FLAG_RELMCAST 4
-#define WL_RELMCAST_MAX_TABLE_ENTRY 4
-
-#define WL_RELMCAST_VER 1
-#define WL_RELMCAST_INDEX_ACK_ALL 255
-#define WL_RELMCAST_NUM_OF_MC_STREAMS 4
-#define WL_RELMCAST_MAX_TRS_PER_GROUP 1
-#define WL_RELMCAST_ACK_MCAST0 0x02
-#define WL_RELMCAST_ACK_MCAST_ALL 0x01
-#define WL_RELMCAST_ACTF_TIME_MIN 300 /* time in ms */
-#define WL_RELMCAST_ACTF_TIME_MAX 20000 /* time in ms */
+enum {
+ WNM_SERVICE_DMS = 1,
+ WNM_SERVICE_FMS = 2,
+ WNM_SERVICE_TFS = 3
+};
+
+/* Definitions for WNM/NPS TCLAS */
+typedef struct wl_tclas {
+ uint8 user_priority;
+ uint8 fc_len;
+ dot11_tclas_fc_t fc;
+} wl_tclas_t;
+
+#define WL_TCLAS_FIXED_SIZE OFFSETOF(wl_tclas_t, fc)
+
+typedef struct wl_tclas_list {
+ uint32 num;
+ wl_tclas_t tclas[1];
+} wl_tclas_list_t;
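Editorial sketch (not part of the patch): each wl_tclas_t carries a variable-length frame classifier (fc_len bytes beyond WL_TCLAS_FIXED_SIZE), so tclas[] cannot be indexed as a plain array; the list has to be walked byte-wise. This is a hypothetical host-side helper based on the definitions above.

static void walk_tclas_list(const wl_tclas_list_t *list)
{
	const uint8 *p = (const uint8 *)list->tclas;
	uint32 i;

	for (i = 0; i < list->num; i++) {
		const wl_tclas_t *tc = (const wl_tclas_t *)p;

		/* inspect tc->user_priority and tc->fc_len here */
		p += WL_TCLAS_FIXED_SIZE + tc->fc_len;	/* advance past this classifier */
	}
}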
+
+/* Definitions for WNM/NPS Traffic Filter Service */
+typedef struct wl_tfs_req {
+ uint8 tfs_id;
+ uint8 tfs_actcode;
+ uint8 tfs_subelem_id;
+ uint8 send;
+} wl_tfs_req_t;
+
+typedef struct wl_tfs_filter {
+ uint8 status; /* Status returned by the AP */
+ uint8 tclas_proc; /* TCLAS processing value (0:and, 1:or) */
+ uint8 tclas_cnt; /* count of all wl_tclas_t in tclas array */
+ uint8 tclas[1]; /* VLA of wl_tclas_t */
+} wl_tfs_filter_t;
+#define WL_TFS_FILTER_FIXED_SIZE OFFSETOF(wl_tfs_filter_t, tclas)
+
+typedef struct wl_tfs_fset {
+ struct ether_addr ea; /* Address of AP/STA involved with this filter set */
+ uint8 tfs_id; /* TFS ID field chosen by STA host */
+ uint8 status; /* Internal status TFS_STATUS_xxx */
+ uint8 actcode; /* Action code DOT11_TFS_ACTCODE_xxx */
+ uint8 token; /* Token used in last request frame */
+ uint8 notify; /* Notify frame sent/received because of this set */
+ uint8 filter_cnt; /* count of all wl_tfs_filter_t in filter array */
+ uint8 filter[1]; /* VLA of wl_tfs_filter_t */
+} wl_tfs_fset_t;
+#define WL_TFS_FSET_FIXED_SIZE OFFSETOF(wl_tfs_fset_t, filter)
enum {
- RELMCAST_ENTRY_OP_DISABLE = 0,
- RELMCAST_ENTRY_OP_DELETE,
- RELMCAST_ENTRY_OP_ENABLE,
- RELMCAST_ENTRY_OP_ACK_ALL
+ TFS_STATUS_DISABLED = 0, /* TFS filter set disabled by user */
+ TFS_STATUS_DISABLING = 1, /* Empty request just sent to AP */
+ TFS_STATUS_VALIDATED = 2, /* Filter set validated by AP (but maybe not enabled!) */
+ TFS_STATUS_VALIDATING = 3, /* Filter set just sent to AP */
+ TFS_STATUS_NOT_ASSOC = 4, /* STA not associated */
+ TFS_STATUS_NOT_SUPPORT = 5, /* TFS not supported by AP */
+ TFS_STATUS_DENIED = 6, /* Filter set refused by AP (=> all sets are disabled!) */
};
+typedef struct wl_tfs_status {
+ uint8 fset_cnt; /* count of all wl_tfs_fset_t in fset array */
+ wl_tfs_fset_t fset[1]; /* VLA of wl_tfs_fset_t */
+} wl_tfs_status_t;
+
+typedef struct wl_tfs_set {
+	uint8 send;			/* Immediately register registered sets on AP side */
+ uint8 tfs_id; /* ID of a specific set (existing or new), or nul for all */
+ uint8 actcode; /* Action code for this filter set */
+ uint8 tclas_proc; /* TCLAS processing operator for this filter set */
+} wl_tfs_set_t;
+
+typedef struct wl_tfs_term {
+ uint8 del; /* Delete internal set once confirmation received */
+ uint8 tfs_id; /* ID of a specific set (existing), or nul for all */
+} wl_tfs_term_t;
+
+
+#define DMS_DEP_PROXY_ARP (1 << 0)
+
+/* Definitions for WNM/NPS Directed Multicast Service */
enum {
- WL_RELMCAST_MODE_RECEIVER = 0,
- WL_RELMCAST_MODE_TRANSMITTER,
- WL_RELMCAST_MODE_INITIATOR
+ DMS_STATUS_DISABLED = 0, /* DMS desc disabled by user */
+ DMS_STATUS_ACCEPTED = 1, /* Request accepted by AP */
+ DMS_STATUS_NOT_ASSOC = 2, /* STA not associated */
+ DMS_STATUS_NOT_SUPPORT = 3, /* DMS not supported by AP */
+ DMS_STATUS_DENIED = 4, /* Request denied by AP */
+ DMS_STATUS_TERM = 5, /* Request terminated by AP */
+ DMS_STATUS_REMOVING = 6, /* Remove request just sent */
+ DMS_STATUS_ADDING = 7, /* Add request just sent */
+	DMS_STATUS_ERROR = 8,		/* Non-compliant AP behavior */
+ DMS_STATUS_IN_PROGRESS = 9, /* Request just sent */
+ DMS_STATUS_REQ_MISMATCH = 10 /* Conditions for sending DMS req not met */
};
+typedef struct wl_dms_desc {
+ uint8 user_id;
+ uint8 status;
+ uint8 token;
+ uint8 dms_id;
+ uint8 tclas_proc;
+ uint8 mac_len; /* length of all ether_addr in data array, 0 if STA */
+ uint8 tclas_len; /* length of all wl_tclas_t in data array */
+ uint8 data[1]; /* VLA of 'ether_addr' and 'wl_tclas_t' (in this order ) */
+} wl_dms_desc_t;
+
+#define WL_DMS_DESC_FIXED_SIZE OFFSETOF(wl_dms_desc_t, data)
+
+typedef struct wl_dms_status {
+ uint32 cnt;
+ wl_dms_desc_t desc[1];
+} wl_dms_status_t;
+
+typedef struct wl_dms_set {
+ uint8 send;
+ uint8 user_id;
+ uint8 tclas_proc;
+} wl_dms_set_t;
+
+typedef struct wl_dms_term {
+ uint8 del;
+ uint8 user_id;
+} wl_dms_term_t;
+
+typedef struct wl_service_term {
+ uint8 service;
+ union {
+ wl_dms_term_t dms;
+ } u;
+} wl_service_term_t;
+
+/* Definitions for WNM/NPS BSS Transition */
+typedef struct wl_bsstrans_req {
+ uint16 tbtt; /* time of BSS to end of life, in unit of TBTT */
+ uint16 dur; /* time of BSS to keep off, in unit of minute */
+ uint8 reqmode; /* request mode of BSS transition request */
+ uint8 unicast; /* request by unicast or by broadcast */
+} wl_bsstrans_req_t;
+
+enum {
+ BSSTRANS_RESP_AUTO = 0, /* Currently equivalent to ENABLE */
+ BSSTRANS_RESP_DISABLE = 1, /* Never answer BSS Trans Req frames */
+ BSSTRANS_RESP_ENABLE = 2, /* Always answer Req frames with preset data */
+ BSSTRANS_RESP_WAIT = 3, /* Send ind, wait and/or send preset data (NOT IMPL) */
+ BSSTRANS_RESP_IMMEDIATE = 4 /* After an ind, set data and send resp (NOT IMPL) */
+};
+
+typedef struct wl_bsstrans_resp {
+ uint8 policy;
+ uint8 status;
+ uint8 delay;
+ struct ether_addr target;
+} wl_bsstrans_resp_t;
+
+/* "wnm_bsstrans_policy" argument programs behavior after BSSTRANS Req reception.
+ * BSS-Transition feature is used by multiple programs such as NPS-PF, VE-PF,
+ * Band-steering, Hotspot 2.0 and customer requirements. Each PF and its test plan
+ * mandates different behavior on receiving BSS-transition request. To accommodate
+ * such divergent behaviors, these policies have been created.
+ */
+enum {
+ WL_BSSTRANS_POLICY_ROAM_ALWAYS = 0, /* Roam (or disassociate) in all cases */
+ WL_BSSTRANS_POLICY_ROAM_IF_MODE = 1, /* Roam only if requested by Request Mode field */
+ WL_BSSTRANS_POLICY_ROAM_IF_PREF = 2, /* Roam only if Preferred BSS provided */
+ WL_BSSTRANS_POLICY_WAIT = 3, /* Wait for deauth and send Accepted status */
+ WL_BSSTRANS_POLICY_PRODUCT = 4, /* Policy for real product use cases (non-pf) */
+};
+
+/* Definitions for WNM/NPS TIM Broadcast */
+typedef struct wl_timbc_offset {
+ int16 offset; /* offset in us */
+ uint16 fix_intv; /* override interval sent from STA */
+ uint16 rate_override; /* use rate override to send high rate TIM broadcast frame */
+ uint8 tsf_present; /* show timestamp in TIM broadcast frame */
+} wl_timbc_offset_t;
+
+typedef struct wl_timbc_set {
+ uint8 interval; /* Interval in DTIM wished or required. */
+ uint8 flags; /* Bitfield described below */
+	uint16 rate_min;		/* Minimum rate required for High/Low TIM frames. Optional */
+	uint16 rate_max;		/* Maximum rate required for High/Low TIM frames. Optional */
+} wl_timbc_set_t;
+
+enum {
+ WL_TIMBC_SET_TSF_REQUIRED = 1, /* Enable TIMBC only if TSF in TIM frames */
+ WL_TIMBC_SET_NO_OVERRIDE = 2, /* ... if AP does not override interval */
+ WL_TIMBC_SET_PROXY_ARP = 4, /* ... if AP support Proxy ARP */
+ WL_TIMBC_SET_DMS_ACCEPTED = 8 /* ... if all DMS desc have been accepted */
+};
+
+typedef struct wl_timbc_status {
+ uint8 status_sta; /* Status from internal state machine (check below) */
+ uint8 status_ap; /* From AP response frame (check 8.4.2.86 from 802.11) */
+ uint8 interval;
+ uint8 pad;
+ int32 offset;
+ uint16 rate_high;
+ uint16 rate_low;
+} wl_timbc_status_t;
+
+enum {
+ WL_TIMBC_STATUS_DISABLE = 0, /* TIMBC disabled by user */
+	WL_TIMBC_STATUS_REQ_MISMATCH = 1,	/* AP settings do not match user requirements */
+ WL_TIMBC_STATUS_NOT_ASSOC = 2, /* STA not associated */
+ WL_TIMBC_STATUS_NOT_SUPPORT = 3, /* TIMBC not supported by AP */
+ WL_TIMBC_STATUS_DENIED = 4, /* Req to disable TIMBC sent to AP */
+ WL_TIMBC_STATUS_ENABLE = 5 /* TIMBC enabled */
+};
+
+/* Definitions for PM2 Dynamic Fast Return To Sleep */
+typedef struct wl_pm2_sleep_ret_ext {
+ uint8 logic; /* DFRTS logic: see WL_DFRTS_LOGIC_* below */
+ uint16 low_ms; /* Low FRTS timeout */
+ uint16 high_ms; /* High FRTS timeout */
+ uint16 rx_pkts_threshold; /* switching threshold: # rx pkts */
+ uint16 tx_pkts_threshold; /* switching threshold: # tx pkts */
+ uint16 txrx_pkts_threshold; /* switching threshold: # (tx+rx) pkts */
+ uint32 rx_bytes_threshold; /* switching threshold: # rx bytes */
+ uint32 tx_bytes_threshold; /* switching threshold: # tx bytes */
+ uint32 txrx_bytes_threshold; /* switching threshold: # (tx+rx) bytes */
+} wl_pm2_sleep_ret_ext_t;
+
+#define WL_DFRTS_LOGIC_OFF 0 /* Feature is disabled */
+#define WL_DFRTS_LOGIC_OR 1 /* OR all non-zero threshold conditions */
+#define WL_DFRTS_LOGIC_AND 2 /* AND all non-zero threshold conditions */
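Editorial sketch (not part of the patch): a possible configuration of the structure above, assuming that with WL_DFRTS_LOGIC_OR any crossed non-zero threshold switches from the low to the high FRTS timeout. All numeric values are placeholders.

#include <string.h>	/* memset; the wlioctl.h definitions above are assumed included */

static void dfrts_example(wl_pm2_sleep_ret_ext_t *cfg)
{
	memset(cfg, 0, sizeof(*cfg));
	cfg->logic = WL_DFRTS_LOGIC_OR;		/* OR all non-zero thresholds */
	cfg->low_ms = 50;			/* fast return to sleep when idle */
	cfg->high_ms = 200;			/* slower return to sleep when busy */
	cfg->rx_pkts_threshold = 10;		/* switch on >= 10 rx pkts */
	cfg->tx_pkts_threshold = 10;		/* or >= 10 tx pkts */
}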
+
+/* Values for the passive_on_restricted_mode iovar. When set to non-zero, this iovar
+ * disables automatic conversions of a channel from passively scanned to
+ * actively scanned. These values only have an effect for country codes such
+ * as XZ where some 5 GHz channels are defined to be passively scanned.
+ */
+#define WL_PASSACTCONV_DISABLE_NONE 0 /* Enable permanent and temporary conversions */
+#define WL_PASSACTCONV_DISABLE_ALL 1 /* Disable permanent and temporary conversions */
+#define WL_PASSACTCONV_DISABLE_PERM 2 /* Disable only permanent conversions */
+
+/* Definitions for Reliable Multicast */
+#define WL_RMC_CNT_VERSION 1
+#define WL_RMC_TR_VERSION 1
+#define WL_RMC_MAX_CLIENT 32
+#define WL_RMC_FLAG_INBLACKLIST 1
+#define WL_RMC_FLAG_ACTIVEACKER 2
+#define WL_RMC_FLAG_RELMCAST 4
+#define WL_RMC_MAX_TABLE_ENTRY 4
+
+#define WL_RMC_VER 1
+#define WL_RMC_INDEX_ACK_ALL 255
+#define WL_RMC_NUM_OF_MC_STREAMS 4
+#define WL_RMC_MAX_TRS_PER_GROUP 1
+#define WL_RMC_MAX_TRS_IN_ACKALL 1
+#define WL_RMC_ACK_MCAST0 0x02
+#define WL_RMC_ACK_MCAST_ALL 0x01
+#define WL_RMC_ACTF_TIME_MIN 300 /* time in ms */
+#define WL_RMC_ACTF_TIME_MAX 20000 /* time in ms */
+#define WL_RMC_MAX_NUM_TRS		32	/* maximum transmitters allowed */
+#define WL_RMC_ARTMO_MIN 350 /* time in ms */
+#define WL_RMC_ARTMO_MAX 40000 /* time in ms */
+
+/* RMC events in action frames */
+enum rmc_opcodes {
+ RELMCAST_ENTRY_OP_DISABLE = 0, /* Disable multi-cast group */
+ RELMCAST_ENTRY_OP_DELETE = 1, /* Delete multi-cast group */
+ RELMCAST_ENTRY_OP_ENABLE = 2, /* Enable multi-cast group */
+ RELMCAST_ENTRY_OP_ACK_ALL = 3 /* Enable ACK ALL bit in AMT */
+};
+
+/* RMC operational modes */
+enum rmc_modes {
+ WL_RMC_MODE_RECEIVER = 0, /* Receiver mode by default */
+ WL_RMC_MODE_TRANSMITTER = 1, /* Transmitter mode using wl ackreq */
+ WL_RMC_MODE_INITIATOR = 2 /* Initiator mode using wl ackreq */
+};
+
+/* Each RMC mcast client info */
typedef struct wl_relmcast_client {
- uint8 flag;
- int16 rssi;
- struct ether_addr addr;
+ uint8 flag; /* status of client such as AR, R, or blacklisted */
+ int16 rssi; /* rssi value of RMC client */
+ struct ether_addr addr; /* mac address of RMC client */
} wl_relmcast_client_t;
+/* RMC Counters */
+typedef struct wl_rmc_cnts {
+ uint16 version; /* see definition of WL_CNT_T_VERSION */
+ uint16 length; /* length of entire structure */
+ uint16 dupcnt; /* counter for duplicate rmc MPDU */
+ uint16 ackreq_err; /* counter for wl ackreq error */
+ uint16 af_tx_err; /* error count for action frame transmit */
+ uint16 null_tx_err; /* error count for rmc null frame transmit */
+ uint16 af_unicast_tx_err; /* error count for rmc unicast frame transmit */
+ uint16 mc_no_amt_slot; /* No mcast AMT entry available */
+ /* Unused. Keep for rom compatibility */
+ uint16 mc_no_glb_slot; /* No mcast entry available in global table */
+ uint16 mc_not_mirrored; /* mcast group is not mirrored */
+ uint16 mc_existing_tr; /* mcast group is already taken by transmitter */
+ uint16 mc_exist_in_amt; /* mcast group is already programmed in amt */
+ /* Unused. Keep for rom compatibility */
+ uint16 mc_not_exist_in_gbl; /* mcast group is not in global table */
+ uint16 mc_not_exist_in_amt; /* mcast group is not in AMT table */
+	uint16	mc_utilized;		/* mcast address is already taken */
+	uint16	mc_taken_other_tr;	/* multi-cast address is already taken */
+ uint32 rmc_rx_frames_mac; /* no of mc frames received from mac */
+ uint32 rmc_tx_frames_mac; /* no of mc frames transmitted to mac */
+ uint32 mc_null_ar_cnt; /* no. of times NULL AR is received */
+ uint32 mc_ar_role_selected; /* no. of times took AR role */
+ uint32 mc_ar_role_deleted; /* no. of times AR role cancelled */
+ uint32 mc_noacktimer_expired; /* no. of times noack timer expired */
+ uint16 mc_no_wl_clk; /* no wl clk detected when trying to access amt */
+ uint16 mc_tr_cnt_exceeded; /* No of transmitters in the network exceeded */
+} wl_rmc_cnts_t;
+
+/* RMC Status */
typedef struct wl_relmcast_st {
- uint8 ver;
- uint8 num;
- wl_relmcast_client_t clients[WL_RELMCAST_MAX_CLIENT];
- uint16 err;
+ uint8 ver; /* version of RMC */
+ uint8 num; /* number of clients detected by transmitter */
+ wl_relmcast_client_t clients[WL_RMC_MAX_CLIENT];
+ uint16 err; /* error status (used in infra) */
+ uint16 actf_time; /* action frame time period */
} wl_relmcast_status_t;
-typedef struct wl_relmcast_entry {
- int8 flag;
- struct ether_addr addr;
-} wl_relmcast_entry_t;
+/* Entry for each STA/node */
+typedef struct wl_rmc_entry {
+	/* operation on multi-cast entry such as add,
+ * delete, ack-all
+ */
+ int8 flag;
+ struct ether_addr addr; /* multi-cast group mac address */
+} wl_rmc_entry_t;
+
+/* RMC table */
+typedef struct wl_rmc_entry_table {
+ uint8 index; /* index to a particular mac entry in table */
+ uint8 opcode; /* opcodes or operation on entry */
+ wl_rmc_entry_t entry[WL_RMC_MAX_TABLE_ENTRY];
+} wl_rmc_entry_table_t;
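Editorial sketch (not part of the patch): one possible way of filling the table above to enable a single multicast group. The exact use of the per-entry flag versus the table-level opcode is inferred from the comments and may differ; the group address argument is a placeholder.

#include <string.h>	/* memset; the wlioctl.h definitions above are assumed included */

static void rmc_enable_group(wl_rmc_entry_table_t *tbl, const struct ether_addr *grp)
{
	memset(tbl, 0, sizeof(*tbl));
	tbl->index = 0;				/* first mac entry in the table */
	tbl->opcode = RELMCAST_ENTRY_OP_ENABLE;	/* enable the multicast group */
	tbl->entry[0].flag = RELMCAST_ENTRY_OP_ENABLE;	/* assumed per-entry op (guess) */
	tbl->entry[0].addr = *grp;		/* multicast group mac address */
}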
+
+typedef struct wl_rmc_trans_elem {
+ struct ether_addr tr_mac; /* transmitter mac */
+ struct ether_addr ar_mac; /* ar mac */
+ uint16 artmo; /* AR timeout */
+ uint8 amt_idx; /* amt table entry */
+ uint16 flag; /* entry will be acked, not acked, programmed, full etc */
+} wl_rmc_trans_elem_t;
+
+/* RMC transmitters */
+typedef struct wl_rmc_trans_in_network {
+ uint8 ver; /* version of RMC */
+ uint8 num_tr; /* number of transmitters in the network */
+ wl_rmc_trans_elem_t trs[WL_RMC_MAX_NUM_TRS];
+} wl_rmc_trans_in_network_t;
+
+/* To update vendor specific ie for RMC */
+typedef struct wl_rmc_vsie {
+ uint8 oui[DOT11_OUI_LEN];
+ uint16 payload; /* IE Data Payload */
+} wl_rmc_vsie_t;
+
+
+/* structures & defines for proximity detection */
+enum proxd_method {
+ PROXD_UNDEFINED_METHOD = 0,
+ PROXD_RSSI_METHOD = 1,
+ PROXD_TOF_METHOD = 2
+};
-typedef struct wl_relmcast_entry_table {
- int8 index;
- int8 opcode;
- wl_relmcast_entry_t entry[WL_RELMCAST_MAX_TABLE_ENTRY];
-} wl_relmcast_entry_table_t;
+/* structures for proximity detection device role */
+#define WL_PROXD_MODE_DISABLE 0
+#define WL_PROXD_MODE_NEUTRAL 1
+#define WL_PROXD_MODE_INITIATOR 2
+#define WL_PROXD_MODE_TARGET 3
-typedef struct wl_tr_Info {
- struct ether_addr addr;
- uint32 timeVal;
- uint16 seq;
-} wl_tr_Info_t;
-
-typedef struct wl_mcGrpEntry {
- struct ether_addr mcaddr;
- struct ether_addr ar;
- wl_tr_Info_t trInfo[WL_RELMCAST_MAX_TRS_PER_GROUP];
-} wl_mcGrpEntry_t;
-
-typedef struct wl_mcAckAllEntry {
- struct ether_addr ar;
- wl_tr_Info_t trInfo[WL_RELMCAST_NUM_OF_MC_STREAMS];
-} wl_mcAckAllEntry_t;
-
-typedef struct wl_relmcast_globalMcTbl {
- uint8 activeMask;
- wl_mcAckAllEntry_t ackAll;
- wl_mcGrpEntry_t mcEntry[WL_RELMCAST_NUM_OF_MC_STREAMS];
-} wl_relmcast_globalMcTbl_t;
-#endif /* LINUX_POSTMOGRIFY_REMOVAL */
+#define WL_PROXD_ACTION_STOP 0
+#define WL_PROXD_ACTION_START 1
-/* fbt_cap: FBT assoc / reassoc modes. */
-#define WLC_FBT_CAP_DRV_4WAY_AND_REASSOC 1 /* Driver 4-way handshake & reassoc (WLFBT). */
+#define WL_PROXD_FLAG_TARGET_REPORT 0x1
+#define WL_PROXD_FLAG_REPORT_FAILURE 0x2
+#define WL_PROXD_FLAG_INITIATOR_REPORT 0x4
+#define WL_PROXD_FLAG_NOCHANSWT 0x8
+#define WL_PROXD_FLAG_NETRUAL 0x10
+#define WL_PROXD_FLAG_INITIATOR_RPTRTT 0x20
+#define WL_PROXD_FLAG_ONEWAY 0x40
+#define WL_PROXD_FLAG_SEQ_EN 0x80
+
+#define WL_PROXD_RANDOM_WAKEUP 0x8000
+
+typedef struct wl_proxd_iovar {
+	uint16 method;		/* Proximity Detection method */
+ uint16 mode; /* Mode (neutral, initiator, target) */
+} wl_proxd_iovar_t;
+
+/*
+ * structures for proximity detection parameters
+ * consists of two parts, common and method specific params
+ * common params should be placed at the beginning
+ */
+
+/* require strict packing */
+#include <packed_section_start.h>
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_params_common {
+ chanspec_t chanspec; /* channel spec */
+ int16 tx_power; /* tx power of Proximity Detection(PD) frames (in dBm) */
+	uint16 tx_rate;			/* tx rate of PD frames (in 500kbps units) */
+ uint16 timeout; /* timeout value */
+ uint16 interval; /* interval between neighbor finding attempts (in TU) */
+ uint16 duration; /* duration of neighbor finding attempts (in ms) */
+} BWL_POST_PACKED_STRUCT wl_proxd_params_common_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_params_rssi_method {
+ chanspec_t chanspec; /* chanspec for home channel */
+ int16 tx_power; /* tx power of Proximity Detection frames (in dBm) */
+ uint16 tx_rate; /* tx rate of PD frames, 500kbps units */
+ uint16 timeout; /* state machine wait timeout of the frames (in ms) */
+ uint16 interval; /* interval between neighbor finding attempts (in TU) */
+ uint16 duration; /* duration of neighbor finding attempts (in ms) */
+ /* method specific ones go after this line */
+ int16 rssi_thresh; /* RSSI threshold (in dBm) */
+ uint16 maxconvergtmo; /* max wait converge timeout (in ms) */
+} wl_proxd_params_rssi_method_t;
+
+#define Q1_NS 25 /* Q1 time units */
+
+#define TOF_BW_NUM		3	/* number of bandwidths that the TOF can support */
+#define TOF_BW_SEQ_NUM		(TOF_BW_NUM+2)	/* total number of indices */
+enum tof_bw_index {
+ TOF_BW_20MHZ_INDEX = 0,
+ TOF_BW_40MHZ_INDEX = 1,
+ TOF_BW_80MHZ_INDEX = 2,
+ TOF_BW_SEQTX_INDEX = 3,
+ TOF_BW_SEQRX_INDEX = 4
+};
+
+#define BANDWIDTH_BASE 20 /* base value of bandwidth */
+#define TOF_BW_20MHZ (BANDWIDTH_BASE << TOF_BW_20MHZ_INDEX)
+#define TOF_BW_40MHZ (BANDWIDTH_BASE << TOF_BW_40MHZ_INDEX)
+#define TOF_BW_80MHZ (BANDWIDTH_BASE << TOF_BW_80MHZ_INDEX)
+#define TOF_BW_10MHZ 10
+
+#define NFFT_BASE 64 /* base size of fft */
+#define TOF_NFFT_20MHZ (NFFT_BASE << TOF_BW_20MHZ_INDEX)
+#define TOF_NFFT_40MHZ (NFFT_BASE << TOF_BW_40MHZ_INDEX)
+#define TOF_NFFT_80MHZ (NFFT_BASE << TOF_BW_80MHZ_INDEX)
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_params_tof_method {
+ chanspec_t chanspec; /* chanspec for home channel */
+ int16 tx_power; /* tx power of Proximity Detection(PD) frames (in dBm) */
+	uint16 tx_rate;			/* tx rate of PD frames (in 500kbps units) */
+ uint16 timeout; /* state machine wait timeout of the frames (in ms) */
+ uint16 interval; /* interval between neighbor finding attempts (in TU) */
+ uint16 duration; /* duration of neighbor finding attempts (in ms) */
+ /* specific for the method go after this line */
+ struct ether_addr tgt_mac; /* target mac addr for TOF method */
+ uint16 ftm_cnt; /* number of the frames txed by initiator */
+	uint16 retry_cnt;		/* number of retransmit attempts for ftm frames */
+ int16 vht_rate; /* ht or vht rate */
+	/* more params required for other methods can be added here */
+} BWL_POST_PACKED_STRUCT wl_proxd_params_tof_method_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_params_tof_tune {
+ uint32 Ki; /* h/w delay K factor for initiator */
+ uint32 Kt; /* h/w delay K factor for target */
+ int16 vhtack; /* enable/disable VHT ACK */
+ int16 N_log2[TOF_BW_SEQ_NUM]; /* simple threshold crossing */
+ int16 w_offset[TOF_BW_NUM]; /* offset of threshold crossing window(per BW) */
+ int16 w_len[TOF_BW_NUM]; /* length of threshold crossing window(per BW) */
+ int32 maxDT; /* max time difference of T4/T1 or T3/T2 */
+ int32 minDT; /* min time difference of T4/T1 or T3/T2 */
+	uint8	totalfrmcnt;	/* total count of transferred measurement frames */
+ uint16 rsv_media; /* reserve media value for TOF */
+ uint32 flags; /* flags */
+ uint8 core; /* core to use for tx */
+ uint8 force_K; /* set to force value of K */
+ int16 N_scale[TOF_BW_SEQ_NUM]; /* simple threshold crossing */
+ uint8 sw_adj; /* enable sw assisted timestamp adjustment */
+ uint8 hw_adj; /* enable hw assisted timestamp adjustment */
+ uint8 seq_en; /* enable ranging sequence */
+ uint8 ftm_cnt[TOF_BW_SEQ_NUM]; /* number of ftm frames based on bandwidth */
+} BWL_POST_PACKED_STRUCT wl_proxd_params_tof_tune_t;
+
+typedef struct wl_proxd_params_iovar {
+	uint16 method;			/* Proximity Detection method */
+ union {
+ /* common params for pdsvc */
+ wl_proxd_params_common_t cmn_params; /* common parameters */
+ /* method specific */
+ wl_proxd_params_rssi_method_t rssi_params; /* RSSI method parameters */
+		wl_proxd_params_tof_method_t	tof_params;	/* TOF method parameters */
+ /* tune parameters */
+ wl_proxd_params_tof_tune_t tof_tune; /* TOF tune parameters */
+ } u; /* Method specific optional parameters */
+} wl_proxd_params_iovar_t;
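Editorial sketch (not part of the patch): selecting the RSSI method and filling its branch of the union above. The numeric values are placeholders; tx_rate is in the 500 kbps units noted above, so 12 means 6 Mbps.

#include <string.h>	/* memset; the wlioctl.h definitions above are assumed included */

static void fill_rssi_proxd_params(wl_proxd_params_iovar_t *p)
{
	memset(p, 0, sizeof(*p));
	p->method = PROXD_RSSI_METHOD;
	p->u.rssi_params.tx_power = 10;		/* dBm */
	p->u.rssi_params.tx_rate = 12;		/* 12 * 500 kbps = 6 Mbps */
	p->u.rssi_params.timeout = 100;		/* ms */
	p->u.rssi_params.rssi_thresh = -70;	/* dBm */
	p->u.rssi_params.maxconvergtmo = 500;	/* ms */
}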
+
+#define PROXD_COLLECT_GET_STATUS 0
+#define PROXD_COLLECT_SET_STATUS 1
+#define PROXD_COLLECT_QUERY_HEADER 2
+#define PROXD_COLLECT_QUERY_DATA 3
+#define PROXD_COLLECT_QUERY_DEBUG 4
+#define PROXD_COLLECT_REMOTE_REQUEST 5
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_collect_query {
+ uint32 method; /* method */
+ uint8 request; /* Query request. */
+ uint8 status; /* 0 -- disable, 1 -- enable collection, */
+ /* 2 -- enable collection & debug */
+ uint16 index; /* The current frame index [0 to total_frames - 1]. */
+ uint16 mode; /* Initiator or Target */
+ bool busy; /* tof sm is busy */
+ bool remote; /* Remote collect data */
+} BWL_POST_PACKED_STRUCT wl_proxd_collect_query_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_collect_header {
+	uint16 total_frames;	/* Total number of frames for this collect. */
+ uint16 nfft; /* nfft value */
+ uint16 bandwidth; /* bandwidth */
+ uint16 channel; /* channel number */
+ uint32 chanspec; /* channel spec */
+ uint32 fpfactor; /* avb timer value factor */
+ uint16 fpfactor_shift; /* avb timer value shift bits */
+ int32 distance; /* distance calculated by fw */
+ uint32 meanrtt; /* mean of RTTs */
+ uint32 modertt; /* mode of RTTs */
+ uint32 medianrtt; /* median of RTTs */
+ uint32 sdrtt; /* standard deviation of RTTs */
+ uint32 clkdivisor; /* clock divisor */
+ uint16 chipnum; /* chip type */
+ uint8 chiprev; /* chip revision */
+ uint8 phyver; /* phy version */
+ struct ether_addr loaclMacAddr; /* local mac address */
+ struct ether_addr remoteMacAddr; /* remote mac address */
+ wl_proxd_params_tof_tune_t params;
+} BWL_POST_PACKED_STRUCT wl_proxd_collect_header_t;
+
+
+/* ********************** NAN wl interface struct types and defs ******************** */
+
+#define WL_NAN_IOCTL_VERSION 0x1
+
+/* wl_nan_sub_cmd may also be used in dhd */
+typedef struct wl_nan_sub_cmd wl_nan_sub_cmd_t;
+typedef int (cmd_handler_t)(void *wl, const wl_nan_sub_cmd_t *cmd, char **argv);
+/* nan cmd list entry */
+struct wl_nan_sub_cmd {
+ char *name;
+ uint8 version; /* cmd version */
+ uint16 id; /* id for the dongle f/w switch/case */
+ uint16 type; /* base type of argument */
+ cmd_handler_t *handler; /* cmd handler */
+};
+
+/* container for nan iovar tlvs & events */
+typedef BWL_PRE_PACKED_STRUCT struct wl_nan_ioc {
+ uint16 version; /* interface command or event version */
+ uint16 id; /* nan ioctl cmd ID */
+ uint16 len; /* total length of all tlv records in data[] */
+ uint8 data [1]; /* var len payload of bcm_xtlv_t type */
+} BWL_POST_PACKED_STRUCT wl_nan_ioc_t;
+
+typedef struct wl_nan_status {
+ uint8 inited;
+ uint8 joined;
+ uint8 role;
+ uint8 hop_count;
+ uint32 chspec;
+ uint8 amr[8]; /* Anchor Master Rank */
+ uint32 cnt_pend_txfrm; /* pending TX frames */
+ uint32 cnt_bcn_tx; /* TX disc/sync beacon count */
+ uint32 cnt_bcn_rx; /* RX disc/sync beacon count */
+ uint32 cnt_svc_disc_tx; /* TX svc disc frame count */
+ uint32 cnt_svc_disc_rx; /* RX svc disc frame count */
+ struct ether_addr cid;
+} wl_nan_status_t;
+
+/* various params and ctl switches for nan_debug instance */
+typedef struct nan_debug_params {
+	uint8 enabled;		/* runtime debugging enabled */
+ uint8 collect; /* enables debug svc sdf monitor mode */
+ uint16 cmd; /* debug cmd to perform a debug action */
+ uint32 msglevel; /* msg level if enabled */
+ uint16 status;
+} nan_debug_params_t;
+
+
+/* nan passive scan params */
+#define NAN_SCAN_MAX_CHCNT 8
+typedef BWL_PRE_PACKED_STRUCT struct nan_scan_params {
+ uint16 scan_time;
+ uint16 home_time;
+ uint16 chspec_num;
+	chanspec_t chspec_list[NAN_SCAN_MAX_CHCNT];	/* actually used: 3, remaining 5 RFU */
+} BWL_POST_PACKED_STRUCT nan_scan_params_t;
+
+enum wl_nan_role {
+ WL_NAN_ROLE_AUTO = 0,
+ WL_NAN_ROLE_NON_MASTER_NON_SYNC = 1,
+ WL_NAN_ROLE_NON_MASTER_SYNC = 2,
+ WL_NAN_ROLE_MASTER = 3,
+ WL_NAN_ROLE_ANCHOR_MASTER = 4
+};
+#define NAN_MASTER_RANK_LEN 8
+/* nan cmd IDs */
+enum wl_nan_cmds {
+ /* nan cfg /disc & dbg ioctls */
+ WL_NAN_CMD_ENABLE = 1,
+ WL_NAN_CMD_ATTR = 2,
+ WL_NAN_CMD_NAN_JOIN = 3,
+ WL_NAN_CMD_LEAVE = 4,
+ WL_NAN_CMD_MERGE = 5,
+ WL_NAN_CMD_STATUS = 6,
+ /* discovery engine commands */
+ WL_NAN_CMD_PUBLISH = 20,
+ WL_NAN_CMD_SUBSCRIBE = 21,
+ WL_NAN_CMD_CANCEL_PUBLISH = 22,
+ WL_NAN_CMD_CANCEL_SUBSCRIBE = 23,
+ WL_NAN_CMD_TRANSMIT = 24,
+ WL_NAN_CMD_CONNECTION = 25,
+ WL_NAN_CMD_SHOW = 26,
+ WL_NAN_CMD_STOP = 27, /* stop nan for a given cluster ID */
+ /* nan debug iovars & cmds */
+ WL_NAN_CMD_SCAN_PARAMS = 46,
+ WL_NAN_CMD_SCAN = 47,
+ WL_NAN_CMD_SCAN_RESULTS = 48,
+ WL_NAN_CMD_EVENT_MASK = 49,
+ WL_NAN_CMD_EVENT_CHECK = 50,
+
+ WL_NAN_CMD_DEBUG = 60,
+ WL_NAN_CMD_TEST1 = 61,
+ WL_NAN_CMD_TEST2 = 62,
+ WL_NAN_CMD_TEST3 = 63
+};
+
+/*
+ * tlv IDs uniquely identify cmd parameters
+ * packed into wl_nan_ioc_t container
+ */
+enum wl_nan_cmd_xtlv_id {
+ /* 0x00 ~ 0xFF: standard TLV ID whose data format is the same as NAN attribute TLV */
+ WL_NAN_XTLV_ZERO = 0, /* used as tlv buf end marker */
+#ifdef NAN_STD_TLV /* rfu, don't use yet */
+ WL_NAN_XTLV_MASTER_IND = 1, /* == NAN_ATTR_MASTER_IND, */
+ WL_NAN_XTLV_CLUSTER = 2, /* == NAN_ATTR_CLUSTER, */
+ WL_NAN_XTLV_VENDOR = 221, /* == NAN_ATTR_VENDOR, */
+#endif
+ /* 0x02 ~ 0xFF: reserved. In case to use with the same data format as NAN attribute TLV */
+ /* 0x100 ~ : private TLV ID defined just for NAN command */
+ /* common types */
+ WL_NAN_XTLV_BUFFER = 0x101, /* generic type, function depends on cmd context */
+ WL_NAN_XTLV_MAC_ADDR = 0x102, /* used in various cmds */
+ WL_NAN_XTLV_REASON = 0x103,
+ WL_NAN_XTLV_ENABLE = 0x104,
+ /* explicit types, primarily for discovery engine iovars */
+ WL_NAN_XTLV_SVC_PARAMS = 0x120, /* Contains required params: wl_nan_disc_params_t */
+ WL_NAN_XTLV_MATCH_RX = 0x121, /* Matching filter to evaluate on receive */
+ WL_NAN_XTLV_MATCH_TX = 0x122, /* Matching filter to send */
+ WL_NAN_XTLV_SVC_INFO = 0x123, /* Service specific info */
+ WL_NAN_XTLV_SVC_NAME = 0x124, /* Optional UTF-8 service name, for debugging. */
+ WL_NAN_XTLV_INSTANCE_ID = 0x125, /* Identifies unique publish or subscribe instance */
+ WL_NAN_XTLV_PRIORITY = 0x126, /* used in transmit cmd context */
+ WL_NAN_XTLV_REQUESTOR_ID = 0x127, /* Requestor instance ID */
+ WL_NAN_XTLV_VNDR = 0x128, /* Vendor specific attribute */
+ /* explicit types, primarily for NAN MAC iovars */
+ WL_NAN_XTLV_DW_LEN = 0x140, /* discovery win length */
+	WL_NAN_XTLV_BCN_INTERVAL = 0x141,	/* beacon interval, both sync and discovery bcns? */
+ WL_NAN_XTLV_CLUSTER_ID = 0x142,
+ WL_NAN_XTLV_IF_ADDR = 0x143,
+ WL_NAN_XTLV_MC_ADDR = 0x144,
+ WL_NAN_XTLV_ROLE = 0x145,
+ WL_NAN_XTLV_START = 0x146,
+
+ WL_NAN_XTLV_MASTER_PREF = 0x147,
+ WL_NAN_XTLV_DW_INTERVAL = 0x148,
+ WL_NAN_XTLV_PTBTT_OVERRIDE = 0x149,
+ /* nan status command xtlvs */
+ WL_NAN_XTLV_MAC_INITED = 0x14a,
+ WL_NAN_XTLV_MAC_ENABLED = 0x14b,
+ WL_NAN_XTLV_MAC_CHANSPEC = 0x14c,
+ WL_NAN_XTLV_MAC_AMR = 0x14d, /* anchormaster rank u8 amr[8] */
+ WL_NAN_XTLV_MAC_HOPCNT = 0x14e,
+ WL_NAN_XTLV_MAC_AMBTT = 0x14f,
+ WL_NAN_XTLV_MAC_TXRATE = 0x150,
+ WL_NAN_XTLV_MAC_STATUS = 0x151, /* xtlv payload is nan_status_t */
+ WL_NAN_XTLV_NAN_SCANPARAMS = 0x152, /* payload is nan_scan_params_t */
+	WL_NAN_XTLV_DEBUGPARAMS	 = 0x153,	/* payload is nan_debug_params_t */
+ WL_NAN_XTLV_SUBSCR_ID = 0x154, /* subscriber id */
+ WL_NAN_XTLV_PUBLR_ID = 0x155, /* publisher id */
+ WL_NAN_XTLV_EVENT_MASK = 0x156,
+ WL_NAN_XTLV_MERGE = 0x157
+};
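Editorial sketch (not part of the patch): packing one record into the wl_nan_ioc_t container defined earlier. It assumes the xtlv records in data[] start with little-endian uint16 id and len followed by the value bytes, and it ignores any per-record alignment the firmware parser may require; build_nan_enable is a hypothetical host-side helper.

#include <string.h>	/* memcpy; the wlioctl.h definitions above are assumed included */

static int build_nan_enable(uint8 *buf, uint32 buflen)
{
	wl_nan_ioc_t *ioc = (wl_nan_ioc_t *)buf;
	uint16 id = WL_NAN_XTLV_ENABLE;
	uint16 len = 1;					/* one byte of value */

	if (buflen < OFFSETOF(wl_nan_ioc_t, data) + 4 + 1)
		return -1;
	ioc->version = WL_NAN_IOCTL_VERSION;
	ioc->id = WL_NAN_CMD_ENABLE;
	memcpy(ioc->data, &id, sizeof(id));		/* assumed record header: id, len */
	memcpy(ioc->data + 2, &len, sizeof(len));
	ioc->data[4] = 1;				/* enable */
	ioc->len = 4 + 1;				/* total bytes of tlv records in data[] */
	return 0;
}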
+
+/* Flag bits for Publish and Subscribe (wl_nan_disc_params_t flags) */
+#define WL_NAN_RANGE_LIMITED 0x0040
+/* Bits specific to Publish */
+/* Unsolicited transmissions */
+#define WL_NAN_PUB_UNSOLICIT 0x1000
+/* Solicited transmissions */
+#define WL_NAN_PUB_SOLICIT 0x2000
+#define WL_NAN_PUB_BOTH 0x3000
+/* Set for broadcast solicited transmission
+ * Do not set for unicast solicited transmission
+ */
+#define WL_NAN_PUB_BCAST 0x4000
+/* Generate event on each solicited transmission */
+#define WL_NAN_PUB_EVENT 0x8000
+/* Used for one-time solicited Publish functions to indicate transmission occurred */
+#define WL_NAN_PUB_SOLICIT_PENDING 0x10000
+/* Follow-up frames */
+#define WL_NAN_FOLLOWUP 0x20000
+/* Bits specific to Subscribe */
+/* Active subscribe mode (Leave unset for passive) */
+#define WL_NAN_SUB_ACTIVE 0x1000
+
+/* Special values for time to live (ttl) parameter */
+#define WL_NAN_TTL_UNTIL_CANCEL 0xFFFFFFFF
+/* Publish - runs until first transmission
+ * Subscribe - runs until first DiscoveryResult event
+ */
+#define WL_NAN_TTL_FIRST 0
+
+/* The service hash (service id) is exactly this many bytes. */
+#define WL_NAN_SVC_HASH_LEN 6
+
+/* Instance ID type (unique identifier) */
+typedef uint8 wl_nan_instance_id_t;
+
+/* Mandatory parameters for publish/subscribe iovars - NAN_TLV_SVC_PARAMS */
+typedef struct wl_nan_disc_params_s {
+ /* Periodicity of unsolicited/query transmissions, in DWs */
+ uint32 period;
+ /* Time to live in DWs */
+ uint32 ttl;
+ /* Flag bits */
+ uint32 flags;
+ /* Publish or subscribe service id, i.e. hash of the service name */
+ uint8 svc_hash[WL_NAN_SVC_HASH_LEN];
+ /* Publish or subscribe id */
+ wl_nan_instance_id_t instance_id;
+} wl_nan_disc_params_t;
+
+/*
+ * discovery interface event structures
+ */
+
+/* NAN Ranging */
+
+/* Bit defines for global flags */
+#define WL_NAN_RANGING_ENABLE 1 /* enable RTT */
+#define WL_NAN_RANGING_RANGED 2 /* Report to host if ranged as target */
+typedef struct nan_ranging_config {
+ uint32 chanspec; /* Ranging chanspec */
+ uint16 timeslot; /* NAN RTT start time slot 1-511 */
+ uint16 duration; /* NAN RTT duration in ms */
+ struct ether_addr allow_mac; /* peer initiated ranging: the allowed peer mac
+ * address, a unicast (for one peer) or
+ * a broadcast for all. Setting it to all zeros
+	                                 * means responding to none, same as not setting
+ * the flag bit NAN_RANGING_RESPOND
+ */
+ uint16 flags;
+} wl_nan_ranging_config_t;
+
+/* list of peers for self initiated ranging */
+/* Bit defines for per peer flags */
+#define WL_NAN_RANGING_REPORT (1<<0) /* Enable reporting range to target */
+typedef struct nan_ranging_peer {
+ uint32 chanspec; /* desired chanspec for this peer */
+ uint32 abitmap; /* available bitmap */
+ struct ether_addr ea; /* peer MAC address */
+ uint8 frmcnt; /* frame count */
+ uint8 retrycnt; /* retry count */
+ uint16 flags; /* per peer flags, report or not */
+} wl_nan_ranging_peer_t;
+typedef struct nan_ranging_list {
+ uint8 count; /* number of MAC addresses */
+ uint8 num_peers_done; /* host set to 0, when read, shows number of peers
+ * completed, success or fail
+ */
+ uint8 num_dws; /* time period to do the ranging, specified in dws */
+ uint8 reserve; /* reserved field */
+ wl_nan_ranging_peer_t rp[1]; /* variable length array of peers */
+} wl_nan_ranging_list_t;
+
+/* ranging results, a list for self initiated ranging and one for peer initiated ranging */
+/* There will be one structure for each peer */
+#define WL_NAN_RANGING_STATUS_SUCCESS 1
+#define WL_NAN_RANGING_STATUS_FAIL 2
+#define WL_NAN_RANGING_STATUS_TIMEOUT 3
+#define WL_NAN_RANGING_STATUS_ABORT 4 /* with partial results if sounding count > 0 */
+typedef struct nan_ranging_result {
+ uint8 status; /* 1: Success, 2: Fail 3: Timeout 4: Aborted */
+ uint8 sounding_count; /* number of measurements completed (0 = failure) */
+ struct ether_addr ea; /* initiator MAC address */
+ uint32 chanspec; /* Chanspec where the ranging was done */
+ uint32 timestamp; /* 32bits of the TSF timestamp ranging was completed at */
+ uint32 distance; /* mean distance in meters expressed as Q4 number.
+ * Only valid when sounding_count > 0. Examples:
+ * 0x08 = 0.5m
+ * 0x10 = 1m
+ * 0x18 = 1.5m
+ * set to 0xffffffff to indicate invalid number
+ */
+ int32 rtt_var; /* standard deviation in 10th of ns of RTTs measured.
+ * Only valid when sounding_count > 0
+ */
+ struct ether_addr tgtea; /* target MAC address */
+} wl_nan_ranging_result_t;
+typedef struct nan_ranging_event_data {
+ uint8 mode; /* 1: Result of host initiated ranging */
+ /* 2: Result of peer initiated ranging */
+ uint8 reserved;
+ uint8 success_count; /* number of peers completed successfully */
+ uint8 count; /* number of peers in the list */
+ wl_nan_ranging_result_t rr[1]; /* variable array of ranging peers */
+} wl_nan_ranging_event_data_t;
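Editorial sketch (not part of the patch): the distance field above is a Q4 fixed-point value in metres (0x10 = 1 m) with 0xffffffff marking an invalid measurement. ranging_distance_cm below is a hypothetical conversion helper.

static int ranging_distance_cm(const wl_nan_ranging_result_t *rr, uint32 *cm)
{
	if (rr->sounding_count == 0 || rr->distance == 0xffffffff)
		return -1;				/* no valid measurement */
	*cm = (rr->distance * 100) >> 4;		/* Q4 metres -> centimetres */
	return 0;
}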
+
+/* ********************* end of NAN section ******************************** */
+
+
+#define RSSI_THRESHOLD_SIZE 16
+#define MAX_IMP_RESP_SIZE 256
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_rssi_bias {
+ int32 version; /* version */
+ int32 threshold[RSSI_THRESHOLD_SIZE]; /* threshold */
+ int32 peak_offset; /* peak offset */
+ int32 bias; /* rssi bias */
+ int32 gd_delta; /* GD - GD_ADJ */
+ int32 imp_resp[MAX_IMP_RESP_SIZE]; /* (Hi*Hi)+(Hr*Hr) */
+} BWL_POST_PACKED_STRUCT wl_proxd_rssi_bias_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_rssi_bias_avg {
+ int32 avg_threshold[RSSI_THRESHOLD_SIZE]; /* avg threshold */
+ int32 avg_peak_offset; /* avg peak offset */
+ int32 avg_rssi; /* avg rssi */
+ int32 avg_bias; /* avg bias */
+} BWL_POST_PACKED_STRUCT wl_proxd_rssi_bias_avg_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_collect_info {
+ uint16 type; /* type: 0 channel table, 1 channel smoothing table, 2 and 3 seq */
+ uint16 index; /* The current frame index, from 1 to total_frames. */
+ uint16 tof_cmd; /* M_TOF_CMD */
+ uint16 tof_rsp; /* M_TOF_RSP */
+ uint16 tof_avb_rxl; /* M_TOF_AVB_RX_L */
+ uint16 tof_avb_rxh; /* M_TOF_AVB_RX_H */
+ uint16 tof_avb_txl; /* M_TOF_AVB_TX_L */
+ uint16 tof_avb_txh; /* M_TOF_AVB_TX_H */
+ uint16 tof_id; /* M_TOF_ID */
+ uint8 tof_frame_type;
+ uint8 tof_frame_bw;
+ int8 tof_rssi;
+ int32 tof_cfo;
+	int32 gd_adj_ns;		/* group delay */
+ int32 gd_h_adj_ns; /* group delay + threshold crossing */
+#ifdef RSSI_REFINE
+ wl_proxd_rssi_bias_t rssi_bias; /* RSSI refinement info */
+#endif
+ int16 nfft; /* number of samples stored in H */
+
+} BWL_POST_PACKED_STRUCT wl_proxd_collect_info_t;
+
+#define k_tof_collect_H_pad 1
+#define k_tof_collect_H_size (256+16+k_tof_collect_H_pad)
+#define k_tof_collect_Hraw_size (2*k_tof_collect_H_size)
+typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_collect_data {
+ wl_proxd_collect_info_t info;
+ uint32 H[k_tof_collect_H_size]; /* raw data read from phy used to adjust timestamps */
+
+} BWL_POST_PACKED_STRUCT wl_proxd_collect_data_t;
+
+typedef BWL_PRE_PACKED_STRUCT struct wl_proxd_debug_data {
+ uint8 count; /* number of packets */
+	uint8 stage;			/* state machine stage */
+ uint8 received; /* received or txed */
+ uint8 paket_type; /* packet type */
+ uint8 category; /* category field */
+ uint8 action; /* action field */
+ uint8 token; /* token number */
+ uint8 follow_token; /* following token number */
+ uint16 index; /* index of the packet */
+ uint16 tof_cmd; /* M_TOF_CMD */
+ uint16 tof_rsp; /* M_TOF_RSP */
+ uint16 tof_avb_rxl; /* M_TOF_AVB_RX_L */
+ uint16 tof_avb_rxh; /* M_TOF_AVB_RX_H */
+ uint16 tof_avb_txl; /* M_TOF_AVB_TX_L */
+ uint16 tof_avb_txh; /* M_TOF_AVB_TX_H */
+ uint16 tof_id; /* M_TOF_ID */
+ uint16 tof_status0; /* M_TOF_STATUS_0 */
+ uint16 tof_status2; /* M_TOF_STATUS_2 */
+ uint16 tof_chsm0; /* M_TOF_CHNSM_0 */
+ uint16 tof_phyctl0; /* M_TOF_PHYCTL0 */
+ uint16 tof_phyctl1; /* M_TOF_PHYCTL1 */
+ uint16 tof_phyctl2; /* M_TOF_PHYCTL2 */
+ uint16 tof_lsig; /* M_TOF_LSIG */
+ uint16 tof_vhta0; /* M_TOF_VHTA0 */
+ uint16 tof_vhta1; /* M_TOF_VHTA1 */
+ uint16 tof_vhta2; /* M_TOF_VHTA2 */
+ uint16 tof_vhtb0; /* M_TOF_VHTB0 */
+ uint16 tof_vhtb1; /* M_TOF_VHTB1 */
+ uint16 tof_apmductl; /* M_TOF_AMPDU_CTL */
+ uint16 tof_apmdudlim; /* M_TOF_AMPDU_DLIM */
+ uint16 tof_apmdulen; /* M_TOF_AMPDU_LEN */
+} BWL_POST_PACKED_STRUCT wl_proxd_debug_data_t;
+
+/* version of the wl_wsec_info structure */
+#define WL_WSEC_INFO_VERSION 0x01
+
+/* start enum value for BSS properties */
+#define WL_WSEC_INFO_BSS_BASE 0x0100
+
+/* size of len and type fields of wl_wsec_info_tlv_t struct */
+#define WL_WSEC_INFO_TLV_HDR_LEN OFFSETOF(wl_wsec_info_tlv_t, data)
+
+/* Allowed wl_wsec_info properties; not all of them may be supported. */
+typedef enum {
+ WL_WSEC_INFO_NONE = 0,
+ WL_WSEC_INFO_MAX_KEYS = 1,
+ WL_WSEC_INFO_NUM_KEYS = 2,
+ WL_WSEC_INFO_NUM_HW_KEYS = 3,
+ WL_WSEC_INFO_MAX_KEY_IDX = 4,
+ WL_WSEC_INFO_NUM_REPLAY_CNTRS = 5,
+ WL_WSEC_INFO_SUPPORTED_ALGOS = 6,
+ WL_WSEC_INFO_MAX_KEY_LEN = 7,
+ WL_WSEC_INFO_FLAGS = 8,
+ /* add global/per-wlc properties above */
+ WL_WSEC_INFO_BSS_FLAGS = (WL_WSEC_INFO_BSS_BASE + 1),
+ WL_WSEC_INFO_BSS_WSEC = (WL_WSEC_INFO_BSS_BASE + 2),
+ WL_WSEC_INFO_BSS_TX_KEY_ID = (WL_WSEC_INFO_BSS_BASE + 3),
+ WL_WSEC_INFO_BSS_ALGO = (WL_WSEC_INFO_BSS_BASE + 4),
+ WL_WSEC_INFO_BSS_KEY_LEN = (WL_WSEC_INFO_BSS_BASE + 5),
+ /* add per-BSS properties above */
+ WL_WSEC_INFO_MAX = 0xffff
+} wl_wsec_info_type_t;
+
+/* tlv used to return wl_wsec_info properties */
+typedef struct {
+ uint16 type;
+ uint16 len; /* data length */
+ uint8 data[1]; /* data follows */
+} wl_wsec_info_tlv_t;
+
+/* input/output data type for wsec_info iovar */
+typedef struct wl_wsec_info {
+ uint8 version; /* structure version */
+ uint8 pad[2];
+ uint8 num_tlvs;
+ wl_wsec_info_tlv_t tlvs[1]; /* tlv data follows */
+} wl_wsec_info_t;
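A wsec_info response is a packed sequence of the TLVs defined above; WL_WSEC_INFO_TLV_HDR_LEN is the per-entry header size and each entry occupies that header plus len bytes of data. A hedged sketch of walking such a buffer follows; any inter-entry alignment padding used by the firmware is not modeled here, and standalone C99 types replace the driver typedefs:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Standalone mirror of the TLV layout above (illustration only). */
typedef struct {
	uint16_t type;
	uint16_t len;     /* data length */
	uint8_t  data[];  /* data follows */
} wsec_info_tlv;

#define WSEC_INFO_TLV_HDR_LEN offsetof(wsec_info_tlv, data)

/* Walk num_tlvs entries in a wsec_info response buffer.
 * Note: entries are assumed back to back (header + len); on targets with
 * strict alignment a memcpy of the header fields would be safer than the
 * pointer cast used here.
 */
static void walk_wsec_info(const uint8_t *buf, size_t buflen, unsigned num_tlvs)
{
	size_t off = 0;

	while (num_tlvs-- && off + WSEC_INFO_TLV_HDR_LEN <= buflen) {
		const wsec_info_tlv *tlv = (const wsec_info_tlv *)(buf + off);

		if (off + WSEC_INFO_TLV_HDR_LEN + tlv->len > buflen)
			break;  /* truncated entry */
		printf("type 0x%04x, %u bytes of data\n",
		       (unsigned)tlv->type, (unsigned)tlv->len);
		off += WSEC_INFO_TLV_HDR_LEN + tlv->len;
	}
}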
+
+/* no default structure packing */
+#include <packed_section_end.h>
+
+enum rssi_reason {
+ RSSI_REASON_UNKNOW = 0,
+ RSSI_REASON_LOWRSSI = 1,
+ RSSI_REASON_NSYC = 2,
+ RSSI_REASON_TIMEOUT = 3
+};
+
+enum tof_reason {
+ TOF_REASON_OK = 0,
+ TOF_REASON_REQEND = 1,
+ TOF_REASON_TIMEOUT = 2,
+ TOF_REASON_NOACK = 3,
+ TOF_REASON_INVALIDAVB = 4,
+ TOF_REASON_INITIAL = 5,
+ TOF_REASON_ABORT = 6
+};
+
+enum rssi_state {
+ RSSI_STATE_POLL = 0,
+ RSSI_STATE_TPAIRING = 1,
+ RSSI_STATE_IPAIRING = 2,
+ RSSI_STATE_THANDSHAKE = 3,
+ RSSI_STATE_IHANDSHAKE = 4,
+ RSSI_STATE_CONFIRMED = 5,
+ RSSI_STATE_PIPELINE = 6,
+ RSSI_STATE_NEGMODE = 7,
+ RSSI_STATE_MONITOR = 8,
+ RSSI_STATE_LAST = 9
+};
+
+enum tof_state {
+ TOF_STATE_IDLE = 0,
+ TOF_STATE_IWAITM = 1,
+ TOF_STATE_TWAITM = 2,
+ TOF_STATE_ILEGACY = 3,
+ TOF_STATE_IWAITCL = 4,
+ TOF_STATE_TWAITCL = 5,
+ TOF_STATE_ICONFIRM = 6,
+ TOF_STATE_IREPORT = 7
+};
+
+enum tof_mode_type {
+ TOF_LEGACY_UNKNOWN = 0,
+ TOF_LEGACY_AP = 1,
+ TOF_NONLEGACY_AP = 2
+};
+
+enum tof_way_type {
+ TOF_TYPE_ONE_WAY = 0,
+ TOF_TYPE_TWO_WAY = 1,
+ TOF_TYPE_REPORT = 2
+};
+
+enum tof_rate_type {
+ TOF_FRAME_RATE_VHT = 0,
+ TOF_FRAME_RATE_LEGACY = 1
+};
+
+#define TOF_ADJ_TYPE_NUM	4	/* number of assisted timestamp adjustment types */
+enum tof_adj_mode {
+ TOF_ADJ_SOFTWARE = 0,
+ TOF_ADJ_HARDWARE = 1,
+ TOF_ADJ_SEQ = 2,
+ TOF_ADJ_NONE = 3
+};
+
+#define FRAME_TYPE_NUM		4	/* number of frame types */
+enum frame_type {
+ FRAME_TYPE_CCK = 0,
+ FRAME_TYPE_OFDM = 1,
+ FRAME_TYPE_11N = 2,
+ FRAME_TYPE_11AC = 3
+};
+
+typedef struct wl_proxd_status_iovar {
+ uint16 method; /* method */
+ uint8 mode; /* mode */
+ uint8 peermode; /* peer mode */
+ uint8 state; /* state */
+ uint8 reason; /* reason code */
+ uint32 distance; /* distance */
+ uint32 txcnt; /* tx pkt counter */
+ uint32 rxcnt; /* rx pkt counter */
+ struct ether_addr peer; /* peer mac address */
+ int8 avg_rssi; /* average rssi */
+ int8 hi_rssi; /* highest rssi */
+ int8 low_rssi; /* lowest rssi */
+ uint32 dbgstatus; /* debug status */
+ uint16 frame_type_cnt[FRAME_TYPE_NUM]; /* frame types */
+ uint8 adj_type_cnt[TOF_ADJ_TYPE_NUM]; /* adj types HW/SW */
+} wl_proxd_status_iovar_t;
+
+#ifdef NET_DETECT
+typedef struct net_detect_adapter_features {
+ bool wowl_enabled;
+ bool net_detect_enabled;
+ bool nlo_enabled;
+} net_detect_adapter_features_t;
+
+typedef enum net_detect_bss_type {
+ nd_bss_any = 0,
+ nd_ibss,
+ nd_ess
+} net_detect_bss_type_t;
+
+typedef struct net_detect_profile {
+ wlc_ssid_t ssid;
+ net_detect_bss_type_t bss_type; /* Ignore for now since Phase 1 is only for ESS */
+ uint32 cipher_type; /* DOT11_CIPHER_ALGORITHM enumeration values */
+ uint32 auth_type; /* DOT11_AUTH_ALGORITHM enumeration values */
+} net_detect_profile_t;
+
+typedef struct net_detect_profile_list {
+ uint32 num_nd_profiles;
+ net_detect_profile_t nd_profile[0];
+} net_detect_profile_list_t;
+
+typedef struct net_detect_config {
+ bool nd_enabled;
+ uint32 scan_interval;
+ uint32 wait_period;
+ bool wake_if_connected;
+ bool wake_if_disconnected;
+ net_detect_profile_list_t nd_profile_list;
+} net_detect_config_t;
+
+typedef enum net_detect_wake_reason {
+ nd_reason_unknown,
+ nd_net_detected,
+ nd_wowl_event,
+ nd_ucode_error
+} net_detect_wake_reason_t;
+
+typedef struct net_detect_wake_data {
+ net_detect_wake_reason_t nd_wake_reason;
+ uint32 nd_wake_date_length;
+ uint8 nd_wake_data[0]; /* Wake data (currently unused) */
+} net_detect_wake_data_t;
+
+#endif /* NET_DETECT */
+
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
typedef struct bcnreq {
uint8 bcn_mode;
uint16 reps;
} statreq_t;
+#define WL_RRM_RPT_VER 0
+#define WL_RRM_RPT_MAX_PAYLOAD 64
+#define WL_RRM_RPT_MIN_PAYLOAD 7
+#define WL_RRM_RPT_FALG_ERR 0
+#define WL_RRM_RPT_FALG_OK 1
+typedef struct {
+ uint16 ver; /* version */
+ struct ether_addr addr; /* STA MAC addr */
+ uint32 timestamp; /* timestamp of the report */
+ uint16 flag; /* flag */
+ uint16 len; /* length of payload data */
+ unsigned char data[WL_RRM_RPT_MAX_PAYLOAD];
+} statrpt_t;
+
+typedef struct wlc_l2keepalive_ol_params {
+ uint8 flags;
+ uint8 prio;
+ uint16 period_ms;
+} wlc_l2keepalive_ol_params_t;
+
+typedef struct wlc_dwds_config {
+ uint32 enable;
+ uint32 mode; /* STA/AP interface */
+ struct ether_addr ea;
+} wlc_dwds_config_t;
+
typedef struct wl_el_set_params_s {
uint8 set; /* Set number */
uint32 size; /* Size to make/expand */
uint8 flags;
} wl_el_tag_params_t;
+/* Video Traffic Interference Monitor config */
+#define INTFER_VERSION 1
+typedef struct wl_intfer_params {
+ uint16 version; /* version */
+ uint8 period; /* sample period */
+ uint8 cnt; /* sample cnt */
+ uint8 txfail_thresh; /* non-TCP txfail threshold */
+ uint8 tcptxfail_thresh; /* tcptxfail threshold */
+} wl_intfer_params_t;
+
+typedef struct wl_staprio_cfg {
+ struct ether_addr ea; /* mac addr */
+ uint8 prio; /* scb priority */
+} wl_staprio_cfg_t;
+
+typedef enum wl_stamon_cfg_cmd_type {
+ STAMON_CFG_CMD_DEL = 0,
+ STAMON_CFG_CMD_ADD = 1
+} wl_stamon_cfg_cmd_type_t;
+
+typedef struct wlc_stamon_sta_config {
+ wl_stamon_cfg_cmd_type_t cmd; /* 0 - delete, 1 - add */
+ struct ether_addr ea;
+} wlc_stamon_sta_config_t;
+
+#ifdef SR_DEBUG
+typedef struct /* pmu_reg */{
+ uint32 pmu_control;
+ uint32 pmu_capabilities;
+ uint32 pmu_status;
+ uint32 res_state;
+ uint32 res_pending;
+ uint32 pmu_timer1;
+ uint32 min_res_mask;
+ uint32 max_res_mask;
+ uint32 pmu_chipcontrol1[4];
+ uint32 pmu_regcontrol[5];
+ uint32 pmu_pllcontrol[5];
+ uint32 pmu_rsrc_up_down_timer[31];
+ uint32 rsrc_dep_mask[31];
+} pmu_reg_t;
+#endif /* SR_DEBUG */
+
+typedef struct wl_taf_define {
+ struct ether_addr ea; /* STA MAC or 0xFF... */
+ uint16 version; /* version */
+ uint32 sch; /* method index */
+ uint32 prio; /* priority */
+ uint32 misc; /* used for return value */
+ char text[1]; /* used to pass and return ascii text */
+} wl_taf_define_t;
+
+/* Received beacon lengths information */
+#define WL_LAST_BCNS_INFO_FIXED_LEN OFFSETOF(wlc_bcn_len_hist_t, bcnlen_ring)
+typedef struct wlc_bcn_len_hist {
+ uint16 ver; /* version field */
+ uint16 cur_index; /* current pointed index in ring buffer */
+ uint32 max_bcnlen; /* Max beacon length received */
+ uint32 min_bcnlen; /* Min beacon length received */
+ uint32 ringbuff_len; /* Length of the ring buffer 'bcnlen_ring' */
+ uint32 bcnlen_ring[1]; /* ring buffer storing received beacon lengths */
+} wlc_bcn_len_hist_t;
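A small standalone sketch of consuming the beacon-length ring above, computing an average over the filled slots; treating a zero entry as an unfilled slot is an assumption, since the header does not state how unused slots are marked:

#include <stdint.h>

/* Average the beacon lengths stored in bcnlen_ring[]. The driver's
 * variable-length bcnlen_ring[1] is passed here as a plain array plus the
 * ringbuff_len count; ring ordering does not matter for this computation.
 */
static uint32_t bcn_len_avg(const uint32_t *bcnlen_ring, uint32_t ringbuff_len)
{
	uint64_t sum = 0;
	uint32_t i, used = 0;

	for (i = 0; i < ringbuff_len; i++) {
		if (bcnlen_ring[i] == 0)
			continue;       /* assumed: zero means slot not filled yet */
		sum += bcnlen_ring[i];
		used++;
	}
	return used ? (uint32_t)(sum / used) : 0;
}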
+
+/* WDS net interface types */
+#define WL_WDSIFTYPE_NONE 0x0 /* The interface type is neither WDS nor DWDS. */
+#define WL_WDSIFTYPE_WDS 0x1 /* The interface is WDS type. */
+#define WL_WDSIFTYPE_DWDS 0x2 /* The interface is DWDS type. */
+
+typedef struct wl_bssload_static {
+ bool is_static;
+ uint16 sta_count;
+ uint8 chan_util;
+ uint16 aac;
+} wl_bssload_static_t;
+
+
+/* LTE coex info */
+/* Analogue of HCI Set MWS Signaling cmd */
+typedef struct {
+ uint16 mws_rx_assert_offset;
+ uint16 mws_rx_assert_jitter;
+ uint16 mws_rx_deassert_offset;
+ uint16 mws_rx_deassert_jitter;
+ uint16 mws_tx_assert_offset;
+ uint16 mws_tx_assert_jitter;
+ uint16 mws_tx_deassert_offset;
+ uint16 mws_tx_deassert_jitter;
+ uint16 mws_pattern_assert_offset;
+ uint16 mws_pattern_assert_jitter;
+ uint16 mws_inact_dur_assert_offset;
+ uint16 mws_inact_dur_assert_jitter;
+ uint16 mws_scan_freq_assert_offset;
+ uint16 mws_scan_freq_assert_jitter;
+ uint16 mws_prio_assert_offset_req;
+} wci2_config_t;
+
+/* Analogue of HCI MWS Channel Params */
+typedef struct {
+ uint16 mws_rx_center_freq; /* MHz */
+ uint16 mws_tx_center_freq;
+ uint16 mws_rx_channel_bw; /* KHz */
+ uint16 mws_tx_channel_bw;
+ uint8 mws_channel_en;
+ uint8 mws_channel_type; /* Don't care for WLAN? */
+} mws_params_t;
+
+/* MWS wci2 message */
+typedef struct {
+ uint8 mws_wci2_data; /* BT-SIG msg */
+ uint16 mws_wci2_interval; /* Interval in us */
+ uint16 mws_wci2_repeat; /* No of msgs to send */
+} mws_wci2_msg_t;
+
+typedef struct {
+ uint32 config; /* MODE: AUTO (-1), Disable (0), Enable (1) */
+ uint32 status; /* Current state: Disabled (0), Enabled (1) */
+} wl_config_t;
+
+#define WLC_RSDB_MODE_AUTO_MASK 0x80
+#define WLC_RSDB_EXTRACT_MODE(val) ((int8)((val) & (~(WLC_RSDB_MODE_AUTO_MASK))))
+
+#define WL_IF_STATS_T_VERSION 1 /* current version of wl_if_stats structure */
+
+/* per interface counters */
+typedef struct wl_if_stats {
+ uint16 version; /* version of the structure */
+ uint16 length; /* length of the entire structure */
+ uint32 PAD; /* padding */
+
+ /* transmit stat counters */
+ uint64 txframe; /* tx data frames */
+ uint64 txbyte; /* tx data bytes */
+ uint64 txerror; /* tx data errors (derived: sum of others) */
+ uint64 txnobuf; /* tx out of buffer errors */
+ uint64 txrunt; /* tx runt frames */
+ uint64 txfail; /* tx failed frames */
+ uint64 txretry; /* tx retry frames */
+ uint64 txretrie; /* tx multiple retry frames */
+ uint64 txfrmsnt; /* tx sent frames */
+	uint64	txmulti;	/* tx multicast sent frames */
+ uint64 txfrag; /* tx fragments sent */
+
+ /* receive stat counters */
+ uint64 rxframe; /* rx data frames */
+ uint64 rxbyte; /* rx data bytes */
+ uint64 rxerror; /* rx data errors (derived: sum of others) */
+ uint64 rxnobuf; /* rx out of buffer errors */
+ uint64 rxrunt; /* rx runt frames */
+ uint64 rxfragerr; /* rx fragment errors */
+ uint64 rxmulti; /* rx multicast frames */
+}
+wl_if_stats_t;
+
+typedef struct wl_band {
+ uint16 bandtype; /* WL_BAND_2G, WL_BAND_5G */
+ uint16 bandunit; /* bandstate[] index */
+ uint16 phytype; /* phytype */
+ uint16 phyrev;
+}
+wl_band_t;
+
+#define WL_WLC_VERSION_T_VERSION 1 /* current version of wlc_version structure */
+
+/* wlc interface version */
+typedef struct wl_wlc_version {
+ uint16 version; /* version of the structure */
+ uint16 length; /* length of the entire structure */
+
+ /* epi version numbers */
+ uint16 epi_ver_major; /* epi major version number */
+ uint16 epi_ver_minor; /* epi minor version number */
+ uint16 epi_rc_num; /* epi RC number */
+ uint16 epi_incr_num; /* epi increment number */
+
+ /* wlc interface version numbers */
+ uint16 wlc_ver_major; /* wlc interface major version number */
+ uint16 wlc_ver_minor; /* wlc interface minor version number */
+}
+wl_wlc_version_t;
+
+/* Version of WLC interface to be returned as a part of wl_wlc_version structure.
+ * For the discussion of the version update policy, refer to
+ * http://hwnbu-twiki.broadcom.com/bin/view/Mwgroup/WlShimAbstractionLayer
+ * For now the policy is to increment WLC_VERSION_MAJOR each time
+ * there is a change that involves both the WLC layer and the per-port layer.
+ * WLC_VERSION_MINOR is currently not in use.
+ */
+#define WLC_VERSION_MAJOR 3
+#define WLC_VERSION_MINOR 0
+
+
+/* require strict packing */
+#include <packed_section_start.h>
+/* Data returned by the bssload_report iovar.
+ * This is also the WLC_E_BSS_LOAD event data.
+ */
+typedef BWL_PRE_PACKED_STRUCT struct wl_bssload {
+ uint16 sta_count; /* station count */
+ uint16 aac; /* available admission capacity */
+ uint8 chan_util; /* channel utilization */
+} BWL_POST_PACKED_STRUCT wl_bssload_t;
+
+/* Maximum number of configurable BSS Load levels. The number of BSS Load
+ * ranges is always 1 more than the number of configured levels, e.g. if
+ * 3 levels of 10, 20, 30 are configured then this defines 4 load ranges:
+ * 0-10, 11-20, 21-30, 31-255. A WLC_E_BSS_LOAD event is generated each time
+ * the utilization level crosses into another range, subject to the rate limit.
+ */
+#define MAX_BSSLOAD_LEVELS 8
+#define MAX_BSSLOAD_RANGES (MAX_BSSLOAD_LEVELS + 1)
+
+/* BSS Load event notification configuration. */
+typedef struct wl_bssload_cfg {
+ uint32 rate_limit_msec; /* # of events posted to application will be limited to
+ * one per specified period (0 to disable rate limit).
+ */
+ uint8 num_util_levels; /* Number of entries in util_levels[] below */
+ uint8 util_levels[MAX_BSSLOAD_LEVELS];
+ /* Variable number of BSS Load utilization levels in
+ * low to high order. An event will be posted each time
+ * a received beacon's BSS Load IE channel utilization
+ * value crosses a level.
+ */
+} wl_bssload_cfg_t;
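Per the comment above, N configured levels partition the 0-255 channel-utilization scale into N+1 ranges. A standalone sketch of the range lookup a host might apply to the chan_util value carried in a WLC_E_BSS_LOAD event (util_levels assumed ascending, as required above):

#include <stdint.h>

#define MAX_BSSLOAD_LEVELS 8

/* Map a channel-utilization value (0-255) to its range index.
 * With levels {10, 20, 30}: 0-10 -> 0, 11-20 -> 1, 21-30 -> 2, 31-255 -> 3.
 */
static unsigned bssload_range_index(uint8_t chan_util,
                                    const uint8_t *util_levels,
                                    uint8_t num_util_levels)
{
	unsigned i;

	for (i = 0; i < num_util_levels && i < MAX_BSSLOAD_LEVELS; i++) {
		if (chan_util <= util_levels[i])
			return i;
	}
	return i;   /* above the highest configured level */
}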
+
+/* Multiple roaming profile support */
+#define WL_MAX_ROAM_PROF_BRACKETS 4
+
+#define WL_MAX_ROAM_PROF_VER 0
+
+#define WL_ROAM_PROF_NONE (0 << 0)
+#define WL_ROAM_PROF_LAZY (1 << 0)
+#define WL_ROAM_PROF_NO_CI (1 << 1)
+#define WL_ROAM_PROF_SUSPEND (1 << 2)
+#define WL_ROAM_PROF_SYNC_DTIM (1 << 6)
+#define WL_ROAM_PROF_DEFAULT (1 << 7) /* backward compatible single default profile */
+
+typedef struct wl_roam_prof {
+ int8 roam_flags; /* bit flags */
+ int8 roam_trigger; /* RSSI trigger level per profile/RSSI bracket */
+ int8 rssi_lower;
+ int8 roam_delta;
+ int8 rssi_boost_thresh; /* Min RSSI to qualify for RSSI boost */
+ int8 rssi_boost_delta; /* RSSI boost for AP in the other band */
+	uint16	nfscan;	/* number of full scans to start with */
+ uint16 fullscan_period;
+ uint16 init_scan_period;
+ uint16 backoff_multiplier;
+ uint16 max_scan_period;
+} wl_roam_prof_t;
+
+typedef struct wl_roam_prof_band {
+ uint32 band; /* Must be just one band */
+ uint16 ver; /* version of this struct */
+ uint16 len; /* length in bytes of this structure */
+ wl_roam_prof_t roam_prof[WL_MAX_ROAM_PROF_BRACKETS];
+} wl_roam_prof_band_t;
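A hedged sketch of the bracket selection implied by the fields above, assuming each profile covers RSSI values from rssi_lower (exclusive) up to roam_trigger (inclusive); the field comments suggest this interpretation but do not state it explicitly:

#include <stdint.h>

#define WL_MAX_ROAM_PROF_BRACKETS 4

/* Minimal mirror of the per-bracket fields used here (illustration only). */
struct demo_roam_prof {
	int8_t roam_trigger;  /* assumed upper RSSI bound of the bracket */
	int8_t rssi_lower;    /* assumed lower RSSI bound of the bracket */
};

/* Return the index of the bracket that an RSSI value falls into,
 * or -1 if it lies outside all configured brackets.
 */
static int demo_roam_bracket(const struct demo_roam_prof *prof, int nprof,
                             int8_t rssi)
{
	int i;

	for (i = 0; i < nprof && i < WL_MAX_ROAM_PROF_BRACKETS; i++) {
		if (rssi <= prof[i].roam_trigger && rssi > prof[i].rssi_lower)
			return i;
	}
	return -1;
}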
+
+/* no default structure packing */
+#include <packed_section_end.h>
+
#endif /* _wlioctl_h_ */
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: linux_osl.c 412994 2013-07-17 12:38:03Z $
+ * $Id: linux_osl.c 490846 2014-07-12 13:08:59Z $
*/
#define LINUX_PORT
#include <bcmendian.h>
#include <linuxver.h>
#include <bcmdefs.h>
+
+#if defined(BCM47XX_CA9) && defined(__ARM_ARCH_7A__)
+#include <asm/cacheflush.h>
+#endif /* BCM47XX_CA9 && __ARM_ARCH_7A__ */
+
+#include <linux/random.h>
+
#include <osl.h>
#include <bcmutils.h>
#include <linux/delay.h>
#include <linux/fs.h>
-#define PCI_CFG_RETRY 10
+#ifdef BCM47XX_ACP_WAR
+#include <linux/spinlock.h>
+extern spinlock_t l2x0_reg_lock;
+#endif
+
+#define PCI_CFG_RETRY 10
-#define OS_HANDLE_MAGIC 0x1234abcd
-#define BCM_MEM_FILENAME_LEN 24
+#define OS_HANDLE_MAGIC 0x1234abcd /* Magic # to recognize osh */
+#define BCM_MEM_FILENAME_LEN 24 /* Mem. filename length */
+#define DUMPBUFSZ 1024
#ifdef CONFIG_DHD_USE_STATIC_BUF
-#define DHD_SKB_HDRSIZE 336
+#define DHD_SKB_HDRSIZE 336
#define DHD_SKB_1PAGE_BUFSIZE ((PAGE_SIZE*1)-DHD_SKB_HDRSIZE)
#define DHD_SKB_2PAGE_BUFSIZE ((PAGE_SIZE*2)-DHD_SKB_HDRSIZE)
#define DHD_SKB_4PAGE_BUFSIZE ((PAGE_SIZE*4)-DHD_SKB_HDRSIZE)
#else
#define STATIC_PKT_4PAGE_NUM 0
#define DHD_SKB_MAX_BUFSIZE DHD_SKB_2PAGE_BUFSIZE
-#endif
+#endif /* ENHANCED_STATIC_BUF */
typedef struct bcm_static_pkt {
struct sk_buff *skb_4k[STATIC_PKT_MAX_NUM];
} bcm_static_pkt_t;
static bcm_static_pkt_t *bcm_static_skb = 0;
-#endif
+
+void* wifi_platform_prealloc(void *adapter, int section, unsigned long size);
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
typedef struct bcm_mem_link {
struct bcm_mem_link *prev;
char file[BCM_MEM_FILENAME_LEN];
} bcm_mem_link_t;
+struct osl_cmn_info {
+ atomic_t malloced;
+ atomic_t pktalloced; /* Number of allocated packet buffers */
+ spinlock_t dbgmem_lock;
+ bcm_mem_link_t *dbgmem_list;
+ spinlock_t pktalloc_lock;
+ atomic_t refcount; /* Number of references to this shared structure. */
+};
+typedef struct osl_cmn_info osl_cmn_t;
+
struct osl_info {
osl_pubinfo_t pub;
#ifdef CTFPOOL
ctfpool_t *ctfpool;
-#endif
+#endif /* CTFPOOL */
uint magic;
void *pdev;
- atomic_t malloced;
- atomic_t pktalloced;
uint failed;
uint bustype;
- bcm_mem_link_t *dbgmem_list;
- spinlock_t dbgmem_lock;
+	osl_cmn_t *cmn; /* Common OSL-related data shared between two OSHs */
+
+ void *bus_handle;
#ifdef BCMDBG_CTRACE
spinlock_t ctrace_lock;
struct list_head ctrace_list;
int ctrace_num;
-#endif
- spinlock_t pktalloc_lock;
+#endif /* BCMDBG_CTRACE */
+ uint32 flags; /* If specific cases to be handled in the OSL */
};
#define OSL_PKTTAG_CLEAR(p) \
*(uint32 *)(&s->cb[24]) = 0; *(uint32 *)(&s->cb[28]) = 0; \
} while (0)
+/* PCMCIA attribute space access macros */
-
-
-uint32 g_assert_type = FALSE;
+/* Global ASSERT type flag */
+uint32 g_assert_type = 0;
+module_param(g_assert_type, int, 0);
static int16 linuxbcmerrormap[] =
-{ 0,
- -EINVAL,
- -EINVAL,
- -EINVAL,
- -EINVAL,
- -EINVAL,
- -EINVAL,
- -EINVAL,
- -EINVAL,
- -EINVAL,
- -EINVAL,
- -EINVAL,
- -EINVAL,
- -EINVAL,
- -E2BIG,
- -E2BIG,
- -EBUSY,
- -EINVAL,
- -EINVAL,
- -EINVAL,
- -EINVAL,
- -EFAULT,
- -ENOMEM,
- -EOPNOTSUPP,
- -EMSGSIZE,
- -EINVAL,
- -EPERM,
- -ENOMEM,
- -EINVAL,
- -ERANGE,
- -EINVAL,
- -EINVAL,
- -EINVAL,
- -EINVAL,
- -EINVAL,
- -EIO,
- -ENODEV,
- -EINVAL,
- -EIO,
- -EIO,
- -ENODEV,
- -EINVAL,
- -ENODATA,
- -EINVAL,
- -EINVAL,
- -EIO,
- -EIO,
+{ 0, /* 0 */
+ -EINVAL, /* BCME_ERROR */
+ -EINVAL, /* BCME_BADARG */
+ -EINVAL, /* BCME_BADOPTION */
+ -EINVAL, /* BCME_NOTUP */
+ -EINVAL, /* BCME_NOTDOWN */
+ -EINVAL, /* BCME_NOTAP */
+ -EINVAL, /* BCME_NOTSTA */
+ -EINVAL, /* BCME_BADKEYIDX */
+ -EINVAL, /* BCME_RADIOOFF */
+ -EINVAL, /* BCME_NOTBANDLOCKED */
+ -EINVAL, /* BCME_NOCLK */
+ -EINVAL, /* BCME_BADRATESET */
+ -EINVAL, /* BCME_BADBAND */
+ -E2BIG, /* BCME_BUFTOOSHORT */
+ -E2BIG, /* BCME_BUFTOOLONG */
+ -EBUSY, /* BCME_BUSY */
+ -EINVAL, /* BCME_NOTASSOCIATED */
+ -EINVAL, /* BCME_BADSSIDLEN */
+ -EINVAL, /* BCME_OUTOFRANGECHAN */
+ -EINVAL, /* BCME_BADCHAN */
+ -EFAULT, /* BCME_BADADDR */
+ -ENOMEM, /* BCME_NORESOURCE */
+ -EOPNOTSUPP, /* BCME_UNSUPPORTED */
+ -EMSGSIZE, /* BCME_BADLENGTH */
+ -EINVAL, /* BCME_NOTREADY */
+ -EPERM, /* BCME_EPERM */
+ -ENOMEM, /* BCME_NOMEM */
+ -EINVAL, /* BCME_ASSOCIATED */
+ -ERANGE, /* BCME_RANGE */
+ -EINVAL, /* BCME_NOTFOUND */
+ -EINVAL, /* BCME_WME_NOT_ENABLED */
+ -EINVAL, /* BCME_TSPEC_NOTFOUND */
+ -EINVAL, /* BCME_ACM_NOTSUPPORTED */
+ -EINVAL, /* BCME_NOT_WME_ASSOCIATION */
+ -EIO, /* BCME_SDIO_ERROR */
+ -ENODEV, /* BCME_DONGLE_DOWN */
+ -EINVAL, /* BCME_VERSION */
+ -EIO, /* BCME_TXFAIL */
+ -EIO, /* BCME_RXFAIL */
+ -ENODEV, /* BCME_NODEVICE */
+ -EINVAL, /* BCME_NMODE_DISABLED */
+ -ENODATA, /* BCME_NONRESIDENT */
+ -EINVAL, /* BCME_SCANREJECT */
+ -EINVAL, /* BCME_USAGE_ERROR */
+ -EIO, /* BCME_IOCTL_ERROR */
+ -EIO, /* BCME_SERIAL_PORT_ERR */
+ -EOPNOTSUPP, /* BCME_DISABLED, BCME_NOTENABLED */
+ -EIO, /* BCME_DECERR */
+ -EIO, /* BCME_ENCERR */
+ -EIO, /* BCME_MICERR */
+ -ERANGE, /* BCME_REPLAY */
+ -EINVAL, /* BCME_IE_NOTFOUND */
+
+/* When a new error code is added to bcmutils.h, add an OS-specific
+ * error translation here as well.
+ */
+/* check if BCME_LAST changed since the last time this function was updated */
+#if BCME_LAST != -52
+#error "You need to add a OS error translation in the linuxbcmerrormap \
+ for new error code defined in bcmutils.h"
+#endif
+};
+#if 1
+static inline void *
+pci_alloc_consistent1(struct pci_dev *hwdev, size_t size,
+ dma_addr_t *dma_handle)
+{
+ return dma_alloc_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, dma_handle, GFP_ATOMIC);
+}
-};
+static inline dma_addr_t
+pci_map_single1(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
+{
+ return dma_map_single(hwdev == NULL ? NULL : &hwdev->dev, ptr, size, (enum dma_data_direction)direction);
+}
+static inline void
+pci_free_consistent1(struct pci_dev *hwdev, size_t size,
+ void *vaddr, dma_addr_t dma_handle)
+{
+ dma_free_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, vaddr, dma_handle);
+}
+static inline void
+pci_unmap_single1(struct pci_dev *hwdev, dma_addr_t dma_addr,
+ size_t size, int direction)
+{
+ dma_unmap_single(hwdev == NULL ? NULL : &hwdev->dev, dma_addr, size, (enum dma_data_direction)direction);
+}
+#endif
+
+/* translate bcmerrors into linux errors */
int
osl_error(int bcmerror)
{
else if (bcmerror < BCME_LAST)
bcmerror = BCME_ERROR;
-
+ /* Array bounds covered by ASSERT in osl_attach */
return linuxbcmerrormap[-bcmerror];
}
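osl_error() maps the negative BCME_* codes from bcmutils.h to Linux errno values by indexing linuxbcmerrormap with -bcmerror, clamping unknown codes to BCME_ERROR. A standalone illustration of the same indexing, using a two-entry excerpt of the table (demo_* names are hypothetical, not the driver's symbols):

#include <errno.h>
#include <stdio.h>

/* BCME codes are 0 or negative; the map is indexed by -bcmerror.
 * BCME_LAST == -52 matches the compile-time check above.
 */
enum { BCME_OK = 0, BCME_ERROR = -1, BCME_LAST = -52 };

static const int demo_errormap[] = { 0, -EINVAL /* BCME_ERROR */ };

static int demo_osl_error(int bcmerror)
{
	if (bcmerror > 0)
		bcmerror = 0;
	else if (bcmerror < BCME_LAST)
		bcmerror = BCME_ERROR;      /* clamp unknown codes */
	/* the real map has ABS(BCME_LAST)+1 entries; this demo has only 2 */
	if (-bcmerror >= (int)(sizeof(demo_errormap) / sizeof(demo_errormap[0])))
		return -EINVAL;
	return demo_errormap[-bcmerror];
}

int main(void)
{
	printf("%d\n", demo_osl_error(BCME_ERROR));  /* prints -EINVAL (-22 on Linux) */
	return 0;
}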
-
-extern uint8* dhd_os_prealloc(void *osh, int section, int size);
-
+#ifdef SHARED_OSL_CMN
+osl_t *
+osl_attach(void *pdev, uint bustype, bool pkttag, void **osl_cmn)
+{
+#else
osl_t *
osl_attach(void *pdev, uint bustype, bool pkttag)
{
+ void **osl_cmn = NULL;
+#endif /* SHARED_OSL_CMN */
osl_t *osh;
gfp_t flags;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
- flags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
-#else
- flags = GFP_ATOMIC;
-#endif
+ flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
if (!(osh = kmalloc(sizeof(osl_t), flags)))
return osh;
bzero(osh, sizeof(osl_t));
-
+ if (osl_cmn == NULL || *osl_cmn == NULL) {
+ if (!(osh->cmn = kmalloc(sizeof(osl_cmn_t), flags))) {
+ kfree(osh);
+ return NULL;
+ }
+ bzero(osh->cmn, sizeof(osl_cmn_t));
+ if (osl_cmn)
+ *osl_cmn = osh->cmn;
+ atomic_set(&osh->cmn->malloced, 0);
+ osh->cmn->dbgmem_list = NULL;
+ spin_lock_init(&(osh->cmn->dbgmem_lock));
+
+ spin_lock_init(&(osh->cmn->pktalloc_lock));
+
+ } else {
+ osh->cmn = *osl_cmn;
+ }
+ atomic_add(1, &osh->cmn->refcount);
+
+ /* Check that error map has the right number of entries in it */
ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(linuxbcmerrormap) - 1));
- osh->magic = OS_HANDLE_MAGIC;
- atomic_set(&osh->malloced, 0);
osh->failed = 0;
- osh->dbgmem_list = NULL;
- spin_lock_init(&(osh->dbgmem_lock));
osh->pdev = pdev;
osh->pub.pkttag = pkttag;
osh->bustype = bustype;
+ osh->magic = OS_HANDLE_MAGIC;
switch (bustype) {
case PCI_BUS:
break;
}
-#if defined(CONFIG_DHD_USE_STATIC_BUF)
- if (!bcm_static_buf) {
- if (!(bcm_static_buf = (bcm_static_buf_t *)dhd_os_prealloc(osh, 3, STATIC_BUF_SIZE+
- STATIC_BUF_TOTAL_LEN))) {
- printk("can not alloc static buf!\n");
- bcm_static_skb = NULL;
- ASSERT(osh->magic == OS_HANDLE_MAGIC);
- kfree(osh);
- return NULL;
- }
- else
- printk("alloc static buf at %x!\n", (unsigned int)bcm_static_buf);
+#ifdef BCMDBG_CTRACE
+ spin_lock_init(&osh->ctrace_lock);
+ INIT_LIST_HEAD(&osh->ctrace_list);
+ osh->ctrace_num = 0;
+#endif /* BCMDBG_CTRACE */
- sema_init(&bcm_static_buf->static_sem, 1);
+ return osh;
+}
- bcm_static_buf->buf_ptr = (unsigned char *)bcm_static_buf + STATIC_BUF_SIZE;
- }
+int osl_static_mem_init(osl_t *osh, void *adapter)
+{
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+ if (!bcm_static_buf && adapter) {
+ if (!(bcm_static_buf = (bcm_static_buf_t *)wifi_platform_prealloc(adapter,
+ 3, STATIC_BUF_SIZE + STATIC_BUF_TOTAL_LEN))) {
+ printk("can not alloc static buf!\n");
+ bcm_static_skb = NULL;
+ ASSERT(osh->magic == OS_HANDLE_MAGIC);
+ kfree(osh);
+ return -ENOMEM;
+ }
+ else
+ printk("alloc static buf at %x!\n", (unsigned int)bcm_static_buf);
- if (!bcm_static_skb) {
- int i;
- void *skb_buff_ptr = 0;
- bcm_static_skb = (bcm_static_pkt_t *)((char *)bcm_static_buf + 2048);
- skb_buff_ptr = dhd_os_prealloc(osh, 4, 0);
- if (!skb_buff_ptr) {
- printk("cannot alloc static buf!\n");
- bcm_static_buf = NULL;
- bcm_static_skb = NULL;
- ASSERT(osh->magic == OS_HANDLE_MAGIC);
- kfree(osh);
- return NULL;
+
+ sema_init(&bcm_static_buf->static_sem, 1);
+
+ bcm_static_buf->buf_ptr = (unsigned char *)bcm_static_buf + STATIC_BUF_SIZE;
}
- bcopy(skb_buff_ptr, bcm_static_skb, sizeof(struct sk_buff *) *
- (STATIC_PKT_MAX_NUM * 2 + STATIC_PKT_4PAGE_NUM));
- for (i = 0; i < STATIC_PKT_MAX_NUM * 2 + STATIC_PKT_4PAGE_NUM; i++)
- bcm_static_skb->pkt_use[i] = 0;
+#ifdef BCMSDIO
+ if (!bcm_static_skb && adapter) {
+ int i;
+ void *skb_buff_ptr = 0;
+ bcm_static_skb = (bcm_static_pkt_t *)((char *)bcm_static_buf + 2048);
+ skb_buff_ptr = wifi_platform_prealloc(adapter, 4, 0);
+ if (!skb_buff_ptr) {
+ printk("cannot alloc static buf!\n");
+ bcm_static_buf = NULL;
+ bcm_static_skb = NULL;
+ ASSERT(osh->magic == OS_HANDLE_MAGIC);
+ kfree(osh);
+ return -ENOMEM;
+ }
- sema_init(&bcm_static_skb->osl_pkt_sem, 1);
- }
-#endif
+ bcopy(skb_buff_ptr, bcm_static_skb, sizeof(struct sk_buff *) *
+ (STATIC_PKT_MAX_NUM * 2 + STATIC_PKT_4PAGE_NUM));
+ for (i = 0; i < STATIC_PKT_MAX_NUM * 2 + STATIC_PKT_4PAGE_NUM; i++)
+ bcm_static_skb->pkt_use[i] = 0;
-#ifdef BCMDBG_CTRACE
- spin_lock_init(&osh->ctrace_lock);
- INIT_LIST_HEAD(&osh->ctrace_list);
- osh->ctrace_num = 0;
-#endif
+ sema_init(&bcm_static_skb->osl_pkt_sem, 1);
+ }
+#endif /* BCMSDIO */
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
- spin_lock_init(&(osh->pktalloc_lock));
+ return 0;
+}
- return osh;
+void osl_set_bus_handle(osl_t *osh, void *bus_handle)
+{
+ osh->bus_handle = bus_handle;
+}
+
+void* osl_get_bus_handle(osl_t *osh)
+{
+ return osh->bus_handle;
}
void
if (osh == NULL)
return;
-#ifdef CONFIG_DHD_USE_STATIC_BUF
- if (bcm_static_buf) {
- bcm_static_buf = 0;
- }
- if (bcm_static_skb) {
- bcm_static_skb = 0;
- }
-#endif
-
ASSERT(osh->magic == OS_HANDLE_MAGIC);
+ atomic_sub(1, &osh->cmn->refcount);
+ if (atomic_read(&osh->cmn->refcount) == 0) {
+ kfree(osh->cmn);
+ }
kfree(osh);
}
+int osl_static_mem_deinit(osl_t *osh, void *adapter)
+{
+#ifdef CONFIG_DHD_USE_STATIC_BUF
+ if (bcm_static_buf) {
+ bcm_static_buf = 0;
+ }
+#ifdef BCMSDIO
+ if (bcm_static_skb) {
+ bcm_static_skb = 0;
+ }
+#endif /* BCMSDIO */
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+ return 0;
+}
+
static struct sk_buff *osl_alloc_skb(osl_t *osh, unsigned int len)
{
struct sk_buff *skb;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
gfp_t flags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
-
+#if defined(CONFIG_SPARSEMEM) && defined(CONFIG_ZONE_DMA)
+ flags |= GFP_ATOMIC;
+#endif
skb = __dev_alloc_skb(len, flags);
#else
skb = dev_alloc_skb(len);
-#endif
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) */
return skb;
}
#else
#define CTFPOOL_LOCK(ctfpool, flags) spin_lock_bh(&(ctfpool)->lock)
#define CTFPOOL_UNLOCK(ctfpool, flags) spin_unlock_bh(&(ctfpool)->lock)
-#endif
-
+#endif /* CTFPOOL_SPINLOCK */
+/*
+ * Allocate and add an object to packet pool.
+ */
void *
osl_ctfpool_add(osl_t *osh)
{
struct sk_buff *skb;
#ifdef CTFPOOL_SPINLOCK
unsigned long flags;
-#endif
+#endif /* CTFPOOL_SPINLOCK */
if ((osh == NULL) || (osh->ctfpool == NULL))
return NULL;
CTFPOOL_LOCK(osh->ctfpool, flags);
ASSERT(osh->ctfpool->curr_obj <= osh->ctfpool->max_obj);
-
+ /* No need to allocate more objects */
if (osh->ctfpool->curr_obj == osh->ctfpool->max_obj) {
CTFPOOL_UNLOCK(osh->ctfpool, flags);
return NULL;
}
-
+ /* Allocate a new skb and add it to the ctfpool */
skb = osl_alloc_skb(osh, osh->ctfpool->obj_size);
if (skb == NULL) {
printf("%s: skb alloc of len %d failed\n", __FUNCTION__,
return NULL;
}
-
+ /* Add to ctfpool */
skb->next = (struct sk_buff *)osh->ctfpool->head;
osh->ctfpool->head = skb;
osh->ctfpool->fast_frees++;
osh->ctfpool->curr_obj++;
-
+ /* Hijack a skb member to store ptr to ctfpool */
CTFPOOLPTR(osh, skb) = (void *)osh->ctfpool;
-
+ /* Use bit flag to indicate skb from fast ctfpool */
PKTFAST(osh, skb) = FASTBUF;
CTFPOOL_UNLOCK(osh->ctfpool, flags);
return skb;
}
-
+/*
+ * Add new objects to the pool.
+ */
void
osl_ctfpool_replenish(osl_t *osh, uint thresh)
{
if ((osh == NULL) || (osh->ctfpool == NULL))
return;
-
+ /* Do nothing if no refills are required */
while ((osh->ctfpool->refills > 0) && (thresh--)) {
osl_ctfpool_add(osh);
osh->ctfpool->refills--;
}
}
-
+/*
+ * Initialize the packet pool with specified number of objects.
+ */
int32
osl_ctfpool_init(osl_t *osh, uint numobj, uint size)
{
gfp_t flags;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
- flags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
-#else
- flags = GFP_ATOMIC;
-#endif
+ flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
osh->ctfpool = kzalloc(sizeof(ctfpool_t), flags);
ASSERT(osh->ctfpool);
return 0;
}
-
+/*
+ * Cleanup the packet pool objects.
+ */
void
osl_ctfpool_cleanup(osl_t *osh)
{
struct sk_buff *skb, *nskb;
#ifdef CTFPOOL_SPINLOCK
unsigned long flags;
-#endif
+#endif /* CTFPOOL_SPINLOCK */
if ((osh == NULL) || (osh->ctfpool == NULL))
return;
if (bcm_static_buf) {
bcm_static_buf = 0;
}
+#ifdef BCMSDIO
if (bcm_static_skb) {
bcm_static_skb = 0;
}
-#endif
+#endif /* BCMSDIO */
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
bb = b;
struct sk_buff *skb;
#ifdef CTFPOOL_SPINLOCK
unsigned long flags;
-#endif
+#endif /* CTFPOOL_SPINLOCK */
-
+ /* Try to do fast allocate. Return null if ctfpool is not in use
+ * or if there are no items in the ctfpool.
+ */
if (osh->ctfpool == NULL)
return NULL;
return NULL;
}
+ if (len > osh->ctfpool->obj_size) {
+ CTFPOOL_UNLOCK(osh->ctfpool, flags);
+ return NULL;
+ }
+
ASSERT(len <= osh->ctfpool->obj_size);
-
+ /* Get an object from ctfpool */
skb = (struct sk_buff *)osh->ctfpool->head;
osh->ctfpool->head = (void *)skb->next;
ASSERT(CTFPOOLHEAD(osh, skb) == (struct sock *)osh->ctfpool->head);
CTFPOOL_UNLOCK(osh->ctfpool, flags);
-
+ /* Init skb struct */
skb->next = skb->prev = NULL;
#if defined(__ARM_ARCH_7A__)
skb->data = skb->head + NET_SKB_PAD;
#else
skb->data = skb->head + 16;
skb->tail = skb->head + 16;
-#endif
+#endif /* __ARM_ARCH_7A__ */
skb->len = 0;
skb->cloned = 0;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14)
PKTSETCLINK(skb, NULL);
PKTCCLRATTR(skb);
+ PKTFAST(osh, skb) &= ~(CTFBUF | SKIPCT | CHAINED);
return skb;
}
+#endif /* CTFPOOL */
+
+#if defined(BCM_GMAC3)
+/* Account for a packet delivered to downstream forwarder.
+ * Decrement a GMAC forwarder interface's pktalloced count.
+ */
+void BCMFASTPATH
+osl_pkt_tofwder(osl_t *osh, void *skbs, int skb_cnt)
+{
+
+ atomic_sub(skb_cnt, &osh->cmn->pktalloced);
+}
+
+/* Account for a downstream forwarder delivered packet to a WL/DHD driver.
+ * Increment a GMAC forwarder interface's pktalloced count.
+ */
+#ifdef BCMDBG_CTRACE
+void BCMFASTPATH
+osl_pkt_frmfwder(osl_t *osh, void *skbs, int skb_cnt, int line, char *file)
+#else
+void BCMFASTPATH
+osl_pkt_frmfwder(osl_t *osh, void *skbs, int skb_cnt)
+#endif /* BCMDBG_CTRACE */
+{
+#if defined(BCMDBG_CTRACE)
+ int i;
+ struct sk_buff *skb;
#endif
+#if defined(BCMDBG_CTRACE)
+ if (skb_cnt > 1) {
+ struct sk_buff **skb_array = (struct sk_buff **)skbs;
+ for (i = 0; i < skb_cnt; i++) {
+ skb = skb_array[i];
+#if defined(BCMDBG_CTRACE)
+ ASSERT(!PKTISCHAINED(skb));
+ ADD_CTRACE(osh, skb, file, line);
+#endif /* BCMDBG_CTRACE */
+ }
+ } else {
+ skb = (struct sk_buff *)skbs;
+#if defined(BCMDBG_CTRACE)
+ ASSERT(!PKTISCHAINED(skb));
+ ADD_CTRACE(osh, skb, file, line);
+#endif /* BCMDBG_CTRACE */
+ }
+#endif
+
+ atomic_add(skb_cnt, &osh->cmn->pktalloced);
+}
+
+#endif /* BCM_GMAC3 */
+
+/* Convert a driver packet to native(OS) packet
+ * In the process, packettag is zeroed out before sending up
+ * IP code depends on skb->cb to be setup correctly with various options
+ * In our case, that means it should be 0
+ */
struct sk_buff * BCMFASTPATH
osl_pkt_tonative(osl_t *osh, void *pkt)
{
if (osh->pub.pkttag)
OSL_PKTTAG_CLEAR(pkt);
-
+ /* Decrement the packet counter */
for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) {
- atomic_sub(PKTISCHAINED(nskb) ? PKTCCNT(nskb) : 1, &osh->pktalloced);
+ atomic_sub(PKTISCHAINED(nskb) ? PKTCCNT(nskb) : 1, &osh->cmn->pktalloced);
#ifdef BCMDBG_CTRACE
for (nskb1 = nskb; nskb1 != NULL; nskb1 = nskb2) {
DEL_CTRACE(osh, nskb1);
}
-#endif
+#endif /* BCMDBG_CTRACE */
}
return (struct sk_buff *)pkt;
}
-
+/* Convert a native(OS) packet to driver packet.
+ * In the process, native packet is destroyed, there is no copying
+ * Also, a packettag is zeroed out
+ */
#ifdef BCMDBG_CTRACE
void * BCMFASTPATH
osl_pkt_frmnative(osl_t *osh, void *pkt, int line, char *file)
#else
void * BCMFASTPATH
osl_pkt_frmnative(osl_t *osh, void *pkt)
-#endif
+#endif /* BCMDBG_CTRACE */
{
struct sk_buff *nskb;
#ifdef BCMDBG_CTRACE
if (osh->pub.pkttag)
OSL_PKTTAG_CLEAR(pkt);
-
+ /* Increment the packet counter */
for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) {
- atomic_add(PKTISCHAINED(nskb) ? PKTCCNT(nskb) : 1, &osh->pktalloced);
+ atomic_add(PKTISCHAINED(nskb) ? PKTCCNT(nskb) : 1, &osh->cmn->pktalloced);
#ifdef BCMDBG_CTRACE
for (nskb1 = nskb; nskb1 != NULL; nskb1 = nskb2) {
ADD_CTRACE(osh, nskb1, file, line);
}
-#endif
+#endif /* BCMDBG_CTRACE */
}
return (void *)pkt;
}
-
+/* Return a new packet. zero out pkttag */
#ifdef BCMDBG_CTRACE
void * BCMFASTPATH
osl_pktget(osl_t *osh, uint len, int line, char *file)
#else
void * BCMFASTPATH
osl_pktget(osl_t *osh, uint len)
-#endif
+#endif /* BCMDBG_CTRACE */
{
struct sk_buff *skb;
#ifdef CTFPOOL
-
+ /* Allocate from local pool */
skb = osl_pktfastget(osh, len);
if ((skb != NULL) || ((skb = osl_alloc_skb(osh, len)) != NULL)) {
-#else
+#else /* CTFPOOL */
if ((skb = osl_alloc_skb(osh, len))) {
-#endif
+#endif /* CTFPOOL */
skb->tail += len;
skb->len += len;
skb->priority = 0;
#ifdef BCMDBG_CTRACE
ADD_CTRACE(osh, skb, file, line);
#endif
- atomic_inc(&osh->pktalloced);
+ atomic_inc(&osh->cmn->pktalloced);
}
return ((void*) skb);
ctfpool_t *ctfpool;
#ifdef CTFPOOL_SPINLOCK
unsigned long flags;
-#endif
+#endif /* CTFPOOL_SPINLOCK */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
skb->tstamp.tv.sec = 0;
skb->stamp.tv_sec = 0;
#endif
-
+ /* We only need to init the fields that we change */
skb->dev = NULL;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
skb->dst = NULL;
ctfpool = (ctfpool_t *)CTFPOOLPTR(osh, skb);
ASSERT(ctfpool != NULL);
-
+ /* Add object to the ctfpool */
CTFPOOL_LOCK(ctfpool, flags);
skb->next = (struct sk_buff *)ctfpool->head;
ctfpool->head = (void *)skb;
ASSERT(ctfpool->curr_obj <= ctfpool->max_obj);
CTFPOOL_UNLOCK(ctfpool, flags);
}
-#endif
-
+#endif /* CTFPOOL */
+/* Free the driver packet. Free the tag if present */
void BCMFASTPATH
osl_pktfree(osl_t *osh, void *p, bool send)
{
struct sk_buff *skb, *nskb;
-
if (osh == NULL)
- {
- printk("%s: osh == NULL \n", __FUNCTION__);
return;
- }
skb = (struct sk_buff*) p;
PKTDBG_TRACE(osh, (void *) skb, PKTLIST_PKTFREE);
-
+ /* perversion: we use skb->next to chain multi-skb packets */
while (skb) {
nskb = skb->next;
skb->next = NULL;
} else
#endif
{
- if (skb->destructor)
-
- dev_kfree_skb_any(skb);
- else
-
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
}
#ifdef CTFPOOL
next_skb:
#endif
- atomic_dec(&osh->pktalloced);
+ atomic_dec(&osh->cmn->pktalloced);
skb = nskb;
}
}
struct sk_buff *skb;
if (len > DHD_SKB_MAX_BUFSIZE) {
- //printk("%s: attempt to allocate huge packet (0x%x)\n", __FUNCTION__, len);
+ printk("%s: attempt to allocate huge packet (0x%x)\n", __FUNCTION__, len);
return osl_pktget(osh, len);
}
up(&bcm_static_skb->osl_pkt_sem);
osl_pktfree(osh, p, send);
}
-#endif
-
-int osh_pktpadtailroom(osl_t *osh, void* p, int pad)
-{
- int err;
- int ntail;
- struct sk_buff* skb = (struct sk_buff*)p;
-
- ntail = skb->data_len + pad - (skb->end - skb->tail);
- if (likely(skb_cloned(skb) || ntail > 0)) {
- err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
- if (unlikely(err))
- goto done;
- }
-
- err = skb_linearize(skb);
- if (unlikely(err))
- goto done;
-
- memset(skb->data + skb->len, 0, pad);
-
-done:
- return err;
-}
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
uint32
osl_pci_read_config(osl_t *osh, uint offset, uint size)
ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
-
+ /* only 4byte access supported */
ASSERT(size == 4);
do {
ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
-
+ /* only 4byte access supported */
ASSERT(size == 4);
do {
}
-
+/* return bus # for the pci device pointed by osh->pdev */
uint
osl_pci_bus(osl_t *osh)
{
ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
+#if defined(__ARM_ARCH_7A__) && LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35)
+ return pci_domain_nr(((struct pci_dev *)osh->pdev)->bus);
+#else
return ((struct pci_dev *)osh->pdev)->bus->number;
+#endif
}
-
+/* return slot # for the pci device pointed by osh->pdev */
uint
osl_pci_slot(osl_t *osh)
{
#endif
}
+/* return domain # for the pci device pointed by osh->pdev */
+uint
+osl_pcie_domain(osl_t *osh)
+{
+ ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
+
+ return pci_domain_nr(((struct pci_dev *)osh->pdev)->bus);
+}
+
+/* return bus # for the pci device pointed by osh->pdev */
+uint
+osl_pcie_bus(osl_t *osh)
+{
+ ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
+
+ return ((struct pci_dev *)osh->pdev)->bus->number;
+}
+/* return the pci device pointed by osh->pdev */
struct pci_dev *
osl_pci_device(osl_t *osh)
{
void *addr;
gfp_t flags;
-
+ /* only ASSERT if osh is defined */
if (osh)
ASSERT(osh->magic == OS_HANDLE_MAGIC);
-
#ifdef CONFIG_DHD_USE_STATIC_BUF
if (bcm_static_buf)
{
bzero(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i, size);
if (osh)
- atomic_add(size, &osh->malloced);
+ atomic_add(size, &osh->cmn->malloced);
return ((void *)(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i));
}
}
original:
-#endif
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
- flags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
-#else
- flags = GFP_ATOMIC;
-#endif
+ flags = CAN_SLEEP() ? GFP_KERNEL: GFP_ATOMIC;
if ((addr = kmalloc(size, flags)) == NULL) {
if (osh)
osh->failed++;
return (NULL);
}
- if (osh)
- atomic_add(size, &osh->malloced);
+ if (osh && osh->cmn)
+ atomic_add(size, &osh->cmn->malloced);
return (addr);
}
+void *
+osl_mallocz(osl_t *osh, uint size)
+{
+ void *ptr;
+
+ ptr = osl_malloc(osh, size);
+
+ if (ptr != NULL) {
+ bzero(ptr, size);
+ }
+
+ return ptr;
+}
+
void
osl_mfree(osl_t *osh, void *addr, uint size)
{
bcm_static_buf->buf_use[buf_idx] = 0;
up(&bcm_static_buf->static_sem);
- if (osh) {
+ if (osh && osh->cmn) {
ASSERT(osh->magic == OS_HANDLE_MAGIC);
- atomic_sub(size, &osh->malloced);
+ atomic_sub(size, &osh->cmn->malloced);
}
return;
}
}
-#endif
- if (osh) {
+#endif /* CONFIG_DHD_USE_STATIC_BUF */
+ if (osh && osh->cmn) {
ASSERT(osh->magic == OS_HANDLE_MAGIC);
- atomic_sub(size, &osh->malloced);
+
+ ASSERT(size <= osl_malloced(osh));
+
+ atomic_sub(size, &osh->cmn->malloced);
}
kfree(addr);
}
+uint
+osl_check_memleak(osl_t *osh)
+{
+ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+ if (atomic_read(&osh->cmn->refcount) == 1)
+ return (atomic_read(&osh->cmn->malloced));
+ else
+ return 0;
+}
+
uint
osl_malloced(osl_t *osh)
{
ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
- return (atomic_read(&osh->malloced));
+ return (atomic_read(&osh->cmn->malloced));
}
uint
}
void*
-osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, uint *alloced, ulong *pap)
+osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, uint *alloced, dmaaddr_t *pap)
{
void *va;
uint16 align = (1 << align_bits);
size += align;
*alloced = size;
-#ifdef __ARM_ARCH_7A__
+#if defined(BCM47XX_CA9) && defined(__ARM_ARCH_7A__)
va = kmalloc(size, GFP_ATOMIC | __GFP_ZERO);
if (va)
*pap = (ulong)__virt_to_phys((ulong)va);
#else
- va = pci_alloc_consistent(osh->pdev, size, (dma_addr_t*)pap);
-#endif
+ {
+ dma_addr_t pap_lin;
+ va = pci_alloc_consistent1(osh->pdev, size, &pap_lin);
+ *pap = (dmaaddr_t)pap_lin;
+ }
+#endif /* BCM47XX_CA9 && __ARM_ARCH_7A__ */
return va;
}
void
-osl_dma_free_consistent(osl_t *osh, void *va, uint size, ulong pa)
+osl_dma_free_consistent(osl_t *osh, void *va, uint size, dmaaddr_t pa)
{
ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
-#ifdef __ARM_ARCH_7A__
+#if defined(BCM47XX_CA9) && defined(__ARM_ARCH_7A__)
kfree(va);
#else
- pci_free_consistent(osh->pdev, size, va, (dma_addr_t)pa);
-#endif
+ pci_free_consistent1(osh->pdev, size, va, (dma_addr_t)pa);
+#endif /* BCM47XX_CA9 && __ARM_ARCH_7A__ */
}
-uint BCMFASTPATH
+dmaaddr_t BCMFASTPATH
osl_dma_map(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *dmah)
{
int dir;
+#ifdef BCM47XX_ACP_WAR
+ uint pa;
+#endif
ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
if (dmah != NULL) {
int32 nsegs, i, totsegs = 0, totlen = 0;
struct scatterlist *sg, _sg[MAX_DMA_SEGS * 2];
+#ifdef BCM47XX_ACP_WAR
+ struct scatterlist *s;
+#endif
struct sk_buff *skb;
for (skb = (struct sk_buff *)p; skb != NULL; skb = PKTNEXT(osh, skb)) {
sg = &_sg[totsegs];
if (skb_is_nonlinear(skb)) {
nsegs = skb_to_sgvec(skb, sg, 0, PKTLEN(osh, skb));
ASSERT((nsegs > 0) && (totsegs + nsegs <= MAX_DMA_SEGS));
+#ifdef BCM47XX_ACP_WAR
+ for_each_sg(sg, s, nsegs, i) {
+ if (sg_phys(s) >= ACP_WIN_LIMIT) {
+ dma_map_page(&((struct pci_dev *)osh->pdev)->dev,
+ sg_page(s), s->offset, s->length, dir);
+ }
+ }
+#else
pci_map_sg(osh->pdev, sg, nsegs, dir);
+#endif
} else {
nsegs = 1;
ASSERT(totsegs + nsegs <= MAX_DMA_SEGS);
sg->page_link = 0;
sg_set_buf(sg, PKTDATA(osh, skb), PKTLEN(osh, skb));
- pci_map_single(osh->pdev, PKTDATA(osh, skb), PKTLEN(osh, skb), dir);
+#ifdef BCM47XX_ACP_WAR
+ if (virt_to_phys(PKTDATA(osh, skb)) >= ACP_WIN_LIMIT)
+#endif
+ pci_map_single1(osh->pdev, PKTDATA(osh, skb), PKTLEN(osh, skb), dir);
}
totsegs += nsegs;
totlen += PKTLEN(osh, skb);
}
return dmah->segs[0].addr;
}
-#endif
+#endif /* __ARM_ARCH_7A__ && BCMDMASGLISTOSL */
- return (pci_map_single(osh->pdev, va, size, dir));
+#ifdef BCM47XX_ACP_WAR
+ pa = virt_to_phys(va);
+ if (pa < ACP_WIN_LIMIT)
+ return (pa);
+#endif
+ return (pci_map_single1(osh->pdev, va, size, dir));
}
void BCMFASTPATH
int dir;
ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+#ifdef BCM47XX_ACP_WAR
+ if (pa < ACP_WIN_LIMIT)
+ return;
+#endif
dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
- pci_unmap_single(osh->pdev, (uint32)pa, size, dir);
+ pci_unmap_single1(osh->pdev, (uint32)pa, size, dir);
+}
+
+
+#if defined(BCM47XX_CA9) && defined(__ARM_ARCH_7A__)
+
+inline void BCMFASTPATH
+osl_cache_flush(void *va, uint size)
+{
+#ifdef BCM47XX_ACP_WAR
+ if (virt_to_phys(va) < ACP_WIN_LIMIT)
+ return;
+#endif
+ if (size > 0)
+ dma_sync_single_for_device(OSH_NULL, virt_to_dma(OSH_NULL, va), size, DMA_TX);
+}
+
+inline void BCMFASTPATH
+osl_cache_inv(void *va, uint size)
+{
+#ifdef BCM47XX_ACP_WAR
+ if (virt_to_phys(va) < ACP_WIN_LIMIT)
+ return;
+#endif
+ dma_sync_single_for_cpu(OSH_NULL, virt_to_dma(OSH_NULL, va), size, DMA_RX);
}
+inline void osl_prefetch(const void *ptr)
+{
+ /* Borrowed from linux/linux-2.6/include/asm-arm/processor.h */
+ __asm__ __volatile__(
+ "pld\t%0"
+ :
+ : "o" (*(char *)ptr)
+ : "cc");
+}
+
+int osl_arch_is_coherent(void)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)
+ return 0;
+#else
+ return arch_is_coherent();
+#endif
+}
+#endif
+
#if defined(BCMASSERT_LOG)
void
osl_assert(const char *exp, const char *file, int line)
const char *basename;
basename = strrchr(file, '/');
-
+ /* skip the '/' */
if (basename)
basename++;
snprintf(tempbuf, 64, "\"%s\": file \"%s\", line %d\n",
exp, basename, line);
printk("%s", tempbuf);
-#endif
+#endif /* BCMASSERT_LOG */
}
-
+/* Clone a packet.
+ * The pkttag contents are NOT cloned.
+ */
#ifdef BCMDBG_CTRACE
void *
osl_pktdup(osl_t *osh, void *skb, int line, char *file)
#else
void *
osl_pktdup(osl_t *osh, void *skb)
-#endif
+#endif /* BCMDBG_CTRACE */
{
void * p;
ASSERT(!PKTISCHAINED(skb));
-
+ /* clear the CTFBUF flag if set and map the rest of the buffer
+ * before cloning.
+ */
PKTCTFMAP(osh, skb);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
if (PKTISFAST(osh, skb)) {
ctfpool_t *ctfpool;
-
+	/* If the buffer allocated from the ctfpool is cloned then
+	 * we can't be sure when it will be freed. Since there
+	 * is a chance that we will be losing a buffer
+	 * from our pool, we increment the refill count for the
+	 * object to be allocated later.
+	 */
ctfpool = (ctfpool_t *)CTFPOOLPTR(osh, skb);
ASSERT(ctfpool != NULL);
PKTCLRFAST(osh, p);
PKTCLRFAST(osh, skb);
ctfpool->refills++;
}
-#endif
+#endif /* CTFPOOL */
-
+ /* Clear PKTC context */
PKTSETCLINK(p, NULL);
PKTCCLRFLAGS(p);
PKTCSETCNT(p, 1);
PKTCSETLEN(p, PKTLEN(osh, skb));
-
+ /* skb_clone copies skb->cb.. we don't want that */
if (osh->pub.pkttag)
OSL_PKTTAG_CLEAR(p);
-
- atomic_inc(&osh->pktalloced);
+ /* Increment the packet counter */
+ atomic_inc(&osh->cmn->pktalloced);
#ifdef BCMDBG_CTRACE
ADD_CTRACE(osh, (struct sk_buff *)p, file, line);
#endif
return;
}
-#endif
-
-
+#endif /* BCMDBG_CTRACE */
+/*
+ * OSLREGOPS specifies the use of osl_XXX routines to be used for register access
+ */
+/*
+ * BINOSL selects the slightly slower function-call-based binary compatible osl.
+ */
uint
osl_pktalloced(osl_t *osh)
{
- return (atomic_read(&osh->pktalloced));
+ if (atomic_read(&osh->cmn->refcount) == 1)
+ return (atomic_read(&osh->cmn->pktalloced));
+ else
+ return 0;
}
+uint32
+osl_rand(void)
+{
+ uint32 rand;
+ get_random_bytes(&rand, sizeof(rand));
+
+ return rand;
+}
+
+/* Linux Kernel: File Operations: start */
void *
osl_os_open_image(char *filename)
{
struct file *fp;
fp = filp_open(filename, O_RDONLY, 0);
-
+ /*
+ * 2.6.11 (FC4) supports filp_open() but later revs don't?
+ * Alternative:
+ * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
+ * ???
+ */
if (IS_ERR(fp))
fp = NULL;
int len = 0, curroffset;
if (image) {
-
+ /* store the current offset */
curroffset = generic_file_llseek(image, 0, 1);
-
+ /* goto end of file to get length */
len = generic_file_llseek(image, 0, 2);
-
+ /* restore back the offset */
generic_file_llseek(image, curroffset, 0);
}
return len;
}
+
+/* Linux Kernel: File Operations: end */
+
+#ifdef BCM47XX_ACP_WAR
+inline void osl_pcie_rreg(osl_t *osh, ulong addr, void *v, uint size)
+{
+ uint32 flags;
+ int pci_access = 0;
+
+ if (osh && BUSTYPE(osh->bustype) == PCI_BUS)
+ pci_access = 1;
+
+ if (pci_access)
+ spin_lock_irqsave(&l2x0_reg_lock, flags);
+ switch (size) {
+ case sizeof(uint8):
+ *(uint8*)v = readb((volatile uint8*)(addr));
+ break;
+ case sizeof(uint16):
+ *(uint16*)v = readw((volatile uint16*)(addr));
+ break;
+ case sizeof(uint32):
+ *(uint32*)v = readl((volatile uint32*)(addr));
+ break;
+ case sizeof(uint64):
+ *(uint64*)v = *((volatile uint64*)(addr));
+ break;
+ }
+ if (pci_access)
+ spin_unlock_irqrestore(&l2x0_reg_lock, flags);
+}
+#endif /* BCM47XX_ACP_WAR */
+
+/* APIs to set/get specific quirks in OSL layer */
+void
+osl_flag_set(osl_t *osh, uint32 mask)
+{
+ osh->flags |= mask;
+}
+
+bool
+osl_is_flag_set(osl_t *osh, uint32 mask)
+{
+ return (osh->flags & mask);
+}
--- /dev/null
+/** @file pcie_core.c
+ *
+ * Contains PCIe related functions that are shared between different driver models (e.g. firmware
+ * builds, DHD builds, BMAC builds), in order to avoid code duplication.
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: pcie_core.c 444841 2013-12-21 04:32:29Z $
+ */
+
+#include <bcm_cfg.h>
+#include <typedefs.h>
+#include <bcmutils.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include <siutils.h>
+#include <hndsoc.h>
+#include <sbchipc.h>
+
+#include "pcie_core.h"
+
+/* local prototypes */
+
+/* local variables */
+
+/* function definitions */
+
+#ifdef BCMDRIVER
+
+void pcie_watchdog_reset(osl_t *osh, si_t *sih, sbpcieregs_t *sbpcieregs)
+{
+ uint32 val, i, lsc;
+ uint16 cfg_offset[] = {PCIECFGREG_STATUS_CMD, PCIECFGREG_PM_CSR,
+ PCIECFGREG_MSI_CAP, PCIECFGREG_MSI_ADDR_L,
+ PCIECFGREG_MSI_ADDR_H, PCIECFGREG_MSI_DATA,
+ PCIECFGREG_LINK_STATUS_CTRL2, PCIECFGREG_RBAR_CTRL,
+ PCIECFGREG_PML1_SUB_CTRL1, PCIECFGREG_REG_BAR2_CONFIG,
+ PCIECFGREG_REG_BAR3_CONFIG};
+ sbpcieregs_t *pcie = NULL;
+ uint32 origidx = si_coreidx(sih);
+
+ /* Switch to PCIE2 core */
+ pcie = (sbpcieregs_t *)si_setcore(sih, PCIE2_CORE_ID, 0);
+ BCM_REFERENCE(pcie);
+ ASSERT(pcie != NULL);
+
+ /* Disable/restore ASPM Control to protect the watchdog reset */
+ W_REG(osh, &sbpcieregs->configaddr, PCIECFGREG_LINK_STATUS_CTRL);
+ lsc = R_REG(osh, &sbpcieregs->configdata);
+ val = lsc & (~PCIE_ASPM_ENAB);
+ W_REG(osh, &sbpcieregs->configdata, val);
+
+ si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, watchdog), ~0, 4);
+ OSL_DELAY(100000);
+
+ W_REG(osh, &sbpcieregs->configaddr, PCIECFGREG_LINK_STATUS_CTRL);
+ W_REG(osh, &sbpcieregs->configdata, lsc);
+
+	/* Write configuration registers back to the shadow registers
+	 * because shadow registers are cleared out after a watchdog reset.
+	 */
+ for (i = 0; i < ARRAYSIZE(cfg_offset); i++) {
+ W_REG(osh, &sbpcieregs->configaddr, cfg_offset[i]);
+ val = R_REG(osh, &sbpcieregs->configdata);
+ W_REG(osh, &sbpcieregs->configdata, val);
+ }
+ si_setcoreidx(sih, origidx);
+}
+
+#endif /* BCMDRIVER */
*/
-const char RKWIFI_DRV_VERSION[] = "4.80.WFD.OOB";
+const char RKWIFI_DRV_VERSION[] = "5.00.WFD.OOB.64bit";
const char WIFI_MODULE_NAME[] = "";
const char CONFIG_BCMDHD_FW_PATH[] = "";
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: sbutils.c 379512 2013-01-17 22:49:08Z $
+ * $Id: sbutils.c 467150 2014-04-02 17:30:43Z $
*/
#include <bcm_cfg.h>
uint ncores);
static uint32 _sb_coresba(si_info_t *sii);
static void *_sb_setcoreidx(si_info_t *sii, uint coreidx);
-
#define SET_SBREG(sii, r, mask, val) \
W_SBREG((sii), (r), ((R_SBREG((sii), (r)) & ~(mask)) | (val)))
#define REGS2SB(va) (sbconfig_t*) ((int8*)(va) + SBCONFIGOFF)
static uint32
sb_read_sbreg(si_info_t *sii, volatile uint32 *sbr)
{
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
uint8 tmp;
uint32 val, intr_val = 0;
static void
sb_write_sbreg(si_info_t *sii, volatile uint32 *sbr, uint32 v)
{
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
uint8 tmp;
volatile uint32 dummy;
uint32 intr_val = 0;
uint
sb_intflag(si_t *sih)
{
- si_info_t *sii;
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
void *corereg;
sbconfig_t *sb;
uint origidx, intflag, intr_val = 0;
- sii = SI_INFO(sih);
-
INTR_OFF(sii, intr_val);
origidx = si_coreidx(sih);
corereg = si_setcore(sih, CC_CORE_ID, 0);
_sb_coreidx(si_info_t *sii, uint32 sba)
{
uint i;
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
for (i = 0; i < sii->numcores; i ++)
- if (sba == sii->coresba[i])
+ if (sba == cores_info->coresba[i])
return i;
return BADIDX;
}
break;
}
+#ifdef BCMSDIO
case SPI_BUS:
case SDIO_BUS:
sbaddr = (uint32)(uintptr)sii->curmap;
break;
+#endif
default:
uint w;
uint intr_val = 0;
bool fast = FALSE;
- si_info_t *sii;
-
- sii = SI_INFO(sih);
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
ASSERT(GOODIDX(coreidx));
ASSERT(regoff < SI_CORE_SIZE);
/* If internal bus, we can always get at everything */
fast = TRUE;
/* map if does not exist */
- if (!sii->regs[coreidx]) {
- sii->regs[coreidx] = REG_MAP(sii->coresba[coreidx],
+ if (!cores_info->regs[coreidx]) {
+ cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
SI_CORE_SIZE);
- ASSERT(GOODREGS(sii->regs[coreidx]));
+ ASSERT(GOODREGS(cores_info->regs[coreidx]));
}
- r = (uint32 *)((uchar *)sii->regs[coreidx] + regoff);
+ r = (uint32 *)((uchar *)cores_info->regs[coreidx] + regoff);
} else if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
/* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
- if ((sii->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
+ if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
/* Chipc registers are mapped at 12KB */
fast = TRUE;
return (w);
}
+/*
+ * If there is no need for fiddling with interrupts or core switches (typically silicon
+ * backplane registers, PCI registers and chipcommon registers), this function
+ * returns a mapped address for the given register offset on this core. This address
+ * can be used with W_REG/R_REG directly.
+ *
+ * For accessing registers that would need a core switch, this function returns
+ * NULL.
+ */
+uint32 *
+sb_corereg_addr(si_t *sih, uint coreidx, uint regoff)
+{
+ uint32 *r = NULL;
+ bool fast = FALSE;
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+ ASSERT(GOODIDX(coreidx));
+ ASSERT(regoff < SI_CORE_SIZE);
+
+ if (coreidx >= SI_MAXCORES)
+ return 0;
+
+ if (BUSTYPE(sii->pub.bustype) == SI_BUS) {
+ /* If internal bus, we can always get at everything */
+ fast = TRUE;
+ /* map if does not exist */
+ if (!cores_info->regs[coreidx]) {
+ cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
+ SI_CORE_SIZE);
+ ASSERT(GOODREGS(cores_info->regs[coreidx]));
+ }
+ r = (uint32 *)((uchar *)cores_info->regs[coreidx] + regoff);
+ } else if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
+ /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
+
+ if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
+ /* Chipc registers are mapped at 12KB */
+
+ fast = TRUE;
+ r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff);
+ } else if (sii->pub.buscoreidx == coreidx) {
+ /* pci registers are at either in the last 2KB of an 8KB window
+ * or, in pcie and pci rev 13 at 8KB
+ */
+ fast = TRUE;
+ if (SI_FAST(sii))
+ r = (uint32 *)((char *)sii->curmap +
+ PCI_16KB0_PCIREGS_OFFSET + regoff);
+ else
+ r = (uint32 *)((char *)sii->curmap +
+ ((regoff >= SBCONFIGOFF) ?
+ PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
+ regoff);
+ }
+ }
+
+ if (!fast)
+ return 0;
+
+ return (r);
+}
+
/* Scan the enumeration space to find all cores starting from the given
* bus 'sbba'. Append coreid and other info to the lists in 'si'. 'sba'
* is the default core address at chip POR time and 'regs' is the virtual
uint next;
uint ncc = 0;
uint i;
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
if (bus >= SB_MAXBUSES) {
SI_ERROR(("_sb_scan: bus 0x%08x at level %d is too deep to scan\n", sbba, bus));
* Core addresses must be contiguous on each bus.
*/
for (i = 0, next = sii->numcores; i < numcores && next < SB_BUS_MAXCORES; i++, next++) {
- sii->coresba[next] = sbba + (i * SI_CORE_SIZE);
+ cores_info->coresba[next] = sbba + (i * SI_CORE_SIZE);
/* keep and reuse the initial register mapping */
- if ((BUSTYPE(sii->pub.bustype) == SI_BUS) && (sii->coresba[next] == sba)) {
+ if ((BUSTYPE(sii->pub.bustype) == SI_BUS) && (cores_info->coresba[next] == sba)) {
SI_VMSG(("_sb_scan: reuse mapped regs %p for core %u\n", regs, next));
- sii->regs[next] = regs;
+ cores_info->regs[next] = regs;
}
/* change core to 'next' and read its coreid */
sii->curmap = _sb_setcoreidx(sii, next);
sii->curidx = next;
- sii->coreid[next] = sb_coreid(&sii->pub);
+ cores_info->coreid[next] = sb_coreid(&sii->pub);
/* core specific processing... */
/* chipc provides # cores */
- if (sii->coreid[next] == CC_CORE_ID) {
+ if (cores_info->coreid[next] == CC_CORE_ID) {
chipcregs_t *cc = (chipcregs_t *)sii->curmap;
uint32 ccrev = sb_corerev(&sii->pub);
sii->pub.issim ? "QT" : ""));
}
/* scan bridged SB(s) and add results to the end of the list */
- else if (sii->coreid[next] == OCP_CORE_ID) {
+ else if (cores_info->coreid[next] == OCP_CORE_ID) {
sbconfig_t *sb = REGS2SB(sii->curmap);
uint32 nsbba = R_SBREG(sii, &sb->sbadmatch1);
uint nsbcc;
void
sb_scan(si_t *sih, void *regs, uint devid)
{
- si_info_t *sii;
uint32 origsba;
sbconfig_t *sb;
+ si_info_t *sii = SI_INFO(sih);
- sii = SI_INFO(sih);
sb = REGS2SB(sii->curmap);
sii->pub.socirev = (R_SBREG(sii, &sb->sbidlow) & SBIDL_RV_MASK) >> SBIDL_RV_SHIFT;
void *
sb_setcoreidx(si_t *sih, uint coreidx)
{
- si_info_t *sii;
-
- sii = SI_INFO(sih);
+ si_info_t *sii = SI_INFO(sih);
if (coreidx >= sii->numcores)
return (NULL);
static void *
_sb_setcoreidx(si_info_t *sii, uint coreidx)
{
- uint32 sbaddr = sii->coresba[coreidx];
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+ uint32 sbaddr = cores_info->coresba[coreidx];
void *regs;
switch (BUSTYPE(sii->pub.bustype)) {
case SI_BUS:
/* map new one */
- if (!sii->regs[coreidx]) {
- sii->regs[coreidx] = REG_MAP(sbaddr, SI_CORE_SIZE);
- ASSERT(GOODREGS(sii->regs[coreidx]));
+ if (!cores_info->regs[coreidx]) {
+ cores_info->regs[coreidx] = REG_MAP(sbaddr, SI_CORE_SIZE);
+ ASSERT(GOODREGS(cores_info->regs[coreidx]));
}
- regs = sii->regs[coreidx];
+ regs = cores_info->regs[coreidx];
break;
case PCI_BUS:
regs = sii->curmap;
break;
}
+#ifdef BCMSDIO
case SPI_BUS:
case SDIO_BUS:
/* map new one */
- if (!sii->regs[coreidx]) {
- sii->regs[coreidx] = (void *)(uintptr)sbaddr;
- ASSERT(GOODREGS(sii->regs[coreidx]));
+ if (!cores_info->regs[coreidx]) {
+ cores_info->regs[coreidx] = (void *)(uintptr)sbaddr;
+ ASSERT(GOODREGS(cores_info->regs[coreidx]));
}
- regs = sii->regs[coreidx];
+ regs = cores_info->regs[coreidx];
break;
+#endif /* BCMSDIO */
default:
void
sb_commit(si_t *sih)
{
- si_info_t *sii;
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
uint origidx;
uint intr_val = 0;
- sii = SI_INFO(sih);
-
origidx = sii->curidx;
ASSERT(GOODIDX(origidx));
uint32
sb_set_initiator_to(si_t *sih, uint32 to, uint idx)
{
- si_info_t *sii;
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
uint origidx;
uint intr_val = 0;
uint32 tmp, ret = 0xffffffff;
sbconfig_t *sb;
- sii = SI_INFO(sih);
if ((to & ~TO_MASK) != 0)
return ret;
idx = SI_CC_IDX;
break;
case PCMCIA_BUS:
+#ifdef BCMSDIO
case SDIO_BUS:
+#endif
idx = si_findcoreidx(sih, PCMCIA_CORE_ID, 0);
break;
case SI_BUS:
return (size);
}
+
+#if defined(BCMDBG_PHYDUMP)
+/* print interesting sbconfig registers */
+void
+sb_dumpregs(si_t *sih, struct bcmstrbuf *b)
+{
+ sbconfig_t *sb;
+ uint origidx, i, intr_val = 0;
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+ origidx = sii->curidx;
+
+ INTR_OFF(sii, intr_val);
+
+ for (i = 0; i < sii->numcores; i++) {
+ sb = REGS2SB(sb_setcoreidx(sih, i));
+
+ bcm_bprintf(b, "core 0x%x: \n", cores_info->coreid[i]);
+
+ if (sii->pub.socirev > SONICS_2_2)
+ bcm_bprintf(b, "sbimerrlog 0x%x sbimerrloga 0x%x\n",
+ sb_corereg(sih, si_coreidx(&sii->pub), SBIMERRLOG, 0, 0),
+ sb_corereg(sih, si_coreidx(&sii->pub), SBIMERRLOGA, 0, 0));
+
+ bcm_bprintf(b, "sbtmstatelow 0x%x sbtmstatehigh 0x%x sbidhigh 0x%x "
+ "sbimstate 0x%x\n sbimconfiglow 0x%x sbimconfighigh 0x%x\n",
+ R_SBREG(sii, &sb->sbtmstatelow), R_SBREG(sii, &sb->sbtmstatehigh),
+ R_SBREG(sii, &sb->sbidhigh), R_SBREG(sii, &sb->sbimstate),
+ R_SBREG(sii, &sb->sbimconfiglow), R_SBREG(sii, &sb->sbimconfighigh));
+ }
+
+ sb_setcoreidx(sih, origidx);
+ INTR_RESTORE(sii, intr_val);
+}
+#endif
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: siutils.c 414368 2013-07-24 15:00:23Z $
+ * $Id: siutils.c 481602 2014-05-29 22:43:34Z $
*/
#include <bcm_cfg.h>
#include <pcicfg.h>
#include <sbpcmcia.h>
#include <sbsocram.h>
+#ifdef BCMSDIO
#include <bcmsdh.h>
#include <sdio.h>
#include <sbsdio.h>
#include <sbhnddma.h>
#include <sbsdpcmdev.h>
#include <bcmsdpcm.h>
+#endif /* BCMSDIO */
#include <hndpmu.h>
#include <dhd_config.h>
+#ifdef BCM_SDRBL
+#include <hndcpu.h>
+#endif /* BCM_SDRBL */
+#ifdef HNDGCI
+#include <hndgci.h>
+#endif /* HNDGCI */
+
#include "siutils_priv.h"
+/**
+ * A set of PMU registers is clocked in the ILP domain, which has an implication on register write
+ * behavior: if such a register is written, it takes multiple ILP clocks for the PMU block to absorb
+ * the write. During that time the 'SlowWritePending' bit in the PMUStatus register is set.
+ */
+#define PMUREGS_ILP_SENSITIVE(regoff) \
+ ((regoff) == OFFSETOF(pmuregs_t, pmutimer) || \
+ (regoff) == OFFSETOF(pmuregs_t, pmuwatchdog) || \
+ (regoff) == OFFSETOF(pmuregs_t, res_req_timer))
+
+#define CHIPCREGS_ILP_SENSITIVE(regoff) \
+ ((regoff) == OFFSETOF(chipcregs_t, pmutimer) || \
+ (regoff) == OFFSETOF(chipcregs_t, pmuwatchdog) || \
+ (regoff) == OFFSETOF(chipcregs_t, res_req_timer))
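+/* Writes to these ILP-clocked registers must wait for the 'SlowWritePending' bit in
+ * PMUStatus to clear; si_pmu_corereg() below performs that wait before issuing the write.
+ */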
+
/* local prototypes */
static si_info_t *si_doattach(si_info_t *sii, uint devid, osl_t *osh, void *regs,
uint bustype, void *sdh, char **vars, uint *varsz);
uint *origidx, void *regs);
+static bool si_pmu_is_ilp_sensitive(uint32 idx, uint regoff);
+
+#ifdef BCMLTECOEX
+static void si_config_gcigpio(si_t *sih, uint32 gci_pos, uint8 gcigpio,
+ uint8 gpioctl_mask, uint8 gpioctl_val);
+#endif /* BCMLTECOEX */
+
/* global variable to indicate reservation/release of gpio's */
static uint32 si_gpioreservation = 0;
/* global flag to prevent shared resources from being initialized multiple times in si_attach() */
+#ifdef SR_DEBUG
+static const uint32 si_power_island_test_array[] = {
+ 0x0000, 0x0001, 0x0010, 0x0011,
+ 0x0100, 0x0101, 0x0110, 0x0111,
+ 0x1000, 0x1001, 0x1010, 0x1011,
+ 0x1100, 0x1101, 0x1110, 0x1111
+};
+#endif /* SR_DEBUG */
int do_4360_pcie2_war = 0;
-/*
- * Allocate a si handle.
+/* global kernel resource */
+static si_info_t ksii;
+static si_cores_info_t ksii_cores_info;
+
+/**
+ * Allocate an si handle. This function may be called multiple times.
+ *
* devid - pci device id (used to determine chip#)
* osh - opaque OS handle
* regs - virtual address of initial core registers
* bustype - pci/pcmcia/sb/sdio/etc
- * vars - pointer to a pointer area for "environment" variables
+ * vars - pointer to a to-be created pointer area for "environment" variables. Some callers of this
+ * function set 'vars' to NULL, making dereferencing of this parameter undesired.
* varsz - pointer to int to return the size of the vars
*/
si_t *
uint bustype, void *sdh, char **vars, uint *varsz)
{
si_info_t *sii;
-
+ si_cores_info_t *cores_info;
/* alloc si_info_t */
- if ((sii = MALLOC(osh, sizeof (si_info_t))) == NULL) {
+ if ((sii = MALLOCZ(osh, sizeof (si_info_t))) == NULL) {
SI_ERROR(("si_attach: malloc failed! malloced %d bytes\n", MALLOCED(osh)));
return (NULL);
}
+ /* alloc si_cores_info_t */
+ if ((cores_info = (si_cores_info_t *)MALLOCZ(osh, sizeof (si_cores_info_t))) == NULL) {
+ SI_ERROR(("si_attach: malloc failed! malloced %d bytes\n", MALLOCED(osh)));
+ MFREE(osh, sii, sizeof(si_info_t));
+ return (NULL);
+ }
+ sii->cores_info = cores_info;
+
if (si_doattach(sii, devid, osh, regs, bustype, sdh, vars, varsz) == NULL) {
MFREE(osh, sii, sizeof(si_info_t));
+ MFREE(osh, cores_info, sizeof(si_cores_info_t));
return (NULL);
}
sii->vars = vars ? *vars : NULL;
return (si_t *)sii;
}
-/* global kernel resource */
-static si_info_t ksii;
static uint32 wd_msticks; /* watchdog timer ticks normalized to ms */
-/* generic kernel variant of si_attach() */
+/** generic kernel variant of si_attach() */
si_t *
si_kattach(osl_t *osh)
{
static bool ksii_attached = FALSE;
+ si_cores_info_t *cores_info;
if (!ksii_attached) {
void *regs = NULL;
regs = REG_MAP(SI_ENUM_BASE, SI_CORE_SIZE);
+ cores_info = (si_cores_info_t *)&ksii_cores_info;
+ ksii.cores_info = cores_info;
ASSERT(osh);
if (si_doattach(&ksii, BCM4710_DEVICE_ID, osh, regs,
SI_BUS, NULL,
- osh != SI_OSH ? &ksii.vars : NULL,
- osh != SI_OSH ? &ksii.varsz : NULL) == NULL) {
+ osh != SI_OSH ? &(ksii.vars) : NULL,
+ osh != SI_OSH ? &(ksii.varsz) : NULL) == NULL) {
SI_ERROR(("si_kattach: si_doattach failed\n"));
REG_UNMAP(regs);
return NULL;
sii->memseg = TRUE;
+#if defined(BCMSDIO)
if (BUSTYPE(bustype) == SDIO_BUS) {
int err;
uint8 clkset;
bcmsdh_cfg_write(sdh, SDIO_FUNC_1, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
}
+#endif /* BCMSDIO */
return TRUE;
}
si_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype, uint32 savewin,
uint *origidx, void *regs)
{
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
bool pci, pcie, pcie_gen2 = FALSE;
uint i;
uint pciidx, pcieidx, pcirev, pcierev;
/* get pmu rev and caps */
if (sii->pub.cccaps & CC_CAP_PMU) {
- sii->pub.pmucaps = R_REG(sii->osh, &cc->pmucapabilities);
+ if (AOB_ENAB(&sii->pub)) {
+ uint pmucoreidx;
+ pmuregs_t *pmu;
+ pmucoreidx = si_findcoreidx(&sii->pub, PMU_CORE_ID, 0);
+ pmu = si_setcoreidx(&sii->pub, pmucoreidx);
+ sii->pub.pmucaps = R_REG(sii->osh, &pmu->pmucapabilities);
+ si_setcoreidx(&sii->pub, SI_CC_IDX);
+ } else
+ sii->pub.pmucaps = R_REG(sii->osh, &cc->pmucapabilities);
+
sii->pub.pmurev = sii->pub.pmucaps & PCAP_REV_MASK;
}
/* Display cores found */
SI_VMSG(("CORE[%d]: id 0x%x rev %d base 0x%x regs 0x%p\n",
- i, cid, crev, sii->coresba[i], sii->regs[i]));
+ i, cid, crev, cores_info->coresba[i], cores_info->regs[i]));
+
+ if (BUSTYPE(bustype) == SI_BUS) {
+ /* now look at the chipstatus register to figure out the package */
+ /* for SDIO but downloaded on PCIE dev */
+ if (cid == PCIE2_CORE_ID) {
+ if ((CHIPID(sii->pub.chip) == BCM43602_CHIP_ID) ||
+ ((CHIPID(sii->pub.chip) == BCM4345_CHIP_ID) &&
+ CST4345_CHIPMODE_PCIE(sii->pub.chipst))) {
+ pcieidx = i;
+ pcierev = crev;
+ pcie = TRUE;
+ pcie_gen2 = TRUE;
+ }
+ }
- if (BUSTYPE(bustype) == PCI_BUS) {
+ }
+ else if (BUSTYPE(bustype) == PCI_BUS) {
if (cid == PCI_CORE_ID) {
pciidx = i;
pcirev = crev;
sii->pub.buscoretype = cid;
sii->pub.buscoreidx = i;
}
+#ifdef BCMSDIO
else if (((BUSTYPE(bustype) == SDIO_BUS) ||
(BUSTYPE(bustype) == SPI_BUS)) &&
((cid == PCMCIA_CORE_ID) ||
sii->pub.buscoretype = cid;
sii->pub.buscoreidx = i;
}
+#endif /* BCMSDIO */
/* find the core idx before entering this func. */
- if ((savewin && (savewin == sii->coresba[i])) ||
- (regs == sii->regs[i]))
+ if ((savewin && (savewin == cores_info->coresba[i])) ||
+ (regs == cores_info->regs[i]))
*origidx = i;
}
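+ /* PCIE_FULL_DONGLE builds never select a legacy PCI core as the bus core */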
+#if defined(PCIE_FULL_DONGLE)
+ pci = FALSE;
+#endif
if (pci) {
sii->pub.buscoretype = PCI_CORE_ID;
sii->pub.buscorerev = pcirev;
OR_REG(sii->osh, &cc->slow_clk_ctl, SCC_SS_XTAL);
+#if defined(BCMSDIO)
/* Make sure any on-chip ARM is off (in case strapping is wrong), or downloaded code was
* already running.
*/
si_setcore(&sii->pub, ARMCM3_CORE_ID, 0))
si_core_disable(&sii->pub, 0);
}
+#endif /* BCMSDIO */
/* return to the original core */
si_setcoreidx(&sii->pub, *origidx);
+uint16
+si_chipid(si_t *sih)
+{
+ si_info_t *sii = SI_INFO(sih);
+
+ return (sii->chipnew) ? sii->chipnew : sih->chip;
+}
+
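+/* Map certain chip IDs onto the chip 'class' the rest of the driver checks against:
+ * sii->pub.chip is overwritten with the class while the original ID remains
+ * retrievable through si_chipid() via sii->chipnew.
+ */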
+static void
+si_chipid_fixup(si_t *sih)
+{
+ si_info_t *sii = SI_INFO(sih);
+
+ ASSERT(sii->chipnew == 0);
+ switch (sih->chip) {
+ case BCM43570_CHIP_ID:
+ case BCM4358_CHIP_ID:
+ sii->chipnew = sih->chip; /* save it */
+ sii->pub.chip = BCM43569_CHIP_ID; /* chip class */
+ break;
+ case BCM4356_CHIP_ID:
+ sii->chipnew = sih->chip; /* save it */
+ sii->pub.chip = BCM4354_CHIP_ID; /* chip class */
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+}
+
+/**
+ * Allocate an si handle. This function may be called multiple times.
+ *
+ * vars - pointer to a to-be created pointer area for "environment" variables. Some callers of this
+ * function set 'vars' to NULL.
+ */
static si_info_t *
si_doattach(si_info_t *sii, uint devid, osl_t *osh, void *regs,
uint bustype, void *sdh, char **vars, uint *varsz)
uint origidx;
#if !defined(_CFEZ_) || defined(CFG_WL)
#endif
- ASSERT(GOODREGS(regs));
- bzero((uchar*)sii, sizeof(si_info_t));
+ ASSERT(GOODREGS(regs));
savewin = 0;
sii->osh = osh;
+ /* check to see if we are a si core mimic'ing a pci core */
+ if ((bustype == PCI_BUS) &&
+ (OSL_PCI_READ_CONFIG(sii->osh, PCI_SPROM_CONTROL, sizeof(uint32)) == 0xffffffff)) {
+ SI_ERROR(("%s: incoming bus is PCI but it's a lie, switching to SI "
+ "devid:0x%x\n", __FUNCTION__, devid));
+ bustype = SI_BUS;
+ }
/* find Chipcommon address */
if (bustype == PCI_BUS) {
if (!regs)
return NULL;
cc = (chipcregs_t *)regs;
+#ifdef BCMSDIO
} else if ((bustype == SDIO_BUS) || (bustype == SPI_BUS)) {
cc = (chipcregs_t *)sii->curmap;
+#endif
} else {
cc = (chipcregs_t *)REG_MAP(SI_ENUM_BASE, SI_CORE_SIZE);
}
}
/* ChipID recognition.
- * We assume we can read chipid at offset 0 from the regs arg.
- * If we add other chiptypes (or if we need to support old sdio hosts w/o chipcommon),
- * some way of recognizing them needs to be added here.
- */
+ * We assume we can read chipid at offset 0 from the regs arg.
+ * If we add other chiptypes (or if we need to support old sdio hosts w/o chipcommon),
+ * some way of recognizing them needs to be added here.
+ */
if (!cc) {
SI_ERROR(("%s: chipcommon register space is null \n", __FUNCTION__));
return NULL;
dhd_conf_set_hw_oob_intr(sdh, sih->chip);
#endif
+ if ((sih->chip == BCM4358_CHIP_ID) ||
+ (sih->chip == BCM43570_CHIP_ID) ||
+ (sih->chip == BCM4356_CHIP_ID)) {
+ si_chipid_fixup(sih);
+ }
+
if ((CHIPID(sih->chip) == BCM4329_CHIP_ID) && (sih->chiprev == 0) &&
(sih->chippkg != BCM4329_289PIN_PKG_ID)) {
sih->chippkg = BCM4329_182PIN_PKG_ID;
}
#endif
+#ifdef BCM_SDRBL
+ /* 4360 ROM bootloader in the PCIE case: if SDR is enabled but protection is
+ * not turned on, then we want to hold the ARM in reset.
+ * Bottom line: in the sdrenable case, we allow the ARM to boot only when protection is
+ * turned on.
+ */
+ if (CHIP_HOSTIF_PCIE(&(sii->pub))) {
+ uint32 sflags = si_arm_sflags(&(sii->pub));
+
+ /* If SDR is enabled but protection is not turned on
+ * then we want to force arm to WFI.
+ */
+ if ((sflags & (SISF_SDRENABLE | SISF_TCMPROT)) == SISF_SDRENABLE) {
+ disable_arm_irq();
+ while (1) {
+ hnd_cpu_wait(sih);
+ }
+ }
+ }
+#endif /* BCM_SDRBL */
pvars = NULL;
BCM_REFERENCE(pvars);
/* clear any previous epidiag-induced target abort */
ASSERT(!si_taclear(sih, FALSE));
+
+#ifdef BOOTLOADER_CONSOLE_OUTPUT
+ /* Enable console prints */
+ si_muxenab(sii, 3);
+#endif
+
return (sii);
exit:
return NULL;
}
-/* may be called with core in reset */
+/** may be called with core in reset */
void
si_detach(si_t *sih)
{
- si_info_t *sii;
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
uint idx;
- sii = SI_INFO(sih);
-
- if (sii == NULL)
- return;
-
if (BUSTYPE(sih->bustype) == SI_BUS)
for (idx = 0; idx < SI_MAXCORES; idx++)
- if (sii->regs[idx]) {
- REG_UNMAP(sii->regs[idx]);
- sii->regs[idx] = NULL;
+ if (cores_info->regs[idx]) {
+ REG_UNMAP(cores_info->regs[idx]);
+ cores_info->regs[idx] = NULL;
}
+#if !defined(BCMBUSTYPE) || (BCMBUSTYPE == SI_BUS)
+ if (cores_info != &ksii_cores_info)
+#endif /* !BCMBUSTYPE || (BCMBUSTYPE == SI_BUS) */
+ MFREE(sii->osh, cores_info, sizeof(si_cores_info_t));
#if !defined(BCMBUSTYPE) || (BCMBUSTYPE == SI_BUS)
if (sii != &ksii)
sii->osh = osh;
}
-/* register driver interrupt disabling and restoring callback functions */
+/** register driver interrupt disabling and restoring callback functions */
void
si_register_intr_callback(si_t *sih, void *intrsoff_fn, void *intrsrestore_fn,
void *intrsenabled_fn, void *intr_arg)
{
- si_info_t *sii;
-
- sii = SI_INFO(sih);
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
sii->intr_arg = intr_arg;
sii->intrsoff_fn = (si_intrsoff_t)intrsoff_fn;
sii->intrsrestore_fn = (si_intrsrestore_t)intrsrestore_fn;
/* save current core id. when this function called, the current core
* must be the core which provides driver functions(il, et, wl, etc.)
*/
- sii->dev_coreid = sii->coreid[sii->curidx];
+ sii->dev_coreid = cores_info->coreid[sii->curidx];
}
void
sii = SI_INFO(sih);
sii->intrsoff_fn = NULL;
+ sii->intrsrestore_fn = NULL;
+ sii->intrsenabled_fn = NULL;
}
uint
uint
si_coreid(si_t *sih)
{
- si_info_t *sii;
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
- sii = SI_INFO(sih);
- return sii->coreid[sii->curidx];
+ return cores_info->coreid[sii->curidx];
}
uint
return sii->curidx;
}
-/* return the core-type instantiation # of the current core */
+void *
+si_d11_switch_addrbase(si_t *sih, uint coreunit)
+{
+ return si_setcore(sih, D11_CORE_ID, coreunit);
+}
+
+/** return the core-type instantiation # of the current core */
uint
si_coreunit(si_t *sih)
{
- si_info_t *sii;
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
uint idx;
uint coreid;
uint coreunit;
uint i;
- sii = SI_INFO(sih);
coreunit = 0;
idx = sii->curidx;
/* count the cores of our type */
for (i = 0; i < idx; i++)
- if (sii->coreid[i] == coreid)
+ if (cores_info->coreid[i] == coreid)
coreunit++;
return (coreunit);
}
}
+
/* return index of coreid or BADIDX if not found */
uint
si_findcoreidx(si_t *sih, uint coreid, uint coreunit)
{
- si_info_t *sii;
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
uint found;
uint i;
- sii = SI_INFO(sih);
found = 0;
for (i = 0; i < sii->numcores; i++)
- if (sii->coreid[i] == coreid) {
+ if (cores_info->coreid[i] == coreid) {
if (found == coreunit)
return (i);
found++;
return (BADIDX);
}
-/* return list of found cores */
+/** return total coreunit of coreid or zero if not found */
uint
-si_corelist(si_t *sih, uint coreid[])
+si_numcoreunits(si_t *sih, uint coreid)
{
- si_info_t *sii;
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+ uint found = 0;
+ uint i;
- sii = SI_INFO(sih);
+ for (i = 0; i < sii->numcores; i++) {
+ if (cores_info->coreid[i] == coreid) {
+ found++;
+ }
+ }
+
+ return found;
+}
+
+/** return total D11 coreunits */
+uint
+BCMRAMFN(si_numd11coreunits)(si_t *sih)
+{
+ uint found = 0;
- bcopy((uchar*)sii->coreid, (uchar*)coreid, (sii->numcores * sizeof(uint)));
+ found = si_numcoreunits(sih, D11_CORE_ID);
+
+#if defined(WLRSDB) && defined(WLRSDB_DISABLED)
+ /* If RSDB functionality is compiled out,
+ * then ignore any D11 cores beyond the first
+ * Used in norsdb dongle build variants for rsdb chip.
+ */
+ found = 1;
+#endif /* WLRSDB && WLRSDB_DISABLED */
+
+ return found;
+}
+
+/** return list of found cores */
+uint
+si_corelist(si_t *sih, uint coreid[])
+{
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+
+ bcopy((uchar*)cores_info->coreid, (uchar*)coreid, (sii->numcores * sizeof(uint)));
return (sii->numcores);
}
-/* return current wrapper mapping */
+/** return current wrapper mapping */
void *
si_wrapperregs(si_t *sih)
{
return (sii->curwrap);
}
-/* return current register mapping */
+/** return current register mapping */
void *
si_coreregs(si_t *sih)
{
return (sii->curmap);
}
-/*
+/**
* This function changes logical "focus" to the indicated core;
* must be called with interrupts off.
* Moreover, callers should keep interrupts off during switching out of and back to d11 core
}
}
-/* Turn off interrupt as required by sb_setcore, before switch core */
+/** Turn off interrupt as required by sb_setcore, before switch core */
void *
si_switch_core(si_t *sih, uint coreid, uint *origidx, uint *intr_val)
{
void *cc;
- si_info_t *sii;
-
- sii = SI_INFO(sih);
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
if (SI_FAST(sii)) {
/* Overloading the origidx variable to remember the coreid,
void
si_restore_core(si_t *sih, uint coreid, uint intr_val)
{
- si_info_t *sii;
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
- sii = SI_INFO(sih);
if (SI_FAST(sii) && ((coreid == CC_CORE_ID) || (coreid == sih->buscoretype)))
return;
}
}
+/** ILP sensitive register access needs special treatment to avoid backplane stalls */
+bool si_pmu_is_ilp_sensitive(uint32 idx, uint regoff)
+{
+ if (idx == SI_CC_IDX) {
+ if (CHIPCREGS_ILP_SENSITIVE(regoff))
+ return TRUE;
+ } else if (PMUREGS_ILP_SENSITIVE(regoff)) {
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+/** 'idx' should refer either to the chipcommon core or the PMU core */
+uint
+si_pmu_corereg(si_t *sih, uint32 idx, uint regoff, uint mask, uint val)
+{
+ int pmustatus_offset;
+
+ /* prevent backplane stall on double write to 'ILP domain' registers in the PMU */
+ if (mask != 0 && sih->pmurev >= 22 &&
+ si_pmu_is_ilp_sensitive(idx, regoff)) {
+ pmustatus_offset = AOB_ENAB(sih) ? OFFSETOF(pmuregs_t, pmustatus) :
+ OFFSETOF(chipcregs_t, pmustatus);
+
+ while (si_corereg(sih, idx, pmustatus_offset, 0, 0) & PST_SLOW_WR_PENDING)
+ {};
+ }
+
+ return si_corereg(sih, idx, regoff, mask, val);
+}
+
+/*
+ * If there is no need for fiddling with interrupts or core switches (typically silicon
+ * back plane registers, pci registers and chipcommon registers), this function
+ * translates the register offset on this core to a mapped address. This address can
+ * be used for W_REG/R_REG directly.
+ *
+ * For accessing registers that would need a core switch, this function will return
+ * NULL.
+ */
+uint32 *
+si_corereg_addr(si_t *sih, uint coreidx, uint regoff)
+{
+ if (CHIPTYPE(sih->socitype) == SOCI_SB)
+ return sb_corereg_addr(sih, coreidx, regoff);
+ else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NAI))
+ return ai_corereg_addr(sih, coreidx, regoff);
+ else {
+ return 0;
+ }
+}
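+/*
+ * Illustrative use (a sketch; 'osh' and 'val' are assumed locals, the register is one
+ * of the chipcommon registers named elsewhere in this file):
+ *
+ *    uint32 *addr = si_corereg_addr(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, pmustatus));
+ *    if (addr != NULL)
+ *        val = R_REG(osh, addr);
+ *
+ * A NULL return means the access needs a core switch and must go through si_corereg().
+ */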
+
void
si_core_disable(si_t *sih, uint32 bits)
{
ub_core_reset(sih, bits, resetbits);
}
-/* Run bist on current core. Caller needs to take care of core-specific bist hazards */
+/** Run bist on current core. Caller needs to take care of core-specific bist hazards */
int
si_corebist(si_t *sih)
{
}
}
-/* calculate the speed the SI would run at given a set of clockcontrol values */
+/** calculate the speed the SI would run at given a set of clockcontrol values */
uint32
si_clock_rate(uint32 pll_type, uint32 n, uint32 m)
{
}
}
+/**
+ * Some chips could have multiple host interfaces; however, only one will be active.
+ * For a given chip, depending on pkgopt and cc_chipst, return the active host interface.
+ */
+uint
+si_chip_hostif(si_t *sih)
+{
+ uint hosti = 0;
+
+ switch (CHIPID(sih->chip)) {
+
+ case BCM43602_CHIP_ID:
+ hosti = CHIP_HOSTIF_PCIEMODE;
+ break;
+
+ case BCM4360_CHIP_ID:
+ /* chippkg bit-0 == 0 is PCIE only pkgs
+ * chippkg bit-0 == 1 has both PCIE and USB cores enabled
+ */
+ if ((sih->chippkg & 0x1) && (sih->chipst & CST4360_MODE_USB))
+ hosti = CHIP_HOSTIF_USBMODE;
+ else
+ hosti = CHIP_HOSTIF_PCIEMODE;
+
+ break;
+
+ case BCM4335_CHIP_ID:
+ /* TBD: like in 4360, do we need to check pkg? */
+ if (CST4335_CHIPMODE_USB20D(sih->chipst))
+ hosti = CHIP_HOSTIF_USBMODE;
+ else if (CST4335_CHIPMODE_SDIOD(sih->chipst))
+ hosti = CHIP_HOSTIF_SDIOMODE;
+ else
+ hosti = CHIP_HOSTIF_PCIEMODE;
+ break;
+
+ case BCM4345_CHIP_ID:
+ if (CST4345_CHIPMODE_USB20D(sih->chipst) || CST4345_CHIPMODE_HSIC(sih->chipst))
+ hosti = CHIP_HOSTIF_USBMODE;
+ else if (CST4345_CHIPMODE_SDIOD(sih->chipst))
+ hosti = CHIP_HOSTIF_SDIOMODE;
+ else if (CST4345_CHIPMODE_PCIE(sih->chipst))
+ hosti = CHIP_HOSTIF_PCIEMODE;
+ break;
+
+ case BCM4349_CHIP_GRPID:
+ if (CST4349_CHIPMODE_SDIOD(sih->chipst))
+ hosti = CHIP_HOSTIF_SDIOMODE;
+ else if (CST4349_CHIPMODE_PCIE(sih->chipst))
+ hosti = CHIP_HOSTIF_PCIEMODE;
+ break;
+
+ case BCM4350_CHIP_ID:
+ case BCM4354_CHIP_ID:
+ case BCM4356_CHIP_ID:
+ case BCM43556_CHIP_ID:
+ case BCM43558_CHIP_ID:
+ case BCM43566_CHIP_ID:
+ case BCM43568_CHIP_ID:
+ case BCM43569_CHIP_ID:
+ case BCM43570_CHIP_ID:
+ case BCM4358_CHIP_ID:
+ if (CST4350_CHIPMODE_USB20D(sih->chipst) ||
+ CST4350_CHIPMODE_HSIC20D(sih->chipst) ||
+ CST4350_CHIPMODE_USB30D(sih->chipst) ||
+ CST4350_CHIPMODE_USB30D_WL(sih->chipst) ||
+ CST4350_CHIPMODE_HSIC30D(sih->chipst))
+ hosti = CHIP_HOSTIF_USBMODE;
+ else if (CST4350_CHIPMODE_SDIOD(sih->chipst))
+ hosti = CHIP_HOSTIF_SDIOMODE;
+ else if (CST4350_CHIPMODE_PCIE(sih->chipst))
+ hosti = CHIP_HOSTIF_PCIEMODE;
+ break;
+
+ default:
+ break;
+ }
+
+ return hosti;
+}
-/* set chip watchdog reset timer to fire in 'ticks' */
+/** set chip watchdog reset timer to fire in 'ticks' */
void
si_watchdog(si_t *sih, uint ticks)
{
else if (ticks > maxt)
ticks = maxt;
- si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, pmuwatchdog), ~0, ticks);
+ pmu_corereg(sih, SI_CC_IDX, pmuwatchdog, ~0, ticks);
} else {
maxt = (1 << 28) - 1;
if (ticks > maxt)
}
}
-/* trigger watchdog reset after ms milliseconds */
+/** trigger watchdog reset after ms milliseconds */
void
si_watchdog_ms(si_t *sih, uint32 ms)
{
-/* return the slow clock source - LPO, XTAL, or PCI */
+/** return the slow clock source - LPO, XTAL, or PCI */
static uint
si_slowclk_src(si_info_t *sii)
{
return (SCC_SS_XTAL);
}
-/* return the ILP (slowclock) min or max frequency */
+/** return the ILP (slowclock) min or max frequency */
static uint
si_slowclk_freq(si_info_t *sii, bool max_freq, chipcregs_t *cc)
{
W_REG(sii->osh, &cc->fref_sel_delay, fref_sel_delay);
}
-/* initialize power control delay registers */
+/** initialize power control delay registers */
void
si_clkctl_init(si_t *sih)
{
}
-/* change logical "focus" to the gpio core for optimized access */
+/** change logical "focus" to the gpio core for optimized access */
void *
si_gpiosetcore(si_t *sih)
{
return (si_setcoreidx(sih, SI_CC_IDX));
}
-/*
+/**
* mask & set gpiocontrol bits.
* If a gpiocontrol bit is set to 0, chipcommon controls the corresponding GPIO pin.
* If a gpiocontrol bit is set to 1, the GPIO pin is no longer a GPIO and becomes dedicated
return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
}
-/* mask&set gpio output enable bits */
+/** mask&set gpio output enable bits */
uint32
si_gpioouten(si_t *sih, uint32 mask, uint32 val, uint8 priority)
{
return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
}
-/* mask&set gpio output bits */
+/** mask&set gpio output bits */
uint32
si_gpioout(si_t *sih, uint32 mask, uint32 val, uint8 priority)
{
return (si_corereg(sih, SI_CC_IDX, regoff, mask, val));
}
-/* reserve one gpio */
+/** reserve one gpio */
uint32
si_gpioreserve(si_t *sih, uint32 gpio_bitmask, uint8 priority)
{
return si_gpioreservation;
}
-/* release one gpio */
-/*
+/**
+ * release one gpio.
+ *
+ * releasing the gpio doesn't change the current value on the GPIO; the last write value
- * persists till some one overwrites it
+ * persists till someone overwrites it.
*/
-
uint32
si_gpiorelease(si_t *sih, uint32 gpio_bitmask, uint8 priority)
{
si_gpio_handler_register(si_t *sih, uint32 event,
bool level, gpio_handler_t cb, void *arg)
{
- si_info_t *sii;
+ si_info_t *sii = SI_INFO(sih);
gpioh_item_t *gi;
ASSERT(event);
ASSERT(cb != NULL);
- sii = SI_INFO(sih);
if (sih->ccrev < 11)
return NULL;
void
si_gpio_handler_unregister(si_t *sih, void *gpioh)
{
- si_info_t *sii;
+ si_info_t *sii = SI_INFO(sih);
gpioh_item_t *p, *n;
- sii = SI_INFO(sih);
if (sih->ccrev < 11)
return;
void
si_gpio_handler_process(si_t *sih)
{
- si_info_t *sii;
+ si_info_t *sii = SI_INFO(sih);
gpioh_item_t *h;
uint32 level = si_gpioin(sih);
uint32 levelp = si_gpiointpolarity(sih, 0, 0, 0);
uint32 edge = si_gpioevent(sih, GPIO_REGEVT, 0, 0);
uint32 edgep = si_gpioevent(sih, GPIO_REGEVT_INTPOL, 0, 0);
- sii = SI_INFO(sih);
for (h = sii->gpioh_head; h != NULL; h = h->next) {
if (h->handler) {
uint32 status = (h->level ? level : edge) & h->event;
uint32 polarity = (h->level ? levelp : edgep) & h->event;
/* polarity bitval is opposite of status bitval */
- if (status ^ polarity)
+ if ((h->level && (status ^ polarity)) || (!h->level && status))
h->handler(status, h->arg);
}
}
}
-/* Return the size of the specified SOCRAM bank */
+/** Return the size of the specified SOCRAM bank */
static uint
socram_banksize(si_info_t *sii, sbsocramregs_t *regs, uint8 idx, uint8 mem_type)
{
return banksize;
}
+void si_socram_set_bankpda(si_t *sih, uint32 bankidx, uint32 bankpda)
+{
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+ uint origidx;
+ uint intr_val = 0;
+ sbsocramregs_t *regs;
+ bool wasup;
+ uint corerev;
+
+ /* Block ints and save current core */
+ INTR_OFF(sii, intr_val);
+ origidx = si_coreidx(sih);
+
+ /* Switch to SOCRAM core */
+ if (!(regs = si_setcore(sih, SOCRAM_CORE_ID, 0)))
+ goto done;
+
+ if (!(wasup = si_iscoreup(sih)))
+ si_core_reset(sih, 0, 0);
+
+ corerev = si_corerev(sih);
+ if (corerev >= 16) {
+ W_REG(sii->osh, &regs->bankidx, bankidx);
+ W_REG(sii->osh, &regs->bankpda, bankpda);
+ }
+
+ /* Return to previous state and core */
+ if (!wasup)
+ si_core_disable(sih, 0);
+ si_setcoreidx(sih, origidx);
+
+done:
+ INTR_RESTORE(sii, intr_val);
+}
+
void
si_socdevram(si_t *sih, bool set, uint8 *enable, uint8 *protect, uint8 *remap)
{
- si_info_t *sii;
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
uint origidx;
uint intr_val = 0;
sbsocramregs_t *regs;
bool wasup;
uint corerev;
- sii = SI_INFO(sih);
-
/* Block ints and save current core */
INTR_OFF(sii, intr_val);
origidx = si_coreidx(sih);
bool
si_socdevram_remap_isenb(si_t *sih)
{
- si_info_t *sii;
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
uint origidx;
uint intr_val = 0;
sbsocramregs_t *regs;
uint8 i;
uint32 bankidx, bankinfo;
- sii = SI_INFO(sih);
-
/* Block ints and save current core */
INTR_OFF(sii, intr_val);
origidx = si_coreidx(sih);
uint32
si_socdevram_size(si_t *sih)
{
- si_info_t *sii;
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
uint origidx;
uint intr_val = 0;
uint32 memsize = 0;
bool wasup;
uint corerev;
- sii = SI_INFO(sih);
-
/* Block ints and save current core */
INTR_OFF(sii, intr_val);
origidx = si_coreidx(sih);
uint32
si_socdevram_remap_size(si_t *sih)
{
- si_info_t *sii;
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
uint origidx;
uint intr_val = 0;
uint32 memsize = 0, banksz;
uint8 i;
uint32 bankidx, bankinfo;
- sii = SI_INFO(sih);
-
/* Block ints and save current core */
INTR_OFF(sii, intr_val);
origidx = si_coreidx(sih);
return memsize;
}
-/* Return the RAM size of the SOCRAM core */
+/** Return the RAM size of the SOCRAM core */
uint32
si_socram_size(si_t *sih)
{
- si_info_t *sii;
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
uint origidx;
uint intr_val = 0;
uint32 coreinfo;
uint memsize = 0;
- sii = SI_INFO(sih);
-
/* Block ints and save current core */
INTR_OFF(sii, intr_val);
origidx = si_coreidx(sih);
}
-/* Return the TCM-RAM size of the ARMCR4 core. */
+/** Return the TCM-RAM size of the ARMCR4 core. */
uint32
si_tcm_size(si_t *sih)
{
- si_info_t *sii;
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
uint origidx;
uint intr_val = 0;
uint8 *regs;
uint32 *arm_bidx;
uint32 *arm_binfo;
- sii = SI_INFO(sih);
-
/* Block ints and save current core */
INTR_OFF(sii, intr_val);
origidx = si_coreidx(sih);
return memsize;
}
+bool
+si_has_flops(si_t *sih)
+{
+ uint origidx, cr4_rev;
+
+ /* Find out CR4 core revision */
+ origidx = si_coreidx(sih);
+ if (si_setcore(sih, ARMCR4_CORE_ID, 0)) {
+ cr4_rev = si_corerev(sih);
+ si_setcoreidx(sih, origidx);
+
+ if (cr4_rev == 1 || cr4_rev >= 3)
+ return TRUE;
+ }
+ return FALSE;
+}
+
uint32
si_socram_srmem_size(si_t *sih)
{
- si_info_t *sii;
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
uint origidx;
uint intr_val = 0;
return (32 * 1024);
}
- sii = SI_INFO(sih);
+ if (CHIPID(sih->chip) == BCM43430_CHIP_ID) {
+ return (64 * 1024);
+ }
/* Block ints and save current core */
INTR_OFF(sii, intr_val);
void
si_btcgpiowar(si_t *sih)
{
- si_info_t *sii;
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
uint origidx;
uint intr_val = 0;
chipcregs_t *cc;
- sii = SI_INFO(sih);
-
/* Make sure that there is ChipCommon core present &&
* UART_TX is strapped to 1
*/
void
si_chipcontrl_btshd0_4331(si_t *sih, bool on)
{
- si_info_t *sii;
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
chipcregs_t *cc;
uint origidx;
uint32 val;
uint intr_val = 0;
- sii = SI_INFO(sih);
-
INTR_OFF(sii, intr_val);
origidx = si_coreidx(sih);
void
si_chipcontrl_restore(si_t *sih, uint32 val)
{
- si_info_t *sii;
+ si_info_t *sii = SI_INFO(sih);
chipcregs_t *cc;
- uint origidx;
+ uint origidx = si_coreidx(sih);
- sii = SI_INFO(sih);
- origidx = si_coreidx(sih);
cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
W_REG(sii->osh, &cc->chipcontrol, val);
si_setcoreidx(sih, origidx);
uint32
si_chipcontrl_read(si_t *sih)
{
- si_info_t *sii;
+ si_info_t *sii = SI_INFO(sih);
chipcregs_t *cc;
- uint origidx;
+ uint origidx = si_coreidx(sih);
uint32 val;
- sii = SI_INFO(sih);
- origidx = si_coreidx(sih);
cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
val = R_REG(sii->osh, &cc->chipcontrol);
si_setcoreidx(sih, origidx);
void
si_chipcontrl_epa4331(si_t *sih, bool on)
{
- si_info_t *sii;
+ si_info_t *sii = SI_INFO(sih);
chipcregs_t *cc;
- uint origidx;
+ uint origidx = si_coreidx(sih);
uint32 val;
- sii = SI_INFO(sih);
- origidx = si_coreidx(sih);
-
cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
-
val = R_REG(sii->osh, &cc->chipcontrol);
if (on) {
si_setcoreidx(sih, origidx);
}
-/* switch muxed pins, on: SROM, off: FEMCTRL */
+/** switch muxed pins, on: SROM, off: FEMCTRL. Called for a family of ac chips, not just 4360. */
void
si_chipcontrl_srom4360(si_t *sih, bool on)
{
- si_info_t *sii;
+ si_info_t *sii = SI_INFO(sih);
chipcregs_t *cc;
- uint origidx;
+ uint origidx = si_coreidx(sih);
uint32 val;
- sii = SI_INFO(sih);
- origidx = si_coreidx(sih);
-
cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
-
val = R_REG(sii->osh, &cc->chipcontrol);
if (on) {
return (err);
}
-/* Enable BT-COEX & Ex-PA for 4313 */
+/** Enable BT-COEX & Ex-PA for 4313 */
void
si_epa_4313war(si_t *sih)
{
- si_info_t *sii;
+ si_info_t *sii = SI_INFO(sih);
chipcregs_t *cc;
- uint origidx;
-
- sii = SI_INFO(sih);
- origidx = si_coreidx(sih);
+ uint origidx = si_coreidx(sih);
cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
/* EPA Fix */
W_REG(sii->osh, &cc->gpiocontrol,
- R_REG(sii->osh, &cc->gpiocontrol) | GPIO_CTRL_EPA_EN_MASK);
+ R_REG(sii->osh, &cc->gpiocontrol) | GPIO_CTRL_EPA_EN_MASK);
si_setcoreidx(sih, origidx);
}
{
}
-/* Re-enable synth_pwrsw resource in min_res_mask for 4313 */
+/** Re-enable synth_pwrsw resource in min_res_mask for 4313 */
void
si_pmu_synth_pwrsw_4313_war(si_t *sih)
{
}
-/* WL/BT control for 4313 btcombo boards >= P250 */
+/** WL/BT control for 4313 btcombo boards >= P250 */
void
si_btcombo_p250_4313_war(si_t *sih)
{
- si_info_t *sii;
+ si_info_t *sii = SI_INFO(sih);
chipcregs_t *cc;
- uint origidx;
-
- sii = SI_INFO(sih);
- origidx = si_coreidx(sih);
+ uint origidx = si_coreidx(sih);
cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
W_REG(sii->osh, &cc->gpiocontrol,
void
si_btc_enable_chipcontrol(si_t *sih)
{
- si_info_t *sii;
+ si_info_t *sii = SI_INFO(sih);
chipcregs_t *cc;
- uint origidx;
-
- sii = SI_INFO(sih);
- origidx = si_coreidx(sih);
+ uint origidx = si_coreidx(sih);
cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
void
si_btcombo_43228_war(si_t *sih)
{
- si_info_t *sii;
+ si_info_t *sii = SI_INFO(sih);
chipcregs_t *cc;
- uint origidx;
-
- sii = SI_INFO(sih);
- origidx = si_coreidx(sih);
+ uint origidx = si_coreidx(sih);
cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
si_setcoreidx(sih, origidx);
}
-/* check if the device is removed */
+/** check if the device is removed */
bool
si_deviceremoved(si_t *sih)
{
uint32 w;
- si_info_t *sii;
-
- sii = SI_INFO(sih);
switch (BUSTYPE(sih->bustype)) {
case PCI_BUS:
- ASSERT(sii->osh != NULL);
- w = OSL_PCI_READ_CONFIG(sii->osh, PCI_CFG_VID, sizeof(uint32));
+ ASSERT(SI_INFO(sih)->osh != NULL);
+ w = OSL_PCI_READ_CONFIG(SI_INFO(sih)->osh, PCI_CFG_VID, sizeof(uint32));
if ((w & 0xFFFF) != VENDOR_BROADCOM)
return TRUE;
break;
return ((sih->chipst & CST4324_SPROM_MASK) &&
!(sih->chipst & CST4324_SFLASH_MASK));
case BCM4335_CHIP_ID:
+ case BCM4345_CHIP_ID:
return ((sih->chipst & CST4335_SPROM_MASK) &&
!(sih->chipst & CST4335_SFLASH_MASK));
+ case BCM4349_CHIP_GRPID:
+ return (sih->chipst & CST4349_SPROM_PRESENT) != 0;
+ break;
case BCM4350_CHIP_ID:
+ case BCM4354_CHIP_ID:
+ case BCM4356_CHIP_ID:
+ case BCM43556_CHIP_ID:
+ case BCM43558_CHIP_ID:
+ case BCM43566_CHIP_ID:
+ case BCM43568_CHIP_ID:
+ case BCM43569_CHIP_ID:
+ case BCM43570_CHIP_ID:
+ case BCM4358_CHIP_ID:
return (sih->chipst & CST4350_SPROM_PRESENT) != 0;
+ case BCM43602_CHIP_ID:
+ return (sih->chipst & CST43602_SPROM_PRESENT) != 0;
case BCM43131_CHIP_ID:
case BCM43217_CHIP_ID:
case BCM43227_CHIP_ID:
uint32 si_get_sromctl(si_t *sih)
{
chipcregs_t *cc;
- uint origidx;
+ uint origidx = si_coreidx(sih);
uint32 sromctl;
- osl_t *osh;
+ osl_t *osh = si_osh(sih);
- osh = si_osh(sih);
- origidx = si_coreidx(sih);
cc = si_setcoreidx(sih, SI_CC_IDX);
ASSERT((uintptr)cc);
int si_set_sromctl(si_t *sih, uint32 value)
{
chipcregs_t *cc;
- uint origidx;
- osl_t *osh;
+ uint origidx = si_coreidx(sih);
+ osl_t *osh = si_osh(sih);
- osh = si_osh(sih);
- origidx = si_coreidx(sih);
cc = si_setcoreidx(sih, SI_CC_IDX);
ASSERT((uintptr)cc);
uint
si_core_wrapperreg(si_t *sih, uint32 coreidx, uint32 offset, uint32 mask, uint32 val)
{
- uint origidx;
+ uint origidx, intr_val = 0;
uint ret_val;
+ si_info_t *sii = SI_INFO(sih);
+ si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
origidx = si_coreidx(sih);
+ INTR_OFF(sii, intr_val);
si_setcoreidx(sih, coreidx);
ret_val = si_wrapperreg(sih, offset, mask, val);
/* return to the original core */
si_setcoreidx(sih, origidx);
+ INTR_RESTORE(sii, intr_val);
return ret_val;
}
+
+
+/* Clean up the timer from the host when the ARM has been halted
+ * without a chance to clean up its own resources.
+ * If left uncleaned, an interrupt from a software timer can still
+ * request HT clk while the ARM is halted.
+ */
+uint32
+si_pmu_res_req_timer_clr(si_t *sih)
+{
+ uint32 mask;
+
+ mask = PRRT_REQ_ACTIVE | PRRT_INTEN | PRRT_HT_REQ;
+ if (CHIPID(sih->chip) != BCM4328_CHIP_ID)
+ mask <<= 14;
+ /* clear mask bits */
+ pmu_corereg(sih, SI_CC_IDX, res_req_timer, mask, 0);
+ /* readback to ensure write completes */
+ return pmu_corereg(sih, SI_CC_IDX, res_req_timer, 0, 0);
+}
+
+/** turn on/off rfldo */
+void
+si_pmu_rfldo(si_t *sih, bool on)
+{
+}
+
+
+#ifdef SURVIVE_PERST_ENAB
+static uint32
+si_pcie_survive_perst(si_t *sih, uint32 mask, uint32 val)
+{
+ si_info_t *sii;
+
+ sii = SI_INFO(sih);
+
+ if (!PCIE(sii))
+ return (0);
+
+ return pcie_survive_perst(sii->pch, mask, val);
+}
+
+static void
+si_watchdog_reset(si_t *sih)
+{
+ si_info_t *sii = SI_INFO(sih);
+ uint32 i;
+
+ /* issue a watchdog reset */
+ pmu_corereg(sih, SI_CC_IDX, pmuwatchdog, 2, 2);
+ /* do busy wait for 20ms */
+ for (i = 0; i < 2000; i++) {
+ OSL_DELAY(10);
+ }
+}
+#endif /* SURVIVE_PERST_ENAB */
+
+void
+si_survive_perst_war(si_t *sih, bool reset, uint32 sperst_mask, uint32 sperst_val)
+{
+#ifdef SURVIVE_PERST_ENAB
+ if (BUSTYPE(sih->bustype) != PCI_BUS)
+ return;
+
+ if ((CHIPID(sih->chip) != BCM4360_CHIP_ID && CHIPID(sih->chip) != BCM4352_CHIP_ID) ||
+ (CHIPREV(sih->chiprev) >= 4))
+ return;
+
+ if (reset) {
+ si_info_t *sii = SI_INFO(sih);
+ uint32 bar0win, bar0win_after;
+
+ /* save the bar0win */
+ bar0win = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32));
+
+ si_watchdog_reset(sih);
+
+ bar0win_after = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32));
+ if (bar0win_after != bar0win) {
+ SI_ERROR(("%s: bar0win before %08x, bar0win after %08x\n",
+ __FUNCTION__, bar0win, bar0win_after));
+ OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32), bar0win);
+ }
+ }
+ if (sperst_mask) {
+ /* enable survive perst */
+ si_pcie_survive_perst(sih, sperst_mask, sperst_val);
+ }
+#endif /* SURVIVE_PERST_ENAB */
+}
+
+void
+si_pcie_ltr_war(si_t *sih)
+{
+}
+
+void
+si_pcie_hw_LTR_war(si_t *sih)
+{
+}
+
+void
+si_pciedev_reg_pm_clk_period(si_t *sih)
+{
+}
+
+void
+si_pciedev_crwlpciegen2(si_t *sih)
+{
+}
+
+void
+si_pcie_prep_D3(si_t *sih, bool enter_D3)
+{
+}
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: siutils_priv.h 385510 2013-02-15 21:02:07Z $
+ * $Id: siutils_priv.h 474902 2014-05-02 18:31:33Z $
*/
#ifndef _siutils_priv_h_
struct gpioh_item *next;
} gpioh_item_t;
+
+#define SI_GPIO_MAX 16
+
+typedef struct gci_gpio_item {
+ void *arg;
+ uint8 gci_gpio;
+ uint8 status;
+ gci_gpio_handler_t handler;
+ struct gci_gpio_item *next;
+} gci_gpio_item_t;
+
+
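+/* Per-core bookkeeping split out of si_info_t; allocated separately in si_attach()
+ * (statically as 'ksii_cores_info' for the kernel instance) and reached through
+ * sii->cores_info.
+ */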
+typedef struct si_cores_info {
+ void *regs[SI_MAXCORES]; /* other regs va */
+
+ uint coreid[SI_MAXCORES]; /* id of each core */
+ uint32 coresba[SI_MAXCORES]; /* backplane address of each core */
+ void *regs2[SI_MAXCORES]; /* va of each core second register set (usbh20) */
+ uint32 coresba2[SI_MAXCORES]; /* address of each core second register set (usbh20) */
+ uint32 coresba_size[SI_MAXCORES]; /* backplane address space size */
+ uint32 coresba2_size[SI_MAXCORES]; /* second address space size */
+
+ void *wrappers[SI_MAXCORES]; /* other cores wrapper va */
+ uint32 wrapba[SI_MAXCORES]; /* address of controlling wrapper */
+
+ uint32 cia[SI_MAXCORES]; /* erom cia entry for each core */
+ uint32 cib[SI_MAXCORES]; /* erom cib entry for each core */
+} si_cores_info_t;
+
/* misc si info needed by some of the routines */
typedef struct si_info {
struct si_pub pub; /* back plane public state (must be first field) */
uint varsz;
void *curmap; /* current regs va */
- void *regs[SI_MAXCORES]; /* other regs va */
uint curidx; /* current core index */
uint numcores; /* # discovered cores */
- uint coreid[SI_MAXCORES]; /* id of each core */
- uint32 coresba[SI_MAXCORES]; /* backplane address of each core */
- void *regs2[SI_MAXCORES]; /* va of each core second register set (usbh20) */
- uint32 coresba2[SI_MAXCORES]; /* address of each core second register set (usbh20) */
- uint32 coresba_size[SI_MAXCORES]; /* backplane address space size */
- uint32 coresba2_size[SI_MAXCORES]; /* second address space size */
void *curwrap; /* current wrapper va */
- void *wrappers[SI_MAXCORES]; /* other cores wrapper va */
- uint32 wrapba[SI_MAXCORES]; /* address of controlling wrapper */
- uint32 cia[SI_MAXCORES]; /* erom cia entry for each core */
- uint32 cib[SI_MAXCORES]; /* erom cia entry for each core */
uint32 oob_router; /* oob router registers for axi */
+
+ void *cores_info;
+ gci_gpio_item_t *gci_gpio_head; /* gci gpio interrupts head */
+ uint chipnew; /* new chip number */
} si_info_t;
-#define SI_INFO(sih) (si_info_t *)(uintptr)sih
+
+#define SI_INFO(sih) ((si_info_t *)(uintptr)sih)
#define GOODCOREADDR(x, b) (((x) >= (b)) && ((x) < ((b) + SI_MAXCORES * SI_CORE_SIZE)) && \
ISALIGNED((x), SI_CORE_SIZE))
* after core switching to avoid invalid register accesss inside ISR.
*/
#define INTR_OFF(si, intr_val) \
- if ((si)->intrsoff_fn && (si)->coreid[(si)->curidx] == (si)->dev_coreid) { \
+ if ((si)->intrsoff_fn && (cores_info)->coreid[(si)->curidx] == (si)->dev_coreid) { \
intr_val = (*(si)->intrsoff_fn)((si)->intr_arg); }
#define INTR_RESTORE(si, intr_val) \
- if ((si)->intrsrestore_fn && (si)->coreid[(si)->curidx] == (si)->dev_coreid) { \
+ if ((si)->intrsrestore_fn && (cores_info)->coreid[(si)->curidx] == (si)->dev_coreid) { \
(*(si)->intrsrestore_fn)((si)->intr_arg, intr_val); }
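+/* Note: these macros reference a local 'cores_info' variable, so every function using
+ * INTR_OFF/INTR_RESTORE must have
+ *    si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
+ * in scope, as done throughout siutils.c.
+ */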
/* dynamic clock control defines */
extern uint sb_corevendor(si_t *sih);
extern uint sb_corerev(si_t *sih);
extern uint sb_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val);
+extern uint32 *sb_corereg_addr(si_t *sih, uint coreidx, uint regoff);
extern bool sb_iscoreup(si_t *sih);
extern void *sb_setcoreidx(si_t *sih, uint coreidx);
extern uint32 sb_core_cflags(si_t *sih, uint32 mask, uint32 val);
extern bool sb_taclear(si_t *sih, bool details);
+#if defined(BCMDBG_PHYDUMP)
+extern void sb_dumpregs(si_t *sih, struct bcmstrbuf *b);
+#endif
/* Wake-on-wireless-LAN (WOWL) */
extern bool sb_pci_pmecap(si_t *sih);
extern uint ai_coreidx(si_t *sih);
extern uint ai_corevendor(si_t *sih);
extern uint ai_corerev(si_t *sih);
+extern uint32 *ai_corereg_addr(si_t *sih, uint coreidx, uint regoff);
extern bool ai_iscoreup(si_t *sih);
extern void *ai_setcoreidx(si_t *sih, uint coreidx);
extern uint32 ai_core_cflags(si_t *sih, uint32 mask, uint32 val);
extern uint32 ai_core_sflags(si_t *sih, uint32 mask, uint32 val);
extern uint ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val);
extern void ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits);
+extern void ai_d11rsdb_core_reset(si_t *sih, uint32 bits,
+ uint32 resetbits, void *p, void *s);
extern void ai_core_disable(si_t *sih, uint32 bits);
+extern void ai_d11rsdb_core_disable(const si_info_t *sii, uint32 bits,
+ aidmp_t *pmacai, aidmp_t *smacai);
extern int ai_numaddrspaces(si_t *sih);
extern uint32 ai_addrspace(si_t *sih, uint asidx);
extern uint32 ai_addrspacesize(si_t *sih, uint asidx);
extern void ai_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size);
extern uint ai_wrap_reg(si_t *sih, uint32 offset, uint32 mask, uint32 val);
+#if defined(BCMDBG_PHYDUMP)
+extern void ai_dumpregs(si_t *sih, struct bcmstrbuf *b);
+#endif
#define ub_scan(a, b, c) do {} while (0)
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: uamp_api.h 294267 2011-11-04 23:41:52Z $
+ * $Id: uamp_api.h 467328 2014-04-03 01:23:40Z $
*
*/
+
+
#ifndef UAMP_API_H
#define UAMP_API_H
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: wl_android.c 420671 2013-08-28 11:37:19Z $
+ * $Id: wl_android.c 490852 2014-07-12 15:20:53Z $
*/
#include <linux/module.h>
#include <linux/netdevice.h>
-#include <linux/of_gpio.h>
-#include <linux/regulator/consumer.h>
+#include <net/netlink.h>
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+#endif
#include <wl_android.h>
#include <wldev_common.h>
#include <dngl_stats.h>
#include <dhd.h>
#include <dhd_config.h>
+#include <proto/bcmip.h>
#ifdef PNO_SUPPORT
#include <dhd_pno.h>
#endif
+#ifdef BCMSDIO
#include <bcmsdbus.h>
+#endif
#ifdef WL_CFG80211
#include <wl_cfg80211.h>
#endif
-#if defined(CONFIG_WIFI_CONTROL_FUNC)
-#include <linux/platform_device.h>
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
-#include <linux/wlan_plat.h>
-#else
-#include <linux/wifi_tiwlan.h>
-#endif
-#endif /* CONFIG_WIFI_CONTROL_FUNC */
+#ifdef WL_NAN
+#include <wl_cfgnan.h>
+#endif /* WL_NAN */
#ifndef WL_CFG80211
#define htod32(i) i
#define CMD_SETROAMMODE "SETROAMMODE"
#define CMD_SETIBSSBEACONOUIDATA "SETIBSSBEACONOUIDATA"
#define CMD_MIRACAST "MIRACAST"
+#define CMD_NAN "NAN_"
+#define CMD_GET_CHANNEL "GET_CHANNEL"
+#define CMD_SET_ROAM "SET_ROAM_TRIGGER"
+#define CMD_GET_ROAM "GET_ROAM_TRIGGER"
+#define CMD_GET_KEEP_ALIVE "GET_KEEP_ALIVE"
+#define CMD_GET_PM "GET_PM"
+#define CMD_SET_PM "SET_PM"
+#define CMD_MONITOR "MONITOR"
#if defined(WL_SUPPORT_AUTO_CHANNEL)
#define CMD_GET_BEST_CHANNELS "GET_BEST_CHANNELS"
#endif /* WL_SUPPORT_AUTO_CHANNEL */
+#define CMD_KEEP_ALIVE "KEEPALIVE"
/* CCX Private Commands */
#ifdef BCMCCX
#define CMD_OKC_ENABLE "OKC_ENABLE"
#define CMD_HAPD_MAC_FILTER "HAPD_MAC_FILTER"
-/* hostap mac mode */
-#define MACLIST_MODE_DISABLED 0
-#define MACLIST_MODE_DENY 1
-#define MACLIST_MODE_ALLOW 2
-/* max number of assoc list */
-#define MAX_NUM_OF_ASSOCLIST 64
+#ifdef WLFBT
+#define CMD_GET_FTKEY "GET_FTKEY"
+#endif
-/* max number of mac filter list
- * restrict max number to 10 as maximum cmd string size is 255
- */
-#define MAX_NUM_MAC_FILT 10
+#ifdef WLAIBSS
+#define CMD_SETIBSSTXFAILEVENT "SETIBSSTXFAILEVENT"
+#define CMD_GET_IBSS_PEER_INFO "GETIBSSPEERINFO"
+#define CMD_GET_IBSS_PEER_INFO_ALL "GETIBSSPEERINFOALL"
+#define CMD_SETIBSSROUTETABLE "SETIBSSROUTETABLE"
+#define CMD_SETIBSSAMPDU "SETIBSSAMPDU"
+#define CMD_SETIBSSANTENNAMODE "SETIBSSANTENNAMODE"
+#endif /* WLAIBSS */
+#define CMD_ROAM_OFFLOAD "SETROAMOFFLOAD"
/* miracast related definition */
#define MIRACAST_MODE_OFF 0
#endif
static LIST_HEAD(miracast_resume_list);
+#ifdef WL_CFG80211
static u8 miracast_cur_mode;
+#endif
struct io_cfg {
s8 *iovar;
struct list_head list;
};
-typedef struct android_wifi_priv_cmd {
+typedef struct _android_wifi_priv_cmd {
char *buf;
int used_len;
int total_len;
} android_wifi_priv_cmd;
+#ifdef CONFIG_COMPAT
+typedef struct _compat_android_wifi_priv_cmd {
+ compat_caddr_t buf;
+ int used_len;
+ int total_len;
+} compat_android_wifi_priv_cmd;
+#endif /* CONFIG_COMPAT */
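+/* 32-bit userspace layout of android_wifi_priv_cmd: the user buffer travels as a
+ * compat_caddr_t so a 64-bit kernel can copy the struct in and convert the pointer
+ * (typically via compat_ptr()) before handling the private command.
+ */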
+
+#if defined(BCMFW_ROAM_ENABLE)
+#define CMD_SET_ROAMPREF "SET_ROAMPREF"
+
+#define MAX_NUM_SUITES 10
+#define WIDTH_AKM_SUITE 8
+#define JOIN_PREF_RSSI_LEN 0x02
+#define JOIN_PREF_RSSI_SIZE 4 /* RSSI pref header size in bytes */
+#define JOIN_PREF_WPA_HDR_SIZE 4 /* WPA pref header size in bytes */
+#define JOIN_PREF_WPA_TUPLE_SIZE 12 /* Tuple size in bytes */
+#define JOIN_PREF_MAX_WPA_TUPLES 16
+#define MAX_BUF_SIZE (JOIN_PREF_RSSI_SIZE + JOIN_PREF_WPA_HDR_SIZE + \
+ (JOIN_PREF_WPA_TUPLE_SIZE * JOIN_PREF_MAX_WPA_TUPLES))
+#endif /* BCMFW_ROAM_ENABLE */
+
#ifdef WL_GENL
static s32 wl_genl_handle_msg(struct sk_buff *skb, struct genl_info *info);
static int wl_genl_init(void);
};
/* commands: mapping between the command enumeration and the actual function */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
+struct genl_ops wl_genl_ops[] = {
+ {
+ .cmd = BCM_GENL_CMD_MSG,
+ .flags = 0,
+ .policy = wl_genl_policy,
+ .doit = wl_genl_handle_msg,
+ .dumpit = NULL,
+ },
+};
+#else
struct genl_ops wl_genl_ops = {
.cmd = BCM_GENL_CMD_MSG,
.flags = 0,
.policy = wl_genl_policy,
.doit = wl_genl_handle_msg,
.dumpit = NULL,
+
};
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0) */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
+static struct genl_multicast_group wl_genl_mcast[] = {
+ { .name = "bcm-genl-mcast", },
+};
+#else
static struct genl_multicast_group wl_genl_mcast = {
.id = GENL_ID_GENERATE, /* Genetlink would generate the ID */
.name = "bcm-genl-mcast",
};
-
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0) */
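+/* From kernel 3.13 the generic netlink API takes ops and multicast groups as static
+ * arrays and assigns multicast group IDs itself (e.g. via
+ * genl_register_family_with_ops_groups()), hence the array forms above and the
+ * dropped GENL_ID_GENERATE id.
+ */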
#endif /* WL_GENL */
/**
* Extern function declarations (TODO: move them to dhd_linux.h)
*/
-void dhd_customer_gpio_wlan_ctrl(int onoff);
-int dhd_dev_reset(struct net_device *dev, uint8 flag);
+int dhd_net_bus_devreset(struct net_device *dev, uint8 flag);
int dhd_dev_init_ioctl(struct net_device *dev);
#ifdef WL_CFG80211
int wl_cfg80211_get_p2p_dev_addr(struct net_device *net, struct ether_addr *p2pdev_addr);
-int wl_cfg80211_set_btcoex_dhcp(struct net_device *dev, char *command);
-int wl_cfg80211_get_ioctl_version(void);
+int wl_cfg80211_set_btcoex_dhcp(struct net_device *dev, dhd_pub_t *dhd, char *command);
#else
int wl_cfg80211_get_p2p_dev_addr(struct net_device *net, struct ether_addr *p2pdev_addr)
{ return 0; }
int wl_cfg80211_set_p2p_ps(struct net_device *net, char* buf, int len)
{ return 0; }
#endif /* WL_CFG80211 */
-extern int dhd_os_check_if_up(void *dhdp);
-#ifdef BCMLXSDMMC
-extern void *bcmsdh_get_drvdata(void);
-#endif /* BCMLXSDMMC */
#ifdef ENABLE_4335BT_WAR
if (error)
return -1;
#if defined(RSSIOFFSET)
- rssi = wl_update_rssi_offset(rssi);
+ rssi = wl_update_rssi_offset(net, rssi);
#endif
error = wldev_get_ssid(net, &ssid);
#ifdef PNO_SUPPORT
-#define PARAM_SIZE 50
+#define PNO_PARAM_SIZE 50
#define VALUE_SIZE 50
+#define LIMIT_STR_FMT ("%49s %49s")
static int
wls_parse_batching_cmd(struct net_device *dev, char *command, int total_len)
{
int err = BCME_OK;
uint i, tokens;
char *pos, *pos2, *token, *token2, *delim;
- char param[PARAM_SIZE], value[VALUE_SIZE];
+ char param[PNO_PARAM_SIZE], value[VALUE_SIZE];
struct dhd_pno_batch_params batch_params;
ANDROID_INFO(("%s: command=%s, len=%d\n", __FUNCTION__, command, total_len));
if (total_len < strlen(CMD_WLS_BATCHING)) {
if (delim != NULL)
*delim = ' ';
- tokens = sscanf(token, "%s %s", param, value);
- if (!strncmp(param, PNO_PARAM_SCANFREQ, strlen(PNO_PARAM_MSCAN))) {
+ tokens = sscanf(token, LIMIT_STR_FMT, param, value);
+ if (!strncmp(param, PNO_PARAM_SCANFREQ, strlen(PNO_PARAM_SCANFREQ))) {
batch_params.scan_fr = simple_strtol(value, NULL, 0);
ANDROID_INFO(("scan_freq : %d\n", batch_params.scan_fr));
- } else if (!strncmp(param, PNO_PARAM_BESTN, strlen(PNO_PARAM_MSCAN))) {
+ } else if (!strncmp(param, PNO_PARAM_BESTN, strlen(PNO_PARAM_BESTN))) {
batch_params.bestn = simple_strtol(value, NULL, 0);
ANDROID_INFO(("bestn : %d\n", batch_params.bestn));
} else if (!strncmp(param, PNO_PARAM_MSCAN, strlen(PNO_PARAM_MSCAN))) {
batch_params.mscan = simple_strtol(value, NULL, 0);
ANDROID_INFO(("mscan : %d\n", batch_params.mscan));
- } else if (!strncmp(param, PNO_PARAM_CHANNEL, strlen(PNO_PARAM_MSCAN))) {
+ } else if (!strncmp(param, PNO_PARAM_CHANNEL, strlen(PNO_PARAM_CHANNEL))) {
i = 0;
pos2 = value;
tokens = sscanf(value, "<%s>", value);
batch_params.chan_list[i-1]));
}
}
- } else if (!strncmp(param, PNO_PARAM_RTT, strlen(PNO_PARAM_MSCAN))) {
+ } else if (!strncmp(param, PNO_PARAM_RTT, strlen(PNO_PARAM_RTT))) {
batch_params.rtt = simple_strtol(value, NULL, 0);
ANDROID_INFO(("rtt : %d\n", batch_params.rtt));
} else {
err = dhd_dev_pno_set_for_batch(dev, &batch_params);
if (err < 0) {
ANDROID_ERROR(("failed to configure batch scan\n"));
+ } else {
+ memset(command, 0, total_len);
+ err = sprintf(command, "%d", err);
}
} else if (!strncmp(pos, PNO_BATCHING_GET, strlen(PNO_BATCHING_GET))) {
err = dhd_dev_pno_get_for_batch(dev, command, total_len);
err = dhd_dev_pno_stop_for_batch(dev);
if (err < 0) {
ANDROID_ERROR(("failed to stop batching scan\n"));
+ } else {
+ memset(command, 0, total_len);
+ err = sprintf(command, "OK");
}
} else {
ANDROID_ERROR(("%s : unknown command\n", __FUNCTION__));
0x00
};
#endif /* PNO_SET_DEBUG */
-
ANDROID_INFO(("%s: command=%s, len=%d\n", __FUNCTION__, command, total_len));
if (total_len < (strlen(CMD_PNOSETUP_SET) + sizeof(cmd_tlv_t))) {
ANDROID_ERROR(("%s argument=%d less min size\n", __FUNCTION__, total_len));
goto exit_proc;
}
-
#ifdef PNO_SET_DEBUG
memcpy(command, pno_in_example, sizeof(pno_in_example));
total_len = sizeof(pno_in_example);
#endif
-
str_ptr = command + strlen(CMD_PNOSETUP_SET);
tlv_size_left = total_len - strlen(CMD_PNOSETUP_SET);
#endif /* BCMCCX */
-static int
+int
wl_android_set_ap_mac_list(struct net_device *dev, int macmode, struct maclist *maclist)
{
int i, j, match;
dhd_net_if_lock(dev);
printk("%s in 2: g_wifi_on=%d\n", __FUNCTION__, g_wifi_on);
if (!g_wifi_on) {
-#ifdef POWER_OFF_IN_SUSPEND
- g_netdev = dev;
-#endif
do {
- dhd_customer_gpio_wlan_ctrl(WLAN_RESET_ON);
- ret = sdioh_start(NULL, 0);
+ dhd_net_wifi_platform_set_power(dev, TRUE, WIFI_TURNON_DELAY);
+#ifdef BCMSDIO
+ ret = dhd_net_bus_resume(dev, 0);
+#endif /* BCMSDIO */
+#ifdef BCMPCIE
+ ret = dhd_net_bus_devreset(dev, FALSE);
+#endif /* BCMPCIE */
if (ret == 0)
break;
ANDROID_ERROR(("\nfailed to power up wifi chip, retry again (%d left) **\n\n",
- retry+1));
- dhd_customer_gpio_wlan_ctrl(WLAN_RESET_OFF);
- } while (retry-- >= 0);
+ retry));
+#ifdef BCMPCIE
+ dhd_net_bus_devreset(dev, TRUE);
+#endif /* BCMPCIE */
+ dhd_net_wifi_platform_set_power(dev, FALSE, WIFI_TURNOFF_DELAY);
+ } while (retry-- > 0);
if (ret != 0) {
ANDROID_ERROR(("\nfailed to power up wifi chip, max retry reached **\n\n"));
goto exit;
}
- ret = dhd_dev_reset(dev, FALSE);
+#ifdef BCMSDIO
+ ret = dhd_net_bus_devreset(dev, FALSE);
if (ret)
goto err;
- sdioh_start(NULL, 1);
+ dhd_net_bus_resume(dev, 1);
+#endif /* BCMSDIO */
+
+#ifndef BCMPCIE
if (!ret) {
if (dhd_dev_init_ioctl(dev) < 0) {
ret = -EFAULT;
goto err;
}
}
+#endif /* !BCMPCIE */
g_wifi_on = TRUE;
}
return ret;
err:
- dhd_dev_reset(dev, TRUE);
- sdioh_stop(NULL);
- dhd_customer_gpio_wlan_ctrl(WLAN_RESET_OFF);
+#ifdef BCMSDIO
+ dhd_net_bus_devreset(dev, TRUE);
+ dhd_net_bus_suspend(dev);
+#endif
+ dhd_net_wifi_platform_set_power(dev, FALSE, WIFI_TURNOFF_DELAY);
printk("%s: Failed\n", __FUNCTION__);
dhd_net_if_unlock(dev);
dhd_net_if_lock(dev);
printk("%s in 2: g_wifi_on=%d\n", __FUNCTION__, g_wifi_on);
if (g_wifi_on) {
- ret = dhd_dev_reset(dev, TRUE);
- sdioh_stop(NULL);
- dhd_customer_gpio_wlan_ctrl(WLAN_RESET_OFF);
+#if defined(BCMSDIO) || defined(BCMPCIE)
+ ret = dhd_net_bus_devreset(dev, TRUE);
+#ifdef BCMSDIO
+ dhd_net_bus_suspend(dev);
+#endif /* BCMSDIO */
+#endif /* BCMSDIO || BCMPCIE */
+ dhd_net_wifi_platform_set_power(dev, FALSE, WIFI_TURNOFF_DELAY);
g_wifi_on = FALSE;
}
printk("%s out\n", __FUNCTION__);
{
if ((strlen(command) - strlen(CMD_SETFWPATH)) > MOD_PARAM_PATHLEN)
return -1;
- bcm_strncpy_s(fw_path, sizeof(fw_path),
- command + strlen(CMD_SETFWPATH) + 1, MOD_PARAM_PATHLEN - 1);
- if (strstr(fw_path, "apsta") != NULL) {
- ANDROID_INFO(("GOT APSTA FIRMWARE\n"));
- ap_fw_loaded = TRUE;
- } else {
- ANDROID_INFO(("GOT STA FIRMWARE\n"));
- ap_fw_loaded = FALSE;
- }
- return 0;
+ return dhd_net_set_fw_path(net, command + strlen(CMD_SETFWPATH) + 1);
}
+
static int
wl_android_set_pmk(struct net_device *dev, char *command, int total_len)
{
return error;
}
+
+
int wl_android_set_roam_mode(struct net_device *dev, char *command, int total_len)
{
int error = 0;
return 0;
}
+#ifdef WL_CFG80211
int wl_android_set_ibss_beacon_ouidata(struct net_device *dev, char *command, int total_len)
{
char ie_buf[VNDR_IE_MAX_LEN];
return err;
}
+#endif
+
+#if defined(BCMFW_ROAM_ENABLE)
+static int
+wl_android_set_roampref(struct net_device *dev, char *command, int total_len)
+{
+ int error = 0;
+ char smbuf[WLC_IOCTL_SMLEN];
+ uint8 buf[MAX_BUF_SIZE];
+ uint8 *pref = buf;
+ char *pcmd;
+ int num_ucipher_suites = 0;
+ int num_akm_suites = 0;
+ wpa_suite_t ucipher_suites[MAX_NUM_SUITES];
+ wpa_suite_t akm_suites[MAX_NUM_SUITES];
+ int num_tuples = 0;
+ int total_bytes = 0;
+ int total_len_left;
+ int i, j;
+ char hex[] = "XX";
+
+ pcmd = command + strlen(CMD_SET_ROAMPREF) + 1;
+ total_len_left = total_len - strlen(CMD_SET_ROAMPREF) + 1;
+
+ num_akm_suites = simple_strtoul(pcmd, NULL, 16);
+ /* Increment for number of AKM suites field + space */
+ pcmd += 3;
+ total_len_left -= 3;
+
+ /* check to make sure pcmd does not overrun */
+ if (total_len_left < (num_akm_suites * WIDTH_AKM_SUITE))
+ return -1;
+
+ memset(buf, 0, sizeof(buf));
+ memset(akm_suites, 0, sizeof(akm_suites));
+ memset(ucipher_suites, 0, sizeof(ucipher_suites));
+
+ /* Save the AKM suites passed in the command */
+ for (i = 0; i < num_akm_suites; i++) {
+ /* Store the MSB first, as required by join_pref */
+ for (j = 0; j < 4; j++) {
+ hex[0] = *pcmd++;
+ hex[1] = *pcmd++;
+ buf[j] = (uint8)simple_strtoul(hex, NULL, 16);
+ }
+ memcpy((uint8 *)&akm_suites[i], buf, sizeof(uint32));
+ }
+
+ total_len_left -= (num_akm_suites * WIDTH_AKM_SUITE);
+ num_ucipher_suites = simple_strtoul(pcmd, NULL, 16);
+ /* Increment for number of cipher suites field + space */
+ pcmd += 3;
+ total_len_left -= 3;
+
+ if (total_len_left < (num_ucipher_suites * WIDTH_AKM_SUITE))
+ return -1;
+
+ /* Save the cipher suites passed in the command */
+ for (i = 0; i < num_ucipher_suites; i++) {
+ /* Store the MSB first, as required by join_pref */
+ for (j = 0; j < 4; j++) {
+ hex[0] = *pcmd++;
+ hex[1] = *pcmd++;
+ buf[j] = (uint8)simple_strtoul(hex, NULL, 16);
+ }
+ memcpy((uint8 *)&ucipher_suites[i], buf, sizeof(uint32));
+ }
+
+ /* Join preference for RSSI
+ * Type : 1 byte (0x01)
+ * Length : 1 byte (0x02)
+ * Value : 2 bytes (reserved)
+ */
+ *pref++ = WL_JOIN_PREF_RSSI;
+ *pref++ = JOIN_PREF_RSSI_LEN;
+ *pref++ = 0;
+ *pref++ = 0;
+
+ /* Join preference for WPA
+ * Type : 1 byte (0x02)
+ * Length : 1 byte (not used)
+ * Value : (variable length)
+ * reserved: 1 byte
+ * count : 1 byte (no of tuples)
+ * Tuple1 : 12 bytes
+ * akm[4]
+ * ucipher[4]
+ * mcipher[4]
+ * Tuple2 : 12 bytes
+ * Tuplen : 12 bytes
+ */
+ num_tuples = num_akm_suites * num_ucipher_suites;
+ if (num_tuples != 0) {
+ if (num_tuples <= JOIN_PREF_MAX_WPA_TUPLES) {
+ *pref++ = WL_JOIN_PREF_WPA;
+ *pref++ = 0;
+ *pref++ = 0;
+ *pref++ = (uint8)num_tuples;
+ total_bytes = JOIN_PREF_RSSI_SIZE + JOIN_PREF_WPA_HDR_SIZE +
+ (JOIN_PREF_WPA_TUPLE_SIZE * num_tuples);
+ } else {
+ ANDROID_ERROR(("%s: Too many wpa configs for join_pref \n", __FUNCTION__));
+ return -1;
+ }
+ } else {
+ /* No WPA config, configure only RSSI preference */
+ total_bytes = JOIN_PREF_RSSI_SIZE;
+ }
+
+ /* akm-ucipher-mcipher tuples in the format required for join_pref */
+ for (i = 0; i < num_ucipher_suites; i++) {
+ for (j = 0; j < num_akm_suites; j++) {
+ memcpy(pref, (uint8 *)&akm_suites[j], WPA_SUITE_LEN);
+ pref += WPA_SUITE_LEN;
+ memcpy(pref, (uint8 *)&ucipher_suites[i], WPA_SUITE_LEN);
+ pref += WPA_SUITE_LEN;
+ /* Set to 0 to match any available multicast cipher */
+ memset(pref, 0, WPA_SUITE_LEN);
+ pref += WPA_SUITE_LEN;
+ }
+ }
+
+ prhex("join pref", (uint8 *)buf, total_bytes);
+ error = wldev_iovar_setbuf(dev, "join_pref", buf, total_bytes, smbuf, sizeof(smbuf), NULL);
+ if (error) {
+ ANDROID_ERROR(("Failed to set join_pref, error = %d\n", error));
+ }
+ return error;
+}
+#endif /* defined(BCMFW_ROAM_ENABLE) */
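+/*
+ * Illustrative only: a sketch of the command layout implied by the parsing in
+ * wl_android_set_roampref() above; the field widths are assumptions derived
+ * from that code (WIDTH_AKM_SUITE, pcmd += 3), not a documented interface.
+ *
+ *   CMD_SET_ROAMPREF <NN> <akm-suite-1>..<akm-suite-N> <MM> <ucipher-1>..<ucipher-M>
+ *
+ *   <NN>, <MM>  two hex digits (count) followed by a space
+ *   <suite>     eight hex digits, the 4-byte OUI+type written MSB first
+ *
+ * The suites are expanded into WL_JOIN_PREF_RSSI and WL_JOIN_PREF_WPA TLVs and
+ * handed to the firmware through the "join_pref" iovar.
+ */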
+#ifdef WL_CFG80211
static int
wl_android_iolist_add(struct net_device *dev, struct list_head *head, struct io_cfg *config)
{
case MIRACAST_MODE_SOURCE:
/* setting mchan_algo to platform specific value */
config.iovar = "mchan_algo";
- config.param = MIRACAST_MCHAN_ALGO;
+
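+ /* Query the connected link's beacon interval first: if it is longer
+  * than the usual 100 TU, keep the default scheduler (mchan_algo 0)
+  * instead of the Miracast-tuned MIRACAST_MCHAN_ALGO value.
+  */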
+ ret = wldev_ioctl(dev, WLC_GET_BCNPRD, &val, sizeof(int), false);
+ if (!ret && val > 100) {
+ config.param = 0;
+ ANDROID_ERROR(("%s: Connected station's beacon interval: "
+ "%d and set mchan_algo to %d \n",
+ __FUNCTION__, val, config.param));
+ }
+ else {
+ config.param = MIRACAST_MCHAN_ALGO;
+ }
ret = wl_android_iolist_add(dev, &miracast_resume_list, &config);
if (ret)
goto resume;
wl_android_iolist_resume(dev, &miracast_resume_list);
return ret;
}
+#endif
-int wl_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd)
+#define NETLINK_OXYGEN 30
+#define AIBSS_BEACON_TIMEOUT 10
+
+static struct sock *nl_sk = NULL;
+
+static void wl_netlink_recv(struct sk_buff *skb)
{
-#define PRIVATE_COMMAND_MAX_LEN 8192
- int ret = 0;
- char *command = NULL;
- int bytes_written = 0;
- android_wifi_priv_cmd priv_cmd;
+ ANDROID_ERROR(("netlink_recv called\n"));
+}
- net_os_wake_lock(net);
+static int wl_netlink_init(void)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
+ struct netlink_kernel_cfg cfg = {
+ .input = wl_netlink_recv,
+ };
+#endif
- if (!ifr->ifr_data) {
- ret = -EINVAL;
- goto exit;
- }
- if (copy_from_user(&priv_cmd, ifr->ifr_data, sizeof(android_wifi_priv_cmd))) {
- ret = -EFAULT;
- goto exit;
- }
- if (priv_cmd.total_len > PRIVATE_COMMAND_MAX_LEN)
- {
- ANDROID_ERROR(("%s: too long priavte command\n", __FUNCTION__));
- ret = -EINVAL;
- goto exit;
- }
- command = kmalloc((priv_cmd.total_len + 1), GFP_KERNEL);
- if (!command)
- {
- ANDROID_ERROR(("%s: failed to allocate memory\n", __FUNCTION__));
- ret = -ENOMEM;
- goto exit;
- }
- if (copy_from_user(command, priv_cmd.buf, priv_cmd.total_len)) {
- ret = -EFAULT;
- goto exit;
+ if (nl_sk != NULL) {
+ ANDROID_ERROR(("nl_sk already exist\n"));
+ return BCME_ERROR;
}
- command[priv_cmd.total_len] = '\0';
- ANDROID_INFO(("%s: Android private cmd \"%s\" on %s\n", __FUNCTION__, command, ifr->ifr_name));
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
+ nl_sk = netlink_kernel_create(&init_net, NETLINK_OXYGEN,
+ 0, wl_netlink_recv, NULL, THIS_MODULE);
+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
+ nl_sk = netlink_kernel_create(&init_net, NETLINK_OXYGEN, THIS_MODULE, &cfg);
+#else
+ nl_sk = netlink_kernel_create(&init_net, NETLINK_OXYGEN, &cfg);
+#endif
- if (strnicmp(command, CMD_START, strlen(CMD_START)) == 0) {
- ANDROID_INFO(("%s, Received regular START command\n", __FUNCTION__));
- bytes_written = wl_android_wifi_on(net);
+ if (nl_sk == NULL) {
+ ANDROID_ERROR(("nl_sk is not ready\n"));
+ return BCME_ERROR;
}
- else if (strnicmp(command, CMD_SETFWPATH, strlen(CMD_SETFWPATH)) == 0) {
- bytes_written = wl_android_set_fwpath(net, command, priv_cmd.total_len);
+
+ return BCME_OK;
+}
+
+static void wl_netlink_deinit(void)
+{
+ if (nl_sk) {
+ netlink_kernel_release(nl_sk);
+ nl_sk = NULL;
}
+}
- if (!g_wifi_on) {
- ANDROID_ERROR(("%s: Ignore private cmd \"%s\" - iface %s is down\n",
- __FUNCTION__, command, ifr->ifr_name));
- ret = 0;
- goto exit;
+s32
+wl_netlink_send_msg(int pid, int type, int seq, void *data, size_t size)
+{
+ struct sk_buff *skb = NULL;
+ struct nlmsghdr *nlh = NULL;
+ int ret = -1;
+
+ if (nl_sk == NULL) {
+ ANDROID_ERROR(("nl_sk was not initialized\n"));
+ goto nlmsg_failure;
}
- if (strnicmp(command, CMD_STOP, strlen(CMD_STOP)) == 0) {
- bytes_written = wl_android_wifi_off(net);
+ skb = alloc_skb(NLMSG_SPACE(size), GFP_ATOMIC);
+ if (skb == NULL) {
+ ANDROID_ERROR(("failed to allocate memory\n"));
+ goto nlmsg_failure;
}
- else if (strnicmp(command, CMD_SCAN_ACTIVE, strlen(CMD_SCAN_ACTIVE)) == 0) {
- /* TBD: SCAN-ACTIVE */
+
+ nlh = nlmsg_put(skb, 0, 0, 0, size, 0);
+ if (nlh == NULL) {
+ ANDROID_ERROR(("failed to build nlmsg, skb_tailroom:%d, nlmsg_total_size:%d\n",
+ skb_tailroom(skb), nlmsg_total_size(size)));
+ dev_kfree_skb(skb);
+ goto nlmsg_failure;
}
- else if (strnicmp(command, CMD_SCAN_PASSIVE, strlen(CMD_SCAN_PASSIVE)) == 0) {
- /* TBD: SCAN-PASSIVE */
+
+ memcpy(nlmsg_data(nlh), data, size);
+ nlh->nlmsg_seq = seq;
+ nlh->nlmsg_type = type;
+
+ /* netlink_unicast() takes ownership of the skb and frees it itself. */
+ ret = netlink_unicast(nl_sk, skb, pid, 0);
+ ANDROID_TRACE(("netlink_unicast() pid=%d, ret=%d\n", pid, ret));
+
+nlmsg_failure:
+ return ret;
+}
+
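+/*
+ * Illustrative sketch only: a minimal user-space reader for the unicast
+ * messages sent by wl_netlink_send_msg() on NETLINK_OXYGEN (protocol 30).
+ * The bound pid is the one that commands such as SETIBSSTXFAILEVENT pass
+ * down to the driver; the payload follows the struct nlmsghdr header.
+ *
+ *   int fd = socket(AF_NETLINK, SOCK_RAW, 30);
+ *   struct sockaddr_nl a = { .nl_family = AF_NETLINK, .nl_pid = getpid() };
+ *   bind(fd, (struct sockaddr *)&a, sizeof(a));
+ *   char buf[256];
+ *   recv(fd, buf, sizeof(buf), 0);
+ */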
+#ifdef WLAIBSS
+static int wl_android_set_ibss_txfail_event(struct net_device *dev, char *command, int total_len)
+{
+ int err = 0;
+ int retry = 0;
+ int pid = 0;
+ aibss_txfail_config_t txfail_config = {0, 0, 0, 0};
+ char smbuf[WLC_IOCTL_SMLEN];
+
+ if (sscanf(command, CMD_SETIBSSTXFAILEVENT " %d %d", &retry, &pid) <= 0) {
+ ANDROID_ERROR(("Failed to get Parameter from : %s\n", command));
+ return -1;
}
- else if (strnicmp(command, CMD_RSSI, strlen(CMD_RSSI)) == 0) {
- bytes_written = wl_android_get_rssi(net, command, priv_cmd.total_len);
+
+ /* set pid; if the event occurs, send a notification through netlink */
+ wl_cfg80211_set_txfail_pid(pid);
+
+ /* If retry value is 0, it disables the functionality for TX Fail. */
+ if (retry > 0) {
+ txfail_config.max_tx_retry = retry;
+ txfail_config.bcn_timeout = 0; /* 0 : disable tx fail from beacon */
}
- else if (strnicmp(command, CMD_LINKSPEED, strlen(CMD_LINKSPEED)) == 0) {
- bytes_written = wl_android_get_link_speed(net, command, priv_cmd.total_len);
+ txfail_config.version = AIBSS_TXFAIL_CONFIG_VER_0;
+ txfail_config.len = sizeof(txfail_config);
+
+ err = wldev_iovar_setbuf(dev, "aibss_txfail_config", (void *) &txfail_config,
+ sizeof(aibss_txfail_config_t), smbuf, WLC_IOCTL_SMLEN, NULL);
+ ANDROID_TRACE(("retry=%d, pid=%d, err=%d\n", retry, pid, err));
+
+ return ((err == 0)?total_len:err);
+}
+
+static int wl_android_get_ibss_peer_info(struct net_device *dev, char *command,
+ int total_len, bool bAll)
+{
+ int error;
+ int bytes_written = 0;
+ void *buf = NULL;
+ bss_peer_list_info_t peer_list_info;
+ bss_peer_info_t *peer_info;
+ int i;
+ bool found = false;
+ struct ether_addr mac_ea;
+
+ ANDROID_TRACE(("get ibss peer info(%s)\n", bAll?"true":"false"));
+
+ if (!bAll) {
+ if (sscanf (command, "GETIBSSPEERINFO %02x:%02x:%02x:%02x:%02x:%02x",
+ (unsigned int *)&mac_ea.octet[0], (unsigned int *)&mac_ea.octet[1],
+ (unsigned int *)&mac_ea.octet[2], (unsigned int *)&mac_ea.octet[3],
+ (unsigned int *)&mac_ea.octet[4], (unsigned int *)&mac_ea.octet[5]) != 6) {
+ ANDROID_TRACE(("invalid MAC address\n"));
+ return -1;
+ }
}
-#ifdef PKT_FILTER_SUPPORT
- else if (strnicmp(command, CMD_RXFILTER_START, strlen(CMD_RXFILTER_START)) == 0) {
- bytes_written = net_os_enable_packet_filter(net, 1);
+
+ if ((buf = kmalloc(WLC_IOCTL_MAXLEN, GFP_KERNEL)) == NULL) {
+ ANDROID_ERROR(("kmalloc failed\n"));
+ return -1;
}
- else if (strnicmp(command, CMD_RXFILTER_STOP, strlen(CMD_RXFILTER_STOP)) == 0) {
- bytes_written = net_os_enable_packet_filter(net, 0);
+
+ error = wldev_iovar_getbuf(dev, "bss_peer_info", NULL, 0, buf, WLC_IOCTL_MAXLEN, NULL);
+ if (unlikely(error)) {
+ ANDROID_ERROR(("could not get ibss peer info (%d)\n", error));
+ kfree(buf);
+ return -1;
}
- else if (strnicmp(command, CMD_RXFILTER_ADD, strlen(CMD_RXFILTER_ADD)) == 0) {
- int filter_num = *(command + strlen(CMD_RXFILTER_ADD) + 1) - '0';
- bytes_written = net_os_rxfilter_add_remove(net, TRUE, filter_num);
+
+ memcpy(&peer_list_info, buf, sizeof(peer_list_info));
+ peer_list_info.version = htod16(peer_list_info.version);
+ peer_list_info.bss_peer_info_len = htod16(peer_list_info.bss_peer_info_len);
+ peer_list_info.count = htod32(peer_list_info.count);
+
+ ANDROID_TRACE(("ver:%d, len:%d, count:%d\n", peer_list_info.version,
+ peer_list_info.bss_peer_info_len, peer_list_info.count));
+
+ if (peer_list_info.count > 0) {
+ if (bAll)
+ bytes_written += sprintf(&command[bytes_written], "%u ",
+ peer_list_info.count);
+
+ peer_info = (bss_peer_info_t *) ((void *)buf + BSS_PEER_LIST_INFO_FIXED_LEN);
+
+
+ for (i = 0; i < peer_list_info.count; i++) {
+
+ ANDROID_TRACE(("index:%d rssi:%d, tx:%u, rx:%u\n", i, peer_info->rssi,
+ peer_info->tx_rate, peer_info->rx_rate));
+
+ if (!bAll &&
+ memcmp(&mac_ea, &peer_info->ea, sizeof(struct ether_addr)) == 0) {
+ found = true;
+ }
+
+ if (bAll || found) {
+ bytes_written += sprintf(&command[bytes_written], MACF,
+ ETHER_TO_MACF(peer_info->ea));
+ bytes_written += sprintf(&command[bytes_written], " %u %d ",
+ peer_info->tx_rate/1000, peer_info->rssi);
+ }
+
+ if (found)
+ break;
+
+ peer_info = (bss_peer_info_t *)((void *)peer_info+sizeof(bss_peer_info_t));
+ }
}
- else if (strnicmp(command, CMD_RXFILTER_REMOVE, strlen(CMD_RXFILTER_REMOVE)) == 0) {
- int filter_num = *(command + strlen(CMD_RXFILTER_REMOVE) + 1) - '0';
- bytes_written = net_os_rxfilter_add_remove(net, FALSE, filter_num);
+ else {
+ ANDROID_ERROR(("could not get ibss peer info : no item\n"));
}
-#endif /* PKT_FILTER_SUPPORT */
- else if (strnicmp(command, CMD_BTCOEXSCAN_START, strlen(CMD_BTCOEXSCAN_START)) == 0) {
- /* TBD: BTCOEXSCAN-START */
+ bytes_written += sprintf(&command[bytes_written], "%s", "\0");
+
+ ANDROID_TRACE(("command(%u):%s\n", total_len, command));
+ ANDROID_TRACE(("bytes_written:%d\n", bytes_written));
+
+ kfree(buf);
+ return bytes_written;
+}
+
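+/*
+ * Illustrative only: the reply format implied by the sprintf() calls above.
+ * GETIBSSPEERINFOALL starts with the peer count, GETIBSSPEERINFO reports just
+ * the matching peer; each entry is "<MAC> <tx_rate/1000> <RSSI> ", e.g.
+ * (assumed values)
+ *
+ *   "2 00:11:22:33:44:55 54 -45 66:77:88:99:aa:bb 6 -70 "
+ */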
+int wl_android_set_ibss_routetable(struct net_device *dev, char *command, int total_len)
+{
+
+ char *pcmd = command;
+ char *str = NULL;
+
+ ibss_route_tbl_t *route_tbl = NULL;
+ char *ioctl_buf = NULL;
+ u16 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+ s32 err = BCME_OK;
+ uint32 route_tbl_len;
+ uint32 entries;
+ char *endptr;
+ uint32 i = 0;
+ struct ipv4_addr dipaddr;
+ struct ether_addr ea;
+
+ route_tbl_len = sizeof(ibss_route_tbl_t) +
+ (MAX_IBSS_ROUTE_TBL_ENTRY - 1) * sizeof(ibss_route_entry_t);
+ route_tbl = (ibss_route_tbl_t *)kzalloc(route_tbl_len, kflags);
+ if (!route_tbl) {
+ ANDROID_ERROR(("Route TBL alloc failed\n"));
+ return -ENOMEM;
}
- else if (strnicmp(command, CMD_BTCOEXSCAN_STOP, strlen(CMD_BTCOEXSCAN_STOP)) == 0) {
- /* TBD: BTCOEXSCAN-STOP */
+ ioctl_buf = kzalloc(WLC_IOCTL_MEDLEN, GFP_KERNEL);
+ if (!ioctl_buf) {
+ ANDROID_ERROR(("ioctl memory alloc failed\n"));
+ if (route_tbl) {
+ kfree(route_tbl);
+ }
+ return -ENOMEM;
}
- else if (strnicmp(command, CMD_BTCOEXMODE, strlen(CMD_BTCOEXMODE)) == 0) {
-#ifdef WL_CFG80211
- bytes_written = wl_cfg80211_set_btcoex_dhcp(net, command);
-#else
-#ifdef PKT_FILTER_SUPPORT
- uint mode = *(command + strlen(CMD_BTCOEXMODE) + 1) - '0';
+ memset(ioctl_buf, 0, WLC_IOCTL_MEDLEN);
- if (mode == 1)
- net_os_enable_packet_filter(net, 0); /* DHCP starts */
- else
- net_os_enable_packet_filter(net, 1); /* DHCP ends */
-#endif /* PKT_FILTER_SUPPORT */
-#endif /* WL_CFG80211 */
- }
- else if (strnicmp(command, CMD_SETSUSPENDOPT, strlen(CMD_SETSUSPENDOPT)) == 0) {
- bytes_written = wl_android_set_suspendopt(net, command, priv_cmd.total_len);
+ /* drop command */
+ str = bcmstrtok(&pcmd, " ", NULL);
+
+ /* get count */
+ str = bcmstrtok(&pcmd, " ", NULL);
+ if (!str) {
+ ANDROID_ERROR(("Invalid number parameter %s\n", str));
+ err = -EINVAL;
+ goto exit;
}
- else if (strnicmp(command, CMD_SETSUSPENDMODE, strlen(CMD_SETSUSPENDMODE)) == 0) {
- bytes_written = wl_android_set_suspendmode(net, command, priv_cmd.total_len);
+ entries = bcm_strtoul(str, &endptr, 0);
+ if (*endptr != '\0') {
+ ANDROID_ERROR(("Invalid number parameter %s\n", str));
+ err = -EINVAL;
+ goto exit;
}
- else if (strnicmp(command, CMD_SETBAND, strlen(CMD_SETBAND)) == 0) {
- uint band = *(command + strlen(CMD_SETBAND) + 1) - '0';
+ ANDROID_INFO(("Routing table count:%d\n", entries));
+ route_tbl->num_entry = entries;
- if (dhd_conf_get_band(bcmsdh_get_drvdata()) != WLC_BAND_AUTO) {
- printf("%s: Band is fixed in config.txt\n", __FUNCTION__);
+ for (i = 0; i < entries; i++) {
+ str = bcmstrtok(&pcmd, " ", NULL);
+ if (!str || !bcm_atoipv4(str, &dipaddr)) {
+ ANDROID_ERROR(("Invalid ip string %s\n", str));
+ err = -EINVAL;
goto exit;
}
-#ifdef WL_HOST_BAND_MGMT
- s32 ret = 0;
- if ((ret = wl_cfg80211_set_band(net, band)) < 0) {
- if (ret == BCME_UNSUPPORTED) {
- /* If roam_var is unsupported, fallback to the original method */
- ANDROID_ERROR(("WL_HOST_BAND_MGMT defined, "
- "but roam_band iovar unsupported in the firmware\n"));
+
+ str = bcmstrtok(&pcmd, " ", NULL);
+ if (!str || !bcm_ether_atoe(str, &ea)) {
+ ANDROID_ERROR(("Invalid ethernet string %s\n", str));
+ err = -EINVAL;
+ goto exit;
+ }
+ bcopy(&dipaddr, &route_tbl->route_entry[i].ipv4_addr, IPV4_ADDR_LEN);
+ bcopy(&ea, &route_tbl->route_entry[i].nexthop, ETHER_ADDR_LEN);
+ }
+
+ route_tbl_len = sizeof(ibss_route_tbl_t) +
+ ((!entries?0:(entries - 1)) * sizeof(ibss_route_entry_t));
+ err = wldev_iovar_setbuf(dev, "ibss_route_tbl",
+ route_tbl, route_tbl_len, ioctl_buf, WLC_IOCTL_MEDLEN, NULL);
+ if (err != BCME_OK) {
+ ANDROID_ERROR(("Fail to set iovar %d\n", err));
+ err = -EINVAL;
+ }
+
+exit:
+ if (route_tbl)
+ kfree(route_tbl);
+ if (ioctl_buf)
+ kfree(ioctl_buf);
+ return err;
+
+}
+
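+/*
+ * Illustrative only: the command layout implied by the tokenizing above
+ * (example values assumed):
+ *
+ *   CMD_SETIBSSROUTETABLE 2 192.168.1.10 00:11:22:33:44:55 192.168.1.11 66:77:88:99:aa:bb
+ *
+ * i.e. an entry count followed by <ipv4-addr> <next-hop-mac> pairs, packed
+ * into ibss_route_tbl_t and pushed with the "ibss_route_tbl" iovar.
+ */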
+int
+wl_android_set_ibss_ampdu(struct net_device *dev, char *command, int total_len)
+{
+ char *pcmd = command;
+ char *str = NULL, *endptr = NULL;
+ struct ampdu_aggr aggr;
+ char smbuf[WLC_IOCTL_SMLEN];
+ int idx;
+ int err = 0;
+ int wme_AC2PRIO[AC_COUNT][2] = {
+ {PRIO_8021D_VO, PRIO_8021D_NC}, /* AC_VO - 3 */
+ {PRIO_8021D_CL, PRIO_8021D_VI}, /* AC_VI - 2 */
+ {PRIO_8021D_BK, PRIO_8021D_NONE}, /* AC_BK - 1 */
+ {PRIO_8021D_BE, PRIO_8021D_EE}}; /* AC_BE - 0 */
+
+ ANDROID_TRACE(("set ibss ampdu:%s\n", command));
+
+ memset(&aggr, 0, sizeof(aggr));
+ /* Configure all priorities */
+ aggr.conf_TID_bmap = NBITMASK(NUMPRIO);
+
+ /* acquire parameters */
+ /* drop command */
+ str = bcmstrtok(&pcmd, " ", NULL);
+
+ for (idx = 0; idx < AC_COUNT; idx++) {
+ bool on;
+ str = bcmstrtok(&pcmd, " ", NULL);
+ if (!str) {
+ ANDROID_ERROR(("Invalid parameter : %s\n", pcmd));
+ return -EINVAL;
+ }
+ on = bcm_strtoul(str, &endptr, 0) ? TRUE : FALSE;
+ if (*endptr != '\0') {
+ ANDROID_ERROR(("Invalid number format %s\n", str));
+ return -EINVAL;
+ }
+ if (on) {
+ setbit(&aggr.enab_TID_bmap, wme_AC2PRIO[idx][0]);
+ setbit(&aggr.enab_TID_bmap, wme_AC2PRIO[idx][1]);
+ }
+ }
+
+ err = wldev_iovar_setbuf(dev, "ampdu_txaggr", (void *)&aggr,
+ sizeof(aggr), smbuf, WLC_IOCTL_SMLEN, NULL);
+
+ return ((err == 0) ? total_len : err);
+}
+
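+/*
+ * Illustrative only: the four on/off flags parsed above follow the order of
+ * the wme_AC2PRIO table, e.g. (assumed values)
+ *
+ *   CMD_SETIBSSAMPDU 1 1 0 0  ->  enable AMPDU TX aggregation for AC_VO and
+ *                                 AC_VI, disable it for AC_BK and AC_BE
+ *
+ * The flags are folded into a TID bitmap and applied via the "ampdu_txaggr"
+ * iovar.
+ */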
+int wl_android_set_ibss_antenna(struct net_device *dev, char *command, int total_len)
+{
+ char *pcmd = command;
+ char *str = NULL;
+ int txchain, rxchain;
+ int err = 0;
+
+ ANDROID_TRACE(("set ibss antenna:%s\n", command));
+
+ /* acquire parameters */
+ /* drop command */
+ str = bcmstrtok(&pcmd, " ", NULL);
+
+ /* TX chain */
+ str = bcmstrtok(&pcmd, " ", NULL);
+ if (!str) {
+ ANDROID_ERROR(("Invalid parameter : %s\n", pcmd));
+ return -EINVAL;
+ }
+ txchain = bcm_atoi(str);
+
+ /* RX chain */
+ str = bcmstrtok(&pcmd, " ", NULL);
+ if (!str) {
+ ANDROID_ERROR(("Invalid parameter : %s\n", pcmd));
+ return -EINVAL;
+ }
+ rxchain = bcm_atoi(str);
+
+ err = wldev_iovar_setint(dev, "txchain", txchain);
+ if (err != 0)
+ return err;
+ err = wldev_iovar_setint(dev, "rxchain", rxchain);
+ return ((err == 0)?total_len:err);
+}
+#endif /* WLAIBSS */
+
+int wl_keep_alive_set(struct net_device *dev, char* extra, int total_len)
+{
+ char buf[256];
+ const char *str;
+ wl_mkeep_alive_pkt_t mkeep_alive_pkt;
+ wl_mkeep_alive_pkt_t *mkeep_alive_pktp;
+ int buf_len;
+ int str_len;
+ int res = -1;
+ uint period_msec = 0;
+
+ if (extra == NULL)
+ {
+ ANDROID_ERROR(("%s: extra is NULL\n", __FUNCTION__));
+ return -1;
+ }
+ if (sscanf(extra, "%d", &period_msec) != 1)
+ {
+ ANDROID_ERROR(("%s: sscanf error. check period_msec value\n", __FUNCTION__));
+ return -EINVAL;
+ }
+ ANDROID_ERROR(("%s: period_msec is %d\n", __FUNCTION__, period_msec));
+
+ memset(&mkeep_alive_pkt, 0, sizeof(wl_mkeep_alive_pkt_t));
+
+ str = "mkeep_alive";
+ str_len = strlen(str);
+ strncpy(buf, str, str_len);
+ buf[ str_len ] = '\0';
+ mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) (buf + str_len + 1);
+ mkeep_alive_pkt.period_msec = period_msec;
+ buf_len = str_len + 1;
+ mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION);
+ mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN);
+
+ /* Setup keep alive zero for null packet generation */
+ mkeep_alive_pkt.keep_alive_id = 0;
+ mkeep_alive_pkt.len_bytes = 0;
+ buf_len += WL_MKEEP_ALIVE_FIXED_LEN;
+ /* Keep-alive attributes are set in local variable (mkeep_alive_pkt), and
+ * then memcpy'ed into buffer (mkeep_alive_pktp) since there is no
+ * guarantee that the buffer is properly aligned.
+ */
+ memcpy((char *)mkeep_alive_pktp, &mkeep_alive_pkt, WL_MKEEP_ALIVE_FIXED_LEN);
+
+ if ((res = wldev_ioctl(dev, WLC_SET_VAR, buf, buf_len, TRUE)) < 0)
+ {
+ ANDROID_ERROR(("%s:keep_alive set failed. res[%d]\n", __FUNCTION__, res));
+ }
+ else
+ {
+ ANDROID_ERROR(("%s:keep_alive set ok. res[%d]\n", __FUNCTION__, res));
+ }
+
+ return res;
+}
+
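+/*
+ * Illustrative only: the iovar buffer assembled in wl_keep_alive_set() above.
+ * The string "mkeep_alive" plus its NUL terminator is followed by the fixed
+ * part of wl_mkeep_alive_pkt_t; with keep_alive_id and len_bytes left at 0 the
+ * firmware transmits a null keep-alive frame every period_msec milliseconds.
+ *
+ *   | "mkeep_alive" '\0' | version | length | period_msec | id = 0 | len = 0 |
+ */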
+int
+wl_android_get_channel(
+struct net_device *dev, char* command, int total_len)
+{
+ int ret;
+ channel_info_t ci;
+ int bytes_written = 0;
+
+ if (!(ret = wldev_ioctl(dev, WLC_GET_CHANNEL, &ci, sizeof(channel_info_t), FALSE))) {
+ ANDROID_TRACE(("hw_channel %d\n", ci.hw_channel));
+ ANDROID_TRACE(("target_channel %d\n", ci.target_channel));
+ ANDROID_TRACE(("scan_channel %d\n", ci.scan_channel));
+ bytes_written = snprintf(command, sizeof(channel_info_t)+2, "channel %d", ci.hw_channel);
+ ANDROID_TRACE(("%s: command result is %s\n", __FUNCTION__, command));
+ }
+
+ return bytes_written;
+}
+
+int
+wl_android_set_roam_trigger(
+struct net_device *dev, char* command, int total_len)
+{
+ int ret = 0;
+ int roam_trigger[2];
+
+ sscanf(command, "%*s %10d", &roam_trigger[0]);
+ roam_trigger[1] = WLC_BAND_ALL;
+
+ ret = wldev_ioctl(dev, WLC_SET_ROAM_TRIGGER, roam_trigger, sizeof(roam_trigger), 1);
+ if (ret)
+ ANDROID_ERROR(("WLC_SET_ROAM_TRIGGER ERROR %d ret=%d\n", roam_trigger[0], ret));
+
+ return ret;
+}
+
+int
+wl_android_get_roam_trigger(
+struct net_device *dev, char *command, int total_len)
+{
+ int ret;
+ int bytes_written;
+ int roam_trigger[2] = {0, 0};
+ int trigger[2]= {0, 0};
+
+ roam_trigger[1] = WLC_BAND_2G;
+ ret = wldev_ioctl(dev, WLC_GET_ROAM_TRIGGER, roam_trigger, sizeof(roam_trigger), 0);
+ if (!ret)
+ trigger[0] = roam_trigger[0];
+ else
+ ANDROID_ERROR(("2G WLC_GET_ROAM_TRIGGER ERROR %d ret=%d\n", roam_trigger[0], ret));
+
+ roam_trigger[1] = WLC_BAND_5G;
+ ret = wldev_ioctl(dev, WLC_GET_ROAM_TRIGGER, roam_trigger, sizeof(roam_trigger), 0);
+ if (!ret)
+ trigger[1] = roam_trigger[0];
+ else
+ ANDROID_ERROR(("5G WLC_GET_ROAM_TRIGGER ERROR %d ret=%d\n", roam_trigger[0], ret));
+
+ ANDROID_TRACE(("roam_trigger %d %d\n", trigger[0], trigger[1]));
+ bytes_written = snprintf(command, total_len, "%d %d", trigger[0], trigger[1]);
+
+ return bytes_written;
+}
+
+s32
+wl_android_get_keep_alive(struct net_device *dev, char *command, int total_len)
+{
+ wl_mkeep_alive_pkt_t *mkeep_alive_pktp;
+ int bytes_written = -1;
+ int res = -1, len, i = 0;
+ char* str = "mkeep_alive";
+
+ ANDROID_TRACE(("%s: command = %s\n", __FUNCTION__, command));
+
+ len = WLC_IOCTL_MEDLEN;
+ mkeep_alive_pktp = kmalloc(len, GFP_KERNEL);
+ memset(mkeep_alive_pktp, 0, len);
+ strcpy((char*)mkeep_alive_pktp, str);
+
+ if ((res = wldev_ioctl(dev, WLC_GET_VAR, mkeep_alive_pktp, len, FALSE))<0) {
+ ANDROID_ERROR(("%s: GET mkeep_alive ERROR %d\n", __FUNCTION__, res));
+ goto exit;
+ } else {
+ printf("Id :%d\n"
+ "Period (msec) :%d\n"
+ "Length :%d\n"
+ "Packet :0x",
+ mkeep_alive_pktp->keep_alive_id,
+ dtoh32(mkeep_alive_pktp->period_msec),
+ dtoh16(mkeep_alive_pktp->len_bytes));
+ for (i=0; i<mkeep_alive_pktp->len_bytes; i++) {
+ printf("%02x", mkeep_alive_pktp->data[i]);
+ }
+ printf("\n");
+ }
+ bytes_written = snprintf(command, total_len, "mkeep_alive_period_msec %d ", dtoh32(mkeep_alive_pktp->period_msec));
+ bytes_written += snprintf(command+bytes_written, total_len, "0x");
+ for (i=0; i<mkeep_alive_pktp->len_bytes; i++) {
+ bytes_written += snprintf(command+bytes_written, total_len, "%x", mkeep_alive_pktp->data[i]);
+ }
+ ANDROID_TRACE(("%s: command result is %s\n", __FUNCTION__, command));
+
+exit:
+ kfree(mkeep_alive_pktp);
+ return bytes_written;
+}
+
+int
+wl_android_set_pm(struct net_device *dev, char *command, int total_len)
+{
+ int pm, ret = -1;
+
+ ANDROID_TRACE(("%s: cmd %s\n", __FUNCTION__, command));
+
+ sscanf(command, "%*s %d", &pm);
+
+ ret = wldev_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm), FALSE);
+ if (ret)
+ ANDROID_ERROR(("WLC_SET_PM ERROR %d ret=%d\n", pm, ret));
+
+ return ret;
+}
+
+int
+wl_android_get_pm(struct net_device *dev, char *command, int total_len)
+{
+
+ int ret = 0;
+ int pm_local;
+ char *pm;
+ int bytes_written=-1;
+
+ ret = wldev_ioctl(dev, WLC_GET_PM, &pm_local, sizeof(pm_local),FALSE);
+ if (!ret) {
+ ANDROID_TRACE(("%s: PM = %d\n", __func__, pm_local));
+ if (pm_local == PM_OFF)
+ pm = "PM_OFF";
+ else if(pm_local == PM_MAX)
+ pm = "PM_MAX";
+ else if(pm_local == PM_FAST)
+ pm = "PM_FAST";
+ else {
+ pm_local = 0;
+ pm = "Invalid";
+ }
+ bytes_written = snprintf(command, total_len, "PM %s", pm);
+ ANDROID_TRACE(("%s: command result is %s\n", __FUNCTION__, command));
+ }
+ return bytes_written;
+}
+
+static int
+wl_android_set_monitor(struct net_device *dev, char *command, int total_len)
+{
+ int val;
+ int ret = 0;
+ int bytes_written;
+
+ sscanf(command, "%*s %d", &val);
+ bytes_written = wldev_ioctl(dev, WLC_SET_MONITOR, &val, sizeof(int), 1);
+ if (bytes_written)
+ ANDROID_ERROR(("WLC_SET_MONITOR ERROR %d ret=%d\n", val, ret));
+ return bytes_written;
+}
+
+int wl_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd)
+{
+#define PRIVATE_COMMAND_MAX_LEN 8192
+ int ret = 0;
+ char *command = NULL;
+ int bytes_written = 0;
+ android_wifi_priv_cmd priv_cmd;
+
+ net_os_wake_lock(net);
+
+ if (!ifr->ifr_data) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
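+ /*
+  * A 32-bit user space on a 64-bit kernel hands in a struct holding a
+  * 32-bit buffer pointer, so the request is first read into
+  * compat_android_wifi_priv_cmd and the pointer is widened with
+  * compat_ptr() before the command buffer is copied in.
+  */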
+#ifdef CONFIG_COMPAT
+ if (is_compat_task()) {
+ compat_android_wifi_priv_cmd compat_priv_cmd;
+ if (copy_from_user(&compat_priv_cmd, ifr->ifr_data,
+ sizeof(compat_android_wifi_priv_cmd))) {
+ ret = -EFAULT;
+ goto exit;
+
+ }
+ priv_cmd.buf = compat_ptr(compat_priv_cmd.buf);
+ priv_cmd.used_len = compat_priv_cmd.used_len;
+ priv_cmd.total_len = compat_priv_cmd.total_len;
+ } else
+#endif /* CONFIG_COMPAT */
+ {
+ if (copy_from_user(&priv_cmd, ifr->ifr_data, sizeof(android_wifi_priv_cmd))) {
+ ret = -EFAULT;
+ goto exit;
+ }
+ }
+ if ((priv_cmd.total_len > PRIVATE_COMMAND_MAX_LEN) || (priv_cmd.total_len < 0)) {
+ ANDROID_ERROR(("%s: too long priavte command\n", __FUNCTION__));
+ ret = -EINVAL;
+ goto exit;
+ }
+ command = kmalloc((priv_cmd.total_len + 1), GFP_KERNEL);
+ if (!command)
+ {
+ ANDROID_ERROR(("%s: failed to allocate memory\n", __FUNCTION__));
+ ret = -ENOMEM;
+ goto exit;
+ }
+ if (copy_from_user(command, priv_cmd.buf, priv_cmd.total_len)) {
+ ret = -EFAULT;
+ goto exit;
+ }
+ command[priv_cmd.total_len] = '\0';
+
+ ANDROID_INFO(("%s: Android private cmd \"%s\" on %s\n", __FUNCTION__, command, ifr->ifr_name));
+
+ if (strnicmp(command, CMD_START, strlen(CMD_START)) == 0) {
+ ANDROID_INFO(("%s, Received regular START command\n", __FUNCTION__));
+ bytes_written = wl_android_wifi_on(net);
+ }
+ else if (strnicmp(command, CMD_SETFWPATH, strlen(CMD_SETFWPATH)) == 0) {
+ bytes_written = wl_android_set_fwpath(net, command, priv_cmd.total_len);
+ }
+
+ if (!g_wifi_on) {
+ ANDROID_ERROR(("%s: Ignore private cmd \"%s\" - iface %s is down\n",
+ __FUNCTION__, command, ifr->ifr_name));
+ ret = 0;
+ goto exit;
+ }
+
+ if (strnicmp(command, CMD_STOP, strlen(CMD_STOP)) == 0) {
+ bytes_written = wl_android_wifi_off(net);
+ }
+ else if (strnicmp(command, CMD_SCAN_ACTIVE, strlen(CMD_SCAN_ACTIVE)) == 0) {
+ /* TBD: SCAN-ACTIVE */
+ }
+ else if (strnicmp(command, CMD_SCAN_PASSIVE, strlen(CMD_SCAN_PASSIVE)) == 0) {
+ /* TBD: SCAN-PASSIVE */
+ }
+ else if (strnicmp(command, CMD_RSSI, strlen(CMD_RSSI)) == 0) {
+ bytes_written = wl_android_get_rssi(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_LINKSPEED, strlen(CMD_LINKSPEED)) == 0) {
+ bytes_written = wl_android_get_link_speed(net, command, priv_cmd.total_len);
+ }
+#ifdef PKT_FILTER_SUPPORT
+ else if (strnicmp(command, CMD_RXFILTER_START, strlen(CMD_RXFILTER_START)) == 0) {
+ bytes_written = net_os_enable_packet_filter(net, 1);
+ }
+ else if (strnicmp(command, CMD_RXFILTER_STOP, strlen(CMD_RXFILTER_STOP)) == 0) {
+ bytes_written = net_os_enable_packet_filter(net, 0);
+ }
+ else if (strnicmp(command, CMD_RXFILTER_ADD, strlen(CMD_RXFILTER_ADD)) == 0) {
+ int filter_num = *(command + strlen(CMD_RXFILTER_ADD) + 1) - '0';
+ bytes_written = net_os_rxfilter_add_remove(net, TRUE, filter_num);
+ }
+ else if (strnicmp(command, CMD_RXFILTER_REMOVE, strlen(CMD_RXFILTER_REMOVE)) == 0) {
+ int filter_num = *(command + strlen(CMD_RXFILTER_REMOVE) + 1) - '0';
+ bytes_written = net_os_rxfilter_add_remove(net, FALSE, filter_num);
+ }
+#endif /* PKT_FILTER_SUPPORT */
+ else if (strnicmp(command, CMD_BTCOEXSCAN_START, strlen(CMD_BTCOEXSCAN_START)) == 0) {
+ /* TBD: BTCOEXSCAN-START */
+ }
+ else if (strnicmp(command, CMD_BTCOEXSCAN_STOP, strlen(CMD_BTCOEXSCAN_STOP)) == 0) {
+ /* TBD: BTCOEXSCAN-STOP */
+ }
+ else if (strnicmp(command, CMD_BTCOEXMODE, strlen(CMD_BTCOEXMODE)) == 0) {
+#ifdef WL_CFG80211
+ void *dhdp = wl_cfg80211_get_dhdp();
+ bytes_written = wl_cfg80211_set_btcoex_dhcp(net, dhdp, command);
+#else
+#ifdef PKT_FILTER_SUPPORT
+ uint mode = *(command + strlen(CMD_BTCOEXMODE) + 1) - '0';
+
+ if (mode == 1)
+ net_os_enable_packet_filter(net, 0); /* DHCP starts */
+ else
+ net_os_enable_packet_filter(net, 1); /* DHCP ends */
+#endif /* PKT_FILTER_SUPPORT */
+#endif /* WL_CFG80211 */
+ }
+ else if (strnicmp(command, CMD_SETSUSPENDOPT, strlen(CMD_SETSUSPENDOPT)) == 0) {
+ bytes_written = wl_android_set_suspendopt(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_SETSUSPENDMODE, strlen(CMD_SETSUSPENDMODE)) == 0) {
+ bytes_written = wl_android_set_suspendmode(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_SETBAND, strlen(CMD_SETBAND)) == 0) {
+ uint band = *(command + strlen(CMD_SETBAND) + 1) - '0';
+#ifdef WL_HOST_BAND_MGMT
+ s32 ret = 0;
+ if ((ret = wl_cfg80211_set_band(net, band)) < 0) {
+ if (ret == BCME_UNSUPPORTED) {
+ /* If roam_var is unsupported, fallback to the original method */
+ ANDROID_ERROR(("WL_HOST_BAND_MGMT defined, "
+ "but roam_band iovar unsupported in the firmware\n"));
} else {
bytes_written = -1;
goto exit;
}
#ifdef WL_CFG80211
/* CUSTOMER_SET_COUNTRY feature is defined only for the GGSM model */
- else if (strnicmp(command, CMD_COUNTRY, strlen(CMD_COUNTRY)) == 0) {
- char *country_code = command + strlen(CMD_COUNTRY) + 1;
- bytes_written = wldev_set_country(net, country_code, true, true);
- }
+ //else if (strnicmp(command, CMD_COUNTRY, strlen(CMD_COUNTRY)) == 0) {
+ // char *country_code = command + strlen(CMD_COUNTRY) + 1;
+ // bytes_written = wldev_set_country(net, country_code, true, true);
+ //}
#endif /* WL_CFG80211 */
+
#ifdef PNO_SUPPORT
else if (strnicmp(command, CMD_PNOSSIDCLR_SET, strlen(CMD_PNOSSIDCLR_SET)) == 0) {
bytes_written = dhd_dev_pno_stop_for_ssid(net);
bytes_written = wl_cfg80211_sd_offload(net, cmd_id, buf, len);
}
#endif /* WL_SDO */
+#ifdef WL_NAN
+ else if (strnicmp(command, CMD_NAN, strlen(CMD_NAN)) == 0) {
+ bytes_written = wl_cfg80211_nan_cmd_handler(net, command,
+ priv_cmd.total_len);
+ }
+#endif /* WL_NAN */
#if !defined WL_ENABLE_P2P_IF
else if (strnicmp(command, CMD_P2P_GET_NOA, strlen(CMD_P2P_GET_NOA)) == 0) {
bytes_written = wl_cfg80211_get_p2p_noa(net, command, priv_cmd.total_len);
bytes_written = wl_cfg80211_set_wps_p2p_ie(net, command + skip,
priv_cmd.total_len - skip, *(command + skip - 2) - '0');
}
+#ifdef WLFBT
+ else if (strnicmp(command, CMD_GET_FTKEY, strlen(CMD_GET_FTKEY)) == 0) {
+ wl_cfg80211_get_fbt_key(command);
+ bytes_written = FBT_KEYLEN;
+ }
+#endif /* WLFBT */
#endif /* WL_CFG80211 */
else if (strnicmp(command, CMD_OKC_SET_PMK, strlen(CMD_OKC_SET_PMK)) == 0)
bytes_written = wl_android_set_pmk(net, command, priv_cmd.total_len);
}
else if (strnicmp(command, CMD_SETROAMMODE, strlen(CMD_SETROAMMODE)) == 0)
bytes_written = wl_android_set_roam_mode(net, command, priv_cmd.total_len);
+#if defined(BCMFW_ROAM_ENABLE)
+ else if (strnicmp(command, CMD_SET_ROAMPREF, strlen(CMD_SET_ROAMPREF)) == 0) {
+ bytes_written = wl_android_set_roampref(net, command, priv_cmd.total_len);
+ }
+#endif /* BCMFW_ROAM_ENABLE */
+#ifdef WL_CFG80211
else if (strnicmp(command, CMD_MIRACAST, strlen(CMD_MIRACAST)) == 0)
bytes_written = wl_android_set_miracast(net, command, priv_cmd.total_len);
- else if (strnicmp(command, CMD_SETIBSSBEACONOUIDATA,
- strlen(CMD_SETIBSSBEACONOUIDATA)) == 0)
- bytes_written = wl_android_set_ibss_beacon_ouidata(net, command,
+ else if (strnicmp(command, CMD_SETIBSSBEACONOUIDATA, strlen(CMD_SETIBSSBEACONOUIDATA)) == 0)
+ bytes_written = wl_android_set_ibss_beacon_ouidata(net,
+ command, priv_cmd.total_len);
+#endif
+#ifdef WLAIBSS
+ else if (strnicmp(command, CMD_SETIBSSTXFAILEVENT,
+ strlen(CMD_SETIBSSTXFAILEVENT)) == 0)
+ bytes_written = wl_android_set_ibss_txfail_event(net, command, priv_cmd.total_len);
+ else if (strnicmp(command, CMD_GET_IBSS_PEER_INFO_ALL,
+ strlen(CMD_GET_IBSS_PEER_INFO_ALL)) == 0)
+ bytes_written = wl_android_get_ibss_peer_info(net, command, priv_cmd.total_len,
+ TRUE);
+ else if (strnicmp(command, CMD_GET_IBSS_PEER_INFO,
+ strlen(CMD_GET_IBSS_PEER_INFO)) == 0)
+ bytes_written = wl_android_get_ibss_peer_info(net, command, priv_cmd.total_len,
+ FALSE);
+ else if (strnicmp(command, CMD_SETIBSSROUTETABLE,
+ strlen(CMD_SETIBSSROUTETABLE)) == 0)
+ bytes_written = wl_android_set_ibss_routetable(net, command,
priv_cmd.total_len);
- else {
+ else if (strnicmp(command, CMD_SETIBSSAMPDU, strlen(CMD_SETIBSSAMPDU)) == 0)
+ bytes_written = wl_android_set_ibss_ampdu(net, command, priv_cmd.total_len);
+ else if (strnicmp(command, CMD_SETIBSSANTENNAMODE, strlen(CMD_SETIBSSANTENNAMODE)) == 0)
+ bytes_written = wl_android_set_ibss_antenna(net, command, priv_cmd.total_len);
+#endif /* WLAIBSS */
+ else if (strnicmp(command, CMD_KEEP_ALIVE, strlen(CMD_KEEP_ALIVE)) == 0) {
+ int skip = strlen(CMD_KEEP_ALIVE) + 1;
+ bytes_written = wl_keep_alive_set(net, command + skip, priv_cmd.total_len - skip);
+ }
+#ifdef WL_CFG80211
+ else if (strnicmp(command, CMD_ROAM_OFFLOAD, strlen(CMD_ROAM_OFFLOAD)) == 0) {
+ int enable = *(command + strlen(CMD_ROAM_OFFLOAD) + 1) - '0';
+ bytes_written = wl_cfg80211_enable_roam_offload(net, enable);
+ }
+#endif
+ else if(strnicmp(command, CMD_GET_CHANNEL, strlen(CMD_GET_CHANNEL)) == 0) {
+ bytes_written = wl_android_get_channel(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_SET_ROAM, strlen(CMD_SET_ROAM)) == 0) {
+ bytes_written = wl_android_set_roam_trigger(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_GET_ROAM, strlen(CMD_GET_ROAM)) == 0) {
+ bytes_written = wl_android_get_roam_trigger(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_GET_KEEP_ALIVE, strlen(CMD_GET_KEEP_ALIVE)) == 0) {
+ int skip = strlen(CMD_GET_KEEP_ALIVE) + 1;
+ bytes_written = wl_android_get_keep_alive(net, command+skip, priv_cmd.total_len-skip);
+ }
+ else if (strnicmp(command, CMD_GET_PM, strlen(CMD_GET_PM)) == 0) {
+ bytes_written = wl_android_get_pm(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_SET_PM, strlen(CMD_SET_PM)) == 0) {
+ bytes_written = wl_android_set_pm(net, command, priv_cmd.total_len);
+ }
+ else if (strnicmp(command, CMD_MONITOR, strlen(CMD_MONITOR)) == 0) {
+ bytes_written = wl_android_set_monitor(net, command, priv_cmd.total_len);
+ } else {
ANDROID_ERROR(("Unknown PRIVATE command %s - ignored\n", command));
snprintf(command, 3, "OK");
bytes_written = strlen("OK");
{
int ret = 0;
- dhd_msg_level |= DHD_ERROR_VAL;
#ifdef ENABLE_INSMOD_NO_FW_LOAD
dhd_download_fw_on_driverload = FALSE;
#endif /* ENABLE_INSMOD_NO_FW_LOAD */
#ifdef WL_GENL
wl_genl_init();
#endif
+ wl_netlink_init();
return ret;
}
int wl_android_exit(void)
{
int ret = 0;
+ struct io_cfg *cur, *q;
#ifdef WL_GENL
wl_genl_deinit();
#endif /* WL_GENL */
+ wl_netlink_deinit();
+
+ list_for_each_entry_safe(cur, q, &miracast_resume_list, list) {
+ list_del(&cur->list);
+ kfree(cur);
+ }
return ret;
}
printk("%s: btlock released\n", __FUNCTION__);
#endif /* ENABLE_4335BT_WAR */
- if (!dhd_download_fw_on_driverload) {
- sdioh_stop(NULL);
- /* Call customer gpio to turn off power with WL_REG_ON signal */
- dhd_customer_gpio_wlan_ctrl(WLAN_RESET_OFF);
+ if (!dhd_download_fw_on_driverload)
g_wifi_on = FALSE;
- }
}
#ifdef WL_GENL
ANDROID_TRACE(("GEN Netlink Init\n\n"));
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
/* register new family */
ret = genl_register_family(&wl_genl_family);
if (ret != 0)
}
ret = genl_register_mc_group(&wl_genl_family, &wl_genl_mcast);
+#else
+ ret = genl_register_family_with_ops_groups(&wl_genl_family, wl_genl_ops, wl_genl_mcast);
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) */
if (ret != 0) {
ANDROID_ERROR(("register mc_group failed: %i\n", ret));
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
genl_unregister_ops(&wl_genl_family, &wl_genl_ops);
+#endif
genl_unregister_family(&wl_genl_family);
goto failure;
}
/* Generic netlink deinit */
static int wl_genl_deinit(void)
{
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
if (genl_unregister_ops(&wl_genl_family, &wl_genl_ops) < 0)
ANDROID_ERROR(("Unregister wl_genl_ops failed\n"));
-
+#endif
if (genl_unregister_family(&wl_genl_family) < 0)
ANDROID_ERROR(("Unregister wl_genl_ops failed\n"));
event = BCM_E_DEV_LOST;
break;
/* Above events are supported from BCM Supp ver 47 Onwards */
+#ifdef BT_WIFI_HANDOVER
+ case WLC_E_BT_WIFI_HANDOVER_REQ:
+ event = BCM_E_DEV_BT_WIFI_HO_REQ;
+ break;
+#endif /* BT_WIFI_HANDOVER */
default:
ANDROID_ERROR(("Event not supported\n"));
u16 subhdr_len)
{
int ret = 0;
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
void *msg;
u32 attr_type = 0;
bcm_event_hdr_t *hdr = NULL;
/* finalize the message */
genlmsg_end(skb, msg);
/* NETLINK_CB(skb).dst_group = 1; */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)
if ((err = genlmsg_multicast(skb, 0, wl_genl_mcast.id, GFP_ATOMIC)) < 0)
+#else
+ if ((err = genlmsg_multicast(&wl_genl_family, skb, 0, 0, GFP_ATOMIC)) < 0)
+#endif
ANDROID_ERROR(("genlmsg_multicast for attr(%d) failed. Error:%d \n",
attr_type, err));
else
return -EINVAL;
} else {
/* Handle the data */
-#if !defined(WL_CFG80211_P2P_DEV_IF) && (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)) || defined(WL_COMPAT_WIRELESS)
ANDROID_TRACE(("%s: Data received from pid (%d) \n", __func__,
info->snd_pid));
#else
ANDROID_TRACE(("%s: Data received from pid (%d) \n", __func__,
info->snd_portid));
-#endif /* !WL_CFG80211_P2P_DEV_IF && (LINUX_VERSION < VERSION(3, 6, 0) */
+#endif /* (LINUX_VERSION < VERSION(3, 7, 0)) || WL_COMPAT_WIRELESS */
}
return 0;
}
#endif /* WL_GENL */
-extern void *rockchip_mem_prealloc(int section, unsigned long size);
-void* wl_android_prealloc(int section, unsigned long size)
-{
- void *alloc_ptr = NULL;
- //if (wifi_control_data && wifi_control_data->mem_prealloc) {
- alloc_ptr = rockchip_mem_prealloc(section, size);
- if (alloc_ptr) {
- ANDROID_INFO(("success alloc section %d\n", section));
- if (size != 0L)
- bzero(alloc_ptr, size);
- return alloc_ptr;
- }
- //}
-
- ANDROID_ERROR(("can't alloc section %d\n", section));
- return NULL;
-}
-
-/**
- * Functions for Android WiFi card detection
- */
-#if defined(CONFIG_WIFI_CONTROL_FUNC)
-
-bool g_wifi_poweron = FALSE;
-static int g_wifidev_registered = 0;
-static struct semaphore wifi_control_sem;
-static struct wifi_platform_data *wifi_control_data = NULL;
-static struct resource *wifi_irqres = NULL;
-static struct regulator *wifi_regulator = NULL;
-
-static int wifi_add_dev(void);
-static void wifi_del_dev(void);
-
-int wl_android_wifictrl_func_add(void)
-{
- int ret = 0;
- sema_init(&wifi_control_sem, 0);
-
- ret = wifi_add_dev();
- if (ret) {
- ANDROID_ERROR(("%s: platform_driver_register failed\n", __FUNCTION__));
- return ret;
- }
- g_wifidev_registered = 1;
-
- /* Waiting callback after platform_driver_register is done or exit with error */
- if (down_timeout(&wifi_control_sem, msecs_to_jiffies(1000)) != 0) {
- ret = -EINVAL;
- ANDROID_ERROR(("%s: platform_driver_register timeout\n", __FUNCTION__));
- }
-
- return ret;
-}
-
-void wl_android_wifictrl_func_del(void)
-{
- if (g_wifidev_registered)
- {
- wifi_del_dev();
- g_wifidev_registered = 0;
- }
-}
-
-void* wl_android_prealloc(int section, unsigned long size)
-{
- void *alloc_ptr = NULL;
- if (wifi_control_data && wifi_control_data->mem_prealloc) {
- alloc_ptr = wifi_control_data->mem_prealloc(section, size);
- if (alloc_ptr) {
- ANDROID_INFO(("success alloc section %d\n", section));
- if (size != 0L)
- bzero(alloc_ptr, size);
- return alloc_ptr;
- }
- }
-
- ANDROID_ERROR(("can't alloc section %d\n", section));
- return NULL;
-}
-
-int wifi_get_irq_number(unsigned long *irq_flags_ptr)
-{
- if (wifi_irqres) {
- *irq_flags_ptr = wifi_irqres->flags & IRQF_TRIGGER_MASK;
- return (int)wifi_irqres->start;
- }
-#ifdef CUSTOM_OOB_GPIO_NUM
- return CUSTOM_OOB_GPIO_NUM;
-#else
- return -1;
-#endif
-}
-
-int wifi_set_power(int on, unsigned long msec)
-{
- int ret = 0;
- ANDROID_ERROR(("%s = %d\n", __FUNCTION__, on));
- if (wifi_regulator && on)
- ret = regulator_enable(wifi_regulator);
- if (wifi_control_data && wifi_control_data->set_power) {
-#ifdef ENABLE_4335BT_WAR
- if (on) {
- printk("WiFi: trying to acquire BT lock\n");
- if (bcm_bt_lock(lock_cookie_wifi) != 0)
- printk("** WiFi: timeout in acquiring bt lock**\n");
- printk("%s: btlock acquired\n", __FUNCTION__);
- }
- else {
- /* For a exceptional case, release btlock */
- bcm_bt_unlock(lock_cookie_wifi);
- }
-#endif /* ENABLE_4335BT_WAR */
- ret = wifi_control_data->set_power(on);
- }
-
- if (wifi_regulator && !on)
- ret = regulator_disable(wifi_regulator);
-
- if (msec && !ret)
- OSL_SLEEP(msec);
- return ret;
-}
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
-int wifi_get_mac_addr(unsigned char *buf)
-{
- ANDROID_ERROR(("%s\n", __FUNCTION__));
- if (!buf)
- return -EINVAL;
- if (wifi_control_data && wifi_control_data->get_mac_addr) {
- return wifi_control_data->get_mac_addr(buf);
- }
- return -EOPNOTSUPP;
-}
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)) */
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39))
-void *wifi_get_country_code(char *ccode)
-{
- ANDROID_TRACE(("%s\n", __FUNCTION__));
- if (!ccode)
- return NULL;
- if (wifi_control_data && wifi_control_data->get_country_code) {
- return wifi_control_data->get_country_code(ccode);
- }
- return NULL;
-}
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)) */
-
-static int wifi_set_carddetect(int on)
-{
- ANDROID_ERROR(("%s = %d\n", __FUNCTION__, on));
- if (wifi_control_data && wifi_control_data->set_carddetect) {
- wifi_control_data->set_carddetect(on);
- }
- return 0;
-}
-
-static struct resource *get_wifi_irqres_from_of(struct platform_device *pdev)
-{
- static struct resource gpio_wifi_irqres;
- int irq;
- int gpio = of_get_gpio(pdev->dev.of_node, 0);
- if (gpio < 0)
- return NULL;
- irq = gpio_to_irq(gpio);
- if (irq < 0)
- return NULL;
-
- gpio_wifi_irqres.name = "bcmdhd_wlan_irq";
- gpio_wifi_irqres.start = irq;
- gpio_wifi_irqres.end = irq;
- gpio_wifi_irqres.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL |
- IORESOURCE_IRQ_SHAREABLE;
-
- return &gpio_wifi_irqres;
-}
-
-static int wifi_probe(struct platform_device *pdev)
-{
- int err;
- struct regulator *regulator;
- struct wifi_platform_data *wifi_ctrl =
- (struct wifi_platform_data *)(pdev->dev.platform_data);
-
- if (!wifi_ctrl) {
- regulator = regulator_get(&pdev->dev, "wlreg_on");
- if (IS_ERR(regulator))
- return PTR_ERR(regulator);
- wifi_regulator = regulator;
- }
-
- wifi_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "bcmdhd_wlan_irq");
- if (wifi_irqres == NULL)
- wifi_irqres = platform_get_resource_byname(pdev,
- IORESOURCE_IRQ, "bcm4329_wlan_irq");
- if (wifi_irqres == NULL)
- wifi_irqres = get_wifi_irqres_from_of(pdev);
- wifi_control_data = wifi_ctrl;
- err = wifi_set_power(1, 200); /* Power On */
- if (unlikely(err)) {
- ANDROID_ERROR(("%s: set_power failed. err=%d\n", __FUNCTION__, err));
- wifi_set_power(0, WIFI_TURNOFF_DELAY);
- /* WL_REG_ON state unknown, Power off forcely */
- } else {
- wifi_set_carddetect(1); /* CardDetect (0->1) */
- g_wifi_poweron = TRUE;
- }
-
- up(&wifi_control_sem);
- return 0;
-}
-
-static int wifi_remove(struct platform_device *pdev)
-{
- struct wifi_platform_data *wifi_ctrl =
- (struct wifi_platform_data *)(pdev->dev.platform_data);
- struct io_cfg *cur, *q;
-
- ANDROID_ERROR(("## %s\n", __FUNCTION__));
- wifi_control_data = wifi_ctrl;
-
- if (g_wifi_poweron) {
- wifi_set_power(0, WIFI_TURNOFF_DELAY); /* Power Off */
- wifi_set_carddetect(0); /* CardDetect (1->0) */
- g_wifi_poweron = FALSE;
- list_for_each_entry_safe(cur, q, &miracast_resume_list, list) {
- list_del(&cur->list);
- kfree(cur);
- }
- }
- if (wifi_regulator) {
- regulator_put(wifi_regulator);
- wifi_regulator = NULL;
- }
-
- up(&wifi_control_sem);
- return 0;
-}
-
-static int wifi_suspend(struct platform_device *pdev, pm_message_t state)
-{
- ANDROID_TRACE(("##> %s\n", __FUNCTION__));
-#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 39)) && defined(OOB_INTR_ONLY) && 1
- bcmsdh_oob_intr_set(0);
-#endif /* (OOB_INTR_ONLY) */
- return 0;
-}
-
-static int wifi_resume(struct platform_device *pdev)
-{
- ANDROID_TRACE(("##> %s\n", __FUNCTION__));
-#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 39)) && defined(OOB_INTR_ONLY) && 1
- if (dhd_os_check_if_up(bcmsdh_get_drvdata()))
- bcmsdh_oob_intr_set(1);
-#endif /* (OOB_INTR_ONLY) */
- return 0;
-}
-
-static const struct of_device_id wifi_device_dt_match[] = {
- { .compatible = "android,bcmdhd_wlan", },
- {},
-};
-MODULE_DEVICE_TABLE(of, wifi_device_dt_match);
-
-static struct platform_driver wifi_device = {
- .probe = wifi_probe,
- .remove = wifi_remove,
- .suspend = wifi_suspend,
- .resume = wifi_resume,
- .driver = {
- .name = "bcmdhd_wlan",
- .of_match_table = wifi_device_dt_match,
- }
-};
-
-static struct platform_driver wifi_device_legacy = {
- .probe = wifi_probe,
- .remove = wifi_remove,
- .suspend = wifi_suspend,
- .resume = wifi_resume,
- .driver = {
- .name = "bcm4329_wlan",
- }
-};
-
-static int wifi_add_dev(void)
-{
- int ret = 0;
- ANDROID_TRACE(("## Calling platform_driver_register\n"));
- ret = platform_driver_register(&wifi_device);
- if (ret)
- return ret;
-
- ret = platform_driver_register(&wifi_device_legacy);
- return ret;
-}
-
-static void wifi_del_dev(void)
-{
- ANDROID_TRACE(("## Unregister platform_driver_register\n"));
- platform_driver_unregister(&wifi_device);
- platform_driver_unregister(&wifi_device_legacy);
-}
-#endif /* defined(CONFIG_WIFI_CONTROL_FUNC) */
-
#if defined(RSSIAVG)
void
{
wl_rssi_cache_t *node, *prev, **rssi_head;
int i = -1, tmp = 0;
-#if defined(BSSCACHE)
- int max = 0;
-#else
- int max = RSSICACHE_LEN;
-#endif
- max = min(max, RSSICACHE_LEN);
+ struct timeval now;
+
+ do_gettimeofday(&now);
rssi_head = &rssi_cache_ctrl->m_cache_head;
node = *rssi_head;
prev = node;
for (;node;) {
i++;
- if (node->dirty > max) {
+ if (now.tv_sec > node->tv.tv_sec) {
if (node == *rssi_head) {
tmp = 1;
*rssi_head = node->next;
int j, k=0;
int rssi, error=0;
struct ether_addr bssid;
+ struct timeval now, timeout;
if (!g_wifi_on)
return 0;
return error;
}
+ do_gettimeofday(&now);
+ timeout.tv_sec = now.tv_sec + RSSICACHE_TIMEOUT;
+ if (timeout.tv_sec < now.tv_sec) {
+ /*
+ * Integer overflow - assume the timeout is long enough to be treated
+ * as infinite, i.e., it will never expire.
+ */
+ ANDROID_TRACE(("%s: Too long timeout (secs=%d) to ever happen - now=%lu, timeout=%lu",
+ __FUNCTION__, RSSICACHE_TIMEOUT, now.tv_sec, timeout.tv_sec));
+ }
+
/* update RSSI */
rssi_head = &rssi_cache_ctrl->m_cache_head;
node = *rssi_head;
node->RSSI[j] = node->RSSI[j+1];
node->RSSI[j] = rssi;
node->dirty = 0;
+ node->tv = timeout;
goto exit;
}
prev = node;
leaf->next = NULL;
leaf->dirty = 0;
+ leaf->tv = timeout;
memcpy(&leaf->BSSID, &bssid, ETHER_ADDR_LEN);
for (j=0; j<RSSIAVG_LEN; j++)
leaf->RSSI[j] = rssi;
wl_rssi_cache_t *node, *prev, *leaf, **rssi_head;
wl_bss_info_t *bi = NULL;
int i, j, k;
+ struct timeval now, timeout;
if (!ss_list->count)
return;
+ do_gettimeofday(&now);
+ timeout.tv_sec = now.tv_sec + RSSICACHE_TIMEOUT;
+ if (timeout.tv_sec < now.tv_sec) {
+ /*
+ * Integer overflow - assume the timeout is long enough to be treated
+ * as infinite, i.e., it will never expire.
+ */
+ ANDROID_TRACE(("%s: Too long timeout (secs=%d) to ever happen - now=%lu, timeout=%lu",
+ __FUNCTION__, RSSICACHE_TIMEOUT, now.tv_sec, timeout.tv_sec));
+ }
+
rssi_head = &rssi_cache_ctrl->m_cache_head;
/* update RSSI */
node->RSSI[j] = node->RSSI[j+1];
node->RSSI[j] = dtoh16(bi->RSSI);
node->dirty = 0;
+ node->tv = timeout;
break;
}
prev = node;
leaf->next = NULL;
leaf->dirty = 0;
+ leaf->tv = timeout;
memcpy(&leaf->BSSID, &bi->BSSID, ETHER_ADDR_LEN);
for (j=0; j<RSSIAVG_LEN; j++)
leaf->RSSI[j] = dtoh16(bi->RSSI);
rssi_head = &rssi_cache_ctrl->m_cache_head;
- /* reset dirty */
node = *rssi_head;
for (;node;) {
if (!memcmp(&node->BSSID, addr, ETHER_ADDR_LEN)) {
#if defined(RSSIOFFSET)
int
-wl_update_rssi_offset(int rssi)
+wl_update_rssi_offset(struct net_device *net, int rssi)
{
uint chip, chiprev;
if (!g_wifi_on)
return rssi;
- chip = dhd_bus_chip_id(bcmsdh_get_drvdata());
- chiprev = dhd_bus_chiprev_id(bcmsdh_get_drvdata());
+ chip = dhd_conf_get_chip(dhd_get_pub(net));
+ chiprev = dhd_conf_get_chiprev(dhd_get_pub(net));
if (chip == BCM4330_CHIP_ID && chiprev == BCM4330B2_CHIP_REV) {
#if defined(RSSIOFFSET_NEW)
int j;
{
wl_bss_cache_t *node, *prev, **bss_head;
int i = -1, tmp = 0;
+ struct timeval now;
+
+ do_gettimeofday(&now);
bss_head = &bss_cache_ctrl->m_cache_head;
node = *bss_head;
prev = node;
for (;node;) {
i++;
- if (node->dirty > BSSCACHE_LEN) {
+ if (now.tv_sec > node->tv.tv_sec) {
if (node == *bss_head) {
tmp = 1;
*bss_head = node->next;
wl_bss_cache_t *node, *prev, *leaf, *tmp, **bss_head;
wl_bss_info_t *bi = NULL;
int i, k=0;
+ struct timeval now, timeout;
if (!ss_list->count)
return;
+ do_gettimeofday(&now);
+ timeout.tv_sec = now.tv_sec + BSSCACHE_TIMEOUT;
+ if (timeout.tv_sec < now.tv_sec) {
+ /*
+ * Integer overflow - assume the timeout is long enough to be treated
+ * as infinite, i.e., it will never expire.
+ */
+ ANDROID_TRACE(("%s: Too long timeout (secs=%d) to ever happen - now=%lu, timeout=%lu",
+ __FUNCTION__, BSSCACHE_TIMEOUT, now.tv_sec, timeout.tv_sec));
+ }
+
bss_head = &bss_cache_ctrl->m_cache_head;
for (i=0; i < ss_list->count; i++) {
memcpy(leaf->results.bss_info, bi, dtoh32(bi->length));
leaf->next = node->next;
leaf->dirty = 0;
+ leaf->tv = timeout;
leaf->results.count = 1;
leaf->results.version = ss_list->version;
ANDROID_TRACE(("%s: Update %d with BSSID %pM, RSSI=%d, SSID \"%s\", length=%d\n",
memcpy(leaf->results.bss_info, bi, dtoh32(bi->length));
leaf->next = NULL;
leaf->dirty = 0;
+ leaf->tv = timeout;
leaf->results.count = 1;
leaf->results.version = ss_list->version;
k++;
}
}
-void
-wl_run_bss_cache_timer(wl_bss_cache_ctrl_t *bss_cache_ctrl, int kick_off)
-{
- struct timer_list **timer;
-
- timer = &bss_cache_ctrl->m_timer;
-
- if (*timer) {
- if (kick_off) {
- (*timer)->expires = jiffies + BSSCACHE_TIME * HZ / 1000;
- add_timer(*timer);
- ANDROID_TRACE(("%s: timer starts\n", __FUNCTION__));
- } else {
- del_timer_sync(*timer);
- ANDROID_TRACE(("%s: timer stops\n", __FUNCTION__));
- }
- }
-}
-
-void
-wl_set_bss_cache_timer_flag(ulong data)
-{
- wl_bss_cache_ctrl_t *bss_cache_ctrl = (wl_bss_cache_ctrl_t *)data;
-
- bss_cache_ctrl->m_timer_expired = 1;
- ANDROID_TRACE(("%s called\n", __FUNCTION__));
-}
-
void
wl_release_bss_cache_ctrl(wl_bss_cache_ctrl_t *bss_cache_ctrl)
{
ANDROID_TRACE(("%s:\n", __FUNCTION__));
wl_free_bss_cache(bss_cache_ctrl);
- wl_run_bss_cache_timer(bss_cache_ctrl, 0);
- if (bss_cache_ctrl->m_timer) {
- kfree(bss_cache_ctrl->m_timer);
- }
-}
-
-int
-wl_init_bss_cache_ctrl(wl_bss_cache_ctrl_t *bss_cache_ctrl)
-{
- ANDROID_TRACE(("%s:\n", __FUNCTION__));
- bss_cache_ctrl->m_timer_expired = 0;
-
- bss_cache_ctrl->m_timer = kmalloc(sizeof(struct timer_list), GFP_KERNEL);
- if (!bss_cache_ctrl->m_timer) {
- ANDROID_ERROR(("%s: Memory alloc failure\n", __FUNCTION__ ));
- return -ENOMEM;
- }
- init_timer(bss_cache_ctrl->m_timer);
- bss_cache_ctrl->m_timer->function = (void *)wl_set_bss_cache_timer_flag;
- bss_cache_ctrl->m_timer->data = (ulong)bss_cache_ctrl;
-
- return 0;
}
#endif
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: wl_android.h 367305 2012-11-07 13:49:55Z $
+ * $Id: wl_android.h 487838 2014-06-27 05:51:44Z $
*/
#ifndef _wl_android_
/* If any feature uses the Generic Netlink Interface, put it here to enable WL_GENL
* automatically
*/
-#ifdef WL_SDO
+#if defined(WL_SDO) || defined(BT_WIFI_HANDOVER) || defined(WL_NAN)
#define WL_GENL
#endif
int wl_android_wifi_on(struct net_device *dev);
int wl_android_wifi_off(struct net_device *dev);
int wl_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd);
-void* wl_android_prealloc(int section, unsigned long size);
-
-#if defined(CONFIG_WIFI_CONTROL_FUNC)
-int wl_android_wifictrl_func_add(void);
-void wl_android_wifictrl_func_del(void);
-void* wl_android_prealloc(int section, unsigned long size);
-
-int wifi_get_irq_number(unsigned long *irq_flags_ptr);
-int wifi_set_power(int on, unsigned long msec);
-int wifi_get_mac_addr(unsigned char *buf);
-void *wifi_get_country_code(char *ccode);
-#endif /* CONFIG_WIFI_CONTROL_FUNC */
#ifdef WL_GENL
typedef struct bcm_event_hdr {
BCM_E_SVC_FOUND,
BCM_E_DEV_FOUND,
BCM_E_DEV_LOST,
+ BCM_E_DEV_BT_WIFI_HO_REQ,
BCM_E_MAX
};
s32 wl_genl_send_msg(struct net_device *ndev, u32 event_type,
u8 *string, u16 len, u8 *hdr, u16 hdrlen);
#endif /* WL_GENL */
+s32 wl_netlink_send_msg(int pid, int type, int seq, void *data, size_t size);
+
+/* hostap mac mode */
+#define MACLIST_MODE_DISABLED 0
+#define MACLIST_MODE_DENY 1
+#define MACLIST_MODE_ALLOW 2
+
+/* max number of assoc list */
+#define MAX_NUM_OF_ASSOCLIST 64
+
+/* max number of mac filter list
+ * restrict max number to 10 as maximum cmd string size is 255
+ */
+#define MAX_NUM_MAC_FILT 10
+
+int wl_android_set_ap_mac_list(struct net_device *dev, int macmode, struct maclist *maclist);
/* terence:
* BSSCACHE: Cache bss list
* RSSAVG: Average RSSI of BSS list
* RSSIOFFSET: RSSI offset
*/
-#define BSSCACHE
-#define RSSIAVG
-#define RSSIOFFSET
+//#define BSSCACHE
+//#define RSSIAVG
+//#define RSSIOFFSET
//#define RSSIOFFSET_NEW
#define RSSI_MAXVAL -2
#if defined(RSSIAVG)
#define RSSIAVG_LEN (4*REPEATED_SCAN_RESULT_CNT)
-#define RSSICACHE_LEN (4*REPEATED_SCAN_RESULT_CNT)
+#define RSSICACHE_TIMEOUT 15
typedef struct wl_rssi_cache {
struct wl_rssi_cache *next;
int dirty;
+ struct timeval tv;
struct ether_addr BSSID;
int16 RSSI[RSSIAVG_LEN];
} wl_rssi_cache_t;
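+/*
+ * With this change each cache entry carries an absolute expiry time
+ * (tv = now + RSSICACHE_TIMEOUT seconds, stamped whenever the entry is
+ * updated); stale entries are dropped once the current time passes tv,
+ * instead of after a fixed number of "dirty" scan rounds as before.
+ */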
#endif
#define BCM4330_CHIP_ID 0x4330
#define BCM4330B2_CHIP_REV 4
-int wl_update_rssi_offset(int rssi);
+int wl_update_rssi_offset(struct net_device *net, int rssi);
#endif
#if defined(BSSCACHE)
-#define BSSCACHE_LEN (4*REPEATED_SCAN_RESULT_CNT)
-#define BSSCACHE_TIME 15000
+#define BSSCACHE_TIMEOUT 15
typedef struct wl_bss_cache {
struct wl_bss_cache *next;
int dirty;
+ struct timeval tv;
wl_scan_results_t results;
} wl_bss_cache_t;
typedef struct wl_bss_cache_ctrl {
wl_bss_cache_t *m_cache_head;
- struct timer_list *m_timer;
- int m_timer_expired;
} wl_bss_cache_ctrl_t;
void wl_free_bss_cache(wl_bss_cache_ctrl_t *bss_cache_ctrl);
void wl_delete_disconnected_bss_cache(wl_bss_cache_ctrl_t *bss_cache_ctrl, u8 *bssid);
void wl_reset_bss_cache(wl_bss_cache_ctrl_t *bss_cache_ctrl);
void wl_update_bss_cache(wl_bss_cache_ctrl_t *bss_cache_ctrl, wl_scan_results_t *ss_list);
-void wl_run_bss_cache_timer(wl_bss_cache_ctrl_t *bss_cache_ctrl, int kick_off);
void wl_release_bss_cache_ctrl(wl_bss_cache_ctrl_t *bss_cache_ctrl);
-int wl_init_bss_cache_ctrl(wl_bss_cache_ctrl_t *bss_cache_ctrl);
#endif
-#endif /* _wl_android_ */
+#endif /* _wl_android_ */
\ No newline at end of file
-
/*
* Linux cfg80211 driver
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: wl_cfg80211.c 419844 2013-08-23 00:07:03Z $
+ * $Id: wl_cfg80211.c 491569 2014-07-16 21:28:40Z $
*/
/* */
#include <typedefs.h>
#include <dngl_stats.h>
#include <dhd.h>
+#include <dhd_linux.h>
#include <dhdioctl.h>
#include <wlioctl.h>
#include <dhd_cfg80211.h>
#include <wl_cfg80211.h>
#include <wl_cfgp2p.h>
#include <wl_android.h>
+#include <wl_cfgvendor.h>
+#ifdef WL_NAN
+#include <wl_cfgnan.h>
+#endif /* WL_NAN */
#include <dhd_config.h>
#ifdef PROP_TXSTATUS
#endif /* BCMWAPI_WPI */
static struct device *cfg80211_parent_dev = NULL;
-struct wl_priv *wlcfg_drv_priv = NULL;
+/* g_bcm_cfg should be static. Do not change */
+static struct bcm_cfg80211 *g_bcm_cfg = NULL;
u32 wl_dbg_level = WL_DBG_ERR;
#define MAX_WAIT_TIME 1500
+#ifdef WLAIBSS_MCHAN
+#define IBSS_IF_NAME "ibss%d"
+#endif /* WLAIBSS_MCHAN */
#ifdef VSDB
/* sleep time to keep STA's connecting or connection for continuous af tx or finding a peer */
#define DEFAULT_SLEEP_TIME_VSDB 120
#define OFF_CHAN_TIME_THRESHOLD_MS 200
-#define AF_RETRY_DELAY_TIME 40
+#define AF_RETRY_DELAY_TIME 40
/* if sta is connected or connecting, sleep for a while before retry af tx or finding a peer */
-#define WL_AF_TX_KEEP_PRI_CONNECTION_VSDB(wl) \
+#define WL_AF_TX_KEEP_PRI_CONNECTION_VSDB(cfg) \
do { \
- if (wl_get_drv_status(wl, CONNECTED, wl_to_prmry_ndev(wl)) || \
- wl_get_drv_status(wl, CONNECTING, wl_to_prmry_ndev(wl))) { \
- OSL_SLEEP(DEFAULT_SLEEP_TIME_VSDB); \
+ if (wl_get_drv_status(cfg, CONNECTED, bcmcfg_to_prmry_ndev(cfg)) || \
+ wl_get_drv_status(cfg, CONNECTING, bcmcfg_to_prmry_ndev(cfg))) { \
+ OSL_SLEEP(DEFAULT_SLEEP_TIME_VSDB); \
} \
} while (0)
#else /* VSDB */
/* if not VSDB, do nothing */
-#define WL_AF_TX_KEEP_PRI_CONNECTION_VSDB(wl)
+#define WL_AF_TX_KEEP_PRI_CONNECTION_VSDB(cfg)
#endif /* VSDB */
#ifdef WL_CFG80211_SYNC_GON
-#define WL_DRV_STATUS_SENDING_AF_FRM_EXT(wl) \
- (wl_get_drv_status_all(wl, SENDING_ACT_FRM) || \
- wl_get_drv_status_all(wl, WAITING_NEXT_ACT_FRM_LISTEN))
+#define WL_DRV_STATUS_SENDING_AF_FRM_EXT(cfg) \
+ (wl_get_drv_status_all(cfg, SENDING_ACT_FRM) || \
+ wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM_LISTEN))
#else
-#define WL_DRV_STATUS_SENDING_AF_FRM_EXT(wl) wl_get_drv_status_all(wl, SENDING_ACT_FRM)
+#define WL_DRV_STATUS_SENDING_AF_FRM_EXT(cfg) wl_get_drv_status_all(cfg, SENDING_ACT_FRM)
#endif /* WL_CFG80211_SYNC_GON */
+#define WL_IS_P2P_DEV_EVENT(e) ((e->emsg.ifidx == 0) && \
+ (e->emsg.bsscfgidx == P2PAPI_BSSCFG_DEVICE))
-#define WL_CHANSPEC_CTL_SB_NONE WL_CHANSPEC_CTL_SB_LLL
-
-
-#define DNGL_FUNC(func, parameters) func parameters;
+#define DNGL_FUNC(func, parameters) func parameters
#define COEX_DHCP
#define WLAN_EID_SSID 0
#define CH_MIN_5G_CHANNEL 34
#define CH_MIN_2G_CHANNEL 1
+#ifdef WLAIBSS
+enum abiss_event_type {
+ AIBSS_EVENT_TXFAIL
+};
+#endif
+
+enum rmc_event_type {
+ RMC_EVENT_NONE,
+ RMC_EVENT_LEADER_CHECK_FAIL
+};
+
/* This is to override regulatory domains defined in cfg80211 module (reg.c)
* By default world regulatory domain defined in reg.c puts the flags NL80211_RRF_PASSIVE_SCAN
* and NL80211_RRF_NO_IBSS for 5GHz channels (for 36..48 and 149..165).
* STA + P2P Mode - #STA <= 2, #{P2P-GO, P2P-client} <= 1, #P2P-device <= 1
* on channels = 2
*/
-static const struct ieee80211_iface_limit softap_limits[] = {
+static const struct ieee80211_iface_limit common_if_limits[] = {
{
.max = 1,
.types = BIT(NL80211_IFTYPE_AP),
},
-};
-
-static const struct ieee80211_iface_limit sta_p2p_limits[] = {
+ {
/*
* During P2P-GO removal, P2P-GO is first changed to STA and later only
- * removed. So setting maximum possible number of STA interfaces as 2 to
- * accommodate the above behaviour.
+ * removed. So setting maximum possible number of STA interfaces according
+ * to kernel version.
+ *
+ * less than linux-3.8 - max:3 (wlan0 + p2p0 + group removal of p2p-p2p0-x)
+ * linux-3.8 and above - max:2 (wlan0 + group removal of p2p-wlan0-x)
*/
- {
+#ifdef WL_ENABLE_P2P_IF
+ .max = 3,
+#else
.max = 2,
+#endif /* WL_ENABLE_P2P_IF */
.types = BIT(NL80211_IFTYPE_STATION),
},
{
.types = BIT(NL80211_IFTYPE_ADHOC),
},
};
-
-static const struct ieee80211_iface_combination
-softap_iface_combinations[] = {
- {
- .num_different_channels = 1,
- .max_interfaces = 1,
- .limits = softap_limits,
- .n_limits = ARRAY_SIZE(softap_limits),
- },
-};
-
+#ifdef BCM4330_CHIP
+#define NUM_DIFF_CHANNELS 1
+#else
+#define NUM_DIFF_CHANNELS 2
+#endif
static const struct ieee80211_iface_combination
-sta_p2p_iface_combinations[] = {
+common_iface_combinations[] = {
{
- .num_different_channels = 2,
- .max_interfaces = 3,
- .limits = sta_p2p_limits,
- .n_limits = ARRAY_SIZE(sta_p2p_limits),
+ .num_different_channels = NUM_DIFF_CHANNELS,
+ .max_interfaces = 4,
+ .limits = common_if_limits,
+ .n_limits = ARRAY_SIZE(common_if_limits),
},
};
#endif /* LINUX_VER >= 3.0 && (WL_IFACE_COMB_NUM_CHANNELS || WL_CFG80211_P2P_DEV_IF) */
#endif /* MFP */
#ifndef IBSS_COALESCE_ALLOWED
-#define IBSS_COALESCE_ALLOWED 0
+#define IBSS_COALESCE_ALLOWED 0
#endif
#ifndef IBSS_INITIAL_SCAN_ALLOWED
-#define IBSS_INITIAL_SCAN_ALLOWED 0
+#define IBSS_INITIAL_SCAN_ALLOWED 0
#endif
+
+#define CUSTOM_RETRY_MASK 0xff000000 /* Mask for retry counter of custom dwell time */
/*
* cfg80211_ops api/callback list
*/
struct cfg80211_scan_request *request);
#endif /* WL_CFG80211_P2P_DEV_IF */
static s32 wl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed);
+#ifdef WLAIBSS_MCHAN
+static bcm_struct_cfgdev* bcm_cfg80211_add_ibss_if(struct wiphy *wiphy, char *name);
+static s32 bcm_cfg80211_del_ibss_if(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev);
+#endif /* WLAIBSS_MCHAN */
static s32 wl_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_ibss_params *params);
static s32 wl_cfg80211_leave_ibss(struct wiphy *wiphy,
bcm_struct_cfgdev *cfgdev, u64 cookie);
static s32 wl_cfg80211_del_station(struct wiphy *wiphy,
struct net_device *ndev, u8* mac_addr);
+static s32 wl_cfg80211_change_station(struct wiphy *wiphy,
+ struct net_device *dev, u8 *mac, struct station_parameters *params);
#endif /* WL_SUPPORT_BACKPORTED_KPATCHES || KERNEL_VER >= KERNEL_VERSION(3, 2, 0)) */
-static s32
+static s32
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) || defined(WL_COMPAT_WIRELESS)
wl_cfg80211_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
#else
struct cfg80211_pmksa *pmksa);
static s32 wl_cfg80211_flush_pmksa(struct wiphy *wiphy,
struct net_device *dev);
-static void wl_cfg80211_scan_abort(struct wl_priv *wl);
-static s32 wl_notify_escan_complete(struct wl_priv *wl,
+static void wl_cfg80211_scan_abort(struct bcm_cfg80211 *cfg);
+static s32 wl_notify_escan_complete(struct bcm_cfg80211 *cfg,
struct net_device *ndev, bool aborted, bool fw_abort);
#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0)) || defined(WL_COMPAT_WIRELESS)
static s32 wl_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
u8 *peer, enum nl80211_tdls_operation oper);
#endif /* LINUX_VERSION > KERNEL_VERSION(3,2,0) || WL_COMPAT_WIRELESS */
+#ifdef WL_SCHED_SCAN
+static int wl_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev);
+#endif
+#if defined(DUAL_STA) || defined(DUAL_STA_STATIC_IF)
+bcm_struct_cfgdev*
+wl_cfg80211_create_iface(struct wiphy *wiphy, enum nl80211_iftype
+ iface_type, u8 *mac_addr, const char *name);
+s32
+wl_cfg80211_del_iface(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev);
+#endif /* defined(DUAL_STA) || defined(DUAL_STA_STATIC_IF) */
/*
* event & event Q handlers for cfg80211 interfaces
*/
-static s32 wl_create_event_handler(struct wl_priv *wl);
-static void wl_destroy_event_handler(struct wl_priv *wl);
+static s32 wl_create_event_handler(struct bcm_cfg80211 *cfg);
+static void wl_destroy_event_handler(struct bcm_cfg80211 *cfg);
static s32 wl_event_handler(void *data);
-static void wl_init_eq(struct wl_priv *wl);
-static void wl_flush_eq(struct wl_priv *wl);
-static unsigned long wl_lock_eq(struct wl_priv *wl);
-static void wl_unlock_eq(struct wl_priv *wl, unsigned long flags);
-static void wl_init_eq_lock(struct wl_priv *wl);
-static void wl_init_event_handler(struct wl_priv *wl);
-static struct wl_event_q *wl_deq_event(struct wl_priv *wl);
-static s32 wl_enq_event(struct wl_priv *wl, struct net_device *ndev, u32 type,
+static void wl_init_eq(struct bcm_cfg80211 *cfg);
+static void wl_flush_eq(struct bcm_cfg80211 *cfg);
+static unsigned long wl_lock_eq(struct bcm_cfg80211 *cfg);
+static void wl_unlock_eq(struct bcm_cfg80211 *cfg, unsigned long flags);
+static void wl_init_eq_lock(struct bcm_cfg80211 *cfg);
+static void wl_init_event_handler(struct bcm_cfg80211 *cfg);
+static struct wl_event_q *wl_deq_event(struct bcm_cfg80211 *cfg);
+static s32 wl_enq_event(struct bcm_cfg80211 *cfg, struct net_device *ndev, u32 type,
const wl_event_msg_t *msg, void *data);
static void wl_put_event(struct wl_event_q *e);
-static void wl_wakeup_event(struct wl_priv *wl);
-static s32 wl_notify_connect_status_ap(struct wl_priv *wl, struct net_device *ndev,
+static void wl_wakeup_event(struct bcm_cfg80211 *cfg);
+static s32 wl_notify_connect_status_ap(struct bcm_cfg80211 *cfg, struct net_device *ndev,
const wl_event_msg_t *e, void *data);
-static s32 wl_notify_connect_status(struct wl_priv *wl,
+static s32 wl_notify_connect_status(struct bcm_cfg80211 *cfg,
bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data);
-static s32 wl_notify_roaming_status(struct wl_priv *wl,
+static s32 wl_notify_roaming_status(struct bcm_cfg80211 *cfg,
bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data);
-static s32 wl_notify_scan_status(struct wl_priv *wl, bcm_struct_cfgdev *cfgdev,
+static s32 wl_notify_scan_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
const wl_event_msg_t *e, void *data);
-static s32 wl_bss_connect_done(struct wl_priv *wl, struct net_device *ndev,
+static s32 wl_bss_connect_done(struct bcm_cfg80211 *cfg, struct net_device *ndev,
const wl_event_msg_t *e, void *data, bool completed);
-static s32 wl_bss_roaming_done(struct wl_priv *wl, struct net_device *ndev,
+static s32 wl_bss_roaming_done(struct bcm_cfg80211 *cfg, struct net_device *ndev,
const wl_event_msg_t *e, void *data);
-static s32 wl_notify_mic_status(struct wl_priv *wl, bcm_struct_cfgdev *cfgdev,
+static s32 wl_notify_mic_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
const wl_event_msg_t *e, void *data);
+#ifdef BT_WIFI_HANDOVER
+static s32 wl_notify_bt_wifi_handover_req(struct bcm_cfg80211 *cfg,
+ bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data);
+#endif /* BT_WIFI_HANDOVER */
#ifdef WL_SCHED_SCAN
static s32
-wl_notify_sched_scan_results(struct wl_priv *wl, struct net_device *ndev,
+wl_notify_sched_scan_results(struct bcm_cfg80211 *cfg, struct net_device *ndev,
const wl_event_msg_t *e, void *data);
#endif /* WL_SCHED_SCAN */
#ifdef PNO_SUPPORT
-static s32 wl_notify_pfn_status(struct wl_priv *wl, bcm_struct_cfgdev *cfgdev,
+static s32 wl_notify_pfn_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
const wl_event_msg_t *e, void *data);
#endif /* PNO_SUPPORT */
-static s32 wl_notifier_change_state(struct wl_priv *wl, struct net_info *_net_info,
+static s32 wl_notifier_change_state(struct bcm_cfg80211 *cfg, struct net_info *_net_info,
enum wl_status state, bool set);
#ifdef WL_SDO
-static s32 wl_svc_resp_handler(struct wl_priv *wl, bcm_struct_cfgdev *cfgdev,
+static s32 wl_svc_resp_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
const wl_event_msg_t *e, void *data);
-static s32 wl_notify_device_discovery(struct wl_priv *wl, bcm_struct_cfgdev *cfgdev,
+static s32 wl_notify_device_discovery(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
const wl_event_msg_t *e, void *data);
#endif
#ifdef WLTDLS
-static s32 wl_tdls_event_handler(struct wl_priv *wl, bcm_struct_cfgdev *cfgdev,
+static s32 wl_tdls_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
const wl_event_msg_t *e, void *data);
#endif /* WLTDLS */
/*
static s32 wl_set_retry(struct net_device *dev, u32 retry, bool l);
/*
- * wl profile utilities
+ * cfg profile utilities
*/
-static s32 wl_update_prof(struct wl_priv *wl, struct net_device *ndev,
+static s32 wl_update_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev,
const wl_event_msg_t *e, void *data, s32 item);
-static void *wl_read_prof(struct wl_priv *wl, struct net_device *ndev, s32 item);
-static void wl_init_prof(struct wl_priv *wl, struct net_device *ndev);
+static void *wl_read_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 item);
+static void wl_init_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev);
/*
* cfg80211 connect utilites
static s32 wl_set_set_wapi_ie(struct net_device *dev,
struct cfg80211_connect_params *sme);
#endif
-static s32 wl_get_assoc_ies(struct wl_priv *wl, struct net_device *ndev);
+static s32 wl_get_assoc_ies(struct bcm_cfg80211 *cfg, struct net_device *ndev);
static void wl_ch_to_chanspec(int ch,
struct wl_join_params *join_params, size_t *join_params_size);
/*
* information element utilities
*/
-static void wl_rst_ie(struct wl_priv *wl);
-static __used s32 wl_add_ie(struct wl_priv *wl, u8 t, u8 l, u8 *v);
-static void wl_update_hidden_ap_ie(struct wl_bss_info *bi, u8 *ie_stream, u32 *ie_size);
-static s32 wl_mrg_ie(struct wl_priv *wl, u8 *ie_stream, u16 ie_size);
-static s32 wl_cp_ie(struct wl_priv *wl, u8 *dst, u16 dst_size);
-static u32 wl_get_ielen(struct wl_priv *wl);
+static void wl_rst_ie(struct bcm_cfg80211 *cfg);
+static __used s32 wl_add_ie(struct bcm_cfg80211 *cfg, u8 t, u8 l, u8 *v);
+static void wl_update_hidden_ap_ie(struct wl_bss_info *bi, u8 *ie_stream, u32 *ie_size, bool roam);
+static s32 wl_mrg_ie(struct bcm_cfg80211 *cfg, u8 *ie_stream, u16 ie_size);
+static s32 wl_cp_ie(struct bcm_cfg80211 *cfg, u8 *dst, u16 dst_size);
+static u32 wl_get_ielen(struct bcm_cfg80211 *cfg);
#ifdef MFP
static int wl_cfg80211_get_rsn_capa(bcm_tlv_t *wpa2ie, u8* capa);
#endif
bcm_tlv_t *
wl_cfg80211_find_interworking_ie(u8 *parse, u32 len);
static s32
-wl_cfg80211_add_iw_ie(struct wl_priv *wl, struct net_device *ndev, s32 bssidx, s32 pktflag,
+wl_cfg80211_add_iw_ie(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bssidx, s32 pktflag,
uint8 ie_id, uint8 *data, uint8 data_len);
#endif /* WL11U */
static s32 wl_setup_wiphy(struct wireless_dev *wdev, struct device *dev, void *data);
-static void wl_free_wdev(struct wl_priv *wl);
+static void wl_free_wdev(struct bcm_cfg80211 *cfg);
#ifdef CONFIG_CFG80211_INTERNAL_REGDB
static int
wl_cfg80211_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request);
#endif /* CONFIG_CFG80211_INTERNAL_REGDB */
-static s32 wl_inform_bss(struct wl_priv *wl);
-static s32 wl_inform_single_bss(struct wl_priv *wl, struct wl_bss_info *bi);
-static s32 wl_update_bss_info(struct wl_priv *wl, struct net_device *ndev);
+static s32 wl_inform_bss(struct bcm_cfg80211 *cfg);
+static s32 wl_inform_single_bss(struct bcm_cfg80211 *cfg, struct wl_bss_info *bi, bool roam);
+static s32 wl_update_bss_info(struct bcm_cfg80211 *cfg, struct net_device *ndev, bool roam);
static chanspec_t wl_cfg80211_get_shared_freq(struct wiphy *wiphy);
s32 wl_cfg80211_channel_to_freq(u32 channel);
static void swap_key_to_BE(struct wl_wsec_key *key);
/*
- * wl_priv memory init/deinit utilities
+ * bcm_cfg80211 memory init/deinit utilities
*/
-static s32 wl_init_priv_mem(struct wl_priv *wl);
-static void wl_deinit_priv_mem(struct wl_priv *wl);
+static s32 wl_init_priv_mem(struct bcm_cfg80211 *cfg);
+static void wl_deinit_priv_mem(struct bcm_cfg80211 *cfg);
static void wl_delay(u32 ms);
/*
* ibss mode utilities
*/
-static bool wl_is_ibssmode(struct wl_priv *wl, struct net_device *ndev);
-static __used bool wl_is_ibssstarter(struct wl_priv *wl);
+static bool wl_is_ibssmode(struct bcm_cfg80211 *cfg, struct net_device *ndev);
+static __used bool wl_is_ibssstarter(struct bcm_cfg80211 *cfg);
/*
* link up/down , default configuration utilities
*/
-static s32 __wl_cfg80211_up(struct wl_priv *wl);
-static s32 __wl_cfg80211_down(struct wl_priv *wl);
-static bool wl_is_linkdown(struct wl_priv *wl, const wl_event_msg_t *e);
-static bool wl_is_linkup(struct wl_priv *wl, const wl_event_msg_t *e, struct net_device *ndev);
-static bool wl_is_nonetwork(struct wl_priv *wl, const wl_event_msg_t *e);
-static void wl_link_up(struct wl_priv *wl);
-static void wl_link_down(struct wl_priv *wl);
-static s32 wl_config_ifmode(struct wl_priv *wl, struct net_device *ndev, s32 iftype);
+static s32 __wl_cfg80211_up(struct bcm_cfg80211 *cfg);
+static s32 __wl_cfg80211_down(struct bcm_cfg80211 *cfg);
+static bool wl_is_linkdown(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e);
+static bool wl_is_linkup(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e,
+ struct net_device *ndev);
+static bool wl_is_nonetwork(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e);
+static void wl_link_up(struct bcm_cfg80211 *cfg);
+static void wl_link_down(struct bcm_cfg80211 *cfg);
+static s32 wl_config_ifmode(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 iftype);
static void wl_init_conf(struct wl_conf *conf);
+static s32 wl_cfg80211_handle_ifdel(struct bcm_cfg80211 *cfg, wl_if_event_info *if_event_info,
+ struct net_device* ndev);
-/*
- * iscan handler
- */
-static void wl_iscan_timer(unsigned long data);
-static void wl_term_iscan(struct wl_priv *wl);
-static s32 wl_init_scan(struct wl_priv *wl);
-static s32 wl_iscan_thread(void *data);
-static s32 wl_run_iscan(struct wl_iscan_ctrl *iscan, struct cfg80211_scan_request *request,
- u16 action);
-static s32 wl_do_iscan(struct wl_priv *wl, struct cfg80211_scan_request *request);
-static s32 wl_wakeup_iscan(struct wl_iscan_ctrl *iscan);
-static s32 wl_invoke_iscan(struct wl_priv *wl);
-static s32 wl_get_iscan_results(struct wl_iscan_ctrl *iscan, u32 *status,
- struct wl_scan_results **bss_list);
-static void wl_notify_iscan_complete(struct wl_iscan_ctrl *iscan, bool aborted);
-static void wl_init_iscan_handler(struct wl_iscan_ctrl *iscan);
-static s32 wl_iscan_done(struct wl_priv *wl);
-static s32 wl_iscan_pending(struct wl_priv *wl);
-static s32 wl_iscan_inprogress(struct wl_priv *wl);
-static s32 wl_iscan_aborted(struct wl_priv *wl);
+int wl_cfg80211_get_ioctl_version(void);
/*
* find most significant bit set
/*
* rfkill support
*/
-static int wl_setup_rfkill(struct wl_priv *wl, bool setup);
+static int wl_setup_rfkill(struct bcm_cfg80211 *cfg, bool setup);
static int wl_rfkill_set(void *data, bool blocked);
#ifdef DEBUGFS_CFG80211
-static s32 wl_setup_debugfs(struct wl_priv *wl);
-static s32 wl_free_debugfs(struct wl_priv *wl);
+static s32 wl_setup_debugfs(struct bcm_cfg80211 *cfg);
+static s32 wl_free_debugfs(struct bcm_cfg80211 *cfg);
#endif
static wl_scan_params_t *wl_cfg80211_scan_alloc_params(int channel,
int nprobes, int *out_params_size);
-static bool check_dev_role_integrity(struct wl_priv *wl, u32 dev_role);
+static bool check_dev_role_integrity(struct bcm_cfg80211 *cfg, u32 dev_role);
+
+#ifdef WL_CFG80211_ACL
+/* ACL */
+static int wl_cfg80211_set_mac_acl(struct wiphy *wiphy, struct net_device *cfgdev,
+ const struct cfg80211_acl_data *acl);
+#endif /* WL_CFG80211_ACL */
/*
* Some external functions, TODO: move them to dhd_linux.h
int dhd_start_xmit(struct sk_buff *skb, struct net_device *net);
+static int wl_cfg80211_delayed_roam(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ const struct ether_addr *bssid);
+
#ifdef WL_SDO
-s32 wl_cfg80211_sdo_init(struct wl_priv *wl);
-s32 wl_cfg80211_sdo_deinit(struct wl_priv *wl);
+s32 wl_cfg80211_sdo_init(struct bcm_cfg80211 *cfg);
+s32 wl_cfg80211_sdo_deinit(struct bcm_cfg80211 *cfg);
#define MAX_SDO_PROTO 5
wl_sdo_proto_t wl_sdo_protos [] = {
{ "all", SVC_RPOTYPE_ALL },
{ "vendor", SVC_RPOTYPE_VENDOR },
};
#endif
+static int bw2cap[] = { 0, 0, WLC_BW_CAP_20MHZ, WLC_BW_CAP_40MHZ, WLC_BW_CAP_80MHZ,
+ WLC_BW_CAP_160MHZ, WLC_BW_CAP_160MHZ };
#define RETURN_EIO_IF_NOT_UP(wlpriv) \
do { \
- struct net_device *checkSysUpNDev = wl_to_prmry_ndev(wlpriv); \
+ struct net_device *checkSysUpNDev = bcmcfg_to_prmry_ndev(wlpriv); \
if (unlikely(!wl_get_drv_status(wlpriv, READY, checkSysUpNDev))) { \
- WL_INFO(("device is not ready\n")); \
+ WL_INFORM(("device is not ready\n")); \
return -EIO; \
} \
} while (0)
-
#define IS_WPA_AKM(akm) ((akm) == RSN_AKM_NONE || \
(akm) == RSN_AKM_UNSPECIFIED || \
(akm) == RSN_AKM_PSK)
extern int dhd_wait_pend8021x(struct net_device *dev);
#ifdef PROP_TXSTATUS_VSDB
extern int disable_proptx;
-extern int dhd_wlfc_init(dhd_pub_t *dhd);
-extern void dhd_wlfc_deinit(dhd_pub_t *dhd);
#endif /* PROP_TXSTATUS_VSDB */
#if (WL_DBG_LEVEL > 0)
CHAN5G(116, 0), CHAN5G(120, 0),
CHAN5G(124, 0), CHAN5G(128, 0),
CHAN5G(132, 0), CHAN5G(136, 0),
- CHAN5G(140, 0), CHAN5G(149, 0),
- CHAN5G(153, 0), CHAN5G(157, 0),
- CHAN5G(161, 0), CHAN5G(165, 0)
+ CHAN5G(140, 0), CHAN5G(144, 0),
+ CHAN5G(149, 0), CHAN5G(153, 0),
+ CHAN5G(157, 0), CHAN5G(161, 0),
+ CHAN5G(165, 0)
};
static struct ieee80211_supported_band __wl_band_2ghz = {
#endif
};
+#ifdef WL_SUPPORT_ACS
+/*
+ * The firmware code required for this feature to work is currently under
+ * BCMINTERNAL flag. In future, if this is to be enabled, we need to bring the
+ * required firmware code out of the BCMINTERNAL flag.
+ */
+struct wl_dump_survey {
+ u32 obss;
+ u32 ibss;
+ u32 no_ctg;
+ u32 no_pckt;
+ u32 tx;
+ u32 idle;
+};
+#endif /* WL_SUPPORT_ACS */
+
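A hedged sketch of how the survey counters above might feed an ACS decision; treating each field as accumulated air time in one category is an assumption, and the helper below is illustrative only:

static u32 example_survey_busy_permille(const struct wl_dump_survey *s)
{
	/* assumed: obss/ibss/no_ctg/no_pckt/tx/idle are per-category air-time counters */
	u32 busy = s->obss + s->ibss + s->no_ctg + s->no_pckt + s->tx;
	u32 total = busy + s->idle;

	/* a channel with a lower busy fraction is a better ACS candidate */
	return total ? (busy * 1000) / total : 0;
}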
#if defined(USE_DYNAMIC_MAXPKT_RXGLOM)
static int maxrxpktglom = 0;
#endif
+static void wl_add_remove_pm_enable_work(struct bcm_cfg80211 *cfg, bool add_remove,
+ enum wl_handler_del_type type)
+{
+ if (cfg == NULL)
+ return;
+
+ if (cfg->pm_enable_work_on) {
+ if (add_remove) {
+ schedule_delayed_work(&cfg->pm_enable_work,
+ msecs_to_jiffies(WL_PM_ENABLE_TIMEOUT));
+ } else {
+ cancel_delayed_work_sync(&cfg->pm_enable_work);
+ switch (type) {
+ case WL_HANDLER_MAINTAIN:
+ schedule_delayed_work(&cfg->pm_enable_work,
+ msecs_to_jiffies(WL_PM_ENABLE_TIMEOUT));
+ break;
+ case WL_HANDLER_PEND:
+ schedule_delayed_work(&cfg->pm_enable_work,
+ msecs_to_jiffies(WL_PM_ENABLE_TIMEOUT*2));
+ break;
+ case WL_HANDLER_DEL:
+ default:
+ cfg->pm_enable_work_on = false;
+ break;
+ }
+ }
+ }
+}
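An illustrative call pattern for the helper above; the wrapper is hypothetical, and the WL_HANDLER_DEL call mirrors the one wl_cfg80211_del_virtual_iface() makes further below:

static void example_pm_enable_work_usage(struct bcm_cfg80211 *cfg)
{
	/* arm the delayed PM-enable work once PM handling becomes active */
	cfg->pm_enable_work_on = true;
	wl_add_remove_pm_enable_work(cfg, true, WL_HANDLER_MAINTAIN);

	/* ... on interface teardown, cancel it for good ... */
	wl_add_remove_pm_enable_work(cfg, false, WL_HANDLER_DEL);
}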
+
/* Return a new chanspec given a legacy chanspec
* Returns INVCHANSPEC on error
*/
* a chanspec_t value
* Returns INVCHANSPEC on error
*/
-static chanspec_t
+chanspec_t
wl_chspec_host_to_driver(chanspec_t chanspec)
{
if (ioctl_version == 1) {
return chanspec;
}
+/*
+ * convert ASCII string to MAC address (colon-delimited format)
+ * eg: 00:11:22:33:44:55
+ */
+int
+wl_cfg80211_ether_atoe(const char *a, struct ether_addr *n)
+{
+ char *c = NULL;
+ int count = 0;
+
+ memset(n, 0, ETHER_ADDR_LEN);
+ for (;;) {
+ n->octet[count++] = (uint8)simple_strtoul(a, &c, 16);
+ if (!*c++ || count == ETHER_ADDR_LEN)
+ break;
+ a = c;
+ }
+ return (count == ETHER_ADDR_LEN);
+}
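A minimal usage sketch for the parser above; the caller and the address string are illustrative:

static void example_parse_mac(void)
{
	struct ether_addr ea;

	/* returns non-zero only when all ETHER_ADDR_LEN bytes were parsed */
	if (!wl_cfg80211_ether_atoe("00:90:4c:11:22:33", &ea))
		WL_ERR(("invalid MAC address string\n"));
}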
+
+/* convert hex string buffer to binary */
+int
+wl_cfg80211_hex_str_to_bin(unsigned char *data, int dlen, char *str)
+{
+ int count, slen;
+ int hvalue;
+ char tmp[3] = {0};
+ char *ptr = str, *endp = NULL;
+
+ if (!data || !str || !dlen) {
+ WL_DBG((" passed buffer is empty \n"));
+ return 0;
+ }
+
+ slen = strlen(str);
+ if (dlen * 2 < slen) {
+ WL_DBG((" destination buffer too short \n"));
+ return 0;
+ }
+
+ if (slen % 2) {
+ WL_DBG((" source buffer is of odd length \n"));
+ return 0;
+ }
+
+ for (count = 0; count < slen; count += 2) {
+ memcpy(tmp, ptr, 2);
+ hvalue = simple_strtol(tmp, &endp, 16);
+ if (*endp != '\0') {
+ WL_DBG((" non hexadecimal character encountered \n"));
+ return 0;
+ }
+ *data++ = (unsigned char)hvalue;
+ ptr += 2;
+ }
+
+ return (slen / 2);
+}
+
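A matching sketch for the hex-string helper; the buffer size and input are illustrative:

static void example_parse_hex(void)
{
	u8 bin[16];
	char hex[] = "1a2b3c";	/* the str argument is not const, so use a writable copy */

	/* "1a2b3c" -> {0x1a, 0x2b, 0x3c}; returns the number of bytes written, 0 on error */
	if (wl_cfg80211_hex_str_to_bin(bin, sizeof(bin), hex) == 0)
		WL_ERR(("invalid hex string\n"));
}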
/* There isn't a lot of sense in it, but you can transmit anything you like */
static const struct ieee80211_txrx_stypes
wl_cfg80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = {
key->iv_initialized = dtoh32(key->iv_initialized);
}
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0)) && !defined(WL_COMPAT_WIRELESS)
-/* For debug: Dump the contents of the encoded wps ie buffe */
+/* Dump the contents of the encoded wps ie buffer and get pbc value */
static void
wl_validate_wps_ie(char *wps_ie, s32 wps_ie_len, bool *pbc)
{
subel += subelt_len;
}
}
-#endif /* LINUX_VERSION < VERSION(3, 4, 0) && !WL_COMPAT_WIRELESS */
+
+s32 wl_set_tx_power(struct net_device *dev,
+ enum nl80211_tx_power_setting type, s32 dbm)
+{
+ s32 err = 0;
+ s32 disable = 0;
+ s32 txpwrqdbm;
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
+
+ /* Make sure radio is off or on as far as software is concerned */
+ disable = WL_RADIO_SW_DISABLE << 16;
+ disable = htod32(disable);
+ err = wldev_ioctl(dev, WLC_SET_RADIO, &disable, sizeof(disable), true);
+ if (unlikely(err)) {
+ WL_ERR(("WLC_SET_RADIO error (%d)\n", err));
+ return err;
+ }
+
+ if (dbm > 0xffff)
+ dbm = 0xffff;
+ txpwrqdbm = dbm * 4;
+#ifdef SUPPORT_WL_TXPOWER
+ if (type == NL80211_TX_POWER_AUTOMATIC)
+ txpwrqdbm = 127;
+ else
+ txpwrqdbm |= WL_TXPWR_OVERRIDE;
+#endif /* SUPPORT_WL_TXPOWER */
+ err = wldev_iovar_setbuf_bsscfg(dev, "qtxpower", (void *)&txpwrqdbm,
+ sizeof(txpwrqdbm), cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0,
+ &cfg->ioctl_buf_sync);
+ if (unlikely(err))
+ WL_ERR(("qtxpower error (%d)\n", err));
+ else
+ WL_ERR(("dBm=%d, txpwrqdbm=0x%x\n", dbm, txpwrqdbm));
+
+ return err;
+}
+
+s32 wl_get_tx_power(struct net_device *dev, s32 *dbm)
+{
+ s32 err = 0;
+ s32 txpwrdbm;
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
+
+ err = wldev_iovar_getbuf_bsscfg(dev, "qtxpower",
+ NULL, 0, cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync);
+ if (unlikely(err)) {
+ WL_ERR(("error (%d)\n", err));
+ return err;
+ }
+
+ memcpy(&txpwrdbm, cfg->ioctl_buf, sizeof(txpwrdbm));
+ txpwrdbm = dtoh32(txpwrdbm);
+ *dbm = (txpwrdbm & ~WL_TXPWR_OVERRIDE) / 4;
+
+ WL_INFORM(("dBm=%d, txpwrdbm=0x%x\n", *dbm, txpwrdbm));
+
+ return err;
+}
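A brief worked example of the quarter-dBm encoding shared by the two helpers above (numbers illustrative):

/*
 * Setting a fixed 20 dBm limit: txpwrqdbm = 20 * 4 = 80 (0x50), OR'd with
 * WL_TXPWR_OVERRIDE when SUPPORT_WL_TXPOWER is defined and the request is
 * not NL80211_TX_POWER_AUTOMATIC (automatic maps to the magic value 127).
 * Reading it back: (qtxpower & ~WL_TXPWR_OVERRIDE) / 4 = 80 / 4 = 20 dBm.
 */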
static chanspec_t wl_cfg80211_get_shared_freq(struct wiphy *wiphy)
{
chanspec_t chspec;
int err = 0;
- struct wl_priv *wl = wiphy_priv(wiphy);
- struct net_device *dev = wl_to_prmry_ndev(wl);
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct net_device *dev = bcmcfg_to_prmry_ndev(cfg);
struct ether_addr bssid;
struct wl_bss_info *bss = NULL;
}
- *(u32 *) wl->extra_buf = htod32(WL_EXTRA_BUF_MAX);
- if ((err = wldev_ioctl(dev, WLC_GET_BSS_INFO, wl->extra_buf,
+ *(u32 *) cfg->extra_buf = htod32(WL_EXTRA_BUF_MAX);
+ if ((err = wldev_ioctl(dev, WLC_GET_BSS_INFO, cfg->extra_buf,
WL_EXTRA_BUF_MAX, false))) {
WL_ERR(("Failed to get associated bss info, use temp channel \n"));
chspec = wl_ch_host_to_driver(WL_P2P_TEMP_CHAN);
}
else {
- bss = (struct wl_bss_info *) (wl->extra_buf + 4);
+ bss = (struct wl_bss_info *) (cfg->extra_buf + 4);
chspec = bss->chanspec;
WL_DBG(("Valid BSS Found. chanspec:%d \n", chspec));
wl_cfg80211_add_monitor_if(char *name)
{
#if defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF)
- WL_INFO(("wl_cfg80211_add_monitor_if: No more support monitor interface\n"));
+ WL_INFORM(("wl_cfg80211_add_monitor_if: No more support monitor interface\n"));
return ERR_PTR(-EOPNOTSUPP);
#else
struct net_device* ndev = NULL;
dhd_add_monitor(name, &ndev);
- WL_INFO(("wl_cfg80211_add_monitor_if net device returned: 0x%p\n", ndev));
+ WL_INFORM(("wl_cfg80211_add_monitor_if net device returned: 0x%p\n", ndev));
return ndev_to_cfgdev(ndev);
#endif /* WL_ENABLE_P2P_IF || WL_CFG80211_P2P_DEV_IF */
}
s32 val = 0;
s32 dhd_mode = 0;
chanspec_t chspec;
- struct wl_priv *wl = wiphy_priv(wiphy);
- struct net_device *_ndev;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct net_device *primary_ndev;
+ struct net_device *new_ndev;
struct ether_addr primary_mac;
- int (*net_attach)(void *dhdp, int ifidx);
- bool rollback_lock = false;
#ifdef PROP_TXSTATUS_VSDB
+#if defined(BCMSDIO)
s32 up = 1;
dhd_pub_t *dhd;
+ bool enabled;
+#endif
#endif /* PROP_TXSTATUS_VSDB */
- if (!wl)
+ if (!cfg)
return ERR_PTR(-EINVAL);
#ifdef PROP_TXSTATUS_VSDB
- dhd = (dhd_pub_t *)(wl->pub);
+#if defined(BCMSDIO)
+ dhd = (dhd_pub_t *)(cfg->pub);
+#endif
#endif /* PROP_TXSTATUS_VSDB */
+
/* Use primary I/F for sending cmds down to firmware */
- _ndev = wl_to_prmry_ndev(wl);
+ primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+
+ if (unlikely(!wl_get_drv_status(cfg, READY, primary_ndev))) {
+ WL_ERR(("device is not ready\n"));
+ return ERR_PTR(-ENODEV);
+ }
WL_DBG(("if name: %s, type: %d\n", name, type));
switch (type) {
case NL80211_IFTYPE_ADHOC:
+#ifdef WLAIBSS_MCHAN
+ return bcm_cfg80211_add_ibss_if(wiphy, (char *)name);
+#endif /* WLAIBSS_MCHAN */
case NL80211_IFTYPE_AP_VLAN:
case NL80211_IFTYPE_WDS:
case NL80211_IFTYPE_MESH_POINT:
return wl_cfg80211_add_monitor_if((char *)name);
#if defined(WL_CFG80211_P2P_DEV_IF)
case NL80211_IFTYPE_P2P_DEVICE:
- return wl_cfgp2p_add_p2p_disc_if();
+ return wl_cfgp2p_add_p2p_disc_if(cfg);
#endif /* WL_CFG80211_P2P_DEV_IF */
- case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_STATION:
+#ifdef DUAL_STA
+#ifdef WLAIBSS_MCHAN
+ if (cfg->ibss_cfgdev) {
+ WL_ERR(("AIBSS is already operational. "
+ " AIBSS & DUALSTA can't be used together \n"));
+ return NULL;
+ }
+#endif /* WLAIBSS_MCHAN */
+ if (!name) {
+ WL_ERR(("Interface name not provided \n"));
+ return NULL;
+ }
+ return wl_cfg80211_create_iface(cfg->wdev->wiphy,
+ NL80211_IFTYPE_STATION, NULL, name);
+#endif /* DUAL_STA */
+ case NL80211_IFTYPE_P2P_CLIENT:
wlif_type = WL_P2P_IF_CLIENT;
mode = WL_MODE_BSS;
break;
WL_ERR(("name is NULL\n"));
return NULL;
}
- if (wl->p2p_supported && (wlif_type != -1)) {
- ASSERT(wl->p2p); /* ensure expectation of p2p initialization */
- if (wl_get_p2p_status(wl, IF_DELETING)) {
- /* wait till IF_DEL is complete
- * release the lock for the unregister to proceed
- */
- if (rtnl_is_locked()) {
- rtnl_unlock();
- rollback_lock = true;
- }
- WL_INFO(("Released the lock and wait till IF_DEL is complete\n"));
- timeout = wait_event_interruptible_timeout(wl->netif_change_event,
- (wl_get_p2p_status(wl, IF_DELETING) == false),
- msecs_to_jiffies(MAX_WAIT_TIME));
-
- /* put back the rtnl_lock again */
- if (rollback_lock) {
- rtnl_lock();
- rollback_lock = false;
- }
- if (timeout > 0) {
- WL_ERR(("IF DEL is Success\n"));
-
- } else {
- WL_ERR(("timeount < 0, return -EAGAIN\n"));
- return ERR_PTR(-EAGAIN);
- }
- /* It should be now be safe to put this check here since we are sure
- * by now netdev_notifier (unregister) would have been called
- */
- if (wl->iface_cnt == IFACE_MAX_CNT)
- return ERR_PTR(-ENOMEM);
- }
+ if (cfg->p2p_supported && (wlif_type != -1)) {
+ ASSERT(cfg->p2p); /* ensure expectation of p2p initialization */
#ifdef PROP_TXSTATUS_VSDB
+#if defined(BCMSDIO)
if (!dhd)
return ERR_PTR(-ENODEV);
+#endif
#endif /* PROP_TXSTATUS_VSDB */
- if (!wl->p2p)
+ if (!cfg->p2p)
return ERR_PTR(-ENODEV);
- if (wl->p2p && !wl->p2p->on && strstr(name, WL_P2P_INTERFACE_PREFIX)) {
- p2p_on(wl) = true;
- wl_cfgp2p_set_firm_p2p(wl);
- wl_cfgp2p_init_discovery(wl);
- get_primary_mac(wl, &primary_mac);
+ if (cfg->p2p && !cfg->p2p->on && strstr(name, WL_P2P_INTERFACE_PREFIX)) {
+ p2p_on(cfg) = true;
+ wl_cfgp2p_set_firm_p2p(cfg);
+ wl_cfgp2p_init_discovery(cfg);
+ get_primary_mac(cfg, &primary_mac);
wl_cfgp2p_generate_bss_mac(&primary_mac,
- &wl->p2p->dev_addr, &wl->p2p->int_addr);
+ &cfg->p2p->dev_addr, &cfg->p2p->int_addr);
}
- memset(wl->p2p->vir_ifname, 0, IFNAMSIZ);
- strncpy(wl->p2p->vir_ifname, name, IFNAMSIZ - 1);
+ memset(cfg->p2p->vir_ifname, 0, IFNAMSIZ);
+ strncpy(cfg->p2p->vir_ifname, name, IFNAMSIZ - 1);
- wl_cfg80211_scan_abort(wl);
+ wl_cfg80211_scan_abort(cfg);
#ifdef PROP_TXSTATUS_VSDB
- if (!wl->wlfc_on && !disable_proptx) {
- dhd->wlfc_enabled = true;
- dhd_wlfc_init(dhd);
- err = wldev_ioctl(_ndev, WLC_UP, &up, sizeof(s32), true);
- if (err < 0)
- WL_ERR(("WLC_UP return err:%d\n", err));
- wl->wlfc_on = true;
+#if defined(BCMSDIO)
+ if (!cfg->wlfc_on && !disable_proptx) {
+ dhd_wlfc_get_enable(dhd, &enabled);
+ if (!enabled && dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
+ dhd->op_mode != DHD_FLAG_IBSS_MODE) {
+ dhd_wlfc_init(dhd);
+ err = wldev_ioctl(primary_ndev, WLC_UP, &up, sizeof(s32), true);
+ if (err < 0)
+ WL_ERR(("WLC_UP return err:%d\n", err));
+ }
+ cfg->wlfc_on = true;
}
+#endif
#endif /* PROP_TXSTATUS_VSDB */
/* In concurrency case, STA may be already associated in a particular channel.
chspec = wl_cfg80211_get_shared_freq(wiphy);
/* For P2P mode, use P2P-specific driver features to create the
- * bss: "wl p2p_ifadd"
+ * bss: "cfg p2p_ifadd"
*/
- wl_set_p2p_status(wl, IF_ADD);
+ wl_set_p2p_status(cfg, IF_ADDING);
+ memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info));
if (wlif_type == WL_P2P_IF_GO)
- wldev_iovar_setint(_ndev, "mpc", 0);
- err = wl_cfgp2p_ifadd(wl, &wl->p2p->int_addr, htod32(wlif_type), chspec);
-
+ wldev_iovar_setint(primary_ndev, "mpc", 0);
+ err = wl_cfgp2p_ifadd(cfg, &cfg->p2p->int_addr, htod32(wlif_type), chspec);
if (unlikely(err)) {
+ wl_clr_p2p_status(cfg, IF_ADDING);
WL_ERR((" virtual iface add failed (%d) \n", err));
return ERR_PTR(-ENOMEM);
}
- timeout = wait_event_interruptible_timeout(wl->netif_change_event,
- (wl_get_p2p_status(wl, IF_ADD) == false),
+ timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
+ (wl_get_p2p_status(cfg, IF_ADDING) == false),
msecs_to_jiffies(MAX_WAIT_TIME));
- if (timeout > 0 && (!wl_get_p2p_status(wl, IF_ADD))) {
+ if (timeout > 0 && !wl_get_p2p_status(cfg, IF_ADDING) && cfg->if_event_info.valid) {
struct wireless_dev *vwdev;
+ int pm_mode = PM_ENABLE;
+ wl_if_event_info *event = &cfg->if_event_info;
+
+			/* IF_ADD event has come back, we can proceed to register
+ * the new interface now, use the interface name provided by caller (thus
+ * ignore the one from wlc)
+ */
+ strncpy(cfg->if_event_info.name, name, IFNAMSIZ - 1);
+ new_ndev = wl_cfg80211_allocate_if(cfg, event->ifidx, cfg->p2p->vir_ifname,
+ event->mac, event->bssidx);
+ if (new_ndev == NULL)
+ goto fail;
+
+ wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION) = new_ndev;
+ wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION) = event->bssidx;
vwdev = kzalloc(sizeof(*vwdev), GFP_KERNEL);
if (unlikely(!vwdev)) {
WL_ERR(("Could not allocate wireless device\n"));
- return ERR_PTR(-ENOMEM);
+ goto fail;
}
- vwdev->wiphy = wl->wdev->wiphy;
- WL_INFO((" virtual interface(%s) is created memalloc done \n",
- wl->p2p->vir_ifname));
+ vwdev->wiphy = cfg->wdev->wiphy;
+ WL_INFORM(("virtual interface(%s) is created\n", cfg->p2p->vir_ifname));
vwdev->iftype = type;
- _ndev = wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION);
- _ndev->ieee80211_ptr = vwdev;
- SET_NETDEV_DEV(_ndev, wiphy_dev(vwdev->wiphy));
- vwdev->netdev = _ndev;
- wl_set_drv_status(wl, READY, _ndev);
- wl->p2p->vif_created = true;
- wl_set_mode_by_netdev(wl, _ndev, mode);
- net_attach = wl_to_p2p_bss_private(wl, P2PAPI_BSSCFG_CONNECTION);
- if (rtnl_is_locked()) {
- rtnl_unlock();
- rollback_lock = true;
- }
- if (net_attach && !net_attach(wl->pub, _ndev->ifindex)) {
- wl_alloc_netinfo(wl, _ndev, vwdev, mode, PM_ENABLE);
- val = 1;
- /* Disable firmware roaming for P2P interface */
- wldev_iovar_setint(_ndev, "roam_off", val);
-
- if (mode != WL_MODE_AP)
- wldev_iovar_setint(_ndev, "buf_key_b4_m4", 1);
-
- WL_ERR((" virtual interface(%s) is "
- "created net attach done\n", wl->p2p->vir_ifname));
- if (mode == WL_MODE_AP)
- wl_set_drv_status(wl, CONNECTED, _ndev);
- if (type == NL80211_IFTYPE_P2P_CLIENT)
- dhd_mode = DHD_FLAG_P2P_GC_MODE;
- else if (type == NL80211_IFTYPE_P2P_GO)
- dhd_mode = DHD_FLAG_P2P_GO_MODE;
- DNGL_FUNC(dhd_cfg80211_set_p2p_info, (wl, dhd_mode));
-#ifdef PROP_TXSTATUS_VSDB
- if (dhd->plat_enable)
- dhd->plat_enable((void *)dhd);
-#endif /* PROP_TXSTATUS_VSDB */
- /* reinitialize completion to clear previous count */
- INIT_COMPLETION(wl->iface_disable);
- } else {
- /* put back the rtnl_lock again */
- if (rollback_lock)
- rtnl_lock();
+ vwdev->netdev = new_ndev;
+ new_ndev->ieee80211_ptr = vwdev;
+ SET_NETDEV_DEV(new_ndev, wiphy_dev(vwdev->wiphy));
+ wl_set_drv_status(cfg, READY, new_ndev);
+ cfg->p2p->vif_created = true;
+ wl_set_mode_by_netdev(cfg, new_ndev, mode);
+
+ if (wl_cfg80211_register_if(cfg, event->ifidx, new_ndev) != BCME_OK) {
+ wl_cfg80211_remove_if(cfg, event->ifidx, new_ndev);
goto fail;
}
- /* put back the rtnl_lock again */
- if (rollback_lock)
- rtnl_lock();
- return ndev_to_cfgdev(_ndev);
+ wl_alloc_netinfo(cfg, new_ndev, vwdev, mode, pm_mode);
+ val = 1;
+ /* Disable firmware roaming for P2P interface */
+ wldev_iovar_setint(new_ndev, "roam_off", val);
+
+ if (mode != WL_MODE_AP)
+ wldev_iovar_setint(new_ndev, "buf_key_b4_m4", 1);
+
+ WL_ERR((" virtual interface(%s) is "
+ "created net attach done\n", cfg->p2p->vir_ifname));
+ if (mode == WL_MODE_AP)
+ wl_set_drv_status(cfg, CONNECTED, new_ndev);
+ if (type == NL80211_IFTYPE_P2P_CLIENT)
+ dhd_mode = DHD_FLAG_P2P_GC_MODE;
+ else if (type == NL80211_IFTYPE_P2P_GO)
+ dhd_mode = DHD_FLAG_P2P_GO_MODE;
+ DNGL_FUNC(dhd_cfg80211_set_p2p_info, (cfg, dhd_mode));
+ /* reinitialize completion to clear previous count */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0))
+ INIT_COMPLETION(cfg->iface_disable);
+#else
+ init_completion(&cfg->iface_disable);
+#endif
+ return ndev_to_cfgdev(new_ndev);
} else {
- wl_clr_p2p_status(wl, IF_ADD);
- WL_ERR((" virtual interface(%s) is not created \n", wl->p2p->vir_ifname));
- memset(wl->p2p->vir_ifname, '\0', IFNAMSIZ);
- wl->p2p->vif_created = false;
+ wl_clr_p2p_status(cfg, IF_ADDING);
+ WL_ERR((" virtual interface(%s) is not created \n", cfg->p2p->vir_ifname));
+ memset(cfg->p2p->vir_ifname, '\0', IFNAMSIZ);
+ cfg->p2p->vif_created = false;
#ifdef PROP_TXSTATUS_VSDB
- if (dhd->wlfc_enabled && wl->wlfc_on) {
- dhd->wlfc_enabled = false;
+#if defined(BCMSDIO)
+ dhd_wlfc_get_enable(dhd, &enabled);
+ if (enabled && cfg->wlfc_on && dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
+ dhd->op_mode != DHD_FLAG_IBSS_MODE) {
dhd_wlfc_deinit(dhd);
- if (dhd->plat_deinit)
- dhd->plat_deinit((void *)dhd);
- wl->wlfc_on = false;
+ cfg->wlfc_on = false;
}
+#endif
#endif /* PROP_TXSTATUS_VSDB */
}
}
+
fail:
if (wlif_type == WL_P2P_IF_GO)
- wldev_iovar_setint(_ndev, "mpc", 1);
+ wldev_iovar_setint(primary_ndev, "mpc", 1);
return ERR_PTR(-ENODEV);
}
{
struct net_device *dev = NULL;
struct ether_addr p2p_mac;
- struct wl_priv *wl = wiphy_priv(wiphy);
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
s32 timeout = -1;
s32 ret = 0;
s32 index = -1;
+#ifdef CUSTOM_SET_CPUCORE
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+#endif /* CUSTOM_SET_CPUCORE */
WL_DBG(("Enter\n"));
+#ifdef CUSTOM_SET_CPUCORE
+ dhd->chan_isvht80 &= ~DHD_FLAG_P2P_MODE;
+ if (!(dhd->chan_isvht80))
+ dhd_set_cpucore(dhd, FALSE);
+#endif /* CUSTOM_SET_CPUCORE */
#if defined(WL_CFG80211_P2P_DEV_IF)
if (cfgdev->iftype == NL80211_IFTYPE_P2P_DEVICE) {
- return wl_cfgp2p_del_p2p_disc_if(cfgdev);
+ return wl_cfgp2p_del_p2p_disc_if(cfgdev, cfg);
}
#endif /* WL_CFG80211_P2P_DEV_IF */
- dev = cfgdev_to_wlc_ndev(cfgdev, wl);
+ dev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+#ifdef WLAIBSS_MCHAN
+ if (cfgdev == cfg->ibss_cfgdev)
+ return bcm_cfg80211_del_ibss_if(wiphy, cfgdev);
+#endif /* WLAIBSS_MCHAN */
- if (wl_cfgp2p_find_idx(wl, dev, &index) != BCME_OK) {
+#ifdef DUAL_STA
+ if (cfgdev == cfg->bss_cfgdev)
+ return wl_cfg80211_del_iface(wiphy, cfgdev);
+#endif /* DUAL_STA */
+
+ if (wl_cfgp2p_find_idx(cfg, dev, &index) != BCME_OK) {
WL_ERR(("Find p2p index from ndev(%p) failed\n", dev));
return BCME_ERROR;
}
- if (wl->p2p_supported) {
- memcpy(p2p_mac.octet, wl->p2p->int_addr.octet, ETHER_ADDR_LEN);
+ if (cfg->p2p_supported) {
+ memcpy(p2p_mac.octet, cfg->p2p->int_addr.octet, ETHER_ADDR_LEN);
/* Clear GO_NEG_PHASE bit to take care of GO-NEG-FAIL cases
*/
WL_DBG(("P2P: GO_NEG_PHASE status cleared "));
- wl_clr_p2p_status(wl, GO_NEG_PHASE);
- if (wl->p2p->vif_created) {
- if (wl_get_drv_status(wl, SCANNING, dev)) {
- wl_notify_escan_complete(wl, dev, true, true);
+ wl_clr_p2p_status(cfg, GO_NEG_PHASE);
+ if (cfg->p2p->vif_created) {
+ if (wl_get_drv_status(cfg, SCANNING, dev)) {
+ wl_notify_escan_complete(cfg, dev, true, true);
}
wldev_iovar_setint(dev, "mpc", 1);
-
- if (wl->pm_enable_work_on) {
- cancel_delayed_work_sync(&wl->pm_enable_work);
- wl->pm_enable_work_on = false;
- }
+ /* Delete pm_enable_work */
+ wl_add_remove_pm_enable_work(cfg, FALSE, WL_HANDLER_DEL);
/* for GC */
- if (wl_get_drv_status(wl, DISCONNECTING, dev) &&
- (wl_get_mode_by_netdev(wl, dev) != WL_MODE_AP)) {
+ if (wl_get_drv_status(cfg, DISCONNECTING, dev) &&
+ (wl_get_mode_by_netdev(cfg, dev) != WL_MODE_AP)) {
WL_ERR(("Wait for Link Down event for GC !\n"));
wait_for_completion_timeout
- (&wl->iface_disable, msecs_to_jiffies(500));
+ (&cfg->iface_disable, msecs_to_jiffies(500));
}
- wl_set_p2p_status(wl, IF_DELETING);
- DNGL_FUNC(dhd_cfg80211_clean_p2p_info, (wl));
+
+ memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info));
+ wl_set_p2p_status(cfg, IF_DELETING);
+ DNGL_FUNC(dhd_cfg80211_clean_p2p_info, (cfg));
/* for GO */
- if (wl_get_mode_by_netdev(wl, dev) == WL_MODE_AP) {
+ if (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP) {
wl_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, false);
/* disable interface before bsscfg free */
- ret = wl_cfgp2p_ifdisable(wl, &p2p_mac);
+ ret = wl_cfgp2p_ifdisable(cfg, &p2p_mac);
/* if fw doesn't support "ifdis",
do not wait for link down of ap mode
*/
if (ret == 0) {
WL_ERR(("Wait for Link Down event for GO !!!\n"));
- wait_for_completion_timeout(&wl->iface_disable,
+ wait_for_completion_timeout(&cfg->iface_disable,
msecs_to_jiffies(500));
+ } else if (ret != BCME_UNSUPPORTED) {
+ msleep(300);
}
}
- wl_cfgp2p_clear_management_ie(wl, index);
+ wl_cfgp2p_clear_management_ie(cfg, index);
- if (wl_get_mode_by_netdev(wl, dev) != WL_MODE_AP)
+ if (wl_get_mode_by_netdev(cfg, dev) != WL_MODE_AP)
wldev_iovar_setint(dev, "buf_key_b4_m4", 0);
/* delete interface after link down */
- ret = wl_cfgp2p_ifdel(wl, &p2p_mac);
- /* Firmware could not delete the interface so we will not get WLC_E_IF
- * event for cleaning the dhd virtual nw interace
- * So lets do it here. Failures from fw will ensure the application to do
- * ifconfig <inter> down and up sequnce, which will reload the fw
- * however we should cleanup the linux network virtual interfaces
- */
- /* Request framework to RESET and clean up */
- if (ret) {
- struct net_device *ndev = wl_to_prmry_ndev(wl);
- WL_ERR(("Firmware returned an error (%d) from p2p_ifdel"
- "HANG Notification sent to %s\n", ret, ndev->name));
+ ret = wl_cfgp2p_ifdel(cfg, &p2p_mac);
+
+ if (ret != BCME_OK) {
+ struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+
+ WL_ERR(("p2p_ifdel failed, error %d, sent HANG event to %s\n",
+ ret, ndev->name));
+ #if defined(BCMDONGLEHOST) && defined(OEM_ANDROID)
net_os_send_hang_message(ndev);
- }
- /* Wait for IF_DEL operation to be finished in firmware */
- timeout = wait_event_interruptible_timeout(wl->netif_change_event,
- (wl->p2p->vif_created == false),
- msecs_to_jiffies(MAX_WAIT_TIME));
- if (timeout > 0 && (wl->p2p->vif_created == false)) {
- WL_DBG(("IFDEL operation done\n"));
+ #endif
} else {
- WL_ERR(("IFDEL didn't complete properly\n"));
+ /* Wait for IF_DEL operation to be finished */
+ timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
+ (wl_get_p2p_status(cfg, IF_DELETING) == false),
+ msecs_to_jiffies(MAX_WAIT_TIME));
+ if (timeout > 0 && !wl_get_p2p_status(cfg, IF_DELETING) &&
+ cfg->if_event_info.valid) {
+
+ WL_DBG(("IFDEL operation done\n"));
+ wl_cfg80211_handle_ifdel(cfg, &cfg->if_event_info, dev);
+ } else {
+ WL_ERR(("IFDEL didn't complete properly\n"));
+ }
}
+
ret = dhd_del_monitor(dev);
+ if (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP) {
+ DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_CANCEL((dhd_pub_t *)(cfg->pub));
+ }
}
}
return ret;
s32 mode = 0;
s32 err = BCME_OK;
chanspec_t chspec;
- struct wl_priv *wl = wiphy_priv(wiphy);
- dhd_pub_t *dhd = (dhd_pub_t *)(wl->pub);
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+
WL_DBG(("Enter type %d\n", type));
switch (type) {
case NL80211_IFTYPE_MONITOR:
if (!dhd)
return -EINVAL;
if (ap) {
- wl_set_mode_by_netdev(wl, ndev, mode);
- if (wl->p2p_supported && wl->p2p->vif_created) {
- WL_DBG(("p2p_vif_created (%d) p2p_on (%d)\n", wl->p2p->vif_created,
- p2p_on(wl)));
+ wl_set_mode_by_netdev(cfg, ndev, mode);
+ if (cfg->p2p_supported && cfg->p2p->vif_created) {
+ WL_DBG(("p2p_vif_created (%d) p2p_on (%d)\n", cfg->p2p->vif_created,
+ p2p_on(cfg)));
wldev_iovar_setint(ndev, "mpc", 0);
- wl_notify_escan_complete(wl, ndev, true, true);
+ wl_notify_escan_complete(cfg, ndev, true, true);
/* In concurrency case, STA may be already associated in a particular
* channel. so retrieve the current channel of primary interface and
wlif_type = WL_P2P_IF_GO;
printk("%s : ap (%d), infra (%d), iftype: (%d)\n",
ndev->name, ap, infra, type);
- wl_set_p2p_status(wl, IF_CHANGING);
- wl_clr_p2p_status(wl, IF_CHANGED);
- wl_cfgp2p_ifchange(wl, &wl->p2p->int_addr, htod32(wlif_type), chspec);
- wait_event_interruptible_timeout(wl->netif_change_event,
- (wl_get_p2p_status(wl, IF_CHANGED) == true),
+ wl_set_p2p_status(cfg, IF_CHANGING);
+ wl_clr_p2p_status(cfg, IF_CHANGED);
+ wl_cfgp2p_ifchange(cfg, &cfg->p2p->int_addr, htod32(wlif_type), chspec);
+ wait_event_interruptible_timeout(cfg->netif_change_event,
+ (wl_get_p2p_status(cfg, IF_CHANGED) == true),
msecs_to_jiffies(MAX_WAIT_TIME));
- wl_set_mode_by_netdev(wl, ndev, mode);
+ wl_set_mode_by_netdev(cfg, ndev, mode);
dhd->op_mode &= ~DHD_FLAG_P2P_GC_MODE;
dhd->op_mode |= DHD_FLAG_P2P_GO_MODE;
- wl_clr_p2p_status(wl, IF_CHANGING);
- wl_clr_p2p_status(wl, IF_CHANGED);
+ wl_clr_p2p_status(cfg, IF_CHANGING);
+ wl_clr_p2p_status(cfg, IF_CHANGED);
if (mode == WL_MODE_AP)
- wl_set_drv_status(wl, CONNECTED, ndev);
- } else if (ndev == wl_to_prmry_ndev(wl) &&
- !wl_get_drv_status(wl, AP_CREATED, ndev)) {
- wl_set_drv_status(wl, AP_CREATING, ndev);
- if (!wl->ap_info &&
- !(wl->ap_info = kzalloc(sizeof(struct ap_info), GFP_KERNEL))) {
+ wl_set_drv_status(cfg, CONNECTED, ndev);
+ } else if (ndev == bcmcfg_to_prmry_ndev(cfg) &&
+ !wl_get_drv_status(cfg, AP_CREATED, ndev)) {
+ wl_set_drv_status(cfg, AP_CREATING, ndev);
+ if (!cfg->ap_info &&
+ !(cfg->ap_info = kzalloc(sizeof(struct ap_info), GFP_KERNEL))) {
WL_ERR(("struct ap_saved_ie allocation failed\n"));
return -ENOMEM;
}
if (ibss) {
infra = 0;
- wl_set_mode_by_netdev(wl, ndev, mode);
+ wl_set_mode_by_netdev(cfg, ndev, mode);
err = wldev_ioctl(ndev, WLC_SET_INFRA, &infra, sizeof(s32), true);
if (err < 0) {
WL_ERR(("SET Adhoc error %d\n", err));
return -EINVAL;
- }
+ }
}
ndev->ieee80211_ptr->iftype = type;
}
s32
-wl_cfg80211_notify_ifadd(struct net_device *ndev, s32 idx, s32 bssidx,
- void* _net_attach)
+wl_cfg80211_notify_ifadd(int ifidx, char *name, uint8 *mac, uint8 bssidx)
{
- struct wl_priv *wl = wlcfg_drv_priv;
- s32 ret = BCME_OK;
- WL_DBG(("Enter"));
- if (!ndev) {
- WL_ERR(("net is NULL\n"));
- return 0;
+ bool ifadd_expected = FALSE;
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
+
+ /* P2P may send WLC_E_IF_ADD and/or WLC_E_IF_CHANGE during IF updating ("p2p_ifupd")
+ * redirect the IF_ADD event to ifchange as it is not a real "new" interface
+ */
+ if (wl_get_p2p_status(cfg, IF_CHANGING))
+ return wl_cfg80211_notify_ifchange(ifidx, name, mac, bssidx);
+
+ /* Okay, we are expecting IF_ADD (as IF_ADDING is true) */
+ if (wl_get_p2p_status(cfg, IF_ADDING)) {
+ ifadd_expected = TRUE;
+ wl_clr_p2p_status(cfg, IF_ADDING);
+ } else if (cfg->bss_pending_op) {
+ ifadd_expected = TRUE;
+ cfg->bss_pending_op = FALSE;
}
- if (wl->p2p_supported && wl_get_p2p_status(wl, IF_ADD)) {
- WL_DBG(("IF_ADD event called from dongle, old interface name: %s,"
- "new name: %s\n", ndev->name, wl->p2p->vir_ifname));
- /* Assign the net device to CONNECT BSSCFG */
- strncpy(ndev->name, wl->p2p->vir_ifname, IFNAMSIZ - 1);
- wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION) = ndev;
- wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_CONNECTION) = bssidx;
- wl_to_p2p_bss_private(wl, P2PAPI_BSSCFG_CONNECTION) = _net_attach;
- ndev->ifindex = idx;
- wl_clr_p2p_status(wl, IF_ADD);
-
- wake_up_interruptible(&wl->netif_change_event);
- } else {
- ret = BCME_NOTREADY;
+
+ if (ifadd_expected) {
+ wl_if_event_info *if_event_info = &cfg->if_event_info;
+
+ if_event_info->valid = TRUE;
+ if_event_info->ifidx = ifidx;
+ if_event_info->bssidx = bssidx;
+ strncpy(if_event_info->name, name, IFNAMSIZ);
+ if_event_info->name[IFNAMSIZ] = '\0';
+ if (mac)
+ memcpy(if_event_info->mac, mac, ETHER_ADDR_LEN);
+ wake_up_interruptible(&cfg->netif_change_event);
+ return BCME_OK;
}
- return ret;
+
+ return BCME_ERROR;
}
s32
-wl_cfg80211_notify_ifdel(void)
+wl_cfg80211_notify_ifdel(int ifidx, char *name, uint8 *mac, uint8 bssidx)
{
- struct wl_priv *wl = wlcfg_drv_priv;
+ bool ifdel_expected = FALSE;
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
+ wl_if_event_info *if_event_info = &cfg->if_event_info;
- WL_DBG(("Enter \n"));
- wl_clr_p2p_status(wl, IF_DELETING);
- wake_up_interruptible(&wl->netif_change_event);
- return 0;
+ if (wl_get_p2p_status(cfg, IF_DELETING)) {
+ ifdel_expected = TRUE;
+ wl_clr_p2p_status(cfg, IF_DELETING);
+ } else if (cfg->bss_pending_op) {
+ ifdel_expected = TRUE;
+ cfg->bss_pending_op = FALSE;
+ }
+
+ if (ifdel_expected) {
+ if_event_info->valid = TRUE;
+ if_event_info->ifidx = ifidx;
+ if_event_info->bssidx = bssidx;
+ wake_up_interruptible(&cfg->netif_change_event);
+ return BCME_OK;
+ }
+
+ return BCME_ERROR;
}
s32
-wl_cfg80211_ifdel_ops(struct net_device *ndev)
+wl_cfg80211_notify_ifchange(int ifidx, char *name, uint8 *mac, uint8 bssidx)
+{
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
+
+ if (wl_get_p2p_status(cfg, IF_CHANGING)) {
+ wl_set_p2p_status(cfg, IF_CHANGED);
+ wake_up_interruptible(&cfg->netif_change_event);
+ return BCME_OK;
+ }
+
+ return BCME_ERROR;
+}
+
+static s32 wl_cfg80211_handle_ifdel(struct bcm_cfg80211 *cfg, wl_if_event_info *if_event_info,
+ struct net_device* ndev)
{
- struct wl_priv *wl = wlcfg_drv_priv;
- bool rollback_lock = false;
s32 type = -1;
s32 bssidx = -1;
-
#ifdef PROP_TXSTATUS_VSDB
- dhd_pub_t *dhd = (dhd_pub_t *)(wl->pub);
+#if defined(BCMSDIO)
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+ bool enabled;
+#endif
#endif /* PROP_TXSTATUS_VSDB */
- if (!ndev || (strlen(ndev->name) == 0)) {
- WL_ERR(("net is NULL\n"));
- return 0;
+
+ bssidx = if_event_info->bssidx;
+ if (bssidx != wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION)) {
+ WL_ERR(("got IF_DEL for if %d, not owned by cfg driver\n", bssidx));
+ return BCME_ERROR;
}
- if (p2p_is_on(wl) && wl->p2p->vif_created &&
- wl_get_p2p_status(wl, IF_DELETING)) {
- if (wl->scan_request &&
- (wl->escan_info.ndev == ndev)) {
+ if (p2p_is_on(cfg) && cfg->p2p->vif_created) {
+
+ if (cfg->scan_request && (cfg->escan_info.ndev == ndev)) {
/* Abort any pending scan requests */
- wl->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
- if (!rtnl_is_locked()) {
- rtnl_lock();
- rollback_lock = true;
- }
+ cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
WL_DBG(("ESCAN COMPLETED\n"));
- wl_notify_escan_complete(wl, ndev, true, false);
- if (rollback_lock)
- rtnl_unlock();
+ wl_notify_escan_complete(cfg, cfg->escan_info.ndev, true, false);
}
- WL_ERR(("IF_DEL event called from dongle, net %p, vif name: %s\n",
- ndev, wl->p2p->vir_ifname));
- memset(wl->p2p->vir_ifname, '\0', IFNAMSIZ);
- if (wl_cfgp2p_find_idx(wl, ndev, &bssidx) != BCME_OK) {
- WL_ERR(("Find p2p bssidx from ndev(%p) failed\n", ndev));
- return BCME_ERROR;
- }
- if (wl_cfgp2p_find_type(wl, bssidx, &type) != BCME_OK) {
+ memset(cfg->p2p->vir_ifname, '\0', IFNAMSIZ);
+ if (wl_cfgp2p_find_type(cfg, bssidx, &type) != BCME_OK) {
WL_ERR(("Find p2p type from bssidx(%d) failed\n", bssidx));
return BCME_ERROR;
}
- wl_to_p2p_bss_ndev(wl, type) = NULL;
- wl_to_p2p_bss_bssidx(wl, type) = WL_INVALID;
- wl->p2p->vif_created = false;
+ wl_clr_drv_status(cfg, CONNECTED, wl_to_p2p_bss_ndev(cfg, type));
+ wl_to_p2p_bss_ndev(cfg, type) = NULL;
+ wl_to_p2p_bss_bssidx(cfg, type) = WL_INVALID;
+ cfg->p2p->vif_created = false;
- WL_DBG(("type : %d\n", type));
#ifdef PROP_TXSTATUS_VSDB
- if (dhd->wlfc_enabled && wl->wlfc_on) {
- dhd->wlfc_enabled = false;
+#if defined(BCMSDIO)
+ dhd_wlfc_get_enable(dhd, &enabled);
+ if (enabled && cfg->wlfc_on && dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
+ dhd->op_mode != DHD_FLAG_IBSS_MODE) {
dhd_wlfc_deinit(dhd);
- if (dhd->plat_deinit)
- dhd->plat_deinit((void *)dhd);
- wl->wlfc_on = false;
+ cfg->wlfc_on = false;
}
+#endif
#endif /* PROP_TXSTATUS_VSDB */
- wl_clr_drv_status(wl, CONNECTED, ndev);
}
- /* Wake up any waiting thread */
- wake_up_interruptible(&wl->netif_change_event);
-
- return 0;
-}
-
-s32
-wl_cfg80211_is_progress_ifadd(void)
-{
- s32 is_progress = 0;
- struct wl_priv *wl = wlcfg_drv_priv;
- if (wl_get_p2p_status(wl, IF_ADD))
- is_progress = 1;
- return is_progress;
-}
-
-s32
-wl_cfg80211_is_progress_ifchange(void)
-{
- s32 is_progress = 0;
- struct wl_priv *wl = wlcfg_drv_priv;
- if (wl_get_p2p_status(wl, IF_CHANGING))
- is_progress = 1;
- return is_progress;
-}
-
-s32
-wl_cfg80211_notify_ifchange(void)
-{
- struct wl_priv *wl = wlcfg_drv_priv;
- if (wl_get_p2p_status(wl, IF_CHANGING)) {
- wl_set_p2p_status(wl, IF_CHANGED);
- wake_up_interruptible(&wl->netif_change_event);
- }
- return 0;
+ wl_cfg80211_remove_if(cfg, if_event_info->ifidx, ndev);
+ return BCME_OK;
}
/* Find listen channel */
-static s32 wl_find_listen_channel(struct wl_priv *wl,
+static s32 wl_find_listen_channel(struct bcm_cfg80211 *cfg,
const u8 *ie, u32 ie_len)
{
wifi_p2p_ie_t *p2p_ie;
s32 i = 0, j = 0, offset;
char *ptr;
wlc_ssid_t ssid;
- struct wl_priv *wl = wlcfg_drv_priv;
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
	memcpy(&params->bssid, &ether_bcast, ETHER_ADDR_LEN);
params->bss_type = DOT11_BSSTYPE_ANY;
chanspec = 0;
channel = ieee80211_frequency_to_channel(request->channels[i]->center_freq);
/* SKIP DFS channels for Secondary interface */
- if ((wl->escan_info.ndev != wl_to_prmry_ndev(wl)) &&
+ if ((cfg->escan_info.ndev != bcmcfg_to_prmry_ndev(cfg)) &&
(request->channels[i]->flags &
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
(IEEE80211_CHAN_RADAR | IEEE80211_CHAN_PASSIVE_SCAN)))
+#else
+ (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)))
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0) */
continue;
if (request->channels[i]->band == IEEE80211_BAND_2GHZ) {
#ifdef WL_HOST_BAND_MGMT
- if (wl->curr_band == WLC_BAND_5G) {
+ if (cfg->curr_band == WLC_BAND_5G) {
WL_DBG(("In 5G only mode, omit 2G channel:%d\n", channel));
continue;
}
chanspec |= WL_CHANSPEC_BAND_2G;
} else {
#ifdef WL_HOST_BAND_MGMT
- if (wl->curr_band == WLC_BAND_2G) {
+ if (cfg->curr_band == WLC_BAND_2G) {
WL_DBG(("In 2G only mode, omit 5G channel:%d\n", channel));
continue;
}
}
static s32
-wl_run_iscan(struct wl_iscan_ctrl *iscan, struct cfg80211_scan_request *request, u16 action)
+wl_get_valid_channels(struct net_device *ndev, u8 *valid_chan_list, s32 size)
{
- u32 n_channels;
- u32 n_ssids;
- s32 params_size =
- (WL_SCAN_PARAMS_FIXED_SIZE + offsetof(wl_iscan_params_t, params));
- struct wl_iscan_params *params = NULL;
- s32 err = 0;
-
- if (request != NULL) {
- n_channels = request->n_channels;
- n_ssids = request->n_ssids;
- /* Allocate space for populating ssids in wl_iscan_params struct */
- if (n_channels % 2)
- /* If n_channels is odd, add a padd of u16 */
- params_size += sizeof(u16) * (n_channels + 1);
- else
- params_size += sizeof(u16) * n_channels;
+ wl_uint32_list_t *list;
+ s32 err = BCME_OK;
+ if (valid_chan_list == NULL || size <= 0)
+ return -ENOMEM;
- /* Allocate space for populating ssids in wl_iscan_params struct */
- params_size += sizeof(struct wlc_ssid) * n_ssids;
- }
- params = (struct wl_iscan_params *)kzalloc(params_size, GFP_KERNEL);
- if (!params) {
- err = -ENOMEM;
- goto done;
+ memset(valid_chan_list, 0, size);
+ list = (wl_uint32_list_t *)(void *) valid_chan_list;
+ list->count = htod32(WL_NUMCHANNELS);
+ err = wldev_ioctl(ndev, WLC_GET_VALID_CHANNELS, valid_chan_list, size, false);
+ if (err != 0) {
+ WL_ERR(("get channels failed with %d\n", err));
}
- wl_scan_prep(¶ms->params, request);
- params->version = htod32(ISCAN_REQ_VERSION);
- params->action = htod16(action);
- params->scan_duration = htod16(0);
-
- if (params_size + sizeof("iscan") >= WLC_IOCTL_MEDLEN) {
- WL_ERR(("ioctl buffer length is not sufficient\n"));
- err = -ENOMEM;
- goto done;
- }
- err = wldev_iovar_setbuf(iscan->dev, "iscan", params, params_size,
- iscan->ioctl_buf, WLC_IOCTL_MEDLEN, NULL);
- if (unlikely(err)) {
- if (err == -EBUSY) {
- WL_ERR(("system busy : iscan canceled\n"));
- } else {
- WL_ERR(("error (%d)\n", err));
- }
- }
-
-done:
- if (params)
- kfree(params);
- return err;
-}
-
-static s32 wl_do_iscan(struct wl_priv *wl, struct cfg80211_scan_request *request)
-{
- struct wl_iscan_ctrl *iscan = wl_to_iscan(wl);
- struct net_device *ndev = wl_to_prmry_ndev(wl);
- s32 passive_scan;
- s32 err = 0;
-
- iscan->state = WL_ISCAN_STATE_SCANING;
-
- passive_scan = wl->active_scan ? 0 : 1;
- err = wldev_ioctl(ndev, WLC_SET_PASSIVE_SCAN,
- &passive_scan, sizeof(passive_scan), true);
- if (unlikely(err)) {
- WL_DBG(("error (%d)\n", err));
- return err;
- }
- wl->iscan_kickstart = true;
- wl_run_iscan(iscan, request, WL_SCAN_ACTION_START);
- mod_timer(&iscan->timer, jiffies + msecs_to_jiffies(iscan->timer_ms));
- iscan->timer_on = 1;
-
- return err;
-}
-
-static s32
-wl_get_valid_channels(struct net_device *ndev, u8 *valid_chan_list, s32 size)
-{
- wl_uint32_list_t *list;
- s32 err = BCME_OK;
- if (valid_chan_list == NULL || size <= 0)
- return -ENOMEM;
-
- memset(valid_chan_list, 0, size);
- list = (wl_uint32_list_t *)(void *) valid_chan_list;
- list->count = htod32(WL_NUMCHANNELS);
- err = wldev_ioctl(ndev, WLC_GET_VALID_CHANNELS, valid_chan_list, size, false);
- if (err != 0) {
- WL_ERR(("get channels failed with %d\n", err));
- }
-
- return err;
-}
+ return err;
+}
#if defined(USE_INITIAL_SHORT_DWELL_TIME)
#define FIRST_SCAN_ACTIVE_DWELL_TIME_MS 40
-static bool
-g_first_broadcast_scan = TRUE;
+bool g_first_broadcast_scan = TRUE;
#endif
static s32
-wl_run_escan(struct wl_priv *wl, struct net_device *ndev,
+wl_run_escan(struct bcm_cfg80211 *cfg, struct net_device *ndev,
struct cfg80211_scan_request *request, uint16 action)
{
s32 err = BCME_OK;
u16 *default_chan_list = NULL;
wl_uint32_list_t *list;
struct net_device *dev = NULL;
- scb_val_t scbval;
#if defined(USE_INITIAL_SHORT_DWELL_TIME)
bool is_first_init_2g_scan = false;
#endif
p2p_scan_purpose_t p2p_scan_purpose = P2P_SCAN_PURPOSE_MIN;
+ scb_val_t scbval;
static int cnt = 0;
WL_DBG(("Enter \n"));
/* scan request can come with empty request : perform all default scan */
- if (!wl) {
+ if (!cfg) {
err = -EINVAL;
goto exit;
}
- if (!wl->p2p_supported || !p2p_scan(wl)) {
+ if (!cfg->p2p_supported || !p2p_scan(cfg)) {
/* LEGACY SCAN TRIGGER */
WL_SCAN((" LEGACY E-SCAN START\n"));
err = -EINVAL;
goto exit;
}
- if (ndev == wl_to_prmry_ndev(wl) && g_first_broadcast_scan == true) {
+ if (ndev == bcmcfg_to_prmry_ndev(cfg) && g_first_broadcast_scan == true) {
is_first_init_2g_scan = true;
g_first_broadcast_scan = false;
}
if (request != NULL) {
n_channels = request->n_channels;
n_ssids = request->n_ssids;
- /* Allocate space for populating ssids in wl_iscan_params struct */
if (n_channels % 2)
			/* If n_channels is odd, add a pad of u16 */
params_size += sizeof(u16) * (n_channels + 1);
else
params_size += sizeof(u16) * n_channels;
- /* Allocate space for populating ssids in wl_iscan_params struct */
+ /* Allocate space for populating ssids in wl_escan_params_t struct */
params_size += sizeof(struct wlc_ssid) * n_ssids;
}
params = (wl_escan_params_t *) kzalloc(params_size, GFP_KERNEL);
params->version = htod32(ESCAN_REQ_VERSION);
params->action = htod16(action);
- wl_escan_set_sync_id(params->sync_id, wl);
+ wl_escan_set_sync_id(params->sync_id, cfg);
+ wl_escan_set_type(cfg, WL_SCANTYPE_LEGACY);
if (params_size + sizeof("escan") >= WLC_IOCTL_MEDLEN) {
WL_ERR(("ioctl buffer length not sufficient\n"));
kfree(params);
goto exit;
}
err = wldev_iovar_setbuf(ndev, "escan", params, params_size,
- wl->escan_ioctl_buf, WLC_IOCTL_MEDLEN, NULL);
+ cfg->escan_ioctl_buf, WLC_IOCTL_MEDLEN, NULL);
if (unlikely(err)) {
if (err == BCME_EPERM)
/* Scan Not permitted at this point of time */
}
kfree(params);
}
- else if (p2p_is_on(wl) && p2p_scan(wl)) {
+ else if (p2p_is_on(cfg) && p2p_scan(cfg)) {
/* P2P SCAN TRIGGER */
s32 _freq = 0;
n_nodfs = 0;
#ifdef WL_HOST_BAND_MGMT
channel_band = (channel > CH_MAX_2G_CHANNEL) ?
WLC_BAND_5G : WLC_BAND_2G;
- if ((wl->curr_band != WLC_BAND_AUTO) &&
- (wl->curr_band != channel_band) &&
+ if ((cfg->curr_band != WLC_BAND_AUTO) &&
+ (cfg->curr_band != channel_band) &&
!IS_P2P_SOCIAL_CHANNEL(channel))
continue;
#endif /* WL_HOST_BAND_MGMT */
/* ignore DFS channels */
if (request->channels[i]->flags &
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+ (IEEE80211_CHAN_NO_IR
+ | IEEE80211_CHAN_RADAR))
+#else
(IEEE80211_CHAN_RADAR
| IEEE80211_CHAN_PASSIVE_SCAN))
+#endif
continue;
for (j = 0; j < n_valid_chan; j++) {
/* SOCIAL CHANNELS 1, 6, 11 */
search_state = WL_P2P_DISC_ST_SEARCH;
p2p_scan_purpose = P2P_SCAN_SOCIAL_CHANNEL;
- WL_INFO(("P2P SEARCH PHASE START \n"));
- } else if ((dev = wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION)) &&
- (wl_get_mode_by_netdev(wl, dev) == WL_MODE_AP)) {
+ WL_INFORM(("P2P SEARCH PHASE START \n"));
+ } else if ((dev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION)) &&
+ (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP)) {
/* If you are already a GO, then do SEARCH only */
- WL_INFO(("Already a GO. Do SEARCH Only"));
+ WL_INFORM(("Already a GO. Do SEARCH Only"));
search_state = WL_P2P_DISC_ST_SEARCH;
num_chans = n_nodfs;
p2p_scan_purpose = P2P_SCAN_NORMAL;
*/
p2p_scan_purpose = P2P_SCAN_SOCIAL_CHANNEL;
} else {
- WL_INFO(("P2P SCAN STATE START \n"));
+ WL_INFORM(("P2P SCAN STATE START \n"));
num_chans = n_nodfs;
p2p_scan_purpose = P2P_SCAN_NORMAL;
}
err = -EINVAL;
goto exit;
}
- err = wl_cfgp2p_escan(wl, ndev, wl->active_scan, num_chans, default_chan_list,
+ err = wl_cfgp2p_escan(cfg, ndev, cfg->active_scan, num_chans, default_chan_list,
search_state, action,
- wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE), NULL,
+ wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE), NULL,
p2p_scan_purpose);
if (!err)
- wl->p2p->search_state = search_state;
+ cfg->p2p->search_state = search_state;
kfree(default_chan_list);
}
exit:
if (unlikely(err)) {
		/* Don't print Error in case of Scan suppress */
- if ((err == BCME_EPERM) && wl->scan_suppressed)
+ if ((err == BCME_EPERM) && cfg->scan_suppressed)
WL_DBG(("Escan failed: Scan Suppressed \n"));
else {
cnt++;
WL_ERR(("error (%d), cnt=%d\n", err, cnt));
// terence 20140111: send disassoc to firmware
if (cnt >= 4) {
- dev = wl_to_prmry_ndev(wl);
+ dev = bcmcfg_to_prmry_ndev(cfg);
memset(&scbval, 0, sizeof(scb_val_t));
wldev_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t), true);
WL_ERR(("Send disassoc to break the busy dev=%p\n", dev));
static s32
-wl_do_escan(struct wl_priv *wl, struct wiphy *wiphy, struct net_device *ndev,
+wl_do_escan(struct bcm_cfg80211 *cfg, struct wiphy *wiphy, struct net_device *ndev,
struct cfg80211_scan_request *request)
{
s32 err = BCME_OK;
s32 passive_scan;
wl_scan_results_t *results;
WL_SCAN(("Enter \n"));
- mutex_lock(&wl->usr_sync);
+ mutex_lock(&cfg->usr_sync);
- results = wl_escan_get_buf(wl, FALSE);
+ results = wl_escan_get_buf(cfg, FALSE);
results->version = 0;
results->count = 0;
results->buflen = WL_SCAN_RESULTS_FIXED_SIZE;
- wl->escan_info.ndev = ndev;
- wl->escan_info.wiphy = wiphy;
- wl->escan_info.escan_state = WL_ESCAN_STATE_SCANING;
- passive_scan = wl->active_scan ? 0 : 1;
+ cfg->escan_info.ndev = ndev;
+ cfg->escan_info.wiphy = wiphy;
+ cfg->escan_info.escan_state = WL_ESCAN_STATE_SCANING;
+ passive_scan = cfg->active_scan ? 0 : 1;
err = wldev_ioctl(ndev, WLC_SET_PASSIVE_SCAN,
&passive_scan, sizeof(passive_scan), true);
if (unlikely(err)) {
goto exit;
}
- err = wl_run_escan(wl, ndev, request, WL_SCAN_ACTION_START);
+ err = wl_run_escan(cfg, ndev, request, WL_SCAN_ACTION_START);
exit:
- mutex_unlock(&wl->usr_sync);
+ mutex_unlock(&cfg->usr_sync);
return err;
}
struct cfg80211_scan_request *request,
struct cfg80211_ssid *this_ssid)
{
- struct wl_priv *wl = wiphy_priv(wiphy);
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
struct cfg80211_ssid *ssids;
- struct wl_scan_req *sr = wl_to_sr(wl);
struct ether_addr primary_mac;
- s32 passive_scan;
- bool iscan_req;
- bool escan_req = false;
bool p2p_ssid;
#ifdef WL11U
bcm_tlv_t *interworking_ie;
unsigned long flags;
static s32 busy_count = 0;
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+ struct net_device *remain_on_channel_ndev = NULL;
+#endif
dhd_pub_t *dhd;
- dhd = (dhd_pub_t *)(wl->pub);
+ dhd = (dhd_pub_t *)(cfg->pub);
+	/*
+	 * Hostapd triggers a scan before starting automatic channel selection;
+	 * the dump stats IOVAR also scans each channel, hence we return from here.
+	 */
if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
+#ifdef WL_SUPPORT_ACS
+ WL_INFORM(("Scan Command at SoftAP mode\n"));
+ return 0;
+#else
WL_ERR(("Invalid Scan Command at SoftAP mode\n"));
return -EINVAL;
+#endif /* WL_SUPPORT_ACS */
}
- ndev = ndev_to_wlc_ndev(ndev, wl);
+ ndev = ndev_to_wlc_ndev(ndev, cfg);
- if (WL_DRV_STATUS_SENDING_AF_FRM_EXT(wl)) {
+ if (WL_DRV_STATUS_SENDING_AF_FRM_EXT(cfg)) {
WL_ERR(("Sending Action Frames. Try it again.\n"));
return -EAGAIN;
}
WL_DBG(("Enter wiphy (%p)\n", wiphy));
- if (wl_get_drv_status_all(wl, SCANNING)) {
- if (wl->scan_request == NULL) {
- wl_clr_drv_status_all(wl, SCANNING);
+ if (wl_get_drv_status_all(cfg, SCANNING)) {
+ if (cfg->scan_request == NULL) {
+ wl_clr_drv_status_all(cfg, SCANNING);
WL_DBG(("<<<<<<<<<<<Force Clear Scanning Status>>>>>>>>>>>\n"));
} else {
WL_ERR(("Scanning already\n"));
return -EAGAIN;
}
}
- if (wl_get_drv_status(wl, SCAN_ABORTING, ndev)) {
+ if (wl_get_drv_status(cfg, SCAN_ABORTING, ndev)) {
WL_ERR(("Scanning being aborted\n"));
return -EAGAIN;
}
return -EOPNOTSUPP;
}
#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
- if (wl_get_drv_status_all(wl, REMAINING_ON_CHANNEL)) {
+ remain_on_channel_ndev = wl_cfg80211_get_remain_on_channel_ndev(cfg);
+ if (remain_on_channel_ndev) {
WL_DBG(("Remain_on_channel bit is set, somehow it didn't get cleared\n"));
- wl_notify_escan_complete(wl, ndev, true, true);
+ wl_notify_escan_complete(cfg, remain_on_channel_ndev, true, true);
}
#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
#ifdef WL_SDO
- if (wl_get_p2p_status(wl, DISC_IN_PROGRESS)) {
- wl_cfg80211_pause_sdo(ndev, wl);
+ if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS)) {
+ wl_cfg80211_pause_sdo(ndev, cfg);
}
#endif
/* Arm scan timeout timer */
- mod_timer(&wl->scan_timeout, jiffies + msecs_to_jiffies(WL_SCAN_TIMER_INTERVAL_MS));
- iscan_req = false;
+ mod_timer(&cfg->scan_timeout, jiffies + msecs_to_jiffies(WL_SCAN_TIMER_INTERVAL_MS));
if (request) { /* scan bss */
ssids = request->ssids;
- if (wl->iscan_on && (!ssids || !ssids->ssid_len || request->n_ssids != 1)) {
- iscan_req = true;
- } else if (wl->escan_on) {
- escan_req = true;
- p2p_ssid = false;
- for (i = 0; i < request->n_ssids; i++) {
- if (ssids[i].ssid_len &&
- IS_P2P_SSID(ssids[i].ssid, ssids[i].ssid_len)) {
- p2p_ssid = true;
- break;
- }
+ p2p_ssid = false;
+ for (i = 0; i < request->n_ssids; i++) {
+ if (ssids[i].ssid_len &&
+ IS_P2P_SSID(ssids[i].ssid, ssids[i].ssid_len)) {
+ p2p_ssid = true;
+ break;
}
- if (p2p_ssid) {
- if (wl->p2p_supported) {
- /* p2p scan trigger */
- if (p2p_on(wl) == false) {
- /* p2p on at the first time */
- p2p_on(wl) = true;
- wl_cfgp2p_set_firm_p2p(wl);
- get_primary_mac(wl, &primary_mac);
- wl_cfgp2p_generate_bss_mac(&primary_mac,
- &wl->p2p->dev_addr, &wl->p2p->int_addr);
- }
- wl_clr_p2p_status(wl, GO_NEG_PHASE);
- WL_DBG(("P2P: GO_NEG_PHASE status cleared \n"));
- p2p_scan(wl) = true;
+ }
+ if (p2p_ssid) {
+ if (cfg->p2p_supported) {
+ /* p2p scan trigger */
+ if (p2p_on(cfg) == false) {
+ /* p2p on at the first time */
+ p2p_on(cfg) = true;
+ wl_cfgp2p_set_firm_p2p(cfg);
+ get_primary_mac(cfg, &primary_mac);
+ wl_cfgp2p_generate_bss_mac(&primary_mac,
+ &cfg->p2p->dev_addr, &cfg->p2p->int_addr);
}
- } else {
- /* legacy scan trigger
- * So, we have to disable p2p discovery if p2p discovery is on
- */
- if (wl->p2p_supported) {
- p2p_scan(wl) = false;
- /* If Netdevice is not equals to primary and p2p is on
- * , we will do p2p scan using P2PAPI_BSSCFG_DEVICE.
- */
-
- if (p2p_scan(wl) == false) {
- if (wl_get_p2p_status(wl, DISCOVERY_ON)) {
- err = wl_cfgp2p_discover_enable_search(wl,
- false);
- if (unlikely(err)) {
- goto scan_out;
- }
-
+ wl_clr_p2p_status(cfg, GO_NEG_PHASE);
+ WL_DBG(("P2P: GO_NEG_PHASE status cleared \n"));
+ p2p_scan(cfg) = true;
+ }
+ } else {
+ /* legacy scan trigger
+ * So, we have to disable p2p discovery if p2p discovery is on
+ */
+ if (cfg->p2p_supported) {
+ p2p_scan(cfg) = false;
+			/* If the netdevice is not the primary one and p2p is on,
+			 * we will do the p2p scan using P2PAPI_BSSCFG_DEVICE.
+			 */
+
+ if (p2p_scan(cfg) == false) {
+ if (wl_get_p2p_status(cfg, DISCOVERY_ON)) {
+ err = wl_cfgp2p_discover_enable_search(cfg,
+ false);
+ if (unlikely(err)) {
+ goto scan_out;
}
+
}
}
- if (!wl->p2p_supported || !p2p_scan(wl)) {
+ }
+ if (!cfg->p2p_supported || !p2p_scan(cfg)) {
- if (wl_cfgp2p_find_idx(wl, ndev, &bssidx) != BCME_OK) {
- WL_ERR(("Find p2p index from ndev(%p) failed\n",
- ndev));
- err = BCME_ERROR;
- goto scan_out;
- }
+ if (wl_cfgp2p_find_idx(cfg, ndev, &bssidx) != BCME_OK) {
+ WL_ERR(("Find p2p index from ndev(%p) failed\n",
+ ndev));
+ err = BCME_ERROR;
+ goto scan_out;
+ }
#ifdef WL11U
- if ((interworking_ie = wl_cfg80211_find_interworking_ie(
- (u8 *)request->ie, request->ie_len)) != NULL) {
- err = wl_cfg80211_add_iw_ie(wl, ndev, bssidx,
- VNDR_IE_CUSTOM_FLAG, interworking_ie->id,
- interworking_ie->data, interworking_ie->len);
-
- if (unlikely(err)) {
- goto scan_out;
- }
- } else if (wl->iw_ie_len != 0) {
- /* we have to clear IW IE and disable gratuitous APR */
- wl_cfg80211_add_iw_ie(wl, ndev, bssidx,
- VNDR_IE_CUSTOM_FLAG,
- DOT11_MNG_INTERWORKING_ID,
- 0, 0);
-
- wldev_iovar_setint_bsscfg(ndev, "grat_arp", 0,
- bssidx);
- wl->wl11u = FALSE;
- /* we don't care about error */
- }
-#endif /* WL11U */
- err = wl_cfgp2p_set_management_ie(wl, ndev, bssidx,
- VNDR_IE_PRBREQ_FLAG, (u8 *)request->ie,
- request->ie_len);
+ if ((interworking_ie = wl_cfg80211_find_interworking_ie(
+ (u8 *)request->ie, request->ie_len)) != NULL) {
+ err = wl_cfg80211_add_iw_ie(cfg, ndev, bssidx,
+ VNDR_IE_CUSTOM_FLAG, interworking_ie->id,
+ interworking_ie->data, interworking_ie->len);
if (unlikely(err)) {
goto scan_out;
}
+ } else if (cfg->iw_ie_len != 0) {
+			/* we have to clear the IW IE and disable gratuitous ARP */
+ wl_cfg80211_add_iw_ie(cfg, ndev, bssidx,
+ VNDR_IE_CUSTOM_FLAG,
+ DOT11_MNG_INTERWORKING_ID,
+ 0, 0);
+
+ wldev_iovar_setint_bsscfg(ndev, "grat_arp", 0,
+ bssidx);
+ cfg->wl11u = FALSE;
+ /* we don't care about error */
+ }
+#endif /* WL11U */
+ err = wl_cfgp2p_set_management_ie(cfg, ndev, bssidx,
+ VNDR_IE_PRBREQ_FLAG, (u8 *)request->ie,
+ request->ie_len);
+ if (unlikely(err)) {
+ goto scan_out;
}
+
}
}
} else { /* scan in ibss */
- /* we don't do iscan in ibss */
ssids = this_ssid;
}
- if (request && !p2p_scan(wl))
- WL_TRACE_HW4(("START SCAN\n"));
- wl->scan_request = request;
- wl_set_drv_status(wl, SCANNING, ndev);
- if (iscan_req) {
- err = wl_do_iscan(wl, request);
- if (likely(!err))
- goto scan_success;
- else
- goto scan_out;
- } else if (escan_req) {
- if (wl->p2p_supported) {
- if (p2p_on(wl) && p2p_scan(wl)) {
-#ifdef WL_SDO
- if (wl_get_p2p_status(wl, DISC_IN_PROGRESS)) {
- /* We shouldn't be getting p2p_find while discovery
- * offload is in progress
- */
- WL_SD(("P2P_FIND: Discovery offload is in progress."
- " Do nothing\n"));
- err = -EINVAL;
- goto scan_out;
- }
-#endif
- /* find my listen channel */
- wl->afx_hdl->my_listen_chan =
- wl_find_listen_channel(wl, request->ie,
- request->ie_len);
- err = wl_cfgp2p_enable_discovery(wl, ndev,
- request->ie, request->ie_len);
+ cfg->scan_request = request;
+ wl_set_drv_status(cfg, SCANNING, ndev);
- if (unlikely(err)) {
- goto scan_out;
- }
- }
- }
- err = wl_do_escan(wl, wiphy, ndev, request);
- if (likely(!err))
- goto scan_success;
- else
- goto scan_out;
+ if (cfg->p2p_supported) {
+ if (p2p_on(cfg) && p2p_scan(cfg)) {
+#ifdef WL_SDO
+ if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS)) {
+ /* We shouldn't be getting p2p_find while discovery
+ * offload is in progress
+ */
+ WL_SD(("P2P_FIND: Discovery offload is in progress."
+ " Do nothing\n"));
+ err = -EINVAL;
+ goto scan_out;
+ }
+#endif
+ /* find my listen channel */
+ cfg->afx_hdl->my_listen_chan =
+ wl_find_listen_channel(cfg, request->ie,
+ request->ie_len);
+ err = wl_cfgp2p_enable_discovery(cfg, ndev,
+ request->ie, request->ie_len);
- } else {
- memset(&sr->ssid, 0, sizeof(sr->ssid));
- sr->ssid.SSID_len =
- min_t(u8, sizeof(sr->ssid.SSID), ssids->ssid_len);
- if (sr->ssid.SSID_len) {
- memcpy(sr->ssid.SSID, ssids->ssid, sr->ssid.SSID_len);
- sr->ssid.SSID_len = htod32(sr->ssid.SSID_len);
- WL_SCAN(("Specific scan ssid=\"%s\" len=%d\n",
- sr->ssid.SSID, sr->ssid.SSID_len));
- } else {
- WL_SCAN(("Broadcast scan\n"));
- }
- WL_SCAN(("sr->ssid.SSID_len (%d)\n", sr->ssid.SSID_len));
- passive_scan = wl->active_scan ? 0 : 1;
- err = wldev_ioctl(ndev, WLC_SET_PASSIVE_SCAN,
- &passive_scan, sizeof(passive_scan), true);
- if (unlikely(err)) {
- WL_SCAN(("WLC_SET_PASSIVE_SCAN error (%d)\n", err));
- goto scan_out;
- }
- err = wldev_ioctl(ndev, WLC_SCAN, &sr->ssid,
- sizeof(sr->ssid), false);
- if (err) {
- if (err == -EBUSY) {
- WL_ERR(("system busy : scan for \"%s\" "
- "canceled\n", sr->ssid.SSID));
- } else {
- WL_ERR(("WLC_SCAN error (%d)\n", err));
+ if (unlikely(err)) {
+ goto scan_out;
}
- goto scan_out;
}
}
+ err = wl_do_escan(cfg, wiphy, ndev, request);
+ if (likely(!err))
+ goto scan_success;
+ else
+ goto scan_out;
scan_success:
busy_count = 0;
s32 ret = 0;
busy_count = 0;
WL_ERR(("Unusual continuous EBUSY error, %d %d %d %d %d %d %d %d %d\n",
- wl_get_drv_status(wl, SCANNING, ndev),
- wl_get_drv_status(wl, SCAN_ABORTING, ndev),
- wl_get_drv_status(wl, CONNECTING, ndev),
- wl_get_drv_status(wl, CONNECTED, ndev),
- wl_get_drv_status(wl, DISCONNECTING, ndev),
- wl_get_drv_status(wl, AP_CREATING, ndev),
- wl_get_drv_status(wl, AP_CREATED, ndev),
- wl_get_drv_status(wl, SENDING_ACT_FRM, ndev),
- wl_get_drv_status(wl, SENDING_ACT_FRM, ndev)));
+ wl_get_drv_status(cfg, SCANNING, ndev),
+ wl_get_drv_status(cfg, SCAN_ABORTING, ndev),
+ wl_get_drv_status(cfg, CONNECTING, ndev),
+ wl_get_drv_status(cfg, CONNECTED, ndev),
+ wl_get_drv_status(cfg, DISCONNECTING, ndev),
+ wl_get_drv_status(cfg, AP_CREATING, ndev),
+ wl_get_drv_status(cfg, AP_CREATED, ndev),
+ wl_get_drv_status(cfg, SENDING_ACT_FRM, ndev),
+ wl_get_drv_status(cfg, SENDING_ACT_FRM, ndev)));
bzero(&bssid, sizeof(bssid));
if ((ret = wldev_ioctl(ndev, WLC_GET_BSSID,
else
WL_ERR(("GET BSSID failed with %d\n", ret));
- wl_cfg80211_scan_abort(wl);
+ wl_cfg80211_scan_abort(cfg);
+
}
} else {
busy_count = 0;
}
- wl_clr_drv_status(wl, SCANNING, ndev);
- if (timer_pending(&wl->scan_timeout))
- del_timer_sync(&wl->scan_timeout);
- spin_lock_irqsave(&wl->cfgdrv_lock, flags);
- wl->scan_request = NULL;
- spin_unlock_irqrestore(&wl->cfgdrv_lock, flags);
+ wl_clr_drv_status(cfg, SCANNING, ndev);
+ if (timer_pending(&cfg->scan_timeout))
+ del_timer_sync(&cfg->scan_timeout);
+ spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+ cfg->scan_request = NULL;
+ spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
#ifdef WL_SDO
- if (wl_get_p2p_status(wl, DISC_IN_PROGRESS)) {
- wl_cfg80211_resume_sdo(ndev, wl);
+ if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS)) {
+ wl_cfg80211_resume_sdo(ndev, cfg);
}
#endif
return err;
#endif /* WL_CFG80211_P2P_DEV_IF */
{
s32 err = 0;
- struct wl_priv *wl = wiphy_priv(wiphy);
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
#if defined(WL_CFG80211_P2P_DEV_IF)
- struct net_device *ndev = wl_to_prmry_ndev(wl);
+ struct net_device *ndev = wdev_to_wlc_ndev(request->wdev, cfg);
#endif /* WL_CFG80211_P2P_DEV_IF */
WL_DBG(("Enter \n"));
- RETURN_EIO_IF_NOT_UP(wl);
+ RETURN_EIO_IF_NOT_UP(cfg);
err = __wl_cfg80211_scan(wiphy, ndev, request, NULL);
if (unlikely(err)) {
- if ((err == BCME_EPERM) && wl->scan_suppressed)
+ if ((err == BCME_EPERM) && cfg->scan_suppressed)
WL_DBG(("scan not permitted at this time (%d)\n", err));
else
WL_ERR(("scan error (%d)\n", err));
static s32 wl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
{
- struct wl_priv *wl = (struct wl_priv *)wiphy_priv(wiphy);
- struct net_device *ndev = wl_to_prmry_ndev(wl);
+ struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)wiphy_priv(wiphy);
+ struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
s32 err = 0;
- RETURN_EIO_IF_NOT_UP(wl);
+ RETURN_EIO_IF_NOT_UP(cfg);
WL_DBG(("Enter\n"));
if (changed & WIPHY_PARAM_RTS_THRESHOLD &&
- (wl->conf->rts_threshold != wiphy->rts_threshold)) {
- wl->conf->rts_threshold = wiphy->rts_threshold;
- err = wl_set_rts(ndev, wl->conf->rts_threshold);
+ (cfg->conf->rts_threshold != wiphy->rts_threshold)) {
+ cfg->conf->rts_threshold = wiphy->rts_threshold;
+ err = wl_set_rts(ndev, cfg->conf->rts_threshold);
if (!err)
return err;
}
if (changed & WIPHY_PARAM_FRAG_THRESHOLD &&
- (wl->conf->frag_threshold != wiphy->frag_threshold)) {
- wl->conf->frag_threshold = wiphy->frag_threshold;
- err = wl_set_frag(ndev, wl->conf->frag_threshold);
+ (cfg->conf->frag_threshold != wiphy->frag_threshold)) {
+ cfg->conf->frag_threshold = wiphy->frag_threshold;
+ err = wl_set_frag(ndev, cfg->conf->frag_threshold);
if (!err)
return err;
}
if (changed & WIPHY_PARAM_RETRY_LONG &&
- (wl->conf->retry_long != wiphy->retry_long)) {
- wl->conf->retry_long = wiphy->retry_long;
- err = wl_set_retry(ndev, wl->conf->retry_long, true);
+ (cfg->conf->retry_long != wiphy->retry_long)) {
+ cfg->conf->retry_long = wiphy->retry_long;
+ err = wl_set_retry(ndev, cfg->conf->retry_long, true);
if (!err)
return err;
}
if (changed & WIPHY_PARAM_RETRY_SHORT &&
- (wl->conf->retry_short != wiphy->retry_short)) {
- wl->conf->retry_short = wiphy->retry_short;
- err = wl_set_retry(ndev, wl->conf->retry_short, false);
+ (cfg->conf->retry_short != wiphy->retry_short)) {
+ cfg->conf->retry_short = wiphy->retry_short;
+ err = wl_set_retry(ndev, cfg->conf->retry_short, false);
if (!err) {
return err;
}
return err;
}
-static chanspec_t channel_to_chanspec(struct wiphy *wiphy, struct net_device *dev, u32 channel)
+static chanspec_t
+channel_to_chanspec(struct wiphy *wiphy, struct net_device *dev, u32 channel, u32 bw_cap)
{
- struct wl_priv *wl = wiphy_priv(wiphy);
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
u8 *buf = NULL;
wl_uint32_list_t *list;
int err = BCME_OK;
chanspec_t c = 0, ret_c = 0;
int bw = 0, tmp_bw = 0;
int i;
- u32 tmp_c, sb;
+ u32 tmp_c;
u16 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
#define LOCAL_BUF_SIZE 1024
buf = (u8 *) kzalloc(LOCAL_BUF_SIZE, kflags);
list = (wl_uint32_list_t *)(void *)buf;
list->count = htod32(WL_NUMCHANSPECS);
err = wldev_iovar_getbuf_bsscfg(dev, "chanspecs", NULL,
- 0, buf, LOCAL_BUF_SIZE, 0, &wl->ioctl_buf_sync);
+ 0, buf, LOCAL_BUF_SIZE, 0, &cfg->ioctl_buf_sync);
if (err != BCME_OK) {
WL_ERR(("get chanspecs failed with %d\n", err));
goto exit;
goto exit;
}
}
- if (CHSPEC_IS20(c)) {
- tmp_c = CHSPEC_CHANNEL(c);
- tmp_bw = 20;
- }
- else if (CHSPEC_IS40(c)) {
- tmp_c = CHSPEC_CHANNEL(c);
- if (CHSPEC_SB_UPPER(c)) {
- tmp_c += CH_10MHZ_APART;
- } else {
- tmp_c -= CH_10MHZ_APART;
- }
- tmp_bw = 40;
- }
- else {
- tmp_c = CHSPEC_CHANNEL(c);
- sb = c & WL_CHANSPEC_CTL_SB_MASK;
- if (sb == WL_CHANSPEC_CTL_SB_LL) {
- tmp_c -= (CH_10MHZ_APART + CH_20MHZ_APART);
- } else if (sb == WL_CHANSPEC_CTL_SB_LU) {
- tmp_c -= CH_10MHZ_APART;
- } else if (sb == WL_CHANSPEC_CTL_SB_UL) {
- tmp_c += CH_10MHZ_APART;
- } else {
- /* WL_CHANSPEC_CTL_SB_UU */
- tmp_c += (CH_10MHZ_APART + CH_20MHZ_APART);
- }
- tmp_bw = 80;
- }
+ tmp_c = wf_chspec_ctlchan(c);
+ tmp_bw = bw2cap[CHSPEC_BW(c) >> WL_CHANSPEC_BW_SHIFT];
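+			/* wf_chspec_ctlchan() yields the control channel of c; the bw2cap[] table maps the chanspec bandwidth field to a capability value */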
if (tmp_c != channel)
continue;
- if (tmp_bw > bw) {
+ if ((tmp_bw > bw) && (tmp_bw <= bw_cap)) {
bw = tmp_bw;
ret_c = c;
+ if (bw == bw_cap)
+ goto exit;
}
}
exit:
if (buf)
kfree(buf);
#undef LOCAL_BUF_SIZE
- WL_INFO(("return chanspec %x %d\n", ret_c, bw));
+ WL_INFORM(("return chanspec %x %d\n", ret_c, bw));
return ret_c;
}
void
wl_cfg80211_ibss_vsie_set_buffer(vndr_ie_setbuf_t *ibss_vsie, int ibss_vsie_len)
{
- struct wl_priv *wl = wlcfg_drv_priv;
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
- if (wl != NULL && ibss_vsie != NULL) {
- if (wl->ibss_vsie != NULL) {
- kfree(wl->ibss_vsie);
+ if (cfg != NULL && ibss_vsie != NULL) {
+ if (cfg->ibss_vsie != NULL) {
+ kfree(cfg->ibss_vsie);
}
- wl->ibss_vsie = ibss_vsie;
- wl->ibss_vsie_len = ibss_vsie_len;
+ cfg->ibss_vsie = ibss_vsie;
+ cfg->ibss_vsie_len = ibss_vsie_len;
}
}
static void
-wl_cfg80211_ibss_vsie_free(struct wl_priv *wl)
+wl_cfg80211_ibss_vsie_free(struct bcm_cfg80211 *cfg)
{
	/* free & initialize VSIE (Vendor Specific IE) */
- if (wl->ibss_vsie != NULL) {
- kfree(wl->ibss_vsie);
- wl->ibss_vsie = NULL;
- wl->ibss_vsie_len = 0;
+ if (cfg->ibss_vsie != NULL) {
+ kfree(cfg->ibss_vsie);
+ cfg->ibss_vsie = NULL;
+ cfg->ibss_vsie_len = 0;
}
}
s32
wl_cfg80211_ibss_vsie_delete(struct net_device *dev)
{
- struct wl_priv *wl = wlcfg_drv_priv;
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
char *ioctl_buf = NULL;
s32 ret = BCME_OK;
- if (wl != NULL && wl->ibss_vsie != NULL) {
+ if (cfg != NULL && cfg->ibss_vsie != NULL) {
ioctl_buf = kmalloc(WLC_IOCTL_MEDLEN, GFP_KERNEL);
if (!ioctl_buf) {
WL_ERR(("ioctl memory alloc failed\n"));
}
/* change the command from "add" to "del" */
- strncpy(wl->ibss_vsie->cmd, "del", VNDR_IE_CMD_LEN - 1);
- wl->ibss_vsie->cmd[VNDR_IE_CMD_LEN - 1] = '\0';
+ strncpy(cfg->ibss_vsie->cmd, "del", VNDR_IE_CMD_LEN - 1);
+ cfg->ibss_vsie->cmd[VNDR_IE_CMD_LEN - 1] = '\0';
ret = wldev_iovar_setbuf(dev, "ie",
- wl->ibss_vsie, wl->ibss_vsie_len,
+ cfg->ibss_vsie, cfg->ibss_vsie_len,
ioctl_buf, WLC_IOCTL_MEDLEN, NULL);
WL_ERR(("ret=%d\n", ret));
if (ret == BCME_OK) {
		/* free & initialize VSIE */
- kfree(wl->ibss_vsie);
- wl->ibss_vsie = NULL;
- wl->ibss_vsie_len = 0;
+ kfree(cfg->ibss_vsie);
+ cfg->ibss_vsie = NULL;
+ cfg->ibss_vsie_len = 0;
}
if (ioctl_buf) {
return ret;
}
+#ifdef WLAIBSS_MCHAN
+static bcm_struct_cfgdev*
+bcm_cfg80211_add_ibss_if(struct wiphy *wiphy, char *name)
+{
+ int err = 0;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct wireless_dev* wdev = NULL;
+ struct net_device *new_ndev = NULL;
+ struct net_device *primary_ndev = NULL;
+ s32 timeout;
+ wl_aibss_if_t aibss_if;
+ wl_if_event_info *event = NULL;
+
+ if (cfg->ibss_cfgdev != NULL) {
+ WL_ERR(("IBSS interface %s already exists\n", name));
+ return NULL;
+ }
+
+ WL_ERR(("Try to create IBSS interface %s\n", name));
+ primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+ /* generate a new MAC address for the IBSS interface */
+ get_primary_mac(cfg, &cfg->ibss_if_addr);
+ cfg->ibss_if_addr.octet[4] ^= 0x40;
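+	/* Flip a bit of octet[4] so the IBSS interface address differs from the primary MAC */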
+	memset(&aibss_if, 0, sizeof(aibss_if));
+ memcpy(&aibss_if.addr, &cfg->ibss_if_addr, sizeof(aibss_if.addr));
+ aibss_if.chspec = 0;
+ aibss_if.len = sizeof(aibss_if);
+
+ cfg->bss_pending_op = TRUE;
+ memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info));
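+	/* bss_pending_op stays TRUE until the firmware IF_ADD event arrives, filling cfg->if_event_info and waking netif_change_event */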
+ err = wldev_iovar_setbuf(primary_ndev, "aibss_ifadd", &aibss_if,
+ sizeof(aibss_if), cfg->ioctl_buf, WLC_IOCTL_MAXLEN, NULL);
+ if (err) {
+ WL_ERR(("IOVAR aibss_ifadd failed with error %d\n", err));
+ goto fail;
+ }
+ timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
+ !cfg->bss_pending_op, msecs_to_jiffies(MAX_WAIT_TIME));
+ if (timeout <= 0 || cfg->bss_pending_op)
+ goto fail;
+
+ event = &cfg->if_event_info;
+ strncpy(event->name, name, IFNAMSIZ - 1);
+ /* By calling wl_cfg80211_allocate_if (dhd_allocate_if eventually) we give the control
+	 * over this net_device interface to dhd_linux, hence the interface is managed by dhd_linux
+ * and will be freed by dhd_detach unless it gets unregistered before that. The
+ * wireless_dev instance new_ndev->ieee80211_ptr associated with this net_device will
+ * be freed by wl_dealloc_netinfo
+ */
+ new_ndev = wl_cfg80211_allocate_if(cfg, event->ifidx, event->name,
+ event->mac, event->bssidx);
+ if (new_ndev == NULL)
+ goto fail;
+ wdev = kzalloc(sizeof(*wdev), GFP_KERNEL);
+ if (wdev == NULL)
+ goto fail;
+ wdev->wiphy = wiphy;
+ wdev->iftype = NL80211_IFTYPE_ADHOC;
+ wdev->netdev = new_ndev;
+ new_ndev->ieee80211_ptr = wdev;
+ SET_NETDEV_DEV(new_ndev, wiphy_dev(wdev->wiphy));
+
+ /* rtnl lock must have been acquired, if this is not the case, wl_cfg80211_register_if
+ * needs to be modified to take one parameter (bool need_rtnl_lock)
+ */
+ ASSERT_RTNL();
+ if (wl_cfg80211_register_if(cfg, event->ifidx, new_ndev) != BCME_OK)
+ goto fail;
+
+ wl_alloc_netinfo(cfg, new_ndev, wdev, WL_MODE_IBSS, PM_ENABLE);
+ cfg->ibss_cfgdev = ndev_to_cfgdev(new_ndev);
+ WL_ERR(("IBSS interface %s created\n", new_ndev->name));
+ return cfg->ibss_cfgdev;
+
+fail:
+ WL_ERR(("failed to create IBSS interface %s \n", name));
+ cfg->bss_pending_op = FALSE;
+ if (new_ndev)
+ wl_cfg80211_remove_if(cfg, event->ifidx, new_ndev);
+ if (wdev)
+ kfree(wdev);
+ return NULL;
+}
+
+static s32
+bcm_cfg80211_del_ibss_if(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev)
+{
+ int err = 0;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct net_device *ndev = NULL;
+ struct net_device *primary_ndev = NULL;
+ s32 timeout;
+
+ if (!cfgdev || cfg->ibss_cfgdev != cfgdev || ETHER_ISNULLADDR(&cfg->ibss_if_addr.octet))
+ return -EINVAL;
+ ndev = (struct net_device *)cfgdev_to_ndev(cfg->ibss_cfgdev);
+ primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+
+ cfg->bss_pending_op = TRUE;
+ memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info));
+ err = wldev_iovar_setbuf(primary_ndev, "aibss_ifdel", &cfg->ibss_if_addr,
+ sizeof(cfg->ibss_if_addr), cfg->ioctl_buf, WLC_IOCTL_MAXLEN, NULL);
+ if (err) {
+ WL_ERR(("IOVAR aibss_ifdel failed with error %d\n", err));
+ goto fail;
+ }
+ timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
+ !cfg->bss_pending_op, msecs_to_jiffies(MAX_WAIT_TIME));
+ if (timeout <= 0 || cfg->bss_pending_op) {
+ WL_ERR(("timeout in waiting IF_DEL event\n"));
+ goto fail;
+ }
+
+ wl_cfg80211_remove_if(cfg, cfg->if_event_info.ifidx, ndev);
+ cfg->ibss_cfgdev = NULL;
+ return 0;
+
+fail:
+ cfg->bss_pending_op = FALSE;
+ return -1;
+}
+#endif /* WLAIBSS_MCHAN */
+
+#if defined(DUAL_STA) || defined(DUAL_STA_STATIC_IF)
+s32
+wl_cfg80211_add_del_bss(struct bcm_cfg80211 *cfg,
+ struct net_device *ndev, s32 bsscfg_idx,
+ enum nl80211_iftype iface_type, s32 del, u8 *addr)
+{
+ s32 ret = BCME_OK;
+ s32 val = 0;
+
+ struct {
+ s32 cfg;
+ s32 val;
+ struct ether_addr ea;
+ } bss_setbuf;
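+	/* Buffer layout expected by the firmware "bss" iovar: bsscfg index, mode/state value and an optional MAC address */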
+
+ WL_INFORM(("iface_type:%d del:%d \n", iface_type, del));
+
+ bzero(&bss_setbuf, sizeof(bss_setbuf));
+
+ /* AP=3, STA=2, up=1, down=0, val=-1 */
+ if (del) {
+ val = -1;
+ } else if (iface_type == NL80211_IFTYPE_AP) {
+ /* AP Interface */
+ WL_DBG(("Adding AP Interface \n"));
+ val = 3;
+ } else if (iface_type == NL80211_IFTYPE_STATION) {
+ WL_DBG(("Adding STA Interface \n"));
+ val = 2;
+ } else {
+ WL_ERR((" add_del_bss NOT supported for IFACE type:0x%x", iface_type));
+ return -EINVAL;
+ }
+
+ bss_setbuf.cfg = htod32(bsscfg_idx);
+ bss_setbuf.val = htod32(val);
+
+ if (addr) {
+ memcpy(&bss_setbuf.ea.octet, addr, ETH_ALEN);
+ }
+
+ ret = wldev_iovar_setbuf(ndev, "bss", &bss_setbuf, sizeof(bss_setbuf),
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+ if (ret != 0)
+ WL_ERR(("'bss %d' failed with %d\n", val, ret));
+
+ return ret;
+}
+
+/* Create a Generic Network Interface and initialize it depending upon
+ * the interface type
+ */
+bcm_struct_cfgdev*
+wl_cfg80211_create_iface(struct wiphy *wiphy,
+ enum nl80211_iftype iface_type,
+ u8 *mac_addr, const char *name)
+{
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct net_device *new_ndev = NULL;
+ struct net_device *primary_ndev = NULL;
+ s32 ret = BCME_OK;
+ s32 bsscfg_idx = 1;
+ u32 timeout;
+ wl_if_event_info *event = NULL;
+ struct wireless_dev *wdev = NULL;
+ u8 addr[ETH_ALEN];
+
+ WL_DBG(("Enter\n"));
+
+ if (!name) {
+ WL_ERR(("Interface name not provided\n"));
+ return NULL;
+ }
+
+ primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+
+ if (likely(!mac_addr)) {
+ /* Use primary MAC with the locally administered bit for the Secondary STA I/F */
+ memcpy(addr, primary_ndev->dev_addr, ETH_ALEN);
+ addr[0] |= 0x02;
+ } else {
+ /* Use the application provided mac address (if any) */
+ memcpy(addr, mac_addr, ETH_ALEN);
+ }
+
+ if ((iface_type != NL80211_IFTYPE_STATION) && (iface_type != NL80211_IFTYPE_AP)) {
+ WL_ERR(("IFACE type:%d not supported. STA "
+ "or AP IFACE is only supported\n", iface_type));
+ return NULL;
+ }
+
+ cfg->bss_pending_op = TRUE;
+ memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info));
+
+ /* De-initialize the p2p discovery interface, if operational */
+ if (p2p_is_on(cfg)) {
+ WL_DBG(("Disabling P2P Discovery Interface \n"));
+#ifdef WL_CFG80211_P2P_DEV_IF
+ ret = wl_cfg80211_scan_stop(bcmcfg_to_p2p_wdev(cfg));
+#else
+ ret = wl_cfg80211_scan_stop(cfg->p2p_net);
+#endif
+ if (unlikely(ret < 0)) {
+ CFGP2P_ERR(("P2P scan stop failed, ret=%d\n", ret));
+ }
+
+ wl_cfgp2p_disable_discovery(cfg);
+ wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) = 0;
+ p2p_on(cfg) = false;
+ }
+
+ /*
+	 * Initialize the firmware I/F.
+ */
+ if ((ret = wl_cfg80211_add_del_bss(cfg, primary_ndev,
+ bsscfg_idx, iface_type, 0, addr)) < 0) {
+ return NULL;
+ }
+
+ /*
+ * Wait till the firmware send a confirmation event back.
+ */
+ WL_DBG(("Wait for the FW I/F Event\n"));
+ timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
+ !cfg->bss_pending_op, msecs_to_jiffies(MAX_WAIT_TIME));
+ if (timeout <= 0 || cfg->bss_pending_op) {
+		WL_ERR(("ADD_IF event didn't come. Return \n"));
+ goto fail;
+ }
+
+ /*
+	 * Since the FW operation was successful, we can go ahead with
+	 * the host interface creation.
+ */
+ event = &cfg->if_event_info;
+ strncpy(event->name, name, IFNAMSIZ - 1);
+ new_ndev = wl_cfg80211_allocate_if(cfg, event->ifidx,
+ event->name, addr, event->bssidx);
+ if (!new_ndev) {
+ WL_ERR(("I/F allocation failed! \n"));
+ goto fail;
+ } else
+ WL_DBG(("I/F allocation succeeded! ifidx:0x%x bssidx:0x%x \n",
+ event->ifidx, event->bssidx));
+
+ wdev = kzalloc(sizeof(*wdev), GFP_KERNEL);
+ if (!wdev) {
+ WL_ERR(("wireless_dev alloc failed! \n"));
+ goto fail;
+ }
+
+ wdev->wiphy = wiphy;
+ wdev->iftype = iface_type;
+ new_ndev->ieee80211_ptr = wdev;
+ SET_NETDEV_DEV(new_ndev, wiphy_dev(wdev->wiphy));
+
+ /* RTNL lock must have been acquired. */
+ ASSERT_RTNL();
+
+	/* Set the locally administered MAC address, if not applied already */
+ if (memcmp(addr, event->mac, ETH_ALEN) != 0) {
+ ret = wldev_iovar_setbuf_bsscfg(primary_ndev, "cur_etheraddr", addr, ETH_ALEN,
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, event->bssidx, &cfg->ioctl_buf_sync);
+ if (unlikely(ret)) {
+ WL_ERR(("set cur_etheraddr Error (%d)\n", ret));
+ goto fail;
+ }
+ memcpy(new_ndev->dev_addr, addr, ETH_ALEN);
+ }
+
+ if (wl_cfg80211_register_if(cfg, event->ifidx, new_ndev) != BCME_OK) {
+ WL_ERR(("IFACE register failed \n"));
+ goto fail;
+ }
+
+ /* Initialize with the station mode params */
+ wl_alloc_netinfo(cfg, new_ndev, wdev,
+ (iface_type == NL80211_IFTYPE_STATION) ?
+ WL_MODE_BSS : WL_MODE_AP, PM_ENABLE);
+ cfg->bss_cfgdev = ndev_to_cfgdev(new_ndev);
+ cfg->cfgdev_bssidx = event->bssidx;
+
+ WL_DBG(("Host Network Interface for Secondary I/F created"));
+
+ return cfg->bss_cfgdev;
+
+fail:
+ cfg->bss_pending_op = FALSE;
+ if (new_ndev)
+ wl_cfg80211_remove_if(cfg, event->ifidx, new_ndev);
+ if (wdev)
+ kfree(wdev);
+
+ return NULL;
+}
+
+s32
+wl_cfg80211_del_iface(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev)
+{
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct net_device *ndev = NULL;
+ struct net_device *primary_ndev = NULL;
+ s32 ret = BCME_OK;
+ s32 bsscfg_idx = 1;
+ u32 timeout;
+ enum nl80211_iftype iface_type = NL80211_IFTYPE_STATION;
+
+ WL_DBG(("Enter\n"));
+
+ if (!cfg->bss_cfgdev)
+ return 0;
+
+ /* If any scan is going on, abort it */
+ if (wl_get_drv_status_all(cfg, SCANNING)) {
+ WL_DBG(("Scan in progress. Aborting the scan!\n"));
+ wl_notify_escan_complete(cfg, cfg->escan_info.ndev, true, true);
+ }
+
+ ndev = (struct net_device *)cfgdev_to_ndev(cfg->bss_cfgdev);
+ primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+
+ cfg->bss_pending_op = TRUE;
+ memset(&cfg->if_event_info, 0, sizeof(cfg->if_event_info));
+
+ /* Delete the firmware interface */
+ if ((ret = wl_cfg80211_add_del_bss(cfg, ndev,
+ bsscfg_idx, iface_type, true, NULL)) < 0) {
+ WL_ERR(("DEL bss failed ret:%d \n", ret));
+ return ret;
+ }
+
+ timeout = wait_event_interruptible_timeout(cfg->netif_change_event,
+ !cfg->bss_pending_op, msecs_to_jiffies(MAX_WAIT_TIME));
+ if (timeout <= 0 || cfg->bss_pending_op) {
+ WL_ERR(("timeout in waiting IF_DEL event\n"));
+ }
+
+ wl_cfg80211_remove_if(cfg, cfg->if_event_info.ifidx, ndev);
+ cfg->bss_cfgdev = NULL;
+ cfg->cfgdev_bssidx = -1;
+ cfg->bss_pending_op = FALSE;
+
+ WL_DBG(("IF_DEL Done.\n"));
+
+ return ret;
+}
+#endif /* defined(DUAL_STA) || defined(DUAL_STA_STATIC_IF) */
+
static s32
wl_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_ibss_params *params)
{
- struct wl_priv *wl = wiphy_priv(wiphy);
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
struct cfg80211_bss *bss;
struct ieee80211_channel *chan;
struct wl_join_params join_params;
+ int scan_suppress;
struct cfg80211_ssid ssid;
s32 scan_retry = 0;
s32 err = 0;
- int scan_suppress = 1;
size_t join_params_size;
- chanspec_t chanspec;
+ chanspec_t chanspec = 0;
+ u32 param[2] = {0, 0};
+ u32 bw_cap = 0;
+#if defined(WLAIBSS) && defined(WLAIBSS_PS)
+ s32 atim = 10;
+#endif /* WLAIBSS & WLAIBSS_PS */
WL_TRACE(("In\n"));
- RETURN_EIO_IF_NOT_UP(wl);
- WL_INFO(("JOIN BSSID:" MACDBG "\n", MAC2STRDBG(params->bssid)));
+ RETURN_EIO_IF_NOT_UP(cfg);
+ WL_INFORM(("JOIN BSSID:" MACDBG "\n", MAC2STRDBG(params->bssid)));
if (!params->ssid || params->ssid_len <= 0) {
WL_ERR(("Invalid parameter\n"));
return -EINVAL;
}
- if (wl_get_drv_status(wl, CONNECTED, dev)) {
- struct wlc_ssid *ssid = (struct wlc_ssid *)wl_read_prof(wl, dev, WL_PROF_SSID);
- u8 *bssid = (u8 *)wl_read_prof(wl, dev, WL_PROF_BSSID);
- if (!params->bssid || (memcmp(params->bssid, bssid, ETHER_ADDR_LEN) == 0 &&
- memcmp(params->ssid, ssid->SSID, ssid->SSID_len) == 0)) {
+#if defined(WL_CFG80211_P2P_DEV_IF)
+ chan = params->chandef.chan;
+#else
+ chan = params->channel;
+#endif /* WL_CFG80211_P2P_DEV_IF */
+ if (chan)
+ cfg->channel = ieee80211_frequency_to_channel(chan->center_freq);
+ if (wl_get_drv_status(cfg, CONNECTED, dev)) {
+ struct wlc_ssid *ssid = (struct wlc_ssid *)wl_read_prof(cfg, dev, WL_PROF_SSID);
+ u8 *bssid = (u8 *)wl_read_prof(cfg, dev, WL_PROF_BSSID);
+ u32 *channel = (u32 *)wl_read_prof(cfg, dev, WL_PROF_CHAN);
+ if (!params->bssid || ((memcmp(params->bssid, bssid, ETHER_ADDR_LEN) == 0) &&
+ (memcmp(params->ssid, ssid->SSID, ssid->SSID_len) == 0) &&
+ (*channel == cfg->channel))) {
WL_ERR(("Connection already existed to " MACDBG "\n",
- MAC2STRDBG((u8 *)wl_read_prof(wl, dev, WL_PROF_BSSID))));
+ MAC2STRDBG((u8 *)wl_read_prof(cfg, dev, WL_PROF_BSSID))));
return -EISCONN;
}
		WL_ERR(("Ignore Previous connection to %s (" MACDBG ")\n",
}
} while (++scan_retry < WL_SCAN_RETRY_MAX);
+	/* The rtnl lock code was removed here; there is no apparent reason
+	 * why the rtnl lock would need to be released.
+	 */
+
	/* wait 4 seconds till scan is done.... */
schedule_timeout_interruptible(msecs_to_jiffies(4000));
+
bss = cfg80211_get_ibss(wiphy, NULL,
params->ssid, params->ssid_len);
}
if (bss && ((IBSS_COALESCE_ALLOWED == TRUE) ||
((IBSS_COALESCE_ALLOWED == FALSE) && params->bssid &&
!memcmp(bss->bssid, params->bssid, ETHER_ADDR_LEN)))) {
- wl->ibss_starter = false;
+ cfg->ibss_starter = false;
WL_DBG(("Found IBSS\n"));
} else {
- wl->ibss_starter = true;
+ cfg->ibss_starter = true;
+ }
+ if (chan) {
+ if (chan->band == IEEE80211_BAND_5GHZ)
+ param[0] = WLC_BAND_5G;
+ else if (chan->band == IEEE80211_BAND_2GHZ)
+ param[0] = WLC_BAND_2G;
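+		/* Query the per-band bandwidth capability; channel_to_chanspec() caps the selected chanspec to it */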
+ err = wldev_iovar_getint(dev, "bw_cap", param);
+ if (unlikely(err)) {
+ WL_ERR(("Get bw_cap Failed (%d)\n", err));
+ return err;
+ }
+ bw_cap = param[0];
+ chanspec = channel_to_chanspec(wiphy, dev, cfg->channel, bw_cap);
}
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
- chan = params->chandef.chan;
-#else
- chan = params->channel;
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)) */
- if (chan)
- wl->channel = ieee80211_frequency_to_channel(chan->center_freq);
- chanspec = channel_to_chanspec(wiphy, dev, wl->channel);
/*
* Join with specific BSSID and cached SSID
* If SSID is zero join based on BSSID only
}
} else
memset(&join_params.params.bssid, 0, ETHER_ADDR_LEN);
-
wldev_iovar_setint(dev, "ibss_coalesce_allowed", IBSS_COALESCE_ALLOWED);
if (IBSS_INITIAL_SCAN_ALLOWED == FALSE) {
scan_suppress = TRUE;
- /* Set the SCAN SUPRESS Flag in the firmware to skip join scan */
- err = wldev_ioctl(dev, WLC_SET_SCANSUPPRESS, &scan_suppress, sizeof(int), true);
+ /* Set the SCAN SUPPRESS Flag in the firmware to skip join scan */
+ err = wldev_ioctl(dev, WLC_SET_SCANSUPPRESS,
+ &scan_suppress, sizeof(int), true);
if (unlikely(err)) {
- WL_ERR(("Scan Supress Setting failed(%d)\n", err));
+ WL_ERR(("Scan Suppress Setting Failed (%d)\n", err));
return err;
}
}
wldev_iovar_setint(dev, "wpa_auth", WPA_AUTH_DISABLED);
wldev_iovar_setint(dev, "wsec", 0);
+#ifdef WLAIBSS
+ /* Enable custom ibss features */
+ err = wldev_iovar_setint(dev, "aibss", TRUE);
+
+ if (unlikely(err)) {
+ WL_ERR(("Enable custom IBSS mode failed (%d)\n", err));
+ return err;
+ }
+#ifdef WLAIBSS_PS
+ err = wldev_ioctl(dev, WLC_SET_ATIM, &atim, sizeof(int), true);
+ if (unlikely(err)) {
+ WL_ERR(("Enable custom IBSS ATIM mode failed (%d)\n", err));
+ return err;
+ }
+#endif /* WLAIBSS_PS */
+#endif /* WLAIBSS */
+
err = wldev_ioctl(dev, WLC_SET_SSID, &join_params,
join_params_size, true);
if (unlikely(err)) {
if (IBSS_INITIAL_SCAN_ALLOWED == FALSE) {
scan_suppress = FALSE;
- /* Restore the SCAN SUPPRESS Falg */
+ /* Reset the SCAN SUPPRESS Flag */
err = wldev_ioctl(dev, WLC_SET_SCANSUPPRESS,
&scan_suppress, sizeof(int), true);
if (unlikely(err)) {
- WL_ERR(("Reset SCAN Suppress Flag failed (%d)\n", err));
- return err;
+ WL_ERR(("Reset Scan Suppress Flag Failed (%d)\n", err));
+ return err;
}
}
- wl_update_prof(wl, dev, NULL, &join_params.ssid, WL_PROF_SSID);
+ wl_update_prof(cfg, dev, NULL, &join_params.ssid, WL_PROF_SSID);
+ wl_update_prof(cfg, dev, NULL, &cfg->channel, WL_PROF_CHAN);
+#ifdef WLAIBSS
+ cfg->aibss_txfail_seq = 0; /* initialize the sequence */
+#endif /* WLAIBSS */
+ cfg->rmc_event_seq = 0; /* initialize rmcfail sequence */
return err;
}
static s32 wl_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
{
- struct wl_priv *wl = wiphy_priv(wiphy);
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
s32 err = 0;
scb_val_t scbval;
u8 *curbssid;
- RETURN_EIO_IF_NOT_UP(wl);
- wl_link_down(wl);
+ RETURN_EIO_IF_NOT_UP(cfg);
+ wl_link_down(cfg);
+
WL_ERR(("Leave IBSS\n"));
- curbssid = wl_read_prof(wl, dev, WL_PROF_BSSID);
- wl_set_drv_status(wl, DISCONNECTING, dev);
+ curbssid = wl_read_prof(cfg, dev, WL_PROF_BSSID);
+ wl_set_drv_status(cfg, DISCONNECTING, dev);
scbval.val = 0;
memcpy(&scbval.ea, curbssid, ETHER_ADDR_LEN);
err = wldev_ioctl(dev, WLC_DISASSOC, &scbval,
sizeof(scb_val_t), true);
if (unlikely(err)) {
- wl_clr_drv_status(wl, DISCONNECTING, dev);
+ wl_clr_drv_status(cfg, DISCONNECTING, dev);
WL_ERR(("error(%d)\n", err));
return err;
}
static s32
wl_set_wpa_version(struct net_device *dev, struct cfg80211_connect_params *sme)
{
- struct wl_priv *wl = wlcfg_drv_priv;
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
struct wl_security *sec;
s32 val = 0;
s32 err = 0;
s32 bssidx;
- if (wl_cfgp2p_find_idx(wl, dev, &bssidx) != BCME_OK) {
+ if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
return BCME_ERROR;
}
WL_ERR(("set wpa_auth failed (%d)\n", err));
return err;
}
- sec = wl_read_prof(wl, dev, WL_PROF_SEC);
+ sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
sec->wpa_versions = sme->crypto.wpa_versions;
return err;
}
static s32
wl_set_set_wapi_ie(struct net_device *dev, struct cfg80211_connect_params *sme)
{
- struct wl_priv *wl = wlcfg_drv_priv;
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
s32 err = 0;
s32 bssidx;
- if (wl_cfgp2p_find_idx(wl, dev, &bssidx) != BCME_OK) {
+ if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
return BCME_ERROR;
}
WL_DBG((" %s \n", __FUNCTION__));
if (sme->crypto.wpa_versions & NL80211_WAPI_VERSION_1) {
- err = wldev_iovar_setbuf_bsscfg(dev, "wapiie", sme->ie,
- sme->ie_len, wl->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &wl->ioctl_buf_sync);
+ err = wldev_iovar_setbuf_bsscfg(dev, "wapiie", sme->ie, sme->ie_len,
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
if (unlikely(err)) {
WL_ERR(("===> set_wapi_ie Error (%d)\n", err));
static s32
wl_set_auth_type(struct net_device *dev, struct cfg80211_connect_params *sme)
{
- struct wl_priv *wl = wlcfg_drv_priv;
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
struct wl_security *sec;
s32 val = 0;
s32 err = 0;
s32 bssidx;
- if (wl_cfgp2p_find_idx(wl, dev, &bssidx) != BCME_OK) {
+ if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
return BCME_ERROR;
}
WL_ERR(("set auth failed (%d)\n", err));
return err;
}
- sec = wl_read_prof(wl, dev, WL_PROF_SEC);
+ sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
sec->auth_type = sme->auth_type;
return err;
}
static s32
wl_set_set_cipher(struct net_device *dev, struct cfg80211_connect_params *sme)
{
- struct wl_priv *wl = wlcfg_drv_priv;
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
struct wl_security *sec;
s32 pval = 0;
s32 gval = 0;
s32 val = 0;
#endif
s32 bssidx;
- if (wl_cfgp2p_find_idx(wl, dev, &bssidx) != BCME_OK) {
+ if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
return BCME_ERROR;
}
* Note that the FW feature flag only exists on kernels that support the
* FT-EAP AKM suite.
*/
- if (wl->wdev->wiphy->features & NL80211_FEATURE_FW_4WAY_HANDSHAKE) {
+ if (cfg->wdev->wiphy->features & NL80211_FEATURE_FW_4WAY_HANDSHAKE) {
if (pval == AES_ENABLED)
err = wldev_iovar_setint_bsscfg(dev, "sup_wpa", 1, bssidx);
else
return err;
}
- sec = wl_read_prof(wl, dev, WL_PROF_SEC);
+ sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
sec->cipher_pairwise = sme->crypto.ciphers_pairwise[0];
sec->cipher_group = sme->crypto.cipher_group;
static s32
wl_set_key_mgmt(struct net_device *dev, struct cfg80211_connect_params *sme)
{
- struct wl_priv *wl = wlcfg_drv_priv;
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
struct wl_security *sec;
s32 val = 0;
s32 err = 0;
s32 bssidx;
- if (wl_cfgp2p_find_idx(wl, dev, &bssidx) != BCME_OK) {
+ if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
return BCME_ERROR;
}
#endif
WL_DBG(("setting wpa_auth to %d\n", val));
-
err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", val, bssidx);
if (unlikely(err)) {
WL_ERR(("could not set wpa_auth (%d)\n", err));
return err;
}
}
- sec = wl_read_prof(wl, dev, WL_PROF_SEC);
+ sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
sec->wpa_auth = sme->crypto.akm_suites[0];
return err;
wl_set_set_sharedkey(struct net_device *dev,
struct cfg80211_connect_params *sme)
{
- struct wl_priv *wl = wlcfg_drv_priv;
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
struct wl_security *sec;
struct wl_wsec_key key;
s32 val;
s32 err = 0;
s32 bssidx;
- if (wl_cfgp2p_find_idx(wl, dev, &bssidx) != BCME_OK) {
+ if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
return BCME_ERROR;
}
WL_DBG(("key len (%d)\n", sme->key_len));
if (sme->key_len) {
- sec = wl_read_prof(wl, dev, WL_PROF_SEC);
+ sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
WL_DBG(("wpa_versions 0x%x cipher_pairwise 0x%x\n",
sec->wpa_versions, sec->cipher_pairwise));
if (!(sec->wpa_versions & (NL80211_WPA_VERSION_1 |
WL_DBG(("key \"%s\"\n", key.data));
swap_key_from_BE(&key);
err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key),
- wl->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &wl->ioctl_buf_sync);
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
if (unlikely(err)) {
WL_ERR(("WLC_SET_KEY error (%d)\n", err));
return err;
+#if defined(CUSTOM_SET_CPUCORE) || defined(CONFIG_TCPACK_FASTTX)
+static bool wl_get_chan_isvht80(struct net_device *net, dhd_pub_t *dhd)
+{
+ u32 chanspec = 0;
+ bool isvht80 = 0;
+
+ if (wldev_iovar_getint(net, "chanspec", (s32 *)&chanspec) == BCME_OK)
+ chanspec = wl_chspec_driver_to_host(chanspec);
+
+ isvht80 = chanspec & WL_CHANSPEC_BW_80;
+ WL_INFO(("%s: chanspec(%x:%d)\n", __FUNCTION__, chanspec, isvht80));
+
+ return isvht80;
+}
+#endif /* CUSTOM_SET_CPUCORE || CONFIG_TCPACK_FASTTX */
+
static s32
wl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_connect_params *sme)
{
- struct wl_priv *wl = wiphy_priv(wiphy);
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
struct ieee80211_channel *chan = sme->channel;
wl_extjoin_params_t *ext_join_params;
struct wl_join_params join_params;
}
if (unlikely(sme->ssid_len > DOT11_MAX_SSID_LEN)) {
- WL_ERR(("Invalid SSID info: SSID=%s, length=%d\n",
+ WL_ERR(("Invalid SSID info: SSID=%s, length=%zd\n",
sme->ssid, sme->ssid_len));
return -EINVAL;
}
- RETURN_EIO_IF_NOT_UP(wl);
+ RETURN_EIO_IF_NOT_UP(cfg);
/*
* Cancel ongoing scan to sync up with sme state machine of cfg80211.
*/
-#if !defined(ESCAN_RESULT_PATCH)
- if (wl->scan_request) {
- wl_notify_escan_complete(wl, dev, true, true);
+#if (!defined(ESCAN_RESULT_PATCH) || defined(CUSTOMER_HW10))
+ if (cfg->scan_request) {
+ wl_notify_escan_complete(cfg, dev, true, true);
+ }
+#endif
+#ifdef WL_SCHED_SCAN
+ if (cfg->sched_scan_req) {
+ wl_cfg80211_sched_scan_stop(wiphy, bcmcfg_to_prmry_ndev(cfg));
}
#endif
#if defined(ESCAN_RESULT_PATCH)
maxrxpktglom = 0;
#endif
bzero(&bssid, sizeof(bssid));
- if (!wl_get_drv_status(wl, CONNECTED, dev)&&
+ if (!wl_get_drv_status(cfg, CONNECTED, dev)&&
(ret = wldev_ioctl(dev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN, false)) == 0) {
if (!ETHER_ISNULLADDR(&bssid)) {
scb_val_t scbval;
- wl_set_drv_status(wl, DISCONNECTING, dev);
+ wl_set_drv_status(cfg, DISCONNECTING, dev);
scbval.val = DOT11_RC_DISASSOC_LEAVING;
memcpy(&scbval.ea, &bssid, ETHER_ADDR_LEN);
scbval.val = htod32(scbval.val);
err = wldev_ioctl(dev, WLC_DISASSOC, &scbval,
sizeof(scb_val_t), true);
if (unlikely(err)) {
- wl_clr_drv_status(wl, DISCONNECTING, dev);
+ wl_clr_drv_status(cfg, DISCONNECTING, dev);
WL_ERR(("error (%d)\n", err));
return err;
}
wait_cnt = 500/10;
- while (wl_get_drv_status(wl, DISCONNECTING, dev) && wait_cnt) {
+ while (wl_get_drv_status(cfg, DISCONNECTING, dev) && wait_cnt) {
WL_DBG(("Waiting for disconnection terminated, wait_cnt: %d\n",
wait_cnt));
wait_cnt--;
} else {
/* if status is DISCONNECTING, wait for disconnection terminated max 500 ms */
wait_cnt = 500/10;
- while (wl_get_drv_status(wl, DISCONNECTING, dev) && wait_cnt) {
+ while (wl_get_drv_status(cfg, DISCONNECTING, dev) && wait_cnt) {
WL_DBG(("Waiting for disconnection terminated, wait_cnt: %d\n", wait_cnt));
wait_cnt--;
OSL_SLEEP(10);
/* Clean BSSID */
bzero(&bssid, sizeof(bssid));
- if (!wl_get_drv_status(wl, DISCONNECTING, dev))
- wl_update_prof(wl, dev, NULL, (void *)&bssid, WL_PROF_BSSID);
+ if (!wl_get_drv_status(cfg, DISCONNECTING, dev))
+ wl_update_prof(cfg, dev, NULL, (void *)&bssid, WL_PROF_BSSID);
- if (p2p_is_on(wl) && (dev != wl_to_prmry_ndev(wl))) {
+ if (p2p_is_on(cfg) && (dev != bcmcfg_to_prmry_ndev(cfg))) {
/* we only allow to connect using virtual interface in case of P2P */
- if (wl_cfgp2p_find_idx(wl, dev, &bssidx) != BCME_OK) {
+ if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
return BCME_ERROR;
}
- wl_cfgp2p_set_management_ie(wl, dev, bssidx,
+ wl_cfgp2p_set_management_ie(cfg, dev, bssidx,
VNDR_IE_ASSOCREQ_FLAG, sme->ie, sme->ie_len);
- } else if (dev == wl_to_prmry_ndev(wl)) {
+ } else if (dev == bcmcfg_to_prmry_ndev(cfg)) {
/* find the RSN_IE */
if ((wpa2_ie = bcm_parse_tlvs((u8 *)sme->ie, sme->ie_len,
DOT11_MNG_RSN_ID)) != NULL) {
wpaie_len = (wpa_ie != NULL) ? wpa_ie->length : wpa2_ie->len;
wpaie_len += WPA_RSN_IE_TAG_FIXED_LEN;
wldev_iovar_setbuf(dev, "wpaie", wpaie, wpaie_len,
- wl->ioctl_buf, WLC_IOCTL_MAXLEN, &wl->ioctl_buf_sync);
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
} else {
wldev_iovar_setbuf(dev, "wpaie", NULL, 0,
- wl->ioctl_buf, WLC_IOCTL_MAXLEN, &wl->ioctl_buf_sync);
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
}
- if (wl_cfgp2p_find_idx(wl, dev, &bssidx) != BCME_OK) {
+ if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
return BCME_ERROR;
}
- err = wl_cfgp2p_set_management_ie(wl, dev, bssidx,
+ err = wl_cfgp2p_set_management_ie(cfg, dev, bssidx,
VNDR_IE_ASSOCREQ_FLAG, (u8 *)sme->ie, sme->ie_len);
if (unlikely(err)) {
return err;
}
}
if (chan) {
- wl->channel = ieee80211_frequency_to_channel(chan->center_freq);
+ cfg->channel = ieee80211_frequency_to_channel(chan->center_freq);
chan_cnt = 1;
- WL_DBG(("channel (%d), center_req (%d), %d channels\n", wl->channel,
+ WL_DBG(("channel (%d), center_req (%d), %d channels\n", cfg->channel,
chan->center_freq, chan_cnt));
} else
- wl->channel = 0;
+ cfg->channel = 0;
#ifdef BCMWAPI_WPI
WL_DBG(("1. enable wapi auth\n"));
if (sme->crypto.wpa_versions & NL80211_WAPI_VERSION_1) {
ext_join_params = (wl_extjoin_params_t*)kzalloc(join_params_size, GFP_KERNEL);
if (ext_join_params == NULL) {
err = -ENOMEM;
- wl_clr_drv_status(wl, CONNECTING, dev);
+ wl_clr_drv_status(cfg, CONNECTING, dev);
goto exit;
}
ext_join_params->ssid.SSID_len = min(sizeof(ext_join_params->ssid.SSID), sme->ssid_len);
memcpy(&ext_join_params->ssid.SSID, sme->ssid, ext_join_params->ssid.SSID_len);
- wl_update_prof(wl, dev, NULL, &ext_join_params->ssid, WL_PROF_SSID);
+ wl_update_prof(cfg, dev, NULL, &ext_join_params->ssid, WL_PROF_SSID);
ext_join_params->ssid.SSID_len = htod32(ext_join_params->ssid.SSID_len);
	/* increase dwell time to receive probe response or detect Beacon
- * from target AP at a noisy air only when channel info is provided in connect command
+	 * from target AP in a noisy environment only during the connect command
*/
ext_join_params->scan.active_time = chan_cnt ? WL_SCAN_JOIN_ACTIVE_DWELL_TIME_MS : -1;
ext_join_params->scan.passive_time = chan_cnt ? WL_SCAN_JOIN_PASSIVE_DWELL_TIME_MS : -1;
if (chan_cnt) {
u16 channel, band, bw, ctl_sb;
chanspec_t chspec;
- channel = wl->channel;
+ channel = cfg->channel;
band = (channel <= CH_MAX_2G_CHANNEL) ? WL_CHANSPEC_BAND_2G
: WL_CHANSPEC_BAND_5G;
bw = WL_CHANSPEC_BW_20;
}
ext_join_params->assoc.chanspec_num = htod32(ext_join_params->assoc.chanspec_num);
if (ext_join_params->ssid.SSID_len < IEEE80211_MAX_SSID_LEN) {
- WL_INFO(("ssid \"%s\", len (%d)\n", ext_join_params->ssid.SSID,
+ WL_INFORM(("ssid \"%s\", len (%d)\n", ext_join_params->ssid.SSID,
ext_join_params->ssid.SSID_len));
}
- wl_set_drv_status(wl, CONNECTING, dev);
+ wl_set_drv_status(cfg, CONNECTING, dev);
- if (wl_cfgp2p_find_idx(wl, dev, &bssidx) != BCME_OK) {
+ if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+ kfree(ext_join_params);
return BCME_ERROR;
}
err = wldev_iovar_setbuf_bsscfg(dev, "join", ext_join_params, join_params_size,
- wl->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &wl->ioctl_buf_sync);
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
+
+	printk("Connecting with " MACDBG " channel (%d) ssid \"%s\", len (%d)\n\n",
+ MAC2STRDBG((u8*)(&ext_join_params->assoc.bssid)), cfg->channel,
+ ext_join_params->ssid.SSID, ext_join_params->ssid.SSID_len);
+
kfree(ext_join_params);
if (err) {
- wl_clr_drv_status(wl, CONNECTING, dev);
+ wl_clr_drv_status(cfg, CONNECTING, dev);
if (err == BCME_UNSUPPORTED) {
WL_DBG(("join iovar is not supported\n"));
goto set_ssid;
- } else
+ } else {
WL_ERR(("error (%d)\n", err));
+ goto exit;
+ }
} else
goto exit;
join_params.ssid.SSID_len = min(sizeof(join_params.ssid.SSID), sme->ssid_len);
memcpy(&join_params.ssid.SSID, sme->ssid, join_params.ssid.SSID_len);
join_params.ssid.SSID_len = htod32(join_params.ssid.SSID_len);
- wl_update_prof(wl, dev, NULL, &join_params.ssid, WL_PROF_SSID);
+ wl_update_prof(cfg, dev, NULL, &join_params.ssid, WL_PROF_SSID);
if (sme->bssid)
memcpy(&join_params.params.bssid, sme->bssid, ETH_ALEN);
else
		memcpy(&join_params.params.bssid, &ether_bcast, ETH_ALEN);
- wl_ch_to_chanspec(wl->channel, &join_params, &join_params_size);
+ wl_ch_to_chanspec(cfg->channel, &join_params, &join_params_size);
WL_DBG(("join_param_size %zu\n", join_params_size));
if (join_params.ssid.SSID_len < IEEE80211_MAX_SSID_LEN) {
- WL_INFO(("ssid \"%s\", len (%d)\n", join_params.ssid.SSID,
+ WL_INFORM(("ssid \"%s\", len (%d)\n", join_params.ssid.SSID,
join_params.ssid.SSID_len));
}
- wl_set_drv_status(wl, CONNECTING, dev);
+ wl_set_drv_status(cfg, CONNECTING, dev);
err = wldev_ioctl(dev, WLC_SET_SSID, &join_params, join_params_size, true);
if (err) {
WL_ERR(("error (%d)\n", err));
- wl_clr_drv_status(wl, CONNECTING, dev);
+ wl_clr_drv_status(cfg, CONNECTING, dev);
}
exit:
return err;
wl_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
u16 reason_code)
{
- struct wl_priv *wl = wiphy_priv(wiphy);
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
scb_val_t scbval;
bool act = false;
s32 err = 0;
u8 *curbssid;
+#ifdef CUSTOM_SET_CPUCORE
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+#endif /* CUSTOM_SET_CPUCORE */
WL_ERR(("Reason %d\n", reason_code));
- RETURN_EIO_IF_NOT_UP(wl);
- act = *(bool *) wl_read_prof(wl, dev, WL_PROF_ACT);
- curbssid = wl_read_prof(wl, dev, WL_PROF_BSSID);
+ RETURN_EIO_IF_NOT_UP(cfg);
+ act = *(bool *) wl_read_prof(cfg, dev, WL_PROF_ACT);
+ curbssid = wl_read_prof(cfg, dev, WL_PROF_BSSID);
if (act) {
/*
* Cancel ongoing scan to sync up with sme state machine of cfg80211.
*/
-#if !defined(ESCAN_RESULT_PATCH)
+#if (!defined(ESCAN_RESULT_PATCH) || defined(CUSTOMER_HW10))
/* Let scan aborted by F/W */
- if (wl->scan_request) {
- wl_notify_escan_complete(wl, dev, true, true);
+ if (cfg->scan_request) {
+ wl_notify_escan_complete(cfg, dev, true, true);
}
#endif /* ESCAN_RESULT_PATCH */
- wl_set_drv_status(wl, DISCONNECTING, dev);
+ wl_set_drv_status(cfg, DISCONNECTING, dev);
scbval.val = reason_code;
memcpy(&scbval.ea, curbssid, ETHER_ADDR_LEN);
scbval.val = htod32(scbval.val);
err = wldev_ioctl(dev, WLC_DISASSOC, &scbval,
sizeof(scb_val_t), true);
if (unlikely(err)) {
- wl_clr_drv_status(wl, DISCONNECTING, dev);
+ wl_clr_drv_status(cfg, DISCONNECTING, dev);
WL_ERR(("error (%d)\n", err));
return err;
}
}
+#ifdef CUSTOM_SET_CPUCORE
+ /* set default cpucore */
+ if (dev == bcmcfg_to_prmry_ndev(cfg)) {
+ dhd->chan_isvht80 &= ~DHD_FLAG_STA_MODE;
+ if (!(dhd->chan_isvht80))
+ dhd_set_cpucore(dhd, FALSE);
+ }
+#endif /* CUSTOM_SET_CPUCORE */
return err;
}
#endif /* WL_CFG80211_P2P_DEV_IF */
{
- struct wl_priv *wl = wiphy_priv(wiphy);
- struct net_device *ndev = wl_to_prmry_ndev(wl);
- u16 txpwrmw;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
s32 err = 0;
- s32 disable = 0;
- s32 txpwrqdbm;
#if defined(WL_CFG80211_P2P_DEV_IF)
s32 dbm = MBM_TO_DBM(mbm);
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)) || \
dbm = MBM_TO_DBM(dbm);
#endif /* WL_CFG80211_P2P_DEV_IF */
- RETURN_EIO_IF_NOT_UP(wl);
+ RETURN_EIO_IF_NOT_UP(cfg);
switch (type) {
case NL80211_TX_POWER_AUTOMATIC:
break;
}
break;
}
- /* Make sure radio is off or on as far as software is concerned */
- disable = WL_RADIO_SW_DISABLE << 16;
- disable = htod32(disable);
- err = wldev_ioctl(ndev, WLC_SET_RADIO, &disable, sizeof(disable), true);
- if (unlikely(err)) {
- WL_ERR(("WLC_SET_RADIO error (%d)\n", err));
- return err;
- }
- if (dbm > 0xffff)
- txpwrmw = 0xffff;
- else
- txpwrmw = (u16) dbm;
- txpwrqdbm = (s32)bcm_mw_to_qdbm(txpwrmw);
-#ifdef SUPPORT_WL_TXPOWER
- if (type == NL80211_TX_POWER_AUTOMATIC)
- txpwrqdbm = 127;
- txpwrqdbm |= WL_TXPWR_OVERRIDE;
-#endif /* SUPPORT_WL_TXPOWER */
- err = wldev_iovar_setint(ndev, "qtxpower", txpwrqdbm);
+ err = wl_set_tx_power(ndev, type, dbm);
if (unlikely(err)) {
- WL_ERR(("qtxpower error (%d)\n", err));
+ WL_ERR(("error (%d)\n", err));
return err;
}
- wl->conf->tx_power = dbm;
+
+ cfg->conf->tx_power = dbm;
return err;
}
wl_cfg80211_get_tx_power(struct wiphy *wiphy, s32 *dbm)
#endif /* WL_CFG80211_P2P_DEV_IF */
{
- struct wl_priv *wl = wiphy_priv(wiphy);
- struct net_device *ndev = wl_to_prmry_ndev(wl);
- s32 txpwrdbm;
- u8 result;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
s32 err = 0;
- RETURN_EIO_IF_NOT_UP(wl);
- err = wldev_iovar_getint(ndev, "qtxpower", &txpwrdbm);
- if (unlikely(err)) {
+ RETURN_EIO_IF_NOT_UP(cfg);
+ err = wl_get_tx_power(ndev, dbm);
+ if (unlikely(err))
WL_ERR(("error (%d)\n", err));
- return err;
- }
- result = (u8) (txpwrdbm & ~WL_TXPWR_OVERRIDE);
- *dbm = (s32) bcm_qdbm_to_mw(result);
return err;
}
wl_cfg80211_config_default_key(struct wiphy *wiphy, struct net_device *dev,
u8 key_idx, bool unicast, bool multicast)
{
- struct wl_priv *wl = wiphy_priv(wiphy);
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
u32 index;
s32 wsec;
s32 err = 0;
s32 bssidx;
- if (wl_cfgp2p_find_idx(wl, dev, &bssidx) != BCME_OK) {
+ if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
return BCME_ERROR;
}
WL_DBG(("key index (%d)\n", key_idx));
- RETURN_EIO_IF_NOT_UP(wl);
+ RETURN_EIO_IF_NOT_UP(cfg);
err = wldev_iovar_getint_bsscfg(dev, "wsec", &wsec, bssidx);
if (unlikely(err)) {
WL_ERR(("WLC_GET_WSEC error (%d)\n", err));
return err;
}
- /* fix IOT issue with Apple Airport */
if (wsec == WEP_ENABLED) {
/* Just select a new current key */
index = (u32) key_idx;
wl_add_keyext(struct wiphy *wiphy, struct net_device *dev,
u8 key_idx, const u8 *mac_addr, struct key_params *params)
{
- struct wl_priv *wl = wiphy_priv(wiphy);
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
struct wl_wsec_key key;
s32 err = 0;
s32 bssidx;
- s32 mode = wl_get_mode_by_netdev(wl, dev);
- if (wl_cfgp2p_find_idx(wl, dev, &bssidx) != BCME_OK) {
+ s32 mode = wl_get_mode_by_netdev(cfg, dev);
+ if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
return BCME_ERROR;
}
/* key delete */
swap_key_from_BE(&key);
err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key),
- wl->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &wl->ioctl_buf_sync);
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
if (unlikely(err)) {
WL_ERR(("key delete error (%d)\n", err));
return err;
/* need to guarantee EAPOL 4/4 send out before set key */
dhd_wait_pend8021x(dev);
err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key),
- wl->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &wl->ioctl_buf_sync);
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
if (unlikely(err)) {
WL_ERR(("WLC_SET_KEY error (%d)\n", err));
return err;
return err;
}
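+/* Enable or disable firmware roam offload on the primary interface.
+ * When offload is enabled, the roam/assoc related firmware events are
+ * masked so the host is not notified of roams handled by the firmware.
+ */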
+int
+wl_cfg80211_enable_roam_offload(struct net_device *dev, bool enable)
+{
+ int err;
+ wl_eventmsg_buf_t ev_buf;
+
+ if (dev != bcmcfg_to_prmry_ndev(g_bcm_cfg)) {
+ /* roam offload is only for the primary device */
+ return -1;
+ }
+ err = wldev_iovar_setint(dev, "roam_offload", (int)enable);
+ if (err)
+ return err;
+
+ bzero(&ev_buf, sizeof(wl_eventmsg_buf_t));
+ wl_cfg80211_add_to_eventbuffer(&ev_buf, WLC_E_PSK_SUP, !enable);
+ wl_cfg80211_add_to_eventbuffer(&ev_buf, WLC_E_ASSOC_REQ_IE, !enable);
+ wl_cfg80211_add_to_eventbuffer(&ev_buf, WLC_E_ASSOC_RESP_IE, !enable);
+ wl_cfg80211_add_to_eventbuffer(&ev_buf, WLC_E_REASSOC, !enable);
+ wl_cfg80211_add_to_eventbuffer(&ev_buf, WLC_E_JOIN, !enable);
+ wl_cfg80211_add_to_eventbuffer(&ev_buf, WLC_E_ROAM, !enable);
+ err = wl_cfg80211_apply_eventbuffer(dev, g_bcm_cfg, &ev_buf);
+ if (!err) {
+ g_bcm_cfg->roam_offload = enable;
+ }
+ return err;
+}
+
static s32
wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev,
u8 key_idx, bool pairwise, const u8 *mac_addr,
s32 err = 0;
u8 keybuf[8];
s32 bssidx = 0;
- struct wl_priv *wl = wiphy_priv(wiphy);
- s32 mode = wl_get_mode_by_netdev(wl, dev);
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ s32 mode = wl_get_mode_by_netdev(cfg, dev);
WL_DBG(("key index (%d)\n", key_idx));
- RETURN_EIO_IF_NOT_UP(wl);
+ RETURN_EIO_IF_NOT_UP(cfg);
- if (wl_cfgp2p_find_idx(wl, dev, &bssidx) != BCME_OK) {
+ if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
return BCME_ERROR;
}
char keystring[WSEC_MAX_PSK_LEN + 1];
char* charptr = keystring;
uint len;
-
+ struct wl_security *sec;
+
+ sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
+ if (sec->wpa_auth == WLAN_AKM_SUITE_8021X) {
+ err = wldev_iovar_setbuf(dev, "okc_info_pmk", params->key,
+ WSEC_MAX_PSK_LEN / 2, keystring, sizeof(keystring), NULL);
+ if (err) {
+ /* could fail if 'okc' is not supported */
+ WL_INFORM(("Setting 'okc_info_pmk' failed, err=%d\n", err));
+ }
+ }
/* copy the raw hex key to the appropriate format */
for (j = 0; j < (WSEC_MAX_PSK_LEN / 2); j++) {
sprintf(charptr, "%02x", params->key[j]);
wldev_iovar_setint(dev, "wpa_auth", WPA_AUTH_NONE);
}
swap_key_from_BE(&key);
- err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key), wl->ioctl_buf,
- WLC_IOCTL_MAXLEN, bssidx, &wl->ioctl_buf_sync);
+ err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key), cfg->ioctl_buf,
+ WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
if (unlikely(err)) {
WL_ERR(("WLC_SET_KEY error (%d)\n", err));
return err;
u8 key_idx, bool pairwise, const u8 *mac_addr)
{
struct wl_wsec_key key;
- struct wl_priv *wl = wiphy_priv(wiphy);
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
s32 err = 0;
s32 bssidx;
- if (wl_cfgp2p_find_idx(wl, dev, &bssidx) != BCME_OK) {
+ if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
return BCME_ERROR;
}
return -EINVAL;
#endif
- RETURN_EIO_IF_NOT_UP(wl);
+ RETURN_EIO_IF_NOT_UP(cfg);
memset(&key, 0, sizeof(key));
key.flags = WL_PRIMARY_KEY;
WL_DBG(("key index (%d)\n", key_idx));
/* Set the new key/index */
swap_key_from_BE(&key);
- err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key), wl->ioctl_buf,
- WLC_IOCTL_MAXLEN, bssidx, &wl->ioctl_buf_sync);
+ err = wldev_iovar_setbuf_bsscfg(dev, "wsec_key", &key, sizeof(key), cfg->ioctl_buf,
+ WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
if (unlikely(err)) {
if (err == -EINVAL) {
if (key.index >= DOT11_MAX_DEFAULT_KEYS) {
{
struct key_params params;
struct wl_wsec_key key;
- struct wl_priv *wl = wiphy_priv(wiphy);
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
struct wl_security *sec;
s32 wsec;
s32 err = 0;
s32 bssidx;
- if (wl_cfgp2p_find_idx(wl, dev, &bssidx) != BCME_OK) {
+ if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
return BCME_ERROR;
}
WL_DBG(("key index (%d)\n", key_idx));
- RETURN_EIO_IF_NOT_UP(wl);
+ RETURN_EIO_IF_NOT_UP(cfg);
memset(&key, 0, sizeof(key));
key.index = key_idx;
swap_key_to_BE(&key);
WL_ERR(("WLC_GET_WSEC error (%d)\n", err));
return err;
}
- switch (wsec & ~SES_OW_ENABLED) {
+ switch (WSEC_ENABLED(wsec)) {
case WEP_ENABLED:
- sec = wl_read_prof(wl, dev, WL_PROF_SEC);
+ sec = wl_read_prof(cfg, dev, WL_PROF_SEC);
if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP40) {
params.cipher = WLAN_CIPHER_SUITE_WEP40;
WL_DBG(("WLAN_CIPHER_SUITE_WEP40\n"));
key.algo = CRYPTO_ALGO_SMS4;
WL_DBG(("WLAN_CIPHER_SUITE_SMS4\n"));
break;
+#endif
+#if defined(SUPPORT_SOFTAP_WPAWPA2_MIXED)
+ /* to connect to mixed mode AP */
+ case (AES_ENABLED | TKIP_ENABLED): /* TKIP CCMP */
+ params.cipher = WLAN_CIPHER_SUITE_AES_CMAC;
+ WL_DBG(("WLAN_CIPHER_SUITE_TKIP\n"));
+ break;
#endif
default:
WL_ERR(("Invalid algo (0x%x)\n", wsec));
wl_cfg80211_config_default_mgmt_key(struct wiphy *wiphy,
struct net_device *dev, u8 key_idx)
{
- WL_INFO(("Not supported\n"));
+ WL_INFORM(("Not supported\n"));
return -EOPNOTSUPP;
}
wl_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev,
u8 *mac, struct station_info *sinfo)
{
- struct wl_priv *wl = wiphy_priv(wiphy);
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
scb_val_t scb_val;
s32 rssi;
s32 rate;
s8 eabuf[ETHER_ADDR_STR_LEN];
#endif
static int err_cnt = 0;
- dhd_pub_t *dhd = (dhd_pub_t *)(wl->pub);
- RETURN_EIO_IF_NOT_UP(wl);
- if (wl_get_mode_by_netdev(wl, dev) == WL_MODE_AP) {
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+ RETURN_EIO_IF_NOT_UP(cfg);
+ if (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_AP) {
err = wldev_iovar_getbuf(dev, "sta_info", (struct ether_addr *)mac,
- ETHER_ADDR_LEN, wl->ioctl_buf, WLC_IOCTL_SMLEN, &wl->ioctl_buf_sync);
+ ETHER_ADDR_LEN, cfg->ioctl_buf, WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync);
if (err < 0) {
WL_ERR(("GET STA INFO failed, %d\n", err));
return err;
}
sinfo->filled = STATION_INFO_INACTIVE_TIME;
- sta = (sta_info_t *)wl->ioctl_buf;
+ sta = (sta_info_t *)cfg->ioctl_buf;
sta->len = dtoh16(sta->len);
sta->cap = dtoh16(sta->cap);
sta->flags = dtoh32(sta->flags);
sinfo->filled |= STATION_INFO_CONNECTED_TIME;
sinfo->connected_time = sta->in;
}
- WL_INFO(("STA %s : idle time : %d sec, connected time :%d ms\n",
+ WL_INFORM(("STA %s : idle time : %d sec, connected time :%d ms\n",
bcm_ether_ntoa((const struct ether_addr *)mac, eabuf), sinfo->inactive_time,
sta->idle * 1000));
#endif
- } else if (wl_get_mode_by_netdev(wl, dev) == WL_MODE_BSS ||
- wl_get_mode_by_netdev(wl, dev) == WL_MODE_IBSS) {
+ } else if (wl_get_mode_by_netdev(cfg, dev) == WL_MODE_BSS ||
+ wl_get_mode_by_netdev(cfg, dev) == WL_MODE_IBSS) {
get_pktcnt_t pktcnt;
- u8 *curmacp = wl_read_prof(wl, dev, WL_PROF_BSSID);
- if (!wl_get_drv_status(wl, CONNECTED, dev) ||
+ u8 *curmacp;
+
+ if (cfg->roam_offload) {
+ struct ether_addr bssid;
+ err = wldev_ioctl(dev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN, false);
+ if (err) {
+ WL_ERR(("Failed to get current BSSID\n"));
+ } else {
+ if (memcmp(mac, &bssid.octet, ETHER_ADDR_LEN) != 0) {
+ /* roaming is detected */
+ err = wl_cfg80211_delayed_roam(cfg, dev, &bssid);
+ if (err)
+ WL_ERR(("Failed to handle the delayed roam, "
+ "err=%d", err));
+ mac = (u8 *)bssid.octet;
+ }
+ }
+ }
+ if (!wl_get_drv_status(cfg, CONNECTED, dev) ||
(dhd_is_associated(dhd, NULL, &err) == FALSE)) {
WL_ERR(("NOT assoc\n"));
if (err == -ERESTARTSYS)
err = -ENODEV;
return err;
}
+ curmacp = wl_read_prof(cfg, dev, WL_PROF_BSSID);
if (memcmp(mac, curmacp, ETHER_ADDR_LEN)) {
WL_ERR(("Wrong Mac address: "MACDBG" != "MACDBG"\n",
MAC2STRDBG(mac), MAC2STRDBG(curmacp)));
WL_DBG(("Rate %d Mbps, update bus:maxtxpktglom=%d\n", (rate/2),
maxrxpktglom));
err = wldev_iovar_setbuf(dev, "bus:maxtxpktglom",
- (char*)&maxrxpktglom, 4, wl->ioctl_buf,
+ (char*)&maxrxpktglom, 4, cfg->ioctl_buf,
WLC_IOCTL_MAXLEN, NULL);
if (err < 0) {
WL_ERR(("set bus:maxtxpktglom failed, %d\n", err));
wl_reset_rssi_cache(&g_rssi2_cache_ctrl);
#endif
#if defined(RSSIOFFSET)
- rssi = wl_update_rssi_offset(rssi);
+ rssi = wl_update_rssi_offset(dev, rssi);
#endif
sinfo->filled |= STATION_INFO_SIGNAL;
sinfo->signal = rssi;
if (err_cnt >= 3 && (err != -ERESTARTSYS)) {
/* Disconnect due to zero BSSID or error to get RSSI */
WL_ERR(("force cfg80211_disconnected: %d\n", err));
- wl_clr_drv_status(wl, CONNECTED, dev);
+ wl_clr_drv_status(cfg, CONNECTED, dev);
cfg80211_disconnected(dev, 0, NULL, 0, GFP_KERNEL);
- wl_link_down(wl);
+ wl_link_down(cfg);
}
}
else {
- WL_ERR(("Invalid device mode %d\n", wl_get_mode_by_netdev(wl, dev)));
+ WL_ERR(("Invalid device mode %d\n", wl_get_mode_by_netdev(cfg, dev)));
}
return err;
}
-/* Function to update sta power save mode for Kernel wifi stack */
-int wl_cfg80211_update_power_mode(struct net_device *dev)
-{
- int pm = -1;
- int err;
-
- err = wldev_ioctl(dev, WLC_GET_PM, &pm, sizeof(pm), false);
- if (err || (pm == -1)) {
- WL_ERR(("error (%d)\n", err));
- } else {
- pm = (pm == PM_OFF) ? false : true;
- WL_DBG(("%s: %d\n", __func__, pm));
- if (dev->ieee80211_ptr)
- dev->ieee80211_ptr->ps = pm;
- }
- return err;
-}
-
static s32
wl_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
bool enabled, s32 timeout)
{
s32 pm;
s32 err = 0;
- struct wl_priv *wl = wiphy_priv(wiphy);
- struct net_info *_net_info = wl_get_netinfo_by_netdev(wl, dev);
-#if !defined(SUPPORT_PM2_ONLY)
- dhd_pub_t *dhd = (dhd_pub_t *)(wl->pub);
-#endif /* (OEM_ANDROID) */
- RETURN_EIO_IF_NOT_UP(wl);
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct net_info *_net_info = wl_get_netinfo_by_netdev(cfg, dev);
+
+ RETURN_EIO_IF_NOT_UP(cfg);
WL_DBG(("Enter\n"));
-#if defined(WL_ENABLE_P2P_IF)
- if (wl->p2p_net == dev || _net_info == NULL || wl->vsdb_mode ||
- !wl_get_drv_status(wl, CONNECTED, dev))
-#else
- if (_net_info == NULL || wl->vsdb_mode ||
- !wl_get_drv_status(wl, CONNECTED, dev))
-#endif /* WL_ENABLE_P2P_IF */
- {
+ if (cfg->p2p_net == dev || _net_info == NULL || cfg->vsdb_mode ||
+ !wl_get_drv_status(cfg, CONNECTED, dev)) {
return err;
}
- WL_DBG(("%s: Enter power save enabled %d\n", dev->name, enabled));
/* Delete pm_enable_work */
- if (wl->pm_enable_work_on) {
- cancel_delayed_work_sync(&wl->pm_enable_work);
- wl->pm_enable_work_on = false;
- }
+ wl_add_remove_pm_enable_work(cfg, FALSE, WL_HANDLER_PEND);
-#if !defined(SUPPORT_PM2_ONLY)
- /* android has special hooks to change pm when kernel suspended */
- pm = enabled ? ((dhd->in_suspend) ? PM_MAX : PM_FAST) : PM_OFF;
-#else
pm = enabled ? PM_FAST : PM_OFF;
-#endif /* SUPPORT_PM2_ONLY */
if (_net_info->pm_block) {
WL_ERR(("%s:Do not enable the power save for pm_block %d\n",
dev->name, _net_info->pm_block));
WL_ERR(("error (%d)\n", err));
return err;
}
+ wl_cfg80211_update_power_mode(dev);
return err;
}
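+/* Function to update sta power save mode for Kernel wifi stack */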
+void wl_cfg80211_update_power_mode(struct net_device *dev)
+{
+ int err, pm = -1;
+
+ err = wldev_ioctl(dev, WLC_GET_PM, &pm, sizeof(pm), true);
+ if (err)
+ WL_ERR(("%s:error (%d)\n", __FUNCTION__, err));
+ else if (pm != -1 && dev->ieee80211_ptr)
+ dev->ieee80211_ptr->ps = (pm == PM_OFF) ? false : true;
+}
+
static __used u32 wl_find_msb(u16 bit16)
{
u32 ret = 0;
static s32 wl_cfg80211_resume(struct wiphy *wiphy)
{
- struct wl_priv *wl = wiphy_priv(wiphy);
- struct net_device *ndev = wl_to_prmry_ndev(wl);
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
s32 err = 0;
- if (unlikely(!wl_get_drv_status(wl, READY, ndev))) {
- WL_INFO(("device is not ready\n"));
+ if (unlikely(!wl_get_drv_status(cfg, READY, ndev))) {
+ WL_INFORM(("device is not ready\n"));
return 0;
}
- wl_invoke_iscan(wl);
-
return err;
}
#endif
{
#ifdef DHD_CLEAR_ON_SUSPEND
- struct wl_priv *wl = wiphy_priv(wiphy);
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
struct net_info *iter, *next;
- struct net_device *ndev = wl_to_prmry_ndev(wl);
+ struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
unsigned long flags;
- if (unlikely(!wl_get_drv_status(wl, READY, ndev))) {
- WL_INFO(("device is not ready : status (%d)\n",
- (int)wl->status));
+ if (unlikely(!wl_get_drv_status(cfg, READY, ndev))) {
+ WL_INFORM(("device is not ready : status (%d)\n",
+ (int)cfg->status));
return 0;
}
- for_each_ndev(wl, iter, next)
- wl_set_drv_status(wl, SCAN_ABORTING, iter->ndev);
- wl_term_iscan(wl);
- spin_lock_irqsave(&wl->cfgdrv_lock, flags);
- if (wl->scan_request) {
- cfg80211_scan_done(wl->scan_request, true);
- wl->scan_request = NULL;
+ for_each_ndev(cfg, iter, next)
+ wl_set_drv_status(cfg, SCAN_ABORTING, iter->ndev);
+ spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+ if (cfg->scan_request) {
+ cfg80211_scan_done(cfg->scan_request, true);
+ cfg->scan_request = NULL;
}
- for_each_ndev(wl, iter, next) {
- wl_clr_drv_status(wl, SCANNING, iter->ndev);
- wl_clr_drv_status(wl, SCAN_ABORTING, iter->ndev);
+ for_each_ndev(cfg, iter, next) {
+ wl_clr_drv_status(cfg, SCANNING, iter->ndev);
+ wl_clr_drv_status(cfg, SCAN_ABORTING, iter->ndev);
}
- spin_unlock_irqrestore(&wl->cfgdrv_lock, flags);
- for_each_ndev(wl, iter, next) {
- if (wl_get_drv_status(wl, CONNECTING, iter->ndev)) {
- wl_bss_connect_done(wl, iter->ndev, NULL, NULL, false);
+ spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+ for_each_ndev(cfg, iter, next) {
+ if (wl_get_drv_status(cfg, CONNECTING, iter->ndev)) {
+ wl_bss_connect_done(cfg, iter->ndev, NULL, NULL, false);
}
}
#endif /* DHD_CLEAR_ON_SUSPEND */
s32 err)
{
int i, j;
- struct wl_priv *wl = wlcfg_drv_priv;
- struct net_device *primary_dev = wl_to_prmry_ndev(wl);
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
+ struct net_device *primary_dev = bcmcfg_to_prmry_ndev(cfg);
if (!pmk_list) {
printk("pmk_list is NULL\n");
* Refer code wlc_bsscfg.c->wlc_bsscfg_sta_init
*/
if (primary_dev != dev) {
- WL_INFO(("Not supporting Flushing pmklist on virtual"
+ WL_INFORM(("Not supporting Flushing pmklist on virtual"
" interfaces than primary interface\n"));
return err;
}
}
if (likely(!err)) {
err = wldev_iovar_setbuf(dev, "pmkid_info", (char *)pmk_list,
- sizeof(*pmk_list), wl->ioctl_buf, WLC_IOCTL_MAXLEN, NULL);
+ sizeof(*pmk_list), cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
}
return err;
wl_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_pmksa *pmksa)
{
- struct wl_priv *wl = wiphy_priv(wiphy);
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
s32 err = 0;
int i;
- RETURN_EIO_IF_NOT_UP(wl);
- for (i = 0; i < wl->pmk_list->pmkids.npmkid; i++)
- if (!memcmp(pmksa->bssid, &wl->pmk_list->pmkids.pmkid[i].BSSID,
+ RETURN_EIO_IF_NOT_UP(cfg);
+ for (i = 0; i < cfg->pmk_list->pmkids.npmkid; i++)
+ if (!memcmp(pmksa->bssid, &cfg->pmk_list->pmkids.pmkid[i].BSSID,
ETHER_ADDR_LEN))
break;
if (i < WL_NUM_PMKIDS_MAX) {
- memcpy(&wl->pmk_list->pmkids.pmkid[i].BSSID, pmksa->bssid,
+ memcpy(&cfg->pmk_list->pmkids.pmkid[i].BSSID, pmksa->bssid,
ETHER_ADDR_LEN);
- memcpy(&wl->pmk_list->pmkids.pmkid[i].PMKID, pmksa->pmkid,
+ memcpy(&cfg->pmk_list->pmkids.pmkid[i].PMKID, pmksa->pmkid,
WPA2_PMKID_LEN);
- if (i == wl->pmk_list->pmkids.npmkid)
- wl->pmk_list->pmkids.npmkid++;
+ if (i == cfg->pmk_list->pmkids.npmkid)
+ cfg->pmk_list->pmkids.npmkid++;
} else {
err = -EINVAL;
}
WL_DBG(("set_pmksa,IW_PMKSA_ADD - PMKID: %pM =\n",
- &wl->pmk_list->pmkids.pmkid[wl->pmk_list->pmkids.npmkid - 1].BSSID));
+ &cfg->pmk_list->pmkids.pmkid[cfg->pmk_list->pmkids.npmkid - 1].BSSID));
for (i = 0; i < WPA2_PMKID_LEN; i++) {
WL_DBG(("%02x\n",
- wl->pmk_list->pmkids.pmkid[wl->pmk_list->pmkids.npmkid - 1].
+ cfg->pmk_list->pmkids.pmkid[cfg->pmk_list->pmkids.npmkid - 1].
PMKID[i]));
}
- err = wl_update_pmklist(dev, wl->pmk_list, err);
+ err = wl_update_pmklist(dev, cfg->pmk_list, err);
return err;
}
wl_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_pmksa *pmksa)
{
- struct wl_priv *wl = wiphy_priv(wiphy);
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
struct _pmkid_list pmkid = {0};
s32 err = 0;
int i;
- RETURN_EIO_IF_NOT_UP(wl);
+ RETURN_EIO_IF_NOT_UP(cfg);
memcpy(&pmkid.pmkid[0].BSSID, pmksa->bssid, ETHER_ADDR_LEN);
memcpy(pmkid.pmkid[0].PMKID, pmksa->pmkid, WPA2_PMKID_LEN);
WL_DBG(("%02x\n", pmkid.pmkid[0].PMKID[i]));
}
- for (i = 0; i < wl->pmk_list->pmkids.npmkid; i++)
+ for (i = 0; i < cfg->pmk_list->pmkids.npmkid; i++)
if (!memcmp
- (pmksa->bssid, &wl->pmk_list->pmkids.pmkid[i].BSSID,
+ (pmksa->bssid, &cfg->pmk_list->pmkids.pmkid[i].BSSID,
ETHER_ADDR_LEN))
break;
- if ((wl->pmk_list->pmkids.npmkid > 0) &&
- (i < wl->pmk_list->pmkids.npmkid)) {
- memset(&wl->pmk_list->pmkids.pmkid[i], 0, sizeof(pmkid_t));
- for (; i < (wl->pmk_list->pmkids.npmkid - 1); i++) {
- memcpy(&wl->pmk_list->pmkids.pmkid[i].BSSID,
- &wl->pmk_list->pmkids.pmkid[i + 1].BSSID,
+ if ((cfg->pmk_list->pmkids.npmkid > 0) &&
+ (i < cfg->pmk_list->pmkids.npmkid)) {
+ memset(&cfg->pmk_list->pmkids.pmkid[i], 0, sizeof(pmkid_t));
+ for (; i < (cfg->pmk_list->pmkids.npmkid - 1); i++) {
+ memcpy(&cfg->pmk_list->pmkids.pmkid[i].BSSID,
+ &cfg->pmk_list->pmkids.pmkid[i + 1].BSSID,
ETHER_ADDR_LEN);
- memcpy(&wl->pmk_list->pmkids.pmkid[i].PMKID,
- &wl->pmk_list->pmkids.pmkid[i + 1].PMKID,
+ memcpy(&cfg->pmk_list->pmkids.pmkid[i].PMKID,
+ &cfg->pmk_list->pmkids.pmkid[i + 1].PMKID,
WPA2_PMKID_LEN);
}
- wl->pmk_list->pmkids.npmkid--;
+ cfg->pmk_list->pmkids.npmkid--;
} else {
err = -EINVAL;
}
- err = wl_update_pmklist(dev, wl->pmk_list, err);
+ err = wl_update_pmklist(dev, cfg->pmk_list, err);
return err;
static s32
wl_cfg80211_flush_pmksa(struct wiphy *wiphy, struct net_device *dev)
{
- struct wl_priv *wl = wiphy_priv(wiphy);
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
s32 err = 0;
- RETURN_EIO_IF_NOT_UP(wl);
- memset(wl->pmk_list, 0, sizeof(*wl->pmk_list));
- err = wl_update_pmklist(dev, wl->pmk_list, err);
+ RETURN_EIO_IF_NOT_UP(cfg);
+ memset(cfg->pmk_list, 0, sizeof(*cfg->pmk_list));
+ err = wl_update_pmklist(dev, cfg->pmk_list, err);
return err;
}
s32 err = BCME_OK;
struct ether_addr primary_mac;
struct net_device *ndev = NULL;
- struct wl_priv *wl = wiphy_priv(wiphy);
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- ndev = cfgdev_to_wlc_ndev(cfgdev, wl);
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
WL_DBG(("Enter, channel: %d, duration ms (%d) SCANNING ?? %s \n",
ieee80211_frequency_to_channel(channel->center_freq),
- duration, (wl_get_drv_status(wl, SCANNING, ndev)) ? "YES":"NO"));
+ duration, (wl_get_drv_status(cfg, SCANNING, ndev)) ? "YES":"NO"));
- if (!wl->p2p) {
- WL_ERR(("wl->p2p is not initialized\n"));
+ if (!cfg->p2p) {
+ WL_ERR(("cfg->p2p is not initialized\n"));
err = BCME_ERROR;
goto exit;
}
#ifndef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
- if (wl_get_drv_status_all(wl, SCANNING)) {
- wl_notify_escan_complete(wl, wl->escan_info.ndev, true, true);
+ if (wl_get_drv_status_all(cfg, SCANNING)) {
+ wl_notify_escan_complete(cfg, cfg->escan_info.ndev, true, true);
}
#endif /* not WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
target_channel = ieee80211_frequency_to_channel(channel->center_freq);
- memcpy(&wl->remain_on_chan, channel, sizeof(struct ieee80211_channel));
+ memcpy(&cfg->remain_on_chan, channel, sizeof(struct ieee80211_channel));
#if defined(WL_ENABLE_P2P_IF)
- wl->remain_on_chan_type = channel_type;
+ cfg->remain_on_chan_type = channel_type;
#endif /* WL_ENABLE_P2P_IF */
- id = ++wl->last_roc_id;
+ id = ++cfg->last_roc_id;
if (id == 0)
- id = ++wl->last_roc_id;
+ id = ++cfg->last_roc_id;
*cookie = id;
#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
- if (wl_get_drv_status(wl, SCANNING, ndev)) {
+ if (wl_get_drv_status(cfg, SCANNING, ndev)) {
struct timer_list *_timer;
WL_DBG(("scan is running. go to fake listen state\n"));
- wl_set_drv_status(wl, FAKE_REMAINING_ON_CHANNEL, ndev);
+ wl_set_drv_status(cfg, FAKE_REMAINING_ON_CHANNEL, ndev);
- if (timer_pending(&wl->p2p->listen_timer)) {
+ if (timer_pending(&cfg->p2p->listen_timer)) {
WL_DBG(("cancel current listen timer \n"));
- del_timer_sync(&wl->p2p->listen_timer);
+ del_timer_sync(&cfg->p2p->listen_timer);
}
- _timer = &wl->p2p->listen_timer;
- wl_clr_p2p_status(wl, LISTEN_EXPIRED);
+ _timer = &cfg->p2p->listen_timer;
+ wl_clr_p2p_status(cfg, LISTEN_EXPIRED);
INIT_TIMER(_timer, wl_cfgp2p_listen_expired, duration, 0);
#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
#ifdef WL_CFG80211_SYNC_GON
- if (wl_get_drv_status_all(wl, WAITING_NEXT_ACT_FRM_LISTEN)) {
+ if (wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM_LISTEN)) {
/* do not enter listen mode again if we are in listen mode already for next af.
* remain on channel completion will be returned by waiting next af completion.
*/
#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
- wl_set_drv_status(wl, FAKE_REMAINING_ON_CHANNEL, ndev);
+ wl_set_drv_status(cfg, FAKE_REMAINING_ON_CHANNEL, ndev);
#else
- wl_set_drv_status(wl, REMAINING_ON_CHANNEL, ndev);
+ wl_set_drv_status(cfg, REMAINING_ON_CHANNEL, ndev);
#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
goto exit;
}
#endif /* WL_CFG80211_SYNC_GON */
- if (wl->p2p && !wl->p2p->on) {
+ if (cfg->p2p && !cfg->p2p->on) {
/* In case of p2p_listen command, supplicant send remain_on_channel
* without turning on P2P
*/
- get_primary_mac(wl, &primary_mac);
- wl_cfgp2p_generate_bss_mac(&primary_mac, &wl->p2p->dev_addr, &wl->p2p->int_addr);
- p2p_on(wl) = true;
+ get_primary_mac(cfg, &primary_mac);
+ wl_cfgp2p_generate_bss_mac(&primary_mac, &cfg->p2p->dev_addr, &cfg->p2p->int_addr);
+ p2p_on(cfg) = true;
}
- if (p2p_is_on(wl)) {
- err = wl_cfgp2p_enable_discovery(wl, ndev, NULL, 0);
+ if (p2p_is_on(cfg)) {
+ err = wl_cfgp2p_enable_discovery(cfg, ndev, NULL, 0);
if (unlikely(err)) {
goto exit;
}
#ifndef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
- wl_set_drv_status(wl, REMAINING_ON_CHANNEL, ndev);
+ wl_set_drv_status(cfg, REMAINING_ON_CHANNEL, ndev);
#endif /* not WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
- err = wl_cfgp2p_discover_listen(wl, target_channel, duration);
+ err = wl_cfgp2p_discover_listen(cfg, target_channel, duration);
#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
if (err == BCME_OK) {
- wl_set_drv_status(wl, REMAINING_ON_CHANNEL, ndev);
+ wl_set_drv_status(cfg, REMAINING_ON_CHANNEL, ndev);
} else {
/* if failed, firmware may be in internal scanning state.
* so other scan request shall not abort it
*/
- wl_set_drv_status(wl, FAKE_REMAINING_ON_CHANNEL, ndev);
+ wl_set_drv_status(cfg, FAKE_REMAINING_ON_CHANNEL, ndev);
}
#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
/* WAR: set err = ok to prevent cookie mismatch in wpa_supplicant
exit:
if (err == BCME_OK) {
- WL_INFO(("Success\n"));
+ WL_INFORM(("Success\n"));
#if defined(WL_CFG80211_P2P_DEV_IF)
cfg80211_ready_on_channel(cfgdev, *cookie, channel,
duration, GFP_KERNEL);
wl_cfg80211_afx_handler(struct work_struct *work)
{
struct afx_hdl *afx_instance;
- struct wl_priv *wl = wlcfg_drv_priv;
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
s32 ret = BCME_OK;
afx_instance = container_of(work, struct afx_hdl, work);
- if (afx_instance != NULL && wl->afx_hdl->is_active) {
- if (wl->afx_hdl->is_listen && wl->afx_hdl->my_listen_chan) {
- ret = wl_cfgp2p_discover_listen(wl, wl->afx_hdl->my_listen_chan,
+ if (afx_instance != NULL && cfg->afx_hdl->is_active) {
+ if (cfg->afx_hdl->is_listen && cfg->afx_hdl->my_listen_chan) {
+ ret = wl_cfgp2p_discover_listen(cfg, cfg->afx_hdl->my_listen_chan,
(100 * (1 + (RANDOM32() % 3)))); /* 100ms ~ 300ms */
} else {
- ret = wl_cfgp2p_act_frm_search(wl, wl->afx_hdl->dev,
- wl->afx_hdl->bssidx, wl->afx_hdl->peer_listen_chan,
+ ret = wl_cfgp2p_act_frm_search(cfg, cfg->afx_hdl->dev,
+ cfg->afx_hdl->bssidx, cfg->afx_hdl->peer_listen_chan,
NULL);
}
if (unlikely(ret != BCME_OK)) {
WL_ERR(("ERROR occurred! returned value is (%d)\n", ret));
- if (wl_get_drv_status_all(wl, FINDING_COMMON_CHANNEL))
- complete(&wl->act_frm_scan);
+ if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL))
+ complete(&cfg->act_frm_scan);
}
}
}
static s32
-wl_cfg80211_af_searching_channel(struct wl_priv *wl, struct net_device *dev)
+wl_cfg80211_af_searching_channel(struct bcm_cfg80211 *cfg, struct net_device *dev)
{
u32 max_retry = WL_CHANNEL_SYNC_RETRY;
WL_DBG((" enter ) \n"));
- wl_set_drv_status(wl, FINDING_COMMON_CHANNEL, dev);
- wl->afx_hdl->is_active = TRUE;
+ wl_set_drv_status(cfg, FINDING_COMMON_CHANNEL, dev);
+ cfg->afx_hdl->is_active = TRUE;
/* Loop to wait until we find a peer's channel or the
* pending action frame tx is cancelled.
*/
- while ((wl->afx_hdl->retry < max_retry) &&
- (wl->afx_hdl->peer_chan == WL_INVALID)) {
- wl->afx_hdl->is_listen = FALSE;
- wl_set_drv_status(wl, SCANNING, dev);
+ while ((cfg->afx_hdl->retry < max_retry) &&
+ (cfg->afx_hdl->peer_chan == WL_INVALID)) {
+ cfg->afx_hdl->is_listen = FALSE;
+ wl_set_drv_status(cfg, SCANNING, dev);
WL_DBG(("Scheduling the action frame for sending.. retry %d\n",
- wl->afx_hdl->retry));
+ cfg->afx_hdl->retry));
/* search peer on peer's listen channel */
- schedule_work(&wl->afx_hdl->work);
- wait_for_completion_timeout(&wl->act_frm_scan,
+ schedule_work(&cfg->afx_hdl->work);
+ wait_for_completion_timeout(&cfg->act_frm_scan,
msecs_to_jiffies(WL_AF_SEARCH_TIME_MAX));
- if ((wl->afx_hdl->peer_chan != WL_INVALID) ||
- !(wl_get_drv_status(wl, FINDING_COMMON_CHANNEL, dev)))
+ if ((cfg->afx_hdl->peer_chan != WL_INVALID) ||
+ !(wl_get_drv_status(cfg, FINDING_COMMON_CHANNEL, dev)))
break;
- if (wl->afx_hdl->my_listen_chan) {
+ if (cfg->afx_hdl->my_listen_chan) {
WL_DBG(("Scheduling Listen peer in my listen channel = %d\n",
- wl->afx_hdl->my_listen_chan));
+ cfg->afx_hdl->my_listen_chan));
/* listen on my listen channel */
- wl->afx_hdl->is_listen = TRUE;
- schedule_work(&wl->afx_hdl->work);
- wait_for_completion_timeout(&wl->act_frm_scan,
+ cfg->afx_hdl->is_listen = TRUE;
+ schedule_work(&cfg->afx_hdl->work);
+ wait_for_completion_timeout(&cfg->act_frm_scan,
msecs_to_jiffies(WL_AF_SEARCH_TIME_MAX));
}
- if ((wl->afx_hdl->peer_chan != WL_INVALID) ||
- !(wl_get_drv_status(wl, FINDING_COMMON_CHANNEL, dev)))
+ if ((cfg->afx_hdl->peer_chan != WL_INVALID) ||
+ !(wl_get_drv_status(cfg, FINDING_COMMON_CHANNEL, dev)))
break;
- wl->afx_hdl->retry++;
+ cfg->afx_hdl->retry++;
- WL_AF_TX_KEEP_PRI_CONNECTION_VSDB(wl);
+ WL_AF_TX_KEEP_PRI_CONNECTION_VSDB(cfg);
}
- wl->afx_hdl->is_active = FALSE;
+ cfg->afx_hdl->is_active = FALSE;
- wl_clr_drv_status(wl, SCANNING, dev);
- wl_clr_drv_status(wl, FINDING_COMMON_CHANNEL, dev);
+ wl_clr_drv_status(cfg, SCANNING, dev);
+ wl_clr_drv_status(cfg, FINDING_COMMON_CHANNEL, dev);
- return (wl->afx_hdl->peer_chan);
+ return (cfg->afx_hdl->peer_chan);
}
struct p2p_config_af_params {
struct p2p_config_af_params *config_af_params)
{
s32 err = BCME_OK;
- struct wl_priv *wl = wiphy_priv(wiphy);
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
wifi_p2p_pub_act_frame_t *act_frm =
(wifi_p2p_pub_act_frame_t *) (action_frame->data);
config_af_params->search_channel = false;
config_af_params->max_tx_retry = WL_AF_TX_MAX_RETRY;
config_af_params->mpc_onoff = -1;
- wl->next_af_subtype = P2P_PAF_SUBTYPE_INVALID;
+ cfg->next_af_subtype = P2P_PAF_SUBTYPE_INVALID;
switch (act_frm->subtype) {
case P2P_PAF_GON_REQ: {
WL_DBG(("P2P: GO_NEG_PHASE status set \n"));
- wl_set_p2p_status(wl, GO_NEG_PHASE);
+ wl_set_p2p_status(cfg, GO_NEG_PHASE);
config_af_params->mpc_onoff = 0;
config_af_params->search_channel = true;
- wl->next_af_subtype = act_frm->subtype + 1;
+ cfg->next_af_subtype = act_frm->subtype + 1;
/* increase dwell time to wait for RESP frame */
af_params->dwell_time = WL_MED_DWELL_TIME;
break;
}
case P2P_PAF_GON_RSP: {
- wl->next_af_subtype = act_frm->subtype + 1;
+ cfg->next_af_subtype = act_frm->subtype + 1;
/* increase dwell time to wait for CONF frame */
af_params->dwell_time = WL_MED_DWELL_TIME + 100;
break;
case P2P_PAF_GON_CONF: {
/* If we reached till GO Neg confirmation reset the filter */
WL_DBG(("P2P: GO_NEG_PHASE status cleared \n"));
- wl_clr_p2p_status(wl, GO_NEG_PHASE);
+ wl_clr_p2p_status(cfg, GO_NEG_PHASE);
/* turn on mpc again if go nego is done */
config_af_params->mpc_onoff = 1;
}
case P2P_PAF_INVITE_REQ: {
config_af_params->search_channel = true;
- wl->next_af_subtype = act_frm->subtype + 1;
+ cfg->next_af_subtype = act_frm->subtype + 1;
/* increase dwell time */
af_params->dwell_time = WL_MED_DWELL_TIME;
config_af_params->search_channel = true;
}
- wl->next_af_subtype = act_frm->subtype + 1;
+ cfg->next_af_subtype = act_frm->subtype + 1;
/* maximize dwell time to wait for RESP frame */
af_params->dwell_time = WL_LONG_DWELL_TIME;
break;
}
config_af_params->mpc_onoff = 0;
- wl->next_af_subtype = act_frm->subtype + 1;
+ cfg->next_af_subtype = act_frm->subtype + 1;
/* increase dwell time to wait for RESP frame */
af_params->dwell_time = WL_MED_DWELL_TIME;
break;
}
case P2P_PAF_PROVDIS_RSP: {
- wl->next_af_subtype = P2P_PAF_GON_REQ;
+ cfg->next_af_subtype = P2P_PAF_GON_REQ;
af_params->dwell_time = WL_MIN_DWELL_TIME;
#ifdef WL_CFG80211_SYNC_GON
config_af_params->extra_listen = false;
return err;
}
+#ifdef WL11U
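+/* Decide whether to send an action frame on a DFS channel (52~148):
+ * P2P action frames are blocked there, and other frames are allowed only
+ * if a 5GHz BSS from the scan results is operating on that channel;
+ * channels outside the DFS range are always allowed.
+ */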
+static bool
+wl_cfg80211_check_DFS_channel(struct bcm_cfg80211 *cfg, wl_af_params_t *af_params,
+ void *frame, u16 frame_len)
+{
+ struct wl_scan_results *bss_list;
+ struct wl_bss_info *bi = NULL;
+ bool result = false;
+ s32 i;
+ chanspec_t chanspec;
+
+ /* If DFS channel is 52~148, check to block it or not */
+ if (af_params &&
+ (af_params->channel >= 52 && af_params->channel <= 148)) {
+ if (!wl_cfgp2p_is_p2p_action(frame, frame_len)) {
+ bss_list = cfg->bss_list;
+ bi = next_bss(bss_list, bi);
+ for_each_bss(bss_list, bi, i) {
+ chanspec = wl_chspec_driver_to_host(bi->chanspec);
+ if (CHSPEC_IS5G(chanspec) &&
+ ((bi->ctl_ch ? bi->ctl_ch : CHSPEC_CHANNEL(chanspec))
+ == af_params->channel)) {
+ result = true; /* do not block the action frame */
+ break;
+ }
+ }
+ }
+ }
+ else {
+ result = true;
+ }
+
+ WL_DBG(("result=%s", result?"true":"false"));
+ return result;
+}
+#endif /* WL11U */
+
static bool
wl_cfg80211_send_action_frame(struct wiphy *wiphy, struct net_device *dev,
#ifdef WL11U
struct net_device *ndev = NULL;
#endif /* WL11U */
- struct wl_priv *wl = wiphy_priv(wiphy);
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
bool ack = false;
u8 category, action;
s32 tx_retry;
#ifdef VSDB
ulong off_chan_started_jiffies = 0;
#endif
- dhd_pub_t *dhd = (dhd_pub_t *)(wl->pub);
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+
+
+ /* Add the default dwell time
+ * Dwell time to stay off-channel to wait for a response action frame
+ * after transmitting a GO Negotiation action frame
+ */
+ af_params->dwell_time = WL_DWELL_TIME;
#ifdef WL11U
#if defined(WL_CFG80211_P2P_DEV_IF)
/* initialize variables */
tx_retry = 0;
- wl->next_af_subtype = P2P_PAF_SUBTYPE_INVALID;
+ cfg->next_af_subtype = P2P_PAF_SUBTYPE_INVALID;
config_af_params.max_tx_retry = WL_AF_TX_MAX_RETRY;
config_af_params.mpc_onoff = -1;
config_af_params.search_channel = false;
config_af_params.search_channel = true;
/* save next af subtype to cancel remaining dwell time */
- wl->next_af_subtype = action + 1;
+ cfg->next_af_subtype = action + 1;
af_params->dwell_time = WL_MED_DWELL_TIME;
} else if (action == P2PSD_ACTION_ID_GAS_IRESP ||
WL_DBG(("Unknown Frame: category 0x%x, action 0x%x\n",
category, action));
if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
- wl_clr_drv_status(wl, SENDING_ACT_FRM, dev);
+ wl_clr_drv_status(cfg, SENDING_ACT_FRM, dev);
return false;
}
}
/* validate channel and p2p ies */
if (config_af_params.search_channel && IS_P2P_SOCIAL(af_params->channel) &&
- wl_to_p2p_bss_saved_ie(wl, P2PAPI_BSSCFG_DEVICE).p2p_probe_req_ie_len) {
+ wl_to_p2p_bss_saved_ie(cfg, P2PAPI_BSSCFG_DEVICE).p2p_probe_req_ie_len) {
config_af_params.search_channel = true;
} else {
config_af_params.search_channel = false;
}
-
#ifdef WL11U
- if (ndev == wl_to_prmry_ndev(wl))
+ if (ndev == bcmcfg_to_prmry_ndev(cfg))
config_af_params.search_channel = false;
#endif /* WL11U */
#ifdef VSDB
/* if connecting on primary iface, sleep for a while before sending af tx for VSDB */
- if (wl_get_drv_status(wl, CONNECTING, wl_to_prmry_ndev(wl))) {
+ if (wl_get_drv_status(cfg, CONNECTING, bcmcfg_to_prmry_ndev(cfg))) {
OSL_SLEEP(50);
}
#endif
/* if scan is ongoing, abort current scan. */
- if (wl_get_drv_status_all(wl, SCANNING)) {
- wl_notify_escan_complete(wl, wl->escan_info.ndev, true, true);
+ if (wl_get_drv_status_all(cfg, SCANNING)) {
+ wl_notify_escan_complete(cfg, cfg->escan_info.ndev, true, true);
+ }
+
+#ifdef WL11U
+ /* handling DFS channel exceptions */
+ if (!wl_cfg80211_check_DFS_channel(cfg, af_params, action_frame->data, action_frame->len)) {
+ return false; /* the action frame was blocked */
}
+#endif /* WL11U */
/* set status and destination address before sending af */
- if (wl->next_af_subtype != P2P_PAF_SUBTYPE_INVALID) {
+ if (cfg->next_af_subtype != P2P_PAF_SUBTYPE_INVALID) {
/* set this status to cancel the remaining dwell time in rx process */
- wl_set_drv_status(wl, WAITING_NEXT_ACT_FRM, dev);
+ wl_set_drv_status(cfg, WAITING_NEXT_ACT_FRM, dev);
}
- wl_set_drv_status(wl, SENDING_ACT_FRM, dev);
- memcpy(wl->afx_hdl->tx_dst_addr.octet,
+ wl_set_drv_status(cfg, SENDING_ACT_FRM, dev);
+ memcpy(cfg->afx_hdl->tx_dst_addr.octet,
af_params->action_frame.da.octet,
- sizeof(wl->afx_hdl->tx_dst_addr.octet));
+ sizeof(cfg->afx_hdl->tx_dst_addr.octet));
/* save af_params for rx process */
- wl->afx_hdl->pending_tx_act_frm = af_params;
+ cfg->afx_hdl->pending_tx_act_frm = af_params;
/* search peer's channel */
if (config_af_params.search_channel) {
/* initialize afx_hdl */
- if (wl_cfgp2p_find_idx(wl, dev, &wl->afx_hdl->bssidx) != BCME_OK) {
+ if (wl_cfgp2p_find_idx(cfg, dev, &cfg->afx_hdl->bssidx) != BCME_OK) {
WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
goto exit;
}
- wl->afx_hdl->dev = dev;
- wl->afx_hdl->retry = 0;
- wl->afx_hdl->peer_chan = WL_INVALID;
+ cfg->afx_hdl->dev = dev;
+ cfg->afx_hdl->retry = 0;
+ cfg->afx_hdl->peer_chan = WL_INVALID;
- if (wl_cfg80211_af_searching_channel(wl, dev) == WL_INVALID) {
+ if (wl_cfg80211_af_searching_channel(cfg, dev) == WL_INVALID) {
WL_ERR(("couldn't find peer's channel.\n"));
wl_cfgp2p_print_actframe(true, action_frame->data, action_frame->len,
af_params->channel);
goto exit;
}
- wl_clr_drv_status(wl, SCANNING, wl->afx_hdl->dev);
+ wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev);
/*
* Abort scan even for VSDB scenarios. Scan gets aborted in firmware
* but after the check of piggyback algorithm.
* To take care of the current piggyback algo, let's abort the scan here itself.
*/
- wl_notify_escan_complete(wl, dev, true, true);
+ wl_notify_escan_complete(cfg, dev, true, true);
/* Suspend P2P discovery's search-listen to prevent it from
* starting a scan or changing the channel.
*/
- wl_cfgp2p_discover_enable_search(wl, false);
+ wl_cfgp2p_discover_enable_search(cfg, false);
/* update channel */
- af_params->channel = wl->afx_hdl->peer_chan;
+ af_params->channel = cfg->afx_hdl->peer_chan;
}
#ifdef VSDB
wl_cfgp2p_print_actframe(true, action_frame->data, action_frame->len, af_params->channel);
/* Now send a tx action frame */
- ack = wl_cfgp2p_tx_action_frame(wl, dev, af_params, bssidx) ? false : true;
+ ack = wl_cfgp2p_tx_action_frame(cfg, dev, af_params, bssidx) ? false : true;
/* if failed, retry it. tx_retry_max value is configured by .... */
while ((ack == false) && (tx_retry++ < config_af_params.max_tx_retry)) {
if (af_params->channel) {
if (jiffies_to_msecs(jiffies - off_chan_started_jiffies) >
OFF_CHAN_TIME_THRESHOLD_MS) {
- WL_AF_TX_KEEP_PRI_CONNECTION_VSDB(wl);
+ WL_AF_TX_KEEP_PRI_CONNECTION_VSDB(cfg);
off_chan_started_jiffies = jiffies;
} else
OSL_SLEEP(AF_RETRY_DELAY_TIME);
}
#endif /* VSDB */
- ack = wl_cfgp2p_tx_action_frame(wl, dev, af_params, bssidx) ?
+ ack = wl_cfgp2p_tx_action_frame(cfg, dev, af_params, bssidx) ?
false : true;
}
+
if (ack == false) {
WL_ERR(("Failed to send Action Frame(retry %d)\n", tx_retry));
}
+ WL_DBG(("Complete to send action frame\n"));
exit:
/* Clear SENDING_ACT_FRM after all sending af is done */
- wl_clr_drv_status(wl, SENDING_ACT_FRM, dev);
+ wl_clr_drv_status(cfg, SENDING_ACT_FRM, dev);
#ifdef WL_CFG80211_SYNC_GON
/* WAR: sometimes dongle does not keep the dwell time of 'actframe'.
* the dwell time, go to listen state again to get next action response frame.
*/
if (ack && config_af_params.extra_listen &&
- wl_get_drv_status_all(wl, WAITING_NEXT_ACT_FRM) &&
- wl->af_sent_channel == wl->afx_hdl->my_listen_chan) {
+ wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM) &&
+ cfg->af_sent_channel == cfg->afx_hdl->my_listen_chan) {
s32 extar_listen_time;
extar_listen_time = af_params->dwell_time -
- jiffies_to_msecs(jiffies - wl->af_tx_sent_jiffies);
+ jiffies_to_msecs(jiffies - cfg->af_tx_sent_jiffies);
if (extar_listen_time > 50) {
- wl_set_drv_status(wl, WAITING_NEXT_ACT_FRM_LISTEN, dev);
+ wl_set_drv_status(cfg, WAITING_NEXT_ACT_FRM_LISTEN, dev);
WL_DBG(("Wait more time! actual af time:%d,"
"calculated extar listen:%d\n",
af_params->dwell_time, extar_listen_time));
- if (wl_cfgp2p_discover_listen(wl, wl->af_sent_channel,
+ if (wl_cfgp2p_discover_listen(cfg, cfg->af_sent_channel,
extar_listen_time + 100) == BCME_OK) {
- wait_for_completion_timeout(&wl->wait_next_af,
+ wait_for_completion_timeout(&cfg->wait_next_af,
msecs_to_jiffies(extar_listen_time + 100 + 300));
}
- wl_clr_drv_status(wl, WAITING_NEXT_ACT_FRM_LISTEN, dev);
+ wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM_LISTEN, dev);
}
}
#endif /* WL_CFG80211_SYNC_GON */
- wl_clr_drv_status(wl, WAITING_NEXT_ACT_FRM, dev);
+ wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM, dev);
- if (wl->afx_hdl->pending_tx_act_frm)
- wl->afx_hdl->pending_tx_act_frm = NULL;
+ if (cfg->afx_hdl->pending_tx_act_frm)
+ cfg->afx_hdl->pending_tx_act_frm = NULL;
- WL_INFO(("-- sending Action Frame is %s, listen chan: %d\n",
- (ack) ? "Succeeded!!":"Failed!!", wl->afx_hdl->my_listen_chan));
+ WL_INFORM(("-- sending Action Frame is %s, listen chan: %d\n",
+ (ack) ? "Succeeded!!":"Failed!!", cfg->afx_hdl->my_listen_chan));
/* if all done, turn mpc on again */
#define MAX_NUM_OF_ASSOCIATED_DEV 64
static s32
-#if defined(WL_CFG80211_P2P_DEV_IF)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
wl_cfg80211_mgmt_tx(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev,
- struct ieee80211_channel *channel, bool offchan,
- unsigned int wait, const u8* buf, size_t len, bool no_cck,
- bool dont_wait_for_ack, u64 *cookie)
+ struct cfg80211_mgmt_tx_params *params, u64 *cookie)
#else
wl_cfg80211_mgmt_tx(struct wiphy *wiphy, bcm_struct_cfgdev *cfgdev,
struct ieee80211_channel *channel, bool offchan,
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 7, 0))
enum nl80211_channel_type channel_type,
- bool channel_type_valid, unsigned int wait,
- const u8* buf, size_t len,
+ bool channel_type_valid,
+#endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(3, 7, 0) */
+ unsigned int wait, const u8* buf, size_t len,
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)) || defined(WL_COMPAT_WIRELESS)
bool no_cck,
#endif
bool dont_wait_for_ack,
#endif
u64 *cookie)
-#endif /* WL_CFG80211_P2P_DEV_IF */
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) */
{
wl_action_frame_t *action_frame;
wl_af_params_t *af_params;
scb_val_t scb_val;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+ struct ieee80211_channel *channel = params->chan;
+ const u8 *buf = params->buf;
+ size_t len = params->len;
+#endif
const struct ieee80211_mgmt *mgmt;
- struct wl_priv *wl = wiphy_priv(wiphy);
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
struct net_device *dev = NULL;
s32 err = BCME_OK;
s32 bssidx = 0;
WL_DBG(("Enter \n"));
- dev = cfgdev_to_wlc_ndev(cfgdev, wl);
+ dev = cfgdev_to_wlc_ndev(cfgdev, cfg);
- /* find bssidx based on dev */
- if (wl_cfgp2p_find_idx(wl, dev, &bssidx) != BCME_OK) {
- WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
- return BCME_ERROR;
+ /* set bsscfg idx for iovar (wlan0: P2PAPI_BSSCFG_PRIMARY, p2p: P2PAPI_BSSCFG_DEVICE) */
+ if (discover_cfgdev(cfgdev, cfg)) {
+ bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+ }
+ else {
+ if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+ WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+ return BCME_ERROR;
+ }
}
- if (p2p_is_on(wl)) {
+
+ WL_DBG(("TX target bssidx=%d\n", bssidx));
+
+ if (p2p_is_on(cfg)) {
/* Suspend P2P discovery search-listen to prevent it from changing the
* channel.
*/
- if ((err = wl_cfgp2p_discover_enable_search(wl, false)) < 0) {
+ if ((err = wl_cfgp2p_discover_enable_search(cfg, false)) < 0) {
WL_ERR(("Can not disable discovery mode\n"));
return -EFAULT;
}
}
*cookie = 0;
- id = wl->send_action_id++;
+ id = cfg->send_action_id++;
if (id == 0)
- id = wl->send_action_id++;
+ id = cfg->send_action_id++;
*cookie = id;
mgmt = (const struct ieee80211_mgmt *)buf;
if (ieee80211_is_mgmt(mgmt->frame_control)) {
if (ieee80211_is_probe_resp(mgmt->frame_control)) {
s32 ie_offset = DOT11_MGMT_HDR_LEN + DOT11_BCN_PRB_FIXED_LEN;
s32 ie_len = len - ie_offset;
- if (dev == wl_to_prmry_ndev(wl))
- bssidx = wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE);
- wl_cfgp2p_set_management_ie(wl, dev, bssidx,
+ if ((dev == bcmcfg_to_prmry_ndev(cfg)) && cfg->p2p)
+ bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+ wl_cfgp2p_set_management_ie(cfg, dev, bssidx,
VNDR_IE_PRBRSP_FLAG, (u8 *)(buf + ie_offset), ie_len);
cfg80211_mgmt_tx_status(cfgdev, *cookie, buf, len, true, GFP_KERNEL);
goto exit;
* And previous off-channel action frame must be ended before new af tx.
*/
#ifndef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
- wl_notify_escan_complete(wl, dev, true, true);
+ wl_notify_escan_complete(cfg, dev, true, true);
#endif /* not WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
}
/* Add the channel */
af_params->channel =
ieee80211_frequency_to_channel(channel->center_freq);
-
/* Save listen_chan for searching common channel */
- wl->afx_hdl->peer_listen_chan = af_params->channel;
- WL_DBG(("channel from upper layer %d\n", wl->afx_hdl->peer_listen_chan));
+ cfg->afx_hdl->peer_listen_chan = af_params->channel;
+ WL_DBG(("channel from upper layer %d\n", cfg->afx_hdl->peer_listen_chan));
- /* Add the default dwell time
- * Dwell time to stay off-channel to wait for a response action frame
- * after transmitting an GO Negotiation action frame
- */
- af_params->dwell_time = WL_DWELL_TIME;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+ af_params->dwell_time = params->wait;
+#else
+ af_params->dwell_time = wait;
+#endif
memcpy(action_frame->data, &buf[DOT11_MGMT_HDR_LEN], action_frame->len);
struct net_device *dev,
struct bss_parameters *params)
{
+ s32 err = 0;
+ s32 ap_isolate = 0;
+#if defined(SUPPORT_HOSTAPD_BGN_MODE)
+ dhd_pub_t *dhd;
+ s32 gmode = -1, nmode = -1;
+ s32 gmode_prev = -1, nmode_prev = -1;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+#if defined(WL_ENABLE_P2P_IF)
+ if (cfg->p2p_net == dev)
+ dev = bcmcfg_to_prmry_ndev(cfg);
+#endif
+ dhd = (dhd_pub_t *)(cfg->pub);
+#endif /* SUPPORT_HOSTAPD_BGN_MODE */
+
if (params->use_cts_prot >= 0) {
}
}
if (params->basic_rates) {
+#if defined(SUPPORT_HOSTAPD_BGN_MODE)
+ switch ((int)(params->basic_rates[params->basic_rates_len -1])) {
+ case 22: /* B only , rate 11 */
+ gmode = 0;
+ nmode = 0;
+ break;
+ case 108: /* G only , rate 54 */
+ gmode = 2;
+ nmode = 0;
+ break;
+ default:
+ gmode = -1;
+ nmode = -1;
+ break;
+ }
+#endif /* SUPPORT_HOSTAPD_BGN_MODE */
}
if (params->ap_isolate >= 0) {
+ ap_isolate = params->ap_isolate;
+ err = wldev_iovar_setint(dev, "ap_isolate", ap_isolate);
+ if (unlikely(err))
+ {
+ WL_ERR(("set ap_isolate Error (%d)\n", err));
+ }
}
if (params->ht_opmode >= 0) {
+#if defined(SUPPORT_HOSTAPD_BGN_MODE)
+ nmode = 1;
+ gmode = 1;
+ } else {
+ nmode = 0;
+#endif /* SUPPORT_HOSTAPD_BGN_MODE */
+ }
+
+#if defined(SUPPORT_HOSTAPD_BGN_MODE)
+ err = wldev_iovar_getint(dev, "nmode", &nmode_prev);
+ if (unlikely(err)) {
+ WL_ERR(("error reading nmode (%d)\n", err));
+ }
+ if (nmode == nmode_prev) {
+ nmode = -1;
+ }
+ err = wldev_ioctl(dev, WLC_GET_GMODE, &gmode_prev, sizeof(gmode_prev), 0);
+ if (unlikely(err)) {
+ WL_ERR(("error reading gmode (%d)\n", err));
+ }
+ if (gmode == gmode_prev) {
+ gmode = -1;
+ }
+
+ if (((dhd->op_mode & DHD_FLAG_HOSTAP_MODE) == DHD_FLAG_HOSTAP_MODE) &&
+ ((gmode > -1) || (nmode > -1))) {
+ s32 val = 0;
+
+ err = wldev_ioctl(dev, WLC_DOWN, &val, sizeof(s32), true);
+ if (unlikely(err))
+ WL_ERR(("WLC_DOWN command failed:[%d]\n", err));
+
+ if (nmode > -1) {
+ err = wldev_iovar_setint(dev, "nmode", nmode);
+ if (unlikely(err))
+ WL_ERR(("nmode command failed:mode[%d]:err[%d]\n", nmode, err));
+ }
+
+ if (gmode > -1) {
+ err = wldev_ioctl(dev, WLC_SET_GMODE, &gmode, sizeof(s32), true);
+ if (unlikely(err))
+ WL_ERR(("WLC_SET_GMODE command failed:mode[%d]:err[%d]\n",
+ gmode, err));
+ }
+
+ val = 0;
+ err = wldev_ioctl(dev, WLC_UP, &val, sizeof(s32), true);
+ if (unlikely(err))
+ WL_ERR(("WLC_UP command failed:err[%d]\n", err));
+
}
+#endif /* SUPPORT_HOSTAPD_BGN_MODE */
return 0;
}
u32 band;
u32 bw_cap;
} param = {0, 0};
- struct wl_priv *wl = wiphy_priv(wiphy);
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+#ifdef CUSTOM_SET_CPUCORE
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+#endif /* CUSTOM_SET_CPUCORE */
- dev = ndev_to_wlc_ndev(dev, wl);
+ dev = ndev_to_wlc_ndev(dev, cfg);
_chan = ieee80211_frequency_to_channel(chan->center_freq);
printk("netdev_ifidx(%d), chan_type(%d) target channel(%d) \n",
dev->ifindex, channel_type, _chan);
if (chan->band == IEEE80211_BAND_5GHZ) {
param.band = WLC_BAND_5G;
err = wldev_iovar_getbuf(dev, "bw_cap", ¶m, sizeof(param),
- wl->ioctl_buf, WLC_IOCTL_SMLEN, &wl->ioctl_buf_sync);
+ cfg->ioctl_buf, WLC_IOCTL_SMLEN, &cfg->ioctl_buf_sync);
if (err) {
if (err != BCME_UNSUPPORTED) {
WL_ERR(("bw_cap failed, %d\n", err));
bw = WL_CHANSPEC_BW_40;
}
} else {
- if (WL_BW_CAP_80MHZ(wl->ioctl_buf[0]))
+ if (WL_BW_CAP_80MHZ(cfg->ioctl_buf[0]))
bw = WL_CHANSPEC_BW_80;
- else if (WL_BW_CAP_40MHZ(wl->ioctl_buf[0]))
+ else if (WL_BW_CAP_40MHZ(cfg->ioctl_buf[0]))
bw = WL_CHANSPEC_BW_40;
else
bw = WL_CHANSPEC_BW_20;
WL_ERR(("Invalid chanspec 0x%x\n", chspec));
err = BCME_ERROR;
}
+#ifdef CUSTOM_SET_CPUCORE
+ if (dhd->op_mode == DHD_FLAG_HOSTAP_MODE) {
+ WL_DBG(("SoftAP mode do not need to set cpucore\n"));
+ } else if ((dev == wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION)) &&
+ (chspec & WL_CHANSPEC_BW_80)) {
+ /* If GO is vht80 */
+ dhd->chan_isvht80 |= DHD_FLAG_P2P_MODE;
+ dhd_set_cpucore(dhd, TRUE);
+ }
+#endif /* CUSTOM_SET_CPUCORE */
return err;
}
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
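+/* Return the first net device that is currently in remain-on-channel
+ * state, or NULL if no interface is remaining on a channel.
+ */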
+struct net_device *
+wl_cfg80211_get_remain_on_channel_ndev(struct bcm_cfg80211 *cfg)
+{
+ struct net_info *_net_info, *next;
+ list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) {
+ if (_net_info->ndev &&
+ test_bit(WL_STATUS_REMAINING_ON_CHANNEL, &_net_info->sme_state))
+ return _net_info->ndev;
+ }
+ return NULL;
+}
+#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+
static s32
wl_validate_opensecurity(struct net_device *dev, s32 bssidx)
{
WL_ERR(("auth error %d\n", err));
return BCME_ERROR;
}
+#ifndef CUSTOMER_HW10 /* for WEP Support */
/* set wsec */
err = wldev_iovar_setint_bsscfg(dev, "wsec", 0, bssidx);
if (err < 0) {
WL_ERR(("wsec error %d\n", err));
return BCME_ERROR;
}
+#endif /* CUSTOMER_HW10 */
+
/* set upper-layer auth */
err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", WPA_AUTH_NONE, bssidx);
if (err < 0) {
len -= WPA_IE_TAG_FIXED_LEN;
/* check for multicast cipher suite */
if (len < WPA_SUITE_LEN) {
- WL_INFO(("no multicast cipher suite\n"));
+ WL_INFORM(("no multicast cipher suite\n"));
goto exit;
}
}
/* Check for unicast suite(s) */
if (len < WPA_IE_SUITE_COUNT_LEN) {
- WL_INFO(("no unicast suite\n"));
+ WL_INFORM(("no unicast suite\n"));
+ goto exit;
+ }
+ /* walk thru unicast cipher list and pick up what we recognize */
+ ucast = (wpa_suite_ucast_t *)&mcast[1];
+ count = ltoh16_ua(&ucast->count);
+ len -= WPA_IE_SUITE_COUNT_LEN;
+ for (i = 0; i < count && len >= WPA_SUITE_LEN;
+ i++, len -= WPA_SUITE_LEN) {
+ if (!bcmp(ucast->list[i].oui, WPA_OUI, WPA_OUI_LEN)) {
+ if (IS_WPA_CIPHER(ucast->list[i].type)) {
+ tmp = 0;
+ switch (ucast->list[i].type) {
+ case WPA_CIPHER_NONE:
+ tmp = 0;
+ break;
+ case WPA_CIPHER_WEP_40:
+ case WPA_CIPHER_WEP_104:
+ tmp = WEP_ENABLED;
+ break;
+ case WPA_CIPHER_TKIP:
+ tmp = TKIP_ENABLED;
+ break;
+ case WPA_CIPHER_AES_CCM:
+ tmp = AES_ENABLED;
+ break;
+ default:
+ WL_ERR(("No Security Info\n"));
+ }
+ pval |= tmp;
+ }
+ }
+ }
+ len -= (count - i) * WPA_SUITE_LEN;
+ /* Check for auth key management suite(s) */
+ if (len < WPA_IE_SUITE_COUNT_LEN) {
+ WL_INFORM((" no auth key mgmt suite\n"));
+ goto exit;
+ }
+ /* walk thru auth management suite list and pick up what we recognize */
+ mgmt = (wpa_suite_auth_key_mgmt_t *)&ucast->list[count];
+ count = ltoh16_ua(&mgmt->count);
+ len -= WPA_IE_SUITE_COUNT_LEN;
+ for (i = 0; i < count && len >= WPA_SUITE_LEN;
+ i++, len -= WPA_SUITE_LEN) {
+ if (!bcmp(mgmt->list[i].oui, WPA_OUI, WPA_OUI_LEN)) {
+ if (IS_WPA_AKM(mgmt->list[i].type)) {
+ tmp = 0;
+ switch (mgmt->list[i].type) {
+ case RSN_AKM_NONE:
+ tmp = WPA_AUTH_NONE;
+ break;
+ case RSN_AKM_UNSPECIFIED:
+ tmp = WPA_AUTH_UNSPECIFIED;
+ break;
+ case RSN_AKM_PSK:
+ tmp = WPA_AUTH_PSK;
+ break;
+ default:
+ WL_ERR(("No Key Mgmt Info\n"));
+ }
+ wpa_auth |= tmp;
+ }
+ }
+
+ }
+ /* FOR WPS , set SEC_OW_ENABLED */
+ wsec = (pval | gval | SES_OW_ENABLED);
+ /* set auth */
+ err = wldev_iovar_setint_bsscfg(dev, "auth", auth, bssidx);
+ if (err < 0) {
+ WL_ERR(("auth error %d\n", err));
+ return BCME_ERROR;
+ }
+ /* set wsec */
+ err = wldev_iovar_setint_bsscfg(dev, "wsec", wsec, bssidx);
+ if (err < 0) {
+ WL_ERR(("wsec error %d\n", err));
+ return BCME_ERROR;
+ }
+ /* set upper-layer auth */
+ err = wldev_iovar_setint_bsscfg(dev, "wpa_auth", wpa_auth, bssidx);
+ if (err < 0) {
+ WL_ERR(("wpa_auth error %d\n", err));
+ return BCME_ERROR;
+ }
+exit:
+ return 0;
+}
+
+#if defined(SUPPORT_SOFTAP_WPAWPA2_MIXED)
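+/* Map a WPA/RSN cipher suite type to the driver's wsec bitmask value. */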
+static u32 wl_get_cipher_type(uint8 type)
+{
+ u32 ret = 0;
+ switch (type) {
+ case WPA_CIPHER_NONE:
+ ret = 0;
+ break;
+ case WPA_CIPHER_WEP_40:
+ case WPA_CIPHER_WEP_104:
+ ret = WEP_ENABLED;
+ break;
+ case WPA_CIPHER_TKIP:
+ ret = TKIP_ENABLED;
+ break;
+ case WPA_CIPHER_AES_CCM:
+ ret = AES_ENABLED;
+ break;
+#ifdef BCMWAPI_WPI
+ case WAPI_CIPHER_SMS4:
+ ret = SMS4_ENABLED;
+ break;
+#endif
+ default:
+ WL_ERR(("No Security Info\n"));
+ }
+ return ret;
+}
+
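+/* Map an AKM suite type to the corresponding wpa_auth value. */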
+static u32 wl_get_suite_auth_key_mgmt_type(uint8 type)
+{
+ u32 ret = 0;
+ switch (type) {
+ case RSN_AKM_NONE:
+ ret = WPA_AUTH_NONE;
+ break;
+ case RSN_AKM_UNSPECIFIED:
+ ret = WPA_AUTH_UNSPECIFIED;
+ break;
+ case RSN_AKM_PSK:
+ ret = WPA_AUTH_PSK;
+ break;
+ default:
+ WL_ERR(("No Key Mgmt Info\n"));
+ }
+ return ret;
+}
+
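+/* Parse the WPA and WPA2 IEs supplied for a mixed-mode SoftAP and derive
+ * the combined wsec and wpa_auth settings from both IEs.
+ */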
+static s32
+wl_validate_wpaie_wpa2ie(struct net_device *dev, wpa_ie_fixed_t *wpaie,
+ bcm_tlv_t *wpa2ie, s32 bssidx)
+{
+ wpa_suite_mcast_t *mcast;
+ wpa_suite_ucast_t *ucast;
+ wpa_suite_auth_key_mgmt_t *mgmt;
+ u16 auth = 0; /* d11 open authentication */
+ u16 count;
+ s32 err = BCME_OK;
+ u32 wme_bss_disable;
+ u16 suite_count;
+ u8 rsn_cap[2];
+ s32 len = 0;
+ u32 i;
+ u32 wsec1, wsec2, wsec;
+ u32 pval = 0;
+ u32 gval = 0;
+ u32 wpa_auth = 0;
+ u32 wpa_auth1 = 0;
+ u32 wpa_auth2 = 0;
+ u8* ptmp;
+
+ if (wpaie == NULL || wpa2ie == NULL)
+ goto exit;
+
+ WL_DBG(("Enter \n"));
+ len = wpaie->length; /* value length */
+ len -= WPA_IE_TAG_FIXED_LEN;
+ /* check for multicast cipher suite */
+ if (len < WPA_SUITE_LEN) {
+ WL_INFORM(("no multicast cipher suite\n"));
+ goto exit;
+ }
+
+ /* pick up multicast cipher */
+ mcast = (wpa_suite_mcast_t *)&wpaie[1];
+ len -= WPA_SUITE_LEN;
+ if (!bcmp(mcast->oui, WPA_OUI, WPA_OUI_LEN)) {
+ if (IS_WPA_CIPHER(mcast->type)) {
+ gval |= wl_get_cipher_type(mcast->type);
+ }
+ }
+ WL_ERR(("\nwpa ie validate\n"));
+ WL_ERR(("wpa ie mcast cipher = 0x%X\n", gval));
+
+ /* Check for unicast suite(s) */
+ if (len < WPA_IE_SUITE_COUNT_LEN) {
+ WL_INFORM(("no unicast suite\n"));
goto exit;
}
+
/* walk thru unicast cipher list and pick up what we recognize */
ucast = (wpa_suite_ucast_t *)&mcast[1];
count = ltoh16_ua(&ucast->count);
i++, len -= WPA_SUITE_LEN) {
if (!bcmp(ucast->list[i].oui, WPA_OUI, WPA_OUI_LEN)) {
if (IS_WPA_CIPHER(ucast->list[i].type)) {
- tmp = 0;
- switch (ucast->list[i].type) {
- case WPA_CIPHER_NONE:
- tmp = 0;
- break;
- case WPA_CIPHER_WEP_40:
- case WPA_CIPHER_WEP_104:
- tmp = WEP_ENABLED;
- break;
- case WPA_CIPHER_TKIP:
- tmp = TKIP_ENABLED;
- break;
- case WPA_CIPHER_AES_CCM:
- tmp = AES_ENABLED;
- break;
- default:
- WL_ERR(("No Security Info\n"));
- }
- pval |= tmp;
+ pval |= wl_get_cipher_type(ucast->list[i].type);
}
}
}
+ WL_ERR(("wpa ie ucast count =%d, cipher = 0x%X\n", count, pval));
+
+	/* For WPS, set SES_OW_ENABLED */
+ wsec1 = (pval | gval | SES_OW_ENABLED);
+ WL_ERR(("wpa ie wsec = 0x%X\n", wsec1));
+
len -= (count - i) * WPA_SUITE_LEN;
/* Check for auth key management suite(s) */
if (len < WPA_IE_SUITE_COUNT_LEN) {
- WL_INFO((" no auth key mgmt suite\n"));
+ WL_INFORM((" no auth key mgmt suite\n"));
goto exit;
}
/* walk thru auth management suite list and pick up what we recognize */
i++, len -= WPA_SUITE_LEN) {
if (!bcmp(mgmt->list[i].oui, WPA_OUI, WPA_OUI_LEN)) {
if (IS_WPA_AKM(mgmt->list[i].type)) {
- tmp = 0;
- switch (mgmt->list[i].type) {
- case RSN_AKM_NONE:
- tmp = WPA_AUTH_NONE;
- break;
- case RSN_AKM_UNSPECIFIED:
- tmp = WPA_AUTH_UNSPECIFIED;
- break;
- case RSN_AKM_PSK:
- tmp = WPA_AUTH_PSK;
- break;
- default:
- WL_ERR(("No Key Mgmt Info\n"));
- }
- wpa_auth |= tmp;
+
+ wpa_auth1 |= wl_get_suite_auth_key_mgmt_type(mgmt->list[i].type);
}
}
}
+ WL_ERR(("wpa ie wpa_suite_auth_key_mgmt count=%d, key_mgmt = 0x%X\n", count, wpa_auth1));
+ WL_ERR(("\nwpa2 ie validate\n"));
+
+ pval = 0;
+ gval = 0;
+ len = wpa2ie->len;
+ /* check the mcast cipher */
+ mcast = (wpa_suite_mcast_t *)&wpa2ie->data[WPA2_VERSION_LEN];
+ ptmp = mcast->oui;
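+	/* the cipher suite type byte immediately follows the 3-byte OUI */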
+ gval = wl_get_cipher_type(ptmp[DOT11_OUI_LEN]);
+
+ WL_ERR(("wpa2 ie mcast cipher = 0x%X\n", gval));
+ if ((len -= WPA_SUITE_LEN) <= 0)
+ {
+ WL_ERR(("P:wpa2 ie len[%d]", len));
+ return BCME_BADLEN;
+ }
+
+ /* check the unicast cipher */
+ ucast = (wpa_suite_ucast_t *)&mcast[1];
+ suite_count = ltoh16_ua(&ucast->count);
+ WL_ERR((" WPA2 ucast cipher count=%d\n", suite_count));
+ pval |= wl_get_cipher_type(ucast->list[0].type);
+
+ if ((len -= (WPA_IE_SUITE_COUNT_LEN + (WPA_SUITE_LEN * suite_count))) <= 0)
+ return BCME_BADLEN;
+
+ WL_ERR(("wpa2 ie ucast cipher = 0x%X\n", pval));
+
/* FOR WPS , set SEC_OW_ENABLED */
- wsec = (pval | gval | SES_OW_ENABLED);
+ wsec2 = (pval | gval | SES_OW_ENABLED);
+ WL_ERR(("wpa2 ie wsec = 0x%X\n", wsec2));
+
+ /* check the AKM */
+ mgmt = (wpa_suite_auth_key_mgmt_t *)&ucast->list[suite_count];
+ suite_count = ltoh16_ua(&mgmt->count);
+ ptmp = (u8 *)&mgmt->list[0];
+ wpa_auth2 = wl_get_suite_auth_key_mgmt_type(ptmp[DOT11_OUI_LEN]);
+	WL_ERR(("wpa2 ie wpa_suite_auth_key_mgmt count=%d, key_mgmt = 0x%X\n", suite_count, wpa_auth2));
+
+ if ((len -= (WPA_IE_SUITE_COUNT_LEN + (WPA_SUITE_LEN * suite_count))) >= RSN_CAP_LEN) {
+ rsn_cap[0] = *(u8 *)&mgmt->list[suite_count];
+ rsn_cap[1] = *((u8 *)&mgmt->list[suite_count] + 1);
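+		/* keep WMM enabled only if the RSN capabilities advertise 16 PTK replay counters */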
+ if (rsn_cap[0] & (RSN_CAP_16_REPLAY_CNTRS << RSN_CAP_PTK_REPLAY_CNTR_SHIFT)) {
+ wme_bss_disable = 0;
+ } else {
+ wme_bss_disable = 1;
+ }
+ WL_DBG(("P:rsn_cap[0]=[0x%X]:wme_bss_disabled[%d]\n", rsn_cap[0], wme_bss_disable));
+
+ /* set wme_bss_disable to sync RSN Capabilities */
+ err = wldev_iovar_setint_bsscfg(dev, "wme_bss_disable", wme_bss_disable, bssidx);
+ if (err < 0) {
+ WL_ERR(("wme_bss_disable error %d\n", err));
+ return BCME_ERROR;
+ }
+ } else {
+		WL_DBG(("No RSN Capabilities present. remaining len %d\n", len));
+ }
+
+ wsec = (wsec1 | wsec2);
+ wpa_auth = (wpa_auth1 | wpa_auth2);
+ WL_ERR(("wpa_wpa2 wsec=0x%X wpa_auth=0x%X\n", wsec, wpa_auth));
+
/* set auth */
err = wldev_iovar_setint_bsscfg(dev, "auth", auth, bssidx);
if (err < 0) {
exit:
return 0;
}
+#endif /* SUPPORT_SOFTAP_WPAWPA2_MIXED */
static s32
wl_cfg80211_bcn_validate_sec(
u32 dev_role,
s32 bssidx)
{
- struct wl_priv *wl = wlcfg_drv_priv;
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
if (dev_role == NL80211_IFTYPE_P2P_GO && (ies->wpa2_ie)) {
/* For P2P GO, the sec type is WPA2-PSK */
WL_DBG(("SoftAP: validating security"));
/* If wpa2_ie or wpa_ie is present validate it */
+
+#if defined(SUPPORT_SOFTAP_WPAWPA2_MIXED)
+ if ((ies->wpa_ie != NULL && ies->wpa2_ie != NULL)) {
+ if (wl_validate_wpaie_wpa2ie(dev, ies->wpa_ie, ies->wpa2_ie, bssidx) < 0) {
+ cfg->ap_info->security_mode = false;
+ return BCME_ERROR;
+ }
+ }
+ else {
+#endif /* SUPPORT_SOFTAP_WPAWPA2_MIXED */
if ((ies->wpa2_ie || ies->wpa_ie) &&
((wl_validate_wpa2ie(dev, ies->wpa2_ie, bssidx) < 0 ||
wl_validate_wpaie(dev, ies->wpa_ie, bssidx) < 0))) {
- wl->ap_info->security_mode = false;
+ cfg->ap_info->security_mode = false;
return BCME_ERROR;
}
- wl->ap_info->security_mode = true;
- if (wl->ap_info->rsn_ie) {
- kfree(wl->ap_info->rsn_ie);
- wl->ap_info->rsn_ie = NULL;
+ cfg->ap_info->security_mode = true;
+ if (cfg->ap_info->rsn_ie) {
+ kfree(cfg->ap_info->rsn_ie);
+ cfg->ap_info->rsn_ie = NULL;
}
- if (wl->ap_info->wpa_ie) {
- kfree(wl->ap_info->wpa_ie);
- wl->ap_info->wpa_ie = NULL;
+ if (cfg->ap_info->wpa_ie) {
+ kfree(cfg->ap_info->wpa_ie);
+ cfg->ap_info->wpa_ie = NULL;
}
- if (wl->ap_info->wps_ie) {
- kfree(wl->ap_info->wps_ie);
- wl->ap_info->wps_ie = NULL;
+ if (cfg->ap_info->wps_ie) {
+ kfree(cfg->ap_info->wps_ie);
+ cfg->ap_info->wps_ie = NULL;
}
if (ies->wpa_ie != NULL) {
/* WPAIE */
- wl->ap_info->rsn_ie = NULL;
- wl->ap_info->wpa_ie = kmemdup(ies->wpa_ie,
+ cfg->ap_info->rsn_ie = NULL;
+ cfg->ap_info->wpa_ie = kmemdup(ies->wpa_ie,
ies->wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN,
GFP_KERNEL);
} else if (ies->wpa2_ie != NULL) {
/* RSNIE */
- wl->ap_info->wpa_ie = NULL;
- wl->ap_info->rsn_ie = kmemdup(ies->wpa2_ie,
+ cfg->ap_info->wpa_ie = NULL;
+ cfg->ap_info->rsn_ie = kmemdup(ies->wpa2_ie,
ies->wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN,
GFP_KERNEL);
}
-
+#if defined(SUPPORT_SOFTAP_WPAWPA2_MIXED)
+ }
+#endif /* SUPPORT_SOFTAP_WPAWPA2_MIXED */
if (!ies->wpa2_ie && !ies->wpa_ie) {
wl_validate_opensecurity(dev, bssidx);
- wl->ap_info->security_mode = false;
+ cfg->ap_info->security_mode = false;
}
if (ies->wps_ie) {
- wl->ap_info->wps_ie = kmemdup(ies->wps_ie, ies->wps_ie_len, GFP_KERNEL);
+ cfg->ap_info->wps_ie = kmemdup(ies->wps_ie, ies->wps_ie_len, GFP_KERNEL);
}
}
struct net_device *dev,
u32 dev_role, s32 bssidx)
{
- struct wl_priv *wl = wlcfg_drv_priv;
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
s32 err = BCME_OK;
WL_DBG(("interval (%d) \ndtim_period (%d) \n",
if ((info->ssid) && (info->ssid_len > 0) &&
(info->ssid_len <= 32)) {
- WL_DBG(("SSID (%s) len:%d \n", info->ssid, info->ssid_len));
+ WL_DBG(("SSID (%s) len:%zd \n", info->ssid, info->ssid_len));
if (dev_role == NL80211_IFTYPE_AP) {
/* Store the hostapd SSID */
- memset(wl->hostapd_ssid.SSID, 0x00, 32);
- memcpy(wl->hostapd_ssid.SSID, info->ssid, info->ssid_len);
- wl->hostapd_ssid.SSID_len = info->ssid_len;
+ memset(cfg->hostapd_ssid.SSID, 0x00, 32);
+ memcpy(cfg->hostapd_ssid.SSID, info->ssid, info->ssid_len);
+ cfg->hostapd_ssid.SSID_len = info->ssid_len;
} else {
/* P2P GO */
- memset(wl->p2p->ssid.SSID, 0x00, 32);
- memcpy(wl->p2p->ssid.SSID, info->ssid, info->ssid_len);
- wl->p2p->ssid.SSID_len = info->ssid_len;
+ memset(cfg->p2p->ssid.SSID, 0x00, 32);
+ memcpy(cfg->p2p->ssid.SSID, info->ssid, info->ssid_len);
+ cfg->p2p->ssid.SSID_len = info->ssid_len;
}
}
WL_DBG(("WPSIE in beacon \n"));
ies->wps_ie_len = ies->wps_ie->length + WPA_RSN_IE_TAG_FIXED_LEN;
} else {
- WL_ERR(("No WPSIE in beacon \n"));
+ WL_DBG(("No WPSIE in beacon \n"));
}
/* find the RSN_IE */
struct parsed_ies *ies,
u32 dev_role, s32 bssidx)
{
- struct wl_priv *wl = wlcfg_drv_priv;
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
struct wl_join_params join_params;
bool is_bssup = false;
s32 infra = 1;
goto exit;
}
- err = wldev_iovar_setbuf_bsscfg(dev, "ssid", &wl->p2p->ssid,
- sizeof(wl->p2p->ssid), wl->ioctl_buf, WLC_IOCTL_MAXLEN,
- bssidx, &wl->ioctl_buf_sync);
+ err = wldev_iovar_setbuf_bsscfg(dev, "ssid", &cfg->p2p->ssid,
+ sizeof(cfg->p2p->ssid), cfg->ioctl_buf, WLC_IOCTL_MAXLEN,
+ bssidx, &cfg->ioctl_buf_sync);
if (err < 0) {
WL_ERR(("GO SSID setting error %d\n", err));
goto exit;
}
/* Do abort scan before creating GO */
- wl_cfg80211_scan_abort(wl);
+ wl_cfg80211_scan_abort(cfg);
- if ((err = wl_cfgp2p_bss(wl, dev, bssidx, 1)) < 0) {
+ if ((err = wl_cfgp2p_bss(cfg, dev, bssidx, 1)) < 0) {
WL_ERR(("GO Bring up error %d\n", err));
goto exit;
}
} else
WL_DBG(("Bss is already up\n"));
} else if ((dev_role == NL80211_IFTYPE_AP) &&
- (wl_get_drv_status(wl, AP_CREATING, dev))) {
+ (wl_get_drv_status(cfg, AP_CREATING, dev))) {
/* Device role SoftAP */
err = wldev_ioctl(dev, WLC_DOWN, &ap, sizeof(s32), true);
if (err < 0) {
memset(&join_params, 0, sizeof(join_params));
/* join parameters starts with ssid */
join_params_size = sizeof(join_params.ssid);
- memcpy(join_params.ssid.SSID, wl->hostapd_ssid.SSID,
- wl->hostapd_ssid.SSID_len);
- join_params.ssid.SSID_len = htod32(wl->hostapd_ssid.SSID_len);
+ memcpy(join_params.ssid.SSID, cfg->hostapd_ssid.SSID,
+ cfg->hostapd_ssid.SSID_len);
+ join_params.ssid.SSID_len = htod32(cfg->hostapd_ssid.SSID_len);
/* create softap */
if ((err = wldev_ioctl(dev, WLC_SET_SSID, &join_params,
join_params_size, true)) == 0) {
WL_DBG(("SoftAP set SSID (%s) success\n", join_params.ssid.SSID));
- wl_clr_drv_status(wl, AP_CREATING, dev);
- wl_set_drv_status(wl, AP_CREATED, dev);
+ wl_clr_drv_status(cfg, AP_CREATING, dev);
+ wl_set_drv_status(cfg, AP_CREATED, dev);
}
}
struct parsed_ies *ies)
{
struct parsed_ies prb_ies;
- struct wl_priv *wl = wlcfg_drv_priv;
- dhd_pub_t *dhd = (dhd_pub_t *)(wl->pub);
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
u8 *vndr = NULL;
u32 vndr_ie_len = 0;
s32 err = BCME_OK;
struct cfg80211_beacon_data *info,
s32 bssidx)
{
- struct wl_priv *wl = wlcfg_drv_priv;
- dhd_pub_t *dhd = (dhd_pub_t *)(wl->pub);
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
u8 *vndr = NULL;
u32 vndr_ie_len = 0;
s32 err = BCME_OK;
/* Set Beacon IEs to FW */
- if ((err = wl_cfgp2p_set_management_ie(wl, dev, bssidx,
+ if ((err = wl_cfgp2p_set_management_ie(cfg, dev, bssidx,
VNDR_IE_BEACON_FLAG, (u8 *)info->tail,
info->tail_len)) < 0) {
WL_ERR(("Set Beacon IE Failed \n"));
}
/* Set Probe Response IEs to FW */
- if ((err = wl_cfgp2p_set_management_ie(wl, dev, bssidx,
+ if ((err = wl_cfgp2p_set_management_ie(cfg, dev, bssidx,
VNDR_IE_PRBRSP_FLAG, vndr, vndr_ie_len)) < 0) {
WL_ERR(("Set Probe Resp IE Failed \n"));
} else {
s32 bssidx)
{
bool update_bss = 0;
- struct wl_priv *wl = wlcfg_drv_priv;
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
if (ies->wps_ie) {
- if (wl->ap_info->wps_ie &&
- memcmp(wl->ap_info->wps_ie, ies->wps_ie, ies->wps_ie_len)) {
+ if (cfg->ap_info->wps_ie &&
+ memcmp(cfg->ap_info->wps_ie, ies->wps_ie, ies->wps_ie_len)) {
WL_DBG((" WPS IE is changed\n"));
- kfree(wl->ap_info->wps_ie);
- wl->ap_info->wps_ie = kmemdup(ies->wps_ie, ies->wps_ie_len, GFP_KERNEL);
- } else if (wl->ap_info->wps_ie == NULL) {
+ kfree(cfg->ap_info->wps_ie);
+ cfg->ap_info->wps_ie = kmemdup(ies->wps_ie, ies->wps_ie_len, GFP_KERNEL);
+ } else if (cfg->ap_info->wps_ie == NULL) {
WL_DBG((" WPS IE is added\n"));
- wl->ap_info->wps_ie = kmemdup(ies->wps_ie, ies->wps_ie_len, GFP_KERNEL);
+ cfg->ap_info->wps_ie = kmemdup(ies->wps_ie, ies->wps_ie_len, GFP_KERNEL);
+ }
+
+#if defined(SUPPORT_SOFTAP_WPAWPA2_MIXED)
+ if (ies->wpa_ie != NULL && ies->wpa2_ie != NULL) {
+		WL_ERR(("update bss - wpa_ie and wpa2_ie are not null\n"));
+ if (!cfg->ap_info->security_mode) {
+ /* change from open mode to security mode */
+ update_bss = true;
+ cfg->ap_info->wpa_ie =
+ kmemdup(ies->wpa_ie,
+ ies->wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN,
+ GFP_KERNEL);
+ cfg->ap_info->rsn_ie =
+ kmemdup(ies->wpa2_ie,
+ ies->wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN,
+ GFP_KERNEL);
+ } else {
+ /* change from (WPA or WPA2 or WPA/WPA2) to WPA/WPA2 mixed mode */
+ if (cfg->ap_info->wpa_ie) {
+ if (memcmp(cfg->ap_info->wpa_ie,
+ ies->wpa_ie, ies->wpa_ie->length +
+ WPA_RSN_IE_TAG_FIXED_LEN)) {
+ kfree(cfg->ap_info->wpa_ie);
+ update_bss = true;
+ cfg->ap_info->wpa_ie = kmemdup(ies->wpa_ie,
+ ies->wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN,
+ GFP_KERNEL);
+ }
+ }
+ else {
+ update_bss = true;
+ cfg->ap_info->wpa_ie =
+ kmemdup(ies->wpa_ie,
+ ies->wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN,
+ GFP_KERNEL);
+ }
+ if (cfg->ap_info->rsn_ie) {
+ if (memcmp(cfg->ap_info->rsn_ie,
+ ies->wpa2_ie,
+ ies->wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN)) {
+ update_bss = true;
+ kfree(cfg->ap_info->rsn_ie);
+ cfg->ap_info->rsn_ie =
+ kmemdup(ies->wpa2_ie,
+ ies->wpa2_ie->len +
+ WPA_RSN_IE_TAG_FIXED_LEN,
+ GFP_KERNEL);
+ }
+ }
+ else {
+ update_bss = true;
+ cfg->ap_info->rsn_ie =
+ kmemdup(ies->wpa2_ie,
+ ies->wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN,
+ GFP_KERNEL);
+ }
+ }
+ WL_ERR(("update_bss=%d\n", update_bss));
+ if (update_bss) {
+ cfg->ap_info->security_mode = true;
+ wl_cfgp2p_bss(cfg, dev, bssidx, 0);
+ if (wl_validate_wpaie_wpa2ie(dev, ies->wpa_ie,
+ ies->wpa2_ie, bssidx) < 0) {
+ return BCME_ERROR;
+ }
+ wl_cfgp2p_bss(cfg, dev, bssidx, 1);
+ }
+
}
+ else
+#endif /* SUPPORT_SOFTAP_WPAWPA2_MIXED */
if ((ies->wpa_ie != NULL || ies->wpa2_ie != NULL)) {
- if (!wl->ap_info->security_mode) {
+ if (!cfg->ap_info->security_mode) {
/* change from open mode to security mode */
update_bss = true;
if (ies->wpa_ie != NULL) {
- wl->ap_info->wpa_ie = kmemdup(ies->wpa_ie,
+ cfg->ap_info->wpa_ie = kmemdup(ies->wpa_ie,
ies->wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN,
GFP_KERNEL);
} else {
- wl->ap_info->rsn_ie = kmemdup(ies->wpa2_ie,
+ cfg->ap_info->rsn_ie = kmemdup(ies->wpa2_ie,
ies->wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN,
GFP_KERNEL);
}
- } else if (wl->ap_info->wpa_ie) {
+ } else if (cfg->ap_info->wpa_ie) {
/* change from WPA2 mode to WPA mode */
if (ies->wpa_ie != NULL) {
update_bss = true;
- kfree(wl->ap_info->rsn_ie);
- wl->ap_info->rsn_ie = NULL;
- wl->ap_info->wpa_ie = kmemdup(ies->wpa_ie,
+ kfree(cfg->ap_info->rsn_ie);
+ cfg->ap_info->rsn_ie = NULL;
+ cfg->ap_info->wpa_ie = kmemdup(ies->wpa_ie,
ies->wpa_ie->length + WPA_RSN_IE_TAG_FIXED_LEN,
GFP_KERNEL);
- } else if (memcmp(wl->ap_info->rsn_ie,
+ } else if (memcmp(cfg->ap_info->rsn_ie,
ies->wpa2_ie, ies->wpa2_ie->len
+ WPA_RSN_IE_TAG_FIXED_LEN)) {
update_bss = true;
- kfree(wl->ap_info->rsn_ie);
- wl->ap_info->rsn_ie = kmemdup(ies->wpa2_ie,
+ kfree(cfg->ap_info->rsn_ie);
+ cfg->ap_info->rsn_ie = kmemdup(ies->wpa2_ie,
ies->wpa2_ie->len + WPA_RSN_IE_TAG_FIXED_LEN,
GFP_KERNEL);
- wl->ap_info->wpa_ie = NULL;
+ cfg->ap_info->wpa_ie = NULL;
}
}
if (update_bss) {
- wl->ap_info->security_mode = true;
- wl_cfgp2p_bss(wl, dev, bssidx, 0);
+ cfg->ap_info->security_mode = true;
+ wl_cfgp2p_bss(cfg, dev, bssidx, 0);
if (wl_validate_wpa2ie(dev, ies->wpa2_ie, bssidx) < 0 ||
wl_validate_wpaie(dev, ies->wpa_ie, bssidx) < 0) {
return BCME_ERROR;
}
- wl_cfgp2p_bss(wl, dev, bssidx, 1);
+ wl_cfgp2p_bss(cfg, dev, bssidx, 1);
}
}
} else {
u8* mac_addr)
{
struct net_device *dev;
- struct wl_priv *wl = wiphy_priv(wiphy);
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
scb_val_t scb_val;
s8 eabuf[ETHER_ADDR_STR_LEN];
int err;
return 0;
}
- dev = ndev_to_wlc_ndev(ndev, wl);
+ dev = ndev_to_wlc_ndev(ndev, cfg);
- if (p2p_is_on(wl)) {
+ if (p2p_is_on(cfg)) {
/* Suspend P2P discovery search-listen to prevent it from changing the
* channel.
*/
- if ((wl_cfgp2p_discover_enable_search(wl, false)) < 0) {
+ if ((wl_cfgp2p_discover_enable_search(cfg, false)) < 0) {
WL_ERR(("Can not disable discovery mode\n"));
return -EFAULT;
}
return 0;
}
+
+static s32
+wl_cfg80211_change_station(
+ struct wiphy *wiphy,
+ struct net_device *dev,
+ u8 *mac,
+ struct station_parameters *params)
+{
+ int err;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct net_device *primary_ndev = bcmcfg_to_prmry_ndev(cfg);
+
+ /* Processing only authorize/de-authorize flag for now */
+ if (!(params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED)))
+ return -ENOTSUPP;
+
+ if (!(params->sta_flags_set & BIT(NL80211_STA_FLAG_AUTHORIZED))) {
+ err = wldev_ioctl(primary_ndev, WLC_SCB_DEAUTHORIZE, mac, ETH_ALEN, true);
+ if (err)
+ WL_ERR(("WLC_SCB_DEAUTHORIZE error (%d)\n", err));
+ return err;
+ }
+
+ err = wldev_ioctl(primary_ndev, WLC_SCB_AUTHORIZE, mac, ETH_ALEN, true);
+ if (err)
+ WL_ERR(("WLC_SCB_AUTHORIZE error (%d)\n", err));
+ return err;
+}
#endif /* WL_SUPPORT_BACKPORTED_KPATCHES || KERNEL_VER >= KERNEL_VERSION(3, 2, 0)) */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || defined(WL_COMPAT_WIRELESS)
struct net_device *dev,
struct cfg80211_ap_settings *info)
{
- struct wl_priv *wl = wiphy_priv(wiphy);
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
s32 err = BCME_OK;
struct parsed_ies ies;
s32 bssidx = 0;
u32 dev_role = 0;
WL_DBG(("Enter \n"));
- if (dev == wl_to_prmry_ndev(wl)) {
+ if (dev == bcmcfg_to_prmry_ndev(cfg)) {
WL_DBG(("Start AP req on primary iface: Softap\n"));
dev_role = NL80211_IFTYPE_AP;
}
#if defined(WL_ENABLE_P2P_IF)
- else if (dev == wl->p2p_net) {
+ else if (dev == cfg->p2p_net) {
/* Group Add request on p2p0 */
WL_DBG(("Start AP req on P2P iface: GO\n"));
- dev = wl_to_prmry_ndev(wl);
+ dev = bcmcfg_to_prmry_ndev(cfg);
dev_role = NL80211_IFTYPE_P2P_GO;
}
#endif /* WL_ENABLE_P2P_IF */
- if (wl_cfgp2p_find_idx(wl, dev, &bssidx) != BCME_OK) {
+ if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
return BCME_ERROR;
}
- if (p2p_is_on(wl) &&
- (bssidx == wl_to_p2p_bss_bssidx(wl,
+ if (p2p_is_on(cfg) &&
+ (bssidx == wl_to_p2p_bss_bssidx(cfg,
P2PAPI_BSSCFG_CONNECTION))) {
dev_role = NL80211_IFTYPE_P2P_GO;
WL_DBG(("Start AP req on P2P connection iface\n"));
}
- if (!check_dev_role_integrity(wl, dev_role))
+ if (!check_dev_role_integrity(cfg, dev_role))
goto fail;
-#if defined(WL_CFG80211_P2P_DEV_IF) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
+#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) && !defined(WL_COMPAT_WIRELESS))
if ((err = wl_cfg80211_set_channel(wiphy, dev,
dev->ieee80211_ptr->preset_chandef.chan,
NL80211_CHAN_HT20) < 0)) {
WL_ERR(("Set channel failed \n"));
goto fail;
}
-#endif /* WL_CFG80211_P2P_DEV_IF || (LINUX_VERSION >= VERSION(3, 6, 0)) */
+#endif /* (LINUX_VERSION >= VERSION(3, 6, 0)) && !WL_COMPAT_WIRELESS */
if ((err = wl_cfg80211_bcn_set_params(info, dev,
dev_role, bssidx)) < 0) {
WL_DBG(("** AP/GO Created **\n"));
+#ifdef WL_CFG80211_ACL
+	/* Enforce Admission Control. */
+ if ((err = wl_cfg80211_set_mac_acl(wiphy, dev, info->acl)) < 0) {
+ WL_ERR(("Set ACL failed\n"));
+ }
+#endif /* WL_CFG80211_ACL */
+
/* Set IEs to FW */
if ((err = wl_cfg80211_set_ies(dev, &info->beacon, bssidx)) < 0)
WL_ERR(("Set IEs failed \n"));
+ /* Enable Probe Req filter, WPS-AP certification 4.2.13 */
+ if ((dev_role == NL80211_IFTYPE_AP) && (ies.wps_ie != NULL)) {
+ bool pbc = 0;
+ wl_validate_wps_ie((char *) ies.wps_ie, ies.wps_ie_len, &pbc);
+ if (pbc) {
+ WL_DBG(("set WLC_E_PROBREQ_MSG\n"));
+ wl_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, true);
+ }
+ }
+
fail:
if (err) {
WL_ERR(("ADD/SET beacon failed\n"));
int infra = 0;
int ap = 0;
s32 bssidx = 0;
- struct wl_priv *wl = wiphy_priv(wiphy);
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
WL_DBG(("Enter \n"));
- if (dev == wl_to_prmry_ndev(wl)) {
+ if (dev == bcmcfg_to_prmry_ndev(cfg)) {
dev_role = NL80211_IFTYPE_AP;
}
#if defined(WL_ENABLE_P2P_IF)
- else if (dev == wl->p2p_net) {
+ else if (dev == cfg->p2p_net) {
/* Group Add request on p2p0 */
- dev = wl_to_prmry_ndev(wl);
+ dev = bcmcfg_to_prmry_ndev(cfg);
dev_role = NL80211_IFTYPE_P2P_GO;
}
#endif /* WL_ENABLE_P2P_IF */
- if (wl_cfgp2p_find_idx(wl, dev, &bssidx) != BCME_OK) {
+ if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
return BCME_ERROR;
}
- if (p2p_is_on(wl) &&
- (bssidx == wl_to_p2p_bss_bssidx(wl,
+ if (p2p_is_on(cfg) &&
+ (bssidx == wl_to_p2p_bss_bssidx(cfg,
P2PAPI_BSSCFG_CONNECTION))) {
dev_role = NL80211_IFTYPE_P2P_GO;
}
- if (!check_dev_role_integrity(wl, dev_role))
+ if (!check_dev_role_integrity(cfg, dev_role))
goto exit;
if (dev_role == NL80211_IFTYPE_AP) {
goto exit;
}
- wl_clr_drv_status(wl, AP_CREATED, dev);
+ wl_clr_drv_status(cfg, AP_CREATED, dev);
/* Turn on the MPC */
wldev_iovar_setint(dev, "mpc", 1);
+ if (cfg->ap_info) {
+ kfree(cfg->ap_info->wpa_ie);
+ kfree(cfg->ap_info->rsn_ie);
+ kfree(cfg->ap_info->wps_ie);
+ kfree(cfg->ap_info);
+ cfg->ap_info = NULL;
+ }
} else {
WL_DBG(("Stopping P2P GO \n"));
+ DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE((dhd_pub_t *)(cfg->pub),
+ DHD_EVENT_TIMEOUT_MS*3);
+ DHD_OS_WAKE_LOCK_TIMEOUT((dhd_pub_t *)(cfg->pub));
}
exit:
struct cfg80211_beacon_data *info)
{
s32 err = BCME_OK;
- struct wl_priv *wl = wiphy_priv(wiphy);
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
struct parsed_ies ies;
u32 dev_role = 0;
s32 bssidx = 0;
+ bool pbc = 0;
WL_DBG(("Enter \n"));
- if (dev == wl_to_prmry_ndev(wl)) {
+ if (dev == bcmcfg_to_prmry_ndev(cfg)) {
dev_role = NL80211_IFTYPE_AP;
}
#if defined(WL_ENABLE_P2P_IF)
- else if (dev == wl->p2p_net) {
+ else if (dev == cfg->p2p_net) {
/* Group Add request on p2p0 */
- dev = wl_to_prmry_ndev(wl);
+ dev = bcmcfg_to_prmry_ndev(cfg);
dev_role = NL80211_IFTYPE_P2P_GO;
}
#endif /* WL_ENABLE_P2P_IF */
- if (wl_cfgp2p_find_idx(wl, dev, &bssidx) != BCME_OK) {
+ if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
return BCME_ERROR;
}
- if (p2p_is_on(wl) &&
- (bssidx == wl_to_p2p_bss_bssidx(wl,
+ if (p2p_is_on(cfg) &&
+ (bssidx == wl_to_p2p_bss_bssidx(cfg,
P2PAPI_BSSCFG_CONNECTION))) {
dev_role = NL80211_IFTYPE_P2P_GO;
}
- if (!check_dev_role_integrity(wl, dev_role))
+ if (!check_dev_role_integrity(cfg, dev_role))
+ goto fail;
+
+ if ((dev_role == NL80211_IFTYPE_P2P_GO) && (cfg->p2p_wdev == NULL)) {
+		WL_ERR(("P2P is already down!\n"));
+ err = BCME_ERROR;
goto fail;
+ }
/* Parse IEs */
if ((err = wl_cfg80211_parse_ap_ies(dev, info, &ies)) < 0) {
err = -EINVAL;
goto fail;
}
+ /* Enable Probe Req filter, WPS-AP certification 4.2.13 */
+ if ((dev_role == NL80211_IFTYPE_AP) && (ies.wps_ie != NULL)) {
+ wl_validate_wps_ie((char *) ies.wps_ie, ies.wps_ie_len, &pbc);
+		WL_DBG((" WPS AP, wps_ie exists pbc=%d\n", pbc));
+ if (pbc)
+ wl_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, true);
+ else
+ wl_add_remove_eventmsg(dev, WLC_E_PROBREQ_MSG, false);
+ }
}
fail:
return err;
}
-#else /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0) */
+#else
static s32
wl_cfg80211_add_set_beacon(struct wiphy *wiphy, struct net_device *dev,
struct beacon_parameters *info)
{
s32 err = BCME_OK;
- struct wl_priv *wl = wiphy_priv(wiphy);
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
s32 ie_offset = 0;
s32 bssidx = 0;
u32 dev_role = NL80211_IFTYPE_AP;
struct parsed_ies ies;
bcm_tlv_t *ssid_ie;
bool pbc = 0;
-
WL_DBG(("interval (%d) dtim_period (%d) head_len (%d) tail_len (%d)\n",
info->interval, info->dtim_period, info->head_len, info->tail_len));
- if (dev == wl_to_prmry_ndev(wl)) {
+ if (dev == bcmcfg_to_prmry_ndev(cfg)) {
dev_role = NL80211_IFTYPE_AP;
}
#if defined(WL_ENABLE_P2P_IF)
- else if (dev == wl->p2p_net) {
+ else if (dev == cfg->p2p_net) {
/* Group Add request on p2p0 */
- dev = wl_to_prmry_ndev(wl);
+ dev = bcmcfg_to_prmry_ndev(cfg);
dev_role = NL80211_IFTYPE_P2P_GO;
}
#endif /* WL_ENABLE_P2P_IF */
- if (wl_cfgp2p_find_idx(wl, dev, &bssidx) != BCME_OK) {
+ if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
return BCME_ERROR;
}
- if (p2p_is_on(wl) &&
- (bssidx == wl_to_p2p_bss_bssidx(wl,
+ if (p2p_is_on(cfg) &&
+ (bssidx == wl_to_p2p_bss_bssidx(cfg,
P2PAPI_BSSCFG_CONNECTION))) {
dev_role = NL80211_IFTYPE_P2P_GO;
}
- if (!check_dev_role_integrity(wl, dev_role))
+ if (!check_dev_role_integrity(cfg, dev_role))
+ goto fail;
+
+ if ((dev_role == NL80211_IFTYPE_P2P_GO) && (cfg->p2p_wdev == NULL)) {
+		WL_ERR(("P2P is already down!\n"));
+ err = BCME_ERROR;
goto fail;
+ }
ie_offset = DOT11_MGMT_HDR_LEN + DOT11_BCN_PRB_FIXED_LEN;
/* find the SSID */
DOT11_MNG_SSID_ID)) != NULL) {
if (dev_role == NL80211_IFTYPE_AP) {
/* Store the hostapd SSID */
- memset(&wl->hostapd_ssid.SSID[0], 0x00, 32);
- memcpy(&wl->hostapd_ssid.SSID[0], ssid_ie->data, ssid_ie->len);
- wl->hostapd_ssid.SSID_len = ssid_ie->len;
+ memset(&cfg->hostapd_ssid.SSID[0], 0x00, 32);
+ memcpy(&cfg->hostapd_ssid.SSID[0], ssid_ie->data, ssid_ie->len);
+ cfg->hostapd_ssid.SSID_len = ssid_ie->len;
} else {
/* P2P GO */
- memset(&wl->p2p->ssid.SSID[0], 0x00, 32);
- memcpy(wl->p2p->ssid.SSID, ssid_ie->data, ssid_ie->len);
- wl->p2p->ssid.SSID_len = ssid_ie->len;
+ memset(&cfg->p2p->ssid.SSID[0], 0x00, 32);
+ memcpy(cfg->p2p->ssid.SSID, ssid_ie->data, ssid_ie->len);
+ cfg->p2p->ssid.SSID_len = ssid_ie->len;
}
}
goto fail;
}
- if (wl_cfgp2p_set_management_ie(wl, dev, bssidx,
+ if (wl_cfgp2p_set_management_ie(cfg, dev, bssidx,
VNDR_IE_BEACON_FLAG, (u8 *)info->tail,
info->tail_len) < 0) {
WL_ERR(("Beacon set IEs failed \n"));
} else {
WL_DBG(("Applied Vndr IEs for Beacon \n"));
}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
+ if (wl_cfgp2p_set_management_ie(cfg, dev, bssidx,
+ VNDR_IE_PRBRSP_FLAG, (u8 *)info->proberesp_ies,
+ info->proberesp_ies_len) < 0) {
+ WL_ERR(("ProbeRsp set IEs failed \n"));
+ goto fail;
+ } else {
+ WL_DBG(("Applied Vndr IEs for ProbeRsp \n"));
+ }
+#endif
+
if (!wl_cfgp2p_bss_isup(dev, bssidx) &&
(wl_cfg80211_bcn_validate_sec(dev, &ies, dev_role, bssidx) < 0))
{
goto fail;
}
- if (wl_get_drv_status(wl, AP_CREATED, dev)) {
+ if (wl_get_drv_status(cfg, AP_CREATED, dev)) {
/* Soft AP already running. Update changed params */
if (wl_cfg80211_hostapd_sec(dev, &ies, bssidx) < 0) {
WL_ERR(("Hostapd update sec failed \n"));
#define PNO_TIME 30
#define PNO_REPEAT 4
#define PNO_FREQ_EXPO_MAX 2
-int wl_cfg80211_sched_scan_start(struct wiphy *wiphy,
+static int
+wl_cfg80211_sched_scan_start(struct wiphy *wiphy,
struct net_device *dev,
struct cfg80211_sched_scan_request *request)
{
int pno_repeat = PNO_REPEAT;
int pno_freq_expo_max = PNO_FREQ_EXPO_MAX;
wlc_ssid_t ssids_local[MAX_PFN_LIST_COUNT];
- struct wl_priv *wl = wiphy_priv(wiphy);
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
struct cfg80211_ssid *ssid = NULL;
int ssid_count = 0;
int i;
WL_ERR(("PNO setup failed!! ret=%d \n", ret));
return -EINVAL;
}
- wl->sched_scan_req = request;
+ cfg->sched_scan_req = request;
} else {
return -EINVAL;
}
return 0;
}
-int wl_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev)
+static int
+wl_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev)
+{
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+
+ WL_DBG(("Enter \n"));
+ WL_PNO((">>> SCHED SCAN STOP\n"));
+
+ if (dhd_dev_pno_stop_for_ssid(dev) < 0)
+ WL_ERR(("PNO Stop for SSID failed"));
+
+ if (cfg->scan_request && cfg->sched_scan_running) {
+ WL_PNO((">>> Sched scan running. Aborting it..\n"));
+ wl_notify_escan_complete(cfg, dev, true, true);
+ }
+
+ cfg->sched_scan_req = NULL;
+ cfg->sched_scan_running = FALSE;
+
+ return 0;
+}
+#endif /* WL_SCHED_SCAN */
+
+#ifdef WL_SUPPORT_ACS
+/*
+ * Currently the dump_obss IOVAR returns its output as a string, so we need to
+ * parse the output buffer in an unoptimized way. Going forward, if we get the
+ * IOVAR output in binary format, this method can be optimized.
+ */
+static int wl_parse_dump_obss(char *buf, struct wl_dump_survey *survey)
+{
+ int i;
+ char *token;
+ char delim[] = " \n";
+
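+	/* each counter sits a fixed number of whitespace-separated tokens after its label */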
+ token = strsep(&buf, delim);
+ while (token != NULL) {
+ if (!strcmp(token, "OBSS")) {
+ for (i = 0; i < OBSS_TOKEN_IDX; i++)
+ token = strsep(&buf, delim);
+ survey->obss = simple_strtoul(token, NULL, 10);
+ }
+
+ if (!strcmp(token, "IBSS")) {
+ for (i = 0; i < IBSS_TOKEN_IDX; i++)
+ token = strsep(&buf, delim);
+ survey->ibss = simple_strtoul(token, NULL, 10);
+ }
+
+ if (!strcmp(token, "TXDur")) {
+ for (i = 0; i < TX_TOKEN_IDX; i++)
+ token = strsep(&buf, delim);
+ survey->tx = simple_strtoul(token, NULL, 10);
+ }
+
+ if (!strcmp(token, "Category")) {
+ for (i = 0; i < CTG_TOKEN_IDX; i++)
+ token = strsep(&buf, delim);
+ survey->no_ctg = simple_strtoul(token, NULL, 10);
+ }
+
+ if (!strcmp(token, "Packet")) {
+ for (i = 0; i < PKT_TOKEN_IDX; i++)
+ token = strsep(&buf, delim);
+ survey->no_pckt = simple_strtoul(token, NULL, 10);
+ }
+
+ if (!strcmp(token, "Opp(time):")) {
+ for (i = 0; i < IDLE_TOKEN_IDX; i++)
+ token = strsep(&buf, delim);
+ survey->idle = simple_strtoul(token, NULL, 10);
+ }
+
+ token = strsep(&buf, delim);
+ }
+
+ return 0;
+}
+
+static int wl_dump_obss(struct net_device *ndev, cca_msrmnt_query req,
+ struct wl_dump_survey *survey)
+{
+ cca_stats_n_flags *results;
+ char *buf;
+ int retry, err;
+
+ buf = kzalloc(sizeof(char) * WLC_IOCTL_MAXLEN, GFP_KERNEL);
+ if (unlikely(!buf)) {
+ WL_ERR(("%s: buf alloc failed\n", __func__));
+ return -ENOMEM;
+ }
+
+ retry = IOCTL_RETRY_COUNT;
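+	/* dump_obss may need several attempts; give up with an error once the retries are exhausted */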
+ while (retry--) {
+ err = wldev_iovar_getbuf(ndev, "dump_obss", &req, sizeof(req),
+ buf, WLC_IOCTL_MAXLEN, NULL);
+ if (err >= 0) {
+ break;
+ }
+ WL_DBG(("attempt = %d, err = %d, \n",
+ (IOCTL_RETRY_COUNT - retry), err));
+ }
+
+ if (retry <= 0) {
+ WL_ERR(("failure, dump_obss IOVAR failed\n"));
+ err = -BCME_ERROR;
+ goto exit;
+ }
+
+ results = (cca_stats_n_flags *)(buf);
+ wl_parse_dump_obss(results->buf, survey);
+ kfree(buf);
+
+ return 0;
+exit:
+ kfree(buf);
+ return err;
+}
+
+static int wl_cfg80211_dump_survey(struct wiphy *wiphy, struct net_device *ndev,
+ int idx, struct survey_info *info)
{
- struct wl_priv *wl = wiphy_priv(wiphy);
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
+ struct wl_dump_survey *survey;
+ struct ieee80211_supported_band *band;
+	struct ieee80211_channel *chan;
+ cca_msrmnt_query req;
+ int val, err, noise, retry;
+
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+ if (!(dhd->op_mode & DHD_FLAG_HOSTAP_MODE)) {
+ return -ENOENT;
+ }
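+	/* the survey index walks the 2.4 GHz channels first, then continues into the 5 GHz band */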
+ band = wiphy->bands[IEEE80211_BAND_2GHZ];
+ if (band && idx >= band->n_channels) {
+ idx -= band->n_channels;
+ band = NULL;
+ }
+
+ if (!band || idx >= band->n_channels) {
+ /* Move to 5G band */
+ band = wiphy->bands[IEEE80211_BAND_5GHZ];
+ if (idx >= band->n_channels) {
+ return -ENOENT;
+ }
+ }
+
+ chan = &band->channels[idx];
+ /* Setting current channel to the requested channel */
+ if ((err = wl_cfg80211_set_channel(wiphy, ndev, chan,
+ NL80211_CHAN_HT20) < 0)) {
+ WL_ERR(("Set channel failed \n"));
+ }
+
+ if (!idx) {
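+		/* one-time setup on the first survey index: disable power saving (mpc) and bring the interface up */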
+ /* Disable mpc */
+ val = 0;
+ err = wldev_iovar_setbuf_bsscfg(ndev, "mpc", (void *)&val,
+ sizeof(val), cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0,
+ &cfg->ioctl_buf_sync);
+ if (err < 0) {
+ WL_ERR(("set 'mpc' failed, error = %d\n", err));
+ }
+
+ /* Set interface up, explicitly. */
+ val = 1;
+ err = wldev_ioctl(ndev, WLC_UP, (void *)&val, sizeof(val), true);
+ if (err < 0) {
+ WL_ERR(("set interface up failed, error = %d\n", err));
+ }
+ }
+
+ /* Get noise value */
+ retry = IOCTL_RETRY_COUNT;
+ while (retry--) {
+ err = wldev_ioctl(ndev, WLC_GET_PHY_NOISE, &noise,
+ sizeof(noise), false);
+ if (err >= 0) {
+ break;
+ }
+ WL_DBG(("attempt = %d, err = %d, \n",
+ (IOCTL_RETRY_COUNT - retry), err));
+ }
+
+ if (retry <= 0) {
+ WL_ERR(("Get Phy Noise failed, error = %d\n", err));
+ noise = CHAN_NOISE_DUMMY;
+ }
+
+ survey = (struct wl_dump_survey *) kzalloc(sizeof(struct wl_dump_survey),
+ GFP_KERNEL);
+ if (unlikely(!survey)) {
+ WL_ERR(("%s: alloc failed\n", __func__));
+ return -ENOMEM;
+ }
- WL_DBG(("Enter \n"));
- WL_PNO((">>> SCHED SCAN STOP\n"));
+ /* Start Measurement for obss stats on current channel */
+ req.msrmnt_query = 0;
+ req.time_req = ACS_MSRMNT_DELAY;
+ if ((err = wl_dump_obss(ndev, req, survey)) < 0) {
+ goto exit;
+ }
- if (dhd_dev_pno_stop_for_ssid(dev) < 0)
- WL_ERR(("PNO Stop for SSID failed"));
+ /*
+	 * Wait for the measurement to complete, adding a buffer value of 10 to take
+ * into consideration any delay in IOVAR completion
+ */
+ msleep(ACS_MSRMNT_DELAY + 10);
- if (wl->scan_request && wl->sched_scan_running) {
- WL_PNO((">>> Sched scan running. Aborting it..\n"));
- wl_notify_escan_complete(wl, dev, true, true);
+ /* Issue IOVAR to collect measurement results */
+ req.msrmnt_query = 1;
+ if ((err = wl_dump_obss(ndev, req, survey)) < 0) {
+ goto exit;
}
- wl->sched_scan_req = NULL;
- wl->sched_scan_running = FALSE;
+ info->channel = chan;
+ info->noise = noise;
+ info->channel_time = ACS_MSRMNT_DELAY;
+ info->channel_time_busy = ACS_MSRMNT_DELAY - survey->idle;
+ info->channel_time_rx = survey->obss + survey->ibss + survey->no_ctg +
+ survey->no_pckt;
+ info->channel_time_tx = survey->tx;
+	info->filled = SURVEY_INFO_NOISE_DBM | SURVEY_INFO_CHANNEL_TIME |
+ SURVEY_INFO_CHANNEL_TIME_BUSY | SURVEY_INFO_CHANNEL_TIME_RX |
+ SURVEY_INFO_CHANNEL_TIME_TX;
+ kfree(survey);
return 0;
+exit:
+ kfree(survey);
+ return err;
}
-#endif /* WL_SCHED_SCAN */
+#endif /* WL_SUPPORT_ACS */
static struct cfg80211_ops wl_cfg80211_ops = {
.add_virtual_intf = wl_cfg80211_add_virtual_iface,
.mgmt_tx = wl_cfg80211_mgmt_tx,
.mgmt_frame_register = wl_cfg80211_mgmt_frame_register,
.change_bss = wl_cfg80211_change_bss,
-#if !defined(WL_CFG80211_P2P_DEV_IF) && (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)) || defined(WL_COMPAT_WIRELESS)
.set_channel = wl_cfg80211_set_channel,
-#endif /* !WL_CFG80211_P2P_DEV_IF && (LINUX_VERSION < VERSION(3, 6, 0)) */
+#endif /* (LINUX_VERSION < VERSION(3, 6, 0)) || WL_COMPAT_WIRELESS */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0)) && !defined(WL_COMPAT_WIRELESS)
.set_beacon = wl_cfg80211_add_set_beacon,
.add_beacon = wl_cfg80211_add_set_beacon,
#ifdef WL_SCHED_SCAN
.sched_scan_start = wl_cfg80211_sched_scan_start,
.sched_scan_stop = wl_cfg80211_sched_scan_stop,
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0) */
+#endif /* WL_SCHED_SCAN */
#if defined(WL_SUPPORT_BACKPORTED_KPATCHES) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, \
2, 0))
.del_station = wl_cfg80211_del_station,
+ .change_station = wl_cfg80211_change_station,
.mgmt_tx_cancel_wait = wl_cfg80211_mgmt_tx_cancel_wait,
#endif /* WL_SUPPORT_BACKPORTED_KPATCHES || KERNEL_VERSION >= (3,2,0) */
#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0)) || defined(WL_COMPAT_WIRELESS)
- .tdls_oper = wl_cfg80211_tdls_oper
+ .tdls_oper = wl_cfg80211_tdls_oper,
#endif /* LINUX_VERSION > VERSION(3, 2, 0) || WL_COMPAT_WIRELESS */
+#ifdef WL_SUPPORT_ACS
+ .dump_survey = wl_cfg80211_dump_survey,
+#endif /* WL_SUPPORT_ACS */
+#ifdef WL_CFG80211_ACL
+ .set_mac_acl = wl_cfg80211_set_mac_acl,
+#endif /* WL_CFG80211_ACL */
};
s32 wl_mode_to_nl80211_iftype(s32 mode)
}
#ifdef CONFIG_CFG80211_INTERNAL_REGDB
-/* Kernel Network Support->Wireless->Regulatory rules database
- options should be enabled and regulatory CRDA regdb table populated in Kernel
- for proper country reg notification
-*/
static int
wl_cfg80211_reg_notifier(
struct wiphy *wiphy,
struct regulatory_request *request)
{
- struct wl_priv *wl = (struct wl_priv *)wiphy_priv(wiphy);
+ struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)wiphy_priv(wiphy);
int ret = 0;
- if (!request || !wl) {
+ if (!request || !cfg) {
WL_ERR(("Invalid arg\n"));
return -EINVAL;
}
request->alpha2[0], request->alpha2[1],
((request->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) ? " 11d AP" : "User")));
- if ((ret = wldev_set_country(wl_to_prmry_ndev(wl), request->alpha2,
+ if ((ret = wldev_set_country(bcmcfg_to_prmry_ndev(cfg), request->alpha2,
false, (request->initiator == NL80211_REGDOM_SET_BY_USER ? true : false))) < 0) {
WL_ERR(("set country Failed :%d\n", ret));
}
}
#endif /* CONFIG_CFG80211_INTERNAL_REGDB */
-static s32 wl_setup_wiphy(struct wireless_dev *wdev, struct device *sdiofunc_dev, void *data)
+#ifdef CONFIG_PM
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
+static const struct wiphy_wowlan_support brcm_wowlan_support = {
+ .flags = WIPHY_WOWLAN_ANY,
+};
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) */
+#endif /* CONFIG_PM */
+
+static s32 wl_setup_wiphy(struct wireless_dev *wdev, struct device *sdiofunc_dev, void *context)
{
s32 err = 0;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) && \
- (defined(WL_IFACE_COMB_NUM_CHANNELS) || defined(WL_CFG80211_P2P_DEV_IF))
- dhd_pub_t *dhd = (dhd_pub_t *)data;
-#endif /* LINUX_VER >= 3.0 && (WL_IFACE_COMB_NUM_CHANNELS || WL_CFG80211_P2P_DEV_IF) */
+#if 1 && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0) || defined(WL_COMPAT_WIRELESS))
+ dhd_pub_t *dhd = (dhd_pub_t *)context;
+ BCM_REFERENCE(dhd);
+
+ if (!dhd) {
+ WL_ERR(("DHD is NULL!!"));
+ err = -ENODEV;
+ return err;
+ }
+#endif
wdev->wiphy =
- wiphy_new(&wl_cfg80211_ops, sizeof(struct wl_priv));
+ wiphy_new(&wl_cfg80211_ops, sizeof(struct bcm_cfg80211));
if (unlikely(!wdev->wiphy)) {
WL_ERR(("Couldn not allocate wiphy device\n"));
err = -ENOMEM;
wdev->wiphy->interface_modes =
BIT(NL80211_IFTYPE_STATION)
| BIT(NL80211_IFTYPE_ADHOC)
-#if !defined(WL_ENABLE_P2P_IF)
+#if !defined(WL_ENABLE_P2P_IF) && !defined(WL_CFG80211_P2P_DEV_IF)
| BIT(NL80211_IFTYPE_MONITOR)
-#endif /* !WL_ENABLE_P2P_IF */
+#endif /* !WL_ENABLE_P2P_IF && !WL_CFG80211_P2P_DEV_IF */
#if defined(WL_IFACE_COMB_NUM_CHANNELS) || defined(WL_CFG80211_P2P_DEV_IF)
| BIT(NL80211_IFTYPE_P2P_CLIENT)
| BIT(NL80211_IFTYPE_P2P_GO)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) && \
(defined(WL_IFACE_COMB_NUM_CHANNELS) || defined(WL_CFG80211_P2P_DEV_IF))
- if (dhd && dhd->op_mode == DHD_FLAG_HOSTAP_MODE) {
- WL_DBG(("Setting interface combinations for SoftAP mode\n"));
- wdev->wiphy->iface_combinations = softap_iface_combinations;
- wdev->wiphy->n_iface_combinations =
- ARRAY_SIZE(softap_iface_combinations);
- } else {
- WL_DBG(("Setting interface combinations for STA+P2P mode\n"));
- wdev->wiphy->iface_combinations = sta_p2p_iface_combinations;
- wdev->wiphy->n_iface_combinations =
- ARRAY_SIZE(sta_p2p_iface_combinations);
- }
+ WL_DBG(("Setting interface combinations for common mode\n"));
+ wdev->wiphy->iface_combinations = common_iface_combinations;
+ wdev->wiphy->n_iface_combinations =
+ ARRAY_SIZE(common_iface_combinations);
#endif /* LINUX_VER >= 3.0 && (WL_IFACE_COMB_NUM_CHANNELS || WL_CFG80211_P2P_DEV_IF) */
wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz;
WIPHY_FLAG_SUPPORTS_SEPARATE_DEFAULT_KEYS |
#endif
WIPHY_FLAG_4ADDR_STATION;
- /* If driver advertises FW_ROAM, the supplicant wouldn't
- * send the BSSID & Freq in the connect command allowing the
- * the driver to choose the AP to connect to. But unless we
- * support ROAM_CACHE in firware this will delay the ASSOC as
- * as the FW need to do a full scan before attempting to connect
- * So that feature will just increase assoc. The better approach
- * to let Supplicant to provide channel info and FW letter may roam
- * if needed so DON'T advertise that featur eto Supplicant.
+#if (defined(ROAM_ENABLE) || defined(BCMFW_ROAM_ENABLE)) && ((LINUX_VERSION_CODE >= \
+ KERNEL_VERSION(3, 2, 0)) || defined(WL_COMPAT_WIRELESS)) && !0
+	/* Please use supplicant ver >= 76 if FW_ROAM is enabled.
+	 * If the driver advertises FW_ROAM, an older supplicant wouldn't
+	 * send the BSSID & Freq in the connect req command. This
+	 * will delay the ASSOC as the FW needs to do a full scan
+	 * before attempting to connect. Supplicant >= 76 has a patch
+	 * to allow bssid & freq to be sent down to the driver even if
+	 * FW ROAM is advertised.
*/
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)
- /* wdev->wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM; */
-#endif
+ wdev->wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM;
+#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)) || defined(WL_COMPAT_WIRELESS)
wdev->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
WIPHY_FLAG_OFFCHAN_TX;
* to remove the patch from supplicant
*/
wdev->wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME;
+
+#ifdef WL_CFG80211_ACL
+ /* Configure ACL capabilities. */
+ wdev->wiphy->max_acl_mac_addrs = MAX_NUM_MAC_FILT;
+#endif
+
+#if 1 && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0) || defined(WL_COMPAT_WIRELESS))
+	/* The supplicant distinguishes between SoftAP mode and other
+	 * modes (e.g. P2P, WPS, HS2.0) when it builds the probe
+	 * response frame, starting with Supplicant MR1 and kernel 3.4.0
+	 * or later. To add a vendor-specific IE into the
+	 * probe response frame in SoftAP mode, the
+	 * AP_PROBE_RESP_OFFLOAD flag is set in the wiphy->flags variable.
+ */
+ if (dhd_get_fw_mode(dhd->info) == DHD_FLAG_HOSTAP_MODE) {
+ wdev->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
+ wdev->wiphy->probe_resp_offload = 0;
+ }
+#endif
#endif /* WL_SUPPORT_BACKPORTED_KPATCHES) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) */
#ifdef CONFIG_CFG80211_INTERNAL_REGDB
#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 2, 0)) || defined(WL_COMPAT_WIRELESS)
wdev->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
#endif
+
+#if defined(CONFIG_PM) && defined(WL_CFG80211_P2P_DEV_IF)
+ /*
+	 * Since the linux-3.10 kernel, a wowlan packet filter is mandated to avoid
+	 * disconnection from the connected network before suspend. So a dummy wowlan
+	 * filter is configured for kernels linux-3.8 and above.
+ */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
+ wdev->wiphy->wowlan = &brcm_wowlan_support;
+#else
+ wdev->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY;
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) */
+#endif /* CONFIG_PM && WL_CFG80211_P2P_DEV_IF */
+
WL_DBG(("Registering custom regulatory)\n"));
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+ wdev->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG;
+#else
wdev->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
+#endif
wiphy_apply_custom_regulatory(wdev->wiphy, &brcm_regdom);
+
+	WL_DBG(("Registering Vendor80211\n"));
+ err = cfgvendor_attach(wdev->wiphy);
+ if (unlikely(err < 0)) {
+		WL_ERR(("Could not attach vendor commands (%d)\n", err));
+ }
+
/* Now we can register wiphy with cfg80211 module */
err = wiphy_register(wdev->wiphy);
if (unlikely(err < 0)) {
#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)) && (LINUX_VERSION_CODE <= \
KERNEL_VERSION(3, 3, 0))) && defined(WL_IFACE_COMB_NUM_CHANNELS)
wdev->wiphy->flags &= ~WIPHY_FLAG_ENFORCE_COMBINATIONS;
-#endif /* ((LINUX_VER >= 3.0) && (LINUX_VER <= 3.3)) && WL_IFACE_COMB_NUM_CHANNELS */
+#endif
return err;
}
-static void wl_free_wdev(struct wl_priv *wl)
+static void wl_free_wdev(struct bcm_cfg80211 *cfg)
{
- struct wireless_dev *wdev = wl->wdev;
+ struct wireless_dev *wdev = cfg->wdev;
struct wiphy *wiphy;
if (!wdev) {
WL_ERR(("wdev is invalid\n"));
return;
}
wiphy = wdev->wiphy;
+
+ cfgvendor_detach(wdev->wiphy);
+
wiphy_unregister(wdev->wiphy);
wdev->wiphy->dev.parent = NULL;
- wl_delete_all_netinfo(wl);
+ wl_delete_all_netinfo(cfg);
wiphy_free(wiphy);
- /* PLEASE do NOT call any function after wiphy_free, the driver's private structure "wl",
+ /* PLEASE do NOT call any function after wiphy_free, the driver's private structure "cfg",
* which is the private part of wiphy, has been freed in wiphy_free !!!!!!!!!!!
*/
}
-static s32 wl_inform_bss(struct wl_priv *wl)
+static s32 wl_inform_bss(struct bcm_cfg80211 *cfg)
{
struct wl_scan_results *bss_list;
struct wl_bss_info *bi = NULL; /* must be initialized */
s32 err = 0;
s32 i;
#if defined(RSSIAVG)
- struct net_device *ndev = wl_to_prmry_ndev(wl);
+ struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
int rssi;
#endif
#if defined(BSSCACHE)
wl_bss_cache_t *node;
#endif
- bss_list = wl->bss_list;
+ bss_list = cfg->bss_list;
#if defined(BSSCACHE)
- if (g_bss_cache_ctrl.m_timer_expired || (p2p_is_on(wl) && p2p_scan(wl))) {
+ if (p2p_is_on(cfg) && p2p_scan(cfg)) {
#if defined(RSSIAVG)
wl_free_rssi_cache(&g_rssi_cache_ctrl);
#endif
wl_free_bss_cache(&g_bss_cache_ctrl);
- g_bss_cache_ctrl.m_timer_expired ^= 1;
}
wl_update_bss_cache(&g_bss_cache_ctrl, bss_list);
wl_delete_dirty_bss_cache(&g_bss_cache_ctrl);
if (node->dirty > 1) {
// just inform dirty bss
bi = node->results.bss_info;
- err = wl_inform_single_bss(wl, bi);
+ err = wl_inform_single_bss(cfg, bi, false);
}
node = node->next;
}
- wl_run_bss_cache_timer(&g_bss_cache_ctrl, 0);
- wl_run_bss_cache_timer(&g_bss_cache_ctrl, 1);
bi = NULL;
#endif
WL_SCAN(("scanned AP count (%d)\n", bss_list->count));
+
bi = next_bss(bss_list, bi);
for_each_bss(bss_list, bi, i) {
if (p2p_disconnected > 0 && !memcmp(&bi->BSSID, &p2p_disconnected_bssid, ETHER_ADDR_LEN))
continue;
- err = wl_inform_single_bss(wl, bi);
+ err = wl_inform_single_bss(cfg, bi, false);
}
if (p2p_disconnected > 0) {
return err;
}
-static s32 wl_inform_single_bss(struct wl_priv *wl, struct wl_bss_info *bi)
+static s32 wl_inform_single_bss(struct bcm_cfg80211 *cfg, struct wl_bss_info *bi, bool roam)
{
- struct wiphy *wiphy = wl_to_wiphy(wl);
+ struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
struct ieee80211_mgmt *mgmt;
struct ieee80211_channel *channel;
struct ieee80211_supported_band *band;
struct wl_cfg80211_bss_info *notif_bss_info;
- struct wl_scan_req *sr = wl_to_sr(wl);
+ struct wl_scan_req *sr = wl_to_sr(cfg);
struct beacon_proberesp *beacon_proberesp;
struct cfg80211_bss *cbss = NULL;
s32 mgmt_type;
gfp_t aflags;
if (unlikely(dtoh32(bi->length) > WL_BSS_INFO_MAX)) {
- WL_ERR(("Beacon is larger than buffer. Discarding\n"));
+ WL_DBG(("Beacon is larger than buffer. Discarding\n"));
return err;
}
aflags = (in_atomic()) ? GFP_ATOMIC : GFP_KERNEL;
}
mgmt = (struct ieee80211_mgmt *)notif_bss_info->frame_buf;
notif_bss_info->channel =
- bi->ctl_ch ? bi->ctl_ch : CHSPEC_CHANNEL(wl_chspec_driver_to_host(bi->chanspec));
+ wf_chspec_ctlchan(wl_chspec_driver_to_host(bi->chanspec));
if (notif_bss_info->channel <= CH_MAX_2G_CHANNEL)
band = wiphy->bands[IEEE80211_BAND_2GHZ];
else
band = wiphy->bands[IEEE80211_BAND_5GHZ];
if (!band) {
- WL_DBG(("No valid band\n"));
+ WL_ERR(("No valid band\n"));
kfree(notif_bss_info);
return -EINVAL;
}
notif_bss_info->rssi = MIN(dtoh16(bi->RSSI), RSSI_MAXVAL);
#endif
#if defined(RSSIOFFSET)
- notif_bss_info->rssi = wl_update_rssi_offset(notif_bss_info->rssi);
+ notif_bss_info->rssi = wl_update_rssi_offset(bcmcfg_to_prmry_ndev(cfg), notif_bss_info->rssi);
#endif
memcpy(mgmt->bssid, &bi->BSSID, ETHER_ADDR_LEN);
- mgmt_type = wl->active_scan ?
+ mgmt_type = cfg->active_scan ?
IEEE80211_STYPE_PROBE_RESP : IEEE80211_STYPE_BEACON;
if (!memcmp(bi->SSID, sr->ssid.SSID, bi->SSID_len)) {
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | mgmt_type);
}
- beacon_proberesp = wl->active_scan ?
+ beacon_proberesp = cfg->active_scan ?
(struct beacon_proberesp *)&mgmt->u.probe_resp :
(struct beacon_proberesp *)&mgmt->u.beacon;
beacon_proberesp->timestamp = 0;
beacon_proberesp->beacon_int = cpu_to_le16(bi->beacon_period);
beacon_proberesp->capab_info = cpu_to_le16(bi->capability);
- wl_rst_ie(wl);
- wl_update_hidden_ap_ie(bi, ((u8 *) bi) + bi->ie_offset, &bi->ie_length);
- wl_mrg_ie(wl, ((u8 *) bi) + bi->ie_offset, bi->ie_length);
- wl_cp_ie(wl, beacon_proberesp->variable, WL_BSS_INFO_MAX -
+ wl_rst_ie(cfg);
+ wl_update_hidden_ap_ie(bi, ((u8 *) bi) + bi->ie_offset, &bi->ie_length, roam);
+ wl_mrg_ie(cfg, ((u8 *) bi) + bi->ie_offset, bi->ie_length);
+ wl_cp_ie(cfg, beacon_proberesp->variable, WL_BSS_INFO_MAX -
offsetof(struct wl_cfg80211_bss_info, frame_buf));
notif_bss_info->frame_len = offsetof(struct ieee80211_mgmt,
- u.beacon.variable) + wl_get_ielen(wl);
+ u.beacon.variable) + wl_get_ielen(cfg);
#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38) && !defined(WL_COMPAT_WIRELESS)
freq = ieee80211_channel_to_frequency(notif_bss_info->channel);
(void)band->band;
#endif
}
+
cbss = cfg80211_inform_bss_frame(wiphy, channel, mgmt,
le16_to_cpu(notif_bss_info->frame_len), signal, aflags);
if (unlikely(!cbss)) {
return err;
}
-static bool wl_is_linkup(struct wl_priv *wl, const wl_event_msg_t *e, struct net_device *ndev)
+static bool wl_is_linkup(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e, struct net_device *ndev)
{
u32 event = ntoh32(e->event_type);
u32 status = ntoh32(e->status);
WL_DBG(("event %d, status %d flags %x\n", event, status, flags));
if (event == WLC_E_SET_SSID) {
if (status == WLC_E_STATUS_SUCCESS) {
- if (!wl_is_ibssmode(wl, ndev))
+ if (!wl_is_ibssmode(cfg, ndev))
return true;
}
} else if (event == WLC_E_LINK) {
return false;
}
-static bool wl_is_linkdown(struct wl_priv *wl, const wl_event_msg_t *e)
+static bool wl_is_linkdown(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e)
{
u32 event = ntoh32(e->event_type);
u16 flags = ntoh16(e->flags);
return false;
}
-static bool wl_is_nonetwork(struct wl_priv *wl, const wl_event_msg_t *e)
+static bool wl_is_nonetwork(struct bcm_cfg80211 *cfg, const wl_event_msg_t *e)
{
u32 event = ntoh32(e->event_type);
u32 status = ntoh32(e->status);
* should use this new/del sta event mechanism for BRCM supplicant >= 22.
*/
static s32
-wl_notify_connect_status_ap(struct wl_priv *wl, struct net_device *ndev,
+wl_notify_connect_status_ap(struct bcm_cfg80211 *cfg, struct net_device *ndev,
const wl_event_msg_t *e, void *data)
{
s32 err = 0;
struct ieee80211_supported_band *band;
struct ether_addr da;
struct ether_addr bssid;
- struct wiphy *wiphy = wl_to_wiphy(wl);
+ struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
channel_info_t ci;
#else
struct station_info sinfo;
WL_DBG(("event %d status %d reason %d\n", event, ntoh32(e->status), reason));
/* if link down, bsscfg is disabled. */
if (event == WLC_E_LINK && reason == WLC_E_LINK_BSSCFG_DIS &&
- wl_get_p2p_status(wl, IF_DELETING) && (ndev != wl_to_prmry_ndev(wl))) {
+ wl_get_p2p_status(cfg, IF_DELETING) && (ndev != bcmcfg_to_prmry_ndev(cfg))) {
wl_add_remove_eventmsg(ndev, WLC_E_PROBREQ_MSG, false);
- WL_INFO(("AP mode link down !! \n"));
- complete(&wl->iface_disable);
+ WL_INFORM(("AP mode link down !! \n"));
+ complete(&cfg->iface_disable);
return 0;
}
+ if (event == WLC_E_DISASSOC_IND || event == WLC_E_DEAUTH_IND || event == WLC_E_DEAUTH) {
+ WL_ERR(("event %s(%d) status %d reason %d\n",
+ bcmevent_get_name(event), event, ntoh32(e->status), reason));
+ }
+
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0)) && !defined(WL_CFG80211_STA_EVENT) \
&& !defined(WL_COMPAT_WIRELESS)
WL_DBG(("Enter \n"));
}
memset(&bssid, 0, ETHER_ADDR_LEN);
WL_DBG(("Enter event %d ndev %p\n", event, ndev));
- if (wl_get_mode_by_netdev(wl, ndev) == WL_INVALID) {
+ if (wl_get_mode_by_netdev(cfg, ndev) == WL_INVALID) {
kfree(body);
return WL_INVALID;
}
memcpy(body, data, len);
wldev_iovar_getbuf_bsscfg(ndev, "cur_etheraddr",
- NULL, 0, wl->ioctl_buf, WLC_IOCTL_SMLEN, bsscfgidx, &wl->ioctl_buf_sync);
- memcpy(da.octet, wl->ioctl_buf, ETHER_ADDR_LEN);
+ NULL, 0, cfg->ioctl_buf, WLC_IOCTL_SMLEN, bsscfgidx, &cfg->ioctl_buf_sync);
+ memcpy(da.octet, cfg->ioctl_buf, ETHER_ADDR_LEN);
err = wldev_ioctl(ndev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN, false);
switch (event) {
case WLC_E_ASSOC_IND:
else
band = wiphy->bands[IEEE80211_BAND_5GHZ];
if (!band) {
- WL_DBG(("No valid band\n"));
+ WL_ERR(("No valid band\n"));
if (body)
kfree(body);
return -EINVAL;
isfree = true;
if (event == WLC_E_ASSOC_IND && reason == DOT11_SC_SUCCESS) {
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || defined(WL_COMPAT_WIRELESS)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0))
+ cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, 0, GFP_ATOMIC);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || defined(WL_COMPAT_WIRELESS)
cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, GFP_ATOMIC);
#else
cfg80211_rx_mgmt(ndev, freq, mgmt_frame, len, GFP_ATOMIC);
-#endif /* LINUX_VERSION >= VERSION(3,4,0) || WL_COMPAT_WIRELESS */
+#endif /* LINUX_VERSION >= VERSION(3, 12, 0) */
} else if (event == WLC_E_DISASSOC_IND) {
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || defined(WL_COMPAT_WIRELESS)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0))
+ cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, 0, GFP_ATOMIC);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || defined(WL_COMPAT_WIRELESS)
cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, GFP_ATOMIC);
#else
cfg80211_rx_mgmt(ndev, freq, mgmt_frame, len, GFP_ATOMIC);
-#endif /* LINUX_VERSION >= VERSION(3,4,0) || WL_COMPAT_WIRELESS */
+#endif /* LINUX_VERSION >= VERSION(3, 12, 0) */
} else if ((event == WLC_E_DEAUTH_IND) || (event == WLC_E_DEAUTH)) {
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || defined(WL_COMPAT_WIRELESS)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0))
+ cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, 0, GFP_ATOMIC);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || defined(WL_COMPAT_WIRELESS)
cfg80211_rx_mgmt(ndev, freq, 0, mgmt_frame, len, GFP_ATOMIC);
#else
cfg80211_rx_mgmt(ndev, freq, mgmt_frame, len, GFP_ATOMIC);
-#endif /* LINUX_VERSION >= VERSION(3,4,0) || WL_COMPAT_WIRELESS */
+#endif /* LINUX_VERSION >= VERSION(3, 12, 0) */
}
exit:
}
static s32
-wl_get_auth_assoc_status(struct wl_priv *wl, struct net_device *ndev,
+wl_get_auth_assoc_status(struct bcm_cfg80211 *cfg, struct net_device *ndev,
const wl_event_msg_t *e)
{
u32 reason = ntoh32(e->reason);
u32 event = ntoh32(e->event_type);
- struct wl_security *sec = wl_read_prof(wl, ndev, WL_PROF_SEC);
+ struct wl_security *sec = wl_read_prof(cfg, ndev, WL_PROF_SEC);
WL_DBG(("event type : %d, reason : %d\n", event, reason));
if (sec) {
switch (event) {
}
static s32
-wl_notify_connect_status_ibss(struct wl_priv *wl, struct net_device *ndev,
+wl_notify_connect_status_ibss(struct bcm_cfg80211 *cfg, struct net_device *ndev,
const wl_event_msg_t *e, void *data)
{
s32 err = 0;
}
if (event == WLC_E_JOIN || event == WLC_E_START ||
(event == WLC_E_LINK && (flags == WLC_EVENT_MSG_LINK))) {
- if (wl_get_drv_status(wl, CONNECTED, ndev)) {
+ if (wl_get_drv_status(cfg, CONNECTED, ndev)) {
/* ROAM or Redundant */
- u8 *cur_bssid = wl_read_prof(wl, ndev, WL_PROF_BSSID);
+ u8 *cur_bssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
if (memcmp(cur_bssid, &e->addr, ETHER_ADDR_LEN) == 0) {
WL_DBG(("IBSS connected event from same BSSID("
MACDBG "), ignore it\n", MAC2STRDBG(cur_bssid)));
return err;
}
- WL_INFO(("IBSS BSSID is changed from " MACDBG " to " MACDBG "\n",
+ WL_INFORM(("IBSS BSSID is changed from " MACDBG " to " MACDBG "\n",
MAC2STRDBG(cur_bssid), MAC2STRDBG((u8 *)&e->addr)));
- wl_get_assoc_ies(wl, ndev);
- wl_update_prof(wl, ndev, NULL, (void *)&e->addr, WL_PROF_BSSID);
- wl_update_bss_info(wl, ndev);
+ wl_get_assoc_ies(cfg, ndev);
+ wl_update_prof(cfg, ndev, NULL, (void *)&e->addr, WL_PROF_BSSID);
+ wl_update_bss_info(cfg, ndev, false);
cfg80211_ibss_joined(ndev, (s8 *)&e->addr, GFP_KERNEL);
}
else {
/* New connection */
- WL_INFO(("IBSS connected to " MACDBG "\n", MAC2STRDBG((u8 *)&e->addr)));
- wl_link_up(wl);
- wl_get_assoc_ies(wl, ndev);
- wl_update_prof(wl, ndev, NULL, (void *)&e->addr, WL_PROF_BSSID);
- wl_update_bss_info(wl, ndev);
+ WL_INFORM(("IBSS connected to " MACDBG "\n", MAC2STRDBG((u8 *)&e->addr)));
+ wl_link_up(cfg);
+ wl_get_assoc_ies(cfg, ndev);
+ wl_update_prof(cfg, ndev, NULL, (void *)&e->addr, WL_PROF_BSSID);
+ wl_update_bss_info(cfg, ndev, false);
cfg80211_ibss_joined(ndev, (s8 *)&e->addr, GFP_KERNEL);
- wl_set_drv_status(wl, CONNECTED, ndev);
+ wl_set_drv_status(cfg, CONNECTED, ndev);
active = true;
- wl_update_prof(wl, ndev, NULL, (void *)&active, WL_PROF_ACT);
+ wl_update_prof(cfg, ndev, NULL, (void *)&active, WL_PROF_ACT);
}
} else if ((event == WLC_E_LINK && !(flags & WLC_EVENT_MSG_LINK)) ||
event == WLC_E_DEAUTH_IND || event == WLC_E_DISASSOC_IND) {
- wl_clr_drv_status(wl, CONNECTED, ndev);
- wl_link_down(wl);
- wl_init_prof(wl, ndev);
+ wl_clr_drv_status(cfg, CONNECTED, ndev);
+ wl_link_down(cfg);
+ wl_init_prof(cfg, ndev);
}
else if (event == WLC_E_SET_SSID && status == WLC_E_STATUS_NO_NETWORKS) {
WL_DBG(("no action - join fail (IBSS mode)\n"));
}
else {
WL_DBG(("no action (IBSS mode)\n"));
- }
+	}
return err;
}
static s32
-wl_notify_connect_status(struct wl_priv *wl, bcm_struct_cfgdev *cfgdev,
+wl_notify_connect_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
const wl_event_msg_t *e, void *data)
{
bool act;
s32 err = 0;
u32 event = ntoh32(e->event_type);
- ndev = cfgdev_to_wlc_ndev(cfgdev, wl);
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
- if (wl_get_mode_by_netdev(wl, ndev) == WL_MODE_AP) {
- err = wl_notify_connect_status_ap(wl, ndev, e, data);
- } else if (wl_get_mode_by_netdev(wl, ndev) == WL_MODE_IBSS) {
- err = wl_notify_connect_status_ibss(wl, ndev, e, data);
- } else if (wl_get_mode_by_netdev(wl, ndev) == WL_MODE_BSS) {
+ if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_AP) {
+ err = wl_notify_connect_status_ap(cfg, ndev, e, data);
+ } else if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_IBSS) {
+ err = wl_notify_connect_status_ibss(cfg, ndev, e, data);
+ } else if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_BSS) {
WL_DBG(("wl_notify_connect_status : event %d status : %d ndev %p\n",
ntoh32(e->event_type), ntoh32(e->status), ndev));
if (event == WLC_E_ASSOC || event == WLC_E_AUTH) {
- wl_get_auth_assoc_status(wl, ndev, e);
+ wl_get_auth_assoc_status(cfg, ndev, e);
return 0;
}
- if (wl_is_linkup(wl, e, ndev)) {
- wl_link_up(wl);
+ if (wl_is_linkup(cfg, e, ndev)) {
+ wl_link_up(cfg);
act = true;
- if (!wl_get_drv_status(wl, DISCONNECTING, ndev)) {
+ if (!wl_get_drv_status(cfg, DISCONNECTING, ndev)) {
printk("wl_bss_connect_done succeeded with " MACDBG "\n",
MAC2STRDBG((u8*)(&e->addr)));
- wl_bss_connect_done(wl, ndev, e, data, true);
- dhd_conf_set_phyoclscdenable((dhd_pub_t *)wl->pub);
+ wl_bss_connect_done(cfg, ndev, e, data, true);
+ dhd_conf_set_phyoclscdenable((dhd_pub_t *)cfg->pub);
WL_DBG(("joined in BSS network \"%s\"\n",
((struct wlc_ssid *)
- wl_read_prof(wl, ndev, WL_PROF_SSID))->SSID));
- }
- wl_update_prof(wl, ndev, e, &act, WL_PROF_ACT);
- wl_update_prof(wl, ndev, NULL, (void *)&e->addr, WL_PROF_BSSID);
- dhd_conf_set_wme((dhd_pub_t *)wl->pub);
- } else if (wl_is_linkdown(wl, e)) {
- if (wl->scan_request) {
- if (wl->escan_on) {
- wl_notify_escan_complete(wl, ndev, true, true);
- } else {
- del_timer_sync(&wl->scan_timeout);
- wl_iscan_aborted(wl);
+ wl_read_prof(cfg, ndev, WL_PROF_SSID))->SSID));
}
- }
- if (wl_get_drv_status(wl, CONNECTED, ndev)) {
+ wl_update_prof(cfg, ndev, e, &act, WL_PROF_ACT);
+ wl_update_prof(cfg, ndev, NULL, (void *)&e->addr, WL_PROF_BSSID);
+ dhd_conf_set_wme((dhd_pub_t *)cfg->pub);
+
+ } else if (wl_is_linkdown(cfg, e)) {
+ if (cfg->scan_request)
+ wl_notify_escan_complete(cfg, ndev, true, true);
+ if (wl_get_drv_status(cfg, CONNECTED, ndev)) {
scb_val_t scbval;
- u8 *curbssid = wl_read_prof(wl, ndev, WL_PROF_BSSID);
+ u8 *curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
s32 reason = 0;
if (event == WLC_E_DEAUTH_IND || event == WLC_E_DISASSOC_IND)
reason = ntoh32(e->reason);
"event : %d, reason=%d from " MACDBG "\n",
ndev->name, event, ntoh32(e->reason),
MAC2STRDBG((u8*)(&e->addr)));
- if (memcmp(curbssid, &e->addr, ETHER_ADDR_LEN) != 0) {
+ if (!cfg->roam_offload &&
+ memcmp(curbssid, &e->addr, ETHER_ADDR_LEN) != 0) {
WL_ERR(("BSSID of event is not the connected BSSID"
"(ignore it) cur: " MACDBG " event: " MACDBG"\n",
MAC2STRDBG(curbssid), MAC2STRDBG((u8*)(&e->addr))));
p2p_disconnected = 1;
memcpy(&p2p_disconnected_bssid, curbssid, ETHER_ADDR_LEN);
}
- wl_clr_drv_status(wl, CONNECTED, ndev);
- if (! wl_get_drv_status(wl, DISCONNECTING, ndev)) {
+ wl_clr_drv_status(cfg, CONNECTED, ndev);
+ if (! wl_get_drv_status(cfg, DISCONNECTING, ndev)) {
					/* To make sure of the disconnect, explicitly send disassoc
					 * for the BSSID 00:00:00:00:00:00 issue
					 */
err = 0;
}
cfg80211_disconnected(ndev, reason, NULL, 0, GFP_KERNEL);
- wl_link_down(wl);
- wl_init_prof(wl, ndev);
+ wl_link_down(cfg);
+ wl_init_prof(cfg, ndev);
}
}
- else if (wl_get_drv_status(wl, CONNECTING, ndev)) {
-
+ else if (wl_get_drv_status(cfg, CONNECTING, ndev)) {
printk("link down, during connecting\n");
#ifdef ESCAN_RESULT_PATCH
if ((memcmp(connect_req_bssid, broad_bssid, ETHER_ADDR_LEN) == 0) ||
(memcmp(&e->addr, connect_req_bssid, ETHER_ADDR_LEN) == 0))
/* In case this event comes while associating another AP */
#endif /* ESCAN_RESULT_PATCH */
- wl_bss_connect_done(wl, ndev, e, data, false);
+ wl_bss_connect_done(cfg, ndev, e, data, false);
}
- wl_clr_drv_status(wl, DISCONNECTING, ndev);
+ wl_clr_drv_status(cfg, DISCONNECTING, ndev);
				/* if link is down, the bsscfg is disabled */
- if (ndev != wl_to_prmry_ndev(wl))
- complete(&wl->iface_disable);
+ if (ndev != bcmcfg_to_prmry_ndev(cfg))
+ complete(&cfg->iface_disable);
- } else if (wl_is_nonetwork(wl, e)) {
+ } else if (wl_is_nonetwork(cfg, e)) {
printk("connect failed event=%d e->status %d e->reason %d \n",
event, (int)ntoh32(e->status), (int)ntoh32(e->reason));
/* Clean up any pending scan request */
- if (wl->scan_request) {
- if (wl->escan_on) {
- wl_notify_escan_complete(wl, ndev, true, true);
- } else {
- del_timer_sync(&wl->scan_timeout);
- wl_iscan_aborted(wl);
- }
- }
- if (wl_get_drv_status(wl, CONNECTING, ndev))
- wl_bss_connect_done(wl, ndev, e, data, false);
+ if (cfg->scan_request)
+ wl_notify_escan_complete(cfg, ndev, true, true);
+ if (wl_get_drv_status(cfg, CONNECTING, ndev))
+ wl_bss_connect_done(cfg, ndev, e, data, false);
} else {
- printk("%s nothing\n", __FUNCTION__);
+ WL_DBG(("%s nothing\n", __FUNCTION__));
}
- } else {
- WL_ERR(("Invalid ndev status %d\n", wl_get_mode_by_netdev(wl, ndev)));
+ }
+ else {
+ WL_ERR(("Invalid ndev status %d\n", wl_get_mode_by_netdev(cfg, ndev)));
}
return err;
}
+void wl_cfg80211_set_rmc_pid(int pid)
+{
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
+ if (pid > 0)
+ cfg->rmc_event_pid = pid;
+ WL_DBG(("set pid for rmc event : pid=%d\n", pid));
+}
+
+#ifdef WLAIBSS
+void wl_cfg80211_set_txfail_pid(int pid)
+{
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
+ if (pid > 0)
+ cfg->aibss_txfail_pid = pid;
+ WL_DBG(("set pid for aibss fail event : pid=%d\n", pid));
+}
+
+static s32
+wl_notify_aibss_txfail(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ u32 evt = ntoh32(e->event_type);
+ int ret = -1;
+
+ if (cfg->aibss_txfail_pid != 0) {
+ ret = wl_netlink_send_msg(cfg->aibss_txfail_pid, AIBSS_EVENT_TXFAIL,
+ cfg->aibss_txfail_seq++, (void *)&e->addr, ETHER_ADDR_LEN);
+ }
+
+ WL_DBG(("txfail : evt=%d, pid=%d, ret=%d, mac=" MACF "\n",
+ evt, cfg->aibss_txfail_pid, ret, ETHERP_TO_MACF(&e->addr)));
+ return ret;
+}
+#endif /* WLAIBSS */
+
+static s32
+wl_notify_rmc_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ u32 evt = ntoh32(e->event_type);
+ u32 reason = ntoh32(e->reason);
+ int ret = -1;
+
+ switch (reason) {
+ case WLC_E_REASON_RMC_AR_LOST:
+ case WLC_E_REASON_RMC_AR_NO_ACK:
+ if (cfg->rmc_event_pid != 0) {
+ ret = wl_netlink_send_msg(cfg->rmc_event_pid,
+ RMC_EVENT_LEADER_CHECK_FAIL,
+ cfg->rmc_event_seq++, NULL, 0);
+ }
+ break;
+ default:
+ break;
+ }
+ WL_DBG(("rmcevent : evt=%d, pid=%d, ret=%d\n", evt, cfg->rmc_event_pid, ret));
+ return ret;
+}
+
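For reference, the AIBSS tx-fail and RMC handlers above share one delivery model: a userspace
listener first registers its PID (for RMC this happens through wl_cfg80211_set_rmc_pid()), and the
event handler then forwards the firmware event to that PID with wl_netlink_send_msg(pid, type, seq,
payload, payload_len). A minimal sketch of that flow, using only the calls shown in this patch
(the example_* names are hypothetical and illustrative only):

	/* Registration side: remember the listener's PID, as wl_cfg80211_set_rmc_pid() does. */
	static void example_register_rmc_listener(struct bcm_cfg80211 *cfg, int pid)
	{
		if (pid > 0)
			cfg->rmc_event_pid = pid;
	}

	/* Event side: forward a leader-check failure to the registered PID, if any. */
	static int example_forward_leader_check_fail(struct bcm_cfg80211 *cfg)
	{
		if (cfg->rmc_event_pid == 0)
			return -1;	/* no userspace listener registered */

		return wl_netlink_send_msg(cfg->rmc_event_pid, RMC_EVENT_LEADER_CHECK_FAIL,
			cfg->rmc_event_seq++, NULL, 0);
	}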
static s32
-wl_notify_roaming_status(struct wl_priv *wl, bcm_struct_cfgdev *cfgdev,
+wl_notify_roaming_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
const wl_event_msg_t *e, void *data)
{
bool act;
s32 err = 0;
u32 event = be32_to_cpu(e->event_type);
u32 status = be32_to_cpu(e->status);
-
WL_DBG(("Enter \n"));
- ndev = cfgdev_to_wlc_ndev(cfgdev, wl);
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+ if ((!cfg->disable_roam_event) && (event == WLC_E_BSSID)) {
+ wl_add_remove_eventmsg(ndev, WLC_E_ROAM, false);
+ cfg->disable_roam_event = TRUE;
+ }
+
+ if ((cfg->disable_roam_event) && (event == WLC_E_ROAM))
+ return err;
- if (event == WLC_E_ROAM && status == WLC_E_STATUS_SUCCESS) {
- if (wl_get_drv_status(wl, CONNECTED, ndev))
- wl_bss_roaming_done(wl, ndev, e, data);
+ if ((event == WLC_E_ROAM || event == WLC_E_BSSID) && status == WLC_E_STATUS_SUCCESS) {
+ if (wl_get_drv_status(cfg, CONNECTED, ndev))
+ wl_bss_roaming_done(cfg, ndev, e, data);
else
- wl_bss_connect_done(wl, ndev, e, data, true);
+ wl_bss_connect_done(cfg, ndev, e, data, true);
act = true;
- wl_update_prof(wl, ndev, e, &act, WL_PROF_ACT);
- wl_update_prof(wl, ndev, NULL, (void *)&e->addr, WL_PROF_BSSID);
- dhd_conf_set_wme((dhd_pub_t *)wl->pub);
+ wl_update_prof(cfg, ndev, e, &act, WL_PROF_ACT);
+ wl_update_prof(cfg, ndev, NULL, (void *)&e->addr, WL_PROF_BSSID);
+ dhd_conf_set_wme((dhd_pub_t *)cfg->pub);
}
return err;
}
-static s32 wl_get_assoc_ies(struct wl_priv *wl, struct net_device *ndev)
+static s32 wl_get_assoc_ies(struct bcm_cfg80211 *cfg, struct net_device *ndev)
{
wl_assoc_info_t assoc_info;
- struct wl_connect_info *conn_info = wl_to_conn(wl);
+ struct wl_connect_info *conn_info = wl_to_conn(cfg);
s32 err = 0;
WL_DBG(("Enter \n"));
- err = wldev_iovar_getbuf(ndev, "assoc_info", NULL, 0, wl->extra_buf,
+ err = wldev_iovar_getbuf(ndev, "assoc_info", NULL, 0, cfg->extra_buf,
WL_ASSOC_INFO_MAX, NULL);
if (unlikely(err)) {
WL_ERR(("could not get assoc info (%d)\n", err));
return err;
}
- memcpy(&assoc_info, wl->extra_buf, sizeof(wl_assoc_info_t));
+ memcpy(&assoc_info, cfg->extra_buf, sizeof(wl_assoc_info_t));
assoc_info.req_len = htod32(assoc_info.req_len);
assoc_info.resp_len = htod32(assoc_info.resp_len);
assoc_info.flags = htod32(assoc_info.flags);
bzero(conn_info->resp_ie, sizeof(conn_info->resp_ie));
}
if (assoc_info.req_len) {
- err = wldev_iovar_getbuf(ndev, "assoc_req_ies", NULL, 0, wl->extra_buf,
+ err = wldev_iovar_getbuf(ndev, "assoc_req_ies", NULL, 0, cfg->extra_buf,
WL_ASSOC_INFO_MAX, NULL);
if (unlikely(err)) {
WL_ERR(("could not get assoc req (%d)\n", err));
conn_info->req_ie_len -= ETHER_ADDR_LEN;
}
if (conn_info->req_ie_len <= MAX_REQ_LINE)
- memcpy(conn_info->req_ie, wl->extra_buf, conn_info->req_ie_len);
+ memcpy(conn_info->req_ie, cfg->extra_buf, conn_info->req_ie_len);
else {
WL_ERR(("IE size %d above max %d size \n",
conn_info->req_ie_len, MAX_REQ_LINE));
conn_info->req_ie_len = 0;
}
if (assoc_info.resp_len) {
- err = wldev_iovar_getbuf(ndev, "assoc_resp_ies", NULL, 0, wl->extra_buf,
+ err = wldev_iovar_getbuf(ndev, "assoc_resp_ies", NULL, 0, cfg->extra_buf,
WL_ASSOC_INFO_MAX, NULL);
if (unlikely(err)) {
WL_ERR(("could not get assoc resp (%d)\n", err));
}
conn_info->resp_ie_len = assoc_info.resp_len -sizeof(struct dot11_assoc_resp);
if (conn_info->resp_ie_len <= MAX_REQ_LINE)
- memcpy(conn_info->resp_ie, wl->extra_buf, conn_info->resp_ie_len);
+ memcpy(conn_info->resp_ie, cfg->extra_buf, conn_info->resp_ie_len);
else {
WL_ERR(("IE size %d above max %d size \n",
conn_info->resp_ie_len, MAX_REQ_LINE));
}
}
-static s32 wl_update_bss_info(struct wl_priv *wl, struct net_device *ndev)
+static s32 wl_update_bss_info(struct bcm_cfg80211 *cfg, struct net_device *ndev, bool roam)
{
struct cfg80211_bss *bss;
struct wl_bss_info *bi;
u8 *curbssid;
s32 err = 0;
struct wiphy *wiphy;
+ u32 channel;
- wiphy = wl_to_wiphy(wl);
+ wiphy = bcmcfg_to_wiphy(cfg);
- ssid = (struct wlc_ssid *)wl_read_prof(wl, ndev, WL_PROF_SSID);
- curbssid = wl_read_prof(wl, ndev, WL_PROF_BSSID);
+ ssid = (struct wlc_ssid *)wl_read_prof(cfg, ndev, WL_PROF_SSID);
+ curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
bss = cfg80211_get_bss(wiphy, NULL, curbssid,
ssid->SSID, ssid->SSID_len, WLAN_CAPABILITY_ESS,
WLAN_CAPABILITY_ESS);
- mutex_lock(&wl->usr_sync);
+ mutex_lock(&cfg->usr_sync);
+
+ *(u32 *) cfg->extra_buf = htod32(WL_EXTRA_BUF_MAX);
+ err = wldev_ioctl(ndev, WLC_GET_BSS_INFO,
+ cfg->extra_buf, WL_EXTRA_BUF_MAX, false);
+ if (unlikely(err)) {
+ WL_ERR(("Could not get bss info %d\n", err));
+ goto update_bss_info_out;
+ }
+ bi = (struct wl_bss_info *)(cfg->extra_buf + 4);
+ channel = wf_chspec_ctlchan(wl_chspec_driver_to_host(bi->chanspec));
+ wl_update_prof(cfg, ndev, NULL, &channel, WL_PROF_CHAN);
+
if (!bss) {
WL_DBG(("Could not find the AP\n"));
- *(u32 *) wl->extra_buf = htod32(WL_EXTRA_BUF_MAX);
- err = wldev_ioctl(ndev, WLC_GET_BSS_INFO,
- wl->extra_buf, WL_EXTRA_BUF_MAX, false);
- if (unlikely(err)) {
- WL_ERR(("Could not get bss info %d\n", err));
- goto update_bss_info_out;
- }
- bi = (struct wl_bss_info *)(wl->extra_buf + 4);
if (memcmp(bi->BSSID.octet, curbssid, ETHER_ADDR_LEN)) {
WL_ERR(("Bssid doesn't match\n"));
err = -EIO;
goto update_bss_info_out;
}
- err = wl_inform_single_bss(wl, bi);
+ err = wl_inform_single_bss(cfg, bi, roam);
if (unlikely(err))
goto update_bss_info_out;
}
}
- wl_update_prof(wl, ndev, NULL, &beacon_interval, WL_PROF_BEACONINT);
- wl_update_prof(wl, ndev, NULL, &dtim_period, WL_PROF_DTIMPERIOD);
+ wl_update_prof(cfg, ndev, NULL, &beacon_interval, WL_PROF_BEACONINT);
+ wl_update_prof(cfg, ndev, NULL, &dtim_period, WL_PROF_DTIMPERIOD);
update_bss_info_out:
if (unlikely(err)) {
WL_ERR(("Failed with error %d\n", err));
}
- mutex_unlock(&wl->usr_sync);
+ mutex_unlock(&cfg->usr_sync);
return err;
}
static s32
-wl_bss_roaming_done(struct wl_priv *wl, struct net_device *ndev,
+wl_bss_roaming_done(struct bcm_cfg80211 *cfg, struct net_device *ndev,
const wl_event_msg_t *e, void *data)
{
- struct wl_connect_info *conn_info = wl_to_conn(wl);
+ struct wl_connect_info *conn_info = wl_to_conn(cfg);
s32 err = 0;
u8 *curbssid;
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) || defined(WL_COMPAT_WIRELESS)
- struct wl_bss_info *bss_info;
- struct wiphy *wiphy = wl_to_wiphy(wl);
+ struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
struct ieee80211_supported_band *band;
struct ieee80211_channel *notify_channel = NULL;
- u8 *buf;
- u16 channel;
+ u32 *channel;
u32 freq;
#endif /* LINUX_VERSION > 2.6.39 || WL_COMPAT_WIRELESS */
- wl_get_assoc_ies(wl, ndev);
- wl_update_prof(wl, ndev, NULL, (void *)(e->addr.octet), WL_PROF_BSSID);
- curbssid = wl_read_prof(wl, ndev, WL_PROF_BSSID);
- wl_update_bss_info(wl, ndev);
- wl_update_pmklist(ndev, wl->pmk_list, err);
+#ifdef WLFBT
+ uint32 data_len = 0;
+ if (data)
+ data_len = ntoh32(e->datalen);
+#endif /* WLFBT */
+
+ wl_get_assoc_ies(cfg, ndev);
+ wl_update_prof(cfg, ndev, NULL, (void *)(e->addr.octet), WL_PROF_BSSID);
+ curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
+ wl_update_bss_info(cfg, ndev, true);
+ wl_update_pmklist(ndev, cfg->pmk_list, err);
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) || defined(WL_COMPAT_WIRELESS)
/* channel info for cfg80211_roamed introduced in 2.6.39-rc1 */
- buf = kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL);
- if (!buf)
- goto done;
-
- *(__le32 *)buf = htod32(WL_EXTRA_BUF_MAX);
- err = wldev_ioctl(ndev, WLC_GET_BSS_INFO, buf, WL_EXTRA_BUF_MAX, false);
- if (err)
- goto done;
-
- bss_info = (struct wl_bss_info *)(buf + 4);
- channel = bss_info->ctl_ch ? bss_info->ctl_ch :
- CHSPEC_CHANNEL(wl_chspec_driver_to_host(bss_info->chanspec));
- if (channel <= CH_MAX_2G_CHANNEL)
+ channel = (u32 *)wl_read_prof(cfg, ndev, WL_PROF_CHAN);
+ if (*channel <= CH_MAX_2G_CHANNEL)
band = wiphy->bands[IEEE80211_BAND_2GHZ];
else
band = wiphy->bands[IEEE80211_BAND_5GHZ];
- freq = ieee80211_channel_to_frequency(channel, band->band);
+ freq = ieee80211_channel_to_frequency(*channel, band->band);
notify_channel = ieee80211_get_channel(wiphy, freq);
-done:
- kfree(buf);
#endif /* LINUX_VERSION > 2.6.39 || WL_COMPAT_WIRELESS */
-
+#ifdef WLFBT
+	/* Back up the FBT key delivered with this event for a later supplicant
+	 * request. DHD does not check whether FBT is enabled for the current
+	 * BSS; the supplicant decides whether to use the key.
+	 */
+ if (data && (data_len == FBT_KEYLEN)) {
+ memcpy(cfg->fbt_key, data, FBT_KEYLEN);
+ }
+#endif /* WLFBT */
printk("wl_bss_roaming_done succeeded to " MACDBG "\n",
MAC2STRDBG((u8*)(&e->addr)));
- dhd_conf_set_wme((dhd_pub_t *)wl->pub);
+ dhd_conf_set_wme((dhd_pub_t *)cfg->pub);
cfg80211_roamed(ndev,
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 39)) || defined(WL_COMPAT_WIRELESS)
conn_info->resp_ie, conn_info->resp_ie_len, GFP_KERNEL);
WL_DBG(("Report roaming result\n"));
- wl_set_drv_status(wl, CONNECTED, ndev);
+ wl_set_drv_status(cfg, CONNECTED, ndev);
return err;
}
static s32
-wl_bss_connect_done(struct wl_priv *wl, struct net_device *ndev,
+wl_bss_connect_done(struct bcm_cfg80211 *cfg, struct net_device *ndev,
const wl_event_msg_t *e, void *data, bool completed)
{
- struct wl_connect_info *conn_info = wl_to_conn(wl);
- struct wl_security *sec = wl_read_prof(wl, ndev, WL_PROF_SEC);
+ struct wl_connect_info *conn_info = wl_to_conn(cfg);
+ struct wl_security *sec = wl_read_prof(cfg, ndev, WL_PROF_SEC);
+#if defined(CUSTOM_SET_CPUCORE)
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+#endif
s32 err = 0;
- u8 *curbssid = wl_read_prof(wl, ndev, WL_PROF_BSSID);
+ u8 *curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
if (!sec) {
WL_ERR(("sec is NULL\n"));
return -ENODEV;
}
WL_DBG((" enter\n"));
#ifdef ESCAN_RESULT_PATCH
- if (wl_get_drv_status(wl, CONNECTED, ndev)) {
+ if (wl_get_drv_status(cfg, CONNECTED, ndev)) {
if (memcmp(curbssid, connect_req_bssid, ETHER_ADDR_LEN) == 0) {
WL_DBG((" Connected event of connected device e=%d s=%d, ignore it\n",
ntoh32(e->event_type), ntoh32(e->status)));
}
#else
- if (wl->scan_request) {
- wl_notify_escan_complete(wl, ndev, true, true);
+ if (cfg->scan_request) {
+ wl_notify_escan_complete(cfg, ndev, true, true);
}
#endif /* ESCAN_RESULT_PATCH */
- if (wl_get_drv_status(wl, CONNECTING, ndev)) {
- wl_cfg80211_scan_abort(wl);
- wl_clr_drv_status(wl, CONNECTING, ndev);
+ if (wl_get_drv_status(cfg, CONNECTING, ndev)) {
+ wl_cfg80211_scan_abort(cfg);
+ wl_clr_drv_status(cfg, CONNECTING, ndev);
if (completed) {
- wl_get_assoc_ies(wl, ndev);
- wl_update_prof(wl, ndev, NULL, (void *)(e->addr.octet), WL_PROF_BSSID);
- curbssid = wl_read_prof(wl, ndev, WL_PROF_BSSID);
- wl_update_bss_info(wl, ndev);
- wl_update_pmklist(ndev, wl->pmk_list, err);
- wl_set_drv_status(wl, CONNECTED, ndev);
- if (ndev != wl_to_prmry_ndev(wl)) {
+ wl_get_assoc_ies(cfg, ndev);
+ wl_update_prof(cfg, ndev, NULL, (void *)(e->addr.octet), WL_PROF_BSSID);
+ curbssid = wl_read_prof(cfg, ndev, WL_PROF_BSSID);
+ wl_update_bss_info(cfg, ndev, false);
+ wl_update_pmklist(ndev, cfg->pmk_list, err);
+ wl_set_drv_status(cfg, CONNECTED, ndev);
+ if (ndev != bcmcfg_to_prmry_ndev(cfg)) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)
+ init_completion(&cfg->iface_disable);
+#else
/* reinitialize completion to clear previous count */
- INIT_COMPLETION(wl->iface_disable);
+ INIT_COMPLETION(cfg->iface_disable);
+#endif
+ }
+#ifdef CUSTOM_SET_CPUCORE
+ if (wl_get_chan_isvht80(ndev, dhd)) {
+ if (ndev == bcmcfg_to_prmry_ndev(cfg))
+ dhd->chan_isvht80 |= DHD_FLAG_STA_MODE; /* STA mode */
+ else if (ndev == wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION))
+ dhd->chan_isvht80 |= DHD_FLAG_P2P_MODE; /* p2p mode */
+ dhd_set_cpucore(dhd, TRUE);
}
+#endif /* CUSTOM_SET_CPUCORE */
+
}
cfg80211_connect_result(ndev,
curbssid,
WLAN_STATUS_UNSPECIFIED_FAILURE,
GFP_KERNEL);
if (completed) {
- WL_INFO(("Report connect result - connection succeeded\n"));
- dhd_conf_set_wme((dhd_pub_t *)wl->pub);
+ WL_INFORM(("Report connect result - connection succeeded\n"));
+ dhd_conf_set_wme((dhd_pub_t *)cfg->pub);
} else
WL_ERR(("Report connect result - connection failed\n"));
}
+#ifdef CONFIG_TCPACK_FASTTX
+ if (wl_get_chan_isvht80(ndev, dhd))
+ wldev_iovar_setint(ndev, "tcpack_fast_tx", 0);
+ else
+ wldev_iovar_setint(ndev, "tcpack_fast_tx", 1);
+#endif /* CONFIG_TCPACK_FASTTX */
+
return err;
}
static s32
-wl_notify_mic_status(struct wl_priv *wl, bcm_struct_cfgdev *cfgdev,
+wl_notify_mic_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
const wl_event_msg_t *e, void *data)
{
struct net_device *ndev = NULL;
u16 flags = ntoh16(e->flags);
enum nl80211_key_type key_type;
- ndev = cfgdev_to_wlc_ndev(cfgdev, wl);
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
- mutex_lock(&wl->usr_sync);
+ mutex_lock(&cfg->usr_sync);
if (flags & WLC_EVENT_MSG_GROUP)
key_type = NL80211_KEYTYPE_GROUP;
else
cfg80211_michael_mic_failure(ndev, (u8 *)&e->addr, key_type, -1,
NULL, GFP_KERNEL);
- mutex_unlock(&wl->usr_sync);
+ mutex_unlock(&cfg->usr_sync);
return 0;
}
+#ifdef BT_WIFI_HANDOVER
+static s32
+wl_notify_bt_wifi_handover_req(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
+ const wl_event_msg_t *e, void *data)
+{
+ struct net_device *ndev = NULL;
+ u32 event = ntoh32(e->event_type);
+ u32 datalen = ntoh32(e->datalen);
+ s32 err;
+
+ WL_ERR(("wl_notify_bt_wifi_handover_req: event_type : %d, datalen : %d\n", event, datalen));
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+ err = wl_genl_send_msg(ndev, event, data, (u16)datalen, 0, 0);
+
+ return err;
+}
+#endif /* BT_WIFI_HANDOVER */
+
#ifdef PNO_SUPPORT
static s32
-wl_notify_pfn_status(struct wl_priv *wl, bcm_struct_cfgdev *cfgdev,
+wl_notify_pfn_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
const wl_event_msg_t *e, void *data)
{
struct net_device *ndev = NULL;
WL_ERR((">>> PNO Event\n"));
- ndev = cfgdev_to_wlc_ndev(cfgdev, wl);
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
#ifndef WL_SCHED_SCAN
- mutex_lock(&wl->usr_sync);
+ mutex_lock(&cfg->usr_sync);
/* TODO: Use cfg80211_sched_scan_results(wiphy); */
cfg80211_disconnected(ndev, 0, NULL, 0, GFP_KERNEL);
- mutex_unlock(&wl->usr_sync);
+ mutex_unlock(&cfg->usr_sync);
#else
/* If cfg80211 scheduled scan is supported, report the pno results via sched
* scan results
*/
- wl_notify_sched_scan_results(wl, ndev, e, data);
+ wl_notify_sched_scan_results(cfg, ndev, e, data);
#endif /* WL_SCHED_SCAN */
return 0;
}
#endif /* PNO_SUPPORT */
static s32
-wl_notify_scan_status(struct wl_priv *wl, bcm_struct_cfgdev *cfgdev,
+wl_notify_scan_status(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
const wl_event_msg_t *e, void *data)
{
struct channel_info channel_inform;
unsigned long flags;
WL_DBG(("Enter \n"));
- if (!wl_get_drv_status(wl, SCANNING, ndev)) {
+ if (!wl_get_drv_status(cfg, SCANNING, ndev)) {
WL_ERR(("scan is not ready \n"));
return err;
}
- if (wl->iscan_on && wl->iscan_kickstart)
- return wl_wakeup_iscan(wl_to_iscan(wl));
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
- ndev = cfgdev_to_wlc_ndev(cfgdev, wl);
-
- mutex_lock(&wl->usr_sync);
- wl_clr_drv_status(wl, SCANNING, ndev);
+ mutex_lock(&cfg->usr_sync);
+ wl_clr_drv_status(cfg, SCANNING, ndev);
err = wldev_ioctl(ndev, WLC_GET_CHANNEL, &channel_inform,
sizeof(channel_inform), false);
if (unlikely(err)) {
WL_DBG(("channel_inform.scan_channel (%d)\n",
channel_inform.scan_channel));
}
- wl->bss_list = wl->scan_results;
- bss_list = wl->bss_list;
+ cfg->bss_list = cfg->scan_results;
+ bss_list = cfg->bss_list;
memset(bss_list, 0, len);
bss_list->buflen = htod32(len);
err = wldev_ioctl(ndev, WLC_SCAN_RESULTS, bss_list, len, false);
- if (unlikely(err) && unlikely(!wl->scan_suppressed)) {
+ if (unlikely(err) && unlikely(!cfg->scan_suppressed)) {
WL_ERR(("%s Scan_results error (%d)\n", ndev->name, err));
err = -EINVAL;
goto scan_done_out;
bss_list->version = dtoh32(bss_list->version);
bss_list->count = dtoh32(bss_list->count);
- err = wl_inform_bss(wl);
+ err = wl_inform_bss(cfg);
scan_done_out:
- del_timer_sync(&wl->scan_timeout);
- spin_lock_irqsave(&wl->cfgdrv_lock, flags);
- if (wl->scan_request) {
- cfg80211_scan_done(wl->scan_request, false);
- wl->scan_request = NULL;
+ del_timer_sync(&cfg->scan_timeout);
+ spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+ if (cfg->scan_request) {
+ cfg80211_scan_done(cfg->scan_request, false);
+ cfg->scan_request = NULL;
}
- spin_unlock_irqrestore(&wl->cfgdrv_lock, flags);
+ spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
WL_DBG(("cfg80211_scan_done\n"));
- mutex_unlock(&wl->usr_sync);
+ mutex_unlock(&cfg->usr_sync);
return err;
}
void
-wl_stop_wait_next_action_frame(struct wl_priv *wl)
+wl_stop_wait_next_action_frame(struct bcm_cfg80211 *cfg, struct net_device *ndev)
{
- if (wl_get_drv_status_all(wl, SENDING_ACT_FRM)) {
- if (!(wl_get_p2p_status(wl, ACTION_TX_COMPLETED) ||
- wl_get_p2p_status(wl, ACTION_TX_NOACK)))
- wl_set_p2p_status(wl, ACTION_TX_COMPLETED);
+ if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
+ if (timer_pending(&cfg->p2p->listen_timer)) {
+ del_timer_sync(&cfg->p2p->listen_timer);
+ }
+ if (cfg->afx_hdl != NULL) {
+ if (cfg->afx_hdl->dev != NULL) {
+ wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev);
+ wl_clr_drv_status(cfg, FINDING_COMMON_CHANNEL, cfg->afx_hdl->dev);
+ }
+ cfg->afx_hdl->peer_chan = WL_INVALID;
+ }
+ complete(&cfg->act_frm_scan);
+ WL_DBG(("*** Wake UP ** Working afx searching is cleared\n"));
+ } else if (wl_get_drv_status_all(cfg, SENDING_ACT_FRM)) {
+ if (!(wl_get_p2p_status(cfg, ACTION_TX_COMPLETED) ||
+ wl_get_p2p_status(cfg, ACTION_TX_NOACK)))
+ wl_set_p2p_status(cfg, ACTION_TX_COMPLETED);
WL_DBG(("*** Wake UP ** abort actframe iovar\n"));
		/* If channel is not zero, "actframe" uses an off-channel scan,
		 * so abort the scan for off-channel completion.
		 */
- if (wl->af_sent_channel)
- wl_cfg80211_scan_abort(wl);
+ if (cfg->af_sent_channel)
+ wl_cfg80211_scan_abort(cfg);
}
#ifdef WL_CFG80211_SYNC_GON
- else if (wl_get_drv_status_all(wl, WAITING_NEXT_ACT_FRM_LISTEN)) {
+ else if (wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM_LISTEN)) {
WL_DBG(("*** Wake UP ** abort listen for next af frame\n"));
/* So abort scan to cancel listen */
- wl_cfg80211_scan_abort(wl);
+ wl_cfg80211_scan_abort(cfg);
}
#endif /* WL_CFG80211_SYNC_GON */
}
}
static s32
-wl_notify_rx_mgmt_frame(struct wl_priv *wl, bcm_struct_cfgdev *cfgdev,
+wl_notify_rx_mgmt_frame(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
const wl_event_msg_t *e, void *data)
{
struct ieee80211_supported_band *band;
- struct wiphy *wiphy = wl_to_wiphy(wl);
+ struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
struct ether_addr da;
struct ether_addr bssid;
bool isfree = false;
u8 bsscfgidx = e->bsscfgidx;
u32 mgmt_frame_len = ntoh32(e->datalen) - sizeof(wl_event_rx_frame_data_t);
u16 channel = ((ntoh16(rxframe->channel) & WL_CHANSPEC_CHAN_MASK));
+ bool retval;
memset(&bssid, 0, ETHER_ADDR_LEN);
- ndev = cfgdev_to_wlc_ndev(cfgdev, wl);
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
if (channel <= CH_MAX_2G_CHANNEL)
band = wiphy->bands[IEEE80211_BAND_2GHZ];
else
band = wiphy->bands[IEEE80211_BAND_5GHZ];
if (!band) {
- WL_DBG(("No valid band\n"));
+ WL_ERR(("No valid band\n"));
return -EINVAL;
}
#if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 38) && !defined(WL_COMPAT_WIRELESS)
#endif
if (event == WLC_E_ACTION_FRAME_RX) {
wldev_iovar_getbuf_bsscfg(ndev, "cur_etheraddr",
- NULL, 0, wl->ioctl_buf, WLC_IOCTL_SMLEN, bsscfgidx, &wl->ioctl_buf_sync);
+ NULL, 0, cfg->ioctl_buf, WLC_IOCTL_SMLEN, bsscfgidx, &cfg->ioctl_buf_sync);
err = wldev_ioctl(ndev, WLC_GET_BSSID, &bssid, ETHER_ADDR_LEN, false);
if (err < 0)
WL_ERR(("WLC_GET_BSSID error %d\n", err));
- memcpy(da.octet, wl->ioctl_buf, ETHER_ADDR_LEN);
+ memcpy(da.octet, cfg->ioctl_buf, ETHER_ADDR_LEN);
err = wl_frame_get_mgmt(FC_ACTION, &da, &e->addr, &bssid,
&mgmt_frame, &mgmt_frame_len,
(u8 *)((wl_event_rx_frame_data_t *)rxframe + 1));
} else if (wl_cfgp2p_is_gas_action(&mgmt_frame[DOT11_MGMT_HDR_LEN],
mgmt_frame_len - DOT11_MGMT_HDR_LEN)) {
#ifdef WL_SDO
- if (wl_get_p2p_status(wl, DISC_IN_PROGRESS)) {
+ if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS)) {
WL_ERR(("SD offload is in progress. Don't report the"
"frame via rx_mgmt path\n"));
goto exit;
sd_act_frm = (wifi_p2psd_gas_pub_act_frame_t *)
(&mgmt_frame[DOT11_MGMT_HDR_LEN]);
- if (sd_act_frm && wl_get_drv_status_all(wl, WAITING_NEXT_ACT_FRM)) {
- if (wl->next_af_subtype == sd_act_frm->action) {
+ if (sd_act_frm && wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM)) {
+ if (cfg->next_af_subtype == sd_act_frm->action) {
WL_DBG(("We got a right next frame of SD!(%d)\n",
sd_act_frm->action));
- wl_clr_drv_status(wl, WAITING_NEXT_ACT_FRM, ndev);
+ wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM, ndev);
/* Stop waiting for next AF. */
- wl_stop_wait_next_action_frame(wl);
+ wl_stop_wait_next_action_frame(cfg, ndev);
}
}
(void) sd_act_frm;
} else {
- /*
- * if we got normal action frame and ndev is p2p0,
- * we have to change ndev from p2p0 to wlan0
- */
-#if defined(WL_ENABLE_P2P_IF)
- if (wl->p2p_net == cfgdev)
- cfgdev = wl_to_prmry_ndev(wl);
-#endif /* WL_ENABLE_P2P_IF */
- if (wl->next_af_subtype != P2P_PAF_SUBTYPE_INVALID) {
+ if (cfg->next_af_subtype != P2P_PAF_SUBTYPE_INVALID) {
u8 action = 0;
if (wl_get_public_action(&mgmt_frame[DOT11_MGMT_HDR_LEN],
mgmt_frame_len - DOT11_MGMT_HDR_LEN, &action) != BCME_OK) {
WL_DBG(("Recived action is not public action frame\n"));
- } else if (wl->next_af_subtype == action) {
+ } else if (cfg->next_af_subtype == action) {
WL_DBG(("Recived action is the waiting action(%d)\n",
action));
- wl_clr_drv_status(wl, WAITING_NEXT_ACT_FRM, ndev);
+ wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM, ndev);
/* Stop waiting for next AF. */
- wl_stop_wait_next_action_frame(wl);
+ wl_stop_wait_next_action_frame(cfg, ndev);
}
}
}
if (act_frm) {
- if (wl_get_drv_status_all(wl, WAITING_NEXT_ACT_FRM)) {
- if (wl->next_af_subtype == act_frm->subtype) {
+ if (wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM)) {
+ if (cfg->next_af_subtype == act_frm->subtype) {
WL_DBG(("We got a right next frame!(%d)\n",
act_frm->subtype));
- wl_clr_drv_status(wl, WAITING_NEXT_ACT_FRM, ndev);
+ wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM, ndev);
+
+ if (cfg->next_af_subtype == P2P_PAF_GON_CONF) {
+ OSL_SLEEP(20);
+ }
/* Stop waiting for next AF. */
- wl_stop_wait_next_action_frame(wl);
+ wl_stop_wait_next_action_frame(cfg, ndev);
}
}
}
}
if (act_frm && (act_frm->subtype == P2P_PAF_GON_CONF)) {
WL_DBG(("P2P: GO_NEG_PHASE status cleared \n"));
- wl_clr_p2p_status(wl, GO_NEG_PHASE);
+ wl_clr_p2p_status(cfg, GO_NEG_PHASE);
}
+ } else if (event == WLC_E_PROBREQ_MSG) {
+
+		/* Handle probe request frames,
+		 * per WPS-AP certification 4.2.13
+		 */
+ struct parsed_ies prbreq_ies;
+ u32 prbreq_ie_len = 0;
+ bool pbc = 0;
+
+ WL_DBG((" Event WLC_E_PROBREQ_MSG received\n"));
+ mgmt_frame = (u8 *)(data);
+ mgmt_frame_len = ntoh32(e->datalen);
+
+ prbreq_ie_len = mgmt_frame_len - DOT11_MGMT_HDR_LEN;
+
+ /* Parse prob_req IEs */
+ if (wl_cfg80211_parse_ies(&mgmt_frame[DOT11_MGMT_HDR_LEN],
+ prbreq_ie_len, &prbreq_ies) < 0) {
+			WL_ERR(("Failed to parse probe req IEs\n"));
+ return 0;
+ }
+ if (prbreq_ies.wps_ie != NULL) {
+ wl_validate_wps_ie((char *)prbreq_ies.wps_ie, prbreq_ies.wps_ie_len, &pbc);
+			WL_DBG((" wps_ie exists, pbc = %d\n", pbc));
+			/* if PBC method, forward the probe_req mgmt frame to the upper layer */
+ if (!pbc)
+ return 0;
+ } else
+ return 0;
} else {
mgmt_frame = (u8 *)((wl_event_rx_frame_data_t *)rxframe + 1);
/* Filter any P2P probe reqs arriving during the
* GO-NEG Phase
*/
- if (wl->p2p &&
- wl_get_p2p_status(wl, GO_NEG_PHASE)) {
+ if (cfg->p2p &&
+ wl_get_p2p_status(cfg, GO_NEG_PHASE)) {
WL_DBG(("Filtering P2P probe_req while "
"being in GO-Neg state\n"));
return 0;
}
}
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || defined(WL_COMPAT_WIRELESS)
- cfg80211_rx_mgmt(cfgdev, freq, 0, mgmt_frame, mgmt_frame_len, GFP_ATOMIC);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0))
+ retval = cfg80211_rx_mgmt(cfgdev, freq, 0, mgmt_frame, mgmt_frame_len, 0, GFP_ATOMIC);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) || \
+ defined(WL_COMPAT_WIRELESS)
+ retval = cfg80211_rx_mgmt(cfgdev, freq, 0, mgmt_frame, mgmt_frame_len, GFP_ATOMIC);
#else
- cfg80211_rx_mgmt(cfgdev, freq, mgmt_frame, mgmt_frame_len, GFP_ATOMIC);
-#endif /* LINUX_VERSION >= VERSION(3, 4, 0) || WL_COMPAT_WIRELESS */
+ retval = cfg80211_rx_mgmt(cfgdev, freq, mgmt_frame, mgmt_frame_len, GFP_ATOMIC);
+#endif /* LINUX_VERSION >= VERSION(3, 12, 0) */
- WL_DBG(("mgmt_frame_len (%d) , e->datalen (%d), channel (%d), freq (%d)\n",
- mgmt_frame_len, ntoh32(e->datalen), channel, freq));
+ WL_DBG(("mgmt_frame_len (%d) , e->datalen (%d), channel (%d), freq (%d) retval (%d)\n",
+ mgmt_frame_len, ntoh32(e->datalen), channel, freq, retval));
exit:
if (isfree)
kfree(mgmt_frame);
*/
#define FULL_ESCAN_ON_PFN_NET_FOUND 0
static s32
-wl_notify_sched_scan_results(struct wl_priv *wl, struct net_device *ndev,
+wl_notify_sched_scan_results(struct bcm_cfg80211 *cfg, struct net_device *ndev,
const wl_event_msg_t *e, void *data)
{
wl_pfn_net_info_t *netinfo, *pnetinfo;
- struct wiphy *wiphy = wl_to_wiphy(wl);
+ struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
int err = 0;
struct cfg80211_scan_request *request = NULL;
struct cfg80211_ssid ssid[MAX_PFN_LIST_COUNT];
if (request->n_ssids)
request->ssids = &ssid[0];
- if (wl_get_drv_status_all(wl, SCANNING)) {
+ if (wl_get_drv_status_all(cfg, SCANNING)) {
/* Abort any on-going scan */
- wl_notify_escan_complete(wl, ndev, true, true);
+ wl_notify_escan_complete(cfg, ndev, true, true);
}
- if (wl_get_p2p_status(wl, DISCOVERY_ON)) {
+ if (wl_get_p2p_status(cfg, DISCOVERY_ON)) {
WL_PNO((">>> P2P discovery was ON. Disabling it\n"));
- err = wl_cfgp2p_discover_enable_search(wl, false);
+ err = wl_cfgp2p_discover_enable_search(cfg, false);
if (unlikely(err)) {
- wl_clr_drv_status(wl, SCANNING, ndev);
+ wl_clr_drv_status(cfg, SCANNING, ndev);
goto out_err;
}
+ p2p_scan(cfg) = false;
}
- wl_set_drv_status(wl, SCANNING, ndev);
+ wl_set_drv_status(cfg, SCANNING, ndev);
#if FULL_ESCAN_ON_PFN_NET_FOUND
WL_PNO((">>> Doing Full ESCAN on PNO event\n"));
- err = wl_do_escan(wl, wiphy, ndev, NULL);
+ err = wl_do_escan(cfg, wiphy, ndev, NULL);
#else
WL_PNO((">>> Doing targeted ESCAN on PNO event\n"));
- err = wl_do_escan(wl, wiphy, ndev, request);
+ err = wl_do_escan(cfg, wiphy, ndev, request);
#endif
if (err) {
- wl_clr_drv_status(wl, SCANNING, ndev);
+ wl_clr_drv_status(cfg, SCANNING, ndev);
goto out_err;
}
- wl->sched_scan_running = TRUE;
+ cfg->sched_scan_running = TRUE;
}
else {
WL_ERR(("FALSE PNO Event. (pfn_count == 0) \n"));
conf->tx_power = -1;
}
-static void wl_init_prof(struct wl_priv *wl, struct net_device *ndev)
+static void wl_init_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev)
{
unsigned long flags;
- struct wl_profile *profile = wl_get_profile_by_netdev(wl, ndev);
+ struct wl_profile *profile = wl_get_profile_by_netdev(cfg, ndev);
- spin_lock_irqsave(&wl->cfgdrv_lock, flags);
+ spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
memset(profile, 0, sizeof(struct wl_profile));
- spin_unlock_irqrestore(&wl->cfgdrv_lock, flags);
-}
-
-static void wl_init_event_handler(struct wl_priv *wl)
-{
- memset(wl->evt_handler, 0, sizeof(wl->evt_handler));
-
- wl->evt_handler[WLC_E_SCAN_COMPLETE] = wl_notify_scan_status;
- wl->evt_handler[WLC_E_AUTH] = wl_notify_connect_status;
- wl->evt_handler[WLC_E_ASSOC] = wl_notify_connect_status;
- wl->evt_handler[WLC_E_LINK] = wl_notify_connect_status;
- wl->evt_handler[WLC_E_DEAUTH_IND] = wl_notify_connect_status;
- wl->evt_handler[WLC_E_DEAUTH] = wl_notify_connect_status;
- wl->evt_handler[WLC_E_DISASSOC_IND] = wl_notify_connect_status;
- wl->evt_handler[WLC_E_ASSOC_IND] = wl_notify_connect_status;
- wl->evt_handler[WLC_E_REASSOC_IND] = wl_notify_connect_status;
- wl->evt_handler[WLC_E_ROAM] = wl_notify_roaming_status;
- wl->evt_handler[WLC_E_MIC_ERROR] = wl_notify_mic_status;
- wl->evt_handler[WLC_E_SET_SSID] = wl_notify_connect_status;
- wl->evt_handler[WLC_E_ACTION_FRAME_RX] = wl_notify_rx_mgmt_frame;
- wl->evt_handler[WLC_E_PROBREQ_MSG] = wl_notify_rx_mgmt_frame;
- wl->evt_handler[WLC_E_P2P_PROBREQ_MSG] = wl_notify_rx_mgmt_frame;
- wl->evt_handler[WLC_E_P2P_DISC_LISTEN_COMPLETE] = wl_cfgp2p_listen_complete;
- wl->evt_handler[WLC_E_ACTION_FRAME_COMPLETE] = wl_cfgp2p_action_tx_complete;
- wl->evt_handler[WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE] = wl_cfgp2p_action_tx_complete;
- wl->evt_handler[WLC_E_JOIN] = wl_notify_connect_status;
- wl->evt_handler[WLC_E_START] = wl_notify_connect_status;
+ spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+}
+
+static void wl_init_event_handler(struct bcm_cfg80211 *cfg)
+{
+ memset(cfg->evt_handler, 0, sizeof(cfg->evt_handler));
+
+ cfg->evt_handler[WLC_E_SCAN_COMPLETE] = wl_notify_scan_status;
+ cfg->evt_handler[WLC_E_AUTH] = wl_notify_connect_status;
+ cfg->evt_handler[WLC_E_ASSOC] = wl_notify_connect_status;
+ cfg->evt_handler[WLC_E_LINK] = wl_notify_connect_status;
+ cfg->evt_handler[WLC_E_DEAUTH_IND] = wl_notify_connect_status;
+ cfg->evt_handler[WLC_E_DEAUTH] = wl_notify_connect_status;
+ cfg->evt_handler[WLC_E_DISASSOC_IND] = wl_notify_connect_status;
+ cfg->evt_handler[WLC_E_ASSOC_IND] = wl_notify_connect_status;
+ cfg->evt_handler[WLC_E_REASSOC_IND] = wl_notify_connect_status;
+ cfg->evt_handler[WLC_E_ROAM] = wl_notify_roaming_status;
+ cfg->evt_handler[WLC_E_MIC_ERROR] = wl_notify_mic_status;
+ cfg->evt_handler[WLC_E_SET_SSID] = wl_notify_connect_status;
+ cfg->evt_handler[WLC_E_ACTION_FRAME_RX] = wl_notify_rx_mgmt_frame;
+ cfg->evt_handler[WLC_E_PROBREQ_MSG] = wl_notify_rx_mgmt_frame;
+ cfg->evt_handler[WLC_E_P2P_PROBREQ_MSG] = wl_notify_rx_mgmt_frame;
+ cfg->evt_handler[WLC_E_P2P_DISC_LISTEN_COMPLETE] = wl_cfgp2p_listen_complete;
+ cfg->evt_handler[WLC_E_ACTION_FRAME_COMPLETE] = wl_cfgp2p_action_tx_complete;
+ cfg->evt_handler[WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE] = wl_cfgp2p_action_tx_complete;
+ cfg->evt_handler[WLC_E_JOIN] = wl_notify_connect_status;
+ cfg->evt_handler[WLC_E_START] = wl_notify_connect_status;
#ifdef PNO_SUPPORT
- wl->evt_handler[WLC_E_PFN_NET_FOUND] = wl_notify_pfn_status;
+ cfg->evt_handler[WLC_E_PFN_NET_FOUND] = wl_notify_pfn_status;
#endif /* PNO_SUPPORT */
#ifdef WL_SDO
- wl->evt_handler[WLC_E_SERVICE_FOUND] = wl_svc_resp_handler;
- wl->evt_handler[WLC_E_P2PO_ADD_DEVICE] = wl_notify_device_discovery;
- wl->evt_handler[WLC_E_P2PO_DEL_DEVICE] = wl_notify_device_discovery;
+ cfg->evt_handler[WLC_E_SERVICE_FOUND] = wl_svc_resp_handler;
+ cfg->evt_handler[WLC_E_P2PO_ADD_DEVICE] = wl_notify_device_discovery;
+ cfg->evt_handler[WLC_E_P2PO_DEL_DEVICE] = wl_notify_device_discovery;
#endif
#ifdef WLTDLS
- wl->evt_handler[WLC_E_TDLS_PEER_EVENT] = wl_tdls_event_handler;
+ cfg->evt_handler[WLC_E_TDLS_PEER_EVENT] = wl_tdls_event_handler;
#endif /* WLTDLS */
+ cfg->evt_handler[WLC_E_BSSID] = wl_notify_roaming_status;
+#ifdef WLAIBSS
+ cfg->evt_handler[WLC_E_AIBSS_TXFAIL] = wl_notify_aibss_txfail;
+#endif /* WLAIBSS */
+#ifdef BT_WIFI_HANDOVER
+ cfg->evt_handler[WLC_E_BT_WIFI_HANDOVER_REQ] = wl_notify_bt_wifi_handover_req;
+#endif
+#ifdef WL_NAN
+ cfg->evt_handler[WLC_E_NAN] = wl_cfgnan_notify_nan_status;
+ cfg->evt_handler[WLC_E_PROXD] = wl_cfgnan_notify_proxd_status;
+#endif /* WL_NAN */
+ cfg->evt_handler[WLC_E_RMC_EVENT] = wl_notify_rmc_status;
}
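For reference, the assignments above populate a per-event dispatch table: the driver's event
path looks up the firmware event type in cfg->evt_handler[] and invokes the registered callback
with the originating cfgdev. A minimal sketch of that lookup step, illustrative only
(example_dispatch_event is a hypothetical name; it assumes the table is indexed up to WLC_E_LAST
and that events without a registered handler are silently ignored):

	static void example_dispatch_event(struct bcm_cfg80211 *cfg,
		bcm_struct_cfgdev *cfgdev, const wl_event_msg_t *e, void *data)
	{
		u32 event_type = ntoh32(e->event_type);

		/* bounds-check the event type against the handler table before indexing */
		if (event_type >= WLC_E_LAST)
			return;

		/* call the handler installed by wl_init_event_handler(), if any */
		if (cfg->evt_handler[event_type])
			(void)cfg->evt_handler[event_type](cfg, cfgdev, e, data);
	}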
#if defined(STATIC_WL_PRIV_STRUCT)
static void
-wl_init_escan_result_buf(struct wl_priv *wl)
+wl_init_escan_result_buf(struct bcm_cfg80211 *cfg)
{
- wl->escan_info.escan_buf = dhd_os_prealloc(NULL, DHD_PREALLOC_WIPHY_ESCAN0, 0);
- bzero(wl->escan_info.escan_buf, ESCAN_BUF_SIZE);
+ cfg->escan_info.escan_buf = DHD_OS_PREALLOC(cfg->pub,
+ DHD_PREALLOC_WIPHY_ESCAN0, ESCAN_BUF_SIZE);
+ bzero(cfg->escan_info.escan_buf, ESCAN_BUF_SIZE);
}
static void
-wl_deinit_escan_result_buf(struct wl_priv *wl)
+wl_deinit_escan_result_buf(struct bcm_cfg80211 *cfg)
{
- wl->escan_info.escan_buf = NULL;
+ cfg->escan_info.escan_buf = NULL;
}
#endif /* STATIC_WL_PRIV_STRUCT */
-static s32 wl_init_priv_mem(struct wl_priv *wl)
+static s32 wl_init_priv_mem(struct bcm_cfg80211 *cfg)
{
WL_DBG(("Enter \n"));
- wl->scan_results = (void *)kzalloc(WL_SCAN_BUF_MAX, GFP_KERNEL);
- if (unlikely(!wl->scan_results)) {
+ cfg->scan_results = (void *)kzalloc(WL_SCAN_BUF_MAX, GFP_KERNEL);
+ if (unlikely(!cfg->scan_results)) {
WL_ERR(("Scan results alloc failed\n"));
goto init_priv_mem_out;
}
- wl->conf = (void *)kzalloc(sizeof(*wl->conf), GFP_KERNEL);
- if (unlikely(!wl->conf)) {
+ cfg->conf = (void *)kzalloc(sizeof(*cfg->conf), GFP_KERNEL);
+ if (unlikely(!cfg->conf)) {
WL_ERR(("wl_conf alloc failed\n"));
goto init_priv_mem_out;
}
- wl->scan_req_int =
- (void *)kzalloc(sizeof(*wl->scan_req_int), GFP_KERNEL);
- if (unlikely(!wl->scan_req_int)) {
+ cfg->scan_req_int =
+ (void *)kzalloc(sizeof(*cfg->scan_req_int), GFP_KERNEL);
+ if (unlikely(!cfg->scan_req_int)) {
WL_ERR(("Scan req alloc failed\n"));
goto init_priv_mem_out;
}
- wl->ioctl_buf = (void *)kzalloc(WLC_IOCTL_MAXLEN, GFP_KERNEL);
- if (unlikely(!wl->ioctl_buf)) {
+ cfg->ioctl_buf = (void *)kzalloc(WLC_IOCTL_MAXLEN, GFP_KERNEL);
+ if (unlikely(!cfg->ioctl_buf)) {
WL_ERR(("Ioctl buf alloc failed\n"));
goto init_priv_mem_out;
}
- wl->escan_ioctl_buf = (void *)kzalloc(WLC_IOCTL_MAXLEN, GFP_KERNEL);
- if (unlikely(!wl->escan_ioctl_buf)) {
+ cfg->escan_ioctl_buf = (void *)kzalloc(WLC_IOCTL_MAXLEN, GFP_KERNEL);
+ if (unlikely(!cfg->escan_ioctl_buf)) {
WL_ERR(("Ioctl buf alloc failed\n"));
goto init_priv_mem_out;
}
- wl->extra_buf = (void *)kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL);
- if (unlikely(!wl->extra_buf)) {
+ cfg->extra_buf = (void *)kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL);
+ if (unlikely(!cfg->extra_buf)) {
WL_ERR(("Extra buf alloc failed\n"));
goto init_priv_mem_out;
}
- wl->iscan = (void *)kzalloc(sizeof(*wl->iscan), GFP_KERNEL);
- if (unlikely(!wl->iscan)) {
- WL_ERR(("Iscan buf alloc failed\n"));
- goto init_priv_mem_out;
- }
- wl->pmk_list = (void *)kzalloc(sizeof(*wl->pmk_list), GFP_KERNEL);
- if (unlikely(!wl->pmk_list)) {
+ cfg->pmk_list = (void *)kzalloc(sizeof(*cfg->pmk_list), GFP_KERNEL);
+ if (unlikely(!cfg->pmk_list)) {
WL_ERR(("pmk list alloc failed\n"));
goto init_priv_mem_out;
- }
- wl->sta_info = (void *)kzalloc(sizeof(*wl->sta_info), GFP_KERNEL);
- if (unlikely(!wl->sta_info)) {
- WL_ERR(("sta info alloc failed\n"));
- goto init_priv_mem_out;
- }
-
-#if defined(STATIC_WL_PRIV_STRUCT)
- wl->conn_info = (void *)kzalloc(sizeof(*wl->conn_info), GFP_KERNEL);
- if (unlikely(!wl->conn_info)) {
- WL_ERR(("wl->conn_info alloc failed\n"));
- goto init_priv_mem_out;
- }
- wl->ie = (void *)kzalloc(sizeof(*wl->ie), GFP_KERNEL);
- if (unlikely(!wl->ie)) {
- WL_ERR(("wl->ie alloc failed\n"));
- goto init_priv_mem_out;
- }
- wl_init_escan_result_buf(wl);
-#endif /* STATIC_WL_PRIV_STRUCT */
- wl->afx_hdl = (void *)kzalloc(sizeof(*wl->afx_hdl), GFP_KERNEL);
- if (unlikely(!wl->afx_hdl)) {
- WL_ERR(("afx hdl alloc failed\n"));
- goto init_priv_mem_out;
- } else {
- init_completion(&wl->act_frm_scan);
- init_completion(&wl->wait_next_af);
-
- INIT_WORK(&wl->afx_hdl->work, wl_cfg80211_afx_handler);
- }
- return 0;
-
-init_priv_mem_out:
- wl_deinit_priv_mem(wl);
-
- return -ENOMEM;
-}
-
-static void wl_deinit_priv_mem(struct wl_priv *wl)
-{
- kfree(wl->scan_results);
- wl->scan_results = NULL;
- kfree(wl->conf);
- wl->conf = NULL;
- kfree(wl->scan_req_int);
- wl->scan_req_int = NULL;
- kfree(wl->ioctl_buf);
- wl->ioctl_buf = NULL;
- kfree(wl->escan_ioctl_buf);
- wl->escan_ioctl_buf = NULL;
- kfree(wl->extra_buf);
- wl->extra_buf = NULL;
- kfree(wl->iscan);
- wl->iscan = NULL;
- kfree(wl->pmk_list);
- wl->pmk_list = NULL;
- kfree(wl->sta_info);
- wl->sta_info = NULL;
-#if defined(STATIC_WL_PRIV_STRUCT)
- kfree(wl->conn_info);
- wl->conn_info = NULL;
- kfree(wl->ie);
- wl->ie = NULL;
- wl_deinit_escan_result_buf(wl);
-#endif /* STATIC_WL_PRIV_STRUCT */
- if (wl->afx_hdl) {
- cancel_work_sync(&wl->afx_hdl->work);
- kfree(wl->afx_hdl);
- wl->afx_hdl = NULL;
- }
-
- if (wl->ap_info) {
- kfree(wl->ap_info->wpa_ie);
- kfree(wl->ap_info->rsn_ie);
- kfree(wl->ap_info->wps_ie);
- kfree(wl->ap_info);
- wl->ap_info = NULL;
- }
-}
-
-static s32 wl_create_event_handler(struct wl_priv *wl)
-{
- int ret = 0;
- WL_DBG(("Enter \n"));
-
- /* Do not use DHD in cfg driver */
- wl->event_tsk.thr_pid = -1;
-
- PROC_START(wl_event_handler, wl, &wl->event_tsk, 0, "wl_event_handler");
- if (wl->event_tsk.thr_pid < 0)
- ret = -ENOMEM;
- return ret;
-}
-
-static void wl_destroy_event_handler(struct wl_priv *wl)
-{
- if (wl->event_tsk.thr_pid >= 0)
- PROC_STOP(&wl->event_tsk);
-}
-
-static void wl_term_iscan(struct wl_priv *wl)
-{
- struct wl_iscan_ctrl *iscan = wl_to_iscan(wl);
- WL_TRACE(("In\n"));
- if (wl->iscan_on && iscan->tsk) {
- iscan->state = WL_ISCAN_STATE_IDLE;
- WL_INFO(("SIGTERM\n"));
- send_sig(SIGTERM, iscan->tsk, 1);
- WL_DBG(("kthread_stop\n"));
- kthread_stop(iscan->tsk);
- iscan->tsk = NULL;
- }
-}
-
-static void wl_notify_iscan_complete(struct wl_iscan_ctrl *iscan, bool aborted)
-{
- struct wl_priv *wl = iscan_to_wl(iscan);
- struct net_device *ndev = wl_to_prmry_ndev(wl);
- unsigned long flags;
-
- WL_DBG(("Enter \n"));
- if (!wl_get_drv_status(wl, SCANNING, ndev)) {
- wl_clr_drv_status(wl, SCANNING, ndev);
- WL_ERR(("Scan complete while device not scanning\n"));
- return;
- }
- spin_lock_irqsave(&wl->cfgdrv_lock, flags);
- wl_clr_drv_status(wl, SCANNING, ndev);
- if (likely(wl->scan_request)) {
- cfg80211_scan_done(wl->scan_request, aborted);
- wl->scan_request = NULL;
- }
- spin_unlock_irqrestore(&wl->cfgdrv_lock, flags);
- wl->iscan_kickstart = false;
-}
-
-static s32 wl_wakeup_iscan(struct wl_iscan_ctrl *iscan)
-{
- if (likely(iscan->state != WL_ISCAN_STATE_IDLE)) {
- WL_DBG(("wake up iscan\n"));
- up(&iscan->sync);
- return 0;
- }
-
- return -EIO;
-}
-
-static s32
-wl_get_iscan_results(struct wl_iscan_ctrl *iscan, u32 *status,
- struct wl_scan_results **bss_list)
-{
- struct wl_iscan_results list;
- struct wl_scan_results *results;
- struct wl_iscan_results *list_buf;
- s32 err = 0;
-
- WL_DBG(("Enter \n"));
- memset(iscan->scan_buf, 0, WL_ISCAN_BUF_MAX);
- list_buf = (struct wl_iscan_results *)iscan->scan_buf;
- results = &list_buf->results;
- results->buflen = WL_ISCAN_RESULTS_FIXED_SIZE;
- results->version = 0;
- results->count = 0;
-
- memset(&list, 0, sizeof(list));
- list.results.buflen = htod32(WL_ISCAN_BUF_MAX);
- err = wldev_iovar_getbuf(iscan->dev, "iscanresults", &list,
- WL_ISCAN_RESULTS_FIXED_SIZE, iscan->scan_buf,
- WL_ISCAN_BUF_MAX, NULL);
- if (unlikely(err)) {
- WL_ERR(("error (%d)\n", err));
- return err;
- }
- results->buflen = dtoh32(results->buflen);
- results->version = dtoh32(results->version);
- results->count = dtoh32(results->count);
- WL_DBG(("results->count = %d\n", results->count));
- WL_DBG(("results->buflen = %d\n", results->buflen));
- *status = dtoh32(list_buf->status);
- *bss_list = results;
-
- return err;
-}
-
-static s32 wl_iscan_done(struct wl_priv *wl)
-{
- struct wl_iscan_ctrl *iscan = wl->iscan;
- s32 err = 0;
-
- iscan->state = WL_ISCAN_STATE_IDLE;
- mutex_lock(&wl->usr_sync);
- wl_inform_bss(wl);
- wl_notify_iscan_complete(iscan, false);
- mutex_unlock(&wl->usr_sync);
+ }
+ cfg->sta_info = (void *)kzalloc(sizeof(*cfg->sta_info), GFP_KERNEL);
+ if (unlikely(!cfg->sta_info)) {
+ WL_ERR(("sta info alloc failed\n"));
+ goto init_priv_mem_out;
+ }
- return err;
-}
+#if defined(STATIC_WL_PRIV_STRUCT)
+ cfg->conn_info = (void *)kzalloc(sizeof(*cfg->conn_info), GFP_KERNEL);
+ if (unlikely(!cfg->conn_info)) {
+ WL_ERR(("cfg->conn_info alloc failed\n"));
+ goto init_priv_mem_out;
+ }
+ cfg->ie = (void *)kzalloc(sizeof(*cfg->ie), GFP_KERNEL);
+ if (unlikely(!cfg->ie)) {
+ WL_ERR(("cfg->ie alloc failed\n"));
+ goto init_priv_mem_out;
+ }
+ wl_init_escan_result_buf(cfg);
+#endif /* STATIC_WL_PRIV_STRUCT */
+ cfg->afx_hdl = (void *)kzalloc(sizeof(*cfg->afx_hdl), GFP_KERNEL);
+ if (unlikely(!cfg->afx_hdl)) {
+ WL_ERR(("afx hdl alloc failed\n"));
+ goto init_priv_mem_out;
+ } else {
+ init_completion(&cfg->act_frm_scan);
+ init_completion(&cfg->wait_next_af);
-static s32 wl_iscan_pending(struct wl_priv *wl)
-{
- struct wl_iscan_ctrl *iscan = wl->iscan;
- s32 err = 0;
+ INIT_WORK(&cfg->afx_hdl->work, wl_cfg80211_afx_handler);
+ }
+ return 0;
- /* Reschedule the timer */
- mod_timer(&iscan->timer, jiffies + msecs_to_jiffies(iscan->timer_ms));
- iscan->timer_on = 1;
+init_priv_mem_out:
+ wl_deinit_priv_mem(cfg);
- return err;
+ return -ENOMEM;
}
-static s32 wl_iscan_inprogress(struct wl_priv *wl)
-{
- struct wl_iscan_ctrl *iscan = wl->iscan;
- s32 err = 0;
-
- mutex_lock(&wl->usr_sync);
- wl_inform_bss(wl);
- wl_run_iscan(iscan, NULL, WL_SCAN_ACTION_CONTINUE);
- mutex_unlock(&wl->usr_sync);
- /* Reschedule the timer */
- mod_timer(&iscan->timer, jiffies + msecs_to_jiffies(iscan->timer_ms));
- iscan->timer_on = 1;
+static void wl_deinit_priv_mem(struct bcm_cfg80211 *cfg)
+{
+ kfree(cfg->scan_results);
+ cfg->scan_results = NULL;
+ kfree(cfg->conf);
+ cfg->conf = NULL;
+ kfree(cfg->scan_req_int);
+ cfg->scan_req_int = NULL;
+ kfree(cfg->ioctl_buf);
+ cfg->ioctl_buf = NULL;
+ kfree(cfg->escan_ioctl_buf);
+ cfg->escan_ioctl_buf = NULL;
+ kfree(cfg->extra_buf);
+ cfg->extra_buf = NULL;
+ kfree(cfg->pmk_list);
+ cfg->pmk_list = NULL;
+ kfree(cfg->sta_info);
+ cfg->sta_info = NULL;
+#if defined(STATIC_WL_PRIV_STRUCT)
+ kfree(cfg->conn_info);
+ cfg->conn_info = NULL;
+ kfree(cfg->ie);
+ cfg->ie = NULL;
+ wl_deinit_escan_result_buf(cfg);
+#endif /* STATIC_WL_PRIV_STRUCT */
+ if (cfg->afx_hdl) {
+ cancel_work_sync(&cfg->afx_hdl->work);
+ kfree(cfg->afx_hdl);
+ cfg->afx_hdl = NULL;
+ }
- return err;
+ if (cfg->ap_info) {
+ kfree(cfg->ap_info->wpa_ie);
+ kfree(cfg->ap_info->rsn_ie);
+ kfree(cfg->ap_info->wps_ie);
+ kfree(cfg->ap_info);
+ cfg->ap_info = NULL;
+ }
}
-static s32 wl_iscan_aborted(struct wl_priv *wl)
+static s32 wl_create_event_handler(struct bcm_cfg80211 *cfg)
{
- struct wl_iscan_ctrl *iscan = wl->iscan;
- s32 err = 0;
+ int ret = 0;
+ WL_DBG(("Enter \n"));
- iscan->state = WL_ISCAN_STATE_IDLE;
- mutex_lock(&wl->usr_sync);
- wl_notify_iscan_complete(iscan, true);
- mutex_unlock(&wl->usr_sync);
+ /* Do not use DHD in cfg driver */
+ cfg->event_tsk.thr_pid = -1;
- return err;
+ PROC_START(wl_event_handler, cfg, &cfg->event_tsk, 0, "wl_event_handler");
+ if (cfg->event_tsk.thr_pid < 0)
+ ret = -ENOMEM;
+ return ret;
}
-static s32 wl_iscan_thread(void *data)
+static void wl_destroy_event_handler(struct bcm_cfg80211 *cfg)
{
- struct wl_iscan_ctrl *iscan = (struct wl_iscan_ctrl *)data;
- struct wl_priv *wl = iscan_to_wl(iscan);
- u32 status;
- int err = 0;
-
- allow_signal(SIGTERM);
- status = WL_SCAN_RESULTS_PARTIAL;
- while (likely(!down_interruptible(&iscan->sync))) {
- if (kthread_should_stop())
- break;
- if (iscan->timer_on) {
- del_timer_sync(&iscan->timer);
- iscan->timer_on = 0;
- }
- mutex_lock(&wl->usr_sync);
- err = wl_get_iscan_results(iscan, &status, &wl->bss_list);
- if (unlikely(err)) {
- status = WL_SCAN_RESULTS_ABORTED;
- WL_ERR(("Abort iscan\n"));
- }
- mutex_unlock(&wl->usr_sync);
- iscan->iscan_handler[status] (wl);
- }
- if (iscan->timer_on) {
- del_timer_sync(&iscan->timer);
- iscan->timer_on = 0;
- }
- WL_DBG(("was terminated\n"));
-
- return 0;
+ if (cfg->event_tsk.thr_pid >= 0)
+ PROC_STOP(&cfg->event_tsk);
}
static void wl_scan_timeout(unsigned long data)
{
wl_event_msg_t msg;
- struct wl_priv *wl = (struct wl_priv *)data;
- struct net_device *ndev = wl_to_prmry_ndev(wl);
+ struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)data;
+ struct net_device *dev = bcmcfg_to_prmry_ndev(cfg);
- if (!(wl->scan_request)) {
+ if (!(cfg->scan_request)) {
WL_ERR(("timer expired but no scan request\n"));
return;
}
bzero(&msg, sizeof(wl_event_msg_t));
WL_ERR(("timer expired\n"));
- if (wl->escan_on) {
- msg.event_type = hton32(WLC_E_ESCAN_RESULT);
- msg.status = hton32(WLC_E_STATUS_TIMEOUT);
- msg.reason = 0xFFFFFFFF;
- wl_cfg80211_event(wl_to_prmry_ndev(wl), &msg, NULL);
- } else {
- /* Need to check it try to access SDIO */
- WL_ERR(("SCAN Timeout(ISCAN)\n"));
- wl_notify_iscan_complete(wl_to_iscan(wl), true);
- }
- // terence 20130729: work around to fix out of memory in firmware
- WL_ERR(("Send hang event\n"));
- net_os_send_hang_message(ndev);
-}
-
-static void wl_iscan_timer(unsigned long data)
-{
- struct wl_iscan_ctrl *iscan = (struct wl_iscan_ctrl *)data;
-
- if (iscan) {
- iscan->timer_on = 0;
- WL_DBG(("timer expired\n"));
- wl_wakeup_iscan(iscan);
- }
-}
-
-static s32 wl_invoke_iscan(struct wl_priv *wl)
-{
- struct wl_iscan_ctrl *iscan = wl_to_iscan(wl);
- int err = 0;
+ msg.event_type = hton32(WLC_E_ESCAN_RESULT);
+ msg.status = hton32(WLC_E_STATUS_TIMEOUT);
+ msg.reason = 0xFFFFFFFF;
+ wl_cfg80211_event(bcmcfg_to_prmry_ndev(cfg), &msg, NULL);
- if (wl->iscan_on && !iscan->tsk) {
- iscan->state = WL_ISCAN_STATE_IDLE;
- sema_init(&iscan->sync, 0);
- iscan->tsk = kthread_run(wl_iscan_thread, iscan, "wl_iscan");
- if (IS_ERR(iscan->tsk)) {
- WL_ERR(("Could not create iscan thread\n"));
- iscan->tsk = NULL;
- return -ENOMEM;
- }
+ // terence 20130729: workaround to fix out of memory in firmware
+ if (dhd_conf_get_chip(dhd_get_pub(dev)) == BCM43362_CHIP_ID) {
+ WL_ERR(("Send hang event\n"));
+ net_os_send_hang_message(dev);
}
-
- return err;
-}
-
-static void wl_init_iscan_handler(struct wl_iscan_ctrl *iscan)
-{
- memset(iscan->iscan_handler, 0, sizeof(iscan->iscan_handler));
- iscan->iscan_handler[WL_SCAN_RESULTS_SUCCESS] = wl_iscan_done;
- iscan->iscan_handler[WL_SCAN_RESULTS_PARTIAL] = wl_iscan_inprogress;
- iscan->iscan_handler[WL_SCAN_RESULTS_PENDING] = wl_iscan_pending;
- iscan->iscan_handler[WL_SCAN_RESULTS_ABORTED] = wl_iscan_aborted;
- iscan->iscan_handler[WL_SCAN_RESULTS_NO_MEM] = wl_iscan_aborted;
}
static s32
{
struct net_device *dev = ndev;
struct wireless_dev *wdev = dev->ieee80211_ptr;
- struct wl_priv *wl = wlcfg_drv_priv;
- int refcnt = 0;
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
WL_DBG(("Enter \n"));
- if (!wdev || !wl || dev == wl_to_prmry_ndev(wl))
+
+ if (!wdev || !cfg || dev == bcmcfg_to_prmry_ndev(cfg))
return NOTIFY_DONE;
+
switch (state) {
case NETDEV_DOWN:
{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0))
int max_wait_timeout = 2;
int max_wait_count = 100;
+ int refcnt = 0;
unsigned long limit = jiffies + max_wait_timeout * HZ;
while (work_pending(&wdev->cleanup_work)) {
if (refcnt%5 == 0) {
break;
}
set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(100);
+ (void)schedule_timeout(100);
set_current_state(TASK_RUNNING);
refcnt++;
}
+#endif /* LINUX_VERSION < VERSION(3, 11, 0) */
break;
}
case NETDEV_UNREGISTER:
/* after calling list_del_rcu(&wdev->list) */
- wl_dealloc_netinfo(wl, ndev);
+ wl_dealloc_netinfo(cfg, ndev);
break;
case NETDEV_GOING_DOWN:
		/* At NETDEV_DOWN state, the wdev cleanup work will be called.
		 * If a scan is still in progress, the cleanup work calls WARN_ON and
		 * forces the scan to complete.
		 */
- if (wl_get_drv_status(wl, SCANNING, dev)) {
- if (wl->escan_on) {
- wl_notify_escan_complete(wl, dev, true, true);
- }
- }
+ if (wl_get_drv_status(cfg, SCANNING, dev))
+ wl_notify_escan_complete(cfg, dev, true, true);
break;
}
return NOTIFY_DONE;
static struct notifier_block wl_cfg80211_netdev_notifier = {
.notifier_call = wl_cfg80211_netdev_notifier_call,
};
+/* Make sure we do not register the same notifier twice; otherwise a loop is likely to be
+ * created in the kernel notifier linked list (with 'next' pointing to itself).
+ */
+static bool wl_cfg80211_netdev_notifier_registered = FALSE;
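The flag above exists so the netdev notifier is registered at most once across repeated
attach/detach cycles. A minimal sketch of how such a guard is typically used around
register_netdevice_notifier() (example_register_notifier_once is a hypothetical wrapper;
the actual registration sits in the driver's attach path):

	static int example_register_notifier_once(void)
	{
		int err = 0;

		if (!wl_cfg80211_netdev_notifier_registered) {
			wl_cfg80211_netdev_notifier_registered = TRUE;
			err = register_netdevice_notifier(&wl_cfg80211_netdev_notifier);
			if (err)
				/* roll back the flag so a later attach can retry */
				wl_cfg80211_netdev_notifier_registered = FALSE;
		}
		return err;
	}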
-static void wl_cfg80211_scan_abort(struct wl_priv *wl)
+static void wl_cfg80211_scan_abort(struct bcm_cfg80211 *cfg)
{
wl_scan_params_t *params = NULL;
s32 params_size = 0;
s32 err = BCME_OK;
- struct net_device *dev = wl_to_prmry_ndev(wl);
+ struct net_device *dev = bcmcfg_to_prmry_ndev(cfg);
if (!in_atomic()) {
/* Our scan params only need space for 1 channel and 0 ssids */
params = wl_cfg80211_scan_alloc_params(-1, 0, ¶ms_size);
}
}
-static s32 wl_notify_escan_complete(struct wl_priv *wl,
+static s32 wl_notify_escan_complete(struct bcm_cfg80211 *cfg,
struct net_device *ndev,
bool aborted, bool fw_abort)
{
return err;
}
- if (wl->escan_info.ndev != ndev) {
- WL_ERR(("ndev is different %p %p\n", wl->escan_info.ndev, ndev));
+ if (cfg->escan_info.ndev != ndev) {
+ WL_ERR(("ndev is different %p %p\n", cfg->escan_info.ndev, ndev));
err = BCME_ERROR;
return err;
}
- if (wl->scan_request) {
- dev = wl_to_prmry_ndev(wl);
+ if (cfg->scan_request) {
+ dev = bcmcfg_to_prmry_ndev(cfg);
#if defined(WL_ENABLE_P2P_IF)
- if (wl->scan_request->dev != wl->p2p_net)
- dev = wl->scan_request->dev;
+ if (cfg->scan_request->dev != cfg->p2p_net)
+ dev = cfg->scan_request->dev;
#endif /* WL_ENABLE_P2P_IF */
}
else {
- WL_DBG(("wl->scan_request is NULL may be internal scan."
+ WL_DBG(("cfg->scan_request is NULL may be internal scan."
"doing scan_abort for ndev %p primary %p",
- ndev, wl_to_prmry_ndev(wl)));
+ ndev, bcmcfg_to_prmry_ndev(cfg)));
dev = ndev;
}
- if (fw_abort && !in_atomic()) {
- wl_cfg80211_scan_abort(wl);
- }
-
- if (timer_pending(&wl->scan_timeout))
- del_timer_sync(&wl->scan_timeout);
+ if (fw_abort && !in_atomic())
+ wl_cfg80211_scan_abort(cfg);
+ if (timer_pending(&cfg->scan_timeout))
+ del_timer_sync(&cfg->scan_timeout);
#if defined(ESCAN_RESULT_PATCH)
- if (likely(wl->scan_request)) {
- wl->bss_list = wl_escan_get_buf(wl, aborted);
- wl_inform_bss(wl);
+ if (likely(cfg->scan_request)) {
+ cfg->bss_list = wl_escan_get_buf(cfg, aborted);
+ wl_inform_bss(cfg);
}
#endif /* ESCAN_RESULT_PATCH */
- spin_lock_irqsave(&wl->cfgdrv_lock, flags);
+ spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
#ifdef WL_SCHED_SCAN
- if (wl->sched_scan_req && !wl->scan_request) {
+ if (cfg->sched_scan_req && !cfg->scan_request) {
WL_PNO((">>> REPORTING SCHED SCAN RESULTS \n"));
if (!aborted)
- cfg80211_sched_scan_results(wl->sched_scan_req->wiphy);
- wl->sched_scan_running = FALSE;
- wl->sched_scan_req = NULL;
+ cfg80211_sched_scan_results(cfg->sched_scan_req->wiphy);
+ cfg->sched_scan_running = FALSE;
+ cfg->sched_scan_req = NULL;
}
#endif /* WL_SCHED_SCAN */
- if (likely(wl->scan_request)) {
- cfg80211_scan_done(wl->scan_request, aborted);
- wl->scan_request = NULL;
- }
- if (p2p_is_on(wl))
- wl_clr_p2p_status(wl, SCANNING);
- wl_clr_drv_status(wl, SCANNING, dev);
- spin_unlock_irqrestore(&wl->cfgdrv_lock, flags);
+ if (likely(cfg->scan_request)) {
+ cfg80211_scan_done(cfg->scan_request, aborted);
+ cfg->scan_request = NULL;
+ }
+ if (p2p_is_on(cfg))
+ wl_clr_p2p_status(cfg, SCANNING);
+ wl_clr_drv_status(cfg, SCANNING, dev);
+ spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
#ifdef WL_SDO
- if (wl_get_p2p_status(wl, DISC_IN_PROGRESS) && !in_atomic()) {
- wl_cfg80211_resume_sdo(ndev, wl);
+ if (wl_get_p2p_status(cfg, DISC_IN_PROGRESS) && !in_atomic()) {
+ wl_cfg80211_resume_sdo(ndev, cfg);
}
#endif
+
return err;
}
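wl_notify_escan_complete() above follows the cfg80211 contract that each struct cfg80211_scan_request handed to the driver is answered by exactly one cfg80211_scan_done() call; the pending request pointer is cleared under the driver spinlock so two completion paths cannot report the same request. A stripped-down sketch of that idiom, using the 3.x bool-argument form of cfg80211_scan_done() seen in this file (the struct and field names are illustrative):

#include <linux/spinlock.h>
#include <net/cfg80211.h>

struct my_scan_ctx {
	spinlock_t lock;
	struct cfg80211_scan_request *scan_request;
};

static void my_finish_scan(struct my_scan_ctx *ctx, bool aborted)
{
	struct cfg80211_scan_request *req;
	unsigned long flags;

	spin_lock_irqsave(&ctx->lock, flags);
	req = ctx->scan_request;	/* claim the pending request... */
	ctx->scan_request = NULL;	/* ...so no other path can complete it */
	spin_unlock_irqrestore(&ctx->lock, flags);

	if (req)
		cfg80211_scan_done(req, aborted);	/* exactly once per request */
}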
-static s32 wl_escan_handler(struct wl_priv *wl, bcm_struct_cfgdev *cfgdev,
+static s32 wl_escan_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
const wl_event_msg_t *e, void *data)
{
s32 err = BCME_OK;
u32 bi_length;
u32 i;
u8 *p2p_dev_addr = NULL;
- struct wiphy *wiphy = wl_to_wiphy(wl);
u16 channel;
struct ieee80211_supported_band *band;
WL_DBG((" enter event type : %d, status : %d \n",
ntoh32(e->event_type), ntoh32(e->status)));
- ndev = cfgdev_to_wlc_ndev(cfgdev, wl);
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
- mutex_lock(&wl->usr_sync);
+ mutex_lock(&cfg->usr_sync);
/* P2P SCAN is coming from primary interface */
- if (wl_get_p2p_status(wl, SCANNING)) {
- if (wl_get_drv_status_all(wl, SENDING_ACT_FRM))
- ndev = wl->afx_hdl->dev;
+ if (wl_get_p2p_status(cfg, SCANNING)) {
+ if (wl_get_drv_status_all(cfg, SENDING_ACT_FRM))
+ ndev = cfg->afx_hdl->dev;
else
- ndev = wl->escan_info.ndev;
+ ndev = cfg->escan_info.ndev;
}
- if (!ndev || !wl->escan_on ||
- (!wl_get_drv_status(wl, SCANNING, ndev) &&
- !wl->sched_scan_running)) {
- WL_ERR(("escan is not ready ndev %p wl->escan_on %d"
- " drv_status 0x%x e_type %d e_states %d\n",
- ndev, wl->escan_on, wl_get_drv_status(wl, SCANNING, ndev),
+ if (!ndev || (!wl_get_drv_status(cfg, SCANNING, ndev) && !cfg->sched_scan_running)) {
+ WL_ERR(("escan is not ready ndev %p drv_status 0x%x e_type %d e_states %d\n",
+ ndev, wl_get_drv_status(cfg, SCANNING, ndev),
ntoh32(e->event_type), ntoh32(e->status)));
goto exit;
}
escan_result = (wl_escan_result_t *)data;
if (status == WLC_E_STATUS_PARTIAL) {
- WL_INFO(("WLC_E_STATUS_PARTIAL \n"));
+ WL_INFORM(("WLC_E_STATUS_PARTIAL \n"));
if (!escan_result) {
WL_ERR(("Invalid escan result (NULL pointer)\n"));
goto exit;
channel =
bi->ctl_ch ? bi->ctl_ch : CHSPEC_CHANNEL(wl_chspec_driver_to_host(bi->chanspec));
if (channel <= CH_MAX_2G_CHANNEL)
- band = wiphy->bands[IEEE80211_BAND_2GHZ];
+ band = bcmcfg_to_wiphy(cfg)->bands[IEEE80211_BAND_2GHZ];
else
- band = wiphy->bands[IEEE80211_BAND_5GHZ];
+ band = bcmcfg_to_wiphy(cfg)->bands[IEEE80211_BAND_5GHZ];
if (!band) {
- WL_DBG(("No valid band\n"));
+ WL_ERR(("No valid band\n"));
goto exit;
}
- if (!dhd_conf_match_channel((dhd_pub_t *)wl->pub, channel))
+ if (!dhd_conf_match_channel((dhd_pub_t *)cfg->pub, channel))
goto exit;
/* ----- terence 20130524: skip invalid bss */
if (wl_escan_check_sync_id(status, escan_result->sync_id,
- wl->escan_info.cur_sync_id) < 0)
+ cfg->escan_info.cur_sync_id) < 0)
goto exit;
- if (!(wl_to_wiphy(wl)->interface_modes & BIT(NL80211_IFTYPE_ADHOC))) {
+ if (!(bcmcfg_to_wiphy(cfg)->interface_modes & BIT(NL80211_IFTYPE_ADHOC))) {
if (dtoh16(bi->capability) & DOT11_CAP_IBSS) {
WL_DBG(("Ignoring IBSS result\n"));
goto exit;
}
}
- if (wl_get_drv_status_all(wl, FINDING_COMMON_CHANNEL)) {
+ if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
p2p_dev_addr = wl_cfgp2p_retreive_p2p_dev_addr(bi, bi_length);
if (p2p_dev_addr && !memcmp(p2p_dev_addr,
- wl->afx_hdl->tx_dst_addr.octet, ETHER_ADDR_LEN)) {
+ cfg->afx_hdl->tx_dst_addr.octet, ETHER_ADDR_LEN)) {
s32 channel = wf_chspec_ctlchan(
wl_chspec_driver_to_host(bi->chanspec));
else
WL_ERR(("ACTION FRAME SCAN : Peer " MACDBG " found,"
" channel : %d\n",
- MAC2STRDBG(wl->afx_hdl->tx_dst_addr.octet),
+ MAC2STRDBG(cfg->afx_hdl->tx_dst_addr.octet),
channel));
- wl_clr_p2p_status(wl, SCANNING);
- wl->afx_hdl->peer_chan = channel;
- complete(&wl->act_frm_scan);
+ wl_clr_p2p_status(cfg, SCANNING);
+ cfg->afx_hdl->peer_chan = channel;
+ complete(&cfg->act_frm_scan);
goto exit;
}
} else {
int cur_len = WL_SCAN_RESULTS_FIXED_SIZE;
- list = wl_escan_get_buf(wl, FALSE);
- if (scan_req_match(wl)) {
+ list = wl_escan_get_buf(cfg, FALSE);
+ if (scan_req_match(cfg)) {
#ifdef WL_HOST_BAND_MGMT
s32 channel = 0;
s32 channel_band = 0;
+ chanspec_t chspec;
#endif /* WL_HOST_BAND_MGMT */
/* p2p scan && allow only probe response */
- if ((wl->p2p->search_state != WL_P2P_DISC_ST_SCAN) &&
+ if ((cfg->p2p->search_state != WL_P2P_DISC_ST_SCAN) &&
(bi->flags & WL_BSS_FLAGS_FROM_BEACON))
goto exit;
if ((p2p_ie = wl_cfgp2p_find_p2pie(((u8 *) bi) + bi->ie_offset,
bi->ie_length)) == NULL) {
- WL_INFO(("Couldn't find P2PIE in probe"
+ WL_INFORM(("Couldn't find P2PIE in probe"
" response/beacon\n"));
goto exit;
}
#ifdef WL_HOST_BAND_MGMT
- channel = CHSPEC_CHANNEL(wl_chspec_driver_to_host(bi->chanspec));
- channel_band = (channel > CH_MAX_2G_CHANNEL) ?
- WLC_BAND_5G : WLC_BAND_2G;
-
+ chspec = wl_chspec_driver_to_host(bi->chanspec);
+ channel = wf_chspec_ctlchan(chspec);
+ channel_band = CHSPEC2WLC_BAND(chspec);
- if ((wl->curr_band == WLC_BAND_5G) &&
+ if ((cfg->curr_band == WLC_BAND_5G) &&
(channel_band == WLC_BAND_2G)) {
/* Avoid sending the GO results in band conflict */
if (wl_cfgp2p_retreive_p2pattrib(p2p_ie,
}
else if (status == WLC_E_STATUS_SUCCESS) {
- wl->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
- wl_escan_print_sync_id(status, wl->escan_info.cur_sync_id,
+ cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+ wl_escan_print_sync_id(status, cfg->escan_info.cur_sync_id,
escan_result->sync_id);
- if (wl_get_drv_status_all(wl, FINDING_COMMON_CHANNEL)) {
- WL_INFO(("ACTION FRAME SCAN DONE\n"));
- wl_clr_p2p_status(wl, SCANNING);
- wl_clr_drv_status(wl, SCANNING, wl->afx_hdl->dev);
- if (wl->afx_hdl->peer_chan == WL_INVALID)
- complete(&wl->act_frm_scan);
- } else if ((likely(wl->scan_request)) || (wl->sched_scan_running)) {
- WL_INFO(("ESCAN COMPLETED\n"));
- wl->bss_list = wl_escan_get_buf(wl, FALSE);
- if (!scan_req_match(wl)) {
+ if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
+ WL_INFORM(("ACTION FRAME SCAN DONE\n"));
+ wl_clr_p2p_status(cfg, SCANNING);
+ wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev);
+ if (cfg->afx_hdl->peer_chan == WL_INVALID)
+ complete(&cfg->act_frm_scan);
+ } else if ((likely(cfg->scan_request)) || (cfg->sched_scan_running)) {
+ WL_INFORM(("ESCAN COMPLETED\n"));
+ cfg->bss_list = wl_escan_get_buf(cfg, FALSE);
+ if (!scan_req_match(cfg)) {
WL_TRACE_HW4(("SCAN COMPLETED: scanned AP count=%d\n",
- wl->bss_list->count));
+ cfg->bss_list->count));
}
- wl_inform_bss(wl);
- wl_notify_escan_complete(wl, ndev, false, false);
+ wl_inform_bss(cfg);
+ wl_notify_escan_complete(cfg, ndev, false, false);
}
- wl_escan_increment_sync_id(wl, SCAN_BUF_NEXT);
+ wl_escan_increment_sync_id(cfg, SCAN_BUF_NEXT);
}
else if (status == WLC_E_STATUS_ABORT) {
- wl->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+ cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
wl_escan_print_sync_id(status, escan_result->sync_id,
- wl->escan_info.cur_sync_id);
- if (wl_get_drv_status_all(wl, FINDING_COMMON_CHANNEL)) {
- WL_INFO(("ACTION FRAME SCAN DONE\n"));
- wl_clr_drv_status(wl, SCANNING, wl->afx_hdl->dev);
- wl_clr_p2p_status(wl, SCANNING);
- if (wl->afx_hdl->peer_chan == WL_INVALID)
- complete(&wl->act_frm_scan);
- } else if ((likely(wl->scan_request)) || (wl->sched_scan_running)) {
- WL_INFO(("ESCAN ABORTED\n"));
- wl->bss_list = wl_escan_get_buf(wl, TRUE);
- if (!scan_req_match(wl)) {
+ cfg->escan_info.cur_sync_id);
+ if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
+ WL_INFORM(("ACTION FRAME SCAN DONE\n"));
+ wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev);
+ wl_clr_p2p_status(cfg, SCANNING);
+ if (cfg->afx_hdl->peer_chan == WL_INVALID)
+ complete(&cfg->act_frm_scan);
+ } else if ((likely(cfg->scan_request)) || (cfg->sched_scan_running)) {
+ WL_INFORM(("ESCAN ABORTED\n"));
+ cfg->bss_list = wl_escan_get_buf(cfg, TRUE);
+ if (!scan_req_match(cfg)) {
WL_TRACE_HW4(("SCAN ABORTED: scanned AP count=%d\n",
- wl->bss_list->count));
+ cfg->bss_list->count));
}
- wl_inform_bss(wl);
- wl_notify_escan_complete(wl, ndev, true, false);
+ wl_inform_bss(cfg);
+ wl_notify_escan_complete(cfg, ndev, true, false);
}
- wl_escan_increment_sync_id(wl, SCAN_BUF_CNT);
+ wl_escan_increment_sync_id(cfg, SCAN_BUF_CNT);
} else if (status == WLC_E_STATUS_NEWSCAN) {
- WL_ERR(("WLC_E_STATUS_NEWSCAN : scan_request[%p]\n", wl->scan_request));
+ WL_ERR(("WLC_E_STATUS_NEWSCAN : scan_request[%p]\n", cfg->scan_request));
WL_ERR(("sync_id[%d], bss_count[%d]\n", escan_result->sync_id,
escan_result->bss_count));
} else if (status == WLC_E_STATUS_TIMEOUT) {
- WL_ERR(("WLC_E_STATUS_TIMEOUT : scan_request[%p]\n", wl->scan_request));
- WL_ERR(("escan_on[%d], reason[0x%x]\n", wl->escan_on, e->reason));
+ WL_ERR(("WLC_E_STATUS_TIMEOUT : scan_request[%p]\n", cfg->scan_request));
+ WL_ERR(("reason[0x%x]\n", e->reason));
if (e->reason == 0xFFFFFFFF) {
- wl_notify_escan_complete(wl, wl->escan_info.ndev, true, true);
+ wl_notify_escan_complete(cfg, cfg->escan_info.ndev, true, true);
}
} else {
WL_ERR(("unexpected Escan Event %d : abort\n", status));
- wl->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+ cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
wl_escan_print_sync_id(status, escan_result->sync_id,
- wl->escan_info.cur_sync_id);
- if (wl_get_drv_status_all(wl, FINDING_COMMON_CHANNEL)) {
- WL_INFO(("ACTION FRAME SCAN DONE\n"));
- wl_clr_p2p_status(wl, SCANNING);
- wl_clr_drv_status(wl, SCANNING, wl->afx_hdl->dev);
- if (wl->afx_hdl->peer_chan == WL_INVALID)
- complete(&wl->act_frm_scan);
- } else if ((likely(wl->scan_request)) || (wl->sched_scan_running)) {
- wl->bss_list = wl_escan_get_buf(wl, TRUE);
- if (!scan_req_match(wl)) {
+ cfg->escan_info.cur_sync_id);
+ if (wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
+ WL_INFORM(("ACTION FRAME SCAN DONE\n"));
+ wl_clr_p2p_status(cfg, SCANNING);
+ wl_clr_drv_status(cfg, SCANNING, cfg->afx_hdl->dev);
+ if (cfg->afx_hdl->peer_chan == WL_INVALID)
+ complete(&cfg->act_frm_scan);
+ } else if ((likely(cfg->scan_request)) || (cfg->sched_scan_running)) {
+ cfg->bss_list = wl_escan_get_buf(cfg, TRUE);
+ if (!scan_req_match(cfg)) {
WL_TRACE_HW4(("SCAN ABORTED(UNEXPECTED): "
"scanned AP count=%d\n",
- wl->bss_list->count));
+ cfg->bss_list->count));
}
- wl_inform_bss(wl);
- wl_notify_escan_complete(wl, ndev, true, false);
+ wl_inform_bss(cfg);
+ wl_notify_escan_complete(cfg, ndev, true, false);
}
- wl_escan_increment_sync_id(wl, 2);
+ wl_escan_increment_sync_id(cfg, 2);
}
exit:
- mutex_unlock(&wl->usr_sync);
+ mutex_unlock(&cfg->usr_sync);
return err;
}
-static void wl_cfg80211_concurrent_roam(struct wl_priv *wl, int enable)
+static void wl_cfg80211_concurrent_roam(struct bcm_cfg80211 *cfg, int enable)
{
- u32 connected_cnt = wl_get_drv_status_all(wl, CONNECTED);
+ u32 connected_cnt = wl_get_drv_status_all(cfg, CONNECTED);
struct net_info *iter, *next;
int err;
- if (!wl->roamoff_on_concurrent)
+ if (!cfg->roamoff_on_concurrent)
return;
if (enable && connected_cnt > 1) {
- for_each_ndev(wl, iter, next) {
+ for_each_ndev(cfg, iter, next) {
/* Save the current roam setting */
if ((err = wldev_iovar_getint(iter->ndev, "roam_off",
(s32 *)&iter->roam_off)) != BCME_OK) {
}
}
else if (!enable) {
- for_each_ndev(wl, iter, next) {
+ for_each_ndev(cfg, iter, next) {
if (iter->roam_off != WL_INVALID) {
if ((err = wldev_iovar_setint(iter->ndev, "roam_off",
iter->roam_off)) == BCME_OK)
return;
}
-static void wl_cfg80211_determine_vsdb_mode(struct wl_priv *wl)
+static void wl_cfg80211_determine_vsdb_mode(struct bcm_cfg80211 *cfg)
{
struct net_info *iter, *next;
u32 ctl_chan = 0;
u32 chanspec = 0;
u32 pre_ctl_chan = 0;
- u32 connected_cnt = wl_get_drv_status_all(wl, CONNECTED);
- wl->vsdb_mode = false;
+ u32 connected_cnt = wl_get_drv_status_all(cfg, CONNECTED);
+ cfg->vsdb_mode = false;
if (connected_cnt <= 1) {
return;
}
- for_each_ndev(wl, iter, next) {
+ for_each_ndev(cfg, iter, next) {
chanspec = 0;
ctl_chan = 0;
- if (wl_get_drv_status(wl, CONNECTED, iter->ndev)) {
+ if (wl_get_drv_status(cfg, CONNECTED, iter->ndev)) {
if (wldev_iovar_getint(iter->ndev, "chanspec",
(s32 *)&chanspec) == BCME_OK) {
chanspec = wl_chspec_driver_to_host(chanspec);
ctl_chan = wf_chspec_ctlchan(chanspec);
- wl_update_prof(wl, iter->ndev, NULL,
+ wl_update_prof(cfg, iter->ndev, NULL,
&ctl_chan, WL_PROF_CHAN);
}
- if (!wl->vsdb_mode) {
+ if (!cfg->vsdb_mode) {
if (!pre_ctl_chan && ctl_chan)
pre_ctl_chan = ctl_chan;
else if (pre_ctl_chan && (pre_ctl_chan != ctl_chan)) {
- wl->vsdb_mode = true;
+ cfg->vsdb_mode = true;
}
}
}
}
- printk("%s concurrency is enabled\n", wl->vsdb_mode ? "Multi Channel" : "Same Channel");
+ printk("%s concurrency is enabled\n", cfg->vsdb_mode ? "Multi Channel" : "Same Channel");
return;
}
-static s32 wl_notifier_change_state(struct wl_priv *wl, struct net_info *_net_info,
+static s32 wl_notifier_change_state(struct bcm_cfg80211 *cfg, struct net_info *_net_info,
enum wl_status state, bool set)
{
s32 pm = PM_FAST;
u32 mode;
u32 chan = 0;
struct net_info *iter, *next;
- struct net_device *primary_dev = wl_to_prmry_ndev(wl);
+ struct net_device *primary_dev = bcmcfg_to_prmry_ndev(cfg);
WL_DBG(("Enter state %d set %d _net_info->pm_restore %d iface %s\n",
state, set, _net_info->pm_restore, _net_info->ndev->name));
if (state != WL_STATUS_CONNECTED)
return 0;
- mode = wl_get_mode_by_netdev(wl, _net_info->ndev);
+ mode = wl_get_mode_by_netdev(cfg, _net_info->ndev);
if (set) {
- wl_cfg80211_concurrent_roam(wl, 1);
+ wl_cfg80211_concurrent_roam(cfg, 1);
if (mode == WL_MODE_AP) {
if (wl_add_remove_eventmsg(primary_dev, WLC_E_P2P_PROBREQ_MSG, false))
WL_ERR((" failed to unset WLC_E_P2P_PROPREQ_MSG\n"));
}
- wl_cfg80211_determine_vsdb_mode(wl);
- if (wl->vsdb_mode || _net_info->pm_block) {
- if (wl->pm_enable_work_on) {
- cancel_delayed_work_sync(&wl->pm_enable_work);
- wl->pm_enable_work_on = false;
- }
+ wl_cfg80211_determine_vsdb_mode(cfg);
+ if (cfg->vsdb_mode || _net_info->pm_block) {
+ /* Delete pm_enable_work */
+ wl_add_remove_pm_enable_work(cfg, FALSE, WL_HANDLER_MAINTAIN);
/* save PM_FAST in _net_info to restore this
* if _net_info->pm_block is false
*/
_net_info->pm_restore = true;
}
pm = PM_OFF;
- for_each_ndev(wl, iter, next) {
+ for_each_ndev(cfg, iter, next) {
if (iter->pm_restore)
continue;
/* Save the current power mode */
}
}
- for_each_ndev(wl, iter, next) {
+ for_each_ndev(cfg, iter, next) {
+ if (!wl_get_drv_status(cfg, CONNECTED, iter->ndev))
+ continue;
if ((err = wldev_ioctl(iter->ndev, WLC_SET_PM, &pm,
sizeof(pm), true)) != 0) {
if (err == -ENODEV)
WL_DBG(("%s:netdev not ready\n", iter->ndev->name));
else
WL_ERR(("%s:error (%d)\n", iter->ndev->name, err));
- } else
- iter->ndev->ieee80211_ptr->ps = false;
+ wl_cfg80211_update_power_mode(iter->ndev);
+ }
}
} else {
/* add PM Enable timer to go to power save mode
* before calling pm_enable_timer, we need to set PM -1 for all ndev
*/
pm = PM_OFF;
-
- for_each_ndev(wl, iter, next) {
+ if (!_net_info->pm_block) {
+ for_each_ndev(cfg, iter, next) {
+ if (iter->pm_restore)
+ continue;
+ /* Save the current power mode */
+ err = wldev_ioctl(iter->ndev, WLC_GET_PM, &iter->pm,
+ sizeof(iter->pm), false);
+ WL_DBG(("%s:power save %s\n", iter->ndev->name,
+ iter->pm ? "enabled" : "disabled"));
+ if (!err && iter->pm) {
+ iter->pm_restore = true;
+ }
+ }
+ }
+ for_each_ndev(cfg, iter, next) {
+ if (!wl_get_drv_status(cfg, CONNECTED, iter->ndev))
+ continue;
if ((err = wldev_ioctl(iter->ndev, WLC_SET_PM, &pm,
sizeof(pm), true)) != 0) {
if (err == -ENODEV)
WL_ERR(("%s:error (%d)\n", iter->ndev->name, err));
}
}
- wl->pm_enable_work_on = true;
- schedule_delayed_work(&wl->pm_enable_work,
- msecs_to_jiffies(WL_PM_ENABLE_TIMEOUT));
+
+ if (cfg->pm_enable_work_on) {
+ wl_add_remove_pm_enable_work(cfg, FALSE, WL_HANDLER_DEL);
+ }
+
+ cfg->pm_enable_work_on = true;
+ wl_add_remove_pm_enable_work(cfg, TRUE, WL_HANDLER_NOTUSE);
}
+#if defined(WLTDLS)
+#if defined(DISABLE_TDLS_IN_P2P)
+ if (cfg->vsdb_mode || p2p_is_on(cfg))
+#else
+ if (cfg->vsdb_mode)
+#endif /* defined(DISABLE_TDLS_IN_P2P) */
+ {
+
+ err = wldev_iovar_setint(primary_dev, "tdls_enable", 0);
+ }
+#endif /* defined(WLTDLS) */
}
else { /* clear */
chan = 0;
/* clear chan information when the net device is disconnected */
- wl_update_prof(wl, _net_info->ndev, NULL, &chan, WL_PROF_CHAN);
- wl_cfg80211_determine_vsdb_mode(wl);
- for_each_ndev(wl, iter, next) {
+ wl_update_prof(cfg, _net_info->ndev, NULL, &chan, WL_PROF_CHAN);
+ wl_cfg80211_determine_vsdb_mode(cfg);
+ for_each_ndev(cfg, iter, next) {
if (iter->pm_restore && iter->pm) {
WL_DBG(("%s:restoring power save %s\n",
iter->ndev->name, (iter->pm ? "enabled" : "disabled")));
break;
}
iter->pm_restore = 0;
- iter->ndev->ieee80211_ptr->ps = true;
+ wl_cfg80211_update_power_mode(iter->ndev);
}
}
- wl_cfg80211_concurrent_roam(wl, 0);
+ wl_cfg80211_concurrent_roam(cfg, 0);
+#if defined(WLTDLS)
+ if (!cfg->vsdb_mode) {
+ err = wldev_iovar_setint(primary_dev, "tdls_enable", 1);
+ }
+#endif /* defined(WLTDLS) */
}
return err;
}
-
-static s32 wl_init_scan(struct wl_priv *wl)
+static s32 wl_init_scan(struct bcm_cfg80211 *cfg)
{
- struct wl_iscan_ctrl *iscan = wl_to_iscan(wl);
int err = 0;
- if (wl->iscan_on) {
- iscan->dev = wl_to_prmry_ndev(wl);
- iscan->state = WL_ISCAN_STATE_IDLE;
- wl_init_iscan_handler(iscan);
- iscan->timer_ms = WL_ISCAN_TIMER_INTERVAL_MS;
- init_timer(&iscan->timer);
- iscan->timer.data = (unsigned long) iscan;
- iscan->timer.function = wl_iscan_timer;
- sema_init(&iscan->sync, 0);
- iscan->tsk = kthread_run(wl_iscan_thread, iscan, "wl_iscan");
- if (IS_ERR(iscan->tsk)) {
- WL_ERR(("Could not create iscan thread\n"));
- iscan->tsk = NULL;
- return -ENOMEM;
- }
- iscan->data = wl;
- } else if (wl->escan_on) {
- wl->evt_handler[WLC_E_ESCAN_RESULT] = wl_escan_handler;
- wl->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
- wl_escan_init_sync_id(wl);
- }
+ cfg->evt_handler[WLC_E_ESCAN_RESULT] = wl_escan_handler;
+ cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
+ wl_escan_init_sync_id(cfg);
+
/* Init scan_timeout timer */
- init_timer(&wl->scan_timeout);
- wl->scan_timeout.data = (unsigned long) wl;
- wl->scan_timeout.function = wl_scan_timeout;
+ init_timer(&cfg->scan_timeout);
+ cfg->scan_timeout.data = (unsigned long) cfg;
+ cfg->scan_timeout.function = wl_scan_timeout;
return err;
}
-static s32 wl_init_priv(struct wl_priv *wl)
+static s32 wl_init_priv(struct bcm_cfg80211 *cfg)
{
- struct wiphy *wiphy = wl_to_wiphy(wl);
- struct net_device *ndev = wl_to_prmry_ndev(wl);
+ struct wiphy *wiphy = bcmcfg_to_wiphy(cfg);
+ struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
s32 err = 0;
- wl->scan_request = NULL;
- wl->pwr_save = !!(wiphy->flags & WIPHY_FLAG_PS_ON_BY_DEFAULT);
- wl->iscan_on = false;
- wl->escan_on = true;
- wl->roam_on = false;
- wl->iscan_kickstart = false;
- wl->active_scan = true;
- wl->rf_blocked = false;
- wl->vsdb_mode = false;
- wl->wlfc_on = false;
- wl->roamoff_on_concurrent = true;
+ cfg->scan_request = NULL;
+ cfg->pwr_save = !!(wiphy->flags & WIPHY_FLAG_PS_ON_BY_DEFAULT);
+ cfg->roam_on = false;
+ cfg->active_scan = true;
+ cfg->rf_blocked = false;
+ cfg->vsdb_mode = false;
+#if defined(BCMSDIO)
+ cfg->wlfc_on = false;
+#endif
+ cfg->roamoff_on_concurrent = true;
+ cfg->disable_roam_event = false;
/* register interested state */
- set_bit(WL_STATUS_CONNECTED, &wl->interrested_state);
- spin_lock_init(&wl->cfgdrv_lock);
- mutex_init(&wl->ioctl_buf_sync);
- init_waitqueue_head(&wl->netif_change_event);
- init_completion(&wl->send_af_done);
- init_completion(&wl->iface_disable);
- wl_init_eq(wl);
- err = wl_init_priv_mem(wl);
+ set_bit(WL_STATUS_CONNECTED, &cfg->interrested_state);
+ spin_lock_init(&cfg->cfgdrv_lock);
+ mutex_init(&cfg->ioctl_buf_sync);
+ init_waitqueue_head(&cfg->netif_change_event);
+ init_completion(&cfg->send_af_done);
+ init_completion(&cfg->iface_disable);
+ wl_init_eq(cfg);
+ err = wl_init_priv_mem(cfg);
if (err)
return err;
- if (wl_create_event_handler(wl))
+ if (wl_create_event_handler(cfg))
return -ENOMEM;
- wl_init_event_handler(wl);
- mutex_init(&wl->usr_sync);
- mutex_init(&wl->event_sync);
- err = wl_init_scan(wl);
+ wl_init_event_handler(cfg);
+ mutex_init(&cfg->usr_sync);
+ mutex_init(&cfg->event_sync);
+ err = wl_init_scan(cfg);
if (err)
return err;
- wl_init_conf(wl->conf);
- wl_init_prof(wl, ndev);
- wl_link_down(wl);
- DNGL_FUNC(dhd_cfg80211_init, (wl));
+ wl_init_conf(cfg->conf);
+ wl_init_prof(cfg, ndev);
+ wl_link_down(cfg);
+ DNGL_FUNC(dhd_cfg80211_init, (cfg));
return err;
}
-static void wl_deinit_priv(struct wl_priv *wl)
+static void wl_deinit_priv(struct bcm_cfg80211 *cfg)
{
- DNGL_FUNC(dhd_cfg80211_deinit, (wl));
- wl_destroy_event_handler(wl);
- wl_flush_eq(wl);
- wl_link_down(wl);
- del_timer_sync(&wl->scan_timeout);
- wl_term_iscan(wl);
- wl_deinit_priv_mem(wl);
- unregister_netdevice_notifier(&wl_cfg80211_netdev_notifier);
+ DNGL_FUNC(dhd_cfg80211_deinit, (cfg));
+ wl_destroy_event_handler(cfg);
+ wl_flush_eq(cfg);
+ wl_link_down(cfg);
+ del_timer_sync(&cfg->scan_timeout);
+ wl_deinit_priv_mem(cfg);
+ if (wl_cfg80211_netdev_notifier_registered) {
+ wl_cfg80211_netdev_notifier_registered = FALSE;
+ unregister_netdevice_notifier(&wl_cfg80211_netdev_notifier);
+ }
}
-#if defined(WL_ENABLE_P2P_IF)
+#if defined(WL_ENABLE_P2P_IF) || defined(WL_NEWCFG_PRIVCMD_SUPPORT)
static s32 wl_cfg80211_attach_p2p(void)
{
- struct wl_priv *wl = wlcfg_drv_priv;
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
WL_TRACE(("Enter \n"));
- if (wl_cfgp2p_register_ndev(wl) < 0) {
+ if (wl_cfgp2p_register_ndev(cfg) < 0) {
WL_ERR(("P2P attach failed. \n"));
return -ENODEV;
}
static s32 wl_cfg80211_detach_p2p(void)
{
- struct wl_priv *wl = wlcfg_drv_priv;
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
struct wireless_dev *wdev;
WL_DBG(("Enter \n"));
- if (!wl) {
+ if (!cfg) {
WL_ERR(("Invalid Ptr\n"));
return -EINVAL;
} else
- wdev = wl->p2p_wdev;
+ wdev = cfg->p2p_wdev;
+#ifndef WL_NEWCFG_PRIVCMD_SUPPORT
if (!wdev) {
WL_ERR(("Invalid Ptr\n"));
return -EINVAL;
}
+#endif /* WL_NEWCFG_PRIVCMD_SUPPORT */
- wl_cfgp2p_unregister_ndev(wl);
+ wl_cfgp2p_unregister_ndev(cfg);
- wl->p2p_wdev = NULL;
- wl->p2p_net = NULL;
+ cfg->p2p_wdev = NULL;
+ cfg->p2p_net = NULL;
+#ifndef WL_NEWCFG_PRIVCMD_SUPPORT
WL_DBG(("Freeing 0x%08x \n", (unsigned int)wdev));
kfree(wdev);
+#endif /* WL_NEWCFG_PRIVCMD_SUPPORT */
return 0;
}
-#endif /* WL_ENABLE_P2P_IF */
+#endif /* WL_ENABLE_P2P_IF || WL_NEWCFG_PRIVCMD_SUPPORT */
s32 wl_cfg80211_attach_post(struct net_device *ndev)
{
- struct wl_priv * wl = NULL;
+ struct bcm_cfg80211 * cfg = NULL;
s32 err = 0;
+ s32 ret = 0;
WL_TRACE(("In\n"));
if (unlikely(!ndev)) {
WL_ERR(("ndev is invaild\n"));
return -ENODEV;
}
- wl = wlcfg_drv_priv;
- if (unlikely(!wl)) {
- WL_ERR(("wl is invaild\n"));
+ cfg = g_bcm_cfg;
+ if (unlikely(!cfg)) {
+ WL_ERR(("cfg is invaild\n"));
return -EINVAL;
}
- if (!wl_get_drv_status(wl, READY, ndev)) {
- if (wl->wdev && wl_cfgp2p_supported(wl, ndev)) {
+ if (!wl_get_drv_status(cfg, READY, ndev)) {
+ if (cfg->wdev) {
+ ret = wl_cfgp2p_supported(cfg, ndev);
+ if (ret > 0) {
#if !defined(WL_ENABLE_P2P_IF)
- wl->wdev->wiphy->interface_modes |=
+ cfg->wdev->wiphy->interface_modes |=
(BIT(NL80211_IFTYPE_P2P_CLIENT)|
BIT(NL80211_IFTYPE_P2P_GO));
#endif /* !WL_ENABLE_P2P_IF */
- if ((err = wl_cfgp2p_init_priv(wl)) != 0)
+ if ((err = wl_cfgp2p_init_priv(cfg)) != 0)
goto fail;
#if defined(WL_ENABLE_P2P_IF)
- if (wl->p2p_net) {
+ if (cfg->p2p_net) {
/* Update MAC addr for p2p0 interface here. */
- memcpy(wl->p2p_net->dev_addr, ndev->dev_addr, ETH_ALEN);
- wl->p2p_net->dev_addr[0] |= 0x02;
- printk("%s: %s p2p_dev_addr="MACDBG "\n", __FUNCTION__,
- wl->p2p_net->name,
- MAC2STRDBG(wl->p2p_net->dev_addr));
+ memcpy(cfg->p2p_net->dev_addr, ndev->dev_addr, ETH_ALEN);
+ cfg->p2p_net->dev_addr[0] |= 0x02;
+ printk("%s: p2p_dev_addr="MACDBG "\n",
+ cfg->p2p_net->name,
+ MAC2STRDBG(cfg->p2p_net->dev_addr));
} else {
WL_ERR(("p2p_net not yet populated."
" Couldn't update the MAC Address for p2p0 \n"));
return -ENODEV;
}
#endif /* WL_ENABLE_P2P_IF */
-
- wl->p2p_supported = true;
+ cfg->p2p_supported = true;
+ } else if (ret == 0) {
+ if ((err = wl_cfgp2p_init_priv(cfg)) != 0)
+ goto fail;
+ } else {
+ /* SDIO bus timeout */
+ err = -ENODEV;
+ goto fail;
}
+ }
}
- wl_set_drv_status(wl, READY, ndev);
+ wl_set_drv_status(cfg, READY, ndev);
fail:
return err;
}
-s32 wl_cfg80211_attach(struct net_device *ndev, void *data)
+s32 wl_cfg80211_attach(struct net_device *ndev, void *context)
{
struct wireless_dev *wdev;
- struct wl_priv *wl;
+ struct bcm_cfg80211 *cfg;
s32 err = 0;
struct device *dev;
WL_ERR(("Could not allocate wireless device\n"));
return -ENOMEM;
}
- err = wl_setup_wiphy(wdev, dev, data);
+ err = wl_setup_wiphy(wdev, dev, context);
if (unlikely(err)) {
kfree(wdev);
return -ENOMEM;
}
wdev->iftype = wl_mode_to_nl80211_iftype(WL_MODE_BSS);
- wl = (struct wl_priv *)wiphy_priv(wdev->wiphy);
- wl->wdev = wdev;
- wl->pub = data;
- INIT_LIST_HEAD(&wl->net_list);
+ cfg = (struct bcm_cfg80211 *)wiphy_priv(wdev->wiphy);
+ cfg->wdev = wdev;
+ cfg->pub = context;
+ INIT_LIST_HEAD(&cfg->net_list);
ndev->ieee80211_ptr = wdev;
SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
wdev->netdev = ndev;
- wl->state_notifier = wl_notifier_change_state;
- err = wl_alloc_netinfo(wl, ndev, wdev, WL_MODE_BSS, PM_ENABLE);
+ cfg->state_notifier = wl_notifier_change_state;
+ err = wl_alloc_netinfo(cfg, ndev, wdev, WL_MODE_BSS, PM_ENABLE);
if (err) {
WL_ERR(("Failed to alloc net_info (%d)\n", err));
goto cfg80211_attach_out;
}
- err = wl_init_priv(wl);
+ err = wl_init_priv(cfg);
if (err) {
WL_ERR(("Failed to init iwm_priv (%d)\n", err));
goto cfg80211_attach_out;
}
- err = wl_setup_rfkill(wl, TRUE);
+ err = wl_setup_rfkill(cfg, TRUE);
if (err) {
WL_ERR(("Failed to setup rfkill %d\n", err));
goto cfg80211_attach_out;
}
#ifdef DEBUGFS_CFG80211
- err = wl_setup_debugfs(wl);
+ err = wl_setup_debugfs(cfg);
if (err) {
WL_ERR(("Failed to setup debugfs %d\n", err));
goto cfg80211_attach_out;
}
#endif
- err = register_netdevice_notifier(&wl_cfg80211_netdev_notifier);
- if (err) {
- WL_ERR(("Failed to register notifierl %d\n", err));
- goto cfg80211_attach_out;
+ if (!wl_cfg80211_netdev_notifier_registered) {
+ wl_cfg80211_netdev_notifier_registered = TRUE;
+ err = register_netdevice_notifier(&wl_cfg80211_netdev_notifier);
+ if (err) {
+ wl_cfg80211_netdev_notifier_registered = FALSE;
+ WL_ERR(("Failed to register notifierl %d\n", err));
+ goto cfg80211_attach_out;
+ }
}
#if defined(COEX_DHCP)
- err = wl_cfg80211_btcoex_init(wl);
- if (err)
- goto cfg80211_attach_out;
-#endif
-#if defined(BSSCACHE)
- if (wl_init_bss_cache_ctrl(&g_bss_cache_ctrl))
+ cfg->btcoex_info = wl_cfg80211_btcoex_init(cfg->wdev->netdev);
+ if (!cfg->btcoex_info)
goto cfg80211_attach_out;
#endif
- wlcfg_drv_priv = wl;
+ g_bcm_cfg = cfg;
-#if defined(WL_ENABLE_P2P_IF)
+#if defined(WL_ENABLE_P2P_IF) || defined(WL_NEWCFG_PRIVCMD_SUPPORT)
err = wl_cfg80211_attach_p2p();
if (err)
goto cfg80211_attach_out;
-#endif /* WL_ENABLE_P2P_IF */
+#endif /* WL_ENABLE_P2P_IF || WL_NEWCFG_PRIVCMD_SUPPORT */
return err;
cfg80211_attach_out:
- wl_setup_rfkill(wl, FALSE);
- wl_free_wdev(wl);
+ wl_setup_rfkill(cfg, FALSE);
+ wl_free_wdev(cfg);
return err;
}
void wl_cfg80211_detach(void *para)
{
- struct wl_priv *wl;
+ struct bcm_cfg80211 *cfg;
(void)para;
- wl = wlcfg_drv_priv;
+ cfg = g_bcm_cfg;
WL_TRACE(("In\n"));
+ wl_add_remove_pm_enable_work(cfg, FALSE, WL_HANDLER_DEL);
+
#if defined(COEX_DHCP)
- wl_cfg80211_btcoex_deinit(wl);
+ wl_cfg80211_btcoex_deinit();
+ cfg->btcoex_info = NULL;
#endif
- wl_setup_rfkill(wl, FALSE);
+ wl_setup_rfkill(cfg, FALSE);
#ifdef DEBUGFS_CFG80211
- wl_free_debugfs(wl);
+ wl_free_debugfs(cfg);
#endif
- if (wl->p2p_supported) {
- if (timer_pending(&wl->p2p->listen_timer))
- del_timer_sync(&wl->p2p->listen_timer);
- wl_cfgp2p_deinit_priv(wl);
+ if (cfg->p2p_supported) {
+ if (timer_pending(&cfg->p2p->listen_timer))
+ del_timer_sync(&cfg->p2p->listen_timer);
+ wl_cfgp2p_deinit_priv(cfg);
}
+ if (timer_pending(&cfg->scan_timeout))
+ del_timer_sync(&cfg->scan_timeout);
+
#if defined(WL_CFG80211_P2P_DEV_IF)
- wl_cfgp2p_del_p2p_disc_if(wl->p2p_wdev);
-#elif defined(WL_ENABLE_P2P_IF)
+ wl_cfgp2p_del_p2p_disc_if(cfg->p2p_wdev, cfg);
+#endif /* WL_CFG80211_P2P_DEV_IF */
+#if defined(WL_ENABLE_P2P_IF) || defined(WL_NEWCFG_PRIVCMD_SUPPORT)
wl_cfg80211_detach_p2p();
-#endif /* WL_CFG80211_P2P_DEV_IF */
+#endif /* WL_ENABLE_P2P_IF || WL_NEWCFG_PRIVCMD_SUPPORT */
- wl_cfg80211_ibss_vsie_free(wl);
- wl_deinit_priv(wl);
- wlcfg_drv_priv = NULL;
+ wl_cfg80211_ibss_vsie_free(cfg);
+ wl_deinit_priv(cfg);
+ g_bcm_cfg = NULL;
wl_cfg80211_clear_parent_dev();
- wl_free_wdev(wl);
+ wl_free_wdev(cfg);
#if defined(RSSIAVG)
wl_free_rssi_cache(&g_rssi_cache_ctrl);
wl_free_rssi_cache(&g_rssi2_cache_ctrl);
#if defined(BSSCACHE)
wl_release_bss_cache_ctrl(&g_bss_cache_ctrl);
#endif
- /* PLEASE do NOT call any function after wl_free_wdev, the driver's private structure "wl",
- * which is the private part of wiphy, has been freed in wl_free_wdev !!!!!!!!!!!
- */
-}
-
-static void wl_wakeup_event(struct wl_priv *wl)
-{
- if (wl->event_tsk.thr_pid >= 0) {
- DHD_OS_WAKE_LOCK(wl->pub);
- up(&wl->event_tsk.sema);
- }
+ /* PLEASE do NOT call any function after wl_free_wdev, the driver's private
+ * structure "cfg", which is the private part of wiphy, has been freed in
+ * wl_free_wdev !!!!!!!!!!!
+ */
}
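The comment above holds because the driver's private structure is not a separate allocation: it lives inside the wiphy object, is reached through wiphy_priv(), and is released together with the wiphy. A minimal sketch of that lifetime, with an illustrative (empty) cfg80211_ops table and a my_priv structure standing in for the real ones:

#include <net/cfg80211.h>

static const struct cfg80211_ops my_cfg80211_ops;	/* illustrative; normally filled in */

struct my_priv {
	int state;
};

static struct wiphy *my_wiphy_alloc(void)
{
	/* the private area is allocated as part of the wiphy object itself */
	return wiphy_new(&my_cfg80211_ops, sizeof(struct my_priv));
}

static void my_wiphy_release(struct wiphy *wiphy)
{
	struct my_priv *priv = wiphy_priv(wiphy);	/* points inside the wiphy */

	priv->state = 0;	/* last legal access */
	wiphy_free(wiphy);
	/* priv is now dangling: it was freed together with the wiphy,
	 * so nothing may touch it after this point
	 */
}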
-#if (defined(WL_CFG80211_P2P_DEV_IF) || defined(WL_ENABLE_P2P_IF))
-static int wl_is_p2p_event(struct wl_event_q *e)
+static void wl_wakeup_event(struct bcm_cfg80211 *cfg)
{
- switch (e->etype) {
- /* We have to seperate out the P2P events received
- * on primary interface so that it can be send up
- * via p2p0 interface.
- */
- case WLC_E_P2P_PROBREQ_MSG:
- case WLC_E_P2P_DISC_LISTEN_COMPLETE:
- case WLC_E_ACTION_FRAME_RX:
- case WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE:
- case WLC_E_ACTION_FRAME_COMPLETE:
-
- if (e->emsg.ifidx != 0) {
- WL_TRACE(("P2P event(%d) on virtual interface(ifidx:%d)\n",
- e->etype, e->emsg.ifidx));
- /* We are only bothered about the P2P events received
- * on primary interface. For rest of them return false
- * so that it is sent over the interface corresponding
- * to the ifidx.
- */
- return FALSE;
- } else {
- WL_TRACE(("P2P event(%d) on interface(ifidx:%d)\n",
- e->etype, e->emsg.ifidx));
- return TRUE;
- }
- break;
-
- default:
- WL_TRACE(("NON-P2P event(%d) on interface(ifidx:%d)\n",
- e->etype, e->emsg.ifidx));
- return FALSE;
+ if (cfg->event_tsk.thr_pid >= 0) {
+ DHD_OS_WAKE_LOCK(cfg->pub);
+ up(&cfg->event_tsk.sema);
}
}
-#endif /* BCMDONGLEHOST && (WL_CFG80211_P2P_DEV_IF || WL_ENABLE_P2P_IF) */
static s32 wl_event_handler(void *data)
{
- struct wl_priv *wl = NULL;
+ struct bcm_cfg80211 *cfg = NULL;
struct wl_event_q *e;
tsk_ctl_t *tsk = (tsk_ctl_t *)data;
bcm_struct_cfgdev *cfgdev = NULL;
- wl = (struct wl_priv *)tsk->parent;
+ cfg = (struct bcm_cfg80211 *)tsk->parent;
- printk("tsk Enter, tsk = 0x%08x\n", (unsigned int)tsk);
+ printk("tsk Enter, tsk = 0x%p\n", tsk);
while (down_interruptible (&tsk->sema) == 0) {
SMP_RD_BARRIER_DEPENDS();
if (tsk->terminated)
break;
- while ((e = wl_deq_event(wl))) {
+ while ((e = wl_deq_event(cfg))) {
WL_DBG(("event type (%d), if idx: %d\n", e->etype, e->emsg.ifidx));
/* All P2P device address related events comes on primary interface since
* there is no corresponding bsscfg for P2P interface. Map it to p2p0
* interface.
*/
#if defined(WL_CFG80211_P2P_DEV_IF)
- if ((wl_is_p2p_event(e) == TRUE) && (wl->p2p_wdev)) {
- cfgdev = wl_to_p2p_wdev(wl);
+ if (WL_IS_P2P_DEV_EVENT(e) && (cfg->p2p_wdev)) {
+ cfgdev = bcmcfg_to_p2p_wdev(cfg);
} else {
- cfgdev = ndev_to_wdev(dhd_idx2net((struct dhd_pub *)(wl->pub),
- e->emsg.ifidx));
+ struct net_device *ndev = NULL;
+
+ ndev = dhd_idx2net((struct dhd_pub *)(cfg->pub), e->emsg.ifidx);
+ if (ndev)
+ cfgdev = ndev_to_wdev(ndev);
}
#elif defined(WL_ENABLE_P2P_IF)
- if ((wl_is_p2p_event(e) == TRUE) && (wl->p2p_net)) {
- cfgdev = wl->p2p_net;
+ if (WL_IS_P2P_DEV_EVENT(e) && (cfg->p2p_net)) {
+ cfgdev = cfg->p2p_net;
} else {
- cfgdev = dhd_idx2net((struct dhd_pub *)(wl->pub),
+ cfgdev = dhd_idx2net((struct dhd_pub *)(cfg->pub),
e->emsg.ifidx);
}
#endif /* WL_CFG80211_P2P_DEV_IF */
if (!cfgdev) {
#if defined(WL_CFG80211_P2P_DEV_IF)
- cfgdev = wl_to_prmry_wdev(wl);
+ cfgdev = bcmcfg_to_prmry_wdev(cfg);
#elif defined(WL_ENABLE_P2P_IF)
- cfgdev = wl_to_prmry_ndev(wl);
+ cfgdev = bcmcfg_to_prmry_ndev(cfg);
#endif /* WL_CFG80211_P2P_DEV_IF */
}
- if (e->etype < WLC_E_LAST && wl->evt_handler[e->etype]) {
- wl->evt_handler[e->etype] (wl, cfgdev, &e->emsg, e->edata);
+ if (e->etype < WLC_E_LAST && cfg->evt_handler[e->etype]) {
+ cfg->evt_handler[e->etype] (cfg, cfgdev, &e->emsg, e->edata);
} else {
WL_DBG(("Unknown Event (%d): ignoring\n", e->etype));
}
wl_put_event(e);
}
- DHD_OS_WAKE_UNLOCK(wl->pub);
+ DHD_OS_WAKE_UNLOCK(cfg->pub);
}
- WL_ERR(("was terminated\n"));
+ printk("%s: was terminated\n", __FUNCTION__);
complete_and_exit(&tsk->completed, 0);
return 0;
}
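wl_event_handler() above is the consumer half of a classic deferred-event design: the firmware event callback only copies the event into a queue and ups a semaphore, while this kernel thread drains the queue and dispatches each event in process context. A compact sketch of the same producer/consumer shape (all my_* names are illustrative):

#include <linux/list.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct my_event {
	struct list_head list;
	u32 etype;
};

static LIST_HEAD(my_eq);
static DEFINE_SPINLOCK(my_eq_lock);
static struct semaphore my_eq_sema = __SEMAPHORE_INITIALIZER(my_eq_sema, 0);

/* producer: called from the event/RX path, only queues a copy and wakes the thread */
static void my_enq_event(u32 etype)
{
	struct my_event *e = kzalloc(sizeof(*e), GFP_ATOMIC);
	unsigned long flags;

	if (!e)
		return;
	e->etype = etype;
	spin_lock_irqsave(&my_eq_lock, flags);
	list_add_tail(&e->list, &my_eq);
	spin_unlock_irqrestore(&my_eq_lock, flags);
	up(&my_eq_sema);
}

/* consumer: runs as a kthread, drains the queue and dispatches in process context */
static int my_event_thread(void *unused)
{
	struct my_event *e;
	unsigned long flags;

	while (down_interruptible(&my_eq_sema) == 0) {
		spin_lock_irqsave(&my_eq_lock, flags);
		e = list_first_entry_or_null(&my_eq, struct my_event, list);
		if (e)
			list_del(&e->list);
		spin_unlock_irqrestore(&my_eq_lock, flags);
		if (!e)
			continue;
		/* dispatch e->etype to its handler here */
		kfree(e);
	}
	return 0;
}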
wl_cfg80211_event(struct net_device *ndev, const wl_event_msg_t * e, void *data)
{
u32 event_type = ntoh32(e->event_type);
- struct wl_priv *wl = wlcfg_drv_priv;
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
#if (WL_DBG_LEVEL > 0)
s8 *estr = (event_type <= sizeof(wl_dbg_estr) / WL_DBG_ESTR_MAX - 1) ?
WL_DBG(("event_type (%d):" "WLC_E_" "%s\n", event_type, estr));
#endif /* (WL_DBG_LEVEL > 0) */
+ if (wl_get_p2p_status(cfg, IF_CHANGING) || wl_get_p2p_status(cfg, IF_ADDING)) {
+ WL_ERR(("during IF change, ignore event %d\n", event_type));
+ return;
+ }
+
+ if (ndev != bcmcfg_to_prmry_ndev(cfg) && cfg->p2p_supported) {
+ if ((cfg->bss_cfgdev) &&
+ (ndev == cfgdev_to_wlc_ndev(cfg->bss_cfgdev, cfg))) {
+ /* Event is corresponding to the secondary STA interface */
+ WL_DBG(("DualSta event (%d), proceed to enqueue it \n", event_type));
+ } else if (ndev != wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION) &&
+#if defined(WL_ENABLE_P2P_IF)
+ (ndev != (cfg->p2p_net ? cfg->p2p_net :
+ wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE))) &&
+#else
+ (ndev != wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE)) &&
+#endif /* WL_ENABLE_P2P_IF */
+ TRUE) {
+ WL_ERR(("ignore event %d, not interested\n", event_type));
+ return;
+ }
+ }
+
if (event_type == WLC_E_PFN_NET_FOUND) {
WL_DBG((" PNOEVENT: PNO_NET_FOUND\n"));
}
WL_DBG((" PNOEVENT: PNO_NET_LOST\n"));
}
- if (likely(!wl_enq_event(wl, ndev, event_type, e, data)))
- wl_wakeup_event(wl);
+ if (likely(!wl_enq_event(cfg, ndev, event_type, e, data)))
+ wl_wakeup_event(cfg);
}
-static void wl_init_eq(struct wl_priv *wl)
+static void wl_init_eq(struct bcm_cfg80211 *cfg)
{
- wl_init_eq_lock(wl);
- INIT_LIST_HEAD(&wl->eq_list);
+ wl_init_eq_lock(cfg);
+ INIT_LIST_HEAD(&cfg->eq_list);
}
-static void wl_flush_eq(struct wl_priv *wl)
+static void wl_flush_eq(struct bcm_cfg80211 *cfg)
{
struct wl_event_q *e;
unsigned long flags;
- flags = wl_lock_eq(wl);
- while (!list_empty(&wl->eq_list)) {
- e = list_first_entry(&wl->eq_list, struct wl_event_q, eq_list);
+ flags = wl_lock_eq(cfg);
+ while (!list_empty(&cfg->eq_list)) {
+ e = list_first_entry(&cfg->eq_list, struct wl_event_q, eq_list);
list_del(&e->eq_list);
kfree(e);
}
- wl_unlock_eq(wl, flags);
+ wl_unlock_eq(cfg, flags);
}
/*
* retrieve first queued event from head
*/
-static struct wl_event_q *wl_deq_event(struct wl_priv *wl)
+static struct wl_event_q *wl_deq_event(struct bcm_cfg80211 *cfg)
{
struct wl_event_q *e = NULL;
unsigned long flags;
- flags = wl_lock_eq(wl);
- if (likely(!list_empty(&wl->eq_list))) {
- e = list_first_entry(&wl->eq_list, struct wl_event_q, eq_list);
+ flags = wl_lock_eq(cfg);
+ if (likely(!list_empty(&cfg->eq_list))) {
+ e = list_first_entry(&cfg->eq_list, struct wl_event_q, eq_list);
list_del(&e->eq_list);
}
- wl_unlock_eq(wl, flags);
+ wl_unlock_eq(cfg, flags);
return e;
}
*/
static s32
-wl_enq_event(struct wl_priv *wl, struct net_device *ndev, u32 event, const wl_event_msg_t *msg,
- void *data)
+wl_enq_event(struct bcm_cfg80211 *cfg, struct net_device *ndev, u32 event,
+ const wl_event_msg_t *msg, void *data)
{
struct wl_event_q *e;
s32 err = 0;
memcpy(&e->emsg, msg, sizeof(wl_event_msg_t));
if (data)
memcpy(e->edata, data, data_len);
- flags = wl_lock_eq(wl);
- list_add_tail(&e->eq_list, &wl->eq_list);
- wl_unlock_eq(wl, flags);
+ flags = wl_lock_eq(cfg);
+ list_add_tail(&e->eq_list, &cfg->eq_list);
+ wl_unlock_eq(cfg, flags);
return err;
}
kfree(e);
}
-static s32 wl_config_ifmode(struct wl_priv *wl, struct net_device *ndev, s32 iftype)
+static s32 wl_config_ifmode(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 iftype)
{
s32 infra = 0;
s32 err = 0;
return err;
}
- wl_set_mode_by_netdev(wl, ndev, mode);
+ wl_set_mode_by_netdev(cfg, ndev, mode);
return 0;
}
s32 wl_cfg80211_apply_eventbuffer(
struct net_device *ndev,
- struct wl_priv *wl,
+ struct bcm_cfg80211 *cfg,
wl_eventmsg_buf_t *ev)
{
char eventmask[WL_EVENTING_MASK_LEN];
if (!ev || (!ev->num))
return -EINVAL;
- mutex_lock(&wl->event_sync);
+ mutex_lock(&cfg->event_sync);
/* Read event_msgs mask */
bcm_mkiovar("event_msgs", NULL, 0, iovbuf,
}
exit:
- mutex_unlock(&wl->event_sync);
+ mutex_unlock(&cfg->event_sync);
return ret;
}
s8 iovbuf[WL_EVENTING_MASK_LEN + 12];
s8 eventmask[WL_EVENTING_MASK_LEN];
s32 err = 0;
- struct wl_priv *wl = wlcfg_drv_priv;
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
- if (!ndev || !wl)
+ if (!ndev || !cfg)
return -ENODEV;
- mutex_lock(&wl->event_sync);
+ mutex_lock(&cfg->event_sync);
/* Setup event_msgs */
bcm_mkiovar("event_msgs", NULL, 0, iovbuf,
}
eventmsg_out:
- mutex_unlock(&wl->event_sync);
+ mutex_unlock(&cfg->event_sync);
return err;
}
-static int wl_construct_reginfo(struct wl_priv *wl, s32 bw_cap)
+static int wl_construct_reginfo(struct bcm_cfg80211 *cfg, s32 bw_cap)
{
- struct net_device *dev = wl_to_prmry_ndev(wl);
+ struct net_device *dev = bcmcfg_to_prmry_ndev(cfg);
struct ieee80211_channel *band_chan_arr = NULL;
wl_uint32_list_t *list;
u32 i, j, index, n_2g, n_5g, band, channel, array_size;
err = wldev_iovar_getbuf_bsscfg(dev, "chanspecs", NULL,
- 0, pbuf, LOCAL_BUF_LEN, 0, &wl->ioctl_buf_sync);
+ 0, pbuf, LOCAL_BUF_LEN, 0, &cfg->ioctl_buf_sync);
if (err != 0) {
WL_ERR(("get chanspecs failed with %d\n", err));
kfree(pbuf);
ht40_allowed = false;
c = (chanspec_t)dtoh32(list->element[i]);
c = wl_chspec_driver_to_host(c);
- channel = CHSPEC_CHANNEL(c);
- if (!dhd_conf_match_channel((dhd_pub_t *)wl->pub, channel))
- continue;
- if (CHSPEC_IS40(c)) {
- if (CHSPEC_SB_UPPER(c))
- channel += CH_10MHZ_APART;
- else
- channel -= CH_10MHZ_APART;
- } else if (CHSPEC_IS80(c)) {
- WL_DBG(("HT80 center channel : %d\n", channel));
+ channel = wf_chspec_ctlchan(c);
+
+ if (!CHSPEC_IS40(c) && ! CHSPEC_IS20(c)) {
+ WL_DBG(("HT80/160/80p80 center channel : %d\n", channel));
continue;
}
if (CHSPEC_IS2G(c) && (channel >= CH_MIN_2G_CHANNEL) &&
channel = wl_chspec_host_to_driver(channel);
err = wldev_iovar_getint(dev, "per_chan_info", &channel);
if (!err) {
- if (channel & WL_CHAN_RADAR)
+ if (channel & WL_CHAN_RADAR) {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
band_chan_arr[index].flags |=
- (IEEE80211_CHAN_RADAR |
- IEEE80211_CHAN_NO_IBSS);
+ (IEEE80211_CHAN_RADAR
+ | IEEE80211_CHAN_NO_IBSS);
+#else
+ band_chan_arr[index].flags |=
+ IEEE80211_CHAN_RADAR;
+#endif
+ }
+
if (channel & WL_CHAN_PASSIVE)
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
band_chan_arr[index].flags |=
IEEE80211_CHAN_PASSIVE_SCAN;
+#else
+ band_chan_arr[index].flags |=
+ IEEE80211_CHAN_NO_IR;
+#endif
} else if (err == BCME_UNSUPPORTED) {
dfs_radar_disabled = TRUE;
WL_ERR(("does not support per_chan_info\n"));
return err;
}
-s32 wl_update_wiphybands(struct wl_priv *wl, bool notify)
+s32 wl_update_wiphybands(struct bcm_cfg80211 *cfg, bool notify)
{
struct wiphy *wiphy;
struct net_device *dev;
s32 cur_band = -1;
struct ieee80211_supported_band *bands[IEEE80211_NUM_BANDS] = {NULL, };
- if (wl == NULL) {
- wl = wlcfg_drv_priv;
- mutex_lock(&wl->usr_sync);
+ if (cfg == NULL) {
+ cfg = g_bcm_cfg;
+ mutex_lock(&cfg->usr_sync);
rollback_lock = true;
}
- dev = wl_to_prmry_ndev(wl);
+ dev = bcmcfg_to_prmry_ndev(cfg);
memset(bandlist, 0, sizeof(bandlist));
err = wldev_ioctl(dev, WLC_GET_BANDLIST, bandlist,
if (unlikely(err)) {
WL_ERR(("error reading nmode (%d)\n", err));
} else {
- /* For nmodeonly check bw cap */
+ /* For nmodeonly check bw cap */
err = wldev_iovar_getint(dev, "mimo_bw_cap", &bw_cap);
if (unlikely(err)) {
WL_ERR(("error get mimo_bw_cap (%d)\n", err));
}
}
- err = wl_construct_reginfo(wl, bw_cap);
+ err = wl_construct_reginfo(cfg, bw_cap);
if (err) {
WL_ERR(("wl_construct_reginfo() fails err=%d\n", err));
if (err != BCME_UNSUPPORTED)
goto end_bands;
err = 0;
}
- wiphy = wl_to_wiphy(wl);
+ wiphy = bcmcfg_to_wiphy(cfg);
nband = bandlist[0];
for (i = 1; i <= nband && i < ARRAYSIZE(bandlist); i++) {
end_bands:
if (rollback_lock)
- mutex_unlock(&wl->usr_sync);
+ mutex_unlock(&cfg->usr_sync);
return err;
}
-static s32 __wl_cfg80211_up(struct wl_priv *wl)
+static s32 __wl_cfg80211_up(struct bcm_cfg80211 *cfg)
{
s32 err = 0;
#ifdef WL_HOST_BAND_MGMT
s32 ret = 0;
#endif /* WL_HOST_BAND_MGMT */
- struct net_device *ndev = wl_to_prmry_ndev(wl);
+ struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
struct wireless_dev *wdev = ndev->ieee80211_ptr;
WL_DBG(("In\n"));
- err = dhd_config_dongle(wl, false);
+ err = dhd_config_dongle(cfg);
if (unlikely(err))
return err;
- err = wl_config_ifmode(wl, ndev, wdev->iftype);
+ err = wl_config_ifmode(cfg, ndev, wdev->iftype);
if (unlikely(err && err != -EINPROGRESS)) {
WL_ERR(("wl_config_ifmode failed\n"));
+ if (err == -1) {
+ WL_ERR(("return error %d\n", err));
+ return err;
+ }
}
- err = wl_update_wiphybands(wl, true);
+ err = wl_update_wiphybands(cfg, true);
if (unlikely(err)) {
WL_ERR(("wl_update_wiphybands failed\n"));
+ if (err == -1) {
+ WL_ERR(("return error %d\n", err));
+ return err;
+ }
}
- err = dhd_monitor_init(wl->pub);
- err = wl_invoke_iscan(wl);
+ err = dhd_monitor_init(cfg->pub);
#ifdef WL_HOST_BAND_MGMT
/* By default the curr_band is initialized to BAND_AUTO */
}
}
#endif /* WL_HOST_BAND_MGMT */
-
#if defined(DHCP_SCAN_SUPPRESS)
/* wlan scan_supp timer and work thread info */
- init_timer(&wl->scan_supp_timer);
- wl->scan_supp_timer.data = (ulong)wl;
- wl->scan_supp_timer.function = wl_cfg80211_scan_supp_timerfunc;
- INIT_WORK(&wl->wlan_work, wl_cfg80211_work_handler);
+ init_timer(&cfg->scan_supp_timer);
+ cfg->scan_supp_timer.data = (ulong)cfg;
+ cfg->scan_supp_timer.function = wl_cfg80211_scan_supp_timerfunc;
+ INIT_WORK(&cfg->wlan_work, wl_cfg80211_work_handler);
#endif /* DHCP_SCAN_SUPPRESS */
- INIT_DELAYED_WORK(&wl->pm_enable_work, wl_cfg80211_work_handler);
- wl_set_drv_status(wl, READY, ndev);
+ INIT_DELAYED_WORK(&cfg->pm_enable_work, wl_cfg80211_work_handler);
+ wl_set_drv_status(cfg, READY, ndev);
return err;
}
-static s32 __wl_cfg80211_down(struct wl_priv *wl)
+static s32 __wl_cfg80211_down(struct bcm_cfg80211 *cfg)
{
s32 err = 0;
unsigned long flags;
struct net_info *iter, *next;
- struct net_device *ndev = wl_to_prmry_ndev(wl);
-#if defined(WL_CFG80211) && defined(WL_ENABLE_P2P_IF)
-// struct net_device *p2p_net = wl->p2p_net;
-#endif /* WL_CFG80211 && WL_ENABLE_P2P_IF */
+ struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
+#if defined(WL_CFG80211) && (defined(WL_ENABLE_P2P_IF)|| \
+ defined(WL_NEWCFG_PRIVCMD_SUPPORT))
+ struct net_device *p2p_net = cfg->p2p_net;
+#endif /* WL_CFG80211 && (WL_ENABLE_P2P_IF || WL_NEWCFG_PRIVCMD_SUPPORT) */
u32 bssidx = 0;
#ifdef PROP_TXSTATUS_VSDB
- dhd_pub_t *dhd = (dhd_pub_t *)(wl->pub);
+#if defined(BCMSDIO)
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
+#endif
#endif /* PROP_TXSTATUS_VSDB */
WL_DBG(("In\n"));
- if (wl->pm_enable_work_on) {
- cancel_delayed_work_sync(&wl->pm_enable_work);
- wl->pm_enable_work_on = false;
- }
+ /* Delete pm_enable_work */
+ wl_add_remove_pm_enable_work(cfg, FALSE, WL_HANDLER_DEL);
- if (wl->p2p_supported) {
- wl_clr_p2p_status(wl, GO_NEG_PHASE);
+#ifdef WL_NAN
+ wl_cfgnan_stop_handler(ndev, g_bcm_cfg, NULL, NULL);
+#endif /* WL_NAN */
+
+ if (cfg->p2p_supported) {
+ wl_clr_p2p_status(cfg, GO_NEG_PHASE);
#ifdef PROP_TXSTATUS_VSDB
- if (wl->p2p->vif_created) {
- if (dhd->wlfc_enabled && wl->wlfc_on) {
- dhd->wlfc_enabled = false;
+#if defined(BCMSDIO)
+ if (cfg->p2p->vif_created) {
+ bool enabled = false;
+ dhd_wlfc_get_enable(dhd, &enabled);
+ if (enabled && cfg->wlfc_on && dhd->op_mode != DHD_FLAG_HOSTAP_MODE &&
+ dhd->op_mode != DHD_FLAG_IBSS_MODE) {
dhd_wlfc_deinit(dhd);
- if (dhd->plat_deinit)
- dhd->plat_deinit((void *)dhd);
- wl->wlfc_on = false;
+ cfg->wlfc_on = false;
}
}
+#endif
#endif /* PROP_TXSTATUS_VSDB */
}
#if defined(DHCP_SCAN_SUPPRESS)
/* Force clear of scan_suppress */
- if (wl->scan_suppressed)
+ if (cfg->scan_suppressed)
wl_cfg80211_scan_suppress(ndev, 0);
- if (timer_pending(&wl->scan_supp_timer))
- del_timer_sync(&wl->scan_supp_timer);
- cancel_work_sync(&wl->wlan_work);
+ if (timer_pending(&cfg->scan_supp_timer))
+ del_timer_sync(&cfg->scan_supp_timer);
+ cancel_work_sync(&cfg->wlan_work);
#endif /* DHCP_SCAN_SUPPRESS */
/* If primary BSS is operational (for e.g SoftAP), bring it down */
- if (!(wl_cfgp2p_find_idx(wl, ndev, &bssidx)) &&
+ if (!(wl_cfgp2p_find_idx(cfg, ndev, &bssidx)) &&
wl_cfgp2p_bss_isup(ndev, bssidx)) {
- if (wl_cfgp2p_bss(wl, ndev, bssidx, 0) < 0)
+ if (wl_cfgp2p_bss(cfg, ndev, bssidx, 0) < 0)
WL_ERR(("BSS down failed \n"));
}
/* Check if cfg80211 interface is already down */
- if (!wl_get_drv_status(wl, READY, ndev))
+ if (!wl_get_drv_status(cfg, READY, ndev))
return err; /* it is even not ready */
- for_each_ndev(wl, iter, next)
- wl_set_drv_status(wl, SCAN_ABORTING, iter->ndev);
+ for_each_ndev(cfg, iter, next)
+ wl_set_drv_status(cfg, SCAN_ABORTING, iter->ndev);
#ifdef WL_SDO
- wl_cfg80211_sdo_deinit(wl);
+ wl_cfg80211_sdo_deinit(cfg);
#endif
- wl_term_iscan(wl);
- spin_lock_irqsave(&wl->cfgdrv_lock, flags);
- if (wl->scan_request) {
- cfg80211_scan_done(wl->scan_request, true);
- wl->scan_request = NULL;
+ spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+ if (cfg->scan_request) {
+ cfg80211_scan_done(cfg->scan_request, true);
+ cfg->scan_request = NULL;
}
- spin_unlock_irqrestore(&wl->cfgdrv_lock, flags);
+ spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
- for_each_ndev(wl, iter, next) {
- wl_clr_drv_status(wl, READY, iter->ndev);
- wl_clr_drv_status(wl, SCANNING, iter->ndev);
- wl_clr_drv_status(wl, SCAN_ABORTING, iter->ndev);
- wl_clr_drv_status(wl, CONNECTING, iter->ndev);
- wl_clr_drv_status(wl, CONNECTED, iter->ndev);
- wl_clr_drv_status(wl, DISCONNECTING, iter->ndev);
- wl_clr_drv_status(wl, AP_CREATED, iter->ndev);
- wl_clr_drv_status(wl, AP_CREATING, iter->ndev);
+ for_each_ndev(cfg, iter, next) {
+ wl_clr_drv_status(cfg, READY, iter->ndev);
+ wl_clr_drv_status(cfg, SCANNING, iter->ndev);
+ wl_clr_drv_status(cfg, SCAN_ABORTING, iter->ndev);
+ wl_clr_drv_status(cfg, CONNECTING, iter->ndev);
+ wl_clr_drv_status(cfg, CONNECTED, iter->ndev);
+ wl_clr_drv_status(cfg, DISCONNECTING, iter->ndev);
+ wl_clr_drv_status(cfg, AP_CREATED, iter->ndev);
+ wl_clr_drv_status(cfg, AP_CREATING, iter->ndev);
}
- wl_to_prmry_ndev(wl)->ieee80211_ptr->iftype =
+ bcmcfg_to_prmry_ndev(cfg)->ieee80211_ptr->iftype =
NL80211_IFTYPE_STATION;
-#if defined(WL_CFG80211) && defined(WL_ENABLE_P2P_IF)
-// if (p2p_net) // terence 20131219: don't close p2p0 or p2p0 never go back anymore
-// dev_close(p2p_net);
-#endif /* WL_CFG80211 && WL_ENABLE_P2P_IF */
- DNGL_FUNC(dhd_cfg80211_down, (wl));
- wl_flush_eq(wl);
- wl_link_down(wl);
- //if (wl->p2p_supported)
- // wl_cfgp2p_down(wl);
+#if defined(WL_CFG80211) && (defined(WL_ENABLE_P2P_IF)|| \
+ defined(WL_NEWCFG_PRIVCMD_SUPPORT))
+ if (p2p_net)
+ dev_close(p2p_net);
+#endif /* WL_CFG80211 && (WL_ENABLE_P2P_IF || WL_NEWCFG_PRIVCMD_SUPPORT) */
+ wl_flush_eq(cfg);
+ wl_link_down(cfg);
+ if (cfg->p2p_supported)
+ wl_cfgp2p_down(cfg);
+ if (cfg->ap_info) {
+ kfree(cfg->ap_info->wpa_ie);
+ kfree(cfg->ap_info->rsn_ie);
+ kfree(cfg->ap_info->wps_ie);
+ kfree(cfg->ap_info);
+ cfg->ap_info = NULL;
+ }
dhd_monitor_uninit();
+#ifdef WLAIBSS_MCHAN
+ bcm_cfg80211_del_ibss_if(cfg->wdev->wiphy, cfg->ibss_cfgdev);
+#endif /* WLAIBSS_MCHAN */
+
+#if defined(DUAL_STA) || defined(DUAL_STA_STATIC_IF)
+ /* Clean up if not removed already */
+ if (cfg->bss_cfgdev)
+ wl_cfg80211_del_iface(cfg->wdev->wiphy, cfg->bss_cfgdev);
+#endif /* defined (DUAL_STA) || defined (DUAL_STA_STATIC_IF) */
+
+ DNGL_FUNC(dhd_cfg80211_down, (cfg));
return err;
}
s32 wl_cfg80211_up(void *para)
{
- struct wl_priv *wl;
+ struct bcm_cfg80211 *cfg;
s32 err = 0;
int val = 1;
dhd_pub_t *dhd;
(void)para;
WL_DBG(("In\n"));
- wl = wlcfg_drv_priv;
+ cfg = g_bcm_cfg;
- if ((err = wldev_ioctl(wl_to_prmry_ndev(wl), WLC_GET_VERSION, &val,
+ if ((err = wldev_ioctl(bcmcfg_to_prmry_ndev(cfg), WLC_GET_VERSION, &val,
sizeof(int), false) < 0)) {
WL_ERR(("WLC_GET_VERSION failed, err=%d\n", err));
return err;
ioctl_version = val;
WL_TRACE(("WLC_GET_VERSION=%d\n", ioctl_version));
- mutex_lock(&wl->usr_sync);
- dhd = (dhd_pub_t *)(wl->pub);
+ mutex_lock(&cfg->usr_sync);
+ dhd = (dhd_pub_t *)(cfg->pub);
if (!(dhd->op_mode & DHD_FLAG_HOSTAP_MODE)) {
- err = wl_cfg80211_attach_post(wl_to_prmry_ndev(wl));
+ err = wl_cfg80211_attach_post(bcmcfg_to_prmry_ndev(cfg));
if (unlikely(err))
return err;
}
#if defined(BCMSUP_4WAY_HANDSHAKE) && defined(WLAN_AKM_SUITE_FT_8021X)
if (dhd->fw_4way_handshake)
- wl->wdev->wiphy->features |= NL80211_FEATURE_FW_4WAY_HANDSHAKE;
+ cfg->wdev->wiphy->features |= NL80211_FEATURE_FW_4WAY_HANDSHAKE;
#endif
- err = __wl_cfg80211_up(wl);
+ err = __wl_cfg80211_up(cfg);
if (unlikely(err))
WL_ERR(("__wl_cfg80211_up failed\n"));
- mutex_unlock(&wl->usr_sync);
+ mutex_unlock(&cfg->usr_sync);
+
+#ifdef WLAIBSS_MCHAN
+ bcm_cfg80211_add_ibss_if(cfg->wdev->wiphy, IBSS_IF_NAME);
+#endif /* WLAIBSS_MCHAN */
+
+#ifdef DUAL_STA_STATIC_IF
+#ifdef DUAL_STA
+#error "Both DUAL_STA and DUAL_STA_STATIC_IF can't be enabled together"
+#endif
+ /* Static Interface support is currently supported only for STA only builds (without P2P) */
+ wl_cfg80211_create_iface(cfg->wdev->wiphy, NL80211_IFTYPE_STATION, NULL, "wlan%d");
+#endif /* DUAL_STA_STATIC_IF */
+
return err;
}
/* Private Event to Supplicant with indication that chip hangs */
int wl_cfg80211_hang(struct net_device *dev, u16 reason)
{
- struct wl_priv *wl;
- wl = wlcfg_drv_priv;
+ struct bcm_cfg80211 *cfg;
+ cfg = g_bcm_cfg;
WL_ERR(("In : chip crash eventing\n"));
+ wl_add_remove_pm_enable_work(cfg, FALSE, WL_HANDLER_DEL);
cfg80211_disconnected(dev, reason, NULL, 0, GFP_KERNEL);
#if defined(RSSIAVG)
wl_free_rssi_cache(&g_rssi_cache_ctrl);
#endif
#if defined(BSSCACHE)
wl_free_bss_cache(&g_bss_cache_ctrl);
- wl_run_bss_cache_timer(&g_bss_cache_ctrl, 0);
#endif
- if (wl != NULL) {
- wl_link_down(wl);
+ if (cfg != NULL) {
+ wl_link_down(cfg);
}
return 0;
}
s32 wl_cfg80211_down(void *para)
{
- struct wl_priv *wl;
+ struct bcm_cfg80211 *cfg;
s32 err = 0;
(void)para;
WL_DBG(("In\n"));
- wl = wlcfg_drv_priv;
- mutex_lock(&wl->usr_sync);
+ cfg = g_bcm_cfg;
+ mutex_lock(&cfg->usr_sync);
#if defined(RSSIAVG)
wl_free_rssi_cache(&g_rssi_cache_ctrl);
#endif
#if defined(BSSCACHE)
wl_free_bss_cache(&g_bss_cache_ctrl);
- wl_run_bss_cache_timer(&g_bss_cache_ctrl, 0);
#endif
- err = __wl_cfg80211_down(wl);
- mutex_unlock(&wl->usr_sync);
+ err = __wl_cfg80211_down(cfg);
+ mutex_unlock(&cfg->usr_sync);
return err;
}
-static void *wl_read_prof(struct wl_priv *wl, struct net_device *ndev, s32 item)
+static void *wl_read_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 item)
{
unsigned long flags;
void *rptr = NULL;
- struct wl_profile *profile = wl_get_profile_by_netdev(wl, ndev);
+ struct wl_profile *profile = wl_get_profile_by_netdev(cfg, ndev);
if (!profile)
return NULL;
- spin_lock_irqsave(&wl->cfgdrv_lock, flags);
+ spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
switch (item) {
case WL_PROF_SEC:
rptr = &profile->sec;
rptr = &profile->channel;
break;
}
- spin_unlock_irqrestore(&wl->cfgdrv_lock, flags);
+ spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
if (!rptr)
WL_ERR(("invalid item (%d)\n", item));
return rptr;
}
static s32
-wl_update_prof(struct wl_priv *wl, struct net_device *ndev,
+wl_update_prof(struct bcm_cfg80211 *cfg, struct net_device *ndev,
const wl_event_msg_t *e, void *data, s32 item)
{
s32 err = 0;
struct wlc_ssid *ssid;
unsigned long flags;
- struct wl_profile *profile = wl_get_profile_by_netdev(wl, ndev);
+ struct wl_profile *profile = wl_get_profile_by_netdev(cfg, ndev);
if (!profile)
return WL_INVALID;
- spin_lock_irqsave(&wl->cfgdrv_lock, flags);
+ spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
switch (item) {
case WL_PROF_SSID:
ssid = (wlc_ssid_t *) data;
err = -EOPNOTSUPP;
break;
}
- spin_unlock_irqrestore(&wl->cfgdrv_lock, flags);
+ spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
if (err == -EOPNOTSUPP)
WL_ERR(("unsupported item (%d)\n", item));
/* wl_dbg_level = level; */
}
-static bool wl_is_ibssmode(struct wl_priv *wl, struct net_device *ndev)
+static bool wl_is_ibssmode(struct bcm_cfg80211 *cfg, struct net_device *ndev)
{
- return wl_get_mode_by_netdev(wl, ndev) == WL_MODE_IBSS;
+ return wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_IBSS;
}
-static __used bool wl_is_ibssstarter(struct wl_priv *wl)
+static __used bool wl_is_ibssstarter(struct bcm_cfg80211 *cfg)
{
- return wl->ibss_starter;
+ return cfg->ibss_starter;
}
-static void wl_rst_ie(struct wl_priv *wl)
+static void wl_rst_ie(struct bcm_cfg80211 *cfg)
{
- struct wl_ie *ie = wl_to_ie(wl);
+ struct wl_ie *ie = wl_to_ie(cfg);
ie->offset = 0;
}
-static __used s32 wl_add_ie(struct wl_priv *wl, u8 t, u8 l, u8 *v)
+static __used s32 wl_add_ie(struct bcm_cfg80211 *cfg, u8 t, u8 l, u8 *v)
{
- struct wl_ie *ie = wl_to_ie(wl);
+ struct wl_ie *ie = wl_to_ie(cfg);
s32 err = 0;
if (unlikely(ie->offset + l + 2 > WL_TLV_INFO_MAX)) {
return err;
}
-static void wl_update_hidden_ap_ie(struct wl_bss_info *bi, u8 *ie_stream, u32 *ie_size)
+static void wl_update_hidden_ap_ie(struct wl_bss_info *bi, u8 *ie_stream, u32 *ie_size, bool roam)
{
u8 *ssidie;
ssidie = (u8 *)cfg80211_find_ie(WLAN_EID_SSID, ie_stream, *ie_size);
if (ssidie[1]) {
WL_ERR(("%s: Wrong SSID len: %d != %d\n",
__FUNCTION__, ssidie[1], bi->SSID_len));
- return;
}
- memmove(ssidie + bi->SSID_len + 2, ssidie + 2, *ie_size - (ssidie + 2 - ie_stream));
- memcpy(ssidie + 2, bi->SSID, bi->SSID_len);
- *ie_size = *ie_size + bi->SSID_len;
- ssidie[1] = bi->SSID_len;
+ if (roam) {
+ WL_ERR(("Changing the SSID Info.\n"));
+ memmove(ssidie + bi->SSID_len + 2,
+ (ssidie + 2) + ssidie[1],
+ *ie_size - (ssidie + 2 + ssidie[1] - ie_stream));
+ memcpy(ssidie + 2, bi->SSID, bi->SSID_len);
+ *ie_size = *ie_size + bi->SSID_len - ssidie[1];
+ ssidie[1] = bi->SSID_len;
+ }
return;
}
if (*(ssidie + 2) == '\0')
return;
}
-static s32 wl_mrg_ie(struct wl_priv *wl, u8 *ie_stream, u16 ie_size)
+static s32 wl_mrg_ie(struct bcm_cfg80211 *cfg, u8 *ie_stream, u16 ie_size)
{
- struct wl_ie *ie = wl_to_ie(wl);
+ struct wl_ie *ie = wl_to_ie(cfg);
s32 err = 0;
if (unlikely(ie->offset + ie_size > WL_TLV_INFO_MAX)) {
return err;
}
-static s32 wl_cp_ie(struct wl_priv *wl, u8 *dst, u16 dst_size)
+static s32 wl_cp_ie(struct bcm_cfg80211 *cfg, u8 *dst, u16 dst_size)
{
- struct wl_ie *ie = wl_to_ie(wl);
+ struct wl_ie *ie = wl_to_ie(cfg);
s32 err = 0;
if (unlikely(ie->offset > dst_size)) {
return err;
}
-static u32 wl_get_ielen(struct wl_priv *wl)
+static u32 wl_get_ielen(struct bcm_cfg80211 *cfg)
{
- struct wl_ie *ie = wl_to_ie(wl);
+ struct wl_ie *ie = wl_to_ie(cfg);
return ie->offset;
}
-static void wl_link_up(struct wl_priv *wl)
+static void wl_link_up(struct bcm_cfg80211 *cfg)
{
- wl->link_up = true;
+ cfg->link_up = true;
}
-static void wl_link_down(struct wl_priv *wl)
+static void wl_link_down(struct bcm_cfg80211 *cfg)
{
- struct wl_connect_info *conn_info = wl_to_conn(wl);
+ struct wl_connect_info *conn_info = wl_to_conn(cfg);
WL_DBG(("In\n"));
- wl->link_up = false;
+ cfg->link_up = false;
conn_info->req_ie_len = 0;
conn_info->resp_ie_len = 0;
}
-static unsigned long wl_lock_eq(struct wl_priv *wl)
+static unsigned long wl_lock_eq(struct bcm_cfg80211 *cfg)
{
unsigned long flags;
- spin_lock_irqsave(&wl->eq_lock, flags);
+ spin_lock_irqsave(&cfg->eq_lock, flags);
return flags;
}
-static void wl_unlock_eq(struct wl_priv *wl, unsigned long flags)
+static void wl_unlock_eq(struct bcm_cfg80211 *cfg, unsigned long flags)
{
- spin_unlock_irqrestore(&wl->eq_lock, flags);
+ spin_unlock_irqrestore(&cfg->eq_lock, flags);
}
-static void wl_init_eq_lock(struct wl_priv *wl)
+static void wl_init_eq_lock(struct bcm_cfg80211 *cfg)
{
- spin_lock_init(&wl->eq_lock);
+ spin_lock_init(&cfg->eq_lock);
}
static void wl_delay(u32 ms)
s32 wl_cfg80211_get_p2p_dev_addr(struct net_device *net, struct ether_addr *p2pdev_addr)
{
- struct wl_priv *wl = wlcfg_drv_priv;
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
struct ether_addr p2pif_addr;
struct ether_addr primary_mac;
- if (!wl->p2p)
+ if (!cfg->p2p)
return -1;
- if (!p2p_is_on(wl)) {
- get_primary_mac(wl, &primary_mac);
+ if (!p2p_is_on(cfg)) {
+ get_primary_mac(cfg, &primary_mac);
wl_cfgp2p_generate_bss_mac(&primary_mac, p2pdev_addr, &p2pif_addr);
} else {
memcpy(p2pdev_addr->octet,
- wl->p2p->dev_addr.octet, ETHER_ADDR_LEN);
+ cfg->p2p->dev_addr.octet, ETHER_ADDR_LEN);
}
}
s32 wl_cfg80211_set_p2p_noa(struct net_device *net, char* buf, int len)
{
- struct wl_priv *wl;
+ struct bcm_cfg80211 *cfg;
- wl = wlcfg_drv_priv;
+ cfg = g_bcm_cfg;
- return wl_cfgp2p_set_p2p_noa(wl, net, buf, len);
+ return wl_cfgp2p_set_p2p_noa(cfg, net, buf, len);
}
s32 wl_cfg80211_get_p2p_noa(struct net_device *net, char* buf, int len)
{
- struct wl_priv *wl;
- wl = wlcfg_drv_priv;
+ struct bcm_cfg80211 *cfg;
+ cfg = g_bcm_cfg;
- return wl_cfgp2p_get_p2p_noa(wl, net, buf, len);
+ return wl_cfgp2p_get_p2p_noa(cfg, net, buf, len);
}
s32 wl_cfg80211_set_p2p_ps(struct net_device *net, char* buf, int len)
{
- struct wl_priv *wl;
- wl = wlcfg_drv_priv;
+ struct bcm_cfg80211 *cfg;
+ cfg = g_bcm_cfg;
- return wl_cfgp2p_set_p2p_ps(wl, net, buf, len);
+ return wl_cfgp2p_set_p2p_ps(cfg, net, buf, len);
}
s32 wl_cfg80211_channel_to_freq(u32 channel)
} wl_cfg80211_dev_info_t;
static s32
-wl_notify_device_discovery(struct wl_priv *wl, bcm_struct_cfgdev *cfgdev,
+wl_notify_device_discovery(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
const wl_event_msg_t *e, void *data)
{
int err = 0;
return -EINVAL;
}
- ndev = cfgdev_to_wlc_ndev(cfgdev, wl);
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
- mutex_lock(&wl->usr_sync);
+ mutex_lock(&cfg->usr_sync);
if (event == WLC_E_P2PO_DEL_DEVICE) {
WL_SD(("DEV_LOST MAC:"MACDBG" \n", MAC2STRDBG(e->addr.octet)));
err = wl_genl_send_msg(ndev, event, (u8 *)e->addr.octet, ETH_ALEN, 0, 0);
/* Update sub-header */
bzero(&info, sizeof(wl_cfg80211_dev_info_t));
- channel = bi->ctl_ch ? bi->ctl_ch :
- CHSPEC_CHANNEL(wl_chspec_driver_to_host(bi->chanspec));
+ channel = wf_chspec_ctlchan(wl_chspec_driver_to_host(bi->chanspec));
info.freq = wl_cfg80211_channel_to_freq(channel);
info.rssi = dtoh16(bi->RSSI);
#if defined(RSSIOFFSET)
- info.rssi = wl_update_rssi_offset(info.rssi);
+ info.rssi = wl_update_rssi_offset(ndev, info.rssi);
#endif
memcpy(info.bssid, &bi->BSSID, ETH_ALEN);
info.ie_len = buflen;
buflen, (u8 *)&info, sizeof(wl_cfg80211_dev_info_t));
}
exit:
- mutex_unlock(&wl->usr_sync);
+ mutex_unlock(&cfg->usr_sync);
return err;
}
s32
-wl_cfg80211_sdo_init(struct wl_priv *wl)
+wl_cfg80211_sdo_init(struct bcm_cfg80211 *cfg)
{
u16 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
- if (wl->sdo) {
+ if (cfg->sdo) {
WL_SD(("SDO already initialized\n"));
return 0;
}
- wl->sdo = kzalloc(sizeof(sd_offload_t), kflags);
- if (!wl->sdo) {
+ cfg->sdo = kzalloc(sizeof(sd_offload_t), kflags);
+ if (!cfg->sdo) {
WL_ERR(("malloc failed for SDO \n"));
return -ENOMEM;
}
}
s32
-wl_cfg80211_sdo_deinit(struct wl_priv *wl)
+wl_cfg80211_sdo_deinit(struct bcm_cfg80211 *cfg)
{
s32 bssidx;
int ret = 0;
int sdo_pause = 0;
- if (!wl || !wl->p2p) {
- WL_ERR(("Wl %p or wl->p2p %p is null\n",
- wl, wl ? wl->p2p : 0));
+ if (!cfg || !cfg->p2p) {
+ WL_ERR(("Wl %p or cfg->p2p %p is null\n",
+ cfg, cfg ? cfg->p2p : 0));
return 0;
}
- bssidx = wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE);
- if (!wl->sdo) {
+ bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+ if (!cfg->sdo) {
WL_DBG(("SDO Not Initialized. Do nothing. \n"));
return 0;
}
- if (wl->sdo->dd_state &&
- (ret = wldev_iovar_setbuf_bsscfg(wl_to_prmry_ndev(wl),
+ if (cfg->sdo->dd_state &&
+ (ret = wldev_iovar_setbuf_bsscfg(bcmcfg_to_prmry_ndev(cfg),
"p2po_stop", (void*)&sdo_pause, sizeof(sdo_pause),
- wl->ioctl_buf, WLC_IOCTL_SMLEN, bssidx, NULL)) < 0) {
+ cfg->ioctl_buf, WLC_IOCTL_SMLEN, bssidx, NULL)) < 0) {
WL_ERR(("p2po_stop Failed :%d\n", ret));
}
- kfree(wl->sdo);
- wl->sdo = NULL;
+ kfree(cfg->sdo);
+ cfg->sdo = NULL;
WL_SD(("SDO Deinit Done \n"));
}
s32
-wl_cfg80211_resume_sdo(struct net_device *dev, struct wl_priv *wl)
+wl_cfg80211_resume_sdo(struct net_device *dev, struct bcm_cfg80211 *cfg)
{
wl_sd_listen_t sd_listen;
int ret = 0;
- s32 bssidx = wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE);
+ s32 bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
WL_DBG(("Enter\n"));
- if (!wl->sdo) {
+ if (!cfg->sdo) {
return -EINVAL;
}
if (dev == NULL)
- dev = wl_to_prmry_ndev(wl);
+ dev = bcmcfg_to_prmry_ndev(cfg);
/* Disable the ESCAN events again for the offload */
wl_add_remove_eventmsg(dev, WLC_E_ESCAN_RESULT, false);
/* Resume according to the saved state */
- if (wl->sdo->dd_state == WL_DD_STATE_SEARCH) {
+ if (cfg->sdo->dd_state == WL_DD_STATE_SEARCH) {
if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_find", NULL, 0,
- wl->ioctl_buf, WLC_IOCTL_SMLEN, bssidx, NULL)) < 0) {
+ cfg->ioctl_buf, WLC_IOCTL_SMLEN, bssidx, NULL)) < 0) {
WL_ERR(("p2po_find Failed :%d\n", ret));
}
- } else if (wl->sdo->dd_state == WL_DD_STATE_LISTEN) {
- sd_listen.interval = wl->sdo->sd_listen.interval;
- sd_listen.period = wl->sdo->sd_listen.period;
+ } else if (cfg->sdo->dd_state == WL_DD_STATE_LISTEN) {
+ sd_listen.interval = cfg->sdo->sd_listen.interval;
+ sd_listen.period = cfg->sdo->sd_listen.period;
if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_listen", (void*)&sd_listen,
- sizeof(wl_sd_listen_t), wl->ioctl_buf, WLC_IOCTL_SMLEN,
+ sizeof(wl_sd_listen_t), cfg->ioctl_buf, WLC_IOCTL_SMLEN,
bssidx, NULL)) < 0) {
WL_ERR(("p2po_listen Failed :%d\n", ret));
}
return ret;
}
-s32 wl_cfg80211_pause_sdo(struct net_device *dev, struct wl_priv *wl)
+s32 wl_cfg80211_pause_sdo(struct net_device *dev, struct bcm_cfg80211 *cfg)
{
int ret = 0;
- s32 bssidx = wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE);
+ s32 bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
int sdo_pause = 1;
WL_DBG(("Enter \n"));
- if (!wl->sdo) {
+ if (!cfg->sdo) {
WL_ERR(("SDO not initialized \n"));
return -EINVAL;
}
if (dev == NULL)
- dev = wl_to_prmry_ndev(wl);
+ dev = bcmcfg_to_prmry_ndev(cfg);
if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_stop",
(void*)&sdo_pause, sizeof(sdo_pause),
- wl->ioctl_buf, WLC_IOCTL_SMLEN, bssidx, &wl->ioctl_buf_sync)) < 0) {
+ cfg->ioctl_buf, WLC_IOCTL_SMLEN, bssidx, &cfg->ioctl_buf_sync)) < 0) {
WL_ERR(("p2po_stop Failed :%d\n", ret));
}
}
static s32
-wl_svc_resp_handler(struct wl_priv *wl, bcm_struct_cfgdev *cfgdev,
+wl_svc_resp_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
const wl_event_msg_t *e, void *data)
{
u32 event = ntoh32(e->event_type);
WL_SD(("Enter event_type:%d status:%d\n", event, status));
- if (!wl->sdo) {
+ if (!cfg->sdo) {
WL_ERR(("SDO Not initialized \n"));
return -EINVAL;
}
- if (!(wl->sdo->sd_state & WL_SD_SEARCH_SVC)) {
+ if (!(cfg->sdo->sd_state & WL_SD_SEARCH_SVC)) {
/* We are not searching for any service. Drop
* any bogus Event
*/
return -1;
}
- ndev = cfgdev_to_wlc_ndev(cfgdev, wl);
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
- mutex_lock(&wl->usr_sync);
+ mutex_lock(&cfg->usr_sync);
if (event == WLC_E_SERVICE_FOUND) {
if ((status != WLC_E_STATUS_SUCCESS) && (status != WLC_E_STATUS_PARTIAL)) {
}
exit:
- mutex_unlock(&wl->usr_sync);
+ mutex_unlock(&cfg->usr_sync);
return ret;
}
* ./DRIVER P2P_SD_REQ upnp 0x10urn:schemas-upnporg:device:InternetGatewayDevice:1
*
* Enable discovery
* ./wl p2po_find
*/
#define UPNP_QUERY_VER_OFFSET 3
s32 wl_sd_handle_sd_req(
u8 * buf,
int len)
{
- struct wl_priv *wl = wlcfg_drv_priv;
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
s32 bssidx = 0;
wl_sd_qr_t *sdreq;
u8 proto = 0;
u32 tot_len = len + sizeof(wl_sd_qr_t);
u16 version = 0;
- if (wl_cfgp2p_find_idx(wl, dev, &bssidx) != BCME_OK) {
+ if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
WL_ERR(("find_idx failed\n"));
return -EINVAL;
}
}
/* Enable discovery */
- if ((ret = wl_cfgp2p_enable_discovery(wl, dev, NULL, 0)) < 0) {
+ if ((ret = wl_cfgp2p_enable_discovery(cfg, dev, NULL, 0)) < 0) {
WL_ERR(("cfgp2p_enable discovery failed"));
goto exit;
}
if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_sd_req_resp", (void*)sdreq,
- tot_len, wl->ioctl_buf, WLC_IOCTL_MAXLEN,
- bssidx, &wl->ioctl_buf_sync)) < 0) {
+ tot_len, cfg->ioctl_buf, WLC_IOCTL_MAXLEN,
+ bssidx, &cfg->ioctl_buf_sync)) < 0) {
WL_ERR(("Find SVC Failed \n"));
goto exit;
}
- wl->sdo->sd_state |= WL_SD_SEARCH_SVC;
+ cfg->sdo->sd_state |= WL_SD_SEARCH_SVC;
exit:
kfree(sdreq);
struct net_device *dev,
u8 *buf)
{
- struct wl_priv *wl = wlcfg_drv_priv;
- s32 bssidx = wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE);
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
+ s32 bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
if (wldev_iovar_setbuf_bsscfg(dev, "p2po_sd_cancel", NULL,
- 0, wl->ioctl_buf, WLC_IOCTL_SMLEN,
- bssidx, &wl->ioctl_buf_sync) < 0) {
+ 0, cfg->ioctl_buf, WLC_IOCTL_SMLEN,
+ bssidx, &cfg->ioctl_buf_sync) < 0) {
WL_ERR(("Cancel SD Failed \n"));
return -EINVAL;
}
- wl->sdo->sd_state &= ~WL_SD_SEARCH_SVC;
+ cfg->sdo->sd_state &= ~WL_SD_SEARCH_SVC;
return 0;
}
/*
* register a UPnP service to be discovered
* ./wl P2P_SD_SVC_ADD upnp 0x10urn:schemas-upnporg:device:InternetGatewayDevice:1 0x10uu
* id:6859dede-8574-59ab-9332-123456789012::urn:schemas-upnporg:device:InternetGate
* wayDevice:1
*/
u8 * buf,
int len)
{
- struct wl_priv *wl = wlcfg_drv_priv;
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
s32 bssidx = 0;
wl_sd_qr_t *sdreq;
u8 proto = 0;
memcpy((sdreq->qrbuf + sdreq->query_len), resp, sdreq->response_len);
/* Enable discovery */
- if ((ret = wl_cfgp2p_enable_discovery(wl, dev, NULL, 0)) < 0) {
+ if ((ret = wl_cfgp2p_enable_discovery(cfg, dev, NULL, 0)) < 0) {
WL_ERR(("cfgp2p_enable discovery failed"));
goto exit;
}
if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_addsvc", (void*)sdreq,
- tot_len, wl->ioctl_buf, WLC_IOCTL_MAXLEN,
- bssidx, &wl->ioctl_buf_sync)) < 0) {
+ tot_len, cfg->ioctl_buf, WLC_IOCTL_MAXLEN,
+ bssidx, &cfg->ioctl_buf_sync)) < 0) {
WL_ERR(("FW Failed in doing p2po_addsvc. RET:%d \n", ret));
goto exit;
}
- wl->sdo->sd_state |= WL_SD_ADV_SVC;
+ cfg->sdo->sd_state |= WL_SD_ADV_SVC;
exit:
kfree(sdreq);
u8 * buf,
int len)
{
- struct wl_priv *wl = wlcfg_drv_priv;
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
s32 bssidx = 0;
wl_sd_qr_t *sdreq;
u8 proto = 0;
u16 kflags = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
u16 version = 0;
- if (wl_cfgp2p_find_idx(wl, dev, &bssidx) != BCME_OK) {
+ if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
WL_ERR(("find_idx failed\n"));
return -EINVAL;
}
WL_SD(("Proto: %d \n", proto));
if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_delsvc", (void*)sdreq,
- tot_len, wl->ioctl_buf, WLC_IOCTL_MAXLEN,
- bssidx, &wl->ioctl_buf_sync)) < 0) {
+ tot_len, cfg->ioctl_buf, WLC_IOCTL_MAXLEN,
+ bssidx, &cfg->ioctl_buf_sync)) < 0) {
WL_ERR(("FW Failed in doing sd_delsvc. ret=%d \n", ret));
goto exit;
}
- wl->sdo->sd_state &= ~WL_SD_ADV_SVC;
+ cfg->sdo->sd_state &= ~WL_SD_ADV_SVC;
exit:
if (sdreq)
u8 * buf,
int len)
{
- struct wl_priv *wl = wlcfg_drv_priv;
- s32 bssidx = wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE);
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
+ s32 bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
int ret = 0;
int sdo_pause = 0;
if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_stop", (void*)&sdo_pause,
- sizeof(sdo_pause), wl->ioctl_buf, WLC_IOCTL_SMLEN,
- bssidx, &wl->ioctl_buf_sync)) < 0) {
+ sizeof(sdo_pause), cfg->ioctl_buf, WLC_IOCTL_SMLEN,
+ bssidx, &cfg->ioctl_buf_sync)) < 0) {
WL_ERR(("p2po_stop Failed :%d\n", ret));
return -1;
}
}
/* clear the states */
- wl->sdo->dd_state = WL_DD_STATE_IDLE;
- wl_clr_p2p_status(wl, DISC_IN_PROGRESS);
+ cfg->sdo->dd_state = WL_DD_STATE_IDLE;
+ wl_clr_p2p_status(cfg, DISC_IN_PROGRESS);
- bzero(&wl->sdo->sd_listen, sizeof(wl_sd_listen_t));
+ bzero(&cfg->sdo->sd_listen, sizeof(wl_sd_listen_t));
/* Remove ESCAN from waking up the host if ofind/olisten is enabled */
wl_add_remove_eventmsg(dev, WLC_E_ESCAN_RESULT, true);
u8 * buf,
int len)
{
- struct wl_priv *wl = wlcfg_drv_priv;
- s32 bssidx = wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE);
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
+ s32 bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
int ret = 0;
- s32 disc_bssidx = wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE);
+ s32 disc_bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
vndr_ie_setbuf_t *ie_setbuf;
vndr_ie_t *vndrie;
vndr_ie_buf_t *vndriebuf;
};
/* Enable discovery */
- if ((ret = wl_cfgp2p_enable_discovery(wl, dev, NULL, 0)) < 0) {
+ if ((ret = wl_cfgp2p_enable_discovery(cfg, dev, NULL, 0)) < 0) {
WL_ERR(("cfgp2p_enable discovery failed"));
return -1;
}
channel = simple_strtol(buf, NULL, 10);
WL_SD(("listen_chan to be set:%d\n", channel));
if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_listen_channel", (void*)&channel,
- sizeof(channel), wl->ioctl_buf, WLC_IOCTL_SMLEN,
- bssidx, &wl->ioctl_buf_sync)) < 0) {
+ sizeof(channel), cfg->ioctl_buf, WLC_IOCTL_SMLEN,
+ bssidx, &cfg->ioctl_buf_sync)) < 0) {
WL_ERR(("p2po_listen_channel Failed :%d\n", ret));
return -1;
}
wl_add_remove_eventmsg(dev, WLC_E_ESCAN_RESULT, false);
if (wldev_iovar_setbuf_bsscfg(dev, "ie", (void*)ie_setbuf,
- tot_len, wl->ioctl_buf, WLC_IOCTL_SMLEN,
- disc_bssidx, &wl->ioctl_buf_sync) < 0) {
+ tot_len, cfg->ioctl_buf, WLC_IOCTL_SMLEN,
+ disc_bssidx, &cfg->ioctl_buf_sync) < 0) {
WL_ERR(("p2p add_ie failed \n"));
ret = -EINVAL;
goto exit;
}
if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_find", NULL, 0,
- wl->ioctl_buf, WLC_IOCTL_SMLEN, bssidx, &wl->ioctl_buf_sync)) < 0) {
+ cfg->ioctl_buf, WLC_IOCTL_SMLEN, bssidx, &cfg->ioctl_buf_sync)) < 0) {
WL_ERR(("p2po_find Failed :%d\n", ret));
ret = -1;
goto exit;
}
/* set the states */
- wl->sdo->dd_state = WL_DD_STATE_SEARCH;
- wl_set_p2p_status(wl, DISC_IN_PROGRESS);
+ cfg->sdo->dd_state = WL_DD_STATE_SEARCH;
+ wl_set_p2p_status(cfg, DISC_IN_PROGRESS);
exit:
if (ie_setbuf)
u8 *buf,
int len)
{
- struct wl_priv *wl = wlcfg_drv_priv;
- s32 bssidx = wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE);
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
+ s32 bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
wl_sd_listen_t sd_listen;
int ret = 0;
u8 * ptr = NULL;
uint channel = 0;
/* Just in case if it is not enabled */
- if ((ret = wl_cfgp2p_enable_discovery(wl, dev, NULL, 0)) < 0) {
+ if ((ret = wl_cfgp2p_enable_discovery(cfg, dev, NULL, 0)) < 0) {
WL_ERR(("cfgp2p_enable discovery failed"));
return -1;
}
sd_listen.period, sd_listen.interval, channel));
}
if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_listen_channel", (void*)&channel,
- sizeof(channel), wl->ioctl_buf, WLC_IOCTL_SMLEN,
- bssidx, &wl->ioctl_buf_sync)) < 0) {
+ sizeof(channel), cfg->ioctl_buf, WLC_IOCTL_SMLEN,
+ bssidx, &cfg->ioctl_buf_sync)) < 0) {
WL_ERR(("p2po_listen_channel Failed :%d\n", ret));
return -1;
}
WL_SD(("p2po_listen period:%d interval:%d \n",
sd_listen.period, sd_listen.interval));
if ((ret = wldev_iovar_setbuf_bsscfg(dev, "p2po_listen", (void*)&sd_listen,
- sizeof(wl_sd_listen_t), wl->ioctl_buf, WLC_IOCTL_SMLEN,
- bssidx, &wl->ioctl_buf_sync)) < 0) {
+ sizeof(wl_sd_listen_t), cfg->ioctl_buf, WLC_IOCTL_SMLEN,
+ bssidx, &cfg->ioctl_buf_sync)) < 0) {
WL_ERR(("p2po_listen Failed :%d\n", ret));
return -1;
}
wl_add_remove_eventmsg(dev, WLC_E_ESCAN_RESULT, false);
/* Store the extended listen values for use in sdo_resume */
- wl->sdo->sd_listen.interval = sd_listen.interval;
- wl->sdo->sd_listen.period = sd_listen.period;
+ cfg->sdo->sd_listen.interval = sd_listen.interval;
+ cfg->sdo->sd_listen.period = sd_listen.period;
/* set the states */
- wl->sdo->dd_state = WL_DD_STATE_LISTEN;
- wl_set_p2p_status(wl, DISC_IN_PROGRESS);
+ cfg->sdo->dd_state = WL_DD_STATE_LISTEN;
+ wl_set_p2p_status(cfg, DISC_IN_PROGRESS);
return 0;
}
s32 wl_cfg80211_sd_offload(struct net_device *dev, char *cmd, char* buf, int len)
{
int ret = 0;
- struct wl_priv *wl = wlcfg_drv_priv;
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
WL_SD(("Entry cmd:%s arg_len:%d \n", cmd, len));
- if (!wl->sdo) {
+ if (!cfg->sdo) {
WL_SD(("Initializing SDO \n"));
- if ((ret = wl_cfg80211_sdo_init(wl)) < 0)
+ if ((ret = wl_cfg80211_sdo_init(cfg)) < 0)
goto exit;
}
#ifdef WLTDLS
static s32
-wl_tdls_event_handler(struct wl_priv *wl, bcm_struct_cfgdev *cfgdev,
+wl_tdls_event_handler(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
const wl_event_msg_t *e, void *data) {
struct net_device *ndev = NULL;
u32 reason = ntoh32(e->reason);
s8 *msg = NULL;
- ndev = cfgdev_to_wlc_ndev(cfgdev, wl);
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
switch (reason) {
case WLC_E_TDLS_PEER_DISCOVERED :
msg = " TDLS PEER DISCOVERD ";
break;
case WLC_E_TDLS_PEER_CONNECTED :
+#ifdef PCIE_FULL_DONGLE
+ dhd_tdls_update_peer_info(ndev, TRUE, (uint8 *)&e->addr.octet[0]);
+#endif /* PCIE_FULL_DONGLE */
msg = " TDLS PEER CONNECTED ";
break;
case WLC_E_TDLS_PEER_DISCONNECTED :
+#ifdef PCIE_FULL_DONGLE
+ dhd_tdls_update_peer_info(ndev, FALSE, (uint8 *)&e->addr.octet[0]);
+#endif /* PCIE_FULL_DONGLE */
msg = "TDLS PEER DISCONNECTED ";
break;
}
if (msg) {
WL_ERR(("%s: " MACDBG " on %s ndev\n", msg, MAC2STRDBG((u8*)(&e->addr)),
- (wl_to_prmry_ndev(wl) == ndev) ? "primary" : "secondary"));
+ (bcmcfg_to_prmry_ndev(cfg) == ndev) ? "primary" : "secondary"));
}
return 0;
{
s32 ret = 0;
#ifdef WLTDLS
- struct wl_priv *wl = wlcfg_drv_priv;
+ struct bcm_cfg80211 *cfg;
tdls_iovar_t info;
- dhd_pub_t *dhd = (dhd_pub_t *)(wl->pub);
+ cfg = g_bcm_cfg;
memset(&info, 0, sizeof(tdls_iovar_t));
if (peer)
memcpy(&info.ea, peer, ETHER_ADDR_LEN);
switch (oper) {
case NL80211_TDLS_DISCOVERY_REQ:
- if (!dhd->tdls_enable)
- ret = dhd_tdls_enable_disable(dhd, 1);
+ /* turn on TDLS */
+ ret = dhd_tdls_enable(dev, true, false, NULL);
if (ret < 0)
return ret;
info.mode = TDLS_MANUAL_EP_DISCOVERY;
break;
case NL80211_TDLS_SETUP:
- info.mode = TDLS_MANUAL_EP_CREATE;
+ /* auto mode on */
+ ret = dhd_tdls_enable(dev, true, true, (struct ether_addr *)peer);
+ if (ret < 0)
+ return ret;
break;
case NL80211_TDLS_TEARDOWN:
info.mode = TDLS_MANUAL_EP_DELETE;
+ /* auto mode off */
+ ret = dhd_tdls_enable(dev, true, false, (struct ether_addr *)peer);
+ if (ret < 0)
+ return ret;
break;
default:
WL_ERR(("Unsupported operation : %d\n", oper));
goto out;
}
- ret = wldev_iovar_setbuf(dev, "tdls_endpoint", &info, sizeof(info),
- wl->ioctl_buf, WLC_IOCTL_MAXLEN, &wl->ioctl_buf_sync);
- if (ret) {
- WL_ERR(("tdls_endpoint error %d\n", ret));
+ if (info.mode) {
+ ret = wldev_iovar_setbuf(dev, "tdls_endpoint", &info, sizeof(info),
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
+ if (ret) {
+ WL_ERR(("tdls_endpoint error %d\n", ret));
+ }
}
out:
#endif /* WLTDLS */
s32 wl_cfg80211_set_wps_p2p_ie(struct net_device *net, char *buf, int len,
enum wl_management_type type)
{
- struct wl_priv *wl;
+ struct bcm_cfg80211 *cfg;
struct net_device *ndev = NULL;
struct ether_addr primary_mac;
s32 ret = 0;
s32 bssidx = 0;
s32 pktflag = 0;
- wl = wlcfg_drv_priv;
+ cfg = g_bcm_cfg;
- if (wl_get_drv_status(wl, AP_CREATING, net)) {
+ if (wl_get_drv_status(cfg, AP_CREATING, net)) {
/* Vendor IEs should be set to FW
* after SoftAP interface is brought up
*/
goto exit;
- } else if (wl_get_drv_status(wl, AP_CREATED, net)) {
+ } else if (wl_get_drv_status(cfg, AP_CREATED, net)) {
ndev = net;
bssidx = 0;
- } else if (wl->p2p) {
- net = ndev_to_wlc_ndev(net, wl);
- if (!wl->p2p->on) {
- get_primary_mac(wl, &primary_mac);
- wl_cfgp2p_generate_bss_mac(&primary_mac, &wl->p2p->dev_addr,
- &wl->p2p->int_addr);
+ } else if (cfg->p2p) {
+ net = ndev_to_wlc_ndev(net, cfg);
+ if (!cfg->p2p->on) {
+ get_primary_mac(cfg, &primary_mac);
+ wl_cfgp2p_generate_bss_mac(&primary_mac, &cfg->p2p->dev_addr,
+ &cfg->p2p->int_addr);
/* In case of p2p_listen command, supplicant sends remain_on_channel
* without turning on P2P
*/
- p2p_on(wl) = true;
- ret = wl_cfgp2p_enable_discovery(wl, net, NULL, 0);
+ p2p_on(cfg) = true;
+ ret = wl_cfgp2p_enable_discovery(cfg, net, NULL, 0);
if (unlikely(ret)) {
goto exit;
}
}
- if (net != wl_to_prmry_ndev(wl)) {
- if (wl_get_mode_by_netdev(wl, net) == WL_MODE_AP) {
- ndev = wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION);
- bssidx = wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_CONNECTION);
+ if (net != bcmcfg_to_prmry_ndev(cfg)) {
+ if (wl_get_mode_by_netdev(cfg, net) == WL_MODE_AP) {
+ ndev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION);
+ bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION);
}
} else {
- ndev = wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_PRIMARY);
- bssidx = wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE);
+ ndev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY);
+ bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
}
}
if (ndev != NULL) {
break;
}
if (pktflag)
- ret = wl_cfgp2p_set_management_ie(wl, ndev, bssidx, pktflag, buf, len);
+ ret = wl_cfgp2p_set_management_ie(cfg, ndev, bssidx, pktflag, buf, len);
}
exit:
return ret;
{
u32 val = 0;
s32 ret = BCME_ERROR;
- struct wl_priv *wl = wlcfg_drv_priv;
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
/* Disable mpc, to avoid automatic interface down. */
val = 0;
ret = wldev_iovar_setbuf_bsscfg(ndev, "mpc", (void *)&val,
- sizeof(val), wl->ioctl_buf, WLC_IOCTL_SMLEN, 0,
- &wl->ioctl_buf_sync);
+ sizeof(val), cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0,
+ &cfg->ioctl_buf_sync);
if (ret < 0) {
WL_ERR(("set 'mpc' failed, error = %d\n", ret));
goto done;
}
/* Stop all scans explicitly until auto channel selection completes. */
- wl_set_drv_status(wl, SCANNING, ndev);
- ret = wl_notify_escan_complete(wl, ndev, true, true);
+ wl_set_drv_status(cfg, SCANNING, ndev);
+ if (cfg->escan_info.ndev == NULL) {
+ ret = BCME_OK;
+ goto done;
+ }
+ ret = wl_notify_escan_complete(cfg, ndev, true, true);
if (ret < 0) {
WL_ERR(("set scan abort failed, error = %d\n", ret));
ret = BCME_OK; // terence 20140115: fix escan_complete error
wl_cfg80211_valid_chanspec_p2p(chanspec_t chanspec)
{
bool valid = false;
+ char chanbuf[CHANSPEC_STR_LEN];
/* channel 1 to 14 */
if ((chanspec >= 0x2b01) && (chanspec <= 0x2b0e)) {
}
else {
valid = false;
- WL_INFO(("invalid P2P chanspec, channel = %d, chanspec = %04x\n",
- CHSPEC_CHANNEL(chanspec), chanspec));
+ WL_INFORM(("invalid P2P chanspec, chanspec = %s\n",
+ wf_chspec_ntoa_ex(chanspec, chanbuf)));
}
return valid;
wl_cfg80211_get_chanspecs_2g(struct net_device *ndev, void *buf, s32 buflen)
{
s32 ret = BCME_ERROR;
- struct wl_priv *wl = NULL;
+ struct bcm_cfg80211 *cfg = NULL;
wl_uint32_list_t *list = NULL;
chanspec_t chanspec = 0;
memset(buf, 0, buflen);
- wl = wlcfg_drv_priv;
+ cfg = g_bcm_cfg;
list = (wl_uint32_list_t *)buf;
list->count = htod32(WL_NUMCHANSPECS);
chanspec = wl_chspec_host_to_driver(chanspec);
ret = wldev_iovar_getbuf_bsscfg(ndev, "chanspecs", (void *)&chanspec,
- sizeof(chanspec), buf, buflen, 0, &wl->ioctl_buf_sync);
+ sizeof(chanspec), buf, buflen, 0, &cfg->ioctl_buf_sync);
if (ret < 0) {
WL_ERR(("get 'chanspecs' failed, error = %d\n", ret));
}
s32 ret = BCME_ERROR;
s32 i = 0;
s32 j = 0;
- struct wl_priv *wl = NULL;
+ struct bcm_cfg80211 *cfg = NULL;
wl_uint32_list_t *list = NULL;
chanspec_t chanspec = 0;
memset(buf, 0, buflen);
- wl = wlcfg_drv_priv;
+ cfg = g_bcm_cfg;
list = (wl_uint32_list_t *)buf;
list->count = htod32(WL_NUMCHANSPECS);
chanspec = wl_chspec_host_to_driver(chanspec);
ret = wldev_iovar_getbuf_bsscfg(ndev, "chanspecs", (void *)&chanspec,
- sizeof(chanspec), buf, buflen, 0, &wl->ioctl_buf_sync);
+ sizeof(chanspec), buf, buflen, 0, &cfg->ioctl_buf_sync);
if (ret < 0) {
WL_ERR(("get 'chanspecs' failed, error = %d\n", ret));
goto done;
s32 ret = BCME_ERROR;
int chosen = 0;
int retry = 0;
+ uint chip;
/* Start auto channel selection scan. */
ret = wldev_ioctl(ndev, WLC_START_CHANNEL_SEL, buf, buflen, true);
ret = wldev_ioctl(ndev, WLC_GET_CHANNEL_SEL, &chosen, sizeof(chosen),
false);
if ((ret == 0) && (dtoh32(chosen) != 0)) {
- if (dhd_bus_chip_id(bcmsdh_get_drvdata()) != BCM43362_CHIP_ID &&
- dhd_bus_chip_id(bcmsdh_get_drvdata()) != BCM4330_CHIP_ID) {
+ chip = dhd_conf_get_chip(dhd_get_pub(ndev));
+ if (chip != BCM43362_CHIP_ID && chip != BCM4330_CHIP_ID) {
u32 chanspec = 0;
int ctl_chan;
chanspec = wl_chspec_driver_to_host(chosen);
*channel = (u16)(ctl_chan & 0x00FF);
} else
*channel = (u16)(chosen & 0x00FF);
- WL_INFO(("selected channel = %d\n", *channel));
+ WL_INFORM(("selected channel = %d\n", *channel));
break;
}
- WL_INFO(("attempt = %d, ret = %d, chosen = %d\n",
+ WL_INFORM(("attempt = %d, ret = %d, chosen = %d\n",
(CHAN_SEL_RETRY_COUNT - retry), ret, dtoh32(chosen)));
}
{
u32 val = 0;
s32 ret = BCME_ERROR;
- struct wl_priv *wl = wlcfg_drv_priv;
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
/* Clear scan stop driver status. */
- wl_clr_drv_status(wl, SCANNING, ndev);
+ wl_clr_drv_status(cfg, SCANNING, ndev);
/* Set mpc back to 1, irrespective of its initial state. */
val = 1;
ret = wldev_iovar_setbuf_bsscfg(ndev, "mpc", (void *)&val,
- sizeof(val), wl->ioctl_buf, WLC_IOCTL_SMLEN, 0,
- &wl->ioctl_buf_sync);
+ sizeof(val), cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0,
+ &cfg->ioctl_buf_sync);
if (ret < 0) {
WL_ERR(("set 'mpc' failed, error = %d\n", ret));
}
s32 ret = BCME_ERROR;
u8 *buf = NULL;
char *pos = cmd;
- struct wl_priv *wl = NULL;
+ struct bcm_cfg80211 *cfg = NULL;
struct net_device *ndev = NULL;
memset(cmd, 0, total_len);
* Always use primary interface, irrespective of interface on which
* command came.
*/
- wl = wlcfg_drv_priv;
- ndev = wl_to_prmry_ndev(wl);
+ cfg = g_bcm_cfg;
+ ndev = bcmcfg_to_prmry_ndev(cfg);
/*
* Make sure that FW and driver are in right state to do auto channel
static int wl_rfkill_set(void *data, bool blocked)
{
- struct wl_priv *wl = (struct wl_priv *)data;
+ struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)data;
WL_DBG(("Enter \n"));
WL_DBG(("RF %s\n", blocked ? "blocked" : "unblocked"));
- if (!wl)
+ if (!cfg)
return -EINVAL;
- wl->rf_blocked = blocked;
+ cfg->rf_blocked = blocked;
return 0;
}
-static int wl_setup_rfkill(struct wl_priv *wl, bool setup)
+static int wl_setup_rfkill(struct bcm_cfg80211 *cfg, bool setup)
{
s32 err = 0;
WL_DBG(("Enter \n"));
- if (!wl)
+ if (!cfg)
return -EINVAL;
if (setup) {
- wl->rfkill = rfkill_alloc("brcmfmac-wifi",
+ cfg->rfkill = rfkill_alloc("brcmfmac-wifi",
wl_cfg80211_get_parent_dev(),
- RFKILL_TYPE_WLAN, &wl_rfkill_ops, (void *)wl);
+ RFKILL_TYPE_WLAN, &wl_rfkill_ops, (void *)cfg);
- if (!wl->rfkill) {
+ if (!cfg->rfkill) {
err = -ENOMEM;
goto err_out;
}
- err = rfkill_register(wl->rfkill);
+ err = rfkill_register(cfg->rfkill);
if (err)
- rfkill_destroy(wl->rfkill);
+ rfkill_destroy(cfg->rfkill);
} else {
- if (!wl->rfkill) {
+ if (!cfg->rfkill) {
err = -ENOMEM;
goto err_out;
}
- rfkill_unregister(wl->rfkill);
- rfkill_destroy(wl->rfkill);
+ rfkill_unregister(cfg->rfkill);
+ rfkill_destroy(cfg->rfkill);
}
err_out:
.llseek = NULL,
};
-static s32 wl_setup_debugfs(struct wl_priv *wl)
+static s32 wl_setup_debugfs(struct bcm_cfg80211 *cfg)
{
s32 err = 0;
struct dentry *_dentry;
- if (!wl)
+ if (!cfg)
return -EINVAL;
- wl->debugfs = debugfs_create_dir(KBUILD_MODNAME, NULL);
- if (!wl->debugfs || IS_ERR(wl->debugfs)) {
- if (wl->debugfs == ERR_PTR(-ENODEV))
+ cfg->debugfs = debugfs_create_dir(KBUILD_MODNAME, NULL);
+ if (!cfg->debugfs || IS_ERR(cfg->debugfs)) {
+ if (cfg->debugfs == ERR_PTR(-ENODEV))
WL_ERR(("Debugfs is not enabled on this kernel\n"));
else
WL_ERR(("Can not create debugfs directory\n"));
- wl->debugfs = NULL;
+ cfg->debugfs = NULL;
goto exit;
}
_dentry = debugfs_create_file("debug_level", S_IRUSR | S_IWUSR,
- wl->debugfs, wl, &fops_debuglevel);
+ cfg->debugfs, cfg, &fops_debuglevel);
if (!_dentry || IS_ERR(_dentry)) {
WL_ERR(("failed to create debug_level debug file\n"));
- wl_free_debugfs(wl);
+ wl_free_debugfs(cfg);
}
exit:
return err;
}
-static s32 wl_free_debugfs(struct wl_priv *wl)
+static s32 wl_free_debugfs(struct bcm_cfg80211 *cfg)
{
- if (!wl)
+ if (!cfg)
return -EINVAL;
- if (wl->debugfs)
- debugfs_remove_recursive(wl->debugfs);
- wl->debugfs = NULL;
+ if (cfg->debugfs)
+ debugfs_remove_recursive(cfg->debugfs);
+ cfg->debugfs = NULL;
return 0;
}
#endif /* DEBUGFS_CFG80211 */
cfg80211_parent_dev = NULL;
}
-void get_primary_mac(struct wl_priv *wl, struct ether_addr *mac)
+void get_primary_mac(struct bcm_cfg80211 *cfg, struct ether_addr *mac)
{
- wldev_iovar_getbuf_bsscfg(wl_to_prmry_ndev(wl), "cur_etheraddr", NULL,
- 0, wl->ioctl_buf, WLC_IOCTL_SMLEN, 0, &wl->ioctl_buf_sync);
- memcpy(mac->octet, wl->ioctl_buf, ETHER_ADDR_LEN);
+ wldev_iovar_getbuf_bsscfg(bcmcfg_to_prmry_ndev(cfg), "cur_etheraddr", NULL,
+ 0, cfg->ioctl_buf, WLC_IOCTL_SMLEN, 0, &cfg->ioctl_buf_sync);
+ memcpy(mac->octet, cfg->ioctl_buf, ETHER_ADDR_LEN);
}
-static bool check_dev_role_integrity(struct wl_priv *wl, u32 dev_role)
+static bool check_dev_role_integrity(struct bcm_cfg80211 *cfg, u32 dev_role)
{
- dhd_pub_t *dhd = (dhd_pub_t *)(wl->pub);
+ dhd_pub_t *dhd = (dhd_pub_t *)(cfg->pub);
if (((dev_role == NL80211_IFTYPE_AP) &&
!(dhd->op_mode & DHD_FLAG_HOSTAP_MODE)) ||
((dev_role == NL80211_IFTYPE_P2P_GO) &&
int wl_cfg80211_do_driver_init(struct net_device *net)
{
- struct wl_priv *wl = *(struct wl_priv **)netdev_priv(net);
+ struct bcm_cfg80211 *cfg = *(struct bcm_cfg80211 **)netdev_priv(net);
- if (!wl || !wl->wdev)
+ if (!cfg || !cfg->wdev)
return -EINVAL;
- if (dhd_do_driver_init(wl->wdev->netdev) < 0)
+ if (dhd_do_driver_init(cfg->wdev->netdev) < 0)
return -1;
return 0;
return 0;
}
#endif /* WL_SUPPORT_BACKPORTED_KPATCHES || KERNEL >= 3.2.0 */
#ifdef WL11U
bcm_tlv_t *
return NULL;
}
+
static s32
-wl_cfg80211_add_iw_ie(struct wl_priv *wl, struct net_device *ndev, s32 bssidx, s32 pktflag,
+wl_cfg80211_add_iw_ie(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bssidx, s32 pktflag,
uint8 ie_id, uint8 *data, uint8 data_len)
{
s32 err = BCME_OK;
return -ENOMEM;
}
- if (wl->iw_ie_len == data_len && !memcmp(wl->iw_ie, data, data_len)) {
+ if (cfg->iw_ie_len == data_len && !memcmp(cfg->iw_ie, data, data_len)) {
WL_ERR(("Previous IW IE is equals to current IE\n"));
err = BCME_OK;
goto exit;
ie_setbuf->ie_buffer.ie_list[0].ie_data.id = ie_id;
/* if already set with previous values, delete it first */
- if (wl->iw_ie_len != 0) {
+ if (cfg->iw_ie_len != 0) {
WL_DBG(("Different IW_IE was already set. clear first\n"));
ie_setbuf->ie_buffer.ie_list[0].ie_data.len = 0;
err = wldev_iovar_setbuf_bsscfg(ndev, "ie", ie_setbuf, buf_len,
- wl->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &wl->ioctl_buf_sync);
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
if (err != BCME_OK)
goto exit;
memcpy((uchar *)&ie_setbuf->ie_buffer.ie_list[0].ie_data.data[0], data, data_len);
err = wldev_iovar_setbuf_bsscfg(ndev, "ie", ie_setbuf, buf_len,
- wl->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &wl->ioctl_buf_sync);
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
if (err == BCME_OK) {
- memcpy(wl->iw_ie, data, data_len);
- wl->iw_ie_len = data_len;
- wl->wl11u = TRUE;
+ memcpy(cfg->iw_ie, data, data_len);
+ cfg->iw_ie_len = data_len;
+ cfg->wl11u = TRUE;
err = wldev_iovar_setint_bsscfg(ndev, "grat_arp", 1, bssidx);
}
s32
wl_cfg80211_set_band(struct net_device *ndev, int band)
{
- struct wl_priv *wl = wlcfg_drv_priv;
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
int ret = 0;
+ s32 roam_off;
char ioctl_buf[50];
if ((band < WLC_BAND_AUTO) || (band > WLC_BAND_2G)) {
WL_ERR(("Invalid band\n"));
return -EINVAL;
}
+
+ if ((ret = wldev_iovar_getint(ndev, "roam_off", &roam_off)) < 0) {
+ WL_ERR(("geting roam_off failed code=%d\n", ret));
+ return ret;
+ } else if (roam_off == 1) {
+ WL_DBG(("Roaming off, no need to set roam_band\n"));
+ cfg->curr_band = band;
+ return 0;
+ }
if ((ret = wldev_iovar_setbuf(ndev, "roam_band", &band,
sizeof(int), ioctl_buf, sizeof(ioctl_buf), NULL)) < 0) {
}
WL_DBG(("Setting band to %d\n", band));
- wl->curr_band = band;
+ cfg->curr_band = band;
return 0;
}
#if defined(DHCP_SCAN_SUPPRESS)
static void wl_cfg80211_scan_supp_timerfunc(ulong data)
{
- struct wl_priv *wl = (struct wl_priv *)data;
+ struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *)data;
WL_DBG(("Enter \n"));
- schedule_work(&wl->wlan_work);
+ schedule_work(&cfg->wlan_work);
}
int wl_cfg80211_scan_suppress(struct net_device *dev, int suppress)
{
int ret = 0;
struct wireless_dev *wdev;
- struct wl_priv *wl;
+ struct bcm_cfg80211 *cfg;
if (!dev || ((suppress != 0) && (suppress != 1))) {
ret = -EINVAL;
goto exit;
ret = -EINVAL;
goto exit;
}
- wl = (struct wl_priv *)wiphy_priv(wdev->wiphy);
- if (!wl) {
+ cfg = (struct bcm_cfg80211 *)wiphy_priv(wdev->wiphy);
+ if (!cfg) {
ret = -EINVAL;
goto exit;
}
- if (suppress == wl->scan_suppressed) {
+ if (suppress == cfg->scan_suppressed) {
WL_DBG(("No change in scan_suppress state. Ignoring cmd..\n"));
return 0;
}
- if (timer_pending(&wl->scan_supp_timer))
- del_timer_sync(&wl->scan_supp_timer);
+ if (timer_pending(&cfg->scan_supp_timer))
+ del_timer_sync(&cfg->scan_supp_timer);
if ((ret = wldev_ioctl(dev, WLC_SET_SCANSUPPRESS,
&suppress, sizeof(int), true)) < 0) {
WL_ERR(("Scan suppress setting failed ret:%d \n", ret));
} else {
WL_DBG(("Scan suppress %s \n", suppress ? "Enabled" : "Disabled"));
- wl->scan_suppressed = suppress;
+ cfg->scan_suppressed = suppress;
}
/* If scan_suppress is set, start a timer to monitor it (just in case) */
- if (wl->scan_suppressed) {
+ if (cfg->scan_suppressed) {
if (ret) {
WL_ERR(("Retry scan_suppress reset at a later time \n"));
- mod_timer(&wl->scan_supp_timer,
+ mod_timer(&cfg->scan_supp_timer,
jiffies + msecs_to_jiffies(WL_SCAN_SUPPRESS_RETRY));
} else {
WL_DBG(("Start wlan_timer to clear of scan_suppress \n"));
- mod_timer(&wl->scan_supp_timer,
+ mod_timer(&cfg->scan_supp_timer,
jiffies + msecs_to_jiffies(WL_SCAN_SUPPRESS_TIMEOUT));
}
}
}
#endif /* DHCP_SCAN_SUPPRESS */
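+/* Abort a scan pending on the given cfgdev: report it to cfg80211 as aborted and clear the SCANNING driver status. */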
+int wl_cfg80211_scan_stop(bcm_struct_cfgdev *cfgdev)
+{
+ struct bcm_cfg80211 *cfg = NULL;
+ struct net_device *ndev = NULL;
+ unsigned long flags;
+ int clear_flag = 0;
+ int ret = 0;
+
+ WL_TRACE(("Enter\n"));
+
+ cfg = g_bcm_cfg;
+ if (!cfg)
+ return -EINVAL;
+
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+ spin_lock_irqsave(&cfg->cfgdrv_lock, flags);
+#ifdef WL_CFG80211_P2P_DEV_IF
+ if (cfg->scan_request && cfg->scan_request->wdev == cfgdev)
+#else
+ if (cfg->scan_request && cfg->scan_request->dev == cfgdev)
+#endif
+ {
+ cfg80211_scan_done(cfg->scan_request, true);
+ cfg->scan_request = NULL;
+ clear_flag = 1;
+ }
+ spin_unlock_irqrestore(&cfg->cfgdrv_lock, flags);
+
+ if (clear_flag)
+ wl_clr_drv_status(cfg, SCANNING, ndev);
+
+ return ret;
+}
+
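+/* Lightweight accessors on the global cfg context: VSDB state, the dhd_pub handle, and whether the P2P info structure has been allocated. */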
+bool wl_cfg80211_is_vsdb_mode(void)
+{
+ return (g_bcm_cfg && g_bcm_cfg->vsdb_mode);
+}
+
+void* wl_cfg80211_get_dhdp(void)
+{
+ struct bcm_cfg80211 *cfg = g_bcm_cfg;
+
+ return cfg->pub;
+}
+
+bool wl_cfg80211_is_p2p_active(void)
+{
+ return (g_bcm_cfg && g_bcm_cfg->p2p);
+}
+
static void wl_cfg80211_work_handler(struct work_struct * work)
{
- struct wl_priv *wl = NULL;
+ struct bcm_cfg80211 *cfg = NULL;
struct net_info *iter, *next;
s32 err = BCME_OK;
s32 pm = PM_FAST;
- wl = container_of(work, struct wl_priv, pm_enable_work.work);
+ cfg = container_of(work, struct bcm_cfg80211, pm_enable_work.work);
WL_DBG(("Enter \n"));
- if (wl->pm_enable_work_on) {
- wl->pm_enable_work_on = false;
- for_each_ndev(wl, iter, next) {
- if (!wl_get_drv_status(wl, CONNECTED, iter->ndev) ||
- (wl_get_mode_by_netdev(wl, iter->ndev) != WL_MODE_BSS))
+ if (cfg->pm_enable_work_on) {
+ cfg->pm_enable_work_on = false;
+ for_each_ndev(cfg, iter, next) {
+ if (!wl_get_drv_status(cfg, CONNECTED, iter->ndev) ||
+ (wl_get_mode_by_netdev(cfg, iter->ndev) != WL_MODE_BSS))
continue;
if (iter->ndev) {
if ((err = wldev_ioctl(iter->ndev, WLC_SET_PM,
else
WL_ERR(("%s:error (%d)\n", iter->ndev->name, err));
} else
- iter->ndev->ieee80211_ptr->ps = true;
+ wl_cfg80211_update_power_mode(iter->ndev);
}
}
-
}
#if defined(DHCP_SCAN_SUPPRESS)
- else if (wl->scan_suppressed) {
+ else if (cfg->scan_suppressed) {
/* There is a pending scan_suppress; clear it */
WL_ERR(("Clean up from timer after %d msec\n", WL_SCAN_SUPPRESS_TIMEOUT));
- wl_cfg80211_scan_suppress(wl_to_prmry_ndev(wl), 0);
+ wl_cfg80211_scan_suppress(bcmcfg_to_prmry_ndev(cfg), 0);
}
#endif /* DHCP_SCAN_SUPPRESS */
}
if (frame_len < DOT11_ACTION_HDR_LEN)
return DOT11_ACTION_CAT_ERR_MASK;
category = ptr[DOT11_ACTION_CAT_OFF];
- WL_INFO(("Action Category: %d\n", category));
+ WL_INFORM(("Action Category: %d\n", category));
return category;
}
if (DOT11_ACTION_CAT_PUBLIC != wl_get_action_category(frame, frame_len))
return BCME_ERROR;
*ret_action = ptr[DOT11_ACTION_ACT_OFF];
- WL_INFO(("Public Action : %d\n", *ret_action));
+ WL_INFORM(("Public Action : %d\n", *ret_action));
return BCME_OK;
}
-#ifdef POWER_OFF_IN_SUSPEND
-static void wl_scan_complete(struct wl_priv *wl)
+#ifdef WLFBT
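+/* Copy the cached FBT (fast BSS transition) key material out of the global cfg context. */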
+void
+wl_cfg80211_get_fbt_key(uint8 *key)
{
- wl_event_msg_t msg;
- WL_TRACE(("In\n"));
-
- if (!(wl->scan_request)) {
- WL_ERR(("timer expired but no scan request\n"));
- return;
- }
- bzero(&msg, sizeof(wl_event_msg_t));
- if (wl->escan_on && wl->escan_info.escan_state == WL_ESCAN_STATE_SCANING) {
- msg.event_type = hton32(WLC_E_ESCAN_RESULT);
- msg.status = hton32(WLC_E_STATUS_SUCCESS);
- msg.reason = 0;
- wl_cfg80211_event(wl_to_prmry_ndev(wl), &msg, NULL);
- }
+ memcpy(key, g_bcm_cfg->fbt_key, FBT_KEYLEN);
}
+#endif /* WLFBT */
-void wl_cfg80211_stop(void)
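+/* Build a synthetic WLC_E_ROAM event for the given BSSID and run it through the normal roam notification path. */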
+static int
+wl_cfg80211_delayed_roam(struct bcm_cfg80211 *cfg, struct net_device *ndev,
+ const struct ether_addr *bssid)
{
- struct wl_priv *wl;
+ s32 err;
+ wl_event_msg_t e;
- wl = wlcfg_drv_priv;
+ bzero(&e, sizeof(e));
+ e.event_type = cpu_to_be32(WLC_E_ROAM);
+ memcpy(&e.addr, bssid, ETHER_ADDR_LEN);
+ /* trigger the roam event handler */
+ err = wl_notify_roaming_status(cfg, ndev_to_cfgdev(ndev), &e, NULL);
- WL_TRACE(("In\n"));
+ return err;
+}
- wl_scan_complete(wl);
+#ifdef WL_CFG80211_ACL
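+/* cfg80211 ACL callback: map the requested policy to a firmware MAC list mode, copy the ACL entries into a maclist and push it down via wl_android_set_ap_mac_list(). */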
+static int
+wl_cfg80211_set_mac_acl(struct wiphy *wiphy, struct net_device *cfgdev,
+ const struct cfg80211_acl_data *acl)
+{
+ int i;
+ int ret = 0;
+ int macnum = 0;
+ int macmode = MACLIST_MODE_DISABLED;
+ struct maclist *list;
- if (wl->btcoex_info && wl->btcoex_info->timer_on) {
- wl->btcoex_info->timer_on = 0;
- del_timer_sync(&wl->btcoex_info->timer);
+ /* get the MAC filter mode */
+ if (acl && acl->acl_policy == NL80211_ACL_POLICY_DENY_UNLESS_LISTED) {
+ macmode = MACLIST_MODE_ALLOW;
+ } else if (acl && acl->acl_policy == NL80211_ACL_POLICY_ACCEPT_UNLESS_LISTED &&
+ acl->n_acl_entries) {
+ macmode = MACLIST_MODE_DENY;
}
- if (wl->p2p_supported) {
- if (timer_pending(&wl->p2p->listen_timer))
- del_timer_sync(&wl->p2p->listen_timer);
+ /* if acl == NULL, macmode is still disabled.. */
+ if (macmode == MACLIST_MODE_DISABLED) {
+ if ((ret = wl_android_set_ap_mac_list(cfgdev, macmode, NULL)) != 0)
+ WL_ERR(("%s : Setting MAC list failed error=%d\n", __FUNCTION__, ret));
+
+ return ret;
}
- del_timer_sync(&wl->scan_timeout);
+ macnum = acl->n_acl_entries;
+ if (macnum < 0 || macnum > MAX_NUM_MAC_FILT) {
+ WL_ERR(("%s : invalid number of MAC address entries %d\n",
+ __FUNCTION__, macnum));
+ return -1;
+ }
-#if defined(RSSIAVG)
- wl_free_rssi_cache(&g_rssi_cache_ctrl);
- wl_free_rssi_cache(&g_rssi2_cache_ctrl);
-#endif
-#if defined(BSSCACHE)
- wl_free_bss_cache(&g_bss_cache_ctrl);
- wl_run_bss_cache_timer(&g_bss_cache_ctrl, 0);
-#endif
-}
+ /* allocate memory for the MAC list */
+ list = (struct maclist*)kmalloc(sizeof(int) +
+ sizeof(struct ether_addr) * macnum, GFP_KERNEL);
+ if (!list) {
+ WL_ERR(("%s : failed to allocate memory\n", __FUNCTION__));
+ return -1;
+ }
-void wl_cfg80211_send_disconnect(void)
-{
- struct wl_priv *wl;
+ /* prepare the MAC list */
+ list->count = htod32(macnum);
+ for (i = 0; i < macnum; i++) {
+ memcpy(&list->ea[i], &acl->mac_addrs[i], ETHER_ADDR_LEN);
+ }
+ /* set the list */
+ if ((ret = wl_android_set_ap_mac_list(cfgdev, macmode, list)) != 0)
+ WL_ERR(("%s : Setting MAC list failed error=%d\n", __FUNCTION__, ret));
- wl = wlcfg_drv_priv;
+ kfree(list);
- WL_TRACE(("In\n"));
- cfg80211_disconnected(wl_to_prmry_ndev(wl), 0, NULL, 0, GFP_KERNEL);
+ return ret;
}
+#endif /* WL_CFG80211_ACL */
-void wl_cfg80211_user_sync(bool lock)
+#ifdef WL_NAN
+int
+wl_cfg80211_nan_cmd_handler(struct net_device *ndev, char *cmd, int cmd_len)
{
- struct wl_priv *wl;
-
- wl = wlcfg_drv_priv;
- if (lock)
- mutex_lock(&wl->usr_sync);
- else
- mutex_unlock(&wl->usr_sync);
+ return wl_cfgnan_cmd_handler(ndev, g_bcm_cfg, cmd, cmd_len);
}
-#endif
+#endif /* WL_NAN */
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: wl_cfg80211.h 418267 2013-08-14 12:49:52Z $
+ * $Id: wl_cfg80211.h 491407 2014-07-16 09:23:04Z $
+ */
+
+/**
+ * Older Linux versions support the 'iw' interface, more recent ones the 'cfg80211' interface.
*/
#ifndef _wl_cfg80211_h_
struct wl_conf;
struct wl_iface;
-struct wl_priv;
+struct bcm_cfg80211;
struct wl_security;
struct wl_ibss;
-#define htod32(i) i
-#define htod16(i) i
-#define dtoh32(i) i
-#define dtoh16(i) i
-#define htodchanspec(i) i
-#define dtohchanspec(i) i
+#define htod32(i) (i)
+#define htod16(i) (i)
+#define dtoh32(i) (i)
+#define dtoh16(i) (i)
+#define htodchanspec(i) (i)
+#define dtohchanspec(i) (i)
#define WL_DBG_NONE 0
#define WL_DBG_P2P_ACTION (1 << 5)
} while (0)
#endif /* defined(DHD_DEBUG) */
-#ifdef WL_INFO
-#undef WL_INFO
+#ifdef WL_INFORM
+#undef WL_INFORM
#endif
-#define WL_INFO(args) \
+
+#define WL_INFORM(args) \
do { \
if (wl_dbg_level & WL_DBG_INFO) { \
printk(KERN_INFO "CFG80211-INFO) %s : ", __func__); \
printk args; \
} \
} while (0)
+
+
#ifdef WL_SCAN
#undef WL_SCAN
#endif
#define WL_ASSOC_INFO_MAX 512
#define WL_IOCTL_LEN_MAX 2048
#define WL_EXTRA_BUF_MAX 2048
-#define WL_ISCAN_BUF_MAX 2048
-#define WL_ISCAN_TIMER_INTERVAL_MS 3000
#define WL_SCAN_ERSULTS_LAST (WL_SCAN_RESULTS_NO_MEM+1)
#define WL_AP_MAX 256
#define WL_FILE_NAME_MAX 256
#define WL_AF_SEARCH_TIME_MAX 450
#define WL_AF_TX_EXTRA_TIME_MAX 200
-#define WL_SCAN_TIMER_INTERVAL_MS 8000 /* Scan timeout */
+#define WL_SCAN_TIMER_INTERVAL_MS 10000 /* Scan timeout */
#define WL_CHANNEL_SYNC_RETRY 5
#define WL_INVALID -1
#define WL_SCAN_SUPPRESS_TIMEOUT 31000 /* default Framework DHCP timeout is 30 sec */
#define WL_SCAN_SUPPRESS_RETRY 3000
-#define WL_PM_ENABLE_TIMEOUT 3000
+#define WL_PM_ENABLE_TIMEOUT 10000
+
+#ifdef WLAIBSS
+/* Custom AIBSS beacon parameters */
+#define AIBSS_INITIAL_MIN_BCN_DUR 500
+#define AIBSS_MIN_BCN_DUR 5000
+#define AIBSS_BCN_FLOOD_DUR 5000
+#endif /* WLAIBSS */
/* driver status */
enum wl_status {
WL_PROF_DTIMPERIOD
};
-/* driver iscan state */
-enum wl_iscan_state {
- WL_ISCAN_STATE_IDLE,
- WL_ISCAN_STATE_SCANING
-};
-
/* dongle escan state */
enum wl_escan_state {
WL_ESCAN_STATE_IDLE,
WL_PROBE_RESP = 0x2,
WL_ASSOC_RESP = 0x4
};
+
+enum wl_handler_del_type {
+ WL_HANDLER_NOTUSE,
+ WL_HANDLER_DEL,
+ WL_HANDLER_MAINTAIN,
+ WL_HANDLER_PEND
+};
+
/* beacon / probe_response */
struct beacon_proberesp {
__le64 timestamp;
struct ieee80211_channel channel;
};
-typedef s32(*EVENT_HANDLER) (struct wl_priv *wl, bcm_struct_cfgdev *cfgdev,
+typedef s32(*EVENT_HANDLER) (struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
const wl_event_msg_t *e, void *data);
/* bss inform structure for cfg80211 interface */
u8 channel;
};
-/* wl driver profile */
+/* cfg driver profile */
struct wl_profile {
u32 mode;
s32 band;
s32 pm;
struct list_head list; /* list of all net_info structure */
};
-typedef s32(*ISCAN_HANDLER) (struct wl_priv *wl);
-
-/* iscan controller */
-struct wl_iscan_ctrl {
- struct net_device *dev;
- struct timer_list timer;
- u32 timer_ms;
- u32 timer_on;
- s32 state;
- struct task_struct *tsk;
- struct semaphore sync;
- ISCAN_HANDLER iscan_handler[WL_SCAN_ERSULTS_LAST];
- void *data;
- s8 ioctl_buf[WLC_IOCTL_SMLEN];
- s8 scan_buf[WL_ISCAN_BUF_MAX];
-};
/* association inform */
#define MAX_REQ_LINE 1024
/* Structure to hold WPS and WPA IEs for an AP */
u8 probe_res_ie[VNDR_IES_MAX_BUF_LEN];
u8 beacon_ie[VNDR_IES_MAX_BUF_LEN];
+ u8 assoc_res_ie[VNDR_IES_MAX_BUF_LEN];
u32 probe_res_ie_len;
u32 beacon_ie_len;
+ u32 assoc_res_ie_len;
u8 *wpa_ie;
u8 *rsn_ie;
u8 *wps_ie;
bool security_mode;
};
-struct btcoex_info {
- struct timer_list timer;
- u32 timer_ms;
- u32 timer_on;
- u32 ts_dhcp_start; /* ms ts ecord time stats */
- u32 ts_dhcp_ok; /* ms ts ecord time stats */
- bool dhcp_done; /* flag, indicates that host done with
- * dhcp before t1/t2 expiration
- */
- s32 bt_state;
- struct work_struct work;
- struct net_device *dev;
-};
struct sta_info {
/* Structure to hold WPS IE for a STA */
/* Max length of Interworking element */
#define IW_IES_MAX_BUF_LEN 9
#endif
-
+#ifdef WLFBT
+#define FBT_KEYLEN 32
+#endif
#define MAX_EVENT_BUF_NUM 16
typedef struct wl_eventmsg_buf {
u16 num;
} event [MAX_EVENT_BUF_NUM];
} wl_eventmsg_buf_t;
-/* private data of cfg80211 interface */
-struct wl_priv {
- struct wireless_dev *wdev; /* representing wl cfg80211 device */
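+/* Snapshot of the last firmware interface event: interface index, bsscfg index, MAC address and name. */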
+typedef struct wl_if_event_info {
+ bool valid;
+ int ifidx;
+ int bssidx;
+ uint8 mac[ETHER_ADDR_LEN];
+ char name[IFNAMSIZ+1];
+} wl_if_event_info;
- struct wireless_dev *p2p_wdev; /* representing wl cfg80211 device for P2P */
+/* private data of cfg80211 interface */
+struct bcm_cfg80211 {
+ struct wireless_dev *wdev; /* representing cfg80211 device */
+ struct wireless_dev *p2p_wdev; /* representing cfg80211 device for P2P */
struct net_device *p2p_net; /* reference to p2p0 interface */
struct wl_conf *conf;
#else
struct wl_ie ie;
#endif
- struct wl_iscan_ctrl *iscan; /* iscan controller */
/* association information container */
#if defined(STATIC_WL_PRIV_STRUCT)
#ifdef WL_CFG80211_SYNC_GON
ulong af_tx_sent_jiffies;
#endif /* WL_CFG80211_SYNC_GON */
- bool iscan_on; /* iscan on/off switch */
- bool iscan_kickstart; /* indicate iscan already started */
- bool escan_on; /* escan on/off switch */
struct escan_info escan_info; /* escan information */
bool active_scan; /* current scan mode */
bool ibss_starter; /* indicates this sta is ibss starter */
bool pwr_save;
bool roam_on; /* on/off switch for self-roaming */
bool scan_tried; /* indicates if first scan attempted */
+#if defined(BCMSDIO) || defined(BCMPCIE)
bool wlfc_on;
+#endif
bool vsdb_mode;
bool roamoff_on_concurrent;
u8 *ioctl_buf; /* ioctl buffer */
u64 send_action_id;
u64 last_roc_id;
wait_queue_head_t netif_change_event;
+ wl_if_event_info if_event_info;
struct completion send_af_done;
struct afx_hdl *afx_hdl;
struct ap_info *ap_info;
struct sta_info *sta_info;
struct p2p_info *p2p;
bool p2p_supported;
- struct btcoex_info *btcoex_info;
+ void *btcoex_info;
struct timer_list scan_timeout; /* Timer to catch scan event timeout */
- s32(*state_notifier) (struct wl_priv *wl,
+ s32(*state_notifier) (struct bcm_cfg80211 *cfg,
struct net_info *_net_info, enum wl_status state, bool set);
unsigned long interrested_state;
wlc_ssid_t hostapd_ssid;
struct timer_list scan_supp_timer;
struct work_struct wlan_work;
struct mutex event_sync; /* mainly for up/down synchronization */
+ bool disable_roam_event;
bool pm_enable_work_on;
struct delayed_work pm_enable_work;
vndr_ie_setbuf_t *ibss_vsie; /* keep the VSIE for IBSS */
int ibss_vsie_len;
+#ifdef WLAIBSS
+ u32 aibss_txfail_pid;
+ u32 aibss_txfail_seq;
+#endif /* WLAIBSS */
+ u32 rmc_event_pid;
+ u32 rmc_event_seq;
+#ifdef WLAIBSS_MCHAN
+ struct ether_addr ibss_if_addr;
+ bcm_struct_cfgdev *ibss_cfgdev; /* For AIBSS */
+#endif /* WLAIBSS_MCHAN */
+ bcm_struct_cfgdev *bss_cfgdev; /* For DUAL STA/STA+AP */
+ s32 cfgdev_bssidx;
+ bool bss_pending_op; /* indicates whether there is a pending IF operation */
+#ifdef WLFBT
+ uint8 fbt_key[FBT_KEYLEN];
+#endif
+ bool roam_offload;
+ bool nan_running;
};
(struct wl_bss_info *)((uintptr) bss + dtoh32(bss->length)) : list->bss_info;
}
static inline s32
-wl_alloc_netinfo(struct wl_priv *wl, struct net_device *ndev,
+wl_alloc_netinfo(struct bcm_cfg80211 *cfg, struct net_device *ndev,
struct wireless_dev * wdev, s32 mode, bool pm_block)
{
struct net_info *_net_info;
s32 err = 0;
- if (wl->iface_cnt == IFACE_MAX_CNT)
+ if (cfg->iface_cnt == IFACE_MAX_CNT)
return -ENOMEM;
_net_info = kzalloc(sizeof(struct net_info), GFP_KERNEL);
if (!_net_info)
_net_info->pm = 0;
_net_info->pm_block = pm_block;
_net_info->roam_off = WL_INVALID;
- wl->iface_cnt++;
- list_add(&_net_info->list, &wl->net_list);
+ cfg->iface_cnt++;
+ list_add(&_net_info->list, &cfg->net_list);
}
return err;
}
static inline void
-wl_dealloc_netinfo(struct wl_priv *wl, struct net_device *ndev)
+wl_dealloc_netinfo(struct bcm_cfg80211 *cfg, struct net_device *ndev)
{
struct net_info *_net_info, *next;
- list_for_each_entry_safe(_net_info, next, &wl->net_list, list) {
+ list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) {
if (ndev && (_net_info->ndev == ndev)) {
list_del(&_net_info->list);
- wl->iface_cnt--;
- if (_net_info->wdev) {
- kfree(_net_info->wdev);
- ndev->ieee80211_ptr = NULL;
- }
+ cfg->iface_cnt--;
kfree(_net_info);
}
}
}
static inline void
-wl_delete_all_netinfo(struct wl_priv *wl)
+wl_delete_all_netinfo(struct bcm_cfg80211 *cfg)
{
struct net_info *_net_info, *next;
- list_for_each_entry_safe(_net_info, next, &wl->net_list, list) {
+ list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) {
list_del(&_net_info->list);
if (_net_info->wdev)
kfree(_net_info->wdev);
kfree(_net_info);
}
- wl->iface_cnt = 0;
+ cfg->iface_cnt = 0;
}
static inline u32
-wl_get_status_all(struct wl_priv *wl, s32 status)
+wl_get_status_all(struct bcm_cfg80211 *cfg, s32 status)
{
struct net_info *_net_info, *next;
u32 cnt = 0;
- list_for_each_entry_safe(_net_info, next, &wl->net_list, list) {
+ list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) {
if (_net_info->ndev &&
test_bit(status, &_net_info->sme_state))
cnt++;
return cnt;
}
static inline void
-wl_set_status_all(struct wl_priv *wl, s32 status, u32 op)
+wl_set_status_all(struct bcm_cfg80211 *cfg, s32 status, u32 op)
{
struct net_info *_net_info, *next;
- list_for_each_entry_safe(_net_info, next, &wl->net_list, list) {
+ list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) {
switch (op) {
case 1:
return; /* set all status is not allowed */
case 2:
clear_bit(status, &_net_info->sme_state);
- if (wl->state_notifier &&
- test_bit(status, &(wl->interrested_state)))
- wl->state_notifier(wl, _net_info, status, false);
+ if (cfg->state_notifier &&
+ test_bit(status, &(cfg->interrested_state)))
+ cfg->state_notifier(cfg, _net_info, status, false);
break;
case 4:
return; /* change all status is not allowed */
}
}
static inline void
-wl_set_status_by_netdev(struct wl_priv *wl, s32 status,
+wl_set_status_by_netdev(struct bcm_cfg80211 *cfg, s32 status,
struct net_device *ndev, u32 op)
{
struct net_info *_net_info, *next;
- list_for_each_entry_safe(_net_info, next, &wl->net_list, list) {
+ list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) {
if (ndev && (_net_info->ndev == ndev)) {
switch (op) {
case 1:
set_bit(status, &_net_info->sme_state);
- if (wl->state_notifier &&
- test_bit(status, &(wl->interrested_state)))
- wl->state_notifier(wl, _net_info, status, true);
+ if (cfg->state_notifier &&
+ test_bit(status, &(cfg->interrested_state)))
+ cfg->state_notifier(cfg, _net_info, status, true);
break;
case 2:
clear_bit(status, &_net_info->sme_state);
- if (wl->state_notifier &&
- test_bit(status, &(wl->interrested_state)))
- wl->state_notifier(wl, _net_info, status, false);
+ if (cfg->state_notifier &&
+ test_bit(status, &(cfg->interrested_state)))
+ cfg->state_notifier(cfg, _net_info, status, false);
break;
case 4:
change_bit(status, &_net_info->sme_state);
}
static inline u32
-wl_get_status_by_netdev(struct wl_priv *wl, s32 status,
+wl_get_status_by_netdev(struct bcm_cfg80211 *cfg, s32 status,
struct net_device *ndev)
{
struct net_info *_net_info, *next;
- list_for_each_entry_safe(_net_info, next, &wl->net_list, list) {
+ list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) {
if (ndev && (_net_info->ndev == ndev))
return test_bit(status, &_net_info->sme_state);
}
}
static inline s32
-wl_get_mode_by_netdev(struct wl_priv *wl, struct net_device *ndev)
+wl_get_mode_by_netdev(struct bcm_cfg80211 *cfg, struct net_device *ndev)
{
struct net_info *_net_info, *next;
- list_for_each_entry_safe(_net_info, next, &wl->net_list, list) {
+ list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) {
if (ndev && (_net_info->ndev == ndev))
return _net_info->mode;
}
static inline void
-wl_set_mode_by_netdev(struct wl_priv *wl, struct net_device *ndev,
+wl_set_mode_by_netdev(struct bcm_cfg80211 *cfg, struct net_device *ndev,
s32 mode)
{
struct net_info *_net_info, *next;
- list_for_each_entry_safe(_net_info, next, &wl->net_list, list) {
+ list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) {
if (ndev && (_net_info->ndev == ndev))
_net_info->mode = mode;
}
}
static inline struct wl_profile *
-wl_get_profile_by_netdev(struct wl_priv *wl, struct net_device *ndev)
+wl_get_profile_by_netdev(struct bcm_cfg80211 *cfg, struct net_device *ndev)
{
struct net_info *_net_info, *next;
- list_for_each_entry_safe(_net_info, next, &wl->net_list, list) {
+ list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) {
if (ndev && (_net_info->ndev == ndev))
return &_net_info->profile;
}
return NULL;
}
static inline struct net_info *
-wl_get_netinfo_by_netdev(struct wl_priv *wl, struct net_device *ndev)
+wl_get_netinfo_by_netdev(struct bcm_cfg80211 *cfg, struct net_device *ndev)
{
struct net_info *_net_info, *next;
- list_for_each_entry_safe(_net_info, next, &wl->net_list, list) {
+ list_for_each_entry_safe(_net_info, next, &cfg->net_list, list) {
if (ndev && (_net_info->ndev == ndev))
return _net_info;
}
return NULL;
}
-#define wl_to_wiphy(w) (w->wdev->wiphy)
-#define wl_to_prmry_ndev(w) (w->wdev->netdev)
-#define wl_to_prmry_wdev(w) (w->wdev)
-#define wl_to_p2p_wdev(w) (w->p2p_wdev)
+#define bcmcfg_to_wiphy(cfg) (cfg->wdev->wiphy)
+#define bcmcfg_to_prmry_ndev(cfg) (cfg->wdev->netdev)
+#define bcmcfg_to_prmry_wdev(cfg) (cfg->wdev)
+#define bcmcfg_to_p2p_wdev(cfg) (cfg->p2p_wdev)
#define ndev_to_wl(n) (wdev_to_wl(n->ieee80211_ptr))
#define ndev_to_wdev(ndev) (ndev->ieee80211_ptr)
#define wdev_to_ndev(wdev) (wdev->netdev)
#if defined(WL_ENABLE_P2P_IF)
-#define ndev_to_wlc_ndev(ndev, wl) ((ndev == wl->p2p_net) ? \
- wl_to_prmry_ndev(wl) : ndev)
+#define ndev_to_wlc_ndev(ndev, cfg) ((ndev == cfg->p2p_net) ? \
+ bcmcfg_to_prmry_ndev(cfg) : ndev)
#else
-#define ndev_to_wlc_ndev(ndev, wl) (ndev)
+#define ndev_to_wlc_ndev(ndev, cfg) (ndev)
#endif /* WL_ENABLE_P2P_IF */
#if defined(WL_CFG80211_P2P_DEV_IF)
-#define wdev_to_wlc_ndev(wdev, wl) \
+#define wdev_to_wlc_ndev(wdev, cfg) \
((wdev->iftype == NL80211_IFTYPE_P2P_DEVICE) ? \
- wl_to_prmry_ndev(wl) : wdev_to_ndev(wdev))
-#define cfgdev_to_wlc_ndev(cfgdev, wl) wdev_to_wlc_ndev(cfgdev, wl)
+ bcmcfg_to_prmry_ndev(cfg) : wdev_to_ndev(wdev))
+#define cfgdev_to_wlc_ndev(cfgdev, cfg) wdev_to_wlc_ndev(cfgdev, cfg)
+#define bcmcfg_to_prmry_cfgdev(cfgdev, cfg) bcmcfg_to_prmry_wdev(cfg)
#elif defined(WL_ENABLE_P2P_IF)
-#define cfgdev_to_wlc_ndev(cfgdev, wl) ndev_to_wlc_ndev(cfgdev, wl)
+#define cfgdev_to_wlc_ndev(cfgdev, cfg) ndev_to_wlc_ndev(cfgdev, cfg)
+#define bcmcfg_to_prmry_cfgdev(cfgdev, cfg) bcmcfg_to_prmry_ndev(cfg)
#else
-#define cfgdev_to_wlc_ndev(cfgdev, wl) (cfgdev)
+#define cfgdev_to_wlc_ndev(cfgdev, cfg) (cfgdev)
+#define bcmcfg_to_prmry_cfgdev(cfgdev, cfg) (cfgdev)
#endif /* WL_CFG80211_P2P_DEV_IF */
#if defined(WL_CFG80211_P2P_DEV_IF)
#define ndev_to_cfgdev(ndev) ndev_to_wdev(ndev)
+#define cfgdev_to_ndev(cfgdev) ((cfgdev) ? (cfgdev)->netdev : NULL)
+#define discover_cfgdev(cfgdev, cfg) (cfgdev->iftype == NL80211_IFTYPE_P2P_DEVICE)
#else
#define ndev_to_cfgdev(ndev) (ndev)
+#define cfgdev_to_ndev(cfgdev) (cfgdev)
+#define discover_cfgdev(cfgdev, cfg) (cfgdev == cfg->p2p_net)
#endif /* WL_CFG80211_P2P_DEV_IF */
#if defined(WL_CFG80211_P2P_DEV_IF)
-#define scan_req_match(wl) (((wl) && (wl->scan_request) && \
- (wl->scan_request->wdev == wl->p2p_wdev)) ? true : false)
+#define scan_req_match(cfg) (((cfg) && (cfg->scan_request) && \
+ (cfg->scan_request->wdev == cfg->p2p_wdev)) ? true : false)
#elif defined(WL_ENABLE_P2P_IF)
-#define scan_req_match(wl) (((wl) && (wl->scan_request) && \
- (wl->scan_request->dev == wl->p2p_net)) ? true : false)
+#define scan_req_match(cfg) (((cfg) && (cfg->scan_request) && \
+ (cfg->scan_request->dev == cfg->p2p_net)) ? true : false)
#else
-#define scan_req_match(wl) (((wl) && p2p_is_on(wl) && p2p_scan(wl)) ? \
+#define scan_req_match(cfg) (((cfg) && p2p_is_on(cfg) && p2p_scan(cfg)) ? \
true : false)
#endif /* WL_CFG80211_P2P_DEV_IF */
#define wl_to_ie(w) (&w->ie)
#define wl_to_conn(w) (&w->conn_info)
#endif
-#define iscan_to_wl(i) ((struct wl_priv *)(i->data))
-#define wl_to_iscan(w) (w->iscan)
#define wiphy_from_scan(w) (w->escan_info.wiphy)
-#define wl_get_drv_status_all(wl, stat) \
- (wl_get_status_all(wl, WL_STATUS_ ## stat))
-#define wl_get_drv_status(wl, stat, ndev) \
- (wl_get_status_by_netdev(wl, WL_STATUS_ ## stat, ndev))
-#define wl_set_drv_status(wl, stat, ndev) \
- (wl_set_status_by_netdev(wl, WL_STATUS_ ## stat, ndev, 1))
-#define wl_clr_drv_status(wl, stat, ndev) \
- (wl_set_status_by_netdev(wl, WL_STATUS_ ## stat, ndev, 2))
-#define wl_clr_drv_status_all(wl, stat) \
- (wl_set_status_all(wl, WL_STATUS_ ## stat, 2))
-#define wl_chg_drv_status(wl, stat, ndev) \
- (wl_set_status_by_netdev(wl, WL_STATUS_ ## stat, ndev, 4))
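+/* the last argument of wl_set_status_by_netdev()/wl_set_status_all() selects the
+ * operation; judging by the macro names below, 1 sets, 2 clears and 4 changes
+ * the given WL_STATUS_ bit
+ */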
+#define wl_get_drv_status_all(cfg, stat) \
+ (wl_get_status_all(cfg, WL_STATUS_ ## stat))
+#define wl_get_drv_status(cfg, stat, ndev) \
+ (wl_get_status_by_netdev(cfg, WL_STATUS_ ## stat, ndev))
+#define wl_set_drv_status(cfg, stat, ndev) \
+ (wl_set_status_by_netdev(cfg, WL_STATUS_ ## stat, ndev, 1))
+#define wl_clr_drv_status(cfg, stat, ndev) \
+ (wl_set_status_by_netdev(cfg, WL_STATUS_ ## stat, ndev, 2))
+#define wl_clr_drv_status_all(cfg, stat) \
+ (wl_set_status_all(cfg, WL_STATUS_ ## stat, 2))
+#define wl_chg_drv_status(cfg, stat, ndev) \
+ (wl_set_status_by_netdev(cfg, WL_STATUS_ ## stat, ndev, 4))
#define for_each_bss(list, bss, __i) \
for (__i = 0; __i < list->count && __i < WL_AP_MAX; __i++, bss = next_bss(list, bss))
-#define for_each_ndev(wl, iter, next) \
- list_for_each_entry_safe(iter, next, &wl->net_list, list)
+#define for_each_ndev(cfg, iter, next) \
+ list_for_each_entry_safe(iter, next, &cfg->net_list, list)
/* In case of WPS from wpa_supplicant, the pairwise suite and group suite are 0.
((wl_cfgp2p_find_wpsie((u8 *)_sme->ie, _sme->ie_len) != NULL) && \
(!_sme->crypto.n_ciphers_pairwise) && \
(!_sme->crypto.cipher_group))
-extern s32 wl_cfg80211_attach(struct net_device *ndev, void *data);
+extern s32 wl_cfg80211_attach(struct net_device *ndev, void *context);
extern s32 wl_cfg80211_attach_post(struct net_device *ndev);
extern void wl_cfg80211_detach(void *para);
extern s32 wl_cfg80211_up(void *para);
extern s32 wl_cfg80211_down(void *para);
-extern s32 wl_cfg80211_notify_ifadd(struct net_device *ndev, s32 idx, s32 bssidx,
- void* _net_attach);
-extern s32 wl_cfg80211_ifdel_ops(struct net_device *net);
-extern s32 wl_cfg80211_notify_ifdel(void);
-extern s32 wl_cfg80211_is_progress_ifadd(void);
-extern s32 wl_cfg80211_is_progress_ifchange(void);
-extern s32 wl_cfg80211_is_progress_ifadd(void);
-extern s32 wl_cfg80211_notify_ifchange(void);
+extern s32 wl_cfg80211_notify_ifadd(int ifidx, char *name, uint8 *mac, uint8 bssidx);
+extern s32 wl_cfg80211_notify_ifdel(int ifidx, char *name, uint8 *mac, uint8 bssidx);
+extern s32 wl_cfg80211_notify_ifchange(int ifidx, char *name, uint8 *mac, uint8 bssidx);
+extern struct net_device* wl_cfg80211_allocate_if(struct bcm_cfg80211 *cfg, int ifidx, char *name,
+ uint8 *mac, uint8 bssidx);
+extern int wl_cfg80211_register_if(struct bcm_cfg80211 *cfg, int ifidx, struct net_device* ndev);
+extern int wl_cfg80211_remove_if(struct bcm_cfg80211 *cfg, int ifidx, struct net_device* ndev);
+extern int wl_cfg80211_scan_stop(bcm_struct_cfgdev *cfgdev);
+extern bool wl_cfg80211_is_vsdb_mode(void);
+extern void* wl_cfg80211_get_dhdp(void);
+extern bool wl_cfg80211_is_p2p_active(void);
extern void wl_cfg80211_dbg_level(u32 level);
extern s32 wl_cfg80211_get_p2p_dev_addr(struct net_device *net, struct ether_addr *p2pdev_addr);
extern s32 wl_cfg80211_set_p2p_noa(struct net_device *net, char* buf, int len);
extern s32 wl_cfg80211_set_wps_p2p_ie(struct net_device *net, char *buf, int len,
enum wl_management_type type);
extern s32 wl_cfg80211_set_p2p_ps(struct net_device *net, char* buf, int len);
+
+/* btcoex functions */
+void* wl_cfg80211_btcoex_init(struct net_device *ndev);
+void wl_cfg80211_btcoex_deinit(void);
+
#ifdef WL_SDO
-extern s32 wl_cfg80211_sdo_init(struct wl_priv *wl);
-extern s32 wl_cfg80211_sdo_deinit(struct wl_priv *wl);
+extern s32 wl_cfg80211_sdo_init(struct bcm_cfg80211 *cfg);
+extern s32 wl_cfg80211_sdo_deinit(struct bcm_cfg80211 *cfg);
extern s32 wl_cfg80211_sd_offload(struct net_device *net, char *cmd, char* buf, int len);
-extern s32 wl_cfg80211_pause_sdo(struct net_device *dev, struct wl_priv *wl);
-extern s32 wl_cfg80211_resume_sdo(struct net_device *dev, struct wl_priv *wl);
+extern s32 wl_cfg80211_pause_sdo(struct net_device *dev, struct bcm_cfg80211 *cfg);
+extern s32 wl_cfg80211_resume_sdo(struct net_device *dev, struct bcm_cfg80211 *cfg);
+
#endif
+
#ifdef WL_SUPPORT_AUTO_CHANNEL
#define CHANSPEC_BUF_SIZE 1024
#define CHAN_SEL_IOCTL_DELAY 300
extern s32 wl_cfg80211_get_best_channels(struct net_device *dev, char* command,
int total_len);
#endif /* WL_SUPPORT_AUTO_CHANNEL */
+
+extern int wl_cfg80211_ether_atoe(const char *a, struct ether_addr *n);
+extern int wl_cfg80211_hex_str_to_bin(unsigned char *data, int dlen, char *str);
extern int wl_cfg80211_hang(struct net_device *dev, u16 reason);
extern s32 wl_mode_to_nl80211_iftype(s32 mode);
int wl_cfg80211_do_driver_init(struct net_device *net);
void wl_cfg80211_enable_trace(u32 level);
-extern s32 wl_update_wiphybands(struct wl_priv *wl, bool notify);
+extern s32 wl_update_wiphybands(struct bcm_cfg80211 *cfg, bool notify);
extern s32 wl_cfg80211_if_is_group_owner(void);
+extern chanspec_t wl_chspec_host_to_driver(chanspec_t chanspec);
extern chanspec_t wl_ch_host_to_driver(u16 channel);
+extern s32 wl_set_tx_power(struct net_device *dev,
+ enum nl80211_tx_power_setting type, s32 dbm);
+extern s32 wl_get_tx_power(struct net_device *dev, s32 *dbm);
extern s32 wl_add_remove_eventmsg(struct net_device *ndev, u16 event, bool add);
-extern void wl_stop_wait_next_action_frame(struct wl_priv *wl);
-extern int wl_cfg80211_update_power_mode(struct net_device *dev);
+extern void wl_stop_wait_next_action_frame(struct bcm_cfg80211 *cfg, struct net_device *ndev);
#ifdef WL_HOST_BAND_MGMT
extern s32 wl_cfg80211_set_band(struct net_device *ndev, int band);
#endif /* WL_HOST_BAND_MGMT */
#endif /* OEM_ANDROID */
extern void wl_cfg80211_add_to_eventbuffer(wl_eventmsg_buf_t *ev, u16 event, bool set);
extern s32 wl_cfg80211_apply_eventbuffer(struct net_device *ndev,
- struct wl_priv *wl, wl_eventmsg_buf_t *ev);
-extern void get_primary_mac(struct wl_priv *wl, struct ether_addr *mac);
+ struct bcm_cfg80211 *cfg, wl_eventmsg_buf_t *ev);
+extern void get_primary_mac(struct bcm_cfg80211 *cfg, struct ether_addr *mac);
+extern void wl_cfg80211_update_power_mode(struct net_device *dev);
#define SCAN_BUF_CNT 2
#define SCAN_BUF_NEXT 1
+#define WL_SCANTYPE_LEGACY 0x1
+#define WL_SCANTYPE_P2P 0x2
#define wl_escan_set_sync_id(a, b) ((a) = htod16(0x1234))
+#define wl_escan_set_type(a, b)
#define wl_escan_get_buf(a, b) ((wl_scan_results_t *) (a)->escan_info.escan_buf)
#define wl_escan_check_sync_id(a, b, c) 0
#define wl_escan_print_sync_id(a, b, c)
#define wl_escan_init_sync_id(a)
extern void wl_cfg80211_ibss_vsie_set_buffer(vndr_ie_setbuf_t *ibss_vsie, int ibss_vsie_len);
extern s32 wl_cfg80211_ibss_vsie_delete(struct net_device *dev);
+#ifdef WLAIBSS
+extern void wl_cfg80211_set_txfail_pid(int pid);
+#endif /* WLAIBSS */
+extern void wl_cfg80211_set_rmc_pid(int pid);
+
+#ifdef WLFBT
+extern void wl_cfg80211_get_fbt_key(uint8 *key);
+#endif
/* Action frame specific functions */
extern u8 wl_get_action_category(void *frame, u32 frame_len);
extern int wl_get_public_action(void *frame, u32 frame_len, u8 *ret_action);
+#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
+struct net_device *wl_cfg80211_get_remain_on_channel_ndev(struct bcm_cfg80211 *cfg);
+#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
+
+#ifdef WL_SUPPORT_ACS
+#define ACS_MSRMNT_DELAY 1000 /* dump_obss delay in ms */
+#define IOCTL_RETRY_COUNT 5
+#define CHAN_NOISE_DUMMY -80
+#define OBSS_TOKEN_IDX 15
+#define IBSS_TOKEN_IDX 15
+#define TX_TOKEN_IDX 14
+#define CTG_TOKEN_IDX 13
+#define PKT_TOKEN_IDX 15
+#define IDLE_TOKEN_IDX 12
+#endif /* WL_SUPPORT_ACS */
+
+extern int wl_cfg80211_get_ioctl_version(void);
+extern int wl_cfg80211_enable_roam_offload(struct net_device *dev, bool enable);
+
+#ifdef WL_NAN
+extern int wl_cfg80211_nan_cmd_handler(struct net_device *ndev, char *cmd,
+ int cmd_len);
+#endif /* WL_NAN */
+
#endif /* _wl_cfg80211_h_ */
--- /dev/null
+/*
+ * Linux cfg80211 driver - Dongle Host Driver (DHD) related
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: wl_cfg_btcoex.c 467328 2014-04-03 01:23:40Z $
+ */
+
+#include <net/rtnetlink.h>
+
+#include <bcmutils.h>
+#include <wldev_common.h>
+#include <wl_cfg80211.h>
+#include <dhd_cfg80211.h>
+#include <dngl_stats.h>
+#include <dhd.h>
+#include <dhdioctl.h>
+#include <wlioctl.h>
+
+#ifdef PKT_FILTER_SUPPORT
+extern uint dhd_pkt_filter_enable;
+extern uint dhd_master_mode;
+extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode);
+#endif
+
+struct btcoex_info {
+ struct timer_list timer;
+ u32 timer_ms;
+ u32 timer_on;
+ u32 ts_dhcp_start; /* ms timestamp recorded for dhcp time stats */
+ u32 ts_dhcp_ok; /* ms timestamp recorded for dhcp time stats */
+ bool dhcp_done; /* flag, indicates that host done with
+ * dhcp before t1/t2 expiration
+ */
+ s32 bt_state;
+ struct work_struct work;
+ struct net_device *dev;
+};
+
+static struct btcoex_info *btcoex_info_loc = NULL;
+
+/* TODO: clean up the BT-Coex code, it still has some legacy ioctl/iovar functions */
+
+/* use the new SCO/eSCO smart suppression */
+#define BT_DHCP_eSCO_FIX
+/* this flag boosts wifi pkt priority to max; caution: not fair to SCO */
+#define BT_DHCP_USE_FLAGS
+/* T1: start SCO/eSCO priority suppression */
+#define BT_DHCP_OPPR_WIN_TIME 2500
+/* T2: turn off SCO/eSCO suppression (timeout) */
+#define BT_DHCP_FLAG_FORCE_TIME 5500
+
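+/*
+ * BT-Coex DHCP state machine (driven by wl_cfg80211_bt_handler below):
+ * BT_DHCP_START: arm the T1 opportunity window and move to BT_DHCP_OPPR_WIN;
+ * BT_DHCP_OPPR_WIN: if DHCP is still running when T1 expires, boost wifi
+ * priority and arm the T2 force window; BT_DHCP_FLAG_FORCE_TIMEOUT: restore
+ * the default BT priority and fall back to BT_DHCP_IDLE.
+ */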
+enum wl_cfg80211_btcoex_status {
+ BT_DHCP_IDLE,
+ BT_DHCP_START,
+ BT_DHCP_OPPR_WIN,
+ BT_DHCP_FLAG_FORCE_TIMEOUT
+};
+
+/*
+ * get a named driver variable's uint register value and return an error indication
+ * calling example: dev_wlc_intvar_get_reg(dev, "btc_params", 66, &reg_value)
+ */
+static int
+dev_wlc_intvar_get_reg(struct net_device *dev, char *name,
+ uint reg, int *retval)
+{
+ union {
+ char buf[WLC_IOCTL_SMLEN];
+ int val;
+ } var;
+ int error;
+
+ bcm_mkiovar(name, (char *)(&reg), sizeof(reg),
+ (char *)(&var), sizeof(var.buf));
+ error = wldev_ioctl(dev, WLC_GET_VAR, (char *)(&var), sizeof(var.buf), false);
+
+ *retval = dtoh32(var.val);
+ return (error);
+}
+
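+/*
+ * pack a named iovar and its payload into a local buffer with bcm_mkiovar()
+ * and hand it to the dongle through the WLC_SET_VAR ioctl
+ */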
+static int
+dev_wlc_bufvar_set(struct net_device *dev, char *name, char *buf, int len)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
+ char ioctlbuf_local[1024];
+#else
+ static char ioctlbuf_local[1024];
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) */
+
+ bcm_mkiovar(name, buf, len, ioctlbuf_local, sizeof(ioctlbuf_local));
+
+ return (wldev_ioctl(dev, WLC_SET_VAR, ioctlbuf_local, sizeof(ioctlbuf_local), true));
+}
+/*
+ * set a named driver variable's uint register value and return an error indication
+ * calling example: dev_wlc_intvar_set_reg(dev, "btc_params", 66, value)
+ */
+static int
+dev_wlc_intvar_set_reg(struct net_device *dev, char *name, char *addr, char * val)
+{
+ char reg_addr[8];
+
+ memset(reg_addr, 0, sizeof(reg_addr));
+ memcpy((char *)&reg_addr[0], (char *)addr, 4);
+ memcpy((char *)&reg_addr[4], (char *)val, 4);
+
+ return (dev_wlc_bufvar_set(dev, name, (char *)&reg_addr[0], sizeof(reg_addr)));
+}
+
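+/*
+ * sample "btc_params" register 27 up to 12 times, 5 ms apart; if more than two
+ * samples report an (e)SCO packet type, report an active SCO/eSCO link
+ */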
+static bool btcoex_is_sco_active(struct net_device *dev)
+{
+ int ioc_res = 0;
+ bool res = FALSE;
+ int sco_id_cnt = 0;
+ int param27;
+ int i;
+
+ for (i = 0; i < 12; i++) {
+
+ ioc_res = dev_wlc_intvar_get_reg(dev, "btc_params", 27, &param27);
+
+ WL_TRACE(("sample[%d], btc params: 27:%x\n", i, param27));
+
+ if (ioc_res < 0) {
+ WL_ERR(("ioc read btc params error\n"));
+ break;
+ }
+
+ if ((param27 & 0x6) == 2) { /* count both sco & esco */
+ sco_id_cnt++;
+ }
+
+ if (sco_id_cnt > 2) {
+ WL_TRACE(("sco/esco detected, pkt id_cnt:%d samples:%d\n",
+ sco_id_cnt, i));
+ res = TRUE;
+ break;
+ }
+
+ OSL_SLEEP(5);
+ }
+
+ return res;
+}
+
+#if defined(BT_DHCP_eSCO_FIX)
+/* Enhanced BT COEX settings for eSCO compatibility during DHCP window */
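+/* trump_sco == TRUE: save btc_params 50/51/64/65/71 and override them with
+ * DHCP-friendly values; trump_sco == FALSE: restore the saved values (only if
+ * a previous save succeeded)
+ */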
+static int set_btc_esco_params(struct net_device *dev, bool trump_sco)
+{
+ static bool saved_status = FALSE;
+
+ char buf_reg50va_dhcp_on[8] =
+ { 50, 00, 00, 00, 0x22, 0x80, 0x00, 0x00 };
+ char buf_reg51va_dhcp_on[8] =
+ { 51, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
+ char buf_reg64va_dhcp_on[8] =
+ { 64, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
+ char buf_reg65va_dhcp_on[8] =
+ { 65, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
+ char buf_reg71va_dhcp_on[8] =
+ { 71, 00, 00, 00, 0x00, 0x00, 0x00, 0x00 };
+ uint32 regaddr;
+ static uint32 saved_reg50;
+ static uint32 saved_reg51;
+ static uint32 saved_reg64;
+ static uint32 saved_reg65;
+ static uint32 saved_reg71;
+
+ if (trump_sco) {
+ /* this should reduce eSCO aggressive retransmit
+ * w/o breaking it
+ */
+
+ /* 1st save current */
+ WL_TRACE(("Do new SCO/eSCO coex algo {save &"
+ "override}\n"));
+ if ((!dev_wlc_intvar_get_reg(dev, "btc_params", 50, &saved_reg50)) &&
+ (!dev_wlc_intvar_get_reg(dev, "btc_params", 51, &saved_reg51)) &&
+ (!dev_wlc_intvar_get_reg(dev, "btc_params", 64, &saved_reg64)) &&
+ (!dev_wlc_intvar_get_reg(dev, "btc_params", 65, &saved_reg65)) &&
+ (!dev_wlc_intvar_get_reg(dev, "btc_params", 71, &saved_reg71))) {
+ saved_status = TRUE;
+ WL_TRACE(("saved bt_params[50,51,64,65,71]:"
+ "0x%x 0x%x 0x%x 0x%x 0x%x\n",
+ saved_reg50, saved_reg51,
+ saved_reg64, saved_reg65, saved_reg71));
+ } else {
+ WL_ERR((":%s: save btc_params failed\n",
+ __FUNCTION__));
+ saved_status = FALSE;
+ return -1;
+ }
+
+ WL_TRACE(("override with [50,51,64,65,71]:"
+ "0x%x 0x%x 0x%x 0x%x 0x%x\n",
+ *(u32 *)(buf_reg50va_dhcp_on+4),
+ *(u32 *)(buf_reg51va_dhcp_on+4),
+ *(u32 *)(buf_reg64va_dhcp_on+4),
+ *(u32 *)(buf_reg65va_dhcp_on+4),
+ *(u32 *)(buf_reg71va_dhcp_on+4)));
+
+ dev_wlc_bufvar_set(dev, "btc_params",
+ (char *)&buf_reg50va_dhcp_on[0], 8);
+ dev_wlc_bufvar_set(dev, "btc_params",
+ (char *)&buf_reg51va_dhcp_on[0], 8);
+ dev_wlc_bufvar_set(dev, "btc_params",
+ (char *)&buf_reg64va_dhcp_on[0], 8);
+ dev_wlc_bufvar_set(dev, "btc_params",
+ (char *)&buf_reg65va_dhcp_on[0], 8);
+ dev_wlc_bufvar_set(dev, "btc_params",
+ (char *)&buf_reg71va_dhcp_on[0], 8);
+
+ saved_status = TRUE;
+ } else if (saved_status) {
+ /* restore previously saved bt params */
+ WL_TRACE(("Do new SCO/eSCO coex algo {save &"
+ "override}\n"));
+
+ regaddr = 50;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg50);
+ regaddr = 51;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg51);
+ regaddr = 64;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg64);
+ regaddr = 65;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg65);
+ regaddr = 71;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg71);
+
+ WL_TRACE(("restore bt_params[50,51,64,65,71]:"
+ "0x%x 0x%x 0x%x 0x%x 0x%x\n",
+ saved_reg50, saved_reg51, saved_reg64,
+ saved_reg65, saved_reg71));
+
+ saved_status = FALSE;
+ } else {
+ WL_ERR((":%s att to restore not saved BTCOEX params\n",
+ __FUNCTION__));
+ return -1;
+ }
+ return 0;
+}
+#endif /* BT_DHCP_eSCO_FIX */
+
+static void
+wl_cfg80211_bt_setflag(struct net_device *dev, bool set)
+{
+#if defined(BT_DHCP_USE_FLAGS)
+ char buf_flag7_dhcp_on[8] = { 7, 00, 00, 00, 0x1, 0x0, 0x00, 0x00 };
+ char buf_flag7_default[8] = { 7, 00, 00, 00, 0x0, 0x00, 0x00, 0x00};
+#endif
+
+
+#if defined(BT_DHCP_eSCO_FIX)
+ /* set = 1: save & turn on; set = 0: turn off & restore prev settings */
+ set_btc_esco_params(dev, set);
+#endif
+
+#if defined(BT_DHCP_USE_FLAGS)
+ WL_TRACE(("WI-FI priority boost via bt flags, set:%d\n", set));
+ if (set == TRUE)
+ /* Forcing bt_flag7 */
+ dev_wlc_bufvar_set(dev, "btc_flags",
+ (char *)&buf_flag7_dhcp_on[0],
+ sizeof(buf_flag7_dhcp_on));
+ else
+ /* Restoring default bt flag7 */
+ dev_wlc_bufvar_set(dev, "btc_flags",
+ (char *)&buf_flag7_default[0],
+ sizeof(buf_flag7_default));
+#endif
+}
+
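+/* timer callback: runs in softirq context, so only clear timer_on and defer the
+ * real state-machine work to the workqueue handler below
+ */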
+static void wl_cfg80211_bt_timerfunc(ulong data)
+{
+ struct btcoex_info *bt_local = (struct btcoex_info *)data;
+ WL_TRACE(("Enter\n"));
+ bt_local->timer_on = 0;
+ schedule_work(&bt_local->work);
+}
+
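+/* workqueue handler: advances the BT-Coex DHCP state machine, re-arming the
+ * timer for the T1/T2 windows and restoring the default BT priority once the
+ * force window ends or an unexpected state is seen
+ */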
+static void wl_cfg80211_bt_handler(struct work_struct *work)
+{
+ struct btcoex_info *btcx_inf;
+
+ btcx_inf = container_of(work, struct btcoex_info, work);
+
+ if (btcx_inf->timer_on) {
+ btcx_inf->timer_on = 0;
+ del_timer_sync(&btcx_inf->timer);
+ }
+
+ switch (btcx_inf->bt_state) {
+ case BT_DHCP_START:
+ /* DHCP started
+ * provide OPPORTUNITY window to get DHCP address
+ */
+ WL_TRACE(("bt_dhcp stm: started \n"));
+
+ btcx_inf->bt_state = BT_DHCP_OPPR_WIN;
+ mod_timer(&btcx_inf->timer,
+ jiffies + msecs_to_jiffies(BT_DHCP_OPPR_WIN_TIME));
+ btcx_inf->timer_on = 1;
+ break;
+
+ case BT_DHCP_OPPR_WIN:
+ if (btcx_inf->dhcp_done) {
+ WL_TRACE(("DHCP Done before T1 expiration\n"));
+ goto btc_coex_idle;
+ }
+
+ /* DHCP is not over yet, start lowering BT priority
+ * enforce btc_params + flags if necessary
+ */
+ WL_TRACE(("DHCP T1:%d expired\n", BT_DHCP_OPPR_WIN_TIME));
+ if (btcx_inf->dev)
+ wl_cfg80211_bt_setflag(btcx_inf->dev, TRUE);
+ btcx_inf->bt_state = BT_DHCP_FLAG_FORCE_TIMEOUT;
+ mod_timer(&btcx_inf->timer,
+ jiffies + msecs_to_jiffies(BT_DHCP_FLAG_FORCE_TIME));
+ btcx_inf->timer_on = 1;
+ break;
+
+ case BT_DHCP_FLAG_FORCE_TIMEOUT:
+ if (btcx_inf->dhcp_done) {
+ WL_TRACE(("DHCP Done before T2 expiration\n"));
+ } else {
+ /* No DHCP during T1+T2, restore BT priority */
+ WL_TRACE(("DHCP wait interval T2:%d msec expired\n",
+ BT_DHCP_FLAG_FORCE_TIME));
+ }
+
+ /* Restoring default bt priority */
+ if (btcx_inf->dev)
+ wl_cfg80211_bt_setflag(btcx_inf->dev, FALSE);
+btc_coex_idle:
+ btcx_inf->bt_state = BT_DHCP_IDLE;
+ btcx_inf->timer_on = 0;
+ break;
+
+ default:
+ WL_ERR(("error g_status=%d !!!\n", btcx_inf->bt_state));
+ if (btcx_inf->dev)
+ wl_cfg80211_bt_setflag(btcx_inf->dev, FALSE);
+ btcx_inf->bt_state = BT_DHCP_IDLE;
+ btcx_inf->timer_on = 0;
+ break;
+ }
+
+ net_os_wake_unlock(btcx_inf->dev);
+}
+
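+/* allocate the btcoex_info context, set up the DHCP coex timer and work item,
+ * and remember the context in btcoex_info_loc for the deinit path
+ */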
+void* wl_cfg80211_btcoex_init(struct net_device *ndev)
+{
+ struct btcoex_info *btco_inf = NULL;
+
+ btco_inf = kmalloc(sizeof(struct btcoex_info), GFP_KERNEL);
+ if (!btco_inf)
+ return NULL;
+
+ btco_inf->bt_state = BT_DHCP_IDLE;
+ btco_inf->ts_dhcp_start = 0;
+ btco_inf->ts_dhcp_ok = 0;
+ /* Set up timer for BT */
+ btco_inf->timer_ms = 10;
+ init_timer(&btco_inf->timer);
+ btco_inf->timer.data = (ulong)btco_inf;
+ btco_inf->timer.function = wl_cfg80211_bt_timerfunc;
+
+ btco_inf->dev = ndev;
+
+ INIT_WORK(&btco_inf->work, wl_cfg80211_bt_handler);
+
+ btcoex_info_loc = btco_inf;
+ return btco_inf;
+}
+
+void wl_cfg80211_btcoex_deinit(void)
+{
+ if (!btcoex_info_loc)
+ return;
+
+ if (btcoex_info_loc->timer_on) {
+ btcoex_info_loc->timer_on = 0;
+ del_timer_sync(&btcoex_info_loc->timer);
+ }
+
+ cancel_work_sync(&btcoex_info_loc->work);
+
+ kfree(btcoex_info_loc);
+}
+
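+/* handle the "BTCOEXMODE <n>" private command (issued by the upper layers around
+ * DHCP): "1" marks DHCP start - save btc_params 66/41/68 and, if an SCO/eSCO
+ * link is active, override them and start the coex timer; "2" marks DHCP done -
+ * stop the timer and restore the saved flags and registers
+ */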
+int wl_cfg80211_set_btcoex_dhcp(struct net_device *dev, dhd_pub_t *dhd, char *command)
+{
+
+ struct btcoex_info *btco_inf = btcoex_info_loc;
+ char powermode_val = 0;
+ char buf_reg66va_dhcp_on[8] = { 66, 00, 00, 00, 0x10, 0x27, 0x00, 0x00 };
+ char buf_reg41va_dhcp_on[8] = { 41, 00, 00, 00, 0x33, 0x00, 0x00, 0x00 };
+ char buf_reg68va_dhcp_on[8] = { 68, 00, 00, 00, 0x90, 0x01, 0x00, 0x00 };
+
+ uint32 regaddr;
+ static uint32 saved_reg66;
+ static uint32 saved_reg41;
+ static uint32 saved_reg68;
+ static bool saved_status = FALSE;
+
+ char buf_flag7_default[8] = { 7, 00, 00, 00, 0x0, 0x00, 0x00, 0x00};
+
+ /* Figure out the powermode command value */
+ strncpy((char *)&powermode_val, command + strlen("BTCOEXMODE") +1, 1);
+
+ if (strnicmp((char *)&powermode_val, "1", strlen("1")) == 0) {
+ WL_TRACE_HW4(("DHCP session starts\n"));
+
+#if defined(DHCP_SCAN_SUPPRESS)
+ /* Suppress scan during the DHCP */
+ wl_cfg80211_scan_suppress(dev, 1);
+#endif /* DHCP_SCAN_SUPPRESS */
+
+#ifdef PKT_FILTER_SUPPORT
+ dhd->dhcp_in_progress = 1;
+
+ if (dhd->early_suspended) {
+ WL_TRACE_HW4(("DHCP in progressing , disable packet filter!!!\n"));
+ dhd_enable_packet_filter(0, dhd);
+ }
+#endif
+
+ /* Retrieve and save original register values */
+ if ((saved_status == FALSE) &&
+ (!dev_wlc_intvar_get_reg(dev, "btc_params", 66, &saved_reg66)) &&
+ (!dev_wlc_intvar_get_reg(dev, "btc_params", 41, &saved_reg41)) &&
+ (!dev_wlc_intvar_get_reg(dev, "btc_params", 68, &saved_reg68))) {
+ saved_status = TRUE;
+ WL_TRACE(("Saved 0x%x 0x%x 0x%x\n",
+ saved_reg66, saved_reg41, saved_reg68));
+
+ /* Disable PM mode during dhcp session */
+
+ /* Start BT timer only for SCO connection */
+ if (btcoex_is_sco_active(dev)) {
+ /* btc_params 66 */
+ dev_wlc_bufvar_set(dev, "btc_params",
+ (char *)&buf_reg66va_dhcp_on[0],
+ sizeof(buf_reg66va_dhcp_on));
+ /* btc_params 41 0x33 */
+ dev_wlc_bufvar_set(dev, "btc_params",
+ (char *)&buf_reg41va_dhcp_on[0],
+ sizeof(buf_reg41va_dhcp_on));
+ /* btc_params 68 0x190 */
+ dev_wlc_bufvar_set(dev, "btc_params",
+ (char *)&buf_reg68va_dhcp_on[0],
+ sizeof(buf_reg68va_dhcp_on));
+ saved_status = TRUE;
+
+ btco_inf->bt_state = BT_DHCP_START;
+ btco_inf->timer_on = 1;
+ mod_timer(&btco_inf->timer, btco_inf->timer.expires);
+ WL_TRACE(("enable BT DHCP Timer\n"));
+ }
+ }
+ else if (saved_status == TRUE) {
+ WL_ERR(("was called w/o DHCP OFF. Continue\n"));
+ }
+ }
+ else if (strnicmp((char *)&powermode_val, "2", strlen("2")) == 0) {
+
+
+#if defined(DHCP_SCAN_SUPPRESS)
+ /* Since DHCP is complete, enable the scan back */
+ wl_cfg80211_scan_suppress(dev, 0);
+#endif /* DHCP_SCAN_SUPPRESS */
+
+#ifdef PKT_FILTER_SUPPORT
+ dhd->dhcp_in_progress = 0;
+ WL_TRACE_HW4(("DHCP is complete \n"));
+
+ /* Enable packet filtering */
+ if (dhd->early_suspended) {
+ WL_TRACE_HW4(("DHCP is complete , enable packet filter!!!\n"));
+ dhd_enable_packet_filter(1, dhd);
+ }
+#endif /* PKT_FILTER_SUPPORT */
+
+ /* Restoring PM mode */
+
+ /* Stop any bt timer because DHCP session is done */
+ WL_TRACE(("disable BT DHCP Timer\n"));
+ if (btco_inf->timer_on) {
+ btco_inf->timer_on = 0;
+ del_timer_sync(&btco_inf->timer);
+
+ if (btco_inf->bt_state != BT_DHCP_IDLE) {
+ /* need to restore original btc flags & extra btc params */
+ WL_TRACE(("bt->bt_state:%d\n", btco_inf->bt_state));
+ /* wake up btcoex thread to restore btlags+params */
+ schedule_work(&btco_inf->work);
+ }
+ }
+
+ /* Restoring btc_flags parameter anyway */
+ if (saved_status == TRUE)
+ dev_wlc_bufvar_set(dev, "btc_flags",
+ (char *)&buf_flag7_default[0], sizeof(buf_flag7_default));
+
+ /* Restore original values */
+ if (saved_status == TRUE) {
+ regaddr = 66;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg66);
+ regaddr = 41;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg41);
+ regaddr = 68;
+ dev_wlc_intvar_set_reg(dev, "btc_params",
+ (char *)&regaddr, (char *)&saved_reg68);
+
+ WL_TRACE(("restore regs {66,41,68} <- 0x%x 0x%x 0x%x\n",
+ saved_reg66, saved_reg41, saved_reg68));
+ }
+ saved_status = FALSE;
+
+ }
+ else {
+ WL_ERR(("Unkwown yet power setting, ignored\n"));
+ }
+
+ snprintf(command, 3, "OK");
+
+ return (strlen("OK"));
+}
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: wl_cfgp2p.c 419821 2013-08-22 21:43:26Z $
+ * $Id: wl_cfgp2p.c 490694 2014-07-11 14:37:00Z $
*
*/
#include <typedefs.h>
#include <bcmendian.h>
#include <proto/ethernet.h>
#include <proto/802.11.h>
+#include <net/rtnetlink.h>
#include <wl_cfg80211.h>
#include <wl_cfgp2p.h>
wl_cfgp2p_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len, const u8 *oui, u32 oui_len, u8 type);
static u32
-wl_cfgp2p_vndr_ie(struct wl_priv *wl, u8 *iebuf, s32 pktflag,
+wl_cfgp2p_vndr_ie(struct bcm_cfg80211 *cfg, u8 *iebuf, s32 pktflag,
s8 *oui, s32 ie_id, s8 *data, s32 datalen, const s8* add_del_cmd);
-static s32 wl_cfgp2p_cancel_listen(struct wl_priv *wl, struct net_device *ndev,
+static s32 wl_cfgp2p_cancel_listen(struct bcm_cfg80211 *cfg, struct net_device *ndev,
struct wireless_dev *wdev, bool notify);
#if defined(WL_ENABLE_P2P_IF)
};
#endif /* WL_ENABLE_P2P_IF */
+#if defined(WL_NEWCFG_PRIVCMD_SUPPORT)
+static int wl_cfgp2p_start_xmit(struct sk_buff *skb, struct net_device *ndev);
+static int wl_cfgp2p_do_ioctl(struct net_device *net, struct ifreq *ifr, int cmd);
+
+static int wl_cfgp2p_if_dummy(struct net_device *net)
+{
+ return 0;
+}
+
+static const struct net_device_ops wl_cfgp2p_if_ops = {
+ .ndo_open = wl_cfgp2p_if_dummy,
+ .ndo_stop = wl_cfgp2p_if_dummy,
+ .ndo_do_ioctl = wl_cfgp2p_do_ioctl,
+ .ndo_start_xmit = wl_cfgp2p_start_xmit,
+};
+#endif /* WL_NEWCFG_PRIVCMD_SUPPORT */
+
bool wl_cfgp2p_is_pub_action(void *frame, u32 frame_len)
{
wifi_p2p_pub_act_frame_t *pact_frm;
if (sd_act_frm->category != P2PSD_ACTION_CATEGORY)
return false;
-#ifdef WL11U
- if (sd_act_frm->action == P2PSD_ACTION_ID_GAS_IRESP)
- return wl_cfgp2p_find_gas_subtype(P2PSD_GAS_OUI_SUBTYPE,
- (u8 *)sd_act_frm->query_data + GAS_RESP_OFFSET,
- frame_len);
-
- else if (sd_act_frm->action == P2PSD_ACTION_ID_GAS_CRESP)
- return wl_cfgp2p_find_gas_subtype(P2PSD_GAS_OUI_SUBTYPE,
- (u8 *)sd_act_frm->query_data + GAS_CRESP_OFFSET,
- frame_len);
- else if (sd_act_frm->action == P2PSD_ACTION_ID_GAS_IREQ ||
- sd_act_frm->action == P2PSD_ACTION_ID_GAS_CREQ)
- return true;
- else
- return false;
-#else
if (sd_act_frm->action == P2PSD_ACTION_ID_GAS_IREQ ||
sd_act_frm->action == P2PSD_ACTION_ID_GAS_IRESP ||
sd_act_frm->action == P2PSD_ACTION_ID_GAS_CREQ ||
return true;
else
return false;
-#endif /* WL11U */
}
void wl_cfgp2p_print_actframe(bool tx, void *frame, u32 frame_len, u32 channel)
{
*
*/
s32
-wl_cfgp2p_init_priv(struct wl_priv *wl)
+wl_cfgp2p_init_priv(struct bcm_cfg80211 *cfg)
{
- if (!(wl->p2p = kzalloc(sizeof(struct p2p_info), GFP_KERNEL))) {
+ if (!(cfg->p2p = kzalloc(sizeof(struct p2p_info), GFP_KERNEL))) {
CFGP2P_ERR(("struct p2p_info allocation failed\n"));
return -ENOMEM;
}
#define INIT_IE(IE_TYPE, BSS_TYPE) \
do { \
- memset(wl_to_p2p_bss_saved_ie(wl, BSS_TYPE).p2p_ ## IE_TYPE ## _ie, 0, \
- sizeof(wl_to_p2p_bss_saved_ie(wl, BSS_TYPE).p2p_ ## IE_TYPE ## _ie)); \
- wl_to_p2p_bss_saved_ie(wl, BSS_TYPE).p2p_ ## IE_TYPE ## _ie_len = 0; \
+ memset(wl_to_p2p_bss_saved_ie(cfg, BSS_TYPE).p2p_ ## IE_TYPE ## _ie, 0, \
+ sizeof(wl_to_p2p_bss_saved_ie(cfg, BSS_TYPE).p2p_ ## IE_TYPE ## _ie)); \
+ wl_to_p2p_bss_saved_ie(cfg, BSS_TYPE).p2p_ ## IE_TYPE ## _ie_len = 0; \
} while (0);
INIT_IE(probe_req, P2PAPI_BSSCFG_PRIMARY);
INIT_IE(assoc_res, P2PAPI_BSSCFG_CONNECTION);
INIT_IE(beacon, P2PAPI_BSSCFG_CONNECTION);
#undef INIT_IE
- wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_PRIMARY) = wl_to_prmry_ndev(wl);
- wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_PRIMARY) = 0;
- wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_DEVICE) = NULL;
- wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE) = 0;
- wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION) = NULL;
- wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_CONNECTION) = 0;
+ wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY) = bcmcfg_to_prmry_ndev(cfg);
+ wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_PRIMARY) = 0;
+ wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE) = NULL;
+ wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) = 0;
+ wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION) = NULL;
+ wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_CONNECTION) = 0;
return BCME_OK;
}
*
*/
void
-wl_cfgp2p_deinit_priv(struct wl_priv *wl)
+wl_cfgp2p_deinit_priv(struct bcm_cfg80211 *cfg)
{
CFGP2P_DBG(("In\n"));
- if (wl->p2p) {
- kfree(wl->p2p);
- wl->p2p = NULL;
+ if (cfg->p2p) {
+ kfree(cfg->p2p);
+ cfg->p2p = NULL;
}
- wl->p2p_supported = 0;
+ cfg->p2p_supported = 0;
}
/*
* Set P2P functions into firmware
*/
s32
-wl_cfgp2p_set_firm_p2p(struct wl_priv *wl)
+wl_cfgp2p_set_firm_p2p(struct bcm_cfg80211 *cfg)
{
- struct net_device *ndev = wl_to_prmry_ndev(wl);
+ struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
struct ether_addr null_eth_addr = { { 0, 0, 0, 0, 0, 0 } };
s32 ret = BCME_OK;
s32 val = 0;
* firmware for P2P device address
*/
ret = wldev_iovar_setbuf_bsscfg(ndev, "p2p_da_override", &null_eth_addr,
- sizeof(null_eth_addr), wl->ioctl_buf, WLC_IOCTL_MAXLEN, 0, &wl->ioctl_buf_sync);
+ sizeof(null_eth_addr), cfg->ioctl_buf, WLC_IOCTL_MAXLEN, 0, &cfg->ioctl_buf_sync);
if (ret && ret != BCME_UNSUPPORTED) {
CFGP2P_ERR(("failed to update device address ret %d\n", ret));
}
* Returns 0 if success.
*/
s32
-wl_cfgp2p_ifadd(struct wl_priv *wl, struct ether_addr *mac, u8 if_type,
+wl_cfgp2p_ifadd(struct bcm_cfg80211 *cfg, struct ether_addr *mac, u8 if_type,
chanspec_t chspec)
{
wl_p2p_if_t ifreq;
s32 err;
u32 scb_timeout = WL_SCB_TIMEOUT;
- struct net_device *ndev = wl_to_prmry_ndev(wl);
+ struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
ifreq.type = if_type;
ifreq.chspec = chspec;
memcpy(ifreq.addr.octet, mac->octet, sizeof(ifreq.addr.octet));
- CFGP2P_DBG(("---wl p2p_ifadd "MACDBG" %s %u\n",
+ CFGP2P_DBG(("---cfg p2p_ifadd "MACDBG" %s %u\n",
MAC2STRDBG(ifreq.addr.octet),
(if_type == WL_P2P_IF_GO) ? "go" : "client",
(chspec & WL_CHANSPEC_CHAN_MASK) >> WL_CHANSPEC_CHAN_SHIFT));
err = wldev_iovar_setbuf(ndev, "p2p_ifadd", &ifreq, sizeof(ifreq),
- wl->ioctl_buf, WLC_IOCTL_MAXLEN, &wl->ioctl_buf_sync);
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
if (unlikely(err < 0))
- printk("'wl p2p_ifadd' error %d\n", err);
+ printk("'cfg p2p_ifadd' error %d\n", err);
else if (if_type == WL_P2P_IF_GO) {
err = wldev_ioctl(ndev, WLC_SET_SCB_TIMEOUT, &scb_timeout, sizeof(u32), true);
if (unlikely(err < 0))
- printk("'wl scb_timeout' error %d\n", err);
+ printk("'cfg scb_timeout' error %d\n", err);
}
return err;
}
* Returns 0 if success.
*/
s32
-wl_cfgp2p_ifdisable(struct wl_priv *wl, struct ether_addr *mac)
+wl_cfgp2p_ifdisable(struct bcm_cfg80211 *cfg, struct ether_addr *mac)
{
s32 ret;
- struct net_device *netdev = wl_to_prmry_ndev(wl);
+ struct net_device *netdev = bcmcfg_to_prmry_ndev(cfg);
- CFGP2P_INFO(("------primary idx %d : wl p2p_ifdis "MACDBG"\n",
+ CFGP2P_INFO(("------primary idx %d : cfg p2p_ifdis "MACDBG"\n",
netdev->ifindex, MAC2STRDBG(mac->octet)));
ret = wldev_iovar_setbuf(netdev, "p2p_ifdis", mac, sizeof(*mac),
- wl->ioctl_buf, WLC_IOCTL_MAXLEN, &wl->ioctl_buf_sync);
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
if (unlikely(ret < 0)) {
- printk("'wl p2p_ifdis' error %d\n", ret);
+ printk("'cfg p2p_ifdis' error %d\n", ret);
}
return ret;
}
* Returns 0 if success.
*/
s32
-wl_cfgp2p_ifdel(struct wl_priv *wl, struct ether_addr *mac)
+wl_cfgp2p_ifdel(struct bcm_cfg80211 *cfg, struct ether_addr *mac)
{
s32 ret;
- struct net_device *netdev = wl_to_prmry_ndev(wl);
+ struct net_device *netdev = bcmcfg_to_prmry_ndev(cfg);
- CFGP2P_INFO(("------primary idx %d : wl p2p_ifdel "MACDBG"\n",
+ CFGP2P_INFO(("------primary idx %d : cfg p2p_ifdel "MACDBG"\n",
netdev->ifindex, MAC2STRDBG(mac->octet)));
ret = wldev_iovar_setbuf(netdev, "p2p_ifdel", mac, sizeof(*mac),
- wl->ioctl_buf, WLC_IOCTL_MAXLEN, &wl->ioctl_buf_sync);
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
if (unlikely(ret < 0)) {
- printk("'wl p2p_ifdel' error %d\n", ret);
+ printk("'cfg p2p_ifdel' error %d\n", ret);
}
return ret;
}
* Returns 0 if success.
*/
s32
-wl_cfgp2p_ifchange(struct wl_priv *wl, struct ether_addr *mac, u8 if_type,
+wl_cfgp2p_ifchange(struct bcm_cfg80211 *cfg, struct ether_addr *mac, u8 if_type,
chanspec_t chspec)
{
wl_p2p_if_t ifreq;
s32 err;
u32 scb_timeout = WL_SCB_TIMEOUT;
- struct net_device *netdev = wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION);
+ struct net_device *netdev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION);
ifreq.type = if_type;
ifreq.chspec = chspec;
memcpy(ifreq.addr.octet, mac->octet, sizeof(ifreq.addr.octet));
- CFGP2P_INFO(("---wl p2p_ifchange "MACDBG" %s %u"
+ CFGP2P_INFO(("---cfg p2p_ifchange "MACDBG" %s %u"
" chanspec 0x%04x\n", MAC2STRDBG(ifreq.addr.octet),
(if_type == WL_P2P_IF_GO) ? "go" : "client",
(chspec & WL_CHANSPEC_CHAN_MASK) >> WL_CHANSPEC_CHAN_SHIFT,
ifreq.chspec));
err = wldev_iovar_setbuf(netdev, "p2p_ifupd", &ifreq, sizeof(ifreq),
- wl->ioctl_buf, WLC_IOCTL_MAXLEN, &wl->ioctl_buf_sync);
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
if (unlikely(err < 0)) {
- printk("'wl p2p_ifupd' error %d\n", err);
+ printk("'cfg p2p_ifupd' error %d\n", err);
} else if (if_type == WL_P2P_IF_GO) {
err = wldev_ioctl(netdev, WLC_SET_SCB_TIMEOUT, &scb_timeout, sizeof(u32), true);
if (unlikely(err < 0))
- printk("'wl scb_timeout' error %d\n", err);
+ printk("'cfg scb_timeout' error %d\n", err);
}
return err;
}
* Returns 0 if success.
*/
s32
-wl_cfgp2p_ifidx(struct wl_priv *wl, struct ether_addr *mac, s32 *index)
+wl_cfgp2p_ifidx(struct bcm_cfg80211 *cfg, struct ether_addr *mac, s32 *index)
{
s32 ret;
u8 getbuf[64];
- struct net_device *dev = wl_to_prmry_ndev(wl);
+ struct net_device *dev = bcmcfg_to_prmry_ndev(cfg);
- CFGP2P_INFO(("---wl p2p_if "MACDBG"\n", MAC2STRDBG(mac->octet)));
+ CFGP2P_INFO(("---cfg p2p_if "MACDBG"\n", MAC2STRDBG(mac->octet)));
ret = wldev_iovar_getbuf_bsscfg(dev, "p2p_if", mac, sizeof(*mac), getbuf,
- sizeof(getbuf), wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_PRIMARY), NULL);
+ sizeof(getbuf), wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_PRIMARY), NULL);
if (ret == 0) {
memcpy(index, getbuf, sizeof(s32));
- CFGP2P_INFO(("---wl p2p_if ==> %d\n", *index));
+ CFGP2P_INFO(("---cfg p2p_if ==> %d\n", *index));
}
return ret;
}
static s32
-wl_cfgp2p_set_discovery(struct wl_priv *wl, s32 on)
+wl_cfgp2p_set_discovery(struct bcm_cfg80211 *cfg, s32 on)
{
s32 ret = BCME_OK;
- struct net_device *ndev = wl_to_prmry_ndev(wl);
+ struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
CFGP2P_DBG(("enter\n"));
ret = wldev_iovar_setint(ndev, "p2p_disc", on);
*/
s32
-wl_cfgp2p_set_p2p_mode(struct wl_priv *wl, u8 mode, u32 channel, u16 listen_ms, int bssidx)
+wl_cfgp2p_set_p2p_mode(struct bcm_cfg80211 *cfg, u8 mode, u32 channel, u16 listen_ms, int bssidx)
{
wl_p2p_disc_st_t discovery_mode;
s32 ret;
return -1;
}
- dev = wl_cfgp2p_find_ndev(wl, bssidx);
+ dev = wl_cfgp2p_find_ndev(cfg, bssidx);
if (unlikely(dev == NULL)) {
CFGP2P_ERR(("bssidx %d is not assigned\n", bssidx));
return BCME_NOTFOUND;
discovery_mode.chspec = wl_ch_host_to_driver(channel);
discovery_mode.dwell = listen_ms;
ret = wldev_iovar_setbuf_bsscfg(dev, "p2p_state", &discovery_mode,
- sizeof(discovery_mode), wl->ioctl_buf, WLC_IOCTL_MAXLEN,
- bssidx, &wl->ioctl_buf_sync);
+ sizeof(discovery_mode), cfg->ioctl_buf, WLC_IOCTL_MAXLEN,
+ bssidx, &cfg->ioctl_buf_sync);
return ret;
}
/* Get the index of the P2P Discovery BSS */
static s32
-wl_cfgp2p_get_disc_idx(struct wl_priv *wl, s32 *index)
+wl_cfgp2p_get_disc_idx(struct bcm_cfg80211 *cfg, s32 *index)
{
s32 ret;
- struct net_device *dev = wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_PRIMARY);
+ struct net_device *dev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY);
ret = wldev_iovar_getint(dev, "p2p_dev", index);
CFGP2P_INFO(("p2p_dev bsscfg_idx=%d ret=%d\n", *index, ret));
}
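+/* Initialize P2P Discovery
+ * Parameters :
+ * @cfg : wl_private data
+ * Creates the discovery bsscfg in the firmware, records its bsscfg index and
+ * net device, and sets the initial discovery state to SCAN.
+ * Returns 0 if success.
+ */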
s32
-wl_cfgp2p_init_discovery(struct wl_priv *wl)
+wl_cfgp2p_init_discovery(struct bcm_cfg80211 *cfg)
{
s32 index = 0;
CFGP2P_DBG(("enter\n"));
- if (wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE) != 0) {
+ if (wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) > 0) {
CFGP2P_ERR(("do nothing, already initialized\n"));
return ret;
}
- ret = wl_cfgp2p_set_discovery(wl, 1);
+ ret = wl_cfgp2p_set_discovery(cfg, 1);
if (ret < 0) {
CFGP2P_ERR(("set discover error\n"));
return ret;
}
/* Enable P2P Discovery in the WL Driver */
- ret = wl_cfgp2p_get_disc_idx(wl, &index);
+ ret = wl_cfgp2p_get_disc_idx(cfg, &index);
if (ret < 0) {
return ret;
}
- wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_DEVICE) =
- wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_PRIMARY);
- wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE) = index;
+ wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE) =
+ wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY);
+ wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) = index;
/* Set the initial discovery state to SCAN */
- ret = wl_cfgp2p_set_p2p_mode(wl, WL_P2P_DISC_ST_SCAN, 0, 0,
- wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE));
+ ret = wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0,
+ wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE));
if (unlikely(ret != 0)) {
CFGP2P_ERR(("unable to set WL_P2P_DISC_ST_SCAN\n"));
- wl_cfgp2p_set_discovery(wl, 0);
- wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE) = 0;
- wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_DEVICE) = NULL;
+ wl_cfgp2p_set_discovery(cfg, 0);
+ wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) = 0;
+ wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE) = NULL;
return 0;
}
return ret;
/* Deinitialize P2P Discovery
* Parameters :
- * @wl : wl_private data
+ * @cfg : wl_private data
* Returns 0 if succes
*/
static s32
-wl_cfgp2p_deinit_discovery(struct wl_priv *wl)
+wl_cfgp2p_deinit_discovery(struct bcm_cfg80211 *cfg)
{
s32 ret = BCME_OK;
CFGP2P_DBG(("enter\n"));
- if (wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE) == 0) {
+ if (wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) == 0) {
CFGP2P_ERR(("do nothing, not initialized\n"));
return -1;
}
/* Set the discovery state to SCAN */
- ret = wl_cfgp2p_set_p2p_mode(wl, WL_P2P_DISC_ST_SCAN, 0, 0,
- wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE));
+ ret = wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0,
+ wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE));
/* Disable P2P discovery in the WL driver (deletes the discovery BSSCFG) */
- ret = wl_cfgp2p_set_discovery(wl, 0);
+ ret = wl_cfgp2p_set_discovery(cfg, 0);
/* Clear our saved WPS and P2P IEs for the discovery BSS. The driver
* deleted these IEs when wl_cfgp2p_set_discovery() deleted the discovery
/* Clear the saved bsscfg index of the discovery BSSCFG to indicate we
* have no discovery BSS.
*/
- wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE) = WL_INVALID;
- wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_DEVICE) = NULL;
+ wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) = WL_INVALID;
+ wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE) = NULL;
return ret;
}
/* Enable P2P Discovery
* Parameters:
- * @wl : wl_private data
+ * @cfg : wl_private data
* @ie : probe request ie (WPS IE + P2P IE)
* @ie_len : probe request ie length
* Returns 0 if success.
*/
s32
-wl_cfgp2p_enable_discovery(struct wl_priv *wl, struct net_device *dev,
+wl_cfgp2p_enable_discovery(struct bcm_cfg80211 *cfg, struct net_device *dev,
const u8 *ie, u32 ie_len)
{
s32 ret = BCME_OK;
s32 bssidx;
- if (wl_to_prmry_ndev(wl) == dev) {
- bssidx = wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE);
- } else if (wl_cfgp2p_find_idx(wl, dev, &bssidx) != BCME_OK) {
- WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
- return BCME_ERROR;
- }
- if (wl_get_p2p_status(wl, DISCOVERY_ON)) {
+ if (wl_get_p2p_status(cfg, DISCOVERY_ON)) {
CFGP2P_INFO((" DISCOVERY is already initialized, we have nothing to do\n"));
goto set_ie;
}
- wl_set_p2p_status(wl, DISCOVERY_ON);
+ wl_set_p2p_status(cfg, DISCOVERY_ON);
CFGP2P_DBG(("enter\n"));
- ret = wl_cfgp2p_init_discovery(wl);
+ ret = wl_cfgp2p_init_discovery(cfg);
if (unlikely(ret < 0)) {
CFGP2P_ERR((" init discovery error %d\n", ret));
goto exit;
* P2P probe responses have the privacy bit set in the 802.11 WPA IE.
* Some peer devices may not initiate WPS with us if this bit is not set.
*/
- ret = wldev_iovar_setint_bsscfg(wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_DEVICE),
- "wsec", AES_ENABLED, wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE));
+ ret = wldev_iovar_setint_bsscfg(wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE),
+ "wsec", AES_ENABLED, wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE));
if (unlikely(ret < 0)) {
CFGP2P_ERR((" wsec error %d\n", ret));
}
set_ie:
if (ie_len) {
- ret = wl_cfgp2p_set_management_ie(wl, dev,
+ if (bcmcfg_to_prmry_ndev(cfg) == dev) {
+ bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
+ } else if (wl_cfgp2p_find_idx(cfg, dev, &bssidx) != BCME_OK) {
+ WL_ERR(("Find p2p index from dev(%p) failed\n", dev));
+ return BCME_ERROR;
+ }
+
+ ret = wl_cfgp2p_set_management_ie(cfg, dev,
bssidx,
VNDR_IE_PRBREQ_FLAG, ie, ie_len);
/* Disable P2P Discovery
* Parameters:
- * @wl : wl_private_data
+ * @cfg : wl_private_data
* Returns 0 if success.
*/
s32
-wl_cfgp2p_disable_discovery(struct wl_priv *wl)
+wl_cfgp2p_disable_discovery(struct bcm_cfg80211 *cfg)
{
s32 ret = BCME_OK;
CFGP2P_DBG((" enter\n"));
- wl_clr_p2p_status(wl, DISCOVERY_ON);
+ wl_clr_p2p_status(cfg, DISCOVERY_ON);
- if(!wl->p2p) { // terence 20130113: Fix for p2p NULL pointer
+ if(!cfg->p2p) { // terence 20130113: Fix for p2p NULL pointer
+ ret = BCME_ERROR;
CFGP2P_ERR(("wl->p2p is NULL\n"));
goto exit;
}
- if (wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE) == 0) {
+ if (wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE) == 0) {
CFGP2P_ERR((" do nothing, not initialized\n"));
goto exit;
}
- ret = wl_cfgp2p_set_p2p_mode(wl, WL_P2P_DISC_ST_SCAN, 0, 0,
- wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE));
+ ret = wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0,
+ wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE));
if (unlikely(ret < 0)) {
/* Do a scan abort to stop the driver's scan engine in case it is still
* waiting out an action frame tx dwell time.
*/
- wl_clr_p2p_status(wl, DISCOVERY_ON);
- ret = wl_cfgp2p_deinit_discovery(wl);
+ wl_clr_p2p_status(cfg, DISCOVERY_ON);
+ ret = wl_cfgp2p_deinit_discovery(cfg);
exit:
return ret;
}
s32
-wl_cfgp2p_escan(struct wl_priv *wl, struct net_device *dev, u16 active,
+wl_cfgp2p_escan(struct bcm_cfg80211 *cfg, struct net_device *dev, u16 active,
u32 num_chans, u16 *channels,
s32 search_state, u16 action, u32 bssidx, struct ether_addr *tx_dst_addr,
p2p_scan_purpose_t p2p_scan_purpose)
#define P2PAPI_SCAN_NPROBS_TIME_MS 30
#define P2PAPI_SCAN_AF_SEARCH_DWELL_TIME_MS 100
- struct net_device *pri_dev = wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_PRIMARY);
+ struct net_device *pri_dev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_PRIMARY);
/* Allocate scan params which need space for 3 channels and 0 ssids */
eparams_size = (WL_SCAN_PARAMS_FIXED_SIZE +
OFFSETOF(wl_escan_params_t, params)) +
return -1;
}
memset(memblk, 0, memsize);
- memset(wl->ioctl_buf, 0, WLC_IOCTL_MAXLEN);
+ memset(cfg->ioctl_buf, 0, WLC_IOCTL_MAXLEN);
if (search_state == WL_P2P_DISC_ST_SEARCH) {
/*
 * If we are in SEARCH STATE, we don't need to set the SSID explicitly
* because dongle use P2P WILDCARD internally by default
*/
- wl_cfgp2p_set_p2p_mode(wl, WL_P2P_DISC_ST_SEARCH, 0, 0, bssidx);
+ wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SEARCH, 0, 0, bssidx);
/* use null ssid */
ssid.SSID_len = 0;
memset(&ssid.SSID, 0, sizeof(ssid.SSID));
 * we have to set the SSID to the P2P WILDCARD because
 * otherwise we would just do a broadcast scan
*/
- wl_cfgp2p_set_p2p_mode(wl, WL_P2P_DISC_ST_SCAN, 0, 0, bssidx);
+ wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0, bssidx);
/* use wild card ssid */
ssid.SSID_len = WL_P2P_WILDCARD_SSID_LEN;
memset(&ssid.SSID, 0, sizeof(ssid.SSID));
switch (p2p_scan_purpose) {
case P2P_SCAN_SOCIAL_CHANNEL:
- eparams->params.active_time = htod32(P2PAPI_SCAN_SOCIAL_DWELL_TIME_MS);
+ eparams->params.active_time = htod32(P2PAPI_SCAN_SOCIAL_DWELL_TIME_MS);
break;
case P2P_SCAN_AFX_PEER_NORMAL:
case P2P_SCAN_AFX_PEER_REDUCED:
- eparams->params.active_time = htod32(P2PAPI_SCAN_AF_SEARCH_DWELL_TIME_MS);
+ eparams->params.active_time = htod32(P2PAPI_SCAN_AF_SEARCH_DWELL_TIME_MS);
break;
case P2P_SCAN_CONNECT_TRY:
eparams->params.active_time = htod32(WL_SCAN_CONNECT_DWELL_TIME_MS);
break;
default :
- if (wl_get_drv_status_all(wl, CONNECTED))
- eparams->params.active_time = -1;
- else
- eparams->params.active_time = htod32(P2PAPI_SCAN_DWELL_TIME_MS);
+ if (wl_get_drv_status_all(cfg, CONNECTED))
+ eparams->params.active_time = -1;
+ else
+ eparams->params.active_time = htod32(P2PAPI_SCAN_DWELL_TIME_MS);
break;
}
eparams->params.nprobes = htod32(eparams->params.active_time /
WL_SCAN_JOIN_PROBE_INTERVAL_MS);
else
- eparams->params.nprobes = htod32((eparams->params.active_time /
- P2PAPI_SCAN_NPROBS_TIME_MS));
+ eparams->params.nprobes = htod32((eparams->params.active_time /
+ P2PAPI_SCAN_NPROBS_TIME_MS));
if (eparams->params.nprobes <= 0)
}
eparams->version = htod32(ESCAN_REQ_VERSION);
eparams->action = htod16(action);
- wl_escan_set_sync_id(eparams->sync_id, wl);
+ wl_escan_set_sync_id(eparams->sync_id, cfg);
+ wl_escan_set_type(cfg, WL_SCANTYPE_P2P);
CFGP2P_INFO(("SCAN CHANNELS : "));
for (i = 0; i < num_chans; i++) {
CFGP2P_INFO(("\n"));
ret = wldev_iovar_setbuf_bsscfg(pri_dev, "p2p_scan",
- memblk, memsize, wl->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &wl->ioctl_buf_sync);
+ memblk, memsize, cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
if (ret == BCME_OK)
- wl_set_p2p_status(wl, SCANNING);
+ wl_set_p2p_status(cfg, SCANNING);
return ret;
}
/* search function to reach at common channel to send action frame
* Parameters:
- * @wl : wl_private data
+ * @cfg : wl_private data
* @ndev : net device for bssidx
* @bssidx : bssidx for BSS
* Returns 0 if success.
*/
s32
-wl_cfgp2p_act_frm_search(struct wl_priv *wl, struct net_device *ndev,
+wl_cfgp2p_act_frm_search(struct bcm_cfg80211 *cfg, struct net_device *ndev,
s32 bssidx, s32 channel, struct ether_addr *tx_dst_addr)
{
s32 ret = 0;
u32 chan_cnt = 0;
u16 *default_chan_list = NULL;
p2p_scan_purpose_t p2p_scan_purpose = P2P_SCAN_AFX_PEER_NORMAL;
- if (!p2p_is_on(wl) || ndev == NULL || bssidx == WL_INVALID)
+ if (!p2p_is_on(cfg) || ndev == NULL || bssidx == WL_INVALID)
return -BCME_ERROR;
- CFGP2P_ERR((" Enter\n"));
- if (bssidx == wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_PRIMARY))
- bssidx = wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE);
+ WL_TRACE_HW4((" Enter\n"));
+ if (bssidx == wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_PRIMARY))
+ bssidx = wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE);
if (channel)
chan_cnt = AF_PEER_SEARCH_CNT;
else
default_chan_list[1] = SOCIAL_CHAN_2;
default_chan_list[2] = SOCIAL_CHAN_3;
}
-
- ret = wl_cfgp2p_escan(wl, ndev, true, chan_cnt,
+ ret = wl_cfgp2p_escan(cfg, ndev, true, chan_cnt,
default_chan_list, WL_P2P_DISC_ST_SEARCH,
- WL_SCAN_ACTION_START, bssidx, tx_dst_addr, p2p_scan_purpose);
+ WL_SCAN_ACTION_START, bssidx, NULL, p2p_scan_purpose);
kfree(default_chan_list);
exit:
return ret;
remained_len = (s32)len;
memset(vndr_ies, 0, sizeof(*vndr_ies));
- WL_INFO(("---> len %d\n", len));
+ WL_INFORM(("---> len %d\n", len));
ie = (bcm_tlv_t *) parse;
if (!bcm_valid_tlv(ie, remained_len))
ie = NULL;
/* Delete and Set a management vndr ie to firmware
* Parameters:
- * @wl : wl_private data
+ * @cfg : wl_private data
* @ndev : net device for bssidx
* @bssidx : bssidx for BSS
* @pktflag : packet flag for IE (VNDR_IE_PRBREQ_FLAG,VNDR_IE_PRBRSP_FLAG, VNDR_IE_ASSOCRSP_FLAG,
*/
s32
-wl_cfgp2p_set_management_ie(struct wl_priv *wl, struct net_device *ndev, s32 bssidx,
+wl_cfgp2p_set_management_ie(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bssidx,
s32 pktflag, const u8 *vndr_ie, u32 vndr_ie_len)
{
s32 ret = BCME_OK;
u8 *ptr;
s32 type = -1;
s32 remained_buf_len;
-#define IE_TYPE(type, bsstype) (wl_to_p2p_bss_saved_ie(wl, bsstype).p2p_ ## type ## _ie)
-#define IE_TYPE_LEN(type, bsstype) (wl_to_p2p_bss_saved_ie(wl, bsstype).p2p_ ## type ## _ie_len)
+#define IE_TYPE(type, bsstype) (wl_to_p2p_bss_saved_ie(cfg, bsstype).p2p_ ## type ## _ie)
+#define IE_TYPE_LEN(type, bsstype) (wl_to_p2p_bss_saved_ie(cfg, bsstype).p2p_ ## type ## _ie_len)
memset(g_mgmt_ie_buf, 0, sizeof(g_mgmt_ie_buf));
curr_ie_buf = g_mgmt_ie_buf;
CFGP2P_DBG((" bssidx %d, pktflag : 0x%02X\n", bssidx, pktflag));
- if (wl->p2p != NULL) {
- if (wl_cfgp2p_find_type(wl, bssidx, &type)) {
+
+#ifdef DUAL_STA
+ if ((cfg->p2p != NULL) && (bssidx != cfg->cfgdev_bssidx))
+#else
+ if (cfg->p2p != NULL)
+#endif
+ {
+ if (wl_cfgp2p_find_type(cfg, bssidx, &type)) {
CFGP2P_ERR(("cannot find type from bssidx : %d\n", bssidx));
return BCME_ERROR;
}
CFGP2P_ERR(("not suitable type\n"));
return BCME_ERROR;
}
- } else if (wl_get_mode_by_netdev(wl, ndev) == WL_MODE_AP) {
+ } else if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_AP) {
switch (pktflag) {
case VNDR_IE_PRBRSP_FLAG :
- mgmt_ie_buf = wl->ap_info->probe_res_ie;
- mgmt_ie_len = &wl->ap_info->probe_res_ie_len;
- mgmt_ie_buf_len = sizeof(wl->ap_info->probe_res_ie);
+ mgmt_ie_buf = cfg->ap_info->probe_res_ie;
+ mgmt_ie_len = &cfg->ap_info->probe_res_ie_len;
+ mgmt_ie_buf_len = sizeof(cfg->ap_info->probe_res_ie);
break;
case VNDR_IE_BEACON_FLAG :
- mgmt_ie_buf = wl->ap_info->beacon_ie;
- mgmt_ie_len = &wl->ap_info->beacon_ie_len;
- mgmt_ie_buf_len = sizeof(wl->ap_info->beacon_ie);
+ mgmt_ie_buf = cfg->ap_info->beacon_ie;
+ mgmt_ie_len = &cfg->ap_info->beacon_ie_len;
+ mgmt_ie_buf_len = sizeof(cfg->ap_info->beacon_ie);
+ break;
+ case VNDR_IE_ASSOCRSP_FLAG :
+ /* WPS-AP WSC2.0 assoc res includes wps_ie */
+ mgmt_ie_buf = cfg->ap_info->assoc_res_ie;
+ mgmt_ie_len = &cfg->ap_info->assoc_res_ie_len;
+ mgmt_ie_buf_len = sizeof(cfg->ap_info->assoc_res_ie);
break;
default:
mgmt_ie_buf = NULL;
return BCME_ERROR;
}
bssidx = 0;
- } else if (wl_get_mode_by_netdev(wl, ndev) == WL_MODE_BSS) {
+ } else if (wl_get_mode_by_netdev(cfg, ndev) == WL_MODE_BSS) {
switch (pktflag) {
case VNDR_IE_PRBREQ_FLAG :
- mgmt_ie_buf = wl->sta_info->probe_req_ie;
- mgmt_ie_len = &wl->sta_info->probe_req_ie_len;
- mgmt_ie_buf_len = sizeof(wl->sta_info->probe_req_ie);
+ mgmt_ie_buf = cfg->sta_info->probe_req_ie;
+ mgmt_ie_len = &cfg->sta_info->probe_req_ie_len;
+ mgmt_ie_buf_len = sizeof(cfg->sta_info->probe_req_ie);
break;
case VNDR_IE_ASSOCREQ_FLAG :
- mgmt_ie_buf = wl->sta_info->assoc_req_ie;
- mgmt_ie_len = &wl->sta_info->assoc_req_ie_len;
- mgmt_ie_buf_len = sizeof(wl->sta_info->assoc_req_ie);
+ mgmt_ie_buf = cfg->sta_info->assoc_req_ie;
+ mgmt_ie_len = &cfg->sta_info->assoc_req_ie_len;
+ mgmt_ie_buf_len = sizeof(cfg->sta_info->assoc_req_ie);
break;
default:
mgmt_ie_buf = NULL;
vndrie_info->vndrie.oui[0], vndrie_info->vndrie.oui[1],
vndrie_info->vndrie.oui[2]));
- del_add_ie_buf_len = wl_cfgp2p_vndr_ie(wl, curr_ie_buf,
+ del_add_ie_buf_len = wl_cfgp2p_vndr_ie(cfg, curr_ie_buf,
pktflag, vndrie_info->vndrie.oui,
vndrie_info->vndrie.id,
vndrie_info->ie_ptr + VNDR_IE_FIXED_LEN,
vndrie_info->vndrie.oui[0], vndrie_info->vndrie.oui[1],
vndrie_info->vndrie.oui[2]));
- del_add_ie_buf_len = wl_cfgp2p_vndr_ie(wl, curr_ie_buf,
+ del_add_ie_buf_len = wl_cfgp2p_vndr_ie(cfg, curr_ie_buf,
pktflag, vndrie_info->vndrie.oui,
vndrie_info->vndrie.id,
vndrie_info->ie_ptr + VNDR_IE_FIXED_LEN,
break;
}
- /* save the parsed IE in wl struct */
+ /* save the parsed IE in cfg struct */
memcpy(ptr + (*mgmt_ie_len), vndrie_info->ie_ptr,
vndrie_info->ie_len);
*mgmt_ie_len += vndrie_info->ie_len;
}
if (total_ie_buf_len) {
ret = wldev_iovar_setbuf_bsscfg(ndev, "vndr_ie", g_mgmt_ie_buf,
- total_ie_buf_len, wl->ioctl_buf, WLC_IOCTL_MAXLEN,
- bssidx, &wl->ioctl_buf_sync);
+ total_ie_buf_len, cfg->ioctl_buf, WLC_IOCTL_MAXLEN,
+ bssidx, &cfg->ioctl_buf_sync);
if (ret)
CFGP2P_ERR(("vndr ie set error : %d\n", ret));
}
/* Clear the management IE buffer of BSSCFG
* Parameters:
- * @wl : wl_private data
+ * @cfg : wl_private data
* @bssidx : bssidx for BSS
*
* Returns 0 if success.
*/
s32
-wl_cfgp2p_clear_management_ie(struct wl_priv *wl, s32 bssidx)
+wl_cfgp2p_clear_management_ie(struct bcm_cfg80211 *cfg, s32 bssidx)
{
s32 vndrie_flag[] = {VNDR_IE_BEACON_FLAG, VNDR_IE_PRBRSP_FLAG, VNDR_IE_ASSOCRSP_FLAG,
VNDR_IE_PRBREQ_FLAG, VNDR_IE_ASSOCREQ_FLAG};
s32 index = -1;
s32 type = -1;
- struct net_device *ndev = wl_cfgp2p_find_ndev(wl, bssidx);
+ struct net_device *ndev = wl_cfgp2p_find_ndev(cfg, bssidx);
#define INIT_IE(IE_TYPE, BSS_TYPE) \
do { \
- memset(wl_to_p2p_bss_saved_ie(wl, BSS_TYPE).p2p_ ## IE_TYPE ## _ie, 0, \
- sizeof(wl_to_p2p_bss_saved_ie(wl, BSS_TYPE).p2p_ ## IE_TYPE ## _ie)); \
- wl_to_p2p_bss_saved_ie(wl, BSS_TYPE).p2p_ ## IE_TYPE ## _ie_len = 0; \
+ memset(wl_to_p2p_bss_saved_ie(cfg, BSS_TYPE).p2p_ ## IE_TYPE ## _ie, 0, \
+ sizeof(wl_to_p2p_bss_saved_ie(cfg, BSS_TYPE).p2p_ ## IE_TYPE ## _ie)); \
+ wl_to_p2p_bss_saved_ie(cfg, BSS_TYPE).p2p_ ## IE_TYPE ## _ie_len = 0; \
} while (0);
if (bssidx < 0 || ndev == NULL) {
return BCME_BADARG;
}
- if (wl_cfgp2p_find_type(wl, bssidx, &type)) {
+ if (wl_cfgp2p_find_type(cfg, bssidx, &type)) {
CFGP2P_ERR(("invalid argument\n"));
return BCME_BADARG;
}
for (index = 0; index < ARRAYSIZE(vndrie_flag); index++) {
/* clean up vndr ies in dongle */
- wl_cfgp2p_set_management_ie(wl, ndev, bssidx, vndrie_flag[index], NULL, 0);
+ wl_cfgp2p_set_management_ie(cfg, ndev, bssidx, vndrie_flag[index], NULL, 0);
}
INIT_IE(probe_req, type);
INIT_IE(probe_res, type);
return NULL;
}
static u32
-wl_cfgp2p_vndr_ie(struct wl_priv *wl, u8 *iebuf, s32 pktflag,
+wl_cfgp2p_vndr_ie(struct bcm_cfg80211 *cfg, u8 *iebuf, s32 pktflag,
s8 *oui, s32 ie_id, s8 *data, s32 datalen, const s8* add_del_cmd)
{
vndr_ie_setbuf_t hdr; /* aligned temporary vndr_ie buffer header */
/*
* Search the bssidx based on dev argument
* Parameters:
- * @wl : wl_private data
+ * @cfg : wl_private data
* @ndev : net device to search bssidx
* @bssidx : output arg to store bssidx of the bsscfg of firmware.
* Returns error
*/
s32
-wl_cfgp2p_find_idx(struct wl_priv *wl, struct net_device *ndev, s32 *bssidx)
+wl_cfgp2p_find_idx(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 *bssidx)
{
u32 i;
if (ndev == NULL || bssidx == NULL) {
CFGP2P_ERR((" argument is invalid\n"));
return BCME_BADARG;
}
- if (!wl->p2p_supported) {
+ if (!cfg->p2p_supported) {
*bssidx = P2PAPI_BSSCFG_PRIMARY;
return BCME_OK;
}
 * because the ndev is the same as the ndev of the PRIMARY BSS.
*/
for (i = 0; i < P2PAPI_BSSCFG_MAX; i++) {
- if (ndev == wl_to_p2p_bss_ndev(wl, i)) {
- *bssidx = wl_to_p2p_bss_bssidx(wl, i);
+ if (ndev == wl_to_p2p_bss_ndev(cfg, i)) {
+ *bssidx = wl_to_p2p_bss_bssidx(cfg, i);
return BCME_OK;
}
}
+
+#ifdef DUAL_STA
+ if (cfg->bss_cfgdev && (cfg->bss_cfgdev == ndev_to_cfgdev(ndev))) {
+ CFGP2P_INFO(("cfgdev is present, return the bssidx"));
+ *bssidx = cfg->cfgdev_bssidx;
+ return BCME_OK;
+ }
+#endif
+
return BCME_BADARG;
+
}
struct net_device *
-wl_cfgp2p_find_ndev(struct wl_priv *wl, s32 bssidx)
+wl_cfgp2p_find_ndev(struct bcm_cfg80211 *cfg, s32 bssidx)
{
u32 i;
struct net_device *ndev = NULL;
}
for (i = 0; i < P2PAPI_BSSCFG_MAX; i++) {
- if (bssidx == wl_to_p2p_bss_bssidx(wl, i)) {
- ndev = wl_to_p2p_bss_ndev(wl, i);
+ if (bssidx == wl_to_p2p_bss_bssidx(cfg, i)) {
+ ndev = wl_to_p2p_bss_ndev(cfg, i);
break;
}
}
/*
* Search the driver array idx based on bssidx argument
* Parameters:
- * @wl : wl_private data
+ * @cfg : wl_private data
* @bssidx : bssidx which indicate bsscfg->idx of firmware.
* @type : output arg to store array idx of p2p->bss.
* Returns error
*/
s32
-wl_cfgp2p_find_type(struct wl_priv *wl, s32 bssidx, s32 *type)
+wl_cfgp2p_find_type(struct bcm_cfg80211 *cfg, s32 bssidx, s32 *type)
{
u32 i;
if (bssidx < 0 || type == NULL) {
}
for (i = 0; i < P2PAPI_BSSCFG_MAX; i++) {
- if (bssidx == wl_to_p2p_bss_bssidx(wl, i)) {
+ if (bssidx == wl_to_p2p_bss_bssidx(cfg, i)) {
*type = i;
return BCME_OK;
}
}
+#ifdef DUAL_STA
+ if (bssidx == cfg->cfgdev_bssidx) {
+ CFGP2P_DBG(("bssidx matching with the virtual I/F \n"));
+ *type = 1;
+ return BCME_OK;
+ }
+#endif
+
exit:
return BCME_BADARG;
}
* Callback function for WLC_E_P2P_DISC_LISTEN_COMPLETE
*/
s32
-wl_cfgp2p_listen_complete(struct wl_priv *wl, bcm_struct_cfgdev *cfgdev,
+wl_cfgp2p_listen_complete(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
const wl_event_msg_t *e, void *data)
{
s32 ret = BCME_OK;
struct net_device *ndev = NULL;
- if (!wl || !wl->p2p)
+ if (!cfg || !cfg->p2p)
return BCME_ERROR;
CFGP2P_DBG((" Enter\n"));
- ndev = cfgdev_to_wlc_ndev(cfgdev, wl);
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
- if (wl_get_p2p_status(wl, LISTEN_EXPIRED) == 0) {
- wl_set_p2p_status(wl, LISTEN_EXPIRED);
- if (timer_pending(&wl->p2p->listen_timer)) {
- del_timer_sync(&wl->p2p->listen_timer);
+ if (wl_get_p2p_status(cfg, LISTEN_EXPIRED) == 0) {
+ wl_set_p2p_status(cfg, LISTEN_EXPIRED);
+ if (timer_pending(&cfg->p2p->listen_timer)) {
+ del_timer_sync(&cfg->p2p->listen_timer);
}
- if (wl->afx_hdl->is_listen == TRUE &&
- wl_get_drv_status_all(wl, FINDING_COMMON_CHANNEL)) {
+ if (cfg->afx_hdl->is_listen == TRUE &&
+ wl_get_drv_status_all(cfg, FINDING_COMMON_CHANNEL)) {
WL_DBG(("Listen DONE for action frame\n"));
- complete(&wl->act_frm_scan);
+ complete(&cfg->act_frm_scan);
}
#ifdef WL_CFG80211_SYNC_GON
- else if (wl_get_drv_status_all(wl, WAITING_NEXT_ACT_FRM_LISTEN)) {
- wl_clr_drv_status(wl, WAITING_NEXT_ACT_FRM_LISTEN, ndev);
+ else if (wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM_LISTEN)) {
+ wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM_LISTEN, ndev);
WL_DBG(("Listen DONE and wake up wait_next_af !!(%d)\n",
- jiffies_to_msecs(jiffies - wl->af_tx_sent_jiffies)));
+ jiffies_to_msecs(jiffies - cfg->af_tx_sent_jiffies)));
- if (wl_get_drv_status_all(wl, WAITING_NEXT_ACT_FRM))
- wl_clr_drv_status(wl, WAITING_NEXT_ACT_FRM, ndev);
+ if (wl_get_drv_status_all(cfg, WAITING_NEXT_ACT_FRM))
+ wl_clr_drv_status(cfg, WAITING_NEXT_ACT_FRM, ndev);
- complete(&wl->wait_next_af);
+ complete(&cfg->wait_next_af);
}
#endif /* WL_CFG80211_SYNC_GON */
#ifndef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
- if (wl_get_drv_status_all(wl, REMAINING_ON_CHANNEL))
+ if (wl_get_drv_status_all(cfg, REMAINING_ON_CHANNEL))
#else
- if (wl_get_drv_status_all(wl, REMAINING_ON_CHANNEL) ||
- wl_get_drv_status_all(wl, FAKE_REMAINING_ON_CHANNEL))
+ if (wl_get_drv_status_all(cfg, REMAINING_ON_CHANNEL) ||
+ wl_get_drv_status_all(cfg, FAKE_REMAINING_ON_CHANNEL))
#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
{
WL_DBG(("Listen DONE for ramain on channel expired\n"));
- wl_clr_drv_status(wl, REMAINING_ON_CHANNEL, ndev);
+ wl_clr_drv_status(cfg, REMAINING_ON_CHANNEL, ndev);
#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
- wl_clr_drv_status(wl, FAKE_REMAINING_ON_CHANNEL, ndev);
+ wl_clr_drv_status(cfg, FAKE_REMAINING_ON_CHANNEL, ndev);
#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
if (ndev && (ndev->ieee80211_ptr != NULL)) {
#if defined(WL_CFG80211_P2P_DEV_IF)
- cfg80211_remain_on_channel_expired(cfgdev, wl->last_roc_id,
- &wl->remain_on_chan, GFP_KERNEL);
+ // terence 20141221: Fix p2p connection issue in both p2p device in Android 5.0
+ // error log: CFG80211-ERROR) wl_cfg80211_send_action_frame : couldn't find peer's channel.
+ cfg80211_remain_on_channel_expired(bcmcfg_to_p2p_wdev(cfg),
+ cfg->last_roc_id, &cfg->remain_on_chan, GFP_KERNEL);
#else
- cfg80211_remain_on_channel_expired(cfgdev, wl->last_roc_id,
- &wl->remain_on_chan, wl->remain_on_chan_type, GFP_KERNEL);
+ cfg80211_remain_on_channel_expired(cfgdev, cfg->last_roc_id,
+ &cfg->remain_on_chan, cfg->remain_on_chan_type, GFP_KERNEL);
#endif /* WL_CFG80211_P2P_DEV_IF */
}
}
- if (wl_add_remove_eventmsg(wl_to_prmry_ndev(wl),
+ if (wl_add_remove_eventmsg(bcmcfg_to_prmry_ndev(cfg),
WLC_E_P2P_PROBREQ_MSG, false) != BCME_OK) {
CFGP2P_ERR((" failed to unset WLC_E_P2P_PROPREQ_MSG\n"));
}
} else
- wl_clr_p2p_status(wl, LISTEN_EXPIRED);
+ wl_clr_p2p_status(cfg, LISTEN_EXPIRED);
return ret;
wl_cfgp2p_listen_expired(unsigned long data)
{
wl_event_msg_t msg;
- struct wl_priv *wl = (struct wl_priv *) data;
+ struct bcm_cfg80211 *cfg = (struct bcm_cfg80211 *) data;
CFGP2P_DBG((" Enter\n"));
bzero(&msg, sizeof(wl_event_msg_t));
msg.event_type = hton32(WLC_E_P2P_DISC_LISTEN_COMPLETE);
#if defined(WL_ENABLE_P2P_IF)
- wl_cfg80211_event(wl->p2p_net ? wl->p2p_net :
- wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_DEVICE), &msg, NULL);
+ wl_cfg80211_event(cfg->p2p_net ? cfg->p2p_net :
+ wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE), &msg, NULL);
#else
- wl_cfg80211_event(wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_DEVICE), &msg,
+ wl_cfg80211_event(wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_DEVICE), &msg,
NULL);
#endif /* WL_ENABLE_P2P_IF */
}
* Routine for cancelling the P2P LISTEN
*/
static s32
-wl_cfgp2p_cancel_listen(struct wl_priv *wl, struct net_device *ndev,
+wl_cfgp2p_cancel_listen(struct bcm_cfg80211 *cfg, struct net_device *ndev,
struct wireless_dev *wdev, bool notify)
{
WL_DBG(("Enter \n"));
/* Irrespective of whether timer is running or not, reset
* the LISTEN state.
*/
- if (timer_pending(&wl->p2p->listen_timer)) {
- del_timer_sync(&wl->p2p->listen_timer);
- if (notify)
- if (ndev && ndev->ieee80211_ptr) {
+ if (timer_pending(&cfg->p2p->listen_timer)) {
+ del_timer_sync(&cfg->p2p->listen_timer);
+ if (notify) {
#if defined(WL_CFG80211_P2P_DEV_IF)
- if (wdev) // terence 20140106: fix for NULL pointer of wdev
- cfg80211_remain_on_channel_expired(wdev, wl->last_roc_id,
- &wl->remain_on_chan, GFP_KERNEL);
+ if (wdev)
+ // terence 20141221: Fix p2p connection issue in both p2p device in Android 5.0
+ // error log: CFG80211-ERROR) wl_cfg80211_send_action_frame : couldn't find peer's channel.
+ cfg80211_remain_on_channel_expired(bcmcfg_to_p2p_wdev(cfg),
+ cfg->last_roc_id, &cfg->remain_on_chan, GFP_KERNEL);
#else
- cfg80211_remain_on_channel_expired(ndev, wl->last_roc_id,
- &wl->remain_on_chan, wl->remain_on_chan_type, GFP_KERNEL);
+ if (ndev && ndev->ieee80211_ptr)
+ cfg80211_remain_on_channel_expired(ndev, cfg->last_roc_id,
+ &cfg->remain_on_chan, cfg->remain_on_chan_type, GFP_KERNEL);
#endif /* WL_CFG80211_P2P_DEV_IF */
- }
+ }
}
return 0;
}
*
* This fn assumes dongle p2p device discovery is already enabled.
* Parameters :
- * @wl : wl_private data
+ * @cfg : wl_private data
* @channel : channel to listen
* @duration_ms : the time (milli seconds) to wait
*/
s32
-wl_cfgp2p_discover_listen(struct wl_priv *wl, s32 channel, u32 duration_ms)
+wl_cfgp2p_discover_listen(struct bcm_cfg80211 *cfg, s32 channel, u32 duration_ms)
{
#define EXTRA_DELAY_TIME 100
s32 ret = BCME_OK;
struct timer_list *_timer;
s32 extra_delay;
- struct net_device *netdev = wl_to_prmry_ndev(wl);
+ struct net_device *netdev = bcmcfg_to_prmry_ndev(cfg);
CFGP2P_DBG((" Enter Listen Channel : %d, Duration : %d\n", channel, duration_ms));
- if (unlikely(wl_get_p2p_status(wl, DISCOVERY_ON) == 0)) {
+ if (unlikely(wl_get_p2p_status(cfg, DISCOVERY_ON) == 0)) {
CFGP2P_ERR((" Discovery is not set, so we have noting to do\n"));
ret = BCME_NOTREADY;
goto exit;
}
- if (timer_pending(&wl->p2p->listen_timer)) {
+ if (timer_pending(&cfg->p2p->listen_timer)) {
CFGP2P_DBG(("previous LISTEN is not completed yet\n"));
goto exit;
}
#ifndef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
else
- wl_clr_p2p_status(wl, LISTEN_EXPIRED);
+ wl_clr_p2p_status(cfg, LISTEN_EXPIRED);
#endif /* not WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
if (wl_add_remove_eventmsg(netdev, WLC_E_P2P_PROBREQ_MSG, true) != BCME_OK) {
CFGP2P_ERR((" failed to set WLC_E_P2P_PROPREQ_MSG\n"));
}
- ret = wl_cfgp2p_set_p2p_mode(wl, WL_P2P_DISC_ST_LISTEN, channel, (u16) duration_ms,
- wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE));
- _timer = &wl->p2p->listen_timer;
+ ret = wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_LISTEN, channel, (u16) duration_ms,
+ wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE));
+ _timer = &cfg->p2p->listen_timer;
/* We will wait to receive WLC_E_P2P_DISC_LISTEN_COMPLETE from dongle ,
* otherwise we will wait up to duration_ms + 100ms + duration / 10
INIT_TIMER(_timer, wl_cfgp2p_listen_expired, duration_ms, extra_delay);
#ifdef WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST
- wl_clr_p2p_status(wl, LISTEN_EXPIRED);
+ wl_clr_p2p_status(cfg, LISTEN_EXPIRED);
#endif /* WL_CFG80211_VSDB_PRIORITIZE_SCAN_REQUEST */
#undef EXTRA_DELAY_TIME
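For concreteness, here is an editor's standalone sketch of the fallback-timer bound described in the comment above (duration_ms plus the 100 ms EXTRA_DELAY_TIME plus a tenth of the duration); the helper name is invented for illustration and this is not driver code.

#include <stdio.h>

/* Editor's illustrative sketch: the LISTEN fallback timer is armed for
 * duration_ms + 100 ms + duration_ms / 10, per the comment above. */
static unsigned int listen_timer_bound_ms(unsigned int duration_ms)
{
	const unsigned int extra_delay_time = 100;	/* mirrors EXTRA_DELAY_TIME */
	return duration_ms + extra_delay_time + duration_ms / 10;
}

int main(void)
{
	/* e.g. a 200 ms LISTEN arms a 320 ms fallback timer */
	printf("%u ms\n", listen_timer_bound_ms(200));
	return 0;
}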
s32
-wl_cfgp2p_discover_enable_search(struct wl_priv *wl, u8 enable)
+wl_cfgp2p_discover_enable_search(struct bcm_cfg80211 *cfg, u8 enable)
{
s32 ret = BCME_OK;
CFGP2P_DBG((" Enter\n"));
- if (!wl_get_p2p_status(wl, DISCOVERY_ON)) {
+ if (!wl_get_p2p_status(cfg, DISCOVERY_ON)) {
CFGP2P_DBG((" do nothing, discovery is off\n"));
return ret;
}
- if (wl_get_p2p_status(wl, SEARCH_ENABLED) == enable) {
+ if (wl_get_p2p_status(cfg, SEARCH_ENABLED) == enable) {
CFGP2P_DBG(("already : %d\n", enable));
return ret;
}
- wl_chg_p2p_status(wl, SEARCH_ENABLED);
+ wl_chg_p2p_status(cfg, SEARCH_ENABLED);
/* When disabling Search, reset the WL driver's p2p discovery state to
* WL_P2P_DISC_ST_SCAN.
*/
if (!enable) {
- wl_clr_p2p_status(wl, SCANNING);
- ret = wl_cfgp2p_set_p2p_mode(wl, WL_P2P_DISC_ST_SCAN, 0, 0,
- wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE));
+ wl_clr_p2p_status(cfg, SCANNING);
+ ret = wl_cfgp2p_set_p2p_mode(cfg, WL_P2P_DISC_ST_SCAN, 0, 0,
+ wl_to_p2p_bss_bssidx(cfg, P2PAPI_BSSCFG_DEVICE));
}
return ret;
* Callback function for WLC_E_ACTION_FRAME_COMPLETE, WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE
*/
s32
-wl_cfgp2p_action_tx_complete(struct wl_priv *wl, bcm_struct_cfgdev *cfgdev,
+wl_cfgp2p_action_tx_complete(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
const wl_event_msg_t *e, void *data)
{
s32 ret = BCME_OK;
u32 event_type = ntoh32(e->event_type);
u32 status = ntoh32(e->status);
+ struct net_device *ndev = NULL;
CFGP2P_DBG((" Enter\n"));
- if (wl_get_drv_status_all(wl, SENDING_ACT_FRM)) {
+
+ ndev = cfgdev_to_wlc_ndev(cfgdev, cfg);
+
+ if (wl_get_drv_status_all(cfg, SENDING_ACT_FRM)) {
if (event_type == WLC_E_ACTION_FRAME_COMPLETE) {
CFGP2P_INFO((" WLC_E_ACTION_FRAME_COMPLETE is received : %d\n", status));
if (status == WLC_E_STATUS_SUCCESS) {
- wl_set_p2p_status(wl, ACTION_TX_COMPLETED);
+ wl_set_p2p_status(cfg, ACTION_TX_COMPLETED);
CFGP2P_DBG(("WLC_E_ACTION_FRAME_COMPLETE : ACK\n"));
}
- else {
- if (!wl_get_p2p_status(wl, ACTION_TX_COMPLETED)) {
- wl_set_p2p_status(wl, ACTION_TX_NOACK);
+ else if (!wl_get_p2p_status(cfg, ACTION_TX_COMPLETED)) {
+ wl_set_p2p_status(cfg, ACTION_TX_NOACK);
CFGP2P_INFO(("WLC_E_ACTION_FRAME_COMPLETE : NO ACK\n"));
- wl_stop_wait_next_action_frame(wl);
- }
+ wl_stop_wait_next_action_frame(cfg, ndev);
}
} else {
CFGP2P_INFO((" WLC_E_ACTION_FRAME_OFFCHAN_COMPLETE is received,"
"status : %d\n", status));
- if (wl_get_drv_status_all(wl, SENDING_ACT_FRM))
- complete(&wl->send_af_done);
+ if (wl_get_drv_status_all(cfg, SENDING_ACT_FRM))
+ complete(&cfg->send_af_done);
}
}
return ret;
* 802.11 ack has been received for the sent action frame.
*/
s32
-wl_cfgp2p_tx_action_frame(struct wl_priv *wl, struct net_device *dev,
+wl_cfgp2p_tx_action_frame(struct bcm_cfg80211 *cfg, struct net_device *dev,
wl_af_params_t *af_params, s32 bssidx)
{
s32 ret = BCME_OK;
CFGP2P_INFO(("channel : %u , dwell time : %u\n",
af_params->channel, af_params->dwell_time));
- wl_clr_p2p_status(wl, ACTION_TX_COMPLETED);
- wl_clr_p2p_status(wl, ACTION_TX_NOACK);
+ wl_clr_p2p_status(cfg, ACTION_TX_COMPLETED);
+ wl_clr_p2p_status(cfg, ACTION_TX_NOACK);
bzero(&buf, sizeof(wl_eventmsg_buf_t));
wl_cfg80211_add_to_eventbuffer(&buf, WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE, true);
wl_cfg80211_add_to_eventbuffer(&buf, WLC_E_ACTION_FRAME_COMPLETE, true);
- if ((evt_ret = wl_cfg80211_apply_eventbuffer(wl_to_prmry_ndev(wl), wl, &buf)) < 0)
+ if ((evt_ret = wl_cfg80211_apply_eventbuffer(bcmcfg_to_prmry_ndev(cfg), cfg, &buf)) < 0)
return evt_ret;
- if (bssidx == P2PAPI_BSSCFG_PRIMARY)
- bssidx = wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE);
-
- wl->af_sent_channel = af_params->channel;
+ cfg->af_sent_channel = af_params->channel;
#ifdef WL_CFG80211_SYNC_GON
- wl->af_tx_sent_jiffies = jiffies;
+ cfg->af_tx_sent_jiffies = jiffies;
#endif /* WL_CFG80211_SYNC_GON */
ret = wldev_iovar_setbuf_bsscfg(dev, "actframe", af_params, sizeof(*af_params),
- wl->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &wl->ioctl_buf_sync);
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, bssidx, &cfg->ioctl_buf_sync);
if (ret < 0) {
CFGP2P_ERR((" sending action frame is failed\n"));
goto exit;
}
- timeout = wait_for_completion_timeout(&wl->send_af_done,
+ timeout = wait_for_completion_timeout(&cfg->send_af_done,
msecs_to_jiffies(af_params->dwell_time + WL_AF_TX_EXTRA_TIME_MAX));
- if (timeout >= 0 && wl_get_p2p_status(wl, ACTION_TX_COMPLETED)) {
+ if (timeout >= 0 && wl_get_p2p_status(cfg, ACTION_TX_COMPLETED)) {
CFGP2P_INFO(("tx action frame operation is completed\n"));
ret = BCME_OK;
+ } else if (ETHER_ISBCAST(&cfg->afx_hdl->tx_dst_addr)) {
+ CFGP2P_INFO(("bcast tx action frame operation is completed\n"));
+ ret = BCME_OK;
} else {
ret = BCME_ERROR;
CFGP2P_INFO(("tx action frame operation is failed\n"));
}
/* clear status bit for action tx */
- wl_clr_p2p_status(wl, ACTION_TX_COMPLETED);
- wl_clr_p2p_status(wl, ACTION_TX_NOACK);
+ wl_clr_p2p_status(cfg, ACTION_TX_COMPLETED);
+ wl_clr_p2p_status(cfg, ACTION_TX_NOACK);
exit:
CFGP2P_INFO((" via act frame iovar : status = %d\n", ret));
bzero(&buf, sizeof(wl_eventmsg_buf_t));
wl_cfg80211_add_to_eventbuffer(&buf, WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE, false);
wl_cfg80211_add_to_eventbuffer(&buf, WLC_E_ACTION_FRAME_COMPLETE, false);
- if ((evt_ret = wl_cfg80211_apply_eventbuffer(wl_to_prmry_ndev(wl), wl, &buf)) < 0) {
+ if ((evt_ret = wl_cfg80211_apply_eventbuffer(bcmcfg_to_prmry_ndev(cfg), cfg, &buf)) < 0) {
WL_ERR(("TX frame events revert back failed \n"));
return evt_ret;
}
result = wldev_iovar_getbuf_bsscfg(ndev, "bss", &bsscfg_idx,
sizeof(bsscfg_idx), getbuf, sizeof(getbuf), 0, NULL);
if (result != 0) {
- CFGP2P_ERR(("'wl bss -C %d' failed: %d\n", bsscfg_idx, result));
+ CFGP2P_ERR(("'cfg bss -C %d' failed: %d\n", bsscfg_idx, result));
CFGP2P_ERR(("NOTE: this ioctl error is normal "
"when the BSS has not been created yet.\n"));
} else {
val = *(int*)getbuf;
val = dtoh32(val);
- CFGP2P_INFO(("---wl bss -C %d ==> %d\n", bsscfg_idx, val));
+ CFGP2P_INFO(("---cfg bss -C %d ==> %d\n", bsscfg_idx, val));
isup = (val ? TRUE : FALSE);
}
return isup;
/* Bring up or down a BSS */
s32
-wl_cfgp2p_bss(struct wl_priv *wl, struct net_device *ndev, s32 bsscfg_idx, s32 up)
+wl_cfgp2p_bss(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bsscfg_idx, s32 up)
{
s32 ret = BCME_OK;
s32 val = up ? 1 : 0;
bss_setbuf.cfg = htod32(bsscfg_idx);
bss_setbuf.val = htod32(val);
- CFGP2P_INFO(("---wl bss -C %d %s\n", bsscfg_idx, up ? "up" : "down"));
+ CFGP2P_INFO(("---cfg bss -C %d %s\n", bsscfg_idx, up ? "up" : "down"));
ret = wldev_iovar_setbuf(ndev, "bss", &bss_setbuf, sizeof(bss_setbuf),
- wl->ioctl_buf, WLC_IOCTL_MAXLEN, &wl->ioctl_buf_sync);
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
if (ret != 0) {
CFGP2P_ERR(("'bss %d' failed with %d\n", up, ret));
/* Check if 'p2p' is supported in the driver */
s32
-wl_cfgp2p_supported(struct wl_priv *wl, struct net_device *ndev)
+wl_cfgp2p_supported(struct bcm_cfg80211 *cfg, struct net_device *ndev)
{
s32 ret = BCME_OK;
s32 p2p_supported = 0;
ret = wldev_iovar_getint(ndev, "p2p",
&p2p_supported);
if (ret < 0) {
- CFGP2P_ERR(("wl p2p error %d\n", ret));
- return 0;
+ if (ret == BCME_UNSUPPORTED) {
+ CFGP2P_INFO(("p2p is unsupported\n"));
+ return 0;
+ } else {
+ CFGP2P_ERR(("cfg p2p error %d\n", ret));
+ return ret;
+ }
}
if (p2p_supported == 1) {
CFGP2P_INFO(("p2p is supported\n"));
}
/* Cleanup P2P resources */
s32
-wl_cfgp2p_down(struct wl_priv *wl)
+wl_cfgp2p_down(struct bcm_cfg80211 *cfg)
{
struct net_device *ndev = NULL;
struct wireless_dev *wdev = NULL;
s32 i = 0, index = -1;
#if defined(WL_CFG80211_P2P_DEV_IF)
- ndev = wl_to_prmry_ndev(wl);
- wdev = wl_to_p2p_wdev(wl);
+ ndev = bcmcfg_to_prmry_ndev(cfg);
+ wdev = bcmcfg_to_p2p_wdev(cfg);
#elif defined(WL_ENABLE_P2P_IF)
- ndev = wl->p2p_net ? wl->p2p_net : wl_to_prmry_ndev(wl);
+ ndev = cfg->p2p_net ? cfg->p2p_net : bcmcfg_to_prmry_ndev(cfg);
wdev = ndev_to_wdev(ndev);
#endif /* WL_CFG80211_P2P_DEV_IF */
- wl_cfgp2p_cancel_listen(wl, ndev, wdev, TRUE);
+ wl_cfgp2p_cancel_listen(cfg, ndev, wdev, TRUE);
for (i = 0; i < P2PAPI_BSSCFG_MAX; i++) {
- index = wl_to_p2p_bss_bssidx(wl, i);
+ index = wl_to_p2p_bss_bssidx(cfg, i);
if (index != WL_INVALID)
- wl_cfgp2p_clear_management_ie(wl, index);
+ wl_cfgp2p_clear_management_ie(cfg, index);
}
-#if defined(WL_CFG80211_P2P_DEV_IF)
- wl_cfgp2p_del_p2p_disc_if(wdev);
-#endif /* WL_CFG80211_P2P_DEV_IF */
- wl_cfgp2p_deinit_priv(wl);
+ wl_cfgp2p_deinit_priv(cfg);
return 0;
}
s32
-wl_cfgp2p_set_p2p_noa(struct wl_priv *wl, struct net_device *ndev, char* buf, int len)
+wl_cfgp2p_set_p2p_noa(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int len)
{
s32 ret = -1;
int count, start, duration;
memset(&dongle_noa, 0, sizeof(dongle_noa));
- if (wl->p2p && wl->p2p->vif_created) {
+ if (cfg->p2p && cfg->p2p->vif_created) {
- wl->p2p->noa.desc[0].start = 0;
+ cfg->p2p->noa.desc[0].start = 0;
sscanf(buf, "%10d %10d %10d", &count, &start, &duration);
CFGP2P_DBG(("set_p2p_noa count %d start %d duration %d\n",
count, start, duration));
if (count != -1)
- wl->p2p->noa.desc[0].count = count;
+ cfg->p2p->noa.desc[0].count = count;
/* supplicant gives interval as start */
if (start != -1)
- wl->p2p->noa.desc[0].interval = start;
+ cfg->p2p->noa.desc[0].interval = start;
if (duration != -1)
- wl->p2p->noa.desc[0].duration = duration;
+ cfg->p2p->noa.desc[0].duration = duration;
- if (wl->p2p->noa.desc[0].count != 255) {
- wl->p2p->noa.desc[0].start = 200;
+ if (cfg->p2p->noa.desc[0].count != 255 && cfg->p2p->noa.desc[0].count != 0) {
+ cfg->p2p->noa.desc[0].start = 200;
dongle_noa.type = WL_P2P_SCHED_TYPE_REQ_ABS;
dongle_noa.action = WL_P2P_SCHED_ACTION_GOOFF;
dongle_noa.option = WL_P2P_SCHED_OPTION_TSFOFS;
}
+ else if (cfg->p2p->noa.desc[0].count == 0) {
+ cfg->p2p->noa.desc[0].start = 0;
+ dongle_noa.type = WL_P2P_SCHED_TYPE_ABS;
+ dongle_noa.option = WL_P2P_SCHED_OPTION_NORMAL;
+ dongle_noa.action = WL_P2P_SCHED_ACTION_RESET;
+ }
else {
/* Continuous NoA interval. */
dongle_noa.action = WL_P2P_SCHED_ACTION_NONE;
dongle_noa.type = WL_P2P_SCHED_TYPE_ABS;
- if ((wl->p2p->noa.desc[0].interval == 102) ||
- (wl->p2p->noa.desc[0].interval == 100)) {
- wl->p2p->noa.desc[0].start = 100 -
- wl->p2p->noa.desc[0].duration;
+ if ((cfg->p2p->noa.desc[0].interval == 102) ||
+ (cfg->p2p->noa.desc[0].interval == 100)) {
+ cfg->p2p->noa.desc[0].start = 100 -
+ cfg->p2p->noa.desc[0].duration;
dongle_noa.option = WL_P2P_SCHED_OPTION_BCNPCT;
}
else {
}
}
/* Put the noa descriptor in dongle format for dongle */
- dongle_noa.desc[0].count = htod32(wl->p2p->noa.desc[0].count);
+ dongle_noa.desc[0].count = htod32(cfg->p2p->noa.desc[0].count);
if (dongle_noa.option == WL_P2P_SCHED_OPTION_BCNPCT) {
- dongle_noa.desc[0].start = htod32(wl->p2p->noa.desc[0].start);
- dongle_noa.desc[0].duration = htod32(wl->p2p->noa.desc[0].duration);
+ dongle_noa.desc[0].start = htod32(cfg->p2p->noa.desc[0].start);
+ dongle_noa.desc[0].duration = htod32(cfg->p2p->noa.desc[0].duration);
}
else {
- dongle_noa.desc[0].start = htod32(wl->p2p->noa.desc[0].start*1000);
- dongle_noa.desc[0].duration = htod32(wl->p2p->noa.desc[0].duration*1000);
+ dongle_noa.desc[0].start = htod32(cfg->p2p->noa.desc[0].start*1000);
+ dongle_noa.desc[0].duration = htod32(cfg->p2p->noa.desc[0].duration*1000);
}
- dongle_noa.desc[0].interval = htod32(wl->p2p->noa.desc[0].interval*1000);
+ dongle_noa.desc[0].interval = htod32(cfg->p2p->noa.desc[0].interval*1000);
- ret = wldev_iovar_setbuf(wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION),
- "p2p_noa", &dongle_noa, sizeof(dongle_noa), wl->ioctl_buf, WLC_IOCTL_MAXLEN,
- &wl->ioctl_buf_sync);
+ ret = wldev_iovar_setbuf(wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION),
+ "p2p_noa", &dongle_noa, sizeof(dongle_noa), cfg->ioctl_buf,
+ WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
if (ret < 0) {
CFGP2P_ERR(("fw set p2p_noa failed %d\n", ret));
return ret;
}
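For readers unfamiliar with the NoA string this handler parses, a brief editor's sketch (standalone C, example values only): the buffer carries three decimal fields, count, interval (passed in as "start") and duration, where -1 leaves a field unchanged, count == 255 keeps the continuous-schedule path, and count == 0 now selects the new schedule-reset branch.

#include <stdio.h>

/* Editor's sketch of the "count start duration" parsing used above. */
int main(void)
{
	const char *buf = "1 102 30";	/* example: one absence, 102 ms interval, 30 ms duration */
	int count = -1, start = -1, duration = -1;

	if (sscanf(buf, "%10d %10d %10d", &count, &start, &duration) != 3)
		return 1;
	printf("count=%d interval=%d duration=%d\n", count, start, duration);
	return 0;
}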
s32
-wl_cfgp2p_get_p2p_noa(struct wl_priv *wl, struct net_device *ndev, char* buf, int buf_len)
+wl_cfgp2p_get_p2p_noa(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int buf_len)
{
wifi_p2p_noa_desc_t *noa_desc;
CFGP2P_DBG((" Enter\n"));
buf[0] = '\0';
- if (wl->p2p && wl->p2p->vif_created) {
- if (wl->p2p->noa.desc[0].count || wl->p2p->ops.ops) {
+ if (cfg->p2p && cfg->p2p->vif_created) {
+ if (cfg->p2p->noa.desc[0].count || cfg->p2p->ops.ops) {
_buf[0] = 1; /* noa index */
- _buf[1] = (wl->p2p->ops.ops ? 0x80: 0) |
- (wl->p2p->ops.ctw & 0x7f); /* ops + ctw */
+ _buf[1] = (cfg->p2p->ops.ops ? 0x80: 0) |
+ (cfg->p2p->ops.ctw & 0x7f); /* ops + ctw */
len += 2;
- if (wl->p2p->noa.desc[0].count) {
+ if (cfg->p2p->noa.desc[0].count) {
noa_desc = (wifi_p2p_noa_desc_t*)&_buf[len];
- noa_desc->cnt_type = wl->p2p->noa.desc[0].count;
- noa_desc->duration = wl->p2p->noa.desc[0].duration;
- noa_desc->interval = wl->p2p->noa.desc[0].interval;
- noa_desc->start = wl->p2p->noa.desc[0].start;
+ noa_desc->cnt_type = cfg->p2p->noa.desc[0].count;
+ noa_desc->duration = cfg->p2p->noa.desc[0].duration;
+ noa_desc->interval = cfg->p2p->noa.desc[0].interval;
+ noa_desc->start = cfg->p2p->noa.desc[0].start;
len += sizeof(wifi_p2p_noa_desc_t);
}
if (buf_len <= len * 2) {
return len * 2;
}
s32
-wl_cfgp2p_set_p2p_ps(struct wl_priv *wl, struct net_device *ndev, char* buf, int len)
+wl_cfgp2p_set_p2p_ps(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int len)
{
int ps, ctw;
int ret = -1;
s32 legacy_ps;
+ struct net_device *dev;
CFGP2P_DBG((" Enter\n"));
- if (wl->p2p && wl->p2p->vif_created) {
+ if (cfg->p2p && cfg->p2p->vif_created) {
sscanf(buf, "%10d %10d %10d", &legacy_ps, &ps, &ctw);
CFGP2P_DBG((" Enter legacy_ps %d ps %d ctw %d\n", legacy_ps, ps, ctw));
+ dev = wl_to_p2p_bss_ndev(cfg, P2PAPI_BSSCFG_CONNECTION);
if (ctw != -1) {
- wl->p2p->ops.ctw = ctw;
+ cfg->p2p->ops.ctw = ctw;
ret = 0;
}
if (ps != -1) {
- wl->p2p->ops.ops = ps;
- ret = wldev_iovar_setbuf(wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION),
- "p2p_ops", &wl->p2p->ops, sizeof(wl->p2p->ops),
- wl->ioctl_buf, WLC_IOCTL_MAXLEN, &wl->ioctl_buf_sync);
+ cfg->p2p->ops.ops = ps;
+ ret = wldev_iovar_setbuf(dev,
+ "p2p_ops", &cfg->p2p->ops, sizeof(cfg->p2p->ops),
+ cfg->ioctl_buf, WLC_IOCTL_MAXLEN, &cfg->ioctl_buf_sync);
if (ret < 0) {
CFGP2P_ERR(("fw set p2p_ops failed %d\n", ret));
}
}
if ((legacy_ps != -1) && ((legacy_ps == PM_MAX) || (legacy_ps == PM_OFF))) {
-#if defined(SUPPORT_PM2_ONLY)
- if (legacy_ps == PM_MAX)
- legacy_ps = PM_FAST;
-#endif /* SUPPORT_PM2_ONLY */
-
- ret = wldev_ioctl(wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION),
+ ret = wldev_ioctl(dev,
WLC_SET_PM, &legacy_ps, sizeof(legacy_ps), true);
- if (unlikely(ret)) {
+ if (unlikely(ret))
CFGP2P_ERR(("error (%d)\n", ret));
- } else {
- wl_cfg80211_update_power_mode(ndev);
- }
+ wl_cfg80211_update_power_mode(dev);
}
else
CFGP2P_ERR(("ilegal setting\n"));
};
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
-#if defined(WL_ENABLE_P2P_IF)
+#if defined(WL_ENABLE_P2P_IF) || defined(WL_NEWCFG_PRIVCMD_SUPPORT)
s32
-wl_cfgp2p_register_ndev(struct wl_priv *wl)
+wl_cfgp2p_register_ndev(struct bcm_cfg80211 *cfg)
{
int ret = 0;
struct net_device* net = NULL;
+#ifndef WL_NEWCFG_PRIVCMD_SUPPORT
struct wireless_dev *wdev = NULL;
+#endif /* WL_NEWCFG_PRIVCMD_SUPPORT */
uint8 temp_addr[ETHER_ADDR_LEN] = { 0x00, 0x90, 0x4c, 0x33, 0x22, 0x11 };
- if (wl->p2p_net) {
+ if (cfg->p2p_net) {
CFGP2P_ERR(("p2p_net defined already.\n"));
return -EINVAL;
}
/* Allocate etherdev, including space for private structure */
- if (!(net = alloc_etherdev(sizeof(struct wl_priv *)))) {
+ if (!(net = alloc_etherdev(sizeof(struct bcm_cfg80211 *)))) {
CFGP2P_ERR(("%s: OOM - alloc_etherdev\n", __FUNCTION__));
return -ENODEV;
}
+#ifndef WL_NEWCFG_PRIVCMD_SUPPORT
wdev = kzalloc(sizeof(*wdev), GFP_KERNEL);
if (unlikely(!wdev)) {
WL_ERR(("Could not allocate wireless device\n"));
free_netdev(net);
return -ENOMEM;
}
+#endif /* WL_NEWCFG_PRIVCMD_SUPPORT */
strncpy(net->name, "p2p%d", sizeof(net->name) - 1);
net->name[IFNAMSIZ - 1] = '\0';
- /* Copy the reference to wl_priv */
- memcpy((void *)netdev_priv(net), &wl, sizeof(struct wl_priv *));
+ /* Copy the reference to bcm_cfg80211 */
+ memcpy((void *)netdev_priv(net), &cfg, sizeof(struct bcm_cfg80211 *));
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31))
ASSERT(!net->open);
/* Register with a dummy MAC addr */
memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN);
- wdev->wiphy = wl->wdev->wiphy;
+#ifndef WL_NEWCFG_PRIVCMD_SUPPORT
+ wdev->wiphy = cfg->wdev->wiphy;
wdev->iftype = wl_mode_to_nl80211_iftype(WL_MODE_BSS);
net->ieee80211_ptr = wdev;
+#endif /* WL_NEWCFG_PRIVCMD_SUPPORT */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
net->ethtool_ops = &cfgp2p_ethtool_ops;
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */
+#ifndef WL_NEWCFG_PRIVCMD_SUPPORT
SET_NETDEV_DEV(net, wiphy_dev(wdev->wiphy));
/* Associate p2p0 network interface with new wdev */
wdev->netdev = net;
+#endif /* WL_NEWCFG_PRIVCMD_SUPPORT */
ret = register_netdev(net);
if (ret) {
CFGP2P_ERR((" register_netdevice failed (%d)\n", ret));
free_netdev(net);
+#ifndef WL_NEWCFG_PRIVCMD_SUPPORT
kfree(wdev);
+#endif /* WL_NEWCFG_PRIVCMD_SUPPORT */
return -ENODEV;
}
/* store p2p net ptr for further reference. Note that iflist won't have this
* entry as their corresponding firmware interface is a "Hidden" interface.
*/
- wl->p2p_wdev = wdev;
- wl->p2p_net = net;
+#ifndef WL_NEWCFG_PRIVCMD_SUPPORT
+ cfg->p2p_wdev = wdev;
+#endif /* WL_NEWCFG_PRIVCMD_SUPPORT */
+ cfg->p2p_net = net;
printk("%s: P2P Interface Registered\n", net->name);
}
s32
-wl_cfgp2p_unregister_ndev(struct wl_priv *wl)
+wl_cfgp2p_unregister_ndev(struct bcm_cfg80211 *cfg)
{
- if (!wl || !wl->p2p_net) {
+ if (!cfg || !cfg->p2p_net) {
CFGP2P_ERR(("Invalid Ptr\n"));
return -EINVAL;
}
- unregister_netdev(wl->p2p_net);
- free_netdev(wl->p2p_net);
+ unregister_netdev(cfg->p2p_net);
+ free_netdev(cfg->p2p_net);
return 0;
}
+
static int wl_cfgp2p_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
static int wl_cfgp2p_do_ioctl(struct net_device *net, struct ifreq *ifr, int cmd)
{
int ret = 0;
- struct wl_priv *wl = *(struct wl_priv **)netdev_priv(net);
- struct net_device *ndev = wl_to_prmry_ndev(wl);
+ struct bcm_cfg80211 *cfg = *(struct bcm_cfg80211 **)netdev_priv(net);
+ struct net_device *ndev = bcmcfg_to_prmry_ndev(cfg);
/* There is no ifidx corresponding to p2p0 in our firmware. So we should
* not Handle any IOCTL cmds on p2p0 other than ANDROID PRIVATE CMDs.
return ret;
}
+#endif /* WL_ENABLE_P2P_IF || WL_NEWCFG_PRIVCMD_SUPPORT */
+#if defined(WL_ENABLE_P2P_IF)
static int wl_cfgp2p_if_open(struct net_device *net)
{
- extern struct wl_priv *wlcfg_drv_priv;
struct wireless_dev *wdev = net->ieee80211_ptr;
- struct wl_priv *wl = NULL;
- wl = wlcfg_drv_priv;
- if (!wdev || !wl || !wl->p2p)
+
+ if (!wdev || !wl_cfg80211_is_p2p_active())
return -EINVAL;
WL_TRACE(("Enter\n"));
#if !defined(WL_IFACE_COMB_NUM_CHANNELS)
static int wl_cfgp2p_if_stop(struct net_device *net)
{
- extern struct wl_priv *wlcfg_drv_priv;
- struct wl_priv *wl = NULL;
- unsigned long flags;
struct wireless_dev *wdev = net->ieee80211_ptr;
- int clear_flag = 0;
+
if (!wdev)
return -EINVAL;
- WL_TRACE(("Enter\n"));
- wl = wlcfg_drv_priv;
- if (!wl)
- return -EINVAL;
- spin_lock_irqsave(&wl->cfgdrv_lock, flags);
- if (wl->scan_request && wl->scan_request->dev == net) {
- cfg80211_scan_done(wl->scan_request, true);
- wl->scan_request = NULL;
- clear_flag = 1;
- }
- spin_unlock_irqrestore(&wl->cfgdrv_lock, flags);
- if (clear_flag)
- wl_clr_drv_status(wl, SCANNING, net);
+ wl_cfg80211_scan_stop(net);
+
#if !defined(WL_IFACE_COMB_NUM_CHANNELS)
wdev->wiphy->interface_modes = (wdev->wiphy->interface_modes)
& (~(BIT(NL80211_IFTYPE_P2P_CLIENT)|
#if defined(WL_CFG80211_P2P_DEV_IF)
struct wireless_dev *
-wl_cfgp2p_add_p2p_disc_if(void)
+wl_cfgp2p_add_p2p_disc_if(struct bcm_cfg80211 *cfg)
{
- extern struct wl_priv *wlcfg_drv_priv;
- struct wl_priv *wl = wlcfg_drv_priv;
struct wireless_dev *wdev = NULL;
struct ether_addr primary_mac;
- if (!wl)
- return NULL;
+ if (!cfg)
+ return ERR_PTR(-EINVAL);
WL_TRACE(("Enter\n"));
- if (wl->p2p_wdev) {
+ if (cfg->p2p_wdev) {
CFGP2P_ERR(("p2p_wdev defined already.\n"));
- return NULL;
+#if (defined(CUSTOMER_HW10) && defined(CONFIG_ARCH_ODIN))
+ wl_cfgp2p_del_p2p_disc_if(cfg->p2p_wdev, cfg);
+ CFGP2P_ERR(("p2p_wdev deleted.\n"));
+#else
+ return ERR_PTR(-ENFILE);
+#endif
}
wdev = kzalloc(sizeof(*wdev), GFP_KERNEL);
if (unlikely(!wdev)) {
WL_ERR(("Could not allocate wireless device\n"));
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
memset(&primary_mac, 0, sizeof(primary_mac));
- get_primary_mac(wl, &primary_mac);
+ get_primary_mac(cfg, &primary_mac);
wl_cfgp2p_generate_bss_mac(&primary_mac,
- &wl->p2p->dev_addr, &wl->p2p->int_addr);
+ &cfg->p2p->dev_addr, &cfg->p2p->int_addr);
- wdev->wiphy = wl->wdev->wiphy;
+ wdev->wiphy = cfg->wdev->wiphy;
wdev->iftype = NL80211_IFTYPE_P2P_DEVICE;
- memcpy(wdev->address, &wl->p2p->dev_addr, ETHER_ADDR_LEN);
+ memcpy(wdev->address, &cfg->p2p->dev_addr, ETHER_ADDR_LEN);
+
+#if defined(WL_NEWCFG_PRIVCMD_SUPPORT)
+ if (cfg->p2p_net)
+ memcpy(cfg->p2p_net->dev_addr, &cfg->p2p->dev_addr, ETHER_ADDR_LEN);
+#endif /* WL_NEWCFG_PRIVCMD_SUPPORT */
/* store p2p wdev ptr for further reference. */
- wl->p2p_wdev = wdev;
+ cfg->p2p_wdev = wdev;
WL_TRACE(("P2P interface registered\n"));
wl_cfgp2p_start_p2p_device(struct wiphy *wiphy, struct wireless_dev *wdev)
{
int ret = 0;
- extern struct wl_priv *wlcfg_drv_priv;
- struct wl_priv *wl = wlcfg_drv_priv;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- if (!wl)
+ if (!cfg)
return -EINVAL;
WL_TRACE(("Enter\n"));
- ret = wl_cfgp2p_set_firm_p2p(wl);
+ ret = wl_cfgp2p_set_firm_p2p(cfg);
if (unlikely(ret < 0)) {
CFGP2P_ERR(("Set P2P in firmware failed, ret=%d\n", ret));
goto exit;
}
- ret = wl_cfgp2p_enable_discovery(wl, wl_to_prmry_ndev(wl), NULL, 0);
+ ret = wl_cfgp2p_enable_discovery(cfg, bcmcfg_to_prmry_ndev(cfg), NULL, 0);
if (unlikely(ret < 0)) {
CFGP2P_ERR(("P2P enable discovery failed, ret=%d\n", ret));
goto exit;
}
- p2p_on(wl) = true;
+ p2p_on(cfg) = true;
CFGP2P_DBG(("P2P interface started\n"));
wl_cfgp2p_stop_p2p_device(struct wiphy *wiphy, struct wireless_dev *wdev)
{
int ret = 0;
- int clear_flag = 0;
- unsigned long flags = 0;
- struct net_device *ndev = NULL;
- extern struct wl_priv *wlcfg_drv_priv;
- struct wl_priv *wl = wlcfg_drv_priv;
+ struct bcm_cfg80211 *cfg = wiphy_priv(wiphy);
- if (!wl || !wdev)
+ if (!cfg)
return;
WL_TRACE(("Enter\n"));
- ndev = wdev_to_wlc_ndev(wdev, wl);
-
- spin_lock_irqsave(&wl->cfgdrv_lock, flags);
- if (wl->scan_request && wl->scan_request->wdev == wdev) {
- cfg80211_scan_done(wl->scan_request, true);
- wl->scan_request = NULL;
- clear_flag = 1;
+ ret = wl_cfg80211_scan_stop(wdev);
+ if (unlikely(ret < 0)) {
+ CFGP2P_ERR(("P2P scan stop failed, ret=%d\n", ret));
}
- spin_unlock_irqrestore(&wl->cfgdrv_lock, flags);
- if (clear_flag)
- wl_clr_drv_status(wl, SCANNING, ndev);
+ if (!cfg->p2p)
+ return;
- ret = wl_cfgp2p_disable_discovery(wl);
+ ret = wl_cfgp2p_disable_discovery(cfg);
if (unlikely(ret < 0)) {
CFGP2P_ERR(("P2P disable discovery failed, ret=%d\n", ret));
- goto exit;
}
- // Fix for p2p NULL pointer (gwl)
- if(wl->p2p)
- p2p_on(wl) = false;
+ p2p_on(cfg) = false;
CFGP2P_DBG(("P2P interface stopped\n"));
-exit:
return;
}
int
-wl_cfgp2p_del_p2p_disc_if(struct wireless_dev *wdev)
+wl_cfgp2p_del_p2p_disc_if(struct wireless_dev *wdev, struct bcm_cfg80211 *cfg)
{
- extern struct wl_priv *wlcfg_drv_priv;
- struct wl_priv *wl = wlcfg_drv_priv;
+ bool rollback_lock = false;
if (!wdev)
return -EINVAL;
WL_TRACE(("Enter\n"));
+ if (!rtnl_is_locked()) {
+ rtnl_lock();
+ rollback_lock = true;
+ }
+
cfg80211_unregister_wdev(wdev);
+ if (rollback_lock)
+ rtnl_unlock();
+
kfree(wdev);
- wl->p2p_wdev = NULL;
+ if (cfg)
+ cfg->p2p_wdev = NULL;
CFGP2P_ERR(("P2P interface unregistered\n"));
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: wl_cfgp2p.h 415640 2013-07-31 02:43:28Z $
+ * $Id: wl_cfgp2p.h 472818 2014-04-25 08:07:56Z $
*/
#ifndef _wl_cfgp2p_h_
#define _wl_cfgp2p_h_
#include <proto/802.11.h>
#include <proto/p2p.h>
-struct wl_priv;
+struct bcm_cfg80211;
extern u32 wl_dbg_level;
typedef struct wifi_p2p_ie wifi_wfd_ie_t;
enum wl_cfgp2p_status {
WLP2P_STATUS_DISCOVERY_ON = 0,
WLP2P_STATUS_SEARCH_ENABLED,
- WLP2P_STATUS_IF_ADD,
- WLP2P_STATUS_IF_DEL,
+ WLP2P_STATUS_IF_ADDING,
WLP2P_STATUS_IF_DELETING,
WLP2P_STATUS_IF_CHANGING,
WLP2P_STATUS_IF_CHANGED,
};
-#define wl_to_p2p_bss_ndev(wl, type) ((wl)->p2p->bss[type].dev)
-#define wl_to_p2p_bss_bssidx(wl, type) ((wl)->p2p->bss[type].bssidx)
-#define wl_to_p2p_bss_saved_ie(wl, type) ((wl)->p2p->bss[type].saved_ie)
-#define wl_to_p2p_bss_private(wl, type) ((wl)->p2p->bss[type].private_data)
-#define wl_to_p2p_bss(wl, type) ((wl)->p2p->bss[type])
-#define wl_get_p2p_status(wl, stat) ((!(wl)->p2p_supported) ? 0 : test_bit(WLP2P_STATUS_ ## stat, \
- &(wl)->p2p->status))
-#define wl_set_p2p_status(wl, stat) ((!(wl)->p2p_supported) ? 0 : set_bit(WLP2P_STATUS_ ## stat, \
- &(wl)->p2p->status))
-#define wl_clr_p2p_status(wl, stat) ((!(wl)->p2p_supported) ? 0 : clear_bit(WLP2P_STATUS_ ## stat, \
- &(wl)->p2p->status))
-#define wl_chg_p2p_status(wl, stat) ((!(wl)->p2p_supported) ? 0:change_bit(WLP2P_STATUS_ ## stat, \
- &(wl)->p2p->status))
-#define p2p_on(wl) ((wl)->p2p->on)
-#define p2p_scan(wl) ((wl)->p2p->scan)
-#define p2p_is_on(wl) ((wl)->p2p && (wl)->p2p->on)
+#define wl_to_p2p_bss_ndev(cfg, type) ((cfg)->p2p->bss[type].dev)
+#define wl_to_p2p_bss_bssidx(cfg, type) ((cfg)->p2p->bss[type].bssidx)
+#define wl_to_p2p_bss_saved_ie(cfg, type) ((cfg)->p2p->bss[type].saved_ie)
+#define wl_to_p2p_bss_private(cfg, type) ((cfg)->p2p->bss[type].private_data)
+#define wl_to_p2p_bss(cfg, type) ((cfg)->p2p->bss[type])
+#define wl_get_p2p_status(cfg, stat) ((!(cfg)->p2p_supported) ? 0 : \
+ test_bit(WLP2P_STATUS_ ## stat, &(cfg)->p2p->status))
+#define wl_set_p2p_status(cfg, stat) ((!(cfg)->p2p_supported) ? 0 : \
+ set_bit(WLP2P_STATUS_ ## stat, &(cfg)->p2p->status))
+#define wl_clr_p2p_status(cfg, stat) ((!(cfg)->p2p_supported) ? 0 : \
+ clear_bit(WLP2P_STATUS_ ## stat, &(cfg)->p2p->status))
+#define wl_chg_p2p_status(cfg, stat) ((!(cfg)->p2p_supported) ? 0 : \
+ change_bit(WLP2P_STATUS_ ## stat, &(cfg)->p2p->status))
+#define p2p_on(cfg) ((cfg)->p2p->on)
+#define p2p_scan(cfg) ((cfg)->p2p->scan)
+#define p2p_is_on(cfg) ((cfg)->p2p && (cfg)->p2p->on)
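The renamed macros keep the original guard: every status accessor short-circuits on p2p_supported, so the p2p state is never touched on chips without P2P. An editor's userspace mock of that pattern (all names invented, not driver code):

#include <stdio.h>

/* Mock of the wl_*_p2p_status guard: a no-op when P2P is unsupported. */
struct mock_cfg {
	int p2p_supported;
	unsigned long status;		/* stand-in for cfg->p2p->status */
};

#define mock_set_status(c, bit) \
	(!(c)->p2p_supported ? 0UL : ((c)->status |= (1UL << (bit))))
#define mock_get_status(c, bit) \
	(!(c)->p2p_supported ? 0UL : (((c)->status >> (bit)) & 1UL))

int main(void)
{
	struct mock_cfg cfg = { .p2p_supported = 0, .status = 0 };

	mock_set_status(&cfg, 0);	/* ignored: P2P not supported */
	cfg.p2p_supported = 1;
	mock_set_status(&cfg, 0);	/* takes effect now */
	printf("bit0=%lu\n", mock_get_status(&cfg, 0));
	return 0;
}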
/* dword align allocation */
#define WLC_IOCTL_MAXLEN 8192
init_timer(timer); \
timer->function = func; \
timer->expires = jiffies + msecs_to_jiffies(duration + extra_delay); \
- timer->data = (unsigned long) wl; \
+ timer->data = (unsigned long) cfg; \
add_timer(timer); \
} while (0);
-#if !defined(WL_CFG80211_P2P_DEV_IF) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)) && !defined(WL_CFG80211_P2P_DEV_IF)
#define WL_CFG80211_P2P_DEV_IF
+
+#ifdef WL_ENABLE_P2P_IF
#undef WL_ENABLE_P2P_IF
-#endif /* !WL_CFG80211_P2P_DEV_IF && (LINUX_VERSION >= VERSION(3, 8, 0)) */
+#endif
+
+#ifdef WL_SUPPORT_BACKPORTED_KPATCHES
+#undef WL_SUPPORT_BACKPORTED_KPATCHES
+#endif
+#else
+#ifdef WLP2P
+#ifndef WL_ENABLE_P2P_IF
+/* Enable P2P network Interface if P2P support is enabled */
+#define WL_ENABLE_P2P_IF
+#endif /* WL_ENABLE_P2P_IF */
+#endif /* WLP2P */
+#endif /* (LINUX_VERSION >= VERSION(3, 8, 0)) */
+
+#ifndef WL_CFG80211_P2P_DEV_IF
+#ifdef WL_NEWCFG_PRIVCMD_SUPPORT
+#undef WL_NEWCFG_PRIVCMD_SUPPORT
+#endif
+#endif /* WL_CFG80211_P2P_DEV_IF */
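Net effect of the new conditionals, as this editor reads them: on 3.8+ kernels the cfg80211 P2P Device interface is forced on and the legacy p2p0 netdev path (plus the backported-kpatches shim) is dropped, older WLP2P builds fall back to WL_ENABLE_P2P_IF, and WL_NEWCFG_PRIVCMD_SUPPORT only survives alongside WL_CFG80211_P2P_DEV_IF. A minimal compile-time sanity sketch of the 3.8+ outcome (assuming the same macros are in scope; not part of the patch):

#include <linux/version.h>

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
#ifndef WL_CFG80211_P2P_DEV_IF
#error "3.8+ builds are expected to end up with WL_CFG80211_P2P_DEV_IF defined"
#endif
#endif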
#if defined(WL_ENABLE_P2P_IF) && (defined(WL_CFG80211_P2P_DEV_IF) || \
(LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)))
extern void
wl_cfgp2p_print_actframe(bool tx, void *frame, u32 frame_len, u32 channel);
extern s32
-wl_cfgp2p_init_priv(struct wl_priv *wl);
+wl_cfgp2p_init_priv(struct bcm_cfg80211 *cfg);
extern void
-wl_cfgp2p_deinit_priv(struct wl_priv *wl);
+wl_cfgp2p_deinit_priv(struct bcm_cfg80211 *cfg);
extern s32
-wl_cfgp2p_set_firm_p2p(struct wl_priv *wl);
+wl_cfgp2p_set_firm_p2p(struct bcm_cfg80211 *cfg);
extern s32
-wl_cfgp2p_set_p2p_mode(struct wl_priv *wl, u8 mode,
+wl_cfgp2p_set_p2p_mode(struct bcm_cfg80211 *cfg, u8 mode,
u32 channel, u16 listen_ms, int bssidx);
extern s32
-wl_cfgp2p_ifadd(struct wl_priv *wl, struct ether_addr *mac, u8 if_type,
+wl_cfgp2p_ifadd(struct bcm_cfg80211 *cfg, struct ether_addr *mac, u8 if_type,
chanspec_t chspec);
extern s32
-wl_cfgp2p_ifdisable(struct wl_priv *wl, struct ether_addr *mac);
+wl_cfgp2p_ifdisable(struct bcm_cfg80211 *cfg, struct ether_addr *mac);
extern s32
-wl_cfgp2p_ifdel(struct wl_priv *wl, struct ether_addr *mac);
+wl_cfgp2p_ifdel(struct bcm_cfg80211 *cfg, struct ether_addr *mac);
extern s32
-wl_cfgp2p_ifchange(struct wl_priv *wl, struct ether_addr *mac, u8 if_type, chanspec_t chspec);
+wl_cfgp2p_ifchange(struct bcm_cfg80211 *cfg, struct ether_addr *mac, u8 if_type, chanspec_t chspec);
extern s32
-wl_cfgp2p_ifidx(struct wl_priv *wl, struct ether_addr *mac, s32 *index);
+wl_cfgp2p_ifidx(struct bcm_cfg80211 *cfg, struct ether_addr *mac, s32 *index);
extern s32
-wl_cfgp2p_init_discovery(struct wl_priv *wl);
+wl_cfgp2p_init_discovery(struct bcm_cfg80211 *cfg);
extern s32
-wl_cfgp2p_enable_discovery(struct wl_priv *wl, struct net_device *dev, const u8 *ie, u32 ie_len);
+wl_cfgp2p_enable_discovery(struct bcm_cfg80211 *cfg, struct net_device *dev, const u8 *ie,
+ u32 ie_len);
extern s32
-wl_cfgp2p_disable_discovery(struct wl_priv *wl);
+wl_cfgp2p_disable_discovery(struct bcm_cfg80211 *cfg);
extern s32
-wl_cfgp2p_escan(struct wl_priv *wl, struct net_device *dev, u16 active, u32 num_chans,
+wl_cfgp2p_escan(struct bcm_cfg80211 *cfg, struct net_device *dev, u16 active, u32 num_chans,
u16 *channels,
s32 search_state, u16 action, u32 bssidx, struct ether_addr *tx_dst_addr,
p2p_scan_purpose_t p2p_scan_purpose);
extern s32
-wl_cfgp2p_act_frm_search(struct wl_priv *wl, struct net_device *ndev,
+wl_cfgp2p_act_frm_search(struct bcm_cfg80211 *cfg, struct net_device *ndev,
s32 bssidx, s32 channel, struct ether_addr *tx_dst_addr);
extern wpa_ie_fixed_t *
extern wifi_wfd_ie_t *
wl_cfgp2p_find_wfdie(u8 *parse, u32 len);
extern s32
-wl_cfgp2p_set_management_ie(struct wl_priv *wl, struct net_device *ndev, s32 bssidx,
+wl_cfgp2p_set_management_ie(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bssidx,
s32 pktflag, const u8 *vndr_ie, u32 vndr_ie_len);
extern s32
-wl_cfgp2p_clear_management_ie(struct wl_priv *wl, s32 bssidx);
+wl_cfgp2p_clear_management_ie(struct bcm_cfg80211 *cfg, s32 bssidx);
extern s32
-wl_cfgp2p_find_idx(struct wl_priv *wl, struct net_device *ndev, s32 *index);
+wl_cfgp2p_find_idx(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 *index);
extern struct net_device *
-wl_cfgp2p_find_ndev(struct wl_priv *wl, s32 bssidx);
+wl_cfgp2p_find_ndev(struct bcm_cfg80211 *cfg, s32 bssidx);
extern s32
-wl_cfgp2p_find_type(struct wl_priv *wl, s32 bssidx, s32 *type);
+wl_cfgp2p_find_type(struct bcm_cfg80211 *cfg, s32 bssidx, s32 *type);
extern s32
-wl_cfgp2p_listen_complete(struct wl_priv *wl, bcm_struct_cfgdev *cfgdev,
+wl_cfgp2p_listen_complete(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
const wl_event_msg_t *e, void *data);
extern s32
-wl_cfgp2p_discover_listen(struct wl_priv *wl, s32 channel, u32 duration_ms);
+wl_cfgp2p_discover_listen(struct bcm_cfg80211 *cfg, s32 channel, u32 duration_ms);
extern s32
-wl_cfgp2p_discover_enable_search(struct wl_priv *wl, u8 enable);
+wl_cfgp2p_discover_enable_search(struct bcm_cfg80211 *cfg, u8 enable);
extern s32
-wl_cfgp2p_action_tx_complete(struct wl_priv *wl, bcm_struct_cfgdev *cfgdev,
+wl_cfgp2p_action_tx_complete(struct bcm_cfg80211 *cfg, bcm_struct_cfgdev *cfgdev,
const wl_event_msg_t *e, void *data);
extern s32
-wl_cfgp2p_tx_action_frame(struct wl_priv *wl, struct net_device *dev,
+wl_cfgp2p_tx_action_frame(struct bcm_cfg80211 *cfg, struct net_device *dev,
wl_af_params_t *af_params, s32 bssidx);
extern void
wl_cfgp2p_bss_isup(struct net_device *ndev, int bsscfg_idx);
extern s32
-wl_cfgp2p_bss(struct wl_priv *wl, struct net_device *ndev, s32 bsscfg_idx, s32 up);
+wl_cfgp2p_bss(struct bcm_cfg80211 *cfg, struct net_device *ndev, s32 bsscfg_idx, s32 up);
extern s32
-wl_cfgp2p_supported(struct wl_priv *wl, struct net_device *ndev);
+wl_cfgp2p_supported(struct bcm_cfg80211 *cfg, struct net_device *ndev);
extern s32
-wl_cfgp2p_down(struct wl_priv *wl);
+wl_cfgp2p_down(struct bcm_cfg80211 *cfg);
extern s32
-wl_cfgp2p_set_p2p_noa(struct wl_priv *wl, struct net_device *ndev, char* buf, int len);
+wl_cfgp2p_set_p2p_noa(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int len);
extern s32
-wl_cfgp2p_get_p2p_noa(struct wl_priv *wl, struct net_device *ndev, char* buf, int len);
+wl_cfgp2p_get_p2p_noa(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int len);
extern s32
-wl_cfgp2p_set_p2p_ps(struct wl_priv *wl, struct net_device *ndev, char* buf, int len);
+wl_cfgp2p_set_p2p_ps(struct bcm_cfg80211 *cfg, struct net_device *ndev, char* buf, int len);
extern u8 *
wl_cfgp2p_retreive_p2pattrib(void *buf, u8 element_id);
wl_cfgp2p_retreive_p2p_dev_addr(wl_bss_info_t *bi, u32 bi_length);
extern s32
-wl_cfgp2p_register_ndev(struct wl_priv *wl);
+wl_cfgp2p_register_ndev(struct bcm_cfg80211 *cfg);
extern s32
-wl_cfgp2p_unregister_ndev(struct wl_priv *wl);
+wl_cfgp2p_unregister_ndev(struct bcm_cfg80211 *cfg);
extern bool
wl_cfgp2p_is_ifops(const struct net_device_ops *if_ops);
#if defined(WL_CFG80211_P2P_DEV_IF)
extern struct wireless_dev *
-wl_cfgp2p_add_p2p_disc_if(void);
+wl_cfgp2p_add_p2p_disc_if(struct bcm_cfg80211 *cfg);
extern int
wl_cfgp2p_start_p2p_device(struct wiphy *wiphy, struct wireless_dev *wdev);
wl_cfgp2p_stop_p2p_device(struct wiphy *wiphy, struct wireless_dev *wdev);
extern int
-wl_cfgp2p_del_p2p_disc_if(struct wireless_dev *wdev);
+wl_cfgp2p_del_p2p_disc_if(struct wireless_dev *wdev, struct bcm_cfg80211 *cfg);
#endif /* WL_CFG80211_P2P_DEV_IF */
/* WiFi Direct */
--- /dev/null
+/*
+ * Linux cfg80211 Vendor Extension Code
+ *
+ * $Copyright Open Broadcom Corporation$
+ *
+ * $Id: wl_cfgvendor.h 455257 2014-02-20 08:10:24Z $
+ */
+
+/*
+ * New vendor interface addition to nl80211/cfg80211 to allow vendors
+ * to implement proprietary features over the cfg80211 stack.
+ */
+
+#ifndef _wl_cfgvendor_h_
+#define _wl_cfgvendor_h_
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 14, 0)) && !defined(VENDOR_EXT_SUPPORT)
+#define VENDOR_EXT_SUPPORT
+#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(3, 14, 0) && !VENDOR_EXT_SUPPORT */
+
+enum wl_vendor_event {
+ BRCM_VENDOR_EVENT_UNSPEC,
+ BRCM_VENDOR_EVENT_PRIV_STR
+};
+
+/* Capture the BRCM_VENDOR_SUBCMD_PRIV_STRINGS* here */
+#define BRCM_VENDOR_SCMD_CAPA "cap"
+
+#ifdef VENDOR_EXT_SUPPORT
+extern int cfgvendor_attach(struct wiphy *wiphy);
+extern int cfgvendor_detach(struct wiphy *wiphy);
+#else
+static INLINE int cfgvendor_attach(struct wiphy *wiphy) { return 0; }
+static INLINE int cfgvendor_detach(struct wiphy *wiphy) { return 0; }
+#endif /* VENDOR_EXT_SUPPORT */
+
+#endif /* _wl_cfgvendor_h_ */
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: wl_dbg.h 376019 2012-12-21 01:00:06Z $
+ * $Id: wl_dbg.h 472390 2014-04-23 23:32:01Z $
*/
#define WL_TIMESTAMP()
#if 0 && (VERSION_MAJOR > 9)
+extern int osl_printf(const char *fmt, ...);
#include <IOKit/apple80211/IO8Log.h>
-#define WL_PRINT(args) do { printf args; IO8Log args; } while (0)
+#define WL_PRINT(args) do { osl_printf args; } while (0)
+#define RELEASE_PRINT(args) do { WL_PRINT(args); IO8Log args; } while (0)
#else
#define WL_PRINT(args) do { WL_TIMESTAMP(); printf args; } while (0)
+#endif
+
+#if defined(EVENT_LOG_COMPILE) && defined(WLMSG_SRSCAN)
+#define _WL_SRSCAN(fmt, ...) EVENT_LOG(EVENT_LOG_TAG_SRSCAN, fmt, ##__VA_ARGS__)
+#define WL_SRSCAN(args) _WL_SRSCAN args
+#else
+#define WL_SRSCAN(args)
#endif
+#if defined(BCMCONDITIONAL_LOGGING)
+
+/* Ideally this should be some include file that vendors can include to conditionalize logging */
+
+/* DBGONLY() macro to reduce ifdefs in code for statements that are only needed when
+ * BCMDBG is defined.
+ */
+#define DBGONLY(x)
+
+/* To disable a message completely ... until you need it again */
+#define WL_NONE(args)
+#define WL_ERROR(args) do {if (wl_msg_level & WL_ERROR_VAL) WL_PRINT(args);} while (0)
+#define WL_TRACE(args)
+#define WL_PRHDRS_MSG(args)
+#define WL_PRHDRS(i, p, f, t, r, l)
+#define WL_PRPKT(m, b, n)
+#define WL_INFORM(args)
+#define WL_TMP(args)
+#define WL_OID(args)
+#define WL_RATE(args) do {if (wl_msg_level & WL_RATE_VAL) WL_PRINT(args);} while (0)
+#define WL_ASSOC(args) do {if (wl_msg_level & WL_ASSOC_VAL) WL_PRINT(args);} while (0)
+#define WL_PRUSR(m, b, n)
+#define WL_PS(args) do {if (wl_msg_level & WL_PS_VAL) WL_PRINT(args);} while (0)
+
+#define WL_PORT(args)
+#define WL_DUAL(args)
+#define WL_REGULATORY(args) do {if (wl_msg_level & WL_REGULATORY_VAL) WL_PRINT(args);} while (0)
+
+#define WL_MPC(args)
+#define WL_APSTA(args)
+#define WL_APSTA_BCN(args)
+#define WL_APSTA_TX(args)
+#define WL_APSTA_TSF(args)
+#define WL_APSTA_BSSID(args)
+#define WL_BA(args)
+#define WL_MBSS(args)
+#define WL_PROTO(args)
+
+#define WL_CAC(args) do {if (wl_msg_level & WL_CAC_VAL) WL_PRINT(args);} while (0)
+#define WL_AMSDU(args)
+#define WL_AMPDU(args)
+#define WL_FFPLD(args)
+#define WL_MCHAN(args)
+
+#define WL_DFS(args)
+#define WL_WOWL(args)
+#define WL_DPT(args)
+#define WL_ASSOC_OR_DPT(args)
+#define WL_SCAN(args) do {if (wl_msg_level2 & WL_SCAN_VAL) WL_PRINT(args);} while (0)
+#define WL_COEX(args)
+#define WL_RTDC(w, s, i, j)
+#define WL_RTDC2(w, s, i, j)
+#define WL_CHANINT(args)
+#define WL_BTA(args)
+#define WL_P2P(args)
+#define WL_ITFR(args)
+#define WL_TDLS(args)
+#define WL_MCNX(args)
+#define WL_PROT(args)
+#define WL_PSTA(args)
+#define WL_TRF_MGMT(args)
+#define WL_L2FILTER(args)
+#define WL_MQ(args)
+#define WL_TXBF(args)
+#define WL_P2PO(args)
+#define WL_NET_DETECT(args)
+#define WL_ROAM(args)
+#define WL_WNM(args)
+
+
+#define WL_AMPDU_UPDN(args)
+#define WL_AMPDU_RX(args)
+#define WL_AMPDU_ERR(args)
+#define WL_AMPDU_TX(args)
+#define WL_AMPDU_CTL(args)
+#define WL_AMPDU_HW(args)
+#define WL_AMPDU_HWTXS(args)
+#define WL_AMPDU_HWDBG(args)
+#define WL_AMPDU_STAT(args)
+#define WL_AMPDU_ERR_ON() 0
+#define WL_AMPDU_HW_ON() 0
+#define WL_AMPDU_HWTXS_ON() 0
+
+#define WL_APSTA_UPDN(args)
+#define WL_APSTA_RX(args)
+#define WL_WSEC(args)
+#define WL_WSEC_DUMP(args)
+#define WL_PCIE(args)
+#define WL_CHANLOG(w, s, i, j)
+
+#define WL_ERROR_ON() (wl_msg_level & WL_ERROR_VAL)
+#define WL_TRACE_ON() 0
+#define WL_PRHDRS_ON() 0
+#define WL_PRPKT_ON() 0
+#define WL_INFORM_ON() 0
+#define WL_TMP_ON() 0
+#define WL_OID_ON() 0
+#define WL_RATE_ON() (wl_msg_level & WL_RATE_VAL)
+#define WL_ASSOC_ON() (wl_msg_level & WL_ASSOC_VAL)
+#define WL_PRUSR_ON() 0
+#define WL_PS_ON() (wl_msg_level & WL_PS_VAL)
+#define WL_PORT_ON() 0
+#define WL_WSEC_ON() 0
+#define WL_WSEC_DUMP_ON() 0
+#define WL_MPC_ON() 0
+#define WL_REGULATORY_ON() (wl_msg_level & WL_REGULATORY_VAL)
+#define WL_APSTA_ON() 0
+#define WL_DFS_ON() 0
+#define WL_MBSS_ON() 0
+#define WL_CAC_ON() (wl_msg_level & WL_CAC_VAL)
+#define WL_AMPDU_ON() 0
+#define WL_DPT_ON() 0
+#define WL_WOWL_ON() 0
+#define WL_SCAN_ON() (wl_msg_level2 & WL_SCAN_VAL)
+#define WL_BTA_ON() 0
+#define WL_P2P_ON() 0
+#define WL_ITFR_ON() 0
+#define WL_MCHAN_ON() 0
+#define WL_TDLS_ON() 0
+#define WL_MCNX_ON() 0
+#define WL_PROT_ON() 0
+#define WL_PSTA_ON() 0
+#define WL_TRF_MGMT_ON() 0
+#define WL_LPC_ON() 0
+#define WL_L2FILTER_ON() 0
+#define WL_TXBF_ON() 0
+#define WL_P2PO_ON() 0
+#define WL_CHANLOG_ON() 0
+#define WL_NET_DETECT_ON() 0
+#define WL_WNM_ON() 0
+#define WL_PCIE_ON() 0
+
+#else /* !BCMDBG */
+
+/* DBGONLY() macro to reduce ifdefs in code for statements that are only needed when
+ * BCMDBG is defined.
+ */
+#define DBGONLY(x)
/* To disable a message completely ... until you need it again */
#define WL_NONE(args)
#define WL_ERROR(args)
#define WL_TRACE(args)
+#ifndef LINUX_POSTMOGRIFY_REMOVAL
+#ifdef WLMSG_PRHDRS
+#define WL_PRHDRS_MSG(args) WL_PRINT(args)
+#define WL_PRHDRS(i, p, f, t, r, l) wlc_print_hdrs(i, p, f, t, r, l)
+#else
+#define WL_PRHDRS_MSG(args)
+#define WL_PRHDRS(i, p, f, t, r, l)
+#endif
+#ifdef WLMSG_PRPKT
+#define WL_PRPKT(m, b, n) prhex(m, b, n)
+#else
+#define WL_PRPKT(m, b, n)
+#endif
+#ifdef WLMSG_INFORM
+#define WL_INFORM(args) WL_PRINT(args)
+#else
+#define WL_INFORM(args)
+#endif
+#define WL_TMP(args)
+#ifdef WLMSG_OID
+#define WL_OID(args) WL_PRINT(args)
+#else
+#define WL_OID(args)
+#endif
+#define WL_RATE(args)
+#ifdef WLMSG_ASSOC
+#define WL_ASSOC(args) WL_PRINT(args)
+#else
+#define WL_ASSOC(args)
+#endif
+#define WL_PRUSR(m, b, n)
+#ifdef WLMSG_PS
+#define WL_PS(args) WL_PRINT(args)
+#else
+#define WL_PS(args)
+#endif
+#ifdef WLMSG_ROAM
+#define WL_ROAM(args) WL_PRINT(args)
+#else
+#define WL_ROAM(args)
+#endif
+#define WL_PORT(args)
+#define WL_DUAL(args)
+#define WL_REGULATORY(args)
+
+#ifdef WLMSG_MPC
+#define WL_MPC(args) WL_PRINT(args)
+#else
+#define WL_MPC(args)
+#endif
+#define WL_APSTA(args)
+#define WL_APSTA_BCN(args)
+#define WL_APSTA_TX(args)
+#define WL_APSTA_TSF(args)
+#define WL_APSTA_BSSID(args)
+#define WL_BA(args)
+#define WL_MBSS(args)
+#define WL_MODE_SWITCH(args)
+#define WL_PROTO(args)
+
+#define WL_CAC(args)
+#define WL_AMSDU(args)
+#define WL_AMPDU(args)
+#define WL_FFPLD(args)
+#define WL_MCHAN(args)
+
+/* Define WLMSG_DFS automatically for WLTEST builds */
+
+#ifdef WLMSG_DFS
+#define WL_DFS(args) do {if (wl_msg_level & WL_DFS_VAL) WL_PRINT(args);} while (0)
+#else /* WLMSG_DFS */
+#define WL_DFS(args)
+#endif /* WLMSG_DFS */
+#define WL_WOWL(args)
+#ifdef WLMSG_SCAN
+#define WL_SCAN(args) WL_PRINT(args)
+#else
+#define WL_SCAN(args)
+#endif
+#define WL_COEX(args)
+#define WL_RTDC(w, s, i, j)
+#define WL_RTDC2(w, s, i, j)
+#define WL_CHANINT(args)
+#ifdef WLMSG_BTA
+#define WL_BTA(args) WL_PRINT(args)
+#else
+#define WL_BTA(args)
+#endif
+#define WL_WMF(args)
+#define WL_P2P(args)
+#define WL_ITFR(args)
+#define WL_TDLS(args)
+#define WL_MCNX(args)
+#define WL_PROT(args)
+#define WL_PSTA(args)
+#define WL_TBTT(args)
+#define WL_TRF_MGMT(args)
+#define WL_L2FILTER(args)
+#define WL_MQ(args)
+#define WL_P2PO(args)
+#define WL_WNM(args)
+#define WL_TXBF(args)
+#define WL_CHANLOG(w, s, i, j)
+#define WL_NET_DETECT(args)
+
+#define WL_ERROR_ON() 0
+#define WL_TRACE_ON() 0
+#ifdef WLMSG_PRHDRS
+#define WL_PRHDRS_ON() 1
+#else
+#define WL_PRHDRS_ON() 0
+#endif
+#ifdef WLMSG_PRPKT
+#define WL_PRPKT_ON() 1
+#else
+#define WL_PRPKT_ON() 0
+#endif
+#ifdef WLMSG_INFORM
+#define WL_INFORM_ON() 1
+#else
+#define WL_INFORM_ON() 0
+#endif
+#ifdef WLMSG_OID
+#define WL_OID_ON() 1
+#else
+#define WL_OID_ON() 0
+#endif
+#define WL_TMP_ON() 0
+#define WL_RATE_ON() 0
+#ifdef WLMSG_ASSOC
+#define WL_ASSOC_ON() 1
+#else
+#define WL_ASSOC_ON() 0
+#endif
+#define WL_PORT_ON() 0
+#ifdef WLMSG_WSEC
+#define WL_WSEC_ON() 1
+#define WL_WSEC_DUMP_ON() 1
+#else
+#define WL_WSEC_ON() 0
+#define WL_WSEC_DUMP_ON() 0
+#endif
+#ifdef WLMSG_MPC
+#define WL_MPC_ON() 1
+#else
+#define WL_MPC_ON() 0
+#endif
+#define WL_REGULATORY_ON() 0
+
+#define WL_APSTA_ON() 0
+#define WL_BA_ON() 0
+#define WL_MBSS_ON() 0
+#define WL_MODE_SWITCH_ON() 0
+#ifdef WLMSG_DFS
+#define WL_DFS_ON() 1
+#else /* WLMSG_DFS */
+#define WL_DFS_ON() 0
+#endif /* WLMSG_DFS */
+#ifdef WLMSG_SCAN
+#define WL_SCAN_ON() 1
+#else
+#define WL_SCAN_ON() 0
+#endif
+#ifdef WLMSG_BTA
+#define WL_BTA_ON() 1
+#else
+#define WL_BTA_ON() 0
+#endif
+#define WL_WMF_ON() 0
+#define WL_P2P_ON() 0
+#define WL_MCHAN_ON() 0
+#define WL_TDLS_ON() 0
+#define WL_MCNX_ON() 0
+#define WL_PROT_ON() 0
+#define WL_TBTT_ON() 0
+#define WL_PWRSEL_ON() 0
+#define WL_L2FILTER_ON() 0
+#define WL_MQ_ON() 0
+#define WL_P2PO_ON() 0
+#define WL_TXBF_ON() 0
+#define WL_CHANLOG_ON() 0
+
+#define WL_AMPDU_UPDN(args)
+#define WL_AMPDU_RX(args)
+#define WL_AMPDU_ERR(args)
+#define WL_AMPDU_TX(args)
+#define WL_AMPDU_CTL(args)
+#define WL_AMPDU_HW(args)
+#define WL_AMPDU_HWTXS(args)
+#define WL_AMPDU_HWDBG(args)
+#define WL_AMPDU_STAT(args)
+#define WL_AMPDU_ERR_ON() 0
+#define WL_AMPDU_HW_ON() 0
+#define WL_AMPDU_HWTXS_ON() 0
+
+#define WL_WNM_ON() 0
+#endif /* LINUX_POSTMOGRIFY_REMOVAL */
#define WL_APSTA_UPDN(args)
#define WL_APSTA_RX(args)
#ifdef WLMSG_WSEC
#define WL_WSEC(args)
#define WL_WSEC_DUMP(args)
#endif
+#define WL_PCIE(args) do {if (wl_msg_level2 & WL_PCIE_VAL) WL_PRINT(args);} while (0)
+#define WL_PCIE_ON() (wl_msg_level2 & WL_PCIE_VAL)
+#endif
extern uint32 wl_msg_level;
extern uint32 wl_msg_level2;
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: wl_iw.c 396420 2013-04-12 06:55:45Z $
+ * $Id: wl_iw.c 467328 2014-04-03 01:23:40Z $
*/
#if defined(USE_IW)
#include <wlioctl.h>
-#include <wl_dbg.h>
+/* message levels */
+#define WL_ERROR_LEVEL 0x0001
+#define WL_SCAN_LEVEL 0x0002
+#define WL_ASSOC_LEVEL 0x0004
+#define WL_INFORM_LEVEL 0x0008
+#define WL_WSEC_LEVEL 0x0010
+#define WL_PNO_LEVEL 0x0020
+#define WL_COEX_LEVEL 0x0040
+#define WL_SOFTAP_LEVEL 0x0080
+#define WL_TRACE_LEVEL 0x0100
+
+uint iw_msg_level = WL_ERROR_LEVEL;
+
+#define WL_ERROR(x) do {if (iw_msg_level & WL_ERROR_LEVEL) printf x;} while (0)
+#define WL_SCAN(x) do {if (iw_msg_level & WL_SCAN_LEVEL) printf x;} while (0)
+#define WL_ASSOC(x) do {if (iw_msg_level & WL_ASSOC_LEVEL) printf x;} while (0)
+#define WL_INFORM(x) do {if (iw_msg_level & WL_INFORM_LEVEL) printf x;} while (0)
+#define WL_WSEC(x) do {if (iw_msg_level & WL_WSEC_LEVEL) printf x;} while (0)
+#define WL_PNO(x) do {if (iw_msg_level & WL_PNO_LEVEL) printf x;} while (0)
+#define WL_COEX(x) do {if (iw_msg_level & WL_COEX_LEVEL) printf x;} while (0)
+#define WL_SOFTAP(x) do {if (iw_msg_level & WL_SOFTAP_LEVEL) printf x;} while (0)
+#define WL_TRACE(x) do {if (iw_msg_level & WL_TRACE_LEVEL) printf x;} while (0)
+
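Since wl_iw.c now carries its own message-level defines instead of including wl_dbg.h, verbosity is a plain bitmask; an editor's standalone illustration follows (values copied from the defines above, compiled as ordinary userspace C):

#include <stdio.h>

#define WL_ERROR_LEVEL	0x0001
#define WL_SCAN_LEVEL	0x0002
#define WL_ASSOC_LEVEL	0x0004

int main(void)
{
	unsigned int iw_msg_level = WL_ERROR_LEVEL;		/* driver default above */

	iw_msg_level |= WL_SCAN_LEVEL | WL_ASSOC_LEVEL;	/* enable extra classes */
	printf("scan logging %s\n", (iw_msg_level & WL_SCAN_LEVEL) ? "on" : "off");
	return 0;
}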
#include <wl_iw.h>
#ifdef BCMWAPI_WPI
extern bool wl_iw_conn_status_str(uint32 event_type, uint32 status,
uint32 reason, char* stringBuf, uint buflen);
-uint iw_msg_level = WL_ERROR_VAL;
-
#define MAX_WLIW_IOCTL_LEN 1024
/* IOCTL swapping mode for Big Endian host with Little Endian dongle. Default to off */
-#define htod32(i) i
-#define htod16(i) i
-#define dtoh32(i) i
-#define dtoh16(i) i
-#define htodchanspec(i) i
-#define dtohchanspec(i) i
+#define htod32(i) (i)
+#define htod16(i) (i)
+#define dtoh32(i) (i)
+#define dtoh16(i) (i)
+#define htodchanspec(i) (i)
+#define dtohchanspec(i) (i)
extern struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev);
extern int dhd_wait_pend8021x(struct net_device *dev);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
-#define DAEMONIZE(a)
+#define DAEMONIZE(a) do { \
+ allow_signal(SIGKILL); \
+ allow_signal(SIGTERM); \
+ } while (0)
#elif ((LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) && \
(LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)))
#define DAEMONIZE(a) daemonize(a); \
iscan_buf_t * list_cur;
/* Thread to work on iscan */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+ struct task_struct *kthread;
+#endif
long sysioc_pid;
struct semaphore sysioc_sem;
struct completion sysioc_exited;
strcpy(ifr.ifr_name, dev->name);
ifr.ifr_data = (caddr_t) &ioc;
-#ifndef LINUX_HYBRID
- /* Causes an extraneous 'up'. If specific ioctls are failing due
- to device down, then we can investigate those ioctls.
- */
- dev_open(dev);
-#endif
-
fs = get_fs();
set_fs(get_ds());
#if defined(WL_USE_NETDEV_OPS)
error = dev_wlc_ioctl(dev, WLC_SET_PM, &pm, sizeof(pm));
return error;
}
+
+#if WIRELESS_EXT > 17
+#endif /* WIRELESS_EXT > 17 */
#endif /* WIRELESS_EXT > 12 */
int
wlc_ssid_t ssid;
iscan_info_t *iscan = g_iscan;
- WL_TRACE(("%s: SIOCSIWSCAN\n", dev->name));
+ WL_TRACE(("%s: SIOCSIWSCAN iscan=%p\n", dev->name, iscan));
/* use backup if our thread is not successful */
if ((!iscan) || (iscan->sysioc_pid < 0)) {
uint8 *ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t);
int ptr_len = bi->ie_length;
+ /* OSEN IE */
+ if ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_VS_ID)) &&
+ ie->len > WFA_OUI_LEN + 1 &&
+ !bcmp((const void *)&ie->data[0], (const void *)WFA_OUI, WFA_OUI_LEN) &&
+ ie->data[WFA_OUI_LEN] == WFA_OUI_TYPE_OSEN) {
+ iwe.cmd = IWEVGENIE;
+ iwe.u.data.length = ie->len + 2;
+ event = IWE_STREAM_ADD_POINT(info, event, end, &iwe, (char *)ie);
+ }
+ ptr = ((uint8 *)bi) + sizeof(wl_bss_info_t);
+
if ((ie = bcm_parse_tlvs(ptr, ptr_len, DOT11_MNG_RSN_ID))) {
iwe.cmd = IWEVGENIE;
iwe.u.data.length = ie->len + 2;
char *event = extra, *end = extra + dwrq->length, *value;
uint buflen = dwrq->length;
- WL_TRACE(("%s: SIOCGIWSCAN\n", dev->name));
+ WL_TRACE(("%s: %s SIOCGIWSCAN\n", __FUNCTION__, dev->name));
if (!extra)
return -EINVAL;
/* Channel */
iwe.cmd = SIOCGIWFREQ;
+
iwe.u.freq.m = wf_channel2mhz(CHSPEC_CHANNEL(bi->chanspec),
- CHSPEC_CHANNEL(bi->chanspec) <= CH_MAX_2G_CHANNEL ?
+ (CHSPEC_IS2G(bi->chanspec)) ?
WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G);
iwe.u.freq.e = 6;
event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_FREQ_LEN);
iscan_info_t *iscan = g_iscan;
iscan_buf_t * p_buf;
- WL_TRACE(("%s: SIOCGIWSCAN\n", dev->name));
+ WL_TRACE(("%s: %s SIOCGIWSCAN\n", __FUNCTION__, dev->name));
if (!extra)
return -EINVAL;
/* Channel */
iwe.cmd = SIOCGIWFREQ;
+
iwe.u.freq.m = wf_channel2mhz(CHSPEC_CHANNEL(bi->chanspec),
- CHSPEC_CHANNEL(bi->chanspec) <= CH_MAX_2G_CHANNEL ?
+ (CHSPEC_IS2G(bi->chanspec)) ?
WF_CHAN_FACTOR_2_4_G : WF_CHAN_FACTOR_5_G);
iwe.u.freq.e = 6;
event = IWE_STREAM_ADD_EVENT(info, event, end, &iwe, IW_EV_FREQ_LEN);
}
+#if WIRELESS_EXT > 17
struct {
pmkid_list_t pmkids;
pmkid_t foo[MAXPMKID-1];
dev_wlc_bufvar_set(dev, "pmkid_info", (char *)&pmkid_list, sizeof(pmkid_list));
return 0;
}
+#endif /* WIRELESS_EXT > 17 */
static int
wl_iw_get_encodeext(
WL_IW_SET_LEDDC = SIOCIWFIRSTPRIV,
WL_IW_SET_VLANMODE,
WL_IW_SET_PM,
+#if WIRELESS_EXT > 17
+#endif /* WIRELESS_EXT > 17 */
WL_IW_SET_LAST
};
wl_iw_set_leddc,
wl_iw_set_vlanmode,
wl_iw_set_pm,
+#if WIRELESS_EXT > 17
+#endif /* WIRELESS_EXT > 17 */
NULL
};
0,
"set_pm"
},
+#if WIRELESS_EXT > 17
+#endif /* WIRELESS_EXT > 17 */
{ 0, 0, 0, { 0 } }
};
{
iscan_info_t *iscan = NULL;
+ WL_TRACE(("%s: iscan=%p\n", __FUNCTION__, iscan));
+
if (!dev)
return 0;
if (!iscan)
return -ENOMEM;
memset(iscan, 0, sizeof(iscan_info_t));
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+ iscan->kthread = NULL;
+#endif
iscan->sysioc_pid = -1;
/* we only care about main interface so save a global here */
g_iscan = iscan;
sema_init(&iscan->sysioc_sem, 0);
init_completion(&iscan->sysioc_exited);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
+	iscan->kthread = kthread_run(_iscan_sysioc_thread, iscan, "iscan_sysioc");
+	if (IS_ERR(iscan->kthread)) {
+		/* kthread_run() reports failure via an ERR_PTR, not a negative pid */
+		iscan->kthread = NULL;
+		return -ENOMEM;
+	}
+	iscan->sysioc_pid = iscan->kthread->pid;
+#else
	iscan->sysioc_pid = kernel_thread(_iscan_sysioc_thread, iscan, 0);
+#endif
if (iscan->sysioc_pid < 0)
return -ENOMEM;
return 0;
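
Note: for kernels from 3.7 onwards the patch starts the iscan worker with kthread_run() instead of kernel_thread(). A minimal, self-contained sketch of that pattern (names are illustrative); kthread_run() signals failure through an ERR_PTR rather than a negative pid, so it must be checked with IS_ERR() before the task pointer is used:

	#include <linux/kthread.h>
	#include <linux/err.h>

	static int start_worker_sketch(int (*fn)(void *), void *arg,
		struct task_struct **out)
	{
		struct task_struct *task = kthread_run(fn, arg, "worker_sketch");

		if (IS_ERR(task))
			return PTR_ERR(task);   /* thread was never created */
		*out = task;
		return 0;
	}
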
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: wl_iw.h 291086 2011-10-21 01:17:24Z $
+ * $Id: wl_iw.h 488316 2014-06-30 15:22:21Z $
*/
#ifndef _wl_iw_h_
#define TXPOWER_SET_CMD "TXPOWER"
#define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5]
-#define MACSTR "%02x:%02x:%02x:%02x:%02x:%02x"
+#define MACSTR "%02X:%02X:%02X:%02X:%02X:%02X"
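
Note: MACSTR now prints the address in upper-case hex; it pairs with MAC2STR() in printf-style calls, for example:

	unsigned char ea[6] = { 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33 };   /* example address */

	printk("peer " MACSTR "\n", MAC2STR(ea));   /* prints "peer 00:90:4C:11:22:33" */
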
/* Structure to keep global parameters */
typedef struct wl_iw_extra_params {
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: dhd_linux_mon.c 280623 2011-08-30 14:49:39Z $
+ * $Id: wl_linux_mon.c 467328 2014-04-03 01:23:40Z $
*/
#include <osl.h>
int dhd_del_monitor(struct net_device *ndev)
{
int i;
- bool rollback_lock = false;
if (!ndev)
return -EINVAL;
mutex_lock(&g_monitor.lock);
for (i = 0; i < DHD_MAX_IFS; i++) {
if (g_monitor.mon_if[i].mon_ndev == ndev ||
g_monitor.mon_if[i].real_ndev == ndev) {
+
g_monitor.mon_if[i].real_ndev = NULL;
- if (rtnl_is_locked()) {
- rtnl_unlock();
- rollback_lock = true;
- }
- unregister_netdev(g_monitor.mon_if[i].mon_ndev);
+ unregister_netdevice(g_monitor.mon_if[i].mon_ndev);
free_netdev(g_monitor.mon_if[i].mon_ndev);
g_monitor.mon_if[i].mon_ndev = NULL;
g_monitor.monitor_state = MONITOR_STATE_INTERFACE_DELETED;
break;
}
}
- if (rollback_lock) {
- rtnl_lock();
- rollback_lock = false;
- }
- if (g_monitor.monitor_state !=
- MONITOR_STATE_INTERFACE_DELETED)
- MON_PRINT("interface not found in monitor IF array, is this a monitor IF? 0x%p\n",
- ndev);
+ if (g_monitor.monitor_state != MONITOR_STATE_INTERFACE_DELETED)
+ MON_PRINT("IF not found in monitor array, is this a monitor IF? 0x%p\n", ndev);
mutex_unlock(&g_monitor.lock);
return 0;
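
Note: the rollback_lock dance disappears above because the call changes from unregister_netdev() to unregister_netdevice(): the former takes the RTNL itself, the latter assumes the caller already holds it. A sketch of the two contracts (illustrative, not driver code):

	#include <linux/netdevice.h>
	#include <linux/rtnetlink.h>

	static void remove_if_rtnl_not_held(struct net_device *ndev)
	{
		unregister_netdev(ndev);        /* acquires and releases the RTNL internally */
	}

	static void remove_if_rtnl_already_held(struct net_device *ndev)
	{
		ASSERT_RTNL();                  /* caller is expected to hold rtnl_lock() */
		unregister_netdevice(ndev);     /* teardown completes once the RTNL is dropped */
	}
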
{
int i;
struct net_device *ndev;
- bool rollback_lock = false;
mutex_lock(&g_monitor.lock);
if (g_monitor.monitor_state != MONITOR_STATE_DEINIT) {
for (i = 0; i < DHD_MAX_IFS; i++) {
ndev = g_monitor.mon_if[i].mon_ndev;
if (ndev) {
- if (rtnl_is_locked()) {
- rtnl_unlock();
- rollback_lock = true;
- }
- unregister_netdev(ndev);
+ unregister_netdevice(ndev);
free_netdev(ndev);
g_monitor.mon_if[i].real_ndev = NULL;
g_monitor.mon_if[i].mon_ndev = NULL;
- if (rollback_lock) {
- rtnl_lock();
- rollback_lock = false;
- }
}
}
g_monitor.monitor_state = MONITOR_STATE_DEINIT;
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: wldev_common.c,v 1.1.4.1.2.14 2011-02-09 01:40:07 $
+ * $Id: wldev_common.c 467328 2014-04-03 01:23:40Z $
*/
#include <osl.h>
#include <wldev_common.h>
#include <bcmutils.h>
+#include <dhd_config.h>
-#define htod32(i) i
-#define htod16(i) i
-#define dtoh32(i) i
-#define dtoh16(i) i
-#define htodchanspec(i) i
-#define dtohchanspec(i) i
+#define htod32(i) (i)
+#define htod16(i) (i)
+#define dtoh32(i) (i)
+#define dtoh16(i) (i)
+#define htodchanspec(i) (i)
+#define dtohchanspec(i) (i)
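
Note: parenthesising the no-op byte-order macros guards against precedence surprises when the expansion is embedded in a larger expression, e.g.:

	#define DTOH16_BAD(i)   i
	#define DTOH16_GOOD(i)  (i)

	/* (uint16)DTOH16_BAD(a + b)  expands to (uint16)a + b   -- the cast binds to 'a' only   */
	/* (uint16)DTOH16_GOOD(a + b) expands to (uint16)(a + b) -- the intended conversion      */
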
#define WLDEV_ERROR(args) \
do { \
}
if ((error < 0) ||
- (strncmp(country_code, cspec.ccode, WLC_CNTRY_BUF_SZ) != 0)) {
+ (strncmp(country_code, cspec.country_abbrev, WLC_CNTRY_BUF_SZ) != 0)) {
if (user_enforced) {
bzero(&scbval, sizeof(scb_val_t));
cspec.rev = -1;
memcpy(cspec.country_abbrev, country_code, WLC_CNTRY_BUF_SZ);
memcpy(cspec.ccode, country_code, WLC_CNTRY_BUF_SZ);
- get_customized_country_code((char *)&cspec.country_abbrev, &cspec);
+ dhd_get_customized_country_code(dev, (char *)&cspec.country_abbrev, &cspec);
error = wldev_iovar_setbuf(dev, "country", &cspec, sizeof(cspec),
smbuf, sizeof(smbuf), NULL);
if (error < 0) {
__FUNCTION__, country_code, cspec.ccode, cspec.rev));
return error;
}
+ dhd_conf_fix_country(dhd_get_pub(dev));
+ dhd_conf_get_country(dhd_get_pub(dev), &cspec);
dhd_bus_country_set(dev, &cspec, notify);
WLDEV_ERROR(("%s: set country for %s as %s rev %d\n",
__FUNCTION__, country_code, cspec.ccode, cspec.rev));
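
Note: for context, a hedged sketch of how the country spec is populated before the platform hooks and the "country" iovar above are applied; field names follow the surrounding hunk, the rest is illustrative:

	wl_country_t cspec;

	memset(&cspec, 0, sizeof(cspec));
	cspec.rev = -1;                                        /* let firmware pick the default revision */
	memcpy(cspec.country_abbrev, country_code, WLC_CNTRY_BUF_SZ);
	memcpy(cspec.ccode, country_code, WLC_CNTRY_BUF_SZ);
	/* platform/board hooks may rewrite abbrev, ccode or rev before the iovar is pushed */
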
*
* $Copyright Open Broadcom Corporation$
*
- * $Id: wldev_common.h,v 1.1.4.1.2.14 2011-02-09 01:40:07 $
+ * $Id: wldev_common.h 467328 2014-04-03 01:23:40Z $
*/
#ifndef __WLDEV_COMMON_H__
#define __WLDEV_COMMON_H__
s32 wldev_iovar_setint_bsscfg(
struct net_device *dev, s8 *iovar, s32 val, s32 bssidx);
-extern void get_customized_country_code(char *country_iso_code, wl_country_t *cspec);
+extern int dhd_net_set_fw_path(struct net_device *dev, char *fw);
+extern int dhd_net_bus_suspend(struct net_device *dev);
+extern int dhd_net_bus_resume(struct net_device *dev, uint8 stage);
+extern int dhd_net_wifi_platform_set_power(struct net_device *dev, bool on,
+ unsigned long delay_msec);
+extern void dhd_get_customized_country_code(struct net_device *dev, char *country_iso_code,
+ wl_country_t *cspec);
extern void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec, bool notify);
extern void dhd_bus_band_set(struct net_device *dev, uint band);
extern int wldev_set_country(struct net_device *dev, char *country_code, bool notify,