Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
author David S. Miller <davem@davemloft.net>
Tue, 9 Jun 2015 03:06:56 +0000 (20:06 -0700)
committer David S. Miller <davem@davemloft.net>
Tue, 9 Jun 2015 03:06:56 +0000 (20:06 -0700)
1042 files changed:
Documentation/ABI/testing/sysfs-bus-pci-drivers-janz-cmodio [new file with mode: 0644]
Documentation/ABI/testing/sysfs-class-net
Documentation/ABI/testing/sysfs-class-net-janz-ican3 [new file with mode: 0644]
Documentation/devicetree/bindings/net/amd-xgbe-phy.txt [deleted file]
Documentation/devicetree/bindings/net/amd-xgbe.txt
Documentation/devicetree/bindings/net/ipq806x-dwmac.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/macb.txt
Documentation/devicetree/bindings/net/nxp,lpc1850-dwmac.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/phy.txt
Documentation/devicetree/bindings/net/ti,dp83867.txt [new file with mode: 0644]
Documentation/networking/bonding.txt
Documentation/networking/can.txt
Documentation/networking/dctcp.txt
Documentation/networking/ieee802154.txt
Documentation/networking/ip-sysctl.txt
Documentation/networking/pktgen.txt
Documentation/networking/switchdev.txt
Documentation/networking/tc-actions-env-rules.txt
Documentation/s390/qeth.txt
MAINTAINERS
arch/arm/net/bpf_jit_32.c
arch/x86/net/bpf_jit_comp.c
crypto/af_alg.c
drivers/atm/he.c
drivers/atm/iphase.c
drivers/bcma/driver_gpio.c
drivers/block/drbd/drbd_receiver.c
drivers/bluetooth/Kconfig
drivers/bluetooth/Makefile
drivers/bluetooth/btbcm.c
drivers/bluetooth/btintel.c
drivers/bluetooth/btmrvl_sdio.c
drivers/bluetooth/btrtl.c [new file with mode: 0644]
drivers/bluetooth/btrtl.h [new file with mode: 0644]
drivers/bluetooth/btusb.c
drivers/bluetooth/btwilink.c
drivers/bluetooth/hci_bcsp.c
drivers/clk/qcom/gcc-ipq806x.c
drivers/infiniband/hw/cxgb4/provider.c
drivers/infiniband/hw/mlx4/alias_GUID.c
drivers/infiniband/hw/mlx4/mad.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx4/mlx4_ib.h
drivers/infiniband/hw/mlx5/Kconfig
drivers/infiniband/hw/mlx5/cq.c
drivers/infiniband/hw/mlx5/mad.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/mlx5_ib.h
drivers/infiniband/hw/mlx5/mr.c
drivers/infiniband/hw/mlx5/odp.c
drivers/infiniband/hw/mlx5/qp.c
drivers/infiniband/hw/mlx5/srq.c
drivers/isdn/capi/capidrv.c
drivers/isdn/hisax/st5481_usb.c
drivers/isdn/mISDN/socket.c
drivers/mfd/janz-cmodio.c
drivers/net/Kconfig
drivers/net/Makefile
drivers/net/bonding/bond_3ad.c
drivers/net/bonding/bond_main.c
drivers/net/bonding/bond_netlink.c
drivers/net/bonding/bond_options.c
drivers/net/bonding/bond_procfs.c
drivers/net/bonding/bond_sysfs.c
drivers/net/can/flexcan.c
drivers/net/can/janz-ican3.c
drivers/net/dsa/Kconfig
drivers/net/dsa/bcm_sf2.c
drivers/net/dsa/mv88e6123_61_65.c
drivers/net/dsa/mv88e6131.c
drivers/net/dsa/mv88e6171.c
drivers/net/dsa/mv88e6352.c
drivers/net/dsa/mv88e6xxx.c
drivers/net/dsa/mv88e6xxx.h
drivers/net/ethernet/Kconfig
drivers/net/ethernet/Makefile
drivers/net/ethernet/amd/Kconfig
drivers/net/ethernet/amd/xgbe/xgbe-common.h
drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
drivers/net/ethernet/amd/xgbe/xgbe-desc.c
drivers/net/ethernet/amd/xgbe/xgbe-dev.c
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
drivers/net/ethernet/amd/xgbe/xgbe-main.c
drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
drivers/net/ethernet/amd/xgbe/xgbe.h
drivers/net/ethernet/apm/xgene/Makefile
drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
drivers/net/ethernet/apm/xgene/xgene_enet_main.c
drivers/net/ethernet/apm/xgene/xgene_enet_main.h
drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c [new file with mode: 0644]
drivers/net/ethernet/apm/xgene/xgene_enet_ring2.h [new file with mode: 0644]
drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/bcmsysport.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/cadence/macb.c
drivers/net/ethernet/cadence/macb.h
drivers/net/ethernet/cavium/Kconfig [new file with mode: 0644]
drivers/net/ethernet/cavium/Makefile [new file with mode: 0644]
drivers/net/ethernet/cavium/thunder/Makefile [new file with mode: 0644]
drivers/net/ethernet/cavium/thunder/nic.h [new file with mode: 0644]
drivers/net/ethernet/cavium/thunder/nic_main.c [new file with mode: 0644]
drivers/net/ethernet/cavium/thunder/nic_reg.h [new file with mode: 0644]
drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c [new file with mode: 0644]
drivers/net/ethernet/cavium/thunder/nicvf_main.c [new file with mode: 0644]
drivers/net/ethernet/cavium/thunder/nicvf_queues.c [new file with mode: 0644]
drivers/net/ethernet/cavium/thunder/nicvf_queues.h [new file with mode: 0644]
drivers/net/ethernet/cavium/thunder/q_struct.h [new file with mode: 0644]
drivers/net/ethernet/cavium/thunder/thunder_bgx.c [new file with mode: 0644]
drivers/net/ethernet/cavium/thunder/thunder_bgx.h [new file with mode: 0644]
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
drivers/net/ethernet/chelsio/cxgb4/sge.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
drivers/net/ethernet/chelsio/cxgb4/t4_values.h
drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
drivers/net/ethernet/cisco/enic/enic_clsf.c
drivers/net/ethernet/cisco/enic/enic_ethtool.c
drivers/net/ethernet/dec/tulip/de4x5.c
drivers/net/ethernet/dec/tulip/uli526x.c
drivers/net/ethernet/dlink/dl2k.c
drivers/net/ethernet/emulex/benet/Kconfig
drivers/net/ethernet/emulex/benet/be.h
drivers/net/ethernet/emulex/benet/be_cmds.c
drivers/net/ethernet/emulex/benet/be_cmds.h
drivers/net/ethernet/emulex/benet/be_ethtool.c
drivers/net/ethernet/emulex/benet/be_hw.h
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/emulex/benet/be_roce.c
drivers/net/ethernet/emulex/benet/be_roce.h
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/freescale/fec_ptp.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/hisilicon/hip04_eth.c
drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
drivers/net/ethernet/ibm/ibmveth.c
drivers/net/ethernet/ibm/ibmveth.h
drivers/net/ethernet/intel/e100.c
drivers/net/ethernet/intel/e1000/e1000_main.c
drivers/net/ethernet/intel/e1000e/80003es2lan.c
drivers/net/ethernet/intel/e1000e/80003es2lan.h
drivers/net/ethernet/intel/e1000e/82571.c
drivers/net/ethernet/intel/e1000e/82571.h
drivers/net/ethernet/intel/e1000e/defines.h
drivers/net/ethernet/intel/e1000e/e1000.h
drivers/net/ethernet/intel/e1000e/ethtool.c
drivers/net/ethernet/intel/e1000e/hw.h
drivers/net/ethernet/intel/e1000e/ich8lan.c
drivers/net/ethernet/intel/e1000e/ich8lan.h
drivers/net/ethernet/intel/e1000e/mac.c
drivers/net/ethernet/intel/e1000e/mac.h
drivers/net/ethernet/intel/e1000e/manage.c
drivers/net/ethernet/intel/e1000e/manage.h
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/e1000e/nvm.c
drivers/net/ethernet/intel/e1000e/nvm.h
drivers/net/ethernet/intel/e1000e/param.c
drivers/net/ethernet/intel/e1000e/phy.c
drivers/net/ethernet/intel/e1000e/phy.h
drivers/net/ethernet/intel/e1000e/ptp.c
drivers/net/ethernet/intel/e1000e/regs.h
drivers/net/ethernet/intel/i40e/i40e.h
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
drivers/net/ethernet/intel/i40e/i40e_fcoe.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_txrx.h
drivers/net/ethernet/intel/i40e/i40e_type.h
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
drivers/net/ethernet/intel/i40evf/i40e_txrx.c
drivers/net/ethernet/intel/i40evf/i40e_txrx.h
drivers/net/ethernet/intel/i40evf/i40e_type.h
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
drivers/net/ethernet/marvell/mv643xx_eth.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/mellanox/mlx4/cmd.c
drivers/net/ethernet/mellanox/mlx4/cq.c
drivers/net/ethernet/mellanox/mlx4/en_cq.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/ethernet/mellanox/mlx4/eq.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx4/mlx4.h
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
drivers/net/ethernet/mellanox/mlx5/core/Kconfig
drivers/net/ethernet/mellanox/mlx5/core/Makefile
drivers/net/ethernet/mellanox/mlx5/core/alloc.c
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/cq.c
drivers/net/ethernet/mellanox/mlx5/core/en.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/en_main.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/eq.c
drivers/net/ethernet/mellanox/mlx5/core/flow_table.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/fw.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/mcg.c
drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
drivers/net/ethernet/mellanox/mlx5/core/port.c
drivers/net/ethernet/mellanox/mlx5/core/qp.c
drivers/net/ethernet/mellanox/mlx5/core/srq.c
drivers/net/ethernet/mellanox/mlx5/core/transobj.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/transobj.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/uar.c
drivers/net/ethernet/mellanox/mlx5/core/vport.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/wq.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/wq.h [new file with mode: 0644]
drivers/net/ethernet/micrel/ksz884x.c
drivers/net/ethernet/moxa/moxart_ether.c
drivers/net/ethernet/neterion/s2io.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
drivers/net/ethernet/qlogic/qlge/qlge_main.c
drivers/net/ethernet/qualcomm/qca_spi.c
drivers/net/ethernet/rocker/rocker.c
drivers/net/ethernet/rocker/rocker.h
drivers/net/ethernet/sfc/Kconfig
drivers/net/ethernet/sfc/Makefile
drivers/net/ethernet/sfc/ef10.c
drivers/net/ethernet/sfc/ef10_sriov.c [new file with mode: 0644]
drivers/net/ethernet/sfc/ef10_sriov.h [new file with mode: 0644]
drivers/net/ethernet/sfc/efx.c
drivers/net/ethernet/sfc/efx.h
drivers/net/ethernet/sfc/enum.h
drivers/net/ethernet/sfc/ethtool.c
drivers/net/ethernet/sfc/falcon.c
drivers/net/ethernet/sfc/farch.c
drivers/net/ethernet/sfc/mcdi.c
drivers/net/ethernet/sfc/mcdi.h
drivers/net/ethernet/sfc/mcdi_pcol.h
drivers/net/ethernet/sfc/mcdi_port.c
drivers/net/ethernet/sfc/net_driver.h
drivers/net/ethernet/sfc/nic.h
drivers/net/ethernet/sfc/ptp.c
drivers/net/ethernet/sfc/siena.c
drivers/net/ethernet/sfc/siena_sriov.c
drivers/net/ethernet/sfc/siena_sriov.h [new file with mode: 0644]
drivers/net/ethernet/sfc/sriov.c [new file with mode: 0644]
drivers/net/ethernet/sfc/sriov.h [new file with mode: 0644]
drivers/net/ethernet/stmicro/stmmac/Kconfig
drivers/net/ethernet/stmicro/stmmac/Makefile
drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c [new file with mode: 0644]
drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c [new file with mode: 0644]
drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c [new file with mode: 0644]
drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
drivers/net/ethernet/stmicro/stmmac/stmmac.h
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/ti/cpsw_ale.c
drivers/net/ethernet/ti/cpsw_ale.h
drivers/net/ethernet/ti/netcp_core.c
drivers/net/ethernet/tile/tilepro.c
drivers/net/ethernet/toshiba/spider_net.c
drivers/net/ethernet/via/via-rhine.c
drivers/net/ethernet/xilinx/ll_temac_main.c
drivers/net/ethernet/xilinx/xilinx_axienet.h
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
drivers/net/fddi/skfp/srf.c
drivers/net/geneve.c [new file with mode: 0644]
drivers/net/hyperv/hyperv_net.h
drivers/net/hyperv/netvsc.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/hyperv/rndis_filter.c
drivers/net/ieee802154/Kconfig
drivers/net/ieee802154/Makefile
drivers/net/ieee802154/at86rf230.c
drivers/net/ieee802154/at86rf230.h [new file with mode: 0644]
drivers/net/ieee802154/atusb.c [new file with mode: 0644]
drivers/net/ieee802154/atusb.h [new file with mode: 0644]
drivers/net/ieee802154/cc2520.c
drivers/net/ieee802154/fakelb.c
drivers/net/ieee802154/mrf24j40.c
drivers/net/ipvlan/ipvlan.h
drivers/net/ipvlan/ipvlan_core.c
drivers/net/ipvlan/ipvlan_main.c
drivers/net/irda/irda-usb.c
drivers/net/macvtap.c
drivers/net/phy/Kconfig
drivers/net/phy/Makefile
drivers/net/phy/amd-xgbe-phy.c [deleted file]
drivers/net/phy/bcm7xxx.c
drivers/net/phy/dp83867.c [new file with mode: 0644]
drivers/net/phy/icplus.c
drivers/net/phy/marvell.c
drivers/net/phy/mdio-bitbang.c
drivers/net/phy/mdio-gpio.c
drivers/net/phy/micrel.c
drivers/net/phy/phy.c
drivers/net/ppp/pppoe.c
drivers/net/ppp/pppox.c
drivers/net/ppp/pptp.c
drivers/net/team/team.c
drivers/net/tun.c
drivers/net/vxlan.c
drivers/net/wan/cosa.c
drivers/net/wan/dscc4.c
drivers/net/wireless/Kconfig
drivers/net/wireless/Makefile
drivers/net/wireless/adm8211.c
drivers/net/wireless/at76c50x-usb.h
drivers/net/wireless/ath/ar5523/ar5523.c
drivers/net/wireless/ath/ath.h
drivers/net/wireless/ath/ath10k/Makefile
drivers/net/wireless/ath/ath10k/core.c
drivers/net/wireless/ath/ath10k/core.h
drivers/net/wireless/ath/ath10k/debug.c
drivers/net/wireless/ath/ath10k/debug.h
drivers/net/wireless/ath/ath10k/htc.c
drivers/net/wireless/ath/ath10k/htt.c
drivers/net/wireless/ath/ath10k/htt.h
drivers/net/wireless/ath/ath10k/htt_rx.c
drivers/net/wireless/ath/ath10k/htt_tx.c
drivers/net/wireless/ath/ath10k/hw.h
drivers/net/wireless/ath/ath10k/mac.c
drivers/net/wireless/ath/ath10k/mac.h
drivers/net/wireless/ath/ath10k/p2p.c [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/p2p.h [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/pci.c
drivers/net/wireless/ath/ath10k/pci.h
drivers/net/wireless/ath/ath10k/rx_desc.h
drivers/net/wireless/ath/ath10k/spectral.c
drivers/net/wireless/ath/ath10k/thermal.c
drivers/net/wireless/ath/ath10k/thermal.h
drivers/net/wireless/ath/ath10k/trace.h
drivers/net/wireless/ath/ath10k/txrx.c
drivers/net/wireless/ath/ath10k/wmi-ops.h
drivers/net/wireless/ath/ath10k/wmi-tlv.c
drivers/net/wireless/ath/ath10k/wmi-tlv.h
drivers/net/wireless/ath/ath10k/wmi.c
drivers/net/wireless/ath/ath10k/wmi.h
drivers/net/wireless/ath/ath10k/wow.c [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/wow.h [new file with mode: 0644]
drivers/net/wireless/ath/ath5k/ath5k.h
drivers/net/wireless/ath/ath5k/led.c
drivers/net/wireless/ath/ath5k/mac80211-ops.c
drivers/net/wireless/ath/ath6kl/cfg80211.c
drivers/net/wireless/ath/ath9k/ar5008_phy.c
drivers/net/wireless/ath/ath9k/ar9002_phy.c
drivers/net/wireless/ath/ath9k/common-spectral.c
drivers/net/wireless/ath/ath9k/common-spectral.h
drivers/net/wireless/ath/ath9k/htc.h
drivers/net/wireless/ath/ath9k/htc_drv_init.c
drivers/net/wireless/ath/ath9k/htc_drv_main.c
drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
drivers/net/wireless/ath/ath9k/hw.h
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/ath9k/recv.c
drivers/net/wireless/ath/carl9170/fw.c
drivers/net/wireless/ath/carl9170/led.c
drivers/net/wireless/ath/carl9170/main.c
drivers/net/wireless/ath/carl9170/usb.c
drivers/net/wireless/ath/dfs_pattern_detector.c
drivers/net/wireless/ath/dfs_pattern_detector.h
drivers/net/wireless/ath/dfs_pri_detector.c
drivers/net/wireless/ath/wil6210/Makefile
drivers/net/wireless/ath/wil6210/cfg80211.c
drivers/net/wireless/ath/wil6210/debugfs.c
drivers/net/wireless/ath/wil6210/main.c
drivers/net/wireless/ath/wil6210/netdev.c
drivers/net/wireless/ath/wil6210/pcie_bus.c
drivers/net/wireless/ath/wil6210/pmc.c [new file with mode: 0644]
drivers/net/wireless/ath/wil6210/pmc.h [new file with mode: 0644]
drivers/net/wireless/ath/wil6210/txrx.c
drivers/net/wireless/ath/wil6210/txrx.h
drivers/net/wireless/ath/wil6210/wil6210.h
drivers/net/wireless/ath/wil6210/wmi.c
drivers/net/wireless/ath/wil6210/wmi.h
drivers/net/wireless/b43/main.c
drivers/net/wireless/b43legacy/main.c
drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
drivers/net/wireless/brcm80211/brcmfmac/chip.c
drivers/net/wireless/brcm80211/brcmfmac/commonring.c
drivers/net/wireless/brcm80211/brcmfmac/feature.c
drivers/net/wireless/brcm80211/brcmfmac/feature.h
drivers/net/wireless/brcm80211/brcmfmac/firmware.c
drivers/net/wireless/brcm80211/brcmfmac/firmware.h
drivers/net/wireless/brcm80211/brcmfmac/flowring.c
drivers/net/wireless/brcm80211/brcmfmac/flowring.h
drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
drivers/net/wireless/brcm80211/brcmfmac/of.c
drivers/net/wireless/brcm80211/brcmfmac/pcie.c
drivers/net/wireless/brcm80211/brcmfmac/sdio.c
drivers/net/wireless/brcm80211/brcmfmac/usb.c
drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
drivers/net/wireless/brcm80211/brcmsmac/main.c
drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
drivers/net/wireless/cw1200/sta.c
drivers/net/wireless/iwlegacy/3945-mac.c
drivers/net/wireless/iwlegacy/4965-mac.c
drivers/net/wireless/iwlwifi/Kconfig
drivers/net/wireless/iwlwifi/Makefile
drivers/net/wireless/iwlwifi/dvm/mac80211.c
drivers/net/wireless/iwlwifi/iwl-7000.c
drivers/net/wireless/iwlwifi/iwl-8000.c
drivers/net/wireless/iwlwifi/iwl-config.h
drivers/net/wireless/iwlwifi/iwl-devtrace-iwlwifi.h
drivers/net/wireless/iwlwifi/iwl-drv.c
drivers/net/wireless/iwlwifi/iwl-fh.h
drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h
drivers/net/wireless/iwlwifi/iwl-fw-file.h
drivers/net/wireless/iwlwifi/iwl-fw.h
drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
drivers/net/wireless/iwlwifi/iwl-prph.h
drivers/net/wireless/iwlwifi/iwl-trans.c [new file with mode: 0644]
drivers/net/wireless/iwlwifi/iwl-trans.h
drivers/net/wireless/iwlwifi/mvm/coex.c
drivers/net/wireless/iwlwifi/mvm/d3.c
drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
drivers/net/wireless/iwlwifi/mvm/debugfs.c
drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
drivers/net/wireless/iwlwifi/mvm/fw-api.h
drivers/net/wireless/iwlwifi/mvm/fw.c
drivers/net/wireless/iwlwifi/mvm/mac80211.c
drivers/net/wireless/iwlwifi/mvm/mvm.h
drivers/net/wireless/iwlwifi/mvm/nvm.c
drivers/net/wireless/iwlwifi/mvm/ops.c
drivers/net/wireless/iwlwifi/mvm/rs.c
drivers/net/wireless/iwlwifi/mvm/rs.h
drivers/net/wireless/iwlwifi/mvm/rx.c
drivers/net/wireless/iwlwifi/mvm/scan.c
drivers/net/wireless/iwlwifi/mvm/sta.c
drivers/net/wireless/iwlwifi/mvm/time-event.c
drivers/net/wireless/iwlwifi/mvm/tt.c
drivers/net/wireless/iwlwifi/mvm/tx.c
drivers/net/wireless/iwlwifi/mvm/utils.c
drivers/net/wireless/iwlwifi/pcie/drv.c
drivers/net/wireless/iwlwifi/pcie/internal.h
drivers/net/wireless/iwlwifi/pcie/rx.c
drivers/net/wireless/iwlwifi/pcie/trans.c
drivers/net/wireless/iwlwifi/pcie/tx.c
drivers/net/wireless/libertas/cfg.c
drivers/net/wireless/libertas/cfg.h
drivers/net/wireless/libertas/cmd.h
drivers/net/wireless/libertas/cmdresp.c
drivers/net/wireless/libertas_tf/main.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/mediatek/Kconfig [new file with mode: 0644]
drivers/net/wireless/mediatek/Makefile [new file with mode: 0644]
drivers/net/wireless/mediatek/mt7601u/Kconfig [new file with mode: 0644]
drivers/net/wireless/mediatek/mt7601u/Makefile [new file with mode: 0644]
drivers/net/wireless/mediatek/mt7601u/core.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt7601u/debugfs.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt7601u/dma.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt7601u/dma.h [new file with mode: 0644]
drivers/net/wireless/mediatek/mt7601u/eeprom.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt7601u/eeprom.h [new file with mode: 0644]
drivers/net/wireless/mediatek/mt7601u/init.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt7601u/initvals.h [new file with mode: 0644]
drivers/net/wireless/mediatek/mt7601u/initvals_phy.h [new file with mode: 0644]
drivers/net/wireless/mediatek/mt7601u/mac.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt7601u/mac.h [new file with mode: 0644]
drivers/net/wireless/mediatek/mt7601u/main.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt7601u/mcu.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt7601u/mcu.h [new file with mode: 0644]
drivers/net/wireless/mediatek/mt7601u/mt7601u.h [new file with mode: 0644]
drivers/net/wireless/mediatek/mt7601u/phy.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt7601u/regs.h [new file with mode: 0644]
drivers/net/wireless/mediatek/mt7601u/trace.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt7601u/trace.h [new file with mode: 0644]
drivers/net/wireless/mediatek/mt7601u/tx.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt7601u/usb.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt7601u/usb.h [new file with mode: 0644]
drivers/net/wireless/mediatek/mt7601u/util.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt7601u/util.h [new file with mode: 0644]
drivers/net/wireless/mwifiex/11h.c
drivers/net/wireless/mwifiex/11n.c
drivers/net/wireless/mwifiex/11n_aggr.c
drivers/net/wireless/mwifiex/11n_rxreorder.c
drivers/net/wireless/mwifiex/Kconfig
drivers/net/wireless/mwifiex/README
drivers/net/wireless/mwifiex/cfg80211.c
drivers/net/wireless/mwifiex/cfp.c
drivers/net/wireless/mwifiex/cmdevt.c
drivers/net/wireless/mwifiex/debugfs.c
drivers/net/wireless/mwifiex/ethtool.c
drivers/net/wireless/mwifiex/fw.h
drivers/net/wireless/mwifiex/init.c
drivers/net/wireless/mwifiex/ioctl.h
drivers/net/wireless/mwifiex/join.c
drivers/net/wireless/mwifiex/main.c
drivers/net/wireless/mwifiex/main.h
drivers/net/wireless/mwifiex/pcie.c
drivers/net/wireless/mwifiex/scan.c
drivers/net/wireless/mwifiex/sdio.c
drivers/net/wireless/mwifiex/sta_cmd.c
drivers/net/wireless/mwifiex/sta_cmdresp.c
drivers/net/wireless/mwifiex/sta_event.c
drivers/net/wireless/mwifiex/sta_ioctl.c
drivers/net/wireless/mwifiex/sta_rx.c
drivers/net/wireless/mwifiex/sta_tx.c
drivers/net/wireless/mwifiex/tdls.c
drivers/net/wireless/mwifiex/txrx.c
drivers/net/wireless/mwifiex/uap_cmd.c
drivers/net/wireless/mwifiex/uap_event.c
drivers/net/wireless/mwifiex/uap_txrx.c
drivers/net/wireless/mwifiex/usb.c
drivers/net/wireless/mwifiex/util.c
drivers/net/wireless/mwifiex/wmm.c
drivers/net/wireless/mwl8k.c
drivers/net/wireless/p54/fwio.c
drivers/net/wireless/p54/led.c
drivers/net/wireless/p54/main.c
drivers/net/wireless/ray_cs.c
drivers/net/wireless/rndis_wlan.c
drivers/net/wireless/rt2x00/rt2400pci.c
drivers/net/wireless/rt2x00/rt2500pci.c
drivers/net/wireless/rt2x00/rt2500usb.c
drivers/net/wireless/rt2x00/rt2800lib.c
drivers/net/wireless/rt2x00/rt2800lib.h
drivers/net/wireless/rt2x00/rt2800pci.c
drivers/net/wireless/rt2x00/rt2800soc.c
drivers/net/wireless/rt2x00/rt2800usb.c
drivers/net/wireless/rt2x00/rt2x00mac.c
drivers/net/wireless/rt2x00/rt61pci.c
drivers/net/wireless/rt2x00/rt73usb.c
drivers/net/wireless/rtlwifi/Kconfig
drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.c
drivers/net/wireless/rtlwifi/core.h
drivers/net/wireless/rtlwifi/regd.c
drivers/net/wireless/rtlwifi/regd.h
drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.c
drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.h
drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
drivers/net/wireless/rtlwifi/rtl8192cu/mac.h
drivers/net/wireless/rtlwifi/rtl8192ee/fw.c
drivers/net/wireless/rtlwifi/rtl8192ee/hw.c
drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
drivers/net/wireless/rtlwifi/rtl8723be/fw.c
drivers/net/wireless/rtlwifi/rtl8723be/hw.c
drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
drivers/net/wireless/ti/wl1251/main.c
drivers/net/wireless/ti/wl18xx/main.c
drivers/net/wireless/ti/wl18xx/reg.h
drivers/net/wireless/ti/wlcore/main.c
drivers/net/wireless/zd1211rw/zd_mac.c
drivers/net/xen-netback/netback.c
drivers/net/xen-netfront.c
drivers/of/of_mdio.c
drivers/s390/net/lcs.c
drivers/s390/net/qeth_core.h
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_core_mpc.c
drivers/s390/net/qeth_core_mpc.h
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l2_sys.c
drivers/s390/net/qeth_l3_main.c
drivers/scsi/cxgbi/libcxgbi.c
drivers/scsi/qla2xxx/tcm_qla2xxx.c
drivers/scsi/qla4xxx/ql4_def.h
drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c
drivers/staging/vt6655/device_main.c
drivers/staging/vt6656/main_usb.c
drivers/staging/wlan-ng/cfg80211.c
drivers/target/iscsi/iscsi_target.c
drivers/target/target_core_file.c
drivers/target/target_core_pr.c
drivers/target/target_core_transport.c
drivers/target/target_core_user.c
drivers/vhost/scsi.c
fs/afs/rxrpc.c
fs/dlm/lowcomms.c
fs/splice.c
include/dt-bindings/clock/qcom,gcc-ipq806x.h
include/dt-bindings/net/ti-dp83867.h [new file with mode: 0644]
include/dt-bindings/reset/qcom,gcc-ipq806x.h
include/linux/bcma/bcma.h
include/linux/bpf.h
include/linux/etherdevice.h
include/linux/filter.h
include/linux/gfp.h
include/linux/if_pppox.h
include/linux/if_vlan.h
include/linux/igmp.h
include/linux/mdio-gpio.h
include/linux/mlx4/device.h
include/linux/mlx5/cq.h
include/linux/mlx5/device.h
include/linux/mlx5/driver.h
include/linux/mlx5/flow_table.h [new file with mode: 0644]
include/linux/mlx5/mlx5_ifc.h
include/linux/mlx5/qp.h
include/linux/mlx5/vport.h [new file with mode: 0644]
include/linux/mm_types.h
include/linux/net.h
include/linux/netdev_features.h
include/linux/netdevice.h
include/linux/netfilter.h
include/linux/netfilter/ipset/ip_set.h
include/linux/netfilter/x_tables.h
include/linux/netfilter_ingress.h [new file with mode: 0644]
include/linux/netlink.h
include/linux/pci_ids.h
include/linux/phy.h
include/linux/rtnetlink.h
include/linux/skbuff.h
include/linux/stmmac.h
include/linux/tcp.h
include/net/addrconf.h
include/net/af_vsock.h
include/net/bond_options.h
include/net/bonding.h
include/net/cfg80211.h
include/net/cfg802154.h
include/net/checksum.h
include/net/codel.h
include/net/dst.h
include/net/flow_dissector.h [new file with mode: 0644]
include/net/flow_keys.h [deleted file]
include/net/geneve.h
include/net/ieee802154_netdev.h
include/net/inet_common.h
include/net/inet_frag.h
include/net/inet_hashtables.h
include/net/inet_sock.h
include/net/ip.h
include/net/ip6_fib.h
include/net/ip6_route.h
include/net/ipv6.h
include/net/llc_conn.h
include/net/mac80211.h
include/net/mac802154.h
include/net/net_namespace.h
include/net/netfilter/nf_tables.h
include/net/netns/ipv4.h
include/net/netns/ipv6.h
include/net/netns/nftables.h
include/net/nl802154.h
include/net/request_sock.h
include/net/sch_generic.h
include/net/sock.h
include/net/switchdev.h
include/net/tcp.h
include/uapi/linux/bpf.h
include/uapi/linux/can.h
include/uapi/linux/ethtool.h
include/uapi/linux/if_link.h
include/uapi/linux/if_packet.h
include/uapi/linux/in.h
include/uapi/linux/ipv6_route.h
include/uapi/linux/netfilter.h
include/uapi/linux/netfilter/nf_tables.h
include/uapi/linux/netlink.h
include/uapi/linux/nl80211.h
include/uapi/linux/openvswitch.h
include/uapi/linux/pkt_cls.h
include/uapi/linux/pkt_sched.h
include/uapi/linux/rds.h
include/uapi/linux/snmp.h
include/uapi/linux/tcp.h
kernel/bpf/arraymap.c
kernel/bpf/core.c
kernel/bpf/helpers.c
kernel/bpf/syscall.c
kernel/bpf/verifier.c
kernel/seccomp.c
kernel/trace/bpf_trace.c
lib/rhashtable.c
lib/test_bpf.c
lib/test_rhashtable.c
mm/page_alloc.c
net/8021q/vlan.c
net/Kconfig
net/appletalk/ddp.c
net/atm/common.c
net/atm/common.h
net/atm/pvc.c
net/atm/svc.c
net/ax25/af_ax25.c
net/batman-adv/Makefile
net/batman-adv/bat_algo.h
net/batman-adv/bat_iv_ogm.c
net/batman-adv/bitarray.c
net/batman-adv/bitarray.h
net/batman-adv/bridge_loop_avoidance.c
net/batman-adv/bridge_loop_avoidance.h
net/batman-adv/debugfs.c
net/batman-adv/debugfs.h
net/batman-adv/distributed-arp-table.c
net/batman-adv/distributed-arp-table.h
net/batman-adv/fragmentation.c
net/batman-adv/fragmentation.h
net/batman-adv/gateway_client.c
net/batman-adv/gateway_client.h
net/batman-adv/gateway_common.c
net/batman-adv/gateway_common.h
net/batman-adv/hard-interface.c
net/batman-adv/hard-interface.h
net/batman-adv/hash.c
net/batman-adv/hash.h
net/batman-adv/icmp_socket.c
net/batman-adv/icmp_socket.h
net/batman-adv/main.c
net/batman-adv/main.h
net/batman-adv/multicast.c
net/batman-adv/multicast.h
net/batman-adv/network-coding.c
net/batman-adv/network-coding.h
net/batman-adv/originator.c
net/batman-adv/originator.h
net/batman-adv/packet.h
net/batman-adv/routing.c
net/batman-adv/routing.h
net/batman-adv/send.c
net/batman-adv/send.h
net/batman-adv/soft-interface.c
net/batman-adv/soft-interface.h
net/batman-adv/sysfs.c
net/batman-adv/sysfs.h
net/batman-adv/translation-table.c
net/batman-adv/translation-table.h
net/batman-adv/types.h
net/bluetooth/6lowpan.c
net/bluetooth/bnep/sock.c
net/bluetooth/cmtp/sock.c
net/bluetooth/hci_core.c
net/bluetooth/hci_sock.c
net/bluetooth/hidp/sock.c
net/bluetooth/l2cap_sock.c
net/bluetooth/mgmt.c
net/bluetooth/rfcomm/core.c
net/bluetooth/rfcomm/sock.c
net/bluetooth/sco.c
net/bluetooth/smp.c
net/bridge/br.c
net/bridge/br_fdb.c
net/bridge/br_multicast.c
net/bridge/br_netfilter.c
net/bridge/br_netlink.c
net/bridge/br_private.h
net/bridge/br_stp.c
net/bridge/netfilter/ebt_stp.c
net/bridge/netfilter/ebtables.c
net/caif/caif_socket.c
net/can/af_can.c
net/ceph/messenger.c
net/core/dev.c
net/core/ethtool.c
net/core/filter.c
net/core/flow_dissector.c
net/core/neighbour.c
net/core/net-sysfs.c
net/core/net_namespace.c
net/core/netevent.c
net/core/pktgen.c
net/core/rtnetlink.c
net/core/secure_seq.c
net/core/skbuff.c
net/core/sock.c
net/core/stream.c
net/core/utils.c
net/decnet/af_decnet.c
net/dsa/slave.c
net/ethernet/eth.c
net/ieee802154/6lowpan/core.c
net/ieee802154/6lowpan/tx.c
net/ieee802154/core.c
net/ieee802154/nl-mac.c
net/ieee802154/nl-phy.c
net/ieee802154/nl802154.c
net/ieee802154/rdev-ops.h
net/ieee802154/socket.c
net/ieee802154/trace.h
net/ipv4/Kconfig
net/ipv4/Makefile
net/ipv4/af_inet.c
net/ipv4/fib_semantics.c
net/ipv4/fib_trie.c
net/ipv4/geneve.c [deleted file]
net/ipv4/geneve_core.c [new file with mode: 0644]
net/ipv4/igmp.c
net/ipv4/inet_connection_sock.c
net/ipv4/inet_hashtables.c
net/ipv4/inet_timewait_sock.c
net/ipv4/ip_forward.c
net/ipv4/ip_fragment.c
net/ipv4/ip_output.c
net/ipv4/ip_sockglue.c
net/ipv4/ip_tunnel_core.c
net/ipv4/ipip.c
net/ipv4/netfilter/ip_tables.c
net/ipv4/netfilter/ipt_CLUSTERIP.c
net/ipv4/netfilter/ipt_SYNPROXY.c
net/ipv4/proc.c
net/ipv4/route.c
net/ipv4/syncookies.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/tcp.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_minisocks.c
net/ipv4/tcp_output.c
net/ipv4/tcp_timer.c
net/ipv4/udp_tunnel.c
net/ipv6/Makefile
net/ipv6/addrconf.c
net/ipv6/af_inet6.c
net/ipv6/icmp.c
net/ipv6/inet6_hashtables.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_flowlabel.c
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6_udp_tunnel.c
net/ipv6/mcast_snoop.c [new file with mode: 0644]
net/ipv6/ndisc.c
net/ipv6/netfilter/ip6_tables.c
net/ipv6/output_core.c
net/ipv6/raw.c
net/ipv6/route.c
net/ipv6/syncookies.c
net/ipv6/sysctl_net_ipv6.c
net/ipv6/tcp_ipv6.c
net/ipv6/xfrm6_policy.c
net/ipx/af_ipx.c
net/irda/af_irda.c
net/irda/timer.c
net/iucv/af_iucv.c
net/key/af_key.c
net/l2tp/l2tp_core.c
net/l2tp/l2tp_ppp.c
net/llc/af_llc.c
net/llc/llc_conn.c
net/mac80211/Kconfig
net/mac80211/cfg.c
net/mac80211/chan.c
net/mac80211/debugfs.c
net/mac80211/debugfs_sta.c
net/mac80211/driver-ops.h
net/mac80211/ethtool.c
net/mac80211/ibss.c
net/mac80211/ieee80211_i.h
net/mac80211/iface.c
net/mac80211/key.c
net/mac80211/key.h
net/mac80211/led.c
net/mac80211/led.h
net/mac80211/main.c
net/mac80211/mesh_plink.c
net/mac80211/mlme.c
net/mac80211/rate.c
net/mac80211/rate.h
net/mac80211/rx.c
net/mac80211/sta_info.c
net/mac80211/sta_info.h
net/mac80211/status.c
net/mac80211/tdls.c
net/mac80211/trace.h
net/mac80211/tx.c
net/mac802154/Kconfig
net/mac802154/cfg.c
net/mac802154/driver-ops.h
net/mac802154/ieee802154_i.h
net/mac802154/iface.c
net/mac802154/mac_cmd.c
net/mac802154/main.c
net/mac802154/mib.c
net/mac802154/rx.c
net/mac802154/util.c
net/mpls/mpls_gso.c
net/netfilter/Kconfig
net/netfilter/Makefile
net/netfilter/core.c
net/netfilter/ipset/ip_set_bitmap_ip.c
net/netfilter/ipset/ip_set_bitmap_ipmac.c
net/netfilter/ipset/ip_set_bitmap_port.c
net/netfilter/ipset/ip_set_core.c
net/netfilter/ipset/ip_set_getport.c
net/netfilter/ipset/ip_set_hash_gen.h
net/netfilter/ipset/ip_set_hash_ip.c
net/netfilter/ipset/ip_set_hash_ipmark.c
net/netfilter/ipset/ip_set_hash_ipport.c
net/netfilter/ipset/ip_set_hash_ipportip.c
net/netfilter/ipset/ip_set_hash_ipportnet.c
net/netfilter/ipset/ip_set_hash_mac.c
net/netfilter/ipset/ip_set_hash_net.c
net/netfilter/ipset/ip_set_hash_netiface.c
net/netfilter/ipset/ip_set_hash_netnet.c
net/netfilter/ipset/ip_set_hash_netport.c
net/netfilter/ipset/ip_set_hash_netportnet.c
net/netfilter/ipset/ip_set_list_set.c
net/netfilter/ipvs/ip_vs_sync.c
net/netfilter/ipvs/ip_vs_xmit.c
net/netfilter/nf_conntrack_h323_main.c
net/netfilter/nf_tables_api.c
net/netfilter/nf_tables_netdev.c [new file with mode: 0644]
net/netfilter/nfnetlink_queue_core.c
net/netfilter/nft_compat.c
net/netfilter/x_tables.c
net/netfilter/xt_TCPMSS.c
net/netfilter/xt_TEE.c
net/netfilter/xt_addrtype.c
net/netfilter/xt_mark.c
net/netfilter/xt_set.c
net/netlink/af_netlink.c
net/netrom/af_netrom.c
net/nfc/af_nfc.c
net/nfc/llcp.h
net/nfc/llcp_core.c
net/nfc/llcp_sock.c
net/nfc/nfc.h
net/nfc/rawsock.c
net/openvswitch/Kconfig
net/openvswitch/actions.c
net/openvswitch/datapath.c
net/openvswitch/datapath.h
net/openvswitch/flow.c
net/openvswitch/flow_netlink.c
net/openvswitch/vport-geneve.c
net/packet/af_packet.c
net/packet/internal.h
net/phonet/af_phonet.c
net/phonet/pep.c
net/rds/af_rds.c
net/rds/bind.c
net/rds/rds.h
net/rds/transport.c
net/rfkill/core.c
net/rfkill/rfkill-gpio.c
net/rose/af_rose.c
net/rxrpc/af_rxrpc.c
net/rxrpc/ar-local.c
net/sched/Kconfig
net/sched/Makefile
net/sched/act_api.c
net/sched/act_bpf.c
net/sched/act_mirred.c
net/sched/act_pedit.c
net/sched/cls_bpf.c
net/sched/cls_flow.c
net/sched/cls_flower.c [new file with mode: 0644]
net/sched/sch_api.c
net/sched/sch_choke.c
net/sched/sch_codel.c
net/sched/sch_fq_codel.c
net/sched/sch_gred.c
net/sched/sch_hhf.c
net/sched/sch_ingress.c
net/sched/sch_netem.c
net/sched/sch_sfb.c
net/sched/sch_sfq.c
net/sctp/ipv6.c
net/sctp/protocol.c
net/socket.c
net/switchdev/switchdev.c
net/tipc/addr.c
net/tipc/addr.h
net/tipc/bcast.c
net/tipc/bcast.h
net/tipc/bearer.c
net/tipc/bearer.h
net/tipc/core.c
net/tipc/core.h
net/tipc/link.c
net/tipc/link.h
net/tipc/msg.c
net/tipc/msg.h
net/tipc/name_table.c
net/tipc/net.c
net/tipc/netlink_compat.c
net/tipc/node.c
net/tipc/node.h
net/tipc/server.c
net/tipc/socket.c
net/tipc/subscr.c
net/tipc/subscr.h
net/unix/af_unix.c
net/vmw_vsock/af_vsock.c
net/vmw_vsock/vmci_transport.c
net/wireless/chan.c
net/wireless/core.h
net/wireless/nl80211.c
net/wireless/reg.c
net/wireless/sme.c
net/wireless/sysfs.c
net/wireless/util.c
net/x25/af_x25.c
net/xfrm/xfrm_input.c
net/xfrm/xfrm_output.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_state.c
samples/bpf/Makefile
samples/bpf/bpf_helpers.h
samples/bpf/bpf_load.c
samples/bpf/sockex3_kern.c [new file with mode: 0644]
samples/bpf/sockex3_user.c [new file with mode: 0644]
samples/bpf/tcbpf1_kern.c
samples/bpf/test_verifier.c
samples/bpf/tracex5_kern.c [new file with mode: 0644]
samples/bpf/tracex5_user.c [new file with mode: 0644]
samples/pktgen/README.rst [new file with mode: 0644]
samples/pktgen/functions.sh [new file with mode: 0644]
samples/pktgen/parameters.sh [new file with mode: 0644]
samples/pktgen/pktgen.conf-1-1 [deleted file]
samples/pktgen/pktgen.conf-2-1 [deleted file]
samples/pktgen/pktgen.conf-2-2 [deleted file]
samples/pktgen/pktgen_bench_xmit_mode_netif_receive.sh [new file with mode: 0755]
samples/pktgen/pktgen_sample01_simple.sh [new file with mode: 0755]
samples/pktgen/pktgen_sample02_multiqueue.sh [new file with mode: 0755]
samples/pktgen/pktgen_sample03_burst_single_flow.sh [new file with mode: 0755]
tools/testing/selftests/net/psock_fanout.c

diff --git a/Documentation/ABI/testing/sysfs-bus-pci-drivers-janz-cmodio b/Documentation/ABI/testing/sysfs-bus-pci-drivers-janz-cmodio
new file mode 100644 (file)
index 0000000..4d08f28
--- /dev/null
@@ -0,0 +1,8 @@
+What:          /sys/bus/pci/drivers/janz-cmodio/.../modulbus_number
+Date:          May 2010
+KernelVersion: 2.6.35
+Contact:       Ira W. Snyder <ira.snyder@gmail.com>
+Description:
+               Value representing the HEX switch S2 of the Janz carrier board CMOD-IO or CAN-PCI2
+
+               Read-only: value of the configuration switch (0..15)
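
A minimal shell sketch of reading the attribute above; the PCI address
0000:02:00.0 is hypothetical and will differ per system:

    # print the HEX switch S2 setting (0..15) of the CMOD-IO carrier board
    cat /sys/bus/pci/drivers/janz-cmodio/0000:02:00.0/modulbus_number
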
diff --git a/Documentation/ABI/testing/sysfs-class-net b/Documentation/ABI/testing/sysfs-class-net
index 5ecfd72ba684c35acce51273d4370258b147ffb8..668604fc8e062bc7d7d4fc363aec6db48b00d6e8 100644 (file)
@@ -39,6 +39,25 @@ Description:
                Format is a string, e.g: 00:11:22:33:44:55 for an Ethernet MAC
                address.
 
+What:          /sys/class/net/<bridge iface>/bridge/group_fwd_mask
+Date:          January 2012
+KernelVersion: 3.2
+Contact:       netdev@vger.kernel.org
+Description:
+               Bitmask to allow forwarding of link local frames with address
+               01-80-C2-00-00-0X on a bridge device. Only values that set bits
+               not matching BR_GROUPFWD_RESTRICTED in net/bridge/br_private.h
+               are allowed.
+               Default value 0 does not forward any link local frames.
+
+               Restricted bits:
+               0: 01-80-C2-00-00-00 Bridge Group Address used for STP
+               1: 01-80-C2-00-00-01 (MAC Control) 802.3 used for MAC PAUSE
+               2: 01-80-C2-00-00-02 (Link Aggregation) 802.3ad
+
+               Any values not setting these bits can be used. Take special
+               care when forwarding control frames, e.g. 802.1X-PAE or LLDP.
+
 What:          /sys/class/net/<iface>/broadcast
 Date:          April 2005
 KernelVersion: 2.6.12
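
A short sketch of the semantics described above, assuming a bridge named
br0: bit N of group_fwd_mask enables forwarding of 01-80-C2-00-00-0N, so
LLDP (01-80-C2-00-00-0E, bit 14) maps to the value 0x4000:

    # allow the bridge to forward LLDP frames (bit 14 -> 0x4000 = 16384)
    echo 16384 > /sys/class/net/br0/bridge/group_fwd_mask

    # values that set a restricted bit (0, 1 or 2) are rejected with EINVAL
    echo 1 > /sys/class/net/br0/bridge/group_fwd_mask
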
diff --git a/Documentation/ABI/testing/sysfs-class-net-janz-ican3 b/Documentation/ABI/testing/sysfs-class-net-janz-ican3
new file mode 100644 (file)
index 0000000..fdbc03a
--- /dev/null
@@ -0,0 +1,19 @@
+What:          /sys/class/net/<iface>/termination
+Date:          May 2010
+KernelVersion: 2.6.35
+Contact:       Ira W. Snyder <ira.snyder@gmail.com>
+Description:
+               Value representing the CAN bus termination
+
+               Default: 1 (termination active)
+               Reading: get the current termination state
+               Writing: set the termination state (0=no termination, 1=termination active)
+
+What:          /sys/class/net/<iface>/fwinfo
+Date:          May 2015
+KernelVersion: 3.19
+Contact:       Andreas Gröger <andreas24groeger@gmail.com>
+Description:
+               Firmware stamp of the ICAN3 module
+               Read-only: 32 byte string identification of the ICAN3 module
+               (known values: "JANZ-ICAN3 ICANOS 1.xx", "JANZ-ICAN3 CAL/CANopen 1.xx")
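
A usage sketch for the two attributes above, assuming the ICAN3 module is
registered as can0:

    # query, then enable, the on-board CAN bus termination
    cat /sys/class/net/can0/termination
    echo 1 > /sys/class/net/can0/termination

    # print the firmware identification string of the module
    cat /sys/class/net/can0/fwinfo
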
diff --git a/Documentation/devicetree/bindings/net/amd-xgbe-phy.txt b/Documentation/devicetree/bindings/net/amd-xgbe-phy.txt
deleted file mode 100644 (file)
index 8db3238..0000000
+++ /dev/null
@@ -1,48 +0,0 @@
-* AMD 10GbE PHY driver (amd-xgbe-phy)
-
-Required properties:
-- compatible: Should be "amd,xgbe-phy-seattle-v1a" and
-  "ethernet-phy-ieee802.3-c45"
-- reg: Address and length of the register sets for the device
-   - SerDes Rx/Tx registers
-   - SerDes integration registers (1/2)
-   - SerDes integration registers (2/2)
-- interrupt-parent: Should be the phandle for the interrupt controller
-  that services interrupts for this device
-- interrupts: Should contain the amd-xgbe-phy interrupt.
-
-Optional properties:
-- amd,speed-set: Speed capabilities of the device
-    0 - 1GbE and 10GbE (default)
-    1 - 2.5GbE and 10GbE
-
-The following optional properties are represented by an array with each
-value corresponding to a particular speed. The first array value represents
-the setting for the 1GbE speed, the second value for the 2.5GbE speed and
-the third value for the 10GbE speed.  All three values are required if the
-property is used.
-- amd,serdes-blwc: Baseline wandering correction enablement
-    0 - Off
-    1 - On
-- amd,serdes-cdr-rate: CDR rate speed selection
-- amd,serdes-pq-skew: PQ (data sampling) skew
-- amd,serdes-tx-amp: TX amplitude boost
-- amd,serdes-dfe-tap-config: DFE taps available to run
-- amd,serdes-dfe-tap-enable: DFE taps to enable
-
-Example:
-       xgbe_phy@e1240800 {
-               compatible = "amd,xgbe-phy-seattle-v1a", "ethernet-phy-ieee802.3-c45";
-               reg = <0 0xe1240800 0 0x00400>,
-                     <0 0xe1250000 0 0x00060>,
-                     <0 0xe1250080 0 0x00004>;
-               interrupt-parent = <&gic>;
-               interrupts = <0 323 4>;
-               amd,speed-set = <0>;
-               amd,serdes-blwc = <1>, <1>, <0>;
-               amd,serdes-cdr-rate = <2>, <2>, <7>;
-               amd,serdes-pq-skew = <10>, <10>, <30>;
-               amd,serdes-tx-amp = <15>, <15>, <10>;
-               amd,serdes-dfe-tap-config = <3>, <3>, <1>;
-               amd,serdes-dfe-tap-enable = <0>, <0>, <127>;
-       };
diff --git a/Documentation/devicetree/bindings/net/amd-xgbe.txt b/Documentation/devicetree/bindings/net/amd-xgbe.txt
index 26efd526d16c29cc45de438aef24adb8f34898bd..4bb624a73b5409555c529a0c85e2ee10d69ce3b8 100644 (file)
@@ -5,12 +5,16 @@ Required properties:
 - reg: Address and length of the register sets for the device
    - MAC registers
    - PCS registers
+   - SerDes Rx/Tx registers
+   - SerDes integration registers (1/2)
+   - SerDes integration registers (2/2)
 - interrupt-parent: Should be the phandle for the interrupt controller
   that services interrupts for this device
 - interrupts: Should contain the amd-xgbe interrupt(s). The first interrupt
   listed is required and is the general device interrupt. If the optional
   amd,per-channel-interrupt property is specified, then one additional
-  interrupt for each DMA channel supported by the device should be specified
+  interrupt for each DMA channel supported by the device should be specified.
+  The last interrupt listed should be the PCS auto-negotiation interrupt.
 - clocks:
    - DMA clock for the amd-xgbe device (used for calculating the
      correct Rx interrupt watchdog timer value on a DMA channel
@@ -19,7 +23,6 @@ Required properties:
 - clock-names: Should be the names of the clocks
    - "dma_clk" for the DMA clock
    - "ptp_clk" for the PTP clock
-- phy-handle: See ethernet.txt file in the same directory
 - phy-mode: See ethernet.txt file in the same directory
 
 Optional properties:
@@ -29,19 +32,46 @@ Optional properties:
 - amd,per-channel-interrupt: Indicates that Rx and Tx complete will generate
   a unique interrupt for each DMA channel - this requires an additional
   interrupt be configured for each DMA channel
+- amd,speed-set: Speed capabilities of the device
+    0 - 1GbE and 10GbE (default)
+    1 - 2.5GbE and 10GbE
+
+The following optional properties are represented by an array with each
+value corresponding to a particular speed. The first array value represents
+the setting for the 1GbE speed, the second value for the 2.5GbE speed and
+the third value for the 10GbE speed.  All three values are required if the
+property is used.
+- amd,serdes-blwc: Baseline wandering correction enablement
+    0 - Off
+    1 - On
+- amd,serdes-cdr-rate: CDR rate speed selection
+- amd,serdes-pq-skew: PQ (data sampling) skew
+- amd,serdes-tx-amp: TX amplitude boost
+- amd,serdes-dfe-tap-config: DFE taps available to run
+- amd,serdes-dfe-tap-enable: DFE taps to enable
 
 Example:
        xgbe@e0700000 {
                compatible = "amd,xgbe-seattle-v1a";
                reg = <0 0xe0700000 0 0x80000>,
-                     <0 0xe0780000 0 0x80000>;
+                     <0 0xe0780000 0 0x80000>,
+                     <0 0xe1240800 0 0x00400>,
+                     <0 0xe1250000 0 0x00060>,
+                     <0 0xe1250080 0 0x00004>;
                interrupt-parent = <&gic>;
                interrupts = <0 325 4>,
-                            <0 326 1>, <0 327 1>, <0 328 1>, <0 329 1>;
+                            <0 326 1>, <0 327 1>, <0 328 1>, <0 329 1>,
+                            <0 323 4>;
                amd,per-channel-interrupt;
                clocks = <&xgbe_dma_clk>, <&xgbe_ptp_clk>;
                clock-names = "dma_clk", "ptp_clk";
-               phy-handle = <&phy>;
                phy-mode = "xgmii";
                mac-address = [ 02 a1 a2 a3 a4 a5 ];
+               amd,speed-set = <0>;
+               amd,serdes-blwc = <1>, <1>, <0>;
+               amd,serdes-cdr-rate = <2>, <2>, <7>;
+               amd,serdes-pq-skew = <10>, <10>, <30>;
+               amd,serdes-tx-amp = <15>, <15>, <10>;
+               amd,serdes-dfe-tap-config = <3>, <3>, <1>;
+               amd,serdes-dfe-tap-enable = <0>, <0>, <127>;
        };
diff --git a/Documentation/devicetree/bindings/net/ipq806x-dwmac.txt b/Documentation/devicetree/bindings/net/ipq806x-dwmac.txt
new file mode 100644 (file)
index 0000000..6d7ab4e
--- /dev/null
@@ -0,0 +1,35 @@
+* IPQ806x DWMAC Ethernet controller
+
+The device inherits all the properties of the dwmac/stmmac devices
+described in the file net/stmmac.txt with the following changes.
+
+Required properties:
+
+- compatible: should be "qcom,ipq806x-gmac" along with "snps,dwmac"
+             and any applicable more detailed version number
+             described in net/stmmac.txt
+
+- qcom,nss-common: should contain a phandle to a syscon device mapping the
+                  nss-common registers.
+
+- qcom,qsgmii-csr: should contain a phandle to a syscon device mapping the
+                  qsgmii-csr registers.
+
+Example:
+
+       gmac: ethernet@37000000 {
+               device_type = "network";
+               compatible = "qcom,ipq806x-gmac";
+               reg = <0x37000000 0x200000>;
+               interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH>;
+               interrupt-names = "macirq";
+
+               qcom,nss-common = <&nss_common>;
+               qcom,qsgmii-csr = <&qsgmii_csr>;
+
+               clocks = <&gcc GMAC_CORE1_CLK>;
+               clock-names = "stmmaceth";
+
+               resets = <&gcc GMAC_CORE1_RESET>;
+               reset-names = "stmmaceth";
+       };
diff --git a/Documentation/devicetree/bindings/net/macb.txt b/Documentation/devicetree/bindings/net/macb.txt
index ba19d671e8081148529368442fa39be451294608..8ec5fdf444e99e7b48a786fc8ead23d2150c0926 100644 (file)
@@ -9,6 +9,7 @@ Required properties:
   the Cadence GEM, or the generic form: "cdns,gem".
   Use "cdns,sama5d3-gem" for the Gigabit IP available on Atmel sama5d3 SoCs.
   Use "cdns,sama5d4-gem" for the Gigabit IP available on Atmel sama5d4 SoCs.
+  Use "cdns,zynqmp-gem" for Zynq Ultrascale+ MPSoC.
 - reg: Address and length of the register set for the device
 - interrupts: Should contain macb interrupt
 - phy-mode: See ethernet.txt file in the same directory.
diff --git a/Documentation/devicetree/bindings/net/nxp,lpc1850-dwmac.txt b/Documentation/devicetree/bindings/net/nxp,lpc1850-dwmac.txt
new file mode 100644 (file)
index 0000000..7edba12
--- /dev/null
@@ -0,0 +1,20 @@
+* NXP LPC1850 GMAC ethernet controller
+
+This device is a platform glue layer for stmmac.
+Please see stmmac.txt for the other unchanged properties.
+
+Required properties:
+ - compatible:  Should contain "nxp,lpc1850-dwmac"
+
+Examples:
+
+mac: ethernet@40010000 {
+       compatible = "nxp,lpc1850-dwmac", "snps,dwmac-3.611", "snps,dwmac";
+       reg = <0x40010000 0x2000>;
+       interrupts = <5>;
+       interrupt-names = "macirq";
+       clocks = <&ccu1 CLK_CPU_ETHERNET>;
+       clock-names = "stmmaceth";
+       resets = <&rgu 22>;
+       reset-names = "stmmaceth";
+};
diff --git a/Documentation/devicetree/bindings/net/phy.txt b/Documentation/devicetree/bindings/net/phy.txt
index 40831fbaff72102d6ff608ae06c1ddf4bedcdbae..525e1658f2da5ca9ed22594fda97a25646ac2e20 100644 (file)
@@ -30,6 +30,9 @@ Optional Properties:
 
 - max-speed: Maximum PHY supported speed (10, 100, 1000...)
 
+- broken-turn-around: If set, indicates the PHY device does not correctly
+  release the turn around line low at the end of a MDIO transaction.
+
 Example:
 
 ethernet-phy@0 {
diff --git a/Documentation/devicetree/bindings/net/ti,dp83867.txt b/Documentation/devicetree/bindings/net/ti,dp83867.txt
new file mode 100644 (file)
index 0000000..58d935b
--- /dev/null
@@ -0,0 +1,25 @@
+* Texas Instruments - dp83867 Gigabit Ethernet PHY
+
+Required properties:
+       - reg - The ID number for the phy, usually a small integer
+       - ti,rx-internal-delay - RGMII Receive Clock Delay - see dt-bindings/net/ti-dp83867.h
+               for applicable values
+       - ti,tx-internal-delay - RGMII Transmit Clock Delay - see dt-bindings/net/ti-dp83867.h
+               for applicable values
+       - ti,fifo-depth - Transmit FIFO depth - see dt-bindings/net/ti-dp83867.h
+               for applicable values
+
+Default child nodes are standard Ethernet PHY device
+nodes as described in Documentation/devicetree/bindings/net/phy.txt
+
+Example:
+
+       ethernet-phy@0 {
+               reg = <0>;
+               ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_25_NS>;
+               ti,tx-internal-delay = <DP83867_RGMIIDCTL_2_75_NS>;
+               ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
+       };
+
+The datasheet can be found at:
+http://www.ti.com/product/DP83867IR/datasheet
diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt
index 83bf4986baeabbcc75e03abd5e3d50edd3e97962..334b49ef02d13eea3bbbb6213ec5241881b20fe6 100644 (file)
@@ -51,6 +51,7 @@ Table of Contents
 3.4    Configuring Bonding Manually via Sysfs
 3.5    Configuration with Interfaces Support
 3.6    Overriding Configuration for Special Cases
+3.7    Configuring LACP for 802.3ad mode in a more secure way
 
 4. Querying Bonding Configuration
 4.1    Bonding Configuration
@@ -178,6 +179,27 @@ active_slave
        active slave, or the empty string if there is no active slave or
        the current mode does not use an active slave.
 
+ad_actor_sys_prio
+
+       In an AD system, this specifies the system priority. The allowed range
+       is 1 - 65535. If the value is not specified, it takes 65535 as the
+       default value.
+
+       This parameter has effect only in 802.3ad mode and is available through
+       the sysfs interface.
+
+ad_actor_system
+
+       In an AD system, this specifies the mac-address for the actor in
+       protocol packet exchanges (LACPDUs). The value cannot be NULL or
+       multicast. It is preferred to have the local-admin bit set for this
+       MAC, but the driver does not enforce it. If the value is not given,
+       the system defaults to using the master's MAC address as the actor's
+       system address.
+
+       This parameter has effect only in 802.3ad mode and is available through
+       the sysfs interface.
+
 ad_select
 
        Specifies the 802.3ad aggregation selection logic to use.  The
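
A minimal sysfs sketch for the two parameters above, assuming bond0 is
already configured for 802.3ad mode (the MAC is a hypothetical locally
administered address; depending on the driver version the bond may need
to be down while these are written):

    echo 02:aa:bb:cc:dd:ee > /sys/class/net/bond0/bonding/ad_actor_system
    echo 1000 > /sys/class/net/bond0/bonding/ad_actor_sys_prio
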
@@ -220,6 +242,21 @@ ad_select
 
        This option was added in bonding version 3.4.0.
 
+ad_user_port_key
+
+       In an AD system, the port-key has three parts as shown below -
+
+          Bits   Use
+          00     Duplex
+          01-05  Speed
+          06-15  User-defined
+
+       This defines the upper 10 bits of the port key. The values can be
+       from 0 - 1023. If not given, the system defaults to 0.
+
+       This parameter has effect only in 802.3ad mode and is available through
+       the sysfs interface.
+
 all_slaves_active
 
        Specifies that duplicate frames (received on inactive ports) should be
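
A sketch of setting the parameter above on a hypothetical bond0 in 802.3ad
mode; only the user-defined upper 10 bits are written, while the duplex and
speed bits remain under driver control:

    # any value in 0..1023 is accepted and lands in bits 06-15 of the port key
    echo 513 > /sys/class/net/bond0/bonding/ad_user_port_key
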
@@ -1622,6 +1659,53 @@ output port selection.
 This feature first appeared in bonding driver version 3.7.0 and support for
 output slave selection was limited to round-robin and active-backup modes.
 
+3.7 Configuring LACP for 802.3ad mode in a more secure way
+----------------------------------------------------------
+
+When using 802.3ad bonding mode, the Actor (host) and Partner (switch)
+exchange LACPDUs.  These LACPDUs cannot be sniffed, because they are
+destined to link local mac addresses (which switches/bridges are not
+supposed to forward).  However, most of the values are easily predictable
+or are simply the machine's MAC address (which is trivially known to all
+other hosts in the same L2).  This implies that other machines in the L2
+domain can spoof LACPDU packets from other hosts to the switch and potentially
+cause mayhem by joining (from the point of view of the switch) another
+machine's aggregate, thus receiving a portion of that host's incoming
+traffic and / or spoofing traffic from that machine themselves (potentially
+even successfully terminating some portion of flows). Though this is not
+a likely scenario, one could avoid this possibility by simply configuring a
+few bonding parameters:
+
+   (a) ad_actor_system : You can set a random mac-address that can be used for
+       these LACPDU exchanges. The value cannot be NULL or multicast.
+       Also, it is preferable to set the local-admin bit. The following shell
+       code generates a random mac-address as described above.
+
+       # sys_mac_addr=$(printf '%02x:%02x:%02x:%02x:%02x:%02x' \
+                                $(( (RANDOM & 0xFE) | 0x02 )) \
+                                $(( RANDOM & 0xFF )) \
+                                $(( RANDOM & 0xFF )) \
+                                $(( RANDOM & 0xFF )) \
+                                $(( RANDOM & 0xFF )) \
+                                $(( RANDOM & 0xFF )))
+       # echo $sys_mac_addr > /sys/class/net/bond0/bonding/ad_actor_system
+
+   (b) ad_actor_sys_prio : Randomize the system priority. The default value
+       is 65535, but the system can take any value from 1 - 65535. The
+       following shell code generates a random priority and sets it.
+
+       # sys_prio=$(( 1 + RANDOM + RANDOM ))
+       # echo $sys_prio > /sys/class/net/bond0/bonding/ad_actor_sys_prio
+
+   (c) ad_user_port_key : Use the user portion of the port-key. The default
+       keeps this empty. These are the upper 10 bits of the port-key and the
+       value ranges from 0 - 1023. The following shell code generates these
+       10 bits and sets it.
+
+       # usr_port_key=$(( RANDOM & 0x3FF ))
+       # echo $usr_port_key > /sys/class/net/bond0/bonding/ad_user_port_key
+
+
 4 Querying Bonding Configuration
 =================================
 
diff --git a/Documentation/networking/can.txt b/Documentation/networking/can.txt
index 5abad1e921ca810c1e765d1d84d1ca4a50ce5016..b48d4a1494113c9842b45de8f299f20ecc96c620 100644 (file)
@@ -268,6 +268,9 @@ solution for a couple of reasons:
     struct can_frame {
             canid_t can_id;  /* 32 bit CAN_ID + EFF/RTR/ERR flags */
             __u8    can_dlc; /* frame payload length in byte (0 .. 8) */
+            __u8    __pad;   /* padding */
+            __u8    __res0;  /* reserved / padding */
+            __u8    __res1;  /* reserved / padding */
             __u8    data[8] __attribute__((aligned(8)));
     };
 
diff --git a/Documentation/networking/dctcp.txt b/Documentation/networking/dctcp.txt
index 0d5dfbc89ec9e37e27b53a610594dd112d3c6e37..13a85775320875a886442375fcfc027628613f2b 100644 (file)
@@ -8,6 +8,7 @@ the data center network to provide multi-bit feedback to the end hosts.
 To enable it on end hosts:
 
   sysctl -w net.ipv4.tcp_congestion_control=dctcp
+  sysctl -w net.ipv4.tcp_ecn_fallback=0 (optional)
 
 All switches in the data center network running DCTCP must support ECN
 marking and be configured for marking when reaching defined switch buffer
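
A quick way to verify the settings above on a kernel with DCTCP built in:

    # confirm DCTCP is available and currently selected
    sysctl net.ipv4.tcp_available_congestion_control
    sysctl net.ipv4.tcp_congestion_control
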
diff --git a/Documentation/networking/ieee802154.txt b/Documentation/networking/ieee802154.txt
index 22bbc7225f8ed599e8c8b51653c017a6c4845c53..1700756af0570bf75b6422600beb22c935072575 100644 (file)
@@ -30,8 +30,8 @@ int sd = socket(PF_IEEE802154, SOCK_DGRAM, 0);
 
 The address family, socket addresses etc. are defined in the
 include/net/af_ieee802154.h header or in the special header
-in our userspace package (see either linux-zigbee sourceforge download page
-or git tree at git://linux-zigbee.git.sourceforge.net/gitroot/linux-zigbee).
+in the userspace package (see either http://wpan.cakelab.org/ or the
+git tree at https://github.com/linux-wpan/wpan-tools).
 
 One can use SOCK_RAW for passing raw data towards device xmit function. YMMV.
 
@@ -49,15 +49,6 @@ Like with WiFi, there are several types of devices implementing IEEE 802.15.4.
 Those types of devices require different approach to be hooked into Linux kernel.
 
 
-MLME - MAC Level Management
-============================
-
-Most of IEEE 802.15.4 MLME interfaces are directly mapped on netlink commands.
-See the include/net/nl802154.h header. Our userspace tools package
-(see above) provides CLI configuration utility for radio interfaces and simple
-coordinator for IEEE 802.15.4 networks as an example users of MLME protocol.
-
-
 HardMAC
 =======
 
@@ -75,8 +66,6 @@ net_device with a pointer to struct ieee802154_mlme_ops instance. The fields
 assoc_req, assoc_resp, disassoc_req, start_req, and scan_req are optional.
 All other fields are required.
 
-We provide an example of simple HardMAC driver at drivers/ieee802154/fakehard.c
-
 
 SoftMAC
 =======
@@ -89,7 +78,8 @@ stack interface for network sniffers (e.g. WireShark).
 
 This layer is going to be extended soon.
 
-See header include/net/mac802154.h and several drivers in drivers/ieee802154/.
+See header include/net/mac802154.h and several drivers in
+drivers/net/ieee802154/.
 
 
 Device drivers API
@@ -114,18 +104,17 @@ Moreover IEEE 802.15.4 device operations structure should be filled.
 Fake drivers
 ============
 
-In addition there are two drivers available which simulate real devices with
-HardMAC (fakehard) and SoftMAC (fakelb - IEEE 802.15.4 loopback driver)
-interfaces. This option provides possibility to test and debug stack without
-usage of real hardware.
+In addition there is a driver available which simulates a real device with
+a SoftMAC (fakelb - IEEE 802.15.4 loopback driver) interface. This option
+provides the possibility to test and debug the stack without using real
+hardware.
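+
+For example, the loopback driver can be loaded for quick testing without
+hardware (a sketch; this assumes the fakelb module's numlbs parameter,
+which sets the number of simulated devices):
+
+  modprobe fakelb numlbs=2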
 
-See sources in drivers/ieee802154 folder for more details.
+See sources in drivers/net/ieee802154 folder for more details.
 
 
 6LoWPAN Linux implementation
 ============================
 
-The IEEE 802.15.4 standard specifies an MTU of 128 bytes, yielding about 80
+The IEEE 802.15.4 standard specifies an MTU of 127 bytes, yielding about 80
 octets of actual MAC payload once security is turned on, on a wireless link
 with a link throughput of 250 kbps or less.  The 6LoWPAN adaptation format
 [RFC4944] was specified to carry IPv6 datagrams over such constrained links,
@@ -140,7 +129,8 @@ In Semptember 2011 the standard update was published - [RFC6282].
 It deprecates HC1 and HC2 compression and defines IPHC encoding format which is
 used in this Linux implementation.
 
-All the code related to 6lowpan you may find in files: net/ieee802154/6lowpan.*
+All the code related to 6lowpan can be found in: net/6lowpan/*
+and net/ieee802154/6lowpan/*
 
 To setup 6lowpan interface you need (busybox release > 1.17.0):
 1. Add IEEE802.15.4 interface and initialize PANid;
index 071fb18dc57c868e99367f9d3872bc0e91401414..5fae7704daab292cf900158666c2d4bb80dd2424 100644 (file)
@@ -267,6 +267,15 @@ tcp_ecn - INTEGER
                  but do not request ECN on outgoing connections.
        Default: 2
 
+tcp_ecn_fallback - BOOLEAN
+       Enable fallback to non-ECN operation if the kernel detects that an
+       ECN connection misbehaves. Currently, this knob implements the
+       fallback from RFC3168, section 6.1.1.1, but additional detection
+       mechanisms may be implemented under this knob in the future. The
+       value is not used if tcp_ecn or the per-route (or congestion
+       control) ECN settings are disabled.
+       Default: 1 (fallback enabled)
+
 tcp_fack - BOOLEAN
        Enable FACK congestion avoidance and fast retransmission.
        The value is not used, if tcp_sack is not enabled.
@@ -742,8 +751,10 @@ IP Variables:
 ip_local_port_range - 2 INTEGERS
        Defines the local port range that is used by TCP and UDP to
        choose the local port. The first number is the first, the
-       second the last local port number. The default values are
-       32768 and 61000 respectively.
+       second the last local port number.
+       If possible, these numbers should have different parity
+       (one even and one odd value).
+       The default values are 32768 and 60999 respectively.
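+
+       For example, to set the default range at runtime (one even and
+       one odd end):
+
+       $ sysctl -w net.ipv4.ip_local_port_range="32768 60999"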
 
 ip_local_reserved_ports - list of comma separated ranges
        Specify the ports which are reserved for known third-party
@@ -766,7 +777,7 @@ ip_local_reserved_ports - list of comma separated ranges
        ip_local_port_range, e.g.:
 
        $ cat /proc/sys/net/ipv4/ip_local_port_range
-       32000   61000
+       32000   60999
        $ cat /proc/sys/net/ipv4/ip_local_reserved_ports
        8080,9148
 
@@ -1213,6 +1224,14 @@ auto_flowlabels - BOOLEAN
        FALSE: disabled
        Default: false
 
+flowlabel_state_ranges - BOOLEAN
+       Split the flow label number space into two ranges. 0-0x7FFFF is
+       reserved for the IPv6 flow manager facility, 0x80000-0xFFFFF
+       is reserved for stateless flow labels as described in RFC6437.
+       TRUE: enabled
+       FALSE: disabled
+       Default: true
+
 anycast_src_echo_reply - BOOLEAN
        Controls the use of anycast addresses as source addresses for ICMPv6
        echo reply
index 0344f1d45b3765b016f0388a830e3a8e48af31ce..f4be85e9600578e7411f1baa1ab37041a677fe4a 100644 (file)
@@ -1,6 +1,6 @@
 
 
-                  HOWTO for the linux packet generator 
+                  HOWTO for the linux packet generator
                   ------------------------------------
 
 Enable CONFIG_NET_PKTGEN to compile and build pktgen either in-kernel
@@ -50,17 +50,33 @@ For ixgbe use e.g. "30" resulting in approx 33K interrupts/sec (1/30*10^6):
  # ethtool -C ethX rx-usecs 30
 
 
-Viewing threads
-===============
-/proc/net/pktgen/kpktgend_0 
-Name: kpktgend_0  max_before_softirq: 10000
-Running: 
-Stopped: eth1 
-Result: OK: max_before_softirq=10000
+Kernel threads
+==============
+Pktgen creates a thread for each CPU, with affinity to that CPU.
+Each thread is controlled through the procfile
+/proc/net/pktgen/kpktgend_X.
+
+Example: /proc/net/pktgen/kpktgend_0
+
+ Running:
+ Stopped: eth4@0
+ Result: OK: add_device=eth4@0
+
+Most important are the devices assigned to the thread.
+
+The two basic thread commands are:
+ * add_device DEVICE@NAME -- adds a single device
+ * rem_device_all         -- removes all associated devices
+
+When adding a device to a thread, a corresponding procfile is created
+which is used for configuring this device. Thus, device names need to
+be unique.
 
-Most important are the devices assigned to the thread.  Note that a
-device can only belong to one thread.
+To support adding the same device to multiple threads, which is useful
+with multi-queue NICs, the device naming scheme is extended with "@":
+ device@something
 
+The part after "@" can be anything, but it is customary to use the thread
+number.
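+
+For example, to assign eth4 to two kernel threads (device and thread
+numbers here are illustrative):
+
+ # echo "add_device eth4@0" > /proc/net/pktgen/kpktgend_0
+ # echo "add_device eth4@1" > /proc/net/pktgen/kpktgend_1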
 
 Viewing devices
 ===============
@@ -69,29 +85,32 @@ The Params section holds configured information.  The Current section
 holds running statistics.  The Result is printed after a run or after
 interruption.  Example:
 
-/proc/net/pktgen/eth1       
+/proc/net/pktgen/eth4@0
 
-Params: count 10000000  min_pkt_size: 60  max_pkt_size: 60
-     frags: 0  delay: 0  clone_skb: 1000000  ifname: eth1
+ Params: count 100000  min_pkt_size: 60  max_pkt_size: 60
+     frags: 0  delay: 0  clone_skb: 64  ifname: eth4@0
      flows: 0 flowlen: 0
-     dst_min: 10.10.11.2  dst_max: 
-     src_min:   src_max: 
-     src_mac: 00:00:00:00:00:00  dst_mac: 00:04:23:AC:FD:82
-     udp_src_min: 9  udp_src_max: 9  udp_dst_min: 9  udp_dst_max: 9
-     src_mac_count: 0  dst_mac_count: 0 
-     Flags: 
-Current:
-     pkts-sofar: 10000000  errors: 39664
-     started: 1103053986245187us  stopped: 1103053999346329us idle: 880401us
-     seq_num: 10000011  cur_dst_mac_offset: 0  cur_src_mac_offset: 0
-     cur_saddr: 0x10a0a0a  cur_daddr: 0x20b0a0a
-     cur_udp_dst: 9  cur_udp_src: 9
+     queue_map_min: 0  queue_map_max: 0
+     dst_min: 192.168.81.2  dst_max:
+     src_min:   src_max:
+     src_mac: 90:e2:ba:0a:56:b4 dst_mac: 00:1b:21:3c:9d:f8
+     udp_src_min: 9  udp_src_max: 109  udp_dst_min: 9  udp_dst_max: 9
+     src_mac_count: 0  dst_mac_count: 0
+     Flags: UDPSRC_RND  NO_TIMESTAMP  QUEUE_MAP_CPU
+ Current:
+     pkts-sofar: 100000  errors: 0
+     started: 623913381008us  stopped: 623913396439us idle: 25us
+     seq_num: 100001  cur_dst_mac_offset: 0  cur_src_mac_offset: 0
+     cur_saddr: 192.168.8.3  cur_daddr: 192.168.81.2
+     cur_udp_dst: 9  cur_udp_src: 42
+     cur_queue_map: 0
      flows: 0
-Result: OK: 13101142(c12220741+d880401) usec, 10000000 (60byte,0frags)
-  763292pps 390Mb/sec (390805504bps) errors: 39664
+ Result: OK: 15430(c15405+d25) usec, 100000 (60byte,0frags)
+  6480562pps 3110Mb/sec (3110669760bps) errors: 0
 
-Configuring threads and devices
-================================
+
+Configuring devices
+===================
 This is done via the /proc interface, and most easily done via pgset
 as defined in the sample scripts.
 
@@ -126,7 +145,7 @@ Examples:
                          To select queue 1 of a given device,
                          use queue_map_min=1 and queue_map_max=1
 
- pgset "src_mac_count 1" Sets the number of MACs we'll range through.  
+ pgset "src_mac_count 1" Sets the number of MACs we'll range through.
                          The 'minimum' MAC is what you set with srcmac.
 
  pgset "dst_mac_count 1" Sets the number of MACs we'll range through.
@@ -145,6 +164,7 @@ Examples:
                               UDPCSUM,
                               IPSEC # IPsec encapsulation (needs CONFIG_XFRM)
                               NODE_ALLOC # node specific memory allocation
+                              NO_TIMESTAMP # disable timestamping
 
  pgset spi SPI_VALUE     Set specific SA used to transform packet.
 
@@ -192,24 +212,43 @@ Examples:
  pgset "rate 300M"        set rate to 300 Mb/s
  pgset "ratep 1000000"    set rate to 1Mpps
 
+ pgset "xmit_mode netif_receive"  RX inject into stack netif_receive_skb()
+                                 Works with "burst" but not with "clone_skb".
+                                 Default xmit_mode is "start_xmit".
+
 Sample scripts
 ==============
 
-A collection of small tutorial scripts for pktgen is in the
-samples/pktgen directory:
+A collection of tutorial scripts and helpers for pktgen is in the
+samples/pktgen directory. The helper parameters.sh file supports easy
+and consistent parameter parsing across the sample scripts.
+
+Usage example and help:
+ ./pktgen_sample01_simple.sh -i eth4 -m 00:1B:21:3C:9D:F8 -d 192.168.8.2
+
+Usage: ./pktgen_sample01_simple.sh [-vx] -i ethX
+  -i : ($DEV)       output interface/device (required)
+  -s : ($PKT_SIZE)  packet size
+  -d : ($DEST_IP)   destination IP
+  -m : ($DST_MAC)   destination MAC-addr
+  -t : ($THREADS)   threads to start
+  -c : ($SKB_CLONE) SKB clones sent before allocating a new SKB
+  -b : ($BURST)     HW level bursting of SKBs
+  -v : ($VERBOSE)   verbose
+  -x : ($DEBUG)     debug
+
+The global variables being set are also listed.  E.g. the required
+interface/device parameter "-i" sets variable $DEV.  Copy the
+pktgen_sampleXX scripts and modify them to fit your own needs.
+
+The old scripts:
 
-pktgen.conf-1-1                  # 1 CPU 1 dev 
 pktgen.conf-1-2                  # 1 CPU 2 dev
-pktgen.conf-2-1                  # 2 CPU's 1 dev 
-pktgen.conf-2-2                  # 2 CPU's 2 dev
 pktgen.conf-1-1-rdos             # 1 CPU 1 dev w. route DoS 
 pktgen.conf-1-1-ip6              # 1 CPU 1 dev ipv6
 pktgen.conf-1-1-ip6-rdos         # 1 CPU 1 dev ipv6  w. route DoS
 pktgen.conf-1-1-flows            # 1 CPU 1 dev multiple flows.
 
-Run in shell: ./pktgen.conf-X-Y
-This does all the setup including sending.
-
 
 Interrupt affinity
 ===================
@@ -217,6 +256,9 @@ Note that when adding devices to a specific CPU it is a good idea to
 also assign /proc/irq/XX/smp_affinity so that the TX interrupts are bound
 to the same CPU.  This reduces cache bouncing when freeing skbs.
 
+Also consider using the device flag QUEUE_MAP_CPU, which maps the SKB's
+TX queue to the running thread's CPU (directly from smp_processor_id()).
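+
+For example, with the pgset helper on the device procfile:
+
+ pgset "flag QUEUE_MAP_CPU"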
+
 Enable IPsec
 ============
 Default IPsec transformation with ESP encapsulation plus transport mode
@@ -237,18 +279,19 @@ Current commands and configuration options
 
 start
 stop
+reset
 
 ** Thread commands:
 
 add_device
 rem_device_all
-max_before_softirq
 
 
 ** Device commands:
 
 count
 clone_skb
+burst
 debug
 
 frags
@@ -257,10 +300,17 @@ delay
 src_mac_count
 dst_mac_count
 
-pkt_size 
+pkt_size
 min_pkt_size
 max_pkt_size
 
+queue_map_min
+queue_map_max
+skb_priority
+
+tos           (ipv4)
+traffic_class (ipv6)
+
 mpls
 
 udp_src_min
@@ -269,6 +319,8 @@ udp_src_max
 udp_dst_min
 udp_dst_max
 
+node
+
 flag
   IPSRC_RND
   IPDST_RND
@@ -287,6 +339,9 @@ flag
   UDPCSUM
   IPSEC
   NODE_ALLOC
+  NO_TIMESTAMP
+
+spi (ipsec)
 
 dst_min
 dst_max
@@ -299,8 +354,10 @@ src_mac
 
 clear_counters
 
-dst6
 src6
+dst6
+dst6_max
+dst6_min
 
 flows
 flowlen
@@ -308,6 +365,17 @@ flowlen
 rate
 ratep
 
+xmit_mode <start_xmit|netif_receive>
+
+vlan_cfi
+vlan_id
+vlan_p
+
+svlan_cfi
+svlan_id
+svlan_p
+
+
 References:
 ftp://robur.slu.se/pub/Linux/net-development/pktgen-testing/
 ftp://robur.slu.se/pub/Linux/net-development/pktgen-testing/examples/
index f981a9295a39a14245f884b1fbc810817f74999c..da82cd75a4f6492a2fcb9b9daaf7cee83e625be4 100644 (file)
-Switch (and switch-ish) device drivers HOWTO
-===========================
-
-Please note that the word "switch" is here used in very generic meaning.
-This include devices supporting L2/L3 but also various flow offloading chips,
-including switches embedded into SR-IOV NICs.
-
-Lets describe a topology a bit. Imagine the following example:
-
-       +----------------------------+    +---------------+
-       |     SOME switch chip       |    |      CPU      |
-       +----------------------------+    +---------------+
-       port1 port2 port3 port4 MNGMNT    |     PCI-E     |
-         |     |     |     |     |       +---------------+
-        PHY   PHY    |     |     |         |  NIC0 NIC1
-                     |     |     |         |   |    |
-                     |     |     +- PCI-E -+   |    |
-                     |     +------- MII -------+    |
-                     +------------- MII ------------+
-
-In this example, there are two independent lines between the switch silicon
-and CPU. NIC0 and NIC1 drivers are not aware of a switch presence. They are
-separate from the switch driver. SOME switch chip is by managed by a driver
-via PCI-E device MNGMNT. Note that MNGMNT device, NIC0 and NIC1 may be
-connected to some other type of bus.
-
-Now, for the previous example show the representation in kernel:
-
-       +----------------------------+    +---------------+
-       |     SOME switch chip       |    |      CPU      |
-       +----------------------------+    +---------------+
-       sw0p0 sw0p1 sw0p2 sw0p3 MNGMNT    |     PCI-E     |
-         |     |     |     |     |       +---------------+
-        PHY   PHY    |     |     |         |  eth0 eth1
-                     |     |     |         |   |    |
-                     |     |     +- PCI-E -+   |    |
-                     |     +------- MII -------+    |
-                     +------------- MII ------------+
-
-Lets call the example switch driver for SOME switch chip "SOMEswitch". This
-driver takes care of PCI-E device MNGMNT. There is a netdevice instance sw0pX
-created for each port of a switch. These netdevices are instances
-of "SOMEswitch" driver. sw0pX netdevices serve as a "representation"
-of the switch chip. eth0 and eth1 are instances of some other existing driver.
-
-The only difference of the switch-port netdevice from the ordinary netdevice
-is that is implements couple more NDOs:
-
-  ndo_switch_parent_id_get - This returns the same ID for two port netdevices
-                            of the same physical switch chip. This is
-                            mandatory to be implemented by all switch drivers
-                            and serves the caller for recognition of a port
-                            netdevice.
-  ndo_switch_parent_* - Functions that serve for a manipulation of the switch
-                       chip itself (it can be though of as a "parent" of the
-                       port, therefore the name). They are not port-specific.
-                       Caller might use arbitrary port netdevice of the same
-                       switch and it will make no difference.
-  ndo_switch_port_* - Functions that serve for a port-specific manipulation.
+Ethernet switch device driver model (switchdev)
+===============================================
+Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
+Copyright (c) 2014-2015 Scott Feldman <sfeldma@gmail.com>
+
+
+The Ethernet switch device driver model (switchdev) is an in-kernel driver
+model for switch devices which offload the forwarding (data) plane from the
+kernel.
+
+Figure 1 is a block diagram showing the components of the switchdev model for
+an example setup using a data-center-class switch ASIC chip.  Other setups
+with SR-IOV or soft switches, such as OVS, are possible.
+
+
+                             User-space tools
+
+       user space                   |
+      +-------------------------------------------------------------------+
+       kernel                       | Netlink
+                                    |
+                     +--------------+-------------------------------+
+                     |         Network stack                        |
+                     |           (Linux)                            |
+                     |                                              |
+                     +----------------------------------------------+
+
+                           sw1p2     sw1p4     sw1p6
+                      sw1p1  +  sw1p3  +  sw1p5  +          eth1
+                        +    |    +    |    +    |            +
+                        |    |    |    |    |    |            |
+                     +--+----+----+----+-+--+----+---+  +-----+-----+
+                     |         Switch driver         |  |    mgmt   |
+                     |        (this document)        |  |   driver  |
+                     |                               |  |           |
+                     +--------------+----------------+  +-----------+
+                                    |
+       kernel                       | HW bus (eg PCI)
+      +-------------------------------------------------------------------+
+       hardware                     |
+                     +--------------+---+------------+
+                     |         Switch device (sw1)   |
+                     |  +----+                       +--------+
+                     |  |    v offloaded data path   | mgmt port
+                     |  |    |                       |
+                     +--|----|----+----+----+----+---+
+                        |    |    |    |    |    |
+                        +    +    +    +    +    +
+                       p1   p2   p3   p4   p5   p6
+
+                             front-panel ports
+
+
+                                    Fig 1.
+
+
+Include Files
+-------------
+
+#include <linux/netdevice.h>
+#include <net/switchdev.h>
+
+
+Configuration
+-------------
+
+Use "depends NET_SWITCHDEV" in driver's Kconfig to ensure switchdev model
+support is built for driver.
+
+
+Switch Ports
+------------
+
+On switchdev driver initialization, the driver will allocate and register a
+struct net_device (using register_netdev()) for each enumerated physical switch
+port, called the port netdev.  A port netdev is the software representation of
+the physical port and provides a conduit for control traffic to/from the
+controller (the kernel) and the network, as well as an anchor point for higher
+level constructs such as bridges, bonds, VLANs, tunnels, and L3 routers.  Using
+standard netdev tools (iproute2, ethtool, etc), the port netdev can also
+give the user access to the physical properties of the switch port, such
+as PHY link state and I/O statistics.
+
+There is (currently) no higher-level kernel object for the switch beyond the
+port netdevs.  All of the switchdev driver ops are netdev ops or switchdev ops.
+
+A switch management port is outside the scope of the switchdev driver model.
+Typically, the management port does not participate in the offloaded data
+plane and is driven by a different driver, such as a NIC driver, on the
+management port device.
+
+Port Netdev Naming
+^^^^^^^^^^^^^^^^^^
+
+Udev rules should be used for port netdev naming, using some unique attribute
+of the port as a key, for example the port MAC address or the port PHYS name.
+Hard-coding of kernel netdev names within the driver is discouraged; let the
+kernel pick the default netdev name, and let udev set the final name based on a
+port attribute.
+
+Using port PHYS name (ndo_get_phys_port_name) for the key is particularly
+useful for dynamically-named ports where the device names its ports based on
+external configuration.  For example, if a physical 40G port is split logically
+into 4 10G ports, resulting in 4 port netdevs, the device can give a unique
+name for each port using port PHYS name.  The udev rule would be:
+
+SUBSYSTEM=="net", ACTION=="add", DRIVER="<driver>", ATTR{phys_port_name}!="", \
+       NAME="$attr{phys_port_name}"
+
+Suggested naming convention is "swXpYsZ", where X is the switch name or ID, Y
+is the port name or ID, and Z is the sub-port name or ID.  For example, sw1p1s0
+would be sub-port 0 on port 1 on switch 1.
+
+Switch ID
+^^^^^^^^^
+
+The switchdev driver must implement the switchdev op switchdev_port_attr_get
+for SWITCHDEV_ATTR_PORT_PARENT_ID for each port netdev, returning the same
+physical ID for each port of a switch.  The ID must be unique between switches
+on the same system.  The ID does not need to be unique between switches on
+different systems.
+
+The switch ID is used to locate ports on a switch and to know if aggregated
+ports belong to the same switch.
+
+Port Features
+^^^^^^^^^^^^^
+
+NETIF_F_NETNS_LOCAL
+
+If the switchdev driver (and device) only supports offloading of the default
+network namespace (netns), the driver should set this feature flag to prevent
+the port netdev from being moved out of the default netns.  A netns-aware
+driver/device would not set this flag and be responsible for partitioning
+hardware to preserve netns containment.  This means hardware cannot forward
+traffic from a port in one namespace to another port in another namespace.
+
+Port Topology
+^^^^^^^^^^^^^
+
+The port netdevs representing the physical switch ports can be organized into
+higher-level switching constructs.  The default construct is a standalone
+router port, used to offload L3 forwarding.  Two or more ports can be bonded
+together to form a LAG.  Two or more ports (or LAGs) can be bridged to bridge
+L2 networks.  VLANs can be applied to sub-divide L2 networks.  L2-over-L3
+tunnels can be built on ports.  These constructs are built using standard Linux
+tools such as the bridge driver, the bonding/team drivers, and netlink-based
+tools such as iproute2.
+
+The switchdev driver can learn a particular port's position in the topology by
+monitoring NETDEV_CHANGEUPPER notifications.  For example, a port moved into a
+bond will see its upper master change.  If that bond is moved into a bridge,
+the bond's upper master will change.  And so on.  By registering for netdevice
+events and acting on NETDEV_CHANGEUPPER, the driver tracks such movements to
+know a port's position in the overall topology.
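+
+For example, a LAG under a bridge can be built with standard iproute2
+tools (an illustrative sketch; port and master names are assumptions):
+
+       ip link add name br0 type bridge
+       ip link add name bond0 type bond
+       ip link set dev sw1p1 master bond0
+       ip link set dev sw1p2 master bond0
+       ip link set dev bond0 master br0
+
+Each "master" change above generates a NETDEV_CHANGEUPPER notification
+for the switchdev driver to act on.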
+
+L2 Forwarding Offload
+---------------------
+
+The idea is to offload the L2 data forwarding (switching) path from the kernel
+to the switchdev device by mirroring bridge FDB entries down to the device.  An
+FDB entry is the {port, MAC, VLAN} tuple forwarding destination.
+
+To offload L2 bridging, the switchdev driver/device should support:
+
+       - Static FDB entries installed on a bridge port
+       - Notification of learned/forgotten src mac/vlans from device
+       - STP state changes on the port
+       - VLAN flooding of multicast/broadcast and unknown unicast packets
+
+Static FDB Entries
+^^^^^^^^^^^^^^^^^^
+
+The switchdev driver should implement ndo_fdb_add, ndo_fdb_del and ndo_fdb_dump
+to support static FDB entries installed to the device.  Static bridge FDB
+entries are installed, for example, using iproute2 bridge cmd:
+
+       bridge fdb add ADDR dev DEV [vlan VID] [self]
+
+The driver should use the helper switchdev_port_fdb_xxx ops for ndo_fdb_xxx
+ops, and handle add/delete/dump of SWITCHDEV_OBJ_PORT_FDB object using
+switchdev_port_obj_xxx ops.
+
+XXX: what should be done if offloading this rule to hardware fails (for
+example, due to full capacity in hardware tables)?
+
+Note: by default, the bridge does not filter on VLAN and only bridges untagged
+traffic.  To enable VLAN support, turn on VLAN filtering:
+
+       echo 1 >/sys/class/net/<bridge>/bridge/vlan_filtering
+
+Notification of Learned/Forgotten Source MAC/VLANs
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The switch device will learn/forget source MAC address/VLAN on ingress packets
+and notify the switch driver of the mac/vlan/port tuples.  The switch driver,
+in turn, will notify the bridge driver using the switchdev notifier call:
+
+       err = call_switchdev_notifiers(val, dev, info);
+
+Where val is SWITCHDEV_FDB_ADD when learning and SWITCHDEV_FDB_DEL when
+forgetting, and info points to a struct switchdev_notifier_fdb_info.  On
+SWITCHDEV_FDB_ADD, the bridge driver will install the FDB entry into the
+bridge's FDB and mark the entry as NTF_EXT_LEARNED.  The iproute2 bridge
+command will label these entries "offload":
+
+       $ bridge fdb
+       52:54:00:12:35:01 dev sw1p1 master br0 permanent
+       00:02:00:00:02:00 dev sw1p1 master br0 offload
+       00:02:00:00:02:00 dev sw1p1 self
+       52:54:00:12:35:02 dev sw1p2 master br0 permanent
+       00:02:00:00:03:00 dev sw1p2 master br0 offload
+       00:02:00:00:03:00 dev sw1p2 self
+       33:33:00:00:00:01 dev eth0 self permanent
+       01:00:5e:00:00:01 dev eth0 self permanent
+       33:33:ff:00:00:00 dev eth0 self permanent
+       01:80:c2:00:00:0e dev eth0 self permanent
+       33:33:00:00:00:01 dev br0 self permanent
+       01:00:5e:00:00:01 dev br0 self permanent
+       33:33:ff:12:35:01 dev br0 self permanent
+
+Learning on the port should be disabled on the bridge using the bridge command:
+
+       bridge link set dev DEV learning off
+
+Learning on the device port should be enabled, as well as learning_sync:
+
+       bridge link set dev DEV learning on self
+       bridge link set dev DEV learning_sync on self
+
+The learning_sync attribute enables syncing of the learned/forgotten FDB entry to
+the bridge's FDB.  It's possible, but not optimal, to enable learning on the
+device port and on the bridge port, and disable learning_sync.
+
+To support learning and learning_sync port attributes, the driver implements
+switchdev op switchdev_port_attr_get/set for SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS.
+The driver should initialize the attributes to the hardware defaults.
+
+FDB Ageing
+^^^^^^^^^^
+
+There are two FDB ageing models supported: 1) ageing by the device, and 2)
+ageing by the kernel.  Ageing by the device is preferred if many FDB entries
+are supported.  The driver calls call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
+...) to age out the FDB entry.  In this model, ageing by the kernel should be
+turned off.  XXX: how to turn off ageing in kernel on a per-port basis or
+otherwise prevent the kernel from ageing out the FDB entry?
+
+In the kernel ageing model, the standard bridge ageing mechanism is used to age
+out stale FDB entries.  To keep an FDB entry "alive", the driver should refresh
+the FDB entry by calling call_switchdev_notifiers(SWITCHDEV_FDB_ADD, ...).  The
+notification will reset the FDB entry's last-used time to now.  The driver
+should rate limit refresh notifications, for example, no more than once a
+second.  If the FDB entry expires, ndo_fdb_del is called to remove the entry
+from the device.  XXX: this last part isn't currently correct: ndo_fdb_del
+isn't called, so the stale entry remains in the device... this needs fixing.
+
+FDB Flush
+^^^^^^^^^
+
+XXX: Unimplemented.  Need to support FDB flush by bridge driver for port and
+remove both static and learned FDB entries.
+
+STP State Change on Port
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+Internally or with a third-party STP protocol implementation (e.g. mstpd), the
+bridge driver maintains the STP state for ports, and will notify the switch
+driver of STP state change on a port using the switchdev op
+switchdev_port_attr_set for SWITCHDEV_ATTR_PORT_STP_UPDATE.
+
+State is one of BR_STATE_*.  The switch driver can use STP state updates to
+update ingress packet filter list for the port.  For example, if port is
+DISABLED, no packets should pass, but if port moves to BLOCKED, then STP BPDUs
+and other IEEE 01:80:c2:xx:xx:xx link-local multicast packets can pass.
+
+Note that STP BPDUs are untagged and STP state applies to all VLANs on the port
+so packet filters should be applied consistently across untagged and tagged
+VLANs on the port.
+
+Flooding L2 domain
+^^^^^^^^^^^^^^^^^^
+
+For a given L2 VLAN domain, the switch device should flood multicast/broadcast
+and unknown unicast packets to all ports in domain, if allowed by port's
+current STP state.  The switch driver, knowing which ports are within which
+vlan L2 domain, can program the switch device for flooding.  The packet should
+also be sent to the port netdev for processing by the bridge driver.  The
+bridge should not reflood the packet to the same ports the device flooded.
+XXX: the mechanism to avoid duplicate flood packets is being discussed.
+
+It is possible for the switch device to not handle flooding and to push the
+packets up to the bridge driver for flooding.  This is not ideal as it scales
+poorly with the number of ports in the L2 domain: the device is much more
+efficient at flooding packets than software.
+
+IGMP Snooping
+^^^^^^^^^^^^^
+
+XXX: complete this section
+
+
+L3 Routing Offload
+------------------
+
+Offloading L3 routing requires that device be programmed with FIB entries from
+the kernel, with the device doing the FIB lookup and forwarding.  The device
+does a longest prefix match (LPM) on FIB entries matching route prefix and
+forwards the packet to the matching FIB entry's nexthop(s) egress ports.
+
+To program the device, the driver implements support for
+SWITCHDEV_OBJ_IPV[4|6]_FIB object using switchdev_port_obj_xxx ops.
+switchdev_port_obj_add is used both for adding a new FIB entry to the device
+and for modifying an existing entry on the device.
+
+XXX: Currently, only SWITCHDEV_OBJ_IPV4_FIB objects are supported.
+
+SWITCHDEV_OBJ_IPV4_FIB object passes:
+
+       struct switchdev_obj_ipv4_fib {         /* IPV4_FIB */
+               u32 dst;
+               int dst_len;
+               struct fib_info *fi;
+               u8 tos;
+               u8 type;
+               u32 nlflags;
+               u32 tb_id;
+       } ipv4_fib;
+
+to add/modify/delete the IPv4 dst/dst_len prefix on table tb_id.  The *fi
+structure holds details on the route and the route's nexthops.  *dev is one of
+the port netdevs mentioned in the route's nexthop list.  If the output port netdevs
+referenced in the route's nexthop list don't all have the same switch ID, the
+driver is not called to add/modify/delete the FIB entry.
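+
+For example, a route whose nexthops egress port netdevs can be installed
+with iproute2 (addresses and device names below are illustrative):
+
+       ip route add 12.0.0.4/32 via 11.0.0.9 dev sw1p2
+
+If the nexthop's output device is a switch port, the driver is called to
+program the FIB entry into the device.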
+
+Routes offloaded to the device are labeled with "offload" in the ip route
+listing:
+
+       $ ip route show
+       default via 192.168.0.2 dev eth0
+       11.0.0.0/30 dev sw1p1  proto kernel  scope link  src 11.0.0.2 offload
+       11.0.0.4/30 via 11.0.0.1 dev sw1p1  proto zebra  metric 20 offload
+       11.0.0.8/30 dev sw1p2  proto kernel  scope link  src 11.0.0.10 offload
+       11.0.0.12/30 via 11.0.0.9 dev sw1p2  proto zebra  metric 20 offload
+       12.0.0.2  proto zebra  metric 30 offload
+               nexthop via 11.0.0.1  dev sw1p1 weight 1
+               nexthop via 11.0.0.9  dev sw1p2 weight 1
+       12.0.0.3 via 11.0.0.1 dev sw1p1  proto zebra  metric 20 offload
+       12.0.0.4 via 11.0.0.9 dev sw1p2  proto zebra  metric 20 offload
+       192.168.0.0/24 dev eth0  proto kernel  scope link  src 192.168.0.15
+
+XXX: add/mod/del IPv6 FIB API
+
+Nexthop Resolution
+^^^^^^^^^^^^^^^^^^
+
+The FIB entry's nexthop list contains the nexthop tuple (gateway, dev), but for
+the switch device to forward the packet with the correct dst mac address, the
+nexthop gateways must be resolved to the neighbor's mac address.  Neighbor mac
+address discovery comes via the ARP (or ND) process and is available via the
+arp_tbl neighbor table.  To resolve the route's nexthop gateways, the driver
+should trigger the kernel's neighbor resolution process.  See the rocker
+driver's rocker_port_ipv4_resolve() for an example.
+
+The driver can monitor for updates to arp_tbl using the netevent notifier
+NETEVENT_NEIGH_UPDATE.  The device can be programmed with resolved nexthops
+for the routes as arp_tbl updates.
index 70d6cf608251b5cc9b35550c72c2095f2a87c026..f37814693ad31e381fc01d4a1878ba3490cb6d57 100644 (file)
@@ -8,14 +8,8 @@ For example if your action queues a packet to be processed later,
 or intentionally branches by redirecting a packet, then you need to
 clone the packet.
 
-There are certain fields in the skb tc_verd that need to be reset so we
-avoid loops, etc.  A few are generic enough that skb_act_clone()
-resets them for you, so invoke skb_act_clone() rather than skb_clone().
-
 2) If you munge any packet thou shalt call pskb_expand_head in the case
 someone else is referencing the skb. After that you "own" the skb.
-You must also tell us if it is ok to munge the packet (TC_OK2MUNGE),
-this way any action downstream can stomp on the packet.
 
 3) Dropping packets you don't own is a no-no. You simply return
 TC_ACT_SHOT to the caller and they will drop it.
index 74122ada99492c573e6cc29303bbc24637b5c1f0..aa06fcf5f8c21d8e5e073307822de89a0cb590fa 100644 (file)
@@ -1,6 +1,6 @@
 IBM s390 QDIO Ethernet Driver
 
-HiperSockets Bridge Port Support
+OSA and HiperSockets Bridge Port Support
 
 Uevents
 
@@ -8,7 +8,7 @@ To generate the events the device must be assigned a role of either
 a primary or a secondary Bridge Port. For more information, see
 "z/VM Connectivity, SC24-6174".
 
-When run on HiperSockets Bridge Capable Port hardware, and the state
+When run on OSA or HiperSockets Bridge Capable Port hardware, and the state
 of some configured Bridge Port device on the channel changes, a udev
 event with ACTION=CHANGE is emitted on behalf of the corresponding
 ccwgroup device. The event has the following attributes:
index d8afd29536786b0907c795afeb8cd5aa3ff2d3b6..5bb0b9e3059f8ed3526133a774495c33da17a6b0 100644 (file)
@@ -652,7 +652,6 @@ M:  Tom Lendacky <thomas.lendacky@amd.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     drivers/net/ethernet/amd/xgbe/
-F:     drivers/net/phy/amd-xgbe-phy.c
 
 AMS (Apple Motion Sensor) DRIVER
 M:     Michael Hanselmann <linux-kernel@hansmi.ch>
@@ -922,6 +921,13 @@ M: Krzysztof Halasa <khalasa@piap.pl>
 S:     Maintained
 F:     arch/arm/mach-cns3xxx/
 
+ARM/CAVIUM THUNDER NETWORK DRIVER
+M:     Sunil Goutham <sgoutham@cavium.com>
+M:     Robert Richter <rric@kernel.org>
+L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S:     Supported
+F:     drivers/net/ethernet/cavium/
+
 ARM/CIRRUS LOGIC CLPS711X ARM ARCHITECTURE
 M:     Alexander Shiyan <shc_work@mail.ru>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -6366,6 +6372,12 @@ F:       include/uapi/linux/meye.h
 F:     include/uapi/linux/ivtv*
 F:     include/uapi/linux/uvcvideo.h
 
+MEDIATEK MT7601U WIRELESS LAN DRIVER
+M:     Jakub Kicinski <kubakici@wp.pl>
+L:     linux-wireless@vger.kernel.org
+S:     Maintained
+F:     drivers/net/wireless/mediatek/mt7601u/
+
 MEGARAID SCSI/SAS DRIVERS
 M:     Kashyap Desai <kashyap.desai@avagotech.com>
 M:     Sumit Saxena <sumit.saxena@avagotech.com>
index e0e23582c8b4e4687209a219c0d2bbc86b88bbd9..4550d247e308be128b439b0735d853f4745f3618 100644 (file)
@@ -873,6 +873,16 @@ b_epilogue:
                        off = offsetof(struct sk_buff, queue_mapping);
                        emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
                        break;
+               case BPF_LDX | BPF_W | BPF_ABS:
+                       /*
+                        * load a 32bit word from struct seccomp_data.
+                        * seccomp_check_filter() will already have checked
+                        * that k is 32bit aligned and lies within the
+                        * struct seccomp_data.
+                        */
+                       ctx->seen |= SEEN_SKB;
+                       emit(ARM_LDR_I(r_A, r_skb, k), ctx);
+                       break;
                default:
                        return -1;
                }
index ddeff4844a100de83b52ef9dae9f42974666dc23..579a8fd74be07804d983a298641b755526f1cb44 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/filter.h>
 #include <linux/if_vlan.h>
 #include <asm/cacheflush.h>
+#include <linux/bpf.h>
 
 int bpf_jit_enable __read_mostly;
 
@@ -37,7 +38,8 @@ static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
        return ptr + len;
 }
 
-#define EMIT(bytes, len)       do { prog = emit_code(prog, bytes, len); } while (0)
+#define EMIT(bytes, len) \
+       do { prog = emit_code(prog, bytes, len); cnt += len; } while (0)
 
 #define EMIT1(b1)              EMIT(b1, 1)
 #define EMIT2(b1, b2)          EMIT((b1) + ((b2) << 8), 2)
@@ -186,31 +188,31 @@ struct jit_context {
 #define BPF_MAX_INSN_SIZE      128
 #define BPF_INSN_SAFETY                64
 
-static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
-                 int oldproglen, struct jit_context *ctx)
+#define STACKSIZE \
+       (MAX_BPF_STACK + \
+        32 /* space for rbx, r13, r14, r15 */ + \
+        8 /* space for skb_copy_bits() buffer */)
+
+#define PROLOGUE_SIZE 51
+
+/* emit x64 prologue code for BPF program and check its size.
+ * bpf_tail_call helper will skip it while jumping into another program
+ */
+static void emit_prologue(u8 **pprog)
 {
-       struct bpf_insn *insn = bpf_prog->insnsi;
-       int insn_cnt = bpf_prog->len;
-       bool seen_ld_abs = ctx->seen_ld_abs | (oldproglen == 0);
-       bool seen_exit = false;
-       u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
-       int i;
-       int proglen = 0;
-       u8 *prog = temp;
-       int stacksize = MAX_BPF_STACK +
-               32 /* space for rbx, r13, r14, r15 */ +
-               8 /* space for skb_copy_bits() buffer */;
+       u8 *prog = *pprog;
+       int cnt = 0;
 
        EMIT1(0x55); /* push rbp */
        EMIT3(0x48, 0x89, 0xE5); /* mov rbp,rsp */
 
-       /* sub rsp, stacksize */
-       EMIT3_off32(0x48, 0x81, 0xEC, stacksize);
+       /* sub rsp, STACKSIZE */
+       EMIT3_off32(0x48, 0x81, 0xEC, STACKSIZE);
 
        /* all classic BPF filters use R6(rbx) save it */
 
        /* mov qword ptr [rbp-X],rbx */
-       EMIT3_off32(0x48, 0x89, 0x9D, -stacksize);
+       EMIT3_off32(0x48, 0x89, 0x9D, -STACKSIZE);
 
        /* bpf_convert_filter() maps classic BPF register X to R7 and uses R8
         * as temporary, so all tcpdump filters need to spill/fill R7(r13) and
@@ -221,16 +223,112 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
         */
 
        /* mov qword ptr [rbp-X],r13 */
-       EMIT3_off32(0x4C, 0x89, 0xAD, -stacksize + 8);
+       EMIT3_off32(0x4C, 0x89, 0xAD, -STACKSIZE + 8);
        /* mov qword ptr [rbp-X],r14 */
-       EMIT3_off32(0x4C, 0x89, 0xB5, -stacksize + 16);
+       EMIT3_off32(0x4C, 0x89, 0xB5, -STACKSIZE + 16);
        /* mov qword ptr [rbp-X],r15 */
-       EMIT3_off32(0x4C, 0x89, 0xBD, -stacksize + 24);
+       EMIT3_off32(0x4C, 0x89, 0xBD, -STACKSIZE + 24);
 
        /* clear A and X registers */
        EMIT2(0x31, 0xc0); /* xor eax, eax */
        EMIT3(0x4D, 0x31, 0xED); /* xor r13, r13 */
 
+       /* clear tail_cnt: mov qword ptr [rbp-X], rax */
+       EMIT3_off32(0x48, 0x89, 0x85, -STACKSIZE + 32);
+
+       BUILD_BUG_ON(cnt != PROLOGUE_SIZE);
+       *pprog = prog;
+}
+
+/* generate the following code:
+ * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
+ *   if (index >= array->map.max_entries)
+ *     goto out;
+ *   if (++tail_call_cnt > MAX_TAIL_CALL_CNT)
+ *     goto out;
+ *   prog = array->prog[index];
+ *   if (prog == NULL)
+ *     goto out;
+ *   goto *(prog->bpf_func + prologue_size);
+ * out:
+ */
+static void emit_bpf_tail_call(u8 **pprog)
+{
+       u8 *prog = *pprog;
+       int label1, label2, label3;
+       int cnt = 0;
+
+       /* rdi - pointer to ctx
+        * rsi - pointer to bpf_array
+        * rdx - index in bpf_array
+        */
+
+       /* if (index >= array->map.max_entries)
+        *   goto out;
+        */
+       EMIT4(0x48, 0x8B, 0x46,                   /* mov rax, qword ptr [rsi + 16] */
+             offsetof(struct bpf_array, map.max_entries));
+       EMIT3(0x48, 0x39, 0xD0);                  /* cmp rax, rdx */
+#define OFFSET1 44 /* number of bytes to jump */
+       EMIT2(X86_JBE, OFFSET1);                  /* jbe out */
+       label1 = cnt;
+
+       /* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
+        *   goto out;
+        */
+       EMIT2_off32(0x8B, 0x85, -STACKSIZE + 36); /* mov eax, dword ptr [rbp - 516] */
+       EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */
+#define OFFSET2 33
+       EMIT2(X86_JA, OFFSET2);                   /* ja out */
+       label2 = cnt;
+       EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
+       EMIT2_off32(0x89, 0x85, -STACKSIZE + 36); /* mov dword ptr [rbp - 516], eax */
+
+       /* prog = array->prog[index]; */
+       EMIT4(0x48, 0x8D, 0x44, 0xD6);            /* lea rax, [rsi + rdx * 8 + 0x50] */
+       EMIT1(offsetof(struct bpf_array, prog));
+       EMIT3(0x48, 0x8B, 0x00);                  /* mov rax, qword ptr [rax] */
+
+       /* if (prog == NULL)
+        *   goto out;
+        */
+       EMIT4(0x48, 0x83, 0xF8, 0x00);            /* cmp rax, 0 */
+#define OFFSET3 10
+       EMIT2(X86_JE, OFFSET3);                   /* je out */
+       label3 = cnt;
+
+       /* goto *(prog->bpf_func + prologue_size); */
+       EMIT4(0x48, 0x8B, 0x40,                   /* mov rax, qword ptr [rax + 32] */
+             offsetof(struct bpf_prog, bpf_func));
+       EMIT4(0x48, 0x83, 0xC0, PROLOGUE_SIZE);   /* add rax, prologue_size */
+
+       /* now we're ready to jump into next BPF program
+        * rdi == ctx (1st arg)
+        * rax == prog->bpf_func + prologue_size
+        */
+       EMIT2(0xFF, 0xE0);                        /* jmp rax */
+
+       /* out: */
+       BUILD_BUG_ON(cnt - label1 != OFFSET1);
+       BUILD_BUG_ON(cnt - label2 != OFFSET2);
+       BUILD_BUG_ON(cnt - label3 != OFFSET3);
+       *pprog = prog;
+}
+
+static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
+                 int oldproglen, struct jit_context *ctx)
+{
+       struct bpf_insn *insn = bpf_prog->insnsi;
+       int insn_cnt = bpf_prog->len;
+       bool seen_ld_abs = ctx->seen_ld_abs | (oldproglen == 0);
+       bool seen_exit = false;
+       u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
+       int i, cnt = 0;
+       int proglen = 0;
+       u8 *prog = temp;
+
+       emit_prologue(&prog);
+
        if (seen_ld_abs) {
                /* r9d : skb->len - skb->data_len (headlen)
                 * r10 : skb->data
@@ -739,6 +837,10 @@ xadd:                      if (is_imm8(insn->off))
                        }
                        break;
 
+               case BPF_JMP | BPF_CALL | BPF_X:
+                       emit_bpf_tail_call(&prog);
+                       break;
+
                        /* cond jump */
                case BPF_JMP | BPF_JEQ | BPF_X:
                case BPF_JMP | BPF_JNE | BPF_X:
@@ -891,13 +993,13 @@ common_load:
                        /* update cleanup_addr */
                        ctx->cleanup_addr = proglen;
                        /* mov rbx, qword ptr [rbp-X] */
-                       EMIT3_off32(0x48, 0x8B, 0x9D, -stacksize);
+                       EMIT3_off32(0x48, 0x8B, 0x9D, -STACKSIZE);
                        /* mov r13, qword ptr [rbp-X] */
-                       EMIT3_off32(0x4C, 0x8B, 0xAD, -stacksize + 8);
+                       EMIT3_off32(0x4C, 0x8B, 0xAD, -STACKSIZE + 8);
                        /* mov r14, qword ptr [rbp-X] */
-                       EMIT3_off32(0x4C, 0x8B, 0xB5, -stacksize + 16);
+                       EMIT3_off32(0x4C, 0x8B, 0xB5, -STACKSIZE + 16);
                        /* mov r15, qword ptr [rbp-X] */
-                       EMIT3_off32(0x4C, 0x8B, 0xBD, -stacksize + 24);
+                       EMIT3_off32(0x4C, 0x8B, 0xBD, -STACKSIZE + 24);
 
                        EMIT1(0xC9); /* leave */
                        EMIT1(0xC3); /* ret */
index f22cc56fd1b383f7ef37472808070e8e31120a11..5ad0d53545356147c2bf153e77eb15b199094596 100644 (file)
@@ -244,7 +244,7 @@ int af_alg_accept(struct sock *sk, struct socket *newsock)
        if (!type)
                goto unlock;
 
-       sk2 = sk_alloc(sock_net(sk), PF_ALG, GFP_KERNEL, &alg_proto);
+       sk2 = sk_alloc(sock_net(sk), PF_ALG, GFP_KERNEL, &alg_proto, 0);
        err = -ENOMEM;
        if (!sk2)
                goto unlock;
@@ -324,7 +324,7 @@ static int alg_create(struct net *net, struct socket *sock, int protocol,
                return -EPROTONOSUPPORT;
 
        err = -ENOMEM;
-       sk = sk_alloc(net, PF_ALG, GFP_KERNEL, &alg_proto);
+       sk = sk_alloc(net, PF_ALG, GFP_KERNEL, &alg_proto, kern);
        if (!sk)
                goto out;
 
index 93dca2e73bf5a9d696e5c169d6499a8479901ac5..a8da3a50e374f8bb1b69a90ac049287ba11069b1 100644 (file)
@@ -116,8 +116,8 @@ static bool disable64;
 static short nvpibits = -1;
 static short nvcibits = -1;
 static short rx_skb_reserve = 16;
-static bool irq_coalesce = 1;
-static bool sdh = 0;
+static bool irq_coalesce = true;
+static bool sdh;
 
 /* Read from EEPROM = 0000 0011b */
 static unsigned int readtab[] = {
index 924f8e26789dbb3efa4bd911f2d5f3428b6c5ecb..65e65903faa072f5d9f8897bf9052112ad624866 100644 (file)
@@ -2618,7 +2618,7 @@ static void ia_close(struct atm_vcc *vcc)
         if (vcc->qos.txtp.traffic_class != ATM_NONE) {
            iadev->close_pending++;
           prepare_to_wait(&iadev->timeout_wait, &wait, TASK_UNINTERRUPTIBLE);
-          schedule_timeout(50);
+          schedule_timeout(msecs_to_jiffies(500));
           finish_wait(&iadev->timeout_wait, &wait);
            spin_lock_irqsave(&iadev->tx_lock, flags); 
            while((skb = skb_dequeue(&iadev->tx_backlog))) {
index 74ccb02e0f10c8c52a1db7ab3948f5d697b3cddc..5f6018e7cd4c42c5b4e2f7670c28e05a2bc21eb5 100644 (file)
@@ -226,6 +226,7 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
                chip->of_node   = cc->core->dev.of_node;
 #endif
        switch (bus->chipinfo.id) {
+       case BCMA_CHIP_ID_BCM4707:
        case BCMA_CHIP_ID_BCM5357:
        case BCMA_CHIP_ID_BCM53572:
                chip->ngpio     = 32;
@@ -235,16 +236,17 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
        }
 
        /*
-        * On MIPS we register GPIO devices (LEDs, buttons) using absolute GPIO
-        * pin numbers. We don't have Device Tree there and we can't really use
-        * relative (per chip) numbers.
-        * So let's use predictable base for BCM47XX and "random" for all other.
+        * Register SoC GPIO devices with absolute GPIO pin base.
+        * On MIPS, we don't have Device Tree and we can't use relative (per chip)
+        * GPIO numbers.
+        * On some ARM devices, user space may want to access some system GPIO
+        * pins directly, which is easier to do with a predictable GPIO base.
         */
-#if IS_BUILTIN(CONFIG_BCM47XX)
-       chip->base              = bus->num * BCMA_GPIO_MAX_PINS;
-#else
-       chip->base              = -1;
-#endif
+       if (IS_BUILTIN(CONFIG_BCM47XX) ||
+           cc->core->bus->hosttype == BCMA_HOSTTYPE_SOC)
+               chip->base              = bus->num * BCMA_GPIO_MAX_PINS;
+       else
+               chip->base              = -1;
 
        err = bcma_gpio_irq_domain_init(cc);
        if (err)
index cee20354ac37b8297f991d65ef693a90153a2f1c..c097909c589c240e4b9d2ca44e1a9b7084ea9b15 100644 (file)
@@ -598,7 +598,7 @@ static struct socket *drbd_try_connect(struct drbd_connection *connection)
        memcpy(&peer_in6, &connection->peer_addr, peer_addr_len);
 
        what = "sock_create_kern";
-       err = sock_create_kern(((struct sockaddr *)&src_in6)->sa_family,
+       err = sock_create_kern(&init_net, ((struct sockaddr *)&src_in6)->sa_family,
                               SOCK_STREAM, IPPROTO_TCP, &sock);
        if (err < 0) {
                sock = NULL;
@@ -693,7 +693,7 @@ static int prepare_listen_socket(struct drbd_connection *connection, struct acce
        memcpy(&my_addr, &connection->my_addr, my_addr_len);
 
        what = "sock_create_kern";
-       err = sock_create_kern(((struct sockaddr *)&my_addr)->sa_family,
+       err = sock_create_kern(&init_net, ((struct sockaddr *)&my_addr)->sa_family,
                               SOCK_STREAM, IPPROTO_TCP, &s_listen);
        if (err) {
                s_listen = NULL;
index ed5c2738bea20efcf0c6ed0a920ad9faf5b1fd67..2e777071e1dcb8bd544a16aedd58ca28ad2f43bc 100644 (file)
@@ -9,6 +9,10 @@ config BT_BCM
        tristate
        select FW_LOADER
 
+config BT_RTL
+       tristate
+       select FW_LOADER
+
 config BT_HCIBTUSB
        tristate "HCI USB driver"
        depends on USB
@@ -32,6 +36,17 @@ config BT_HCIBTUSB_BCM
 
          Say Y here to compile support for Broadcom protocol.
 
+config BT_HCIBTUSB_RTL
+       bool "Realtek protocol support"
+       depends on BT_HCIBTUSB
+       select BT_RTL
+       default y
+       help
+         The Realtek protocol support enables firmware and configuration
+         download support for Realtek Bluetooth controllers.
+
+         Say Y here to compile support for Realtek protocol.
+
 config BT_HCIBTSDIO
        tristate "HCI SDIO driver"
        depends on MMC
index dd0d9c40b99914817f76728d600fb359dbcac007..f40e194e7080183e999ebb5ac8381130cb7a4b54 100644 (file)
@@ -21,6 +21,7 @@ obj-$(CONFIG_BT_MRVL)         += btmrvl.o
 obj-$(CONFIG_BT_MRVL_SDIO)     += btmrvl_sdio.o
 obj-$(CONFIG_BT_WILINK)                += btwilink.o
 obj-$(CONFIG_BT_BCM)           += btbcm.o
+obj-$(CONFIG_BT_RTL)           += btrtl.o
 
 btmrvl-y                       := btmrvl_main.o
 btmrvl-$(CONFIG_DEBUG_FS)      += btmrvl_debugfs.o
index 4bba86677adc64553fe8415d9b6812bfb3e1449d..728fce38a5a24cd6fb142f77b7a7178508d8e0c1 100644 (file)
@@ -55,12 +55,6 @@ int btbcm_check_bdaddr(struct hci_dev *hdev)
        }
 
        bda = (struct hci_rp_read_bd_addr *)skb->data;
-       if (bda->status) {
-               BT_ERR("%s: BCM: Device address result failed (%02x)",
-                      hdev->name, bda->status);
-               kfree_skb(skb);
-               return -bt_to_errno(bda->status);
-       }
 
        /* The address 00:20:70:02:A0:00 indicates a BCM20702A0 controller
         * with no configured address.
index 2d43d4279b0092d8cfe5f225e6574e7d4ccea67e..828f2f8d1568c8c50962dee7d8e77fcfcd669972 100644 (file)
@@ -53,12 +53,6 @@ int btintel_check_bdaddr(struct hci_dev *hdev)
        }
 
        bda = (struct hci_rp_read_bd_addr *)skb->data;
-       if (bda->status) {
-               BT_ERR("%s: Intel device address result failed (%02x)",
-                      hdev->name, bda->status);
-               kfree_skb(skb);
-               return -bt_to_errno(bda->status);
-       }
 
        /* For some Intel based controllers, the default Bluetooth device
         * address 00:03:19:9E:8B:00 can be found. These controllers are
index 01d6da577eeb0713127f57c264e6345b5055781d..b9a811900f6ab534087e17f3726c840a8afdb34e 100644 (file)
@@ -1217,7 +1217,7 @@ static void btmrvl_sdio_dump_firmware(struct btmrvl_private *priv)
        unsigned int reg, reg_start, reg_end;
        enum rdwr_status stat;
        u8 *dbg_ptr, *end_ptr, *fw_dump_data, *fw_dump_ptr;
-       u8 dump_num, idx, i, read_reg, doneflag = 0;
+       u8 dump_num = 0, idx, i, read_reg, doneflag = 0;
        u32 memory_size, fw_dump_len = 0;
 
        /* dump sdio register first */
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
new file mode 100644 (file)
index 0000000..8428893
--- /dev/null
@@ -0,0 +1,390 @@
+/*
+ *  Bluetooth support for Realtek devices
+ *
+ *  Copyright (C) 2015 Endless Mobile, Inc.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/firmware.h>
+#include <asm/unaligned.h>
+#include <linux/usb.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "btrtl.h"
+
+#define VERSION "0.1"
+
+#define RTL_EPATCH_SIGNATURE   "Realtech"
+#define RTL_ROM_LMP_3499       0x3499
+#define RTL_ROM_LMP_8723A      0x1200
+#define RTL_ROM_LMP_8723B      0x8723
+#define RTL_ROM_LMP_8821A      0x8821
+#define RTL_ROM_LMP_8761A      0x8761
+
+static int rtl_read_rom_version(struct hci_dev *hdev, u8 *version)
+{
+       struct rtl_rom_version_evt *rom_version;
+       struct sk_buff *skb;
+
+       /* Read RTL ROM version command */
+       skb = __hci_cmd_sync(hdev, 0xfc6d, 0, NULL, HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb)) {
+               BT_ERR("%s: Read ROM version failed (%ld)",
+                      hdev->name, PTR_ERR(skb));
+               return PTR_ERR(skb);
+       }
+
+       if (skb->len != sizeof(*rom_version)) {
+               BT_ERR("%s: RTL version event length mismatch", hdev->name);
+               kfree_skb(skb);
+               return -EIO;
+       }
+
+       rom_version = (struct rtl_rom_version_evt *)skb->data;
+       BT_INFO("%s: rom_version status=%x version=%x",
+               hdev->name, rom_version->status, rom_version->version);
+
+       *version = rom_version->version;
+
+       kfree_skb(skb);
+       return 0;
+}
+
+static int rtl8723b_parse_firmware(struct hci_dev *hdev, u16 lmp_subver,
+                                  const struct firmware *fw,
+                                  unsigned char **_buf)
+{
+       const u8 extension_sig[] = { 0x51, 0x04, 0xfd, 0x77 };
+       struct rtl_epatch_header *epatch_info;
+       unsigned char *buf;
+       int i, ret, len;
+       size_t min_size;
+       u8 opcode, length, data, rom_version = 0;
+       int project_id = -1;
+       const unsigned char *fwptr, *chip_id_base;
+       const unsigned char *patch_length_base, *patch_offset_base;
+       u32 patch_offset = 0;
+       u16 patch_length, num_patches;
+       const u16 project_id_to_lmp_subver[] = {
+               RTL_ROM_LMP_8723A,
+               RTL_ROM_LMP_8723B,
+               RTL_ROM_LMP_8821A,
+               RTL_ROM_LMP_8761A
+       };
+
+       ret = rtl_read_rom_version(hdev, &rom_version);
+       if (ret)
+               return ret;
+
+       min_size = sizeof(struct rtl_epatch_header) + sizeof(extension_sig) + 3;
+       if (fw->size < min_size)
+               return -EINVAL;
+
+       fwptr = fw->data + fw->size - sizeof(extension_sig);
+       if (memcmp(fwptr, extension_sig, sizeof(extension_sig)) != 0) {
+               BT_ERR("%s: extension section signature mismatch", hdev->name);
+               return -EINVAL;
+       }
+
+       /* Loop backwards from the end of the firmware, parsing instructions,
+        * until we find the instruction that identifies the "project ID" for
+        * the hardware supported by this firmware file.
+        * Once we have that, we double-check that the project_id is suitable
+        * for the hardware we are working with.
+        */
+       while (fwptr >= fw->data + (sizeof(struct rtl_epatch_header) + 3)) {
+               opcode = *--fwptr;
+               length = *--fwptr;
+               data = *--fwptr;
+
+               BT_DBG("check op=%x len=%x data=%x", opcode, length, data);
+
+               if (opcode == 0xff) /* EOF */
+                       break;
+
+               if (length == 0) {
+                       BT_ERR("%s: found instruction with length 0",
+                              hdev->name);
+                       return -EINVAL;
+               }
+
+               if (opcode == 0 && length == 1) {
+                       project_id = data;
+                       break;
+               }
+
+               fwptr -= length;
+       }
+
+       if (project_id < 0) {
+               BT_ERR("%s: failed to find version instruction", hdev->name);
+               return -EINVAL;
+       }
+
+       if (project_id >= ARRAY_SIZE(project_id_to_lmp_subver)) {
+               BT_ERR("%s: unknown project id %d", hdev->name, project_id);
+               return -EINVAL;
+       }
+
+       if (lmp_subver != project_id_to_lmp_subver[project_id]) {
+               BT_ERR("%s: firmware is for %x but this is a %x", hdev->name,
+                      project_id_to_lmp_subver[project_id], lmp_subver);
+               return -EINVAL;
+       }
+
+       epatch_info = (struct rtl_epatch_header *)fw->data;
+       if (memcmp(epatch_info->signature, RTL_EPATCH_SIGNATURE, 8) != 0) {
+               BT_ERR("%s: bad EPATCH signature", hdev->name);
+               return -EINVAL;
+       }
+
+       num_patches = le16_to_cpu(epatch_info->num_patches);
+       BT_DBG("fw_version=%x, num_patches=%d",
+              le32_to_cpu(epatch_info->fw_version), num_patches);
+
+       /* After the rtl_epatch_header there is a funky patch metadata section.
+        * Assuming 2 patches, the layout is:
+        * ChipID1 ChipID2 PatchLength1 PatchLength2 PatchOffset1 PatchOffset2
+        *
+        * Find the right patch for this chip.
+        */
+       min_size += 8 * num_patches;
+       if (fw->size < min_size)
+               return -EINVAL;
+
+       chip_id_base = fw->data + sizeof(struct rtl_epatch_header);
+       patch_length_base = chip_id_base + (sizeof(u16) * num_patches);
+       patch_offset_base = patch_length_base + (sizeof(u16) * num_patches);
+       for (i = 0; i < num_patches; i++) {
+               u16 chip_id = get_unaligned_le16(chip_id_base +
+                                                (i * sizeof(u16)));
+               if (chip_id == rom_version + 1) {
+                       patch_length = get_unaligned_le16(patch_length_base +
+                                                         (i * sizeof(u16)));
+                       patch_offset = get_unaligned_le32(patch_offset_base +
+                                                         (i * sizeof(u32)));
+                       break;
+               }
+       }
+
+       if (!patch_offset) {
+               BT_ERR("%s: didn't find patch for chip id %d",
+                      hdev->name, rom_version);
+               return -EINVAL;
+       }
+
+       BT_DBG("length=%x offset=%x index %d", patch_length, patch_offset, i);
+       min_size = patch_offset + patch_length;
+       if (fw->size < min_size)
+               return -EINVAL;
+
+       /* Copy the firmware into a new buffer and write the version at
+        * the end.
+        */
+       len = patch_length;
+       buf = kmemdup(fw->data + patch_offset, patch_length, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       memcpy(buf + patch_length - 4, &epatch_info->fw_version, 4);
+
+       *_buf = buf;
+       return len;
+}
+
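+/* The firmware is pushed to the controller in RTL_FRAG_LEN-byte fragments
+ * via vendor command 0xfc20; bit 7 of the fragment index marks the final
+ * fragment.
+ */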
+static int rtl_download_firmware(struct hci_dev *hdev,
+                                const unsigned char *data, int fw_len)
+{
+       struct rtl_download_cmd *dl_cmd;
+       int frag_num = fw_len / RTL_FRAG_LEN + 1;
+       int frag_len = RTL_FRAG_LEN;
+       int ret = 0;
+       int i;
+
+       dl_cmd = kmalloc(sizeof(struct rtl_download_cmd), GFP_KERNEL);
+       if (!dl_cmd)
+               return -ENOMEM;
+
+       for (i = 0; i < frag_num; i++) {
+               struct sk_buff *skb;
+
+               BT_DBG("download fw (%d/%d)", i, frag_num);
+
+               dl_cmd->index = i;
+               if (i == (frag_num - 1)) {
+                       dl_cmd->index |= 0x80; /* data end */
+                       frag_len = fw_len % RTL_FRAG_LEN;
+               }
+               memcpy(dl_cmd->data, data, frag_len);
+
+               /* Send download command */
+               skb = __hci_cmd_sync(hdev, 0xfc20, frag_len + 1, dl_cmd,
+                                    HCI_INIT_TIMEOUT);
+               if (IS_ERR(skb)) {
+                       BT_ERR("%s: download fw command failed (%ld)",
+                              hdev->name, PTR_ERR(skb));
+                       ret = PTR_ERR(skb);
+                       goto out;
+               }
+
+               if (skb->len != sizeof(struct rtl_download_response)) {
+                       BT_ERR("%s: download fw event length mismatch",
+                              hdev->name);
+                       kfree_skb(skb);
+                       ret = -EIO;
+                       goto out;
+               }
+
+               kfree_skb(skb);
+               data += RTL_FRAG_LEN;
+       }
+
+out:
+       kfree(dl_cmd);
+       return ret;
+}
+
+static int btrtl_setup_rtl8723a(struct hci_dev *hdev)
+{
+       const struct firmware *fw;
+       int ret;
+
+       BT_INFO("%s: rtl: loading rtl_bt/rtl8723a_fw.bin", hdev->name);
+       ret = request_firmware(&fw, "rtl_bt/rtl8723a_fw.bin", &hdev->dev);
+       if (ret < 0) {
+               BT_ERR("%s: Failed to load rtl_bt/rtl8723a_fw.bin", hdev->name);
+               return ret;
+       }
+
+       if (fw->size < 8) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       /* Check that the firmware doesn't have the epatch signature
+        * (which is only for RTL8723B and newer).
+        */
+       if (!memcmp(fw->data, RTL_EPATCH_SIGNATURE, 8)) {
+               BT_ERR("%s: unexpected EPATCH signature!", hdev->name);
+               ret = -EINVAL;
+               goto out;
+       }
+
+       ret = rtl_download_firmware(hdev, fw->data, fw->size);
+
+out:
+       release_firmware(fw);
+       return ret;
+}
+
+static int btrtl_setup_rtl8723b(struct hci_dev *hdev, u16 lmp_subver,
+                               const char *fw_name)
+{
+       unsigned char *fw_data = NULL;
+       const struct firmware *fw;
+       int ret;
+
+       BT_INFO("%s: rtl: loading %s", hdev->name, fw_name);
+       ret = request_firmware(&fw, fw_name, &hdev->dev);
+       if (ret < 0) {
+               BT_ERR("%s: Failed to load %s", hdev->name, fw_name);
+               return ret;
+       }
+
+       ret = rtl8723b_parse_firmware(hdev, lmp_subver, fw, &fw_data);
+       if (ret < 0)
+               goto out;
+
+       ret = rtl_download_firmware(hdev, fw_data, ret);
+       kfree(fw_data);
+
+out:
+       release_firmware(fw);
+       return ret;
+}
+
+static struct sk_buff *btrtl_read_local_version(struct hci_dev *hdev)
+{
+       struct sk_buff *skb;
+
+       skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
+                            HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb)) {
+               BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION failed (%ld)",
+                      hdev->name, PTR_ERR(skb));
+               return skb;
+       }
+
+       if (skb->len != sizeof(struct hci_rp_read_local_version)) {
+               BT_ERR("%s: HCI_OP_READ_LOCAL_VERSION event length mismatch",
+                      hdev->name);
+               kfree_skb(skb);
+               return ERR_PTR(-EIO);
+       }
+
+       return skb;
+}
+
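+/* Setup hook: read the controller's LMP subversion and, when it matches a
+ * known Realtek ROM, download the corresponding firmware image.
+ */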
+int btrtl_setup_realtek(struct hci_dev *hdev)
+{
+       struct sk_buff *skb;
+       struct hci_rp_read_local_version *resp;
+       u16 lmp_subver;
+
+       skb = btrtl_read_local_version(hdev);
+       if (IS_ERR(skb))
+               return PTR_ERR(skb);
+
+       resp = (struct hci_rp_read_local_version *)skb->data;
+       BT_INFO("%s: rtl: examining hci_ver=%02x hci_rev=%04x lmp_ver=%02x "
+               "lmp_subver=%04x", hdev->name, resp->hci_ver, resp->hci_rev,
+               resp->lmp_ver, resp->lmp_subver);
+
+       lmp_subver = le16_to_cpu(resp->lmp_subver);
+       kfree_skb(skb);
+
+       /* Match a set of subver values that correspond to stock firmware,
+        * which is not compatible with standard btusb.
+        * If matched, upload an alternative firmware that does conform to
+        * standard btusb. Once that firmware is uploaded, the subver changes
+        * to a different value.
+        */
+       switch (lmp_subver) {
+       case RTL_ROM_LMP_8723A:
+       case RTL_ROM_LMP_3499:
+               return btrtl_setup_rtl8723a(hdev);
+       case RTL_ROM_LMP_8723B:
+               return btrtl_setup_rtl8723b(hdev, lmp_subver,
+                                           "rtl_bt/rtl8723b_fw.bin");
+       case RTL_ROM_LMP_8821A:
+               return btrtl_setup_rtl8723b(hdev, lmp_subver,
+                                           "rtl_bt/rtl8821a_fw.bin");
+       case RTL_ROM_LMP_8761A:
+               return btrtl_setup_rtl8723b(hdev, lmp_subver,
+                                           "rtl_bt/rtl8761a_fw.bin");
+       default:
+               BT_INFO("rtl: assuming no firmware upload needed.");
+               return 0;
+       }
+}
+EXPORT_SYMBOL_GPL(btrtl_setup_realtek);
+
+MODULE_AUTHOR("Daniel Drake <drake@endlessm.com>");
+MODULE_DESCRIPTION("Bluetooth support for Realtek devices ver " VERSION);
+MODULE_VERSION(VERSION);
+MODULE_LICENSE("GPL");
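As an aside, the patch-table walk in rtl8723b_parse_firmware above is easy to exercise in isolation. Below is a minimal userspace sketch that builds a hypothetical two-patch metadata block and locates the entry for a given ROM version; the rd16/rd32 helpers and all sample values are invented for illustration and are not part of this patch:

#include <stdint.h>
#include <stdio.h>

static uint16_t rd16(const uint8_t *p) { return p[0] | p[1] << 8; }
static uint32_t rd32(const uint8_t *p)
{
	return p[0] | p[1] << 8 | p[2] << 16 | (uint32_t)p[3] << 24;
}

int main(void)
{
	/* Layout after the epatch header, for num_patches == 2:
	 * ChipID1 ChipID2 PatchLength1 PatchLength2 PatchOffset1 PatchOffset2
	 */
	const uint8_t meta[] = {
		0x01, 0x00, 0x02, 0x00,		/* chip ids 1 and 2 */
		0x00, 0x10, 0x00, 0x20,		/* lengths 4096 and 8192 */
		0x20, 0x00, 0x00, 0x00,		/* offset 0x20 */
		0x20, 0x10, 0x00, 0x00,		/* offset 0x1020 */
	};
	const int num_patches = 2;
	const uint8_t rom_version = 1;	/* matches chip id rom_version + 1 */
	const uint8_t *lengths = meta + 2 * num_patches;
	const uint8_t *offsets = lengths + 2 * num_patches;
	int i;

	for (i = 0; i < num_patches; i++) {
		if (rd16(meta + 2 * i) != rom_version + 1)
			continue;
		printf("patch %d: length=%u offset=0x%x\n", i,
		       (unsigned)rd16(lengths + 2 * i),
		       (unsigned)rd32(offsets + 4 * i));
	}
	return 0;
}

For rom_version 1 this prints the second entry (chip id 2), length 8192 at offset 0x1020, mirroring the chip_id_base/patch_length_base/patch_offset_base arithmetic in the driver.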
diff --git a/drivers/bluetooth/btrtl.h b/drivers/bluetooth/btrtl.h
new file mode 100644 (file)
index 0000000..38ffe48
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+ *  Bluetooth support for Realtek devices
+ *
+ *  Copyright (C) 2015 Endless Mobile, Inc.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ */
+
+#define RTL_FRAG_LEN 252
+
+struct rtl_download_cmd {
+       __u8 index;
+       __u8 data[RTL_FRAG_LEN];
+} __packed;
+
+struct rtl_download_response {
+       __u8 status;
+       __u8 index;
+} __packed;
+
+struct rtl_rom_version_evt {
+       __u8 status;
+       __u8 version;
+} __packed;
+
+struct rtl_epatch_header {
+       __u8 signature[8];
+       __le32 fw_version;
+       __le16 num_patches;
+} __packed;
+
+#if IS_ENABLED(CONFIG_BT_RTL)
+
+int btrtl_setup_realtek(struct hci_dev *hdev);
+
+#else
+
+static inline int btrtl_setup_realtek(struct hci_dev *hdev)
+{
+       return -EOPNOTSUPP;
+}
+
+#endif
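The CONFIG_BT_RTL guard above follows the standard kernel stub pattern: when the option is enabled, callers link against the exported btrtl_setup_realtek(); otherwise they get an inline fallback returning -EOPNOTSUPP, so call sites need no #ifdef of their own. A minimal sketch of the same idiom with a hypothetical CONFIG_FOO option and foo_setup() helper:

/* foo.h - hypothetical feature with a compiled-out stub */
#if IS_ENABLED(CONFIG_FOO)

int foo_setup(struct hci_dev *hdev);

#else

static inline int foo_setup(struct hci_dev *hdev)
{
	return -EOPNOTSUPP;	/* feature not built in */
}

#endif

A probe routine can then call foo_setup() unconditionally and simply propagate the error when the feature is compiled out.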
index 3c10d4dfe9a790e6e34f12022b1bcc2321ac648c..94c6c048130fe61be8173248bdb769eb7d50f84b 100644 (file)
 
 #include "btintel.h"
 #include "btbcm.h"
+#include "btrtl.h"
 
 #define VERSION "0.8"
 
 static bool disable_scofix;
 static bool force_scofix;
 
-static bool reset = 1;
+static bool reset = true;
 
 static struct usb_driver btusb_driver;
 
@@ -330,6 +331,7 @@ static const struct usb_device_id blacklist_table[] = {
 #define BTUSB_FIRMWARE_LOADED  7
 #define BTUSB_FIRMWARE_FAILED  8
 #define BTUSB_BOOTING          9
+#define BTUSB_RESET_RESUME     10
 
 struct btusb_data {
        struct hci_dev       *hdev;
@@ -1372,378 +1374,6 @@ static int btusb_setup_csr(struct hci_dev *hdev)
        return ret;
 }
 
-#define RTL_FRAG_LEN 252
-
-struct rtl_download_cmd {
-       __u8 index;
-       __u8 data[RTL_FRAG_LEN];
-} __packed;
-
-struct rtl_download_response {
-       __u8 status;
-       __u8 index;
-} __packed;
-
-struct rtl_rom_version_evt {
-       __u8 status;
-       __u8 version;
-} __packed;
-
-struct rtl_epatch_header {
-       __u8 signature[8];
-       __le32 fw_version;
-       __le16 num_patches;
-} __packed;
-
-#define RTL_EPATCH_SIGNATURE   "Realtech"
-#define RTL_ROM_LMP_3499       0x3499
-#define RTL_ROM_LMP_8723A      0x1200
-#define RTL_ROM_LMP_8723B      0x8723
-#define RTL_ROM_LMP_8821A      0x8821
-#define RTL_ROM_LMP_8761A      0x8761
-
-static int rtl_read_rom_version(struct hci_dev *hdev, u8 *version)
-{
-       struct rtl_rom_version_evt *rom_version;
-       struct sk_buff *skb;
-       int ret;
-
-       /* Read RTL ROM version command */
-       skb = __hci_cmd_sync(hdev, 0xfc6d, 0, NULL, HCI_INIT_TIMEOUT);
-       if (IS_ERR(skb)) {
-               BT_ERR("%s: Read ROM version failed (%ld)",
-                      hdev->name, PTR_ERR(skb));
-               return PTR_ERR(skb);
-       }
-
-       if (skb->len != sizeof(*rom_version)) {
-               BT_ERR("%s: RTL version event length mismatch", hdev->name);
-               kfree_skb(skb);
-               return -EIO;
-       }
-
-       rom_version = (struct rtl_rom_version_evt *)skb->data;
-       BT_INFO("%s: rom_version status=%x version=%x",
-               hdev->name, rom_version->status, rom_version->version);
-
-       ret = rom_version->status;
-       if (ret == 0)
-               *version = rom_version->version;
-
-       kfree_skb(skb);
-       return ret;
-}
-
-static int rtl8723b_parse_firmware(struct hci_dev *hdev, u16 lmp_subver,
-                                  const struct firmware *fw,
-                                  unsigned char **_buf)
-{
-       const u8 extension_sig[] = { 0x51, 0x04, 0xfd, 0x77 };
-       struct rtl_epatch_header *epatch_info;
-       unsigned char *buf;
-       int i, ret, len;
-       size_t min_size;
-       u8 opcode, length, data, rom_version = 0;
-       int project_id = -1;
-       const unsigned char *fwptr, *chip_id_base;
-       const unsigned char *patch_length_base, *patch_offset_base;
-       u32 patch_offset = 0;
-       u16 patch_length, num_patches;
-       const u16 project_id_to_lmp_subver[] = {
-               RTL_ROM_LMP_8723A,
-               RTL_ROM_LMP_8723B,
-               RTL_ROM_LMP_8821A,
-               RTL_ROM_LMP_8761A
-       };
-
-       ret = rtl_read_rom_version(hdev, &rom_version);
-       if (ret)
-               return -bt_to_errno(ret);
-
-       min_size = sizeof(struct rtl_epatch_header) + sizeof(extension_sig) + 3;
-       if (fw->size < min_size)
-               return -EINVAL;
-
-       fwptr = fw->data + fw->size - sizeof(extension_sig);
-       if (memcmp(fwptr, extension_sig, sizeof(extension_sig)) != 0) {
-               BT_ERR("%s: extension section signature mismatch", hdev->name);
-               return -EINVAL;
-       }
-
-       /* Loop from the end of the firmware parsing instructions, until
-        * we find an instruction that identifies the "project ID" for the
-        * hardware supported by this firwmare file.
-        * Once we have that, we double-check that that project_id is suitable
-        * for the hardware we are working with.
-        */
-       while (fwptr >= fw->data + (sizeof(struct rtl_epatch_header) + 3)) {
-               opcode = *--fwptr;
-               length = *--fwptr;
-               data = *--fwptr;
-
-               BT_DBG("check op=%x len=%x data=%x", opcode, length, data);
-
-               if (opcode == 0xff) /* EOF */
-                       break;
-
-               if (length == 0) {
-                       BT_ERR("%s: found instruction with length 0",
-                              hdev->name);
-                       return -EINVAL;
-               }
-
-               if (opcode == 0 && length == 1) {
-                       project_id = data;
-                       break;
-               }
-
-               fwptr -= length;
-       }
-
-       if (project_id < 0) {
-               BT_ERR("%s: failed to find version instruction", hdev->name);
-               return -EINVAL;
-       }
-
-       if (project_id >= ARRAY_SIZE(project_id_to_lmp_subver)) {
-               BT_ERR("%s: unknown project id %d", hdev->name, project_id);
-               return -EINVAL;
-       }
-
-       if (lmp_subver != project_id_to_lmp_subver[project_id]) {
-               BT_ERR("%s: firmware is for %x but this is a %x", hdev->name,
-                      project_id_to_lmp_subver[project_id], lmp_subver);
-               return -EINVAL;
-       }
-
-       epatch_info = (struct rtl_epatch_header *)fw->data;
-       if (memcmp(epatch_info->signature, RTL_EPATCH_SIGNATURE, 8) != 0) {
-               BT_ERR("%s: bad EPATCH signature", hdev->name);
-               return -EINVAL;
-       }
-
-       num_patches = le16_to_cpu(epatch_info->num_patches);
-       BT_DBG("fw_version=%x, num_patches=%d",
-              le32_to_cpu(epatch_info->fw_version), num_patches);
-
-       /* After the rtl_epatch_header there is a funky patch metadata section.
-        * Assuming 2 patches, the layout is:
-        * ChipID1 ChipID2 PatchLength1 PatchLength2 PatchOffset1 PatchOffset2
-        *
-        * Find the right patch for this chip.
-        */
-       min_size += 8 * num_patches;
-       if (fw->size < min_size)
-               return -EINVAL;
-
-       chip_id_base = fw->data + sizeof(struct rtl_epatch_header);
-       patch_length_base = chip_id_base + (sizeof(u16) * num_patches);
-       patch_offset_base = patch_length_base + (sizeof(u16) * num_patches);
-       for (i = 0; i < num_patches; i++) {
-               u16 chip_id = get_unaligned_le16(chip_id_base +
-                                                (i * sizeof(u16)));
-               if (chip_id == rom_version + 1) {
-                       patch_length = get_unaligned_le16(patch_length_base +
-                                                         (i * sizeof(u16)));
-                       patch_offset = get_unaligned_le32(patch_offset_base +
-                                                         (i * sizeof(u32)));
-                       break;
-               }
-       }
-
-       if (!patch_offset) {
-               BT_ERR("%s: didn't find patch for chip id %d",
-                      hdev->name, rom_version);
-               return -EINVAL;
-       }
-
-       BT_DBG("length=%x offset=%x index %d", patch_length, patch_offset, i);
-       min_size = patch_offset + patch_length;
-       if (fw->size < min_size)
-               return -EINVAL;
-
-       /* Copy the firmware into a new buffer and write the version at
-        * the end.
-        */
-       len = patch_length;
-       buf = kmemdup(fw->data + patch_offset, patch_length, GFP_KERNEL);
-       if (!buf)
-               return -ENOMEM;
-
-       memcpy(buf + patch_length - 4, &epatch_info->fw_version, 4);
-
-       *_buf = buf;
-       return len;
-}
-
-static int rtl_download_firmware(struct hci_dev *hdev,
-                                const unsigned char *data, int fw_len)
-{
-       struct rtl_download_cmd *dl_cmd;
-       int frag_num = fw_len / RTL_FRAG_LEN + 1;
-       int frag_len = RTL_FRAG_LEN;
-       int ret = 0;
-       int i;
-
-       dl_cmd = kmalloc(sizeof(struct rtl_download_cmd), GFP_KERNEL);
-       if (!dl_cmd)
-               return -ENOMEM;
-
-       for (i = 0; i < frag_num; i++) {
-               struct rtl_download_response *dl_resp;
-               struct sk_buff *skb;
-
-               BT_DBG("download fw (%d/%d)", i, frag_num);
-
-               dl_cmd->index = i;
-               if (i == (frag_num - 1)) {
-                       dl_cmd->index |= 0x80; /* data end */
-                       frag_len = fw_len % RTL_FRAG_LEN;
-               }
-               memcpy(dl_cmd->data, data, frag_len);
-
-               /* Send download command */
-               skb = __hci_cmd_sync(hdev, 0xfc20, frag_len + 1, dl_cmd,
-                                    HCI_INIT_TIMEOUT);
-               if (IS_ERR(skb)) {
-                       BT_ERR("%s: download fw command failed (%ld)",
-                              hdev->name, PTR_ERR(skb));
-                       ret = -PTR_ERR(skb);
-                       goto out;
-               }
-
-               if (skb->len != sizeof(*dl_resp)) {
-                       BT_ERR("%s: download fw event length mismatch",
-                              hdev->name);
-                       kfree_skb(skb);
-                       ret = -EIO;
-                       goto out;
-               }
-
-               dl_resp = (struct rtl_download_response *)skb->data;
-               if (dl_resp->status != 0) {
-                       kfree_skb(skb);
-                       ret = bt_to_errno(dl_resp->status);
-                       goto out;
-               }
-
-               kfree_skb(skb);
-               data += RTL_FRAG_LEN;
-       }
-
-out:
-       kfree(dl_cmd);
-       return ret;
-}
-
-static int btusb_setup_rtl8723a(struct hci_dev *hdev)
-{
-       struct btusb_data *data = dev_get_drvdata(&hdev->dev);
-       struct usb_device *udev = interface_to_usbdev(data->intf);
-       const struct firmware *fw;
-       int ret;
-
-       BT_INFO("%s: rtl: loading rtl_bt/rtl8723a_fw.bin", hdev->name);
-       ret = request_firmware(&fw, "rtl_bt/rtl8723a_fw.bin", &udev->dev);
-       if (ret < 0) {
-               BT_ERR("%s: Failed to load rtl_bt/rtl8723a_fw.bin", hdev->name);
-               return ret;
-       }
-
-       if (fw->size < 8) {
-               ret = -EINVAL;
-               goto out;
-       }
-
-       /* Check that the firmware doesn't have the epatch signature
-        * (which is only for RTL8723B and newer).
-        */
-       if (!memcmp(fw->data, RTL_EPATCH_SIGNATURE, 8)) {
-               BT_ERR("%s: unexpected EPATCH signature!", hdev->name);
-               ret = -EINVAL;
-               goto out;
-       }
-
-       ret = rtl_download_firmware(hdev, fw->data, fw->size);
-
-out:
-       release_firmware(fw);
-       return ret;
-}
-
-static int btusb_setup_rtl8723b(struct hci_dev *hdev, u16 lmp_subver,
-                               const char *fw_name)
-{
-       struct btusb_data *data = dev_get_drvdata(&hdev->dev);
-       struct usb_device *udev = interface_to_usbdev(data->intf);
-       unsigned char *fw_data = NULL;
-       const struct firmware *fw;
-       int ret;
-
-       BT_INFO("%s: rtl: loading %s", hdev->name, fw_name);
-       ret = request_firmware(&fw, fw_name, &udev->dev);
-       if (ret < 0) {
-               BT_ERR("%s: Failed to load %s", hdev->name, fw_name);
-               return ret;
-       }
-
-       ret = rtl8723b_parse_firmware(hdev, lmp_subver, fw, &fw_data);
-       if (ret < 0)
-               goto out;
-
-       ret = rtl_download_firmware(hdev, fw_data, ret);
-       kfree(fw_data);
-       if (ret < 0)
-               goto out;
-
-out:
-       release_firmware(fw);
-       return ret;
-}
-
-static int btusb_setup_realtek(struct hci_dev *hdev)
-{
-       struct sk_buff *skb;
-       struct hci_rp_read_local_version *resp;
-       u16 lmp_subver;
-
-       skb = btusb_read_local_version(hdev);
-       if (IS_ERR(skb))
-               return -PTR_ERR(skb);
-
-       resp = (struct hci_rp_read_local_version *)skb->data;
-       BT_INFO("%s: rtl: examining hci_ver=%02x hci_rev=%04x lmp_ver=%02x "
-               "lmp_subver=%04x", hdev->name, resp->hci_ver, resp->hci_rev,
-               resp->lmp_ver, resp->lmp_subver);
-
-       lmp_subver = le16_to_cpu(resp->lmp_subver);
-       kfree_skb(skb);
-
-       /* Match a set of subver values that correspond to stock firmware,
-        * which is not compatible with standard btusb.
-        * If matched, upload an alternative firmware that does conform to
-        * standard btusb. Once that firmware is uploaded, the subver changes
-        * to a different value.
-        */
-       switch (lmp_subver) {
-       case RTL_ROM_LMP_8723A:
-       case RTL_ROM_LMP_3499:
-               return btusb_setup_rtl8723a(hdev);
-       case RTL_ROM_LMP_8723B:
-               return btusb_setup_rtl8723b(hdev, lmp_subver,
-                                           "rtl_bt/rtl8723b_fw.bin");
-       case RTL_ROM_LMP_8821A:
-               return btusb_setup_rtl8723b(hdev, lmp_subver,
-                                           "rtl_bt/rtl8821a_fw.bin");
-       case RTL_ROM_LMP_8761A:
-               return btusb_setup_rtl8723b(hdev, lmp_subver,
-                                           "rtl_bt/rtl8761a_fw.bin");
-       default:
-               BT_INFO("rtl: assuming no firmware upload needed.");
-               return 0;
-       }
-}
-
 static const struct firmware *btusb_setup_intel_get_fw(struct hci_dev *hdev,
                                                       struct intel_version *ver)
 {
@@ -1951,12 +1581,6 @@ static int btusb_setup_intel(struct hci_dev *hdev)
        }
 
        ver = (struct intel_version *)skb->data;
-       if (ver->status) {
-               BT_ERR("%s Intel fw version event failed (%02x)", hdev->name,
-                      ver->status);
-               kfree_skb(skb);
-               return -bt_to_errno(ver->status);
-       }
 
        BT_INFO("%s: read Intel version: %02x%02x%02x%02x%02x%02x%02x%02x%02x",
                hdev->name, ver->hw_platform, ver->hw_variant,
@@ -2004,15 +1628,6 @@ static int btusb_setup_intel(struct hci_dev *hdev)
                return PTR_ERR(skb);
        }
 
-       if (skb->data[0]) {
-               u8 evt_status = skb->data[0];
-
-               BT_ERR("%s enable Intel manufacturer mode event failed (%02x)",
-                      hdev->name, evt_status);
-               kfree_skb(skb);
-               release_firmware(fw);
-               return -bt_to_errno(evt_status);
-       }
        kfree_skb(skb);
 
        disable_patch = 1;
@@ -2358,13 +1973,6 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
        }
 
        ver = (struct intel_version *)skb->data;
-       if (ver->status) {
-               BT_ERR("%s: Intel version command failure (%02x)",
-                      hdev->name, ver->status);
-               err = -bt_to_errno(ver->status);
-               kfree_skb(skb);
-               return err;
-       }
 
        /* The hardware platform number has a fixed value of 0x37 and
         * for now only accept this single value.
@@ -2439,13 +2047,6 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
        }
 
        params = (struct intel_boot_params *)skb->data;
-       if (params->status) {
-               BT_ERR("%s: Intel boot parameters command failure (%02x)",
-                      hdev->name, params->status);
-               err = -bt_to_errno(params->status);
-               kfree_skb(skb);
-               return err;
-       }
 
        BT_INFO("%s: Device revision is %u", hdev->name,
                le16_to_cpu(params->dev_revid));
@@ -2678,13 +2279,6 @@ static void btusb_hw_error_intel(struct hci_dev *hdev, u8 code)
                return;
        }
 
-       if (skb->data[0] != 0x00) {
-               BT_ERR("%s: Exception info command failure (%02x)",
-                      hdev->name, skb->data[0]);
-               kfree_skb(skb);
-               return;
-       }
-
        BT_ERR("%s: Exception info %s", hdev->name, (char *)(skb->data + 1));
 
        kfree_skb(skb);
@@ -2792,6 +2386,7 @@ struct qca_device_info {
 static const struct qca_device_info qca_devices_table[] = {
        { 0x00000100, 20, 4, 10 }, /* Rome 1.0 */
        { 0x00000101, 20, 4, 10 }, /* Rome 1.1 */
+       { 0x00000200, 28, 4, 18 }, /* Rome 2.0 */
        { 0x00000201, 28, 4, 18 }, /* Rome 2.1 */
        { 0x00000300, 28, 4, 18 }, /* Rome 3.0 */
        { 0x00000302, 28, 4, 18 }, /* Rome 3.2 */
@@ -3175,8 +2770,17 @@ static int btusb_probe(struct usb_interface *intf,
                hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
        }
 
-       if (id->driver_info & BTUSB_REALTEK)
-               hdev->setup = btusb_setup_realtek;
+#ifdef CONFIG_BT_HCIBTUSB_RTL
+       if (id->driver_info & BTUSB_REALTEK) {
+               hdev->setup = btrtl_setup_realtek;
+
+               /* Realtek devices lose their updated firmware over suspend,
+                * but the USB hub doesn't notice any status change.
+                * Explicitly request a device reset on resume.
+                */
+               set_bit(BTUSB_RESET_RESUME, &data->flags);
+       }
+#endif
 
        if (id->driver_info & BTUSB_AMP) {
                /* AMP controllers do not support SCO packets */
@@ -3308,6 +2912,14 @@ static int btusb_suspend(struct usb_interface *intf, pm_message_t message)
        btusb_stop_traffic(data);
        usb_kill_anchored_urbs(&data->tx_anchor);
 
+       /* Optionally request a device reset on resume, but only when
+        * wakeups are disabled. If wakeups are enabled we assume the
+        * device will stay powered up throughout suspend.
+        */
+       if (test_bit(BTUSB_RESET_RESUME, &data->flags) &&
+           !device_may_wakeup(&data->udev->dev))
+               data->udev->reset_resume = 1;
+
        return 0;
 }
 
index 55c135b7757a9df84745f05bf27870365cbddd73..7a722df97343ee6f25fb4c02959b8d64bacc4449 100644 (file)
@@ -22,7 +22,7 @@
  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  *
  */
-#define DEBUG
+
 #include <linux/platform_device.h>
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
index dc8e3d4356a0ff40f1b27ddc63de3e7eefef7a87..fc0056a28b8177cf2be5d0d40c37bd26d38f1aa9 100644 (file)
@@ -47,8 +47,8 @@
 
 #include "hci_uart.h"
 
-static bool txcrc = 1;
-static bool hciextn = 1;
+static bool txcrc = true;
+static bool hciextn = true;
 
 #define BCSP_TXWINSIZE 4
 
index a50936a17376b3654a6449c78554254aea19824a..563969942a1df2bee95ef9164b7f4fce509a2229 100644 (file)
@@ -140,12 +140,47 @@ static struct clk_regmap pll14_vote = {
        },
 };
 
+#define NSS_PLL_RATE(f, _l, _m, _n, i) \
+       {  \
+               .freq = f,  \
+               .l = _l, \
+               .m = _m, \
+               .n = _n, \
+               .ibits = i, \
+       }
+
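+/* PLL18 runs at pxo * (L + M/N) ahead of a one-bit /2 post divider: with
+ * the 25 MHz pxo, L=44 gives 1100/2 = 550 MHz, and L=58 with M/N = 16/25
+ * gives 1466/2 = 733 MHz.
+ */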
+static struct pll_freq_tbl pll18_freq_tbl[] = {
+       NSS_PLL_RATE(550000000, 44, 0, 1, 0x01495625),
+       NSS_PLL_RATE(733000000, 58, 16, 25, 0x014b5625),
+};
+
+static struct clk_pll pll18 = {
+       .l_reg = 0x31a4,
+       .m_reg = 0x31a8,
+       .n_reg = 0x31ac,
+       .config_reg = 0x31b4,
+       .mode_reg = 0x31a0,
+       .status_reg = 0x31b8,
+       .status_bit = 16,
+       .post_div_shift = 16,
+       .post_div_width = 1,
+       .freq_tbl = pll18_freq_tbl,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "pll18",
+               .parent_names = (const char *[]){ "pxo" },
+               .num_parents = 1,
+               .ops = &clk_pll_ops,
+       },
+};
+
 enum {
        P_PXO,
        P_PLL8,
        P_PLL3,
        P_PLL0,
        P_CXO,
+       P_PLL14,
+       P_PLL18,
 };
 
 static const struct parent_map gcc_pxo_pll8_map[] = {
@@ -197,6 +232,22 @@ static const char *gcc_pxo_pll8_pll0_map[] = {
        "pll0_vote",
 };
 
+static const struct parent_map gcc_pxo_pll8_pll14_pll18_pll0_map[] = {
+       { P_PXO, 0 },
+       { P_PLL8, 4 },
+       { P_PLL0, 2 },
+       { P_PLL14, 5 },
+       { P_PLL18, 1 }
+};
+
+static const char *gcc_pxo_pll8_pll14_pll18_pll0[] = {
+       "pxo",
+       "pll8_vote",
+       "pll0_vote",
+       "pll14",
+       "pll18",
+};
+
 static struct freq_tbl clk_tbl_gsbi_uart[] = {
        {  1843200, P_PLL8, 2,  6, 625 },
        {  3686400, P_PLL8, 2, 12, 625 },
@@ -2202,6 +2253,472 @@ static struct clk_branch ebi2_aon_clk = {
        },
 };
 
+static const struct freq_tbl clk_tbl_gmac[] = {
+       { 133000000, P_PLL0, 1,  50, 301 },
+       { 266000000, P_PLL0, 1, 127, 382 },
+       { }
+};
+
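+/* The GMAC sources are dual-banked RCGs: two complete NS/MD register sets
+ * plus a bank-select bit allow one bank to be reprogrammed while the other
+ * drives the clock, then switched over glitch-free.  The rates above are
+ * M/N fractions of pll0 (assuming the 800 MHz pll0, 800 * 50/301 is
+ * roughly 133 MHz).
+ */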
+static struct clk_dyn_rcg gmac_core1_src = {
+       .ns_reg[0] = 0x3cac,
+       .ns_reg[1] = 0x3cb0,
+       .md_reg[0] = 0x3ca4,
+       .md_reg[1] = 0x3ca8,
+       .bank_reg = 0x3ca0,
+       .mn[0] = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 16,
+               .m_val_shift = 16,
+               .width = 8,
+       },
+       .mn[1] = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 16,
+               .m_val_shift = 16,
+               .width = 8,
+       },
+       .s[0] = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map,
+       },
+       .s[1] = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map,
+       },
+       .p[0] = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .p[1] = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .mux_sel_bit = 0,
+       .freq_tbl = clk_tbl_gmac,
+       .clkr = {
+               .enable_reg = 0x3ca0,
+               .enable_mask = BIT(1),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gmac_core1_src",
+                       .parent_names = gcc_pxo_pll8_pll14_pll18_pll0,
+                       .num_parents = 5,
+                       .ops = &clk_dyn_rcg_ops,
+               },
+       },
+};
+
+static struct clk_branch gmac_core1_clk = {
+       .halt_reg = 0x3c20,
+       .halt_bit = 4,
+       .hwcg_reg = 0x3cb4,
+       .hwcg_bit = 6,
+       .clkr = {
+               .enable_reg = 0x3cb4,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gmac_core1_clk",
+                       .parent_names = (const char *[]){
+                               "gmac_core1_src",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_dyn_rcg gmac_core2_src = {
+       .ns_reg[0] = 0x3ccc,
+       .ns_reg[1] = 0x3cd0,
+       .md_reg[0] = 0x3cc4,
+       .md_reg[1] = 0x3cc8,
+       .bank_reg = 0x3ca0,
+       .mn[0] = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 16,
+               .m_val_shift = 16,
+               .width = 8,
+       },
+       .mn[1] = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 16,
+               .m_val_shift = 16,
+               .width = 8,
+       },
+       .s[0] = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map,
+       },
+       .s[1] = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map,
+       },
+       .p[0] = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .p[1] = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .mux_sel_bit = 0,
+       .freq_tbl = clk_tbl_gmac,
+       .clkr = {
+               .enable_reg = 0x3cc0,
+               .enable_mask = BIT(1),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gmac_core2_src",
+                       .parent_names = gcc_pxo_pll8_pll14_pll18_pll0,
+                       .num_parents = 5,
+                       .ops = &clk_dyn_rcg_ops,
+               },
+       },
+};
+
+static struct clk_branch gmac_core2_clk = {
+       .halt_reg = 0x3c20,
+       .halt_bit = 5,
+       .hwcg_reg = 0x3cd4,
+       .hwcg_bit = 6,
+       .clkr = {
+               .enable_reg = 0x3cd4,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gmac_core2_clk",
+                       .parent_names = (const char *[]){
+                               "gmac_core2_src",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_dyn_rcg gmac_core3_src = {
+       .ns_reg[0] = 0x3cec,
+       .ns_reg[1] = 0x3cf0,
+       .md_reg[0] = 0x3ce4,
+       .md_reg[1] = 0x3ce8,
+       .bank_reg = 0x3ce0,
+       .mn[0] = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 16,
+               .m_val_shift = 16,
+               .width = 8,
+       },
+       .mn[1] = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 16,
+               .m_val_shift = 16,
+               .width = 8,
+       },
+       .s[0] = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map,
+       },
+       .s[1] = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map,
+       },
+       .p[0] = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .p[1] = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .mux_sel_bit = 0,
+       .freq_tbl = clk_tbl_gmac,
+       .clkr = {
+               .enable_reg = 0x3ce0,
+               .enable_mask = BIT(1),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gmac_core3_src",
+                       .parent_names = gcc_pxo_pll8_pll14_pll18_pll0,
+                       .num_parents = 5,
+                       .ops = &clk_dyn_rcg_ops,
+               },
+       },
+};
+
+static struct clk_branch gmac_core3_clk = {
+       .halt_reg = 0x3c20,
+       .halt_bit = 6,
+       .hwcg_reg = 0x3cf4,
+       .hwcg_bit = 6,
+       .clkr = {
+               .enable_reg = 0x3cf4,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gmac_core3_clk",
+                       .parent_names = (const char *[]){
+                               "gmac_core3_src",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_dyn_rcg gmac_core4_src = {
+       .ns_reg[0] = 0x3d0c,
+       .ns_reg[1] = 0x3d10,
+       .md_reg[0] = 0x3d04,
+       .md_reg[1] = 0x3d08,
+       .bank_reg = 0x3d00,
+       .mn[0] = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 16,
+               .m_val_shift = 16,
+               .width = 8,
+       },
+       .mn[1] = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 16,
+               .m_val_shift = 16,
+               .width = 8,
+       },
+       .s[0] = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map,
+       },
+       .s[1] = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map,
+       },
+       .p[0] = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .p[1] = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .mux_sel_bit = 0,
+       .freq_tbl = clk_tbl_gmac,
+       .clkr = {
+               .enable_reg = 0x3d00,
+               .enable_mask = BIT(1),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gmac_core4_src",
+                       .parent_names = gcc_pxo_pll8_pll14_pll18_pll0,
+                       .num_parents = 5,
+                       .ops = &clk_dyn_rcg_ops,
+               },
+       },
+};
+
+static struct clk_branch gmac_core4_clk = {
+       .halt_reg = 0x3c20,
+       .halt_bit = 7,
+       .hwcg_reg = 0x3d14,
+       .hwcg_bit = 6,
+       .clkr = {
+               .enable_reg = 0x3d14,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gmac_core4_clk",
+                       .parent_names = (const char *[]){
+                               "gmac_core4_src",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static const struct freq_tbl clk_tbl_nss_tcm[] = {
+       { 266000000, P_PLL0, 3, 0, 0 },
+       { 400000000, P_PLL0, 2, 0, 0 },
+       { }
+};
+
+static struct clk_dyn_rcg nss_tcm_src = {
+       .ns_reg[0] = 0x3dc4,
+       .ns_reg[1] = 0x3dc8,
+       .bank_reg = 0x3dc0,
+       .s[0] = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map,
+       },
+       .s[1] = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map,
+       },
+       .p[0] = {
+               .pre_div_shift = 3,
+               .pre_div_width = 4,
+       },
+       .p[1] = {
+               .pre_div_shift = 3,
+               .pre_div_width = 4,
+       },
+       .mux_sel_bit = 0,
+       .freq_tbl = clk_tbl_nss_tcm,
+       .clkr = {
+               .enable_reg = 0x3dc0,
+               .enable_mask = BIT(1),
+               .hw.init = &(struct clk_init_data){
+                       .name = "nss_tcm_src",
+                       .parent_names = gcc_pxo_pll8_pll14_pll18_pll0,
+                       .num_parents = 5,
+                       .ops = &clk_dyn_rcg_ops,
+               },
+       },
+};
+
+static struct clk_branch nss_tcm_clk = {
+       .halt_reg = 0x3c20,
+       .halt_bit = 14,
+       .clkr = {
+               .enable_reg = 0x3dd0,
+               .enable_mask = BIT(6) | BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "nss_tcm_clk",
+                       .parent_names = (const char *[]){
+                               "nss_tcm_src",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
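+/* The UBI32 core rates derive from PLL18: 550 MHz direct, 275 MHz via the
+ * /2 pre-divider, 110 MHz via M/N = 1/5, and 733 MHz with PLL18 itself
+ * reprogrammed through its frequency table.
+ */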
+static const struct freq_tbl clk_tbl_nss[] = {
+       { 110000000, P_PLL18, 1, 1, 5 },
+       { 275000000, P_PLL18, 2, 0, 0 },
+       { 550000000, P_PLL18, 1, 0, 0 },
+       { 733000000, P_PLL18, 1, 0, 0 },
+       { }
+};
+
+static struct clk_dyn_rcg ubi32_core1_src_clk = {
+       .ns_reg[0] = 0x3d2c,
+       .ns_reg[1] = 0x3d30,
+       .md_reg[0] = 0x3d24,
+       .md_reg[1] = 0x3d28,
+       .bank_reg = 0x3d20,
+       .mn[0] = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 16,
+               .m_val_shift = 16,
+               .width = 8,
+       },
+       .mn[1] = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 16,
+               .m_val_shift = 16,
+               .width = 8,
+       },
+       .s[0] = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map,
+       },
+       .s[1] = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map,
+       },
+       .p[0] = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .p[1] = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .mux_sel_bit = 0,
+       .freq_tbl = clk_tbl_nss,
+       .clkr = {
+               .enable_reg = 0x3d20,
+               .enable_mask = BIT(1),
+               .hw.init = &(struct clk_init_data){
+                       .name = "ubi32_core1_src_clk",
+                       .parent_names = gcc_pxo_pll8_pll14_pll18_pll0,
+                       .num_parents = 5,
+                       .ops = &clk_dyn_rcg_ops,
+                       .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+               },
+       },
+};
+
+static struct clk_dyn_rcg ubi32_core2_src_clk = {
+       .ns_reg[0] = 0x3d4c,
+       .ns_reg[1] = 0x3d50,
+       .md_reg[0] = 0x3d44,
+       .md_reg[1] = 0x3d48,
+       .bank_reg = 0x3d40,
+       .mn[0] = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 16,
+               .m_val_shift = 16,
+               .width = 8,
+       },
+       .mn[1] = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 16,
+               .m_val_shift = 16,
+               .width = 8,
+       },
+       .s[0] = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map,
+       },
+       .s[1] = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll8_pll14_pll18_pll0_map,
+       },
+       .p[0] = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .p[1] = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .mux_sel_bit = 0,
+       .freq_tbl = clk_tbl_nss,
+       .clkr = {
+               .enable_reg = 0x3d40,
+               .enable_mask = BIT(1),
+               .hw.init = &(struct clk_init_data){
+                       .name = "ubi32_core2_src_clk",
+                       .parent_names = gcc_pxo_pll8_pll14_pll18_pll0,
+                       .num_parents = 5,
+                       .ops = &clk_dyn_rcg_ops,
+                       .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+               },
+       },
+};
+
 static struct clk_regmap *gcc_ipq806x_clks[] = {
        [PLL0] = &pll0.clkr,
        [PLL0_VOTE] = &pll0_vote,
@@ -2211,6 +2728,7 @@ static struct clk_regmap *gcc_ipq806x_clks[] = {
        [PLL8_VOTE] = &pll8_vote,
        [PLL14] = &pll14.clkr,
        [PLL14_VOTE] = &pll14_vote,
+       [PLL18] = &pll18.clkr,
        [GSBI1_UART_SRC] = &gsbi1_uart_src.clkr,
        [GSBI1_UART_CLK] = &gsbi1_uart_clk.clkr,
        [GSBI2_UART_SRC] = &gsbi2_uart_src.clkr,
@@ -2307,6 +2825,18 @@ static struct clk_regmap *gcc_ipq806x_clks[] = {
        [USB_FS1_SYSTEM_CLK] = &usb_fs1_sys_clk.clkr,
        [EBI2_CLK] = &ebi2_clk.clkr,
        [EBI2_AON_CLK] = &ebi2_aon_clk.clkr,
+       [GMAC_CORE1_CLK_SRC] = &gmac_core1_src.clkr,
+       [GMAC_CORE1_CLK] = &gmac_core1_clk.clkr,
+       [GMAC_CORE2_CLK_SRC] = &gmac_core2_src.clkr,
+       [GMAC_CORE2_CLK] = &gmac_core2_clk.clkr,
+       [GMAC_CORE3_CLK_SRC] = &gmac_core3_src.clkr,
+       [GMAC_CORE3_CLK] = &gmac_core3_clk.clkr,
+       [GMAC_CORE4_CLK_SRC] = &gmac_core4_src.clkr,
+       [GMAC_CORE4_CLK] = &gmac_core4_clk.clkr,
+       [UBI32_CORE1_CLK_SRC] = &ubi32_core1_src_clk.clkr,
+       [UBI32_CORE2_CLK_SRC] = &ubi32_core2_src_clk.clkr,
+       [NSSTCM_CLK_SRC] = &nss_tcm_src.clkr,
+       [NSSTCM_CLK] = &nss_tcm_clk.clkr,
 };
 
 static const struct qcom_reset_map gcc_ipq806x_resets[] = {
@@ -2425,6 +2955,48 @@ static const struct qcom_reset_map gcc_ipq806x_resets[] = {
        [USB30_1_PHY_RESET] = { 0x3b58, 0 },
        [NSSFB0_RESET] = { 0x3b60, 6 },
        [NSSFB1_RESET] = { 0x3b60, 7 },
+       [UBI32_CORE1_CLKRST_CLAMP_RESET] = { 0x3d3c, 3 },
+       [UBI32_CORE1_CLAMP_RESET] = { 0x3d3c, 2 },
+       [UBI32_CORE1_AHB_RESET] = { 0x3d3c, 1 },
+       [UBI32_CORE1_AXI_RESET] = { 0x3d3c, 0 },
+       [UBI32_CORE2_CLKRST_CLAMP_RESET] = { 0x3d5c, 3 },
+       [UBI32_CORE2_CLAMP_RESET] = { 0x3d5c, 2 },
+       [UBI32_CORE2_AHB_RESET] = { 0x3d5c, 1 },
+       [UBI32_CORE2_AXI_RESET] = { 0x3d5c, 0 },
+       [GMAC_CORE1_RESET] = { 0x3cbc, 0 },
+       [GMAC_CORE2_RESET] = { 0x3cdc, 0 },
+       [GMAC_CORE3_RESET] = { 0x3cfc, 0 },
+       [GMAC_CORE4_RESET] = { 0x3d1c, 0 },
+       [GMAC_AHB_RESET] = { 0x3e24, 0 },
+       [NSS_CH0_RST_RX_CLK_N_RESET] = { 0x3b60, 0 },
+       [NSS_CH0_RST_TX_CLK_N_RESET] = { 0x3b60, 1 },
+       [NSS_CH0_RST_RX_125M_N_RESET] = { 0x3b60, 2 },
+       [NSS_CH0_HW_RST_RX_125M_N_RESET] = { 0x3b60, 3 },
+       [NSS_CH0_RST_TX_125M_N_RESET] = { 0x3b60, 4 },
+       [NSS_CH1_RST_RX_CLK_N_RESET] = { 0x3b60, 5 },
+       [NSS_CH1_RST_TX_CLK_N_RESET] = { 0x3b60, 6 },
+       [NSS_CH1_RST_RX_125M_N_RESET] = { 0x3b60, 7 },
+       [NSS_CH1_HW_RST_RX_125M_N_RESET] = { 0x3b60, 8 },
+       [NSS_CH1_RST_TX_125M_N_RESET] = { 0x3b60, 9 },
+       [NSS_CH2_RST_RX_CLK_N_RESET] = { 0x3b60, 10 },
+       [NSS_CH2_RST_TX_CLK_N_RESET] = { 0x3b60, 11 },
+       [NSS_CH2_RST_RX_125M_N_RESET] = { 0x3b60, 12 },
+       [NSS_CH2_HW_RST_RX_125M_N_RESET] = { 0x3b60, 13 },
+       [NSS_CH2_RST_TX_125M_N_RESET] = { 0x3b60, 14 },
+       [NSS_CH3_RST_RX_CLK_N_RESET] = { 0x3b60, 15 },
+       [NSS_CH3_RST_TX_CLK_N_RESET] = { 0x3b60, 16 },
+       [NSS_CH3_RST_RX_125M_N_RESET] = { 0x3b60, 17 },
+       [NSS_CH3_HW_RST_RX_125M_N_RESET] = { 0x3b60, 18 },
+       [NSS_CH3_RST_TX_125M_N_RESET] = { 0x3b60, 19 },
+       [NSS_RST_RX_250M_125M_N_RESET] = { 0x3b60, 20 },
+       [NSS_RST_TX_250M_125M_N_RESET] = { 0x3b60, 21 },
+       [NSS_QSGMII_TXPI_RST_N_RESET] = { 0x3b60, 22 },
+       [NSS_QSGMII_CDR_RST_N_RESET] = { 0x3b60, 23 },
+       [NSS_SGMII2_CDR_RST_N_RESET] = { 0x3b60, 24 },
+       [NSS_SGMII3_CDR_RST_N_RESET] = { 0x3b60, 25 },
+       [NSS_CAL_PRBS_RST_N_RESET] = { 0x3b60, 26 },
+       [NSS_LCKDT_RST_N_RESET] = { 0x3b60, 27 },
+       [NSS_SRDS_N_RESET] = { 0x3b60, 28 },
 };
 
 static const struct regmap_config gcc_ipq806x_regmap_config = {
@@ -2453,6 +3025,8 @@ static int gcc_ipq806x_probe(struct platform_device *pdev)
 {
        struct clk *clk;
        struct device *dev = &pdev->dev;
+       struct regmap *regmap;
+       int ret;
 
        /* Temporary until RPM clocks supported */
        clk = clk_register_fixed_rate(dev, "cxo", NULL, CLK_IS_ROOT, 25000000);
@@ -2463,7 +3037,25 @@ static int gcc_ipq806x_probe(struct platform_device *pdev)
        if (IS_ERR(clk))
                return PTR_ERR(clk);
 
-       return qcom_cc_probe(pdev, &gcc_ipq806x_desc);
+       ret = qcom_cc_probe(pdev, &gcc_ipq806x_desc);
+       if (ret)
+               return ret;
+
+       regmap = dev_get_regmap(dev, NULL);
+       if (!regmap)
+               return -ENODEV;
+
+       /* Setup PLL18 static bits */
+       regmap_update_bits(regmap, 0x31a4, 0xffffffc0, 0x40000400);
+       regmap_write(regmap, 0x31b0, 0x3080);
+
+       /* Set GMAC footswitch sleep/wakeup values */
+       regmap_write(regmap, 0x3cb8, 8);
+       regmap_write(regmap, 0x3cd8, 8);
+       regmap_write(regmap, 0x3cf8, 8);
+       regmap_write(regmap, 0x3d18, 8);
+
+       return 0;
 }
 
 static int gcc_ipq806x_remove(struct platform_device *pdev)
index 66bd6a2ad83b04f34e2b5fbb822e0b8ff062359b..d95a0c300b03390ef99443be108cb0bf1372fc40 100644 (file)
@@ -445,10 +445,10 @@ static int c4iw_get_mib(struct ib_device *ibdev,
 
        cxgb4_get_tcp_stats(c4iw_dev->rdev.lldi.pdev, &v4, &v6);
        memset(stats, 0, sizeof *stats);
-       stats->iw.tcpInSegs = v4.tcpInSegs + v6.tcpInSegs;
-       stats->iw.tcpOutSegs = v4.tcpOutSegs + v6.tcpOutSegs;
-       stats->iw.tcpRetransSegs = v4.tcpRetransSegs + v6.tcpRetransSegs;
-       stats->iw.tcpOutRsts = v4.tcpOutRsts + v6.tcpOutSegs;
+       stats->iw.tcpInSegs = v4.tcp_in_segs + v6.tcp_in_segs;
+       stats->iw.tcpOutSegs = v4.tcp_out_segs + v6.tcp_out_segs;
+       stats->iw.tcpRetransSegs = v4.tcp_retrans_segs + v6.tcp_retrans_segs;
+       stats->iw.tcpOutRsts = v4.tcp_out_rsts + v6.tcp_out_rsts;
 
        return 0;
 }
index 0f00204d2ecea0e19f0a9917431e4bb8cbbdfa49..21cb41a60fe8096c3392e9b886252ae37307d9a4 100644 (file)
@@ -189,7 +189,7 @@ void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
 {
        int i;
        u64 guid_indexes;
-       int slave_id;
+       int slave_id, slave_port;
        enum slave_port_state new_state;
        enum slave_port_state prev_state;
        __be64 tmp_cur_ag, form_cache_ag;
@@ -217,6 +217,11 @@ void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
                slave_id = (block_num * NUM_ALIAS_GUID_IN_REC) + i ;
                if (slave_id >= dev->dev->persist->num_vfs + 1)
                        return;
+
+               slave_port = mlx4_phys_to_slave_port(dev->dev, slave_id, port_num);
+               if (slave_port < 0) /* this port isn't available for the VF */
+                       continue;
+
                tmp_cur_ag = *(__be64 *)&p_data[i * GUID_REC_SIZE];
                form_cache_ag = get_cached_alias_guid(dev, port_num,
                                        (NUM_ALIAS_GUID_IN_REC * block_num) + i);
index 9cd2b002d7ae57fb4f33944cbaa14a3e2a27dedd..ad6a8818608d13eb8ba56443d67d5332c02c45cf 100644 (file)
@@ -1365,14 +1365,17 @@ static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc
         * standard address handle by decoding the tunnelled mlx4_ah fields */
        memcpy(&ah.av, &tunnel->hdr.av, sizeof (struct mlx4_av));
        ah.ibah.device = ctx->ib_dev;
+
+       port = be32_to_cpu(ah.av.ib.port_pd) >> 24;
+       port = mlx4_slave_convert_port(dev->dev, slave, port);
+       if (port < 0)
+               return;
+       ah.av.ib.port_pd = cpu_to_be32(port << 24 | (be32_to_cpu(ah.av.ib.port_pd) & 0xffffff));
+
        mlx4_ib_query_ah(&ah.ibah, &ah_attr);
        if (ah_attr.ah_flags & IB_AH_GRH)
                fill_in_real_sgid_index(dev, slave, ctx->port, &ah_attr);
 
-       port = mlx4_slave_convert_port(dev->dev, slave, ah_attr.port_num);
-       if (port < 0)
-               return;
-       ah_attr.port_num = port;
        memcpy(ah_attr.dmac, tunnel->hdr.mac, 6);
        ah_attr.vlan_id = be16_to_cpu(tunnel->hdr.vlan);
        /* if slave have default vlan use it */
index cc64400d41ace3005c8a878b4c6811b0506726f9..024b0f745035caee392ea24e3a75b1e75fe6a1c9 100644 (file)
@@ -1090,7 +1090,7 @@ static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_att
 
        ret = mlx4_cmd_imm(mdev->dev, mailbox->dma, reg_id, size >> 2, 0,
                           MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
-                          MLX4_CMD_NATIVE);
+                          MLX4_CMD_WRAPPED);
        if (ret == -ENOMEM)
                pr_err("mcg table is full. Fail to register network rule.\n");
        else if (ret == -ENXIO)
@@ -1107,7 +1107,7 @@ static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
        int err;
        err = mlx4_cmd(dev, reg_id, 0, 0,
                       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
-                      MLX4_CMD_NATIVE);
+                      MLX4_CMD_WRAPPED);
        if (err)
                pr_err("Fail to detach network rule. registration id = 0x%llx\n",
                       reg_id);
@@ -2041,77 +2041,52 @@ static void init_pkeys(struct mlx4_ib_dev *ibdev)
 
 static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
 {
-       char name[80];
-       int eq_per_port = 0;
-       int added_eqs = 0;
-       int total_eqs = 0;
-       int i, j, eq;
-
-       /* Legacy mode or comp_pool is not large enough */
-       if (dev->caps.comp_pool == 0 ||
-           dev->caps.num_ports > dev->caps.comp_pool)
-               return;
-
-       eq_per_port = dev->caps.comp_pool / dev->caps.num_ports;
-
-       /* Init eq table */
-       added_eqs = 0;
-       mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
-               added_eqs += eq_per_port;
-
-       total_eqs = dev->caps.num_comp_vectors + added_eqs;
+       int i, j, eq = 0, total_eqs = 0;
 
-       ibdev->eq_table = kzalloc(total_eqs * sizeof(int), GFP_KERNEL);
+       ibdev->eq_table = kcalloc(dev->caps.num_comp_vectors,
+                                 sizeof(ibdev->eq_table[0]), GFP_KERNEL);
        if (!ibdev->eq_table)
                return;
 
-       ibdev->eq_added = added_eqs;
-
-       eq = 0;
-       mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) {
-               for (j = 0; j < eq_per_port; j++) {
-                       snprintf(name, sizeof(name), "mlx4-ib-%d-%d@%s",
-                                i, j, dev->persist->pdev->bus->name);
-                       /* Set IRQ for specific name (per ring) */
-                       if (mlx4_assign_eq(dev, name, NULL,
-                                          &ibdev->eq_table[eq])) {
-                               /* Use legacy (same as mlx4_en driver) */
-                               pr_warn("Can't allocate EQ %d; reverting to legacy\n", eq);
-                               ibdev->eq_table[eq] =
-                                       (eq % dev->caps.num_comp_vectors);
-                       }
-                       eq++;
+       for (i = 1; i <= dev->caps.num_ports; i++) {
+               for (j = 0; j < mlx4_get_eqs_per_port(dev, i);
+                    j++, total_eqs++) {
+                       if (i > 1 &&  mlx4_is_eq_shared(dev, total_eqs))
+                               continue;
+                       ibdev->eq_table[eq] = total_eqs;
+                       if (!mlx4_assign_eq(dev, i,
+                                           &ibdev->eq_table[eq]))
+                               eq++;
+                       else
+                               ibdev->eq_table[eq] = -1;
                }
        }
 
-       /* Fill the reset of the vector with legacy EQ */
-       for (i = 0, eq = added_eqs; i < dev->caps.num_comp_vectors; i++)
-               ibdev->eq_table[eq++] = i;
+       for (i = eq; i < dev->caps.num_comp_vectors;
+            ibdev->eq_table[i++] = -1)
+               ;
 
        /* Advertise the new number of EQs to clients */
-       ibdev->ib_dev.num_comp_vectors = total_eqs;
+       ibdev->ib_dev.num_comp_vectors = eq;
 }
 
 static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
 {
        int i;
+       int total_eqs = ibdev->ib_dev.num_comp_vectors;
 
-       /* no additional eqs were added */
+       /* no eqs were allocated */
        if (!ibdev->eq_table)
                return;
 
        /* Reset the advertised EQ number */
-       ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
+       ibdev->ib_dev.num_comp_vectors = 0;
 
-       /* Free only the added eqs */
-       for (i = 0; i < ibdev->eq_added; i++) {
-               /* Don't free legacy eqs if used */
-               if (ibdev->eq_table[i] <= dev->caps.num_comp_vectors)
-                       continue;
+       for (i = 0; i < total_eqs; i++)
                mlx4_release_eq(dev, ibdev->eq_table[i]);
-       }
 
        kfree(ibdev->eq_table);
+       ibdev->eq_table = NULL;
 }
 
 static void *mlx4_ib_add(struct mlx4_dev *dev)
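The rewritten allocator drops the separate eq_added bookkeeping: eq_table is sized for every completion vector, successfully assigned vectors are packed at the front, and unused tail slots hold a -1 sentinel so teardown releases exactly what was handed out. The terse tail-fill loop in the hunk is equivalent to this more conventional spelling:

        /* Equivalent form of the sentinel fill above. */
        for (i = eq; i < dev->caps.num_comp_vectors; i++)
                ibdev->eq_table[i] = -1;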
index fce3934372a161680e4e4f2dd9716963e1178790..ef80e6c99a685bf0eb94a269e455746de7c74e49 100644 (file)
@@ -523,7 +523,6 @@ struct mlx4_ib_dev {
        struct mlx4_ib_iboe     iboe;
        int                     counters[MLX4_MAX_PORTS];
        int                    *eq_table;
-       int                     eq_added;
        struct kobject         *iov_parent;
        struct kobject         *ports_parent;
        struct kobject         *dev_ports_parent[MLX4_MFUNC_MAX];
index 10df386c63447c9757fa22bfc97c433b59ea3895..bce263b928211c3e2126b04f6ef00fbc41b33d03 100644 (file)
@@ -1,8 +1,6 @@
 config MLX5_INFINIBAND
        tristate "Mellanox Connect-IB HCA support"
-       depends on NETDEVICES && ETHERNET && PCI
-       select NET_VENDOR_MELLANOX
-       select MLX5_CORE
+       depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE
        ---help---
          This driver provides low-level InfiniBand support for
          Mellanox Connect-IB PCI Express host channel adapters (HCAs).
index 2ee6b105197544abb2799e552b129d37eff53906..e2bea9ab93b3b81ece3482963aecf5dec2ef3160 100644 (file)
@@ -590,8 +590,7 @@ static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf,
 {
        int err;
 
-       err = mlx5_buf_alloc(dev->mdev, nent * cqe_size,
-                            PAGE_SIZE * 2, &buf->buf);
+       err = mlx5_buf_alloc(dev->mdev, nent * cqe_size, &buf->buf);
        if (err)
                return err;
 
@@ -754,7 +753,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
                return ERR_PTR(-EINVAL);
 
        entries = roundup_pow_of_two(entries + 1);
-       if (entries > dev->mdev->caps.gen.max_cqes)
+       if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)))
                return ERR_PTR(-EINVAL);
 
        cq = kzalloc(sizeof(*cq), GFP_KERNEL);
@@ -921,7 +920,7 @@ int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
        int err;
        u32 fsel;
 
-       if (!(dev->mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_CQ_MODER))
+       if (!MLX5_CAP_GEN(dev->mdev, cq_moderation))
                return -ENOSYS;
 
        in = kzalloc(sizeof(*in), GFP_KERNEL);
@@ -1076,7 +1075,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
        int uninitialized_var(cqe_size);
        unsigned long flags;
 
-       if (!(dev->mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_RESIZE_CQ)) {
+       if (!MLX5_CAP_GEN(dev->mdev, cq_resize)) {
                pr_info("Firmware does not support resize CQ\n");
                return -ENOSYS;
        }
@@ -1085,7 +1084,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
                return -EINVAL;
 
        entries = roundup_pow_of_two(entries + 1);
-       if (entries > dev->mdev->caps.gen.max_cqes + 1)
+       if (entries >  (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1)
                return -EINVAL;
 
        if (entries == ibcq->cqe + 1)
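Both CQ size checks now expand a log2 capability field through MLX5_CAP_GEN() instead of reading a cached maximum. The idiom, as a small sketch:

        /* Capabilities are reported as log2 values; expand before comparing. */
        int max_cqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz);

        entries = roundup_pow_of_two(entries + 1);
        if (entries > max_cqes)
                return ERR_PTR(-EINVAL);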
index 9cf9a37bb5ff9360303a0ea9197869b5fcfbaefc..a770490ebbf107e97af4f362ab633d6d087a47b9 100644 (file)
@@ -129,7 +129,7 @@ int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port)
 
        packet_error = be16_to_cpu(out_mad->status);
 
-       dev->mdev->caps.gen.ext_port_cap[port - 1] = (!err && !packet_error) ?
+       dev->mdev->port_caps[port - 1].ext_port_cap = (!err && !packet_error) ?
                MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO : 0;
 
 out:
@@ -137,3 +137,300 @@ out:
        kfree(out_mad);
        return err;
 }
+
+int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
+                                         struct ib_smp *out_mad)
+{
+       struct ib_smp *in_mad = NULL;
+       int err = -ENOMEM;
+
+       in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
+       if (!in_mad)
+               return -ENOMEM;
+
+       init_query_mad(in_mad);
+       in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
+
+       err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad,
+                          out_mad);
+
+       kfree(in_mad);
+       return err;
+}
+
+int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
+                                        __be64 *sys_image_guid)
+{
+       struct ib_smp *out_mad = NULL;
+       int err = -ENOMEM;
+
+       out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
+       if (!out_mad)
+               return -ENOMEM;
+
+       err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad);
+       if (err)
+               goto out;
+
+       memcpy(sys_image_guid, out_mad->data + 4, 8);
+
+out:
+       kfree(out_mad);
+
+       return err;
+}
+
+int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
+                                u16 *max_pkeys)
+{
+       struct ib_smp *out_mad = NULL;
+       int err = -ENOMEM;
+
+       out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
+       if (!out_mad)
+               return -ENOMEM;
+
+       err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad);
+       if (err)
+               goto out;
+
+       *max_pkeys = be16_to_cpup((__be16 *)(out_mad->data + 28));
+
+out:
+       kfree(out_mad);
+
+       return err;
+}
+
+int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
+                                u32 *vendor_id)
+{
+       struct ib_smp *out_mad = NULL;
+       int err = -ENOMEM;
+
+       out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
+       if (!out_mad)
+               return -ENOMEM;
+
+       err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad);
+       if (err)
+               goto out;
+
+       *vendor_id = be32_to_cpup((__be32 *)(out_mad->data + 36)) & 0xffff;
+
+out:
+       kfree(out_mad);
+
+       return err;
+}
+
+int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
+{
+       struct ib_smp *in_mad  = NULL;
+       struct ib_smp *out_mad = NULL;
+       int err = -ENOMEM;
+
+       in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
+       out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
+       if (!in_mad || !out_mad)
+               goto out;
+
+       init_query_mad(in_mad);
+       in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
+
+       err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
+       if (err)
+               goto out;
+
+       memcpy(node_desc, out_mad->data, 64);
+out:
+       kfree(in_mad);
+       kfree(out_mad);
+       return err;
+}
+
+int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid)
+{
+       struct ib_smp *in_mad  = NULL;
+       struct ib_smp *out_mad = NULL;
+       int err = -ENOMEM;
+
+       in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
+       out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
+       if (!in_mad || !out_mad)
+               goto out;
+
+       init_query_mad(in_mad);
+       in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
+
+       err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
+       if (err)
+               goto out;
+
+       memcpy(node_guid, out_mad->data + 12, 8);
+out:
+       kfree(in_mad);
+       kfree(out_mad);
+       return err;
+}
+
+int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
+                           u16 *pkey)
+{
+       struct ib_smp *in_mad  = NULL;
+       struct ib_smp *out_mad = NULL;
+       int err = -ENOMEM;
+
+       in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
+       out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
+       if (!in_mad || !out_mad)
+               goto out;
+
+       init_query_mad(in_mad);
+       in_mad->attr_id  = IB_SMP_ATTR_PKEY_TABLE;
+       in_mad->attr_mod = cpu_to_be32(index / 32);
+
+       err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
+                          out_mad);
+       if (err)
+               goto out;
+
+       *pkey = be16_to_cpu(((__be16 *)out_mad->data)[index % 32]);
+
+out:
+       kfree(in_mad);
+       kfree(out_mad);
+       return err;
+}
+
+int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
+                           union ib_gid *gid)
+{
+       struct ib_smp *in_mad  = NULL;
+       struct ib_smp *out_mad = NULL;
+       int err = -ENOMEM;
+
+       in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
+       out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
+       if (!in_mad || !out_mad)
+               goto out;
+
+       init_query_mad(in_mad);
+       in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
+       in_mad->attr_mod = cpu_to_be32(port);
+
+       err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
+                          out_mad);
+       if (err)
+               goto out;
+
+       memcpy(gid->raw, out_mad->data + 8, 8);
+
+       init_query_mad(in_mad);
+       in_mad->attr_id  = IB_SMP_ATTR_GUID_INFO;
+       in_mad->attr_mod = cpu_to_be32(index / 8);
+
+       err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
+                          out_mad);
+       if (err)
+               goto out;
+
+       memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);
+
+out:
+       kfree(in_mad);
+       kfree(out_mad);
+       return err;
+}
+
+int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
+                           struct ib_port_attr *props)
+{
+       struct mlx5_ib_dev *dev = to_mdev(ibdev);
+       struct mlx5_core_dev *mdev = dev->mdev;
+       struct ib_smp *in_mad  = NULL;
+       struct ib_smp *out_mad = NULL;
+       int ext_active_speed;
+       int err = -ENOMEM;
+
+       if (port < 1 || port > MLX5_CAP_GEN(mdev, num_ports)) {
+               mlx5_ib_warn(dev, "invalid port number %d\n", port);
+               return -EINVAL;
+       }
+
+       in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
+       out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
+       if (!in_mad || !out_mad)
+               goto out;
+
+       memset(props, 0, sizeof(*props));
+
+       init_query_mad(in_mad);
+       in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
+       in_mad->attr_mod = cpu_to_be32(port);
+
+       err = mlx5_MAD_IFC(dev, 1, 1, port, NULL, NULL, in_mad, out_mad);
+       if (err) {
+               mlx5_ib_warn(dev, "err %d\n", err);
+               goto out;
+       }
+
+       props->lid              = be16_to_cpup((__be16 *)(out_mad->data + 16));
+       props->lmc              = out_mad->data[34] & 0x7;
+       props->sm_lid           = be16_to_cpup((__be16 *)(out_mad->data + 18));
+       props->sm_sl            = out_mad->data[36] & 0xf;
+       props->state            = out_mad->data[32] & 0xf;
+       props->phys_state       = out_mad->data[33] >> 4;
+       props->port_cap_flags   = be32_to_cpup((__be32 *)(out_mad->data + 20));
+       props->gid_tbl_len      = out_mad->data[50];
+       props->max_msg_sz       = 1 << MLX5_CAP_GEN(mdev, log_max_msg);
+       props->pkey_tbl_len     = mdev->port_caps[port - 1].pkey_table_len;
+       props->bad_pkey_cntr    = be16_to_cpup((__be16 *)(out_mad->data + 46));
+       props->qkey_viol_cntr   = be16_to_cpup((__be16 *)(out_mad->data + 48));
+       props->active_width     = out_mad->data[31] & 0xf;
+       props->active_speed     = out_mad->data[35] >> 4;
+       props->max_mtu          = out_mad->data[41] & 0xf;
+       props->active_mtu       = out_mad->data[36] >> 4;
+       props->subnet_timeout   = out_mad->data[51] & 0x1f;
+       props->max_vl_num       = out_mad->data[37] >> 4;
+       props->init_type_reply  = out_mad->data[41] >> 4;
+
+       /* Check if extended speeds (EDR/FDR/...) are supported */
+       if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
+               ext_active_speed = out_mad->data[62] >> 4;
+
+               switch (ext_active_speed) {
+               case 1:
+                       props->active_speed = 16; /* FDR */
+                       break;
+               case 2:
+                       props->active_speed = 32; /* EDR */
+                       break;
+               }
+       }
+
+       /* If reported active speed is QDR, check if is FDR-10 */
+       if (props->active_speed == 4) {
+               if (mdev->port_caps[port - 1].ext_port_cap &
+                   MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) {
+                       init_query_mad(in_mad);
+                       in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
+                       in_mad->attr_mod = cpu_to_be32(port);
+
+                       err = mlx5_MAD_IFC(dev, 1, 1, port,
+                                          NULL, NULL, in_mad, out_mad);
+                       if (err)
+                               goto out;
+
+                       /* Checking LinkSpeedActive for FDR-10 */
+                       if (out_mad->data[15] & 0x1)
+                               props->active_speed = 8;
+               }
+       }
+
+out:
+       kfree(in_mad);
+       kfree(out_mad);
+
+       return err;
+}
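Every mlx5_query_mad_ifc_* helper added above follows one shape: allocate an in/out MAD pair, initialize the query, set attr_id (plus attr_mod when the attribute is indexed), send it through mlx5_MAD_IFC(), then copy fixed-offset fields out of out_mad->data. A hypothetical condensed helper capturing that shape (query_smp_attr is illustration only, not in the patch):

        static int query_smp_attr(struct mlx5_ib_dev *dev, __be16 attr_id,
                                  u32 attr_mod, u8 port, struct ib_smp *out_mad)
        {
                struct ib_smp *in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
                int err;

                if (!in_mad)
                        return -ENOMEM;

                init_query_mad(in_mad);
                in_mad->attr_id  = attr_id;     /* IB_SMP_ATTR_* are already __be16 */
                in_mad->attr_mod = cpu_to_be32(attr_mod);

                err = mlx5_MAD_IFC(dev, 1, 1, port, NULL, NULL, in_mad, out_mad);

                kfree(in_mad);
                return err;     /* caller parses out_mad->data on success */
        }

For example, the node-info query would reduce to query_smp_attr(dev, IB_SMP_ATTR_NODE_INFO, 0, 1, out_mad).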
index 57c9809e8b8774e8aac47806134216ef97c46883..d4dea86052d6b0cbb4dffc89d58053df256a5798 100644 (file)
@@ -40,6 +40,7 @@
 #include <linux/io-mapping.h>
 #include <linux/sched.h>
 #include <rdma/ib_user_verbs.h>
+#include <linux/mlx5/vport.h>
 #include <rdma/ib_smi.h>
 #include <rdma/ib_umem.h>
 #include "user.h"
@@ -62,32 +63,168 @@ static char mlx5_version[] =
        DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
        DRIVER_VERSION " (" DRIVER_RELDATE ")\n";
 
+static enum rdma_link_layer
+mlx5_ib_port_link_layer(struct ib_device *device)
+{
+       struct mlx5_ib_dev *dev = to_mdev(device);
+
+       switch (MLX5_CAP_GEN(dev->mdev, port_type)) {
+       case MLX5_CAP_PORT_TYPE_IB:
+               return IB_LINK_LAYER_INFINIBAND;
+       case MLX5_CAP_PORT_TYPE_ETH:
+               return IB_LINK_LAYER_ETHERNET;
+       default:
+               return IB_LINK_LAYER_UNSPECIFIED;
+       }
+}
+
+static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
+{
+       return !dev->mdev->issi;
+}
+
+enum {
+       MLX5_VPORT_ACCESS_METHOD_MAD,
+       MLX5_VPORT_ACCESS_METHOD_HCA,
+       MLX5_VPORT_ACCESS_METHOD_NIC,
+};
+
+static int mlx5_get_vport_access_method(struct ib_device *ibdev)
+{
+       if (mlx5_use_mad_ifc(to_mdev(ibdev)))
+               return MLX5_VPORT_ACCESS_METHOD_MAD;
+
+       if (mlx5_ib_port_link_layer(ibdev) ==
+           IB_LINK_LAYER_ETHERNET)
+               return MLX5_VPORT_ACCESS_METHOD_NIC;
+
+       return MLX5_VPORT_ACCESS_METHOD_HCA;
+}
+
+static int mlx5_query_system_image_guid(struct ib_device *ibdev,
+                                       __be64 *sys_image_guid)
+{
+       struct mlx5_ib_dev *dev = to_mdev(ibdev);
+       struct mlx5_core_dev *mdev = dev->mdev;
+       u64 tmp;
+       int err;
+
+       switch (mlx5_get_vport_access_method(ibdev)) {
+       case MLX5_VPORT_ACCESS_METHOD_MAD:
+               return mlx5_query_mad_ifc_system_image_guid(ibdev,
+                                                           sys_image_guid);
+
+       case MLX5_VPORT_ACCESS_METHOD_HCA:
+               err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
+               if (!err)
+                       *sys_image_guid = cpu_to_be64(tmp);
+               return err;
+
+       default:
+               return -EINVAL;
+       }
+}
+
+static int mlx5_query_max_pkeys(struct ib_device *ibdev,
+                               u16 *max_pkeys)
+{
+       struct mlx5_ib_dev *dev = to_mdev(ibdev);
+       struct mlx5_core_dev *mdev = dev->mdev;
+
+       switch (mlx5_get_vport_access_method(ibdev)) {
+       case MLX5_VPORT_ACCESS_METHOD_MAD:
+               return mlx5_query_mad_ifc_max_pkeys(ibdev, max_pkeys);
+
+       case MLX5_VPORT_ACCESS_METHOD_HCA:
+       case MLX5_VPORT_ACCESS_METHOD_NIC:
+               *max_pkeys = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev,
+                                               pkey_table_size));
+               return 0;
+
+       default:
+               return -EINVAL;
+       }
+}
+
+static int mlx5_query_vendor_id(struct ib_device *ibdev,
+                               u32 *vendor_id)
+{
+       struct mlx5_ib_dev *dev = to_mdev(ibdev);
+
+       switch (mlx5_get_vport_access_method(ibdev)) {
+       case MLX5_VPORT_ACCESS_METHOD_MAD:
+               return mlx5_query_mad_ifc_vendor_id(ibdev, vendor_id);
+
+       case MLX5_VPORT_ACCESS_METHOD_HCA:
+       case MLX5_VPORT_ACCESS_METHOD_NIC:
+               return mlx5_core_query_vendor_id(dev->mdev, vendor_id);
+
+       default:
+               return -EINVAL;
+       }
+}
+
+static int mlx5_query_node_guid(struct mlx5_ib_dev *dev,
+                               __be64 *node_guid)
+{
+       u64 tmp;
+       int err;
+
+       switch (mlx5_get_vport_access_method(&dev->ib_dev)) {
+       case MLX5_VPORT_ACCESS_METHOD_MAD:
+               return mlx5_query_mad_ifc_node_guid(dev, node_guid);
+
+       case MLX5_VPORT_ACCESS_METHOD_HCA:
+               err = mlx5_query_hca_vport_node_guid(dev->mdev, &tmp);
+               if (!err)
+                       *node_guid = cpu_to_be64(tmp);
+               return err;
+
+       default:
+               return -EINVAL;
+       }
+}
+
+struct mlx5_reg_node_desc {
+       u8      desc[64];
+};
+
+static int mlx5_query_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
+{
+       struct mlx5_reg_node_desc in;
+
+       if (mlx5_use_mad_ifc(dev))
+               return mlx5_query_mad_ifc_node_desc(dev, node_desc);
+
+       memset(&in, 0, sizeof(in));
+
+       return mlx5_core_access_reg(dev->mdev, &in, sizeof(in), node_desc,
+                                   sizeof(struct mlx5_reg_node_desc),
+                                   MLX5_REG_NODE_DESC, 0, 0);
+}
+
 static int mlx5_ib_query_device(struct ib_device *ibdev,
                                struct ib_device_attr *props)
 {
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
-       struct ib_smp *in_mad  = NULL;
-       struct ib_smp *out_mad = NULL;
-       struct mlx5_general_caps *gen;
+       struct mlx5_core_dev *mdev = dev->mdev;
        int err = -ENOMEM;
        int max_rq_sg;
        int max_sq_sg;
-       u64 flags;
 
-       gen = &dev->mdev->caps.gen;
-       in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
-       out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
-       if (!in_mad || !out_mad)
-               goto out;
-
-       init_query_mad(in_mad);
-       in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
+       memset(props, 0, sizeof(*props));
+       err = mlx5_query_system_image_guid(ibdev,
+                                          &props->sys_image_guid);
+       if (err)
+               return err;
 
-       err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad, out_mad);
+       err = mlx5_query_max_pkeys(ibdev, &props->max_pkeys);
        if (err)
-               goto out;
+               return err;
 
-       memset(props, 0, sizeof(*props));
+       err = mlx5_query_vendor_id(ibdev, &props->vendor_id);
+       if (err)
+               return err;
 
        props->fw_ver = ((u64)fw_rev_maj(dev->mdev) << 32) |
                (fw_rev_min(dev->mdev) << 16) |
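mlx5_get_vport_access_method() is the pivot for all of the rewritten query paths: pre-ISSI firmware (issi, the interface step sequence id, is 0 there) keeps using SMP MADs, Ethernet ports use NIC vport commands, and everything else goes through HCA vport commands. The dispatch shape shared by the query entry points, restated here using mlx5_query_max_pkeys() from the hunk:

        static int query_max_pkeys_sketch(struct ib_device *ibdev, u16 *max_pkeys)
        {
                struct mlx5_core_dev *mdev = to_mdev(ibdev)->mdev;

                switch (mlx5_get_vport_access_method(ibdev)) {
                case MLX5_VPORT_ACCESS_METHOD_MAD:      /* legacy firmware: SMP MADs */
                        return mlx5_query_mad_ifc_max_pkeys(ibdev, max_pkeys);
                case MLX5_VPORT_ACCESS_METHOD_HCA:      /* vport context / caps */
                case MLX5_VPORT_ACCESS_METHOD_NIC:
                        *max_pkeys = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev,
                                                        pkey_table_size));
                        return 0;
                default:
                        return -EINVAL;
                }
        }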
@@ -96,18 +233,18 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
                IB_DEVICE_PORT_ACTIVE_EVENT             |
                IB_DEVICE_SYS_IMAGE_GUID                |
                IB_DEVICE_RC_RNR_NAK_GEN;
-       flags = gen->flags;
-       if (flags & MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR)
+
+       if (MLX5_CAP_GEN(mdev, pkv))
                props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
-       if (flags & MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR)
+       if (MLX5_CAP_GEN(mdev, qkv))
                props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
-       if (flags & MLX5_DEV_CAP_FLAG_APM)
+       if (MLX5_CAP_GEN(mdev, apm))
                props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
        props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
-       if (flags & MLX5_DEV_CAP_FLAG_XRC)
+       if (MLX5_CAP_GEN(mdev, xrc))
                props->device_cap_flags |= IB_DEVICE_XRC;
        props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
-       if (flags & MLX5_DEV_CAP_FLAG_SIG_HAND_OVER) {
+       if (MLX5_CAP_GEN(mdev, sho)) {
                props->device_cap_flags |= IB_DEVICE_SIGNATURE_HANDOVER;
                /* At this stage no support for signature handover */
                props->sig_prot_cap = IB_PROT_T10DIF_TYPE_1 |
@@ -116,221 +253,274 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
                props->sig_guard_cap = IB_GUARD_T10DIF_CRC |
                                       IB_GUARD_T10DIF_CSUM;
        }
-       if (flags & MLX5_DEV_CAP_FLAG_BLOCK_MCAST)
+       if (MLX5_CAP_GEN(mdev, block_lb_mc))
                props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
 
-       props->vendor_id           = be32_to_cpup((__be32 *)(out_mad->data + 36)) &
-               0xffffff;
-       props->vendor_part_id      = be16_to_cpup((__be16 *)(out_mad->data + 30));
-       props->hw_ver              = be32_to_cpup((__be32 *)(out_mad->data + 32));
-       memcpy(&props->sys_image_guid, out_mad->data +  4, 8);
+       props->vendor_part_id      = mdev->pdev->device;
+       props->hw_ver              = mdev->pdev->revision;
 
        props->max_mr_size         = ~0ull;
-       props->page_size_cap       = gen->min_page_sz;
-       props->max_qp              = 1 << gen->log_max_qp;
-       props->max_qp_wr           = gen->max_wqes;
-       max_rq_sg = gen->max_rq_desc_sz / sizeof(struct mlx5_wqe_data_seg);
-       max_sq_sg = (gen->max_sq_desc_sz - sizeof(struct mlx5_wqe_ctrl_seg)) /
-               sizeof(struct mlx5_wqe_data_seg);
+       props->page_size_cap       = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);
+       props->max_qp              = 1 << MLX5_CAP_GEN(mdev, log_max_qp);
+       props->max_qp_wr           = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
+       max_rq_sg =  MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
+                    sizeof(struct mlx5_wqe_data_seg);
+       max_sq_sg = (MLX5_CAP_GEN(mdev, max_wqe_sz_sq) -
+                    sizeof(struct mlx5_wqe_ctrl_seg)) /
+                    sizeof(struct mlx5_wqe_data_seg);
        props->max_sge = min(max_rq_sg, max_sq_sg);
-       props->max_cq              = 1 << gen->log_max_cq;
-       props->max_cqe             = gen->max_cqes - 1;
-       props->max_mr              = 1 << gen->log_max_mkey;
-       props->max_pd              = 1 << gen->log_max_pd;
-       props->max_qp_rd_atom      = 1 << gen->log_max_ra_req_qp;
-       props->max_qp_init_rd_atom = 1 << gen->log_max_ra_res_qp;
-       props->max_srq             = 1 << gen->log_max_srq;
-       props->max_srq_wr          = gen->max_srq_wqes - 1;
-       props->local_ca_ack_delay  = gen->local_ca_ack_delay;
+       props->max_cq              = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
+       props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_eq_sz)) - 1;
+       props->max_mr              = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
+       props->max_pd              = 1 << MLX5_CAP_GEN(mdev, log_max_pd);
+       props->max_qp_rd_atom      = 1 << MLX5_CAP_GEN(mdev, log_max_ra_req_qp);
+       props->max_qp_init_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_res_qp);
+       props->max_srq             = 1 << MLX5_CAP_GEN(mdev, log_max_srq);
+       props->max_srq_wr = (1 << MLX5_CAP_GEN(mdev, log_max_srq_sz)) - 1;
+       props->local_ca_ack_delay  = MLX5_CAP_GEN(mdev, local_ca_ack_delay);
        props->max_res_rd_atom     = props->max_qp_rd_atom * props->max_qp;
        props->max_srq_sge         = max_rq_sg - 1;
        props->max_fast_reg_page_list_len = (unsigned int)-1;
-       props->local_ca_ack_delay  = gen->local_ca_ack_delay;
        props->atomic_cap          = IB_ATOMIC_NONE;
        props->masked_atomic_cap   = IB_ATOMIC_NONE;
-       props->max_pkeys           = be16_to_cpup((__be16 *)(out_mad->data + 28));
-       props->max_mcast_grp       = 1 << gen->log_max_mcg;
-       props->max_mcast_qp_attach = gen->max_qp_mcg;
+       props->max_mcast_grp       = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
+       props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg);
        props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
                                           props->max_mcast_grp;
        props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */
 
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-       if (dev->mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG)
+       if (MLX5_CAP_GEN(mdev, pg))
                props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
        props->odp_caps = dev->odp_caps;
 #endif
 
-out:
-       kfree(in_mad);
-       kfree(out_mad);
-
-       return err;
+       return 0;
 }
 
-int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
-                      struct ib_port_attr *props)
+enum mlx5_ib_width {
+       MLX5_IB_WIDTH_1X        = 1 << 0,
+       MLX5_IB_WIDTH_2X        = 1 << 1,
+       MLX5_IB_WIDTH_4X        = 1 << 2,
+       MLX5_IB_WIDTH_8X        = 1 << 3,
+       MLX5_IB_WIDTH_12X       = 1 << 4
+};
+
+static int translate_active_width(struct ib_device *ibdev, u8 active_width,
+                                 u8 *ib_width)
 {
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
-       struct ib_smp *in_mad  = NULL;
-       struct ib_smp *out_mad = NULL;
-       struct mlx5_general_caps *gen;
-       int ext_active_speed;
-       int err = -ENOMEM;
-
-       gen = &dev->mdev->caps.gen;
-       if (port < 1 || port > gen->num_ports) {
-               mlx5_ib_warn(dev, "invalid port number %d\n", port);
-               return -EINVAL;
+       int err = 0;
+
+       if (active_width & MLX5_IB_WIDTH_1X) {
+               *ib_width = IB_WIDTH_1X;
+       } else if (active_width & MLX5_IB_WIDTH_2X) {
+               mlx5_ib_dbg(dev, "active_width %d is not supported by IB spec\n",
+                           (int)active_width);
+               err = -EINVAL;
+       } else if (active_width & MLX5_IB_WIDTH_4X) {
+               *ib_width = IB_WIDTH_4X;
+       } else if (active_width & MLX5_IB_WIDTH_8X) {
+               *ib_width = IB_WIDTH_8X;
+       } else if (active_width & MLX5_IB_WIDTH_12X) {
+               *ib_width = IB_WIDTH_12X;
+       } else {
+               mlx5_ib_dbg(dev, "Invalid active_width %d\n",
+                           (int)active_width);
+               err = -EINVAL;
        }
 
-       in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
-       out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
-       if (!in_mad || !out_mad)
-               goto out;
-
-       memset(props, 0, sizeof(*props));
-
-       init_query_mad(in_mad);
-       in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
-       in_mad->attr_mod = cpu_to_be32(port);
+       return err;
+}
 
-       err = mlx5_MAD_IFC(dev, 1, 1, port, NULL, NULL, in_mad, out_mad);
-       if (err) {
-               mlx5_ib_warn(dev, "err %d\n", err);
-               goto out;
+static int mlx5_mtu_to_ib_mtu(int mtu)
+{
+       switch (mtu) {
+       case 256: return 1;
+       case 512: return 2;
+       case 1024: return 3;
+       case 2048: return 4;
+       case 4096: return 5;
+       default:
+               pr_warn("invalid mtu\n");
+               return -1;
        }
+}
 
+enum ib_max_vl_num {
+       __IB_MAX_VL_0           = 1,
+       __IB_MAX_VL_0_1         = 2,
+       __IB_MAX_VL_0_3         = 3,
+       __IB_MAX_VL_0_7         = 4,
+       __IB_MAX_VL_0_14        = 5,
+};
 
-       props->lid              = be16_to_cpup((__be16 *)(out_mad->data + 16));
-       props->lmc              = out_mad->data[34] & 0x7;
-       props->sm_lid           = be16_to_cpup((__be16 *)(out_mad->data + 18));
-       props->sm_sl            = out_mad->data[36] & 0xf;
-       props->state            = out_mad->data[32] & 0xf;
-       props->phys_state       = out_mad->data[33] >> 4;
-       props->port_cap_flags   = be32_to_cpup((__be32 *)(out_mad->data + 20));
-       props->gid_tbl_len      = out_mad->data[50];
-       props->max_msg_sz       = 1 << gen->log_max_msg;
-       props->pkey_tbl_len     = gen->port[port - 1].pkey_table_len;
-       props->bad_pkey_cntr    = be16_to_cpup((__be16 *)(out_mad->data + 46));
-       props->qkey_viol_cntr   = be16_to_cpup((__be16 *)(out_mad->data + 48));
-       props->active_width     = out_mad->data[31] & 0xf;
-       props->active_speed     = out_mad->data[35] >> 4;
-       props->max_mtu          = out_mad->data[41] & 0xf;
-       props->active_mtu       = out_mad->data[36] >> 4;
-       props->subnet_timeout   = out_mad->data[51] & 0x1f;
-       props->max_vl_num       = out_mad->data[37] >> 4;
-       props->init_type_reply  = out_mad->data[41] >> 4;
-
-       /* Check if extended speeds (EDR/FDR/...) are supported */
-       if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
-               ext_active_speed = out_mad->data[62] >> 4;
-
-               switch (ext_active_speed) {
-               case 1:
-                       props->active_speed = 16; /* FDR */
-                       break;
-               case 2:
-                       props->active_speed = 32; /* EDR */
-                       break;
-               }
-       }
+enum mlx5_vl_hw_cap {
+       MLX5_VL_HW_0    = 1,
+       MLX5_VL_HW_0_1  = 2,
+       MLX5_VL_HW_0_2  = 3,
+       MLX5_VL_HW_0_3  = 4,
+       MLX5_VL_HW_0_4  = 5,
+       MLX5_VL_HW_0_5  = 6,
+       MLX5_VL_HW_0_6  = 7,
+       MLX5_VL_HW_0_7  = 8,
+       MLX5_VL_HW_0_14 = 15
+};
 
-       /* If reported active speed is QDR, check if is FDR-10 */
-       if (props->active_speed == 4) {
-               if (gen->ext_port_cap[port - 1] &
-                   MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) {
-                       init_query_mad(in_mad);
-                       in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
-                       in_mad->attr_mod = cpu_to_be32(port);
-
-                       err = mlx5_MAD_IFC(dev, 1, 1, port,
-                                          NULL, NULL, in_mad, out_mad);
-                       if (err)
-                               goto out;
-
-                       /* Checking LinkSpeedActive for FDR-10 */
-                       if (out_mad->data[15] & 0x1)
-                               props->active_speed = 8;
-               }
-       }
+static int translate_max_vl_num(struct ib_device *ibdev, u8 vl_hw_cap,
+                               u8 *max_vl_num)
+{
+       switch (vl_hw_cap) {
+       case MLX5_VL_HW_0:
+               *max_vl_num = __IB_MAX_VL_0;
+               break;
+       case MLX5_VL_HW_0_1:
+               *max_vl_num = __IB_MAX_VL_0_1;
+               break;
+       case MLX5_VL_HW_0_3:
+               *max_vl_num = __IB_MAX_VL_0_3;
+               break;
+       case MLX5_VL_HW_0_7:
+               *max_vl_num = __IB_MAX_VL_0_7;
+               break;
+       case MLX5_VL_HW_0_14:
+               *max_vl_num = __IB_MAX_VL_0_14;
+               break;
 
-out:
-       kfree(in_mad);
-       kfree(out_mad);
+       default:
+               return -EINVAL;
+       }
 
-       return err;
+       return 0;
 }
 
-static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
-                            union ib_gid *gid)
+static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
+                              struct ib_port_attr *props)
 {
-       struct ib_smp *in_mad  = NULL;
-       struct ib_smp *out_mad = NULL;
-       int err = -ENOMEM;
+       struct mlx5_ib_dev *dev = to_mdev(ibdev);
+       struct mlx5_core_dev *mdev = dev->mdev;
+       struct mlx5_hca_vport_context *rep;
+       int max_mtu;
+       int oper_mtu;
+       int err;
+       u8 ib_link_width_oper;
+       u8 vl_hw_cap;
 
-       in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
-       out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
-       if (!in_mad || !out_mad)
+       rep = kzalloc(sizeof(*rep), GFP_KERNEL);
+       if (!rep) {
+               err = -ENOMEM;
                goto out;
+       }
 
-       init_query_mad(in_mad);
-       in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
-       in_mad->attr_mod = cpu_to_be32(port);
+       memset(props, 0, sizeof(*props));
+
+       err = mlx5_query_hca_vport_context(mdev, 0, port, 0, rep);
+       if (err)
+               goto out;
 
-       err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
+       props->lid              = rep->lid;
+       props->lmc              = rep->lmc;
+       props->sm_lid           = rep->sm_lid;
+       props->sm_sl            = rep->sm_sl;
+       props->state            = rep->vport_state;
+       props->phys_state       = rep->port_physical_state;
+       props->port_cap_flags   = rep->cap_mask1;
+       props->gid_tbl_len      = mlx5_get_gid_table_len(MLX5_CAP_GEN(mdev, gid_table_size));
+       props->max_msg_sz       = 1 << MLX5_CAP_GEN(mdev, log_max_msg);
+       props->pkey_tbl_len     = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev, pkey_table_size));
+       props->bad_pkey_cntr    = rep->pkey_violation_counter;
+       props->qkey_viol_cntr   = rep->qkey_violation_counter;
+       props->subnet_timeout   = rep->subnet_timeout;
+       props->init_type_reply  = rep->init_type_reply;
+
+       err = mlx5_query_port_link_width_oper(mdev, &ib_link_width_oper, port);
        if (err)
                goto out;
 
-       memcpy(gid->raw, out_mad->data + 8, 8);
+       err = translate_active_width(ibdev, ib_link_width_oper,
+                                    &props->active_width);
+       if (err)
+               goto out;
+       err = mlx5_query_port_proto_oper(mdev, &props->active_speed, MLX5_PTYS_IB,
+                                        port);
+       if (err)
+               goto out;
+
+       err = mlx5_query_port_max_mtu(mdev, &max_mtu, port);
+       if (err)
+               goto out;
 
-       init_query_mad(in_mad);
-       in_mad->attr_id  = IB_SMP_ATTR_GUID_INFO;
-       in_mad->attr_mod = cpu_to_be32(index / 8);
+       props->max_mtu = mlx5_mtu_to_ib_mtu(max_mtu);
 
-       err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
+       err = mlx5_query_port_oper_mtu(mdev, &oper_mtu, port);
        if (err)
                goto out;
 
-       memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);
+       props->active_mtu = mlx5_mtu_to_ib_mtu(oper_mtu);
+
+       err = mlx5_query_port_vl_hw_cap(mdev, &vl_hw_cap, port);
+       if (err)
+               goto out;
 
+       err = translate_max_vl_num(ibdev, vl_hw_cap,
+                                  &props->max_vl_num);
 out:
-       kfree(in_mad);
-       kfree(out_mad);
+       kfree(rep);
        return err;
 }
 
-static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
-                             u16 *pkey)
+int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
+                      struct ib_port_attr *props)
 {
-       struct ib_smp *in_mad  = NULL;
-       struct ib_smp *out_mad = NULL;
-       int err = -ENOMEM;
+       switch (mlx5_get_vport_access_method(ibdev)) {
+       case MLX5_VPORT_ACCESS_METHOD_MAD:
+               return mlx5_query_mad_ifc_port(ibdev, port, props);
 
-       in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
-       out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
-       if (!in_mad || !out_mad)
-               goto out;
+       case MLX5_VPORT_ACCESS_METHOD_HCA:
+               return mlx5_query_hca_port(ibdev, port, props);
 
-       init_query_mad(in_mad);
-       in_mad->attr_id  = IB_SMP_ATTR_PKEY_TABLE;
-       in_mad->attr_mod = cpu_to_be32(index / 32);
+       default:
+               return -EINVAL;
+       }
+}
 
-       err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
-       if (err)
-               goto out;
+static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
+                            union ib_gid *gid)
+{
+       struct mlx5_ib_dev *dev = to_mdev(ibdev);
+       struct mlx5_core_dev *mdev = dev->mdev;
 
-       *pkey = be16_to_cpu(((__be16 *)out_mad->data)[index % 32]);
+       switch (mlx5_get_vport_access_method(ibdev)) {
+       case MLX5_VPORT_ACCESS_METHOD_MAD:
+               return mlx5_query_mad_ifc_gids(ibdev, port, index, gid);
+
+       case MLX5_VPORT_ACCESS_METHOD_HCA:
+               return mlx5_query_hca_vport_gid(mdev, 0, port, 0, index, gid);
+
+       default:
+               return -EINVAL;
+       }
 
-out:
-       kfree(in_mad);
-       kfree(out_mad);
-       return err;
 }
 
-struct mlx5_reg_node_desc {
-       u8      desc[64];
-};
+static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
+                             u16 *pkey)
+{
+       struct mlx5_ib_dev *dev = to_mdev(ibdev);
+       struct mlx5_core_dev *mdev = dev->mdev;
+
+       switch (mlx5_get_vport_access_method(ibdev)) {
+       case MLX5_VPORT_ACCESS_METHOD_MAD:
+               return mlx5_query_mad_ifc_pkey(ibdev, port, index, pkey);
+
+       case MLX5_VPORT_ACCESS_METHOD_HCA:
+       case MLX5_VPORT_ACCESS_METHOD_NIC:
+               return mlx5_query_hca_vport_pkey(mdev, 0, port,  0, index,
+                                                pkey);
+       default:
+               return -EINVAL;
+       }
+}
 
 static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask,
                                 struct ib_device_modify *props)
@@ -392,7 +582,6 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
        struct mlx5_ib_alloc_ucontext_req_v2 req;
        struct mlx5_ib_alloc_ucontext_resp resp;
        struct mlx5_ib_ucontext *context;
-       struct mlx5_general_caps *gen;
        struct mlx5_uuar_info *uuari;
        struct mlx5_uar *uars;
        int gross_uuars;
@@ -403,7 +592,6 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
        int i;
        size_t reqlen;
 
-       gen = &dev->mdev->caps.gen;
        if (!dev->ib_active)
                return ERR_PTR(-EAGAIN);
 
@@ -436,14 +624,14 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 
        num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE;
        gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE;
-       resp.qp_tab_size      = 1 << gen->log_max_qp;
-       resp.bf_reg_size      = gen->bf_reg_size;
-       resp.cache_line_size  = L1_CACHE_BYTES;
-       resp.max_sq_desc_sz = gen->max_sq_desc_sz;
-       resp.max_rq_desc_sz = gen->max_rq_desc_sz;
-       resp.max_send_wqebb = gen->max_wqes;
-       resp.max_recv_wr = gen->max_wqes;
-       resp.max_srq_recv_wr = gen->max_srq_wqes;
+       resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
+       resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
+       resp.cache_line_size = L1_CACHE_BYTES;
+       resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
+       resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
+       resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
+       resp.max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
+       resp.max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
 
        context = kzalloc(sizeof(*context), GFP_KERNEL);
        if (!context)
@@ -493,7 +681,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
        mutex_init(&context->db_page_mutex);
 
        resp.tot_uuars = req.total_num_uuars;
-       resp.num_ports = gen->num_ports;
+       resp.num_ports = MLX5_CAP_GEN(dev->mdev, num_ports);
        err = ib_copy_to_udata(udata, &resp,
                               sizeof(resp) - sizeof(resp.reserved));
        if (err)
@@ -731,37 +919,15 @@ static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 
 static int init_node_data(struct mlx5_ib_dev *dev)
 {
-       struct ib_smp *in_mad  = NULL;
-       struct ib_smp *out_mad = NULL;
-       int err = -ENOMEM;
-
-       in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
-       out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
-       if (!in_mad || !out_mad)
-               goto out;
-
-       init_query_mad(in_mad);
-       in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
-
-       err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
-       if (err)
-               goto out;
-
-       memcpy(dev->ib_dev.node_desc, out_mad->data, 64);
-
-       in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
+       int err;
 
-       err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
+       err = mlx5_query_node_desc(dev, dev->ib_dev.node_desc);
        if (err)
-               goto out;
+               return err;
 
-       dev->mdev->rev_id = be32_to_cpup((__be32 *)(out_mad->data + 32));
-       memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
+       dev->mdev->rev_id = dev->mdev->pdev->revision;
 
-out:
-       kfree(in_mad);
-       kfree(out_mad);
-       return err;
+       return mlx5_query_node_guid(dev, &dev->ib_dev.node_guid);
 }
 
 static ssize_t show_fw_pages(struct device *device, struct device_attribute *attr,
@@ -895,11 +1061,9 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
 
 static void get_ext_port_caps(struct mlx5_ib_dev *dev)
 {
-       struct mlx5_general_caps *gen;
        int port;
 
-       gen = &dev->mdev->caps.gen;
-       for (port = 1; port <= gen->num_ports; port++)
+       for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++)
                mlx5_query_ext_port_caps(dev, port);
 }
 
@@ -907,11 +1071,9 @@ static int get_port_caps(struct mlx5_ib_dev *dev)
 {
        struct ib_device_attr *dprops = NULL;
        struct ib_port_attr *pprops = NULL;
-       struct mlx5_general_caps *gen;
        int err = -ENOMEM;
        int port;
 
-       gen = &dev->mdev->caps.gen;
        pprops = kmalloc(sizeof(*pprops), GFP_KERNEL);
        if (!pprops)
                goto out;
@@ -926,14 +1088,17 @@ static int get_port_caps(struct mlx5_ib_dev *dev)
                goto out;
        }
 
-       for (port = 1; port <= gen->num_ports; port++) {
+       for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++) {
                err = mlx5_ib_query_port(&dev->ib_dev, port, pprops);
                if (err) {
-                       mlx5_ib_warn(dev, "query_port %d failed %d\n", port, err);
+                       mlx5_ib_warn(dev, "query_port %d failed %d\n",
+                                    port, err);
                        break;
                }
-               gen->port[port - 1].pkey_table_len = dprops->max_pkeys;
-               gen->port[port - 1].gid_table_len = pprops->gid_tbl_len;
+               dev->mdev->port_caps[port - 1].pkey_table_len =
+                                               dprops->max_pkeys;
+               dev->mdev->port_caps[port - 1].gid_table_len =
+                                               pprops->gid_tbl_len;
                mlx5_ib_dbg(dev, "pkey_table_len %d, gid_table_len %d\n",
                            dprops->max_pkeys, pprops->gid_tbl_len);
        }
@@ -1159,8 +1324,29 @@ static int create_dev_resources(struct mlx5_ib_resources *devr)
        atomic_inc(&devr->p0->usecnt);
        atomic_set(&devr->s0->usecnt, 0);
 
+       memset(&attr, 0, sizeof(attr));
+       attr.attr.max_sge = 1;
+       attr.attr.max_wr = 1;
+       attr.srq_type = IB_SRQT_BASIC;
+       devr->s1 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
+       if (IS_ERR(devr->s1)) {
+               ret = PTR_ERR(devr->s1);
+               goto error5;
+       }
+       devr->s1->device        = &dev->ib_dev;
+       devr->s1->pd            = devr->p0;
+       devr->s1->uobject       = NULL;
+       devr->s1->event_handler = NULL;
+       devr->s1->srq_context   = NULL;
+       devr->s1->srq_type      = IB_SRQT_BASIC;
+       devr->s1->ext.xrc.cq    = devr->c0;
+       atomic_inc(&devr->p0->usecnt);
+       atomic_set(&devr->s0->usecnt, 0);
+
        return 0;
 
+error5:
+       mlx5_ib_destroy_srq(devr->s0);
 error4:
        mlx5_ib_dealloc_xrcd(devr->x1);
 error3:
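A second device-level SRQ, s1, is now reserved next to s0, created with minimal attributes against PD p0 and CQ c0, with a matching release added to destroy_dev_resources() below. A sketch of the init-attr setup using designated initializers (the patch open-codes the assignments):

        struct ib_srq_init_attr attr = {
                .attr     = { .max_wr = 1, .max_sge = 1 },
                .srq_type = IB_SRQT_BASIC,
        };
        struct ib_srq *s1 = mlx5_ib_create_srq(devr->p0, &attr, NULL);

        if (IS_ERR(s1))
                return PTR_ERR(s1);     /* the real code unwinds via error5 */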
@@ -1175,6 +1361,7 @@ error0:
 
 static void destroy_dev_resources(struct mlx5_ib_resources *devr)
 {
+       mlx5_ib_destroy_srq(devr->s1);
        mlx5_ib_destroy_srq(devr->s0);
        mlx5_ib_dealloc_xrcd(devr->x0);
        mlx5_ib_dealloc_xrcd(devr->x1);
@@ -1188,6 +1375,10 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
        int err;
        int i;
 
+       /* don't create IB instance over Eth ports, no RoCE yet! */
+       if (MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH)
+               return NULL;
+
        printk_once(KERN_INFO "%s", mlx5_version);
 
        dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
@@ -1200,15 +1391,16 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
        if (err)
                goto err_dealloc;
 
-       get_ext_port_caps(dev);
+       if (mlx5_use_mad_ifc(dev))
+               get_ext_port_caps(dev);
 
        MLX5_INIT_DOORBELL_LOCK(&dev->uar_lock);
 
        strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX);
        dev->ib_dev.owner               = THIS_MODULE;
        dev->ib_dev.node_type           = RDMA_NODE_IB_CA;
-       dev->ib_dev.local_dma_lkey      = mdev->caps.gen.reserved_lkey;
-       dev->num_ports          = mdev->caps.gen.num_ports;
+       dev->ib_dev.local_dma_lkey      = 0 /* not supported for now */;
+       dev->num_ports          = MLX5_CAP_GEN(mdev, num_ports);
        dev->ib_dev.phys_port_cnt     = dev->num_ports;
        dev->ib_dev.num_comp_vectors    =
                dev->mdev->priv.eq_table.num_comp_vectors;
@@ -1286,9 +1478,9 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
        dev->ib_dev.free_fast_reg_page_list  = mlx5_ib_free_fast_reg_page_list;
        dev->ib_dev.check_mr_status     = mlx5_ib_check_mr_status;
 
-       mlx5_ib_internal_query_odp_caps(dev);
+       mlx5_ib_internal_fill_odp_caps(dev);
 
-       if (mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_XRC) {
+       if (MLX5_CAP_GEN(mdev, xrc)) {
                dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
                dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd;
                dev->ib_dev.uverbs_cmd_mask |=
index dff1cfcdf476cfed06d8835cd5316d234df09e1e..873dc354766a81c001c50cfd60bab1980c8e6fa5 100644 (file)
@@ -415,6 +415,7 @@ struct mlx5_ib_resources {
        struct ib_xrcd  *x1;
        struct ib_pd    *p0;
        struct ib_srq   *s0;
+       struct ib_srq   *s1;
 };
 
 struct mlx5_ib_dev {
@@ -594,6 +595,22 @@ struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
 int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd);
 int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
 int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
+int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
+                                         struct ib_smp *out_mad);
+int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
+                                        __be64 *sys_image_guid);
+int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
+                                u16 *max_pkeys);
+int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
+                                u32 *vendor_id);
+int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc);
+int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid);
+int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
+                           u16 *pkey);
+int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
+                           union ib_gid *gid);
+int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
+                           struct ib_port_attr *props);
 int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
                       struct ib_port_attr *props);
 int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev);
@@ -617,7 +634,7 @@ int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
 extern struct workqueue_struct *mlx5_ib_page_fault_wq;
 
-int mlx5_ib_internal_query_odp_caps(struct mlx5_ib_dev *dev);
+void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev);
 void mlx5_ib_mr_pfault_handler(struct mlx5_ib_qp *qp,
                               struct mlx5_ib_pfault *pfault);
 void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp);
@@ -631,9 +648,9 @@ void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
                              unsigned long end);
 
 #else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
-static inline int mlx5_ib_internal_query_odp_caps(struct mlx5_ib_dev *dev)
+static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
 {
-       return 0;
+       return;
 }
 
 static inline void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp)                {}
index 71c5935838649e71a4a2f6b6cc16cb18f9a1cc16..bc9a0de897cb466d62d69dde0330ca96581953ca 100644 (file)
@@ -975,8 +975,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
        struct mlx5_ib_mr *mr;
        int inlen;
        int err;
-       bool pg_cap = !!(dev->mdev->caps.gen.flags &
-                        MLX5_DEV_CAP_FLAG_ON_DMND_PG);
+       bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));
 
        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
index 5099db08afd2c80c1b9049e3fc71fef4cfbb269b..aa8391e75385016bb11a2526f8660331193b7c4e 100644 (file)
@@ -109,40 +109,33 @@ void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
        ib_umem_odp_unmap_dma_pages(umem, start, end);
 }
 
-#define COPY_ODP_BIT_MLX_TO_IB(reg, ib_caps, field_name, bit_name) do {        \
-       if (be32_to_cpu(reg.field_name) & MLX5_ODP_SUPPORT_##bit_name)  \
-               ib_caps->field_name |= IB_ODP_SUPPORT_##bit_name;       \
-} while (0)
-
-int mlx5_ib_internal_query_odp_caps(struct mlx5_ib_dev *dev)
+void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
 {
-       int err;
-       struct mlx5_odp_caps hw_caps;
        struct ib_odp_caps *caps = &dev->odp_caps;
 
        memset(caps, 0, sizeof(*caps));
 
-       if (!(dev->mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG))
-               return 0;
-
-       err = mlx5_query_odp_caps(dev->mdev, &hw_caps);
-       if (err)
-               goto out;
+       if (!MLX5_CAP_GEN(dev->mdev, pg))
+               return;
 
        caps->general_caps = IB_ODP_SUPPORT;
-       COPY_ODP_BIT_MLX_TO_IB(hw_caps, caps, per_transport_caps.ud_odp_caps,
-                              SEND);
-       COPY_ODP_BIT_MLX_TO_IB(hw_caps, caps, per_transport_caps.rc_odp_caps,
-                              SEND);
-       COPY_ODP_BIT_MLX_TO_IB(hw_caps, caps, per_transport_caps.rc_odp_caps,
-                              RECV);
-       COPY_ODP_BIT_MLX_TO_IB(hw_caps, caps, per_transport_caps.rc_odp_caps,
-                              WRITE);
-       COPY_ODP_BIT_MLX_TO_IB(hw_caps, caps, per_transport_caps.rc_odp_caps,
-                              READ);
-
-out:
-       return err;
+
+       if (MLX5_CAP_ODP(dev->mdev, ud_odp_caps.send))
+               caps->per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SEND;
+
+       if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.send))
+               caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SEND;
+
+       if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.receive))
+               caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_RECV;
+
+       if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.write))
+               caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_WRITE;
+
+       if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.read))
+               caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_READ;
+
+       return;
 }
 
 static struct mlx5_ib_mr *mlx5_ib_odp_find_mr_lkey(struct mlx5_ib_dev *dev,
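Filling the ODP caps is now synchronous and cannot fail: general support is gated on the pg (paging) general capability, then each MLX5_CAP_ODP() field is tested and translated to its IB_ODP_SUPPORT_* bit, so the function returns void and the init path calls it unconditionally. The gate, restated briefly:

        memset(caps, 0, sizeof(*caps));
        if (!MLX5_CAP_GEN(dev->mdev, pg))
                return;                 /* no on-demand paging support at all */
        caps->general_caps = IB_ODP_SUPPORT;
        /* per-transport bits follow, e.g.: */
        if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.write))
                caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_WRITE;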
index d35f62d4f4c58ecce848cfb2d0544500c62dbfd3..203c8a45e095560b146859e464eb0a33933c474a 100644 (file)
@@ -220,13 +220,11 @@ static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
 static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
                       int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd)
 {
-       struct mlx5_general_caps *gen;
        int wqe_size;
        int wq_size;
 
-       gen = &dev->mdev->caps.gen;
        /* Sanity check RQ size before proceeding */
-       if (cap->max_recv_wr  > gen->max_wqes)
+       if (cap->max_recv_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz)))
                return -EINVAL;
 
        if (!has_rq) {
@@ -246,10 +244,11 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
                        wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size;
                        wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB);
                        qp->rq.wqe_cnt = wq_size / wqe_size;
-                       if (wqe_size > gen->max_rq_desc_sz) {
+                       if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq)) {
                                mlx5_ib_dbg(dev, "wqe_size %d, max %d\n",
                                            wqe_size,
-                                           gen->max_rq_desc_sz);
+                                           MLX5_CAP_GEN(dev->mdev,
+                                                        max_wqe_sz_rq));
                                return -EINVAL;
                        }
                        qp->rq.wqe_shift = ilog2(wqe_size);
@@ -330,11 +329,9 @@ static int calc_send_wqe(struct ib_qp_init_attr *attr)
 static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
                        struct mlx5_ib_qp *qp)
 {
-       struct mlx5_general_caps *gen;
        int wqe_size;
        int wq_size;
 
-       gen = &dev->mdev->caps.gen;
        if (!attr->cap.max_send_wr)
                return 0;
 
@@ -343,9 +340,9 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
        if (wqe_size < 0)
                return wqe_size;
 
-       if (wqe_size > gen->max_sq_desc_sz) {
+       if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
                mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
-                           wqe_size, gen->max_sq_desc_sz);
+                           wqe_size, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
                return -EINVAL;
        }
 
@@ -358,9 +355,10 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
 
        wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
        qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
-       if (qp->sq.wqe_cnt > gen->max_wqes) {
+       if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
                mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n",
-                           qp->sq.wqe_cnt, gen->max_wqes);
+                           qp->sq.wqe_cnt,
+                           1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
                return -ENOMEM;
        }
        qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
@@ -375,13 +373,11 @@ static int set_user_buf_size(struct mlx5_ib_dev *dev,
                            struct mlx5_ib_qp *qp,
                            struct mlx5_ib_create_qp *ucmd)
 {
-       struct mlx5_general_caps *gen;
        int desc_sz = 1 << qp->sq.wqe_shift;
 
-       gen = &dev->mdev->caps.gen;
-       if (desc_sz > gen->max_sq_desc_sz) {
+       if (desc_sz > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
                mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n",
-                            desc_sz, gen->max_sq_desc_sz);
+                            desc_sz, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
                return -EINVAL;
        }
 
@@ -393,9 +389,10 @@ static int set_user_buf_size(struct mlx5_ib_dev *dev,
 
        qp->sq.wqe_cnt = ucmd->sq_wqe_count;
 
-       if (qp->sq.wqe_cnt > gen->max_wqes) {
+       if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
                mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n",
-                            qp->sq.wqe_cnt, gen->max_wqes);
+                            qp->sq.wqe_cnt,
+                            1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
                return -EINVAL;
        }
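The recurring `1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz)` expression reflects the new capability layout: limits such as the maximum WQE count are reported as log2 values, and the driver derives the absolute bound with a shift instead of reading a precomputed max_wqes field. A standalone sketch of that conversion (names hypothetical):

    #include <stdint.h>

    /* Derive an absolute limit from a log2-encoded capability, as done
     * with log_max_qp_sz above; callers assume log_max < 32. */
    static uint32_t max_from_log2(uint8_t log_max)
    {
            return 1u << log_max;
    }

    /* Usage sketch: reject a requested WQE count above the device limit. */
    static int check_wqe_cnt(uint32_t wqe_cnt, uint8_t log_max_qp_sz)
    {
            return wqe_cnt > max_from_log2(log_max_qp_sz) ? -1 : 0;
    }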
 
@@ -768,7 +765,7 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
        qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
        qp->buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift);
 
-       err = mlx5_buf_alloc(dev->mdev, qp->buf_size, PAGE_SIZE * 2, &qp->buf);
+       err = mlx5_buf_alloc(dev->mdev, qp->buf_size, &qp->buf);
        if (err) {
                mlx5_ib_dbg(dev, "err %d\n", err);
                goto err_uuar;
@@ -866,22 +863,21 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
                            struct ib_udata *udata, struct mlx5_ib_qp *qp)
 {
        struct mlx5_ib_resources *devr = &dev->devr;
+       struct mlx5_core_dev *mdev = dev->mdev;
        struct mlx5_ib_create_qp_resp resp;
        struct mlx5_create_qp_mbox_in *in;
-       struct mlx5_general_caps *gen;
        struct mlx5_ib_create_qp ucmd;
        int inlen = sizeof(*in);
        int err;
 
        mlx5_ib_odp_create_qp(qp);
 
-       gen = &dev->mdev->caps.gen;
        mutex_init(&qp->mutex);
        spin_lock_init(&qp->sq.lock);
        spin_lock_init(&qp->rq.lock);
 
        if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
-               if (!(gen->flags & MLX5_DEV_CAP_FLAG_BLOCK_MCAST)) {
+               if (!MLX5_CAP_GEN(mdev, block_lb_mc)) {
                        mlx5_ib_dbg(dev, "block multicast loopback isn't supported\n");
                        return -EINVAL;
                } else {
@@ -914,15 +910,17 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 
        if (pd) {
                if (pd->uobject) {
+                       __u32 max_wqes =
+                               1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
                        mlx5_ib_dbg(dev, "requested sq_wqe_count (%d)\n", ucmd.sq_wqe_count);
                        if (ucmd.rq_wqe_shift != qp->rq.wqe_shift ||
                            ucmd.rq_wqe_count != qp->rq.wqe_cnt) {
                                mlx5_ib_dbg(dev, "invalid rq params\n");
                                return -EINVAL;
                        }
-                       if (ucmd.sq_wqe_count > gen->max_wqes) {
+                       if (ucmd.sq_wqe_count > max_wqes) {
                                mlx5_ib_dbg(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n",
-                                           ucmd.sq_wqe_count, gen->max_wqes);
+                                           ucmd.sq_wqe_count, max_wqes);
                                return -EINVAL;
                        }
                        err = create_user_qp(dev, pd, qp, udata, &in, &resp, &inlen);
@@ -1014,7 +1012,8 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
                        in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(init_attr->srq)->msrq.srqn);
                } else {
                        in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn);
-                       in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
+                       in->ctx.rq_type_srqn |=
+                               cpu_to_be32(to_msrq(devr->s1)->msrq.srqn);
                }
        }
 
@@ -1226,7 +1225,6 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
                                struct ib_qp_init_attr *init_attr,
                                struct ib_udata *udata)
 {
-       struct mlx5_general_caps *gen;
        struct mlx5_ib_dev *dev;
        struct mlx5_ib_qp *qp;
        u16 xrcdn = 0;
@@ -1244,12 +1242,11 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
                }
                dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
        }
-       gen = &dev->mdev->caps.gen;
 
        switch (init_attr->qp_type) {
        case IB_QPT_XRC_TGT:
        case IB_QPT_XRC_INI:
-               if (!(gen->flags & MLX5_DEV_CAP_FLAG_XRC)) {
+               if (!MLX5_CAP_GEN(dev->mdev, xrc)) {
                        mlx5_ib_dbg(dev, "XRC not supported\n");
                        return ERR_PTR(-ENOSYS);
                }
@@ -1356,9 +1353,6 @@ enum {
 
 static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
 {
-       struct mlx5_general_caps *gen;
-
-       gen = &dev->mdev->caps.gen;
        if (rate == IB_RATE_PORT_CURRENT) {
                return 0;
        } else if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) {
@@ -1366,7 +1360,7 @@ static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
        } else {
                while (rate != IB_RATE_2_5_GBPS &&
                       !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
-                        gen->stat_rate_support))
+                        MLX5_CAP_GEN(dev->mdev, stat_rate_support)))
                        --rate;
        }
 
@@ -1377,10 +1371,8 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
                         struct mlx5_qp_path *path, u8 port, int attr_mask,
                         u32 path_flags, const struct ib_qp_attr *attr)
 {
-       struct mlx5_general_caps *gen;
        int err;
 
-       gen = &dev->mdev->caps.gen;
        path->fl = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
        path->free_ar = (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x80 : 0;
 
@@ -1391,9 +1383,11 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
        path->rlid      = cpu_to_be16(ah->dlid);
 
        if (ah->ah_flags & IB_AH_GRH) {
-               if (ah->grh.sgid_index >= gen->port[port - 1].gid_table_len) {
+               if (ah->grh.sgid_index >=
+                   dev->mdev->port_caps[port - 1].gid_table_len) {
                        pr_err("sgid_index (%u) too large. max is %d\n",
-                              ah->grh.sgid_index, gen->port[port - 1].gid_table_len);
+                              ah->grh.sgid_index,
+                              dev->mdev->port_caps[port - 1].gid_table_len);
                        return -EINVAL;
                }
                path->grh_mlid |= 1 << 7;
@@ -1570,7 +1564,6 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
        struct mlx5_ib_qp *qp = to_mqp(ibqp);
        struct mlx5_ib_cq *send_cq, *recv_cq;
        struct mlx5_qp_context *context;
-       struct mlx5_general_caps *gen;
        struct mlx5_modify_qp_mbox_in *in;
        struct mlx5_ib_pd *pd;
        enum mlx5_qp_state mlx5_cur, mlx5_new;
@@ -1579,7 +1572,6 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
        int mlx5_st;
        int err;
 
-       gen = &dev->mdev->caps.gen;
        in = kzalloc(sizeof(*in), GFP_KERNEL);
        if (!in)
                return -ENOMEM;
@@ -1619,7 +1611,8 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
                        err = -EINVAL;
                        goto out;
                }
-               context->mtu_msgmax = (attr->path_mtu << 5) | gen->log_max_msg;
+               context->mtu_msgmax = (attr->path_mtu << 5) |
+                                     (u8)MLX5_CAP_GEN(dev->mdev, log_max_msg);
        }
 
        if (attr_mask & IB_QP_DEST_QPN)
@@ -1777,11 +1770,9 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
        struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
        struct mlx5_ib_qp *qp = to_mqp(ibqp);
        enum ib_qp_state cur_state, new_state;
-       struct mlx5_general_caps *gen;
        int err = -EINVAL;
        int port;
 
-       gen = &dev->mdev->caps.gen;
        mutex_lock(&qp->mutex);
 
        cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
@@ -1793,21 +1784,25 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                goto out;
 
        if ((attr_mask & IB_QP_PORT) &&
-           (attr->port_num == 0 || attr->port_num > gen->num_ports))
+           (attr->port_num == 0 ||
+            attr->port_num > MLX5_CAP_GEN(dev->mdev, num_ports)))
                goto out;
 
        if (attr_mask & IB_QP_PKEY_INDEX) {
                port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
-               if (attr->pkey_index >= gen->port[port - 1].pkey_table_len)
+               if (attr->pkey_index >=
+                   dev->mdev->port_caps[port - 1].pkey_table_len)
                        goto out;
        }
 
        if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
-           attr->max_rd_atomic > (1 << gen->log_max_ra_res_qp))
+           attr->max_rd_atomic >
+           (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_res_qp)))
                goto out;
 
        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
-           attr->max_dest_rd_atomic > (1 << gen->log_max_ra_req_qp))
+           attr->max_dest_rd_atomic >
+           (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_req_qp)))
                goto out;
 
        if (cur_state == new_state && cur_state == IB_QPS_RESET) {
@@ -3009,7 +3004,7 @@ static void to_ib_ah_attr(struct mlx5_ib_dev *ibdev, struct ib_ah_attr *ib_ah_at
        ib_ah_attr->port_num      = path->port;
 
        if (ib_ah_attr->port_num == 0 ||
-           ib_ah_attr->port_num > dev->caps.gen.num_ports)
+           ib_ah_attr->port_num > MLX5_CAP_GEN(dev, num_ports))
                return;
 
        ib_ah_attr->sl = path->sl & 0xf;
@@ -3135,12 +3130,10 @@ struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
                                          struct ib_udata *udata)
 {
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
-       struct mlx5_general_caps *gen;
        struct mlx5_ib_xrcd *xrcd;
        int err;
 
-       gen = &dev->mdev->caps.gen;
-       if (!(gen->flags & MLX5_DEV_CAP_FLAG_XRC))
+       if (!MLX5_CAP_GEN(dev->mdev, xrc))
                return ERR_PTR(-ENOSYS);
 
        xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL);
index 02d77a29764d5e1ab925423b64bdb20157fbc781..e008505e96e9778ff9a71cfc021de7ecee6c3331 100644 (file)
@@ -165,7 +165,7 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
                return err;
        }
 
-       if (mlx5_buf_alloc(dev->mdev, buf_size, PAGE_SIZE * 2, &srq->buf)) {
+       if (mlx5_buf_alloc(dev->mdev, buf_size, &srq->buf)) {
                mlx5_ib_dbg(dev, "buf alloc failed\n");
                err = -ENOMEM;
                goto err_db;
@@ -236,7 +236,6 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
                                  struct ib_udata *udata)
 {
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
-       struct mlx5_general_caps *gen;
        struct mlx5_ib_srq *srq;
        int desc_size;
        int buf_size;
@@ -245,13 +244,13 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
        int uninitialized_var(inlen);
        int is_xrc;
        u32 flgs, xrcdn;
+       __u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
 
-       gen = &dev->mdev->caps.gen;
        /* Sanity check SRQ size before proceeding */
-       if (init_attr->attr.max_wr >= gen->max_srq_wqes) {
+       if (init_attr->attr.max_wr >= max_srq_wqes) {
                mlx5_ib_dbg(dev, "max_wr %d, cap %d\n",
                            init_attr->attr.max_wr,
-                           gen->max_srq_wqes);
+                           max_srq_wqes);
                return ERR_PTR(-EINVAL);
        }
 
@@ -303,7 +302,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
 
        in->ctx.pd = cpu_to_be32(to_mpd(pd)->pdn);
        in->ctx.db_record = cpu_to_be64(srq->db.dma);
-       err = mlx5_core_create_srq(dev->mdev, &srq->msrq, in, inlen);
+       err = mlx5_core_create_srq(dev->mdev, &srq->msrq, in, inlen, is_xrc);
        kvfree(in);
        if (err) {
                mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err);
index 1cc6ca8bfbda3fa1268407677e7abd24d8977b01..85cfa4f8691f59e4a674f41377de91e300593b7d 100644 (file)
@@ -2264,7 +2264,7 @@ static int capidrv_addcontr(u16 contr, struct capi_profile *profp)
                return -1;
        }
        card->owner = THIS_MODULE;
-       init_timer(&card->listentimer);
+       setup_timer(&card->listentimer, listentimerfunc, (unsigned long)card);
        strcpy(card->name, id);
        card->contrnr = contr;
        card->nbchan = profp->nbchannel;
@@ -2331,8 +2331,6 @@ static int capidrv_addcontr(u16 contr, struct capi_profile *profp)
        card->cipmask = 0x1FFF03FF;     /* any */
        card->cipmask2 = 0;
 
-       card->listentimer.data = (unsigned long)card;
-       card->listentimer.function = listentimerfunc;
        send_listen(card);
        mod_timer(&card->listentimer, jiffies + 60 * HZ);
 
index ead0a4fb7448643faa66cb8c3fceee3c2b62d9fd..a0fdbc074b982a441a18714bbca1b0adfa7eb265 100644 (file)
@@ -267,8 +267,8 @@ int st5481_setup_usb(struct st5481_adapter *adapter)
        }
 
        // The descriptor is wrong for some early samples of the ST5481 chip
-       altsetting->endpoint[3].desc.wMaxPacketSize = __constant_cpu_to_le16(32);
-       altsetting->endpoint[4].desc.wMaxPacketSize = __constant_cpu_to_le16(32);
+       altsetting->endpoint[3].desc.wMaxPacketSize = cpu_to_le16(32);
+       altsetting->endpoint[4].desc.wMaxPacketSize = cpu_to_le16(32);
 
        // Use alternative setting 3 on interface 0 to have 2B+D
        if ((status = usb_set_interface(dev, 0, 3)) < 0) {
index 8dc7290089bbc29ab8f2ec752c02dc3dfb73e806..0d29b5a6356d729a6ceb987bccc8c2dbe6779823 100644 (file)
@@ -601,14 +601,14 @@ static const struct proto_ops data_sock_ops = {
 };
 
 static int
-data_sock_create(struct net *net, struct socket *sock, int protocol)
+data_sock_create(struct net *net, struct socket *sock, int protocol, int kern)
 {
        struct sock *sk;
 
        if (sock->type != SOCK_DGRAM)
                return -ESOCKTNOSUPPORT;
 
-       sk = sk_alloc(net, PF_ISDN, GFP_KERNEL, &mISDN_proto);
+       sk = sk_alloc(net, PF_ISDN, GFP_KERNEL, &mISDN_proto, kern);
        if (!sk)
                return -ENOMEM;
 
@@ -756,14 +756,14 @@ static const struct proto_ops base_sock_ops = {
 
 
 static int
-base_sock_create(struct net *net, struct socket *sock, int protocol)
+base_sock_create(struct net *net, struct socket *sock, int protocol, int kern)
 {
        struct sock *sk;
 
        if (sock->type != SOCK_RAW)
                return -ESOCKTNOSUPPORT;
 
-       sk = sk_alloc(net, PF_ISDN, GFP_KERNEL, &mISDN_proto);
+       sk = sk_alloc(net, PF_ISDN, GFP_KERNEL, &mISDN_proto, kern);
        if (!sk)
                return -ENOMEM;
 
@@ -785,7 +785,7 @@ mISDN_sock_create(struct net *net, struct socket *sock, int proto, int kern)
 
        switch (proto) {
        case ISDN_P_BASE:
-               err = base_sock_create(net, sock, proto);
+               err = base_sock_create(net, sock, proto, kern);
                break;
        case ISDN_P_TE_S0:
        case ISDN_P_NT_S0:
@@ -799,7 +799,7 @@ mISDN_sock_create(struct net *net, struct socket *sock, int proto, int kern)
        case ISDN_P_B_L2DTMF:
        case ISDN_P_B_L2DSP:
        case ISDN_P_B_L2DSPHDLC:
-               err = data_sock_create(net, sock, proto);
+               err = data_sock_create(net, sock, proto, kern);
                break;
        default:
                return err;
index 433f823037dd7a25eceb5a644661aec52d3ea5b7..ec1f46a6be3a021f67b7098f81ec887a5d2abf7c 100644 (file)
@@ -267,6 +267,10 @@ static void cmodio_pci_remove(struct pci_dev *dev)
 static const struct pci_device_id cmodio_pci_ids[] = {
        { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, PCI_VENDOR_ID_JANZ, 0x0101 },
        { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_VENDOR_ID_JANZ, 0x0100 },
+       { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, PCI_VENDOR_ID_JANZ, 0x0201 },
+       { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, PCI_VENDOR_ID_JANZ, 0x0202 },
+       { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_VENDOR_ID_JANZ, 0x0201 },
+       { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_VENDOR_ID_JANZ, 0x0202 },
        { 0, }
 };
 MODULE_DEVICE_TABLE(pci, cmodio_pci_ids);
index df51d6025a9017413500046edb78401abe0dfdb3..019fceffc9e52980d1f3837b01a299b97782dfca 100644 (file)
@@ -179,6 +179,20 @@ config VXLAN
          To compile this driver as a module, choose M here: the module
          will be called vxlan.
 
+config GENEVE
+       tristate "Generic Network Virtualization Encapsulation netdev"
+       depends on INET && GENEVE_CORE
+       select NET_IP_TUNNEL
+       ---help---
+         This allows one to create geneve virtual interfaces that provide
+         Layer 2 networks over Layer 3 networks. GENEVE is often used
+         to tunnel virtual network infrastructure in virtualized environments.
+         For more information see:
+           http://tools.ietf.org/html/draft-gross-geneve-02
+
+         To compile this driver as a module, choose M here: the module
+         will be called geneve.
+
 config NETCONSOLE
        tristate "Network console logging support"
        ---help---
index e25fdd7d905e334dff7ccbcaba73b57436248f9f..c12cb22478a7daa39e2935f1c0aa22c26ab04bdc 100644 (file)
@@ -23,6 +23,7 @@ obj-$(CONFIG_TUN) += tun.o
 obj-$(CONFIG_VETH) += veth.o
 obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
 obj-$(CONFIG_VXLAN) += vxlan.o
+obj-$(CONFIG_GENEVE) += geneve.o
 obj-$(CONFIG_NLMON) += nlmon.o
 
 #
index fbd54f0e32e8d7baf7e1f2de3c5155201aeccfea..7fde4d5c2b28beca9c645a498fb3aefc467fbe26 100644 (file)
 /* Port Key definitions
  * key is determined according to the link speed, duplex and
  * user key (which is yet not supported)
- * --------------------------------------------------------------
- * Port key :  | User key      | Speed         | Duplex        |
- * --------------------------------------------------------------
- * 16            6               1               0
+ *           --------------------------------------------------------------
+ * Port key  | User key (10 bits)           | Speed (5 bits)      | Duplex|
+ *           --------------------------------------------------------------
+ *           |15                           6|5                   1|0
  */
 #define  AD_DUPLEX_KEY_MASKS    0x1
 #define  AD_SPEED_KEY_MASKS     0x3E
@@ -1908,8 +1908,14 @@ void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution)
 
                BOND_AD_INFO(bond).aggregator_identifier = 0;
 
-               BOND_AD_INFO(bond).system.sys_priority = 0xFFFF;
-               BOND_AD_INFO(bond).system.sys_mac_addr = *((struct mac_addr *)bond->dev->dev_addr);
+               BOND_AD_INFO(bond).system.sys_priority =
+                       bond->params.ad_actor_sys_prio;
+               if (is_zero_ether_addr(bond->params.ad_actor_system))
+                       BOND_AD_INFO(bond).system.sys_mac_addr =
+                           *((struct mac_addr *)bond->dev->dev_addr);
+               else
+                       BOND_AD_INFO(bond).system.sys_mac_addr =
+                           *((struct mac_addr *)bond->params.ad_actor_system);
 
                /* initialize how many times this module is called in one
                 * second (should be about every 100ms)
@@ -1945,10 +1951,10 @@ void bond_3ad_bind_slave(struct slave *slave)
 
                port->slave = slave;
                port->actor_port_number = SLAVE_AD_INFO(slave)->id;
-               /* key is determined according to the link speed, duplex and user key(which
-                * is yet not supported)
+               /* key is determined according to the link speed, duplex and
+                * user key
                 */
-               port->actor_admin_port_key = 0;
+               port->actor_admin_port_key = bond->params.ad_user_port_key << 6;
                port->actor_admin_port_key |= __get_duplex(port);
                port->actor_admin_port_key |= (__get_link_speed(port) << 1);
                port->actor_oper_port_key = port->actor_admin_port_key;
@@ -1959,6 +1965,8 @@ void bond_3ad_bind_slave(struct slave *slave)
                        port->sm_vars &= ~AD_PORT_LACP_ENABLED;
                /* actor system is the bond's system */
                port->actor_system = BOND_AD_INFO(bond).system.sys_mac_addr;
+               port->actor_system_priority =
+                   BOND_AD_INFO(bond).system.sys_priority;
                /* tx timer(to verify that no more than MAX_TX_IN_SECOND
                 * lacpdu's are sent in one second)
                 */
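Per the key layout in the comment above, the 16-bit port key is now assembled from three fields: the user key in bits 15:6 (hence the `<< 6` shift), the speed in bits 5:1, and duplex in bit 0. A standalone sketch of the composition (helper name illustrative):

    #include <stdint.h>

    /* Compose an 802.3ad port key: user key [15:6], speed [5:1],
     * duplex [0], mirroring the admin-port-key assembly above. */
    static uint16_t ad_port_key(uint16_t user_key, uint8_t speed_bits,
                                uint8_t full_duplex)
    {
            uint16_t key;

            key  = (user_key & 0x3ff) << 6;   /* 10-bit user key */
            key |= (speed_bits & 0x1f) << 1;  /* 5-bit speed field */
            key |= full_duplex & 0x1;         /* duplex flag */
            return key;
    }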
index d5fe5d5f490f3efa70e022fdac9b64bd89311e22..19eb990d398c03affd47a3a50e01ad0571427aa8 100644 (file)
@@ -76,7 +76,7 @@
 #include <net/netns/generic.h>
 #include <net/pkt_sched.h>
 #include <linux/rculist.h>
-#include <net/flow_keys.h>
+#include <net/flow_dissector.h>
 #include <net/switchdev.h>
 #include <net/bonding.h>
 #include <net/bond_3ad.h>
@@ -1015,10 +1015,7 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
        netdev_features_t mask;
        struct slave *slave;
 
-       /* If any slave has the offload feature flag set,
-        * set the offload flag on the bond.
-        */
-       mask = features | NETIF_F_HW_SWITCH_OFFLOAD;
+       mask = features;
 
        features &= ~NETIF_F_ONE_FOR_ALL;
        features |= NETIF_F_ALL_FOR_ALL;
@@ -3054,16 +3051,15 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
        int noff, proto = -1;
 
        if (bond->params.xmit_policy > BOND_XMIT_POLICY_LAYER23)
-               return skb_flow_dissect(skb, fk);
+               return skb_flow_dissect_flow_keys(skb, fk);
 
-       fk->ports = 0;
+       fk->ports.ports = 0;
        noff = skb_network_offset(skb);
        if (skb->protocol == htons(ETH_P_IP)) {
                if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph))))
                        return false;
                iph = ip_hdr(skb);
-               fk->src = iph->saddr;
-               fk->dst = iph->daddr;
+               iph_to_flow_copy_v4addrs(fk, iph);
                noff += iph->ihl << 2;
                if (!ip_is_fragment(iph))
                        proto = iph->protocol;
@@ -3071,15 +3067,14 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
                if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph6))))
                        return false;
                iph6 = ipv6_hdr(skb);
-               fk->src = (__force __be32)ipv6_addr_hash(&iph6->saddr);
-               fk->dst = (__force __be32)ipv6_addr_hash(&iph6->daddr);
+               iph_to_flow_copy_v6addrs(fk, iph6);
                noff += sizeof(*iph6);
                proto = iph6->nexthdr;
        } else {
                return false;
        }
        if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34 && proto >= 0)
-               fk->ports = skb_flow_get_ports(skb, noff, proto);
+               fk->ports.ports = skb_flow_get_ports(skb, noff, proto);
 
        return true;
 }
@@ -3105,8 +3100,9 @@ u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
            bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP23)
                hash = bond_eth_hash(skb);
        else
-               hash = (__force u32)flow.ports;
-       hash ^= (__force u32)flow.dst ^ (__force u32)flow.src;
+               hash = (__force u32)flow.ports.ports;
+       hash ^= (__force u32)flow_get_u32_dst(&flow) ^
+               (__force u32)flow_get_u32_src(&flow);
        hash ^= (hash >> 16);
        hash ^= (hash >> 8);
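With skb_flow_dissect_flow_keys() the dissected flow now lives in structured fields, so the hash folds flow_get_u32_dst()/flow_get_u32_src() and the port pair into one 32-bit value, then mixes it down with shifts. A standalone sketch of the fold (function name illustrative):

    #include <stdint.h>

    /* Fold dissected flow fields into a transmit hash, mirroring the
     * xor-and-shift mixing in bond_xmit_hash() above. */
    static uint32_t xmit_hash(uint32_t ports, uint32_t dst, uint32_t src)
    {
            uint32_t hash = ports;

            hash ^= dst ^ src;
            hash ^= hash >> 16;   /* mix high bits down */
            hash ^= hash >> 8;
            return hash;
    }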
 
@@ -4039,8 +4035,12 @@ static const struct net_device_ops bond_netdev_ops = {
        .ndo_add_slave          = bond_enslave,
        .ndo_del_slave          = bond_release,
        .ndo_fix_features       = bond_fix_features,
-       .ndo_bridge_setlink     = ndo_dflt_netdev_switch_port_bridge_setlink,
-       .ndo_bridge_dellink     = ndo_dflt_netdev_switch_port_bridge_dellink,
+       .ndo_bridge_setlink     = switchdev_port_bridge_setlink,
+       .ndo_bridge_getlink     = switchdev_port_bridge_getlink,
+       .ndo_bridge_dellink     = switchdev_port_bridge_dellink,
+       .ndo_fdb_add            = switchdev_port_fdb_add,
+       .ndo_fdb_del            = switchdev_port_fdb_del,
+       .ndo_fdb_dump           = switchdev_port_fdb_dump,
        .ndo_features_check     = passthru_features_check,
 };
 
@@ -4140,6 +4140,8 @@ static int bond_check_params(struct bond_params *params)
        struct bond_opt_value newval;
        const struct bond_opt_value *valptr;
        int arp_all_targets_value;
+       u16 ad_actor_sys_prio = 0;
+       u16 ad_user_port_key = 0;
 
        /* Convert string parameters. */
        if (mode) {
@@ -4434,6 +4436,24 @@ static int bond_check_params(struct bond_params *params)
                fail_over_mac_value = BOND_FOM_NONE;
        }
 
+       bond_opt_initstr(&newval, "default");
+       valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_ACTOR_SYS_PRIO),
+                               &newval);
+       if (!valptr) {
+               pr_err("Error: No ad_actor_sys_prio default value");
+               return -EINVAL;
+       }
+       ad_actor_sys_prio = valptr->value;
+
+       valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_USER_PORT_KEY),
+                               &newval);
+       if (!valptr) {
+               pr_err("Error: No ad_user_port_key default value");
+               return -EINVAL;
+       }
+       ad_user_port_key = valptr->value;
+
        if (lp_interval == 0) {
                pr_warn("Warning: ip_interval must be between 1 and %d, so it was reset to %d\n",
                        INT_MAX, BOND_ALB_DEFAULT_LP_INTERVAL);
@@ -4462,6 +4482,9 @@ static int bond_check_params(struct bond_params *params)
        params->lp_interval = lp_interval;
        params->packets_per_slave = packets_per_slave;
        params->tlb_dynamic_lb = 1; /* Default value */
+       params->ad_actor_sys_prio = ad_actor_sys_prio;
+       eth_zero_addr(params->ad_actor_system);
+       params->ad_user_port_key = ad_user_port_key;
        if (packets_per_slave > 0) {
                params->reciprocal_packets_per_slave =
                        reciprocal_value(packets_per_slave);
index 7b11243660113484f59d97e2b3ffc9d5b1cb5d98..f7015eb4f8db69714e515100d00b8494fb93512d 100644 (file)
@@ -94,6 +94,10 @@ static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
        [IFLA_BOND_AD_LACP_RATE]        = { .type = NLA_U8 },
        [IFLA_BOND_AD_SELECT]           = { .type = NLA_U8 },
        [IFLA_BOND_AD_INFO]             = { .type = NLA_NESTED },
+       [IFLA_BOND_AD_ACTOR_SYS_PRIO]   = { .type = NLA_U16 },
+       [IFLA_BOND_AD_USER_PORT_KEY]    = { .type = NLA_U16 },
+       [IFLA_BOND_AD_ACTOR_SYSTEM]     = { .type = NLA_BINARY,
+                                           .len  = ETH_ALEN },
 };
 
 static const struct nla_policy bond_slave_policy[IFLA_BOND_SLAVE_MAX + 1] = {
@@ -379,6 +383,36 @@ static int bond_changelink(struct net_device *bond_dev,
                if (err)
                        return err;
        }
+       if (data[IFLA_BOND_AD_ACTOR_SYS_PRIO]) {
+               int actor_sys_prio =
+                       nla_get_u16(data[IFLA_BOND_AD_ACTOR_SYS_PRIO]);
+
+               bond_opt_initval(&newval, actor_sys_prio);
+               err = __bond_opt_set(bond, BOND_OPT_AD_ACTOR_SYS_PRIO, &newval);
+               if (err)
+                       return err;
+       }
+
+       if (data[IFLA_BOND_AD_USER_PORT_KEY]) {
+               int port_key =
+                       nla_get_u16(data[IFLA_BOND_AD_USER_PORT_KEY]);
+
+               bond_opt_initval(&newval, port_key);
+               err = __bond_opt_set(bond, BOND_OPT_AD_USER_PORT_KEY, &newval);
+               if (err)
+                       return err;
+       }
+
+       if (data[IFLA_BOND_AD_ACTOR_SYSTEM]) {
+               if (nla_len(data[IFLA_BOND_AD_ACTOR_SYSTEM]) != ETH_ALEN)
+                       return -EINVAL;
+
+               bond_opt_initval(&newval,
+                                nla_get_be64(data[IFLA_BOND_AD_ACTOR_SYSTEM]));
+               err = __bond_opt_set(bond, BOND_OPT_AD_ACTOR_SYSTEM, &newval);
+               if (err)
+                       return err;
+       }
        return 0;
 }
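Each new attribute follows the same shape: the policy table declares its type and length, and bond_changelink() checks for presence, extracts the value, and funnels it through __bond_opt_set() so netlink and sysfs share one validation path. A standalone sketch of that presence-check-then-apply pattern (types and setter are hypothetical stand-ins):

    #include <stdint.h>
    #include <stddef.h>

    /* Hypothetical parsed attribute slot; NULL means "not supplied". */
    struct u16_attr {
            uint16_t value;
    };

    /* Stand-in for __bond_opt_set(): this sketch accepts any value. */
    static int opt_set_u16(uint16_t val)
    {
            (void)val;
            return 0;
    }

    /* Apply an optional attribute; absent attributes leave the current
     * option value untouched, setter errors abort the change. */
    static int apply_u16_attr(const struct u16_attr *a)
    {
            if (!a)
                    return 0;
            return opt_set_u16(a->value);
    }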
 
@@ -426,6 +460,9 @@ static size_t bond_get_size(const struct net_device *bond_dev)
                nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_ACTOR_KEY */
                nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_PARTNER_KEY*/
                nla_total_size(ETH_ALEN) +    /* IFLA_BOND_AD_INFO_PARTNER_MAC*/
+               nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_ACTOR_SYS_PRIO */
+               nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_USER_PORT_KEY */
+               nla_total_size(ETH_ALEN) + /* IFLA_BOND_AD_ACTOR_SYSTEM */
                0;
 }
 
@@ -551,6 +588,19 @@ static int bond_fill_info(struct sk_buff *skb,
        if (BOND_MODE(bond) == BOND_MODE_8023AD) {
                struct ad_info info;
 
+               if (nla_put_u16(skb, IFLA_BOND_AD_ACTOR_SYS_PRIO,
+                               bond->params.ad_actor_sys_prio))
+                       goto nla_put_failure;
+
+               if (nla_put_u16(skb, IFLA_BOND_AD_USER_PORT_KEY,
+                               bond->params.ad_user_port_key))
+                       goto nla_put_failure;
+
+               if (nla_put(skb, IFLA_BOND_AD_ACTOR_SYSTEM,
+                           sizeof(bond->params.ad_actor_system),
+                           &bond->params.ad_actor_system))
+                       goto nla_put_failure;
+
                if (!bond_3ad_get_active_agg_info(bond, &info)) {
                        struct nlattr *nest;
 
index e8d3c1d35453d1e182cd4b82388978f580abe946..e9c624d54dd4cdf869d1cf04859dd0010c7c7f21 100644 (file)
@@ -70,6 +70,12 @@ static int bond_option_slaves_set(struct bonding *bond,
                                  const struct bond_opt_value *newval);
 static int bond_option_tlb_dynamic_lb_set(struct bonding *bond,
                                  const struct bond_opt_value *newval);
+static int bond_option_ad_actor_sys_prio_set(struct bonding *bond,
+                                            const struct bond_opt_value *newval);
+static int bond_option_ad_actor_system_set(struct bonding *bond,
+                                          const struct bond_opt_value *newval);
+static int bond_option_ad_user_port_key_set(struct bonding *bond,
+                                           const struct bond_opt_value *newval);
 
 
 static const struct bond_opt_value bond_mode_tbl[] = {
@@ -186,6 +192,18 @@ static const struct bond_opt_value bond_tlb_dynamic_lb_tbl[] = {
        { NULL,  -1, 0}
 };
 
+static const struct bond_opt_value bond_ad_actor_sys_prio_tbl[] = {
+       { "minval",  1,     BOND_VALFLAG_MIN},
+       { "maxval",  65535, BOND_VALFLAG_MAX | BOND_VALFLAG_DEFAULT},
+       { NULL,      -1,    0},
+};
+
+static const struct bond_opt_value bond_ad_user_port_key_tbl[] = {
+       { "minval",  0,     BOND_VALFLAG_MIN | BOND_VALFLAG_DEFAULT},
+       { "maxval",  1023,  BOND_VALFLAG_MAX},
+       { NULL,      -1,    0},
+};
+
 static const struct bond_option bond_opts[BOND_OPT_LAST] = {
        [BOND_OPT_MODE] = {
                .id = BOND_OPT_MODE,
@@ -379,6 +397,29 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
                .values = bond_tlb_dynamic_lb_tbl,
                .flags = BOND_OPTFLAG_IFDOWN,
                .set = bond_option_tlb_dynamic_lb_set,
+       },
+       [BOND_OPT_AD_ACTOR_SYS_PRIO] = {
+               .id = BOND_OPT_AD_ACTOR_SYS_PRIO,
+               .name = "ad_actor_sys_prio",
+               .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_8023AD)),
+               .flags = BOND_OPTFLAG_IFDOWN,
+               .values = bond_ad_actor_sys_prio_tbl,
+               .set = bond_option_ad_actor_sys_prio_set,
+       },
+       [BOND_OPT_AD_ACTOR_SYSTEM] = {
+               .id = BOND_OPT_AD_ACTOR_SYSTEM,
+               .name = "ad_actor_system",
+               .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_8023AD)),
+               .flags = BOND_OPTFLAG_RAWVAL | BOND_OPTFLAG_IFDOWN,
+               .set = bond_option_ad_actor_system_set,
+       },
+       [BOND_OPT_AD_USER_PORT_KEY] = {
+               .id = BOND_OPT_AD_USER_PORT_KEY,
+               .name = "ad_user_port_key",
+               .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_8023AD)),
+               .flags = BOND_OPTFLAG_IFDOWN,
+               .values = bond_ad_user_port_key_tbl,
+               .set = bond_option_ad_user_port_key_set,
        }
 };
 
@@ -1349,3 +1390,53 @@ static int bond_option_tlb_dynamic_lb_set(struct bonding *bond,
 
        return 0;
 }
+
+static int bond_option_ad_actor_sys_prio_set(struct bonding *bond,
+                                            const struct bond_opt_value *newval)
+{
+       netdev_info(bond->dev, "Setting ad_actor_sys_prio to %llu\n",
+                   newval->value);
+
+       bond->params.ad_actor_sys_prio = newval->value;
+       return 0;
+}
+
+static int bond_option_ad_actor_system_set(struct bonding *bond,
+                                          const struct bond_opt_value *newval)
+{
+       u8 macaddr[ETH_ALEN];
+       u8 *mac;
+       int i;
+
+       if (newval->string) {
+               i = sscanf(newval->string, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
+                          &macaddr[0], &macaddr[1], &macaddr[2],
+                          &macaddr[3], &macaddr[4], &macaddr[5]);
+               if (i != ETH_ALEN)
+                       goto err;
+               mac = macaddr;
+       } else {
+               mac = (u8 *)&newval->value;
+       }
+
+       if (!is_valid_ether_addr(mac))
+               goto err;
+
+       netdev_info(bond->dev, "Setting ad_actor_system to %pM\n", mac);
+       ether_addr_copy(bond->params.ad_actor_system, mac);
+       return 0;
+
+err:
+       netdev_err(bond->dev, "Invalid MAC address.\n");
+       return -EINVAL;
+}
+
+static int bond_option_ad_user_port_key_set(struct bonding *bond,
+                                           const struct bond_opt_value *newval)
+{
+       netdev_info(bond->dev, "Setting ad_user_port_key to %llu\n",
+                   newval->value);
+
+       bond->params.ad_user_port_key = newval->value;
+       return 0;
+}
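The setter above accepts either a raw 6-byte netlink value or a colon-separated string, parsed with sscanf()'s %hhx conversions; the kernel additionally rejects zero and multicast addresses via is_valid_ether_addr(). A standalone sketch of the string path (function name illustrative):

    #include <stdio.h>
    #include <stdint.h>

    #define ETH_ALEN 6

    /* Parse "aa:bb:cc:dd:ee:ff" into a 6-byte MAC; returns 0 on
     * success, -1 if fewer than six hex octets matched. */
    static int parse_mac(const char *s, uint8_t mac[ETH_ALEN])
    {
            int n = sscanf(s, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
                           &mac[0], &mac[1], &mac[2],
                           &mac[3], &mac[4], &mac[5]);

            return n == ETH_ALEN ? 0 : -1;
    }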
index b20b35acb47d3465063cdde30a1018321b344b56..e7f3047a26df23bcf3b50e4cc1f09a2929240764 100644 (file)
@@ -135,6 +135,10 @@ static void bond_info_show_master(struct seq_file *seq)
                                          bond->params.ad_select);
                seq_printf(seq, "Aggregator selection policy (ad_select): %s\n",
                           optval->string);
+               seq_printf(seq, "System priority: %d\n",
+                          BOND_AD_INFO(bond).system.sys_priority);
+               seq_printf(seq, "System MAC address: %pM\n",
+                          &BOND_AD_INFO(bond).system.sys_mac_addr);
 
                if (__bond_3ad_get_active_agg_info(bond, &ad_info)) {
                        seq_printf(seq, "bond %s has no active aggregator\n",
@@ -198,6 +202,8 @@ static void bond_info_show_slave(struct seq_file *seq,
                        seq_puts(seq, "details actor lacp pdu:\n");
                        seq_printf(seq, "    system priority: %d\n",
                                   port->actor_system_priority);
+                       seq_printf(seq, "    system mac address: %pM\n",
+                                  &port->actor_system);
                        seq_printf(seq, "    port key: %d\n",
                                   port->actor_oper_port_key);
                        seq_printf(seq, "    port priority: %d\n",
@@ -210,6 +216,8 @@ static void bond_info_show_slave(struct seq_file *seq,
                        seq_puts(seq, "details partner lacp pdu:\n");
                        seq_printf(seq, "    system priority: %d\n",
                                   port->partner_oper.system_priority);
+                       seq_printf(seq, "    system mac address: %pM\n",
+                                  &port->partner_oper.system);
                        seq_printf(seq, "    oper key: %d\n",
                                   port->partner_oper.key);
                        seq_printf(seq, "    port priority: %d\n",
index 7e9e151d4d6168821e28ef82e8c197b2c4666b87..143a2abd1c1cda8aae0dc61422368f3400df540f 100644 (file)
@@ -692,6 +692,49 @@ static ssize_t bonding_show_packets_per_slave(struct device *d,
 static DEVICE_ATTR(packets_per_slave, S_IRUGO | S_IWUSR,
                   bonding_show_packets_per_slave, bonding_sysfs_store_option);
 
+static ssize_t bonding_show_ad_actor_sys_prio(struct device *d,
+                                             struct device_attribute *attr,
+                                             char *buf)
+{
+       struct bonding *bond = to_bond(d);
+
+       if (BOND_MODE(bond) == BOND_MODE_8023AD)
+               return sprintf(buf, "%hu\n", bond->params.ad_actor_sys_prio);
+
+       return 0;
+}
+static DEVICE_ATTR(ad_actor_sys_prio, S_IRUGO | S_IWUSR,
+                  bonding_show_ad_actor_sys_prio, bonding_sysfs_store_option);
+
+static ssize_t bonding_show_ad_actor_system(struct device *d,
+                                           struct device_attribute *attr,
+                                           char *buf)
+{
+       struct bonding *bond = to_bond(d);
+
+       if (BOND_MODE(bond) == BOND_MODE_8023AD)
+               return sprintf(buf, "%pM\n", bond->params.ad_actor_system);
+
+       return 0;
+}
+
+static DEVICE_ATTR(ad_actor_system, S_IRUGO | S_IWUSR,
+                  bonding_show_ad_actor_system, bonding_sysfs_store_option);
+
+static ssize_t bonding_show_ad_user_port_key(struct device *d,
+                                            struct device_attribute *attr,
+                                            char *buf)
+{
+       struct bonding *bond = to_bond(d);
+
+       if (BOND_MODE(bond) == BOND_MODE_8023AD)
+               return sprintf(buf, "%hu\n", bond->params.ad_user_port_key);
+
+       return 0;
+}
+static DEVICE_ATTR(ad_user_port_key, S_IRUGO | S_IWUSR,
+                  bonding_show_ad_user_port_key, bonding_sysfs_store_option);
+
 static struct attribute *per_bond_attrs[] = {
        &dev_attr_slaves.attr,
        &dev_attr_mode.attr,
@@ -725,6 +768,9 @@ static struct attribute *per_bond_attrs[] = {
        &dev_attr_lp_interval.attr,
        &dev_attr_packets_per_slave.attr,
        &dev_attr_tlb_dynamic_lb.attr,
+       &dev_attr_ad_actor_sys_prio.attr,
+       &dev_attr_ad_actor_system.attr,
+       &dev_attr_ad_user_port_key.attr,
        NULL,
 };
 
index ad0a7e8c2c2bdf33626824645a8180d8e6d900ff..6201c5a1a8845f2e3e68f3921a2fcae8e4469217 100644 (file)
        (FLEXCAN_CTRL_ERR_BUS | FLEXCAN_CTRL_ERR_STATE)
 
 /* FLEXCAN control register 2 (CTRL2) bits */
-#define FLEXCAN_CRL2_ECRWRE            BIT(29)
-#define FLEXCAN_CRL2_WRMFRZ            BIT(28)
-#define FLEXCAN_CRL2_RFFN(x)           (((x) & 0x0f) << 24)
-#define FLEXCAN_CRL2_TASD(x)           (((x) & 0x1f) << 19)
-#define FLEXCAN_CRL2_MRP               BIT(18)
-#define FLEXCAN_CRL2_RRS               BIT(17)
-#define FLEXCAN_CRL2_EACEN             BIT(16)
+#define FLEXCAN_CTRL2_ECRWRE           BIT(29)
+#define FLEXCAN_CTRL2_WRMFRZ           BIT(28)
+#define FLEXCAN_CTRL2_RFFN(x)          (((x) & 0x0f) << 24)
+#define FLEXCAN_CTRL2_TASD(x)          (((x) & 0x1f) << 19)
+#define FLEXCAN_CTRL2_MRP              BIT(18)
+#define FLEXCAN_CTRL2_RRS              BIT(17)
+#define FLEXCAN_CTRL2_EACEN            BIT(16)
 
 /* FLEXCAN memory error control register (MECR) bits */
 #define FLEXCAN_MECR_ECRWRDIS          BIT(31)
         FLEXCAN_IFLAG_BUF(FLEXCAN_TX_BUF_ID))
 
 /* FLEXCAN message buffers */
-#define FLEXCAN_MB_CNT_CODE(x)         (((x) & 0xf) << 24)
 #define FLEXCAN_MB_CODE_RX_INACTIVE    (0x0 << 24)
 #define FLEXCAN_MB_CODE_RX_EMPTY       (0x4 << 24)
 #define FLEXCAN_MB_CODE_RX_FULL                (0x2 << 24)
  * FLEXCAN hardware feature flags
  *
  * Below is some version info we got:
- *    SOC   Version   IP-Version  Glitch-  [TR]WRN_INT  Memory err
- *                                Filter?   connected?  detection
- *   MX25  FlexCAN2  03.00.00.00     no         no         no
- *   MX28  FlexCAN2  03.00.04.00    yes        yes         no
- *   MX35  FlexCAN2  03.00.00.00     no         no         no
- *   MX53  FlexCAN2  03.00.00.00    yes         no         no
- *   MX6s  FlexCAN3  10.00.12.00    yes        yes         no
- *   VF610 FlexCAN3  ?               no        yes        yes
+ *    SOC   Version   IP-Version  Glitch- [TR]WRN_INT Memory err RTR re-
+ *                                Filter? connected?  detection  ception in MB
+ *   MX25  FlexCAN2  03.00.00.00     no        no         no        no
+ *   MX28  FlexCAN2  03.00.04.00    yes       yes         no        no
+ *   MX35  FlexCAN2  03.00.00.00     no        no         no        no
+ *   MX53  FlexCAN2  03.00.00.00    yes        no         no        no
+ *   MX6s  FlexCAN3  10.00.12.00    yes       yes         no       yes
+ *   VF610 FlexCAN3  ?               no       yes        yes       yes?
  *
  * Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected.
  */
@@ -221,7 +220,7 @@ struct flexcan_regs {
        u32 imask1;             /* 0x28 */
        u32 iflag2;             /* 0x2c */
        u32 iflag1;             /* 0x30 */
-       u32 crl2;               /* 0x34 */
+       u32 ctrl2;              /* 0x34 */
        u32 esr2;               /* 0x38 */
        u32 imeur;              /* 0x3c */
        u32 lrfr;               /* 0x40 */
@@ -230,6 +229,16 @@ struct flexcan_regs {
        u32 rxfir;              /* 0x4c */
        u32 _reserved3[12];     /* 0x50 */
        struct flexcan_mb cantxfg[64];  /* 0x80 */
+       /* FIFO-mode:
+        *                      MB
+        * 0x080...0x08f        0       RX message buffer
+        * 0x090...0x0df        1-5     reserved
+        * 0x0e0...0x0ff        6-7     8 entry ID table
+        *                              (mx25, mx28, mx35, mx53)
+        * 0x0e0...0x2df        6-7..37 8..128 entry ID table
+        *                              size conf'ed via ctrl2::RFFN
+        *                              (mx6, vf610)
+        */
        u32 _reserved4[408];
        u32 mecr;               /* 0xae0 */
        u32 erriar;             /* 0xae4 */
@@ -468,7 +477,7 @@ static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
        struct flexcan_regs __iomem *regs = priv->base;
        struct can_frame *cf = (struct can_frame *)skb->data;
        u32 can_id;
-       u32 ctrl = FLEXCAN_MB_CNT_CODE(0xc) | (cf->can_dlc << 16);
+       u32 ctrl = FLEXCAN_MB_CODE_TX_DATA | (cf->can_dlc << 16);
 
        if (can_dropped_invalid_skb(dev, skb))
                return NETDEV_TX_OK;
@@ -815,7 +824,7 @@ static int flexcan_chip_start(struct net_device *dev)
 {
        struct flexcan_priv *priv = netdev_priv(dev);
        struct flexcan_regs __iomem *regs = priv->base;
-       u32 reg_mcr, reg_ctrl, reg_crl2, reg_mecr;
+       u32 reg_mcr, reg_ctrl, reg_ctrl2, reg_mecr;
        int err, i;
 
        /* enable module */
@@ -918,9 +927,9 @@ static int flexcan_chip_start(struct net_device *dev)
                 * and Correction of Memory Errors" to write to
                 * MECR register
                 */
-               reg_crl2 = flexcan_read(&regs->crl2);
-               reg_crl2 |= FLEXCAN_CRL2_ECRWRE;
-               flexcan_write(reg_crl2, &regs->crl2);
+               reg_ctrl2 = flexcan_read(&regs->ctrl2);
+               reg_ctrl2 |= FLEXCAN_CTRL2_ECRWRE;
+               flexcan_write(reg_ctrl2, &regs->ctrl2);
 
                reg_mecr = flexcan_read(&regs->mecr);
                reg_mecr &= ~FLEXCAN_MECR_ECRWRDIS;
index 4dd183a3643ae3cbe358455f2b099f54dda7870e..c1e85368a198bd898f853c5842c58fbb3b01b04b 100644 (file)
@@ -40,6 +40,7 @@
 #define MSYNC_PEER             0x00            /* ICAN only */
 #define MSYNC_LOCL             0x01            /* host only */
 #define TARGET_RUNNING         0x02
+#define FIRMWARE_STAMP         0x60            /* big endian firmware stamp */
 
 #define MSYNC_RB0              0x01
 #define MSYNC_RB1              0x02
@@ -83,6 +84,7 @@
 #define MSG_COFFREQ            0x42
 #define MSG_CONREQ             0x43
 #define MSG_CCONFREQ           0x47
+#define MSG_LMTS               0xb4
 
 /*
  * Janz ICAN3 CAN Inquiry Message Types
 /* SJA1000 Clock Input */
 #define ICAN3_CAN_CLOCK                8000000
 
+/* Janz ICAN3 firmware types */
+enum ican3_fwtype {
+       ICAN3_FWTYPE_ICANOS,
+       ICAN3_FWTYPE_CAL_CANOPEN,
+};
+
 /* Driver Name */
 #define DRV_NAME "janz-ican3"
 
@@ -215,6 +223,10 @@ struct ican3_dev {
        struct completion buserror_comp;
        struct can_berr_counter bec;
 
+       /* firmware type */
+       enum ican3_fwtype fwtype;
+       char fwinfo[32];
+
        /* old and new style host interface */
        unsigned int iftype;
 
@@ -750,13 +762,61 @@ static int ican3_set_id_filter(struct ican3_dev *mod, bool accept)
  */
 static int ican3_set_bus_state(struct ican3_dev *mod, bool on)
 {
+       struct can_bittiming *bt = &mod->can.bittiming;
        struct ican3_msg msg;
+       u8 btr0, btr1;
+       int res;
 
-       memset(&msg, 0, sizeof(msg));
-       msg.spec = on ? MSG_CONREQ : MSG_COFFREQ;
-       msg.len = cpu_to_le16(0);
+       /* This algorithm was stolen from drivers/net/can/sja1000/sja1000.c.
+        * The bittiming register command for the ICAN3 just sets the bit
+        * timing registers on the SJA1000 chip directly.
+        */
+       btr0 = ((bt->brp - 1) & 0x3f) | (((bt->sjw - 1) & 0x3) << 6);
+       btr1 = ((bt->prop_seg + bt->phase_seg1 - 1) & 0xf) |
+               (((bt->phase_seg2 - 1) & 0x7) << 4);
+       if (mod->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
+               btr1 |= 0x80;
 
-       return ican3_send_msg(mod, &msg);
+       if (mod->fwtype == ICAN3_FWTYPE_ICANOS) {
+               if (on) {
+                       /* set bittiming */
+                       memset(&msg, 0, sizeof(msg));
+                       msg.spec = MSG_CBTRREQ;
+                       msg.len = cpu_to_le16(4);
+                       msg.data[0] = 0x00;
+                       msg.data[1] = 0x00;
+                       msg.data[2] = btr0;
+                       msg.data[3] = btr1;
+
+                       res = ican3_send_msg(mod, &msg);
+                       if (res)
+                               return res;
+               }
+
+               /* can-on/off request */
+               memset(&msg, 0, sizeof(msg));
+               msg.spec = on ? MSG_CONREQ : MSG_COFFREQ;
+               msg.len = cpu_to_le16(0);
+
+               return ican3_send_msg(mod, &msg);
+
+       } else if (mod->fwtype == ICAN3_FWTYPE_CAL_CANOPEN) {
+               memset(&msg, 0, sizeof(msg));
+               msg.spec = MSG_LMTS;
+               if (on) {
+                       msg.len = cpu_to_le16(4);
+                       msg.data[0] = 0;
+                       msg.data[1] = 0;
+                       msg.data[2] = btr0;
+                       msg.data[3] = btr1;
+               } else {
+                       msg.len = cpu_to_le16(2);
+                       msg.data[0] = 1;
+                       msg.data[1] = 0;
+               }
+
+               return ican3_send_msg(mod, &msg);
+       }
+       return -ENOTSUPP;
 }
 
 static int ican3_set_termination(struct ican3_dev *mod, bool on)
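The bit-timing math now lives in ican3_set_bus_state(): BTR0 packs the prescaler and sync jump width, BTR1 the phase segments plus the triple-sampling flag, exactly the layout the SJA1000 core expects. A standalone sketch of the packing (struct name illustrative):

    #include <stdint.h>

    /* Subset of the CAN bit-timing parameters consumed by the SJA1000. */
    struct bittiming {
            uint32_t brp;         /* bit-rate prescaler, >= 1 */
            uint32_t sjw;         /* sync jump width, 1..4 */
            uint32_t prop_seg;
            uint32_t phase_seg1;
            uint32_t phase_seg2;
    };

    /* Pack SJA1000 BTR0/BTR1 as in ican3_set_bus_state() above. */
    static void pack_btr(const struct bittiming *bt, int triple_sample,
                         uint8_t *btr0, uint8_t *btr1)
    {
            *btr0 = ((bt->brp - 1) & 0x3f) | (((bt->sjw - 1) & 0x3) << 6);
            *btr1 = ((bt->prop_seg + bt->phase_seg1 - 1) & 0xf) |
                    (((bt->phase_seg2 - 1) & 0x7) << 4);
            if (triple_sample)
                    *btr1 |= 0x80;   /* sample each bit three times */
    }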
@@ -1402,7 +1462,7 @@ static int ican3_reset_module(struct ican3_dev *mod)
                        return 0;
 
                msleep(10);
-       } while (time_before(jiffies, start + HZ / 4));
+       } while (time_before(jiffies, start + HZ / 2));
 
        netdev_err(mod->ndev, "failed to reset CAN module\n");
        return -ETIMEDOUT;
@@ -1427,6 +1487,17 @@ static int ican3_startup_module(struct ican3_dev *mod)
                return ret;
        }
 
+       /* detect firmware */
+       memcpy_fromio(mod->fwinfo, mod->dpm + FIRMWARE_STAMP, sizeof(mod->fwinfo) - 1);
+       if (strncmp(mod->fwinfo, "JANZ-ICAN3", 10)) {
+               netdev_err(mod->ndev, "ICAN3 not detected (found %s)\n", mod->fwinfo);
+               return -ENODEV;
+       }
+       if (strstr(mod->fwinfo, "CAL/CANopen"))
+               mod->fwtype = ICAN3_FWTYPE_CAL_CANOPEN;
+       else
+               mod->fwtype = ICAN3_FWTYPE_ICANOS;
+
        /* re-enable interrupts so we can send messages */
        iowrite8(1 << mod->num, &mod->ctrl->int_enable);
 
@@ -1615,36 +1686,6 @@ static const struct can_bittiming_const ican3_bittiming_const = {
        .brp_inc = 1,
 };
 
-/*
- * This routine was stolen from drivers/net/can/sja1000/sja1000.c
- *
- * The bittiming register command for the ICAN3 just sets the bit timing
- * registers on the SJA1000 chip directly
- */
-static int ican3_set_bittiming(struct net_device *ndev)
-{
-       struct ican3_dev *mod = netdev_priv(ndev);
-       struct can_bittiming *bt = &mod->can.bittiming;
-       struct ican3_msg msg;
-       u8 btr0, btr1;
-
-       btr0 = ((bt->brp - 1) & 0x3f) | (((bt->sjw - 1) & 0x3) << 6);
-       btr1 = ((bt->prop_seg + bt->phase_seg1 - 1) & 0xf) |
-               (((bt->phase_seg2 - 1) & 0x7) << 4);
-       if (mod->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
-               btr1 |= 0x80;
-
-       memset(&msg, 0, sizeof(msg));
-       msg.spec = MSG_CBTRREQ;
-       msg.len = cpu_to_le16(4);
-       msg.data[0] = 0x00;
-       msg.data[1] = 0x00;
-       msg.data[2] = btr0;
-       msg.data[3] = btr1;
-
-       return ican3_send_msg(mod, &msg);
-}
-
 static int ican3_set_mode(struct net_device *ndev, enum can_mode mode)
 {
        struct ican3_dev *mod = netdev_priv(ndev);
@@ -1730,11 +1771,22 @@ static ssize_t ican3_sysfs_set_term(struct device *dev,
        return count;
 }
 
+static ssize_t ican3_sysfs_show_fwinfo(struct device *dev,
+                                      struct device_attribute *attr,
+                                      char *buf)
+{
+       struct ican3_dev *mod = netdev_priv(to_net_dev(dev));
+
+       return scnprintf(buf, PAGE_SIZE, "%s\n", mod->fwinfo);
+}
+
 static DEVICE_ATTR(termination, S_IWUSR | S_IRUGO, ican3_sysfs_show_term,
                                                   ican3_sysfs_set_term);
+static DEVICE_ATTR(fwinfo, S_IRUGO, ican3_sysfs_show_fwinfo, NULL);
 
 static struct attribute *ican3_sysfs_attrs[] = {
        &dev_attr_termination.attr,
+       &dev_attr_fwinfo.attr,
        NULL,
 };
 
@@ -1794,7 +1846,6 @@ static int ican3_probe(struct platform_device *pdev)
 
        mod->can.clock.freq = ICAN3_CAN_CLOCK;
        mod->can.bittiming_const = &ican3_bittiming_const;
-       mod->can.do_set_bittiming = ican3_set_bittiming;
        mod->can.do_set_mode = ican3_set_mode;
        mod->can.do_get_berr_counter = ican3_get_berr_counter;
        mod->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES
@@ -1866,7 +1917,7 @@ static int ican3_probe(struct platform_device *pdev)
                goto out_free_irq;
        }
 
-       dev_info(dev, "module %d: registered CAN device\n", pdata->modno);
+       netdev_info(mod->ndev, "module %d: registered CAN device\n", pdata->modno);
        return 0;
 
 out_free_irq:
index 18550c7ebe6f1beb225ba9c56717d14aacf45896..7ad0a4d8e475f519b0f1a6618091c3c93cb60427 100644 (file)
@@ -37,22 +37,22 @@ config NET_DSA_MV88E6123_61_65
          ethernet switch chips.
 
 config NET_DSA_MV88E6171
-       tristate "Marvell 88E6171/6172 ethernet switch chip support"
+       tristate "Marvell 88E6171/6175/6350/6351 ethernet switch chip support"
        depends on NET_DSA
        select NET_DSA_MV88E6XXX
        select NET_DSA_TAG_EDSA
        ---help---
-         This enables support for the Marvell 88E6171/6172 ethernet switch
-         chips.
+         This enables support for the Marvell 88E6171/6175/6350/6351
+         ethernet switch chips.
 
 config NET_DSA_MV88E6352
-       tristate "Marvell 88E6176/88E6352 ethernet switch chip support"
+       tristate "Marvell 88E6172/88E6176/88E6352 ethernet switch chip support"
        depends on NET_DSA
        select NET_DSA_MV88E6XXX
        select NET_DSA_TAG_EDSA
        ---help---
-         This enables support for the Marvell 88E6176 and 88E6352 ethernet
-         switch chips.
+         This enables support for the Marvell 88E6172, 88E6176 and 88E6352
+         ethernet switch chips.
 
 config NET_DSA_BCM_SF2
        tristate "Broadcom Starfighter 2 Ethernet switch support"
index cedb572bf25af58d202a8550b003753f00219f3b..103fde3da476d77936d7abe7f1440fab6ff1b7fe 100644 (file)
@@ -911,6 +911,13 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
         */
        if (port == 7) {
                status->link = priv->port_sts[port].link;
+               /* For MoCA interfaces, also force a link down notification
+                * since some versions of the user-space daemon (mocad) use
+                * cmd->autoneg to force the link, which messes up the PHY
+                * state machine and makes it go into PHY_FORCING state
+                * instead.
+                */
+               if (!status->link)
+                       netif_carrier_off(ds->ports[port]);
                status->duplex = 1;
        } else {
                status->link = 1;
index b4af6d5aff7cc970773f55c3f9a8fedbe3c06f1f..71a29a7ce538d70193c2813bfd10dc4947237f24 100644 (file)
@@ -54,192 +54,40 @@ static char *mv88e6123_61_65_probe(struct device *host_dev, int sw_addr)
 
 static int mv88e6123_61_65_setup_global(struct dsa_switch *ds)
 {
+       u32 upstream_port = dsa_upstream_port(ds);
        int ret;
-       int i;
+       u32 reg;
+
+       ret = mv88e6xxx_setup_global(ds);
+       if (ret)
+               return ret;
 
        /* Disable the PHY polling unit (since there won't be any
         * external PHYs to poll), don't discard packets with
         * excessive collisions, and mask all interrupt sources.
         */
-       REG_WRITE(REG_GLOBAL, 0x04, 0x0000);
-
-       /* Set the default address aging time to 5 minutes, and
-        * enable address learn messages to be sent to all message
-        * ports.
-        */
-       REG_WRITE(REG_GLOBAL, 0x0a, 0x0148);
-
-       /* Configure the priority mapping registers. */
-       ret = mv88e6xxx_config_prio(ds);
-       if (ret < 0)
-               return ret;
+       REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, 0x0000);
 
        /* Configure the upstream port, and configure the upstream
         * port as the port to which ingress and egress monitor frames
         * are to be sent.
         */
-       REG_WRITE(REG_GLOBAL, 0x1a, (dsa_upstream_port(ds) * 0x1110));
+       reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT |
+               upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT |
+               upstream_port << GLOBAL_MONITOR_CONTROL_ARP_SHIFT;
+       REG_WRITE(REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg);
 
        /* Disable remote management for now, and set the switch's
         * DSA device number.
         */
-       REG_WRITE(REG_GLOBAL, 0x1c, ds->index & 0x1f);
-
-       /* Send all frames with destination addresses matching
-        * 01:80:c2:00:00:2x to the CPU port.
-        */
-       REG_WRITE(REG_GLOBAL2, 0x02, 0xffff);
-
-       /* Send all frames with destination addresses matching
-        * 01:80:c2:00:00:0x to the CPU port.
-        */
-       REG_WRITE(REG_GLOBAL2, 0x03, 0xffff);
-
-       /* Disable the loopback filter, disable flow control
-        * messages, disable flood broadcast override, disable
-        * removing of provider tags, disable ATU age violation
-        * interrupts, disable tag flow control, force flow
-        * control priority to the highest, and send all special
-        * multicast frames to the CPU at the highest priority.
-        */
-       REG_WRITE(REG_GLOBAL2, 0x05, 0x00ff);
-
-       /* Program the DSA routing table. */
-       for (i = 0; i < 32; i++) {
-               int nexthop;
-
-               nexthop = 0x1f;
-               if (i != ds->index && i < ds->dst->pd->nr_chips)
-                       nexthop = ds->pd->rtable[i] & 0x1f;
-
-               REG_WRITE(REG_GLOBAL2, 0x06, 0x8000 | (i << 8) | nexthop);
-       }
-
-       /* Clear all trunk masks. */
-       for (i = 0; i < 8; i++)
-               REG_WRITE(REG_GLOBAL2, 0x07, 0x8000 | (i << 12) | 0xff);
-
-       /* Clear all trunk mappings. */
-       for (i = 0; i < 16; i++)
-               REG_WRITE(REG_GLOBAL2, 0x08, 0x8000 | (i << 11));
-
-       /* Disable ingress rate limiting by resetting all ingress
-        * rate limit registers to their initial state.
-        */
-       for (i = 0; i < 6; i++)
-               REG_WRITE(REG_GLOBAL2, 0x09, 0x9000 | (i << 8));
-
-       /* Initialise cross-chip port VLAN table to reset defaults. */
-       REG_WRITE(REG_GLOBAL2, 0x0b, 0x9000);
-
-       /* Clear the priority override table. */
-       for (i = 0; i < 16; i++)
-               REG_WRITE(REG_GLOBAL2, 0x0f, 0x8000 | (i << 8));
-
-       /* @@@ initialise AVB (22/23) watchdog (27) sdet (29) registers */
+       REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL_2, ds->index & 0x1f);
 
        return 0;
 }
 
-static int mv88e6123_61_65_setup_port(struct dsa_switch *ds, int p)
-{
-       int addr = REG_PORT(p);
-       u16 val;
-
-       /* MAC Forcing register: don't force link, speed, duplex
-        * or flow control state to any particular values on physical
-        * ports, but force the CPU port and all DSA ports to 1000 Mb/s
-        * full duplex.
-        */
-       if (dsa_is_cpu_port(ds, p) || ds->dsa_port_mask & (1 << p))
-               REG_WRITE(addr, 0x01, 0x003e);
-       else
-               REG_WRITE(addr, 0x01, 0x0003);
-
-       /* Do not limit the period of time that this port can be
-        * paused for by the remote end or the period of time that
-        * this port can pause the remote end.
-        */
-       REG_WRITE(addr, 0x02, 0x0000);
-
-       /* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
-        * disable Header mode, enable IGMP/MLD snooping, disable VLAN
-        * tunneling, determine priority by looking at 802.1p and IP
-        * priority fields (IP prio has precedence), and set STP state
-        * to Forwarding.
-        *
-        * If this is the CPU link, use DSA or EDSA tagging depending
-        * on which tagging mode was configured.
-        *
-        * If this is a link to another switch, use DSA tagging mode.
-        *
-        * If this is the upstream port for this switch, enable
-        * forwarding of unknown unicasts and multicasts.
-        */
-       val = 0x0433;
-       if (dsa_is_cpu_port(ds, p)) {
-               if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
-                       val |= 0x3300;
-               else
-                       val |= 0x0100;
-       }
-       if (ds->dsa_port_mask & (1 << p))
-               val |= 0x0100;
-       if (p == dsa_upstream_port(ds))
-               val |= 0x000c;
-       REG_WRITE(addr, 0x04, val);
-
-       /* Port Control 2: don't force a good FCS, set the maximum
-        * frame size to 10240 bytes, don't let the switch add or
-        * strip 802.1q tags, don't discard tagged or untagged frames
-        * on this port, do a destination address lookup on all
-        * received packets as usual, disable ARP mirroring and don't
-        * send a copy of all transmitted/received frames on this port
-        * to the CPU.
-        */
-       REG_WRITE(addr, 0x08, 0x2080);
-
-       /* Egress rate control: disable egress rate control. */
-       REG_WRITE(addr, 0x09, 0x0001);
-
-       /* Egress rate control 2: disable egress rate control. */
-       REG_WRITE(addr, 0x0a, 0x0000);
-
-       /* Port Association Vector: when learning source addresses
-        * of packets, add the address to the address database using
-        * a port bitmap that has only the bit for this port set and
-        * the other bits clear.
-        */
-       REG_WRITE(addr, 0x0b, 1 << p);
-
-       /* Port ATU control: disable limiting the number of address
-        * database entries that this port is allowed to use.
-        */
-       REG_WRITE(addr, 0x0c, 0x0000);
-
-       /* Priority Override: disable DA, SA and VTU priority override. */
-       REG_WRITE(addr, 0x0d, 0x0000);
-
-       /* Port Ethertype: use the Ethertype DSA Ethertype value. */
-       REG_WRITE(addr, 0x0f, ETH_P_EDSA);
-
-       /* Tag Remap: use an identity 802.1p prio -> switch prio
-        * mapping.
-        */
-       REG_WRITE(addr, 0x18, 0x3210);
-
-       /* Tag Remap 2: use an identity 802.1p prio -> switch prio
-        * mapping.
-        */
-       REG_WRITE(addr, 0x19, 0x7654);
-
-       return mv88e6xxx_setup_port_common(ds, p);
-}
-
 static int mv88e6123_61_65_setup(struct dsa_switch *ds)
 {
        struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-       int i;
        int ret;
 
        ret = mv88e6xxx_setup_common(ds);
@@ -262,19 +110,11 @@ static int mv88e6123_61_65_setup(struct dsa_switch *ds)
        if (ret < 0)
                return ret;
 
-       /* @@@ initialise vtu and atu */
-
        ret = mv88e6123_61_65_setup_global(ds);
        if (ret < 0)
                return ret;
 
-       for (i = 0; i < ps->num_ports; i++) {
-               ret = mv88e6123_61_65_setup_port(ds, i);
-               if (ret < 0)
-                       return ret;
-       }
-
-       return 0;
+       return mv88e6xxx_setup_ports(ds);
 }
 
 struct dsa_switch_driver mv88e6123_61_65_switch_driver = {
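
Across these drivers, the old magic write of dsa_upstream_port(ds) * 0x1110
becomes explicit shifts into GLOBAL_MONITOR_CONTROL. The two encodings are
identical because multiplying a nibble-sized port number by 0x1110 simply
replicates it into bits 15:12, 11:8 and 7:4. A self-contained check, with
the shift values assumed from that encoding (they match the 0x1110 layout):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT	12	/* assumed */
#define GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT	8	/* assumed */
#define GLOBAL_MONITOR_CONTROL_ARP_SHIFT	4	/* assumed */

int main(void)
{
	uint16_t p;

	for (p = 0; p < 16; p++) {
		uint16_t old = p * 0x1110;
		uint16_t new = p << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT |
			       p << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT |
			       p << GLOBAL_MONITOR_CONTROL_ARP_SHIFT;

		assert(old == new);	/* e.g. p = 5 gives 0x5550 both ways */
	}
	printf("monitor-control encodings agree for all 16 ports\n");
	return 0;
}
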
index e54824fa0d959f919586c7ec07cbb678a40a031a..32f4a08e9bc99dce2fa59af8cae8b34475bb24d6 100644 (file)
@@ -37,6 +37,8 @@ static char *mv88e6131_probe(struct device *host_dev, int sw_addr)
                        return "Marvell 88E6131 (B2)";
                if (ret_masked == PORT_SWITCH_ID_6131)
                        return "Marvell 88E6131";
+               if (ret_masked == PORT_SWITCH_ID_6185)
+                       return "Marvell 88E6185";
        }
 
        return NULL;
@@ -44,186 +46,62 @@ static char *mv88e6131_probe(struct device *host_dev, int sw_addr)
 
 static int mv88e6131_setup_global(struct dsa_switch *ds)
 {
+       u32 upstream_port = dsa_upstream_port(ds);
        int ret;
-       int i;
+       u32 reg;
+
+       ret = mv88e6xxx_setup_global(ds);
+       if (ret)
+               return ret;
 
        /* Enable the PHY polling unit, don't discard packets with
         * excessive collisions, use a weighted fair queueing scheme
         * to arbitrate between packet queues, set the maximum frame
         * size to 1632, and mask all interrupt sources.
         */
-       REG_WRITE(REG_GLOBAL, 0x04, 0x4400);
-
-       /* Set the default address aging time to 5 minutes, and
-        * enable address learn messages to be sent to all message
-        * ports.
-        */
-       REG_WRITE(REG_GLOBAL, 0x0a, 0x0148);
-
-       /* Configure the priority mapping registers. */
-       ret = mv88e6xxx_config_prio(ds);
-       if (ret < 0)
-               return ret;
+       REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL,
+                 GLOBAL_CONTROL_PPU_ENABLE | GLOBAL_CONTROL_MAX_FRAME_1632);
 
        /* Set the VLAN ethertype to 0x8100. */
-       REG_WRITE(REG_GLOBAL, 0x19, 0x8100);
+       REG_WRITE(REG_GLOBAL, GLOBAL_CORE_TAG_TYPE, 0x8100);
 
        /* Disable ARP mirroring, and configure the upstream port as
         * the port to which ingress and egress monitor frames are to
         * be sent.
         */
-       REG_WRITE(REG_GLOBAL, 0x1a, (dsa_upstream_port(ds) * 0x1100) | 0x00f0);
+       reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT |
+               upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT |
+               GLOBAL_MONITOR_CONTROL_ARP_DISABLED;
+       REG_WRITE(REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg);
 
        /* Disable cascade port functionality unless this device
         * is used in a cascade configuration, and set the switch's
         * DSA device number.
         */
        if (ds->dst->pd->nr_chips > 1)
-               REG_WRITE(REG_GLOBAL, 0x1c, 0xf000 | (ds->index & 0x1f));
+               REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL_2,
+                         GLOBAL_CONTROL_2_MULTIPLE_CASCADE |
+                         (ds->index & 0x1f));
        else
-               REG_WRITE(REG_GLOBAL, 0x1c, 0xe000 | (ds->index & 0x1f));
-
-       /* Send all frames with destination addresses matching
-        * 01:80:c2:00:00:0x to the CPU port.
-        */
-       REG_WRITE(REG_GLOBAL2, 0x03, 0xffff);
-
-       /* Ignore removed tag data on doubly tagged packets, disable
-        * flow control messages, force flow control priority to the
-        * highest, and send all special multicast frames to the CPU
-        * port at the highest priority.
-        */
-       REG_WRITE(REG_GLOBAL2, 0x05, 0x00ff);
-
-       /* Program the DSA routing table. */
-       for (i = 0; i < 32; i++) {
-               int nexthop;
-
-               nexthop = 0x1f;
-               if (ds->pd->rtable &&
-                   i != ds->index && i < ds->dst->pd->nr_chips)
-                       nexthop = ds->pd->rtable[i] & 0x1f;
-
-               REG_WRITE(REG_GLOBAL2, 0x06, 0x8000 | (i << 8) | nexthop);
-       }
-
-       /* Clear all trunk masks. */
-       for (i = 0; i < 8; i++)
-               REG_WRITE(REG_GLOBAL2, 0x07, 0x8000 | (i << 12) | 0x7ff);
-
-       /* Clear all trunk mappings. */
-       for (i = 0; i < 16; i++)
-               REG_WRITE(REG_GLOBAL2, 0x08, 0x8000 | (i << 11));
+               REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL_2,
+                         GLOBAL_CONTROL_2_NO_CASCADE |
+                         (ds->index & 0x1f));
 
        /* Force the priority of IGMP/MLD snoop frames and ARP frames
         * to the highest setting.
         */
-       REG_WRITE(REG_GLOBAL2, 0x0f, 0x00ff);
+       REG_WRITE(REG_GLOBAL2, GLOBAL2_PRIO_OVERRIDE,
+                 GLOBAL2_PRIO_OVERRIDE_FORCE_SNOOP |
+                 7 << GLOBAL2_PRIO_OVERRIDE_SNOOP_SHIFT |
+                 GLOBAL2_PRIO_OVERRIDE_FORCE_ARP |
+                 7 << GLOBAL2_PRIO_OVERRIDE_ARP_SHIFT);
 
        return 0;
 }
 
-static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
-{
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-       int addr = REG_PORT(p);
-       u16 val;
-
-       /* MAC Forcing register: don't force link, speed, duplex
-        * or flow control state to any particular values on physical
-        * ports, but force the CPU port and all DSA ports to 1000 Mb/s
-        * (100 Mb/s on 6085) full duplex.
-        */
-       if (dsa_is_cpu_port(ds, p) || ds->dsa_port_mask & (1 << p))
-               if (ps->id == PORT_SWITCH_ID_6085)
-                       REG_WRITE(addr, 0x01, 0x003d); /* 100 Mb/s */
-               else
-                       REG_WRITE(addr, 0x01, 0x003e); /* 1000 Mb/s */
-       else
-               REG_WRITE(addr, 0x01, 0x0003);
-
-       /* Port Control: disable Core Tag, disable Drop-on-Lock,
-        * transmit frames unmodified, disable Header mode,
-        * enable IGMP/MLD snoop, disable DoubleTag, disable VLAN
-        * tunneling, determine priority by looking at 802.1p and
-        * IP priority fields (IP prio has precedence), and set STP
-        * state to Forwarding.
-        *
-        * If this is the upstream port for this switch, enable
-        * forwarding of unknown unicasts, and enable DSA tagging
-        * mode.
-        *
-        * If this is the link to another switch, use DSA tagging
-        * mode, but do not enable forwarding of unknown unicasts.
-        */
-       val = 0x0433;
-       if (p == dsa_upstream_port(ds)) {
-               val |= 0x0104;
-               /* On 6085, unknown multicast forward is controlled
-                * here rather than in Port Control 2 register.
-                */
-               if (ps->id == PORT_SWITCH_ID_6085)
-                       val |= 0x0008;
-       }
-       if (ds->dsa_port_mask & (1 << p))
-               val |= 0x0100;
-       REG_WRITE(addr, 0x04, val);
-
-       /* Port Control 2: don't force a good FCS, don't use
-        * VLAN-based, source address-based or destination
-        * address-based priority overrides, don't let the switch
-        * add or strip 802.1q tags, don't discard tagged or
-        * untagged frames on this port, do a destination address
-        * lookup on received packets as usual, don't send a copy
-        * of all transmitted/received frames on this port to the
-        * CPU, and configure the upstream port number.
-        *
-        * If this is the upstream port for this switch, enable
-        * forwarding of unknown multicast addresses.
-        */
-       if (ps->id == PORT_SWITCH_ID_6085)
-               /* on 6085, bits 3:0 are reserved, bit 6 control ARP
-                * mirroring, and multicast forward is handled in
-                * Port Control register.
-                */
-               REG_WRITE(addr, 0x08, 0x0080);
-       else {
-               val = 0x0080 | dsa_upstream_port(ds);
-               if (p == dsa_upstream_port(ds))
-                       val |= 0x0040;
-               REG_WRITE(addr, 0x08, val);
-       }
-
-       /* Rate Control: disable ingress rate limiting. */
-       REG_WRITE(addr, 0x09, 0x0000);
-
-       /* Rate Control 2: disable egress rate limiting. */
-       REG_WRITE(addr, 0x0a, 0x0000);
-
-       /* Port Association Vector: when learning source addresses
-        * of packets, add the address to the address database using
-        * a port bitmap that has only the bit for this port set and
-        * the other bits clear.
-        */
-       REG_WRITE(addr, 0x0b, 1 << p);
-
-       /* Tag Remap: use an identity 802.1p prio -> switch prio
-        * mapping.
-        */
-       REG_WRITE(addr, 0x18, 0x3210);
-
-       /* Tag Remap 2: use an identity 802.1p prio -> switch prio
-        * mapping.
-        */
-       REG_WRITE(addr, 0x19, 0x7654);
-
-       return mv88e6xxx_setup_port_common(ds, p);
-}
-
 static int mv88e6131_setup(struct dsa_switch *ds)
 {
        struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-       int i;
        int ret;
 
        ret = mv88e6xxx_setup_common(ds);
@@ -234,6 +112,7 @@ static int mv88e6131_setup(struct dsa_switch *ds)
 
        switch (ps->id) {
        case PORT_SWITCH_ID_6085:
+       case PORT_SWITCH_ID_6185:
                ps->num_ports = 10;
                break;
        case PORT_SWITCH_ID_6095:
@@ -251,19 +130,11 @@ static int mv88e6131_setup(struct dsa_switch *ds)
        if (ret < 0)
                return ret;
 
-       /* @@@ initialise vtu and atu */
-
        ret = mv88e6131_setup_global(ds);
        if (ret < 0)
                return ret;
 
-       for (i = 0; i < ps->num_ports; i++) {
-               ret = mv88e6131_setup_port(ds, i);
-               if (ret < 0)
-                       return ret;
-       }
-
-       return 0;
+       return mv88e6xxx_setup_ports(ds);
 }
 
 static int mv88e6131_port_to_phy_addr(struct dsa_switch *ds, int port)
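
The GLOBAL2_PRIO_OVERRIDE write above spells out the old raw 0x00ff in named
pieces. Assuming the bit layout implied by that constant (force-snoop in bit
7, snoop priority in bits 6:4, force-ARP in bit 3, ARP priority in bits 2:0),
the equivalence is easy to verify:

#include <assert.h>
#include <stdint.h>

/* Bit positions assumed from the old raw 0x00ff value */
#define GLOBAL2_PRIO_OVERRIDE_FORCE_SNOOP	(1 << 7)
#define GLOBAL2_PRIO_OVERRIDE_SNOOP_SHIFT	4
#define GLOBAL2_PRIO_OVERRIDE_FORCE_ARP		(1 << 3)
#define GLOBAL2_PRIO_OVERRIDE_ARP_SHIFT		0

int main(void)
{
	uint16_t reg = GLOBAL2_PRIO_OVERRIDE_FORCE_SNOOP |
		       7 << GLOBAL2_PRIO_OVERRIDE_SNOOP_SHIFT |
		       GLOBAL2_PRIO_OVERRIDE_FORCE_ARP |
		       7 << GLOBAL2_PRIO_OVERRIDE_ARP_SHIFT;

	assert(reg == 0x00ff);	/* the value the old code wrote */
	return 0;
}
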
index 9104efea0e3e8289803c53348e79eded3d80e50c..1c7808495a9dd8198936e6db4ce23fd7f2d29ed3 100644 (file)
@@ -1,4 +1,4 @@
-/* net/dsa/mv88e6171.c - Marvell 88e6171/8826172 switch chip support
+/* net/dsa/mv88e6171.c - Marvell 88e6171 switch chip support
  * Copyright (c) 2008-2009 Marvell Semiconductor
  * Copyright (c) 2014 Claudio Leite <leitec@staticky.com>
  *
@@ -29,8 +29,12 @@ static char *mv88e6171_probe(struct device *host_dev, int sw_addr)
        if (ret >= 0) {
                if ((ret & 0xfff0) == PORT_SWITCH_ID_6171)
                        return "Marvell 88E6171";
-               if ((ret & 0xfff0) == PORT_SWITCH_ID_6172)
-                       return "Marvell 88E6172";
+               if ((ret & 0xfff0) == PORT_SWITCH_ID_6175)
+                       return "Marvell 88E6175";
+               if ((ret & 0xfff0) == PORT_SWITCH_ID_6350)
+                       return "Marvell 88E6350";
+               if ((ret & 0xfff0) == PORT_SWITCH_ID_6351)
+                       return "Marvell 88E6351";
        }
 
        return NULL;
@@ -38,196 +42,41 @@ static char *mv88e6171_probe(struct device *host_dev, int sw_addr)
 
 static int mv88e6171_setup_global(struct dsa_switch *ds)
 {
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+       u32 upstream_port = dsa_upstream_port(ds);
        int ret;
-       int i;
+       u32 reg;
+
+       ret = mv88e6xxx_setup_global(ds);
+       if (ret)
+               return ret;
 
        /* Discard packets with excessive collisions, mask all
         * interrupt sources, enable PPU.
         */
-       REG_WRITE(REG_GLOBAL, 0x04, 0x6000);
-
-       /* Set the default address aging time to 5 minutes, and
-        * enable address learn messages to be sent to all message
-        * ports.
-        */
-       REG_WRITE(REG_GLOBAL, 0x0a, 0x0148);
-
-       /* Configure the priority mapping registers. */
-       ret = mv88e6xxx_config_prio(ds);
-       if (ret < 0)
-               return ret;
+       REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL,
+                 GLOBAL_CONTROL_PPU_ENABLE | GLOBAL_CONTROL_DISCARD_EXCESS);
 
        /* Configure the upstream port, and configure the upstream
         * port as the port to which ingress and egress monitor frames
         * are to be sent.
         */
-       if (REG_READ(REG_PORT(0), 0x03) == 0x1710)
-               REG_WRITE(REG_GLOBAL, 0x1a, (dsa_upstream_port(ds) * 0x1111));
-       else
-               REG_WRITE(REG_GLOBAL, 0x1a, (dsa_upstream_port(ds) * 0x1110));
+       reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT |
+               upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT |
+               upstream_port << GLOBAL_MONITOR_CONTROL_ARP_SHIFT |
+               upstream_port << GLOBAL_MONITOR_CONTROL_MIRROR_SHIFT;
+       REG_WRITE(REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg);
 
        /* Disable remote management for now, and set the switch's
         * DSA device number.
         */
-       REG_WRITE(REG_GLOBAL, 0x1c, ds->index & 0x1f);
-
-       /* Send all frames with destination addresses matching
-        * 01:80:c2:00:00:2x to the CPU port.
-        */
-       REG_WRITE(REG_GLOBAL2, 0x02, 0xffff);
-
-       /* Send all frames with destination addresses matching
-        * 01:80:c2:00:00:0x to the CPU port.
-        */
-       REG_WRITE(REG_GLOBAL2, 0x03, 0xffff);
-
-       /* Disable the loopback filter, disable flow control
-        * messages, disable flood broadcast override, disable
-        * removing of provider tags, disable ATU age violation
-        * interrupts, disable tag flow control, force flow
-        * control priority to the highest, and send all special
-        * multicast frames to the CPU at the highest priority.
-        */
-       REG_WRITE(REG_GLOBAL2, 0x05, 0x00ff);
-
-       /* Program the DSA routing table. */
-       for (i = 0; i < 32; i++) {
-               int nexthop;
-
-               nexthop = 0x1f;
-               if (i != ds->index && i < ds->dst->pd->nr_chips)
-                       nexthop = ds->pd->rtable[i] & 0x1f;
-
-               REG_WRITE(REG_GLOBAL2, 0x06, 0x8000 | (i << 8) | nexthop);
-       }
-
-       /* Clear all trunk masks. */
-       for (i = 0; i < ps->num_ports; i++)
-               REG_WRITE(REG_GLOBAL2, 0x07, 0x8000 | (i << 12) | 0xff);
-
-       /* Clear all trunk mappings. */
-       for (i = 0; i < 16; i++)
-               REG_WRITE(REG_GLOBAL2, 0x08, 0x8000 | (i << 11));
-
-       /* Disable ingress rate limiting by resetting all ingress
-        * rate limit registers to their initial state.
-        */
-       for (i = 0; i < 6; i++)
-               REG_WRITE(REG_GLOBAL2, 0x09, 0x9000 | (i << 8));
-
-       /* Initialise cross-chip port VLAN table to reset defaults. */
-       REG_WRITE(REG_GLOBAL2, 0x0b, 0x9000);
-
-       /* Clear the priority override table. */
-       for (i = 0; i < 16; i++)
-               REG_WRITE(REG_GLOBAL2, 0x0f, 0x8000 | (i << 8));
-
-       /* @@@ initialise AVB (22/23) watchdog (27) sdet (29) registers */
+       REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL_2, ds->index & 0x1f);
 
        return 0;
 }
 
-static int mv88e6171_setup_port(struct dsa_switch *ds, int p)
-{
-       int addr = REG_PORT(p);
-       u16 val;
-
-       /* MAC Forcing register: don't force link, speed, duplex
-        * or flow control state to any particular values on physical
-        * ports, but force the CPU port and all DSA ports to 1000 Mb/s
-        * full duplex.
-        */
-       val = REG_READ(addr, 0x01);
-       if (dsa_is_cpu_port(ds, p) || ds->dsa_port_mask & (1 << p))
-               REG_WRITE(addr, 0x01, val | 0x003e);
-       else
-               REG_WRITE(addr, 0x01, val | 0x0003);
-
-       /* Do not limit the period of time that this port can be
-        * paused for by the remote end or the period of time that
-        * this port can pause the remote end.
-        */
-       REG_WRITE(addr, 0x02, 0x0000);
-
-       /* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
-        * disable Header mode, enable IGMP/MLD snooping, disable VLAN
-        * tunneling, determine priority by looking at 802.1p and IP
-        * priority fields (IP prio has precedence), and set STP state
-        * to Forwarding.
-        *
-        * If this is the CPU link, use DSA or EDSA tagging depending
-        * on which tagging mode was configured.
-        *
-        * If this is a link to another switch, use DSA tagging mode.
-        *
-        * If this is the upstream port for this switch, enable
-        * forwarding of unknown unicasts and multicasts.
-        */
-       val = 0x0433;
-       if (dsa_is_cpu_port(ds, p)) {
-               if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
-                       val |= 0x3300;
-               else
-                       val |= 0x0100;
-       }
-       if (ds->dsa_port_mask & (1 << p))
-               val |= 0x0100;
-       if (p == dsa_upstream_port(ds))
-               val |= 0x000c;
-       REG_WRITE(addr, 0x04, val);
-
-       /* Port Control 2: don't force a good FCS, set the maximum
-        * frame size to 10240 bytes, don't let the switch add or
-        * strip 802.1q tags, don't discard tagged or untagged frames
-        * on this port, do a destination address lookup on all
-        * received packets as usual, disable ARP mirroring and don't
-        * send a copy of all transmitted/received frames on this port
-        * to the CPU.
-        */
-       REG_WRITE(addr, 0x08, 0x2080);
-
-       /* Egress rate control: disable egress rate control. */
-       REG_WRITE(addr, 0x09, 0x0001);
-
-       /* Egress rate control 2: disable egress rate control. */
-       REG_WRITE(addr, 0x0a, 0x0000);
-
-       /* Port Association Vector: when learning source addresses
-        * of packets, add the address to the address database using
-        * a port bitmap that has only the bit for this port set and
-        * the other bits clear.
-        */
-       REG_WRITE(addr, 0x0b, 1 << p);
-
-       /* Port ATU control: disable limiting the number of address
-        * database entries that this port is allowed to use.
-        */
-       REG_WRITE(addr, 0x0c, 0x0000);
-
-       /* Priority Override: disable DA, SA and VTU priority override. */
-       REG_WRITE(addr, 0x0d, 0x0000);
-
-       /* Port Ethertype: use the Ethertype DSA Ethertype value. */
-       REG_WRITE(addr, 0x0f, ETH_P_EDSA);
-
-       /* Tag Remap: use an identity 802.1p prio -> switch prio
-        * mapping.
-        */
-       REG_WRITE(addr, 0x18, 0x3210);
-
-       /* Tag Remap 2: use an identity 802.1p prio -> switch prio
-        * mapping.
-        */
-       REG_WRITE(addr, 0x19, 0x7654);
-
-       return mv88e6xxx_setup_port_common(ds, p);
-}
-
 static int mv88e6171_setup(struct dsa_switch *ds)
 {
        struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-       int i;
        int ret;
 
        ret = mv88e6xxx_setup_common(ds);
@@ -240,44 +89,11 @@ static int mv88e6171_setup(struct dsa_switch *ds)
        if (ret < 0)
                return ret;
 
-       /* @@@ initialise vtu and atu */
-
        ret = mv88e6171_setup_global(ds);
        if (ret < 0)
                return ret;
 
-       for (i = 0; i < ps->num_ports; i++) {
-               if (!(dsa_is_cpu_port(ds, i) || ds->phys_port_mask & (1 << i)))
-                       continue;
-
-               ret = mv88e6171_setup_port(ds, i);
-               if (ret < 0)
-                       return ret;
-       }
-
-       return 0;
-}
-
-static int mv88e6171_get_eee(struct dsa_switch *ds, int port,
-                            struct ethtool_eee *e)
-{
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
-       if (ps->id == PORT_SWITCH_ID_6172)
-               return mv88e6xxx_get_eee(ds, port, e);
-
-       return -EOPNOTSUPP;
-}
-
-static int mv88e6171_set_eee(struct dsa_switch *ds, int port,
-                            struct phy_device *phydev, struct ethtool_eee *e)
-{
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
-       if (ps->id == PORT_SWITCH_ID_6172)
-               return mv88e6xxx_set_eee(ds, port, phydev, e);
-
-       return -EOPNOTSUPP;
+       return mv88e6xxx_setup_ports(ds);
 }
 
 struct dsa_switch_driver mv88e6171_switch_driver = {
@@ -292,8 +108,6 @@ struct dsa_switch_driver mv88e6171_switch_driver = {
        .get_strings            = mv88e6xxx_get_strings,
        .get_ethtool_stats      = mv88e6xxx_get_ethtool_stats,
        .get_sset_count         = mv88e6xxx_get_sset_count,
-       .set_eee                = mv88e6171_set_eee,
-       .get_eee                = mv88e6171_get_eee,
 #ifdef CONFIG_NET_DSA_HWMON
        .get_temp               = mv88e6xxx_get_temp,
 #endif
@@ -308,4 +122,6 @@ struct dsa_switch_driver mv88e6171_switch_driver = {
 };
 
 MODULE_ALIAS("platform:mv88e6171");
-MODULE_ALIAS("platform:mv88e6172");
+MODULE_ALIAS("platform:mv88e6175");
+MODULE_ALIAS("platform:mv88e6350");
+MODULE_ALIAS("platform:mv88e6351");
index 126c11b81e756ec232106de72583904638ccb024..632815c10a401f7bd873e077a262528b73ceed7d 100644 (file)
@@ -32,6 +32,8 @@ static char *mv88e6352_probe(struct device *host_dev, int sw_addr)
 
        ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), PORT_SWITCH_ID);
        if (ret >= 0) {
+               if ((ret & 0xfff0) == PORT_SWITCH_ID_6172)
+                       return "Marvell 88E6172";
                if ((ret & 0xfff0) == PORT_SWITCH_ID_6176)
                        return "Marvell 88E6176";
                if (ret == PORT_SWITCH_ID_6352_A0)
@@ -47,187 +49,37 @@ static char *mv88e6352_probe(struct device *host_dev, int sw_addr)
 
 static int mv88e6352_setup_global(struct dsa_switch *ds)
 {
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+       u32 upstream_port = dsa_upstream_port(ds);
        int ret;
-       int i;
+       u32 reg;
+
+       ret = mv88e6xxx_setup_global(ds);
+       if (ret)
+               return ret;
 
        /* Discard packets with excessive collisions,
         * mask all interrupt sources, enable PPU (bit 14, undocumented).
         */
-       REG_WRITE(REG_GLOBAL, 0x04, 0x6000);
-
-       /* Set the default address aging time to 5 minutes, and
-        * enable address learn messages to be sent to all message
-        * ports.
-        */
-       REG_WRITE(REG_GLOBAL, 0x0a, 0x0148);
-
-       /* Configure the priority mapping registers. */
-       ret = mv88e6xxx_config_prio(ds);
-       if (ret < 0)
-               return ret;
+       REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL,
+                 GLOBAL_CONTROL_PPU_ENABLE | GLOBAL_CONTROL_DISCARD_EXCESS);
 
        /* Configure the upstream port, and configure the upstream
         * port as the port to which ingress and egress monitor frames
         * are to be sent.
         */
-       REG_WRITE(REG_GLOBAL, 0x1a, (dsa_upstream_port(ds) * 0x1110));
+       reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT |
+               upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT |
+               upstream_port << GLOBAL_MONITOR_CONTROL_ARP_SHIFT;
+       REG_WRITE(REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg);
 
        /* Disable remote management for now, and set the switch's
         * DSA device number.
         */
        REG_WRITE(REG_GLOBAL, 0x1c, ds->index & 0x1f);
 
-       /* Send all frames with destination addresses matching
-        * 01:80:c2:00:00:2x to the CPU port.
-        */
-       REG_WRITE(REG_GLOBAL2, 0x02, 0xffff);
-
-       /* Send all frames with destination addresses matching
-        * 01:80:c2:00:00:0x to the CPU port.
-        */
-       REG_WRITE(REG_GLOBAL2, 0x03, 0xffff);
-
-       /* Disable the loopback filter, disable flow control
-        * messages, disable flood broadcast override, disable
-        * removing of provider tags, disable ATU age violation
-        * interrupts, disable tag flow control, force flow
-        * control priority to the highest, and send all special
-        * multicast frames to the CPU at the highest priority.
-        */
-       REG_WRITE(REG_GLOBAL2, 0x05, 0x00ff);
-
-       /* Program the DSA routing table. */
-       for (i = 0; i < 32; i++) {
-               int nexthop = 0x1f;
-
-               if (i != ds->index && i < ds->dst->pd->nr_chips)
-                       nexthop = ds->pd->rtable[i] & 0x1f;
-
-               REG_WRITE(REG_GLOBAL2, 0x06, 0x8000 | (i << 8) | nexthop);
-       }
-
-       /* Clear all trunk masks. */
-       for (i = 0; i < 8; i++)
-               REG_WRITE(REG_GLOBAL2, 0x07, 0x8000 | (i << 12) | 0x7f);
-
-       /* Clear all trunk mappings. */
-       for (i = 0; i < 16; i++)
-               REG_WRITE(REG_GLOBAL2, 0x08, 0x8000 | (i << 11));
-
-       /* Disable ingress rate limiting by resetting all ingress
-        * rate limit registers to their initial state.
-        */
-       for (i = 0; i < ps->num_ports; i++)
-               REG_WRITE(REG_GLOBAL2, 0x09, 0x9000 | (i << 8));
-
-       /* Initialise cross-chip port VLAN table to reset defaults. */
-       REG_WRITE(REG_GLOBAL2, 0x0b, 0x9000);
-
-       /* Clear the priority override table. */
-       for (i = 0; i < 16; i++)
-               REG_WRITE(REG_GLOBAL2, 0x0f, 0x8000 | (i << 8));
-
-       /* @@@ initialise AVB (22/23) watchdog (27) sdet (29) registers */
-
        return 0;
 }
 
-static int mv88e6352_setup_port(struct dsa_switch *ds, int p)
-{
-       int addr = REG_PORT(p);
-       u16 val;
-
-       /* MAC Forcing register: don't force link, speed, duplex
-        * or flow control state to any particular values on physical
-        * ports, but force the CPU port and all DSA ports to 1000 Mb/s
-        * full duplex.
-        */
-       if (dsa_is_cpu_port(ds, p) || ds->dsa_port_mask & (1 << p))
-               REG_WRITE(addr, 0x01, 0x003e);
-       else
-               REG_WRITE(addr, 0x01, 0x0003);
-
-       /* Do not limit the period of time that this port can be
-        * paused for by the remote end or the period of time that
-        * this port can pause the remote end.
-        */
-       REG_WRITE(addr, 0x02, 0x0000);
-
-       /* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
-        * disable Header mode, enable IGMP/MLD snooping, disable VLAN
-        * tunneling, determine priority by looking at 802.1p and IP
-        * priority fields (IP prio has precedence), and set STP state
-        * to Forwarding.
-        *
-        * If this is the CPU link, use DSA or EDSA tagging depending
-        * on which tagging mode was configured.
-        *
-        * If this is a link to another switch, use DSA tagging mode.
-        *
-        * If this is the upstream port for this switch, enable
-        * forwarding of unknown unicasts and multicasts.
-        */
-       val = 0x0433;
-       if (dsa_is_cpu_port(ds, p)) {
-               if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
-                       val |= 0x3300;
-               else
-                       val |= 0x0100;
-       }
-       if (ds->dsa_port_mask & (1 << p))
-               val |= 0x0100;
-       if (p == dsa_upstream_port(ds))
-               val |= 0x000c;
-       REG_WRITE(addr, 0x04, val);
-
-       /* Port Control 2: don't force a good FCS, set the maximum
-        * frame size to 10240 bytes, don't let the switch add or
-        * strip 802.1q tags, don't discard tagged or untagged frames
-        * on this port, do a destination address lookup on all
-        * received packets as usual, disable ARP mirroring and don't
-        * send a copy of all transmitted/received frames on this port
-        * to the CPU.
-        */
-       REG_WRITE(addr, 0x08, 0x2080);
-
-       /* Egress rate control: disable egress rate control. */
-       REG_WRITE(addr, 0x09, 0x0001);
-
-       /* Egress rate control 2: disable egress rate control. */
-       REG_WRITE(addr, 0x0a, 0x0000);
-
-       /* Port Association Vector: when learning source addresses
-        * of packets, add the address to the address database using
-        * a port bitmap that has only the bit for this port set and
-        * the other bits clear.
-        */
-       REG_WRITE(addr, 0x0b, 1 << p);
-
-       /* Port ATU control: disable limiting the number of address
-        * database entries that this port is allowed to use.
-        */
-       REG_WRITE(addr, 0x0c, 0x0000);
-
-       /* Priority Override: disable DA, SA and VTU priority override. */
-       REG_WRITE(addr, 0x0d, 0x0000);
-
-       /* Port Ethertype: use the Ethertype DSA Ethertype value. */
-       REG_WRITE(addr, 0x0f, ETH_P_EDSA);
-
-       /* Tag Remap: use an identity 802.1p prio -> switch prio
-        * mapping.
-        */
-       REG_WRITE(addr, 0x18, 0x3210);
-
-       /* Tag Remap 2: use an identity 802.1p prio -> switch prio
-        * mapping.
-        */
-       REG_WRITE(addr, 0x19, 0x7654);
-
-       return mv88e6xxx_setup_port_common(ds, p);
-}
-
 #ifdef CONFIG_NET_DSA_HWMON
 
 static int mv88e6352_get_temp(struct dsa_switch *ds, int *temp)
@@ -292,7 +144,6 @@ static int mv88e6352_setup(struct dsa_switch *ds)
 {
        struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        int ret;
-       int i;
 
        ret = mv88e6xxx_setup_common(ds);
        if (ret < 0)
@@ -306,19 +157,11 @@ static int mv88e6352_setup(struct dsa_switch *ds)
        if (ret < 0)
                return ret;
 
-       /* @@@ initialise vtu and atu */
-
        ret = mv88e6352_setup_global(ds);
        if (ret < 0)
                return ret;
 
-       for (i = 0; i < ps->num_ports; i++) {
-               ret = mv88e6352_setup_port(ds, i);
-               if (ret < 0)
-                       return ret;
-       }
-
-       return 0;
+       return mv88e6xxx_setup_ports(ds);
 }
 
 static int mv88e6352_read_eeprom_word(struct dsa_switch *ds, int addr)
@@ -552,3 +395,4 @@ struct dsa_switch_driver mv88e6352_switch_driver = {
 };
 
 MODULE_ALIAS("platform:mv88e6352");
+MODULE_ALIAS("platform:mv88e6172");
index cf309aa92802623ec0532b56b1dd10a88a234af1..39530fa142b012098971b523cf1194684440d075 100644 (file)
 #include <net/dsa.h>
 #include "mv88e6xxx.h"
 
+/* MDIO bus access can be nested in the case of PHYs connected to the
+ * internal MDIO bus of the switch, which is accessed via the MDIO bus
+ * of the Ethernet interface. Avoid lockdep false positives by using
+ * mutex_lock_nested().
+ */
+static int mv88e6xxx_mdiobus_read(struct mii_bus *bus, int addr, u32 regnum)
+{
+       int ret;
+
+       mutex_lock_nested(&bus->mdio_lock, SINGLE_DEPTH_NESTING);
+       ret = bus->read(bus, addr, regnum);
+       mutex_unlock(&bus->mdio_lock);
+
+       return ret;
+}
+
+static int mv88e6xxx_mdiobus_write(struct mii_bus *bus, int addr, u32 regnum,
+                                  u16 val)
+{
+       int ret;
+
+       mutex_lock_nested(&bus->mdio_lock, SINGLE_DEPTH_NESTING);
+       ret = bus->write(bus, addr, regnum, val);
+       mutex_unlock(&bus->mdio_lock);
+
+       return ret;
+}
+
 /* If the switch's ADDR[4:0] strap pins are strapped to zero, it will
  * use all 32 SMI bus addresses on its SMI bus, and all switch registers
  * will be directly accessible on some {device address,register address}
@@ -33,7 +61,7 @@ static int mv88e6xxx_reg_wait_ready(struct mii_bus *bus, int sw_addr)
        int i;
 
        for (i = 0; i < 16; i++) {
-               ret = mdiobus_read(bus, sw_addr, SMI_CMD);
+               ret = mv88e6xxx_mdiobus_read(bus, sw_addr, SMI_CMD);
                if (ret < 0)
                        return ret;
 
@@ -49,7 +77,7 @@ int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg)
        int ret;
 
        if (sw_addr == 0)
-               return mdiobus_read(bus, addr, reg);
+               return mv88e6xxx_mdiobus_read(bus, addr, reg);
 
        /* Wait for the bus to become free. */
        ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
@@ -57,8 +85,8 @@ int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg)
                return ret;
 
        /* Transmit the read command. */
-       ret = mdiobus_write(bus, sw_addr, SMI_CMD,
-                           SMI_CMD_OP_22_READ | (addr << 5) | reg);
+       ret = mv88e6xxx_mdiobus_write(bus, sw_addr, SMI_CMD,
+                                     SMI_CMD_OP_22_READ | (addr << 5) | reg);
        if (ret < 0)
                return ret;
 
@@ -68,7 +96,7 @@ int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg)
                return ret;
 
        /* Read the data. */
-       ret = mdiobus_read(bus, sw_addr, SMI_DATA);
+       ret = mv88e6xxx_mdiobus_read(bus, sw_addr, SMI_DATA);
        if (ret < 0)
                return ret;
 
@@ -112,7 +140,7 @@ int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
        int ret;
 
        if (sw_addr == 0)
-               return mdiobus_write(bus, addr, reg, val);
+               return mv88e6xxx_mdiobus_write(bus, addr, reg, val);
 
        /* Wait for the bus to become free. */
        ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
@@ -120,13 +148,13 @@ int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
                return ret;
 
        /* Transmit the data to write. */
-       ret = mdiobus_write(bus, sw_addr, SMI_DATA, val);
+       ret = mv88e6xxx_mdiobus_write(bus, sw_addr, SMI_DATA, val);
        if (ret < 0)
                return ret;
 
        /* Transmit the write command. */
-       ret = mdiobus_write(bus, sw_addr, SMI_CMD,
-                           SMI_CMD_OP_22_WRITE | (addr << 5) | reg);
+       ret = mv88e6xxx_mdiobus_write(bus, sw_addr, SMI_CMD,
+                                     SMI_CMD_OP_22_WRITE | (addr << 5) | reg);
        if (ret < 0)
                return ret;
 
@@ -165,24 +193,6 @@ int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
        return ret;
 }
 
-int mv88e6xxx_config_prio(struct dsa_switch *ds)
-{
-       /* Configure the IP ToS mapping registers. */
-       REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_0, 0x0000);
-       REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_1, 0x0000);
-       REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_2, 0x5555);
-       REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_3, 0x5555);
-       REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_4, 0xaaaa);
-       REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_5, 0xaaaa);
-       REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_6, 0xffff);
-       REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_7, 0xffff);
-
-       /* Configure the IEEE 802.1p priority mapping register. */
-       REG_WRITE(REG_GLOBAL, GLOBAL_IEEE_PRI, 0xfa41);
-
-       return 0;
-}
-
 int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr)
 {
        REG_WRITE(REG_GLOBAL, GLOBAL_MAC_01, (addr[0] << 8) | addr[1]);
@@ -217,20 +227,20 @@ int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr)
        return 0;
 }
 
-/* Must be called with phy mutex held */
+/* Must be called with SMI mutex held */
 static int _mv88e6xxx_phy_read(struct dsa_switch *ds, int addr, int regnum)
 {
        if (addr >= 0)
-               return mv88e6xxx_reg_read(ds, addr, regnum);
+               return _mv88e6xxx_reg_read(ds, addr, regnum);
        return 0xffff;
 }
 
-/* Must be called with phy mutex held */
+/* Must be called with SMI mutex held */
 static int _mv88e6xxx_phy_write(struct dsa_switch *ds, int addr, int regnum,
                                u16 val)
 {
        if (addr >= 0)
-               return mv88e6xxx_reg_write(ds, addr, regnum, val);
+               return _mv88e6xxx_reg_write(ds, addr, regnum, val);
        return 0;
 }
 
@@ -434,26 +444,113 @@ void mv88e6xxx_poll_link(struct dsa_switch *ds)
        }
 }
 
+static bool mv88e6xxx_6065_family(struct dsa_switch *ds)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+       switch (ps->id) {
+       case PORT_SWITCH_ID_6031:
+       case PORT_SWITCH_ID_6061:
+       case PORT_SWITCH_ID_6035:
+       case PORT_SWITCH_ID_6065:
+               return true;
+       }
+       return false;
+}
+
+static bool mv88e6xxx_6095_family(struct dsa_switch *ds)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+       switch (ps->id) {
+       case PORT_SWITCH_ID_6092:
+       case PORT_SWITCH_ID_6095:
+               return true;
+       }
+       return false;
+}
+
+static bool mv88e6xxx_6097_family(struct dsa_switch *ds)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+       switch (ps->id) {
+       case PORT_SWITCH_ID_6046:
+       case PORT_SWITCH_ID_6085:
+       case PORT_SWITCH_ID_6096:
+       case PORT_SWITCH_ID_6097:
+               return true;
+       }
+       return false;
+}
+
+static bool mv88e6xxx_6165_family(struct dsa_switch *ds)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+       switch (ps->id) {
+       case PORT_SWITCH_ID_6123:
+       case PORT_SWITCH_ID_6161:
+       case PORT_SWITCH_ID_6165:
+               return true;
+       }
+       return false;
+}
+
+static bool mv88e6xxx_6185_family(struct dsa_switch *ds)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+       switch (ps->id) {
+       case PORT_SWITCH_ID_6121:
+       case PORT_SWITCH_ID_6122:
+       case PORT_SWITCH_ID_6152:
+       case PORT_SWITCH_ID_6155:
+       case PORT_SWITCH_ID_6182:
+       case PORT_SWITCH_ID_6185:
+       case PORT_SWITCH_ID_6108:
+       case PORT_SWITCH_ID_6131:
+               return true;
+       }
+       return false;
+}
+
+static bool mv88e6xxx_6351_family(struct dsa_switch *ds)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+       switch (ps->id) {
+       case PORT_SWITCH_ID_6171:
+       case PORT_SWITCH_ID_6175:
+       case PORT_SWITCH_ID_6350:
+       case PORT_SWITCH_ID_6351:
+               return true;
+       }
+       return false;
+}
+
 static bool mv88e6xxx_6352_family(struct dsa_switch *ds)
 {
        struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 
        switch (ps->id) {
-       case PORT_SWITCH_ID_6352:
        case PORT_SWITCH_ID_6172:
        case PORT_SWITCH_ID_6176:
+       case PORT_SWITCH_ID_6240:
+       case PORT_SWITCH_ID_6352:
                return true;
        }
        return false;
 }
 
-static int mv88e6xxx_stats_wait(struct dsa_switch *ds)
+/* Must be called with SMI mutex held */
+static int _mv88e6xxx_stats_wait(struct dsa_switch *ds)
 {
        int ret;
        int i;
 
        for (i = 0; i < 10; i++) {
-               ret = REG_READ(REG_GLOBAL, GLOBAL_STATS_OP);
+               ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_OP);
                if ((ret & GLOBAL_STATS_OP_BUSY) == 0)
                        return 0;
        }
@@ -461,7 +558,8 @@ static int mv88e6xxx_stats_wait(struct dsa_switch *ds)
        return -ETIMEDOUT;
 }
 
-static int mv88e6xxx_stats_snapshot(struct dsa_switch *ds, int port)
+/* Must be called with SMI mutex held */
+static int _mv88e6xxx_stats_snapshot(struct dsa_switch *ds, int port)
 {
        int ret;
 
@@ -469,42 +567,45 @@ static int mv88e6xxx_stats_snapshot(struct dsa_switch *ds, int port)
                port = (port + 1) << 5;
 
        /* Snapshot the hardware statistics counters for this port. */
-       REG_WRITE(REG_GLOBAL, GLOBAL_STATS_OP,
-                 GLOBAL_STATS_OP_CAPTURE_PORT |
-                 GLOBAL_STATS_OP_HIST_RX_TX | port);
+       ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_STATS_OP,
+                                  GLOBAL_STATS_OP_CAPTURE_PORT |
+                                  GLOBAL_STATS_OP_HIST_RX_TX | port);
+       if (ret < 0)
+               return ret;
 
        /* Wait for the snapshotting to complete. */
-       ret = mv88e6xxx_stats_wait(ds);
+       ret = _mv88e6xxx_stats_wait(ds);
        if (ret < 0)
                return ret;
 
        return 0;
 }
 
-static void mv88e6xxx_stats_read(struct dsa_switch *ds, int stat, u32 *val)
+/* Must be called with SMI mutex held */
+static void _mv88e6xxx_stats_read(struct dsa_switch *ds, int stat, u32 *val)
 {
        u32 _val;
        int ret;
 
        *val = 0;
 
-       ret = mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_STATS_OP,
-                                 GLOBAL_STATS_OP_READ_CAPTURED |
-                                 GLOBAL_STATS_OP_HIST_RX_TX | stat);
+       ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_STATS_OP,
+                                  GLOBAL_STATS_OP_READ_CAPTURED |
+                                  GLOBAL_STATS_OP_HIST_RX_TX | stat);
        if (ret < 0)
                return;
 
-       ret = mv88e6xxx_stats_wait(ds);
+       ret = _mv88e6xxx_stats_wait(ds);
        if (ret < 0)
                return;
 
-       ret = mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_32);
+       ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_32);
        if (ret < 0)
                return;
 
        _val = ret << 16;
 
-       ret = mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_01);
+       ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_01);
        if (ret < 0)
                return;
 
@@ -587,11 +688,11 @@ static void _mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
        int ret;
        int i;
 
-       mutex_lock(&ps->stats_mutex);
+       mutex_lock(&ps->smi_mutex);
 
-       ret = mv88e6xxx_stats_snapshot(ds, port);
+       ret = _mv88e6xxx_stats_snapshot(ds, port);
        if (ret < 0) {
-               mutex_unlock(&ps->stats_mutex);
+               mutex_unlock(&ps->smi_mutex);
                return;
        }
 
@@ -602,14 +703,14 @@ static void _mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
                u32 high = 0;
 
                if (s->reg >= 0x100) {
-                       ret = mv88e6xxx_reg_read(ds, REG_PORT(port),
-                                                s->reg - 0x100);
+                       ret = _mv88e6xxx_reg_read(ds, REG_PORT(port),
+                                                 s->reg - 0x100);
                        if (ret < 0)
                                goto error;
                        low = ret;
                        if (s->sizeof_stat == 4) {
-                               ret = mv88e6xxx_reg_read(ds, REG_PORT(port),
-                                                        s->reg - 0x100 + 1);
+                               ret = _mv88e6xxx_reg_read(ds, REG_PORT(port),
+                                                         s->reg - 0x100 + 1);
                                if (ret < 0)
                                        goto error;
                                high = ret;
@@ -617,14 +718,14 @@ static void _mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
                        data[i] = (((u64)high) << 16) | low;
                        continue;
                }
-               mv88e6xxx_stats_read(ds, s->reg, &low);
+               _mv88e6xxx_stats_read(ds, s->reg, &low);
                if (s->sizeof_stat == 8)
-                       mv88e6xxx_stats_read(ds, s->reg + 1, &high);
+                       _mv88e6xxx_stats_read(ds, s->reg + 1, &high);
 
                data[i] = (((u64)high) << 32) | low;
        }
 error:
-       mutex_unlock(&ps->stats_mutex);
+       mutex_unlock(&ps->smi_mutex);
 }
 
 /* All the statistics in the table */
@@ -694,7 +795,7 @@ int  mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
 
        *temp = 0;
 
-       mutex_lock(&ps->phy_mutex);
+       mutex_lock(&ps->smi_mutex);
 
        ret = _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x6);
        if (ret < 0)
@@ -727,19 +828,23 @@ int  mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
 
 error:
        _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x0);
-       mutex_unlock(&ps->phy_mutex);
+       mutex_unlock(&ps->smi_mutex);
        return ret;
 }
 #endif /* CONFIG_NET_DSA_HWMON */
 
-static int mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset, u16 mask)
+/* Must be called with SMI lock held */
+static int _mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset,
+                          u16 mask)
 {
        unsigned long timeout = jiffies + HZ / 10;
 
        while (time_before(jiffies, timeout)) {
                int ret;
 
-               ret = REG_READ(reg, offset);
+               ret = _mv88e6xxx_reg_read(ds, reg, offset);
+               if (ret < 0)
+                       return ret;
                if (!(ret & mask))
                        return 0;
 
@@ -748,10 +853,22 @@ static int mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset, u16 mask)
        return -ETIMEDOUT;
 }
 
-int mv88e6xxx_phy_wait(struct dsa_switch *ds)
+static int mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset, u16 mask)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+       int ret;
+
+       mutex_lock(&ps->smi_mutex);
+       ret = _mv88e6xxx_wait(ds, reg, offset, mask);
+       mutex_unlock(&ps->smi_mutex);
+
+       return ret;
+}
+
+static int _mv88e6xxx_phy_wait(struct dsa_switch *ds)
 {
-       return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
-                             GLOBAL2_SMI_OP_BUSY);
+       return _mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
+                              GLOBAL2_SMI_OP_BUSY);
 }
 
 int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds)
@@ -766,25 +883,6 @@ int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds)
                              GLOBAL2_EEPROM_OP_BUSY);
 }
 
-/* Must be called with SMI lock held */
-static int _mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset, u16 mask)
-{
-       unsigned long timeout = jiffies + HZ / 10;
-
-       while (time_before(jiffies, timeout)) {
-               int ret;
-
-               ret = _mv88e6xxx_reg_read(ds, reg, offset);
-               if (ret < 0)
-                       return ret;
-               if (!(ret & mask))
-                       return 0;
-
-               usleep_range(1000, 2000);
-       }
-       return -ETIMEDOUT;
-}
-
 /* Must be called with SMI lock held */
 static int _mv88e6xxx_atu_wait(struct dsa_switch *ds)
 {
@@ -792,31 +890,40 @@ static int _mv88e6xxx_atu_wait(struct dsa_switch *ds)
                               GLOBAL_ATU_OP_BUSY);
 }
 
-/* Must be called with phy mutex held */
+/* Must be called with SMI mutex held */
 static int _mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr,
                                        int regnum)
 {
        int ret;
 
-       REG_WRITE(REG_GLOBAL2, GLOBAL2_SMI_OP,
-                 GLOBAL2_SMI_OP_22_READ | (addr << 5) | regnum);
+       ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
+                                  GLOBAL2_SMI_OP_22_READ | (addr << 5) |
+                                  regnum);
+       if (ret < 0)
+               return ret;
 
-       ret = mv88e6xxx_phy_wait(ds);
+       ret = _mv88e6xxx_phy_wait(ds);
        if (ret < 0)
                return ret;
 
-       return REG_READ(REG_GLOBAL2, GLOBAL2_SMI_DATA);
+       return _mv88e6xxx_reg_read(ds, REG_GLOBAL2, GLOBAL2_SMI_DATA);
 }
 
-/* Must be called with phy mutex held */
+/* Must be called with SMI mutex held */
 static int _mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int addr,
                                         int regnum, u16 val)
 {
-       REG_WRITE(REG_GLOBAL2, GLOBAL2_SMI_DATA, val);
-       REG_WRITE(REG_GLOBAL2, GLOBAL2_SMI_OP,
-                 GLOBAL2_SMI_OP_22_WRITE | (addr << 5) | regnum);
+       int ret;
 
-       return mv88e6xxx_phy_wait(ds);
+       ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_DATA, val);
+       if (ret < 0)
+               return ret;
+
+       ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
+                                  GLOBAL2_SMI_OP_22_WRITE | (addr << 5) |
+                                  regnum);
+       if (ret < 0)
+               return ret;
+
+       return _mv88e6xxx_phy_wait(ds);
 }
 
 int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
@@ -824,7 +931,7 @@ int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
        struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        int reg;
 
-       mutex_lock(&ps->phy_mutex);
+       mutex_lock(&ps->smi_mutex);
 
        reg = _mv88e6xxx_phy_read_indirect(ds, port, 16);
        if (reg < 0)
@@ -833,7 +940,7 @@ int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
        e->eee_enabled = !!(reg & 0x0200);
        e->tx_lpi_enabled = !!(reg & 0x0100);
 
-       reg = mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_STATUS);
+       reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_STATUS);
        if (reg < 0)
                goto out;
 
@@ -841,7 +948,7 @@ int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
        reg = 0;
 
 out:
-       mutex_unlock(&ps->phy_mutex);
+       mutex_unlock(&ps->smi_mutex);
        return reg;
 }
 
@@ -852,7 +959,7 @@ int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
        int reg;
        int ret;
 
-       mutex_lock(&ps->phy_mutex);
+       mutex_lock(&ps->smi_mutex);
 
        ret = _mv88e6xxx_phy_read_indirect(ds, port, 16);
        if (ret < 0)
@@ -866,7 +973,7 @@ int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
 
        ret = _mv88e6xxx_phy_write_indirect(ds, port, 16, reg);
 out:
-       mutex_unlock(&ps->phy_mutex);
+       mutex_unlock(&ps->smi_mutex);
 
        return ret;
 }
@@ -1241,13 +1348,212 @@ static void mv88e6xxx_bridge_work(struct work_struct *work)
        }
 }
 
-int mv88e6xxx_setup_port_common(struct dsa_switch *ds, int port)
+static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
 {
        struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        int ret, fid;
+       u16 reg;
 
        mutex_lock(&ps->smi_mutex);
 
+       if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
+           mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
+           mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
+           mv88e6xxx_6065_family(ds)) {
+               /* MAC Forcing register: don't force link, speed,
+                * duplex or flow control state to any particular
+                * values on physical ports, but force the CPU port
+                * and all DSA ports to their maximum bandwidth and
+                * full duplex.
+                */
+               ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL);
+               if (ret < 0)
+                       goto abort;
+               reg = ret;
+               if (dsa_is_cpu_port(ds, port) ||
+                   ds->dsa_port_mask & (1 << port)) {
+                       reg |= PORT_PCS_CTRL_FORCE_LINK |
+                               PORT_PCS_CTRL_LINK_UP |
+                               PORT_PCS_CTRL_DUPLEX_FULL |
+                               PORT_PCS_CTRL_FORCE_DUPLEX;
+                       if (mv88e6xxx_6065_family(ds))
+                               reg |= PORT_PCS_CTRL_100;
+                       else
+                               reg |= PORT_PCS_CTRL_1000;
+               } else {
+                       reg |= PORT_PCS_CTRL_UNFORCED;
+               }
+
+               ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+                                          PORT_PCS_CTRL, reg);
+               if (ret)
+                       goto abort;
+       }
+
+       /* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
+        * disable Header mode, enable IGMP/MLD snooping, disable VLAN
+        * tunneling, determine priority by looking at 802.1p and IP
+        * priority fields (IP prio has precedence), and set STP state
+        * to Forwarding.
+        *
+        * If this is the CPU link, use DSA or EDSA tagging depending
+        * on which tagging mode was configured.
+        *
+        * If this is a link to another switch, use DSA tagging mode.
+        *
+        * If this is the upstream port for this switch, enable
+        * forwarding of unknown unicasts and multicasts.
+        */
+       reg = 0;
+       if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
+           mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
+           mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
+           mv88e6xxx_6185_family(ds))
+               reg = PORT_CONTROL_IGMP_MLD_SNOOP |
+                       PORT_CONTROL_USE_TAG | PORT_CONTROL_USE_IP |
+                       PORT_CONTROL_STATE_FORWARDING;
+       if (dsa_is_cpu_port(ds, port)) {
+               if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds))
+                       reg |= PORT_CONTROL_DSA_TAG;
+               if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
+                   mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds)) {
+                       if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
+                               reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA;
+                       else
+                               reg |= PORT_CONTROL_FRAME_MODE_DSA;
+               }
+
+               if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
+                   mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
+                   mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
+                   mv88e6xxx_6185_family(ds)) {
+                       if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
+                               reg |= PORT_CONTROL_EGRESS_ADD_TAG;
+               }
+       }
+       if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
+           mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
+           mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds)) {
+               if (ds->dsa_port_mask & (1 << port))
+                       reg |= PORT_CONTROL_FRAME_MODE_DSA;
+               if (port == dsa_upstream_port(ds))
+                       reg |= PORT_CONTROL_FORWARD_UNKNOWN |
+                               PORT_CONTROL_FORWARD_UNKNOWN_MC;
+       }
+       if (reg) {
+               ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+                                          PORT_CONTROL, reg);
+               if (ret)
+                       goto abort;
+       }
+
+       /* Port Control 2: don't force a good FCS, set the maximum
+        * frame size to 10240 bytes, don't let the switch add or
+        * strip 802.1q tags, don't discard tagged or untagged frames
+        * on this port, do a destination address lookup on all
+        * received packets as usual, disable ARP mirroring and don't
+        * send a copy of all transmitted/received frames on this port
+        * to the CPU.
+        */
+       reg = 0;
+       if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
+           mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
+           mv88e6xxx_6095_family(ds))
+               reg = PORT_CONTROL_2_MAP_DA;
+
+       if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
+           mv88e6xxx_6165_family(ds))
+               reg |= PORT_CONTROL_2_JUMBO_10240;
+
+       if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds)) {
+               /* Set the upstream port this port should use */
+               reg |= dsa_upstream_port(ds);
+               /* enable forwarding of unknown multicast addresses to
+                * the upstream port
+                */
+               if (port == dsa_upstream_port(ds))
+                       reg |= PORT_CONTROL_2_FORWARD_UNKNOWN;
+       }
+
+       if (reg) {
+               ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+                                          PORT_CONTROL_2, reg);
+               if (ret)
+                       goto abort;
+       }
+
+       /* Port Association Vector: when learning source addresses
+        * of packets, add the address to the address database using
+        * a port bitmap that has only the bit for this port set and
+        * the other bits clear.
+        */
+       ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_ASSOC_VECTOR,
+                                  1 << port);
+       if (ret)
+               goto abort;
+
+       /* Egress rate control 2: disable egress rate control. */
+       ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_RATE_CONTROL_2,
+                                  0x0000);
+       if (ret)
+               goto abort;
+
+       if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
+           mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds)) {
+               /* Do not limit how long the remote end may pause this
+                * port, nor how long this port may pause the remote
+                * end.
+                */
+               ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+                                          PORT_PAUSE_CTRL, 0x0000);
+               if (ret)
+                       goto abort;
+
+               /* Port ATU control: disable limiting the number of
+                * address database entries that this port is allowed
+                * to use.
+                */
+               ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+                                          PORT_ATU_CONTROL, 0x0000);
+               if (ret)
+                       goto abort;
+
+               /* Priority Override: disable DA, SA and VTU priority
+                * override.
+                */
+               ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+                                          PORT_PRI_OVERRIDE, 0x0000);
+               if (ret)
+                       goto abort;
+
+               /* Port Ethertype: set the EtherType used to recognise
+                * EtherType DSA (EDSA) frames.
+                */
+               ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+                                          PORT_ETH_TYPE, ETH_P_EDSA);
+               if (ret)
+                       goto abort;
+               /* Tag Remap: use an identity 802.1p prio -> switch
+                * prio mapping.
+                */
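+               /* (0x3210 holds one switch priority per nibble, so
+                * 802.1p priorities 3..0 map to 3..0 unchanged.)
+                */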
+               ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+                                          PORT_TAG_REGMAP_0123, 0x3210);
+               if (ret)
+                       goto abort;
+
+               /* Tag Remap 2: use an identity 802.1p prio -> switch
+                * prio mapping.
+                */
+               ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+                                          PORT_TAG_REGMAP_4567, 0x7654);
+               if (ret)
+                       goto abort;
+       }
+
+       if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
+           mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
+           mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds)) {
+               /* Rate Control: disable ingress rate limiting. */
+               ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+                                          PORT_RATE_CONTROL, 0x0001);
+               if (ret)
+                       goto abort;
+       }
+
        /* Port Control 1: disable trunking, disable sending
         * learning messages to this port.
         */
@@ -1281,13 +1587,25 @@ abort:
        return ret;
 }
 
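To make the branching above concrete: on a 6352-family switch whose CPU
port runs EDSA tagging, the CPU port is also the upstream port, so the
Port Control value assembled by mv88e6xxx_setup_port() works out to the
following (illustration only, built from the masks added to mv88e6xxx.h
later in this diff):

	u16 cpu_port_control = PORT_CONTROL_IGMP_MLD_SNOOP |
			       PORT_CONTROL_USE_TAG |
			       PORT_CONTROL_USE_IP |
			       PORT_CONTROL_STATE_FORWARDING |
			       PORT_CONTROL_FRAME_ETHER_TYPE_DSA |
			       PORT_CONTROL_EGRESS_ADD_TAG |
			       PORT_CONTROL_FORWARD_UNKNOWN |
			       PORT_CONTROL_FORWARD_UNKNOWN_MC;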
+int mv88e6xxx_setup_ports(struct dsa_switch *ds)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+       int ret;
+       int i;
+
+       for (i = 0; i < ps->num_ports; i++) {
+               ret = mv88e6xxx_setup_port(ds, i);
+               if (ret < 0)
+                       return ret;
+       }
+       return 0;
+}
+
 int mv88e6xxx_setup_common(struct dsa_switch *ds)
 {
        struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
 
        mutex_init(&ps->smi_mutex);
-       mutex_init(&ps->stats_mutex);
-       mutex_init(&ps->phy_mutex);
 
        ps->id = REG_READ(REG_PORT(0), PORT_SWITCH_ID) & 0xfff0;
 
@@ -1298,6 +1616,104 @@ int mv88e6xxx_setup_common(struct dsa_switch *ds)
        return 0;
 }
 
+int mv88e6xxx_setup_global(struct dsa_switch *ds)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+       int i;
+
+       /* Set the default address aging time to 5 minutes, and
+        * enable address learn messages to be sent to all message
+        * ports.
+        */
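+       /* (0x0140 sets the age-time field, bits 11:4, to 0x14: twenty
+        * 15-second intervals, i.e. the 300 seconds promised above.)
+        */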
+       REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
+                 0x0140 | GLOBAL_ATU_CONTROL_LEARN2ALL);
+
+       /* Configure the IP ToS mapping registers. */
+       REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_0, 0x0000);
+       REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_1, 0x0000);
+       REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_2, 0x5555);
+       REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_3, 0x5555);
+       REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_4, 0xaaaa);
+       REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_5, 0xaaaa);
+       REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_6, 0xffff);
+       REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_7, 0xffff);
+
+       /* Configure the IEEE 802.1p priority mapping register. */
+       REG_WRITE(REG_GLOBAL, GLOBAL_IEEE_PRI, 0xfa41);
+
+       /* Send all frames with destination addresses matching
+        * 01:80:c2:00:00:0x to the CPU port.
+        */
+       REG_WRITE(REG_GLOBAL2, GLOBAL2_MGMT_EN_0X, 0xffff);
+
+       /* Ignore removed tag data on doubly tagged packets, disable
+        * flow control messages, force flow control priority to the
+        * highest, and send all special multicast frames to the CPU
+        * port at the highest priority.
+        */
+       REG_WRITE(REG_GLOBAL2, GLOBAL2_SWITCH_MGMT,
+                 0x7 | GLOBAL2_SWITCH_MGMT_RSVD2CPU | 0x70 |
+                 GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI);
+
+       /* Program the DSA routing table. */
+       for (i = 0; i < 32; i++) {
+               int nexthop = 0x1f;
+
+               if (ds->pd->rtable &&
+                   i != ds->index && i < ds->dst->pd->nr_chips)
+                       nexthop = ds->pd->rtable[i] & 0x1f;
+
+               REG_WRITE(REG_GLOBAL2, GLOBAL2_DEVICE_MAPPING,
+                         GLOBAL2_DEVICE_MAPPING_UPDATE |
+                         (i << GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT) |
+                         nexthop);
+       }
+
+       /* Clear all trunk masks. */
+       for (i = 0; i < 8; i++)
+               REG_WRITE(REG_GLOBAL2, GLOBAL2_TRUNK_MASK,
+                         0x8000 | (i << GLOBAL2_TRUNK_MASK_NUM_SHIFT) |
+                         ((1 << ps->num_ports) - 1));
+
+       /* Clear all trunk mappings. */
+       for (i = 0; i < 16; i++)
+               REG_WRITE(REG_GLOBAL2, GLOBAL2_TRUNK_MAPPING,
+                         GLOBAL2_TRUNK_MAPPING_UPDATE |
+                         (i << GLOBAL2_TRUNK_MAPPING_ID_SHIFT));
+
+       if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
+           mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds)) {
+               /* Send all frames with destination addresses matching
+                * 01:80:c2:00:00:2x to the CPU port.
+                */
+               REG_WRITE(REG_GLOBAL2, GLOBAL2_MGMT_EN_2X, 0xffff);
+
+               /* Initialise cross-chip port VLAN table to reset
+                * defaults.
+                */
+               REG_WRITE(REG_GLOBAL2, GLOBAL2_PVT_ADDR, 0x9000);
+
+               /* Clear the priority override table. */
+               for (i = 0; i < 16; i++)
+                       REG_WRITE(REG_GLOBAL2, GLOBAL2_PRIO_OVERRIDE,
+                                 0x8000 | (i << 8));
+       }
+
+       if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
+           mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
+           mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds)) {
+               /* Disable ingress rate limiting by resetting all
+                * ingress rate limit registers to their initial
+                * state.
+                */
+               for (i = 0; i < ps->num_ports; i++)
+                       REG_WRITE(REG_GLOBAL2, GLOBAL2_INGRESS_OP,
+                                 0x9000 | (i << 8));
+       }
+
+       return 0;
+}
+
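The DSA routing table loop above packs everything into one
GLOBAL2_DEVICE_MAPPING write per target device. A standalone sketch of
that encoding, using the masks added to mv88e6xxx.h in this same diff
(hypothetical helper, not part of the change):

	/* Build the update word routing 'target' via local port
	 * 'nexthop'; 0x1f means "no route to that device".
	 */
	static u16 example_device_mapping(int target, int nexthop)
	{
		return GLOBAL2_DEVICE_MAPPING_UPDATE |
		       (target << GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT) |
		       (nexthop & 0x1f);
	}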
 int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active)
 {
        struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
@@ -1343,14 +1759,14 @@ int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg)
        struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        int ret;
 
-       mutex_lock(&ps->phy_mutex);
+       mutex_lock(&ps->smi_mutex);
        ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
        if (ret < 0)
                goto error;
        ret = _mv88e6xxx_phy_read_indirect(ds, port, reg);
 error:
        _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
-       mutex_unlock(&ps->phy_mutex);
+       mutex_unlock(&ps->smi_mutex);
        return ret;
 }
 
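Register 0x16 here is the Marvell PHY page-select register: the helper
switches to the requested page, performs the access, and always restores
page 0 before dropping the SMI mutex, even on error. A hypothetical
caller (page and register numbers invented for illustration):

	/* Read register 17 from page 6 of port 0's PHY. */
	int val = mv88e6xxx_phy_page_read(ds, 0, 6, 17);
	if (val < 0)
		return val; /* negative errno from the SMI layer */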
@@ -1360,7 +1776,7 @@ int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
        struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        int ret;
 
-       mutex_lock(&ps->phy_mutex);
+       mutex_lock(&ps->smi_mutex);
        ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
        if (ret < 0)
                goto error;
@@ -1368,7 +1784,7 @@ int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
        ret = _mv88e6xxx_phy_write_indirect(ds, port, reg, val);
 error:
        _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
-       mutex_unlock(&ps->phy_mutex);
+       mutex_unlock(&ps->smi_mutex);
        return ret;
 }
 
@@ -1391,9 +1807,9 @@ mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum)
        if (addr < 0)
                return addr;
 
-       mutex_lock(&ps->phy_mutex);
+       mutex_lock(&ps->smi_mutex);
        ret = _mv88e6xxx_phy_read(ds, addr, regnum);
-       mutex_unlock(&ps->phy_mutex);
+       mutex_unlock(&ps->smi_mutex);
        return ret;
 }
 
@@ -1407,9 +1823,9 @@ mv88e6xxx_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
        if (addr < 0)
                return addr;
 
-       mutex_lock(&ps->phy_mutex);
+       mutex_lock(&ps->smi_mutex);
        ret = _mv88e6xxx_phy_write(ds, addr, regnum, val);
-       mutex_unlock(&ps->phy_mutex);
+       mutex_unlock(&ps->smi_mutex);
        return ret;
 }
 
@@ -1423,9 +1839,9 @@ mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int port, int regnum)
        if (addr < 0)
                return addr;
 
-       mutex_lock(&ps->phy_mutex);
+       mutex_lock(&ps->smi_mutex);
        ret = _mv88e6xxx_phy_read_indirect(ds, addr, regnum);
-       mutex_unlock(&ps->phy_mutex);
+       mutex_unlock(&ps->smi_mutex);
        return ret;
 }
 
@@ -1440,9 +1856,9 @@ mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int port, int regnum,
        if (addr < 0)
                return addr;
 
-       mutex_lock(&ps->phy_mutex);
+       mutex_lock(&ps->smi_mutex);
        ret = _mv88e6xxx_phy_write_indirect(ds, addr, regnum, val);
-       mutex_unlock(&ps->phy_mutex);
+       mutex_unlock(&ps->smi_mutex);
        return ret;
 }
 
index e045154f33646692cb292150efde8e0ad78a9326..e10ccdb4ffbcf9e5a63d6673e12f5cf05c638699 100644 (file)
 #define PORT_STATUS_TX_PAUSED  BIT(5)
 #define PORT_STATUS_FLOW_CTRL  BIT(4)
 #define PORT_PCS_CTRL          0x01
+#define PORT_PCS_CTRL_FC               BIT(7)
+#define PORT_PCS_CTRL_FORCE_FC         BIT(6)
+#define PORT_PCS_CTRL_LINK_UP          BIT(5)
+#define PORT_PCS_CTRL_FORCE_LINK       BIT(4)
+#define PORT_PCS_CTRL_DUPLEX_FULL      BIT(3)
+#define PORT_PCS_CTRL_FORCE_DUPLEX     BIT(2)
+#define PORT_PCS_CTRL_10               0x00
+#define PORT_PCS_CTRL_100              0x01
+#define PORT_PCS_CTRL_1000             0x02
+#define PORT_PCS_CTRL_UNFORCED         0x03
+#define PORT_PAUSE_CTRL                0x02
 #define PORT_SWITCH_ID         0x03
+#define PORT_SWITCH_ID_6031    0x0310
+#define PORT_SWITCH_ID_6035    0x0350
+#define PORT_SWITCH_ID_6046    0x0480
+#define PORT_SWITCH_ID_6061    0x0610
+#define PORT_SWITCH_ID_6065    0x0650
 #define PORT_SWITCH_ID_6085    0x04a0
+#define PORT_SWITCH_ID_6092    0x0970
 #define PORT_SWITCH_ID_6095    0x0950
+#define PORT_SWITCH_ID_6096    0x0980
+#define PORT_SWITCH_ID_6097    0x0990
+#define PORT_SWITCH_ID_6108    0x1070
+#define PORT_SWITCH_ID_6121    0x1040
+#define PORT_SWITCH_ID_6122    0x1050
 #define PORT_SWITCH_ID_6123    0x1210
 #define PORT_SWITCH_ID_6123_A1 0x1212
 #define PORT_SWITCH_ID_6123_A2 0x1213
 #define PORT_SWITCH_ID_6165_A2 0x1653
 #define PORT_SWITCH_ID_6171    0x1710
 #define PORT_SWITCH_ID_6172    0x1720
+#define PORT_SWITCH_ID_6175    0x1750
 #define PORT_SWITCH_ID_6176    0x1760
 #define PORT_SWITCH_ID_6182    0x1a60
 #define PORT_SWITCH_ID_6185    0x1a70
+#define PORT_SWITCH_ID_6240    0x2400
+#define PORT_SWITCH_ID_6320    0x1250
+#define PORT_SWITCH_ID_6350    0x3710
+#define PORT_SWITCH_ID_6351    0x3750
 #define PORT_SWITCH_ID_6352    0x3520
 #define PORT_SWITCH_ID_6352_A0 0x3521
 #define PORT_SWITCH_ID_6352_A1 0x3522
 #define PORT_CONTROL           0x04
+#define PORT_CONTROL_USE_CORE_TAG      BIT(15)
+#define PORT_CONTROL_DROP_ON_LOCK      BIT(14)
+#define PORT_CONTROL_EGRESS_UNMODIFIED (0x0 << 12)
+#define PORT_CONTROL_EGRESS_UNTAGGED   (0x1 << 12)
+#define PORT_CONTROL_EGRESS_TAGGED     (0x2 << 12)
+#define PORT_CONTROL_EGRESS_ADD_TAG    (0x3 << 12)
+#define PORT_CONTROL_HEADER            BIT(11)
+#define PORT_CONTROL_IGMP_MLD_SNOOP    BIT(10)
+#define PORT_CONTROL_DOUBLE_TAG                BIT(9)
+#define PORT_CONTROL_FRAME_MODE_NORMAL         (0x0 << 8)
+#define PORT_CONTROL_FRAME_MODE_DSA            (0x1 << 8)
+#define PORT_CONTROL_FRAME_MODE_PROVIDER       (0x2 << 8)
+#define PORT_CONTROL_FRAME_ETHER_TYPE_DSA      (0x3 << 8)
+#define PORT_CONTROL_DSA_TAG           BIT(8)
+#define PORT_CONTROL_VLAN_TUNNEL       BIT(7)
+#define PORT_CONTROL_TAG_IF_BOTH       BIT(6)
+#define PORT_CONTROL_USE_IP            BIT(5)
+#define PORT_CONTROL_USE_TAG           BIT(4)
+#define PORT_CONTROL_FORWARD_UNKNOWN_MC        BIT(3)
+#define PORT_CONTROL_FORWARD_UNKNOWN   BIT(2)
 #define PORT_CONTROL_STATE_MASK                0x03
 #define PORT_CONTROL_STATE_DISABLED    0x00
 #define PORT_CONTROL_STATE_BLOCKING    0x01
 #define PORT_BASE_VLAN         0x06
 #define PORT_DEFAULT_VLAN      0x07
 #define PORT_CONTROL_2         0x08
+#define PORT_CONTROL_2_IGNORE_FCS      BIT(15)
+#define PORT_CONTROL_2_VTU_PRI_OVERRIDE        BIT(14)
+#define PORT_CONTROL_2_SA_PRIO_OVERRIDE        BIT(13)
+#define PORT_CONTROL_2_DA_PRIO_OVERRIDE        BIT(12)
+#define PORT_CONTROL_2_JUMBO_1522      (0x00 << 12)
+#define PORT_CONTROL_2_JUMBO_2048      (0x01 << 12)
+#define PORT_CONTROL_2_JUMBO_10240     (0x02 << 12)
+#define PORT_CONTROL_2_DISCARD_TAGGED  BIT(9)
+#define PORT_CONTROL_2_DISCARD_UNTAGGED        BIT(8)
+#define PORT_CONTROL_2_MAP_DA          BIT(7)
+#define PORT_CONTROL_2_DEFAULT_FORWARD BIT(6)
+#define PORT_CONTROL_2_FORWARD_UNKNOWN BIT(6)
+#define PORT_CONTROL_2_EGRESS_MONITOR  BIT(5)
+#define PORT_CONTROL_2_INGRESS_MONITOR BIT(4)
 #define PORT_RATE_CONTROL      0x09
 #define PORT_RATE_CONTROL_2    0x0a
 #define PORT_ASSOC_VECTOR      0x0b
+#define PORT_ATU_CONTROL       0x0c
+#define PORT_PRI_OVERRIDE      0x0d
+#define PORT_ETH_TYPE          0x0f
 #define PORT_IN_DISCARD_LO     0x10
 #define PORT_IN_DISCARD_HI     0x11
 #define PORT_IN_FILTERED       0x12
 #define PORT_OUT_FILTERED      0x13
-#define PORT_TAG_REGMAP_0123   0x19
-#define PORT_TAG_REGMAP_4567   0x1a
+#define PORT_TAG_REGMAP_0123   0x18
+#define PORT_TAG_REGMAP_4567   0x19
 
 #define REG_GLOBAL             0x1b
 #define GLOBAL_STATUS          0x00
 #define GLOBAL_CONTROL_DISCARD_EXCESS  BIT(13) /* 6352 */
 #define GLOBAL_CONTROL_SCHED_PRIO      BIT(11) /* 6152 */
 #define GLOBAL_CONTROL_MAX_FRAME_1632  BIT(10) /* 6152 */
-#define GLOBAL_CONTROL_RELOAD_EEPROM   BIT(9)  /* 6152 */
+#define GLOBAL_CONTROL_RELOAD_EEPROM   BIT(9)  /* 6152 */
 #define GLOBAL_CONTROL_DEVICE_EN       BIT(7)
 #define GLOBAL_CONTROL_STATS_DONE_EN   BIT(6)
 #define GLOBAL_CONTROL_VTU_PROBLEM_EN  BIT(5)
 #define GLOBAL_VTU_DATA_4_7    0x08
 #define GLOBAL_VTU_DATA_8_11   0x09
 #define GLOBAL_ATU_CONTROL     0x0a
+#define GLOBAL_ATU_CONTROL_LEARN2ALL   BIT(3)
 #define GLOBAL_ATU_OP          0x0b
 #define GLOBAL_ATU_OP_BUSY     BIT(15)
 #define GLOBAL_ATU_OP_NOP              (0 << 12)
 #define GLOBAL_IEEE_PRI                0x18
 #define GLOBAL_CORE_TAG_TYPE   0x19
 #define GLOBAL_MONITOR_CONTROL 0x1a
+#define GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT   12
+#define GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT    8
+#define GLOBAL_MONITOR_CONTROL_ARP_SHIFT       4
+#define GLOBAL_MONITOR_CONTROL_MIRROR_SHIFT    0
+#define GLOBAL_MONITOR_CONTROL_ARP_DISABLED    (0xf0)
 #define GLOBAL_CONTROL_2       0x1c
+#define GLOBAL_CONTROL_2_NO_CASCADE            0xe000
+#define GLOBAL_CONTROL_2_MULTIPLE_CASCADE      0xf000
+
 #define GLOBAL_STATS_OP                0x1d
 #define GLOBAL_STATS_OP_BUSY   BIT(15)
 #define GLOBAL_STATS_OP_NOP            (0 << 12)
 #define GLOBAL2_MGMT_EN_0X     0x03
 #define GLOBAL2_FLOW_CONTROL   0x04
 #define GLOBAL2_SWITCH_MGMT    0x05
+#define GLOBAL2_SWITCH_MGMT_USE_DOUBLE_TAG_DATA        BIT(15)
+#define GLOBAL2_SWITCH_MGMT_PREVENT_LOOPS      BIT(14)
+#define GLOBAL2_SWITCH_MGMT_FLOW_CONTROL_MSG   BIT(13)
+#define GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI        BIT(7)
+#define GLOBAL2_SWITCH_MGMT_RSVD2CPU           BIT(3)
 #define GLOBAL2_DEVICE_MAPPING 0x06
+#define GLOBAL2_DEVICE_MAPPING_UPDATE          BIT(15)
+#define GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT    8
 #define GLOBAL2_TRUNK_MASK     0x07
+#define GLOBAL2_TRUNK_MASK_UPDATE              BIT(15)
+#define GLOBAL2_TRUNK_MASK_NUM_SHIFT           12
 #define GLOBAL2_TRUNK_MAPPING  0x08
+#define GLOBAL2_TRUNK_MAPPING_UPDATE           BIT(15)
+#define GLOBAL2_TRUNK_MAPPING_ID_SHIFT         11
 #define GLOBAL2_INGRESS_OP     0x09
 #define GLOBAL2_INGRESS_DATA   0x0a
 #define GLOBAL2_PVT_ADDR       0x0b
 #define GLOBAL2_SWITCH_MAC_BUSY BIT(15)
 #define GLOBAL2_ATU_STATS      0x0e
 #define GLOBAL2_PRIO_OVERRIDE  0x0f
+#define GLOBAL2_PRIO_OVERRIDE_FORCE_SNOOP      BIT(7)
+#define GLOBAL2_PRIO_OVERRIDE_SNOOP_SHIFT      4
+#define GLOBAL2_PRIO_OVERRIDE_FORCE_ARP                BIT(3)
+#define GLOBAL2_PRIO_OVERRIDE_ARP_SHIFT                0
 #define GLOBAL2_EEPROM_OP      0x14
 #define GLOBAL2_EEPROM_OP_BUSY BIT(15)
 #define GLOBAL2_EEPROM_OP_LOAD BIT(11)
@@ -260,14 +348,14 @@ struct mv88e6xxx_hw_stat {
 };
 
 int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active);
-int mv88e6xxx_setup_port_common(struct dsa_switch *ds, int port);
+int mv88e6xxx_setup_ports(struct dsa_switch *ds);
 int mv88e6xxx_setup_common(struct dsa_switch *ds);
+int mv88e6xxx_setup_global(struct dsa_switch *ds);
 int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg);
 int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg);
 int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
                          int reg, u16 val);
 int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val);
-int mv88e6xxx_config_prio(struct dsa_switch *ds);
 int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr);
 int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr);
 int mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum);
@@ -289,7 +377,6 @@ int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port);
 void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
                        struct ethtool_regs *regs, void *_p);
 int  mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp);
-int mv88e6xxx_phy_wait(struct dsa_switch *ds);
 int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds);
 int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds);
 int mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr, int regnum);
index eadcb053807e46e8ae1f5c8ebce984abf0166219..9a8308553520318d7aa78ed3114e0e17064ddd1e 100644 (file)
@@ -34,6 +34,7 @@ source "drivers/net/ethernet/adi/Kconfig"
 source "drivers/net/ethernet/broadcom/Kconfig"
 source "drivers/net/ethernet/brocade/Kconfig"
 source "drivers/net/ethernet/calxeda/Kconfig"
+source "drivers/net/ethernet/cavium/Kconfig"
 source "drivers/net/ethernet/chelsio/Kconfig"
 source "drivers/net/ethernet/cirrus/Kconfig"
 source "drivers/net/ethernet/cisco/Kconfig"
index 1367afcd0a8b2cd29681fc1867e7f0af71889fe0..4395d99115a00134dc25e3e00185348e0ed17d05 100644 (file)
@@ -20,6 +20,7 @@ obj-$(CONFIG_NET_BFIN) += adi/
 obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/
 obj-$(CONFIG_NET_VENDOR_BROCADE) += brocade/
 obj-$(CONFIG_NET_CALXEDA_XGMAC) += calxeda/
+obj-$(CONFIG_NET_VENDOR_CAVIUM) += cavium/
 obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/
 obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/
 obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/
index 426916036151649ca3a2cef4e69eb00735826c05..acd53173fcc0362db5b542bf42e38316f7716c5e 100644 (file)
@@ -179,10 +179,8 @@ config SUNLANCE
 
 config AMD_XGBE
        tristate "AMD 10GbE Ethernet driver"
-       depends on (OF_NET || ACPI) && HAS_IOMEM && HAS_DMA
+       depends on ((OF_NET && OF_ADDRESS) || ACPI) && HAS_IOMEM && HAS_DMA
        depends on ARM64 || COMPILE_TEST
-       select PHYLIB
-       select AMD_XGBE_PHY
        select BITREVERSE
        select CRC32
        select PTP_1588_CLOCK
index 34c28aac767ff916fa9a10dac02786b74fac4fa9..b6fa89102526b95e579d5dd16cbecad3fe9226d2 100644 (file)
  */
 #define PCS_MMD_SELECT                 0xff
 
+/* SerDes integration register offsets */
+#define SIR0_KR_RT_1                   0x002c
+#define SIR0_STATUS                    0x0040
+#define SIR1_SPEED                     0x0000
+
+/* SerDes integration register entry bit positions and sizes */
+#define SIR0_KR_RT_1_RESET_INDEX       11
+#define SIR0_KR_RT_1_RESET_WIDTH       1
+#define SIR0_STATUS_RX_READY_INDEX     0
+#define SIR0_STATUS_RX_READY_WIDTH     1
+#define SIR0_STATUS_TX_READY_INDEX     8
+#define SIR0_STATUS_TX_READY_WIDTH     1
+#define SIR1_SPEED_CDR_RATE_INDEX      12
+#define SIR1_SPEED_CDR_RATE_WIDTH      4
+#define SIR1_SPEED_DATARATE_INDEX      4
+#define SIR1_SPEED_DATARATE_WIDTH      2
+#define SIR1_SPEED_PLLSEL_INDEX                3
+#define SIR1_SPEED_PLLSEL_WIDTH                1
+#define SIR1_SPEED_RATECHANGE_INDEX    6
+#define SIR1_SPEED_RATECHANGE_WIDTH    1
+#define SIR1_SPEED_TXAMP_INDEX         8
+#define SIR1_SPEED_TXAMP_WIDTH         4
+#define SIR1_SPEED_WORDMODE_INDEX      0
+#define SIR1_SPEED_WORDMODE_WIDTH      3
+
+/* SerDes RxTx register offsets */
+#define RXTX_REG6                      0x0018
+#define RXTX_REG20                     0x0050
+#define RXTX_REG22                     0x0058
+#define RXTX_REG114                    0x01c8
+#define RXTX_REG129                    0x0204
+
+/* SerDes RxTx register entry bit positions and sizes */
+#define RXTX_REG6_RESETB_RXD_INDEX     8
+#define RXTX_REG6_RESETB_RXD_WIDTH     1
+#define RXTX_REG20_BLWC_ENA_INDEX      2
+#define RXTX_REG20_BLWC_ENA_WIDTH      1
+#define RXTX_REG114_PQ_REG_INDEX       9
+#define RXTX_REG114_PQ_REG_WIDTH       7
+#define RXTX_REG129_RXDFE_CONFIG_INDEX 14
+#define RXTX_REG129_RXDFE_CONFIG_WIDTH 2
+
 /* Descriptor/Packet entry bit positions and sizes */
 #define RX_PACKET_ERRORS_CRC_INDEX             2
 #define RX_PACKET_ERRORS_CRC_WIDTH             1
 #define TX_NORMAL_DESC2_VLAN_INSERT            0x2
 
 /* MDIO undefined or vendor specific registers */
+#ifndef MDIO_PMA_10GBR_PMD_CTRL
+#define MDIO_PMA_10GBR_PMD_CTRL                0x0096
+#endif
+
+#ifndef MDIO_PMA_10GBR_FECCTRL
+#define MDIO_PMA_10GBR_FECCTRL         0x00ab
+#endif
+
+#ifndef MDIO_AN_XNP
+#define MDIO_AN_XNP                    0x0016
+#endif
+
+#ifndef MDIO_AN_LPX
+#define MDIO_AN_LPX                    0x0019
+#endif
+
 #ifndef MDIO_AN_COMP_STAT
 #define MDIO_AN_COMP_STAT              0x0030
 #endif
 
+#ifndef MDIO_AN_INTMASK
+#define MDIO_AN_INTMASK                        0x8001
+#endif
+
+#ifndef MDIO_AN_INT
+#define MDIO_AN_INT                    0x8002
+#endif
+
+#ifndef MDIO_CTRL1_SPEED1G
+#define MDIO_CTRL1_SPEED1G             (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100)
+#endif
+
+/* MDIO mask values */
+#define XGBE_XNP_MCF_NULL_MESSAGE      0x001
+#define XGBE_XNP_ACK_PROCESSED         BIT(12)
+#define XGBE_XNP_MP_FORMATTED          BIT(13)
+#define XGBE_XNP_NP_EXCHANGE           BIT(15)
+
+#define XGBE_KR_TRAINING_START         BIT(0)
+#define XGBE_KR_TRAINING_ENABLE                BIT(1)
+
 /* Bit setting and getting macros
  *  The get macro will extract the current bit field value from within
  *  the variable
@@ -1118,6 +1197,82 @@ do {                                                                     \
 #define XPCS_IOREAD(_pdata, _off)                                      \
        ioread32((_pdata)->xpcs_regs + (_off))
 
+/* Macros for building, reading or writing register values or bits
+ * within the register values of SerDes integration registers.
+ */
+#define XSIR_GET_BITS(_var, _prefix, _field)                            \
+       GET_BITS((_var),                                                \
+                _prefix##_##_field##_INDEX,                            \
+                _prefix##_##_field##_WIDTH)
+
+#define XSIR_SET_BITS(_var, _prefix, _field, _val)                      \
+       SET_BITS((_var),                                                \
+                _prefix##_##_field##_INDEX,                            \
+                _prefix##_##_field##_WIDTH, (_val))
+
+#define XSIR0_IOREAD(_pdata, _reg)                                     \
+       ioread16((_pdata)->sir0_regs + _reg)
+
+#define XSIR0_IOREAD_BITS(_pdata, _reg, _field)                                \
+       GET_BITS(XSIR0_IOREAD((_pdata), _reg),                          \
+                _reg##_##_field##_INDEX,                               \
+                _reg##_##_field##_WIDTH)
+
+#define XSIR0_IOWRITE(_pdata, _reg, _val)                              \
+       iowrite16((_val), (_pdata)->sir0_regs + _reg)
+
+#define XSIR0_IOWRITE_BITS(_pdata, _reg, _field, _val)                 \
+do {                                                                   \
+       u16 reg_val = XSIR0_IOREAD((_pdata), _reg);                     \
+       SET_BITS(reg_val,                                               \
+                _reg##_##_field##_INDEX,                               \
+                _reg##_##_field##_WIDTH, (_val));                      \
+       XSIR0_IOWRITE((_pdata), _reg, reg_val);                         \
+} while (0)
+
+#define XSIR1_IOREAD(_pdata, _reg)                                     \
+       ioread16((_pdata)->sir1_regs + _reg)
+
+#define XSIR1_IOREAD_BITS(_pdata, _reg, _field)                                \
+       GET_BITS(XSIR1_IOREAD((_pdata), _reg),                          \
+                _reg##_##_field##_INDEX,                               \
+                _reg##_##_field##_WIDTH)
+
+#define XSIR1_IOWRITE(_pdata, _reg, _val)                              \
+       iowrite16((_val), (_pdata)->sir1_regs + _reg)
+
+#define XSIR1_IOWRITE_BITS(_pdata, _reg, _field, _val)                 \
+do {                                                                   \
+       u16 reg_val = XSIR1_IOREAD((_pdata), _reg);                     \
+       SET_BITS(reg_val,                                               \
+                _reg##_##_field##_INDEX,                               \
+                _reg##_##_field##_WIDTH, (_val));                      \
+       XSIR1_IOWRITE((_pdata), _reg, reg_val);                         \
+} while (0)
+
+/* Macros for building, reading or writing register values or bits
+ * within the register values of SerDes RxTx registers.
+ */
+#define XRXTX_IOREAD(_pdata, _reg)                                     \
+       ioread16((_pdata)->rxtx_regs + _reg)
+
+#define XRXTX_IOREAD_BITS(_pdata, _reg, _field)                                \
+       GET_BITS(XRXTX_IOREAD((_pdata), _reg),                          \
+                _reg##_##_field##_INDEX,                               \
+                _reg##_##_field##_WIDTH)
+
+#define XRXTX_IOWRITE(_pdata, _reg, _val)                              \
+       iowrite16((_val), (_pdata)->rxtx_regs + _reg)
+
+#define XRXTX_IOWRITE_BITS(_pdata, _reg, _field, _val)                 \
+do {                                                                   \
+       u16 reg_val = XRXTX_IOREAD((_pdata), _reg);                     \
+       SET_BITS(reg_val,                                               \
+                _reg##_##_field##_INDEX,                               \
+                _reg##_##_field##_WIDTH, (_val));                      \
+       XRXTX_IOWRITE((_pdata), _reg, reg_val);                         \
+} while (0)
+
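A hedged usage sketch for the SerDes accessors defined above, combining
them with the SIR1_SPEED field definitions from earlier in this patch
(illustrative only; assumes a struct xgbe_prv_data with sir1_regs
ioremapped, and example_set_cdr_rate is a hypothetical name):

	/* Program the SerDes clock-data-recovery rate field. */
	static void example_set_cdr_rate(struct xgbe_prv_data *pdata,
					 unsigned int rate)
	{
		XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, CDR_RATE, rate);
	}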
 /* Macros for building, reading or writing register values or bits
  * using MDIO.  Different from above because of the use of standardized
  * Linux include values.  No shifting is performed with the bit
index 8a50b01c2686292b06e97dae11c21c57095f6844..a6b9899e285fd4ffae7cf061aa65c71afc2d2818 100644 (file)
@@ -150,9 +150,12 @@ static int xgbe_dcb_ieee_setets(struct net_device *netdev,
        tc_ets = 0;
        tc_ets_weight = 0;
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
-               DBGPR("  TC%u: tx_bw=%hhu, rx_bw=%hhu, tsa=%hhu\n", i,
-                     ets->tc_tx_bw[i], ets->tc_rx_bw[i], ets->tc_tsa[i]);
-               DBGPR("  PRIO%u: TC=%hhu\n", i, ets->prio_tc[i]);
+               netif_dbg(pdata, drv, netdev,
+                         "TC%u: tx_bw=%hhu, rx_bw=%hhu, tsa=%hhu\n", i,
+                         ets->tc_tx_bw[i], ets->tc_rx_bw[i],
+                         ets->tc_tsa[i]);
+               netif_dbg(pdata, drv, netdev, "PRIO%u: TC=%hhu\n", i,
+                         ets->prio_tc[i]);
 
                if ((ets->tc_tx_bw[i] || ets->tc_tsa[i]) &&
                    (i >= pdata->hw_feat.tc_cnt))
@@ -214,8 +217,9 @@ static int xgbe_dcb_ieee_setpfc(struct net_device *netdev,
 {
        struct xgbe_prv_data *pdata = netdev_priv(netdev);
 
-       DBGPR("  cap=%hhu, en=%hhx, mbc=%hhu, delay=%hhu\n",
-             pfc->pfc_cap, pfc->pfc_en, pfc->mbc, pfc->delay);
+       netif_dbg(pdata, drv, netdev,
+                 "cap=%hhu, en=%#hhx, mbc=%hhu, delay=%hhu\n",
+                 pfc->pfc_cap, pfc->pfc_en, pfc->mbc, pfc->delay);
 
        if (!pdata->pfc) {
                pdata->pfc = devm_kzalloc(pdata->dev, sizeof(*pdata->pfc),
@@ -238,9 +242,10 @@ static u8 xgbe_dcb_getdcbx(struct net_device *netdev)
 
 static u8 xgbe_dcb_setdcbx(struct net_device *netdev, u8 dcbx)
 {
+       struct xgbe_prv_data *pdata = netdev_priv(netdev);
        u8 support = xgbe_dcb_getdcbx(netdev);
 
-       DBGPR("  DCBX=%#hhx\n", dcbx);
+       netif_dbg(pdata, drv, netdev, "DCBX=%#hhx\n", dcbx);
 
        if (dcbx & ~support)
                return 1;
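The DBGPR() -> netif_dbg() conversions in this file and the ones that
follow make the debug output runtime-selectable: netif_dbg() only prints
when the matching NETIF_MSG_* bit is set in pdata->msg_enable. A sketch
of the assumed probe-time initialisation (the 'debug' module parameter
and the default bits are hypothetical):

	static int debug = -1;	/* -1: fall back to the default bits */
	module_param(debug, int, 0644);
	MODULE_PARM_DESC(debug, "netif message level bitmap");

	/* ...in the probe path: */
	pdata->msg_enable = netif_msg_init(debug, NETIF_MSG_DRV |
						  NETIF_MSG_PROBE |
						  NETIF_MSG_LINK);

Wherever the driver also wires up ethtool get_msglevel/set_msglevel,
individual bits can then be flipped at runtime with ethtool --msglevel.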
index d81fc6bd4759064f72d716e3cd81f0045f8d1bc9..dd03ad865cafb83b16389e4f1840a335ff3fe9d4 100644 (file)
@@ -208,8 +208,9 @@ static int xgbe_init_ring(struct xgbe_prv_data *pdata,
        if (!ring->rdata)
                return -ENOMEM;
 
-       DBGPR("    rdesc=0x%p, rdesc_dma=0x%llx, rdata=0x%p\n",
-             ring->rdesc, ring->rdesc_dma, ring->rdata);
+       netif_dbg(pdata, drv, pdata->netdev,
+                 "rdesc=%p, rdesc_dma=%pad, rdata=%p\n",
+                 ring->rdesc, &ring->rdesc_dma, ring->rdata);
 
        DBGPR("<--xgbe_init_ring\n");
 
@@ -226,7 +227,9 @@ static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata)
 
        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
-               DBGPR("  %s - tx_ring:\n", channel->name);
+               netif_dbg(pdata, drv, pdata->netdev, "%s - Tx ring:\n",
+                         channel->name);
+
                ret = xgbe_init_ring(pdata, channel->tx_ring,
                                     pdata->tx_desc_count);
                if (ret) {
@@ -235,12 +238,14 @@ static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata)
                        goto err_ring;
                }
 
-               DBGPR("  %s - rx_ring:\n", channel->name);
+               netif_dbg(pdata, drv, pdata->netdev, "%s - Rx ring:\n",
+                         channel->name);
+
                ret = xgbe_init_ring(pdata, channel->rx_ring,
                                     pdata->rx_desc_count);
                if (ret) {
                        netdev_alert(pdata->netdev,
-                                    "error initializing Tx ring\n");
+                                    "error initializing Rx ring\n");
                        goto err_ring;
                }
        }
@@ -476,8 +481,6 @@ static void xgbe_unmap_rdata(struct xgbe_prv_data *pdata,
 
        if (rdata->state_saved) {
                rdata->state_saved = 0;
-               rdata->state.incomplete = 0;
-               rdata->state.context_next = 0;
                rdata->state.skb = NULL;
                rdata->state.len = 0;
                rdata->state.error = 0;
@@ -518,8 +521,6 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
        rdata = XGBE_GET_DESC_DATA(ring, cur_index);
 
        if (tso) {
-               DBGPR("  TSO packet\n");
-
                /* Map the TSO header */
                skb_dma = dma_map_single(pdata->dev, skb->data,
                                         packet->header_len, DMA_TO_DEVICE);
@@ -529,6 +530,9 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
                }
                rdata->skb_dma = skb_dma;
                rdata->skb_dma_len = packet->header_len;
+               netif_dbg(pdata, tx_queued, pdata->netdev,
+                         "skb header: index=%u, dma=%pad, len=%u\n",
+                         cur_index, &skb_dma, packet->header_len);
 
                offset = packet->header_len;
 
@@ -550,8 +554,9 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
                }
                rdata->skb_dma = skb_dma;
                rdata->skb_dma_len = len;
-               DBGPR("  skb data: index=%u, dma=0x%llx, len=%u\n",
-                     cur_index, skb_dma, len);
+               netif_dbg(pdata, tx_queued, pdata->netdev,
+                         "skb data: index=%u, dma=%pad, len=%u\n",
+                         cur_index, &skb_dma, len);
 
                datalen -= len;
                offset += len;
@@ -563,7 +568,8 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
        }
 
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-               DBGPR("  mapping frag %u\n", i);
+               netif_dbg(pdata, tx_queued, pdata->netdev,
+                         "mapping frag %u\n", i);
 
                frag = &skb_shinfo(skb)->frags[i];
                offset = 0;
@@ -582,8 +588,9 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
                        rdata->skb_dma = skb_dma;
                        rdata->skb_dma_len = len;
                        rdata->mapped_as_page = 1;
-                       DBGPR("  skb data: index=%u, dma=0x%llx, len=%u\n",
-                             cur_index, skb_dma, len);
+                       netif_dbg(pdata, tx_queued, pdata->netdev,
+                                 "skb frag: index=%u, dma=%pad, len=%u\n",
+                                 cur_index, &skb_dma, len);
 
                        datalen -= len;
                        offset += len;
index 21d9497518fde356f45e6e0eb0632368d680116b..506e832c9e9a80f8e6da438ac4e2cb42a49435a7 100644 (file)
@@ -710,7 +710,8 @@ static int xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata,
        if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == val)
                return 0;
 
-       DBGPR("  %s promiscuous mode\n", enable ? "entering" : "leaving");
+       netif_dbg(pdata, drv, pdata->netdev, "%s promiscuous mode\n",
+                 enable ? "entering" : "leaving");
        XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val);
 
        return 0;
@@ -724,7 +725,8 @@ static int xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata,
        if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PM) == val)
                return 0;
 
-       DBGPR("  %s allmulti mode\n", enable ? "entering" : "leaving");
+       netif_dbg(pdata, drv, pdata->netdev, "%s allmulti mode\n",
+                 enable ? "entering" : "leaving");
        XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, val);
 
        return 0;
@@ -749,8 +751,9 @@ static void xgbe_set_mac_reg(struct xgbe_prv_data *pdata,
                mac_addr[0] = ha->addr[4];
                mac_addr[1] = ha->addr[5];
 
-               DBGPR("  adding mac address %pM at 0x%04x\n", ha->addr,
-                     *mac_reg);
+               netif_dbg(pdata, drv, pdata->netdev,
+                         "adding mac address %pM at %#x\n",
+                         ha->addr, *mac_reg);
 
                XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
        }
@@ -907,23 +910,6 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
        else
                mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
 
-       /* If the PCS is changing modes, match the MAC speed to it */
-       if (((mmd_address >> 16) == MDIO_MMD_PCS) &&
-           ((mmd_address & 0xffff) == MDIO_CTRL2)) {
-               struct phy_device *phydev = pdata->phydev;
-
-               if (mmd_data & MDIO_PCS_CTRL2_TYPE) {
-                       /* KX mode */
-                       if (phydev->supported & SUPPORTED_1000baseKX_Full)
-                               xgbe_set_gmii_speed(pdata);
-                       else
-                               xgbe_set_gmii_2500_speed(pdata);
-               } else {
-                       /* KR mode */
-                       xgbe_set_xgmii_speed(pdata);
-               }
-       }
-
        /* The PCS registers are accessed using mmio. The underlying APB3
         * management interface uses indirect addressing to access the MMD
         * register sets. This requires accessing of the PCS register in two
@@ -1322,7 +1308,8 @@ static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata)
        for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
                switch (ets->tc_tsa[i]) {
                case IEEE_8021QAZ_TSA_STRICT:
-                       DBGPR("  TC%u using SP\n", i);
+                       netif_dbg(pdata, drv, pdata->netdev,
+                                 "TC%u using SP\n", i);
                        XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
                                               MTL_TSA_SP);
                        break;
@@ -1330,7 +1317,8 @@ static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata)
                        weight = total_weight * ets->tc_tx_bw[i] / 100;
                        weight = clamp(weight, min_weight, total_weight);
 
-                       DBGPR("  TC%u using DWRR (weight %u)\n", i, weight);
+                       netif_dbg(pdata, drv, pdata->netdev,
+                                 "TC%u using DWRR (weight %u)\n", i, weight);
                        XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
                                               MTL_TSA_ETS);
                        XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW,
@@ -1359,7 +1347,8 @@ static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata)
                }
                mask &= 0xff;
 
-               DBGPR("  TC%u PFC mask=%#x\n", tc, mask);
+               netif_dbg(pdata, drv, pdata->netdev, "TC%u PFC mask=%#x\n",
+                         tc, mask);
                reg = MTL_TCPM0R + (MTL_TCPM_INC * (tc / MTL_TCPM_TC_PER_REG));
                reg_val = XGMAC_IOREAD(pdata, reg);
 
@@ -1457,8 +1446,9 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
        /* Create a context descriptor if this is a TSO packet */
        if (tso_context || vlan_context) {
                if (tso_context) {
-                       DBGPR("  TSO context descriptor, mss=%u\n",
-                             packet->mss);
+                       netif_dbg(pdata, tx_queued, pdata->netdev,
+                                 "TSO context descriptor, mss=%u\n",
+                                 packet->mss);
 
                        /* Set the MSS size */
                        XGMAC_SET_BITS_LE(rdesc->desc2, TX_CONTEXT_DESC2,
@@ -1476,8 +1466,9 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
                }
 
                if (vlan_context) {
-                       DBGPR("  VLAN context descriptor, ctag=%u\n",
-                             packet->vlan_ctag);
+                       netif_dbg(pdata, tx_queued, pdata->netdev,
+                                 "VLAN context descriptor, ctag=%u\n",
+                                 packet->vlan_ctag);
 
                        /* Mark it as a CONTEXT descriptor */
                        XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
@@ -1533,6 +1524,8 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
                                  packet->tcp_payload_len);
                XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPHDRLEN,
                                  packet->tcp_header_len / 4);
+
+               pdata->ext_stats.tx_tso_packets++;
        } else {
                /* Enable CRC and Pad Insertion */
                XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0);
@@ -1594,9 +1587,9 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
        rdesc = rdata->rdesc;
        XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
 
-#ifdef XGMAC_ENABLE_TX_DESC_DUMP
-       xgbe_dump_tx_desc(ring, start_index, packet->rdesc_count, 1);
-#endif
+       if (netif_msg_tx_queued(pdata))
+               xgbe_dump_tx_desc(pdata, ring, start_index,
+                                 packet->rdesc_count, 1);
 
        /* Make sure ownership is written to the descriptor */
        dma_wmb();
@@ -1618,11 +1611,12 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
 
 static int xgbe_dev_read(struct xgbe_channel *channel)
 {
+       struct xgbe_prv_data *pdata = channel->pdata;
        struct xgbe_ring *ring = channel->rx_ring;
        struct xgbe_ring_data *rdata;
        struct xgbe_ring_desc *rdesc;
        struct xgbe_packet_data *packet = &ring->packet_data;
-       struct net_device *netdev = channel->pdata->netdev;
+       struct net_device *netdev = pdata->netdev;
        unsigned int err, etlt, l34t;
 
        DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur);
@@ -1637,9 +1631,8 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
        /* Make sure descriptor fields are read after reading the OWN bit */
        dma_rmb();
 
-#ifdef XGMAC_ENABLE_RX_DESC_DUMP
-       xgbe_dump_rx_desc(ring, rdesc, ring->cur);
-#endif
+       if (netif_msg_rx_status(pdata))
+               xgbe_dump_rx_desc(pdata, ring, ring->cur);
 
        if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT)) {
                /* Timestamp Context Descriptor */
@@ -1661,9 +1654,12 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
                               CONTEXT_NEXT, 1);
 
        /* Get the header length */
-       if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD))
+       if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) {
                rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2,
                                                      RX_NORMAL_DESC2, HL);
+               if (rdata->rx.hdr_len)
+                       pdata->ext_stats.rx_split_header_packets++;
+       }
 
        /* Get the RSS hash */
        if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, RSV)) {
@@ -1700,14 +1696,14 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
                       INCOMPLETE, 0);
 
        /* Set checksum done indicator as appropriate */
-       if (channel->pdata->netdev->features & NETIF_F_RXCSUM)
+       if (netdev->features & NETIF_F_RXCSUM)
                XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
                               CSUM_DONE, 1);
 
        /* Check for errors (only valid in last descriptor) */
        err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES);
        etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT);
-       DBGPR("  err=%u, etlt=%#x\n", err, etlt);
+       netif_dbg(pdata, rx_status, netdev, "err=%u, etlt=%#x\n", err, etlt);
 
        if (!err || !etlt) {
                /* No error if err is 0 or etlt is 0 */
@@ -1718,7 +1714,8 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
                        packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0,
                                                              RX_NORMAL_DESC0,
                                                              OVT);
-                       DBGPR("  vlan-ctag=0x%04x\n", packet->vlan_ctag);
+                       netif_dbg(pdata, rx_status, netdev, "vlan-ctag=%#06x\n",
+                                 packet->vlan_ctag);
                }
        } else {
                if ((etlt == 0x05) || (etlt == 0x06))
@@ -2026,9 +2023,9 @@ static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
        for (i = 0; i < pdata->tx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo_size);
 
-       netdev_notice(pdata->netdev,
-                     "%d Tx hardware queues, %d byte fifo per queue\n",
-                     pdata->tx_q_count, ((fifo_size + 1) * 256));
+       netif_info(pdata, drv, pdata->netdev,
+                  "%d Tx hardware queues, %d byte fifo per queue\n",
+                  pdata->tx_q_count, ((fifo_size + 1) * 256));
 }
 
 static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
@@ -2042,9 +2039,9 @@ static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
        for (i = 0; i < pdata->rx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo_size);
 
-       netdev_notice(pdata->netdev,
-                     "%d Rx hardware queues, %d byte fifo per queue\n",
-                     pdata->rx_q_count, ((fifo_size + 1) * 256));
+       netif_info(pdata, drv, pdata->netdev,
+                  "%d Rx hardware queues, %d byte fifo per queue\n",
+                  pdata->rx_q_count, ((fifo_size + 1) * 256));
 }
 
 static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
@@ -2063,14 +2060,16 @@ static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
 
        for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
                for (j = 0; j < qptc; j++) {
-                       DBGPR("  TXq%u mapped to TC%u\n", queue, i);
+                       netif_dbg(pdata, drv, pdata->netdev,
+                                 "TXq%u mapped to TC%u\n", queue, i);
                        XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
                                               Q2TCMAP, i);
                        pdata->q2tc_map[queue++] = i;
                }
 
                if (i < qptc_extra) {
-                       DBGPR("  TXq%u mapped to TC%u\n", queue, i);
+                       netif_dbg(pdata, drv, pdata->netdev,
+                                 "TXq%u mapped to TC%u\n", queue, i);
                        XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
                                               Q2TCMAP, i);
                        pdata->q2tc_map[queue++] = i;
@@ -2088,13 +2087,15 @@ static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
        for (i = 0, prio = 0; i < prio_queues;) {
                mask = 0;
                for (j = 0; j < ppq; j++) {
-                       DBGPR("  PRIO%u mapped to RXq%u\n", prio, i);
+                       netif_dbg(pdata, drv, pdata->netdev,
+                                 "PRIO%u mapped to RXq%u\n", prio, i);
                        mask |= (1 << prio);
                        pdata->prio2q_map[prio++] = i;
                }
 
                if (i < ppq_extra) {
-                       DBGPR("  PRIO%u mapped to RXq%u\n", prio, i);
+                       netif_dbg(pdata, drv, pdata->netdev,
+                                 "PRIO%u mapped to RXq%u\n", prio, i);
                        mask |= (1 << prio);
                        pdata->prio2q_map[prio++] = i;
                }
index 9fd6c69a8bac3c77d1c0c6e99eb4f3644561f78a..1e9c28d19ef88ccd4e0c8b45bf7ee9351c03935c 100644 (file)
@@ -183,9 +183,10 @@ static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
                        channel->rx_ring = rx_ring++;
                }
 
-               DBGPR("  %s: queue=%u, dma_regs=%p, dma_irq=%d, tx=%p, rx=%p\n",
-                     channel->name, channel->queue_index, channel->dma_regs,
-                     channel->dma_irq, channel->tx_ring, channel->rx_ring);
+               netif_dbg(pdata, drv, pdata->netdev,
+                         "%s: dma_regs=%p, dma_irq=%d, tx=%p, rx=%p\n",
+                         channel->name, channel->dma_regs, channel->dma_irq,
+                         channel->tx_ring, channel->rx_ring);
        }
 
        pdata->channel = channel_mem;
@@ -235,7 +236,8 @@ static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
        struct xgbe_prv_data *pdata = channel->pdata;
 
        if (count > xgbe_tx_avail_desc(ring)) {
-               DBGPR("  Tx queue stopped, not enough descriptors available\n");
+               netif_info(pdata, drv, pdata->netdev,
+                          "Tx queue stopped, not enough descriptors available\n");
                netif_stop_subqueue(pdata->netdev, channel->queue_index);
                ring->tx.queue_stopped = 1;
 
@@ -330,7 +332,7 @@ static irqreturn_t xgbe_isr(int irq, void *data)
        if (!dma_isr)
                goto isr_done;
 
-       DBGPR("  DMA_ISR = %08x\n", dma_isr);
+       netif_dbg(pdata, intr, pdata->netdev, "DMA_ISR=%#010x\n", dma_isr);
 
        for (i = 0; i < pdata->channel_count; i++) {
                if (!(dma_isr & (1 << i)))
@@ -339,7 +341,8 @@ static irqreturn_t xgbe_isr(int irq, void *data)
                channel = pdata->channel + i;
 
                dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
-               DBGPR("  DMA_CH%u_ISR = %08x\n", i, dma_ch_isr);
+               netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n",
+                         i, dma_ch_isr);
 
                /* The TI or RI interrupt bits may still be set even if using
                 * per channel DMA interrupts. Check to be sure those are not
@@ -386,8 +389,6 @@ static irqreturn_t xgbe_isr(int irq, void *data)
                }
        }
 
-       DBGPR("  DMA_ISR = %08x\n", XGMAC_IOREAD(pdata, DMA_ISR));
-
 isr_done:
        return IRQ_HANDLED;
 }
@@ -436,43 +437,61 @@ static void xgbe_tx_timer(unsigned long data)
        DBGPR("<--xgbe_tx_timer\n");
 }
 
-static void xgbe_init_tx_timers(struct xgbe_prv_data *pdata)
+static void xgbe_service(struct work_struct *work)
+{
+       struct xgbe_prv_data *pdata = container_of(work,
+                                                  struct xgbe_prv_data,
+                                                  service_work);
+
+       pdata->phy_if.phy_status(pdata);
+}
+
+static void xgbe_service_timer(unsigned long data)
+{
+       struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
+
+       schedule_work(&pdata->service_work);
+
+       mod_timer(&pdata->service_timer, jiffies + HZ);
+}
+
+static void xgbe_init_timers(struct xgbe_prv_data *pdata)
 {
        struct xgbe_channel *channel;
        unsigned int i;
 
-       DBGPR("-->xgbe_init_tx_timers\n");
+       setup_timer(&pdata->service_timer, xgbe_service_timer,
+                   (unsigned long)pdata);
 
        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                if (!channel->tx_ring)
                        break;
 
-               DBGPR("  %s adding tx timer\n", channel->name);
                setup_timer(&channel->tx_timer, xgbe_tx_timer,
                            (unsigned long)channel);
        }
+}
 
-       DBGPR("<--xgbe_init_tx_timers\n");
+static void xgbe_start_timers(struct xgbe_prv_data *pdata)
+{
+       mod_timer(&pdata->service_timer, jiffies + HZ);
 }
 
-static void xgbe_stop_tx_timers(struct xgbe_prv_data *pdata)
+static void xgbe_stop_timers(struct xgbe_prv_data *pdata)
 {
        struct xgbe_channel *channel;
        unsigned int i;
 
-       DBGPR("-->xgbe_stop_tx_timers\n");
+       del_timer_sync(&pdata->service_timer);
 
        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                if (!channel->tx_ring)
                        break;
 
-               DBGPR("  %s deleting tx timer\n", channel->name);
                del_timer_sync(&channel->tx_timer);
        }
-
-       DBGPR("<--xgbe_stop_tx_timers\n");
 }
 
 void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
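
The new xgbe_service()/xgbe_service_timer() pair replaces purpose-built polling with one self-rearming one-second timer that punts the actual work (the PHY status poll) to process context. A sketch of that shape in plain C, with a tick loop standing in for the kernel's timer wheel and workqueue (all names here are illustrative):

#include <stdio.h>

struct service_ctx {
        int ticks_until_fire;   /* models the pending timer (jiffies + HZ) */
        int work_pending;       /* models a queued work item */
};

/* In the driver this body is xgbe_service(): it runs in process
 * context and calls phy_if.phy_status(). */
static void service_work(struct service_ctx *ctx)
{
        (void)ctx;
        printf("service work: poll PHY status\n");
}

/* Models xgbe_service_timer(): punt the work, then re-arm one tick out. */
static void service_timer(struct service_ctx *ctx)
{
        ctx->work_pending = 1;          /* schedule_work() */
        ctx->ticks_until_fire = 1;      /* mod_timer(..., jiffies + HZ) */
}

int main(void)
{
        struct service_ctx ctx = { .ticks_until_fire = 1 };

        for (int tick = 0; tick < 3; tick++) {  /* stand-in timer wheel */
                if (--ctx.ticks_until_fire == 0)
                        service_timer(&ctx);
                if (ctx.work_pending) {         /* stand-in workqueue */
                        ctx.work_pending = 0;
                        service_work(&ctx);
                }
        }
        return 0;
}
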
@@ -512,6 +531,7 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
                                                RXFIFOSIZE);
        hw_feat->tx_fifo_size  = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
                                                TXFIFOSIZE);
+       hw_feat->adv_ts_hi     = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADVTHWORD);
        hw_feat->dma_width     = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
        hw_feat->dcb           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
        hw_feat->sph           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
@@ -759,112 +779,12 @@ static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
        DBGPR("<--xgbe_free_rx_data\n");
 }
 
-static void xgbe_adjust_link(struct net_device *netdev)
-{
-       struct xgbe_prv_data *pdata = netdev_priv(netdev);
-       struct xgbe_hw_if *hw_if = &pdata->hw_if;
-       struct phy_device *phydev = pdata->phydev;
-       int new_state = 0;
-
-       if (!phydev)
-               return;
-
-       if (phydev->link) {
-               /* Flow control support */
-               if (pdata->pause_autoneg) {
-                       if (phydev->pause || phydev->asym_pause) {
-                               pdata->tx_pause = 1;
-                               pdata->rx_pause = 1;
-                       } else {
-                               pdata->tx_pause = 0;
-                               pdata->rx_pause = 0;
-                       }
-               }
-
-               if (pdata->tx_pause != pdata->phy_tx_pause) {
-                       hw_if->config_tx_flow_control(pdata);
-                       pdata->phy_tx_pause = pdata->tx_pause;
-               }
-
-               if (pdata->rx_pause != pdata->phy_rx_pause) {
-                       hw_if->config_rx_flow_control(pdata);
-                       pdata->phy_rx_pause = pdata->rx_pause;
-               }
-
-               /* Speed support */
-               if (phydev->speed != pdata->phy_speed) {
-                       new_state = 1;
-
-                       switch (phydev->speed) {
-                       case SPEED_10000:
-                               hw_if->set_xgmii_speed(pdata);
-                               break;
-
-                       case SPEED_2500:
-                               hw_if->set_gmii_2500_speed(pdata);
-                               break;
-
-                       case SPEED_1000:
-                               hw_if->set_gmii_speed(pdata);
-                               break;
-                       }
-                       pdata->phy_speed = phydev->speed;
-               }
-
-               if (phydev->link != pdata->phy_link) {
-                       new_state = 1;
-                       pdata->phy_link = 1;
-               }
-       } else if (pdata->phy_link) {
-               new_state = 1;
-               pdata->phy_link = 0;
-               pdata->phy_speed = SPEED_UNKNOWN;
-       }
-
-       if (new_state)
-               phy_print_status(phydev);
-}
-
 static int xgbe_phy_init(struct xgbe_prv_data *pdata)
 {
-       struct net_device *netdev = pdata->netdev;
-       struct phy_device *phydev = pdata->phydev;
-       int ret;
-
        pdata->phy_link = -1;
        pdata->phy_speed = SPEED_UNKNOWN;
-       pdata->phy_tx_pause = pdata->tx_pause;
-       pdata->phy_rx_pause = pdata->rx_pause;
 
-       ret = phy_connect_direct(netdev, phydev, &xgbe_adjust_link,
-                                pdata->phy_mode);
-       if (ret) {
-               netdev_err(netdev, "phy_connect_direct failed\n");
-               return ret;
-       }
-
-       if (!phydev->drv || (phydev->drv->phy_id == 0)) {
-               netdev_err(netdev, "phy_id not valid\n");
-               ret = -ENODEV;
-               goto err_phy_connect;
-       }
-       DBGPR("  phy_connect_direct succeeded for PHY %s, link=%d\n",
-             dev_name(&phydev->dev), phydev->link);
-
-       return 0;
-
-err_phy_connect:
-       phy_disconnect(phydev);
-
-       return ret;
-}
-
-static void xgbe_phy_exit(struct xgbe_prv_data *pdata)
-{
-       if (!pdata->phydev)
-               return;
-
-       phy_disconnect(pdata->phydev);
+       return pdata->phy_if.phy_reset(pdata);
 }
 
 int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
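
Removing xgbe_adjust_link() and the direct phylib calls works because the PHY logic now sits behind a small ops table (pdata->phy_if) filled in by xgbe_init_function_ptrs_phy(). A minimal sketch of that function-pointer indirection (types and names here are simplified stand-ins, not the driver's exact definitions):

#include <stdio.h>

struct prv_data;

/* An ops table: callers bind to the interface, not an implementation. */
struct phy_if {
        int  (*phy_reset)(struct prv_data *pdata);
        void (*phy_status)(struct prv_data *pdata);
};

struct prv_data {
        struct phy_if phy_if;
        int phy_link;
};

static int amd_phy_reset(struct prv_data *pdata)
{
        pdata->phy_link = -1;   /* unknown until the next status poll */
        return 0;
}

static void amd_phy_status(struct prv_data *pdata)
{
        printf("polling link state (last=%d)\n", pdata->phy_link);
}

/* One place wires the table, as xgbe_init_function_ptrs_phy() does. */
static void init_function_ptrs_phy(struct phy_if *phy_if)
{
        phy_if->phy_reset = amd_phy_reset;
        phy_if->phy_status = amd_phy_status;
}

int main(void)
{
        struct prv_data pdata = { { 0 }, 0 };

        init_function_ptrs_phy(&pdata.phy_if);
        if (pdata.phy_if.phy_reset(&pdata))
                return 1;
        pdata.phy_if.phy_status(&pdata);
        return 0;
}
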
@@ -889,13 +809,14 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
 
        netif_tx_stop_all_queues(netdev);
 
+       xgbe_stop_timers(pdata);
+       flush_workqueue(pdata->dev_workqueue);
+
        hw_if->powerdown_tx(pdata);
        hw_if->powerdown_rx(pdata);
 
        xgbe_napi_disable(pdata, 0);
 
-       phy_stop(pdata->phydev);
-
        pdata->power_down = 1;
 
        spin_unlock_irqrestore(&pdata->lock, flags);
@@ -924,8 +845,6 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
 
        pdata->power_down = 0;
 
-       phy_start(pdata->phydev);
-
        xgbe_napi_enable(pdata, 0);
 
        hw_if->powerup_tx(pdata);
@@ -936,6 +855,8 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
 
        netif_tx_start_all_queues(netdev);
 
+       xgbe_start_timers(pdata);
+
        spin_unlock_irqrestore(&pdata->lock, flags);
 
        DBGPR("<--xgbe_powerup\n");
@@ -946,6 +867,7 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
 static int xgbe_start(struct xgbe_prv_data *pdata)
 {
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
+       struct xgbe_phy_if *phy_if = &pdata->phy_if;
        struct net_device *netdev = pdata->netdev;
        int ret;
 
@@ -953,7 +875,9 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
 
        hw_if->init(pdata);
 
-       phy_start(pdata->phydev);
+       ret = phy_if->phy_start(pdata);
+       if (ret)
+               goto err_phy;
 
        xgbe_napi_enable(pdata, 1);
 
@@ -964,10 +888,11 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
        hw_if->enable_tx(pdata);
        hw_if->enable_rx(pdata);
 
-       xgbe_init_tx_timers(pdata);
-
        netif_tx_start_all_queues(netdev);
 
+       xgbe_start_timers(pdata);
+       schedule_work(&pdata->service_work);
+
        DBGPR("<--xgbe_start\n");
 
        return 0;
@@ -975,8 +900,9 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
 err_napi:
        xgbe_napi_disable(pdata, 1);
 
-       phy_stop(pdata->phydev);
+       phy_if->phy_stop(pdata);
 
+err_phy:
        hw_if->exit(pdata);
 
        return ret;
@@ -985,6 +911,7 @@ err_napi:
 static void xgbe_stop(struct xgbe_prv_data *pdata)
 {
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
+       struct xgbe_phy_if *phy_if = &pdata->phy_if;
        struct xgbe_channel *channel;
        struct net_device *netdev = pdata->netdev;
        struct netdev_queue *txq;
@@ -994,7 +921,8 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
 
        netif_tx_stop_all_queues(netdev);
 
-       xgbe_stop_tx_timers(pdata);
+       xgbe_stop_timers(pdata);
+       flush_workqueue(pdata->dev_workqueue);
 
        hw_if->disable_tx(pdata);
        hw_if->disable_rx(pdata);
@@ -1003,7 +931,7 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
 
        xgbe_napi_disable(pdata, 1);
 
-       phy_stop(pdata->phydev);
+       phy_if->phy_stop(pdata);
 
        hw_if->exit(pdata);
 
@@ -1374,7 +1302,7 @@ static int xgbe_open(struct net_device *netdev)
        ret = clk_prepare_enable(pdata->sysclk);
        if (ret) {
                netdev_alert(netdev, "dma clk_prepare_enable failed\n");
-               goto err_phy_init;
+               return ret;
        }
 
        ret = clk_prepare_enable(pdata->ptpclk);
@@ -1399,14 +1327,17 @@ static int xgbe_open(struct net_device *netdev)
        if (ret)
                goto err_channels;
 
-       /* Initialize the device restart and Tx timestamp work struct */
+       INIT_WORK(&pdata->service_work, xgbe_service);
        INIT_WORK(&pdata->restart_work, xgbe_restart);
        INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
+       xgbe_init_timers(pdata);
 
        ret = xgbe_start(pdata);
        if (ret)
                goto err_rings;
 
+       clear_bit(XGBE_DOWN, &pdata->dev_state);
+
        DBGPR("<--xgbe_open\n");
 
        return 0;
@@ -1423,9 +1354,6 @@ err_ptpclk:
 err_sysclk:
        clk_disable_unprepare(pdata->sysclk);
 
-err_phy_init:
-       xgbe_phy_exit(pdata);
-
        return ret;
 }
 
@@ -1449,8 +1377,7 @@ static int xgbe_close(struct net_device *netdev)
        clk_disable_unprepare(pdata->ptpclk);
        clk_disable_unprepare(pdata->sysclk);
 
-       /* Release the phy */
-       xgbe_phy_exit(pdata);
+       set_bit(XGBE_DOWN, &pdata->dev_state);
 
        DBGPR("<--xgbe_close\n");
 
@@ -1478,7 +1405,8 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
        ret = NETDEV_TX_OK;
 
        if (skb->len == 0) {
-               netdev_err(netdev, "empty skb received from stack\n");
+               netif_err(pdata, tx_err, netdev,
+                         "empty skb received from stack\n");
                dev_kfree_skb_any(skb);
                goto tx_netdev_return;
        }
@@ -1494,7 +1422,8 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
 
        ret = xgbe_prep_tso(skb, packet);
        if (ret) {
-               netdev_err(netdev, "error processing TSO packet\n");
+               netif_err(pdata, tx_err, netdev,
+                         "error processing TSO packet\n");
                dev_kfree_skb_any(skb);
                goto tx_netdev_return;
        }
@@ -1513,9 +1442,8 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
        /* Configure required descriptor fields for transmission */
        hw_if->dev_xmit(channel);
 
-#ifdef XGMAC_ENABLE_TX_PKT_DUMP
-       xgbe_print_pkt(netdev, skb, true);
-#endif
+       if (netif_msg_pktdata(pdata))
+               xgbe_print_pkt(netdev, skb, true);
 
        /* Stop the queue in advance if there may not be enough descriptors */
        xgbe_maybe_stop_tx_queue(channel, ring, XGBE_TX_MAX_DESCS);
@@ -1710,7 +1638,8 @@ static int xgbe_setup_tc(struct net_device *netdev, u8 tc)
                               (pdata->q2tc_map[queue] == i))
                                queue++;
 
-                       DBGPR("  TC%u using TXq%u-%u\n", i, offset, queue - 1);
+                       netif_dbg(pdata, drv, netdev, "TC%u using TXq%u-%u\n",
+                                 i, offset, queue - 1);
                        netdev_set_tc_queue(netdev, i, queue - offset, offset);
                        offset = queue;
                }
@@ -1820,9 +1749,10 @@ static void xgbe_rx_refresh(struct xgbe_channel *channel)
                          lower_32_bits(rdata->rdesc_dma));
 }
 
-static struct sk_buff *xgbe_create_skb(struct napi_struct *napi,
+static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
+                                      struct napi_struct *napi,
                                       struct xgbe_ring_data *rdata,
-                                      unsigned int *len)
+                                      unsigned int len)
 {
        struct sk_buff *skb;
        u8 *packet;
@@ -1832,14 +1762,31 @@ static struct sk_buff *xgbe_create_skb(struct napi_struct *napi,
        if (!skb)
                return NULL;
 
+       /* Start with the header buffer which may contain just the header
+        * or the header plus data
+        */
+       dma_sync_single_for_cpu(pdata->dev, rdata->rx.hdr.dma,
+                               rdata->rx.hdr.dma_len, DMA_FROM_DEVICE);
+
        packet = page_address(rdata->rx.hdr.pa.pages) +
                 rdata->rx.hdr.pa.pages_offset;
-       copy_len = (rdata->rx.hdr_len) ? rdata->rx.hdr_len : *len;
+       copy_len = (rdata->rx.hdr_len) ? rdata->rx.hdr_len : len;
        copy_len = min(rdata->rx.hdr.dma_len, copy_len);
        skb_copy_to_linear_data(skb, packet, copy_len);
        skb_put(skb, copy_len);
 
-       *len -= copy_len;
+       len -= copy_len;
+       if (len) {
+               /* Add the remaining data as a frag */
+               dma_sync_single_for_cpu(pdata->dev, rdata->rx.buf.dma,
+                                       rdata->rx.buf.dma_len, DMA_FROM_DEVICE);
+
+               skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+                               rdata->rx.buf.pa.pages,
+                               rdata->rx.buf.pa.pages_offset,
+                               len, rdata->rx.buf.dma_len);
+               rdata->rx.buf.pa.pages = NULL;
+       }
 
        return skb;
 }
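
xgbe_create_skb() now receives the full descriptor length and performs the split itself: the (small) header portion is copied into the skb's linear area, and whatever remains is attached as a page fragment so the payload is never memcpy'd. A userspace sketch of that copy-then-frag decision (plain arrays stand in for the DMA-synced pages):

#include <stdio.h>
#include <string.h>

#define HDR_DMA_LEN 16  /* illustrative header-buffer size */

struct fake_skb {
        unsigned char linear[HDR_DMA_LEN];
        size_t linear_len;      /* bytes copied into the skb head */
        size_t frag_len;        /* bytes attached via skb_add_rx_frag() */
};

/* Copy at most one header buffer into the linear area; the remainder,
 * if any, stays in place and is attached as a page fragment. */
static void create_skb(struct fake_skb *skb, const unsigned char *hdr_buf,
                       size_t hdr_len, size_t total_len)
{
        size_t copy_len = hdr_len ? hdr_len : total_len;

        if (copy_len > HDR_DMA_LEN)
                copy_len = HDR_DMA_LEN;

        memcpy(skb->linear, hdr_buf, copy_len);
        skb->linear_len = copy_len;
        skb->frag_len = total_len - copy_len;   /* zero-copy payload */
}

int main(void)
{
        unsigned char hdr_buf[HDR_DMA_LEN] = { 0 };
        struct fake_skb skb = { .linear_len = 0 };

        create_skb(&skb, hdr_buf, 14 /* split-header length */, 1500);
        printf("linear=%zu frag=%zu\n", skb.linear_len, skb.frag_len);
        return 0;
}
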
@@ -1877,9 +1824,8 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
                 * bit */
                dma_rmb();
 
-#ifdef XGMAC_ENABLE_TX_DESC_DUMP
-               xgbe_dump_tx_desc(ring, ring->dirty, 1, 0);
-#endif
+               if (netif_msg_tx_done(pdata))
+                       xgbe_dump_tx_desc(pdata, ring, ring->dirty, 1, 0);
 
                if (hw_if->is_last_desc(rdesc)) {
                        tx_packets += rdata->tx.packets;
@@ -1922,7 +1868,7 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
        struct sk_buff *skb;
        struct skb_shared_hwtstamps *hwtstamps;
        unsigned int incomplete, error, context_next, context;
-       unsigned int len, put_len, max_len;
+       unsigned int len, rdesc_len, max_len;
        unsigned int received = 0;
        int packet_count = 0;
 
@@ -1932,6 +1878,9 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
        if (!ring)
                return 0;
 
+       incomplete = 0;
+       context_next = 0;
+
        napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
 
        rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
@@ -1941,15 +1890,11 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
 
                /* First time in loop see if we need to restore state */
                if (!received && rdata->state_saved) {
-                       incomplete = rdata->state.incomplete;
-                       context_next = rdata->state.context_next;
                        skb = rdata->state.skb;
                        error = rdata->state.error;
                        len = rdata->state.len;
                } else {
                        memset(packet, 0, sizeof(*packet));
-                       incomplete = 0;
-                       context_next = 0;
                        skb = NULL;
                        error = 0;
                        len = 0;
@@ -1983,29 +1928,23 @@ read_again:
 
                if (error || packet->errors) {
                        if (packet->errors)
-                               DBGPR("Error in received packet\n");
+                               netif_err(pdata, rx_err, netdev,
+                                         "error in received packet\n");
                        dev_kfree_skb(skb);
                        goto next_packet;
                }
 
                if (!context) {
-                       put_len = rdata->rx.len - len;
-                       len += put_len;
-
-                       if (!skb) {
-                               dma_sync_single_for_cpu(pdata->dev,
-                                                       rdata->rx.hdr.dma,
-                                                       rdata->rx.hdr.dma_len,
-                                                       DMA_FROM_DEVICE);
-
-                               skb = xgbe_create_skb(napi, rdata, &put_len);
-                               if (!skb) {
+                       /* Length is cumulative, get this descriptor's length */
+                       rdesc_len = rdata->rx.len - len;
+                       len += rdesc_len;
+
+                       if (rdesc_len && !skb) {
+                               skb = xgbe_create_skb(pdata, napi, rdata,
+                                                     rdesc_len);
+                               if (!skb)
                                        error = 1;
-                                       goto skip_data;
-                               }
-                       }
-
-                       if (put_len) {
+                       } else if (rdesc_len) {
                                dma_sync_single_for_cpu(pdata->dev,
                                                        rdata->rx.buf.dma,
                                                        rdata->rx.buf.dma_len,
@@ -2014,12 +1953,12 @@ read_again:
                                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
                                                rdata->rx.buf.pa.pages,
                                                rdata->rx.buf.pa.pages_offset,
-                                               put_len, rdata->rx.buf.dma_len);
+                                               rdesc_len,
+                                               rdata->rx.buf.dma_len);
                                rdata->rx.buf.pa.pages = NULL;
                        }
                }
 
-skip_data:
                if (incomplete || context_next)
                        goto read_again;
 
@@ -2033,14 +1972,14 @@ skip_data:
                        max_len += VLAN_HLEN;
 
                if (skb->len > max_len) {
-                       DBGPR("packet length exceeds configured MTU\n");
+                       netif_err(pdata, rx_err, netdev,
+                                 "packet length exceeds configured MTU\n");
                        dev_kfree_skb(skb);
                        goto next_packet;
                }
 
-#ifdef XGMAC_ENABLE_RX_PKT_DUMP
-               xgbe_print_pkt(netdev, skb, false);
-#endif
+               if (netif_msg_pktdata(pdata))
+                       xgbe_print_pkt(netdev, skb, false);
 
                skb_checksum_none_assert(skb);
                if (XGMAC_GET_BITS(packet->attributes,
@@ -2072,7 +2011,6 @@ skip_data:
                skb_record_rx_queue(skb, channel->queue_index);
                skb_mark_napi_id(skb, napi);
 
-               netdev->last_rx = jiffies;
                napi_gro_receive(napi, skb);
 
 next_packet:
@@ -2083,8 +2021,6 @@ next_packet:
        if (received && (incomplete || context_next)) {
                rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
                rdata->state_saved = 1;
-               rdata->state.incomplete = incomplete;
-               rdata->state.context_next = context_next;
                rdata->state.skb = skb;
                rdata->state.len = len;
                rdata->state.error = error;
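
Dropping incomplete/context_next from the saved state is safe because both are re-derived from the current descriptor on every pass; only skb, len and error genuinely span a budget boundary. A sketch of that save/restore shape around a budget-limited poll loop (types are illustrative):

#include <stdio.h>

struct rx_state {
        int state_saved;
        int len;        /* bytes accumulated so far for a spanning packet */
        int error;
};

/* Process up to 'budget' descriptors; a packet cut off by the budget
 * parks its progress in 'st' and resumes on the next call. */
static int rx_poll(struct rx_state *st, const int *desc_len, int ndesc,
                   int budget)
{
        int received = 0, len = 0, error = 0;

        if (st->state_saved) {          /* first iteration: restore */
                len = st->len;
                error = st->error;
                st->state_saved = 0;
        }

        while (received < budget && received < ndesc)
                len += desc_len[received++];

        if (received == budget && received < ndesc) {
                st->state_saved = 1;    /* park for the next poll */
                st->len = len;
                st->error = error;
        } else {
                printf("packet complete: %d bytes (error=%d)\n", len, error);
        }
        return received;
}

int main(void)
{
        const int desc_len[] = { 256, 256, 256 };
        struct rx_state st = { 0, 0, 0 };

        rx_poll(&st, desc_len, 3, 2);           /* budget hit: state saved */
        rx_poll(&st, desc_len + 2, 1, 2);       /* resumes, completes */
        return 0;
}
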
@@ -2165,8 +2101,8 @@ static int xgbe_all_poll(struct napi_struct *napi, int budget)
        return processed;
 }
 
-void xgbe_dump_tx_desc(struct xgbe_ring *ring, unsigned int idx,
-                      unsigned int count, unsigned int flag)
+void xgbe_dump_tx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
+                      unsigned int idx, unsigned int count, unsigned int flag)
 {
        struct xgbe_ring_data *rdata;
        struct xgbe_ring_desc *rdesc;
@@ -2174,20 +2110,29 @@ void xgbe_dump_tx_desc(struct xgbe_ring *ring, unsigned int idx,
        while (count--) {
                rdata = XGBE_GET_DESC_DATA(ring, idx);
                rdesc = rdata->rdesc;
-               pr_alert("TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
-                        (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
-                        le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
-                        le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
+               netdev_dbg(pdata->netdev,
+                          "TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
+                          (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
+                          le32_to_cpu(rdesc->desc0),
+                          le32_to_cpu(rdesc->desc1),
+                          le32_to_cpu(rdesc->desc2),
+                          le32_to_cpu(rdesc->desc3));
                idx++;
        }
 }
 
-void xgbe_dump_rx_desc(struct xgbe_ring *ring, struct xgbe_ring_desc *desc,
+void xgbe_dump_rx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
                       unsigned int idx)
 {
-       pr_alert("RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n", idx,
-                le32_to_cpu(desc->desc0), le32_to_cpu(desc->desc1),
-                le32_to_cpu(desc->desc2), le32_to_cpu(desc->desc3));
+       struct xgbe_ring_data *rdata;
+       struct xgbe_ring_desc *rdesc;
+
+       rdata = XGBE_GET_DESC_DATA(ring, idx);
+       rdesc = rdata->rdesc;
+       netdev_dbg(pdata->netdev,
+                  "RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n",
+                  idx, le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
+                  le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
 }
 
 void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
@@ -2197,21 +2142,21 @@ void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
        unsigned char buffer[128];
        unsigned int i, j;
 
-       netdev_alert(netdev, "\n************** SKB dump ****************\n");
+       netdev_dbg(netdev, "\n************** SKB dump ****************\n");
 
-       netdev_alert(netdev, "%s packet of %d bytes\n",
-                    (tx_rx ? "TX" : "RX"), skb->len);
+       netdev_dbg(netdev, "%s packet of %d bytes\n",
+                  (tx_rx ? "TX" : "RX"), skb->len);
 
-       netdev_alert(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
-       netdev_alert(netdev, "Src MAC addr: %pM\n", eth->h_source);
-       netdev_alert(netdev, "Protocol: 0x%04hx\n", ntohs(eth->h_proto));
+       netdev_dbg(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
+       netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source);
+       netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto));
 
        for (i = 0, j = 0; i < skb->len;) {
                j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx",
                              buf[i++]);
 
                if ((i % 32) == 0) {
-                       netdev_alert(netdev, "  0x%04x: %s\n", i - 32, buffer);
+                       netdev_dbg(netdev, "  %#06x: %s\n", i - 32, buffer);
                        j = 0;
                } else if ((i % 16) == 0) {
                        buffer[j++] = ' ';
@@ -2221,7 +2166,7 @@ void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
                }
        }
        if (i % 32)
-               netdev_alert(netdev, "  0x%04x: %s\n", i - (i % 32), buffer);
+               netdev_dbg(netdev, "  %#06x: %s\n", i - (i % 32), buffer);
 
-       netdev_alert(netdev, "\n************** SKB dump ****************\n");
+       netdev_dbg(netdev, "\n************** SKB dump ****************\n");
 }
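
The dump loop above accumulates hex bytes into a 128-byte scratch buffer, flushing a line every 32 bytes and inserting a space every 16. The same loop as standalone C (sizes mirror the driver's; the explicit terminator after the separator is added for safety in this sketch):

#include <stdio.h>

static void print_pkt(const unsigned char *buf, unsigned int len)
{
        char buffer[128];
        unsigned int i, j;

        for (i = 0, j = 0; i < len;) {
                j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx",
                              buf[i++]);

                if ((i % 32) == 0) {            /* full line: flush */
                        printf("  %#06x: %s\n", i - 32, buffer);
                        j = 0;
                } else if ((i % 16) == 0) {     /* mid-line separator */
                        buffer[j++] = ' ';
                        buffer[j] = '\0';
                }
        }
        if (i % 32)                             /* trailing partial line */
                printf("  %#06x: %s\n", i - (i % 32), buffer);
}

int main(void)
{
        unsigned char pkt[40];

        for (unsigned int i = 0; i < sizeof(pkt); i++)
                pkt[i] = (unsigned char)i;
        print_pkt(pkt, sizeof(pkt));
        return 0;
}
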
index 5f149e8ee20f0fa1a0878c37f2d44427fbe75f7f..59e090e95c0e8648cfc27cac1fc63981c102df11 100644 (file)
@@ -133,6 +133,12 @@ struct xgbe_stats {
          offsetof(struct xgbe_prv_data, mmc_stats._var),       \
        }
 
+#define XGMAC_EXT_STAT(_string, _var)                          \
+       { _string,                                              \
+         FIELD_SIZEOF(struct xgbe_ext_stats, _var),            \
+         offsetof(struct xgbe_prv_data, ext_stats._var),       \
+       }
+
 static const struct xgbe_stats xgbe_gstring_stats[] = {
        XGMAC_MMC_STAT("tx_bytes", txoctetcount_gb),
        XGMAC_MMC_STAT("tx_packets", txframecount_gb),
@@ -140,6 +146,7 @@ static const struct xgbe_stats xgbe_gstring_stats[] = {
        XGMAC_MMC_STAT("tx_broadcast_packets", txbroadcastframes_gb),
        XGMAC_MMC_STAT("tx_multicast_packets", txmulticastframes_gb),
        XGMAC_MMC_STAT("tx_vlan_packets", txvlanframes_g),
+       XGMAC_EXT_STAT("tx_tso_packets", tx_tso_packets),
        XGMAC_MMC_STAT("tx_64_byte_packets", tx64octets_gb),
        XGMAC_MMC_STAT("tx_65_to_127_byte_packets", tx65to127octets_gb),
        XGMAC_MMC_STAT("tx_128_to_255_byte_packets", tx128to255octets_gb),
@@ -171,6 +178,7 @@ static const struct xgbe_stats xgbe_gstring_stats[] = {
        XGMAC_MMC_STAT("rx_fifo_overflow_errors", rxfifooverflow),
        XGMAC_MMC_STAT("rx_watchdog_errors", rxwatchdogerror),
        XGMAC_MMC_STAT("rx_pause_frames", rxpauseframes),
+       XGMAC_EXT_STAT("rx_split_header_packets", rx_split_header_packets),
 };
 
 #define XGBE_STATS_COUNT       ARRAY_SIZE(xgbe_gstring_stats)
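
XGMAC_EXT_STAT works like the existing MMC macro: each table entry records a display string plus the size and offset of a member inside the private data, so the ethtool handlers can walk one array and fetch every counter generically. A standalone sketch of that table-driven pattern:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct ext_stats {
        uint64_t tx_tso_packets;
        uint64_t rx_split_header_packets;
};

struct prv_data {
        struct ext_stats ext_stats;
};

struct stat_desc {
        const char *name;
        size_t size;
        size_t offset;
};

/* Same shape as XGMAC_EXT_STAT: display string + sizeof + offsetof. */
#define EXT_STAT(_string, _var)                                 \
        { _string,                                              \
          sizeof(((struct ext_stats *)0)->_var),                \
          offsetof(struct prv_data, ext_stats._var) }

static const struct stat_desc stats[] = {
        EXT_STAT("tx_tso_packets", tx_tso_packets),
        EXT_STAT("rx_split_header_packets", rx_split_header_packets),
};

int main(void)
{
        struct prv_data pdata = { { 42, 7 } };

        for (size_t i = 0; i < sizeof(stats) / sizeof(stats[0]); i++) {
                uint64_t val = 0;

                /* Fetch through the recorded offset, as a generic
                 * get_ethtool_stats() handler would. */
                memcpy(&val, (char *)&pdata + stats[i].offset,
                       stats[i].size);
                printf("%s: %llu\n", stats[i].name,
                       (unsigned long long)val);
        }
        return 0;
}
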
@@ -239,9 +247,9 @@ static void xgbe_get_pauseparam(struct net_device *netdev,
 
        DBGPR("-->xgbe_get_pauseparam\n");
 
-       pause->autoneg = pdata->pause_autoneg;
-       pause->tx_pause = pdata->tx_pause;
-       pause->rx_pause = pdata->rx_pause;
+       pause->autoneg = pdata->phy.pause_autoneg;
+       pause->tx_pause = pdata->phy.tx_pause;
+       pause->rx_pause = pdata->phy.rx_pause;
 
        DBGPR("<--xgbe_get_pauseparam\n");
 }
@@ -250,7 +258,6 @@ static int xgbe_set_pauseparam(struct net_device *netdev,
                               struct ethtool_pauseparam *pause)
 {
        struct xgbe_prv_data *pdata = netdev_priv(netdev);
-       struct phy_device *phydev = pdata->phydev;
        int ret = 0;
 
        DBGPR("-->xgbe_set_pauseparam\n");
@@ -258,21 +265,26 @@ static int xgbe_set_pauseparam(struct net_device *netdev,
        DBGPR("  autoneg = %d, tx_pause = %d, rx_pause = %d\n",
              pause->autoneg, pause->tx_pause, pause->rx_pause);
 
-       pdata->pause_autoneg = pause->autoneg;
-       if (pause->autoneg) {
-               phydev->advertising |= ADVERTISED_Pause;
-               phydev->advertising |= ADVERTISED_Asym_Pause;
+       if (pause->autoneg && (pdata->phy.autoneg != AUTONEG_ENABLE))
+               return -EINVAL;
+
+       pdata->phy.pause_autoneg = pause->autoneg;
+       pdata->phy.tx_pause = pause->tx_pause;
+       pdata->phy.rx_pause = pause->rx_pause;
 
-       } else {
-               phydev->advertising &= ~ADVERTISED_Pause;
-               phydev->advertising &= ~ADVERTISED_Asym_Pause;
+       pdata->phy.advertising &= ~ADVERTISED_Pause;
+       pdata->phy.advertising &= ~ADVERTISED_Asym_Pause;
 
-               pdata->tx_pause = pause->tx_pause;
-               pdata->rx_pause = pause->rx_pause;
+       if (pause->rx_pause) {
+               pdata->phy.advertising |= ADVERTISED_Pause;
+               pdata->phy.advertising |= ADVERTISED_Asym_Pause;
        }
 
+       if (pause->tx_pause)
+               pdata->phy.advertising ^= ADVERTISED_Asym_Pause;
+
        if (netif_running(netdev))
-               ret = phy_start_aneg(phydev);
+               ret = pdata->phy_if.phy_config_aneg(pdata);
 
        DBGPR("<--xgbe_set_pauseparam\n");
 
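
The rewritten pause handling encodes the requested rx/tx pause into the IEEE 802.3 Pause/Asym_Pause advertisement bits: enabling rx sets both bits, and enabling tx then flips Asym_Pause. A standalone sketch of that encoding and the truth table it produces (bit positions here match the ethtool ABI, but treat the values as illustrative):

#include <stdio.h>

#define ADVERTISED_Pause        (1u << 13)
#define ADVERTISED_Asym_Pause   (1u << 14)

/* rx sets both bits, tx then flips Asym_Pause, exactly the sequence in
 * the rewritten xgbe_set_pauseparam(). */
static unsigned int pause_to_advertising(int rx_pause, int tx_pause)
{
        unsigned int adv = 0;

        if (rx_pause)
                adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
        if (tx_pause)
                adv ^= ADVERTISED_Asym_Pause;
        return adv;
}

int main(void)
{
        /* rx tx -> Pause Asym
         *  0  0     0     0   no pause advertised
         *  1  0     1     1   receive-only pause
         *  0  1     0     1   transmit-only pause
         *  1  1     1     0   symmetric pause */
        for (int rx = 0; rx <= 1; rx++)
                for (int tx = 0; tx <= 1; tx++)
                        printf("rx=%d tx=%d adv=%#x\n", rx, tx,
                               pause_to_advertising(rx, tx));
        return 0;
}
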
@@ -283,36 +295,39 @@ static int xgbe_get_settings(struct net_device *netdev,
                             struct ethtool_cmd *cmd)
 {
        struct xgbe_prv_data *pdata = netdev_priv(netdev);
-       int ret;
 
        DBGPR("-->xgbe_get_settings\n");
 
-       if (!pdata->phydev)
-               return -ENODEV;
+       cmd->phy_address = pdata->phy.address;
+
+       cmd->supported = pdata->phy.supported;
+       cmd->advertising = pdata->phy.advertising;
+       cmd->lp_advertising = pdata->phy.lp_advertising;
+
+       cmd->autoneg = pdata->phy.autoneg;
+       ethtool_cmd_speed_set(cmd, pdata->phy.speed);
+       cmd->duplex = pdata->phy.duplex;
 
-       ret = phy_ethtool_gset(pdata->phydev, cmd);
+       cmd->port = PORT_NONE;
+       cmd->transceiver = XCVR_INTERNAL;
 
        DBGPR("<--xgbe_get_settings\n");
 
-       return ret;
+       return 0;
 }
 
 static int xgbe_set_settings(struct net_device *netdev,
                             struct ethtool_cmd *cmd)
 {
        struct xgbe_prv_data *pdata = netdev_priv(netdev);
-       struct phy_device *phydev = pdata->phydev;
        u32 speed;
        int ret;
 
        DBGPR("-->xgbe_set_settings\n");
 
-       if (!pdata->phydev)
-               return -ENODEV;
-
        speed = ethtool_cmd_speed(cmd);
 
-       if (cmd->phy_address != phydev->addr)
+       if (cmd->phy_address != pdata->phy.address)
                return -EINVAL;
 
        if ((cmd->autoneg != AUTONEG_ENABLE) &&
@@ -333,23 +348,23 @@ static int xgbe_set_settings(struct net_device *netdev,
                        return -EINVAL;
        }
 
-       cmd->advertising &= phydev->supported;
+       cmd->advertising &= pdata->phy.supported;
        if ((cmd->autoneg == AUTONEG_ENABLE) && !cmd->advertising)
                return -EINVAL;
 
        ret = 0;
-       phydev->autoneg = cmd->autoneg;
-       phydev->speed = speed;
-       phydev->duplex = cmd->duplex;
-       phydev->advertising = cmd->advertising;
+       pdata->phy.autoneg = cmd->autoneg;
+       pdata->phy.speed = speed;
+       pdata->phy.duplex = cmd->duplex;
+       pdata->phy.advertising = cmd->advertising;
 
        if (cmd->autoneg == AUTONEG_ENABLE)
-               phydev->advertising |= ADVERTISED_Autoneg;
+               pdata->phy.advertising |= ADVERTISED_Autoneg;
        else
-               phydev->advertising &= ~ADVERTISED_Autoneg;
+               pdata->phy.advertising &= ~ADVERTISED_Autoneg;
 
        if (netif_running(netdev))
-               ret = phy_start_aneg(phydev);
+               ret = pdata->phy_if.phy_config_aneg(pdata);
 
        DBGPR("<--xgbe_set_settings\n");
 
index 7149053849008de10da3be7eb054884b4a808f8c..fb7c961da49f892fb9bbffeab89d78de1d121f07 100644 (file)
 #include <linux/of.h>
 #include <linux/of_net.h>
 #include <linux/of_address.h>
+#include <linux/of_platform.h>
 #include <linux/clk.h>
 #include <linux/property.h>
 #include <linux/acpi.h>
+#include <linux/mdio.h>
 
 #include "xgbe.h"
 #include "xgbe-common.h"
@@ -136,6 +138,49 @@ MODULE_LICENSE("Dual BSD/GPL");
 MODULE_VERSION(XGBE_DRV_VERSION);
 MODULE_DESCRIPTION(XGBE_DRV_DESC);
 
+static int debug = -1;
+module_param(debug, int, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(debug, " Network interface message level setting");
+
+static const u32 default_msg_level = (NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
+                                     NETIF_MSG_IFUP);
+
+static const u32 xgbe_serdes_blwc[] = {
+       XGBE_SPEED_1000_BLWC,
+       XGBE_SPEED_2500_BLWC,
+       XGBE_SPEED_10000_BLWC,
+};
+
+static const u32 xgbe_serdes_cdr_rate[] = {
+       XGBE_SPEED_1000_CDR,
+       XGBE_SPEED_2500_CDR,
+       XGBE_SPEED_10000_CDR,
+};
+
+static const u32 xgbe_serdes_pq_skew[] = {
+       XGBE_SPEED_1000_PQ,
+       XGBE_SPEED_2500_PQ,
+       XGBE_SPEED_10000_PQ,
+};
+
+static const u32 xgbe_serdes_tx_amp[] = {
+       XGBE_SPEED_1000_TXAMP,
+       XGBE_SPEED_2500_TXAMP,
+       XGBE_SPEED_10000_TXAMP,
+};
+
+static const u32 xgbe_serdes_dfe_tap_cfg[] = {
+       XGBE_SPEED_1000_DFE_TAP_CONFIG,
+       XGBE_SPEED_2500_DFE_TAP_CONFIG,
+       XGBE_SPEED_10000_DFE_TAP_CONFIG,
+};
+
+static const u32 xgbe_serdes_dfe_tap_ena[] = {
+       XGBE_SPEED_1000_DFE_TAP_ENABLE,
+       XGBE_SPEED_2500_DFE_TAP_ENABLE,
+       XGBE_SPEED_10000_DFE_TAP_ENABLE,
+};
+
 static void xgbe_default_config(struct xgbe_prv_data *pdata)
 {
        DBGPR("-->xgbe_default_config\n");
@@ -153,8 +198,6 @@ static void xgbe_default_config(struct xgbe_prv_data *pdata)
        pdata->rx_pause = 1;
        pdata->phy_speed = SPEED_UNKNOWN;
        pdata->power_down = 0;
-       pdata->default_autoneg = AUTONEG_ENABLE;
-       pdata->default_speed = SPEED_10000;
 
        DBGPR("<--xgbe_default_config\n");
 }
@@ -162,6 +205,7 @@ static void xgbe_default_config(struct xgbe_prv_data *pdata)
 static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
 {
        xgbe_init_function_ptrs_dev(&pdata->hw_if);
+       xgbe_init_function_ptrs_phy(&pdata->phy_if);
        xgbe_init_function_ptrs_desc(&pdata->desc_if);
 }
 
@@ -248,23 +292,82 @@ static int xgbe_of_support(struct xgbe_prv_data *pdata)
 
        return 0;
 }
+
+static struct platform_device *xgbe_of_get_phy_pdev(struct xgbe_prv_data *pdata)
+{
+       struct device *dev = pdata->dev;
+       struct device_node *phy_node;
+       struct platform_device *phy_pdev;
+
+       phy_node = of_parse_phandle(dev->of_node, "phy-handle", 0);
+       if (phy_node) {
+               /* Old style device tree:
+                *   The XGBE and PHY resources are separate
+                */
+               phy_pdev = of_find_device_by_node(phy_node);
+               of_node_put(phy_node);
+       } else {
+               /* New style device tree:
+                *   The XGBE and PHY resources are grouped together with
+                *   the PHY resources listed last
+                */
+               get_device(dev);
+               phy_pdev = pdata->pdev;
+       }
+
+       return phy_pdev;
+}
 #else   /* CONFIG_OF */
 static int xgbe_of_support(struct xgbe_prv_data *pdata)
 {
        return -EINVAL;
 }
-#endif  /*CONFIG_OF */
+
+static struct platform_device *xgbe_of_get_phy_pdev(struct xgbe_prv_data *pdata)
+{
+       return NULL;
+}
+#endif  /* CONFIG_OF */
+
+static unsigned int xgbe_resource_count(struct platform_device *pdev,
+                                       unsigned int type)
+{
+       unsigned int count;
+       int i;
+
+       for (i = 0, count = 0; i < pdev->num_resources; i++) {
+               struct resource *res = &pdev->resource[i];
+
+               if (type == resource_type(res))
+                       count++;
+       }
+
+       return count;
+}
+
+static struct platform_device *xgbe_get_phy_pdev(struct xgbe_prv_data *pdata)
+{
+       struct platform_device *phy_pdev;
+
+       if (pdata->use_acpi) {
+               get_device(pdata->dev);
+               phy_pdev = pdata->pdev;
+       } else {
+               phy_pdev = xgbe_of_get_phy_pdev(pdata);
+       }
+
+       return phy_pdev;
+}
 
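
xgbe_resource_count() lets the probe path tell the two layouts apart arithmetically: when the PHY resources are appended to the XGBE device, the PHY's register blocks are simply the last three MEM resources and its interrupt the last IRQ, as computed a little further down in xgbe_probe(). A sketch of that arithmetic over a simplified resource array (types are stand-ins for the platform device structures):

#include <stdio.h>

enum res_type { RES_MEM, RES_IRQ };

struct resource {
        enum res_type type;
};

static unsigned int resource_count(const struct resource *res, int num,
                                   enum res_type type)
{
        unsigned int count = 0;

        for (int i = 0; i < num; i++)
                if (res[i].type == type)
                        count++;
        return count;
}

int main(void)
{
        /* Combined layout: XGBE MMIO x2, PHY MMIO x3, XGBE IRQ, PHY IRQ. */
        const struct resource res[] = {
                { RES_MEM }, { RES_MEM },               /* XGMAC, XPCS */
                { RES_MEM }, { RES_MEM }, { RES_MEM },  /* RXTX, SIR0, SIR1 */
                { RES_IRQ }, { RES_IRQ },
        };
        int num = sizeof(res) / sizeof(res[0]);

        /* PHY resources sit at the tail, so index past the MAC's. */
        unsigned int phy_memnum = resource_count(res, num, RES_MEM) - 3;
        unsigned int phy_irqnum = resource_count(res, num, RES_IRQ) - 1;

        printf("first PHY MEM index=%u, PHY IRQ index=%u\n",
               phy_memnum, phy_irqnum);
        return 0;
}
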
 static int xgbe_probe(struct platform_device *pdev)
 {
        struct xgbe_prv_data *pdata;
-       struct xgbe_hw_if *hw_if;
-       struct xgbe_desc_if *desc_if;
        struct net_device *netdev;
-       struct device *dev = &pdev->dev;
+       struct device *dev = &pdev->dev, *phy_dev;
+       struct platform_device *phy_pdev;
        struct resource *res;
        const char *phy_mode;
-       unsigned int i;
+       unsigned int i, phy_memnum, phy_irqnum;
        int ret;
 
        DBGPR("--> xgbe_probe\n");
@@ -289,9 +392,36 @@ static int xgbe_probe(struct platform_device *pdev)
        mutex_init(&pdata->rss_mutex);
        spin_lock_init(&pdata->tstamp_lock);
 
+       pdata->msg_enable = netif_msg_init(debug, default_msg_level);
+
+       set_bit(XGBE_DOWN, &pdata->dev_state);
+
        /* Check if we should use ACPI or DT */
        pdata->use_acpi = (!pdata->adev || acpi_disabled) ? 0 : 1;
 
+       phy_pdev = xgbe_get_phy_pdev(pdata);
+       if (!phy_pdev) {
+               dev_err(dev, "unable to obtain phy device\n");
+               ret = -EINVAL;
+               goto err_phydev;
+       }
+       phy_dev = &phy_pdev->dev;
+
+       if (pdev == phy_pdev) {
+               /* New style device tree or ACPI:
+                *   The XGBE and PHY resources are grouped together with
+                *   the PHY resources listed last
+                */
+               phy_memnum = xgbe_resource_count(pdev, IORESOURCE_MEM) - 3;
+               phy_irqnum = xgbe_resource_count(pdev, IORESOURCE_IRQ) - 1;
+       } else {
+               /* Old style device tree:
+                *   The XGBE and PHY resources are separate
+                */
+               phy_memnum = 0;
+               phy_irqnum = 0;
+       }
+
        /* Set and validate the number of descriptors for a ring */
        BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_TX_DESC_CNT);
        pdata->tx_desc_count = XGBE_TX_DESC_CNT;
@@ -318,7 +448,8 @@ static int xgbe_probe(struct platform_device *pdev)
                ret = PTR_ERR(pdata->xgmac_regs);
                goto err_io;
        }
-       DBGPR("  xgmac_regs = %p\n", pdata->xgmac_regs);
+       if (netif_msg_probe(pdata))
+               dev_dbg(dev, "xgmac_regs = %p\n", pdata->xgmac_regs);
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        pdata->xpcs_regs = devm_ioremap_resource(dev, res);
@@ -327,7 +458,38 @@ static int xgbe_probe(struct platform_device *pdev)
                ret = PTR_ERR(pdata->xpcs_regs);
                goto err_io;
        }
-       DBGPR("  xpcs_regs  = %p\n", pdata->xpcs_regs);
+       if (netif_msg_probe(pdata))
+               dev_dbg(dev, "xpcs_regs  = %p\n", pdata->xpcs_regs);
+
+       res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
+       pdata->rxtx_regs = devm_ioremap_resource(dev, res);
+       if (IS_ERR(pdata->rxtx_regs)) {
+               dev_err(dev, "rxtx ioremap failed\n");
+               ret = PTR_ERR(pdata->rxtx_regs);
+               goto err_io;
+       }
+       if (netif_msg_probe(pdata))
+               dev_dbg(dev, "rxtx_regs  = %p\n", pdata->rxtx_regs);
+
+       res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
+       pdata->sir0_regs = devm_ioremap_resource(dev, res);
+       if (IS_ERR(pdata->sir0_regs)) {
+               dev_err(dev, "sir0 ioremap failed\n");
+               ret = PTR_ERR(pdata->sir0_regs);
+               goto err_io;
+       }
+       if (netif_msg_probe(pdata))
+               dev_dbg(dev, "sir0_regs  = %p\n", pdata->sir0_regs);
+
+       res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
+       pdata->sir1_regs = devm_ioremap_resource(dev, res);
+       if (IS_ERR(pdata->sir1_regs)) {
+               dev_err(dev, "sir1 ioremap failed\n");
+               ret = PTR_ERR(pdata->sir1_regs);
+               goto err_io;
+       }
+       if (netif_msg_probe(pdata))
+               dev_dbg(dev, "sir1_regs  = %p\n", pdata->sir1_regs);
 
        /* Retrieve the MAC address */
        ret = device_property_read_u8_array(dev, XGBE_MAC_ADDR_PROPERTY,
@@ -355,6 +517,115 @@ static int xgbe_probe(struct platform_device *pdev)
        if (device_property_present(dev, XGBE_DMA_IRQS_PROPERTY))
                pdata->per_channel_irq = 1;
 
+       /* Retrieve the PHY speedset */
+       ret = device_property_read_u32(phy_dev, XGBE_SPEEDSET_PROPERTY,
+                                      &pdata->speed_set);
+       if (ret) {
+               dev_err(dev, "invalid %s property\n", XGBE_SPEEDSET_PROPERTY);
+               goto err_io;
+       }
+
+       switch (pdata->speed_set) {
+       case XGBE_SPEEDSET_1000_10000:
+       case XGBE_SPEEDSET_2500_10000:
+               break;
+       default:
+               dev_err(dev, "invalid %s property\n", XGBE_SPEEDSET_PROPERTY);
+               ret = -EINVAL;
+               goto err_io;
+       }
+
+       /* Retrieve the PHY configuration properties */
+       if (device_property_present(phy_dev, XGBE_BLWC_PROPERTY)) {
+               ret = device_property_read_u32_array(phy_dev,
+                                                    XGBE_BLWC_PROPERTY,
+                                                    pdata->serdes_blwc,
+                                                    XGBE_SPEEDS);
+               if (ret) {
+                       dev_err(dev, "invalid %s property\n",
+                               XGBE_BLWC_PROPERTY);
+                       goto err_io;
+               }
+       } else {
+               memcpy(pdata->serdes_blwc, xgbe_serdes_blwc,
+                      sizeof(pdata->serdes_blwc));
+       }
+
+       if (device_property_present(phy_dev, XGBE_CDR_RATE_PROPERTY)) {
+               ret = device_property_read_u32_array(phy_dev,
+                                                    XGBE_CDR_RATE_PROPERTY,
+                                                    pdata->serdes_cdr_rate,
+                                                    XGBE_SPEEDS);
+               if (ret) {
+                       dev_err(dev, "invalid %s property\n",
+                               XGBE_CDR_RATE_PROPERTY);
+                       goto err_io;
+               }
+       } else {
+               memcpy(pdata->serdes_cdr_rate, xgbe_serdes_cdr_rate,
+                      sizeof(pdata->serdes_cdr_rate));
+       }
+
+       if (device_property_present(phy_dev, XGBE_PQ_SKEW_PROPERTY)) {
+               ret = device_property_read_u32_array(phy_dev,
+                                                    XGBE_PQ_SKEW_PROPERTY,
+                                                    pdata->serdes_pq_skew,
+                                                    XGBE_SPEEDS);
+               if (ret) {
+                       dev_err(dev, "invalid %s property\n",
+                               XGBE_PQ_SKEW_PROPERTY);
+                       goto err_io;
+               }
+       } else {
+               memcpy(pdata->serdes_pq_skew, xgbe_serdes_pq_skew,
+                      sizeof(pdata->serdes_pq_skew));
+       }
+
+       if (device_property_present(phy_dev, XGBE_TX_AMP_PROPERTY)) {
+               ret = device_property_read_u32_array(phy_dev,
+                                                    XGBE_TX_AMP_PROPERTY,
+                                                    pdata->serdes_tx_amp,
+                                                    XGBE_SPEEDS);
+               if (ret) {
+                       dev_err(dev, "invalid %s property\n",
+                               XGBE_TX_AMP_PROPERTY);
+                       goto err_io;
+               }
+       } else {
+               memcpy(pdata->serdes_tx_amp, xgbe_serdes_tx_amp,
+                      sizeof(pdata->serdes_tx_amp));
+       }
+
+       if (device_property_present(phy_dev, XGBE_DFE_CFG_PROPERTY)) {
+               ret = device_property_read_u32_array(phy_dev,
+                                                    XGBE_DFE_CFG_PROPERTY,
+                                                    pdata->serdes_dfe_tap_cfg,
+                                                    XGBE_SPEEDS);
+               if (ret) {
+                       dev_err(dev, "invalid %s property\n",
+                               XGBE_DFE_CFG_PROPERTY);
+                       goto err_io;
+               }
+       } else {
+               memcpy(pdata->serdes_dfe_tap_cfg, xgbe_serdes_dfe_tap_cfg,
+                      sizeof(pdata->serdes_dfe_tap_cfg));
+       }
+
+       if (device_property_present(phy_dev, XGBE_DFE_ENA_PROPERTY)) {
+               ret = device_property_read_u32_array(phy_dev,
+                                                    XGBE_DFE_ENA_PROPERTY,
+                                                    pdata->serdes_dfe_tap_ena,
+                                                    XGBE_SPEEDS);
+               if (ret) {
+                       dev_err(dev, "invalid %s property\n",
+                               XGBE_DFE_ENA_PROPERTY);
+                       goto err_io;
+               }
+       } else {
+               memcpy(pdata->serdes_dfe_tap_ena, xgbe_serdes_dfe_tap_ena,
+                      sizeof(pdata->serdes_dfe_tap_ena));
+       }
+
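
The six SerDes property blocks above repeat one pattern: read a per-speed u32 array from the firmware node if the property is present, otherwise fall back to the built-in defaults. A hypothetical helper expressing that shared shape; this refactor is not part of the patch, the property name and default values are illustrative, and the patch's explicit error path is folded into the fallback for brevity:

#include <stdio.h>
#include <string.h>

#define NUM_SPEEDS 3    /* one entry each for 1G, 2.5G and 10G */

/* Stand-in for device_property_read_u32_array(): 0 on success. Here it
 * always fails, as if the firmware node omitted the property. */
static int read_u32_array(const char *prop, unsigned int *out, int n)
{
        (void)prop; (void)out; (void)n;
        return -1;
}

/* The shape shared by all six blocks: firmware value if present,
 * built-in default otherwise. */
static void read_serdes_prop(const char *prop, unsigned int *dst,
                             const unsigned int *defaults)
{
        if (read_u32_array(prop, dst, NUM_SPEEDS) == 0)
                return;                         /* firmware-provided */
        memcpy(dst, defaults, NUM_SPEEDS * sizeof(*dst));
}

int main(void)
{
        const unsigned int default_blwc[NUM_SPEEDS] = { 1, 1, 0 };
        unsigned int blwc[NUM_SPEEDS];

        read_serdes_prop("amd,serdes-blwc", blwc, default_blwc);
        printf("blwc = { %u, %u, %u }\n", blwc[0], blwc[1], blwc[2]);
        return 0;
}
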
        /* Obtain device settings unique to ACPI/OF */
        if (pdata->use_acpi)
                ret = xgbe_acpi_support(pdata);
@@ -382,17 +653,23 @@ static int xgbe_probe(struct platform_device *pdev)
        }
        pdata->dev_irq = ret;
 
+       /* Get the auto-negotiation interrupt */
+       ret = platform_get_irq(phy_pdev, phy_irqnum++);
+       if (ret < 0) {
+               dev_err(dev, "platform_get_irq phy 0 failed\n");
+               goto err_io;
+       }
+       pdata->an_irq = ret;
+
        netdev->irq = pdata->dev_irq;
        netdev->base_addr = (unsigned long)pdata->xgmac_regs;
        memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len);
 
        /* Set all the function pointers */
        xgbe_init_all_fptrs(pdata);
-       hw_if = &pdata->hw_if;
-       desc_if = &pdata->desc_if;
 
        /* Issue software reset to device */
-       hw_if->exit(pdata);
+       pdata->hw_if.exit(pdata);
 
        /* Populate the hardware features */
        xgbe_get_all_hw_features(pdata);
@@ -401,8 +678,6 @@ static int xgbe_probe(struct platform_device *pdev)
        xgbe_default_config(pdata);
 
        /* Set the DMA mask */
-       if (!dev->dma_mask)
-               dev->dma_mask = &dev->coherent_dma_mask;
        ret = dma_set_mask_and_coherent(dev,
                                        DMA_BIT_MASK(pdata->hw_feat.dma_width));
        if (ret) {
@@ -447,16 +722,8 @@ static int xgbe_probe(struct platform_device *pdev)
        XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
        XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
 
-       /* Prepare to register with MDIO */
-       pdata->mii_bus_id = kasprintf(GFP_KERNEL, "%s", pdev->name);
-       if (!pdata->mii_bus_id) {
-               dev_err(dev, "failed to allocate mii bus id\n");
-               ret = -ENOMEM;
-               goto err_io;
-       }
-       ret = xgbe_mdio_register(pdata);
-       if (ret)
-               goto err_bus_id;
+       /* Call MDIO/PHY initialization routine */
+       pdata->phy_if.phy_init(pdata);
 
        /* Set device operations */
        netdev->netdev_ops = xgbe_get_netdev_ops();
@@ -501,26 +768,52 @@ static int xgbe_probe(struct platform_device *pdev)
        ret = register_netdev(netdev);
        if (ret) {
                dev_err(dev, "net device registration failed\n");
-               goto err_reg_netdev;
+               goto err_io;
+       }
+
+       /* Create the PHY/ANEG name based on netdev name */
+       snprintf(pdata->an_name, sizeof(pdata->an_name) - 1, "%s-pcs",
+                netdev_name(netdev));
+
+       /* Create workqueues */
+       pdata->dev_workqueue =
+               create_singlethread_workqueue(netdev_name(netdev));
+       if (!pdata->dev_workqueue) {
+               netdev_err(netdev, "device workqueue creation failed\n");
+               ret = -ENOMEM;
+               goto err_netdev;
+       }
+
+       pdata->an_workqueue =
+               create_singlethread_workqueue(pdata->an_name);
+       if (!pdata->an_workqueue) {
+               netdev_err(netdev, "phy workqueue creation failed\n");
+               ret = -ENOMEM;
+               goto err_wq;
        }
 
        xgbe_ptp_register(pdata);
 
        xgbe_debugfs_init(pdata);
 
+       platform_device_put(phy_pdev);
+
        netdev_notice(netdev, "net device enabled\n");
 
        DBGPR("<-- xgbe_probe\n");
 
        return 0;
 
-err_reg_netdev:
-       xgbe_mdio_unregister(pdata);
+err_wq:
+       destroy_workqueue(pdata->dev_workqueue);
 
-err_bus_id:
-       kfree(pdata->mii_bus_id);
+err_netdev:
+       unregister_netdev(netdev);
 
 err_io:
+       platform_device_put(phy_pdev);
+
+err_phydev:
        free_netdev(netdev);
 
 err_alloc:
@@ -540,11 +833,13 @@ static int xgbe_remove(struct platform_device *pdev)
 
        xgbe_ptp_unregister(pdata);
 
-       unregister_netdev(netdev);
+       flush_workqueue(pdata->an_workqueue);
+       destroy_workqueue(pdata->an_workqueue);
 
-       xgbe_mdio_unregister(pdata);
+       flush_workqueue(pdata->dev_workqueue);
+       destroy_workqueue(pdata->dev_workqueue);
 
-       kfree(pdata->mii_bus_id);
+       unregister_netdev(netdev);
 
        free_netdev(netdev);
 
@@ -557,16 +852,17 @@ static int xgbe_remove(struct platform_device *pdev)
 static int xgbe_suspend(struct device *dev)
 {
        struct net_device *netdev = dev_get_drvdata(dev);
-       int ret;
+       struct xgbe_prv_data *pdata = netdev_priv(netdev);
+       int ret = 0;
 
        DBGPR("-->xgbe_suspend\n");
 
-       if (!netif_running(netdev)) {
-               DBGPR("<--xgbe_dev_suspend\n");
-               return -EINVAL;
-       }
+       if (netif_running(netdev))
+               ret = xgbe_powerdown(netdev, XGMAC_DRIVER_CONTEXT);
 
-       ret = xgbe_powerdown(netdev, XGMAC_DRIVER_CONTEXT);
+       pdata->lpm_ctrl = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+       pdata->lpm_ctrl |= MDIO_CTRL1_LPOWER;
+       XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);
 
        DBGPR("<--xgbe_suspend\n");
 
@@ -576,16 +872,16 @@ static int xgbe_suspend(struct device *dev)
 static int xgbe_resume(struct device *dev)
 {
        struct net_device *netdev = dev_get_drvdata(dev);
-       int ret;
+       struct xgbe_prv_data *pdata = netdev_priv(netdev);
+       int ret = 0;
 
        DBGPR("-->xgbe_resume\n");
 
-       if (!netif_running(netdev)) {
-               DBGPR("<--xgbe_dev_resume\n");
-               return -EINVAL;
-       }
+       pdata->lpm_ctrl &= ~MDIO_CTRL1_LPOWER;
+       XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);
 
-       ret = xgbe_powerup(netdev, XGMAC_DRIVER_CONTEXT);
+       if (netif_running(netdev))
+               ret = xgbe_powerup(netdev, XGMAC_DRIVER_CONTEXT);
 
        DBGPR("<--xgbe_resume\n");
 
index 59e267f3f1b77e1ca34cbd0eaca7c760653450b4..9088c3a35a207ec6a60e1b1eb10e112f1e2f2e90 100644 (file)
 #include <linux/mdio.h>
 #include <linux/phy.h>
 #include <linux/of.h>
+#include <linux/bitops.h>
+#include <linux/jiffies.h>
 
 #include "xgbe.h"
 #include "xgbe-common.h"
 
-static int xgbe_mdio_read(struct mii_bus *mii, int prtad, int mmd_reg)
+static void xgbe_an_enable_kr_training(struct xgbe_prv_data *pdata)
 {
-       struct xgbe_prv_data *pdata = mii->priv;
-       struct xgbe_hw_if *hw_if = &pdata->hw_if;
-       int mmd_data;
+       unsigned int reg;
 
-       DBGPR_MDIO("-->xgbe_mdio_read: prtad=%#x mmd_reg=%#x\n",
-                  prtad, mmd_reg);
+       reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
 
-       mmd_data = hw_if->read_mmd_regs(pdata, prtad, mmd_reg);
+       reg |= XGBE_KR_TRAINING_ENABLE;
+       XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
+}
+
+static void xgbe_an_disable_kr_training(struct xgbe_prv_data *pdata)
+{
+       unsigned int reg;
 
-       DBGPR_MDIO("<--xgbe_mdio_read: mmd_data=%#x\n", mmd_data);
+       reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
 
-       return mmd_data;
+       reg &= ~XGBE_KR_TRAINING_ENABLE;
+       XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
 }
 
-static int xgbe_mdio_write(struct mii_bus *mii, int prtad, int mmd_reg,
-                          u16 mmd_val)
+static void xgbe_pcs_power_cycle(struct xgbe_prv_data *pdata)
 {
-       struct xgbe_prv_data *pdata = mii->priv;
-       struct xgbe_hw_if *hw_if = &pdata->hw_if;
-       int mmd_data = mmd_val;
+       unsigned int reg;
 
-       DBGPR_MDIO("-->xgbe_mdio_write: prtad=%#x mmd_reg=%#x mmd_data=%#x\n",
-                  prtad, mmd_reg, mmd_data);
+       reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
 
-       hw_if->write_mmd_regs(pdata, prtad, mmd_reg, mmd_data);
+       reg |= MDIO_CTRL1_LPOWER;
+       XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
 
-       DBGPR_MDIO("<--xgbe_mdio_write\n");
+       usleep_range(75, 100);
 
-       return 0;
+       reg &= ~MDIO_CTRL1_LPOWER;
+       XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
 }
 
-void xgbe_dump_phy_registers(struct xgbe_prv_data *pdata)
+static void xgbe_serdes_start_ratechange(struct xgbe_prv_data *pdata)
 {
-       struct device *dev = pdata->dev;
-       struct phy_device *phydev = pdata->mii->phy_map[XGBE_PRTAD];
-       int i;
-
-       dev_alert(dev, "\n************* PHY Reg dump **********************\n");
-
-       dev_alert(dev, "PCS Control Reg (%#04x) = %#04x\n", MDIO_CTRL1,
-                 XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1));
-       dev_alert(dev, "PCS Status Reg (%#04x) = %#04x\n", MDIO_STAT1,
-                 XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1));
-       dev_alert(dev, "Phy Id (PHYS ID 1 %#04x)= %#04x\n", MDIO_DEVID1,
-                 XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID1));
-       dev_alert(dev, "Phy Id (PHYS ID 2 %#04x)= %#04x\n", MDIO_DEVID2,
-                 XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID2));
-       dev_alert(dev, "Devices in Package (%#04x)= %#04x\n", MDIO_DEVS1,
-                 XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS1));
-       dev_alert(dev, "Devices in Package (%#04x)= %#04x\n", MDIO_DEVS2,
-                 XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS2));
-
-       dev_alert(dev, "Auto-Neg Control Reg (%#04x) = %#04x\n", MDIO_CTRL1,
-                 XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1));
-       dev_alert(dev, "Auto-Neg Status Reg (%#04x) = %#04x\n", MDIO_STAT1,
-                 XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_STAT1));
-       dev_alert(dev, "Auto-Neg Ad Reg 1 (%#04x) = %#04x\n",
-                 MDIO_AN_ADVERTISE,
-                 XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE));
-       dev_alert(dev, "Auto-Neg Ad Reg 2 (%#04x) = %#04x\n",
-                 MDIO_AN_ADVERTISE + 1,
-                 XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1));
-       dev_alert(dev, "Auto-Neg Ad Reg 3 (%#04x) = %#04x\n",
-                 MDIO_AN_ADVERTISE + 2,
-                 XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2));
-       dev_alert(dev, "Auto-Neg Completion Reg (%#04x) = %#04x\n",
-                 MDIO_AN_COMP_STAT,
-                 XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_COMP_STAT));
-
-       dev_alert(dev, "MMD Device Mask = %#x\n",
-                 phydev->c45_ids.devices_in_package);
-       for (i = 0; i < ARRAY_SIZE(phydev->c45_ids.device_ids); i++)
-               dev_alert(dev, "  MMD %d: ID = %#08x\n", i,
-                         phydev->c45_ids.device_ids[i]);
-
-       dev_alert(dev, "\n*************************************************\n");
-}
-
-int xgbe_mdio_register(struct xgbe_prv_data *pdata)
-{
-       struct mii_bus *mii;
-       struct phy_device *phydev;
-       int ret = 0;
-
-       DBGPR("-->xgbe_mdio_register\n");
-
-       mii = mdiobus_alloc();
-       if (!mii) {
-               dev_err(pdata->dev, "mdiobus_alloc failed\n");
-               return -ENOMEM;
+       /* Assert Rx and Tx ratechange */
+       XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, RATECHANGE, 1);
+}
+
+static void xgbe_serdes_complete_ratechange(struct xgbe_prv_data *pdata)
+{
+       unsigned int wait;
+       u16 status;
+
+       /* Release Rx and Tx ratechange */
+       XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, RATECHANGE, 0);
+
+       /* Wait for Rx and Tx ready */
+       wait = XGBE_RATECHANGE_COUNT;
+       while (wait--) {
+               usleep_range(50, 75);
+
+               status = XSIR0_IOREAD(pdata, SIR0_STATUS);
+               if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) &&
+                   XSIR_GET_BITS(status, SIR0_STATUS, TX_READY))
+                       goto rx_reset;
        }
 
-       /* Register on the MDIO bus (don't probe any PHYs) */
-       mii->name = XGBE_PHY_NAME;
-       mii->read = xgbe_mdio_read;
-       mii->write = xgbe_mdio_write;
-       snprintf(mii->id, sizeof(mii->id), "%s", pdata->mii_bus_id);
-       mii->priv = pdata;
-       mii->phy_mask = ~0;
-       mii->parent = pdata->dev;
-       ret = mdiobus_register(mii);
-       if (ret) {
-               dev_err(pdata->dev, "mdiobus_register failed\n");
-               goto err_mdiobus_alloc;
+       netif_dbg(pdata, link, pdata->netdev, "SerDes rx/tx not ready (%#hx)\n",
+                 status);
+
+rx_reset:
+       /* Perform Rx reset for the DFE changes */
+       XRXTX_IOWRITE_BITS(pdata, RXTX_REG6, RESETB_RXD, 0);
+       XRXTX_IOWRITE_BITS(pdata, RXTX_REG6, RESETB_RXD, 1);
+}
+
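
xgbe_serdes_complete_ratechange() releases the rate-change strobe and then polls SerDes status with a bounded retry count instead of waiting forever; if Rx/Tx never report ready it logs the fact and proceeds to the Rx reset anyway. The bounded-poll idiom on its own (the status source is faked, and the retry bound is chosen for illustration rather than taken from the driver):

#include <stdio.h>

#define RATECHANGE_COUNT 500    /* retry bound for this sketch */

/* Fake status source: reports ready on the third poll. */
static int serdes_ready(void)
{
        static int polls;

        return ++polls >= 3;
}

static void complete_ratechange(void)
{
        unsigned int wait = RATECHANGE_COUNT;

        while (wait--) {
                /* usleep_range(50, 75) would sit here in the driver */
                if (serdes_ready()) {
                        printf("rx/tx ready\n");
                        goto rx_reset;
                }
        }

        /* Timed out: log and continue rather than stall the rate change */
        printf("rx/tx not ready, continuing\n");

rx_reset:
        printf("performing Rx reset for the DFE changes\n");
}

int main(void)
{
        complete_ratechange();
        return 0;
}
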
+static void xgbe_xgmii_mode(struct xgbe_prv_data *pdata)
+{
+       unsigned int reg;
+
+       /* Enable KR training */
+       xgbe_an_enable_kr_training(pdata);
+
+       /* Set MAC to 10G speed */
+       pdata->hw_if.set_xgmii_speed(pdata);
+
+       /* Set PCS to KR/10G speed */
+       reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
+       reg &= ~MDIO_PCS_CTRL2_TYPE;
+       reg |= MDIO_PCS_CTRL2_10GBR;
+       XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, reg);
+
+       reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+       reg &= ~MDIO_CTRL1_SPEEDSEL;
+       reg |= MDIO_CTRL1_SPEED10G;
+       XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
+
+       xgbe_pcs_power_cycle(pdata);
+
+       /* Set SerDes to 10G speed */
+       xgbe_serdes_start_ratechange(pdata);
+
+       XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, DATARATE, XGBE_SPEED_10000_RATE);
+       XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, WORDMODE, XGBE_SPEED_10000_WORD);
+       XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, PLLSEL, XGBE_SPEED_10000_PLL);
+
+       XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, CDR_RATE,
+                          pdata->serdes_cdr_rate[XGBE_SPEED_10000]);
+       XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, TXAMP,
+                          pdata->serdes_tx_amp[XGBE_SPEED_10000]);
+       XRXTX_IOWRITE_BITS(pdata, RXTX_REG20, BLWC_ENA,
+                          pdata->serdes_blwc[XGBE_SPEED_10000]);
+       XRXTX_IOWRITE_BITS(pdata, RXTX_REG114, PQ_REG,
+                          pdata->serdes_pq_skew[XGBE_SPEED_10000]);
+       XRXTX_IOWRITE_BITS(pdata, RXTX_REG129, RXDFE_CONFIG,
+                          pdata->serdes_dfe_tap_cfg[XGBE_SPEED_10000]);
+       XRXTX_IOWRITE(pdata, RXTX_REG22,
+                     pdata->serdes_dfe_tap_ena[XGBE_SPEED_10000]);
+
+       xgbe_serdes_complete_ratechange(pdata);
+
+       netif_dbg(pdata, link, pdata->netdev, "10GbE KR mode set\n");
+}
+
+static void xgbe_gmii_2500_mode(struct xgbe_prv_data *pdata)
+{
+       unsigned int reg;
+
+       /* Disable KR training */
+       xgbe_an_disable_kr_training(pdata);
+
+       /* Set MAC to 2.5G speed */
+       pdata->hw_if.set_gmii_2500_speed(pdata);
+
+       /* Set PCS to KX/1G speed */
+       reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
+       reg &= ~MDIO_PCS_CTRL2_TYPE;
+       reg |= MDIO_PCS_CTRL2_10GBX;
+       XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, reg);
+
+       reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+       reg &= ~MDIO_CTRL1_SPEEDSEL;
+       reg |= MDIO_CTRL1_SPEED1G;
+       XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
+
+       xgbe_pcs_power_cycle(pdata);
+
+       /* Set SerDes to 2.5G speed */
+       xgbe_serdes_start_ratechange(pdata);
+
+       XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, DATARATE, XGBE_SPEED_2500_RATE);
+       XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, WORDMODE, XGBE_SPEED_2500_WORD);
+       XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, PLLSEL, XGBE_SPEED_2500_PLL);
+
+       XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, CDR_RATE,
+                          pdata->serdes_cdr_rate[XGBE_SPEED_2500]);
+       XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, TXAMP,
+                          pdata->serdes_tx_amp[XGBE_SPEED_2500]);
+       XRXTX_IOWRITE_BITS(pdata, RXTX_REG20, BLWC_ENA,
+                          pdata->serdes_blwc[XGBE_SPEED_2500]);
+       XRXTX_IOWRITE_BITS(pdata, RXTX_REG114, PQ_REG,
+                          pdata->serdes_pq_skew[XGBE_SPEED_2500]);
+       XRXTX_IOWRITE_BITS(pdata, RXTX_REG129, RXDFE_CONFIG,
+                          pdata->serdes_dfe_tap_cfg[XGBE_SPEED_2500]);
+       XRXTX_IOWRITE(pdata, RXTX_REG22,
+                     pdata->serdes_dfe_tap_ena[XGBE_SPEED_2500]);
+
+       xgbe_serdes_complete_ratechange(pdata);
+
+       netif_dbg(pdata, link, pdata->netdev, "2.5GbE KX mode set\n");
+}
+
+static void xgbe_gmii_mode(struct xgbe_prv_data *pdata)
+{
+       unsigned int reg;
+
+       /* Disable KR training */
+       xgbe_an_disable_kr_training(pdata);
+
+       /* Set MAC to 1G speed */
+       pdata->hw_if.set_gmii_speed(pdata);
+
+       /* Set PCS to KX/1G speed */
+       reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
+       reg &= ~MDIO_PCS_CTRL2_TYPE;
+       reg |= MDIO_PCS_CTRL2_10GBX;
+       XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, reg);
+
+       reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+       reg &= ~MDIO_CTRL1_SPEEDSEL;
+       reg |= MDIO_CTRL1_SPEED1G;
+       XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
+
+       xgbe_pcs_power_cycle(pdata);
+
+       /* Set SerDes to 1G speed */
+       xgbe_serdes_start_ratechange(pdata);
+
+       XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, DATARATE, XGBE_SPEED_1000_RATE);
+       XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, WORDMODE, XGBE_SPEED_1000_WORD);
+       XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, PLLSEL, XGBE_SPEED_1000_PLL);
+
+       XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, CDR_RATE,
+                          pdata->serdes_cdr_rate[XGBE_SPEED_1000]);
+       XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, TXAMP,
+                          pdata->serdes_tx_amp[XGBE_SPEED_1000]);
+       XRXTX_IOWRITE_BITS(pdata, RXTX_REG20, BLWC_ENA,
+                          pdata->serdes_blwc[XGBE_SPEED_1000]);
+       XRXTX_IOWRITE_BITS(pdata, RXTX_REG114, PQ_REG,
+                          pdata->serdes_pq_skew[XGBE_SPEED_1000]);
+       XRXTX_IOWRITE_BITS(pdata, RXTX_REG129, RXDFE_CONFIG,
+                          pdata->serdes_dfe_tap_cfg[XGBE_SPEED_1000]);
+       XRXTX_IOWRITE(pdata, RXTX_REG22,
+                     pdata->serdes_dfe_tap_ena[XGBE_SPEED_1000]);
+
+       xgbe_serdes_complete_ratechange(pdata);
+
+       netif_dbg(pdata, link, pdata->netdev, "1GbE KX mode set\n");
+}
+
+static void xgbe_cur_mode(struct xgbe_prv_data *pdata,
+                         enum xgbe_mode *mode)
+{
+       unsigned int reg;
+
+       reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
+       if ((reg & MDIO_PCS_CTRL2_TYPE) == MDIO_PCS_CTRL2_10GBR)
+               *mode = XGBE_MODE_KR;
+       else
+               *mode = XGBE_MODE_KX;
+}
+
+static bool xgbe_in_kr_mode(struct xgbe_prv_data *pdata)
+{
+       enum xgbe_mode mode;
+
+       xgbe_cur_mode(pdata, &mode);
+
+       return (mode == XGBE_MODE_KR);
+}
+
+static void xgbe_switch_mode(struct xgbe_prv_data *pdata)
+{
+       /* If we are in KR, switch to KX, and vice versa */
+       if (xgbe_in_kr_mode(pdata)) {
+               if (pdata->speed_set == XGBE_SPEEDSET_1000_10000)
+                       xgbe_gmii_mode(pdata);
+               else
+                       xgbe_gmii_2500_mode(pdata);
+       } else {
+               xgbe_xgmii_mode(pdata);
        }
-       DBGPR("  mdiobus_register succeeded for %s\n", pdata->mii_bus_id);
-
-       /* Probe the PCS using Clause 45 */
-       phydev = get_phy_device(mii, XGBE_PRTAD, true);
-       if (IS_ERR(phydev) || !phydev ||
-           !phydev->c45_ids.device_ids[MDIO_MMD_PCS]) {
-               dev_err(pdata->dev, "get_phy_device failed\n");
-               ret = phydev ? PTR_ERR(phydev) : -ENOLINK;
-               goto err_mdiobus_register;
+}
+
+static void xgbe_set_mode(struct xgbe_prv_data *pdata,
+                         enum xgbe_mode mode)
+{
+       enum xgbe_mode cur_mode;
+
+       xgbe_cur_mode(pdata, &cur_mode);
+       if (mode != cur_mode)
+               xgbe_switch_mode(pdata);
+}
+
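+/* Editorial note: xgbe_cur_mode() derives the mode from the PCS CTRL2 type
+ * field on every call, so it is never cached. A hypothetical caller that
+ * must be in KR before touching 10G-only state would simply do:
+ *
+ *	xgbe_set_mode(pdata, XGBE_MODE_KR);	 -- no-op if already in KR
+ */
+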
+static bool xgbe_use_xgmii_mode(struct xgbe_prv_data *pdata)
+{
+       if (pdata->phy.autoneg == AUTONEG_ENABLE) {
+               if (pdata->phy.advertising & ADVERTISED_10000baseKR_Full)
+                       return true;
+       } else {
+               if (pdata->phy.speed == SPEED_10000)
+                       return true;
        }
-       request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT,
-                      MDIO_ID_ARGS(phydev->c45_ids.device_ids[MDIO_MMD_PCS]));
 
-       ret = phy_device_register(phydev);
-       if (ret) {
-               dev_err(pdata->dev, "phy_device_register failed\n");
-               goto err_phy_device;
+       return false;
+}
+
+static bool xgbe_use_gmii_2500_mode(struct xgbe_prv_data *pdata)
+{
+       if (pdata->phy.autoneg == AUTONEG_ENABLE) {
+               if (pdata->phy.advertising & ADVERTISED_2500baseX_Full)
+                       return true;
+       } else {
+               if (pdata->phy.speed == SPEED_2500)
+                       return true;
        }
-       if (!phydev->dev.driver) {
-               dev_err(pdata->dev, "phy driver probe failed\n");
-               ret = -EIO;
-               goto err_phy_device;
+
+       return false;
+}
+
+static bool xgbe_use_gmii_mode(struct xgbe_prv_data *pdata)
+{
+       if (pdata->phy.autoneg == AUTONEG_ENABLE) {
+               if (pdata->phy.advertising & ADVERTISED_1000baseKX_Full)
+                       return true;
+       } else {
+               if (pdata->phy.speed == SPEED_1000)
+                       return true;
        }
 
-       /* Add a reference to the PHY driver so it can't be unloaded */
-       pdata->phy_module = phydev->dev.driver->owner;
-       if (!try_module_get(pdata->phy_module)) {
-               dev_err(pdata->dev, "try_module_get failed\n");
-               ret = -EIO;
-               goto err_phy_device;
+       return false;
+}
+
+static void xgbe_set_an(struct xgbe_prv_data *pdata, bool enable, bool restart)
+{
+       unsigned int reg;
+
+       reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1);
+       reg &= ~MDIO_AN_CTRL1_ENABLE;
+
+       if (enable)
+               reg |= MDIO_AN_CTRL1_ENABLE;
+
+       if (restart)
+               reg |= MDIO_AN_CTRL1_RESTART;
+
+       XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_CTRL1, reg);
+}
+
+static void xgbe_restart_an(struct xgbe_prv_data *pdata)
+{
+       xgbe_set_an(pdata, true, true);
+
+       netif_dbg(pdata, link, pdata->netdev, "AN enabled/restarted\n");
+}
+
+static void xgbe_disable_an(struct xgbe_prv_data *pdata)
+{
+       xgbe_set_an(pdata, false, false);
+
+       netif_dbg(pdata, link, pdata->netdev, "AN disabled\n");
+}
+
+static enum xgbe_an xgbe_an_tx_training(struct xgbe_prv_data *pdata,
+                                       enum xgbe_rx *state)
+{
+       unsigned int ad_reg, lp_reg, reg;
+
+       *state = XGBE_RX_COMPLETE;
+
+       /* If we're not in KR mode then we're done */
+       if (!xgbe_in_kr_mode(pdata))
+               return XGBE_AN_PAGE_RECEIVED;
+
+       /* Enable/Disable FEC */
+       ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
+       lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2);
+
+       reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FECCTRL);
+       reg &= ~(MDIO_PMA_10GBR_FECABLE_ABLE | MDIO_PMA_10GBR_FECABLE_ERRABLE);
+       if ((ad_reg & 0xc000) && (lp_reg & 0xc000))
+               reg |= pdata->fec_ability;
+
+       XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FECCTRL, reg);
+
+       /* Start KR training */
+       reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
+       if (reg & XGBE_KR_TRAINING_ENABLE) {
+               XSIR0_IOWRITE_BITS(pdata, SIR0_KR_RT_1, RESET, 1);
+
+               reg |= XGBE_KR_TRAINING_START;
+               XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL,
+                           reg);
+
+               XSIR0_IOWRITE_BITS(pdata, SIR0_KR_RT_1, RESET, 0);
+
+               netif_dbg(pdata, link, pdata->netdev,
+                         "KR training initiated\n");
        }
 
-       pdata->mii = mii;
-       pdata->mdio_mmd = MDIO_MMD_PCS;
+       return XGBE_AN_PAGE_RECEIVED;
+}
+
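+/* Editorial worked example for the FEC gate above (values illustrative):
+ * bits 15:14 (0xc000) of advertisement/LPA register 3 carry the FEC ability
+ * and FEC requested bits. With ad_reg = 0xc000 and lp_reg = 0x4000 both
+ * masks are non-zero, so fec_ability is written into FECCTRL; if either
+ * side advertises neither bit, FEC stays disabled.
+ */
+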
+static enum xgbe_an xgbe_an_tx_xnp(struct xgbe_prv_data *pdata,
+                                  enum xgbe_rx *state)
+{
+       u16 msg;
+
+       *state = XGBE_RX_XNP;
+
+       msg = XGBE_XNP_MCF_NULL_MESSAGE;
+       msg |= XGBE_XNP_MP_FORMATTED;
+
+       XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_XNP + 2, 0);
+       XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_XNP + 1, 0);
+       XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_XNP, msg);
+
+       return XGBE_AN_PAGE_RECEIVED;
+}
+
+static enum xgbe_an xgbe_an_rx_bpa(struct xgbe_prv_data *pdata,
+                                  enum xgbe_rx *state)
+{
+       unsigned int link_support;
+       unsigned int reg, ad_reg, lp_reg;
+
+       /* Read Base Ability register 2 first */
+       reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1);
+
+       /* Check for a supported mode, otherwise restart in a different one */
+       link_support = xgbe_in_kr_mode(pdata) ? 0x80 : 0x20;
+       if (!(reg & link_support))
+               return XGBE_AN_INCOMPAT_LINK;
+
+       /* Check Extended Next Page support */
+       ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
+       lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA);
+
+       return ((ad_reg & XGBE_XNP_NP_EXCHANGE) ||
+               (lp_reg & XGBE_XNP_NP_EXCHANGE))
+              ? xgbe_an_tx_xnp(pdata, state)
+              : xgbe_an_tx_training(pdata, state);
+}
+
+static enum xgbe_an xgbe_an_rx_xnp(struct xgbe_prv_data *pdata,
+                                  enum xgbe_rx *state)
+{
+       unsigned int ad_reg, lp_reg;
+
+       /* Check Extended Next Page support */
+       ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_XNP);
+       lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPX);
+
+       return ((ad_reg & XGBE_XNP_NP_EXCHANGE) ||
+               (lp_reg & XGBE_XNP_NP_EXCHANGE))
+              ? xgbe_an_tx_xnp(pdata, state)
+              : xgbe_an_tx_training(pdata, state);
+}
+
+static enum xgbe_an xgbe_an_page_received(struct xgbe_prv_data *pdata)
+{
+       enum xgbe_rx *state;
+       unsigned long an_timeout;
+       enum xgbe_an ret;
+
+       if (!pdata->an_start) {
+               pdata->an_start = jiffies;
+       } else {
+               an_timeout = pdata->an_start +
+                            msecs_to_jiffies(XGBE_AN_MS_TIMEOUT);
+               if (time_after(jiffies, an_timeout)) {
+                       /* Auto-negotiation timed out, reset state */
+                       pdata->kr_state = XGBE_RX_BPA;
+                       pdata->kx_state = XGBE_RX_BPA;
+
+                       pdata->an_start = jiffies;
+
+                       netif_dbg(pdata, link, pdata->netdev,
+                                 "AN timed out, resetting state\n");
+               }
+       }
 
-       phydev->autoneg = pdata->default_autoneg;
-       if (phydev->autoneg == AUTONEG_DISABLE) {
-               phydev->speed = pdata->default_speed;
-               phydev->duplex = DUPLEX_FULL;
+       state = xgbe_in_kr_mode(pdata) ? &pdata->kr_state : &pdata->kx_state;
 
-               phydev->advertising &= ~ADVERTISED_Autoneg;
+       switch (*state) {
+       case XGBE_RX_BPA:
+               ret = xgbe_an_rx_bpa(pdata, state);
+               break;
+
+       case XGBE_RX_XNP:
+               ret = xgbe_an_rx_xnp(pdata, state);
+               break;
+
+       default:
+               ret = XGBE_AN_ERROR;
+       }
+
+       return ret;
+}
+
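+/* Editorial note: the timeout handling above is the standard jiffies idiom;
+ * an equivalent single-expression form (XGBE_AN_MS_TIMEOUT is 500 ms,
+ * defined in xgbe.h later in this patch):
+ *
+ *	if (time_after(jiffies, pdata->an_start +
+ *				msecs_to_jiffies(XGBE_AN_MS_TIMEOUT)))
+ *		... page exchange stalled, drop back to the BPA state ...
+ */
+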
+static enum xgbe_an xgbe_an_incompat_link(struct xgbe_prv_data *pdata)
+{
+       /* Make sure we aren't looping between modes trying to negotiate */
+       if (xgbe_in_kr_mode(pdata)) {
+               pdata->kr_state = XGBE_RX_ERROR;
+
+               if (!(pdata->phy.advertising & ADVERTISED_1000baseKX_Full) &&
+                   !(pdata->phy.advertising & ADVERTISED_2500baseX_Full))
+                       return XGBE_AN_NO_LINK;
+
+               if (pdata->kx_state != XGBE_RX_BPA)
+                       return XGBE_AN_NO_LINK;
+       } else {
+               pdata->kx_state = XGBE_RX_ERROR;
+
+               if (!(pdata->phy.advertising & ADVERTISED_10000baseKR_Full))
+                       return XGBE_AN_NO_LINK;
+
+               if (pdata->kr_state != XGBE_RX_BPA)
+                       return XGBE_AN_NO_LINK;
+       }
+
+       xgbe_disable_an(pdata);
+
+       xgbe_switch_mode(pdata);
+
+       xgbe_restart_an(pdata);
+
+       return XGBE_AN_INCOMPAT_LINK;
+}
+
+static irqreturn_t xgbe_an_isr(int irq, void *data)
+{
+       struct xgbe_prv_data *pdata = data;
+
+       netif_dbg(pdata, intr, pdata->netdev, "AN interrupt received\n");
+
+       /* Interrupt reason must be read and cleared outside of IRQ context */
+       disable_irq_nosync(pdata->an_irq);
+
+       queue_work(pdata->an_workqueue, &pdata->an_irq_work);
+
+       return IRQ_HANDLED;
+}
+
+static void xgbe_an_irq_work(struct work_struct *work)
+{
+       struct xgbe_prv_data *pdata = container_of(work,
+                                                  struct xgbe_prv_data,
+                                                  an_irq_work);
+
+       /* Avoid a race between the state machine re-enabling the IRQ and this
+        * request: wait for any in-progress work to finish before queueing it
+        */
+       flush_work(&pdata->an_work);
+       queue_work(pdata->an_workqueue, &pdata->an_work);
+}
+
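+/* Editorial sketch of the deferral pattern used above -- MDIO accesses can
+ * sleep, so nothing is read in hard-IRQ context:
+ *
+ *	hard IRQ:	disable_irq_nosync(an_irq), queue an_irq_work
+ *	an_irq_work:	flush_work(&an_work), then queue an_work
+ *	an_work:	read/ack MDIO_AN_INT ... enable_irq(an_irq)
+ */
+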
+static const char *xgbe_state_as_string(enum xgbe_an state)
+{
+       switch (state) {
+       case XGBE_AN_READY:
+               return "Ready";
+       case XGBE_AN_PAGE_RECEIVED:
+               return "Page-Received";
+       case XGBE_AN_INCOMPAT_LINK:
+               return "Incompatible-Link";
+       case XGBE_AN_COMPLETE:
+               return "Complete";
+       case XGBE_AN_NO_LINK:
+               return "No-Link";
+       case XGBE_AN_ERROR:
+               return "Error";
+       default:
+               return "Undefined";
+       }
+}
+
+static void xgbe_an_state_machine(struct work_struct *work)
+{
+       struct xgbe_prv_data *pdata = container_of(work,
+                                                  struct xgbe_prv_data,
+                                                  an_work);
+       enum xgbe_an cur_state = pdata->an_state;
+       unsigned int int_reg, int_mask;
+
+       mutex_lock(&pdata->an_mutex);
+
+       /* Read the interrupt */
+       int_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_INT);
+       if (!int_reg)
+               goto out;
+
+next_int:
+       if (int_reg & XGBE_AN_PG_RCV) {
+               pdata->an_state = XGBE_AN_PAGE_RECEIVED;
+               int_mask = XGBE_AN_PG_RCV;
+       } else if (int_reg & XGBE_AN_INC_LINK) {
+               pdata->an_state = XGBE_AN_INCOMPAT_LINK;
+               int_mask = XGBE_AN_INC_LINK;
+       } else if (int_reg & XGBE_AN_INT_CMPLT) {
+               pdata->an_state = XGBE_AN_COMPLETE;
+               int_mask = XGBE_AN_INT_CMPLT;
+       } else {
+               pdata->an_state = XGBE_AN_ERROR;
+               int_mask = 0;
        }
 
-       pdata->phydev = phydev;
+       /* Clear the interrupt to be processed */
+       int_reg &= ~int_mask;
+       XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, int_reg);
+
+       pdata->an_result = pdata->an_state;
+
+again:
+       netif_dbg(pdata, link, pdata->netdev, "AN %s\n",
+                 xgbe_state_as_string(pdata->an_state));
+
+       cur_state = pdata->an_state;
+
+       switch (pdata->an_state) {
+       case XGBE_AN_READY:
+               pdata->an_supported = 0;
+               break;
+
+       case XGBE_AN_PAGE_RECEIVED:
+               pdata->an_state = xgbe_an_page_received(pdata);
+               pdata->an_supported++;
+               break;
+
+       case XGBE_AN_INCOMPAT_LINK:
+               pdata->an_supported = 0;
+               pdata->parallel_detect = 0;
+               pdata->an_state = xgbe_an_incompat_link(pdata);
+               break;
 
-       DBGPHY_REGS(pdata);
+       case XGBE_AN_COMPLETE:
+               pdata->parallel_detect = pdata->an_supported ? 0 : 1;
+               netif_dbg(pdata, link, pdata->netdev, "%s successful\n",
+                         pdata->an_supported ? "Auto negotiation"
+                                             : "Parallel detection");
+               break;
 
-       DBGPR("<--xgbe_mdio_register\n");
+       case XGBE_AN_NO_LINK:
+               break;
+
+       default:
+               pdata->an_state = XGBE_AN_ERROR;
+       }
+
+       if (pdata->an_state == XGBE_AN_NO_LINK) {
+               int_reg = 0;
+               XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
+       } else if (pdata->an_state == XGBE_AN_ERROR) {
+               netdev_err(pdata->netdev,
+                          "error during auto-negotiation, state=%u\n",
+                          cur_state);
+
+               int_reg = 0;
+               XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
+       }
+
+       if (pdata->an_state >= XGBE_AN_COMPLETE) {
+               pdata->an_result = pdata->an_state;
+               pdata->an_state = XGBE_AN_READY;
+               pdata->kr_state = XGBE_RX_BPA;
+               pdata->kx_state = XGBE_RX_BPA;
+               pdata->an_start = 0;
+
+               netif_dbg(pdata, link, pdata->netdev, "AN result: %s\n",
+                         xgbe_state_as_string(pdata->an_result));
+       }
+
+       if (cur_state != pdata->an_state)
+               goto again;
+
+       if (int_reg)
+               goto next_int;
+
+out:
+       enable_irq(pdata->an_irq);
+
+       mutex_unlock(&pdata->an_mutex);
+}
+
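+/* Editorial note -- a typical successful pass through the loop above:
+ *
+ *	int_reg has XGBE_AN_PG_RCV    -> PAGE_RECEIVED, xgbe_an_page_received()
+ *	int_reg has XGBE_AN_INT_CMPLT -> COMPLETE -> result latched, an_state
+ *	and the kr/kx sub-states reset to READY/BPA, an_irq re-enabled at out:
+ */
+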
+static void xgbe_an_init(struct xgbe_prv_data *pdata)
+{
+       unsigned int reg;
+
+       /* Set up Advertisement register 3 first */
+       reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
+       if (pdata->phy.advertising & ADVERTISED_10000baseR_FEC)
+               reg |= 0xc000;
+       else
+               reg &= ~0xc000;
+
+       XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2, reg);
+
+       /* Set up Advertisement register 2 next */
+       reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
+       if (pdata->phy.advertising & ADVERTISED_10000baseKR_Full)
+               reg |= 0x80;
+       else
+               reg &= ~0x80;
+
+       if ((pdata->phy.advertising & ADVERTISED_1000baseKX_Full) ||
+           (pdata->phy.advertising & ADVERTISED_2500baseX_Full))
+               reg |= 0x20;
+       else
+               reg &= ~0x20;
+
+       XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1, reg);
+
+       /* Set up Advertisement register 1 last */
+       reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
+       if (pdata->phy.advertising & ADVERTISED_Pause)
+               reg |= 0x400;
+       else
+               reg &= ~0x400;
+
+       if (pdata->phy.advertising & ADVERTISED_Asym_Pause)
+               reg |= 0x800;
+       else
+               reg &= ~0x800;
+
+       /* We don't intend to perform XNP */
+       reg &= ~XGBE_XNP_NP_EXCHANGE;
+
+       XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE, reg);
+
+       netif_dbg(pdata, link, pdata->netdev, "AN initialized\n");
+}
+
+static const char *xgbe_phy_fc_string(struct xgbe_prv_data *pdata)
+{
+       if (pdata->tx_pause && pdata->rx_pause)
+               return "rx/tx";
+       else if (pdata->rx_pause)
+               return "rx";
+       else if (pdata->tx_pause)
+               return "tx";
+       else
+               return "off";
+}
+
+static const char *xgbe_phy_speed_string(int speed)
+{
+       switch (speed) {
+       case SPEED_1000:
+               return "1Gbps";
+       case SPEED_2500:
+               return "2.5Gbps";
+       case SPEED_10000:
+               return "10Gbps";
+       case SPEED_UNKNOWN:
+               return "Unknown";
+       default:
+               return "Unsupported";
+       }
+}
+
+static void xgbe_phy_print_status(struct xgbe_prv_data *pdata)
+{
+       if (pdata->phy.link)
+               netdev_info(pdata->netdev,
+                           "Link is Up - %s/%s - flow control %s\n",
+                           xgbe_phy_speed_string(pdata->phy.speed),
+                           pdata->phy.duplex == DUPLEX_FULL ? "Full" : "Half",
+                           xgbe_phy_fc_string(pdata));
+       else
+               netdev_info(pdata->netdev, "Link is Down\n");
+}
+
+static void xgbe_phy_adjust_link(struct xgbe_prv_data *pdata)
+{
+       int new_state = 0;
+
+       if (pdata->phy.link) {
+               /* Flow control support */
+               pdata->pause_autoneg = pdata->phy.pause_autoneg;
+
+               if (pdata->tx_pause != pdata->phy.tx_pause) {
+                       new_state = 1;
+                       pdata->hw_if.config_tx_flow_control(pdata);
+                       pdata->tx_pause = pdata->phy.tx_pause;
+               }
+
+               if (pdata->rx_pause != pdata->phy.rx_pause) {
+                       new_state = 1;
+                       pdata->hw_if.config_rx_flow_control(pdata);
+                       pdata->rx_pause = pdata->phy.rx_pause;
+               }
+
+               /* Speed support */
+               if (pdata->phy_speed != pdata->phy.speed) {
+                       new_state = 1;
+                       pdata->phy_speed = pdata->phy.speed;
+               }
+
+               if (pdata->phy_link != pdata->phy.link) {
+                       new_state = 1;
+                       pdata->phy_link = pdata->phy.link;
+               }
+       } else if (pdata->phy_link) {
+               new_state = 1;
+               pdata->phy_link = 0;
+               pdata->phy_speed = SPEED_UNKNOWN;
+       }
+
+       if (new_state && netif_msg_link(pdata))
+               xgbe_phy_print_status(pdata);
+}
+
+static int xgbe_phy_config_fixed(struct xgbe_prv_data *pdata)
+{
+       netif_dbg(pdata, link, pdata->netdev, "fixed PHY configuration\n");
+
+       /* Disable auto-negotiation */
+       xgbe_disable_an(pdata);
+
+       /* Validate/Set specified speed */
+       switch (pdata->phy.speed) {
+       case SPEED_10000:
+               xgbe_set_mode(pdata, XGBE_MODE_KR);
+               break;
+
+       case SPEED_2500:
+       case SPEED_1000:
+               xgbe_set_mode(pdata, XGBE_MODE_KX);
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       /* Validate duplex mode */
+       if (pdata->phy.duplex != DUPLEX_FULL)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int __xgbe_phy_config_aneg(struct xgbe_prv_data *pdata)
+{
+       set_bit(XGBE_LINK_INIT, &pdata->dev_state);
+       pdata->link_check = jiffies;
+
+       if (pdata->phy.autoneg != AUTONEG_ENABLE)
+               return xgbe_phy_config_fixed(pdata);
+
+       netif_dbg(pdata, link, pdata->netdev, "AN PHY configuration\n");
+
+       /* Disable auto-negotiation interrupt */
+       disable_irq(pdata->an_irq);
+
+       /* Start auto-negotiation in a supported mode */
+       if (pdata->phy.advertising & ADVERTISED_10000baseKR_Full) {
+               xgbe_set_mode(pdata, XGBE_MODE_KR);
+       } else if ((pdata->phy.advertising & ADVERTISED_1000baseKX_Full) ||
+                  (pdata->phy.advertising & ADVERTISED_2500baseX_Full)) {
+               xgbe_set_mode(pdata, XGBE_MODE_KX);
+       } else {
+               enable_irq(pdata->an_irq);
+               return -EINVAL;
+       }
+
+       /* Disable and stop any in-progress auto-negotiation */
+       xgbe_disable_an(pdata);
+
+       /* Clear any auto-negotiation interrupts */
+       XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
+
+       pdata->an_result = XGBE_AN_READY;
+       pdata->an_state = XGBE_AN_READY;
+       pdata->kr_state = XGBE_RX_BPA;
+       pdata->kx_state = XGBE_RX_BPA;
+
+       /* Re-enable auto-negotiation interrupt */
+       enable_irq(pdata->an_irq);
+
+       /* Set up advertisement registers based on current settings */
+       xgbe_an_init(pdata);
+
+       /* Enable and start auto-negotiation */
+       xgbe_restart_an(pdata);
 
        return 0;
+}
+
+static int xgbe_phy_config_aneg(struct xgbe_prv_data *pdata)
+{
+       int ret;
+
+       mutex_lock(&pdata->an_mutex);
+
+       ret = __xgbe_phy_config_aneg(pdata);
+       if (ret)
+               set_bit(XGBE_LINK_ERR, &pdata->dev_state);
+       else
+               clear_bit(XGBE_LINK_ERR, &pdata->dev_state);
+
+       mutex_unlock(&pdata->an_mutex);
+
+       return ret;
+}
+
+static bool xgbe_phy_aneg_done(struct xgbe_prv_data *pdata)
+{
+       return (pdata->an_result == XGBE_AN_COMPLETE);
+}
+
+static void xgbe_check_link_timeout(struct xgbe_prv_data *pdata)
+{
+       unsigned long link_timeout;
+
+       link_timeout = pdata->link_check + (XGBE_LINK_TIMEOUT * HZ);
+       if (time_after(jiffies, link_timeout)) {
+               netif_dbg(pdata, link, pdata->netdev, "AN link timeout\n");
+               xgbe_phy_config_aneg(pdata);
+       }
+}
+
+static void xgbe_phy_status_force(struct xgbe_prv_data *pdata)
+{
+       if (xgbe_in_kr_mode(pdata)) {
+               pdata->phy.speed = SPEED_10000;
+       } else {
+               switch (pdata->speed_set) {
+               case XGBE_SPEEDSET_1000_10000:
+                       pdata->phy.speed = SPEED_1000;
+                       break;
+
+               case XGBE_SPEEDSET_2500_10000:
+                       pdata->phy.speed = SPEED_2500;
+                       break;
+               }
+       }
+       pdata->phy.duplex = DUPLEX_FULL;
+}
+
+static void xgbe_phy_status_aneg(struct xgbe_prv_data *pdata)
+{
+       unsigned int ad_reg, lp_reg;
+
+       pdata->phy.lp_advertising = 0;
+
+       if ((pdata->phy.autoneg != AUTONEG_ENABLE) || pdata->parallel_detect)
+               return xgbe_phy_status_force(pdata);
+
+       pdata->phy.lp_advertising |= ADVERTISED_Autoneg;
+       pdata->phy.lp_advertising |= ADVERTISED_Backplane;
+
+       /* Compare Advertisement and Link Partner register 1 */
+       ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
+       lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA);
+       if (lp_reg & 0x400)
+               pdata->phy.lp_advertising |= ADVERTISED_Pause;
+       if (lp_reg & 0x800)
+               pdata->phy.lp_advertising |= ADVERTISED_Asym_Pause;
+
+       if (pdata->phy.pause_autoneg) {
+               /* Set flow control based on auto-negotiation result */
+               pdata->phy.tx_pause = 0;
+               pdata->phy.rx_pause = 0;
+
+               if (ad_reg & lp_reg & 0x400) {
+                       pdata->phy.tx_pause = 1;
+                       pdata->phy.rx_pause = 1;
+               } else if (ad_reg & lp_reg & 0x800) {
+                       if (ad_reg & 0x400)
+                               pdata->phy.rx_pause = 1;
+                       else if (lp_reg & 0x400)
+                               pdata->phy.tx_pause = 1;
+               }
+       }
+
+       /* Compare Advertisement and Link Partner register 2 */
+       ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
+       lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1);
+       if (lp_reg & 0x80)
+               pdata->phy.lp_advertising |= ADVERTISED_10000baseKR_Full;
+       if (lp_reg & 0x20) {
+               switch (pdata->speed_set) {
+               case XGBE_SPEEDSET_1000_10000:
+                       pdata->phy.lp_advertising |= ADVERTISED_1000baseKX_Full;
+                       break;
+               case XGBE_SPEEDSET_2500_10000:
+                       pdata->phy.lp_advertising |= ADVERTISED_2500baseX_Full;
+                       break;
+               }
+       }
+
+       ad_reg &= lp_reg;
+       if (ad_reg & 0x80) {
+               pdata->phy.speed = SPEED_10000;
+               xgbe_set_mode(pdata, XGBE_MODE_KR);
+       } else if (ad_reg & 0x20) {
+               switch (pdata->speed_set) {
+               case XGBE_SPEEDSET_1000_10000:
+                       pdata->phy.speed = SPEED_1000;
+                       break;
+
+               case XGBE_SPEEDSET_2500_10000:
+                       pdata->phy.speed = SPEED_2500;
+                       break;
+               }
+
+               xgbe_set_mode(pdata, XGBE_MODE_KX);
+       } else {
+               pdata->phy.speed = SPEED_UNKNOWN;
+       }
+
+       /* Compare Advertisement and Link Partner register 3 */
+       ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
+       lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2);
+       if (lp_reg & 0xc000)
+               pdata->phy.lp_advertising |= ADVERTISED_10000baseR_FEC;
+
+       pdata->phy.duplex = DUPLEX_FULL;
+}
+
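+/* Editorial worked example for the pause resolution above (bit 0x400 is
+ * Pause, bit 0x800 is Asym_Pause; values illustrative):
+ *
+ *	ad_reg = 0x800 (local: asym only), lp_reg = 0xc00 (partner: both)
+ *	ad & lp & 0x400 == 0; ad & lp & 0x800 != 0; lp & 0x400 != 0
+ *	-> tx_pause = 1, rx_pause = 0: we may send PAUSE frames but do not
+ *	   honor received ones, matching IEEE 802.3 Annex 28B resolution.
+ */
+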
+static void xgbe_phy_status(struct xgbe_prv_data *pdata)
+{
+       unsigned int reg, link_aneg;
+
+       if (test_bit(XGBE_LINK_ERR, &pdata->dev_state)) {
+               if (test_and_clear_bit(XGBE_LINK, &pdata->dev_state))
+                       netif_carrier_off(pdata->netdev);
+
+               pdata->phy.link = 0;
+               goto adjust_link;
+       }
+
+       link_aneg = (pdata->phy.autoneg == AUTONEG_ENABLE);
+
+       /* Get the link status. Link status is latched low, so read
+        * once to clear and then read again to get current state
+        */
+       reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+       reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+       pdata->phy.link = (reg & MDIO_STAT1_LSTATUS) ? 1 : 0;
+
+       if (pdata->phy.link) {
+               if (link_aneg && !xgbe_phy_aneg_done(pdata)) {
+                       xgbe_check_link_timeout(pdata);
+                       return;
+               }
+
+               xgbe_phy_status_aneg(pdata);
+
+               if (test_bit(XGBE_LINK_INIT, &pdata->dev_state))
+                       clear_bit(XGBE_LINK_INIT, &pdata->dev_state);
+
+               if (!test_bit(XGBE_LINK, &pdata->dev_state)) {
+                       set_bit(XGBE_LINK, &pdata->dev_state);
+                       netif_carrier_on(pdata->netdev);
+               }
+       } else {
+               if (test_bit(XGBE_LINK_INIT, &pdata->dev_state)) {
+                       xgbe_check_link_timeout(pdata);
+
+                       if (link_aneg)
+                               return;
+               }
+
+               xgbe_phy_status_aneg(pdata);
+
+               if (test_bit(XGBE_LINK, &pdata->dev_state)) {
+                       clear_bit(XGBE_LINK, &pdata->dev_state);
+                       netif_carrier_off(pdata->netdev);
+               }
+       }
+
+adjust_link:
+       xgbe_phy_adjust_link(pdata);
+}
+
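+/* Editorial note: MDIO_STAT1 link status is latching-low per Clause 45, so
+ * the back-to-back reads above are deliberate:
+ *
+ *	(void)XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);  -- clear old fault
+ *	reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);  -- current state
+ */
+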
+static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
+{
+       netif_dbg(pdata, link, pdata->netdev, "stopping PHY\n");
+
+       /* Disable auto-negotiation */
+       xgbe_disable_an(pdata);
+
+       /* Disable auto-negotiation interrupts */
+       XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0);
+
+       devm_free_irq(pdata->dev, pdata->an_irq, pdata);
 
-err_phy_device:
-       phy_device_free(phydev);
+       pdata->phy.link = 0;
+       if (test_and_clear_bit(XGBE_LINK, &pdata->dev_state))
+               netif_carrier_off(pdata->netdev);
 
-err_mdiobus_register:
-       mdiobus_unregister(mii);
+       xgbe_phy_adjust_link(pdata);
+}
+
+static int xgbe_phy_start(struct xgbe_prv_data *pdata)
+{
+       struct net_device *netdev = pdata->netdev;
+       int ret;
+
+       netif_dbg(pdata, link, pdata->netdev, "starting PHY\n");
+
+       ret = devm_request_irq(pdata->dev, pdata->an_irq,
+                              xgbe_an_isr, 0, pdata->an_name,
+                              pdata);
+       if (ret) {
+               netdev_err(netdev, "phy irq request failed\n");
+               return ret;
+       }
+
+       /* Set initial mode - call the mode setting routines
+        * directly to ensure we are properly configured
+        */
+       if (xgbe_use_xgmii_mode(pdata)) {
+               xgbe_xgmii_mode(pdata);
+       } else if (xgbe_use_gmii_mode(pdata)) {
+               xgbe_gmii_mode(pdata);
+       } else if (xgbe_use_gmii_2500_mode(pdata)) {
+               xgbe_gmii_2500_mode(pdata);
+       } else {
+               ret = -EINVAL;
+               goto err_irq;
+       }
+
+       /* Set up advertisement registers based on current settings */
+       xgbe_an_init(pdata);
+
+       /* Enable auto-negotiation interrupts */
+       XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0x07);
 
-err_mdiobus_alloc:
-       mdiobus_free(mii);
+       return xgbe_phy_config_aneg(pdata);
+
+err_irq:
+       devm_free_irq(pdata->dev, pdata->an_irq, pdata);
 
        return ret;
 }
 
-void xgbe_mdio_unregister(struct xgbe_prv_data *pdata)
+static int xgbe_phy_reset(struct xgbe_prv_data *pdata)
+{
+       unsigned int count, reg;
+
+       reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+       reg |= MDIO_CTRL1_RESET;
+       XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
+
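+       /* Poll up to 50 x 20 ms (~1 s) for the self-clearing reset bit */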
+       count = 50;
+       do {
+               msleep(20);
+               reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+       } while ((reg & MDIO_CTRL1_RESET) && --count);
+
+       if (reg & MDIO_CTRL1_RESET)
+               return -ETIMEDOUT;
+
+       /* Disable auto-negotiation for now */
+       xgbe_disable_an(pdata);
+
+       /* Clear auto-negotiation interrupts */
+       XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
+
+       return 0;
+}
+
+static void xgbe_dump_phy_registers(struct xgbe_prv_data *pdata)
 {
-       DBGPR("-->xgbe_mdio_unregister\n");
+       struct device *dev = pdata->dev;
+
+       dev_dbg(dev, "\n************* PHY Reg dump **********************\n");
+
+       dev_dbg(dev, "PCS Control Reg (%#04x) = %#04x\n", MDIO_CTRL1,
+               XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1));
+       dev_dbg(dev, "PCS Status Reg (%#04x) = %#04x\n", MDIO_STAT1,
+               XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1));
+       dev_dbg(dev, "Phy Id (PHYS ID 1 %#04x)= %#04x\n", MDIO_DEVID1,
+               XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID1));
+       dev_dbg(dev, "Phy Id (PHYS ID 2 %#04x)= %#04x\n", MDIO_DEVID2,
+               XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID2));
+       dev_dbg(dev, "Devices in Package (%#04x)= %#04x\n", MDIO_DEVS1,
+               XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS1));
+       dev_dbg(dev, "Devices in Package (%#04x)= %#04x\n", MDIO_DEVS2,
+               XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS2));
+
+       dev_dbg(dev, "Auto-Neg Control Reg (%#04x) = %#04x\n", MDIO_CTRL1,
+               XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1));
+       dev_dbg(dev, "Auto-Neg Status Reg (%#04x) = %#04x\n", MDIO_STAT1,
+               XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_STAT1));
+       dev_dbg(dev, "Auto-Neg Ad Reg 1 (%#04x) = %#04x\n",
+               MDIO_AN_ADVERTISE,
+               XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE));
+       dev_dbg(dev, "Auto-Neg Ad Reg 2 (%#04x) = %#04x\n",
+               MDIO_AN_ADVERTISE + 1,
+               XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1));
+       dev_dbg(dev, "Auto-Neg Ad Reg 3 (%#04x) = %#04x\n",
+               MDIO_AN_ADVERTISE + 2,
+               XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2));
+       dev_dbg(dev, "Auto-Neg Completion Reg (%#04x) = %#04x\n",
+               MDIO_AN_COMP_STAT,
+               XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_COMP_STAT));
+
+       dev_dbg(dev, "\n*************************************************\n");
+}
+
+static void xgbe_phy_init(struct xgbe_prv_data *pdata)
+{
+       mutex_init(&pdata->an_mutex);
+       INIT_WORK(&pdata->an_irq_work, xgbe_an_irq_work);
+       INIT_WORK(&pdata->an_work, xgbe_an_state_machine);
+       pdata->mdio_mmd = MDIO_MMD_PCS;
+
+       /* Initialize supported features */
+       pdata->phy.supported = SUPPORTED_Autoneg;
+       pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+       pdata->phy.supported |= SUPPORTED_Backplane;
+       pdata->phy.supported |= SUPPORTED_10000baseKR_Full;
+       switch (pdata->speed_set) {
+       case XGBE_SPEEDSET_1000_10000:
+               pdata->phy.supported |= SUPPORTED_1000baseKX_Full;
+               break;
+       case XGBE_SPEEDSET_2500_10000:
+               pdata->phy.supported |= SUPPORTED_2500baseX_Full;
+               break;
+       }
 
-       pdata->phydev = NULL;
+       pdata->fec_ability = XMDIO_READ(pdata, MDIO_MMD_PMAPMD,
+                                       MDIO_PMA_10GBR_FECABLE);
+       pdata->fec_ability &= (MDIO_PMA_10GBR_FECABLE_ABLE |
+                              MDIO_PMA_10GBR_FECABLE_ERRABLE);
+       if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE)
+               pdata->phy.supported |= SUPPORTED_10000baseR_FEC;
 
-       module_put(pdata->phy_module);
-       pdata->phy_module = NULL;
+       pdata->phy.advertising = pdata->phy.supported;
 
-       mdiobus_unregister(pdata->mii);
-       pdata->mii->priv = NULL;
+       pdata->phy.address = 0;
+
+       pdata->phy.autoneg = AUTONEG_ENABLE;
+       pdata->phy.speed = SPEED_UNKNOWN;
+       pdata->phy.duplex = DUPLEX_UNKNOWN;
+
+       pdata->phy.link = 0;
+
+       pdata->phy.pause_autoneg = pdata->pause_autoneg;
+       pdata->phy.tx_pause = pdata->tx_pause;
+       pdata->phy.rx_pause = pdata->rx_pause;
+
+       /* Fix up Flow Control advertising */
+       pdata->phy.advertising &= ~ADVERTISED_Pause;
+       pdata->phy.advertising &= ~ADVERTISED_Asym_Pause;
+
+       if (pdata->rx_pause) {
+               pdata->phy.advertising |= ADVERTISED_Pause;
+               pdata->phy.advertising |= ADVERTISED_Asym_Pause;
+       }
+
+       if (pdata->tx_pause)
+               pdata->phy.advertising ^= ADVERTISED_Asym_Pause;
+
+       if (netif_msg_drv(pdata))
+               xgbe_dump_phy_registers(pdata);
+}
+
+void xgbe_init_function_ptrs_phy(struct xgbe_phy_if *phy_if)
+{
+       phy_if->phy_init        = xgbe_phy_init;
 
-       mdiobus_free(pdata->mii);
-       pdata->mii = NULL;
+       phy_if->phy_reset       = xgbe_phy_reset;
+       phy_if->phy_start       = xgbe_phy_start;
+       phy_if->phy_stop        = xgbe_phy_stop;
 
-       DBGPR("<--xgbe_mdio_unregister\n");
+       phy_if->phy_status      = xgbe_phy_status;
+       phy_if->phy_config_aneg = xgbe_phy_config_aneg;
 }
index e62dfa2deab67565cbb6c62116f4f8b6b722384f..63d72a140053956c35b82b2982497db2fbf1e440 100644 (file)
 #include <net/dcbnl.h>
 
 #define XGBE_DRV_NAME          "amd-xgbe"
-#define XGBE_DRV_VERSION       "1.0.0-a"
+#define XGBE_DRV_VERSION       "1.0.2"
 #define XGBE_DRV_DESC          "AMD 10 Gigabit Ethernet Driver"
 
 /* Descriptor related defines */
 #define XGMAC_JUMBO_PACKET_MTU 9000
 #define XGMAC_MAX_JUMBO_PACKET 9018
 
-/* MDIO bus phy name */
-#define XGBE_PHY_NAME          "amd_xgbe_phy"
-#define XGBE_PRTAD             0
-
 /* Common property names */
 #define XGBE_MAC_ADDR_PROPERTY "mac-address"
 #define XGBE_PHY_MODE_PROPERTY "phy-mode"
 #define XGBE_DMA_IRQS_PROPERTY "amd,per-channel-interrupt"
+#define XGBE_SPEEDSET_PROPERTY "amd,speed-set"
+#define XGBE_BLWC_PROPERTY     "amd,serdes-blwc"
+#define XGBE_CDR_RATE_PROPERTY "amd,serdes-cdr-rate"
+#define XGBE_PQ_SKEW_PROPERTY  "amd,serdes-pq-skew"
+#define XGBE_TX_AMP_PROPERTY   "amd,serdes-tx-amp"
+#define XGBE_DFE_CFG_PROPERTY  "amd,serdes-dfe-tap-config"
+#define XGBE_DFE_ENA_PROPERTY  "amd,serdes-dfe-tap-enable"
 
 /* Device-tree clock names */
 #define XGBE_DMA_CLOCK         "dma_clk"
 #define XGBE_RSS_LOOKUP_TABLE_TYPE     0
 #define XGBE_RSS_HASH_KEY_TYPE         1
 
+/* Auto-negotiation */
+#define XGBE_AN_MS_TIMEOUT             500
+#define XGBE_LINK_TIMEOUT              10
+
+#define XGBE_AN_INT_CMPLT              0x01
+#define XGBE_AN_INC_LINK               0x02
+#define XGBE_AN_PG_RCV                 0x04
+#define XGBE_AN_INT_MASK               0x07
+
+/* Rate-change complete wait/retry count */
+#define XGBE_RATECHANGE_COUNT          500
+
+/* Default SerDes settings */
+#define XGBE_SPEED_10000_BLWC          0
+#define XGBE_SPEED_10000_CDR           0x7
+#define XGBE_SPEED_10000_PLL           0x1
+#define XGBE_SPEED_10000_PQ            0x12
+#define XGBE_SPEED_10000_RATE          0x0
+#define XGBE_SPEED_10000_TXAMP         0xa
+#define XGBE_SPEED_10000_WORD          0x7
+#define XGBE_SPEED_10000_DFE_TAP_CONFIG        0x1
+#define XGBE_SPEED_10000_DFE_TAP_ENABLE        0x7f
+
+#define XGBE_SPEED_2500_BLWC           1
+#define XGBE_SPEED_2500_CDR            0x2
+#define XGBE_SPEED_2500_PLL            0x0
+#define XGBE_SPEED_2500_PQ             0xa
+#define XGBE_SPEED_2500_RATE           0x1
+#define XGBE_SPEED_2500_TXAMP          0xf
+#define XGBE_SPEED_2500_WORD           0x1
+#define XGBE_SPEED_2500_DFE_TAP_CONFIG 0x3
+#define XGBE_SPEED_2500_DFE_TAP_ENABLE 0x0
+
+#define XGBE_SPEED_1000_BLWC           1
+#define XGBE_SPEED_1000_CDR            0x2
+#define XGBE_SPEED_1000_PLL            0x0
+#define XGBE_SPEED_1000_PQ             0xa
+#define XGBE_SPEED_1000_RATE           0x3
+#define XGBE_SPEED_1000_TXAMP          0xf
+#define XGBE_SPEED_1000_WORD           0x1
+#define XGBE_SPEED_1000_DFE_TAP_CONFIG 0x3
+#define XGBE_SPEED_1000_DFE_TAP_ENABLE 0x0
+
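+/* Editorial sketch, not part of the patch: these defaults back the
+ * amd,serdes-* properties defined above. A probe-time read of one tuning
+ * array might look like this (device_property_read_u32_array() is the
+ * generic firmware-property API; the fallback shown is hypothetical):
+ *
+ *	if (device_property_read_u32_array(pdata->dev, XGBE_TX_AMP_PROPERTY,
+ *					   pdata->serdes_tx_amp, XGBE_SPEEDS))
+ *		pdata->serdes_tx_amp[XGBE_SPEED_10000] = XGBE_SPEED_10000_TXAMP;
+ */
+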
 struct xgbe_prv_data;
 
 struct xgbe_packet_data {
@@ -334,8 +380,6 @@ struct xgbe_ring_data {
         */
        unsigned int state_saved;
        struct {
-               unsigned int incomplete;
-               unsigned int context_next;
                struct sk_buff *skb;
                unsigned int len;
                unsigned int error;
@@ -414,6 +458,13 @@ struct xgbe_channel {
        struct xgbe_ring *rx_ring;
 } ____cacheline_aligned;
 
+enum xgbe_state {
+       XGBE_DOWN,
+       XGBE_LINK,
+       XGBE_LINK_INIT,
+       XGBE_LINK_ERR,
+};
+
 enum xgbe_int {
        XGMAC_INT_DMA_CH_SR_TI,
        XGMAC_INT_DMA_CH_SR_TPS,
@@ -445,6 +496,57 @@ enum xgbe_mtl_fifo_size {
        XGMAC_MTL_FIFO_SIZE_256K = 0x3ff,
 };
 
+enum xgbe_speed {
+       XGBE_SPEED_1000 = 0,
+       XGBE_SPEED_2500,
+       XGBE_SPEED_10000,
+       XGBE_SPEEDS,
+};
+
+enum xgbe_an {
+       XGBE_AN_READY = 0,
+       XGBE_AN_PAGE_RECEIVED,
+       XGBE_AN_INCOMPAT_LINK,
+       XGBE_AN_COMPLETE,
+       XGBE_AN_NO_LINK,
+       XGBE_AN_ERROR,
+};
+
+enum xgbe_rx {
+       XGBE_RX_BPA = 0,
+       XGBE_RX_XNP,
+       XGBE_RX_COMPLETE,
+       XGBE_RX_ERROR,
+};
+
+enum xgbe_mode {
+       XGBE_MODE_KR = 0,
+       XGBE_MODE_KX,
+};
+
+enum xgbe_speedset {
+       XGBE_SPEEDSET_1000_10000 = 0,
+       XGBE_SPEEDSET_2500_10000,
+};
+
+struct xgbe_phy {
+       u32 supported;
+       u32 advertising;
+       u32 lp_advertising;
+
+       int address;
+
+       int autoneg;
+       int speed;
+       int duplex;
+
+       int link;
+
+       int pause_autoneg;
+       int tx_pause;
+       int rx_pause;
+};
+
 struct xgbe_mmc_stats {
        /* Tx Stats */
        u64 txoctetcount_gb;
@@ -492,6 +594,11 @@ struct xgbe_mmc_stats {
        u64 rxwatchdogerror;
 };
 
+struct xgbe_ext_stats {
+       u64 tx_tso_packets;
+       u64 rx_split_header_packets;
+};
+
 struct xgbe_hw_if {
        int (*tx_complete)(struct xgbe_ring_desc *);
 
@@ -591,6 +698,20 @@ struct xgbe_hw_if {
        int (*set_rss_lookup_table)(struct xgbe_prv_data *, const u32 *);
 };
 
+struct xgbe_phy_if {
+       /* For initial PHY setup */
+       void (*phy_init)(struct xgbe_prv_data *);
+
+       /* For PHY support when setting device up/down */
+       int (*phy_reset)(struct xgbe_prv_data *);
+       int (*phy_start)(struct xgbe_prv_data *);
+       void (*phy_stop)(struct xgbe_prv_data *);
+
+       /* For PHY support while device is up */
+       void (*phy_status)(struct xgbe_prv_data *);
+       int (*phy_config_aneg)(struct xgbe_prv_data *);
+};
+
 struct xgbe_desc_if {
        int (*alloc_ring_resources)(struct xgbe_prv_data *);
        void (*free_ring_resources)(struct xgbe_prv_data *);
@@ -660,6 +781,9 @@ struct xgbe_prv_data {
        /* XGMAC/XPCS related mmio registers */
        void __iomem *xgmac_regs;       /* XGMAC CSRs */
        void __iomem *xpcs_regs;        /* XPCS MMD registers */
+       void __iomem *rxtx_regs;        /* SerDes Rx/Tx CSRs */
+       void __iomem *sir0_regs;        /* SerDes integration registers (1/2) */
+       void __iomem *sir1_regs;        /* SerDes integration registers (2/2) */
 
        /* Overall device lock */
        spinlock_t lock;
@@ -670,10 +794,14 @@ struct xgbe_prv_data {
        /* RSS addressing mutex */
        struct mutex rss_mutex;
 
+       /* Flags representing xgbe_state */
+       unsigned long dev_state;
+
        int dev_irq;
        unsigned int per_channel_irq;
 
        struct xgbe_hw_if hw_if;
+       struct xgbe_phy_if phy_if;
        struct xgbe_desc_if desc_if;
 
        /* AXI DMA settings */
@@ -682,6 +810,11 @@ struct xgbe_prv_data {
        unsigned int arcache;
        unsigned int awcache;
 
+       /* Service routine support */
+       struct workqueue_struct *dev_workqueue;
+       struct work_struct service_work;
+       struct timer_list service_timer;
+
        /* Rings for Tx/Rx on a DMA channel */
        struct xgbe_channel *channel;
        unsigned int channel_count;
@@ -729,27 +862,12 @@ struct xgbe_prv_data {
        u32 rss_table[XGBE_RSS_MAX_TABLE_SIZE];
        u32 rss_options;
 
-       /* MDIO settings */
-       struct module *phy_module;
-       char *mii_bus_id;
-       struct mii_bus *mii;
-       int mdio_mmd;
-       struct phy_device *phydev;
-       int default_autoneg;
-       int default_speed;
-
-       /* Current PHY settings */
-       phy_interface_t phy_mode;
-       int phy_link;
-       int phy_speed;
-       unsigned int phy_tx_pause;
-       unsigned int phy_rx_pause;
-
        /* Netdev related settings */
        unsigned char mac_addr[ETH_ALEN];
        netdev_features_t netdev_features;
        struct napi_struct napi;
        struct xgbe_mmc_stats mmc_stats;
+       struct xgbe_ext_stats ext_stats;
 
        /* Filtering support */
        unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
@@ -787,6 +905,54 @@ struct xgbe_prv_data {
        /* Keeps track of power mode */
        unsigned int power_down;
 
+       /* Network interface message level setting */
+       u32 msg_enable;
+
+       /* Current PHY settings */
+       phy_interface_t phy_mode;
+       int phy_link;
+       int phy_speed;
+
+       /* MDIO/PHY related settings */
+       struct xgbe_phy phy;
+       int mdio_mmd;
+       unsigned long link_check;
+
+       char an_name[IFNAMSIZ + 32];
+       struct workqueue_struct *an_workqueue;
+
+       int an_irq;
+       struct work_struct an_irq_work;
+
+       unsigned int speed_set;
+
+       /* SerDes UEFI configurable settings.
+        *   Switching between modes/speeds requires new values for some
+        *   SerDes settings.  The values can be supplied as device
+        *   properties in array format.  The first array entry is for
+        *   1GbE, the second for 2.5GbE, and the third for 10GbE.
+        */
+       u32 serdes_blwc[XGBE_SPEEDS];
+       u32 serdes_cdr_rate[XGBE_SPEEDS];
+       u32 serdes_pq_skew[XGBE_SPEEDS];
+       u32 serdes_tx_amp[XGBE_SPEEDS];
+       u32 serdes_dfe_tap_cfg[XGBE_SPEEDS];
+       u32 serdes_dfe_tap_ena[XGBE_SPEEDS];
+
+       /* Auto-negotiation state machine support */
+       struct mutex an_mutex;
+       enum xgbe_an an_result;
+       enum xgbe_an an_state;
+       enum xgbe_rx kr_state;
+       enum xgbe_rx kx_state;
+       struct work_struct an_work;
+       unsigned int an_supported;
+       unsigned int parallel_detect;
+       unsigned int fec_ability;
+       unsigned long an_start;
+
+       unsigned int lpm_ctrl;          /* CTRL1 for resume */
+
 #ifdef CONFIG_DEBUG_FS
        struct dentry *xgbe_debugfs;
 
@@ -800,6 +966,7 @@ struct xgbe_prv_data {
 /* Function prototypes*/
 
 void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *);
+void xgbe_init_function_ptrs_phy(struct xgbe_phy_if *);
 void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *);
 struct net_device_ops *xgbe_get_netdev_ops(void);
 struct ethtool_ops *xgbe_get_ethtool_ops(void);
@@ -807,14 +974,11 @@ struct ethtool_ops *xgbe_get_ethtool_ops(void);
 const struct dcbnl_rtnl_ops *xgbe_get_dcbnl_ops(void);
 #endif
 
-int xgbe_mdio_register(struct xgbe_prv_data *);
-void xgbe_mdio_unregister(struct xgbe_prv_data *);
-void xgbe_dump_phy_registers(struct xgbe_prv_data *);
 void xgbe_ptp_register(struct xgbe_prv_data *);
 void xgbe_ptp_unregister(struct xgbe_prv_data *);
-void xgbe_dump_tx_desc(struct xgbe_ring *, unsigned int, unsigned int,
-                      unsigned int);
-void xgbe_dump_rx_desc(struct xgbe_ring *, struct xgbe_ring_desc *,
+void xgbe_dump_tx_desc(struct xgbe_prv_data *, struct xgbe_ring *,
+                      unsigned int, unsigned int, unsigned int);
+void xgbe_dump_rx_desc(struct xgbe_prv_data *, struct xgbe_ring *,
                       unsigned int);
 void xgbe_print_pkt(struct net_device *, struct sk_buff *, bool);
 void xgbe_get_all_hw_features(struct xgbe_prv_data *);
@@ -831,18 +995,6 @@ static inline void xgbe_debugfs_init(struct xgbe_prv_data *pdata) {}
 static inline void xgbe_debugfs_exit(struct xgbe_prv_data *pdata) {}
 #endif /* CONFIG_DEBUG_FS */
 
-/* NOTE: Uncomment for TX and RX DESCRIPTOR DUMP in KERNEL LOG */
-#if 0
-#define XGMAC_ENABLE_TX_DESC_DUMP
-#define XGMAC_ENABLE_RX_DESC_DUMP
-#endif
-
-/* NOTE: Uncomment for TX and RX PACKET DUMP in KERNEL LOG */
-#if 0
-#define XGMAC_ENABLE_TX_PKT_DUMP
-#define XGMAC_ENABLE_RX_PKT_DUMP
-#endif
-
 /* NOTE: Uncomment for function trace log messages in KERNEL LOG */
 #if 0
 #define YDEBUG
@@ -852,10 +1004,8 @@ static inline void xgbe_debugfs_exit(struct xgbe_prv_data *pdata) {}
 /* For debug prints */
 #ifdef YDEBUG
 #define DBGPR(x...) pr_alert(x)
-#define DBGPHY_REGS(x...) xgbe_dump_phy_registers(x)
 #else
 #define DBGPR(x...) do { } while (0)
-#define DBGPHY_REGS(x...) do { } while (0)
 #endif
 
 #ifdef YDEBUG_MDIO
index 68be565548c09de5c7ecbaec377b9d5d291a5567..700b5abe5de5f5fe95c1c5fcf45b1413b4253cc6 100644 (file)
@@ -3,5 +3,5 @@
 #
 
 xgene-enet-objs := xgene_enet_hw.o xgene_enet_sgmac.o xgene_enet_xgmac.o \
-                  xgene_enet_main.o xgene_enet_ethtool.o
+                  xgene_enet_main.o xgene_enet_ring2.o xgene_enet_ethtool.o
 obj-$(CONFIG_NET_XGENE) += xgene-enet.o
index b927021c6c4030c5f63abd9644aac7169d8b6034..25873d142b95a5cd453b73af03c43d1a4355f345 100644 (file)
@@ -87,10 +87,11 @@ static void xgene_enet_ring_rd32(struct xgene_enet_desc_ring *ring,
 
 static void xgene_enet_write_ring_state(struct xgene_enet_desc_ring *ring)
 {
+       struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
        int i;
 
        xgene_enet_ring_wr32(ring, CSR_RING_CONFIG, ring->num);
-       for (i = 0; i < NUM_RING_CONFIG; i++) {
+       for (i = 0; i < pdata->ring_ops->num_ring_config; i++) {
                xgene_enet_ring_wr32(ring, CSR_RING_WR_BASE + (i * 4),
                                     ring->state[i]);
        }
@@ -98,7 +99,7 @@ static void xgene_enet_write_ring_state(struct xgene_enet_desc_ring *ring)
 
 static void xgene_enet_clr_ring_state(struct xgene_enet_desc_ring *ring)
 {
-       memset(ring->state, 0, sizeof(u32) * NUM_RING_CONFIG);
+       memset(ring->state, 0, sizeof(ring->state));
        xgene_enet_write_ring_state(ring);
 }
 
@@ -141,8 +142,8 @@ static void xgene_enet_clr_desc_ring_id(struct xgene_enet_desc_ring *ring)
        xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, 0);
 }
 
-struct xgene_enet_desc_ring *xgene_enet_setup_ring(
-                                       struct xgene_enet_desc_ring *ring)
+static struct xgene_enet_desc_ring *xgene_enet_setup_ring(
+                                   struct xgene_enet_desc_ring *ring)
 {
        u32 size = ring->size;
        u32 i, data;
@@ -168,7 +169,7 @@ struct xgene_enet_desc_ring *xgene_enet_setup_ring(
        return ring;
 }
 
-void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring)
+static void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring)
 {
        u32 data;
        bool is_bufpool;
@@ -186,6 +187,22 @@ out:
        xgene_enet_clr_ring_state(ring);
 }
 
+static void xgene_enet_wr_cmd(struct xgene_enet_desc_ring *ring, int count)
+{
+       iowrite32(count, ring->cmd);
+}
+
+static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
+{
+       u32 __iomem *cmd_base = ring->cmd_base;
+       u32 ring_state, num_msgs;
+
+       ring_state = ioread32(&cmd_base[1]);
+       num_msgs = GET_VAL(NUMMSGSINQ, ring_state);
+
+       return num_msgs;
+}
+
 void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
                            struct xgene_enet_pdata *pdata,
                            enum xgene_enet_err_code status)
@@ -803,3 +820,12 @@ struct xgene_port_ops xgene_gport_ops = {
        .cle_bypass = xgene_enet_cle_bypass,
        .shutdown = xgene_gport_shutdown,
 };
+
+struct xgene_ring_ops xgene_ring1_ops = {
+       .num_ring_config = NUM_RING_CONFIG,
+       .num_ring_id_shift = 6,
+       .setup = xgene_enet_setup_ring,
+       .clear = xgene_enet_clear_ring,
+       .wr_cmd = xgene_enet_wr_cmd,
+       .len = xgene_enet_ring_len,
+};
index d9bc89d69266cfd75a644888ffcb373d0ce34b77..541bed0560126e412dcaefd46adc0b8e131d1237 100644 (file)
@@ -26,6 +26,7 @@
 
 struct xgene_enet_pdata;
 struct xgene_enet_stats;
+struct xgene_enet_desc_ring;
 
 /* clears and then set bits */
 static inline void xgene_set_bits(u32 *dst, u32 val, u32 start, u32 len)
@@ -101,8 +102,8 @@ enum xgene_enet_rm {
 
 #define BLOCK_ETH_CSR_OFFSET           0x2000
 #define BLOCK_ETH_RING_IF_OFFSET       0x9000
+#define BLOCK_ETH_CLKRST_CSR_OFFSET    0xc000
 #define BLOCK_ETH_DIAG_CSR_OFFSET      0xD000
-
 #define BLOCK_ETH_MAC_OFFSET           0x0000
 #define BLOCK_ETH_MAC_CSR_OFFSET       0x2800
 
@@ -261,6 +262,7 @@ enum xgene_enet_ring_type {
 
 enum xgene_ring_owner {
        RING_OWNER_ETH0,
+       RING_OWNER_ETH1,
        RING_OWNER_CPU = 15,
        RING_OWNER_INVALID
 };
@@ -314,9 +316,6 @@ static inline u16 xgene_enet_get_numslots(u16 id, u32 size)
                      size / WORK_DESC_SIZE;
 }
 
-struct xgene_enet_desc_ring *xgene_enet_setup_ring(
-               struct xgene_enet_desc_ring *ring);
-void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring);
 void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
                            struct xgene_enet_pdata *pdata,
                            enum xgene_enet_err_code status);
@@ -327,5 +326,6 @@ bool xgene_ring_mgr_init(struct xgene_enet_pdata *p);
 
 extern struct xgene_mac_ops xgene_gmac_ops;
 extern struct xgene_port_ops xgene_gport_ops;
+extern struct xgene_ring_ops xgene_ring1_ops;
 
 #endif /* __XGENE_ENET_HW_H__ */
index 40d3530d7f30966af178eb5890b2172cd3197b49..1bb317532f75f5c1166b0b32041db6d7a6206cc8 100644 (file)
@@ -28,6 +28,8 @@
 #define RES_RING_CSR   1
 #define RES_RING_CMD   2
 
+static const struct of_device_id xgene_enet_of_match[];
+
 static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
 {
        struct xgene_enet_raw_desc16 *raw_desc;
@@ -48,6 +50,7 @@ static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
 {
        struct sk_buff *skb;
        struct xgene_enet_raw_desc16 *raw_desc;
+       struct xgene_enet_pdata *pdata;
        struct net_device *ndev;
        struct device *dev;
        dma_addr_t dma_addr;
@@ -58,6 +61,7 @@ static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
 
        ndev = buf_pool->ndev;
        dev = ndev_to_dev(buf_pool->ndev);
+       pdata = netdev_priv(ndev);
        bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0));
        len = XGENE_ENET_MAX_MTU;
 
@@ -82,7 +86,7 @@ static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
                tail = (tail + 1) & slots;
        }
 
-       iowrite32(nbuf, buf_pool->cmd);
+       pdata->ring_ops->wr_cmd(buf_pool, nbuf);
        buf_pool->tail = tail;
 
        return 0;
@@ -102,26 +106,16 @@ static u8 xgene_enet_hdr_len(const void *data)
        return (eth->h_proto == htons(ETH_P_8021Q)) ? VLAN_ETH_HLEN : ETH_HLEN;
 }
 
-static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
-{
-       u32 __iomem *cmd_base = ring->cmd_base;
-       u32 ring_state, num_msgs;
-
-       ring_state = ioread32(&cmd_base[1]);
-       num_msgs = ring_state & CREATE_MASK(NUMMSGSINQ_POS, NUMMSGSINQ_LEN);
-
-       return num_msgs >> NUMMSGSINQ_POS;
-}
-
 static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
 {
+       struct xgene_enet_pdata *pdata = netdev_priv(buf_pool->ndev);
        struct xgene_enet_raw_desc16 *raw_desc;
        u32 slots = buf_pool->slots - 1;
        u32 tail = buf_pool->tail;
        u32 userinfo;
        int i, len;
 
-       len = xgene_enet_ring_len(buf_pool);
+       len = pdata->ring_ops->len(buf_pool);
        for (i = 0; i < len; i++) {
                tail = (tail - 1) & slots;
                raw_desc = &buf_pool->raw_desc16[tail];
@@ -131,7 +125,7 @@ static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
                dev_kfree_skb_any(buf_pool->rx_skb[userinfo]);
        }
 
-       iowrite32(-len, buf_pool->cmd);
+       pdata->ring_ops->wr_cmd(buf_pool, -len);
        buf_pool->tail = tail;
 }
 
@@ -263,8 +257,8 @@ static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
        struct xgene_enet_desc_ring *cp_ring = tx_ring->cp_ring;
        u32 tx_level, cq_level;
 
-       tx_level = xgene_enet_ring_len(tx_ring);
-       cq_level = xgene_enet_ring_len(cp_ring);
+       tx_level = pdata->ring_ops->len(tx_ring);
+       cq_level = pdata->ring_ops->len(cp_ring);
        if (unlikely(tx_level > pdata->tx_qcnt_hi ||
                     cq_level > pdata->cp_qcnt_hi)) {
                netif_stop_queue(ndev);
@@ -276,7 +270,7 @@ static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
                return NETDEV_TX_OK;
        }
 
-       iowrite32(1, tx_ring->cmd);
+       pdata->ring_ops->wr_cmd(tx_ring, 1);
        skb_tx_timestamp(skb);
        tx_ring->tail = (tx_ring->tail + 1) & (tx_ring->slots - 1);
 
@@ -389,11 +383,11 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
        } while (--budget);
 
        if (likely(count)) {
-               iowrite32(-count, ring->cmd);
+               pdata->ring_ops->wr_cmd(ring, -count);
                ring->head = head;
 
                if (netif_queue_stopped(ring->ndev)) {
-                       if (xgene_enet_ring_len(ring) < pdata->cp_qcnt_low)
+                       if (pdata->ring_ops->len(ring) < pdata->cp_qcnt_low)
                                netif_wake_queue(ring->ndev);
                }
        }
@@ -510,6 +504,7 @@ static int xgene_enet_open(struct net_device *ndev)
        else
                schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF);
 
+       netif_carrier_off(ndev);
        netif_start_queue(ndev);
 
        return ret;
@@ -545,7 +540,7 @@ static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring)
        pdata = netdev_priv(ring->ndev);
        dev = ndev_to_dev(ring->ndev);
 
-       xgene_enet_clear_ring(ring);
+       pdata->ring_ops->clear(ring);
        dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
 }
 
@@ -598,15 +593,17 @@ static int xgene_enet_get_ring_size(struct device *dev,
 
 static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring)
 {
+       struct xgene_enet_pdata *pdata;
        struct device *dev;
 
        if (!ring)
                return;
 
        dev = ndev_to_dev(ring->ndev);
+       pdata = netdev_priv(ring->ndev);
 
        if (ring->desc_addr) {
-               xgene_enet_clear_ring(ring);
+               pdata->ring_ops->clear(ring);
                dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
        }
        devm_kfree(dev, ring);
@@ -637,6 +634,25 @@ static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
        }
 }
 
+static bool is_irq_mbox_required(struct xgene_enet_pdata *pdata,
+                                struct xgene_enet_desc_ring *ring)
+{
+       if ((pdata->enet_id == XGENE_ENET2) &&
+           (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU)) {
+               return true;
+       }
+
+       return false;
+}
+
+static void __iomem *xgene_enet_ring_cmd_base(struct xgene_enet_pdata *pdata,
+                                             struct xgene_enet_desc_ring *ring)
+{
+       u8 num_ring_id_shift = pdata->ring_ops->num_ring_id_shift;
+
+       return pdata->ring_cmd_addr + (ring->num << num_ring_id_shift);
+}
+
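
The new xgene_enet_ring_cmd_base() helper above replaces the hard-coded `ring->num << 6` with the per-generation num_ring_id_shift (6 in xgene_ring1_ops, 13 in xgene_ring2_ops below), that is, a 64-byte command stride on first-generation hardware versus an 8 KiB stride on the second generation. A standalone check of the stride arithmetic:

    #include <assert.h>
    #include <stdint.h>

    /* Per-ring command region offset = ring number << num_ring_id_shift. */
    static uintptr_t cmd_offset(unsigned int ring_num, unsigned int shift)
    {
            return (uintptr_t)ring_num << shift;
    }

    int main(void)
    {
            assert(cmd_offset(1, 6) == 0x40);     /* ring1: 64-byte stride */
            assert(cmd_offset(3, 6) == 0xc0);
            assert(cmd_offset(1, 13) == 0x2000);  /* ring2: 8 KiB stride */
            return 0;
    }
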
 static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
                        struct net_device *ndev, u32 ring_num,
                        enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id)
@@ -668,9 +684,20 @@ static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
        }
        ring->size = size;
 
-       ring->cmd_base = pdata->ring_cmd_addr + (ring->num << 6);
+       if (is_irq_mbox_required(pdata, ring)) {
+               ring->irq_mbox_addr = dma_zalloc_coherent(dev, INTR_MBOX_SIZE,
+                               &ring->irq_mbox_dma, GFP_KERNEL);
+               if (!ring->irq_mbox_addr) {
+                       dma_free_coherent(dev, size, ring->desc_addr,
+                                         ring->dma);
+                       devm_kfree(dev, ring);
+                       return NULL;
+               }
+       }
+
+       ring->cmd_base = xgene_enet_ring_cmd_base(pdata, ring);
        ring->cmd = ring->cmd_base + INC_DEC_CMD_ADDR;
-       ring = xgene_enet_setup_ring(ring);
+       ring = pdata->ring_ops->setup(ring);
        netdev_dbg(ndev, "ring info: num=%d  size=%d  id=%d  slots=%d\n",
                   ring->num, ring->size, ring->id, ring->slots);
 
@@ -682,12 +709,34 @@ static u16 xgene_enet_get_ring_id(enum xgene_ring_owner owner, u8 bufnum)
        return (owner << 6) | (bufnum & GENMASK(5, 0));
 }
 
+static enum xgene_ring_owner xgene_derive_ring_owner(struct xgene_enet_pdata *p)
+{
+       enum xgene_ring_owner owner;
+
+       if (p->enet_id == XGENE_ENET1) {
+               switch (p->phy_mode) {
+               case PHY_INTERFACE_MODE_SGMII:
+                       owner = RING_OWNER_ETH0;
+                       break;
+               default:
+                       owner = (!p->port_id) ? RING_OWNER_ETH0 :
+                                               RING_OWNER_ETH1;
+                       break;
+               }
+       } else {
+               owner = (!p->port_id) ? RING_OWNER_ETH0 : RING_OWNER_ETH1;
+       }
+
+       return owner;
+}
+
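
xgene_enet_get_ring_id() above packs the owner into the bits above a six-bit buffer number, and the new xgene_derive_ring_owner() picks RING_OWNER_ETH0 or RING_OWNER_ETH1 from the port. A standalone sketch of the encoding, using the enum values from xgene_enet_hw.h above:

    #include <assert.h>
    #include <stdint.h>

    enum ring_owner { OWNER_ETH0, OWNER_ETH1, OWNER_CPU = 15 };

    /* Owner in the bits above a six-bit buffer number. */
    static uint16_t get_ring_id(enum ring_owner owner, uint8_t bufnum)
    {
            return (uint16_t)((owner << 6) | (bufnum & 0x3f));
    }

    int main(void)
    {
            assert(get_ring_id(OWNER_ETH0, 0x20) == 0x020);
            assert(get_ring_id(OWNER_ETH1, 0x20) == 0x060);
            assert(get_ring_id(OWNER_CPU, 0) == 0x3c0);
            return 0;
    }
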
 static int xgene_enet_create_desc_rings(struct net_device *ndev)
 {
        struct xgene_enet_pdata *pdata = netdev_priv(ndev);
        struct device *dev = ndev_to_dev(ndev);
        struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
        struct xgene_enet_desc_ring *buf_pool = NULL;
+       enum xgene_ring_owner owner;
        u8 cpu_bufnum = pdata->cpu_bufnum;
        u8 eth_bufnum = pdata->eth_bufnum;
        u8 bp_bufnum = pdata->bp_bufnum;
@@ -696,6 +745,7 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
        int ret;
 
        /* allocate rx descriptor ring */
+       owner = xgene_derive_ring_owner(pdata);
        ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
        rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
                                              RING_CFGSIZE_16KB, ring_id);
@@ -705,7 +755,8 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
        }
 
        /* allocate buffer pool for receiving packets */
-       ring_id = xgene_enet_get_ring_id(RING_OWNER_ETH0, bp_bufnum++);
+       owner = xgene_derive_ring_owner(pdata);
+       ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
        buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
                                               RING_CFGSIZE_2KB, ring_id);
        if (!buf_pool) {
@@ -734,7 +785,8 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
        pdata->rx_ring = rx_ring;
 
        /* allocate tx descriptor ring */
-       ring_id = xgene_enet_get_ring_id(RING_OWNER_ETH0, eth_bufnum++);
+       owner = xgene_derive_ring_owner(pdata);
+       ring_id = xgene_enet_get_ring_id(owner, eth_bufnum++);
        tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
                                              RING_CFGSIZE_16KB, ring_id);
        if (!tx_ring) {
@@ -824,14 +876,21 @@ static int xgene_get_port_id(struct device *dev, struct xgene_enet_pdata *pdata)
        int ret;
 
        ret = device_property_read_u32(dev, "port-id", &id);
-       if (!ret && id > 1) {
-               dev_err(dev, "Incorrect port-id specified\n");
-               return -ENODEV;
-       }
 
-       pdata->port_id = id;
+       switch (ret) {
+       case -EINVAL:
+               pdata->port_id = 0;
+               ret = 0;
+               break;
+       case 0:
+               pdata->port_id = id & BIT(0);
+               break;
+       default:
+               dev_err(dev, "Incorrect port-id specified: errno: %d\n", ret);
+               break;
+       }
 
-       return 0;
+       return ret;
 }
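
The reworked xgene_get_port_id() above distinguishes three cases: -EINVAL from device_property_read_u32(), which the code treats as the property being absent, defaults to port 0; a present value is reduced to its low bit; any other read error is propagated. A hedged userspace model of that decision table; parse_port_id() and its arguments are hypothetical stand-ins for the kernel API:

    #include <assert.h>
    #include <errno.h>

    /* Hypothetical stand-in for the switch on device_property_read_u32(). */
    static int parse_port_id(int read_ret, unsigned int raw, unsigned int *port_id)
    {
            switch (read_ret) {
            case -EINVAL:           /* property absent: default to port 0 */
                    *port_id = 0;
                    return 0;
            case 0:                 /* present: only bit 0 is meaningful */
                    *port_id = raw & 1;
                    return 0;
            default:                /* malformed property: propagate */
                    return read_ret;
            }
    }

    int main(void)
    {
            unsigned int id;

            assert(parse_port_id(-EINVAL, 0, &id) == 0 && id == 0);
            assert(parse_port_id(0, 1, &id) == 0 && id == 1);
            assert(parse_port_id(-ERANGE, 0, &id) == -ERANGE);
            return 0;
    }
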
 
 static int xgene_get_mac_address(struct device *dev,
@@ -876,6 +935,7 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
        struct device *dev;
        struct resource *res;
        void __iomem *base_addr;
+       u32 offset;
        int ret;
 
        pdev = pdata->pdev;
@@ -962,14 +1022,20 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
                pdata->clk = NULL;
        }
 
-       base_addr = pdata->base_addr - (pdata->port_id * MAC_OFFSET);
+       if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII)
+               base_addr = pdata->base_addr - (pdata->port_id * MAC_OFFSET);
+       else
+               base_addr = pdata->base_addr;
        pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET;
        pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET;
        pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET;
        if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII ||
            pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
                pdata->mcx_mac_addr = pdata->base_addr + BLOCK_ETH_MAC_OFFSET;
-               pdata->mcx_mac_csr_addr = base_addr + BLOCK_ETH_MAC_CSR_OFFSET;
+               offset = (pdata->enet_id == XGENE_ENET1) ?
+                         BLOCK_ETH_MAC_CSR_OFFSET :
+                         X2_BLOCK_ETH_MAC_CSR_OFFSET;
+               pdata->mcx_mac_csr_addr = base_addr + offset;
        } else {
                pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET;
                pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET;
@@ -1034,23 +1100,44 @@ static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
                break;
        }
 
-       switch (pdata->port_id) {
-       case 0:
-               pdata->cpu_bufnum = START_CPU_BUFNUM_0;
-               pdata->eth_bufnum = START_ETH_BUFNUM_0;
-               pdata->bp_bufnum = START_BP_BUFNUM_0;
-               pdata->ring_num = START_RING_NUM_0;
-               break;
-       case 1:
-               pdata->cpu_bufnum = START_CPU_BUFNUM_1;
-               pdata->eth_bufnum = START_ETH_BUFNUM_1;
-               pdata->bp_bufnum = START_BP_BUFNUM_1;
-               pdata->ring_num = START_RING_NUM_1;
-               break;
-       default:
-               break;
+       if (pdata->enet_id == XGENE_ENET1) {
+               switch (pdata->port_id) {
+               case 0:
+                       pdata->cpu_bufnum = START_CPU_BUFNUM_0;
+                       pdata->eth_bufnum = START_ETH_BUFNUM_0;
+                       pdata->bp_bufnum = START_BP_BUFNUM_0;
+                       pdata->ring_num = START_RING_NUM_0;
+                       break;
+               case 1:
+                       pdata->cpu_bufnum = START_CPU_BUFNUM_1;
+                       pdata->eth_bufnum = START_ETH_BUFNUM_1;
+                       pdata->bp_bufnum = START_BP_BUFNUM_1;
+                       pdata->ring_num = START_RING_NUM_1;
+                       break;
+               default:
+                       break;
+               }
+               pdata->ring_ops = &xgene_ring1_ops;
+       } else {
+               switch (pdata->port_id) {
+               case 0:
+                       pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
+                       pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
+                       pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
+                       pdata->ring_num = X2_START_RING_NUM_0;
+                       break;
+               case 1:
+                       pdata->cpu_bufnum = X2_START_CPU_BUFNUM_1;
+                       pdata->eth_bufnum = X2_START_ETH_BUFNUM_1;
+                       pdata->bp_bufnum = X2_START_BP_BUFNUM_1;
+                       pdata->ring_num = X2_START_RING_NUM_1;
+                       break;
+               default:
+                       break;
+               }
+               pdata->rm = RM0;
+               pdata->ring_ops = &xgene_ring2_ops;
        }
-
 }
 
 static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
@@ -1086,6 +1173,9 @@ static int xgene_enet_probe(struct platform_device *pdev)
        struct xgene_enet_pdata *pdata;
        struct device *dev = &pdev->dev;
        struct xgene_mac_ops *mac_ops;
+#ifdef CONFIG_OF
+       const struct of_device_id *of_id;
+#endif
        int ret;
 
        ndev = alloc_etherdev(sizeof(struct xgene_enet_pdata));
@@ -1104,6 +1194,17 @@ static int xgene_enet_probe(struct platform_device *pdev)
                          NETIF_F_GSO |
                          NETIF_F_GRO;
 
+#ifdef CONFIG_OF
+       of_id = of_match_device(xgene_enet_of_match, &pdev->dev);
+       if (of_id) {
+               pdata->enet_id = (enum xgene_enet_id)of_id->data;
+               if (!pdata->enet_id) {
+                       free_netdev(ndev);
+                       return -ENODEV;
+               }
+       }
+#endif
+
        ret = xgene_enet_get_resources(pdata);
        if (ret)
                goto err;
@@ -1175,9 +1276,11 @@ MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
 
 #ifdef CONFIG_OF
 static const struct of_device_id xgene_enet_of_match[] = {
-       {.compatible = "apm,xgene-enet",},
-       {.compatible = "apm,xgene1-sgenet",},
-       {.compatible = "apm,xgene1-xgenet",},
+       {.compatible = "apm,xgene-enet",    .data = (void *)XGENE_ENET1},
+       {.compatible = "apm,xgene1-sgenet", .data = (void *)XGENE_ENET1},
+       {.compatible = "apm,xgene1-xgenet", .data = (void *)XGENE_ENET1},
+       {.compatible = "apm,xgene2-sgenet", .data = (void *)XGENE_ENET2},
+       {.compatible = "apm,xgene2-xgenet", .data = (void *)XGENE_ENET2},
        {},
 };
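
Tagging each compatible string with the chip generation in .data lets probe recover XGENE_ENET1 or XGENE_ENET2 with a single of_match_device() lookup (see the CONFIG_OF block in xgene_enet_probe() above). A minimal model of the pattern, with the kernel types reduced to plain C:

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    enum enet_id { ENET1 = 1, ENET2 };

    struct of_id { const char *compatible; const void *data; };

    static const struct of_id match_table[] = {
            { "apm,xgene-enet",    (void *)(uintptr_t)ENET1 },
            { "apm,xgene2-sgenet", (void *)(uintptr_t)ENET2 },
            { 0, 0 },
    };

    /* Stand-in for of_match_device(): first entry matching the node. */
    static const struct of_id *match(const char *compat)
    {
            for (const struct of_id *id = match_table; id->compatible; id++)
                    if (!strcmp(id->compatible, compat))
                            return id;
            return 0;
    }

    int main(void)
    {
            const struct of_id *id = match("apm,xgene2-sgenet");

            assert(id && (enum enet_id)(uintptr_t)id->data == ENET2);
            return 0;
    }
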
 
index 8f3d232b09bc8c5d0f321fc398aea52c498f22cc..1c85fc87703abc48c14b835b8536fba733fc5923 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/if_vlan.h>
 #include <linux/phy.h>
 #include "xgene_enet_hw.h"
+#include "xgene_enet_ring2.h"
 
 #define XGENE_DRV_VERSION      "v1.0"
 #define XGENE_ENET_MAX_MTU     1536
 #define START_BP_BUFNUM_1      0x2A
 #define START_RING_NUM_1       264
 
+#define X2_START_CPU_BUFNUM_0  0
+#define X2_START_ETH_BUFNUM_0  0
+#define X2_START_BP_BUFNUM_0   0x20
+#define X2_START_RING_NUM_0    0
+#define X2_START_CPU_BUFNUM_1  0xc
+#define X2_START_ETH_BUFNUM_1  0
+#define X2_START_BP_BUFNUM_1   0x20
+#define X2_START_RING_NUM_1    256
+
 #define IRQ_ID_SIZE            16
 #define XGENE_MAX_TXC_RINGS    1
 
 #define PHY_POLL_LINK_ON       (10 * HZ)
 #define PHY_POLL_LINK_OFF      (PHY_POLL_LINK_ON / 5)
 
+enum xgene_enet_id {
+       XGENE_ENET1 = 1,
+       XGENE_ENET2
+};
+
 /* software context of a descriptor ring */
 struct xgene_enet_desc_ring {
        struct net_device *ndev;
@@ -68,10 +83,12 @@ struct xgene_enet_desc_ring {
        u16 irq;
        char irq_name[IRQ_ID_SIZE];
        u32 size;
-       u32 state[NUM_RING_CONFIG];
+       u32 state[X2_NUM_RING_CONFIG];
        void __iomem *cmd_base;
        void __iomem *cmd;
        dma_addr_t dma;
+       dma_addr_t irq_mbox_dma;
+       void *irq_mbox_addr;
        u16 dst_ring_num;
        u8 nbufpool;
        struct sk_buff *(*rx_skb);
@@ -105,6 +122,15 @@ struct xgene_port_ops {
        void (*shutdown)(struct xgene_enet_pdata *pdata);
 };
 
+struct xgene_ring_ops {
+       u8 num_ring_config;
+       u8 num_ring_id_shift;
+       struct xgene_enet_desc_ring * (*setup)(struct xgene_enet_desc_ring *);
+       void (*clear)(struct xgene_enet_desc_ring *);
+       void (*wr_cmd)(struct xgene_enet_desc_ring *, int);
+       u32 (*len)(struct xgene_enet_desc_ring *);
+};
+
 /* ethernet private data */
 struct xgene_enet_pdata {
        struct net_device *ndev;
@@ -113,6 +139,7 @@ struct xgene_enet_pdata {
        int phy_speed;
        struct clk *clk;
        struct platform_device *pdev;
+       enum xgene_enet_id enet_id;
        struct xgene_enet_desc_ring *tx_ring;
        struct xgene_enet_desc_ring *rx_ring;
        char *dev_name;
@@ -136,6 +163,7 @@ struct xgene_enet_pdata {
        struct rtnl_link_stats64 stats;
        struct xgene_mac_ops *mac_ops;
        struct xgene_port_ops *port_ops;
+       struct xgene_ring_ops *ring_ops;
        struct delayed_work link_work;
        u32 port_id;
        u8 cpu_bufnum;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c b/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c
new file mode 100644 (file)
index 0000000..0b6896b
--- /dev/null
@@ -0,0 +1,200 @@
+/* Applied Micro X-Gene SoC Ethernet Driver
+ *
+ * Copyright (c) 2015, Applied Micro Circuits Corporation
+ * Author: Iyappan Subramanian <isubramanian@apm.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "xgene_enet_main.h"
+#include "xgene_enet_hw.h"
+#include "xgene_enet_ring2.h"
+
+static void xgene_enet_ring_init(struct xgene_enet_desc_ring *ring)
+{
+       u32 *ring_cfg = ring->state;
+       u64 addr = ring->dma;
+
+       if (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU) {
+               ring_cfg[0] |= SET_VAL(X2_INTLINE, ring->id & RING_BUFNUM_MASK);
+               ring_cfg[3] |= SET_BIT(X2_DEQINTEN);
+       }
+       ring_cfg[0] |= SET_VAL(X2_CFGCRID, 1);
+
+       addr >>= 8;
+       ring_cfg[2] |= QCOHERENT | SET_VAL(RINGADDRL, addr);
+
+       addr >>= 27;
+       ring_cfg[3] |= SET_VAL(RINGSIZE, ring->cfgsize)
+                   | ACCEPTLERR
+                   | SET_VAL(RINGADDRH, addr);
+       ring_cfg[4] |= SET_VAL(X2_SELTHRSH, 1);
+       ring_cfg[5] |= SET_BIT(X2_QBASE_AM) | SET_BIT(X2_MSG_AM);
+}
+
+static void xgene_enet_ring_set_type(struct xgene_enet_desc_ring *ring)
+{
+       u32 *ring_cfg = ring->state;
+       bool is_bufpool;
+       u32 val;
+
+       is_bufpool = xgene_enet_is_bufpool(ring->id);
+       val = (is_bufpool) ? RING_BUFPOOL : RING_REGULAR;
+       ring_cfg[4] |= SET_VAL(X2_RINGTYPE, val);
+       if (is_bufpool)
+               ring_cfg[3] |= SET_VAL(RINGMODE, BUFPOOL_MODE);
+}
+
+static void xgene_enet_ring_set_recombbuf(struct xgene_enet_desc_ring *ring)
+{
+       u32 *ring_cfg = ring->state;
+
+       ring_cfg[3] |= RECOMBBUF;
+       ring_cfg[4] |= SET_VAL(X2_RECOMTIMEOUT, 0x7);
+}
+
+static void xgene_enet_ring_wr32(struct xgene_enet_desc_ring *ring,
+                                u32 offset, u32 data)
+{
+       struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
+
+       iowrite32(data, pdata->ring_csr_addr + offset);
+}
+
+static void xgene_enet_write_ring_state(struct xgene_enet_desc_ring *ring)
+{
+       struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
+       int i;
+
+       xgene_enet_ring_wr32(ring, CSR_RING_CONFIG, ring->num);
+       for (i = 0; i < pdata->ring_ops->num_ring_config; i++) {
+               xgene_enet_ring_wr32(ring, CSR_RING_WR_BASE + (i * 4),
+                                    ring->state[i]);
+       }
+}
+
+static void xgene_enet_clr_ring_state(struct xgene_enet_desc_ring *ring)
+{
+       memset(ring->state, 0, sizeof(ring->state));
+       xgene_enet_write_ring_state(ring);
+}
+
+static void xgene_enet_set_ring_state(struct xgene_enet_desc_ring *ring)
+{
+       enum xgene_ring_owner owner;
+
+       xgene_enet_ring_set_type(ring);
+
+       owner = xgene_enet_ring_owner(ring->id);
+       if (owner == RING_OWNER_ETH0 || owner == RING_OWNER_ETH1)
+               xgene_enet_ring_set_recombbuf(ring);
+
+       xgene_enet_ring_init(ring);
+       xgene_enet_write_ring_state(ring);
+}
+
+static void xgene_enet_set_ring_id(struct xgene_enet_desc_ring *ring)
+{
+       u32 ring_id_val, ring_id_buf;
+       bool is_bufpool;
+
+       if (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU)
+               return;
+
+       is_bufpool = xgene_enet_is_bufpool(ring->id);
+
+       ring_id_val = ring->id & GENMASK(9, 0);
+       ring_id_val |= OVERWRITE;
+
+       ring_id_buf = (ring->num << 9) & GENMASK(18, 9);
+       ring_id_buf |= PREFETCH_BUF_EN;
+       if (is_bufpool)
+               ring_id_buf |= IS_BUFFER_POOL;
+
+       xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id_val);
+       xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, ring_id_buf);
+}
+
+static void xgene_enet_clr_desc_ring_id(struct xgene_enet_desc_ring *ring)
+{
+       u32 ring_id;
+
+       ring_id = ring->id | OVERWRITE;
+       xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id);
+       xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, 0);
+}
+
+static struct xgene_enet_desc_ring *xgene_enet_setup_ring(
+                                   struct xgene_enet_desc_ring *ring)
+{
+       bool is_bufpool;
+       u32 addr, i;
+
+       xgene_enet_clr_ring_state(ring);
+       xgene_enet_set_ring_state(ring);
+       xgene_enet_set_ring_id(ring);
+
+       ring->slots = xgene_enet_get_numslots(ring->id, ring->size);
+
+       is_bufpool = xgene_enet_is_bufpool(ring->id);
+       if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU)
+               return ring;
+
+       addr = CSR_VMID0_INTR_MBOX + (4 * (ring->id & RING_BUFNUM_MASK));
+       xgene_enet_ring_wr32(ring, addr, ring->irq_mbox_dma >> 10);
+
+       for (i = 0; i < ring->slots; i++)
+               xgene_enet_mark_desc_slot_empty(&ring->raw_desc[i]);
+
+       return ring;
+}
+
+static void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring)
+{
+       xgene_enet_clr_desc_ring_id(ring);
+       xgene_enet_clr_ring_state(ring);
+}
+
+static void xgene_enet_wr_cmd(struct xgene_enet_desc_ring *ring, int count)
+{
+       u32 data = 0;
+
+       if (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU) {
+               data = SET_VAL(X2_INTLINE, ring->id & RING_BUFNUM_MASK) |
+                      INTR_CLEAR;
+       }
+       data |= (count & GENMASK(16, 0));
+
+       iowrite32(data, ring->cmd);
+}
+
+static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
+{
+       u32 __iomem *cmd_base = ring->cmd_base;
+       u32 ring_state, num_msgs;
+
+       ring_state = ioread32(&cmd_base[1]);
+       num_msgs = GET_VAL(X2_NUMMSGSINQ, ring_state);
+
+       return num_msgs;
+}
+
+struct xgene_ring_ops xgene_ring2_ops = {
+       .num_ring_config = X2_NUM_RING_CONFIG,
+       .num_ring_id_shift = 13,
+       .setup = xgene_enet_setup_ring,
+       .clear = xgene_enet_clear_ring,
+       .wr_cmd = xgene_enet_wr_cmd,
+       .len = xgene_enet_ring_len,
+};
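
The ring2 configuration words above are built with the driver's SET_VAL/GET_VAL field macros; their exact definitions live in xgene_enet_hw.h and are not shown in this diff, but they amount to position/length masking. A plausible standalone model, exercised with the X2_NUMMSGSINQ (pos 0, len 17) and X2_INTLINE (pos 24, len 5) layouts from the new header below; treat the helper definitions as illustrative:

    #include <assert.h>
    #include <stdint.h>

    /* Illustrative pos/len helpers (the driver's live in xgene_enet_hw.h). */
    static uint32_t field_mask(unsigned int pos, unsigned int len)
    {
            return ((len < 32 ? (1u << len) : 0u) - 1u) << pos;
    }

    static uint32_t set_val(unsigned int pos, unsigned int len, uint32_t val)
    {
            return (val << pos) & field_mask(pos, len);
    }

    static uint32_t get_val(unsigned int pos, unsigned int len, uint32_t word)
    {
            return (word & field_mask(pos, len)) >> pos;
    }

    int main(void)
    {
            /* X2_NUMMSGSINQ: pos 0, len 17; X2_INTLINE: pos 24, len 5. */
            uint32_t state = set_val(0, 17, 0x1ffff) | set_val(24, 5, 3);

            assert(get_val(0, 17, state) == 0x1ffff);
            assert(get_val(24, 5, state) == 3);
            return 0;
    }
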
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.h b/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.h
new file mode 100644 (file)
index 0000000..8b235db
--- /dev/null
@@ -0,0 +1,49 @@
+/* Applied Micro X-Gene SoC Ethernet Driver
+ *
+ * Copyright (c) 2015, Applied Micro Circuits Corporation
+ * Author: Iyappan Subramanian <isubramanian@apm.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __XGENE_ENET_RING2_H__
+#define __XGENE_ENET_RING2_H__
+
+#include "xgene_enet_main.h"
+
+#define X2_NUM_RING_CONFIG     6
+
+#define INTR_MBOX_SIZE         1024
+#define CSR_VMID0_INTR_MBOX    0x0270
+#define INTR_CLEAR             BIT(23)
+
+#define X2_MSG_AM_POS          10
+#define X2_QBASE_AM_POS                11
+#define X2_INTLINE_POS         24
+#define X2_INTLINE_LEN         5
+#define X2_CFGCRID_POS         29
+#define X2_CFGCRID_LEN         3
+#define X2_SELTHRSH_POS                7
+#define X2_SELTHRSH_LEN                3
+#define X2_RINGTYPE_POS                23
+#define X2_RINGTYPE_LEN                2
+#define X2_DEQINTEN_POS                29
+#define X2_RECOMTIMEOUT_POS    0
+#define X2_RECOMTIMEOUT_LEN    7
+#define X2_NUMMSGSINQ_POS      0
+#define X2_NUMMSGSINQ_LEN      17
+
+extern struct xgene_ring_ops xgene_ring2_ops;
+
+#endif /* __XGENE_ENET_RING2_H__ */
index f27fb6f2a93b90864bf072cc433a56a2f403d175..ff240b3cb2b877cf968fa80fcb933eb6ddfc89ac 100644 (file)
@@ -21,6 +21,7 @@
 #include "xgene_enet_main.h"
 #include "xgene_enet_hw.h"
 #include "xgene_enet_sgmac.h"
+#include "xgene_enet_xgmac.h"
 
 static void xgene_enet_wr_csr(struct xgene_enet_pdata *p, u32 offset, u32 val)
 {
@@ -39,6 +40,14 @@ static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata *p,
        iowrite32(val, p->eth_diag_csr_addr + offset);
 }
 
+static void xgene_enet_wr_mcx_csr(struct xgene_enet_pdata *pdata,
+                                 u32 offset, u32 val)
+{
+       void __iomem *addr = pdata->mcx_mac_csr_addr + offset;
+
+       iowrite32(val, addr);
+}
+
 static bool xgene_enet_wr_indirect(struct xgene_indirect_ctl *ctl,
                                   u32 wr_addr, u32 wr_data)
 {
@@ -140,8 +149,9 @@ static int xgene_enet_ecc_init(struct xgene_enet_pdata *p)
 
 static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *p)
 {
-       u32 val = 0xffffffff;
+       u32 val;
 
+       val = (p->enet_id == XGENE_ENET1) ? 0xffffffff : 0;
        xgene_enet_wr_ring_if(p, ENET_CFGSSQMIWQASSOC_ADDR, val);
        xgene_enet_wr_ring_if(p, ENET_CFGSSQMIFPQASSOC_ADDR, val);
 }
@@ -227,6 +237,8 @@ static void xgene_sgmac_init(struct xgene_enet_pdata *p)
 {
        u32 data, loop = 10;
        u32 offset = p->port_id * 4;
+       u32 enet_spare_cfg_reg, rsif_config_reg;
+       u32 cfg_bypass_reg, rx_dv_gate_reg;
 
        xgene_sgmac_reset(p);
 
@@ -239,7 +251,7 @@ static void xgene_sgmac_init(struct xgene_enet_pdata *p)
                                          SGMII_STATUS_ADDR >> 2);
                if ((data & AUTO_NEG_COMPLETE) && (data & LINK_STATUS))
                        break;
-               usleep_range(10, 20);
+               usleep_range(1000, 2000);
        }
        if (!(data & AUTO_NEG_COMPLETE) || !(data & LINK_STATUS))
                netdev_err(p->ndev, "Auto-negotiation failed\n");
@@ -249,33 +261,38 @@ static void xgene_sgmac_init(struct xgene_enet_pdata *p)
        xgene_enet_wr_mac(p, MAC_CONFIG_2_ADDR, data | FULL_DUPLEX2);
        xgene_enet_wr_mac(p, INTERFACE_CONTROL_ADDR, ENET_GHD_MODE);
 
-       data = xgene_enet_rd_csr(p, ENET_SPARE_CFG_REG_ADDR);
+       if (p->enet_id == XGENE_ENET1) {
+               enet_spare_cfg_reg = ENET_SPARE_CFG_REG_ADDR;
+               rsif_config_reg = RSIF_CONFIG_REG_ADDR;
+               cfg_bypass_reg = CFG_BYPASS_ADDR;
+               rx_dv_gate_reg = SG_RX_DV_GATE_REG_0_ADDR;
+       } else {
+               enet_spare_cfg_reg = XG_ENET_SPARE_CFG_REG_ADDR;
+               rsif_config_reg = XG_RSIF_CONFIG_REG_ADDR;
+               cfg_bypass_reg = XG_CFG_BYPASS_ADDR;
+               rx_dv_gate_reg = XG_MCX_RX_DV_GATE_REG_0_ADDR;
+       }
+
+       data = xgene_enet_rd_csr(p, enet_spare_cfg_reg);
        data |= MPA_IDLE_WITH_QMI_EMPTY;
-       xgene_enet_wr_csr(p, ENET_SPARE_CFG_REG_ADDR, data);
+       xgene_enet_wr_csr(p, enet_spare_cfg_reg, data);
 
        xgene_sgmac_set_mac_addr(p);
 
-       data = xgene_enet_rd_csr(p, DEBUG_REG_ADDR);
-       data |= CFG_BYPASS_UNISEC_TX | CFG_BYPASS_UNISEC_RX;
-       xgene_enet_wr_csr(p, DEBUG_REG_ADDR, data);
-
        /* Adjust MDC clock frequency */
        data = xgene_enet_rd_mac(p, MII_MGMT_CONFIG_ADDR);
        MGMT_CLOCK_SEL_SET(&data, 7);
        xgene_enet_wr_mac(p, MII_MGMT_CONFIG_ADDR, data);
 
        /* Enable drop if bufpool not available */
-       data = xgene_enet_rd_csr(p, RSIF_CONFIG_REG_ADDR);
+       data = xgene_enet_rd_csr(p, rsif_config_reg);
        data |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
-       xgene_enet_wr_csr(p, RSIF_CONFIG_REG_ADDR, data);
-
-       /* Rtype should be copied from FP */
-       xgene_enet_wr_csr(p, RSIF_RAM_DBG_REG0_ADDR, 0);
+       xgene_enet_wr_csr(p, rsif_config_reg, data);
 
        /* Bypass traffic gating */
-       xgene_enet_wr_csr(p, CFG_LINK_AGGR_RESUME_0_ADDR + offset, TX_PORT0);
-       xgene_enet_wr_csr(p, CFG_BYPASS_ADDR, RESUME_TX);
-       xgene_enet_wr_csr(p, SG_RX_DV_GATE_REG_0_ADDR + offset, RESUME_RX0);
+       xgene_enet_wr_csr(p, XG_ENET_SPARE_CFG_REG_1_ADDR, 0x84);
+       xgene_enet_wr_csr(p, cfg_bypass_reg, RESUME_TX);
+       xgene_enet_wr_mcx_csr(p, rx_dv_gate_reg + offset, RESUME_RX0);
 }
 
 static void xgene_sgmac_rxtx(struct xgene_enet_pdata *p, u32 bits, bool set)
@@ -331,14 +348,23 @@ static void xgene_enet_cle_bypass(struct xgene_enet_pdata *p,
                                  u32 dst_ring_num, u16 bufpool_id)
 {
        u32 data, fpsel;
+       u32 cle_bypass_reg0, cle_bypass_reg1;
        u32 offset = p->port_id * MAC_OFFSET;
 
+       if (p->enet_id == XGENE_ENET1) {
+               cle_bypass_reg0 = CLE_BYPASS_REG0_0_ADDR;
+               cle_bypass_reg1 = CLE_BYPASS_REG1_0_ADDR;
+       } else {
+               cle_bypass_reg0 = XCLE_BYPASS_REG0_ADDR;
+               cle_bypass_reg1 = XCLE_BYPASS_REG1_ADDR;
+       }
+
        data = CFG_CLE_BYPASS_EN0;
-       xgene_enet_wr_csr(p, CLE_BYPASS_REG0_0_ADDR + offset, data);
+       xgene_enet_wr_csr(p, cle_bypass_reg0 + offset, data);
 
        fpsel = xgene_enet_ring_bufnum(bufpool_id) - 0x20;
        data = CFG_CLE_DSTQID0(dst_ring_num) | CFG_CLE_FPSEL0(fpsel);
-       xgene_enet_wr_csr(p, CLE_BYPASS_REG1_0_ADDR + offset, data);
+       xgene_enet_wr_csr(p, cle_bypass_reg1 + offset, data);
 }
 
 static void xgene_enet_shutdown(struct xgene_enet_pdata *p)
index a18a9d1f11432d4469d2d711ec633f22565017eb..27ba2fe3fca61888d6267eddfc3c9844e2140454 100644 (file)
@@ -122,7 +122,6 @@ static bool xgene_enet_rd_indirect(void __iomem *addr, void __iomem *rd,
 
        return true;
 }
-
 static void xgene_enet_rd_mac(struct xgene_enet_pdata *pdata,
                              u32 rd_addr, u32 *rd_data)
 {
index 5a5296a6d1df0a65e806dc7c9b3f78f5f6b27608..bf0a9943573700454ebfc432411df7744ca9ba6d 100644 (file)
 #ifndef __XGENE_ENET_XGMAC_H__
 #define __XGENE_ENET_XGMAC_H__
 
+#define X2_BLOCK_ETH_MAC_CSR_OFFSET    0x3000
 #define BLOCK_AXG_MAC_OFFSET           0x0800
 #define BLOCK_AXG_MAC_CSR_OFFSET       0x2000
 
+#define XGENET_CONFIG_REG_ADDR         0x20
+#define XGENET_SRST_ADDR               0x00
+#define XGENET_CLKEN_ADDR              0x08
+
+#define CSR_CLK                BIT(0)
+#define XGENET_CLK     BIT(1)
+#define PCS_CLK                BIT(3)
+#define AN_REF_CLK     BIT(4)
+#define AN_CLK         BIT(5)
+#define AD_CLK         BIT(6)
+
+#define CSR_RST                BIT(0)
+#define XGENET_RST     BIT(1)
+#define PCS_RST                BIT(3)
+#define AN_REF_RST     BIT(4)
+#define AN_RST         BIT(5)
+#define AD_RST         BIT(6)
+
 #define AXGMAC_CONFIG_0                        0x0000
 #define AXGMAC_CONFIG_1                        0x0004
 #define HSTMACRST                      BIT(31)
@@ -38,6 +57,7 @@
 #define HSTMACADR_MSW_ADDR             0x0014
 #define HSTMAXFRAME_LENGTH_ADDR                0x0020
 
+#define XG_MCX_RX_DV_GATE_REG_0_ADDR   0x0004
 #define XG_RSIF_CONFIG_REG_ADDR                0x00a0
 #define XCLE_BYPASS_REG0_ADDR           0x0160
 #define XCLE_BYPASS_REG1_ADDR           0x0164
index 783543ad1fcfa1a4976090e0797f7f15f29a1724..909ad7a0d48088fcaa8295dfdd3b3617becf8d12 100644 (file)
@@ -456,6 +456,67 @@ static int bcm_sysport_set_wol(struct net_device *dev,
        return 0;
 }
 
+static int bcm_sysport_get_coalesce(struct net_device *dev,
+                                   struct ethtool_coalesce *ec)
+{
+       struct bcm_sysport_priv *priv = netdev_priv(dev);
+       u32 reg;
+
+       reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(0));
+
+       ec->tx_coalesce_usecs = (reg >> RING_TIMEOUT_SHIFT) * 8192 / 1000;
+       ec->tx_max_coalesced_frames = reg & RING_INTR_THRESH_MASK;
+
+       reg = rdma_readl(priv, RDMA_MBDONE_INTR);
+
+       ec->rx_coalesce_usecs = (reg >> RDMA_TIMEOUT_SHIFT) * 8192 / 1000;
+       ec->rx_max_coalesced_frames = reg & RDMA_INTR_THRESH_MASK;
+
+       return 0;
+}
+
+static int bcm_sysport_set_coalesce(struct net_device *dev,
+                                   struct ethtool_coalesce *ec)
+{
+       struct bcm_sysport_priv *priv = netdev_priv(dev);
+       unsigned int i;
+       u32 reg;
+
+       /* The base system clock is 125 MHz; the DMA timeout tick is this
+        * reference clock divided by 1024, which yields roughly 8.192 us.
+        * The maximum value has to fit in RING_TIMEOUT_MASK (16 bits).
+        */
+       if (ec->tx_max_coalesced_frames > RING_INTR_THRESH_MASK ||
+           ec->tx_coalesce_usecs > (RING_TIMEOUT_MASK * 8) + 1 ||
+           ec->rx_max_coalesced_frames > RDMA_INTR_THRESH_MASK ||
+           ec->rx_coalesce_usecs > (RDMA_TIMEOUT_MASK * 8) + 1)
+               return -EINVAL;
+
+       if ((ec->tx_coalesce_usecs == 0 && ec->tx_max_coalesced_frames == 0) ||
+           (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0))
+               return -EINVAL;
+
+       for (i = 0; i < dev->num_tx_queues; i++) {
+               reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(i));
+               reg &= ~(RING_INTR_THRESH_MASK |
+                        RING_TIMEOUT_MASK << RING_TIMEOUT_SHIFT);
+               reg |= ec->tx_max_coalesced_frames;
+               reg |= DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000, 8192) <<
+                        RING_TIMEOUT_SHIFT;
+               tdma_writel(priv, reg, TDMA_DESC_RING_INTR_CONTROL(i));
+       }
+
+       reg = rdma_readl(priv, RDMA_MBDONE_INTR);
+       reg &= ~(RDMA_INTR_THRESH_MASK |
+                RDMA_TIMEOUT_MASK << RDMA_TIMEOUT_SHIFT);
+       reg |= ec->rx_max_coalesced_frames;
+       reg |= DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000, 8192) <<
+                           RDMA_TIMEOUT_SHIFT;
+       rdma_writel(priv, reg, RDMA_MBDONE_INTR);
+
+       return 0;
+}
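
Given the 8.192 us tick described above, the setter rounds microseconds up to whole ticks with DIV_ROUND_UP(usecs * 1000, 8192) and the getter converts back with ticks * 8192 / 1000, so a value read back can be slightly coarser than what was written. A standalone check of that round trip:

    #include <assert.h>
    #include <stdint.h>

    #define TICK_NS 8192u   /* 125 MHz reference / 1024 = 8.192 us per tick */

    static uint32_t usecs_to_ticks(uint32_t usecs)
    {
            return (usecs * 1000u + TICK_NS - 1) / TICK_NS; /* DIV_ROUND_UP */
    }

    static uint32_t ticks_to_usecs(uint32_t ticks)
    {
            return ticks * TICK_NS / 1000u;
    }

    int main(void)
    {
            assert(usecs_to_ticks(1) == 1);     /* rounds up to one full tick */
            assert(usecs_to_ticks(100) == 13);  /* 100 us -> 12.2 -> 13 ticks */
            assert(ticks_to_usecs(13) == 106);  /* reads back slightly coarser */
            return 0;
    }
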
+
 static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
 {
        dev_kfree_skb_any(cb->skb);
@@ -463,67 +524,70 @@ static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
        dma_unmap_addr_set(cb, dma_addr, 0);
 }
 
-static int bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
-                                struct bcm_sysport_cb *cb)
+static struct sk_buff *bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
+                                            struct bcm_sysport_cb *cb)
 {
        struct device *kdev = &priv->pdev->dev;
        struct net_device *ndev = priv->netdev;
+       struct sk_buff *skb, *rx_skb;
        dma_addr_t mapping;
-       int ret;
 
-       cb->skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH);
-       if (!cb->skb) {
+       /* Allocate a new SKB for a new packet */
+       skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH);
+       if (!skb) {
+               priv->mib.alloc_rx_buff_failed++;
                netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
-               return -ENOMEM;
+               return NULL;
        }
 
-       mapping = dma_map_single(kdev, cb->skb->data,
+       mapping = dma_map_single(kdev, skb->data,
                                 RX_BUF_LENGTH, DMA_FROM_DEVICE);
-       ret = dma_mapping_error(kdev, mapping);
-       if (ret) {
+       if (dma_mapping_error(kdev, mapping)) {
                priv->mib.rx_dma_failed++;
-               bcm_sysport_free_cb(cb);
+               dev_kfree_skb_any(skb);
                netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
-               return ret;
+               return NULL;
        }
 
-       dma_unmap_addr_set(cb, dma_addr, mapping);
-       dma_desc_set_addr(priv, priv->rx_bd_assign_ptr, mapping);
+       /* Grab the current SKB on the ring */
+       rx_skb = cb->skb;
+       if (likely(rx_skb))
+               dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
+                                RX_BUF_LENGTH, DMA_FROM_DEVICE);
 
-       priv->rx_bd_assign_index++;
-       priv->rx_bd_assign_index &= (priv->num_rx_bds - 1);
-       priv->rx_bd_assign_ptr = priv->rx_bds +
-               (priv->rx_bd_assign_index * DESC_SIZE);
+       /* Put the new SKB on the ring */
+       cb->skb = skb;
+       dma_unmap_addr_set(cb, dma_addr, mapping);
+       dma_desc_set_addr(priv, cb->bd_addr, mapping);
 
        netif_dbg(priv, rx_status, ndev, "RX refill\n");
 
-       return 0;
+       /* Return the current SKB to the caller */
+       return rx_skb;
 }
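
The reworked bcm_sysport_rx_refill() above swaps a freshly allocated and mapped SKB into the ring slot and returns the previous one, so a slot is never left empty while its packet is processed; on allocation or mapping failure it returns NULL, the slot keeps its old buffer, and the caller drops the packet. A simplified model of the swap, with DMA mapping elided:

    #include <stdlib.h>

    struct slot { void *buf; };

    /* Put a new buffer on the ring slot and return the old one, or NULL
     * if allocation failed, in which case the slot keeps its buffer. */
    static void *slot_refill(struct slot *slot, size_t len)
    {
            void *fresh = malloc(len);
            void *old;

            if (!fresh)
                    return NULL;    /* caller drops the packet, slot unchanged */

            old = slot->buf;
            slot->buf = fresh;
            return old;
    }

    int main(void)
    {
            struct slot s = { .buf = malloc(64) };
            void *pkt = slot_refill(&s, 64);

            free(pkt);              /* "process" the received buffer */
            free(s.buf);
            return 0;
    }
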
 
 static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
 {
        struct bcm_sysport_cb *cb;
-       int ret = 0;
+       struct sk_buff *skb;
        unsigned int i;
 
        for (i = 0; i < priv->num_rx_bds; i++) {
-               cb = &priv->rx_cbs[priv->rx_bd_assign_index];
-               if (cb->skb)
-                       continue;
-
-               ret = bcm_sysport_rx_refill(priv, cb);
-               if (ret)
-                       break;
+               cb = &priv->rx_cbs[i];
+               skb = bcm_sysport_rx_refill(priv, cb);
+               if (skb)
+                       dev_kfree_skb(skb);
+               if (!cb->skb)
+                       return -ENOMEM;
        }
 
-       return ret;
+       return 0;
 }
 
 /* Poll the hardware for up to budget packets to process */
 static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
                                        unsigned int budget)
 {
-       struct device *kdev = &priv->pdev->dev;
        struct net_device *ndev = priv->netdev;
        unsigned int processed = 0, to_process;
        struct bcm_sysport_cb *cb;
@@ -531,7 +595,6 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
        unsigned int p_index;
        u16 len, status;
        struct bcm_rsb *rsb;
-       int ret;
 
        /* Determine how much we should process since last call */
        p_index = rdma_readl(priv, RDMA_PROD_INDEX);
@@ -549,13 +612,8 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
 
        while ((processed < to_process) && (processed < budget)) {
                cb = &priv->rx_cbs[priv->rx_read_ptr];
-               skb = cb->skb;
+               skb = bcm_sysport_rx_refill(priv, cb);
 
-               processed++;
-               priv->rx_read_ptr++;
-
-               if (priv->rx_read_ptr == priv->num_rx_bds)
-                       priv->rx_read_ptr = 0;
 
                /* We do not have a backing SKB, so we do not have a
                 * corresponding DMA mapping for this incoming packet since
@@ -566,12 +624,9 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
                        netif_err(priv, rx_err, ndev, "out of memory!\n");
                        ndev->stats.rx_dropped++;
                        ndev->stats.rx_errors++;
-                       goto refill;
+                       goto next;
                }
 
-               dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
-                                RX_BUF_LENGTH, DMA_FROM_DEVICE);
-
                /* Extract the Receive Status Block prepended */
                rsb = (struct bcm_rsb *)skb->data;
                len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
@@ -583,12 +638,20 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
                          p_index, priv->rx_c_index, priv->rx_read_ptr,
                          len, status);
 
+               if (unlikely(len > RX_BUF_LENGTH)) {
+                       netif_err(priv, rx_status, ndev, "oversized packet\n");
+                       ndev->stats.rx_length_errors++;
+                       ndev->stats.rx_errors++;
+                       dev_kfree_skb_any(skb);
+                       goto next;
+               }
+
                if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
                        netif_err(priv, rx_status, ndev, "fragmented packet!\n");
                        ndev->stats.rx_dropped++;
                        ndev->stats.rx_errors++;
-                       bcm_sysport_free_cb(cb);
-                       goto refill;
+                       dev_kfree_skb_any(skb);
+                       goto next;
                }
 
                if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) {
@@ -597,8 +660,8 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
                                ndev->stats.rx_over_errors++;
                        ndev->stats.rx_dropped++;
                        ndev->stats.rx_errors++;
-                       bcm_sysport_free_cb(cb);
-                       goto refill;
+                       dev_kfree_skb_any(skb);
+                       goto next;
                }
 
                skb_put(skb, len);
@@ -625,10 +688,12 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
                ndev->stats.rx_bytes += len;
 
                napi_gro_receive(&priv->napi, skb);
-refill:
-               ret = bcm_sysport_rx_refill(priv, cb);
-               if (ret)
-                       priv->mib.alloc_rx_buff_failed++;
+next:
+               processed++;
+               priv->rx_read_ptr++;
+
+               if (priv->rx_read_ptr == priv->num_rx_bds)
+                       priv->rx_read_ptr = 0;
        }
 
        return processed;
@@ -1269,14 +1334,14 @@ static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
 
 static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
 {
+       struct bcm_sysport_cb *cb;
        u32 reg;
        int ret;
+       int i;
 
        /* Initialize SW view of the RX ring */
        priv->num_rx_bds = NUM_RX_DESC;
        priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
-       priv->rx_bd_assign_ptr = priv->rx_bds;
-       priv->rx_bd_assign_index = 0;
        priv->rx_c_index = 0;
        priv->rx_read_ptr = 0;
        priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct bcm_sysport_cb),
@@ -1286,6 +1351,11 @@ static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
                return -ENOMEM;
        }
 
+       for (i = 0; i < priv->num_rx_bds; i++) {
+               cb = priv->rx_cbs + i;
+               cb->bd_addr = priv->rx_bds + i * DESC_SIZE;
+       }
+
        ret = bcm_sysport_alloc_rx_bufs(priv);
        if (ret) {
                netif_err(priv, hw, priv->netdev, "SKB allocation failed\n");
@@ -1641,6 +1711,8 @@ static struct ethtool_ops bcm_sysport_ethtool_ops = {
        .get_sset_count         = bcm_sysport_get_sset_count,
        .get_wol                = bcm_sysport_get_wol,
        .set_wol                = bcm_sysport_set_wol,
+       .get_coalesce           = bcm_sysport_get_coalesce,
+       .set_coalesce           = bcm_sysport_set_coalesce,
 };
 
 static const struct net_device_ops bcm_sysport_netdev_ops = {
index e2c043eabbf39d165644312aba5bbecb4f07fcf8..f28bf545d7f466527b76a2b2d98f8f9e31602cc0 100644 (file)
@@ -292,7 +292,7 @@ struct bcm_rsb {
 #define RDMA_END_ADDR_LO               0x102c
 
 #define RDMA_MBDONE_INTR               0x1030
-#define  RDMA_INTR_THRESH_MASK         0xff
+#define  RDMA_INTR_THRESH_MASK         0x1ff
 #define  RDMA_TIMEOUT_SHIFT            16
 #define  RDMA_TIMEOUT_MASK             0xffff
 
@@ -663,8 +663,6 @@ struct bcm_sysport_priv {
 
        /* Receive queue */
        void __iomem            *rx_bds;
-       void __iomem            *rx_bd_assign_ptr;
-       unsigned int            rx_bd_assign_index;
        struct bcm_sysport_cb   *rx_cbs;
        unsigned int            num_rx_bds;
        unsigned int            rx_read_ptr;
index 1f82a04ce01a8468e7d8dde208babdea4220ab88..7a4aaa3c01b69d43b8f0bd7f1023150f203c7179 100644 (file)
@@ -357,6 +357,7 @@ struct sw_tx_bd {
 struct sw_rx_page {
        struct page     *page;
        DEFINE_DMA_UNMAP_ADDR(mapping);
+       unsigned int    offset;
 };
 
 union db_prod {
@@ -381,9 +382,10 @@ union db_prod {
 
 #define PAGES_PER_SGE_SHIFT    0
 #define PAGES_PER_SGE          (1 << PAGES_PER_SGE_SHIFT)
-#define SGE_PAGE_SIZE          PAGE_SIZE
-#define SGE_PAGE_SHIFT         PAGE_SHIFT
-#define SGE_PAGE_ALIGN(addr)   PAGE_ALIGN((typeof(PAGE_SIZE))(addr))
+#define SGE_PAGE_SHIFT         12
+#define SGE_PAGE_SIZE          (1 << SGE_PAGE_SHIFT)
+#define SGE_PAGE_MASK          (~(SGE_PAGE_SIZE - 1))
+#define SGE_PAGE_ALIGN(addr)   (((addr) + SGE_PAGE_SIZE - 1) & SGE_PAGE_MASK)
 #define SGE_PAGES              (SGE_PAGE_SIZE * PAGES_PER_SGE)
 #define TPA_AGG_SIZE           min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) * \
                                            SGE_PAGES), 0xffff)
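
SGE_PAGE_SIZE is now pinned to 4 KiB (SGE_PAGE_SHIFT 12) instead of tracking PAGE_SIZE, so on a 64 KiB-page system one system page can back sixteen SGE chunks; the open-coded SGE_PAGE_ALIGN is the usual power-of-two round-up. A quick standalone check of the macro:

    #include <assert.h>
    #include <stdint.h>

    #define SGE_PAGE_SHIFT 12
    #define SGE_PAGE_SIZE  (1u << SGE_PAGE_SHIFT)
    #define SGE_PAGE_MASK  (~(SGE_PAGE_SIZE - 1))
    #define SGE_PAGE_ALIGN(addr) (((addr) + SGE_PAGE_SIZE - 1) & SGE_PAGE_MASK)

    int main(void)
    {
            /* Round up to the next 4 KiB boundary. */
            assert(SGE_PAGE_ALIGN(0u) == 0u);
            assert(SGE_PAGE_ALIGN(1u) == 0x1000u);
            assert(SGE_PAGE_ALIGN(0x1000u) == 0x1000u);
            assert(SGE_PAGE_ALIGN(0x1001u) == 0x2000u);
            return 0;
    }
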
@@ -526,6 +528,12 @@ enum bnx2x_tpa_mode_t {
        TPA_MODE_GRO
 };
 
+struct bnx2x_alloc_pool {
+       struct page     *page;
+       dma_addr_t      dma;
+       unsigned int    offset;
+};
+
 struct bnx2x_fastpath {
        struct bnx2x            *bp; /* parent */
 
@@ -599,6 +607,8 @@ struct bnx2x_fastpath {
             4 (for the digits and to make it DWORD aligned) */
 #define FP_NAME_SIZE           (sizeof(((struct net_device *)0)->name) + 8)
        char                    name[FP_NAME_SIZE];
+
+       struct bnx2x_alloc_pool page_pool;
 };
 
 #define bnx2x_fp(bp, nr, var)  ((bp)->fp[(nr)].var)
index ec56a9b65dc3a313e1b0571e8a58047c161f6507..e2a65334708d8d61703dd44d55ab3ef9d0dda67f 100644 (file)
@@ -544,30 +544,49 @@ static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
 static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                              u16 index, gfp_t gfp_mask)
 {
-       struct page *page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
+       struct bnx2x_alloc_pool *pool = &fp->page_pool;
        dma_addr_t mapping;
 
-       if (unlikely(page == NULL)) {
-               BNX2X_ERR("Can't alloc sge\n");
-               return -ENOMEM;
-       }
+       if (!pool->page || (PAGE_SIZE - pool->offset) < SGE_PAGE_SIZE) {
 
-       mapping = dma_map_page(&bp->pdev->dev, page, 0,
-                              SGE_PAGES, DMA_FROM_DEVICE);
-       if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
-               __free_pages(page, PAGES_PER_SGE_SHIFT);
-               BNX2X_ERR("Can't map sge\n");
-               return -ENOMEM;
+               /* Put the page reference held by the memory pool, since we
+                * won't be using this page for the pool anymore.
+                */
+               if (pool->page)
+                       put_page(pool->page);
+
+               pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
+               if (unlikely(!pool->page)) {
+                       BNX2X_ERR("Can't alloc sge\n");
+                       return -ENOMEM;
+               }
+
+               pool->dma = dma_map_page(&bp->pdev->dev, pool->page, 0,
+                                        PAGE_SIZE, DMA_FROM_DEVICE);
+               if (unlikely(dma_mapping_error(&bp->pdev->dev,
+                                              pool->dma))) {
+                       __free_pages(pool->page, PAGES_PER_SGE_SHIFT);
+                       pool->page = NULL;
+                       BNX2X_ERR("Can't map sge\n");
+                       return -ENOMEM;
+               }
+               pool->offset = 0;
        }
 
-       sw_buf->page = page;
+       get_page(pool->page);
+       sw_buf->page = pool->page;
+       sw_buf->offset = pool->offset;
+
+       mapping = pool->dma + sw_buf->offset;
        dma_unmap_addr_set(sw_buf, mapping, mapping);
 
        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
        sge->addr_lo = cpu_to_le32(U64_LO(mapping));
 
+       pool->offset += SGE_PAGE_SIZE;
+
        return 0;
 }
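
bnx2x_alloc_rx_sge() above carves SGE_PAGE_SIZE chunks out of a single mapped system page, taking a page reference per chunk and only allocating a new pool page when the remaining space is too small. A simplified userspace model of the carving logic; the kernel keeps the old page alive via refcounts where this sketch simply frees it:

    #include <assert.h>
    #include <stddef.h>
    #include <stdlib.h>

    #define POOL_PAGE 65536u        /* e.g. a 64 KiB system page */
    #define CHUNK      4096u        /* SGE_PAGE_SIZE */

    struct pool { char *page; size_t offset; };

    /* Return a CHUNK-sized slice, refreshing the backing page as needed. */
    static char *pool_alloc(struct pool *p)
    {
            char *chunk;

            if (!p->page || POOL_PAGE - p->offset < CHUNK) {
                    free(p->page);  /* the kernel drops a refcount instead */
                    p->page = malloc(POOL_PAGE);
                    if (!p->page)
                            return NULL;
                    p->offset = 0;
            }
            chunk = p->page + p->offset;
            p->offset += CHUNK;
            return chunk;
    }

    int main(void)
    {
            struct pool p = { 0 };
            char *first = pool_alloc(&p);

            /* Sixteen chunks fit before a new page is allocated. */
            for (int i = 1; i < 16; i++)
                    assert(pool_alloc(&p) == first + i * CHUNK);
            assert(p.offset == POOL_PAGE);
            free(p.page);
            return 0;
    }
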
 
@@ -629,20 +648,22 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                        return err;
                }
 
-               /* Unmap the page as we're going to pass it to the stack */
-               dma_unmap_page(&bp->pdev->dev,
-                              dma_unmap_addr(&old_rx_pg, mapping),
-                              SGE_PAGES, DMA_FROM_DEVICE);
+               dma_unmap_single(&bp->pdev->dev,
+                                dma_unmap_addr(&old_rx_pg, mapping),
+                                SGE_PAGE_SIZE, DMA_FROM_DEVICE);
                /* Add one frag and update the appropriate fields in the skb */
                if (fp->mode == TPA_MODE_LRO)
-                       skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
+                       skb_fill_page_desc(skb, j, old_rx_pg.page,
+                                          old_rx_pg.offset, frag_len);
                else { /* GRO */
                        int rem;
                        int offset = 0;
                        for (rem = frag_len; rem > 0; rem -= gro_size) {
                                int len = rem > gro_size ? gro_size : rem;
                                skb_fill_page_desc(skb, frag_id++,
-                                                  old_rx_pg.page, offset, len);
+                                                  old_rx_pg.page,
+                                                  old_rx_pg.offset + offset,
+                                                  len);
                                if (offset)
                                        get_page(old_rx_pg.page);
                                offset += len;
@@ -662,7 +683,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
 {
        if (fp->rx_frag_size)
-               put_page(virt_to_head_page(data));
+               skb_free_frag(data);
        else
                kfree(data);
 }
index d7a71758e87615de36fe06664a914291bdb3cfa3..2b30081ec26d128ec86c602eb5a097c77c664159 100644 (file)
@@ -804,9 +804,13 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
        if (!page)
                return;
 
-       dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
-                      SGE_PAGES, DMA_FROM_DEVICE);
-       __free_pages(page, PAGES_PER_SGE_SHIFT);
+       /* Since many fragments can share the same page, make sure to
+        * only unmap and free the page once.
+        */
+       dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
+                        SGE_PAGE_SIZE, DMA_FROM_DEVICE);
+
+       put_page(page);
 
        sw_buf->page = NULL;
        sge->addr_hi = 0;
@@ -964,6 +968,25 @@ static inline void bnx2x_set_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
        ((u8 *)fw_lo)[1]  = mac[4];
 }
 
+static inline void bnx2x_free_rx_mem_pool(struct bnx2x *bp,
+                                         struct bnx2x_alloc_pool *pool)
+{
+       if (!pool->page)
+               return;
+
+       /* Page was not fully fragmented.  Unmap unused space */
+       if (pool->offset < PAGE_SIZE) {
+               dma_addr_t dma = pool->dma + pool->offset;
+               int size = PAGE_SIZE - pool->offset;
+
+               dma_unmap_single(&bp->pdev->dev, dma, size, DMA_FROM_DEVICE);
+       }
+
+       put_page(pool->page);
+
+       pool->page = NULL;
+}
+
 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
 {
@@ -974,6 +997,8 @@ static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
 
        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
+
+       bnx2x_free_rx_mem_pool(bp, &fp->page_pool);
 }
 
 static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
index 6043734ea613bdae8d1a8c0abe7f14719e3a8cbd..b43b2cb9b830bfc64c1586fb4058f06596671f6e 100644 (file)
@@ -2770,12 +2770,79 @@ static int bcmgenet_close(struct net_device *dev)
        return ret;
 }
 
+static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring)
+{
+       struct bcmgenet_priv *priv = ring->priv;
+       u32 p_index, c_index, intsts, intmsk;
+       struct netdev_queue *txq;
+       unsigned int free_bds;
+       unsigned long flags;
+       bool txq_stopped;
+
+       if (!netif_msg_tx_err(priv))
+               return;
+
+       txq = netdev_get_tx_queue(priv->dev, ring->queue);
+
+       spin_lock_irqsave(&ring->lock, flags);
+       if (ring->index == DESC_INDEX) {
+               intsts = ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
+               intmsk = UMAC_IRQ_TXDMA_DONE | UMAC_IRQ_TXDMA_MBDONE;
+       } else {
+               intsts = ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
+               intmsk = 1 << ring->index;
+       }
+       c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
+       p_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_PROD_INDEX);
+       txq_stopped = netif_tx_queue_stopped(txq);
+       free_bds = ring->free_bds;
+       spin_unlock_irqrestore(&ring->lock, flags);
+
+       netif_err(priv, tx_err, priv->dev, "Ring %d queue %d status summary\n"
+                 "TX queue status: %s, interrupts: %s\n"
+                 "(sw)free_bds: %d (sw)size: %d\n"
+                 "(sw)p_index: %d (hw)p_index: %d\n"
+                 "(sw)c_index: %d (hw)c_index: %d\n"
+                 "(sw)clean_p: %d (sw)write_p: %d\n"
+                 "(sw)cb_ptr: %d (sw)end_ptr: %d\n",
+                 ring->index, ring->queue,
+                 txq_stopped ? "stopped" : "active",
+                 intsts & intmsk ? "enabled" : "disabled",
+                 free_bds, ring->size,
+                 ring->prod_index, p_index & DMA_P_INDEX_MASK,
+                 ring->c_index, c_index & DMA_C_INDEX_MASK,
+                 ring->clean_ptr, ring->write_ptr,
+                 ring->cb_ptr, ring->end_ptr);
+}
+
 static void bcmgenet_timeout(struct net_device *dev)
 {
        struct bcmgenet_priv *priv = netdev_priv(dev);
+       u32 int0_enable = 0;
+       u32 int1_enable = 0;
+       unsigned int q;
 
        netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");
 
+       bcmgenet_disable_tx_napi(priv);
+
+       for (q = 0; q < priv->hw_params->tx_queues; q++)
+               bcmgenet_dump_tx_queue(&priv->tx_rings[q]);
+       bcmgenet_dump_tx_queue(&priv->tx_rings[DESC_INDEX]);
+
+       bcmgenet_tx_reclaim_all(dev);
+
+       for (q = 0; q < priv->hw_params->tx_queues; q++)
+               int1_enable |= (1 << q);
+
+       int0_enable = UMAC_IRQ_TXDMA_DONE;
+
+       /* Re-enable TX interrupts if disabled */
+       bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
+       bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
+
+       bcmgenet_enable_tx_napi(priv);
+
        dev->trans_start = jiffies;
 
        dev->stats.tx_errors++;
index 069952fa5d644b62b7d1a04fdb8b615a4ed3d69a..73c934cf6c61c28dc43f8d7e20a7f718a1067236 100644 (file)
@@ -6618,7 +6618,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
 static void tg3_frag_free(bool is_frag, void *data)
 {
        if (is_frag)
-               put_page(virt_to_head_page(data));
+               skb_free_frag(data);
        else
                kfree(data);
 }
index fc646a41d5481406400bb4013ced8f96cf236092..740d04fd222333c8a70fa0a579f4c408b995a093 100644 (file)
@@ -54,6 +54,8 @@
 #define MACB_MAX_TX_LEN                ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
 #define GEM_MAX_TX_LEN         ((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
 
+#define GEM_MTU_MIN_SIZE       68
+
 /*
  * Graceful stop timeouts in us. We should allow up to
  * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
@@ -785,7 +787,7 @@ static int gem_rx(struct macb *bp, int budget)
                }
                /* now everything is ready for receiving packet */
                bp->rx_skbuff[entry] = NULL;
-               len = MACB_BFEXT(RX_FRMLEN, ctrl);
+               len = ctrl & bp->rx_frm_len_mask;
 
                netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);
 
@@ -831,7 +833,7 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
        struct macb_dma_desc *desc;
 
        desc = macb_rx_desc(bp, last_frag);
-       len = MACB_BFEXT(RX_FRMLEN, desc->ctrl);
+       len = desc->ctrl & bp->rx_frm_len_mask;
 
        netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
                macb_rx_ring_wrap(first_frag),
@@ -1651,7 +1653,10 @@ static void macb_init_hw(struct macb *bp)
        config |= MACB_BF(RBOF, NET_IP_ALIGN);  /* Make eth data aligned */
        config |= MACB_BIT(PAE);                /* PAuse Enable */
        config |= MACB_BIT(DRFCS);              /* Discard Rx FCS */
-       config |= MACB_BIT(BIG);                /* Receive oversized frames */
+       if (bp->caps & MACB_CAPS_JUMBO)
+               config |= MACB_BIT(JFRAME);     /* Enable jumbo frames */
+       else
+               config |= MACB_BIT(BIG);        /* Receive oversized frames */
        if (bp->dev->flags & IFF_PROMISC)
                config |= MACB_BIT(CAF);        /* Copy All Frames */
        else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM)
@@ -1660,8 +1665,13 @@ static void macb_init_hw(struct macb *bp)
                config |= MACB_BIT(NBC);        /* No BroadCast */
        config |= macb_dbw(bp);
        macb_writel(bp, NCFGR, config);
+       if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
+               gem_writel(bp, JML, bp->jumbo_max_len);
        bp->speed = SPEED_10;
        bp->duplex = DUPLEX_HALF;
+       bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK;
+       if (bp->caps & MACB_CAPS_JUMBO)
+               bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK;
 
        macb_configure_dma(bp);
 
@@ -1865,6 +1875,26 @@ static int macb_close(struct net_device *dev)
        return 0;
 }
 
+static int macb_change_mtu(struct net_device *dev, int new_mtu)
+{
+       struct macb *bp = netdev_priv(dev);
+       u32 max_mtu;
+
+       if (netif_running(dev))
+               return -EBUSY;
+
+       max_mtu = ETH_DATA_LEN;
+       if (bp->caps & MACB_CAPS_JUMBO)
+               max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN;
+
+       if ((new_mtu > max_mtu) || (new_mtu < GEM_MTU_MIN_SIZE))
+               return -EINVAL;
+
+       dev->mtu = new_mtu;
+
+       return 0;
+}
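+
+/* For illustration: with jumbo_max_len = 10240 (the zynqmp config
+ * below) programmed into GEM_JML, the bound above works out to
+ * max_mtu = 10240 - ETH_HLEN (14) - ETH_FCS_LEN (4) = 10222 bytes.
+ */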
+
 static void gem_update_stats(struct macb *bp)
 {
        int i;
@@ -2141,7 +2171,7 @@ static const struct net_device_ops macb_netdev_ops = {
        .ndo_get_stats          = macb_get_stats,
        .ndo_do_ioctl           = macb_ioctl,
        .ndo_validate_addr      = eth_validate_addr,
-       .ndo_change_mtu         = eth_change_mtu,
+       .ndo_change_mtu         = macb_change_mtu,
        .ndo_set_mac_address    = eth_mac_addr,
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = macb_poll_controller,
@@ -2702,6 +2732,16 @@ static const struct macb_config emac_config = {
        .init = at91ether_init,
 };
 
+static const struct macb_config zynqmp_config = {
+       .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE |
+               MACB_CAPS_JUMBO,
+       .dma_burst_length = 16,
+       .clk_init = macb_clk_init,
+       .init = macb_init,
+       .jumbo_max_len = 10240,
+};
+
 static const struct macb_config zynq_config = {
        .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE |
                MACB_CAPS_NO_GIGABIT_HALF,
@@ -2720,6 +2760,7 @@ static const struct of_device_id macb_dt_ids[] = {
        { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
        { .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
        { .compatible = "cdns,emac", .data = &emac_config },
+       { .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config },
        { .compatible = "cdns,zynq-gem", .data = &zynq_config },
        { /* sentinel */ }
 };
@@ -2789,6 +2830,10 @@ static int macb_probe(struct platform_device *pdev)
        bp->pclk = pclk;
        bp->hclk = hclk;
        bp->tx_clk = tx_clk;
+       if (macb_config)
+               bp->jumbo_max_len = macb_config->jumbo_max_len;
+
        spin_lock_init(&bp->lock);
 
        /* setup capabilities */
index 24b1d9bcd8654d5aba2401b7b6a85b563f09d9cc..d74655993d4bf19cec68ab227263f5f069467e4c 100644 (file)
@@ -71,6 +71,7 @@
 #define GEM_NCFGR              0x0004 /* Network Config */
 #define GEM_USRIO              0x000c /* User IO */
 #define GEM_DMACFG             0x0010 /* DMA Configuration */
+#define GEM_JML                        0x0048 /* Jumbo Max Length */
 #define GEM_HRB                        0x0080 /* Hash Bottom */
 #define GEM_HRT                        0x0084 /* Hash Top */
 #define GEM_SA1B               0x0088 /* Specific1 Bottom */
 #define MACB_CAPS_GIGABIT_MODE_AVAILABLE       0x20000000
 #define MACB_CAPS_SG_DISABLED                  0x40000000
 #define MACB_CAPS_MACB_IS_GEM                  0x80000000
+#define MACB_CAPS_JUMBO                                0x00000008
 
 /* Bit manipulation macros */
 #define MACB_BIT(name)                                 \
@@ -515,6 +517,9 @@ struct macb_dma_desc {
 #define MACB_RX_BROADCAST_OFFSET               31
 #define MACB_RX_BROADCAST_SIZE                 1
 
+#define MACB_RX_FRMLEN_MASK                    0xFFF
+#define MACB_RX_JFRMLEN_MASK                   0x3FFF
+
 /* RX checksum offload disabled: bit 24 clear in NCFGR */
 #define GEM_RX_TYPEID_MATCH_OFFSET             22
 #define GEM_RX_TYPEID_MATCH_SIZE               2
@@ -758,6 +763,7 @@ struct macb_config {
        int     (*clk_init)(struct platform_device *pdev, struct clk **pclk,
                            struct clk **hclk, struct clk **tx_clk);
        int     (*init)(struct platform_device *pdev);
+       int     jumbo_max_len;
 };
 
 struct macb_queue {
@@ -827,6 +833,9 @@ struct macb {
        unsigned int            max_tx_length;
 
        u64                     ethtool_stats[GEM_STATS_LEN];
+
+       unsigned int            rx_frm_len_mask;
+       unsigned int            jumbo_max_len;
 };
 
 static inline bool macb_is_gem(struct macb *bp)
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
new file mode 100644 (file)
index 0000000..fc3d8e3
--- /dev/null
@@ -0,0 +1,40 @@
+#
+# Cavium ethernet device configuration
+#
+
+config NET_VENDOR_CAVIUM
+       tristate "Cavium ethernet drivers"
+       depends on PCI && 64BIT
+       ---help---
+         Enable support for the Cavium ThunderX Network Interface
+         Controller (NIC). The NIC provides the controller and DMA
+         engines to move network traffic to/from memory. The NIC
+         works closely with TNS, BGX and SerDes to implement the
+         functions replacing and virtualizing those of a typical
+         standalone PCIe NIC chip.
+
+         If you have a Cavium Thunder board, say Y.
+
+if NET_VENDOR_CAVIUM
+
+config THUNDER_NIC_PF
+       tristate "Thunder Physical function driver"
+       default NET_VENDOR_CAVIUM
+       select THUNDER_NIC_BGX
+       ---help---
+         This driver supports Thunder's NIC physical function.
+
+config THUNDER_NIC_VF
+       tristate "Thunder Virtual function driver"
+       default NET_VENDOR_CAVIUM
+       ---help---
+         This driver supports Thunder's NIC virtual function.
+
+config THUNDER_NIC_BGX
+       tristate "Thunder MAC interface driver (BGX)"
+       default NET_VENDOR_CAVIUM
+       ---help---
+         This driver supports programming and controlling of MAC
+         interface from NIC physical function driver.
+
+endif # NET_VENDOR_CAVIUM
diff --git a/drivers/net/ethernet/cavium/Makefile b/drivers/net/ethernet/cavium/Makefile
new file mode 100644 (file)
index 0000000..7aac478
--- /dev/null
@@ -0,0 +1,5 @@
+#
+# Makefile for the Cavium ethernet device drivers.
+#
+
+obj-$(CONFIG_NET_VENDOR_CAVIUM) += thunder/
diff --git a/drivers/net/ethernet/cavium/thunder/Makefile b/drivers/net/ethernet/cavium/thunder/Makefile
new file mode 100644 (file)
index 0000000..5c4615c
--- /dev/null
@@ -0,0 +1,11 @@
+#
+# Makefile for Cavium's Thunder ethernet device
+#
+
+obj-$(CONFIG_THUNDER_NIC_BGX) += thunder_bgx.o
+obj-$(CONFIG_THUNDER_NIC_PF) += nicpf.o
+obj-$(CONFIG_THUNDER_NIC_VF) += nicvf.o
+
+nicpf-y := nic_main.o
+nicvf-y := nicvf_main.o nicvf_queues.o
+nicvf-y += nicvf_ethtool.o
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
new file mode 100644 (file)
index 0000000..a3b43e5
--- /dev/null
@@ -0,0 +1,422 @@
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef NIC_H
+#define        NIC_H
+
+#include <linux/netdevice.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include "thunder_bgx.h"
+
+/* PCI device IDs */
+#define        PCI_DEVICE_ID_THUNDER_NIC_PF            0xA01E
+#define        PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF      0x0011
+#define        PCI_DEVICE_ID_THUNDER_NIC_VF            0xA034
+#define        PCI_DEVICE_ID_THUNDER_BGX               0xA026
+
+/* PCI BAR nos */
+#define        PCI_CFG_REG_BAR_NUM             0
+#define        PCI_MSIX_REG_BAR_NUM            4
+
+/* NIC SRIOV VF count */
+#define        MAX_NUM_VFS_SUPPORTED           128
+#define        DEFAULT_NUM_VF_ENABLED          8
+
+#define        NIC_TNS_BYPASS_MODE             0
+#define        NIC_TNS_MODE                    1
+
+/* NIC priv flags */
+#define        NIC_SRIOV_ENABLED               BIT(0)
+
+/* Min/Max packet size */
+#define        NIC_HW_MIN_FRS                  64
+#define        NIC_HW_MAX_FRS                  9200 /* 9216 max packet including FCS */
+
+/* Max pkinds */
+#define        NIC_MAX_PKIND                   16
+
+/* Rx Channels */
+/* Receive channel configuration in TNS bypass mode
+ * BGX0-LMAC0-CHAN0 - VNIC CHAN0
+ * BGX0-LMAC1-CHAN0 - VNIC CHAN16
+ * ...
+ * BGX1-LMAC0-CHAN0 - VNIC CHAN128
+ * ...
+ * BGX1-LMAC3-CHAN0 - VNIC CHAN176
+ */
+#define        NIC_INTF_COUNT                  2  /* Interfaces between VNIC and TNS/BGX */
+#define        NIC_CHANS_PER_INF               128
+#define        NIC_MAX_CHANS                   (NIC_INTF_COUNT * NIC_CHANS_PER_INF)
+#define        NIC_CPI_COUNT                   2048 /* No of channel parse indices */
+
+/* TNS bypass mode: 1-1 mapping between VNIC and BGX:LMAC */
+#define NIC_MAX_BGX                    MAX_BGX_PER_CN88XX
+#define        NIC_CPI_PER_BGX                 (NIC_CPI_COUNT / NIC_MAX_BGX)
+#define        NIC_MAX_CPI_PER_LMAC            64 /* Max when CPI_ALG is IP diffserv */
+#define        NIC_RSSI_PER_BGX                (NIC_RSSI_COUNT / NIC_MAX_BGX)
+
+/* Tx scheduling */
+#define        NIC_MAX_TL4                     1024
+#define        NIC_MAX_TL4_SHAPERS             256 /* 1 shaper for 4 TL4s */
+#define        NIC_MAX_TL3                     256
+#define        NIC_MAX_TL3_SHAPERS             64  /* 1 shaper for 4 TL3s */
+#define        NIC_MAX_TL2                     64
+#define        NIC_MAX_TL2_SHAPERS             2  /* 1 shaper for 32 TL2s */
+#define        NIC_MAX_TL1                     2
+
+/* TNS bypass mode */
+#define        NIC_TL2_PER_BGX                 32
+#define        NIC_TL4_PER_BGX                 (NIC_MAX_TL4 / NIC_MAX_BGX)
+#define        NIC_TL4_PER_LMAC                (NIC_MAX_TL4 / NIC_CHANS_PER_INF)
+
+/* NIC VF Interrupts */
+#define        NICVF_INTR_CQ                   0
+#define        NICVF_INTR_SQ                   1
+#define        NICVF_INTR_RBDR                 2
+#define        NICVF_INTR_PKT_DROP             3
+#define        NICVF_INTR_TCP_TIMER            4
+#define        NICVF_INTR_MBOX                 5
+#define        NICVF_INTR_QS_ERR               6
+
+#define        NICVF_INTR_CQ_SHIFT             0
+#define        NICVF_INTR_SQ_SHIFT             8
+#define        NICVF_INTR_RBDR_SHIFT           16
+#define        NICVF_INTR_PKT_DROP_SHIFT       20
+#define        NICVF_INTR_TCP_TIMER_SHIFT      21
+#define        NICVF_INTR_MBOX_SHIFT           22
+#define        NICVF_INTR_QS_ERR_SHIFT         23
+
+#define        NICVF_INTR_CQ_MASK              (0xFF << NICVF_INTR_CQ_SHIFT)
+#define        NICVF_INTR_SQ_MASK              (0xFF << NICVF_INTR_SQ_SHIFT)
+#define        NICVF_INTR_RBDR_MASK            (0x03 << NICVF_INTR_RBDR_SHIFT)
+#define        NICVF_INTR_PKT_DROP_MASK        BIT(NICVF_INTR_PKT_DROP_SHIFT)
+#define        NICVF_INTR_TCP_TIMER_MASK       BIT(NICVF_INTR_TCP_TIMER_SHIFT)
+#define        NICVF_INTR_MBOX_MASK            BIT(NICVF_INTR_MBOX_SHIFT)
+#define        NICVF_INTR_QS_ERR_MASK          BIT(NICVF_INTR_QS_ERR_SHIFT)
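+
+/* Resulting layout of the VF interrupt registers:
+ * bits [7:0] CQ, [15:8] SQ, [17:16] RBDR, [20] packet drop,
+ * [21] TCP timer, [22] mailbox, [23] qset error.
+ */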
+
+/* MSI-X interrupts */
+#define        NIC_PF_MSIX_VECTORS             10
+#define        NIC_VF_MSIX_VECTORS             20
+
+#define NIC_PF_INTR_ID_ECC0_SBE                0
+#define NIC_PF_INTR_ID_ECC0_DBE                1
+#define NIC_PF_INTR_ID_ECC1_SBE                2
+#define NIC_PF_INTR_ID_ECC1_DBE                3
+#define NIC_PF_INTR_ID_ECC2_SBE                4
+#define NIC_PF_INTR_ID_ECC2_DBE                5
+#define NIC_PF_INTR_ID_ECC3_SBE                6
+#define NIC_PF_INTR_ID_ECC3_DBE                7
+#define NIC_PF_INTR_ID_MBOX0           8
+#define NIC_PF_INTR_ID_MBOX1           9
+
+/* Global timer for CQ timer thresh interrupts.
+ * Calculated for an SCLK of 700MHz; the value written should be
+ * 1/16th of the expected SCLK cycle count.
+ *
+ * One tick per 0.05usec is 35 SCLK cycles, so the ideal value is 2.2;
+ * the ~10% error from rounding down to 2 is absorbed by the CQ timer
+ * thresh value.
+ */
+#define NICPF_CLK_PER_INT_TICK         2
+
+struct nicvf_cq_poll {
+       u8      cq_idx;         /* Completion queue index */
+       struct  napi_struct napi;
+};
+
+#define        NIC_RSSI_COUNT                  4096 /* Total no of RSS indices */
+#define NIC_MAX_RSS_HASH_BITS          8
+#define NIC_MAX_RSS_IDR_TBL_SIZE       (1 << NIC_MAX_RSS_HASH_BITS)
+#define RSS_HASH_KEY_SIZE              5 /* 320 bit key */
+
+struct nicvf_rss_info {
+       bool enable;
+#define        RSS_L2_EXTENDED_HASH_ENA        BIT(0)
+#define        RSS_IP_HASH_ENA                 BIT(1)
+#define        RSS_TCP_HASH_ENA                BIT(2)
+#define        RSS_TCP_SYN_DIS                 BIT(3)
+#define        RSS_UDP_HASH_ENA                BIT(4)
+#define RSS_L4_EXTENDED_HASH_ENA       BIT(5)
+#define        RSS_ROCE_ENA                    BIT(6)
+#define        RSS_L3_BI_DIRECTION_ENA         BIT(7)
+#define        RSS_L4_BI_DIRECTION_ENA         BIT(8)
+       u64 cfg;
+       u8  hash_bits;
+       u16 rss_size;
+       u8  ind_tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
+       u64 key[RSS_HASH_KEY_SIZE];
+} ____cacheline_aligned_in_smp;
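+
+/* For illustration only (not part of the driver): conceptually, RX
+ * steering uses the low hash_bits bits of the RSS hash to index
+ * ind_tbl, whose entry names the receive queue.
+ */
+static inline u8 nicvf_rss_rq_example(struct nicvf_rss_info *rss, u32 hash)
+{
+       return rss->ind_tbl[hash & (rss->rss_size - 1)];
+}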
+
+enum rx_stats_reg_offset {
+       RX_OCTS = 0x0,
+       RX_UCAST = 0x1,
+       RX_BCAST = 0x2,
+       RX_MCAST = 0x3,
+       RX_RED = 0x4,
+       RX_RED_OCTS = 0x5,
+       RX_ORUN = 0x6,
+       RX_ORUN_OCTS = 0x7,
+       RX_FCS = 0x8,
+       RX_L2ERR = 0x9,
+       RX_DRP_BCAST = 0xa,
+       RX_DRP_MCAST = 0xb,
+       RX_DRP_L3BCAST = 0xc,
+       RX_DRP_L3MCAST = 0xd,
+       RX_STATS_ENUM_LAST,
+};
+
+enum tx_stats_reg_offset {
+       TX_OCTS = 0x0,
+       TX_UCAST = 0x1,
+       TX_BCAST = 0x2,
+       TX_MCAST = 0x3,
+       TX_DROP = 0x4,
+       TX_STATS_ENUM_LAST,
+};
+
+struct nicvf_hw_stats {
+       u64 rx_bytes_ok;
+       u64 rx_ucast_frames_ok;
+       u64 rx_bcast_frames_ok;
+       u64 rx_mcast_frames_ok;
+       u64 rx_fcs_errors;
+       u64 rx_l2_errors;
+       u64 rx_drop_red;
+       u64 rx_drop_red_bytes;
+       u64 rx_drop_overrun;
+       u64 rx_drop_overrun_bytes;
+       u64 rx_drop_bcast;
+       u64 rx_drop_mcast;
+       u64 rx_drop_l3_bcast;
+       u64 rx_drop_l3_mcast;
+       u64 tx_bytes_ok;
+       u64 tx_ucast_frames_ok;
+       u64 tx_bcast_frames_ok;
+       u64 tx_mcast_frames_ok;
+       u64 tx_drops;
+};
+
+struct nicvf_drv_stats {
+       /* Rx */
+       u64 rx_frames_ok;
+       u64 rx_frames_64;
+       u64 rx_frames_127;
+       u64 rx_frames_255;
+       u64 rx_frames_511;
+       u64 rx_frames_1023;
+       u64 rx_frames_1518;
+       u64 rx_frames_jumbo;
+       u64 rx_drops;
+       /* Tx */
+       u64 tx_frames_ok;
+       u64 tx_drops;
+       u64 tx_busy;
+       u64 tx_tso;
+};
+
+struct nicvf {
+       struct net_device       *netdev;
+       struct pci_dev          *pdev;
+       u8                      vf_id;
+       u8                      node;
+       u8                      tns_mode;
+       u16                     mtu;
+       struct queue_set        *qs;
+       void __iomem            *reg_base;
+       bool                    link_up;
+       u8                      duplex;
+       u32                     speed;
+       struct page             *rb_page;
+       u32                     rb_page_offset;
+       bool                    rb_alloc_fail;
+       bool                    rb_work_scheduled;
+       struct delayed_work     rbdr_work;
+       struct tasklet_struct   rbdr_task;
+       struct tasklet_struct   qs_err_task;
+       struct tasklet_struct   cq_task;
+       struct nicvf_cq_poll    *napi[8];
+       struct nicvf_rss_info   rss_info;
+       u8                      cpi_alg;
+       /* Interrupt coalescing settings */
+       u32                     cq_coalesce_usecs;
+
+       u32                     msg_enable;
+       struct nicvf_hw_stats   stats;
+       struct nicvf_drv_stats  drv_stats;
+       struct bgx_stats        bgx_stats;
+       struct work_struct      reset_task;
+
+       /* MSI-X  */
+       bool                    msix_enabled;
+       u8                      num_vec;
+       struct msix_entry       msix_entries[NIC_VF_MSIX_VECTORS];
+       char                    irq_name[NIC_VF_MSIX_VECTORS][20];
+       bool                    irq_allocated[NIC_VF_MSIX_VECTORS];
+
+       bool                    pf_ready_to_rcv_msg;
+       bool                    pf_acked;
+       bool                    pf_nacked;
+       bool                    bgx_stats_acked;
+} ____cacheline_aligned_in_smp;
+
+/* PF <--> VF Mailbox communication
+ * Two 64bit registers are shared between PF and VF,
+ * with a separate pair for each VF.
+ * The register whose write triggers the mailbox interrupt is
+ * written last to mark end of message (see nic_send_msg_to_vf()).
+ */
+
+/* PF <--> VF mailbox communication */
+#define        NIC_PF_VF_MAILBOX_SIZE          2
+#define        NIC_MBOX_MSG_TIMEOUT            2000 /* ms */
+
+/* Mailbox message types */
+#define        NIC_MBOX_MSG_READY              0x01    /* Is PF ready to rcv msgs */
+#define        NIC_MBOX_MSG_ACK                0x02    /* ACK the message received */
+#define        NIC_MBOX_MSG_NACK               0x03    /* NACK the message received */
+#define        NIC_MBOX_MSG_QS_CFG             0x04    /* Configure Qset */
+#define        NIC_MBOX_MSG_RQ_CFG             0x05    /* Configure receive queue */
+#define        NIC_MBOX_MSG_SQ_CFG             0x06    /* Configure Send queue */
+#define        NIC_MBOX_MSG_RQ_DROP_CFG        0x07    /* Configure receive queue */
+#define        NIC_MBOX_MSG_SET_MAC            0x08    /* Add MAC ID to DMAC filter */
+#define        NIC_MBOX_MSG_SET_MAX_FRS        0x09    /* Set max frame size */
+#define        NIC_MBOX_MSG_CPI_CFG            0x0A    /* Config CPI, RSSI */
+#define        NIC_MBOX_MSG_RSS_SIZE           0x0B    /* Get RSS indir_tbl size */
+#define        NIC_MBOX_MSG_RSS_CFG            0x0C    /* Config RSS table */
+#define        NIC_MBOX_MSG_RSS_CFG_CONT       0x0D    /* RSS config continuation */
+#define        NIC_MBOX_MSG_RQ_BP_CFG          0x0E    /* RQ backpressure config */
+#define        NIC_MBOX_MSG_RQ_SW_SYNC         0x0F    /* Flush inflight pkts to RQ */
+#define        NIC_MBOX_MSG_BGX_STATS          0x10    /* Get stats from BGX */
+#define        NIC_MBOX_MSG_BGX_LINK_CHANGE    0x11    /* BGX:LMAC link status */
+#define NIC_MBOX_MSG_CFG_DONE          0x12    /* VF configuration done */
+#define NIC_MBOX_MSG_SHUTDOWN          0x13    /* VF is being shutdown */
+
+struct nic_cfg_msg {
+       u8    msg;
+       u8    vf_id;
+       u8    tns_mode;
+       u8    node_id;
+       u8    mac_addr[ETH_ALEN];
+};
+
+/* Qset configuration */
+struct qs_cfg_msg {
+       u8    msg;
+       u8    num;
+       u64   cfg;
+};
+
+/* Receive queue configuration */
+struct rq_cfg_msg {
+       u8    msg;
+       u8    qs_num;
+       u8    rq_num;
+       u64   cfg;
+};
+
+/* Send queue configuration */
+struct sq_cfg_msg {
+       u8    msg;
+       u8    qs_num;
+       u8    sq_num;
+       u64   cfg;
+};
+
+/* Set VF's MAC address */
+struct set_mac_msg {
+       u8    msg;
+       u8    vf_id;
+       u8    mac_addr[ETH_ALEN];
+};
+
+/* Set Maximum frame size */
+struct set_frs_msg {
+       u8    msg;
+       u8    vf_id;
+       u16   max_frs;
+};
+
+/* Set CPI algorithm type */
+struct cpi_cfg_msg {
+       u8    msg;
+       u8    vf_id;
+       u8    rq_cnt;
+       u8    cpi_alg;
+};
+
+/* Get RSS table size */
+struct rss_sz_msg {
+       u8    msg;
+       u8    vf_id;
+       u16   ind_tbl_size;
+};
+
+/* Set RSS configuration */
+struct rss_cfg_msg {
+       u8    msg;
+       u8    vf_id;
+       u8    hash_bits;
+       u8    tbl_len;
+       u8    tbl_offset;
+#define RSS_IND_TBL_LEN_PER_MBX_MSG    8
+       u8    ind_tbl[RSS_IND_TBL_LEN_PER_MBX_MSG];
+};
+
+struct bgx_stats_msg {
+       u8    msg;
+       u8    vf_id;
+       u8    rx;
+       u8    idx;
+       u64   stats;
+};
+
+/* Physical interface link status */
+struct bgx_link_status {
+       u8    msg;
+       u8    link_up;
+       u8    duplex;
+       u32   speed;
+};
+
+/* 128 bit shared memory between PF and each VF */
+union nic_mbx {
+       struct { u8 msg; }      msg;
+       struct nic_cfg_msg      nic_cfg;
+       struct qs_cfg_msg       qs;
+       struct rq_cfg_msg       rq;
+       struct sq_cfg_msg       sq;
+       struct set_mac_msg      mac;
+       struct set_frs_msg      frs;
+       struct cpi_cfg_msg      cpi_cfg;
+       struct rss_sz_msg       rss_size;
+       struct rss_cfg_msg      rss_cfg;
+       struct bgx_stats_msg    bgx_stats;
+       struct bgx_link_status  link_status;
+};
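+
+/* For illustration only (not part of the driver): composing a SET_MAC
+ * request; the 16-byte union is then written into the two shared
+ * mailbox registers, the interrupt-triggering word last.
+ */
+static inline void nic_mbx_set_mac_example(union nic_mbx *mbx, u8 vf_id,
+                                          const u8 *mac)
+{
+       memset(mbx, 0, sizeof(*mbx));
+       mbx->mac.msg = NIC_MBOX_MSG_SET_MAC;
+       mbx->mac.vf_id = vf_id;
+       ether_addr_copy(mbx->mac.mac_addr, mac);
+}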
+
+#define NIC_NODE_ID_MASK       0x03
+#define NIC_NODE_ID_SHIFT      44
+
+static inline int nic_get_node_id(struct pci_dev *pdev)
+{
+       u64 addr = pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM);
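+
+       /* The NUMA node is encoded in bits 45:44 of BAR0's bus address */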
+       return ((addr >> NIC_NODE_ID_SHIFT) & NIC_NODE_ID_MASK);
+}
+
+int nicvf_set_real_num_queues(struct net_device *netdev,
+                             int tx_queues, int rx_queues);
+int nicvf_open(struct net_device *netdev);
+int nicvf_stop(struct net_device *netdev);
+int nicvf_send_msg_to_pf(struct nicvf *vf, union nic_mbx *mbx);
+void nicvf_config_rss(struct nicvf *nic);
+void nicvf_set_rss_key(struct nicvf *nic);
+void nicvf_set_ethtool_ops(struct net_device *netdev);
+void nicvf_update_stats(struct nicvf *nic);
+void nicvf_update_lmac_stats(struct nicvf *nic);
+
+#endif /* NIC_H */
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
new file mode 100644 (file)
index 0000000..6e0c031
--- /dev/null
@@ -0,0 +1,932 @@
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+#include <linux/of.h>
+
+#include "nic_reg.h"
+#include "nic.h"
+#include "q_struct.h"
+#include "thunder_bgx.h"
+
+#define DRV_NAME       "thunder-nic"
+#define DRV_VERSION    "1.0"
+
+struct nicpf {
+       struct pci_dev          *pdev;
+       u8                      rev_id;
+       u8                      node;
+       unsigned int            flags;
+       u8                      num_vf_en;      /* No of VF enabled */
+       bool                    vf_enabled[MAX_NUM_VFS_SUPPORTED];
+       void __iomem            *reg_base;       /* Register start address */
+       struct pkind_cfg        pkind;
+#define        NIC_SET_VF_LMAC_MAP(bgx, lmac)  (((bgx & 0xF) << 4) | (lmac & 0xF))
+#define        NIC_GET_BGX_FROM_VF_LMAC_MAP(map)       ((map >> 4) & 0xF)
+#define        NIC_GET_LMAC_FROM_VF_LMAC_MAP(map)      (map & 0xF)
+       u8                      vf_lmac_map[MAX_LMAC];
+       struct delayed_work     dwork;
+       struct workqueue_struct *check_link;
+       u8                      link[MAX_LMAC];
+       u8                      duplex[MAX_LMAC];
+       u32                     speed[MAX_LMAC];
+       u16                     cpi_base[MAX_NUM_VFS_SUPPORTED];
+       u16                     rss_ind_tbl_size;
+       bool                    mbx_lock[MAX_NUM_VFS_SUPPORTED];
+
+       /* MSI-X */
+       bool                    msix_enabled;
+       u8                      num_vec;
+       struct msix_entry       msix_entries[NIC_PF_MSIX_VECTORS];
+       bool                    irq_allocated[NIC_PF_MSIX_VECTORS];
+};
+
+/* Supported devices */
+static const struct pci_device_id nic_id_table[] = {
+       { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_NIC_PF) },
+       { 0, }  /* end of table */
+};
+
+MODULE_AUTHOR("Sunil Goutham");
+MODULE_DESCRIPTION("Cavium Thunder NIC Physical Function Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, nic_id_table);
+
+/* The Cavium ThunderX network controller can *only* be found in SoCs
+ * containing the ThunderX ARM64 CPU implementation.  All accesses to the device
+ * registers on this platform are implicitly strongly ordered with respect
+ * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use
+ * with no memory barriers in this driver.  The readq()/writeq() functions add
+ * explicit ordering operations which in this case are redundant, and only
+ * add overhead.
+ */
+
+/* Register read/write APIs */
+static void nic_reg_write(struct nicpf *nic, u64 offset, u64 val)
+{
+       writeq_relaxed(val, nic->reg_base + offset);
+}
+
+static u64 nic_reg_read(struct nicpf *nic, u64 offset)
+{
+       return readq_relaxed(nic->reg_base + offset);
+}
+
+/* PF -> VF mailbox communication APIs */
+static void nic_enable_mbx_intr(struct nicpf *nic)
+{
+       /* Enable mailbox interrupt for all 128 VFs */
+       nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S, ~0ull);
+       nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S + sizeof(u64), ~0ull);
+}
+
+static void nic_clear_mbx_intr(struct nicpf *nic, int vf, int mbx_reg)
+{
+       nic_reg_write(nic, NIC_PF_MAILBOX_INT + (mbx_reg << 3), BIT_ULL(vf));
+}
+
+static u64 nic_get_mbx_addr(int vf)
+{
+       return NIC_PF_VF_0_127_MAILBOX_0_1 + (vf << NIC_VF_NUM_SHIFT);
+}
+
+/* Send a mailbox message to VF
+ * @vf: vf to which this message to be sent
+ * @mbx: Message to be sent
+ */
+static void nic_send_msg_to_vf(struct nicpf *nic, int vf, union nic_mbx *mbx)
+{
+       void __iomem *mbx_addr = nic->reg_base + nic_get_mbx_addr(vf);
+       u64 *msg = (u64 *)mbx;
+
+       /* In first revision HW, the mbox interrupt is triggered
+        * when the PF writes to MBOX(1); in later revisions, when
+        * the PF writes to MBOX(0).
+        */
+       if (nic->rev_id == 0) {
+               /* see the comment for nic_reg_write()/nic_reg_read()
+                * functions above
+                */
+               writeq_relaxed(msg[0], mbx_addr);
+               writeq_relaxed(msg[1], mbx_addr + 8);
+       } else {
+               writeq_relaxed(msg[1], mbx_addr + 8);
+               writeq_relaxed(msg[0], mbx_addr);
+       }
+}
+
+/* Responds to a VF's READY message with the VF's
+ * ID, node, MAC address, etc.
+ * @vf: VF which sent the READY message
+ */
+static void nic_mbx_send_ready(struct nicpf *nic, int vf)
+{
+       union nic_mbx mbx = {};
+       int bgx_idx, lmac;
+       const char *mac;
+
+       mbx.nic_cfg.msg = NIC_MBOX_MSG_READY;
+       mbx.nic_cfg.vf_id = vf;
+
+       mbx.nic_cfg.tns_mode = NIC_TNS_BYPASS_MODE;
+
+       bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+       lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+
+       mac = bgx_get_lmac_mac(nic->node, bgx_idx, lmac);
+       if (mac)
+               ether_addr_copy((u8 *)&mbx.nic_cfg.mac_addr, mac);
+
+       mbx.nic_cfg.node_id = nic->node;
+       nic_send_msg_to_vf(nic, vf, &mbx);
+}
+
+/* ACKs a VF's mailbox message
+ * @vf: VF to which the ACK is to be sent
+ */
+static void nic_mbx_send_ack(struct nicpf *nic, int vf)
+{
+       union nic_mbx mbx = {};
+
+       mbx.msg.msg = NIC_MBOX_MSG_ACK;
+       nic_send_msg_to_vf(nic, vf, &mbx);
+}
+
+/* NACKs a VF's mailbox message, indicating that the PF was
+ * unable to complete the requested action
+ * @vf: VF to which the NACK is to be sent
+ */
+static void nic_mbx_send_nack(struct nicpf *nic, int vf)
+{
+       union nic_mbx mbx = {};
+
+       mbx.msg.msg = NIC_MBOX_MSG_NACK;
+       nic_send_msg_to_vf(nic, vf, &mbx);
+}
+
+/* Flush all in flight receive packets to memory and
+ * bring down an active RQ
+ */
+static int nic_rcv_queue_sw_sync(struct nicpf *nic)
+{
+       u16 timeout = 0xFFFF;
+
+       nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x01);
+       /* Wait till sync cycle is finished */
+       while (timeout) {
+               if (nic_reg_read(nic, NIC_PF_SW_SYNC_RX_DONE) & 0x1)
+                       break;
+               timeout--;
+       }
+       nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x00);
+       if (!timeout) {
+               dev_err(&nic->pdev->dev, "Receive queue software sync failed\n");
+               return 1;
+       }
+       return 0;
+}
+
+/* Get BGX Rx/Tx stats and respond to VF's request */
+static void nic_get_bgx_stats(struct nicpf *nic, struct bgx_stats_msg *bgx)
+{
+       int bgx_idx, lmac;
+       union nic_mbx mbx = {};
+
+       bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[bgx->vf_id]);
+       lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[bgx->vf_id]);
+
+       mbx.bgx_stats.msg = NIC_MBOX_MSG_BGX_STATS;
+       mbx.bgx_stats.vf_id = bgx->vf_id;
+       mbx.bgx_stats.rx = bgx->rx;
+       mbx.bgx_stats.idx = bgx->idx;
+       if (bgx->rx)
+               mbx.bgx_stats.stats = bgx_get_rx_stats(nic->node, bgx_idx,
+                                                           lmac, bgx->idx);
+       else
+               mbx.bgx_stats.stats = bgx_get_tx_stats(nic->node, bgx_idx,
+                                                           lmac, bgx->idx);
+       nic_send_msg_to_vf(nic, bgx->vf_id, &mbx);
+}
+
+/* Update hardware min/max frame size */
+static int nic_update_hw_frs(struct nicpf *nic, int new_frs, int vf)
+{
+       if ((new_frs > NIC_HW_MAX_FRS) || (new_frs < NIC_HW_MIN_FRS)) {
+               dev_err(&nic->pdev->dev,
+                       "Invalid MTU setting from VF%d rejected, should be between %d and %d\n",
+                          vf, NIC_HW_MIN_FRS, NIC_HW_MAX_FRS);
+               return 1;
+       }
+       new_frs += ETH_HLEN;
+       if (new_frs <= nic->pkind.maxlen)
+               return 0;
+
+       nic->pkind.maxlen = new_frs;
+       nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG, *(u64 *)&nic->pkind);
+       return 0;
+}
+
+/* Set minimum transmit packet size */
+static void nic_set_tx_pkt_pad(struct nicpf *nic, int size)
+{
+       int lmac;
+       u64 lmac_cfg;
+
+       /* Max value that can be set is 60 */
+       if (size > 60)
+               size = 60;
+
+       for (lmac = 0; lmac < (MAX_BGX_PER_CN88XX * MAX_LMAC_PER_BGX); lmac++) {
+               lmac_cfg = nic_reg_read(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3));
+               lmac_cfg &= ~(0xF << 2);
+               lmac_cfg |= ((size / 4) << 2);
+               nic_reg_write(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3), lmac_cfg);
+       }
+}
+
+/* Function to check number of LMACs present and set VF::LMAC mapping.
+ * Mapping will be used while initializing channels.
+ */
+static void nic_set_lmac_vf_mapping(struct nicpf *nic)
+{
+       unsigned bgx_map = bgx_get_map(nic->node);
+       int bgx, next_bgx_lmac = 0;
+       int lmac, lmac_cnt = 0;
+       u64 lmac_credit;
+
+       nic->num_vf_en = 0;
+
+       for (bgx = 0; bgx < NIC_MAX_BGX; bgx++) {
+               if (!(bgx_map & (1 << bgx)))
+                       continue;
+               lmac_cnt = bgx_get_lmac_count(nic->node, bgx);
+               for (lmac = 0; lmac < lmac_cnt; lmac++)
+                       nic->vf_lmac_map[next_bgx_lmac++] =
+                                               NIC_SET_VF_LMAC_MAP(bgx, lmac);
+               nic->num_vf_en += lmac_cnt;
+
+               /* Program LMAC credits */
+               lmac_credit = (1ull << 1); /* channel credit enable */
+               lmac_credit |= (0x1ff << 2); /* Max outstanding pkt count */
+               /* 48KB BGX Tx buffer size, each unit is of size 16bytes */
+               lmac_credit |= (((((48 * 1024) / lmac_cnt) -
+                               NIC_HW_MAX_FRS) / 16) << 12);
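+               /* e.g. with 4 LMACs: ((48 * 1024 / 4) - 9200) / 16 = 193 */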
+               lmac = bgx * MAX_LMAC_PER_BGX;
+               for (; lmac < lmac_cnt + (bgx * MAX_LMAC_PER_BGX); lmac++)
+                       nic_reg_write(nic,
+                                     NIC_PF_LMAC_0_7_CREDIT + (lmac * 8),
+                                     lmac_credit);
+       }
+}
+
+#define BGX0_BLOCK 8
+#define BGX1_BLOCK 9
+
+static void nic_init_hw(struct nicpf *nic)
+{
+       int i;
+
+       /* Reset NIC, in case the driver is repeatedly inserted and removed */
+       nic_reg_write(nic, NIC_PF_SOFT_RESET, 1);
+
+       /* Enable NIC HW block */
+       nic_reg_write(nic, NIC_PF_CFG, 0x3);
+
+       /* Enable backpressure */
+       nic_reg_write(nic, NIC_PF_BP_CFG, (1ULL << 6) | 0x03);
+
+       /* Disable TNS mode on both interfaces */
+       nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG,
+                     (NIC_TNS_BYPASS_MODE << 7) | BGX0_BLOCK);
+       nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8),
+                     (NIC_TNS_BYPASS_MODE << 7) | BGX1_BLOCK);
+       nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG,
+                     (1ULL << 63) | BGX0_BLOCK);
+       nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG + (1 << 8),
+                     (1ULL << 63) | BGX1_BLOCK);
+
+       /* PKIND configuration */
+       nic->pkind.minlen = 0;
+       nic->pkind.maxlen = NIC_HW_MAX_FRS + ETH_HLEN;
+       nic->pkind.lenerr_en = 1;
+       nic->pkind.rx_hdr = 0;
+       nic->pkind.hdr_sl = 0;
+
+       for (i = 0; i < NIC_MAX_PKIND; i++)
+               nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG | (i << 3),
+                             *(u64 *)&nic->pkind);
+
+       nic_set_tx_pkt_pad(nic, NIC_HW_MIN_FRS);
+
+       /* Timer config */
+       nic_reg_write(nic, NIC_PF_INTR_TIMER_CFG, NICPF_CLK_PER_INT_TICK);
+}
+
+/* Channel parse index configuration */
+static void nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg)
+{
+       u32 vnic, bgx, lmac, chan;
+       u32 padd, cpi_count = 0;
+       u64 cpi_base, cpi, rssi_base, rssi;
+       u8  qset, rq_idx = 0;
+
+       vnic = cfg->vf_id;
+       bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
+       lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
+
+       chan = (lmac * MAX_BGX_CHANS_PER_LMAC) + (bgx * NIC_CHANS_PER_INF);
+       cpi_base = (lmac * NIC_MAX_CPI_PER_LMAC) + (bgx * NIC_CPI_PER_BGX);
+       rssi_base = (lmac * nic->rss_ind_tbl_size) + (bgx * NIC_RSSI_PER_BGX);
+
+       /* Rx channel configuration */
+       nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_BP_CFG | (chan << 3),
+                     (1ull << 63) | (vnic << 0));
+       nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_CFG | (chan << 3),
+                     ((u64)cfg->cpi_alg << 62) | (cpi_base << 48));
+
+       if (cfg->cpi_alg == CPI_ALG_NONE)
+               cpi_count = 1;
+       else if (cfg->cpi_alg == CPI_ALG_VLAN) /* 3 bits of PCP */
+               cpi_count = 8;
+       else if (cfg->cpi_alg == CPI_ALG_VLAN16) /* 3 bits PCP + DEI */
+               cpi_count = 16;
+       else if (cfg->cpi_alg == CPI_ALG_DIFF) /* 6bits DSCP */
+               cpi_count = NIC_MAX_CPI_PER_LMAC;
+
+       /* RSS Qset, Qidx mapping */
+       qset = cfg->vf_id;
+       rssi = rssi_base;
+       for (; rssi < (rssi_base + cfg->rq_cnt); rssi++) {
+               nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3),
+                             (qset << 3) | rq_idx);
+               rq_idx++;
+       }
+
+       rssi = 0;
+       cpi = cpi_base;
+       for (; cpi < (cpi_base + cpi_count); cpi++) {
+               /* Determine port to channel adder */
+               if (cfg->cpi_alg != CPI_ALG_DIFF)
+                       padd = cpi % cpi_count;
+               else
+                       padd = cpi % 8; /* 3 bits CS out of 6bits DSCP */
+
+               /* Leave RSS_SIZE as '0' to disable RSS */
+               nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
+                             (vnic << 24) | (padd << 16) | (rssi_base + rssi));
+
+               if ((rssi + 1) >= cfg->rq_cnt)
+                       continue;
+
+               if (cfg->cpi_alg == CPI_ALG_VLAN)
+                       rssi++;
+               else if (cfg->cpi_alg == CPI_ALG_VLAN16)
+                       rssi = ((cpi - cpi_base) & 0xe) >> 1;
+               else if (cfg->cpi_alg == CPI_ALG_DIFF)
+                       rssi = ((cpi - cpi_base) & 0x38) >> 3;
+       }
+       nic->cpi_base[cfg->vf_id] = cpi_base;
+}
+
+/* Responds to a VF with its RSS indirection table size */
+static void nic_send_rss_size(struct nicpf *nic, int vf)
+{
+       union nic_mbx mbx = {};
+
+       mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE;
+       mbx.rss_size.ind_tbl_size = nic->rss_ind_tbl_size;
+       nic_send_msg_to_vf(nic, vf, &mbx);
+}
+
+/* Receive side scaling configuration
+ * configure:
+ * - RSS index
+ * - indirection table, i.e. hash::RQ mapping
+ * - number of hash bits to consider
+ */
+static void nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg)
+{
+       u8  qset, idx = 0;
+       u64 cpi_cfg, cpi_base, rssi_base, rssi;
+
+       cpi_base = nic->cpi_base[cfg->vf_id];
+       cpi_cfg = nic_reg_read(nic, NIC_PF_CPI_0_2047_CFG | (cpi_base << 3));
+       rssi_base = (cpi_cfg & 0x0FFF) + cfg->tbl_offset;
+
+       rssi = rssi_base;
+       qset = cfg->vf_id;
+
+       for (; rssi < (rssi_base + cfg->tbl_len); rssi++) {
+               nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3),
+                             (qset << 3) | (cfg->ind_tbl[idx] & 0x7));
+               idx++;
+       }
+
+       cpi_cfg &= ~(0xFULL << 20);
+       cpi_cfg |= (cfg->hash_bits << 20);
+       nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi_base << 3), cpi_cfg);
+}
+
+/* 4 level transmit side scheduler configuration
+ * for TNS bypass mode
+ *
+ * Sample configuration for SQ0
+ * VNIC0-SQ0 -> TL4(0)   -> TL3[0]   -> TL2[0]  -> TL1[0] -> BGX0
+ * VNIC1-SQ0 -> TL4(8)   -> TL3[2]   -> TL2[0]  -> TL1[0] -> BGX0
+ * VNIC2-SQ0 -> TL4(16)  -> TL3[4]   -> TL2[1]  -> TL1[0] -> BGX0
+ * VNIC3-SQ0 -> TL4(24)  -> TL3[6]   -> TL2[1]  -> TL1[0] -> BGX0
+ * VNIC4-SQ0 -> TL4(512) -> TL3[128] -> TL2[32] -> TL1[1] -> BGX1
+ * VNIC5-SQ0 -> TL4(520) -> TL3[130] -> TL2[32] -> TL1[1] -> BGX1
+ * VNIC6-SQ0 -> TL4(528) -> TL3[132] -> TL2[33] -> TL1[1] -> BGX1
+ * VNIC7-SQ0 -> TL4(536) -> TL3[134] -> TL2[33] -> TL1[1] -> BGX1
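+ *
+ * e.g. VNIC4-SQ0: bgx = 1, lmac = 0, so tl4 = 0 * 8 + 1 * 512 + 0 = 512,
+ * tl3 = 512 / 4 = 128, tl2 = 128 >> 2 = 32, matching the table above.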
+ */
+static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic, u8 sq_idx)
+{
+       u32 bgx, lmac, chan;
+       u32 tl2, tl3, tl4;
+       u32 rr_quantum;
+
+       bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
+       lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
+       /* 24 bytes for FCS, IPG and preamble */
+       rr_quantum = ((NIC_HW_MAX_FRS + 24) / 4);
+
+       tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX);
+       tl4 += sq_idx;
+       tl3 = tl4 / (NIC_MAX_TL4 / NIC_MAX_TL3);
+       nic_reg_write(nic, NIC_PF_QSET_0_127_SQ_0_7_CFG2 |
+                     ((u64)vnic << NIC_QS_ID_SHIFT) |
+                     ((u32)sq_idx << NIC_Q_NUM_SHIFT), tl4);
+       nic_reg_write(nic, NIC_PF_TL4_0_1023_CFG | (tl4 << 3),
+                     ((u64)vnic << 27) | ((u32)sq_idx << 24) | rr_quantum);
+
+       nic_reg_write(nic, NIC_PF_TL3_0_255_CFG | (tl3 << 3), rr_quantum);
+       chan = (lmac * MAX_BGX_CHANS_PER_LMAC) + (bgx * NIC_CHANS_PER_INF);
+       nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), chan);
+       /* Enable backpressure on the channel */
+       nic_reg_write(nic, NIC_PF_CHAN_0_255_TX_CFG | (chan << 3), 1);
+
+       tl2 = tl3 >> 2;
+       nic_reg_write(nic, NIC_PF_TL3A_0_63_CFG | (tl2 << 3), tl2);
+       nic_reg_write(nic, NIC_PF_TL2_0_63_CFG | (tl2 << 3), rr_quantum);
+       /* No priorities as of now */
+       nic_reg_write(nic, NIC_PF_TL2_0_63_PRI | (tl2 << 3), 0x00);
+}
+
+/* Interrupt handler to handle mailbox messages from VFs */
+static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
+{
+       union nic_mbx mbx = {};
+       u64 *mbx_data;
+       u64 mbx_addr;
+       u64 reg_addr;
+       int bgx, lmac;
+       int i;
+       int ret = 0;
+
+       nic->mbx_lock[vf] = true;
+
+       mbx_addr = nic_get_mbx_addr(vf);
+       mbx_data = (u64 *)&mbx;
+
+       for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
+               *mbx_data = nic_reg_read(nic, mbx_addr);
+               mbx_data++;
+               mbx_addr += sizeof(u64);
+       }
+
+       dev_dbg(&nic->pdev->dev, "%s: Mailbox msg %d from VF%d\n",
+               __func__, mbx.msg.msg, vf);
+       switch (mbx.msg.msg) {
+       case NIC_MBOX_MSG_READY:
+               nic_mbx_send_ready(nic, vf);
+               nic->link[vf] = 0;
+               nic->duplex[vf] = 0;
+               nic->speed[vf] = 0;
+               ret = 1;
+               break;
+       case NIC_MBOX_MSG_QS_CFG:
+               reg_addr = NIC_PF_QSET_0_127_CFG |
+                          (mbx.qs.num << NIC_QS_ID_SHIFT);
+               nic_reg_write(nic, reg_addr, mbx.qs.cfg);
+               break;
+       case NIC_MBOX_MSG_RQ_CFG:
+               reg_addr = NIC_PF_QSET_0_127_RQ_0_7_CFG |
+                          (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
+                          (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
+               nic_reg_write(nic, reg_addr, mbx.rq.cfg);
+               break;
+       case NIC_MBOX_MSG_RQ_BP_CFG:
+               reg_addr = NIC_PF_QSET_0_127_RQ_0_7_BP_CFG |
+                          (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
+                          (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
+               nic_reg_write(nic, reg_addr, mbx.rq.cfg);
+               break;
+       case NIC_MBOX_MSG_RQ_SW_SYNC:
+               ret = nic_rcv_queue_sw_sync(nic);
+               break;
+       case NIC_MBOX_MSG_RQ_DROP_CFG:
+               reg_addr = NIC_PF_QSET_0_127_RQ_0_7_DROP_CFG |
+                          (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
+                          (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
+               nic_reg_write(nic, reg_addr, mbx.rq.cfg);
+               break;
+       case NIC_MBOX_MSG_SQ_CFG:
+               reg_addr = NIC_PF_QSET_0_127_SQ_0_7_CFG |
+                          (mbx.sq.qs_num << NIC_QS_ID_SHIFT) |
+                          (mbx.sq.sq_num << NIC_Q_NUM_SHIFT);
+               nic_reg_write(nic, reg_addr, mbx.sq.cfg);
+               nic_tx_channel_cfg(nic, mbx.qs.num, mbx.sq.sq_num);
+               break;
+       case NIC_MBOX_MSG_SET_MAC:
+               lmac = mbx.mac.vf_id;
+               bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
+               lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
+               bgx_set_lmac_mac(nic->node, bgx, lmac, mbx.mac.mac_addr);
+               break;
+       case NIC_MBOX_MSG_SET_MAX_FRS:
+               ret = nic_update_hw_frs(nic, mbx.frs.max_frs,
+                                       mbx.frs.vf_id);
+               break;
+       case NIC_MBOX_MSG_CPI_CFG:
+               nic_config_cpi(nic, &mbx.cpi_cfg);
+               break;
+       case NIC_MBOX_MSG_RSS_SIZE:
+               nic_send_rss_size(nic, vf);
+               goto unlock;
+       case NIC_MBOX_MSG_RSS_CFG:
+       case NIC_MBOX_MSG_RSS_CFG_CONT:
+               nic_config_rss(nic, &mbx.rss_cfg);
+               break;
+       case NIC_MBOX_MSG_CFG_DONE:
+               /* Last message of VF config msg sequence */
+               nic->vf_enabled[vf] = true;
+               goto unlock;
+       case NIC_MBOX_MSG_SHUTDOWN:
+               /* First msg in VF teardown sequence */
+               nic->vf_enabled[vf] = false;
+               break;
+       case NIC_MBOX_MSG_BGX_STATS:
+               nic_get_bgx_stats(nic, &mbx.bgx_stats);
+               goto unlock;
+       default:
+               dev_err(&nic->pdev->dev,
+                       "Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg);
+               break;
+       }
+
+       if (!ret)
+               nic_mbx_send_ack(nic, vf);
+       else if (mbx.msg.msg != NIC_MBOX_MSG_READY)
+               nic_mbx_send_nack(nic, vf);
+unlock:
+       nic->mbx_lock[vf] = false;
+}
+
+static void nic_mbx_intr_handler(struct nicpf *nic, int mbx)
+{
+       u64 intr;
+       u8  vf, vf_per_mbx_reg = 64;
+
+       intr = nic_reg_read(nic, NIC_PF_MAILBOX_INT + (mbx << 3));
+       dev_dbg(&nic->pdev->dev, "PF interrupt Mbox%d 0x%llx\n", mbx, intr);
+       for (vf = 0; vf < vf_per_mbx_reg; vf++) {
+               if (intr & (1ULL << vf)) {
+                       dev_dbg(&nic->pdev->dev, "Intr from VF %d\n",
+                               vf + (mbx * vf_per_mbx_reg));
+                       if ((vf + (mbx * vf_per_mbx_reg)) >= nic->num_vf_en)
+                               break;
+                       nic_handle_mbx_intr(nic, vf + (mbx * vf_per_mbx_reg));
+                       nic_clear_mbx_intr(nic, vf, mbx);
+               }
+       }
+}
+
+static irqreturn_t nic_mbx0_intr_handler(int irq, void *nic_irq)
+{
+       struct nicpf *nic = (struct nicpf *)nic_irq;
+
+       nic_mbx_intr_handler(nic, 0);
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t nic_mbx1_intr_handler(int irq, void *nic_irq)
+{
+       struct nicpf *nic = (struct nicpf *)nic_irq;
+
+       nic_mbx_intr_handler(nic, 1);
+
+       return IRQ_HANDLED;
+}
+
+static int nic_enable_msix(struct nicpf *nic)
+{
+       int i, ret;
+
+       nic->num_vec = NIC_PF_MSIX_VECTORS;
+
+       for (i = 0; i < nic->num_vec; i++)
+               nic->msix_entries[i].entry = i;
+
+       ret = pci_enable_msix(nic->pdev, nic->msix_entries, nic->num_vec);
+       if (ret) {
+               dev_err(&nic->pdev->dev,
+                       "Request for #%d msix vectors failed\n",
+                          nic->num_vec);
+               return ret;
+       }
+
+       nic->msix_enabled = 1;
+       return 0;
+}
+
+static void nic_disable_msix(struct nicpf *nic)
+{
+       if (nic->msix_enabled) {
+               pci_disable_msix(nic->pdev);
+               nic->msix_enabled = 0;
+               nic->num_vec = 0;
+       }
+}
+
+static void nic_free_all_interrupts(struct nicpf *nic)
+{
+       int irq;
+
+       for (irq = 0; irq < nic->num_vec; irq++) {
+               if (nic->irq_allocated[irq])
+                       free_irq(nic->msix_entries[irq].vector, nic);
+               nic->irq_allocated[irq] = false;
+       }
+}
+
+static int nic_register_interrupts(struct nicpf *nic)
+{
+       int ret;
+
+       /* Enable MSI-X */
+       ret = nic_enable_msix(nic);
+       if (ret)
+               return ret;
+
+       /* Register mailbox interrupt handlers */
+       ret = request_irq(nic->msix_entries[NIC_PF_INTR_ID_MBOX0].vector,
+                         nic_mbx0_intr_handler, 0, "NIC Mbox0", nic);
+       if (ret)
+               goto fail;
+
+       nic->irq_allocated[NIC_PF_INTR_ID_MBOX0] = true;
+
+       ret = request_irq(nic->msix_entries[NIC_PF_INTR_ID_MBOX1].vector,
+                         nic_mbx1_intr_handler, 0, "NIC Mbox1", nic);
+       if (ret)
+               goto fail;
+
+       nic->irq_allocated[NIC_PF_INTR_ID_MBOX1] = true;
+
+       /* Enable mailbox interrupt */
+       nic_enable_mbx_intr(nic);
+       return 0;
+
+fail:
+       dev_err(&nic->pdev->dev, "Request irq failed\n");
+       nic_free_all_interrupts(nic);
+       return ret;
+}
+
+static void nic_unregister_interrupts(struct nicpf *nic)
+{
+       nic_free_all_interrupts(nic);
+       nic_disable_msix(nic);
+}
+
+static int nic_sriov_init(struct pci_dev *pdev, struct nicpf *nic)
+{
+       int pos = 0;
+       int err;
+       u16 total_vf_cnt;
+
+       pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+       if (!pos) {
+               dev_err(&pdev->dev, "SRIOV capability is not found in PCIe config space\n");
+               return -ENODEV;
+       }
+
+       pci_read_config_word(pdev, (pos + PCI_SRIOV_TOTAL_VF), &total_vf_cnt);
+       if (total_vf_cnt < nic->num_vf_en)
+               nic->num_vf_en = total_vf_cnt;
+
+       if (!total_vf_cnt)
+               return 0;
+
+       err = pci_enable_sriov(pdev, nic->num_vf_en);
+       if (err) {
+               dev_err(&pdev->dev, "SRIOV enable failed, num VF is %d\n",
+                       nic->num_vf_en);
+               nic->num_vf_en = 0;
+               return err;
+       }
+
+       dev_info(&pdev->dev, "SRIOV enabled, number of VF available %d\n",
+                nic->num_vf_en);
+
+       nic->flags |= NIC_SRIOV_ENABLED;
+       return 0;
+}
+
+/* Poll for BGX LMAC link status and update the corresponding VF
+ * if there is a change. Valid only when an internal L2 switch
+ * is not present; otherwise the VF link is always treated as up.
+ */
+static void nic_poll_for_link(struct work_struct *work)
+{
+       union nic_mbx mbx = {};
+       struct nicpf *nic;
+       struct bgx_link_status link;
+       u8 vf, bgx, lmac;
+
+       nic = container_of(work, struct nicpf, dwork.work);
+
+       mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE;
+
+       for (vf = 0; vf < nic->num_vf_en; vf++) {
+               /* Poll only if VF is UP */
+               if (!nic->vf_enabled[vf])
+                       continue;
+
+               /* Get BGX, LMAC indices for the VF */
+               bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+               lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+               /* Get interface link status */
+               bgx_get_lmac_link_state(nic->node, bgx, lmac, &link);
+
+               /* Inform VF only if link status changed */
+               if (nic->link[vf] == link.link_up)
+                       continue;
+
+               if (!nic->mbx_lock[vf]) {
+                       nic->link[vf] = link.link_up;
+                       nic->duplex[vf] = link.duplex;
+                       nic->speed[vf] = link.speed;
+
+                       /* Send a mbox message to VF with current link status */
+                       mbx.link_status.link_up = link.link_up;
+                       mbx.link_status.duplex = link.duplex;
+                       mbx.link_status.speed = link.speed;
+                       nic_send_msg_to_vf(nic, vf, &mbx);
+               }
+       }
+       queue_delayed_work(nic->check_link, &nic->dwork, HZ * 2);
+}
+
+static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+       struct device *dev = &pdev->dev;
+       struct nicpf *nic;
+       int    err;
+
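+       /* Mailbox messages must fit in the two 64-bit mailbox registers */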
+       BUILD_BUG_ON(sizeof(union nic_mbx) > 16);
+
+       nic = devm_kzalloc(dev, sizeof(*nic), GFP_KERNEL);
+       if (!nic)
+               return -ENOMEM;
+
+       pci_set_drvdata(pdev, nic);
+
+       nic->pdev = pdev;
+
+       err = pci_enable_device(pdev);
+       if (err) {
+               dev_err(dev, "Failed to enable PCI device\n");
+               pci_set_drvdata(pdev, NULL);
+               return err;
+       }
+
+       err = pci_request_regions(pdev, DRV_NAME);
+       if (err) {
+               dev_err(dev, "PCI request regions failed 0x%x\n", err);
+               goto err_disable_device;
+       }
+
+       err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
+       if (err) {
+               dev_err(dev, "Unable to get usable DMA configuration\n");
+               goto err_release_regions;
+       }
+
+       err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
+       if (err) {
+               dev_err(dev, "Unable to get 48-bit DMA for consistent allocations\n");
+               goto err_release_regions;
+       }
+
+       /* MAP PF's configuration registers */
+       nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
+       if (!nic->reg_base) {
+               dev_err(dev, "Cannot map config register space, aborting\n");
+               err = -ENOMEM;
+               goto err_release_regions;
+       }
+
+       pci_read_config_byte(pdev, PCI_REVISION_ID, &nic->rev_id);
+
+       nic->node = nic_get_node_id(pdev);
+
+       nic_set_lmac_vf_mapping(nic);
+
+       /* Initialize hardware */
+       nic_init_hw(nic);
+
+       /* Set RSS TBL size for each VF */
+       nic->rss_ind_tbl_size = NIC_MAX_RSS_IDR_TBL_SIZE;
+
+       /* Register interrupts */
+       err = nic_register_interrupts(nic);
+       if (err)
+               goto err_release_regions;
+
+       /* Configure SRIOV */
+       err = nic_sriov_init(pdev, nic);
+       if (err)
+               goto err_unregister_interrupts;
+
+       /* Register a physical link status poll fn() */
+       nic->check_link = alloc_workqueue("check_link_status",
+                                         WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
+       if (!nic->check_link) {
+               err = -ENOMEM;
+               goto err_disable_sriov;
+       }
+
+       INIT_DELAYED_WORK(&nic->dwork, nic_poll_for_link);
+       queue_delayed_work(nic->check_link, &nic->dwork, 0);
+
+       return 0;
+
+err_disable_sriov:
+       if (nic->flags & NIC_SRIOV_ENABLED)
+               pci_disable_sriov(pdev);
+err_unregister_interrupts:
+       nic_unregister_interrupts(nic);
+err_release_regions:
+       pci_release_regions(pdev);
+err_disable_device:
+       pci_disable_device(pdev);
+       pci_set_drvdata(pdev, NULL);
+       return err;
+}
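nic_probe() uses the standard goto-unwind idiom: every acquired resource gets
a matching error label, and a failure releases everything acquired so far in
reverse order. Reduced to its shape (hypothetical driver name, sketch only):

static int demo_probe(struct pci_dev *pdev)
{
        int err;

        err = pci_enable_device(pdev);
        if (err)
                return err;

        err = pci_request_regions(pdev, "demo");
        if (err)
                goto err_disable_device;

        /* ... further acquisitions, each with its own unwind label ... */
        return 0;

err_disable_device:
        pci_disable_device(pdev);
        return err;
}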
+
+static void nic_remove(struct pci_dev *pdev)
+{
+       struct nicpf *nic = pci_get_drvdata(pdev);
+
+       if (nic->flags & NIC_SRIOV_ENABLED)
+               pci_disable_sriov(pdev);
+
+       if (nic->check_link) {
+               /* Destroy work Queue */
+               cancel_delayed_work(&nic->dwork);
+               flush_workqueue(nic->check_link);
+               destroy_workqueue(nic->check_link);
+       }
+
+       nic_unregister_interrupts(nic);
+       pci_release_regions(pdev);
+       pci_disable_device(pdev);
+       pci_set_drvdata(pdev, NULL);
+}
+
+static struct pci_driver nic_driver = {
+       .name = DRV_NAME,
+       .id_table = nic_id_table,
+       .probe = nic_probe,
+       .remove = nic_remove,
+};
+
+static int __init nic_init_module(void)
+{
+       pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);
+
+       return pci_register_driver(&nic_driver);
+}
+
+static void __exit nic_cleanup_module(void)
+{
+       pci_unregister_driver(&nic_driver);
+}
+
+module_init(nic_init_module);
+module_exit(nic_cleanup_module);
diff --git a/drivers/net/ethernet/cavium/thunder/nic_reg.h b/drivers/net/ethernet/cavium/thunder/nic_reg.h
new file mode 100644 (file)
index 0000000..58197bb
--- /dev/null
@@ -0,0 +1,213 @@
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef NIC_REG_H
+#define NIC_REG_H
+
+#define   NIC_PF_REG_COUNT                     29573
+#define   NIC_VF_REG_COUNT                     249
+
+/* Physical function register offsets */
+#define   NIC_PF_CFG                           (0x0000)
+#define   NIC_PF_STATUS                                (0x0010)
+#define   NIC_PF_INTR_TIMER_CFG                        (0x0030)
+#define   NIC_PF_BIST_STATUS                   (0x0040)
+#define   NIC_PF_SOFT_RESET                    (0x0050)
+#define   NIC_PF_TCP_TIMER                     (0x0060)
+#define   NIC_PF_BP_CFG                                (0x0080)
+#define   NIC_PF_RRM_CFG                       (0x0088)
+#define   NIC_PF_CQM_CF                                (0x00A0)
+#define   NIC_PF_CNM_CF                                (0x00A8)
+#define   NIC_PF_CNM_STATUS                    (0x00B0)
+#define   NIC_PF_CQ_AVG_CFG                    (0x00C0)
+#define   NIC_PF_RRM_AVG_CFG                   (0x00C8)
+#define   NIC_PF_INTF_0_1_SEND_CFG             (0x0200)
+#define   NIC_PF_INTF_0_1_BP_CFG               (0x0208)
+#define   NIC_PF_INTF_0_1_BP_DIS_0_1           (0x0210)
+#define   NIC_PF_INTF_0_1_BP_SW_0_1            (0x0220)
+#define   NIC_PF_RBDR_BP_STATE_0_3             (0x0240)
+#define   NIC_PF_MAILBOX_INT                   (0x0410)
+#define   NIC_PF_MAILBOX_INT_W1S               (0x0430)
+#define   NIC_PF_MAILBOX_ENA_W1C               (0x0450)
+#define   NIC_PF_MAILBOX_ENA_W1S               (0x0470)
+#define   NIC_PF_RX_ETYPE_0_7                  (0x0500)
+#define   NIC_PF_PKIND_0_15_CFG                        (0x0600)
+#define   NIC_PF_ECC0_FLIP0                    (0x1000)
+#define   NIC_PF_ECC1_FLIP0                    (0x1008)
+#define   NIC_PF_ECC2_FLIP0                    (0x1010)
+#define   NIC_PF_ECC3_FLIP0                    (0x1018)
+#define   NIC_PF_ECC0_FLIP1                    (0x1080)
+#define   NIC_PF_ECC1_FLIP1                    (0x1088)
+#define   NIC_PF_ECC2_FLIP1                    (0x1090)
+#define   NIC_PF_ECC3_FLIP1                    (0x1098)
+#define   NIC_PF_ECC0_CDIS                     (0x1100)
+#define   NIC_PF_ECC1_CDIS                     (0x1108)
+#define   NIC_PF_ECC2_CDIS                     (0x1110)
+#define   NIC_PF_ECC3_CDIS                     (0x1118)
+#define   NIC_PF_BIST0_STATUS                  (0x1280)
+#define   NIC_PF_BIST1_STATUS                  (0x1288)
+#define   NIC_PF_BIST2_STATUS                  (0x1290)
+#define   NIC_PF_BIST3_STATUS                  (0x1298)
+#define   NIC_PF_ECC0_SBE_INT                  (0x2000)
+#define   NIC_PF_ECC0_SBE_INT_W1S              (0x2008)
+#define   NIC_PF_ECC0_SBE_ENA_W1C              (0x2010)
+#define   NIC_PF_ECC0_SBE_ENA_W1S              (0x2018)
+#define   NIC_PF_ECC0_DBE_INT                  (0x2100)
+#define   NIC_PF_ECC0_DBE_INT_W1S              (0x2108)
+#define   NIC_PF_ECC0_DBE_ENA_W1C              (0x2110)
+#define   NIC_PF_ECC0_DBE_ENA_W1S              (0x2118)
+#define   NIC_PF_ECC1_SBE_INT                  (0x2200)
+#define   NIC_PF_ECC1_SBE_INT_W1S              (0x2208)
+#define   NIC_PF_ECC1_SBE_ENA_W1C              (0x2210)
+#define   NIC_PF_ECC1_SBE_ENA_W1S              (0x2218)
+#define   NIC_PF_ECC1_DBE_INT                  (0x2300)
+#define   NIC_PF_ECC1_DBE_INT_W1S              (0x2308)
+#define   NIC_PF_ECC1_DBE_ENA_W1C              (0x2310)
+#define   NIC_PF_ECC1_DBE_ENA_W1S              (0x2318)
+#define   NIC_PF_ECC2_SBE_INT                  (0x2400)
+#define   NIC_PF_ECC2_SBE_INT_W1S              (0x2408)
+#define   NIC_PF_ECC2_SBE_ENA_W1C              (0x2410)
+#define   NIC_PF_ECC2_SBE_ENA_W1S              (0x2418)
+#define   NIC_PF_ECC2_DBE_INT                  (0x2500)
+#define   NIC_PF_ECC2_DBE_INT_W1S              (0x2508)
+#define   NIC_PF_ECC2_DBE_ENA_W1C              (0x2510)
+#define   NIC_PF_ECC2_DBE_ENA_W1S              (0x2518)
+#define   NIC_PF_ECC3_SBE_INT                  (0x2600)
+#define   NIC_PF_ECC3_SBE_INT_W1S              (0x2608)
+#define   NIC_PF_ECC3_SBE_ENA_W1C              (0x2610)
+#define   NIC_PF_ECC3_SBE_ENA_W1S              (0x2618)
+#define   NIC_PF_ECC3_DBE_INT                  (0x2700)
+#define   NIC_PF_ECC3_DBE_INT_W1S              (0x2708)
+#define   NIC_PF_ECC3_DBE_ENA_W1C              (0x2710)
+#define   NIC_PF_ECC3_DBE_ENA_W1S              (0x2718)
+#define   NIC_PF_CPI_0_2047_CFG                        (0x200000)
+#define   NIC_PF_RSSI_0_4097_RQ                        (0x220000)
+#define   NIC_PF_LMAC_0_7_CFG                  (0x240000)
+#define   NIC_PF_LMAC_0_7_SW_XOFF              (0x242000)
+#define   NIC_PF_LMAC_0_7_CREDIT               (0x244000)
+#define   NIC_PF_CHAN_0_255_TX_CFG             (0x400000)
+#define   NIC_PF_CHAN_0_255_RX_CFG             (0x420000)
+#define   NIC_PF_CHAN_0_255_SW_XOFF            (0x440000)
+#define   NIC_PF_CHAN_0_255_CREDIT             (0x460000)
+#define   NIC_PF_CHAN_0_255_RX_BP_CFG          (0x480000)
+#define   NIC_PF_SW_SYNC_RX                    (0x490000)
+#define   NIC_PF_SW_SYNC_RX_DONE               (0x490008)
+#define   NIC_PF_TL2_0_63_CFG                  (0x500000)
+#define   NIC_PF_TL2_0_63_PRI                  (0x520000)
+#define   NIC_PF_TL2_0_63_SH_STATUS            (0x580000)
+#define   NIC_PF_TL3A_0_63_CFG                 (0x5F0000)
+#define   NIC_PF_TL3_0_255_CFG                 (0x600000)
+#define   NIC_PF_TL3_0_255_CHAN                        (0x620000)
+#define   NIC_PF_TL3_0_255_PIR                 (0x640000)
+#define   NIC_PF_TL3_0_255_SW_XOFF             (0x660000)
+#define   NIC_PF_TL3_0_255_CNM_RATE            (0x680000)
+#define   NIC_PF_TL3_0_255_SH_STATUS           (0x6A0000)
+#define   NIC_PF_TL4A_0_255_CFG                        (0x6F0000)
+#define   NIC_PF_TL4_0_1023_CFG                        (0x800000)
+#define   NIC_PF_TL4_0_1023_SW_XOFF            (0x820000)
+#define   NIC_PF_TL4_0_1023_SH_STATUS          (0x840000)
+#define   NIC_PF_TL4A_0_1023_CNM_RATE          (0x880000)
+#define   NIC_PF_TL4A_0_1023_CNM_STATUS                (0x8A0000)
+#define   NIC_PF_VF_0_127_MAILBOX_0_1          (0x20002030)
+#define   NIC_PF_VNIC_0_127_TX_STAT_0_4                (0x20004000)
+#define   NIC_PF_VNIC_0_127_RX_STAT_0_13       (0x20004100)
+#define   NIC_PF_QSET_0_127_LOCK_0_15          (0x20006000)
+#define   NIC_PF_QSET_0_127_CFG                        (0x20010000)
+#define   NIC_PF_QSET_0_127_RQ_0_7_CFG         (0x20010400)
+#define   NIC_PF_QSET_0_127_RQ_0_7_DROP_CFG    (0x20010420)
+#define   NIC_PF_QSET_0_127_RQ_0_7_BP_CFG      (0x20010500)
+#define   NIC_PF_QSET_0_127_RQ_0_7_STAT_0_1    (0x20010600)
+#define   NIC_PF_QSET_0_127_SQ_0_7_CFG         (0x20010C00)
+#define   NIC_PF_QSET_0_127_SQ_0_7_CFG2                (0x20010C08)
+#define   NIC_PF_QSET_0_127_SQ_0_7_STAT_0_1    (0x20010D00)
+
+#define   NIC_PF_MSIX_VEC_0_18_ADDR            (0x000000)
+#define   NIC_PF_MSIX_VEC_0_CTL                        (0x000008)
+#define   NIC_PF_MSIX_PBA_0                    (0x0F0000)
+
+/* Virtual function register offsets */
+#define   NIC_VNIC_CFG                         (0x000020)
+#define   NIC_VF_PF_MAILBOX_0_1                        (0x000130)
+#define   NIC_VF_INT                           (0x000200)
+#define   NIC_VF_INT_W1S                       (0x000220)
+#define   NIC_VF_ENA_W1C                       (0x000240)
+#define   NIC_VF_ENA_W1S                       (0x000260)
+
+#define   NIC_VNIC_RSS_CFG                     (0x0020E0)
+#define   NIC_VNIC_RSS_KEY_0_4                 (0x002200)
+#define   NIC_VNIC_TX_STAT_0_4                 (0x004000)
+#define   NIC_VNIC_RX_STAT_0_13                        (0x004100)
+#define   NIC_QSET_RQ_GEN_CFG                  (0x010010)
+
+#define   NIC_QSET_CQ_0_7_CFG                  (0x010400)
+#define   NIC_QSET_CQ_0_7_CFG2                 (0x010408)
+#define   NIC_QSET_CQ_0_7_THRESH               (0x010410)
+#define   NIC_QSET_CQ_0_7_BASE                 (0x010420)
+#define   NIC_QSET_CQ_0_7_HEAD                 (0x010428)
+#define   NIC_QSET_CQ_0_7_TAIL                 (0x010430)
+#define   NIC_QSET_CQ_0_7_DOOR                 (0x010438)
+#define   NIC_QSET_CQ_0_7_STATUS               (0x010440)
+#define   NIC_QSET_CQ_0_7_STATUS2              (0x010448)
+#define   NIC_QSET_CQ_0_7_DEBUG                        (0x010450)
+
+#define   NIC_QSET_RQ_0_7_CFG                  (0x010600)
+#define   NIC_QSET_RQ_0_7_STAT_0_1             (0x010700)
+
+#define   NIC_QSET_SQ_0_7_CFG                  (0x010800)
+#define   NIC_QSET_SQ_0_7_THRESH               (0x010810)
+#define   NIC_QSET_SQ_0_7_BASE                 (0x010820)
+#define   NIC_QSET_SQ_0_7_HEAD                 (0x010828)
+#define   NIC_QSET_SQ_0_7_TAIL                 (0x010830)
+#define   NIC_QSET_SQ_0_7_DOOR                 (0x010838)
+#define   NIC_QSET_SQ_0_7_STATUS               (0x010840)
+#define   NIC_QSET_SQ_0_7_DEBUG                        (0x010848)
+#define   NIC_QSET_SQ_0_7_CNM_CHG              (0x010860)
+#define   NIC_QSET_SQ_0_7_STAT_0_1             (0x010900)
+
+#define   NIC_QSET_RBDR_0_1_CFG                        (0x010C00)
+#define   NIC_QSET_RBDR_0_1_THRESH             (0x010C10)
+#define   NIC_QSET_RBDR_0_1_BASE               (0x010C20)
+#define   NIC_QSET_RBDR_0_1_HEAD               (0x010C28)
+#define   NIC_QSET_RBDR_0_1_TAIL               (0x010C30)
+#define   NIC_QSET_RBDR_0_1_DOOR               (0x010C38)
+#define   NIC_QSET_RBDR_0_1_STATUS0            (0x010C40)
+#define   NIC_QSET_RBDR_0_1_STATUS1            (0x010C48)
+#define   NIC_QSET_RBDR_0_1_PREFETCH_STATUS    (0x010C50)
+
+#define   NIC_VF_MSIX_VECTOR_0_19_ADDR         (0x000000)
+#define   NIC_VF_MSIX_VECTOR_0_19_CTL          (0x000008)
+#define   NIC_VF_MSIX_PBA                      (0x0F0000)
+
+/* Offsets within registers */
+#define   NIC_MSIX_VEC_SHIFT                   4
+#define   NIC_Q_NUM_SHIFT                      18
+#define   NIC_QS_ID_SHIFT                      21
+#define   NIC_VF_NUM_SHIFT                     21
+
+/* Port kind configuration register */
+struct pkind_cfg {
+#if defined(__BIG_ENDIAN_BITFIELD)
+       u64 reserved_42_63:22;
+       u64 hdr_sl:5;   /* Header skip length */
+       u64 rx_hdr:3;   /* TNS Receive header present */
+       u64 lenerr_en:1;/* L2 length error check enable */
+       u64 reserved_32_32:1;
+       u64 maxlen:16;  /* Max frame size */
+       u64 minlen:16;  /* Min frame size */
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+       u64 minlen:16;
+       u64 maxlen:16;
+       u64 reserved_32_32:1;
+       u64 lenerr_en:1;
+       u64 rx_hdr:3;
+       u64 hdr_sl:5;
+       u64 reserved_42_63:22;
+#endif
+};
+
+#endif /* NIC_REG_H */
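pkind_cfg declares its fields twice, mirrored, so the same struct maps onto
the 64-bit register for either host bit order. A driver would typically move
such a struct in and out of the raw register value via a union or cast; a
sketch with invented names, assuming the struct above:

union pkind_reg {
        u64 raw;                /* value as read from/written to hardware */
        struct pkind_cfg cfg;   /* field-wise view of the same 64 bits */
};

static u16 demo_get_maxlen(u64 regval)
{
        union pkind_reg r = { .raw = regval };

        return r.cfg.maxlen;    /* max frame size field */
}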
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
new file mode 100644 (file)
index 0000000..16bd2d7
--- /dev/null
@@ -0,0 +1,600 @@
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+/* ETHTOOL Support for VNIC_VF Device */
+
+#include <linux/pci.h>
+
+#include "nic_reg.h"
+#include "nic.h"
+#include "nicvf_queues.h"
+#include "q_struct.h"
+#include "thunder_bgx.h"
+
+#define DRV_NAME       "thunder-nicvf"
+#define DRV_VERSION     "1.0"
+
+struct nicvf_stat {
+       char name[ETH_GSTRING_LEN];
+       unsigned int index;
+};
+
+#define NICVF_HW_STAT(stat) { \
+       .name = #stat, \
+       .index = offsetof(struct nicvf_hw_stats, stat) / sizeof(u64), \
+}
+
+#define NICVF_DRV_STAT(stat) { \
+       .name = #stat, \
+       .index = offsetof(struct nicvf_drv_stats, stat) / sizeof(u64), \
+}
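Both macros reduce a struct member to its u64 slot number, which is what lets
nicvf_get_ethtool_stats() below fetch any statistic by treating the stats
struct as a flat u64 array. Illustration with a hypothetical struct:

struct demo_stats {
        u64 rx_bytes;   /* index 0 */
        u64 rx_frames;  /* index 1 */
        u64 tx_bytes;   /* index 2 */
};

/* offsetof(struct demo_stats, tx_bytes) / sizeof(u64) == 2, so
 * ((u64 *)&stats)[2] reads stats.tx_bytes.
 */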
+
+static const struct nicvf_stat nicvf_hw_stats[] = {
+       NICVF_HW_STAT(rx_bytes_ok),
+       NICVF_HW_STAT(rx_ucast_frames_ok),
+       NICVF_HW_STAT(rx_bcast_frames_ok),
+       NICVF_HW_STAT(rx_mcast_frames_ok),
+       NICVF_HW_STAT(rx_fcs_errors),
+       NICVF_HW_STAT(rx_l2_errors),
+       NICVF_HW_STAT(rx_drop_red),
+       NICVF_HW_STAT(rx_drop_red_bytes),
+       NICVF_HW_STAT(rx_drop_overrun),
+       NICVF_HW_STAT(rx_drop_overrun_bytes),
+       NICVF_HW_STAT(rx_drop_bcast),
+       NICVF_HW_STAT(rx_drop_mcast),
+       NICVF_HW_STAT(rx_drop_l3_bcast),
+       NICVF_HW_STAT(rx_drop_l3_mcast),
+       NICVF_HW_STAT(tx_bytes_ok),
+       NICVF_HW_STAT(tx_ucast_frames_ok),
+       NICVF_HW_STAT(tx_bcast_frames_ok),
+       NICVF_HW_STAT(tx_mcast_frames_ok),
+};
+
+static const struct nicvf_stat nicvf_drv_stats[] = {
+       NICVF_DRV_STAT(rx_frames_ok),
+       NICVF_DRV_STAT(rx_frames_64),
+       NICVF_DRV_STAT(rx_frames_127),
+       NICVF_DRV_STAT(rx_frames_255),
+       NICVF_DRV_STAT(rx_frames_511),
+       NICVF_DRV_STAT(rx_frames_1023),
+       NICVF_DRV_STAT(rx_frames_1518),
+       NICVF_DRV_STAT(rx_frames_jumbo),
+       NICVF_DRV_STAT(rx_drops),
+       NICVF_DRV_STAT(tx_frames_ok),
+       NICVF_DRV_STAT(tx_busy),
+       NICVF_DRV_STAT(tx_tso),
+       NICVF_DRV_STAT(tx_drops),
+};
+
+static const struct nicvf_stat nicvf_queue_stats[] = {
+       { "bytes", 0 },
+       { "frames", 1 },
+};
+
+static const unsigned int nicvf_n_hw_stats = ARRAY_SIZE(nicvf_hw_stats);
+static const unsigned int nicvf_n_drv_stats = ARRAY_SIZE(nicvf_drv_stats);
+static const unsigned int nicvf_n_queue_stats = ARRAY_SIZE(nicvf_queue_stats);
+
+static int nicvf_get_settings(struct net_device *netdev,
+                             struct ethtool_cmd *cmd)
+{
+       struct nicvf *nic = netdev_priv(netdev);
+
+       cmd->supported = 0;
+       cmd->transceiver = XCVR_EXTERNAL;
+       if (nic->speed <= 1000) {
+               cmd->port = PORT_MII;
+               cmd->autoneg = AUTONEG_ENABLE;
+       } else {
+               cmd->port = PORT_FIBRE;
+               cmd->autoneg = AUTONEG_DISABLE;
+       }
+       cmd->duplex = nic->duplex;
+       ethtool_cmd_speed_set(cmd, nic->speed);
+
+       return 0;
+}
+
+static void nicvf_get_drvinfo(struct net_device *netdev,
+                             struct ethtool_drvinfo *info)
+{
+       struct nicvf *nic = netdev_priv(netdev);
+
+       strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(nic->pdev), sizeof(info->bus_info));
+}
+
+static u32 nicvf_get_msglevel(struct net_device *netdev)
+{
+       struct nicvf *nic = netdev_priv(netdev);
+
+       return nic->msg_enable;
+}
+
+static void nicvf_set_msglevel(struct net_device *netdev, u32 lvl)
+{
+       struct nicvf *nic = netdev_priv(netdev);
+
+       nic->msg_enable = lvl;
+}
+
+static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
+{
+       int stats, qidx;
+
+       if (sset != ETH_SS_STATS)
+               return;
+
+       for (stats = 0; stats < nicvf_n_hw_stats; stats++) {
+               memcpy(data, nicvf_hw_stats[stats].name, ETH_GSTRING_LEN);
+               data += ETH_GSTRING_LEN;
+       }
+
+       for (stats = 0; stats < nicvf_n_drv_stats; stats++) {
+               memcpy(data, nicvf_drv_stats[stats].name, ETH_GSTRING_LEN);
+               data += ETH_GSTRING_LEN;
+       }
+
+       for (qidx = 0; qidx < MAX_RCV_QUEUES_PER_QS; qidx++) {
+               for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
+                       sprintf(data, "rxq%d: %s", qidx,
+                               nicvf_queue_stats[stats].name);
+                       data += ETH_GSTRING_LEN;
+               }
+       }
+
+       for (qidx = 0; qidx < MAX_SND_QUEUES_PER_QS; qidx++) {
+               for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
+                       sprintf(data, "txq%d: %s", qidx,
+                               nicvf_queue_stats[stats].name);
+                       data += ETH_GSTRING_LEN;
+               }
+       }
+
+       for (stats = 0; stats < BGX_RX_STATS_COUNT; stats++) {
+               sprintf(data, "bgx_rxstat%d: ", stats);
+               data += ETH_GSTRING_LEN;
+       }
+
+       for (stats = 0; stats < BGX_TX_STATS_COUNT; stats++) {
+               sprintf(data, "bgx_txstat%d: ", stats);
+               data += ETH_GSTRING_LEN;
+       }
+}
+
+static int nicvf_get_sset_count(struct net_device *netdev, int sset)
+{
+       if (sset != ETH_SS_STATS)
+               return -EINVAL;
+
+       return nicvf_n_hw_stats + nicvf_n_drv_stats +
+               (nicvf_n_queue_stats *
+                (MAX_RCV_QUEUES_PER_QS + MAX_SND_QUEUES_PER_QS)) +
+               BGX_RX_STATS_COUNT + BGX_TX_STATS_COUNT;
+}
+
+static void nicvf_get_ethtool_stats(struct net_device *netdev,
+                                   struct ethtool_stats *stats, u64 *data)
+{
+       struct nicvf *nic = netdev_priv(netdev);
+       int stat, qidx;
+
+       nicvf_update_stats(nic);
+
+       /* Update LMAC stats */
+       nicvf_update_lmac_stats(nic);
+
+       for (stat = 0; stat < nicvf_n_hw_stats; stat++)
+               *(data++) = ((u64 *)&nic->stats)
+                               [nicvf_hw_stats[stat].index];
+       for (stat = 0; stat < nicvf_n_drv_stats; stat++)
+               *(data++) = ((u64 *)&nic->drv_stats)
+                               [nicvf_drv_stats[stat].index];
+
+       for (qidx = 0; qidx < MAX_RCV_QUEUES_PER_QS; qidx++) {
+               for (stat = 0; stat < nicvf_n_queue_stats; stat++)
+                       *(data++) = ((u64 *)&nic->qs->rq[qidx].stats)
+                                       [nicvf_queue_stats[stat].index];
+       }
+
+       for (qidx = 0; qidx < MAX_SND_QUEUES_PER_QS; qidx++) {
+               for (stat = 0; stat < nicvf_n_queue_stats; stat++)
+                       *(data++) = ((u64 *)&nic->qs->sq[qidx].stats)
+                                       [nicvf_queue_stats[stat].index];
+       }
+
+       for (stat = 0; stat < BGX_RX_STATS_COUNT; stat++)
+               *(data++) = nic->bgx_stats.rx_stats[stat];
+       for (stat = 0; stat < BGX_TX_STATS_COUNT; stat++)
+               *(data++) = nic->bgx_stats.tx_stats[stat];
+}
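The values written here must appear in exactly the order of the strings
emitted by nicvf_get_strings(), and their total must equal the count returned
by nicvf_get_sset_count(): ethtool pairs names with values purely by position.
A development-time sanity check could look like this (sketch; "written" is a
hypothetical counter of u64s emitted above):

        WARN_ON(written != nicvf_get_sset_count(netdev, ETH_SS_STATS));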
+
+static int nicvf_get_regs_len(struct net_device *dev)
+{
+       return sizeof(u64) * NIC_VF_REG_COUNT;
+}
+
+static void nicvf_get_regs(struct net_device *dev,
+                          struct ethtool_regs *regs, void *reg)
+{
+       struct nicvf *nic = netdev_priv(dev);
+       u64 *p = (u64 *)reg;
+       u64 reg_offset;
+       int mbox, key, stat, q;
+       int i = 0;
+
+       regs->version = 0;
+       /* The dump buffer holds NIC_VF_REG_COUNT u64s (see
+        * nicvf_get_regs_len()), so clear the whole buffer.
+        */
+       memset(p, 0, NIC_VF_REG_COUNT * sizeof(u64));
+
+       p[i++] = nicvf_reg_read(nic, NIC_VNIC_CFG);
+       /* Mailbox registers */
+       for (mbox = 0; mbox < NIC_PF_VF_MAILBOX_SIZE; mbox++)
+               p[i++] = nicvf_reg_read(nic,
+                                       NIC_VF_PF_MAILBOX_0_1 | (mbox << 3));
+
+       p[i++] = nicvf_reg_read(nic, NIC_VF_INT);
+       p[i++] = nicvf_reg_read(nic, NIC_VF_INT_W1S);
+       p[i++] = nicvf_reg_read(nic, NIC_VF_ENA_W1C);
+       p[i++] = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
+       p[i++] = nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);
+
+       for (key = 0; key < RSS_HASH_KEY_SIZE; key++)
+               p[i++] = nicvf_reg_read(nic, NIC_VNIC_RSS_KEY_0_4 | (key << 3));
+
+       /* Tx/Rx statistics */
+       for (stat = 0; stat < TX_STATS_ENUM_LAST; stat++)
+               p[i++] = nicvf_reg_read(nic,
+                                       NIC_VNIC_TX_STAT_0_4 | (stat << 3));
+
+       /* Iterate with 'stat', as in the Tx loop above; reusing 'i' here
+        * would clobber the dump index and read a stale register offset.
+        */
+       for (stat = 0; stat < RX_STATS_ENUM_LAST; stat++)
+               p[i++] = nicvf_reg_read(nic,
+                                       NIC_VNIC_RX_STAT_0_13 | (stat << 3));
+
+       p[i++] = nicvf_reg_read(nic, NIC_QSET_RQ_GEN_CFG);
+
+       /* All completion queue's registers */
+       for (q = 0; q < MAX_CMP_QUEUES_PER_QS; q++) {
+               p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_CFG, q);
+               p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_CFG2, q);
+               p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_THRESH, q);
+               p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_BASE, q);
+               p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, q);
+               p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_TAIL, q);
+               p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_DOOR, q);
+               p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, q);
+               p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS2, q);
+               p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_DEBUG, q);
+       }
+
+       /* All receive queue's registers */
+       for (q = 0; q < MAX_RCV_QUEUES_PER_QS; q++) {
+               p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RQ_0_7_CFG, q);
+               p[i++] = nicvf_queue_reg_read(nic,
+                                                 NIC_QSET_RQ_0_7_STAT_0_1, q);
+               reg_offset = NIC_QSET_RQ_0_7_STAT_0_1 | (1 << 3);
+               p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
+       }
+
+       for (q = 0; q < MAX_SND_QUEUES_PER_QS; q++) {
+               p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, q);
+               p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_THRESH, q);
+               p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_BASE, q);
+               p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, q);
+               p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, q);
+               p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DOOR, q);
+               p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS, q);
+               p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DEBUG, q);
+               p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CNM_CHG, q);
+               p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1, q);
+               reg_offset = NIC_QSET_SQ_0_7_STAT_0_1 | (1 << 3);
+               p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
+       }
+
+       for (q = 0; q < MAX_RCV_BUF_DESC_RINGS_PER_QS; q++) {
+               p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_CFG, q);
+               p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_THRESH, q);
+               p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_BASE, q);
+               p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, q);
+               p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, q);
+               p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_DOOR, q);
+               p[i++] = nicvf_queue_reg_read(nic,
+                                             NIC_QSET_RBDR_0_1_STATUS0, q);
+               p[i++] = nicvf_queue_reg_read(nic,
+                                             NIC_QSET_RBDR_0_1_STATUS1, q);
+               reg_offset = NIC_QSET_RBDR_0_1_PREFETCH_STATUS;
+               p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
+       }
+}
+
+static int nicvf_get_coalesce(struct net_device *netdev,
+                             struct ethtool_coalesce *cmd)
+{
+       struct nicvf *nic = netdev_priv(netdev);
+
+       cmd->rx_coalesce_usecs = nic->cq_coalesce_usecs;
+       return 0;
+}
+
+static void nicvf_get_ringparam(struct net_device *netdev,
+                               struct ethtool_ringparam *ring)
+{
+       struct nicvf *nic = netdev_priv(netdev);
+       struct queue_set *qs = nic->qs;
+
+       ring->rx_max_pending = MAX_RCV_BUF_COUNT;
+       ring->rx_pending = qs->rbdr_len;
+       ring->tx_max_pending = MAX_SND_QUEUE_LEN;
+       ring->tx_pending = qs->sq_len;
+}
+
+static int nicvf_get_rss_hash_opts(struct nicvf *nic,
+                                  struct ethtool_rxnfc *info)
+{
+       info->data = 0;
+
+       switch (info->flow_type) {
+       case TCP_V4_FLOW:
+       case TCP_V6_FLOW:
+       case UDP_V4_FLOW:
+       case UDP_V6_FLOW:
+       case SCTP_V4_FLOW:
+       case SCTP_V6_FLOW:
+               info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+               /* fall through: L4 flows also hash on the IP addresses */
+       case IPV4_FLOW:
+       case IPV6_FLOW:
+               info->data |= RXH_IP_SRC | RXH_IP_DST;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int nicvf_get_rxnfc(struct net_device *dev,
+                          struct ethtool_rxnfc *info, u32 *rules)
+{
+       struct nicvf *nic = netdev_priv(dev);
+       int ret = -EOPNOTSUPP;
+
+       switch (info->cmd) {
+       case ETHTOOL_GRXRINGS:
+               info->data = nic->qs->rq_cnt;
+               ret = 0;
+               break;
+       case ETHTOOL_GRXFH:
+               return nicvf_get_rss_hash_opts(nic, info);
+       default:
+               break;
+       }
+       return ret;
+}
+
+static int nicvf_set_rss_hash_opts(struct nicvf *nic,
+                                  struct ethtool_rxnfc *info)
+{
+       struct nicvf_rss_info *rss = &nic->rss_info;
+       u64 rss_cfg = nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);
+
+       if (!rss->enable)
+               netdev_err(nic->netdev,
+                          "RSS is disabled, hash cannot be set\n");
+
+       netdev_info(nic->netdev, "Set RSS flow type = %d, data = %lld\n",
+                   info->flow_type, info->data);
+
+       if (!(info->data & RXH_IP_SRC) || !(info->data & RXH_IP_DST))
+               return -EINVAL;
+
+       switch (info->flow_type) {
+       case TCP_V4_FLOW:
+       case TCP_V6_FLOW:
+               switch (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+               case 0:
+                       rss_cfg &= ~(1ULL << RSS_HASH_TCP);
+                       break;
+               case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+                       rss_cfg |= (1ULL << RSS_HASH_TCP);
+                       break;
+               default:
+                       return -EINVAL;
+               }
+               break;
+       case UDP_V4_FLOW:
+       case UDP_V6_FLOW:
+               switch (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+               case 0:
+                       rss_cfg &= ~(1ULL << RSS_HASH_UDP);
+                       break;
+               case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+                       rss_cfg |= (1ULL << RSS_HASH_UDP);
+                       break;
+               default:
+                       return -EINVAL;
+               }
+               break;
+       case SCTP_V4_FLOW:
+       case SCTP_V6_FLOW:
+               switch (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+               case 0:
+                       rss_cfg &= ~(1ULL << RSS_HASH_L4ETC);
+                       break;
+               case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+                       rss_cfg |= (1ULL << RSS_HASH_L4ETC);
+                       break;
+               default:
+                       return -EINVAL;
+               }
+               break;
+       case IPV4_FLOW:
+       case IPV6_FLOW:
+               rss_cfg = RSS_HASH_IP;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, rss_cfg);
+       return 0;
+}
+
+static int nicvf_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
+{
+       struct nicvf *nic = netdev_priv(dev);
+
+       switch (info->cmd) {
+       case ETHTOOL_SRXFH:
+               return nicvf_set_rss_hash_opts(nic, info);
+       default:
+               break;
+       }
+       return -EOPNOTSUPP;
+}
+
+static u32 nicvf_get_rxfh_key_size(struct net_device *netdev)
+{
+       return RSS_HASH_KEY_SIZE * sizeof(u64);
+}
+
+static u32 nicvf_get_rxfh_indir_size(struct net_device *dev)
+{
+       struct nicvf *nic = netdev_priv(dev);
+
+       return nic->rss_info.rss_size;
+}
+
+static int nicvf_get_rxfh(struct net_device *dev, u32 *indir, u8 *hkey,
+                         u8 *hfunc)
+{
+       struct nicvf *nic = netdev_priv(dev);
+       struct nicvf_rss_info *rss = &nic->rss_info;
+       int idx;
+
+       if (indir) {
+               for (idx = 0; idx < rss->rss_size; idx++)
+                       indir[idx] = rss->ind_tbl[idx];
+       }
+
+       if (hkey)
+               memcpy(hkey, rss->key, RSS_HASH_KEY_SIZE * sizeof(u64));
+
+       if (hfunc)
+               *hfunc = ETH_RSS_HASH_TOP;
+
+       return 0;
+}
+
+static int nicvf_set_rxfh(struct net_device *dev, const u32 *indir,
+                         const u8 *hkey, u8 hfunc)
+{
+       struct nicvf *nic = netdev_priv(dev);
+       struct nicvf_rss_info *rss = &nic->rss_info;
+       int idx;
+
+       if ((nic->qs->rq_cnt <= 1) || (nic->cpi_alg != CPI_ALG_NONE)) {
+               rss->enable = false;
+               rss->hash_bits = 0;
+               return -EIO;
+       }
+
+       /* We do not allow change in unsupported parameters */
+       if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+               return -EOPNOTSUPP;
+
+       rss->enable = true;
+       if (indir) {
+               for (idx = 0; idx < rss->rss_size; idx++)
+                       rss->ind_tbl[idx] = indir[idx];
+       }
+
+       if (hkey) {
+               memcpy(rss->key, hkey, RSS_HASH_KEY_SIZE * sizeof(u64));
+               nicvf_set_rss_key(nic);
+       }
+
+       nicvf_config_rss(nic);
+       return 0;
+}
+
+/* Get no of queues device supports and current queue count */
+static void nicvf_get_channels(struct net_device *dev,
+                              struct ethtool_channels *channel)
+{
+       struct nicvf *nic = netdev_priv(dev);
+
+       memset(channel, 0, sizeof(*channel));
+
+       channel->max_rx = MAX_RCV_QUEUES_PER_QS;
+       channel->max_tx = MAX_SND_QUEUES_PER_QS;
+
+       channel->rx_count = nic->qs->rq_cnt;
+       channel->tx_count = nic->qs->sq_cnt;
+}
+
+/* Set no of Tx, Rx queues to be used */
+static int nicvf_set_channels(struct net_device *dev,
+                             struct ethtool_channels *channel)
+{
+       struct nicvf *nic = netdev_priv(dev);
+       int err = 0;
+
+       if (!channel->rx_count || !channel->tx_count)
+               return -EINVAL;
+       if (channel->rx_count > MAX_RCV_QUEUES_PER_QS)
+               return -EINVAL;
+       if (channel->tx_count > MAX_SND_QUEUES_PER_QS)
+               return -EINVAL;
+
+       nic->qs->rq_cnt = channel->rx_count;
+       nic->qs->sq_cnt = channel->tx_count;
+       nic->qs->cq_cnt = max(nic->qs->rq_cnt, nic->qs->sq_cnt);
+
+       err = nicvf_set_real_num_queues(dev, nic->qs->sq_cnt, nic->qs->rq_cnt);
+       if (err)
+               return err;
+
+       if (!netif_running(dev))
+               return err;
+
+       nicvf_stop(dev);
+       nicvf_open(dev);
+       netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n",
+                   nic->qs->sq_cnt, nic->qs->rq_cnt);
+
+       return err;
+}
+
+static const struct ethtool_ops nicvf_ethtool_ops = {
+       .get_settings           = nicvf_get_settings,
+       .get_link               = ethtool_op_get_link,
+       .get_drvinfo            = nicvf_get_drvinfo,
+       .get_msglevel           = nicvf_get_msglevel,
+       .set_msglevel           = nicvf_set_msglevel,
+       .get_strings            = nicvf_get_strings,
+       .get_sset_count         = nicvf_get_sset_count,
+       .get_ethtool_stats      = nicvf_get_ethtool_stats,
+       .get_regs_len           = nicvf_get_regs_len,
+       .get_regs               = nicvf_get_regs,
+       .get_coalesce           = nicvf_get_coalesce,
+       .get_ringparam          = nicvf_get_ringparam,
+       .get_rxnfc              = nicvf_get_rxnfc,
+       .set_rxnfc              = nicvf_set_rxnfc,
+       .get_rxfh_key_size      = nicvf_get_rxfh_key_size,
+       .get_rxfh_indir_size    = nicvf_get_rxfh_indir_size,
+       .get_rxfh               = nicvf_get_rxfh,
+       .set_rxfh               = nicvf_set_rxfh,
+       .get_channels           = nicvf_get_channels,
+       .set_channels           = nicvf_set_channels,
+       .get_ts_info            = ethtool_op_get_ts_info,
+};
+
+void nicvf_set_ethtool_ops(struct net_device *netdev)
+{
+       netdev->ethtool_ops = &nicvf_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
new file mode 100644 (file)
index 0000000..02da802
--- /dev/null
@@ -0,0 +1,1331 @@
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/log2.h>
+#include <linux/prefetch.h>
+#include <linux/irq.h>
+
+#include "nic_reg.h"
+#include "nic.h"
+#include "nicvf_queues.h"
+#include "thunder_bgx.h"
+
+#define DRV_NAME       "thunder-nicvf"
+#define DRV_VERSION    "1.0"
+
+/* Supported devices */
+static const struct pci_device_id nicvf_id_table[] = {
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
+                        PCI_DEVICE_ID_THUNDER_NIC_VF,
+                        PCI_VENDOR_ID_CAVIUM, 0xA11E) },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
+                        PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF,
+                        PCI_VENDOR_ID_CAVIUM, 0xA11E) },
+       { 0, }  /* end of table */
+};
+
+MODULE_AUTHOR("Sunil Goutham");
+MODULE_DESCRIPTION("Cavium Thunder NIC Virtual Function Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, nicvf_id_table);
+
+static int debug = 0x00;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Debug message level bitmap");
+
+static int cpi_alg = CPI_ALG_NONE;
+module_param(cpi_alg, int, S_IRUGO);
+MODULE_PARM_DESC(cpi_alg,
+                "PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)");
+
+static inline void nicvf_set_rx_frame_cnt(struct nicvf *nic,
+                                         struct sk_buff *skb)
+{
+       if (skb->len <= 64)
+               nic->drv_stats.rx_frames_64++;
+       else if (skb->len <= 127)
+               nic->drv_stats.rx_frames_127++;
+       else if (skb->len <= 255)
+               nic->drv_stats.rx_frames_255++;
+       else if (skb->len <= 511)
+               nic->drv_stats.rx_frames_511++;
+       else if (skb->len <= 1023)
+               nic->drv_stats.rx_frames_1023++;
+       else if (skb->len <= 1518)
+               nic->drv_stats.rx_frames_1518++;
+       else
+               nic->drv_stats.rx_frames_jumbo++;
+}
+
+/* The Cavium ThunderX network controller can *only* be found in SoCs
+ * containing the ThunderX ARM64 CPU implementation.  All accesses to the device
+ * registers on this platform are implicitly strongly ordered with respect
+ * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use
+ * with no memory barriers in this driver.  The readq()/writeq() functions add
+ * explicit ordering operation which in this case are redundant, and only
+ * add overhead.
+ */
+
+/* Register read/write APIs */
+void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val)
+{
+       writeq_relaxed(val, nic->reg_base + offset);
+}
+
+u64 nicvf_reg_read(struct nicvf *nic, u64 offset)
+{
+       return readq_relaxed(nic->reg_base + offset);
+}
+
+void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
+                          u64 qidx, u64 val)
+{
+       void __iomem *addr = nic->reg_base + offset;
+
+       writeq_relaxed(val, addr + (qidx << NIC_Q_NUM_SHIFT));
+}
+
+u64 nicvf_queue_reg_read(struct nicvf *nic, u64 offset, u64 qidx)
+{
+       void __iomem *addr = nic->reg_base + offset;
+
+       return readq_relaxed(addr + (qidx << NIC_Q_NUM_SHIFT));
+}
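Per-queue register copies sit at a stride of 1 << NIC_Q_NUM_SHIFT apart
(256 KB, since nic_reg.h defines NIC_Q_NUM_SHIFT as 18), so the accessors
above only need the queue-0 offset plus a queue index. Worked example for the
completion-queue doorbell of queue 2:

/*   NIC_QSET_CQ_0_7_DOOR + (2 << NIC_Q_NUM_SHIFT)
 * = 0x010438             + 0x080000
 * = 0x090438
 */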
+
+/* VF -> PF mailbox communication */
+
+static void nicvf_write_to_mbx(struct nicvf *nic, union nic_mbx *mbx)
+{
+       u64 *msg = (u64 *)mbx;
+
+       nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 0, msg[0]);
+       nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 8, msg[1]);
+}
+
+int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
+{
+       int timeout = NIC_MBOX_MSG_TIMEOUT;
+       int sleep = 10;
+
+       nic->pf_acked = false;
+       nic->pf_nacked = false;
+
+       nicvf_write_to_mbx(nic, mbx);
+
+       /* Wait for previous message to be acked, timeout 2sec */
+       while (!nic->pf_acked) {
+               if (nic->pf_nacked)
+                       return -EINVAL;
+               msleep(sleep);
+               if (nic->pf_acked)
+                       break;
+               timeout -= sleep;
+               if (!timeout) {
+                       netdev_err(nic->netdev,
+                                  "PF didn't ack to mbox msg %d from VF%d\n",
+                                  (mbx->msg.msg & 0xFF), nic->vf_id);
+                       return -EBUSY;
+               }
+       }
+       return 0;
+}
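The ack wait above is a bounded poll: sleep in small steps and give up once
the time budget is spent. The same shape in isolation (hypothetical helper,
sketch only):

static int wait_for_flag(bool *flag, int timeout_ms, int step_ms)
{
        while (!READ_ONCE(*flag)) {
                msleep(step_ms);
                timeout_ms -= step_ms;
                if (timeout_ms <= 0)
                        return -ETIMEDOUT;
        }
        return 0;
}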
+
+/* Checks if the VF is able to communicate with the PF
+ * and also gets the VNIC number this VF is associated with.
+ */
+static int nicvf_check_pf_ready(struct nicvf *nic)
+{
+       int timeout = 5000, sleep = 20;
+       union nic_mbx mbx = {};
+
+       mbx.msg.msg = NIC_MBOX_MSG_READY;
+
+       nic->pf_ready_to_rcv_msg = false;
+
+       nicvf_write_to_mbx(nic, &mbx);
+
+       while (!nic->pf_ready_to_rcv_msg) {
+               msleep(sleep);
+               if (nic->pf_ready_to_rcv_msg)
+                       break;
+               timeout -= sleep;
+               if (!timeout) {
+                       netdev_err(nic->netdev,
+                                  "PF didn't respond to READY msg\n");
+                       return 0;
+               }
+       }
+       return 1;
+}
+
+static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx)
+{
+       if (bgx->rx)
+               nic->bgx_stats.rx_stats[bgx->idx] = bgx->stats;
+       else
+               nic->bgx_stats.tx_stats[bgx->idx] = bgx->stats;
+}
+
+static void nicvf_handle_mbx_intr(struct nicvf *nic)
+{
+       union nic_mbx mbx = {};
+       u64 *mbx_data;
+       u64 mbx_addr;
+       int i;
+
+       mbx_addr = NIC_VF_PF_MAILBOX_0_1;
+       mbx_data = (u64 *)&mbx;
+
+       for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
+               *mbx_data = nicvf_reg_read(nic, mbx_addr);
+               mbx_data++;
+               mbx_addr += sizeof(u64);
+       }
+
+       netdev_dbg(nic->netdev, "Mbox message: msg: 0x%x\n", mbx.msg.msg);
+       switch (mbx.msg.msg) {
+       case NIC_MBOX_MSG_READY:
+               nic->pf_ready_to_rcv_msg = true;
+               nic->vf_id = mbx.nic_cfg.vf_id & 0x7F;
+               nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F;
+               nic->node = mbx.nic_cfg.node_id;
+               ether_addr_copy(nic->netdev->dev_addr, mbx.nic_cfg.mac_addr);
+               nic->link_up = false;
+               nic->duplex = 0;
+               nic->speed = 0;
+               break;
+       case NIC_MBOX_MSG_ACK:
+               nic->pf_acked = true;
+               break;
+       case NIC_MBOX_MSG_NACK:
+               nic->pf_nacked = true;
+               break;
+       case NIC_MBOX_MSG_RSS_SIZE:
+               nic->rss_info.rss_size = mbx.rss_size.ind_tbl_size;
+               nic->pf_acked = true;
+               break;
+       case NIC_MBOX_MSG_BGX_STATS:
+               nicvf_read_bgx_stats(nic, &mbx.bgx_stats);
+               nic->pf_acked = true;
+               nic->bgx_stats_acked = true;
+               break;
+       case NIC_MBOX_MSG_BGX_LINK_CHANGE:
+               nic->pf_acked = true;
+               nic->link_up = mbx.link_status.link_up;
+               nic->duplex = mbx.link_status.duplex;
+               nic->speed = mbx.link_status.speed;
+               if (nic->link_up) {
+                       netdev_info(nic->netdev, "%s: Link is Up %d Mbps %s\n",
+                                   nic->netdev->name, nic->speed,
+                                   nic->duplex == DUPLEX_FULL ?
+                               "Full duplex" : "Half duplex");
+                       netif_carrier_on(nic->netdev);
+                       netif_tx_wake_all_queues(nic->netdev);
+               } else {
+                       netdev_info(nic->netdev, "%s: Link is Down\n",
+                                   nic->netdev->name);
+                       netif_carrier_off(nic->netdev);
+                       netif_tx_stop_all_queues(nic->netdev);
+               }
+               break;
+       default:
+               netdev_err(nic->netdev,
+                          "Invalid message from PF, msg 0x%x\n", mbx.msg.msg);
+               break;
+       }
+       nicvf_clear_intr(nic, NICVF_INTR_MBOX, 0);
+}
+
+static int nicvf_hw_set_mac_addr(struct nicvf *nic, struct net_device *netdev)
+{
+       union nic_mbx mbx = {};
+
+       mbx.mac.msg = NIC_MBOX_MSG_SET_MAC;
+       mbx.mac.vf_id = nic->vf_id;
+       ether_addr_copy(mbx.mac.mac_addr, netdev->dev_addr);
+
+       return nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+static void nicvf_config_cpi(struct nicvf *nic)
+{
+       union nic_mbx mbx = {};
+
+       mbx.cpi_cfg.msg = NIC_MBOX_MSG_CPI_CFG;
+       mbx.cpi_cfg.vf_id = nic->vf_id;
+       mbx.cpi_cfg.cpi_alg = nic->cpi_alg;
+       mbx.cpi_cfg.rq_cnt = nic->qs->rq_cnt;
+
+       nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+static void nicvf_get_rss_size(struct nicvf *nic)
+{
+       union nic_mbx mbx = {};
+
+       mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE;
+       mbx.rss_size.vf_id = nic->vf_id;
+       nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+void nicvf_config_rss(struct nicvf *nic)
+{
+       union nic_mbx mbx = {};
+       struct nicvf_rss_info *rss = &nic->rss_info;
+       int ind_tbl_len = rss->rss_size;
+       int i, nextq = 0;
+
+       mbx.rss_cfg.vf_id = nic->vf_id;
+       mbx.rss_cfg.hash_bits = rss->hash_bits;
+       while (ind_tbl_len) {
+               mbx.rss_cfg.tbl_offset = nextq;
+               mbx.rss_cfg.tbl_len = min(ind_tbl_len,
+                                              RSS_IND_TBL_LEN_PER_MBX_MSG);
+               mbx.rss_cfg.msg = mbx.rss_cfg.tbl_offset ?
+                         NIC_MBOX_MSG_RSS_CFG_CONT : NIC_MBOX_MSG_RSS_CFG;
+
+               for (i = 0; i < mbx.rss_cfg.tbl_len; i++)
+                       mbx.rss_cfg.ind_tbl[i] = rss->ind_tbl[nextq++];
+
+               nicvf_send_msg_to_pf(nic, &mbx);
+
+               ind_tbl_len -= mbx.rss_cfg.tbl_len;
+       }
+}
+
+void nicvf_set_rss_key(struct nicvf *nic)
+{
+       struct nicvf_rss_info *rss = &nic->rss_info;
+       u64 key_addr = NIC_VNIC_RSS_KEY_0_4;
+       int idx;
+
+       for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
+               nicvf_reg_write(nic, key_addr, rss->key[idx]);
+               key_addr += sizeof(u64);
+       }
+}
+
+static int nicvf_rss_init(struct nicvf *nic)
+{
+       struct nicvf_rss_info *rss = &nic->rss_info;
+       int idx;
+
+       nicvf_get_rss_size(nic);
+
+       if ((nic->qs->rq_cnt <= 1) || (cpi_alg != CPI_ALG_NONE)) {
+               rss->enable = false;
+               rss->hash_bits = 0;
+               return 0;
+       }
+
+       rss->enable = true;
+
+       /* Using the HW reset value for now */
+       rss->key[0] = 0xFEED0BADFEED0BADULL;
+       rss->key[1] = 0xFEED0BADFEED0BADULL;
+       rss->key[2] = 0xFEED0BADFEED0BADULL;
+       rss->key[3] = 0xFEED0BADFEED0BADULL;
+       rss->key[4] = 0xFEED0BADFEED0BADULL;
+
+       nicvf_set_rss_key(nic);
+
+       rss->cfg = RSS_IP_HASH_ENA | RSS_TCP_HASH_ENA | RSS_UDP_HASH_ENA;
+       nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, rss->cfg);
+
+       rss->hash_bits =  ilog2(rounddown_pow_of_two(rss->rss_size));
+
+       for (idx = 0; idx < rss->rss_size; idx++)
+               rss->ind_tbl[idx] = ethtool_rxfh_indir_default(idx,
+                                                              nic->qs->rq_cnt);
+       nicvf_config_rss(nic);
+       return 1;
+}
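ethtool_rxfh_indir_default() fills the indirection table round-robin (table
index modulo the receive queue count), so every RQ ends up owning an equal
share of hash buckets. For example:

/* rss_size = 8, rq_cnt = 3:
 *   ind_tbl[idx] = idx % rq_cnt  ->  { 0, 1, 2, 0, 1, 2, 0, 1 }
 */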
+
+int nicvf_set_real_num_queues(struct net_device *netdev,
+                             int tx_queues, int rx_queues)
+{
+       int err = 0;
+
+       err = netif_set_real_num_tx_queues(netdev, tx_queues);
+       if (err) {
+               netdev_err(netdev,
+                          "Failed to set no of Tx queues: %d\n", tx_queues);
+               return err;
+       }
+
+       err = netif_set_real_num_rx_queues(netdev, rx_queues);
+       if (err)
+               netdev_err(netdev,
+                          "Failed to set no of Rx queues: %d\n", rx_queues);
+       return err;
+}
+
+static int nicvf_init_resources(struct nicvf *nic)
+{
+       int err;
+       union nic_mbx mbx = {};
+
+       mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
+
+       /* Enable Qset */
+       nicvf_qset_config(nic, true);
+
+       /* Initialize queues and HW for data transfer */
+       err = nicvf_config_data_transfer(nic, true);
+       if (err) {
+               netdev_err(nic->netdev,
+                          "Failed to alloc/config VF's QSet resources\n");
+               return err;
+       }
+
+       /* Send VF config done msg to PF */
+       nicvf_write_to_mbx(nic, &mbx);
+
+       return 0;
+}
+
+static void nicvf_snd_pkt_handler(struct net_device *netdev,
+                                 struct cmp_queue *cq,
+                                 struct cqe_send_t *cqe_tx, int cqe_type)
+{
+       struct sk_buff *skb = NULL;
+       struct nicvf *nic = netdev_priv(netdev);
+       struct snd_queue *sq;
+       struct sq_hdr_subdesc *hdr;
+
+       sq = &nic->qs->sq[cqe_tx->sq_idx];
+
+       hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr);
+       if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER)
+               return;
+
+       netdev_dbg(nic->netdev,
+                  "%s Qset #%d SQ #%d SQ ptr #%d subdesc count %d\n",
+                  __func__, cqe_tx->sq_qs, cqe_tx->sq_idx,
+                  cqe_tx->sqe_ptr, hdr->subdesc_cnt);
+
+       nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
+       nicvf_check_cqe_tx_errs(nic, cq, cqe_tx);
+       skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr];
+       /* For TSO offloaded packets only one head SKB needs to be freed */
+       if (skb) {
+               prefetch(skb);
+               dev_consume_skb_any(skb);
+       }
+}
+
+static void nicvf_rcv_pkt_handler(struct net_device *netdev,
+                                 struct napi_struct *napi,
+                                 struct cmp_queue *cq,
+                                 struct cqe_rx_t *cqe_rx, int cqe_type)
+{
+       struct sk_buff *skb;
+       struct nicvf *nic = netdev_priv(netdev);
+       int err = 0;
+
+       /* Check for errors */
+       err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx);
+       if (err && !cqe_rx->rb_cnt)
+               return;
+
+       skb = nicvf_get_rcv_skb(nic, cqe_rx);
+       if (!skb) {
+               netdev_dbg(nic->netdev, "Packet not received\n");
+               return;
+       }
+
+       if (netif_msg_pktdata(nic)) {
+               netdev_info(nic->netdev, "%s: skb 0x%p, len=%d\n", netdev->name,
+                           skb, skb->len);
+               print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1,
+                              skb->data, skb->len, true);
+       }
+
+       nicvf_set_rx_frame_cnt(nic, skb);
+
+       skb_record_rx_queue(skb, cqe_rx->rq_idx);
+       if (netdev->hw_features & NETIF_F_RXCSUM) {
+               /* HW by default verifies TCP/UDP/SCTP checksums */
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
+       } else {
+               skb_checksum_none_assert(skb);
+       }
+
+       skb->protocol = eth_type_trans(skb, netdev);
+
+       if (napi && (netdev->features & NETIF_F_GRO))
+               napi_gro_receive(napi, skb);
+       else
+               netif_receive_skb(skb);
+}
+
+static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
+                                struct napi_struct *napi, int budget)
+{
+       int processed_cqe, work_done = 0;
+       int cqe_count, cqe_head;
+       struct nicvf *nic = netdev_priv(netdev);
+       struct queue_set *qs = nic->qs;
+       struct cmp_queue *cq = &qs->cq[cq_idx];
+       struct cqe_rx_t *cq_desc;
+
+       spin_lock_bh(&cq->lock);
+loop:
+       processed_cqe = 0;
+       /* Get no of valid CQ entries to process */
+       cqe_count = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_idx);
+       cqe_count &= CQ_CQE_COUNT;
+       if (!cqe_count)
+               goto done;
+
+       /* Get head of the valid CQ entries */
+       cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9;
+       cqe_head &= 0xFFFF;
+
+       netdev_dbg(nic->netdev, "%s cqe_count %d cqe_head %d\n",
+                  __func__, cqe_count, cqe_head);
+       while (processed_cqe < cqe_count) {
+               /* Get the CQ descriptor */
+               cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
+               cqe_head++;
+               cqe_head &= (cq->dmem.q_len - 1);
+               /* Initiate prefetch for next descriptor */
+               prefetch((struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head));
+
+               if ((work_done >= budget) && napi &&
+                   (cq_desc->cqe_type != CQE_TYPE_SEND)) {
+                       break;
+               }
+
+               netdev_dbg(nic->netdev, "cq_desc->cqe_type %d\n",
+                          cq_desc->cqe_type);
+               switch (cq_desc->cqe_type) {
+               case CQE_TYPE_RX:
+                       nicvf_rcv_pkt_handler(netdev, napi, cq,
+                                             cq_desc, CQE_TYPE_RX);
+                       work_done++;
+               break;
+               case CQE_TYPE_SEND:
+                       nicvf_snd_pkt_handler(netdev, cq,
+                                             (void *)cq_desc, CQE_TYPE_SEND);
+               break;
+               case CQE_TYPE_INVALID:
+               case CQE_TYPE_RX_SPLIT:
+               case CQE_TYPE_RX_TCP:
+               case CQE_TYPE_SEND_PTP:
+                       /* Ignore for now */
+               break;
+               }
+               processed_cqe++;
+       }
+       netdev_dbg(nic->netdev, "%s processed_cqe %d work_done %d budget %d\n",
+                  __func__, processed_cqe, work_done, budget);
+
+       /* Ring doorbell to inform H/W to reuse processed CQEs */
+       nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR,
+                             cq_idx, processed_cqe);
+
+       if ((work_done < budget) && napi)
+               goto loop;
+
+done:
+       spin_unlock_bh(&cq->lock);
+       return work_done;
+}
+
+static int nicvf_poll(struct napi_struct *napi, int budget)
+{
+       u64  cq_head;
+       int  work_done = 0;
+       struct net_device *netdev = napi->dev;
+       struct nicvf *nic = netdev_priv(netdev);
+       struct nicvf_cq_poll *cq;
+       struct netdev_queue *txq;
+
+       cq = container_of(napi, struct nicvf_cq_poll, napi);
+       work_done = nicvf_cq_intr_handler(netdev, cq->cq_idx, napi, budget);
+
+       txq = netdev_get_tx_queue(netdev, cq->cq_idx);
+       if (netif_tx_queue_stopped(txq))
+               netif_tx_wake_queue(txq);
+
+       if (work_done < budget) {
+               /* Slow packet rate, exit polling */
+               napi_complete(napi);
+               /* Re-enable interrupts */
+               cq_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD,
+                                              cq->cq_idx);
+               nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->cq_idx);
+               nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_HEAD,
+                                     cq->cq_idx, cq_head);
+               nicvf_enable_intr(nic, NICVF_INTR_CQ, cq->cq_idx);
+       }
+       return work_done;
+}
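This is the standard NAPI contract: consuming less than the budget means the
ring is idle, so the poller completes and re-arms the CQ interrupt; exhausting
the budget keeps NAPI scheduled and interrupts masked. The skeleton, with a
hypothetical ring-processing helper:

static int demo_poll(struct napi_struct *napi, int budget)
{
        int done = demo_process_ring(napi, budget);     /* hypothetical */

        if (done < budget) {
                napi_complete(napi);
                /* re-enable the device interrupt for this ring here */
        }
        return done;
}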
+
+/* Qset error interrupt handler
+ *
+ * As of now only CQ errors are handled
+ */
+static void nicvf_handle_qs_err(unsigned long data)
+{
+       struct nicvf *nic = (struct nicvf *)data;
+       struct queue_set *qs = nic->qs;
+       int qidx;
+       u64 status;
+
+       netif_tx_disable(nic->netdev);
+
+       /* Check if it is CQ err */
+       for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
+               status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS,
+                                             qidx);
+               if (!(status & CQ_ERR_MASK))
+                       continue;
+               /* Process already queued CQEs and reconfig CQ */
+               nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
+               nicvf_sq_disable(nic, qidx);
+               nicvf_cq_intr_handler(nic->netdev, qidx, NULL, 0);
+               nicvf_cmp_queue_config(nic, qs, qidx, true);
+               nicvf_sq_free_used_descs(nic->netdev, &qs->sq[qidx], qidx);
+               nicvf_sq_enable(nic, &qs->sq[qidx], qidx);
+
+               nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);
+       }
+
+       netif_tx_start_all_queues(nic->netdev);
+       /* Re-enable Qset error interrupt */
+       nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
+}
+
+static irqreturn_t nicvf_misc_intr_handler(int irq, void *nicvf_irq)
+{
+       struct nicvf *nic = (struct nicvf *)nicvf_irq;
+       u64 intr;
+
+       intr = nicvf_reg_read(nic, NIC_VF_INT);
+       /* Check for spurious interrupt */
+       if (!(intr & NICVF_INTR_MBOX_MASK))
+               return IRQ_HANDLED;
+
+       nicvf_handle_mbx_intr(nic);
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t nicvf_intr_handler(int irq, void *nicvf_irq)
+{
+       u64 qidx, intr, clear_intr = 0;
+       u64 cq_intr, rbdr_intr, qs_err_intr;
+       struct nicvf *nic = (struct nicvf *)nicvf_irq;
+       struct queue_set *qs = nic->qs;
+       struct nicvf_cq_poll *cq_poll = NULL;
+
+       intr = nicvf_reg_read(nic, NIC_VF_INT);
+       if (netif_msg_intr(nic))
+               netdev_info(nic->netdev, "%s: interrupt status 0x%llx\n",
+                           nic->netdev->name, intr);
+
+       qs_err_intr = intr & NICVF_INTR_QS_ERR_MASK;
+       if (qs_err_intr) {
+               /* Disable Qset err interrupt and schedule softirq */
+               nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
+               tasklet_hi_schedule(&nic->qs_err_task);
+               clear_intr |= qs_err_intr;
+       }
+
+       /* Disable interrupts and start polling */
+       cq_intr = (intr & NICVF_INTR_CQ_MASK) >> NICVF_INTR_CQ_SHIFT;
+       for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
+               if (!(cq_intr & (1 << qidx)))
+                       continue;
+               if (!nicvf_is_intr_enabled(nic, NICVF_INTR_CQ, qidx))
+                       continue;
+
+               nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
+               clear_intr |= ((1 << qidx) << NICVF_INTR_CQ_SHIFT);
+
+               cq_poll = nic->napi[qidx];
+               /* Schedule NAPI */
+               if (cq_poll)
+                       napi_schedule(&cq_poll->napi);
+       }
+
+       /* Handle RBDR interrupts */
+       rbdr_intr = (intr & NICVF_INTR_RBDR_MASK) >> NICVF_INTR_RBDR_SHIFT;
+       if (rbdr_intr) {
+               /* Disable RBDR interrupt and schedule softirq */
+               for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
+                       if (!nicvf_is_intr_enabled(nic, NICVF_INTR_RBDR, qidx))
+                               continue;
+                       nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
+                       tasklet_hi_schedule(&nic->rbdr_task);
+                       clear_intr |= ((1 << qidx) << NICVF_INTR_RBDR_SHIFT);
+               }
+       }
+
+       /* Clear interrupts */
+       nicvf_reg_write(nic, NIC_VF_INT, clear_intr);
+       return IRQ_HANDLED;
+}
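+
+/* Note: the hard-irq handlers above only acknowledge and mask the offending
+ * sources; the real work is deferred, CQ servicing to NAPI and RBDR refill
+ * and Qset error recovery to tasklets, so time in hard-irq context stays
+ * short.
+ */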
+
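+/* Returns 1 on success and 0 on failure (boolean style); callers below treat
+ * a zero return as "MSI-X unavailable"
+ */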
+static int nicvf_enable_msix(struct nicvf *nic)
+{
+       int ret, vec;
+
+       nic->num_vec = NIC_VF_MSIX_VECTORS;
+
+       for (vec = 0; vec < nic->num_vec; vec++)
+               nic->msix_entries[vec].entry = vec;
+
+       ret = pci_enable_msix(nic->pdev, nic->msix_entries, nic->num_vec);
+       if (ret) {
+               netdev_err(nic->netdev,
+                          "Request for #%d msix vectors failed\n",
+                          nic->num_vec);
+               return 0;
+       }
+       nic->msix_enabled = 1;
+       return 1;
+}
+
+static void nicvf_disable_msix(struct nicvf *nic)
+{
+       if (nic->msix_enabled) {
+               pci_disable_msix(nic->pdev);
+               nic->msix_enabled = 0;
+               nic->num_vec = 0;
+       }
+}
+
+static int nicvf_register_interrupts(struct nicvf *nic)
+{
+       int irq, free, ret = 0;
+       int vector;
+
+       for_each_cq_irq(irq)
+               sprintf(nic->irq_name[irq], "NICVF%d CQ%d",
+                       nic->vf_id, irq);
+
+       for_each_sq_irq(irq)
+               sprintf(nic->irq_name[irq], "NICVF%d SQ%d",
+                       nic->vf_id, irq - NICVF_INTR_ID_SQ);
+
+       for_each_rbdr_irq(irq)
+               sprintf(nic->irq_name[irq], "NICVF%d RBDR%d",
+                       nic->vf_id, irq - NICVF_INTR_ID_RBDR);
+
+       /* Register all interrupts except mailbox */
+       for (irq = 0; irq < NICVF_INTR_ID_SQ; irq++) {
+               vector = nic->msix_entries[irq].vector;
+               ret = request_irq(vector, nicvf_intr_handler,
+                                 0, nic->irq_name[irq], nic);
+               if (ret)
+                       break;
+               nic->irq_allocated[irq] = true;
+       }
+
+       /* Skip this loop if registration already failed above */
+       for (irq = NICVF_INTR_ID_SQ; !ret && irq < NICVF_INTR_ID_MISC; irq++) {
+               vector = nic->msix_entries[irq].vector;
+               ret = request_irq(vector, nicvf_intr_handler,
+                                 0, nic->irq_name[irq], nic);
+               if (ret)
+                       break;
+               nic->irq_allocated[irq] = true;
+       }
+
+       sprintf(nic->irq_name[NICVF_INTR_ID_QS_ERR],
+               "NICVF%d Qset error", nic->vf_id);
+       if (!ret) {
+               vector = nic->msix_entries[NICVF_INTR_ID_QS_ERR].vector;
+               irq = NICVF_INTR_ID_QS_ERR;
+               ret = request_irq(vector, nicvf_intr_handler,
+                                 0, nic->irq_name[irq], nic);
+               if (!ret)
+                       nic->irq_allocated[irq] = true;
+       }
+
+       if (ret) {
+               netdev_err(nic->netdev, "Request irq failed\n");
+               /* Free only the vectors that were actually requested */
+               for (free = 0; free < nic->num_vec; free++) {
+                       if (!nic->irq_allocated[free])
+                               continue;
+                       free_irq(nic->msix_entries[free].vector, nic);
+                       nic->irq_allocated[free] = false;
+               }
+               return ret;
+       }
+
+       return 0;
+}
+
+static void nicvf_unregister_interrupts(struct nicvf *nic)
+{
+       int irq;
+
+       /* Free registered interrupts */
+       for (irq = 0; irq < nic->num_vec; irq++) {
+               if (nic->irq_allocated[irq])
+                       free_irq(nic->msix_entries[irq].vector, nic);
+               nic->irq_allocated[irq] = false;
+       }
+
+       /* Disable MSI-X */
+       nicvf_disable_msix(nic);
+}
+
+/* Initialize MSI-X vectors and register the MISC interrupt.
+ * Send a READY message to the PF to check if it's alive
+ */
+static int nicvf_register_misc_interrupt(struct nicvf *nic)
+{
+       int ret = 0;
+       int irq = NICVF_INTR_ID_MISC;
+
+       /* Return if mailbox interrupt is already registered */
+       if (nic->msix_enabled)
+               return 0;
+
+       /* Enable MSI-X; return a proper errno on failure */
+       if (!nicvf_enable_msix(nic))
+               return -EIO;
+
+       sprintf(nic->irq_name[irq], "NICVF Mbox");
+       /* Register Misc interrupt */
+       ret = request_irq(nic->msix_entries[irq].vector,
+                         nicvf_misc_intr_handler, 0, nic->irq_name[irq], nic);
+
+       if (ret)
+               return ret;
+       nic->irq_allocated[irq] = true;
+
+       /* Enable mailbox interrupt */
+       nicvf_enable_intr(nic, NICVF_INTR_MBOX, 0);
+
+       /* Check if VF is able to communicate with PF */
+       if (!nicvf_check_pf_ready(nic)) {
+               nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
+               nicvf_unregister_interrupts(nic);
+               return -EIO;
+       }
+
+       return 0;
+}
+
+static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+       struct nicvf *nic = netdev_priv(netdev);
+       int qid = skb_get_queue_mapping(skb);
+       struct netdev_queue *txq = netdev_get_tx_queue(netdev, qid);
+
+       /* Check for minimum packet length */
+       if (skb->len <= ETH_HLEN) {
+               dev_kfree_skb(skb);
+               return NETDEV_TX_OK;
+       }
+
+       if (!nicvf_sq_append_skb(nic, skb)) {
+               /* Ring full: stop the queue (if not already stopped) and let
+                * the stack requeue the skb via NETDEV_TX_BUSY
+                */
+               if (!netif_tx_queue_stopped(txq)) {
+                       netif_tx_stop_queue(txq);
+                       nic->drv_stats.tx_busy++;
+                       if (netif_msg_tx_err(nic))
+                               netdev_warn(netdev,
+                                           "%s: Transmit ring full, stopping SQ%d\n",
+                                           netdev->name, qid);
+               }
+               return NETDEV_TX_BUSY;
+       }
+
+       return NETDEV_TX_OK;
+}
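+
+/* Note: NETDEV_TX_BUSY makes the stack requeue the skb, so it must not be
+ * freed on that path; the stopped queue is expected to be woken again once
+ * send completions reclaim SQ descriptors.
+ */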
+
+int nicvf_stop(struct net_device *netdev)
+{
+       int irq, qidx;
+       struct nicvf *nic = netdev_priv(netdev);
+       struct queue_set *qs = nic->qs;
+       struct nicvf_cq_poll *cq_poll = NULL;
+       union nic_mbx mbx = {};
+
+       mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
+       nicvf_send_msg_to_pf(nic, &mbx);
+
+       netif_carrier_off(netdev);
+       netif_tx_disable(netdev);
+
+       /* Disable RBDR & QS error interrupts */
+       for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
+               nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
+               nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
+       }
+       nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
+       nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);
+
+       /* Wait for pending IRQ handlers to finish */
+       for (irq = 0; irq < nic->num_vec; irq++)
+               synchronize_irq(nic->msix_entries[irq].vector);
+
+       tasklet_kill(&nic->rbdr_task);
+       tasklet_kill(&nic->qs_err_task);
+       if (nic->rb_work_scheduled)
+               cancel_delayed_work_sync(&nic->rbdr_work);
+
+       for (qidx = 0; qidx < nic->qs->cq_cnt; qidx++) {
+               cq_poll = nic->napi[qidx];
+               if (!cq_poll)
+                       continue;
+               nic->napi[qidx] = NULL;
+               napi_synchronize(&cq_poll->napi);
+               /* The poll handler re-enables the CQ interrupt after
+                * napi_complete(), so disable it again here
+                */
+               nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
+               nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);
+               napi_disable(&cq_poll->napi);
+               netif_napi_del(&cq_poll->napi);
+               kfree(cq_poll);
+       }
+
+       /* Free resources */
+       nicvf_config_data_transfer(nic, false);
+
+       /* Disable HW Qset */
+       nicvf_qset_config(nic, false);
+
+       /* disable mailbox interrupt */
+       nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
+
+       nicvf_unregister_interrupts(nic);
+
+       return 0;
+}
+
+int nicvf_open(struct net_device *netdev)
+{
+       int err, qidx;
+       struct nicvf *nic = netdev_priv(netdev);
+       struct queue_set *qs = nic->qs;
+       struct nicvf_cq_poll *cq_poll = NULL;
+
+       nic->mtu = netdev->mtu;
+
+       netif_carrier_off(netdev);
+
+       err = nicvf_register_misc_interrupt(nic);
+       if (err)
+               return err;
+
+       /* Register NAPI handler for processing CQEs */
+       for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
+               cq_poll = kzalloc(sizeof(*cq_poll), GFP_KERNEL);
+               if (!cq_poll) {
+                       err = -ENOMEM;
+                       goto napi_del;
+               }
+               cq_poll->cq_idx = qidx;
+               netif_napi_add(netdev, &cq_poll->napi, nicvf_poll,
+                              NAPI_POLL_WEIGHT);
+               napi_enable(&cq_poll->napi);
+               nic->napi[qidx] = cq_poll;
+       }
+
+       /* Check if we got a MAC address from the PF, else generate a random one */
+       if (is_zero_ether_addr(netdev->dev_addr)) {
+               eth_hw_addr_random(netdev);
+               nicvf_hw_set_mac_addr(nic, netdev);
+       }
+
+       /* Init tasklet for handling Qset err interrupt */
+       tasklet_init(&nic->qs_err_task, nicvf_handle_qs_err,
+                    (unsigned long)nic);
+
+       /* Init RBDR tasklet which will refill RBDR */
+       tasklet_init(&nic->rbdr_task, nicvf_rbdr_task,
+                    (unsigned long)nic);
+       INIT_DELAYED_WORK(&nic->rbdr_work, nicvf_rbdr_work);
+
+       /* Configure CPI algorithm */
+       nic->cpi_alg = cpi_alg;
+       nicvf_config_cpi(nic);
+
+       /* Configure receive side scaling */
+       nicvf_rss_init(nic);
+
+       err = nicvf_register_interrupts(nic);
+       if (err)
+               goto cleanup;
+
+       /* Initialize the queues */
+       err = nicvf_init_resources(nic);
+       if (err)
+               goto cleanup;
+
+       /* Make sure queue initialization writes are visible before
+        * interrupts are enabled
+        */
+       wmb();
+
+       nicvf_reg_write(nic, NIC_VF_INT, -1);
+       /* Enable Qset err interrupt */
+       nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
+
+       /* Enable completion queue interrupt */
+       for (qidx = 0; qidx < qs->cq_cnt; qidx++)
+               nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);
+
+       /* Enable RBDR threshold interrupt */
+       for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
+               nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);
+
+       netif_carrier_on(netdev);
+       netif_tx_start_all_queues(netdev);
+
+       return 0;
+cleanup:
+       nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
+       nicvf_unregister_interrupts(nic);
+napi_del:
+       for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
+               cq_poll = nic->napi[qidx];
+               if (!cq_poll)
+                       continue;
+               napi_disable(&cq_poll->napi);
+               netif_napi_del(&cq_poll->napi);
+               kfree(cq_poll);
+               nic->napi[qidx] = NULL;
+       }
+       return err;
+}
+
+static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
+{
+       union nic_mbx mbx = {};
+
+       mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS;
+       mbx.frs.max_frs = mtu;
+       mbx.frs.vf_id = nic->vf_id;
+
+       return nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
+{
+       struct nicvf *nic = netdev_priv(netdev);
+
+       if (new_mtu > NIC_HW_MAX_FRS)
+               return -EINVAL;
+
+       if (new_mtu < NIC_HW_MIN_FRS)
+               return -EINVAL;
+
+       if (nicvf_update_hw_max_frs(nic, new_mtu))
+               return -EINVAL;
+       netdev->mtu = new_mtu;
+       nic->mtu = new_mtu;
+
+       return 0;
+}
+
+static int nicvf_set_mac_address(struct net_device *netdev, void *p)
+{
+       struct sockaddr *addr = p;
+       struct nicvf *nic = netdev_priv(netdev);
+
+       if (!is_valid_ether_addr(addr->sa_data))
+               return -EADDRNOTAVAIL;
+
+       memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+
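+       /* msix_enabled implies the PF mailbox interrupt is registered, so the
+        * new address can be forwarded to the HW via the PF immediately
+        */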
+       if (nic->msix_enabled)
+               if (nicvf_hw_set_mac_addr(nic, netdev))
+                       return -EBUSY;
+
+       return 0;
+}
+
+void nicvf_update_lmac_stats(struct nicvf *nic)
+{
+       int stat = 0;
+       union nic_mbx mbx = {};
+       int timeout;
+
+       if (!netif_running(nic->netdev))
+               return;
+
+       mbx.bgx_stats.msg = NIC_MBOX_MSG_BGX_STATS;
+       mbx.bgx_stats.vf_id = nic->vf_id;
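+       /* Each stat index is fetched with its own mailbox round trip, waiting
+        * up to ~20ms (10 x 2ms polls) for the PF to ack before moving on
+        */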
+       /* Rx stats */
+       mbx.bgx_stats.rx = 1;
+       while (stat < BGX_RX_STATS_COUNT) {
+               nic->bgx_stats_acked = 0;
+               mbx.bgx_stats.idx = stat;
+               nicvf_send_msg_to_pf(nic, &mbx);
+               timeout = 0;
+               while ((!nic->bgx_stats_acked) && (timeout < 10)) {
+                       msleep(2);
+                       timeout++;
+               }
+               stat++;
+       }
+
+       stat = 0;
+
+       /* Tx stats */
+       mbx.bgx_stats.rx = 0;
+       while (stat < BGX_TX_STATS_COUNT) {
+               nic->bgx_stats_acked = 0;
+               mbx.bgx_stats.idx = stat;
+               nicvf_send_msg_to_pf(nic, &mbx);
+               timeout = 0;
+               while ((!nic->bgx_stats_acked) && (timeout < 10)) {
+                       msleep(2);
+                       timeout++;
+               }
+               stat++;
+       }
+}
+
+void nicvf_update_stats(struct nicvf *nic)
+{
+       int qidx;
+       struct nicvf_hw_stats *stats = &nic->stats;
+       struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
+       struct queue_set *qs = nic->qs;
+
+#define GET_RX_STATS(reg) \
+       nicvf_reg_read(nic, NIC_VNIC_RX_STAT_0_13 | (reg << 3))
+#define GET_TX_STATS(reg) \
+       nicvf_reg_read(nic, NIC_VNIC_TX_STAT_0_4 | (reg << 3))
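+       /* Stat registers sit at an 8-byte stride, hence the (reg << 3)
+        * offset used by the macros above
+        */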
+
+       stats->rx_bytes_ok = GET_RX_STATS(RX_OCTS);
+       stats->rx_ucast_frames_ok = GET_RX_STATS(RX_UCAST);
+       stats->rx_bcast_frames_ok = GET_RX_STATS(RX_BCAST);
+       stats->rx_mcast_frames_ok = GET_RX_STATS(RX_MCAST);
+       stats->rx_fcs_errors = GET_RX_STATS(RX_FCS);
+       stats->rx_l2_errors = GET_RX_STATS(RX_L2ERR);
+       stats->rx_drop_red = GET_RX_STATS(RX_RED);
+       stats->rx_drop_overrun = GET_RX_STATS(RX_ORUN);
+       stats->rx_drop_bcast = GET_RX_STATS(RX_DRP_BCAST);
+       stats->rx_drop_mcast = GET_RX_STATS(RX_DRP_MCAST);
+       stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST);
+       stats->rx_drop_l3_mcast = GET_RX_STATS(RX_DRP_L3MCAST);
+
+       stats->tx_bytes_ok = GET_TX_STATS(TX_OCTS);
+       stats->tx_ucast_frames_ok = GET_TX_STATS(TX_UCAST);
+       stats->tx_bcast_frames_ok = GET_TX_STATS(TX_BCAST);
+       stats->tx_mcast_frames_ok = GET_TX_STATS(TX_MCAST);
+       stats->tx_drops = GET_TX_STATS(TX_DROP);
+
+       drv_stats->rx_frames_ok = stats->rx_ucast_frames_ok +
+                                 stats->rx_bcast_frames_ok +
+                                 stats->rx_mcast_frames_ok;
+       drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok +
+                                 stats->tx_bcast_frames_ok +
+                                 stats->tx_mcast_frames_ok;
+       drv_stats->rx_drops = stats->rx_drop_red +
+                             stats->rx_drop_overrun;
+       drv_stats->tx_drops = stats->tx_drops;
+
+       /* Update RQ and SQ stats */
+       for (qidx = 0; qidx < qs->rq_cnt; qidx++)
+               nicvf_update_rq_stats(nic, qidx);
+       for (qidx = 0; qidx < qs->sq_cnt; qidx++)
+               nicvf_update_sq_stats(nic, qidx);
+}
+
+static struct rtnl_link_stats64 *nicvf_get_stats64(struct net_device *netdev,
+                                           struct rtnl_link_stats64 *stats)
+{
+       struct nicvf *nic = netdev_priv(netdev);
+       struct nicvf_hw_stats *hw_stats = &nic->stats;
+       struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
+
+       nicvf_update_stats(nic);
+
+       stats->rx_bytes = hw_stats->rx_bytes_ok;
+       stats->rx_packets = drv_stats->rx_frames_ok;
+       stats->rx_dropped = drv_stats->rx_drops;
+
+       stats->tx_bytes = hw_stats->tx_bytes_ok;
+       stats->tx_packets = drv_stats->tx_frames_ok;
+       stats->tx_dropped = drv_stats->tx_drops;
+
+       return stats;
+}
+
+static void nicvf_tx_timeout(struct net_device *dev)
+{
+       struct nicvf *nic = netdev_priv(dev);
+
+       if (netif_msg_tx_err(nic))
+               netdev_warn(dev, "%s: Transmit timed out, resetting\n",
+                           dev->name);
+
+       schedule_work(&nic->reset_task);
+}
+
+static void nicvf_reset_task(struct work_struct *work)
+{
+       struct nicvf *nic;
+
+       nic = container_of(work, struct nicvf, reset_task);
+
+       if (!netif_running(nic->netdev))
+               return;
+
+       nicvf_stop(nic->netdev);
+       nicvf_open(nic->netdev);
+       nic->netdev->trans_start = jiffies;
+}
+
+static const struct net_device_ops nicvf_netdev_ops = {
+       .ndo_open               = nicvf_open,
+       .ndo_stop               = nicvf_stop,
+       .ndo_start_xmit         = nicvf_xmit,
+       .ndo_change_mtu         = nicvf_change_mtu,
+       .ndo_set_mac_address    = nicvf_set_mac_address,
+       .ndo_get_stats64        = nicvf_get_stats64,
+       .ndo_tx_timeout         = nicvf_tx_timeout,
+};
+
+static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+       struct device *dev = &pdev->dev;
+       struct net_device *netdev;
+       struct nicvf *nic;
+       struct queue_set *qs;
+       int    err;
+
+       err = pci_enable_device(pdev);
+       if (err) {
+               dev_err(dev, "Failed to enable PCI device\n");
+               return err;
+       }
+
+       err = pci_request_regions(pdev, DRV_NAME);
+       if (err) {
+               dev_err(dev, "PCI request regions failed 0x%x\n", err);
+               goto err_disable_device;
+       }
+
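+       /* Request 48-bit DMA masks; the ThunderX VF presumably addresses up
+        * to 48 bits of DMA space
+        */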
+       err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
+       if (err) {
+               dev_err(dev, "Unable to get usable DMA configuration\n");
+               goto err_release_regions;
+       }
+
+       err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
+       if (err) {
+               dev_err(dev, "unable to get 48-bit DMA for consistent allocations\n");
+               goto err_release_regions;
+       }
+
+       netdev = alloc_etherdev_mqs(sizeof(struct nicvf),
+                                   MAX_RCV_QUEUES_PER_QS,
+                                   MAX_SND_QUEUES_PER_QS);
+       if (!netdev) {
+               err = -ENOMEM;
+               goto err_release_regions;
+       }
+
+       pci_set_drvdata(pdev, netdev);
+
+       SET_NETDEV_DEV(netdev, &pdev->dev);
+
+       nic = netdev_priv(netdev);
+       nic->netdev = netdev;
+       nic->pdev = pdev;
+
+       /* MAP VF's configuration registers */
+       nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
+       if (!nic->reg_base) {
+               dev_err(dev, "Cannot map config register space, aborting\n");
+               err = -ENOMEM;
+               goto err_free_netdev;
+       }
+
+       err = nicvf_set_qset_resources(nic);
+       if (err)
+               goto err_free_netdev;
+
+       qs = nic->qs;
+
+       err = nicvf_set_real_num_queues(netdev, qs->sq_cnt, qs->rq_cnt);
+       if (err)
+               goto err_free_netdev;
+
+       /* Check if PF is alive and get MAC address for this VF */
+       err = nicvf_register_misc_interrupt(nic);
+       if (err)
+               goto err_free_netdev;
+
+       netdev->features |= (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG |
+                            NETIF_F_TSO | NETIF_F_GRO);
+       netdev->hw_features = netdev->features;
+
+       netdev->netdev_ops = &nicvf_netdev_ops;
+
+       INIT_WORK(&nic->reset_task, nicvf_reset_task);
+
+       err = register_netdev(netdev);
+       if (err) {
+               dev_err(dev, "Failed to register netdevice\n");
+               goto err_unregister_interrupts;
+       }
+
+       nic->msg_enable = debug;
+
+       nicvf_set_ethtool_ops(netdev);
+
+       return 0;
+
+err_unregister_interrupts:
+       nicvf_unregister_interrupts(nic);
+err_free_netdev:
+       pci_set_drvdata(pdev, NULL);
+       free_netdev(netdev);
+err_release_regions:
+       pci_release_regions(pdev);
+err_disable_device:
+       pci_disable_device(pdev);
+       return err;
+}
+
+static void nicvf_remove(struct pci_dev *pdev)
+{
+       struct net_device *netdev = pci_get_drvdata(pdev);
+       struct nicvf *nic = netdev_priv(netdev);
+
+       unregister_netdev(netdev);
+       nicvf_unregister_interrupts(nic);
+       pci_set_drvdata(pdev, NULL);
+       free_netdev(netdev);
+       pci_release_regions(pdev);
+       pci_disable_device(pdev);
+}
+
+static struct pci_driver nicvf_driver = {
+       .name = DRV_NAME,
+       .id_table = nicvf_id_table,
+       .probe = nicvf_probe,
+       .remove = nicvf_remove,
+};
+
+static int __init nicvf_init_module(void)
+{
+       pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);
+
+       return pci_register_driver(&nicvf_driver);
+}
+
+static void __exit nicvf_cleanup_module(void)
+{
+       pci_unregister_driver(&nicvf_driver);
+}
+
+module_init(nicvf_init_module);
+module_exit(nicvf_cleanup_module);
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
new file mode 100644 (file)
index 0000000..d69d228
--- /dev/null
@@ -0,0 +1,1545 @@
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/ip.h>
+#include <linux/etherdevice.h>
+#include <net/ip.h>
+#include <net/tso.h>
+
+#include "nic_reg.h"
+#include "nic.h"
+#include "q_struct.h"
+#include "nicvf_queues.h"
+
+struct rbuf_info {
+       struct page *page;
+       void    *data;
+       u64     offset;
+};
+
+#define GET_RBUF_INFO(x) ((struct rbuf_info *)(x - NICVF_RCV_BUF_ALIGN_BYTES))
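+
+/* A receive fragment is laid out as: [pad to cache line][struct rbuf_info]
+ * [NICVF_RCV_BUF_ALIGN_BYTES][packet data]. The HW is handed the packet data
+ * address, and GET_RBUF_INFO() steps back from it to recover the metadata
+ * saved by nicvf_alloc_rcv_buffer().
+ */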
+
+/* Poll a register for a specific value */
+static int nicvf_poll_reg(struct nicvf *nic, int qidx,
+                         u64 reg, int bit_pos, int bits, int val)
+{
+       u64 bit_mask;
+       u64 reg_val;
+       int timeout = 10;
+
+       bit_mask = (1ULL << bits) - 1;
+       bit_mask = (bit_mask << bit_pos);
+
+       while (timeout) {
+               reg_val = nicvf_queue_reg_read(nic, reg, qidx);
+               if (((reg_val & bit_mask) >> bit_pos) == val)
+                       return 0;
+               usleep_range(1000, 2000);
+               timeout--;
+       }
+       netdev_err(nic->netdev, "Poll on reg 0x%llx failed\n", reg);
+       return 1;
+}
+
+/* Allocate memory for a queue's descriptors */
+static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
+                                 int q_len, int desc_size, int align_bytes)
+{
+       dmem->q_len = q_len;
+       dmem->size = (desc_size * q_len) + align_bytes;
+       /* Save address, need it while freeing */
+       dmem->unalign_base = dma_zalloc_coherent(&nic->pdev->dev, dmem->size,
+                                               &dmem->dma, GFP_KERNEL);
+       if (!dmem->unalign_base)
+               return -ENOMEM;
+
+       /* Align the address to an 'align_bytes' boundary */
+       dmem->phys_base = NICVF_ALIGNED_ADDR((u64)dmem->dma, align_bytes);
+       dmem->base = dmem->unalign_base + (dmem->phys_base - dmem->dma);
+       return 0;
+}
+
+/* Free queue's descriptor memory */
+static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
+{
+       if (!dmem)
+               return;
+
+       dma_free_coherent(&nic->pdev->dev, dmem->size,
+                         dmem->unalign_base, dmem->dma);
+       dmem->unalign_base = NULL;
+       dmem->base = NULL;
+}
+
+/* Allocate a buffer for packet reception.
+ * HW returns the memory address a packet was DMA'ed to, not a pointer back
+ * into the RBDR ring, so save the buffer's metadata at the start of the
+ * fragment and hand the HW a cache-line-aligned address past it
+ */
+static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
+                                        u32 buf_len, u64 **rbuf)
+{
+       u64 data;
+       struct rbuf_info *rinfo;
+       int order = get_order(buf_len);
+
+       /* Check if the request can be accommodated in the previously
+        * allocated page
+        */
+       if (nic->rb_page) {
+               if ((nic->rb_page_offset + buf_len + buf_len) >
+                   (PAGE_SIZE << order)) {
+                       nic->rb_page = NULL;
+               } else {
+                       nic->rb_page_offset += buf_len;
+                       get_page(nic->rb_page);
+               }
+       }
+
+       /* Allocate a new page */
+       if (!nic->rb_page) {
+               nic->rb_page = alloc_pages(gfp | __GFP_COMP, order);
+               if (!nic->rb_page) {
+                       netdev_err(nic->netdev, "Failed to allocate new rcv buffer\n");
+                       return -ENOMEM;
+               }
+               nic->rb_page_offset = 0;
+       }
+
+       data = (u64)page_address(nic->rb_page) + nic->rb_page_offset;
+
+       /* Align buffer addr to a cache line, i.e., 128 bytes */
+       rinfo = (struct rbuf_info *)(data + NICVF_RCV_BUF_ALIGN_LEN(data));
+       /* Save page address for refcount updates */
+       rinfo->page = nic->rb_page;
+       /* Store start address for later retrieval */
+       rinfo->data = (void *)data;
+       /* Store alignment offset */
+       rinfo->offset = NICVF_RCV_BUF_ALIGN_LEN(data);
+
+       data += rinfo->offset;
+
+       /* Give next aligned address to hw for DMA */
+       *rbuf = (u64 *)(data + NICVF_RCV_BUF_ALIGN_BYTES);
+       return 0;
+}
+
+/* Retrieve actual buffer start address and build skb for received packet */
+static struct sk_buff *nicvf_rb_ptr_to_skb(struct nicvf *nic,
+                                          u64 rb_ptr, int len)
+{
+       struct sk_buff *skb;
+       struct rbuf_info *rinfo;
+
+       rb_ptr = (u64)phys_to_virt(rb_ptr);
+       /* Get buffer start address and alignment offset */
+       rinfo = GET_RBUF_INFO(rb_ptr);
+
+       /* Now build an skb to give to stack */
+       skb = build_skb(rinfo->data, RCV_FRAG_LEN);
+       if (!skb) {
+               put_page(rinfo->page);
+               return NULL;
+       }
+
+       /* Set correct skb->data */
+       skb_reserve(skb, rinfo->offset + NICVF_RCV_BUF_ALIGN_BYTES);
+
+       prefetch((void *)rb_ptr);
+       return skb;
+}
+
+/* Allocate RBDR ring and populate receive buffers */
+static int  nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
+                           int ring_len, int buf_size)
+{
+       int idx;
+       u64 *rbuf;
+       struct rbdr_entry_t *desc;
+       int err;
+
+       err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len,
+                                    sizeof(struct rbdr_entry_t),
+                                    NICVF_RCV_BUF_ALIGN_BYTES);
+       if (err)
+               return err;
+
+       rbdr->desc = rbdr->dmem.base;
+       /* Buffer size has to be in multiples of 128 bytes */
+       rbdr->dma_size = buf_size;
+       rbdr->enable = true;
+       rbdr->thresh = RBDR_THRESH;
+
+       nic->rb_page = NULL;
+       for (idx = 0; idx < ring_len; idx++) {
+               err = nicvf_alloc_rcv_buffer(nic, GFP_KERNEL, RCV_FRAG_LEN,
+                                            &rbuf);
+               if (err)
+                       return err;
+
+               desc = GET_RBDR_DESC(rbdr, idx);
+               desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
+       }
+       return 0;
+}
+
+/* Free RBDR ring and its receive buffers */
+static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
+{
+       int head, tail;
+       u64 buf_addr;
+       struct rbdr_entry_t *desc;
+       struct rbuf_info *rinfo;
+
+       if (!rbdr)
+               return;
+
+       rbdr->enable = false;
+       if (!rbdr->dmem.base)
+               return;
+
+       head = rbdr->head;
+       tail = rbdr->tail;
+
+       /* Release receive buffer pages */
+       while (head != tail) {
+               desc = GET_RBDR_DESC(rbdr, head);
+               buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
+               rinfo = GET_RBUF_INFO((u64)phys_to_virt(buf_addr));
+               put_page(rinfo->page);
+               head++;
+               head &= (rbdr->dmem.q_len - 1);
+       }
+       /* Release the tail descriptor's buffer page */
+       desc = GET_RBDR_DESC(rbdr, tail);
+       buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
+       rinfo = GET_RBUF_INFO((u64)phys_to_virt(buf_addr));
+       put_page(rinfo->page);
+
+       /* Free RBDR ring */
+       nicvf_free_q_desc_mem(nic, &rbdr->dmem);
+}
+
+/* Refill receive buffer descriptors with new buffers */
+static void nicvf_refill_rbdr(struct nicvf *nic, gfp_t gfp)
+{
+       struct queue_set *qs = nic->qs;
+       int rbdr_idx = qs->rbdr_cnt;
+       int tail, qcount;
+       int refill_rb_cnt;
+       struct rbdr *rbdr;
+       struct rbdr_entry_t *desc;
+       u64 *rbuf;
+       int new_rb = 0;
+
+refill:
+       if (!rbdr_idx)
+               return;
+       rbdr_idx--;
+       rbdr = &qs->rbdr[rbdr_idx];
+       /* Check if it's enabled */
+       if (!rbdr->enable)
+               goto next_rbdr;
+
+       /* Get the number of descriptors to be refilled */
+       qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx);
+       qcount &= 0x7FFFF;
+       /* The doorbell can be rung with at most ring size minus 1 */
+       if (qcount >= (qs->rbdr_len - 1))
+               goto next_rbdr;
+       else
+               refill_rb_cnt = qs->rbdr_len - qcount - 1;
+
+       /* Start filling descs from tail */
+       tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3;
+       while (refill_rb_cnt) {
+               tail++;
+               tail &= (rbdr->dmem.q_len - 1);
+
+               if (nicvf_alloc_rcv_buffer(nic, gfp, RCV_FRAG_LEN, &rbuf))
+                       break;
+
+               desc = GET_RBDR_DESC(rbdr, tail);
+               desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
+               refill_rb_cnt--;
+               new_rb++;
+       }
+
+       /* make sure all memory stores are done before ringing doorbell */
+       smp_wmb();
+
+       /* Check if buffer allocation failed */
+       if (refill_rb_cnt)
+               nic->rb_alloc_fail = true;
+       else
+               nic->rb_alloc_fail = false;
+
+       /* Notify HW */
+       nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
+                             rbdr_idx, new_rb);
+next_rbdr:
+       /* Re-enable RBDR interrupts only if buffer allocation succeeded */
+       if (!nic->rb_alloc_fail && rbdr->enable)
+               nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);
+
+       if (rbdr_idx)
+               goto refill;
+}
+
+/* Alloc rcv buffers in non-atomic mode for a better chance of success */
+void nicvf_rbdr_work(struct work_struct *work)
+{
+       struct nicvf *nic = container_of(work, struct nicvf, rbdr_work.work);
+
+       nicvf_refill_rbdr(nic, GFP_KERNEL);
+       if (nic->rb_alloc_fail)
+               schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
+       else
+               nic->rb_work_scheduled = false;
+}
+
+/* In Softirq context, alloc rcv buffers in atomic mode */
+void nicvf_rbdr_task(unsigned long data)
+{
+       struct nicvf *nic = (struct nicvf *)data;
+
+       nicvf_refill_rbdr(nic, GFP_ATOMIC);
+       if (nic->rb_alloc_fail) {
+               nic->rb_work_scheduled = true;
+               schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
+       }
+}
+
+/* Initialize completion queue */
+static int nicvf_init_cmp_queue(struct nicvf *nic,
+                               struct cmp_queue *cq, int q_len)
+{
+       int err;
+
+       err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE,
+                                    NICVF_CQ_BASE_ALIGN_BYTES);
+       if (err)
+               return err;
+
+       cq->desc = cq->dmem.base;
+       cq->thresh = CMP_QUEUE_CQE_THRESH;
+       /* 0.05 == 1/20; keep the arithmetic integral (no FP in kernel code) */
+       nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH / 20) - 1;
+
+       return 0;
+}
+
+static void nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
+{
+       if (!cq)
+               return;
+       if (!cq->dmem.base)
+               return;
+
+       nicvf_free_q_desc_mem(nic, &cq->dmem);
+}
+
+/* Initialize transmit queue */
+static int nicvf_init_snd_queue(struct nicvf *nic,
+                               struct snd_queue *sq, int q_len)
+{
+       int err;
+
+       err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE,
+                                    NICVF_SQ_BASE_ALIGN_BYTES);
+       if (err)
+               return err;
+
+       sq->desc = sq->dmem.base;
+       sq->skbuff = kcalloc(q_len, sizeof(u64), GFP_KERNEL);
+       if (!sq->skbuff)
+               return -ENOMEM;
+       sq->head = 0;
+       sq->tail = 0;
+       atomic_set(&sq->free_cnt, q_len - 1);
+       sq->thresh = SND_QUEUE_THRESH;
+
+       /* Preallocate memory for TSO segment headers */
+       sq->tso_hdrs = dma_alloc_coherent(&nic->pdev->dev,
+                                         q_len * TSO_HEADER_SIZE,
+                                         &sq->tso_hdrs_phys, GFP_KERNEL);
+       if (!sq->tso_hdrs)
+               return -ENOMEM;
+
+       return 0;
+}
+
+static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
+{
+       if (!sq)
+               return;
+       if (!sq->dmem.base)
+               return;
+
+       if (sq->tso_hdrs)
+               dma_free_coherent(&nic->pdev->dev,
+                                 sq->dmem.q_len * TSO_HEADER_SIZE,
+                                 sq->tso_hdrs, sq->tso_hdrs_phys);
+
+       kfree(sq->skbuff);
+       nicvf_free_q_desc_mem(nic, &sq->dmem);
+}
+
+static void nicvf_reclaim_snd_queue(struct nicvf *nic,
+                                   struct queue_set *qs, int qidx)
+{
+       /* Disable send queue */
+       nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
+       /* Check if SQ is stopped */
+       if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
+               return;
+       /* Reset send queue */
+       nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
+}
+
+static void nicvf_reclaim_rcv_queue(struct nicvf *nic,
+                                   struct queue_set *qs, int qidx)
+{
+       union nic_mbx mbx = {};
+
+       /* Make sure all packets in the pipeline are written back into mem */
+       mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
+       nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+static void nicvf_reclaim_cmp_queue(struct nicvf *nic,
+                                   struct queue_set *qs, int qidx)
+{
+       /* Disable timer threshold (doesn't get reset upon CQ reset) */
+       nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
+       /* Disable completion queue */
+       nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
+       /* Reset completion queue */
+       nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
+}
+
+static void nicvf_reclaim_rbdr(struct nicvf *nic,
+                              struct rbdr *rbdr, int qidx)
+{
+       u64 tmp, fifo_state;
+       int timeout = 10;
+
+       /* Save head and tail pointers for freeing up buffers */
+       rbdr->head = nicvf_queue_reg_read(nic,
+                                         NIC_QSET_RBDR_0_1_HEAD,
+                                         qidx) >> 3;
+       rbdr->tail = nicvf_queue_reg_read(nic,
+                                         NIC_QSET_RBDR_0_1_TAIL,
+                                         qidx) >> 3;
+
+       /* If the RBDR FIFO is in 'FAIL' state then do a reset first
+        * before reclaiming
+        */
+       fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
+       if (((fifo_state >> 62) & 0x03) == 0x3)
+               nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
+                                     qidx, NICVF_RBDR_RESET);
+
+       /* Disable RBDR */
+       nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
+       if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
+               return;
+       while (1) {
+               tmp = nicvf_queue_reg_read(nic,
+                                          NIC_QSET_RBDR_0_1_PREFETCH_STATUS,
+                                          qidx);
+               if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF))
+                       break;
+               usleep_range(1000, 2000);
+               timeout--;
+               if (!timeout) {
+                       netdev_err(nic->netdev,
+                                  "Failed polling on prefetch status\n");
+                       return;
+               }
+       }
+       nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
+                             qidx, NICVF_RBDR_RESET);
+
+       if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
+               return;
+       nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
+       if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
+               return;
+}
+
+/* Configures receive queue */
+static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
+                                  int qidx, bool enable)
+{
+       union nic_mbx mbx = {};
+       struct rcv_queue *rq;
+       struct rq_cfg rq_cfg;
+
+       rq = &qs->rq[qidx];
+       rq->enable = enable;
+
+       /* Disable receive queue */
+       nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);
+
+       if (!rq->enable) {
+               nicvf_reclaim_rcv_queue(nic, qs, qidx);
+               return;
+       }
+
+       rq->cq_qs = qs->vnic_id;
+       rq->cq_idx = qidx;
+       rq->start_rbdr_qs = qs->vnic_id;
+       rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
+       rq->cont_rbdr_qs = qs->vnic_id;
+       rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
+       /* All writes of RBDR data are allocated into the L2 cache as well */
+       rq->caching = 1;
+
+       /* Send a mailbox msg to PF to config RQ */
+       mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
+       mbx.rq.qs_num = qs->vnic_id;
+       mbx.rq.rq_num = qidx;
+       mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
+                         (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
+                         (rq->cont_qs_rbdr_idx << 8) |
+                         (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx);
+       nicvf_send_msg_to_pf(nic, &mbx);
+
+       mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
+       mbx.rq.cfg = (1ULL << 63) | (1ULL << 62) | (qs->vnic_id << 0);
+       nicvf_send_msg_to_pf(nic, &mbx);
+
+       /* RQ drop config
+        * Enable CQ drop to reserve sufficient CQEs for all tx packets
+        */
+       mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
+       mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8);
+       nicvf_send_msg_to_pf(nic, &mbx);
+
+       nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, qidx, 0x00);
+
+       /* Enable Receive queue */
+       rq_cfg.ena = 1;
+       rq_cfg.tcp_ena = 0;
+       nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg);
+}
+
+/* Configures completion queue */
+void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
+                           int qidx, bool enable)
+{
+       struct cmp_queue *cq;
+       struct cq_cfg cq_cfg;
+
+       cq = &qs->cq[qidx];
+       cq->enable = enable;
+
+       if (!cq->enable) {
+               nicvf_reclaim_cmp_queue(nic, qs, qidx);
+               return;
+       }
+
+       /* Reset completion queue */
+       nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
+
+       spin_lock_init(&cq->lock);
+       /* Set completion queue base address */
+       nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE,
+                             qidx, (u64)(cq->dmem.phys_base));
+
+       /* Enable Completion queue */
+       cq_cfg.ena = 1;
+       cq_cfg.reset = 0;
+       cq_cfg.caching = 0;
+       cq_cfg.qsize = CMP_QSIZE;
+       cq_cfg.avg_con = 0;
+       nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(u64 *)&cq_cfg);
+
+       /* Set threshold value for interrupt generation */
+       nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
+       nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2,
+                             qidx, nic->cq_coalesce_usecs);
+}
+
+/* Configures transmit queue */
+static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
+                                  int qidx, bool enable)
+{
+       union nic_mbx mbx = {};
+       struct snd_queue *sq;
+       struct sq_cfg sq_cfg;
+
+       sq = &qs->sq[qidx];
+       sq->enable = enable;
+
+       if (!sq->enable) {
+               nicvf_reclaim_snd_queue(nic, qs, qidx);
+               return;
+       }
+
+       /* Reset send queue */
+       nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
+
+       sq->cq_qs = qs->vnic_id;
+       sq->cq_idx = qidx;
+
+       /* Send a mailbox msg to PF to config SQ */
+       mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
+       mbx.sq.qs_num = qs->vnic_id;
+       mbx.sq.sq_num = qidx;
+       mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
+       nicvf_send_msg_to_pf(nic, &mbx);
+
+       /* Set queue base address */
+       nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE,
+                             qidx, (u64)(sq->dmem.phys_base));
+
+       /* Enable send queue  & set queue size */
+       sq_cfg.ena = 1;
+       sq_cfg.reset = 0;
+       sq_cfg.ldwb = 0;
+       sq_cfg.qsize = SND_QSIZE;
+       sq_cfg.tstmp_bgx_intf = 0;
+       nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg);
+
+       /* Set threshold value for interrupt generation */
+       nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh);
+
+       /* Set queue:cpu affinity for better load distribution */
+       if (cpu_online(qidx)) {
+               cpumask_set_cpu(qidx, &sq->affinity_mask);
+               netif_set_xps_queue(nic->netdev,
+                                   &sq->affinity_mask, qidx);
+       }
+}
+
+/* Configures receive buffer descriptor ring */
+static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs,
+                             int qidx, bool enable)
+{
+       struct rbdr *rbdr;
+       struct rbdr_cfg rbdr_cfg;
+
+       rbdr = &qs->rbdr[qidx];
+       nicvf_reclaim_rbdr(nic, rbdr, qidx);
+       if (!enable)
+               return;
+
+       /* Set descriptor base address */
+       nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE,
+                             qidx, (u64)(rbdr->dmem.phys_base));
+
+       /* Enable RBDR  & set queue size */
+       /* Buffer size should be in multiples of 128 bytes */
+       rbdr_cfg.ena = 1;
+       rbdr_cfg.reset = 0;
+       rbdr_cfg.ldwb = 0;
+       rbdr_cfg.qsize = RBDR_SIZE;
+       rbdr_cfg.avg_con = 0;
+       rbdr_cfg.lines = rbdr->dma_size / 128;
+       nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
+                             qidx, *(u64 *)&rbdr_cfg);
+
+       /* Notify HW */
+       nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
+                             qidx, qs->rbdr_len - 1);
+
+       /* Set threshold value for interrupt generation */
+       nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH,
+                             qidx, rbdr->thresh - 1);
+}
+
+/* Requests PF to assign and enable Qset */
+void nicvf_qset_config(struct nicvf *nic, bool enable)
+{
+       union nic_mbx mbx = {};
+       struct queue_set *qs = nic->qs;
+       struct qs_cfg *qs_cfg;
+
+       if (!qs) {
+               netdev_warn(nic->netdev,
+                           "Qset is still not allocated, don't init queues\n");
+               return;
+       }
+
+       qs->enable = enable;
+       qs->vnic_id = nic->vf_id;
+
+       /* Send a mailbox msg to PF to config Qset */
+       mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
+       mbx.qs.num = qs->vnic_id;
+
+       mbx.qs.cfg = 0;
+       qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
+       if (qs->enable) {
+               qs_cfg->ena = 1;
+#ifdef __BIG_ENDIAN
+               qs_cfg->be = 1;
+#endif
+               qs_cfg->vnic = qs->vnic_id;
+       }
+       nicvf_send_msg_to_pf(nic, &mbx);
+}
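+
+/* Qset assignment is PF-owned configuration, so enable/disable goes over the
+ * mailbox rather than through a VF register write.
+ */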
+
+static void nicvf_free_resources(struct nicvf *nic)
+{
+       int qidx;
+       struct queue_set *qs = nic->qs;
+
+       /* Free receive buffer descriptor ring */
+       for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
+               nicvf_free_rbdr(nic, &qs->rbdr[qidx]);
+
+       /* Free completion queue */
+       for (qidx = 0; qidx < qs->cq_cnt; qidx++)
+               nicvf_free_cmp_queue(nic, &qs->cq[qidx]);
+
+       /* Free send queue */
+       for (qidx = 0; qidx < qs->sq_cnt; qidx++)
+               nicvf_free_snd_queue(nic, &qs->sq[qidx]);
+}
+
+static int nicvf_alloc_resources(struct nicvf *nic)
+{
+       int qidx;
+       struct queue_set *qs = nic->qs;
+
+       /* Alloc receive buffer descriptor ring */
+       for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
+               if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len,
+                                   DMA_BUFFER_LEN))
+                       goto alloc_fail;
+       }
+
+       /* Alloc send queue */
+       for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
+               if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len))
+                       goto alloc_fail;
+       }
+
+       /* Alloc completion queue */
+       for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
+               if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len))
+                       goto alloc_fail;
+       }
+
+       return 0;
+alloc_fail:
+       nicvf_free_resources(nic);
+       return -ENOMEM;
+}
+
+int nicvf_set_qset_resources(struct nicvf *nic)
+{
+       struct queue_set *qs;
+
+       qs = devm_kzalloc(&nic->pdev->dev, sizeof(*qs), GFP_KERNEL);
+       if (!qs)
+               return -ENOMEM;
+       nic->qs = qs;
+
+       /* Set count of each queue */
+       qs->rbdr_cnt = RBDR_CNT;
+       qs->rq_cnt = RCV_QUEUE_CNT;
+       qs->sq_cnt = SND_QUEUE_CNT;
+       qs->cq_cnt = CMP_QUEUE_CNT;
+
+       /* Set queue lengths */
+       qs->rbdr_len = RCV_BUF_COUNT;
+       qs->sq_len = SND_QUEUE_LEN;
+       qs->cq_len = CMP_QUEUE_LEN;
+       return 0;
+}
+
+int nicvf_config_data_transfer(struct nicvf *nic, bool enable)
+{
+       bool disable = false;
+       struct queue_set *qs = nic->qs;
+       int qidx;
+
+       if (!qs)
+               return 0;
+
+       if (enable) {
+               if (nicvf_alloc_resources(nic))
+                       return -ENOMEM;
+
+               for (qidx = 0; qidx < qs->sq_cnt; qidx++)
+                       nicvf_snd_queue_config(nic, qs, qidx, enable);
+               for (qidx = 0; qidx < qs->cq_cnt; qidx++)
+                       nicvf_cmp_queue_config(nic, qs, qidx, enable);
+               for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
+                       nicvf_rbdr_config(nic, qs, qidx, enable);
+               for (qidx = 0; qidx < qs->rq_cnt; qidx++)
+                       nicvf_rcv_queue_config(nic, qs, qidx, enable);
+       } else {
+               for (qidx = 0; qidx < qs->rq_cnt; qidx++)
+                       nicvf_rcv_queue_config(nic, qs, qidx, disable);
+               for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
+                       nicvf_rbdr_config(nic, qs, qidx, disable);
+               for (qidx = 0; qidx < qs->sq_cnt; qidx++)
+                       nicvf_snd_queue_config(nic, qs, qidx, disable);
+               for (qidx = 0; qidx < qs->cq_cnt; qidx++)
+                       nicvf_cmp_queue_config(nic, qs, qidx, disable);
+
+               nicvf_free_resources(nic);
+       }
+
+       return 0;
+}
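+
+/* Teardown order above: RQs first so no new traffic lands, then RBDRs, SQs
+ * and CQs, before freeing the backing memory.
+ */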
+
+/* Get free descriptor(s) from the SQ
+ * Returns the starting descriptor number
+ */
+static inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
+{
+       int qentry;
+
+       qentry = sq->tail;
+       atomic_sub(desc_cnt, &sq->free_cnt);
+       sq->tail += desc_cnt;
+       sq->tail &= (sq->dmem.q_len - 1);
+
+       return qentry;
+}
+
+/* Free descriptor back to SQ for future use */
+void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
+{
+       atomic_add(desc_cnt, &sq->free_cnt);
+       sq->head += desc_cnt;
+       sq->head &= (sq->dmem.q_len - 1);
+}
+
+static inline int nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry)
+{
+       qentry++;
+       qentry &= (sq->dmem.q_len - 1);
+       return qentry;
+}
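+
+/* Head/tail indices wrap with '& (q_len - 1)', which assumes every queue
+ * length is a power of two.
+ */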
+
+void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx)
+{
+       u64 sq_cfg;
+
+       sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
+       sq_cfg |= NICVF_SQ_EN;
+       nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
+       /* Ring doorbell so that H/W restarts processing SQEs */
+       nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
+}
+
+void nicvf_sq_disable(struct nicvf *nic, int qidx)
+{
+       u64 sq_cfg;
+
+       sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
+       sq_cfg &= ~NICVF_SQ_EN;
+       nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
+}
+
+void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq,
+                             int qidx)
+{
+       u64 head;
+       struct sk_buff *skb;
+       struct nicvf *nic = netdev_priv(netdev);
+       struct sq_hdr_subdesc *hdr;
+
+       head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
+       while (sq->head != head) {
+               hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
+               if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
+                       nicvf_put_sq_desc(sq, 1);
+                       continue;
+               }
+               skb = (struct sk_buff *)sq->skbuff[sq->head];
+               atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets);
+               atomic64_add(hdr->tot_len,
+                            (atomic64_t *)&netdev->stats.tx_bytes);
+               dev_kfree_skb_any(skb);
+               nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
+       }
+}
+
+/* Calculate the number of SQ subdescriptors needed to transmit all
+ * segments of this TSO packet.
+ * Taken from the 'Tilera network driver' with a minor modification.
+ */
+static int nicvf_tso_count_subdescs(struct sk_buff *skb)
+{
+       struct skb_shared_info *sh = skb_shinfo(skb);
+       unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+       unsigned int data_len = skb->len - sh_len;
+       unsigned int p_len = sh->gso_size;
+       long f_id = -1;    /* id of the current fragment */
+       long f_size = skb_headlen(skb) - sh_len;  /* current fragment size */
+       long f_used = 0;  /* bytes used from the current fragment */
+       long n;            /* size of the current piece of payload */
+       int num_edescs = 0;
+       int segment;
+
+       for (segment = 0; segment < sh->gso_segs; segment++) {
+               unsigned int p_used = 0;
+
+               /* One edesc for header and for each piece of the payload. */
+               for (num_edescs++; p_used < p_len; num_edescs++) {
+                       /* Advance as needed. */
+                       while (f_used >= f_size) {
+                               f_id++;
+                               f_size = skb_frag_size(&sh->frags[f_id]);
+                               f_used = 0;
+                       }
+
+                       /* Use bytes from the current fragment. */
+                       n = p_len - p_used;
+                       if (n > f_size - f_used)
+                               n = f_size - f_used;
+                       f_used += n;
+                       p_used += n;
+               }
+
+               /* The last segment may be less than gso_size. */
+               data_len -= p_len;
+               if (data_len < p_len)
+                       p_len = data_len;
+       }
+
+       /* '+ gso_segs' adds one SQ_HDR_SUBDESC per segment */
+       return num_edescs + sh->gso_segs;
+}
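+
+/* Worked example: for gso_segs = 2 with each segment's payload contiguous in
+ * one fragment, the loop counts 2 edescs per segment (header gather plus one
+ * payload gather), and the return adds 2 HDR subdescs, giving 6 in total.
+ */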
+
+/* Get the number of SQ descriptors needed to xmit this skb */
+static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
+{
+       int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT;
+
+       if (skb_shinfo(skb)->gso_size) {
+               subdesc_cnt = nicvf_tso_count_subdescs(skb);
+               return subdesc_cnt;
+       }
+
+       if (skb_shinfo(skb)->nr_frags)
+               subdesc_cnt += skb_shinfo(skb)->nr_frags;
+
+       return subdesc_cnt;
+}
+
+/* Add SQ HEADER subdescriptor.
+ * First subdescriptor for every send descriptor.
+ */
+static inline void
+nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry,
+                        int subdesc_cnt, struct sk_buff *skb, int len)
+{
+       int proto;
+       struct sq_hdr_subdesc *hdr;
+
+       hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
+       sq->skbuff[qentry] = (u64)skb;
+
+       memset(hdr, 0, SND_QUEUE_DESC_SIZE);
+       hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
+       /* Enable notification via CQE after processing SQE */
+       hdr->post_cqe = 1;
+       /* No of subdescriptors following this */
+       hdr->subdesc_cnt = subdesc_cnt;
+       hdr->tot_len = len;
+
+       /* Offload checksum calculation to HW */
+       if (skb->ip_summed == CHECKSUM_PARTIAL) {
+               if (skb->protocol != htons(ETH_P_IP))
+                       return;
+
+               hdr->csum_l3 = 1; /* Enable IP csum calculation */
+               hdr->l3_offset = skb_network_offset(skb);
+               hdr->l4_offset = skb_transport_offset(skb);
+
+               proto = ip_hdr(skb)->protocol;
+               switch (proto) {
+               case IPPROTO_TCP:
+                       hdr->csum_l4 = SEND_L4_CSUM_TCP;
+                       break;
+               case IPPROTO_UDP:
+                       hdr->csum_l4 = SEND_L4_CSUM_UDP;
+                       break;
+               case IPPROTO_SCTP:
+                       hdr->csum_l4 = SEND_L4_CSUM_SCTP;
+                       break;
+               }
+       }
+}
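+
+/* Note: the checksum offload block above only handles IPv4; for other
+ * protocols the csum fields are left zeroed and no offload is requested.
+ */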
+
+/* SQ GATHER subdescriptor
+ * Must follow HDR descriptor
+ */
+static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
+                                              int size, u64 data)
+{
+       struct sq_gather_subdesc *gather;
+
+       qentry &= (sq->dmem.q_len - 1);
+       gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry);
+
+       memset(gather, 0, SND_QUEUE_DESC_SIZE);
+       gather->subdesc_type = SQ_DESC_TYPE_GATHER;
+       gather->ld_type = NIC_SEND_LD_TYPE_E_LDWB;
+       gather->size = size;
+       gather->addr = data;
+}
+
+/* Segment a TSO packet into 'gso_size' segments and append
+ * them to SQ for transfer
+ */
+static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
+                              int qentry, struct sk_buff *skb)
+{
+       struct tso_t tso;
+       int seg_subdescs = 0, desc_cnt = 0;
+       int seg_len, total_len, data_left;
+       int hdr_qentry = qentry;
+       int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+
+       tso_start(skb, &tso);
+       total_len = skb->len - hdr_len;
+       while (total_len > 0) {
+               char *hdr;
+
+               /* Save Qentry for adding HDR_SUBDESC at the end */
+               hdr_qentry = qentry;
+
+               data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
+               total_len -= data_left;
+
+               /* Add segment's header */
+               qentry = nicvf_get_nxt_sqentry(sq, qentry);
+               hdr = sq->tso_hdrs + qentry * TSO_HEADER_SIZE;
+               tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
+               nicvf_sq_add_gather_subdesc(sq, qentry, hdr_len,
+                                           sq->tso_hdrs_phys +
+                                           qentry * TSO_HEADER_SIZE);
+               /* HDR_SUBDESC + GATHER */
+               seg_subdescs = 2;
+               seg_len = hdr_len;
+
+               /* Add segment's payload fragments */
+               while (data_left > 0) {
+                       int size;
+
+                       size = min_t(int, tso.size, data_left);
+
+                       qentry = nicvf_get_nxt_sqentry(sq, qentry);
+                       nicvf_sq_add_gather_subdesc(sq, qentry, size,
+                                                   virt_to_phys(tso.data));
+                       seg_subdescs++;
+                       seg_len += size;
+
+                       data_left -= size;
+                       tso_build_data(skb, &tso, size);
+               }
+               nicvf_sq_add_hdr_subdesc(sq, hdr_qentry,
+                                        seg_subdescs - 1, skb, seg_len);
+               sq->skbuff[hdr_qentry] = 0;
+               qentry = nicvf_get_nxt_sqentry(sq, qentry);
+
+               desc_cnt += seg_subdescs;
+       }
+       /* Save SKB in the last segment for freeing */
+       sq->skbuff[hdr_qentry] = (u64)skb;
+
+       /* make sure all memory stores are done before ringing doorbell */
+       smp_wmb();
+
+       /* Inform HW to xmit all TSO segments */
+       nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
+                             skb_get_queue_mapping(skb), desc_cnt);
+       return 1;
+}
+
+/* Append an skb to a SQ for packet transfer.
+ * Returns 1 on success, 0 if there aren't enough free SQ descriptors.
+ */
+int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
+{
+       int i, size;
+       int subdesc_cnt;
+       int sq_num, qentry;
+       struct queue_set *qs = nic->qs;
+       struct snd_queue *sq;
+
+       sq_num = skb_get_queue_mapping(skb);
+       sq = &qs->sq[sq_num];
+
+       subdesc_cnt = nicvf_sq_subdesc_required(nic, skb);
+       if (subdesc_cnt > atomic_read(&sq->free_cnt))
+               goto append_fail;
+
+       qentry = nicvf_get_sq_desc(sq, subdesc_cnt);
+
+       /* Check if it's a TSO packet */
+       if (skb_shinfo(skb)->gso_size)
+               return nicvf_sq_append_tso(nic, sq, qentry, skb);
+
+       /* Add SQ header subdesc */
+       nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, skb, skb->len);
+
+       /* Add SQ gather subdescs */
+       qentry = nicvf_get_nxt_sqentry(sq, qentry);
+       size = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
+       nicvf_sq_add_gather_subdesc(sq, qentry, size, virt_to_phys(skb->data));
+
+       /* Check for scattered buffer */
+       if (!skb_is_nonlinear(skb))
+               goto doorbell;
+
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+               const struct skb_frag_struct *frag;
+
+               frag = &skb_shinfo(skb)->frags[i];
+
+               qentry = nicvf_get_nxt_sqentry(sq, qentry);
+               size = skb_frag_size(frag);
+               nicvf_sq_add_gather_subdesc(sq, qentry, size,
+                                           virt_to_phys(
+                                           skb_frag_address(frag)));
+       }
+
+doorbell:
+       /* make sure all memory stores are done before ringing doorbell */
+       smp_wmb();
+
+       /* Inform HW to xmit new packet */
+       nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
+                             sq_num, subdesc_cnt);
+       return 1;
+
+append_fail:
+       netdev_dbg(nic->netdev, "Not enough SQ descriptors to xmit pkt\n");
+       return 0;
+}
+
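+/* Map a fragment index to the position of its length field in the CQE.
+ * The per-buffer sizes are u16 fields packed into 64-bit CQE words, so on
+ * big-endian hosts the four u16s within each word appear in reverse order,
+ * which is presumably what the swizzle below compensates for:
+ * frag_num(0) == 3, frag_num(1) == 2, frag_num(4) == 7 and so on.
+ */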
+static inline unsigned frag_num(unsigned i)
+{
+#ifdef __BIG_ENDIAN
+       return (i & ~3) + 3 - (i & 3);
+#else
+       return i;
+#endif
+}
+
+/* Returns SKB for a received packet */
+struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
+{
+       int frag;
+       int payload_len = 0;
+       struct sk_buff *skb = NULL;
+       struct sk_buff *skb_frag = NULL;
+       struct sk_buff *prev_frag = NULL;
+       u16 *rb_lens = NULL;
+       u64 *rb_ptrs = NULL;
+
+       rb_lens = (void *)cqe_rx + (3 * sizeof(u64));
+       rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64));
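+       /* Per struct cqe_rx_t: the u16 buffer sizes occupy CQE words W3-W5
+        * and the 64-bit buffer pointers start at word W6, hence the 3- and
+        * 6-word offsets above.
+        */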
+
+       netdev_dbg(nic->netdev, "%s rb_cnt %d rb0_ptr %llx rb0_sz %d\n",
+                  __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz);
+
+       for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
+               payload_len = rb_lens[frag_num(frag)];
+               if (!frag) {
+                       /* First fragment */
+                       skb = nicvf_rb_ptr_to_skb(nic,
+                                                 *rb_ptrs - cqe_rx->align_pad,
+                                                 payload_len);
+                       if (!skb)
+                               return NULL;
+                       skb_reserve(skb, cqe_rx->align_pad);
+                       skb_put(skb, payload_len);
+               } else {
+                       /* Add fragments */
+                       skb_frag = nicvf_rb_ptr_to_skb(nic, *rb_ptrs,
+                                                      payload_len);
+                       if (!skb_frag) {
+                               dev_kfree_skb(skb);
+                               return NULL;
+                       }
+
+                       if (!skb_shinfo(skb)->frag_list)
+                               skb_shinfo(skb)->frag_list = skb_frag;
+                       else
+                               prev_frag->next = skb_frag;
+
+                       prev_frag = skb_frag;
+                       skb->len += payload_len;
+                       skb->data_len += payload_len;
+                       skb_frag->len = payload_len;
+               }
+               /* Next buffer pointer */
+               rb_ptrs++;
+       }
+       return skb;
+}
+
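+/* NIC_VF_ENA_W1S/NIC_VF_ENA_W1C appear to follow the usual
+ * write-1-to-set/write-1-to-clear convention, with per-queue interrupt
+ * bits composed as (1ULL << q_idx) << <type>_SHIFT; e.g. enabling the CQ
+ * interrupt for queue 2 sets bit (1 << 2) << NICVF_INTR_CQ_SHIFT.
+ */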
+/* Enable interrupt */
+void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx)
+{
+       u64 reg_val;
+
+       reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
+
+       switch (int_type) {
+       case NICVF_INTR_CQ:
+               reg_val |= ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
+               break;
+       case NICVF_INTR_SQ:
+               reg_val |= ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
+               break;
+       case NICVF_INTR_RBDR:
+               reg_val |= ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
+               break;
+       case NICVF_INTR_PKT_DROP:
+               reg_val |= (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
+               break;
+       case NICVF_INTR_TCP_TIMER:
+               reg_val |= (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
+               break;
+       case NICVF_INTR_MBOX:
+               reg_val |= (1ULL << NICVF_INTR_MBOX_SHIFT);
+               break;
+       case NICVF_INTR_QS_ERR:
+               reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT);
+               break;
+       default:
+               netdev_err(nic->netdev,
+                          "Failed to enable interrupt: unknown type\n");
+               break;
+       }
+
+       nicvf_reg_write(nic, NIC_VF_ENA_W1S, reg_val);
+}
+
+/* Disable interrupt */
+void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx)
+{
+       u64 reg_val = 0;
+
+       switch (int_type) {
+       case NICVF_INTR_CQ:
+               reg_val |= ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
+               break;
+       case NICVF_INTR_SQ:
+               reg_val |= ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
+               break;
+       case NICVF_INTR_RBDR:
+               reg_val |= ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
+               break;
+       case NICVF_INTR_PKT_DROP:
+               reg_val |= (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
+               break;
+       case NICVF_INTR_TCP_TIMER:
+               reg_val |= (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
+               break;
+       case NICVF_INTR_MBOX:
+               reg_val |= (1ULL << NICVF_INTR_MBOX_SHIFT);
+               break;
+       case NICVF_INTR_QS_ERR:
+               reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT);
+               break;
+       default:
+               netdev_err(nic->netdev,
+                          "Failed to disable interrupt: unknown type\n");
+               break;
+       }
+
+       nicvf_reg_write(nic, NIC_VF_ENA_W1C, reg_val);
+}
+
+/* Clear interrupt */
+void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)
+{
+       u64 reg_val = 0;
+
+       switch (int_type) {
+       case NICVF_INTR_CQ:
+               reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
+               break;
+       case NICVF_INTR_SQ:
+               reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
+               break;
+       case NICVF_INTR_RBDR:
+               reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
+               break;
+       case NICVF_INTR_PKT_DROP:
+               reg_val = (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
+               break;
+       case NICVF_INTR_TCP_TIMER:
+               reg_val = (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
+               break;
+       case NICVF_INTR_MBOX:
+               reg_val = (1ULL << NICVF_INTR_MBOX_SHIFT);
+               break;
+       case NICVF_INTR_QS_ERR:
+               reg_val = (1ULL << NICVF_INTR_QS_ERR_SHIFT);
+               break;
+       default:
+               netdev_err(nic->netdev,
+                          "Failed to clear interrupt: unknown type\n");
+               break;
+       }
+
+       nicvf_reg_write(nic, NIC_VF_INT, reg_val);
+}
+
+/* Check if interrupt is enabled */
+int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx)
+{
+       u64 reg_val;
+       u64 mask = 0xff;
+
+       reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
+
+       switch (int_type) {
+       case NICVF_INTR_CQ:
+               mask = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
+               break;
+       case NICVF_INTR_SQ:
+               mask = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
+               break;
+       case NICVF_INTR_RBDR:
+               mask = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
+               break;
+       case NICVF_INTR_PKT_DROP:
+               mask = NICVF_INTR_PKT_DROP_MASK;
+               break;
+       case NICVF_INTR_TCP_TIMER:
+               mask = NICVF_INTR_TCP_TIMER_MASK;
+               break;
+       case NICVF_INTR_MBOX:
+               mask = NICVF_INTR_MBOX_MASK;
+               break;
+       case NICVF_INTR_QS_ERR:
+               mask = NICVF_INTR_QS_ERR_MASK;
+               break;
+       default:
+               netdev_err(nic->netdev,
+                          "Failed to check interrupt enable: unknown type\n");
+               break;
+       }
+
+       return (reg_val & mask);
+}
+
+void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx)
+{
+       struct rcv_queue *rq;
+
+#define GET_RQ_STATS(reg) \
+       nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\
+                           (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))
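+/* The macro composes the CSR address as base | (queue index <<
+ * NIC_Q_NUM_SHIFT) | (stat selector << 3), i.e. consecutive 8-byte stat
+ * registers per queue; e.g. reg = RQ_SQ_STATS_PKTS reads the packet
+ * counter (layout inferred from the macro, not from a datasheet).
+ */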
+
+       rq = &nic->qs->rq[rq_idx];
+       rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS);
+       rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS);
+}
+
+void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
+{
+       struct snd_queue *sq;
+
+#define GET_SQ_STATS(reg) \
+       nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\
+                           (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))
+
+       sq = &nic->qs->sq[sq_idx];
+       sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS);
+       sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS);
+}
+
+/* Check for errors in the receive completion queue entry */
+int nicvf_check_cqe_rx_errs(struct nicvf *nic,
+                           struct cmp_queue *cq, struct cqe_rx_t *cqe_rx)
+{
+       struct cmp_queue_stats *stats = &cq->stats;
+
+       if (!cqe_rx->err_level && !cqe_rx->err_opcode) {
+               stats->rx.errop.good++;
+               return 0;
+       }
+
+       if (netif_msg_rx_err(nic))
+               netdev_err(nic->netdev,
+                          "%s: RX error CQE err_level 0x%x err_opcode 0x%x\n",
+                          nic->netdev->name,
+                          cqe_rx->err_level, cqe_rx->err_opcode);
+
+       switch (cqe_rx->err_level) {
+       case CQ_ERRLVL_MAC:
+               stats->rx.errlvl.mac_errs++;
+               break;
+       case CQ_ERRLVL_L2:
+               stats->rx.errlvl.l2_errs++;
+               break;
+       case CQ_ERRLVL_L3:
+               stats->rx.errlvl.l3_errs++;
+               break;
+       case CQ_ERRLVL_L4:
+               stats->rx.errlvl.l4_errs++;
+               break;
+       }
+
+       switch (cqe_rx->err_opcode) {
+       case CQ_RX_ERROP_RE_PARTIAL:
+               stats->rx.errop.partial_pkts++;
+               break;
+       case CQ_RX_ERROP_RE_JABBER:
+               stats->rx.errop.jabber_errs++;
+               break;
+       case CQ_RX_ERROP_RE_FCS:
+               stats->rx.errop.fcs_errs++;
+               break;
+       case CQ_RX_ERROP_RE_TERMINATE:
+               stats->rx.errop.terminate_errs++;
+               break;
+       case CQ_RX_ERROP_RE_RX_CTL:
+               stats->rx.errop.bgx_rx_errs++;
+               break;
+       case CQ_RX_ERROP_PREL2_ERR:
+               stats->rx.errop.prel2_errs++;
+               break;
+       case CQ_RX_ERROP_L2_FRAGMENT:
+               stats->rx.errop.l2_frags++;
+               break;
+       case CQ_RX_ERROP_L2_OVERRUN:
+               stats->rx.errop.l2_overruns++;
+               break;
+       case CQ_RX_ERROP_L2_PFCS:
+               stats->rx.errop.l2_pfcs++;
+               break;
+       case CQ_RX_ERROP_L2_PUNY:
+               stats->rx.errop.l2_puny++;
+               break;
+       case CQ_RX_ERROP_L2_MAL:
+               stats->rx.errop.l2_hdr_malformed++;
+               break;
+       case CQ_RX_ERROP_L2_OVERSIZE:
+               stats->rx.errop.l2_oversize++;
+               break;
+       case CQ_RX_ERROP_L2_UNDERSIZE:
+               stats->rx.errop.l2_undersize++;
+               break;
+       case CQ_RX_ERROP_L2_LENMISM:
+               stats->rx.errop.l2_len_mismatch++;
+               break;
+       case CQ_RX_ERROP_L2_PCLP:
+               stats->rx.errop.l2_pclp++;
+               break;
+       case CQ_RX_ERROP_IP_NOT:
+               stats->rx.errop.non_ip++;
+               break;
+       case CQ_RX_ERROP_IP_CSUM_ERR:
+               stats->rx.errop.ip_csum_err++;
+               break;
+       case CQ_RX_ERROP_IP_MAL:
+               stats->rx.errop.ip_hdr_malformed++;
+               break;
+       case CQ_RX_ERROP_IP_MALD:
+               stats->rx.errop.ip_payload_malformed++;
+               break;
+       case CQ_RX_ERROP_IP_HOP:
+               stats->rx.errop.ip_hop_errs++;
+               break;
+       case CQ_RX_ERROP_L3_ICRC:
+               stats->rx.errop.l3_icrc_errs++;
+               break;
+       case CQ_RX_ERROP_L3_PCLP:
+               stats->rx.errop.l3_pclp++;
+               break;
+       case CQ_RX_ERROP_L4_MAL:
+               stats->rx.errop.l4_malformed++;
+               break;
+       case CQ_RX_ERROP_L4_CHK:
+               stats->rx.errop.l4_csum_errs++;
+               break;
+       case CQ_RX_ERROP_UDP_LEN:
+               stats->rx.errop.udp_len_err++;
+               break;
+       case CQ_RX_ERROP_L4_PORT:
+               stats->rx.errop.bad_l4_port++;
+               break;
+       case CQ_RX_ERROP_TCP_FLAG:
+               stats->rx.errop.bad_tcp_flag++;
+               break;
+       case CQ_RX_ERROP_TCP_OFFSET:
+               stats->rx.errop.tcp_offset_errs++;
+               break;
+       case CQ_RX_ERROP_L4_PCLP:
+               stats->rx.errop.l4_pclp++;
+               break;
+       case CQ_RX_ERROP_RBDR_TRUNC:
+               stats->rx.errop.pkt_truncated++;
+               break;
+       }
+
+       return 1;
+}
+
+/* Check for errors in the send completion queue entry */
+int nicvf_check_cqe_tx_errs(struct nicvf *nic,
+                           struct cmp_queue *cq, struct cqe_send_t *cqe_tx)
+{
+       struct cmp_queue_stats *stats = &cq->stats;
+
+       switch (cqe_tx->send_status) {
+       case CQ_TX_ERROP_GOOD:
+               stats->tx.good++;
+               return 0;
+       case CQ_TX_ERROP_DESC_FAULT:
+               stats->tx.desc_fault++;
+               break;
+       case CQ_TX_ERROP_HDR_CONS_ERR:
+               stats->tx.hdr_cons_err++;
+               break;
+       case CQ_TX_ERROP_SUBDC_ERR:
+               stats->tx.subdesc_err++;
+               break;
+       case CQ_TX_ERROP_IMM_SIZE_OFLOW:
+               stats->tx.imm_size_oflow++;
+               break;
+       case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
+               stats->tx.data_seq_err++;
+               break;
+       case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
+               stats->tx.mem_seq_err++;
+               break;
+       case CQ_TX_ERROP_LOCK_VIOL:
+               stats->tx.lock_viol++;
+               break;
+       case CQ_TX_ERROP_DATA_FAULT:
+               stats->tx.data_fault++;
+               break;
+       case CQ_TX_ERROP_TSTMP_CONFLICT:
+               stats->tx.tstmp_conflict++;
+               break;
+       case CQ_TX_ERROP_TSTMP_TIMEOUT:
+               stats->tx.tstmp_timeout++;
+               break;
+       case CQ_TX_ERROP_MEM_FAULT:
+               stats->tx.mem_fault++;
+               break;
+       case CQ_TX_ERROP_CK_OVERLAP:
+               stats->tx.csum_overlap++;
+               break;
+       case CQ_TX_ERROP_CK_OFLOW:
+               stats->tx.csum_overflow++;
+               break;
+       }
+
+       return 1;
+}
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
new file mode 100644 (file)
index 0000000..8341bdf
--- /dev/null
@@ -0,0 +1,381 @@
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef NICVF_QUEUES_H
+#define NICVF_QUEUES_H
+
+#include <linux/netdevice.h>
+#include "q_struct.h"
+
+#define MAX_QUEUE_SET                  128
+#define MAX_RCV_QUEUES_PER_QS          8
+#define MAX_RCV_BUF_DESC_RINGS_PER_QS  2
+#define MAX_SND_QUEUES_PER_QS          8
+#define MAX_CMP_QUEUES_PER_QS          8
+
+/* VF's queue interrupt ranges */
+#define        NICVF_INTR_ID_CQ                0
+#define        NICVF_INTR_ID_SQ                8
+#define        NICVF_INTR_ID_RBDR              16
+#define        NICVF_INTR_ID_MISC              18
+#define        NICVF_INTR_ID_QS_ERR            19
+
+#define        for_each_cq_irq(irq)    \
+       for (irq = NICVF_INTR_ID_CQ; irq < NICVF_INTR_ID_SQ; irq++)
+#define        for_each_sq_irq(irq)    \
+       for (irq = NICVF_INTR_ID_SQ; irq < NICVF_INTR_ID_RBDR; irq++)
+#define        for_each_rbdr_irq(irq)  \
+       for (irq = NICVF_INTR_ID_RBDR; irq < NICVF_INTR_ID_MISC; irq++)
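+/* With the IDs above, for_each_cq_irq() walks IRQ indices 0-7,
+ * for_each_sq_irq() walks 8-15 and for_each_rbdr_irq() walks 16-17.
+ */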
+
+#define RBDR_SIZE0             0ULL /* 8K entries */
+#define RBDR_SIZE1             1ULL /* 16K entries */
+#define RBDR_SIZE2             2ULL /* 32K entries */
+#define RBDR_SIZE3             3ULL /* 64K entries */
+#define RBDR_SIZE4             4ULL /* 128K entries */
+#define RBDR_SIZE5             5ULL /* 256K entries */
+#define RBDR_SIZE6             6ULL /* 512K entries */
+
+#define SND_QUEUE_SIZE0                0ULL /* 1K entries */
+#define SND_QUEUE_SIZE1                1ULL /* 2K entries */
+#define SND_QUEUE_SIZE2                2ULL /* 4K entries */
+#define SND_QUEUE_SIZE3                3ULL /* 8K entries */
+#define SND_QUEUE_SIZE4                4ULL /* 16K entries */
+#define SND_QUEUE_SIZE5                5ULL /* 32K entries */
+#define SND_QUEUE_SIZE6                6ULL /* 64K entries */
+
+#define CMP_QUEUE_SIZE0                0ULL /* 1K entries */
+#define CMP_QUEUE_SIZE1                1ULL /* 2K entries */
+#define CMP_QUEUE_SIZE2                2ULL /* 4K entries */
+#define CMP_QUEUE_SIZE3                3ULL /* 8K entries */
+#define CMP_QUEUE_SIZE4                4ULL /* 16K entries */
+#define CMP_QUEUE_SIZE5                5ULL /* 32K entries */
+#define CMP_QUEUE_SIZE6                6ULL /* 64K entries */
+
+/* Default queue count per QS, its lengths and threshold values */
+#define RBDR_CNT               1
+#define RCV_QUEUE_CNT          8
+#define SND_QUEUE_CNT          8
+#define CMP_QUEUE_CNT          8 /* Max of RCV and SND qcount */
+
+#define SND_QSIZE              SND_QUEUE_SIZE4
+#define SND_QUEUE_LEN          (1ULL << (SND_QSIZE + 10))
+#define MAX_SND_QUEUE_LEN      (1ULL << (SND_QUEUE_SIZE6 + 10))
+#define SND_QUEUE_THRESH       2ULL
+#define MIN_SQ_DESC_PER_PKT_XMIT       2
+/* One CQE per transmitted packet since timestamping is not enabled;
+ * it would be 2 otherwise.
+ */
+#define MAX_CQE_PER_PKT_XMIT           1
+
+#define CMP_QSIZE              CMP_QUEUE_SIZE4
+#define CMP_QUEUE_LEN          (1ULL << (CMP_QSIZE + 10))
+#define CMP_QUEUE_CQE_THRESH   0
+#define CMP_QUEUE_TIMER_THRESH 220 /* 10 usec */
+
+#define RBDR_SIZE              RBDR_SIZE0
+#define RCV_BUF_COUNT          (1ULL << (RBDR_SIZE + 13))
+#define MAX_RCV_BUF_COUNT      (1ULL << (RBDR_SIZE6 + 13))
+#define RBDR_THRESH            (RCV_BUF_COUNT / 2)
+#define DMA_BUFFER_LEN         2048 /* In multiples of 128 bytes */
+#define RCV_FRAG_LEN   (SKB_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD) + \
+                        SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + \
+                        (NICVF_RCV_BUF_ALIGN_BYTES * 2))
+#define RCV_DATA_OFFSET                NICVF_RCV_BUF_ALIGN_BYTES
+
+#define MAX_CQES_FOR_TX                ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \
+                                MAX_CQE_PER_PKT_XMIT)
+#define RQ_CQ_DROP             ((CMP_QUEUE_LEN - MAX_CQES_FOR_TX) / 256)
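+/* Worked example with the defaults above: SND_QSIZE = 4 gives
+ * SND_QUEUE_LEN = 1 << 14 = 16384 entries, so MAX_CQES_FOR_TX =
+ * (16384 / 2) * 1 = 8192 and, with CMP_QUEUE_LEN also 16384,
+ * RQ_CQ_DROP = (16384 - 8192) / 256 = 32.
+ */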
+
+/* Descriptor size in bytes */
+#define SND_QUEUE_DESC_SIZE    16
+#define CMP_QUEUE_DESC_SIZE    512
+
+/* Buffer / descriptor alignments */
+#define NICVF_RCV_BUF_ALIGN            7
+#define NICVF_RCV_BUF_ALIGN_BYTES      (1ULL << NICVF_RCV_BUF_ALIGN)
+#define NICVF_CQ_BASE_ALIGN_BYTES      512  /* 9 bits */
+#define NICVF_SQ_BASE_ALIGN_BYTES      128  /* 7 bits */
+
+#define NICVF_ALIGNED_ADDR(ADDR, ALIGN_BYTES)  ALIGN(ADDR, ALIGN_BYTES)
+#define NICVF_ADDR_ALIGN_LEN(ADDR, BYTES)\
+       (NICVF_ALIGNED_ADDR(ADDR, BYTES) - BYTES)
+#define NICVF_RCV_BUF_ALIGN_LEN(X)\
+       (NICVF_ALIGNED_ADDR(X, NICVF_RCV_BUF_ALIGN_BYTES) - X)
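+/* E.g. with NICVF_RCV_BUF_ALIGN_BYTES = 128,
+ * NICVF_RCV_BUF_ALIGN_LEN(0x1010) = ALIGN(0x1010, 128) - 0x1010 =
+ * 0x1080 - 0x1010 = 0x70 bytes of padding to the next 128-byte boundary.
+ */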
+
+/* Queue enable/disable */
+#define NICVF_SQ_EN            BIT_ULL(19)
+
+/* Queue reset */
+#define NICVF_CQ_RESET         BIT_ULL(41)
+#define NICVF_SQ_RESET         BIT_ULL(17)
+#define NICVF_RBDR_RESET       BIT_ULL(43)
+
+enum CQ_RX_ERRLVL_E {
+       CQ_ERRLVL_MAC,
+       CQ_ERRLVL_L2,
+       CQ_ERRLVL_L3,
+       CQ_ERRLVL_L4,
+};
+
+enum CQ_RX_ERROP_E {
+       CQ_RX_ERROP_RE_NONE = 0x0,
+       CQ_RX_ERROP_RE_PARTIAL = 0x1,
+       CQ_RX_ERROP_RE_JABBER = 0x2,
+       CQ_RX_ERROP_RE_FCS = 0x7,
+       CQ_RX_ERROP_RE_TERMINATE = 0x9,
+       CQ_RX_ERROP_RE_RX_CTL = 0xb,
+       CQ_RX_ERROP_PREL2_ERR = 0x1f,
+       CQ_RX_ERROP_L2_FRAGMENT = 0x20,
+       CQ_RX_ERROP_L2_OVERRUN = 0x21,
+       CQ_RX_ERROP_L2_PFCS = 0x22,
+       CQ_RX_ERROP_L2_PUNY = 0x23,
+       CQ_RX_ERROP_L2_MAL = 0x24,
+       CQ_RX_ERROP_L2_OVERSIZE = 0x25,
+       CQ_RX_ERROP_L2_UNDERSIZE = 0x26,
+       CQ_RX_ERROP_L2_LENMISM = 0x27,
+       CQ_RX_ERROP_L2_PCLP = 0x28,
+       CQ_RX_ERROP_IP_NOT = 0x41,
+       CQ_RX_ERROP_IP_CSUM_ERR = 0x42,
+       CQ_RX_ERROP_IP_MAL = 0x43,
+       CQ_RX_ERROP_IP_MALD = 0x44,
+       CQ_RX_ERROP_IP_HOP = 0x45,
+       CQ_RX_ERROP_L3_ICRC = 0x46,
+       CQ_RX_ERROP_L3_PCLP = 0x47,
+       CQ_RX_ERROP_L4_MAL = 0x61,
+       CQ_RX_ERROP_L4_CHK = 0x62,
+       CQ_RX_ERROP_UDP_LEN = 0x63,
+       CQ_RX_ERROP_L4_PORT = 0x64,
+       CQ_RX_ERROP_TCP_FLAG = 0x65,
+       CQ_RX_ERROP_TCP_OFFSET = 0x66,
+       CQ_RX_ERROP_L4_PCLP = 0x67,
+       CQ_RX_ERROP_RBDR_TRUNC = 0x70,
+};
+
+enum CQ_TX_ERROP_E {
+       CQ_TX_ERROP_GOOD = 0x0,
+       CQ_TX_ERROP_DESC_FAULT = 0x10,
+       CQ_TX_ERROP_HDR_CONS_ERR = 0x11,
+       CQ_TX_ERROP_SUBDC_ERR = 0x12,
+       CQ_TX_ERROP_IMM_SIZE_OFLOW = 0x80,
+       CQ_TX_ERROP_DATA_SEQUENCE_ERR = 0x81,
+       CQ_TX_ERROP_MEM_SEQUENCE_ERR = 0x82,
+       CQ_TX_ERROP_LOCK_VIOL = 0x83,
+       CQ_TX_ERROP_DATA_FAULT = 0x84,
+       CQ_TX_ERROP_TSTMP_CONFLICT = 0x85,
+       CQ_TX_ERROP_TSTMP_TIMEOUT = 0x86,
+       CQ_TX_ERROP_MEM_FAULT = 0x87,
+       CQ_TX_ERROP_CK_OVERLAP = 0x88,
+       CQ_TX_ERROP_CK_OFLOW = 0x89,
+       CQ_TX_ERROP_ENUM_LAST = 0x8a,
+};
+
+struct cmp_queue_stats {
+       struct rx_stats {
+               struct {
+                       u64 mac_errs;
+                       u64 l2_errs;
+                       u64 l3_errs;
+                       u64 l4_errs;
+               } errlvl;
+               struct {
+                       u64 good;
+                       u64 partial_pkts;
+                       u64 jabber_errs;
+                       u64 fcs_errs;
+                       u64 terminate_errs;
+                       u64 bgx_rx_errs;
+                       u64 prel2_errs;
+                       u64 l2_frags;
+                       u64 l2_overruns;
+                       u64 l2_pfcs;
+                       u64 l2_puny;
+                       u64 l2_hdr_malformed;
+                       u64 l2_oversize;
+                       u64 l2_undersize;
+                       u64 l2_len_mismatch;
+                       u64 l2_pclp;
+                       u64 non_ip;
+                       u64 ip_csum_err;
+                       u64 ip_hdr_malformed;
+                       u64 ip_payload_malformed;
+                       u64 ip_hop_errs;
+                       u64 l3_icrc_errs;
+                       u64 l3_pclp;
+                       u64 l4_malformed;
+                       u64 l4_csum_errs;
+                       u64 udp_len_err;
+                       u64 bad_l4_port;
+                       u64 bad_tcp_flag;
+                       u64 tcp_offset_errs;
+                       u64 l4_pclp;
+                       u64 pkt_truncated;
+               } errop;
+       } rx;
+       struct tx_stats {
+               u64 good;
+               u64 desc_fault;
+               u64 hdr_cons_err;
+               u64 subdesc_err;
+               u64 imm_size_oflow;
+               u64 data_seq_err;
+               u64 mem_seq_err;
+               u64 lock_viol;
+               u64 data_fault;
+               u64 tstmp_conflict;
+               u64 tstmp_timeout;
+               u64 mem_fault;
+               u64 csum_overlap;
+               u64 csum_overflow;
+       } tx;
+} ____cacheline_aligned_in_smp;
+
+enum RQ_SQ_STATS {
+       RQ_SQ_STATS_OCTS,
+       RQ_SQ_STATS_PKTS,
+};
+
+struct rx_tx_queue_stats {
+       u64     bytes;
+       u64     pkts;
+} ____cacheline_aligned_in_smp;
+
+struct q_desc_mem {
+       dma_addr_t      dma;
+       u64             size;
+       u16             q_len;
+       dma_addr_t      phys_base;
+       void            *base;
+       void            *unalign_base;
+};
+
+struct rbdr {
+       bool            enable;
+       u32             dma_size;
+       u32             frag_len;
+       u32             thresh;         /* Threshold level for interrupt */
+       void            *desc;
+       u32             head;
+       u32             tail;
+       struct q_desc_mem   dmem;
+} ____cacheline_aligned_in_smp;
+
+struct rcv_queue {
+       bool            enable;
+       struct  rbdr    *rbdr_start;
+       struct  rbdr    *rbdr_cont;
+       bool            en_tcp_reassembly;
+       u8              cq_qs;  /* CQ's QS to which this RQ is assigned */
+       u8              cq_idx; /* CQ index (0 to 7) in the QS */
+       u8              cont_rbdr_qs;      /* Continue buffer ptrs - QS num */
+       u8              cont_qs_rbdr_idx;  /* RBDR idx in the cont QS */
+       u8              start_rbdr_qs;     /* First buffer ptrs - QS num */
+       u8              start_qs_rbdr_idx; /* RBDR idx in the above QS */
+       u8              caching;
+       struct          rx_tx_queue_stats stats;
+} ____cacheline_aligned_in_smp;
+
+struct cmp_queue {
+       bool            enable;
+       u16             thresh;
+       spinlock_t      lock;  /* lock to serialize processing CQEs */
+       void            *desc;
+       struct q_desc_mem   dmem;
+       struct cmp_queue_stats  stats;
+} ____cacheline_aligned_in_smp;
+
+struct snd_queue {
+       bool            enable;
+       u8              cq_qs;  /* CQ's QS to which this SQ is pointing */
+       u8              cq_idx; /* CQ index (0 to 7) in the above QS */
+       u16             thresh;
+       atomic_t        free_cnt;
+       u32             head;
+       u32             tail;
+       u64             *skbuff;
+       void            *desc;
+
+#define        TSO_HEADER_SIZE 128
+       /* For TSO segment's header */
+       char            *tso_hdrs;
+       dma_addr_t      tso_hdrs_phys;
+
+       cpumask_t       affinity_mask;
+       struct q_desc_mem   dmem;
+       struct rx_tx_queue_stats stats;
+} ____cacheline_aligned_in_smp;
+
+struct queue_set {
+       bool            enable;
+       bool            be_en;
+       u8              vnic_id;
+       u8              rq_cnt;
+       u8              cq_cnt;
+       u64             cq_len;
+       u8              sq_cnt;
+       u64             sq_len;
+       u8              rbdr_cnt;
+       u64             rbdr_len;
+       struct  rcv_queue       rq[MAX_RCV_QUEUES_PER_QS];
+       struct  cmp_queue       cq[MAX_CMP_QUEUES_PER_QS];
+       struct  snd_queue       sq[MAX_SND_QUEUES_PER_QS];
+       struct  rbdr            rbdr[MAX_RCV_BUF_DESC_RINGS_PER_QS];
+} ____cacheline_aligned_in_smp;
+
+#define GET_RBDR_DESC(RING, idx)\
+               (&(((struct rbdr_entry_t *)((RING)->desc))[idx]))
+#define GET_SQ_DESC(RING, idx)\
+               (&(((struct sq_hdr_subdesc *)((RING)->desc))[idx]))
+#define GET_CQ_DESC(RING, idx)\
+               (&(((union cq_desc_t *)((RING)->desc))[idx]))
+
+/* CQ status bits */
+#define        CQ_WR_FULL      BIT(26)
+#define        CQ_WR_DISABLE   BIT(25)
+#define        CQ_WR_FAULT     BIT(24)
+#define        CQ_CQE_COUNT    (0xFFFF << 0)
+
+#define        CQ_ERR_MASK     (CQ_WR_FULL | CQ_WR_DISABLE | CQ_WR_FAULT)
+
+int nicvf_set_qset_resources(struct nicvf *nic);
+int nicvf_config_data_transfer(struct nicvf *nic, bool enable);
+void nicvf_qset_config(struct nicvf *nic, bool enable);
+void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
+                           int qidx, bool enable);
+
+void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx);
+void nicvf_sq_disable(struct nicvf *nic, int qidx);
+void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt);
+void nicvf_sq_free_used_descs(struct net_device *netdev,
+                             struct snd_queue *sq, int qidx);
+int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb);
+
+struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx);
+void nicvf_rbdr_task(unsigned long data);
+void nicvf_rbdr_work(struct work_struct *work);
+
+void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx);
+void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx);
+void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx);
+int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx);
+
+/* Register access APIs */
+void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val);
+u64  nicvf_reg_read(struct nicvf *nic, u64 offset);
+void nicvf_qset_reg_write(struct nicvf *nic, u64 offset, u64 val);
+u64 nicvf_qset_reg_read(struct nicvf *nic, u64 offset);
+void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
+                          u64 qidx, u64 val);
+u64  nicvf_queue_reg_read(struct nicvf *nic,
+                         u64 offset, u64 qidx);
+
+/* Stats */
+void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx);
+void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx);
+int nicvf_check_cqe_rx_errs(struct nicvf *nic,
+                           struct cmp_queue *cq, struct cqe_rx_t *cqe_rx);
+int nicvf_check_cqe_tx_errs(struct nicvf *nic,
+                           struct cmp_queue *cq, struct cqe_send_t *cqe_tx);
+#endif /* NICVF_QUEUES_H */
diff --git a/drivers/net/ethernet/cavium/thunder/q_struct.h b/drivers/net/ethernet/cavium/thunder/q_struct.h
new file mode 100644 (file)
index 0000000..3c1de97
--- /dev/null
@@ -0,0 +1,701 @@
+/*
+ * This file contains HW queue descriptor formats, config register
+ * structures, etc.
+ *
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef Q_STRUCT_H
+#define Q_STRUCT_H
+
+/* Load transaction types for reading segment bytes specified by
+ * NIC_SEND_GATHER_S[LD_TYPE].
+ */
+enum nic_send_ld_type_e {
+       NIC_SEND_LD_TYPE_E_LDD = 0x0,
+       NIC_SEND_LD_TYPE_E_LDT = 0x1,
+       NIC_SEND_LD_TYPE_E_LDWB = 0x2,
+       NIC_SEND_LD_TYPE_E_ENUM_LAST = 0x3,
+};
+
+enum ether_type_algorithm {
+       ETYPE_ALG_NONE = 0x0,
+       ETYPE_ALG_SKIP = 0x1,
+       ETYPE_ALG_ENDPARSE = 0x2,
+       ETYPE_ALG_VLAN = 0x3,
+       ETYPE_ALG_VLAN_STRIP = 0x4,
+};
+
+enum layer3_type {
+       L3TYPE_NONE = 0x00,
+       L3TYPE_GRH = 0x01,
+       L3TYPE_IPV4 = 0x04,
+       L3TYPE_IPV4_OPTIONS = 0x05,
+       L3TYPE_IPV6 = 0x06,
+       L3TYPE_IPV6_OPTIONS = 0x07,
+       L3TYPE_ET_STOP = 0x0D,
+       L3TYPE_OTHER = 0x0E,
+};
+
+enum layer4_type {
+       L4TYPE_NONE = 0x00,
+       L4TYPE_IPSEC_ESP = 0x01,
+       L4TYPE_IPFRAG = 0x02,
+       L4TYPE_IPCOMP = 0x03,
+       L4TYPE_TCP = 0x04,
+       L4TYPE_UDP = 0x05,
+       L4TYPE_SCTP = 0x06,
+       L4TYPE_GRE = 0x07,
+       L4TYPE_ROCE_BTH = 0x08,
+       L4TYPE_OTHER = 0x0E,
+};
+
+/* CPI and RSSI configuration */
+enum cpi_algorithm_type {
+       CPI_ALG_NONE = 0x0,
+       CPI_ALG_VLAN = 0x1,
+       CPI_ALG_VLAN16 = 0x2,
+       CPI_ALG_DIFF = 0x3,
+};
+
+enum rss_algorithm_type {
+       RSS_ALG_NONE = 0x00,
+       RSS_ALG_PORT = 0x01,
+       RSS_ALG_IP = 0x02,
+       RSS_ALG_TCP_IP = 0x03,
+       RSS_ALG_UDP_IP = 0x04,
+       RSS_ALG_SCTP_IP = 0x05,
+       RSS_ALG_GRE_IP = 0x06,
+       RSS_ALG_ROCE = 0x07,
+};
+
+enum rss_hash_cfg {
+       RSS_HASH_L2ETC = 0x00,
+       RSS_HASH_IP = 0x01,
+       RSS_HASH_TCP = 0x02,
+       RSS_HASH_TCP_SYN_DIS = 0x03,
+       RSS_HASH_UDP = 0x04,
+       RSS_HASH_L4ETC = 0x05,
+       RSS_HASH_ROCE = 0x06,
+       RSS_L3_BIDI = 0x07,
+       RSS_L4_BIDI = 0x08,
+};
+
+/* Completion queue entry types */
+enum cqe_type {
+       CQE_TYPE_INVALID = 0x0,
+       CQE_TYPE_RX = 0x2,
+       CQE_TYPE_RX_SPLIT = 0x3,
+       CQE_TYPE_RX_TCP = 0x4,
+       CQE_TYPE_SEND = 0x8,
+       CQE_TYPE_SEND_PTP = 0x9,
+};
+
+enum cqe_rx_tcp_status {
+       CQE_RX_STATUS_VALID_TCP_CNXT = 0x00,
+       CQE_RX_STATUS_INVALID_TCP_CNXT = 0x0F,
+};
+
+enum cqe_send_status {
+       CQE_SEND_STATUS_GOOD = 0x00,
+       CQE_SEND_STATUS_DESC_FAULT = 0x01,
+       CQE_SEND_STATUS_HDR_CONS_ERR = 0x11,
+       CQE_SEND_STATUS_SUBDESC_ERR = 0x12,
+       CQE_SEND_STATUS_IMM_SIZE_OFLOW = 0x80,
+       CQE_SEND_STATUS_CRC_SEQ_ERR = 0x81,
+       CQE_SEND_STATUS_DATA_SEQ_ERR = 0x82,
+       CQE_SEND_STATUS_MEM_SEQ_ERR = 0x83,
+       CQE_SEND_STATUS_LOCK_VIOL = 0x84,
+       CQE_SEND_STATUS_LOCK_UFLOW = 0x85,
+       CQE_SEND_STATUS_DATA_FAULT = 0x86,
+       CQE_SEND_STATUS_TSTMP_CONFLICT = 0x87,
+       CQE_SEND_STATUS_TSTMP_TIMEOUT = 0x88,
+       CQE_SEND_STATUS_MEM_FAULT = 0x89,
+       CQE_SEND_STATUS_CSUM_OVERLAP = 0x8A,
+       CQE_SEND_STATUS_CSUM_OVERFLOW = 0x8B,
+};
+
+enum cqe_rx_tcp_end_reason {
+       CQE_RX_TCP_END_FIN_FLAG_DET = 0,
+       CQE_RX_TCP_END_INVALID_FLAG = 1,
+       CQE_RX_TCP_END_TIMEOUT = 2,
+       CQE_RX_TCP_END_OUT_OF_SEQ = 3,
+       CQE_RX_TCP_END_PKT_ERR = 4,
+       CQE_RX_TCP_END_QS_DISABLED = 0x0F,
+};
+
+/* Packet protocol level error enumeration */
+enum cqe_rx_err_level {
+       CQE_RX_ERRLVL_RE = 0x0,
+       CQE_RX_ERRLVL_L2 = 0x1,
+       CQE_RX_ERRLVL_L3 = 0x2,
+       CQE_RX_ERRLVL_L4 = 0x3,
+};
+
+/* Packet protocol level error type enumeration */
+enum cqe_rx_err_opcode {
+       CQE_RX_ERR_RE_NONE = 0x0,
+       CQE_RX_ERR_RE_PARTIAL = 0x1,
+       CQE_RX_ERR_RE_JABBER = 0x2,
+       CQE_RX_ERR_RE_FCS = 0x7,
+       CQE_RX_ERR_RE_TERMINATE = 0x9,
+       CQE_RX_ERR_RE_RX_CTL = 0xb,
+       CQE_RX_ERR_PREL2_ERR = 0x1f,
+       CQE_RX_ERR_L2_FRAGMENT = 0x20,
+       CQE_RX_ERR_L2_OVERRUN = 0x21,
+       CQE_RX_ERR_L2_PFCS = 0x22,
+       CQE_RX_ERR_L2_PUNY = 0x23,
+       CQE_RX_ERR_L2_MAL = 0x24,
+       CQE_RX_ERR_L2_OVERSIZE = 0x25,
+       CQE_RX_ERR_L2_UNDERSIZE = 0x26,
+       CQE_RX_ERR_L2_LENMISM = 0x27,
+       CQE_RX_ERR_L2_PCLP = 0x28,
+       CQE_RX_ERR_IP_NOT = 0x41,
+       CQE_RX_ERR_IP_CHK = 0x42,
+       CQE_RX_ERR_IP_MAL = 0x43,
+       CQE_RX_ERR_IP_MALD = 0x44,
+       CQE_RX_ERR_IP_HOP = 0x45,
+       CQE_RX_ERR_L3_ICRC = 0x46,
+       CQE_RX_ERR_L3_PCLP = 0x47,
+       CQE_RX_ERR_L4_MAL = 0x61,
+       CQE_RX_ERR_L4_CHK = 0x62,
+       CQE_RX_ERR_UDP_LEN = 0x63,
+       CQE_RX_ERR_L4_PORT = 0x64,
+       CQE_RX_ERR_TCP_FLAG = 0x65,
+       CQE_RX_ERR_TCP_OFFSET = 0x66,
+       CQE_RX_ERR_L4_PCLP = 0x67,
+       CQE_RX_ERR_RBDR_TRUNC = 0x70,
+};
+
+struct cqe_rx_t {
+#if defined(__BIG_ENDIAN_BITFIELD)
+       u64   cqe_type:4; /* W0 */
+       u64   stdn_fault:1;
+       u64   rsvd0:1;
+       u64   rq_qs:7;
+       u64   rq_idx:3;
+       u64   rsvd1:12;
+       u64   rss_alg:4;
+       u64   rsvd2:4;
+       u64   rb_cnt:4;
+       u64   vlan_found:1;
+       u64   vlan_stripped:1;
+       u64   vlan2_found:1;
+       u64   vlan2_stripped:1;
+       u64   l4_type:4;
+       u64   l3_type:4;
+       u64   l2_present:1;
+       u64   err_level:3;
+       u64   err_opcode:8;
+
+       u64   pkt_len:16; /* W1 */
+       u64   l2_ptr:8;
+       u64   l3_ptr:8;
+       u64   l4_ptr:8;
+       u64   cq_pkt_len:8;
+       u64   align_pad:3;
+       u64   rsvd3:1;
+       u64   chan:12;
+
+       u64   rss_tag:32; /* W2 */
+       u64   vlan_tci:16;
+       u64   vlan_ptr:8;
+       u64   vlan2_ptr:8;
+
+       u64   rb3_sz:16; /* W3 */
+       u64   rb2_sz:16;
+       u64   rb1_sz:16;
+       u64   rb0_sz:16;
+
+       u64   rb7_sz:16; /* W4 */
+       u64   rb6_sz:16;
+       u64   rb5_sz:16;
+       u64   rb4_sz:16;
+
+       u64   rb11_sz:16; /* W5 */
+       u64   rb10_sz:16;
+       u64   rb9_sz:16;
+       u64   rb8_sz:16;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+       u64   err_opcode:8;
+       u64   err_level:3;
+       u64   l2_present:1;
+       u64   l3_type:4;
+       u64   l4_type:4;
+       u64   vlan2_stripped:1;
+       u64   vlan2_found:1;
+       u64   vlan_stripped:1;
+       u64   vlan_found:1;
+       u64   rb_cnt:4;
+       u64   rsvd2:4;
+       u64   rss_alg:4;
+       u64   rsvd1:12;
+       u64   rq_idx:3;
+       u64   rq_qs:7;
+       u64   rsvd0:1;
+       u64   stdn_fault:1;
+       u64   cqe_type:4; /* W0 */
+       u64   chan:12;
+       u64   rsvd3:1;
+       u64   align_pad:3;
+       u64   cq_pkt_len:8;
+       u64   l4_ptr:8;
+       u64   l3_ptr:8;
+       u64   l2_ptr:8;
+       u64   pkt_len:16; /* W1 */
+       u64   vlan2_ptr:8;
+       u64   vlan_ptr:8;
+       u64   vlan_tci:16;
+       u64   rss_tag:32; /* W2 */
+       u64   rb0_sz:16;
+       u64   rb1_sz:16;
+       u64   rb2_sz:16;
+       u64   rb3_sz:16; /* W3 */
+       u64   rb4_sz:16;
+       u64   rb5_sz:16;
+       u64   rb6_sz:16;
+       u64   rb7_sz:16; /* W4 */
+       u64   rb8_sz:16;
+       u64   rb9_sz:16;
+       u64   rb10_sz:16;
+       u64   rb11_sz:16; /* W5 */
+#endif
+       u64   rb0_ptr:64;
+       u64   rb1_ptr:64;
+       u64   rb2_ptr:64;
+       u64   rb3_ptr:64;
+       u64   rb4_ptr:64;
+       u64   rb5_ptr:64;
+       u64   rb6_ptr:64;
+       u64   rb7_ptr:64;
+       u64   rb8_ptr:64;
+       u64   rb9_ptr:64;
+       u64   rb10_ptr:64;
+       u64   rb11_ptr:64;
+};
+
+struct cqe_rx_tcp_err_t {
+#if defined(__BIG_ENDIAN_BITFIELD)
+       u64   cqe_type:4; /* W0 */
+       u64   rsvd0:60;
+
+       u64   rsvd1:4; /* W1 */
+       u64   partial_first:1;
+       u64   rsvd2:27;
+       u64   rbdr_bytes:8;
+       u64   rsvd3:24;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+       u64   rsvd0:60;
+       u64   cqe_type:4;
+
+       u64   rsvd3:24;
+       u64   rbdr_bytes:8;
+       u64   rsvd2:27;
+       u64   partial_first:1;
+       u64   rsvd1:4;
+#endif
+};
+
+struct cqe_rx_tcp_t {
+#if defined(__BIG_ENDIAN_BITFIELD)
+       u64   cqe_type:4; /* W0 */
+       u64   rsvd0:52;
+       u64   cq_tcp_status:8;
+
+       u64   rsvd1:32; /* W1 */
+       u64   tcp_cntx_bytes:8;
+       u64   rsvd2:8;
+       u64   tcp_err_bytes:16;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+       u64   cq_tcp_status:8;
+       u64   rsvd0:52;
+       u64   cqe_type:4; /* W0 */
+
+       u64   tcp_err_bytes:16;
+       u64   rsvd2:8;
+       u64   tcp_cntx_bytes:8;
+       u64   rsvd1:32; /* W1 */
+#endif
+};
+
+struct cqe_send_t {
+#if defined(__BIG_ENDIAN_BITFIELD)
+       u64   cqe_type:4; /* W0 */
+       u64   rsvd0:4;
+       u64   sqe_ptr:16;
+       u64   rsvd1:4;
+       u64   rsvd2:10;
+       u64   sq_qs:7;
+       u64   sq_idx:3;
+       u64   rsvd3:8;
+       u64   send_status:8;
+
+       u64   ptp_timestamp:64; /* W1 */
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+       u64   send_status:8;
+       u64   rsvd3:8;
+       u64   sq_idx:3;
+       u64   sq_qs:7;
+       u64   rsvd2:10;
+       u64   rsvd1:4;
+       u64   sqe_ptr:16;
+       u64   rsvd0:4;
+       u64   cqe_type:4; /* W0 */
+
+       u64   ptp_timestamp:64; /* W1 */
+#endif
+};
+
+union cq_desc_t {
+       u64    u[64];
+       struct cqe_send_t snd_hdr;
+       struct cqe_rx_t rx_hdr;
+       struct cqe_rx_tcp_t rx_tcp_hdr;
+       struct cqe_rx_tcp_err_t rx_tcp_err_hdr;
+};
+
+struct rbdr_entry_t {
+#if defined(__BIG_ENDIAN_BITFIELD)
+       u64   rsvd0:15;
+       u64   buf_addr:42;
+       u64   cache_align:7;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+       u64   cache_align:7;
+       u64   buf_addr:42;
+       u64   rsvd0:15;
+#endif
+};
+
+/* TCP reassembly context */
+struct rbe_tcp_cnxt_t {
+#if defined(__BIG_ENDIAN_BITFIELD)
+       u64   tcp_pkt_cnt:12;
+       u64   rsvd1:4;
+       u64   align_hdr_bytes:4;
+       u64   align_ptr_bytes:4;
+       u64   ptr_bytes:16;
+       u64   rsvd2:24;
+       u64   cqe_type:4;
+       u64   rsvd0:54;
+       u64   tcp_end_reason:2;
+       u64   tcp_status:4;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+       u64   tcp_status:4;
+       u64   tcp_end_reason:2;
+       u64   rsvd0:54;
+       u64   cqe_type:4;
+       u64   rsvd2:24;
+       u64   ptr_bytes:16;
+       u64   align_ptr_bytes:4;
+       u64   align_hdr_bytes:4;
+       u64   rsvd1:4;
+       u64   tcp_pkt_cnt:12;
+#endif
+};
+
+/* Always Big endian */
+struct rx_hdr_t {
+       u64   opaque:32;
+       u64   rss_flow:8;
+       u64   skip_length:6;
+       u64   disable_rss:1;
+       u64   disable_tcp_reassembly:1;
+       u64   nodrop:1;
+       u64   dest_alg:2;
+       u64   rsvd0:2;
+       u64   dest_rq:11;
+};
+
+enum send_l4_csum_type {
+       SEND_L4_CSUM_DISABLE = 0x00,
+       SEND_L4_CSUM_UDP = 0x01,
+       SEND_L4_CSUM_TCP = 0x02,
+       SEND_L4_CSUM_SCTP = 0x03,
+};
+
+enum send_crc_alg {
+       SEND_CRCALG_CRC32 = 0x00,
+       SEND_CRCALG_CRC32C = 0x01,
+       SEND_CRCALG_ICRC = 0x02,
+};
+
+enum send_load_type {
+       SEND_LD_TYPE_LDD = 0x00,
+       SEND_LD_TYPE_LDT = 0x01,
+       SEND_LD_TYPE_LDWB = 0x02,
+};
+
+enum send_mem_alg_type {
+       SEND_MEMALG_SET = 0x00,
+       SEND_MEMALG_ADD = 0x08,
+       SEND_MEMALG_SUB = 0x09,
+       SEND_MEMALG_ADDLEN = 0x0A,
+       SEND_MEMALG_SUBLEN = 0x0B,
+};
+
+enum send_mem_dsz_type {
+       SEND_MEMDSZ_B64 = 0x00,
+       SEND_MEMDSZ_B32 = 0x01,
+       SEND_MEMDSZ_B8 = 0x03,
+};
+
+enum sq_subdesc_type {
+       SQ_DESC_TYPE_INVALID = 0x00,
+       SQ_DESC_TYPE_HEADER = 0x01,
+       SQ_DESC_TYPE_CRC = 0x02,
+       SQ_DESC_TYPE_IMMEDIATE = 0x03,
+       SQ_DESC_TYPE_GATHER = 0x04,
+       SQ_DESC_TYPE_MEMORY = 0x05,
+};
+
+struct sq_crc_subdesc {
+#if defined(__BIG_ENDIAN_BITFIELD)
+       u64    rsvd1:32;
+       u64    crc_ival:32;
+       u64    subdesc_type:4;
+       u64    crc_alg:2;
+       u64    rsvd0:10;
+       u64    crc_insert_pos:16;
+       u64    hdr_start:16;
+       u64    crc_len:16;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+       u64    crc_len:16;
+       u64    hdr_start:16;
+       u64    crc_insert_pos:16;
+       u64    rsvd0:10;
+       u64    crc_alg:2;
+       u64    subdesc_type:4;
+       u64    crc_ival:32;
+       u64    rsvd1:32;
+#endif
+};
+
+struct sq_gather_subdesc {
+#if defined(__BIG_ENDIAN_BITFIELD)
+       u64    subdesc_type:4; /* W0 */
+       u64    ld_type:2;
+       u64    rsvd0:42;
+       u64    size:16;
+
+       u64    rsvd1:15; /* W1 */
+       u64    addr:49;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+       u64    size:16;
+       u64    rsvd0:42;
+       u64    ld_type:2;
+       u64    subdesc_type:4; /* W0 */
+
+       u64    addr:49;
+       u64    rsvd1:15; /* W1 */
+#endif
+};
+
+/* SQ immediate subdescriptor */
+struct sq_imm_subdesc {
+#if defined(__BIG_ENDIAN_BITFIELD)
+       u64    subdesc_type:4; /* W0 */
+       u64    rsvd0:46;
+       u64    len:14;
+
+       u64    data:64; /* W1 */
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+       u64    len:14;
+       u64    rsvd0:46;
+       u64    subdesc_type:4; /* W0 */
+
+       u64    data:64; /* W1 */
+#endif
+};
+
+struct sq_mem_subdesc {
+#if defined(__BIG_ENDIAN_BITFIELD)
+       u64    subdesc_type:4; /* W0 */
+       u64    mem_alg:4;
+       u64    mem_dsz:2;
+       u64    wmem:1;
+       u64    rsvd0:21;
+       u64    offset:32;
+
+       u64    rsvd1:15; /* W1 */
+       u64    addr:49;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+       u64    offset:32;
+       u64    rsvd0:21;
+       u64    wmem:1;
+       u64    mem_dsz:2;
+       u64    mem_alg:4;
+       u64    subdesc_type:4; /* W0 */
+
+       u64    addr:49;
+       u64    rsvd1:15; /* W1 */
+#endif
+};
+
+struct sq_hdr_subdesc {
+#if defined(__BIG_ENDIAN_BITFIELD)
+       u64    subdesc_type:4;
+       u64    tso:1;
+       u64    post_cqe:1; /* Post CQE on no error also */
+       u64    dont_send:1;
+       u64    tstmp:1;
+       u64    subdesc_cnt:8;
+       u64    csum_l4:2;
+       u64    csum_l3:1;
+       u64    rsvd0:5;
+       u64    l4_offset:8;
+       u64    l3_offset:8;
+       u64    rsvd1:4;
+       u64    tot_len:20; /* W0 */
+
+       u64    tso_sdc_cont:8;
+       u64    tso_sdc_first:8;
+       u64    tso_l4_offset:8;
+       u64    tso_flags_last:12;
+       u64    tso_flags_first:12;
+       u64    rsvd2:2;
+       u64    tso_max_paysize:14; /* W1 */
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+       u64    tot_len:20;
+       u64    rsvd1:4;
+       u64    l3_offset:8;
+       u64    l4_offset:8;
+       u64    rsvd0:5;
+       u64    csum_l3:1;
+       u64    csum_l4:2;
+       u64    subdesc_cnt:8;
+       u64    tstmp:1;
+       u64    dont_send:1;
+       u64    post_cqe:1; /* Post CQE on no error also */
+       u64    tso:1;
+       u64    subdesc_type:4; /* W0 */
+
+       u64    tso_max_paysize:14;
+       u64    rsvd2:2;
+       u64    tso_flags_first:12;
+       u64    tso_flags_last:12;
+       u64    tso_l4_offset:8;
+       u64    tso_sdc_first:8;
+       u64    tso_sdc_cont:8; /* W1 */
+#endif
+};
+
+/* Queue config register formats */
+struct rq_cfg {
+#if defined(__BIG_ENDIAN_BITFIELD)
+       u64 reserved_2_63:62;
+       u64 ena:1;
+       u64 tcp_ena:1;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+       u64 tcp_ena:1;
+       u64 ena:1;
+       u64 reserved_2_63:62;
+#endif
+};
+
+struct cq_cfg {
+#if defined(__BIG_ENDIAN_BITFIELD)
+       u64 reserved_43_63:21;
+       u64 ena:1;
+       u64 reset:1;
+       u64 caching:1;
+       u64 reserved_35_39:5;
+       u64 qsize:3;
+       u64 reserved_25_31:7;
+       u64 avg_con:9;
+       u64 reserved_0_15:16;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+       u64 reserved_0_15:16;
+       u64 avg_con:9;
+       u64 reserved_25_31:7;
+       u64 qsize:3;
+       u64 reserved_35_39:5;
+       u64 caching:1;
+       u64 reset:1;
+       u64 ena:1;
+       u64 reserved_43_63:21;
+#endif
+};
+
+struct sq_cfg {
+#if defined(__BIG_ENDIAN_BITFIELD)
+       u64 reserved_20_63:44;
+       u64 ena:1;
+       u64 reserved_18_18:1;
+       u64 reset:1;
+       u64 ldwb:1;
+       u64 reserved_11_15:5;
+       u64 qsize:3;
+       u64 reserved_3_7:5;
+       u64 tstmp_bgx_intf:3;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+       u64 tstmp_bgx_intf:3;
+       u64 reserved_3_7:5;
+       u64 qsize:3;
+       u64 reserved_11_15:5;
+       u64 ldwb:1;
+       u64 reset:1;
+       u64 reserved_18_18:1;
+       u64 ena:1;
+       u64 reserved_20_63:44;
+#endif
+};
+
+struct rbdr_cfg {
+#if defined(__BIG_ENDIAN_BITFIELD)
+       u64 reserved_45_63:19;
+       u64 ena:1;
+       u64 reset:1;
+       u64 ldwb:1;
+       u64 reserved_36_41:6;
+       u64 qsize:4;
+       u64 reserved_25_31:7;
+       u64 avg_con:9;
+       u64 reserved_12_15:4;
+       u64 lines:12;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+       u64 lines:12;
+       u64 reserved_12_15:4;
+       u64 avg_con:9;
+       u64 reserved_25_31:7;
+       u64 qsize:4;
+       u64 reserved_36_41:6;
+       u64 ldwb:1;
+       u64 reset:1;
+       u64 ena:1;
+       u64 reserved_45_63:19;
+#endif
+};
+
+struct qs_cfg {
+#if defined(__BIG_ENDIAN_BITFIELD)
+       u64 reserved_32_63:32;
+       u64 ena:1;
+       u64 reserved_27_30:4;
+       u64 sq_ins_ena:1;
+       u64 sq_ins_pos:6;
+       u64 lock_ena:1;
+       u64 lock_viol_cqe_ena:1;
+       u64 send_tstmp_ena:1;
+       u64 be:1;
+       u64 reserved_7_15:9;
+       u64 vnic:7;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+       u64 vnic:7;
+       u64 reserved_7_15:9;
+       u64 be:1;
+       u64 send_tstmp_ena:1;
+       u64 lock_viol_cqe_ena:1;
+       u64 lock_ena:1;
+       u64 sq_ins_pos:6;
+       u64 sq_ins_ena:1;
+       u64 reserved_27_30:4;
+       u64 ena:1;
+       u64 reserved_32_63:32;
+#endif
+};
+
+#endif /* Q_STRUCT_H */
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
new file mode 100644 (file)
index 0000000..633ec05
--- /dev/null
@@ -0,0 +1,966 @@
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/phy.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+
+#include "nic_reg.h"
+#include "nic.h"
+#include "thunder_bgx.h"
+
+#define DRV_NAME       "thunder-BGX"
+#define DRV_VERSION    "1.0"
+
+struct lmac {
+       struct bgx              *bgx;
+       int                     dmac;
+       unsigned char           mac[ETH_ALEN];
+       bool                    link_up;
+       int                     lmacid; /* ID within BGX */
+       int                     lmacid_bd; /* ID on board */
+       struct net_device       netdev;
+       struct phy_device       *phydev;
+       unsigned int            last_duplex;
+       unsigned int            last_link;
+       unsigned int            last_speed;
+       bool                    is_sgmii;
+       struct delayed_work     dwork;
+       struct workqueue_struct *check_link;
+};
+
+struct bgx {
+       u8                      bgx_id;
+       u8                      qlm_mode;
+       struct  lmac            lmac[MAX_LMAC_PER_BGX];
+       int                     lmac_count;
+       int                     lmac_type;
+       int                     lane_to_sds;
+       int                     use_training;
+       void __iomem            *reg_base;
+       struct pci_dev          *pdev;
+};
+
+static struct bgx *bgx_vnic[MAX_BGX_THUNDER];
+static int lmac_count; /* Total number of LMACs in the system */
+
+static int bgx_xaui_check_link(struct lmac *lmac);
+
+/* Supported devices */
+static const struct pci_device_id bgx_id_table[] = {
+       { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_BGX) },
+       { 0, }  /* end of table */
+};
+
+MODULE_AUTHOR("Cavium Inc");
+MODULE_DESCRIPTION("Cavium Thunder BGX/MAC Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, bgx_id_table);
+
+/* The Cavium ThunderX network controller can *only* be found in SoCs
+ * containing the ThunderX ARM64 CPU implementation.  All accesses to the device
+ * registers on this platform are implicitly strongly ordered with respect
+ * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use
+ * with no memory barriers in this driver.  The readq()/writeq() functions add
+ * explicit ordering operations which in this case are redundant, and only
+ * add overhead.
+ */
+
+/* Register read/write APIs */
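+/* Each LMAC's CSRs occupy their own 1 MB window ((u32)lmac << 20)
+ * within the BGX BAR, hence the address calculation below.
+ */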
+static u64 bgx_reg_read(struct bgx *bgx, u8 lmac, u64 offset)
+{
+       void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;
+
+       return readq_relaxed(addr);
+}
+
+static void bgx_reg_write(struct bgx *bgx, u8 lmac, u64 offset, u64 val)
+{
+       void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;
+
+       writeq_relaxed(val, addr);
+}
+
+static void bgx_reg_modify(struct bgx *bgx, u8 lmac, u64 offset, u64 val)
+{
+       void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;
+
+       writeq_relaxed(val | readq_relaxed(addr), addr);
+}
+
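+/* Poll a register until the bits in 'mask' are set (zero == false) or
+ * cleared (zero == true). Returns 0 on success and 1 after roughly
+ * 100-200 ms of polling (100 iterations of usleep_range(1000, 2000)).
+ */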
+static int bgx_poll_reg(struct bgx *bgx, u8 lmac, u64 reg, u64 mask, bool zero)
+{
+       int timeout = 100;
+       u64 reg_val;
+
+       while (timeout) {
+               reg_val = bgx_reg_read(bgx, lmac, reg);
+               if (zero && !(reg_val & mask))
+                       return 0;
+               if (!zero && (reg_val & mask))
+                       return 0;
+               usleep_range(1000, 2000);
+               timeout--;
+       }
+       return 1;
+}
+
+/* Return a bitmap of the BGX instances present in HW on this node,
+ * e.g. 0x3 when BGX0 and BGX1 have been probed.
+ */
+unsigned bgx_get_map(int node)
+{
+       int i;
+       unsigned map = 0;
+
+       for (i = 0; i < MAX_BGX_PER_CN88XX; i++) {
+               if (bgx_vnic[(node * MAX_BGX_PER_CN88XX) + i])
+                       map |= (1 << i);
+       }
+
+       return map;
+}
+EXPORT_SYMBOL(bgx_get_map);
+
+/* Return number of LMAC configured for this BGX */
+int bgx_get_lmac_count(int node, int bgx_idx)
+{
+       struct bgx *bgx;
+
+       bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+       if (bgx)
+               return bgx->lmac_count;
+
+       return 0;
+}
+EXPORT_SYMBOL(bgx_get_lmac_count);
+
+/* Report the current link status of an LMAC via 'status' */
+void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status)
+{
+       struct bgx_link_status *link = (struct bgx_link_status *)status;
+       struct bgx *bgx;
+       struct lmac *lmac;
+
+       bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+       if (!bgx)
+               return;
+
+       lmac = &bgx->lmac[lmacid];
+       link->link_up = lmac->link_up;
+       link->duplex = lmac->last_duplex;
+       link->speed = lmac->last_speed;
+}
+EXPORT_SYMBOL(bgx_get_lmac_link_state);
+
+const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid)
+{
+       struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+
+       if (bgx)
+               return bgx->lmac[lmacid].mac;
+
+       return NULL;
+}
+EXPORT_SYMBOL(bgx_get_lmac_mac);
+
+void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac)
+{
+       struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+
+       if (!bgx)
+               return;
+
+       ether_addr_copy(bgx->lmac[lmacid].mac, mac);
+}
+EXPORT_SYMBOL(bgx_set_lmac_mac);
+
+static void bgx_sgmii_change_link_state(struct lmac *lmac)
+{
+       struct bgx *bgx = lmac->bgx;
+       u64 cmr_cfg;
+       u64 port_cfg = 0;
+       u64 misc_ctl = 0;
+
+       cmr_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG);
+       cmr_cfg &= ~CMR_EN;
+       bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);
+
+       port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);
+       misc_ctl = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL);
+
+       if (lmac->link_up) {
+               misc_ctl &= ~PCS_MISC_CTL_GMX_ENO;
+               port_cfg &= ~GMI_PORT_CFG_DUPLEX;
+               port_cfg |=  (lmac->last_duplex << 2);
+       } else {
+               misc_ctl |= PCS_MISC_CTL_GMX_ENO;
+       }
+
+       switch (lmac->last_speed) {
+       case 10:
+               port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */
+               port_cfg |= GMI_PORT_CFG_SPEED_MSB;  /* speed_msb 1 */
+               port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */
+               misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
+               misc_ctl |= 50; /* samp_pt */
+               bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
+               bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
+               break;
+       case 100:
+               port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */
+               port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */
+               port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */
+               misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
+               misc_ctl |= 5; /* samp_pt */
+               bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
+               bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
+               break;
+       case 1000:
+               port_cfg |= GMI_PORT_CFG_SPEED; /* speed 1 */
+               port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */
+               port_cfg |= GMI_PORT_CFG_SLOT_TIME; /* slottime 1 */
+               misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
+               misc_ctl |= 1; /* samp_pt */
+               bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 512);
+               if (lmac->last_duplex)
+                       bgx_reg_write(bgx, lmac->lmacid,
+                                     BGX_GMP_GMI_TXX_BURST, 0);
+               else
+                       bgx_reg_write(bgx, lmac->lmacid,
+                                     BGX_GMP_GMI_TXX_BURST, 8192);
+               break;
+       default:
+               break;
+       }
+       bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL, misc_ctl);
+       bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG, port_cfg);
+
+       port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);
+
+       /* Re-enable the LMAC */
+       cmr_cfg |= CMR_EN;
+       bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);
+}
+
+static void bgx_lmac_handler(struct net_device *netdev)
+{
+       struct lmac *lmac = container_of(netdev, struct lmac, netdev);
+       struct phy_device *phydev;
+       int link_changed = 0;
+
+       if (!lmac)
+               return;
+
+       phydev = lmac->phydev;
+
+       if (!phydev->link && lmac->last_link)
+               link_changed = -1;
+
+       if (phydev->link &&
+           (lmac->last_duplex != phydev->duplex ||
+            lmac->last_link != phydev->link ||
+            lmac->last_speed != phydev->speed))
+               link_changed = 1;
+
+       lmac->last_link = phydev->link;
+       lmac->last_speed = phydev->speed;
+       lmac->last_duplex = phydev->duplex;
+
+       if (!link_changed)
+               return;
+
+       if (link_changed > 0)
+               lmac->link_up = true;
+       else
+               lmac->link_up = false;
+
+       if (lmac->is_sgmii)
+               bgx_sgmii_change_link_state(lmac);
+       else
+               bgx_xaui_check_link(lmac);
+}
+
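+/* Read one of the BGX_CMRX_RX_STAT0..10 counters; for idx > 8 the counter
+ * appears to be common to the whole BGX, so LMAC 0 is used (assumption
+ * based on the lmac override below).
+ */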
+u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx)
+{
+       struct bgx *bgx;
+
+       bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+       if (!bgx)
+               return 0;
+
+       if (idx > 8)
+               lmac = 0;
+       return bgx_reg_read(bgx, lmac, BGX_CMRX_RX_STAT0 + (idx * 8));
+}
+EXPORT_SYMBOL(bgx_get_rx_stats);
+
+u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx)
+{
+       struct bgx *bgx;
+
+       bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+       if (!bgx)
+               return 0;
+
+       return bgx_reg_read(bgx, lmac, BGX_CMRX_TX_STAT0 + (idx * 8));
+}
+EXPORT_SYMBOL(bgx_get_tx_stats);
+
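+/* Clear every DMAC CAM filter entry this LMAC has programmed */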
+static void bgx_flush_dmac_addrs(struct bgx *bgx, int lmac)
+{
+       u64 offset;
+
+       while (bgx->lmac[lmac].dmac > 0) {
+               offset = ((bgx->lmac[lmac].dmac - 1) * sizeof(u64)) +
+                       (lmac * MAX_DMAC_PER_LMAC * sizeof(u64));
+               bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + offset, 0);
+               bgx->lmac[lmac].dmac--;
+       }
+}
+
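+/* SGMII bring-up: set the TX threshold and max frame size, reset the PCS
+ * and wait for it, then power up and restart autonegotiation, waiting
+ * for AN completion.
+ */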
+static int bgx_lmac_sgmii_init(struct bgx *bgx, int lmacid)
+{
+       u64 cfg;
+
+       bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_THRESH, 0x30);
+       /* max packet size */
+       bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_RXX_JABBER, MAX_FRAME_SIZE);
+
+       /* Disable frame alignment if using preamble */
+       cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
+       if (cfg & 1)
+               bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SGMII_CTL, 0);
+
+       /* Enable lmac */
+       bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);
+
+       /* PCS reset */
+       bgx_reg_modify(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_RESET);
+       if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_CTL,
+                        PCS_MRX_CTL_RESET, true)) {
+               dev_err(&bgx->pdev->dev, "BGX PCS reset not completed\n");
+               return -1;
+       }
+
+       /* power down, reset autoneg, autoneg enable */
+       cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL);
+       cfg &= ~PCS_MRX_CTL_PWR_DN;
+       cfg |= (PCS_MRX_CTL_RST_AN | PCS_MRX_CTL_AN_EN);
+       bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg);
+
+       if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS,
+                        PCS_MRX_STATUS_AN_CPT, false)) {
+               dev_err(&bgx->pdev->dev, "BGX AN_CPT not completed\n");
+               return -1;
+       }
+
+       return 0;
+}
+
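+/* XAUI/XFI/XLAUI/KR bring-up: reset the SPU, clear stale interrupts,
+ * optionally enable KR link training, append FCS on TX, and disable FEC
+ * and SPU autonegotiation before re-enabling the LMAC.
+ */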
+static int bgx_lmac_xaui_init(struct bgx *bgx, int lmacid, int lmac_type)
+{
+       u64 cfg;
+
+       /* Reset SPU */
+       bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET);
+       if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) {
+               dev_err(&bgx->pdev->dev, "BGX SPU reset not completed\n");
+               return -1;
+       }
+
+       /* Disable LMAC */
+       cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+       cfg &= ~CMR_EN;
+       bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
+       bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
+       /* Set interleaved running disparity for RXAUI */
+       /* Set interleaved running disparity only for RXAUI mode */
+               bgx_reg_modify(bgx, lmacid,
+                              BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);
+       else
+               bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL,
+                              SPU_MISC_CTL_RX_DIS | SPU_MISC_CTL_INTLV_RDISP);
+
+       /* clear all interrupts */
+       cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_INT);
+       bgx_reg_write(bgx, lmacid, BGX_SMUX_RX_INT, cfg);
+       cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_INT);
+       bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_INT, cfg);
+       cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
+       bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
+
+       if (bgx->use_training) {
+               bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LP_CUP, 0x00);
+               bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_CUP, 0x00);
+               bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_REP, 0x00);
+               /* training enable */
+               bgx_reg_modify(bgx, lmacid,
+                              BGX_SPUX_BR_PMD_CRTL, SPU_PMD_CRTL_TRAIN_EN);
+       }
+
+       /* Append FCS to each packet */
+       bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, SMU_TX_APPEND_FCS_D);
+
+       /* Disable forward error correction */
+       cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_FEC_CONTROL);
+       cfg &= ~SPU_FEC_CTL_FEC_EN;
+       bgx_reg_write(bgx, lmacid, BGX_SPUX_FEC_CONTROL, cfg);
+
+       /* Disable autoneg */
+       cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_CONTROL);
+       cfg = cfg & ~(SPU_AN_CTL_AN_EN | SPU_AN_CTL_XNP_EN);
+       bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_CONTROL, cfg);
+
+       cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_ADV);
+       if (bgx->lmac_type == BGX_MODE_10G_KR)
+               cfg |= (1 << 23);
+       else if (bgx->lmac_type == BGX_MODE_40G_KR)
+               cfg |= (1 << 24);
+       else
+               cfg &= ~((1 << 23) | (1 << 24));
+       cfg = cfg & (~((1ULL << 25) | (1ULL << 22) | (1ULL << 12)));
+       bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_ADV, cfg);
+
+       cfg = bgx_reg_read(bgx, 0, BGX_SPU_DBG_CONTROL);
+       cfg &= ~SPU_DBG_CTL_AN_ARB_LINK_CHK_EN;
+       bgx_reg_write(bgx, 0, BGX_SPU_DBG_CONTROL, cfg);
+
+       /* Enable lmac */
+       bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);
+
+       cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_CONTROL1);
+       cfg &= ~SPU_CTL_LOW_POWER;
+       bgx_reg_write(bgx, lmacid, BGX_SPUX_CONTROL1, cfg);
+
+       cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_CTL);
+       cfg &= ~SMU_TX_CTL_UNI_EN;
+       cfg |= SMU_TX_CTL_DIC_EN;
+       bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_CTL, cfg);
+
+       /* take lmac_count into account */
+       bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_THRESH, (0x100 - 1));
+       /* max packet size */
+       bgx_reg_modify(bgx, lmacid, BGX_SMUX_RX_JABBER, MAX_FRAME_SIZE);
+
+       return 0;
+}
+
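+/* Verify a XAUI-family link end to end: wait for SPU reset and block lock
+ * (or lane alignment), check for latched receive faults (restarting KR
+ * training if needed), then confirm SMU RX/TX state before re-enabling
+ * the SPU receiver.  Returns 0 if the link is good, -1 otherwise.
+ */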
+static int bgx_xaui_check_link(struct lmac *lmac)
+{
+       struct bgx *bgx = lmac->bgx;
+       int lmacid = lmac->lmacid;
+       int lmac_type = bgx->lmac_type;
+       u64 cfg;
+
+       bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);
+       if (bgx->use_training) {
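+               /* SPU_INT bits 13/14 appear to be the training-done and
+                * training-failure latches, and BR_PMD_CRTL bit 0 restarts
+                * training (assumption based on their use here).
+                */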
+               cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
+               if (!(cfg & (1ull << 13))) {
+                       cfg = (1ull << 13) | (1ull << 14);
+                       bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
+                       cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL);
+                       cfg |= (1ull << 0);
+                       bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL, cfg);
+                       return -1;
+               }
+       }
+
+       /* Wait for the SPU to come out of reset */
+       if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) {
+               dev_err(&bgx->pdev->dev, "BGX SPU reset not completed\n");
+               return -1;
+       }
+
+       if ((lmac_type == BGX_MODE_10G_KR) || (lmac_type == BGX_MODE_XFI) ||
+           (lmac_type == BGX_MODE_40G_KR) || (lmac_type == BGX_MODE_XLAUI)) {
+               if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BR_STATUS1,
+                                SPU_BR_STATUS_BLK_LOCK, false)) {
+                       dev_err(&bgx->pdev->dev,
+                               "SPU_BR_STATUS_BLK_LOCK not completed\n");
+                       return -1;
+               }
+       } else {
+               if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BX_STATUS,
+                                SPU_BX_STATUS_RX_ALIGN, false)) {
+                       dev_err(&bgx->pdev->dev,
+                               "SPU_BX_STATUS_RX_ALIGN not completed\n");
+                       return -1;
+               }
+       }
+
+       /* Clear rcvflt bit (latching high) and read it back */
+       bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
+       if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
+               dev_err(&bgx->pdev->dev, "Receive fault, retry training\n");
+               if (bgx->use_training) {
+                       cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
+                       if (!(cfg & (1ull << 13))) {
+                               cfg = (1ull << 13) | (1ull << 14);
+                               bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
+                               cfg = bgx_reg_read(bgx, lmacid,
+                                                  BGX_SPUX_BR_PMD_CRTL);
+                               cfg |= (1ull << 0);
+                               bgx_reg_write(bgx, lmacid,
+                                             BGX_SPUX_BR_PMD_CRTL, cfg);
+                               return -1;
+                       }
+               }
+               return -1;
+       }
+
+       /* Wait for MAC RX to be ready */
+       if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_RX_CTL,
+                        SMU_RX_CTL_STATUS, true)) {
+               dev_err(&bgx->pdev->dev, "SMU RX link not okay\n");
+               return -1;
+       }
+
+       /* Wait for BGX RX to be idle */
+       if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_RX_IDLE, false)) {
+               dev_err(&bgx->pdev->dev, "SMU RX not idle\n");
+               return -1;
+       }
+
+       /* Wait for BGX TX to be idle */
+       if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_TX_IDLE, false)) {
+               dev_err(&bgx->pdev->dev, "SMU TX not idle\n");
+               return -1;
+       }
+
+       if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
+               dev_err(&bgx->pdev->dev, "Receive fault\n");
+               return -1;
+       }
+
+       /* Receive link is latching low. Force it high and verify it */
+       bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
+       if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_STATUS1,
+                        SPU_STATUS1_RCV_LNK, false)) {
+               dev_err(&bgx->pdev->dev, "SPU receive link down\n");
+               return -1;
+       }
+
+       cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
+       cfg &= ~SPU_MISC_CTL_RX_DIS;
+       bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);
+       return 0;
+}
+
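+/* Delayed-work handler for the PHY-less modes: samples the latched SPU
+ * receive-link status every 2 seconds and re-runs the link checks on a
+ * down-to-up transition.
+ */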
+static void bgx_poll_for_link(struct work_struct *work)
+{
+       struct lmac *lmac;
+       u64 link;
+
+       lmac = container_of(work, struct lmac, dwork.work);
+
+       /* Receive link is latching low. Force it high and verify it */
+       bgx_reg_modify(lmac->bgx, lmac->lmacid,
+                      BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
+       bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1,
+                    SPU_STATUS1_RCV_LNK, false);
+
+       link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
+       if (link & SPU_STATUS1_RCV_LNK) {
+               lmac->link_up = 1;
+               if (lmac->bgx->lmac_type == BGX_MODE_XLAUI)
+                       lmac->last_speed = 40000;
+               else
+                       lmac->last_speed = 10000;
+               lmac->last_duplex = 1;
+       } else {
+               lmac->link_up = 0;
+       }
+
+       if (lmac->last_link != lmac->link_up) {
+               lmac->last_link = lmac->link_up;
+               if (lmac->link_up)
+                       bgx_xaui_check_link(lmac);
+       }
+
+       queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 2);
+}
+
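+/* Bring up one LMAC: run the SGMII or XAUI init path, enable FCS/pad on
+ * TX, enable the LMAC, then either connect the SGMII PHY or start the
+ * 2-second link-poll workqueue for the PHY-less modes.
+ */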
+static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
+{
+       struct lmac *lmac;
+       u64 cfg;
+
+       lmac = &bgx->lmac[lmacid];
+       lmac->bgx = bgx;
+
+       if (bgx->lmac_type == BGX_MODE_SGMII) {
+               lmac->is_sgmii = 1;
+               if (bgx_lmac_sgmii_init(bgx, lmacid))
+                       return -1;
+       } else {
+               lmac->is_sgmii = 0;
+               if (bgx_lmac_xaui_init(bgx, lmacid, bgx->lmac_type))
+                       return -1;
+       }
+
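+       /* Both paths append FCS and pad on TX; the minimum packet sizes
+        * differ (60 - 1 for GMI vs 60 + 4 for SMU), presumably because
+        * SMU counts the 4-byte FCS (assumption from the constants used).
+        */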
+       if (lmac->is_sgmii) {
+               cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
+               cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */
+               bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND, cfg);
+               bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_MIN_PKT, 60 - 1);
+       } else {
+               cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_APPEND);
+               cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */
+               bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, cfg);
+               bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_MIN_PKT, 60 + 4);
+       }
+
+       /* Enable lmac */
+       bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG,
+                      CMR_EN | CMR_PKT_RX_EN | CMR_PKT_TX_EN);
+
+       /* Restore default cfg, in case low-level firmware changed it */
+       bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, 0x03);
+
+       if ((bgx->lmac_type != BGX_MODE_XFI) &&
+           (bgx->lmac_type != BGX_MODE_XLAUI) &&
+           (bgx->lmac_type != BGX_MODE_40G_KR) &&
+           (bgx->lmac_type != BGX_MODE_10G_KR)) {
+               if (!lmac->phydev)
+                       return -ENODEV;
+
+               lmac->phydev->dev_flags = 0;
+
+               if (phy_connect_direct(&lmac->netdev, lmac->phydev,
+                                      bgx_lmac_handler,
+                                      PHY_INTERFACE_MODE_SGMII))
+                       return -ENODEV;
+
+               phy_start_aneg(lmac->phydev);
+       } else {
+               lmac->check_link = alloc_workqueue("check_link", WQ_UNBOUND |
+                                                  WQ_MEM_RECLAIM, 1);
+               if (!lmac->check_link)
+                       return -ENOMEM;
+               INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link);
+               queue_delayed_work(lmac->check_link, &lmac->dwork, 0);
+       }
+
+       return 0;
+}
+
+static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
+{
+       struct lmac *lmac;
+       u64 cmrx_cfg;
+
+       lmac = &bgx->lmac[lmacid];
+       if (lmac->check_link) {
+               /* Destroy work queue */
+               cancel_delayed_work(&lmac->dwork);
+               flush_workqueue(lmac->check_link);
+               destroy_workqueue(lmac->check_link);
+       }
+
+       cmrx_cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+       cmrx_cfg &= ~CMR_EN;
+       bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg);
+       bgx_flush_dmac_addrs(bgx, lmacid);
+
+       if (lmac->phydev)
+               phy_disconnect(lmac->phydev);
+
+       lmac->phydev = NULL;
+}
+
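+/* Derive the LMAC count, LMAC type and lane-to-serdes mapping from the
+ * QLM mode detected earlier; KR modes additionally enable link training.
+ */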
+static void bgx_set_num_ports(struct bgx *bgx)
+{
+       u64 lmac_count;
+
+       switch (bgx->qlm_mode) {
+       case QLM_MODE_SGMII:
+               bgx->lmac_count = 4;
+               bgx->lmac_type = BGX_MODE_SGMII;
+               bgx->lane_to_sds = 0;
+               break;
+       case QLM_MODE_XAUI_1X4:
+               bgx->lmac_count = 1;
+               bgx->lmac_type = BGX_MODE_XAUI;
+               bgx->lane_to_sds = 0xE4;
+               break;
+       case QLM_MODE_RXAUI_2X2:
+               bgx->lmac_count = 2;
+               bgx->lmac_type = BGX_MODE_RXAUI;
+               bgx->lane_to_sds = 0xE4;
+               break;
+       case QLM_MODE_XFI_4X1:
+               bgx->lmac_count = 4;
+               bgx->lmac_type = BGX_MODE_XFI;
+               bgx->lane_to_sds = 0;
+               break;
+       case QLM_MODE_XLAUI_1X4:
+               bgx->lmac_count = 1;
+               bgx->lmac_type = BGX_MODE_XLAUI;
+               bgx->lane_to_sds = 0xE4;
+               break;
+       case QLM_MODE_10G_KR_4X1:
+               bgx->lmac_count = 4;
+               bgx->lmac_type = BGX_MODE_10G_KR;
+               bgx->lane_to_sds = 0;
+               bgx->use_training = 1;
+               break;
+       case QLM_MODE_40G_KR4_1X4:
+               bgx->lmac_count = 1;
+               bgx->lmac_type = BGX_MODE_40G_KR;
+               bgx->lane_to_sds = 0xE4;
+               bgx->use_training = 1;
+               break;
+       default:
+               bgx->lmac_count = 0;
+               break;
+       }
+
+       /* If the low-level firmware has programmed an LMAC count based
+        * on the board type, use it; otherwise keep the default static
+        * values set above.
+        */
+       lmac_count = bgx_reg_read(bgx, 0, BGX_CMR_RX_LMACS) & 0x7;
+       if (lmac_count != 4)
+               bgx->lmac_count = lmac_count;
+}
+
+static void bgx_init_hw(struct bgx *bgx)
+{
+       int i;
+
+       bgx_set_num_ports(bgx);
+
+       bgx_reg_modify(bgx, 0, BGX_CMR_GLOBAL_CFG, CMR_GLOBAL_CFG_FCS_STRIP);
+       if (bgx_reg_read(bgx, 0, BGX_CMR_BIST_STATUS))
+               dev_err(&bgx->pdev->dev, "BGX%d BIST failed\n", bgx->bgx_id);
+
+       /* Set lmac type and lane2serdes mapping */
+       for (i = 0; i < bgx->lmac_count; i++) {
+               if (bgx->lmac_type == BGX_MODE_RXAUI) {
+                       if (i)
+                               bgx->lane_to_sds = 0x0e;
+                       else
+                               bgx->lane_to_sds = 0x04;
+                       bgx_reg_write(bgx, i, BGX_CMRX_CFG,
+                                     (bgx->lmac_type << 8) | bgx->lane_to_sds);
+                       continue;
+               }
+               bgx_reg_write(bgx, i, BGX_CMRX_CFG,
+                             (bgx->lmac_type << 8) | (bgx->lane_to_sds + i));
+               bgx->lmac[i].lmacid_bd = lmac_count;
+               lmac_count++;
+       }
+
+       bgx_reg_write(bgx, 0, BGX_CMR_TX_LMACS, bgx->lmac_count);
+       bgx_reg_write(bgx, 0, BGX_CMR_RX_LMACS, bgx->lmac_count);
+
+       /* Set the backpressure AND mask */
+       for (i = 0; i < bgx->lmac_count; i++)
+               bgx_reg_modify(bgx, 0, BGX_CMR_CHAN_MSK_AND,
+                              ((1ULL << MAX_BGX_CHANS_PER_LMAC) - 1) <<
+                              (i * MAX_BGX_CHANS_PER_LMAC));
+
+       /* Disable all MAC filtering */
+       for (i = 0; i < RX_DMAC_COUNT; i++)
+               bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + (i * 8), 0x00);
+
+       /* Disable MAC steering (NCSI traffic) */
+       for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++)
+               bgx_reg_write(bgx, 0, BGX_CMR_RX_STREERING + (i * 8), 0x00);
+}
+
+static void bgx_get_qlm_mode(struct bgx *bgx)
+{
+       struct device *dev = &bgx->pdev->dev;
+       int lmac_type;
+       int train_en;
+
+       /* Read the LMAC0 type to figure out the QLM mode.
+        * This is configured by the low-level firmware.
+        */
+       lmac_type = bgx_reg_read(bgx, 0, BGX_CMRX_CFG);
+       lmac_type = (lmac_type >> 8) & 0x07;
+
+       train_en = bgx_reg_read(bgx, 0, BGX_SPUX_BR_PMD_CRTL) &
+                               SPU_PMD_CRTL_TRAIN_EN;
+
+       switch (lmac_type) {
+       case BGX_MODE_SGMII:
+               bgx->qlm_mode = QLM_MODE_SGMII;
+               dev_info(dev, "BGX%d QLM mode: SGMII\n", bgx->bgx_id);
+               break;
+       case BGX_MODE_XAUI:
+               bgx->qlm_mode = QLM_MODE_XAUI_1X4;
+               dev_info(dev, "BGX%d QLM mode: XAUI\n", bgx->bgx_id);
+               break;
+       case BGX_MODE_RXAUI:
+               bgx->qlm_mode = QLM_MODE_RXAUI_2X2;
+               dev_info(dev, "BGX%d QLM mode: RXAUI\n", bgx->bgx_id);
+               break;
+       case BGX_MODE_XFI:
+               if (!train_en) {
+                       bgx->qlm_mode = QLM_MODE_XFI_4X1;
+                       dev_info(dev, "BGX%d QLM mode: XFI\n", bgx->bgx_id);
+               } else {
+                       bgx->qlm_mode = QLM_MODE_10G_KR_4X1;
+                       dev_info(dev, "BGX%d QLM mode: 10G_KR\n", bgx->bgx_id);
+               }
+               break;
+       case BGX_MODE_XLAUI:
+               if (!train_en) {
+                       bgx->qlm_mode = QLM_MODE_XLAUI_1X4;
+                       dev_info(dev, "BGX%d QLM mode: XLAUI\n", bgx->bgx_id);
+               } else {
+                       bgx->qlm_mode = QLM_MODE_40G_KR4_1X4;
+                       dev_info(dev, "BGX%d QLM mode: 40G_KR4\n", bgx->bgx_id);
+               }
+               break;
+       default:
+               bgx->qlm_mode = QLM_MODE_SGMII;
+               dev_info(dev, "BGX%d QLM default mode: SGMII\n", bgx->bgx_id);
+       }
+}
+
+static void bgx_init_of(struct bgx *bgx, struct device_node *np)
+{
+       struct device_node *np_child;
+       u8 lmac = 0;
+
+       for_each_child_of_node(np, np_child) {
+               struct device_node *phy_np;
+               const char *mac;
+
+               phy_np = of_parse_phandle(np_child, "phy-handle", 0);
+               if (phy_np)
+                       bgx->lmac[lmac].phydev = of_phy_find_device(phy_np);
+
+               mac = of_get_mac_address(np_child);
+               if (mac)
+                       ether_addr_copy(bgx->lmac[lmac].mac, mac);
+
+               SET_NETDEV_DEV(&bgx->lmac[lmac].netdev, &bgx->pdev->dev);
+               bgx->lmac[lmac].lmacid = lmac;
+               lmac++;
+               if (lmac == MAX_LMAC_PER_BGX)
+                       break;
+       }
+}
+
+static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+       int err;
+       struct device *dev = &pdev->dev;
+       struct bgx *bgx = NULL;
+       struct device_node *np;
+       char bgx_sel[5];
+       u8 lmac;
+
+       bgx = devm_kzalloc(dev, sizeof(*bgx), GFP_KERNEL);
+       if (!bgx)
+               return -ENOMEM;
+       bgx->pdev = pdev;
+
+       pci_set_drvdata(pdev, bgx);
+
+       err = pci_enable_device(pdev);
+       if (err) {
+               dev_err(dev, "Failed to enable PCI device\n");
+               pci_set_drvdata(pdev, NULL);
+               return err;
+       }
+
+       err = pci_request_regions(pdev, DRV_NAME);
+       if (err) {
+               dev_err(dev, "PCI request regions failed 0x%x\n", err);
+               goto err_disable_device;
+       }
+
+       /* MAP configuration registers */
+       bgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
+       if (!bgx->reg_base) {
+               dev_err(dev, "BGX: Cannot map CSR memory space, aborting\n");
+               err = -ENOMEM;
+               goto err_release_regions;
+       }
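+       /* The BGX index within a node appears to be encoded in bit 24 of
+        * the BAR address (assumption); the node offset then makes the id
+        * globally unique.
+        */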
+       bgx->bgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24) & 1;
+       bgx->bgx_id += nic_get_node_id(pdev) * MAX_BGX_PER_CN88XX;
+
+       bgx_vnic[bgx->bgx_id] = bgx;
+       bgx_get_qlm_mode(bgx);
+
+       snprintf(bgx_sel, 5, "bgx%d", bgx->bgx_id);
+       np = of_find_node_by_name(NULL, bgx_sel);
+       if (np)
+               bgx_init_of(bgx, np);
+
+       bgx_init_hw(bgx);
+
+       /* Enable all LMACs */
+       for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
+               err = bgx_lmac_enable(bgx, lmac);
+               if (err) {
+                       dev_err(dev, "BGX%d failed to enable lmac%d\n",
+                               bgx->bgx_id, lmac);
+                       goto err_enable;
+               }
+       }
+
+       return 0;
+
+err_enable:
+       bgx_vnic[bgx->bgx_id] = NULL;
+err_release_regions:
+       pci_release_regions(pdev);
+err_disable_device:
+       pci_disable_device(pdev);
+       pci_set_drvdata(pdev, NULL);
+       return err;
+}
+
+static void bgx_remove(struct pci_dev *pdev)
+{
+       struct bgx *bgx = pci_get_drvdata(pdev);
+       u8 lmac;
+
+       /* Disable all LMACs */
+       for (lmac = 0; lmac < bgx->lmac_count; lmac++)
+               bgx_lmac_disable(bgx, lmac);
+
+       bgx_vnic[bgx->bgx_id] = NULL;
+       pci_release_regions(pdev);
+       pci_disable_device(pdev);
+       pci_set_drvdata(pdev, NULL);
+}
+
+static struct pci_driver bgx_driver = {
+       .name = DRV_NAME,
+       .id_table = bgx_id_table,
+       .probe = bgx_probe,
+       .remove = bgx_remove,
+};
+
+static int __init bgx_init_module(void)
+{
+       pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);
+
+       return pci_register_driver(&bgx_driver);
+}
+
+static void __exit bgx_cleanup_module(void)
+{
+       pci_unregister_driver(&bgx_driver);
+}
+
+module_init(bgx_init_module);
+module_exit(bgx_cleanup_module);
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
new file mode 100644 (file)
index 0000000..ba4f53b
--- /dev/null
@@ -0,0 +1,220 @@
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef THUNDER_BGX_H
+#define THUNDER_BGX_H
+
+#define    MAX_BGX_THUNDER                     8 /* Max 4 nodes, 2 per node */
+#define    MAX_BGX_PER_CN88XX                  2
+#define    MAX_LMAC_PER_BGX                    4
+#define    MAX_BGX_CHANS_PER_LMAC              16
+#define    MAX_DMAC_PER_LMAC                   8
+#define    MAX_FRAME_SIZE                      9216
+
+#define    MAX_DMAC_PER_LMAC_TNS_BYPASS_MODE   2
+
+#define    MAX_LMAC    (MAX_BGX_PER_CN88XX * MAX_LMAC_PER_BGX)
+
+/* Registers */
+#define BGX_CMRX_CFG                   0x00
+#define  CMR_PKT_TX_EN                         BIT_ULL(13)
+#define  CMR_PKT_RX_EN                         BIT_ULL(14)
+#define  CMR_EN                                        BIT_ULL(15)
+#define BGX_CMR_GLOBAL_CFG             0x08
+#define  CMR_GLOBAL_CFG_FCS_STRIP              BIT_ULL(6)
+#define BGX_CMRX_RX_ID_MAP             0x60
+#define BGX_CMRX_RX_STAT0              0x70
+#define BGX_CMRX_RX_STAT1              0x78
+#define BGX_CMRX_RX_STAT2              0x80
+#define BGX_CMRX_RX_STAT3              0x88
+#define BGX_CMRX_RX_STAT4              0x90
+#define BGX_CMRX_RX_STAT5              0x98
+#define BGX_CMRX_RX_STAT6              0xA0
+#define BGX_CMRX_RX_STAT7              0xA8
+#define BGX_CMRX_RX_STAT8              0xB0
+#define BGX_CMRX_RX_STAT9              0xB8
+#define BGX_CMRX_RX_STAT10             0xC0
+#define BGX_CMRX_RX_BP_DROP            0xC8
+#define BGX_CMRX_RX_DMAC_CTL           0x0E8
+#define BGX_CMR_RX_DMACX_CAM           0x200
+#define  RX_DMACX_CAM_EN                       BIT_ULL(48)
+#define  RX_DMACX_CAM_LMACID(x)                        ((u64)(x) << 49)
+#define  RX_DMAC_COUNT                         32
+#define BGX_CMR_RX_STREERING           0x300
+#define  RX_TRAFFIC_STEER_RULE_COUNT           8
+#define BGX_CMR_CHAN_MSK_AND           0x450
+#define BGX_CMR_BIST_STATUS            0x460
+#define BGX_CMR_RX_LMACS               0x468
+#define BGX_CMRX_TX_STAT0              0x600
+#define BGX_CMRX_TX_STAT1              0x608
+#define BGX_CMRX_TX_STAT2              0x610
+#define BGX_CMRX_TX_STAT3              0x618
+#define BGX_CMRX_TX_STAT4              0x620
+#define BGX_CMRX_TX_STAT5              0x628
+#define BGX_CMRX_TX_STAT6              0x630
+#define BGX_CMRX_TX_STAT7              0x638
+#define BGX_CMRX_TX_STAT8              0x640
+#define BGX_CMRX_TX_STAT9              0x648
+#define BGX_CMRX_TX_STAT10             0x650
+#define BGX_CMRX_TX_STAT11             0x658
+#define BGX_CMRX_TX_STAT12             0x660
+#define BGX_CMRX_TX_STAT13             0x668
+#define BGX_CMRX_TX_STAT14             0x670
+#define BGX_CMRX_TX_STAT15             0x678
+#define BGX_CMRX_TX_STAT16             0x680
+#define BGX_CMRX_TX_STAT17             0x688
+#define BGX_CMR_TX_LMACS               0x1000
+
+#define BGX_SPUX_CONTROL1              0x10000
+#define  SPU_CTL_LOW_POWER                     BIT_ULL(11)
+#define  SPU_CTL_RESET                         BIT_ULL(15)
+#define BGX_SPUX_STATUS1               0x10008
+#define  SPU_STATUS1_RCV_LNK                   BIT_ULL(2)
+#define BGX_SPUX_STATUS2               0x10020
+#define  SPU_STATUS2_RCVFLT                    BIT_ULL(10)
+#define BGX_SPUX_BX_STATUS             0x10028
+#define  SPU_BX_STATUS_RX_ALIGN                        BIT_ULL(12)
+#define BGX_SPUX_BR_STATUS1            0x10030
+#define  SPU_BR_STATUS_BLK_LOCK                        BIT_ULL(0)
+#define  SPU_BR_STATUS_RCV_LNK                 BIT_ULL(12)
+#define BGX_SPUX_BR_PMD_CRTL           0x10068
+#define  SPU_PMD_CRTL_TRAIN_EN                 BIT_ULL(1)
+#define BGX_SPUX_BR_PMD_LP_CUP         0x10078
+#define BGX_SPUX_BR_PMD_LD_CUP         0x10088
+#define BGX_SPUX_BR_PMD_LD_REP         0x10090
+#define BGX_SPUX_FEC_CONTROL           0x100A0
+#define  SPU_FEC_CTL_FEC_EN                    BIT_ULL(0)
+#define  SPU_FEC_CTL_ERR_EN                    BIT_ULL(1)
+#define BGX_SPUX_AN_CONTROL            0x100C8
+#define  SPU_AN_CTL_AN_EN                      BIT_ULL(12)
+#define  SPU_AN_CTL_XNP_EN                     BIT_ULL(13)
+#define BGX_SPUX_AN_ADV                        0x100D8
+#define BGX_SPUX_MISC_CONTROL          0x10218
+#define  SPU_MISC_CTL_INTLV_RDISP              BIT_ULL(10)
+#define  SPU_MISC_CTL_RX_DIS                   BIT_ULL(12)
+#define BGX_SPUX_INT                   0x10220 /* +(0..3) << 20 */
+#define BGX_SPUX_INT_W1S               0x10228
+#define BGX_SPUX_INT_ENA_W1C           0x10230
+#define BGX_SPUX_INT_ENA_W1S           0x10238
+#define BGX_SPU_DBG_CONTROL            0x10300
+#define  SPU_DBG_CTL_AN_ARB_LINK_CHK_EN                BIT_ULL(18)
+#define  SPU_DBG_CTL_AN_NONCE_MCT_DIS          BIT_ULL(29)
+
+#define BGX_SMUX_RX_INT                        0x20000
+#define BGX_SMUX_RX_JABBER             0x20030
+#define BGX_SMUX_RX_CTL                        0x20048
+#define  SMU_RX_CTL_STATUS                     (3ull << 0)
+#define BGX_SMUX_TX_APPEND             0x20100
+#define  SMU_TX_APPEND_FCS_D                   BIT_ULL(2)
+#define BGX_SMUX_TX_MIN_PKT            0x20118
+#define BGX_SMUX_TX_INT                        0x20140
+#define BGX_SMUX_TX_CTL                        0x20178
+#define  SMU_TX_CTL_DIC_EN                     BIT_ULL(0)
+#define  SMU_TX_CTL_UNI_EN                     BIT_ULL(1)
+#define  SMU_TX_CTL_LNK_STATUS                 (3ull << 4)
+#define BGX_SMUX_TX_THRESH             0x20180
+#define BGX_SMUX_CTL                   0x20200
+#define  SMU_CTL_RX_IDLE                       BIT_ULL(0)
+#define  SMU_CTL_TX_IDLE                       BIT_ULL(1)
+
+#define BGX_GMP_PCS_MRX_CTL            0x30000
+#define         PCS_MRX_CTL_RST_AN                     BIT_ULL(9)
+#define         PCS_MRX_CTL_PWR_DN                     BIT_ULL(11)
+#define         PCS_MRX_CTL_AN_EN                      BIT_ULL(12)
+#define         PCS_MRX_CTL_RESET                      BIT_ULL(15)
+#define BGX_GMP_PCS_MRX_STATUS         0x30008
+#define         PCS_MRX_STATUS_AN_CPT                  BIT_ULL(5)
+#define BGX_GMP_PCS_ANX_AN_RESULTS     0x30020
+#define BGX_GMP_PCS_SGM_AN_ADV         0x30068
+#define BGX_GMP_PCS_MISCX_CTL          0x30078
+#define  PCS_MISC_CTL_GMX_ENO                  BIT_ULL(11)
+#define  PCS_MISC_CTL_SAMP_PT_MASK     0x7Full
+#define BGX_GMP_GMI_PRTX_CFG           0x38020
+#define  GMI_PORT_CFG_SPEED                    BIT_ULL(1)
+#define  GMI_PORT_CFG_DUPLEX                   BIT_ULL(2)
+#define  GMI_PORT_CFG_SLOT_TIME                        BIT_ULL(3)
+#define  GMI_PORT_CFG_SPEED_MSB                        BIT_ULL(8)
+#define BGX_GMP_GMI_RXX_JABBER         0x38038
+#define BGX_GMP_GMI_TXX_THRESH         0x38210
+#define BGX_GMP_GMI_TXX_APPEND         0x38218
+#define BGX_GMP_GMI_TXX_SLOT           0x38220
+#define BGX_GMP_GMI_TXX_BURST          0x38228
+#define BGX_GMP_GMI_TXX_MIN_PKT                0x38240
+#define BGX_GMP_GMI_TXX_SGMII_CTL      0x38300
+
+#define BGX_MSIX_VEC_0_29_ADDR         0x400000 /* +(0..29) << 4 */
+#define BGX_MSIX_VEC_0_29_CTL          0x400008
+#define BGX_MSIX_PBA_0                 0x4F0000
+
+/* MSI-X interrupts */
+#define BGX_MSIX_VECTORS       30
+#define BGX_LMAC_VEC_OFFSET    7
+#define BGX_MSIX_VEC_SHIFT     4
+
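+/* The 30 vectors cover 4 LMACs x 7 per-LMAC interrupts (CMRX..GMPX_GMI_TX)
+ * plus the two memory-error vectors (CMR_MEM_INT, SPU_MEM_INT).
+ */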
+#define CMRX_INT               0
+#define SPUX_INT               1
+#define SMUX_RX_INT            2
+#define SMUX_TX_INT            3
+#define GMPX_PCS_INT           4
+#define GMPX_GMI_RX_INT                5
+#define GMPX_GMI_TX_INT                6
+#define CMR_MEM_INT            28
+#define SPU_MEM_INT            29
+
+#define LMAC_INTR_LINK_UP      BIT(0)
+#define LMAC_INTR_LINK_DOWN    BIT(1)
+
+/* RX_DMAC_CTL configuration */
+enum MCAST_MODE {
+       MCAST_MODE_REJECT,
+       MCAST_MODE_ACCEPT,
+       MCAST_MODE_CAM_FILTER,
+       RSVD
+};
+
+#define BCAST_ACCEPT   1
+#define CAM_ACCEPT     1
+
+void bgx_add_dmac_addr(u64 dmac, int node, int bgx_idx, int lmac);
+unsigned bgx_get_map(int node);
+int bgx_get_lmac_count(int node, int bgx_idx);
+const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid);
+void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac);
+void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status);
+u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx);
+u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx);
+#define BGX_RX_STATS_COUNT 11
+#define BGX_TX_STATS_COUNT 18
+
+struct bgx_stats {
+       u64 rx_stats[BGX_RX_STATS_COUNT];
+       u64 tx_stats[BGX_TX_STATS_COUNT];
+};
+
+enum LMAC_TYPE {
+       BGX_MODE_SGMII = 0, /* 1 lane, 1.250 Gbaud */
+       BGX_MODE_XAUI = 1,  /* 4 lanes, 3.125 Gbaud */
+       BGX_MODE_DXAUI = 1, /* 4 lanes, 6.250 Gbaud */
+       BGX_MODE_RXAUI = 2, /* 2 lanes, 6.250 Gbaud */
+       BGX_MODE_XFI = 3,   /* 1 lane, 10.3125 Gbaud */
+       BGX_MODE_XLAUI = 4, /* 4 lanes, 10.3125 Gbaud */
+       BGX_MODE_10G_KR = 3,/* 1 lane, 10.3125 Gbaud */
+       BGX_MODE_40G_KR = 4,/* 4 lanes, 10.3125 Gbaud */
+};
+
+enum qlm_mode {
+       QLM_MODE_SGMII,         /* SGMII, each lane independent */
+       QLM_MODE_XAUI_1X4,      /* 1 XAUI or DXAUI, 4 lanes */
+       QLM_MODE_RXAUI_2X2,     /* 2 RXAUI, 2 lanes each */
+       QLM_MODE_XFI_4X1,       /* 4 XFI, 1 lane each */
+       QLM_MODE_XLAUI_1X4,     /* 1 XLAUI, 4 lanes each */
+       QLM_MODE_10G_KR_4X1,    /* 4 10GBASE-KR, 1 lane each */
+       QLM_MODE_40G_KR4_1X4,   /* 1 40GBASE-KR4, 4 lanes each */
+};
+
+#endif /* THUNDER_BGX_H */
index 524d11098c566d178d132fe0b3a05be557036440..4d627a8f04b06c0e5598530505c3a185a1bdb55e 100644 (file)
 #include <linux/spinlock.h>
 #include <linux/timer.h>
 #include <linux/vmalloc.h>
+#include <linux/etherdevice.h>
 #include <asm/io.h>
 #include "cxgb4_uld.h"
 
 #define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__)
 
 enum {
-       MAX_NPORTS = 4,     /* max # of ports */
-       SERNUM_LEN = 24,    /* Serial # length */
-       EC_LEN     = 16,    /* E/C length */
-       ID_LEN     = 16,    /* ID length */
-       PN_LEN     = 16,    /* Part Number length */
+       MAX_NPORTS      = 4,     /* max # of ports */
+       SERNUM_LEN      = 24,    /* Serial # length */
+       EC_LEN          = 16,    /* E/C length */
+       ID_LEN          = 16,    /* ID length */
+       PN_LEN          = 16,    /* Part Number length */
+       MACADDR_LEN     = 12,    /* MAC Address length */
 };
 
 enum {
@@ -198,23 +200,45 @@ struct lb_port_stats {
 };
 
 struct tp_tcp_stats {
-       u32 tcpOutRsts;
-       u64 tcpInSegs;
-       u64 tcpOutSegs;
-       u64 tcpRetransSegs;
+       u32 tcp_out_rsts;
+       u64 tcp_in_segs;
+       u64 tcp_out_segs;
+       u64 tcp_retrans_segs;
+};
+
+struct tp_usm_stats {
+       u32 frames;
+       u32 drops;
+       u64 octets;
+};
+
+struct tp_fcoe_stats {
+       u32 frames_ddp;
+       u32 frames_drop;
+       u64 octets_ddp;
 };
 
 struct tp_err_stats {
-       u32 macInErrs[4];
-       u32 hdrInErrs[4];
-       u32 tcpInErrs[4];
-       u32 tnlCongDrops[4];
-       u32 ofldChanDrops[4];
-       u32 tnlTxDrops[4];
-       u32 ofldVlanDrops[4];
-       u32 tcp6InErrs[4];
-       u32 ofldNoNeigh;
-       u32 ofldCongDefer;
+       u32 mac_in_errs[4];
+       u32 hdr_in_errs[4];
+       u32 tcp_in_errs[4];
+       u32 tnl_cong_drops[4];
+       u32 ofld_chan_drops[4];
+       u32 tnl_tx_drops[4];
+       u32 ofld_vlan_drops[4];
+       u32 tcp6_in_errs[4];
+       u32 ofld_no_neigh;
+       u32 ofld_cong_defer;
+};
+
+struct tp_cpl_stats {
+       u32 req[4];
+       u32 rsp[4];
+};
+
+struct tp_rdma_stats {
+       u32 rqe_dfr_pkt;
+       u32 rqe_dfr_mod;
 };
 
 struct sge_params {
@@ -224,7 +248,6 @@ struct sge_params {
 };
 
 struct tp_params {
-       unsigned int ntxchan;        /* # of Tx channels */
        unsigned int tre;            /* log2 of core clocks per TP tick */
        unsigned int la_mask;        /* what events are recorded by TP LA */
        unsigned short tx_modq_map;  /* TX modulation scheduler queue to */
@@ -259,6 +282,7 @@ struct vpd_params {
        u8 sn[SERNUM_LEN + 1];
        u8 id[ID_LEN + 1];
        u8 pn[PN_LEN + 1];
+       u8 na[MACADDR_LEN + 1];
 };
 
 struct pci_params {
@@ -273,6 +297,7 @@ struct pci_params {
 
 #define CHELSIO_T4             0x4
 #define CHELSIO_T5             0x5
+#define CHELSIO_T6             0x6
 
 enum chip_type {
        T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1),
@@ -284,6 +309,10 @@ enum chip_type {
        T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1),
        T5_FIRST_REV    = T5_A0,
        T5_LAST_REV     = T5_A1,
+
+       T6_A0 = CHELSIO_CHIP_CODE(CHELSIO_T6, 0),
+       T6_FIRST_REV    = T6_A0,
+       T6_LAST_REV     = T6_A0,
 };
 
 struct devlog_params {
@@ -292,6 +321,15 @@ struct devlog_params {
        u32 size;                       /* size of log */
 };
 
+/* Stores chip specific parameters */
+struct arch_specific_params {
+       u8 nchan;
+       u16 mps_rplc_size;
+       u16 vfcount;
+       u32 sge_fl_db;
+       u16 mps_tcam_size;
+};
+
 struct adapter_params {
        struct sge_params sge;
        struct tp_params  tp;
@@ -317,6 +355,7 @@ struct adapter_params {
        unsigned char nports;             /* # of ethernet ports */
        unsigned char portvec;
        enum chip_type chip;               /* chip code */
+       struct arch_specific_params arch;  /* chip specific params */
        unsigned char offload;
 
        unsigned char bypass;
@@ -328,6 +367,17 @@ struct adapter_params {
        unsigned int max_ird_adapter;     /* Max read depth per adapter */
 };
 
+/* State needed to monitor the forward progress of SGE Ingress DMA activities
+ * and possible hangs.
+ */
+struct sge_idma_monitor_state {
+       unsigned int idma_1s_thresh;    /* 1s threshold in Core Clock ticks */
+       unsigned int idma_stalled[2];   /* synthesized stalled timers in HZ */
+       unsigned int idma_state[2];     /* IDMA Hang detect state */
+       unsigned int idma_qid[2];       /* IDMA Hung Ingress Queue ID */
+       unsigned int idma_warn[2];      /* time to warning in HZ */
+};
+
 #include "t4fw_api.h"
 
 #define FW_VERSION(chip) ( \
@@ -421,6 +471,7 @@ struct port_info {
        u8     rss_mode;
        struct link_config link_cfg;
        u16   *rss;
+       struct port_stats stats_base;
 #ifdef CONFIG_CHELSIO_T4_DCB
        struct port_dcb_info dcb;     /* Data Center Bridging support */
 #endif
@@ -630,12 +681,7 @@ struct sge {
        u32 fl_align;               /* response queue message alignment */
        u32 fl_starve_thres;        /* Free List starvation threshold */
 
-       /* State variables for detecting an SGE Ingress DMA hang */
-       unsigned int idma_1s_thresh;/* SGE same State Counter 1s threshold */
-       unsigned int idma_stalled[2];/* SGE synthesized stalled timers in HZ */
-       unsigned int idma_state[2]; /* SGE IDMA Hang detect state */
-       unsigned int idma_qid[2];   /* SGE IDMA Hung Ingress Queue ID */
-
+       struct sge_idma_monitor_state idma_monitor;
        unsigned int egr_start;
        unsigned int egr_sz;
        unsigned int ingr_start;
@@ -644,6 +690,7 @@ struct sge {
        struct sge_rspq **ingr_map; /* qid->queue ingress queue map */
        unsigned long *starving_fl;
        unsigned long *txq_maperr;
+       unsigned long *blocked_fl;
        struct timer_list rx_timer; /* refills starving FLs */
        struct timer_list tx_timer; /* checks Tx queues */
 };
@@ -665,6 +712,12 @@ struct l2t_data;
 
 #endif
 
+struct doorbell_stats {
+       u32 db_drop;
+       u32 db_empty;
+       u32 db_full;
+};
+
 struct adapter {
        void __iomem *regs;
        void __iomem *bar2;
@@ -672,7 +725,7 @@ struct adapter {
        struct pci_dev *pdev;
        struct device *pdev_dev;
        unsigned int mbox;
-       unsigned int fn;
+       unsigned int pf;
        unsigned int flags;
        enum chip_type chip;
 
@@ -682,13 +735,12 @@ struct adapter {
        struct cxgb4_virt_res vres;
        unsigned int swintr;
 
-       unsigned int wol;
-
        struct {
                unsigned short vec;
                char desc[IFNAMSIZ + 10];
        } msix_info[MAX_INGQ + 1];
 
+       struct doorbell_stats db_stats;
        struct sge sge;
 
        struct net_device *port[MAX_NPORTS];
@@ -843,6 +895,16 @@ enum {
        VLAN_REWRITE
 };
 
+static inline int is_offload(const struct adapter *adap)
+{
+       return adap->params.offload;
+}
+
+static inline int is_t6(enum chip_type chip)
+{
+       return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T6;
+}
+
 static inline int is_t5(enum chip_type chip)
 {
        return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T5;
@@ -886,6 +948,22 @@ static inline void t4_write_reg64(struct adapter *adap, u32 reg_addr, u64 val)
        writeq(val, adap->regs + reg_addr);
 }
 
+/**
+ * t4_set_hw_addr - store a port's MAC address in SW
+ * @adapter: the adapter
+ * @port_idx: the port index
+ * @hw_addr: the Ethernet address
+ *
+ * Store the Ethernet address of the given port in SW.  Called by the common
+ * code when it retrieves a port's Ethernet address from EEPROM.
+ */
+static inline void t4_set_hw_addr(struct adapter *adapter, int port_idx,
+                                 u8 hw_addr[])
+{
+       ether_addr_copy(adapter->port[port_idx]->dev_addr, hw_addr);
+       ether_addr_copy(adapter->port[port_idx]->perm_addr, hw_addr);
+}
+
 /**
  * netdev2pinfo - return the port_info structure associated with a net_device
  * @dev: the netdev
@@ -1055,7 +1133,7 @@ int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
 int t4_ofld_send(struct adapter *adap, struct sk_buff *skb);
 int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
                     struct net_device *dev, int intr_idx,
-                    struct sge_fl *fl, rspq_handler_t hnd);
+                    struct sge_fl *fl, rspq_handler_t hnd, int cong);
 int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
                         struct net_device *dev, struct netdev_queue *netdevq,
                         unsigned int iqid);
@@ -1095,6 +1173,19 @@ static inline int is_bypass_device(int device)
        }
 }
 
+static inline int is_10gbt_device(int device)
+{
+       /* this should be set based upon device capabilities */
+       switch (device) {
+       case 0x4409:
+       case 0x4486:
+               return 1;
+
+       default:
+               return 0;
+       }
+}
+
 static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
 {
        return adap->params.vpd.cclk / 1000;
@@ -1117,9 +1208,19 @@ static inline unsigned int core_ticks_to_us(const struct adapter *adapter,
 void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask,
                      u32 val);
 
+int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
+                           int size, void *rpl, bool sleep_ok, int timeout);
 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
                    void *rpl, bool sleep_ok);
 
+static inline int t4_wr_mbox_timeout(struct adapter *adap, int mbox,
+                                    const void *cmd, int size, void *rpl,
+                                    int timeout)
+{
+       return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, true,
+                                      timeout);
+}
+
 static inline int t4_wr_mbox(struct adapter *adap, int mbox, const void *cmd,
                             int size, void *rpl)
 {
@@ -1147,10 +1248,14 @@ void t4_intr_disable(struct adapter *adapter);
 int t4_slow_intr_handler(struct adapter *adapter);
 
 int t4_wait_dev_ready(void __iomem *regs);
-int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
+int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
                  struct link_config *lc);
 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
 
+u32 t4_read_pcie_cfg4(struct adapter *adap, int reg);
+u32 t4_get_util_window(struct adapter *adap);
+void t4_setup_memwin(struct adapter *adap, u32 memwin_base, u32 window);
+
 #define T4_MEMORY_WRITE        0
 #define T4_MEMORY_READ 1
 int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, u32 len,
@@ -1165,10 +1270,16 @@ unsigned int t4_get_regs_len(struct adapter *adapter);
 void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size);
 
 int t4_seeprom_wp(struct adapter *adapter, bool enable);
-int get_vpd_params(struct adapter *adapter, struct vpd_params *p);
+int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p);
+int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p);
 int t4_read_flash(struct adapter *adapter, unsigned int addr,
                  unsigned int nwords, u32 *data, int byte_oriented);
 int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
+int t4_load_phy_fw(struct adapter *adap,
+                  int win, spinlock_t *lock,
+                  int (*phy_fw_version)(const u8 *, size_t),
+                  const u8 *phy_fw_data, size_t phy_fw_size);
+int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver);
 int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op);
 int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
                  const u8 *fw_data, unsigned int size, int force);
@@ -1182,7 +1293,7 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
 int t4_prep_adapter(struct adapter *adapter);
 
 enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS };
-int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter,
+int t4_bar2_sge_qregs(struct adapter *adapter,
                      unsigned int qid,
                      enum t4_bar2_qtype qtype,
                      u64 *pbar2_qoffset,
@@ -1195,12 +1306,15 @@ int t4_init_devlog_params(struct adapter *adapter);
 int t4_init_sge_params(struct adapter *adapter);
 int t4_init_tp_params(struct adapter *adap);
 int t4_filter_field_shift(const struct adapter *adap, int filter_sel);
+int t4_init_rss_mode(struct adapter *adap, int mbox);
 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
 void t4_fatal_err(struct adapter *adapter);
 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
                        int start, int n, const u16 *rspq, unsigned int nrspq);
 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
                       unsigned int flags);
+int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
+                    unsigned int flags, unsigned int defq);
 int t4_read_rss(struct adapter *adapter, u16 *entries);
 void t4_read_rss_key(struct adapter *adapter, u32 *key);
 void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx);
@@ -1211,10 +1325,7 @@ void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
 u32 t4_read_rss_pf_map(struct adapter *adapter);
 u32 t4_read_rss_pf_mask(struct adapter *adapter);
 
-int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
-              u64 *parity);
-int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
-               u64 *parity);
+unsigned int t4_get_mps_bg_map(struct adapter *adapter, int idx);
 void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]);
 void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]);
 int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data,
@@ -1229,13 +1340,23 @@ int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr);
 void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres);
 const char *t4_get_port_type_description(enum fw_port_type port_type);
 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
+void t4_get_port_stats_offset(struct adapter *adap, int idx,
+                             struct port_stats *stats,
+                             struct port_stats *offset);
+void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p);
 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
 void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN]);
 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
                            unsigned int mask, unsigned int val);
 void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr);
+void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st);
+void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st);
+void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st);
+void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st);
 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
                         struct tp_tcp_stats *v6);
+void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
+                      struct tp_fcoe_stats *st);
 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
                  const unsigned short *alpha, const unsigned short *beta);
 
@@ -1259,13 +1380,16 @@ int t4_fw_initialize(struct adapter *adap, unsigned int mbox);
 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
                    unsigned int vf, unsigned int nparams, const u32 *params,
                    u32 *val);
+int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
+                      unsigned int vf, unsigned int nparams, const u32 *params,
+                      u32 *val, int rw);
+int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
+                         unsigned int pf, unsigned int vf,
+                         unsigned int nparams, const u32 *params,
+                         const u32 *val, int timeout);
 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
                  unsigned int vf, unsigned int nparams, const u32 *params,
                  const u32 *val);
-int t4_set_params_nosleep(struct adapter *adap, unsigned int mbox,
-                         unsigned int pf, unsigned int vf,
-                         unsigned int nparams, const u32 *params,
-                         const u32 *val);
 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
                unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
                unsigned int rxqi, unsigned int rxq, unsigned int tc,
@@ -1274,6 +1398,9 @@ int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
                unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
                unsigned int *rss_size);
+int t4_free_vi(struct adapter *adap, unsigned int mbox,
+              unsigned int pf, unsigned int vf,
+              unsigned int viid);
 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
                int mtu, int promisc, int all_multi, int bcast, int vlanex,
                bool sleep_ok);
@@ -1303,6 +1430,7 @@ int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
                    unsigned int vf, unsigned int eqid);
 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
                    unsigned int vf, unsigned int eqid);
+int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox);
 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl);
 void t4_db_full(struct adapter *adapter);
 void t4_db_dropped(struct adapter *adapter);
@@ -1310,4 +1438,9 @@ int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
                         u32 addr, u32 val);
 void t4_sge_decode_idma_state(struct adapter *adapter, int state);
 void t4_free_mem(void *addr);
+void t4_idma_monitor_init(struct adapter *adapter,
+                         struct sge_idma_monitor_state *idma);
+void t4_idma_monitor(struct adapter *adapter,
+                    struct sge_idma_monitor_state *idma,
+                    int hz, int ticks);
 #endif /* __CXGB4_H__ */
index 371f75e782e5eb76fd22205d9aedfed30850aa19..3719807efdddf176c10b460c7eeb9b0030b6c38d 100644 (file)
@@ -1084,41 +1084,89 @@ static inline void tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask)
 
 static int mps_tcam_show(struct seq_file *seq, void *v)
 {
-       if (v == SEQ_START_TOKEN)
-               seq_puts(seq, "Idx  Ethernet address     Mask     Vld Ports PF"
-                        "  VF              Replication             "
-                        "P0 P1 P2 P3  ML\n");
-       else {
+       struct adapter *adap = seq->private;
+       unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
+
+       if (v == SEQ_START_TOKEN) {
+               if (adap->params.arch.mps_rplc_size > 128)
+                       seq_puts(seq, "Idx  Ethernet address     Mask     "
+                                "Vld Ports PF  VF                           "
+                                "Replication                                "
+                                "    P0 P1 P2 P3  ML\n");
+               else
+                       seq_puts(seq, "Idx  Ethernet address     Mask     "
+                                "Vld Ports PF  VF              Replication"
+                                "               P0 P1 P2 P3  ML\n");
+       } else {
                u64 mask;
                u8 addr[ETH_ALEN];
-               struct adapter *adap = seq->private;
+               bool replicate;
                unsigned int idx = (uintptr_t)v - 2;
-               u64 tcamy = t4_read_reg64(adap, MPS_CLS_TCAM_Y_L(idx));
-               u64 tcamx = t4_read_reg64(adap, MPS_CLS_TCAM_X_L(idx));
-               u32 cls_lo = t4_read_reg(adap, MPS_CLS_SRAM_L(idx));
-               u32 cls_hi = t4_read_reg(adap, MPS_CLS_SRAM_H(idx));
-               u32 rplc[4] = {0, 0, 0, 0};
+               u64 tcamy, tcamx, val;
+               u32 cls_lo, cls_hi, ctl;
+               u32 rplc[8] = {0};
+
+               if (chip_ver > CHELSIO_T5) {
+                       /* CtlCmdType - 0: Read, 1: Write
+                        * CtlTcamSel - 0: TCAM0, 1: TCAM1
+                        * CtlXYBitSel- 0: Y bit, 1: X bit
+                        */
+
+                       /* Read tcamy */
+                       ctl = CTLCMDTYPE_V(0) | CTLXYBITSEL_V(0);
+                       if (idx < 256)
+                               ctl |= CTLTCAMINDEX_V(idx) | CTLTCAMSEL_V(0);
+                       else
+                               ctl |= CTLTCAMINDEX_V(idx - 256) |
+                                      CTLTCAMSEL_V(1);
+                       t4_write_reg(adap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
+                       val = t4_read_reg(adap, MPS_CLS_TCAM_DATA1_A);
+                       tcamy = DMACH_G(val) << 32;
+                       tcamy |= t4_read_reg(adap, MPS_CLS_TCAM_DATA0_A);
+
+                       /* Read tcamx. Change the control param */
+                       ctl |= CTLXYBITSEL_V(1);
+                       t4_write_reg(adap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
+                       val = t4_read_reg(adap, MPS_CLS_TCAM_DATA1_A);
+                       tcamx = DMACH_G(val) << 32;
+                       tcamx |= t4_read_reg(adap, MPS_CLS_TCAM_DATA0_A);
+               } else {
+                       tcamy = t4_read_reg64(adap, MPS_CLS_TCAM_Y_L(idx));
+                       tcamx = t4_read_reg64(adap, MPS_CLS_TCAM_X_L(idx));
+               }
+
+               cls_lo = t4_read_reg(adap, MPS_CLS_SRAM_L(idx));
+               cls_hi = t4_read_reg(adap, MPS_CLS_SRAM_H(idx));
 
                if (tcamx & tcamy) {
                        seq_printf(seq, "%3u         -\n", idx);
                        goto out;
                }
 
-               if (cls_lo & REPLICATE_F) {
+               rplc[0] = rplc[1] = rplc[2] = rplc[3] = 0;
+               if (chip_ver > CHELSIO_T5)
+                       replicate = (cls_lo & T6_REPLICATE_F);
+               else
+                       replicate = (cls_lo & REPLICATE_F);
+
+               if (replicate) {
                        struct fw_ldst_cmd ldst_cmd;
                        int ret;
+                       struct fw_ldst_mps_rplc mps_rplc;
+                       u32 ldst_addrspc;
 
                        memset(&ldst_cmd, 0, sizeof(ldst_cmd));
+                       ldst_addrspc =
+                               FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MPS);
                        ldst_cmd.op_to_addrspace =
                                htonl(FW_CMD_OP_V(FW_LDST_CMD) |
                                      FW_CMD_REQUEST_F |
                                      FW_CMD_READ_F |
-                                     FW_LDST_CMD_ADDRSPACE_V(
-                                             FW_LDST_ADDRSPC_MPS));
+                                     ldst_addrspc);
                        ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
-                       ldst_cmd.u.mps.fid_ctl =
+                       ldst_cmd.u.mps.rplc.fid_idx =
                                htons(FW_LDST_CMD_FID_V(FW_LDST_MPS_RPLC) |
-                                     FW_LDST_CMD_CTL_V(idx));
+                                     FW_LDST_CMD_IDX_V(idx));
                        ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd,
                                         sizeof(ldst_cmd), &ldst_cmd);
                        if (ret)
@@ -1126,30 +1174,69 @@ static int mps_tcam_show(struct seq_file *seq, void *v)
                                         "replication map for idx %d: %d\n",
                                         idx, -ret);
                        else {
-                               rplc[0] = ntohl(ldst_cmd.u.mps.rplc31_0);
-                               rplc[1] = ntohl(ldst_cmd.u.mps.rplc63_32);
-                               rplc[2] = ntohl(ldst_cmd.u.mps.rplc95_64);
-                               rplc[3] = ntohl(ldst_cmd.u.mps.rplc127_96);
+                               mps_rplc = ldst_cmd.u.mps.rplc;
+                               rplc[0] = ntohl(mps_rplc.rplc31_0);
+                               rplc[1] = ntohl(mps_rplc.rplc63_32);
+                               rplc[2] = ntohl(mps_rplc.rplc95_64);
+                               rplc[3] = ntohl(mps_rplc.rplc127_96);
+                               if (adap->params.arch.mps_rplc_size > 128) {
+                                       rplc[4] = ntohl(mps_rplc.rplc159_128);
+                                       rplc[5] = ntohl(mps_rplc.rplc191_160);
+                                       rplc[6] = ntohl(mps_rplc.rplc223_192);
+                                       rplc[7] = ntohl(mps_rplc.rplc255_224);
+                               }
                        }
                }
 
                tcamxy2valmask(tcamx, tcamy, addr, &mask);
-               seq_printf(seq, "%3u %02x:%02x:%02x:%02x:%02x:%02x %012llx"
-                          "%3c   %#x%4u%4d",
-                          idx, addr[0], addr[1], addr[2], addr[3], addr[4],
-                          addr[5], (unsigned long long)mask,
-                          (cls_lo & SRAM_VLD_F) ? 'Y' : 'N', PORTMAP_G(cls_hi),
-                          PF_G(cls_lo),
-                          (cls_lo & VF_VALID_F) ? VF_G(cls_lo) : -1);
-               if (cls_lo & REPLICATE_F)
-                       seq_printf(seq, " %08x %08x %08x %08x",
-                                  rplc[3], rplc[2], rplc[1], rplc[0]);
+               if (chip_ver > CHELSIO_T5)
+                       seq_printf(seq, "%3u %02x:%02x:%02x:%02x:%02x:%02x "
+                                  "%012llx%3c   %#x%4u%4d",
+                                  idx, addr[0], addr[1], addr[2], addr[3],
+                                  addr[4], addr[5], (unsigned long long)mask,
+                                  (cls_lo & T6_SRAM_VLD_F) ? 'Y' : 'N',
+                                  PORTMAP_G(cls_hi),
+                                  T6_PF_G(cls_lo),
+                                  (cls_lo & T6_VF_VALID_F) ?
+                                  T6_VF_G(cls_lo) : -1);
                else
-                       seq_printf(seq, "%36c", ' ');
-               seq_printf(seq, "%4u%3u%3u%3u %#x\n",
-                          SRAM_PRIO0_G(cls_lo), SRAM_PRIO1_G(cls_lo),
-                          SRAM_PRIO2_G(cls_lo), SRAM_PRIO3_G(cls_lo),
-                          (cls_lo >> MULTILISTEN0_S) & 0xf);
+                       seq_printf(seq, "%3u %02x:%02x:%02x:%02x:%02x:%02x "
+                                  "%012llx%3c   %#x%4u%4d",
+                                  idx, addr[0], addr[1], addr[2], addr[3],
+                                  addr[4], addr[5], (unsigned long long)mask,
+                                  (cls_lo & SRAM_VLD_F) ? 'Y' : 'N',
+                                  PORTMAP_G(cls_hi),
+                                  PF_G(cls_lo),
+                                  (cls_lo & VF_VALID_F) ? VF_G(cls_lo) : -1);
+
+               if (replicate) {
+                       if (adap->params.arch.mps_rplc_size > 128)
+                               seq_printf(seq, " %08x %08x %08x %08x "
+                                          "%08x %08x %08x %08x",
+                                          rplc[7], rplc[6], rplc[5], rplc[4],
+                                          rplc[3], rplc[2], rplc[1], rplc[0]);
+                       else
+                               seq_printf(seq, " %08x %08x %08x %08x",
+                                          rplc[3], rplc[2], rplc[1], rplc[0]);
+               } else {
+                       if (adap->params.arch.mps_rplc_size > 128)
+                               seq_printf(seq, "%72c", ' ');
+                       else
+                               seq_printf(seq, "%36c", ' ');
+               }
+
+               if (chip_ver > CHELSIO_T5)
+                       seq_printf(seq, "%4u%3u%3u%3u %#x\n",
+                                  T6_SRAM_PRIO0_G(cls_lo),
+                                  T6_SRAM_PRIO1_G(cls_lo),
+                                  T6_SRAM_PRIO2_G(cls_lo),
+                                  T6_SRAM_PRIO3_G(cls_lo),
+                                  (cls_lo >> T6_MULTILISTEN0_S) & 0xf);
+               else
+                       seq_printf(seq, "%4u%3u%3u%3u %#x\n",
+                                  SRAM_PRIO0_G(cls_lo), SRAM_PRIO1_G(cls_lo),
+                                  SRAM_PRIO2_G(cls_lo), SRAM_PRIO3_G(cls_lo),
+                                  (cls_lo >> MULTILISTEN0_S) & 0xf);
        }
 out:   return 0;
 }
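
For chips after T5, mps_tcam_show() above can no longer read the MPS TCAM through the directly mapped MPS_CLS_TCAM_{X,Y}_L registers and instead goes through the MPS_CLS_TCAM_DATA2_CTL indirection. A minimal sketch of that read sequence, factored out of the code above (the helper name is hypothetical, and it assumes the driver's register accessors and field macros, so it is not compilable outside the driver):

    /* Read one 48-bit TCAM word for entry @idx: Y when @xybitsel is 0,
     * X when it is 1.  The upper 16 bits come back in the DMACH field of
     * DATA1, the lower 32 bits in DATA0.
     */
    static u64 t6_mps_tcam_read(struct adapter *adap, unsigned int idx,
                                int xybitsel)
    {
            u32 ctl = CTLCMDTYPE_V(0) | CTLXYBITSEL_V(xybitsel);

            if (idx < 256)          /* entries 0..255 live in TCAM0 */
                    ctl |= CTLTCAMINDEX_V(idx) | CTLTCAMSEL_V(0);
            else                    /* the rest live in TCAM1 */
                    ctl |= CTLTCAMINDEX_V(idx - 256) | CTLTCAMSEL_V(1);

            t4_write_reg(adap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
            return ((u64)DMACH_G(t4_read_reg(adap, MPS_CLS_TCAM_DATA1_A)) << 32) |
                   t4_read_reg(adap, MPS_CLS_TCAM_DATA0_A);
    }
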
@@ -1222,7 +1309,7 @@ static int sensors_show(struct seq_file *seq, void *v)
        param[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
                    FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DIAG) |
                    FW_PARAMS_PARAM_Y_V(FW_PARAM_DEV_DIAG_VDD));
-       ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
+       ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
                              param, val);
 
        if (ret < 0 || val[0] == 0)
@@ -1416,6 +1503,9 @@ static int rss_config_show(struct seq_file *seq, void *v)
        seq_printf(seq, "  HashDelay:     %3d\n", HASHDELAY_G(rssconf));
        if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
                seq_printf(seq, "  VfWrAddr:      %3d\n", VFWRADDR_G(rssconf));
+       else
+               seq_printf(seq, "  VfWrAddr:      %3d\n",
+                          T6_VFWRADDR_G(rssconf));
        seq_printf(seq, "  KeyMode:       %s\n", keymode[KEYMODE_G(rssconf)]);
        seq_printf(seq, "  VfWrEn:        %3s\n", yesno(rssconf & VFWREN_F));
        seq_printf(seq, "  KeyWrEn:       %3s\n", yesno(rssconf & KEYWREN_F));
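
rss_config_show() above picks T6_VFWRADDR_G() on T6 parts, presumably because the VfWrAddr field sits at a different position in that register on the newer chip. All of these accessors follow the usual cxgb4 register-field naming scheme; a sketch of the convention with made-up shift and mask values:

    #define EXAMPLE_S       8                       /* field shift (assumed) */
    #define EXAMPLE_M       0x1fU                   /* field mask (assumed) */
    #define EXAMPLE_V(x)    ((x) << EXAMPLE_S)      /* place a value */
    #define EXAMPLE_G(x)    (((x) >> EXAMPLE_S) & EXAMPLE_M)  /* extract one */
    #define EXAMPLE_F       EXAMPLE_V(1U)           /* single-bit flag form */

This is why the T6 additions throughout this patch are largely mechanical: the same logic, with different _S/_M constants behind parallel macro names.
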
@@ -1634,14 +1724,14 @@ static int rss_vf_config_open(struct inode *inode, struct file *file)
        struct adapter *adapter = inode->i_private;
        struct seq_tab *p;
        struct rss_vf_conf *vfconf;
-       int vf;
+       int vf, vfcount = adapter->params.arch.vfcount;
 
-       p = seq_open_tab(file, 128, sizeof(*vfconf), 1, rss_vf_config_show);
+       p = seq_open_tab(file, vfcount, sizeof(*vfconf), 1, rss_vf_config_show);
        if (!p)
                return -ENOMEM;
 
        vfconf = (struct rss_vf_conf *)p->data;
-       for (vf = 0; vf < 128; vf++) {
+       for (vf = 0; vf < vfcount; vf++) {
                t4_read_rss_vf_config(adapter, vf, &vfconf[vf].rss_vf_vfl,
                                      &vfconf[vf].rss_vf_vfh);
        }
@@ -1959,6 +2049,61 @@ static void add_debugfs_mem(struct adapter *adap, const char *name,
                                 size_mb << 20);
 }
 
+static int blocked_fl_open(struct inode *inode, struct file *file)
+{
+       file->private_data = inode->i_private;
+       return 0;
+}
+
+static ssize_t blocked_fl_read(struct file *filp, char __user *ubuf,
+                              size_t count, loff_t *ppos)
+{
+       int len;
+       const struct adapter *adap = filp->private_data;
+       char *buf;
+       ssize_t size = (adap->sge.egr_sz + 3) / 4 +
+                       adap->sge.egr_sz / 32 + 2; /* includes ,/\n/\0 */
+
+       buf = kzalloc(size, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       len = snprintf(buf, size - 1, "%*pb\n",
+                      adap->sge.egr_sz, adap->sge.blocked_fl);
+       len += sprintf(buf + len, "\n");
+       size = simple_read_from_buffer(ubuf, count, ppos, buf, len);
+       t4_free_mem(buf);
+       return size;
+}
+
+static ssize_t blocked_fl_write(struct file *filp, const char __user *ubuf,
+                               size_t count, loff_t *ppos)
+{
+       int err;
+       unsigned long *t;
+       struct adapter *adap = filp->private_data;
+
+       t = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz), sizeof(long), GFP_KERNEL);
+       if (!t)
+               return -ENOMEM;
+
+       err = bitmap_parse_user(ubuf, count, t, adap->sge.egr_sz);
+       if (err) {
+               t4_free_mem(t);
+               return err;
+       }
+
+       bitmap_copy(adap->sge.blocked_fl, t, adap->sge.egr_sz);
+       t4_free_mem(t);
+       return count;
+}
+
+static const struct file_operations blocked_fl_fops = {
+       .owner   = THIS_MODULE,
+       .open    = blocked_fl_open,
+       .read    = blocked_fl_read,
+       .write   = blocked_fl_write,
+       .llseek  = generic_file_llseek,
+};
+
 /* Add an array of Debug FS files.
  */
 void add_debugfs_files(struct adapter *adap,
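
The blocked_fl node added above exchanges the free-list blocking bitmap in the standard kernel bitmap format ("%*pb" on read, bitmap_parse_user() on write): one hex digit per four bits with a ',' between 32-bit words. That is exactly the buffer sizing in blocked_fl_read() -- (egr_sz + 3) / 4 hex digits, roughly egr_sz / 32 separators, plus newline and NUL. A standalone sketch of the arithmetic, assuming an example egress-queue count:

    #include <stdio.h>

    int main(void)
    {
            unsigned int egr_sz = 65536;    /* assumed example sge.egr_sz */
            size_t size = (egr_sz + 3) / 4  /* one hex digit per 4 bits */
                        + egr_sz / 32       /* ',' between 32-bit words */
                        + 2;                /* trailing '\n' and '\0' */

            printf("%u-bit bitmap needs a %zu-byte buffer\n", egr_sz, size);
            return 0;
    }
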
@@ -1978,7 +2123,7 @@ void add_debugfs_files(struct adapter *adap,
 int t4_setup_debugfs(struct adapter *adap)
 {
        int i;
-       u32 size;
+       u32 size = 0;
        struct dentry *de;
 
        static struct t4_debugfs_entry t4_debugfs_files[] = {
@@ -2022,6 +2167,7 @@ int t4_setup_debugfs(struct adapter *adap)
 #if IS_ENABLED(CONFIG_IPV6)
                { "clip_tbl", &clip_tbl_debugfs_fops, S_IRUSR, 0 },
 #endif
+               { "blocked_fl", &blocked_fl_fops, S_IRUSR | S_IWUSR, 0 },
        };
 
        /* Debug FS nodes common to all T5 and later adapters.
@@ -2048,12 +2194,7 @@ int t4_setup_debugfs(struct adapter *adap)
                size = t4_read_reg(adap, MA_EDRAM1_BAR_A);
                add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM1_SIZE_G(size));
        }
-       if (is_t4(adap->params.chip)) {
-               size = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A);
-               if (i & EXT_MEM_ENABLE_F)
-                       add_debugfs_mem(adap, "mc", MEM_MC,
-                                       EXT_MEM_SIZE_G(size));
-       } else {
+       if (is_t5(adap->params.chip)) {
                if (i & EXT_MEM0_ENABLE_F) {
                        size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
                        add_debugfs_mem(adap, "mc0", MEM_MC0,
@@ -2064,6 +2205,11 @@ int t4_setup_debugfs(struct adapter *adap)
                        add_debugfs_mem(adap, "mc1", MEM_MC1,
                                        EXT_MEM1_SIZE_G(size));
                }
+       } else {
+               if (i & EXT_MEM_ENABLE_F) {
+                       size = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A);
+                       add_debugfs_mem(adap, "mc", MEM_MC, EXT_MEM_SIZE_G(size));
+               }
        }
 
        de = debugfs_create_file_size("flash", S_IRUSR, adap->debugfs_root, adap,
index 10d82b51d7efa2ab52d01d0558b61ecf703ddd4b..687acf71fa15e01e5886b5055f9bc9b8ccbc4929 100644 (file)
@@ -108,15 +108,82 @@ static const char stats_strings[][ETH_GSTRING_LEN] = {
        "VLANinsertions     ",
        "GROpackets         ",
        "GROmerged          ",
-       "WriteCoalSuccess   ",
-       "WriteCoalFail      ",
+};
+
+static char adapter_stats_strings[][ETH_GSTRING_LEN] = {
+       "db_drop                ",
+       "db_full                ",
+       "db_empty               ",
+       "tcp_ipv4_out_rsts      ",
+       "tcp_ipv4_in_segs       ",
+       "tcp_ipv4_out_segs      ",
+       "tcp_ipv4_retrans_segs  ",
+       "tcp_ipv6_out_rsts      ",
+       "tcp_ipv6_in_segs       ",
+       "tcp_ipv6_out_segs      ",
+       "tcp_ipv6_retrans_segs  ",
+       "usm_ddp_frames         ",
+       "usm_ddp_octets         ",
+       "usm_ddp_drops          ",
+       "rdma_no_rqe_mod_defer  ",
+       "rdma_no_rqe_pkt_defer  ",
+       "tp_err_ofld_no_neigh   ",
+       "tp_err_ofld_cong_defer ",
+       "write_coal_success     ",
+       "write_coal_fail        ",
+};
+
+static char channel_stats_strings[][ETH_GSTRING_LEN] = {
+       "--------Channel--------- ",
+       "tp_cpl_requests        ",
+       "tp_cpl_responses       ",
+       "tp_mac_in_errs         ",
+       "tp_hdr_in_errs         ",
+       "tp_tcp_in_errs         ",
+       "tp_tcp6_in_errs        ",
+       "tp_tnl_cong_drops      ",
+       "tp_tnl_tx_drops        ",
+       "tp_ofld_vlan_drops     ",
+       "tp_ofld_chan_drops     ",
+       "fcoe_octets_ddp        ",
+       "fcoe_frames_ddp        ",
+       "fcoe_frames_drop       ",
+};
+
+static char loopback_stats_strings[][ETH_GSTRING_LEN] = {
+       "-------Loopback----------- ",
+       "octets_ok              ",
+       "frames_ok              ",
+       "bcast_frames           ",
+       "mcast_frames           ",
+       "ucast_frames           ",
+       "error_frames           ",
+       "frames_64              ",
+       "frames_65_to_127       ",
+       "frames_128_to_255      ",
+       "frames_256_to_511      ",
+       "frames_512_to_1023     ",
+       "frames_1024_to_1518    ",
+       "frames_1519_to_max     ",
+       "frames_dropped         ",
+       "bg0_frames_dropped     ",
+       "bg1_frames_dropped     ",
+       "bg2_frames_dropped     ",
+       "bg3_frames_dropped     ",
+       "bg0_frames_trunc       ",
+       "bg1_frames_trunc       ",
+       "bg2_frames_trunc       ",
+       "bg3_frames_trunc       ",
 };
 
 static int get_sset_count(struct net_device *dev, int sset)
 {
        switch (sset) {
        case ETH_SS_STATS:
-               return ARRAY_SIZE(stats_strings);
+               return ARRAY_SIZE(stats_strings) +
+                      ARRAY_SIZE(adapter_stats_strings) +
+                      ARRAY_SIZE(channel_stats_strings) +
+                      ARRAY_SIZE(loopback_stats_strings);
        default:
                return -EOPNOTSUPP;
        }
@@ -168,8 +235,18 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 
 static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
 {
-       if (stringset == ETH_SS_STATS)
+       if (stringset == ETH_SS_STATS) {
                memcpy(data, stats_strings, sizeof(stats_strings));
+               data += sizeof(stats_strings);
+               memcpy(data, adapter_stats_strings,
+                      sizeof(adapter_stats_strings));
+               data += sizeof(adapter_stats_strings);
+               memcpy(data, channel_stats_strings,
+                      sizeof(channel_stats_strings));
+               data += sizeof(channel_stats_strings);
+               memcpy(data, loopback_stats_strings,
+                      sizeof(loopback_stats_strings));
+       }
 }
 
 /* port stats maintained per queue of the port. They should be in the same
@@ -185,6 +262,45 @@ struct queue_port_stats {
        u64 gro_merged;
 };
 
+struct adapter_stats {
+       u64 db_drop;
+       u64 db_full;
+       u64 db_empty;
+       u64 tcp_v4_out_rsts;
+       u64 tcp_v4_in_segs;
+       u64 tcp_v4_out_segs;
+       u64 tcp_v4_retrans_segs;
+       u64 tcp_v6_out_rsts;
+       u64 tcp_v6_in_segs;
+       u64 tcp_v6_out_segs;
+       u64 tcp_v6_retrans_segs;
+       u64 frames;
+       u64 octets;
+       u64 drops;
+       u64 rqe_dfr_mod;
+       u64 rqe_dfr_pkt;
+       u64 ofld_no_neigh;
+       u64 ofld_cong_defer;
+       u64 wc_success;
+       u64 wc_fail;
+};
+
+struct channel_stats {
+       u64 cpl_req;
+       u64 cpl_rsp;
+       u64 mac_in_errs;
+       u64 hdr_in_errs;
+       u64 tcp_in_errs;
+       u64 tcp6_in_errs;
+       u64 tnl_cong_drops;
+       u64 tnl_tx_drops;
+       u64 ofld_vlan_drops;
+       u64 ofld_chan_drops;
+       u64 octets_ddp;
+       u64 frames_ddp;
+       u64 frames_drop;
+};
+
 static void collect_sge_port_stats(const struct adapter *adap,
                                   const struct port_info *p,
                                   struct queue_port_stats *s)
@@ -205,30 +321,121 @@ static void collect_sge_port_stats(const struct adapter *adap,
        }
 }
 
+static void collect_adapter_stats(struct adapter *adap, struct adapter_stats *s)
+{
+       struct tp_tcp_stats v4, v6;
+       struct tp_rdma_stats rdma_stats;
+       struct tp_err_stats err_stats;
+       struct tp_usm_stats usm_stats;
+       u64 val1, val2;
+
+       memset(s, 0, sizeof(*s));
+
+       spin_lock(&adap->stats_lock);
+       t4_tp_get_tcp_stats(adap, &v4, &v6);
+       t4_tp_get_rdma_stats(adap, &rdma_stats);
+       t4_get_usm_stats(adap, &usm_stats);
+       t4_tp_get_err_stats(adap, &err_stats);
+       spin_unlock(&adap->stats_lock);
+
+       s->db_drop = adap->db_stats.db_drop;
+       s->db_full = adap->db_stats.db_full;
+       s->db_empty = adap->db_stats.db_empty;
+
+       s->tcp_v4_out_rsts = v4.tcp_out_rsts;
+       s->tcp_v4_in_segs = v4.tcp_in_segs;
+       s->tcp_v4_out_segs = v4.tcp_out_segs;
+       s->tcp_v4_retrans_segs = v4.tcp_retrans_segs;
+       s->tcp_v6_out_rsts = v6.tcp_out_rsts;
+       s->tcp_v6_in_segs = v6.tcp_in_segs;
+       s->tcp_v6_out_segs = v6.tcp_out_segs;
+       s->tcp_v6_retrans_segs = v6.tcp_retrans_segs;
+
+       if (is_offload(adap)) {
+               s->frames = usm_stats.frames;
+               s->octets = usm_stats.octets;
+               s->drops = usm_stats.drops;
+               s->rqe_dfr_mod = rdma_stats.rqe_dfr_mod;
+               s->rqe_dfr_pkt = rdma_stats.rqe_dfr_pkt;
+       }
+
+       s->ofld_no_neigh = err_stats.ofld_no_neigh;
+       s->ofld_cong_defer = err_stats.ofld_cong_defer;
+
+       if (!is_t4(adap->params.chip)) {
+               int v;
+
+               v = t4_read_reg(adap, SGE_STAT_CFG_A);
+               if (STATSOURCE_T5_G(v) == 7) {
+                       val2 = t4_read_reg(adap, SGE_STAT_MATCH_A);
+                       val1 = t4_read_reg(adap, SGE_STAT_TOTAL_A);
+                       s->wc_success = val1 - val2;
+                       s->wc_fail = val2;
+               }
+       }
+}
+
+static void collect_channel_stats(struct adapter *adap, struct channel_stats *s,
+                                 u8 i)
+{
+       struct tp_cpl_stats cpl_stats;
+       struct tp_err_stats err_stats;
+       struct tp_fcoe_stats fcoe_stats;
+
+       memset(s, 0, sizeof(*s));
+
+       spin_lock(&adap->stats_lock);
+       t4_tp_get_cpl_stats(adap, &cpl_stats);
+       t4_tp_get_err_stats(adap, &err_stats);
+       t4_get_fcoe_stats(adap, i, &fcoe_stats);
+       spin_unlock(&adap->stats_lock);
+
+       s->cpl_req = cpl_stats.req[i];
+       s->cpl_rsp = cpl_stats.rsp[i];
+       s->mac_in_errs = err_stats.mac_in_errs[i];
+       s->hdr_in_errs = err_stats.hdr_in_errs[i];
+       s->tcp_in_errs = err_stats.tcp_in_errs[i];
+       s->tcp6_in_errs = err_stats.tcp6_in_errs[i];
+       s->tnl_cong_drops = err_stats.tnl_cong_drops[i];
+       s->tnl_tx_drops = err_stats.tnl_tx_drops[i];
+       s->ofld_vlan_drops = err_stats.ofld_vlan_drops[i];
+       s->ofld_chan_drops = err_stats.ofld_chan_drops[i];
+       s->octets_ddp = fcoe_stats.octets_ddp;
+       s->frames_ddp = fcoe_stats.frames_ddp;
+       s->frames_drop = fcoe_stats.frames_drop;
+}
+
 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
                      u64 *data)
 {
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
-       u32 val1, val2;
+       struct lb_port_stats s;
+       int i;
+       u64 *p0;
 
-       t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);
+       t4_get_port_stats_offset(adapter, pi->tx_chan,
+                                (struct port_stats *)data,
+                                &pi->stats_base);
 
        data += sizeof(struct port_stats) / sizeof(u64);
        collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
        data += sizeof(struct queue_port_stats) / sizeof(u64);
-       if (!is_t4(adapter->params.chip)) {
-               t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7));
-               val1 = t4_read_reg(adapter, SGE_STAT_TOTAL_A);
-               val2 = t4_read_reg(adapter, SGE_STAT_MATCH_A);
-               *data = val1 - val2;
-               data++;
-               *data = val2;
-               data++;
-       } else {
-               memset(data, 0, 2 * sizeof(u64));
-               *data += 2;
-       }
+       collect_adapter_stats(adapter, (struct adapter_stats *)data);
+       data += sizeof(struct adapter_stats) / sizeof(u64);
+
+       *data++ = (u64)pi->port_id;
+       collect_channel_stats(adapter, (struct channel_stats *)data,
+                             pi->port_id);
+       data += sizeof(struct channel_stats) / sizeof(u64);
+
+       *data++ = (u64)pi->port_id;
+       memset(&s, 0, sizeof(s));
+       t4_get_lb_stats(adapter, pi->port_id, &s);
+
+       p0 = &s.octets;
+       for (i = 0; i < ARRAY_SIZE(loopback_stats_strings) - 1; i++)
+               *data++ = (unsigned long long)*p0++;
 }
 
 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
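
A note on the stats plumbing above: ethtool pairs get_sset_count(), get_strings() and get_stats() purely by position, so the value array must mirror the concatenated string tables exactly. The "--------Channel---------" and "-------Loopback-----------" entries are column labels whose slots get_stats() fills with the port id, which is why the loopback copy loop runs ARRAY_SIZE(loopback_stats_strings) - 1 times. A toy standalone model of that contract (names and values invented for illustration):

    #include <stdio.h>

    int main(void)
    {
            const char *strings[] = { "rx_pkts", "--------Channel---------",
                                      "cpl_req" };
            unsigned long long data[3];
            unsigned int port_id = 1;       /* stands in for pi->port_id */

            data[0] = 12345;                /* per-port stat */
            data[1] = port_id;              /* fills the label column */
            data[2] = 42;                   /* per-channel stat */

            for (int i = 0; i < 3; i++)
                    printf("%-26s %llu\n", strings[i], data[i]);
            return 0;
    }
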
@@ -250,7 +457,7 @@ static int restart_autoneg(struct net_device *dev)
                return -EAGAIN;
        if (p->link_cfg.autoneg != AUTONEG_ENABLE)
                return -EINVAL;
-       t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
+       t4_restart_aneg(p->adapter, p->adapter->pf, p->tx_chan);
        return 0;
 }
 
@@ -267,7 +474,7 @@ static int identify_port(struct net_device *dev,
        else
                return -EINVAL;
 
-       return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
+       return t4_identify_port(adap, adap->pf, netdev2pinfo(dev)->viid, val);
 }
 
 static unsigned int from_fw_linkcaps(enum fw_port_type type, unsigned int caps)
@@ -439,7 +646,7 @@ static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
        lc->autoneg = cmd->autoneg;
 
        if (netif_running(dev))
-               return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
+               return t4_link_l1cfg(p->adapter, p->adapter->pf, p->tx_chan,
                                     lc);
        return 0;
 }
@@ -472,7 +679,7 @@ static int set_pauseparam(struct net_device *dev,
        if (epause->tx_pause)
                lc->requested_fc |= PAUSE_TX;
        if (netif_running(dev))
-               return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
+               return t4_link_l1cfg(p->adapter, p->adapter->pf, p->tx_chan,
                                     lc);
        return 0;
 }
@@ -578,7 +785,7 @@ static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
        const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
 
        c->rx_coalesce_usecs = qtimer_val(adap, rq);
-       c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
+       c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN_F) ?
                adap->sge.counter_val[rq->pktcnt_idx] : 0;
        c->use_adaptive_rx_coalesce = get_adaptive_rx_setting(dev);
        return 0;
@@ -617,7 +824,7 @@ static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
  */
 static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
 {
-       int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
+       int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);
 
        if (vaddr >= 0)
                vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
@@ -626,7 +833,7 @@ static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
 
 static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
 {
-       int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
+       int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);
 
        if (vaddr >= 0)
                vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
@@ -669,8 +876,8 @@ static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
        aligned_offset = eeprom->offset & ~3;
        aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
 
-       if (adapter->fn > 0) {
-               u32 start = 1024 + adapter->fn * EEPROMPFSIZE;
+       if (adapter->pf > 0) {
+               u32 start = 1024 + adapter->pf * EEPROMPFSIZE;
 
                if (aligned_offset < start ||
                    aligned_offset + aligned_len > start + EEPROMPFSIZE)
@@ -740,37 +947,6 @@ static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
        return ret;
 }
 
-#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
-#define BCAST_CRC 0xa0ccc1a6
-
-static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
-{
-       wol->supported = WAKE_BCAST | WAKE_MAGIC;
-       wol->wolopts = netdev2adap(dev)->wol;
-       memset(&wol->sopass, 0, sizeof(wol->sopass));
-}
-
-static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
-{
-       int err = 0;
-       struct port_info *pi = netdev_priv(dev);
-
-       if (wol->wolopts & ~WOL_SUPPORTED)
-               return -EINVAL;
-       t4_wol_magic_enable(pi->adapter, pi->tx_chan,
-                           (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
-       if (wol->wolopts & WAKE_BCAST) {
-               err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
-                                       ~0ULL, 0, false);
-               if (!err)
-                       err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
-                                               ~6ULL, ~0ULL, BCAST_CRC, true);
-       } else {
-               t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
-       }
-       return err;
-}
-
 static u32 get_rss_table_size(struct net_device *dev)
 {
        const struct port_info *pi = netdev_priv(dev);
@@ -900,8 +1076,6 @@ static const struct ethtool_ops cxgb_ethtool_ops = {
        .get_ethtool_stats = get_stats,
        .get_regs_len      = get_regs_len,
        .get_regs          = get_regs,
-       .get_wol           = get_wol,
-       .set_wol           = set_wol,
        .get_rxnfc         = get_rxnfc,
        .get_rxfh_indir_size = get_rss_table_size,
        .get_rxfh          = get_rss_table,
index 803d91beec6f43b7a94c3378ff14d8623bc001a9..0e27f2266e6b7abdcb957a2163efa465df44b8e2 100644 (file)
@@ -135,8 +135,14 @@ struct filter_entry {
 
 #define FW4_FNAME "cxgb4/t4fw.bin"
 #define FW5_FNAME "cxgb4/t5fw.bin"
+#define FW6_FNAME "cxgb4/t6fw.bin"
 #define FW4_CFNAME "cxgb4/t4-config.txt"
 #define FW5_CFNAME "cxgb4/t5-config.txt"
+#define FW6_CFNAME "cxgb4/t6-config.txt"
+#define PHY_AQ1202_FIRMWARE "cxgb4/aq1202_fw.cld"
+#define PHY_BCM84834_FIRMWARE "cxgb4/bcm8483.bin"
+#define PHY_AQ1202_DEVICEID 0x4409
+#define PHY_BCM84834_DEVICEID 0x4486
 
 MODULE_DESCRIPTION(DRV_DESC);
 MODULE_AUTHOR("Chelsio Communications");
@@ -318,8 +324,9 @@ static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
                 * level") we need to issue the Set Parameters Commannd
                 * without sleeping (timeout < 0).
                 */
-               err = t4_set_params_nosleep(adap, adap->mbox, adap->fn, 0, 1,
-                                           &name, &value);
+               err = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
+                                           &name, &value,
+                                           -FW_CMD_MAX_TIMEOUT);
 
                if (err)
                        dev_err(adap->pdev_dev,
@@ -382,7 +389,7 @@ static int set_addr_filters(const struct net_device *dev, bool sleep)
        int uc_cnt = netdev_uc_count(dev);
        int mc_cnt = netdev_mc_count(dev);
        const struct port_info *pi = netdev_priv(dev);
-       unsigned int mb = pi->adapter->fn;
+       unsigned int mb = pi->adapter->pf;
 
        /* first do the secondary unicast addresses */
        netdev_for_each_uc_addr(ha, dev) {
@@ -439,7 +446,7 @@ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
 
        ret = set_addr_filters(dev, sleep_ok);
        if (ret == 0)
-               ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
+               ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, mtu,
                                    (dev->flags & IFF_PROMISC) ? 1 : 0,
                                    (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
                                    sleep_ok);
@@ -456,7 +463,7 @@ static int link_start(struct net_device *dev)
 {
        int ret;
        struct port_info *pi = netdev_priv(dev);
-       unsigned int mb = pi->adapter->fn;
+       unsigned int mb = pi->adapter->pf;
 
        /*
         * We do not set address filters and promiscuity here, the stack does
@@ -474,7 +481,7 @@ static int link_start(struct net_device *dev)
                }
        }
        if (ret == 0)
-               ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
+               ret = t4_link_l1cfg(pi->adapter, mb, pi->tx_chan,
                                    &pi->link_cfg);
        if (ret == 0) {
                local_bh_disable();
@@ -856,23 +863,39 @@ static void free_msix_queue_irqs(struct adapter *adap)
  *
  *     Sets up the portion of the HW RSS table for the port's VI to distribute
  *     packets to the Rx queues in @queues.
+ *     Must never be called before the SGE Ethernet Rx queues are set up.
  */
 int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
 {
        u16 *rss;
        int i, err;
-       const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];
+       struct adapter *adapter = pi->adapter;
+       const struct sge_eth_rxq *rxq;
 
+       rxq = &adapter->sge.ethrxq[pi->first_qset];
        rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
        if (!rss)
                return -ENOMEM;
 
        /* map the queue indices to queue ids */
        for (i = 0; i < pi->rss_size; i++, queues++)
-               rss[i] = q[*queues].rspq.abs_id;
+               rss[i] = rxq[*queues].rspq.abs_id;
 
-       err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
+       err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0,
                                  pi->rss_size, rss, pi->rss_size);
+       /* If Tunnel All Lookup isn't specified in the global RSS
+        * Configuration, then we need to specify a default Ingress
+        * Queue for any ingress packets which aren't hashed.  We'll
+        * use our first ingress queue ...
+        */
+       if (!err)
+               err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid,
+                                      FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F |
+                                      FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F |
+                                      FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F |
+                                      FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F |
+                                      FW_RSS_VI_CONFIG_CMD_UDPEN_F,
+                                      rss[0]);
        kfree(rss);
        return err;
 }
@@ -885,11 +908,15 @@ int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
  */
 static int setup_rss(struct adapter *adap)
 {
-       int i, err;
+       int i, j, err;
 
        for_each_port(adap, i) {
                const struct port_info *pi = adap2pinfo(adap, i);
 
+               /* Fill default values with equal distribution */
+               for (j = 0; j < pi->rss_size; j++)
+                       pi->rss[j] = j % pi->nqsets;
+
                err = cxgb4_write_rss(pi, pi->rss);
                if (err)
                        return err;
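
The new default fill in setup_rss() spreads RSS table slots round-robin across the port's queue sets via j % pi->nqsets, so a port with rss_size of 8 and 3 queue sets gets the mapping 0 1 2 0 1 2 0 1. A standalone sketch with those example sizes:

    #include <stdio.h>

    int main(void)
    {
            unsigned int rss_size = 8, nqsets = 3;  /* example sizes */

            for (unsigned int j = 0; j < rss_size; j++)
                    printf("rss[%u] = %u\n", j, j % nqsets);
            return 0;
    }
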
@@ -977,7 +1004,7 @@ static int alloc_ofld_rxqs(struct adapter *adap, struct sge_ofld_rxq *q,
                err = t4_sge_alloc_rxq(adap, &q->rspq, false,
                                       adap->port[i / per_chan],
                                       msi_idx, q->fl.size ? &q->fl : NULL,
-                                      uldrx_handler);
+                                      uldrx_handler, 0);
                if (err)
                        return err;
                memset(&q->stats, 0, sizeof(q->stats));
@@ -1007,7 +1034,7 @@ static int setup_sge_queues(struct adapter *adap)
                msi_idx = 1;         /* vector 0 is for non-queue interrupts */
        else {
                err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
-                                      NULL, NULL);
+                                      NULL, NULL, -1);
                if (err)
                        return err;
                msi_idx = -((int)s->intrq.abs_id + 1);
@@ -1027,7 +1054,7 @@ static int setup_sge_queues(struct adapter *adap)
         *    new/deleted queues.
         */
        err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
-                              msi_idx, NULL, fwevtq_handler);
+                              msi_idx, NULL, fwevtq_handler, -1);
        if (err) {
 freeout:       t4_free_sge_resources(adap);
                return err;
@@ -1044,7 +1071,9 @@ freeout:  t4_free_sge_resources(adap);
                                msi_idx++;
                        err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
                                               msi_idx, &q->fl,
-                                              t4_ethrx_handler);
+                                              t4_ethrx_handler,
+                                              t4_get_mps_bg_map(adap,
+                                                                pi->tx_chan));
                        if (err)
                                goto freeout;
                        q->rspq.idx = j;
@@ -1324,11 +1353,6 @@ static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
        return fallback(dev, skb) % dev->real_num_tx_queues;
 }
 
-static inline int is_offload(const struct adapter *adap)
-{
-       return adap->params.offload;
-}
-
 static int closest_timer(const struct sge *s, int time)
 {
        int i, delta, match = 0, min_delta = INT_MAX;
@@ -1389,8 +1413,8 @@ int cxgb4_set_rspq_intr_params(struct sge_rspq *q,
                            FW_PARAMS_PARAM_X_V(
                                        FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
                            FW_PARAMS_PARAM_YZ_V(q->cntxt_id);
-                       err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
-                                           &new_idx);
+                       err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
+                                           &v, &new_idx);
                        if (err)
                                return err;
                }
@@ -1398,7 +1422,7 @@ int cxgb4_set_rspq_intr_params(struct sge_rspq *q,
        }
 
        us = us == 0 ? 6 : closest_timer(&adap->sge, us);
-       q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
+       q->intr_params = QINTR_TIMER_IDX_V(us) | QINTR_CNT_EN_V(cnt > 0);
        return 0;
 }
 
@@ -1411,7 +1435,7 @@ static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
        if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
                return 0;
 
-       err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
+       err = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, -1,
                            -1, -1, -1,
                            !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
        if (unlikely(err))
@@ -1694,7 +1718,7 @@ static int tid_init(struct tid_info *t)
        bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
        /* Reserve stid 0 for T4/T5 adapters */
        if (!t->stid_base &&
-           (is_t4(adap->params.chip) || is_t5(adap->params.chip)))
+           (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5))
                __set_bit(0, t->stid_bmap);
 
        return 0;
@@ -1983,11 +2007,8 @@ EXPORT_SYMBOL(cxgb4_iscsi_init);
 int cxgb4_flush_eq_cache(struct net_device *dev)
 {
        struct adapter *adap = netdev2adap(dev);
-       int ret;
 
-       ret = t4_fwaddrspace_write(adap, adap->mbox,
-                                  0xe1000000 + SGE_CTXT_CMD_A, 0x20000000);
-       return ret;
+       return t4_sge_ctxt_flush(adap, adap->mbox);
 }
 EXPORT_SYMBOL(cxgb4_flush_eq_cache);
 
@@ -2042,25 +2063,6 @@ out:
 }
 EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
 
-void cxgb4_disable_db_coalescing(struct net_device *dev)
-{
-       struct adapter *adap;
-
-       adap = netdev2adap(dev);
-       t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, NOCOALESCE_F,
-                        NOCOALESCE_F);
-}
-EXPORT_SYMBOL(cxgb4_disable_db_coalescing);
-
-void cxgb4_enable_db_coalescing(struct net_device *dev)
-{
-       struct adapter *adap;
-
-       adap = netdev2adap(dev);
-       t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, NOCOALESCE_F, 0);
-}
-EXPORT_SYMBOL(cxgb4_enable_db_coalescing);
-
 int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
 {
        struct adapter *adap;
@@ -2100,10 +2102,7 @@ int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
                if (offset < mc0_end) {
                        memtype = MEM_MC0;
                        memaddr = offset - edc1_end;
-               } else if (is_t4(adap->params.chip)) {
-                       /* T4 only has a single memory channel */
-                       goto err;
-               } else {
+               } else if (is_t5(adap->params.chip)) {
                        size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
                        mc1_size = EXT_MEM1_SIZE_G(size) << 20;
                        mc1_end = mc0_end + mc1_size;
@@ -2114,6 +2113,9 @@ int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
                                /* offset beyond the end of any memory */
                                goto err;
                        }
+               } else {
+                       /* T4/T6 only has a single memory channel */
+                       goto err;
                }
        }
 
@@ -2148,7 +2150,7 @@ int cxgb4_bar2_sge_qregs(struct net_device *dev,
                         u64 *pbar2_qoffset,
                         unsigned int *pbar2_qid)
 {
-       return cxgb4_t4_bar2_sge_qregs(netdev2adap(dev),
+       return t4_bar2_sge_qregs(netdev2adap(dev),
                                 qid,
                                 (qtype == CXGB4_BAR2_QTYPE_EGRESS
                                  ? T4_BAR2_QTYPE_EGRESS
@@ -2278,9 +2280,13 @@ static void process_db_full(struct work_struct *work)
        drain_db_fifo(adap, dbfifo_drain_delay);
        enable_dbs(adap);
        notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
-       t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
-                        DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
-                        DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
+       if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
+               t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
+                                DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
+                                DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
+       else
+               t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
+                                DBFIFO_LP_INT_F, DBFIFO_LP_INT_F);
 }
 
 static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
@@ -2342,7 +2348,7 @@ static void process_db_drop(struct work_struct *work)
                drain_db_fifo(adap, dbfifo_drain_delay);
                enable_dbs(adap);
                notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
-       } else {
+       } else if (is_t5(adap->params.chip)) {
                u32 dropped_db = t4_read_reg(adap, 0x010ac);
                u16 qid = (dropped_db >> 15) & 0x1ffff;
                u16 pidx_inc = dropped_db & 0x1fff;
@@ -2350,7 +2356,7 @@ static void process_db_drop(struct work_struct *work)
                unsigned int bar2_qid;
                int ret;
 
-               ret = cxgb4_t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
+               ret = t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
                                        &bar2_qoffset, &bar2_qid);
                if (ret)
                        dev_err(adap->pdev_dev, "doorbell drop recovery: "
@@ -2363,7 +2369,8 @@ static void process_db_drop(struct work_struct *work)
                t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
        }
 
-       t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0);
+       if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
+               t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0);
 }
 
 void t4_db_full(struct adapter *adap)
@@ -2393,7 +2400,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
        unsigned short i;
 
        lli.pdev = adap->pdev;
-       lli.pf = adap->fn;
+       lli.pf = adap->pf;
        lli.l2t = adap->l2t;
        lli.tids = &adap->tids;
        lli.ports = adap->port;
@@ -2432,6 +2439,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
        lli.max_ordird_qp = adap->params.max_ordird_qp;
        lli.max_ird_adapter = adap->params.max_ird_adapter;
        lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
+       lli.nodeid = dev_to_node(adap->pdev_dev);
 
        handle = ulds[uld].add(&lli);
        if (IS_ERR(handle)) {
@@ -2729,7 +2737,7 @@ static int cxgb_close(struct net_device *dev)
 
        netif_tx_stop_all_queues(dev);
        netif_carrier_off(dev);
-       return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
+       return t4_enable_vi(adapter, adapter->pf, pi->viid, false, false);
 }
 
 /* Return an error number if the indicated filter isn't writable ...
@@ -2873,7 +2881,8 @@ static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
                spin_unlock(&adapter->stats_lock);
                return ns;
        }
-       t4_get_port_stats(adapter, p->tx_chan, &stats);
+       t4_get_port_stats_offset(adapter, p->tx_chan, &stats,
+                                &p->stats_base);
        spin_unlock(&adapter->stats_lock);
 
        ns->tx_bytes   = stats.tx_octets;
@@ -2932,7 +2941,7 @@ static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
                } else
                        return -EINVAL;
 
-               mbox = pi->adapter->fn;
+               mbox = pi->adapter->pf;
                if (cmd == SIOCGMIIREG)
                        ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
                                         data->reg_num, &data->val_out);
@@ -2959,7 +2968,7 @@ static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
 
        if (new_mtu < 81 || new_mtu > MAX_MTU)         /* accommodate SACK */
                return -EINVAL;
-       ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
+       ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, new_mtu, -1,
                            -1, -1, -1, true);
        if (!ret)
                dev->mtu = new_mtu;
@@ -2975,7 +2984,7 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p)
        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;
 
-       ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
+       ret = t4_change_mac(pi->adapter, pi->adapter->pf, pi->viid,
                            pi->xact_addr_filt, addr->sa_data, true, true);
        if (ret < 0)
                return ret;
@@ -3034,86 +3043,11 @@ void t4_fatal_err(struct adapter *adap)
        dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
 }
 
-/* Return the specified PCI-E Configuration Space register from our Physical
- * Function.  We try first via a Firmware LDST Command since we prefer to let
- * the firmware own all of these registers, but if that fails we go for it
- * directly ourselves.
- */
-static u32 t4_read_pcie_cfg4(struct adapter *adap, int reg)
-{
-       struct fw_ldst_cmd ldst_cmd;
-       u32 val;
-       int ret;
-
-       /* Construct and send the Firmware LDST Command to retrieve the
-        * specified PCI-E Configuration Space register.
-        */
-       memset(&ldst_cmd, 0, sizeof(ldst_cmd));
-       ldst_cmd.op_to_addrspace =
-               htonl(FW_CMD_OP_V(FW_LDST_CMD) |
-                     FW_CMD_REQUEST_F |
-                     FW_CMD_READ_F |
-                     FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FUNC_PCIE));
-       ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
-       ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS_V(1);
-       ldst_cmd.u.pcie.ctrl_to_fn =
-               (FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(adap->fn));
-       ldst_cmd.u.pcie.r = reg;
-       ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
-                        &ldst_cmd);
-
-       /* If the LDST Command suucceeded, exctract the returned register
-        * value.  Otherwise read it directly ourself.
-        */
-       if (ret == 0)
-               val = ntohl(ldst_cmd.u.pcie.data[0]);
-       else
-               t4_hw_pci_read_cfg4(adap, reg, &val);
-
-       return val;
-}
-
 static void setup_memwin(struct adapter *adap)
 {
-       u32 mem_win0_base, mem_win1_base, mem_win2_base, mem_win2_aperture;
+       u32 nic_win_base = t4_get_util_window(adap);
 
-       if (is_t4(adap->params.chip)) {
-               u32 bar0;
-
-               /* Truncation intentional: we only read the bottom 32-bits of
-                * the 64-bit BAR0/BAR1 ...  We use the hardware backdoor
-                * mechanism to read BAR0 instead of using
-                * pci_resource_start() because we could be operating from
-                * within a Virtual Machine which is trapping our accesses to
-                * our Configuration Space and we need to set up the PCI-E
-                * Memory Window decoders with the actual addresses which will
-                * be coming across the PCI-E link.
-                */
-               bar0 = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_0);
-               bar0 &= PCI_BASE_ADDRESS_MEM_MASK;
-               adap->t4_bar0 = bar0;
-
-               mem_win0_base = bar0 + MEMWIN0_BASE;
-               mem_win1_base = bar0 + MEMWIN1_BASE;
-               mem_win2_base = bar0 + MEMWIN2_BASE;
-               mem_win2_aperture = MEMWIN2_APERTURE;
-       } else {
-               /* For T5, only relative offset inside the PCIe BAR is passed */
-               mem_win0_base = MEMWIN0_BASE;
-               mem_win1_base = MEMWIN1_BASE;
-               mem_win2_base = MEMWIN2_BASE_T5;
-               mem_win2_aperture = MEMWIN2_APERTURE_T5;
-       }
-       t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 0),
-                    mem_win0_base | BIR_V(0) |
-                    WINDOW_V(ilog2(MEMWIN0_APERTURE) - 10));
-       t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 1),
-                    mem_win1_base | BIR_V(0) |
-                    WINDOW_V(ilog2(MEMWIN1_APERTURE) - 10));
-       t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 2),
-                    mem_win2_base | BIR_V(0) |
-                    WINDOW_V(ilog2(mem_win2_aperture) - 10));
-       t4_read_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 2));
+       t4_setup_memwin(adap, nic_win_base, MEMWIN_NIC);
 }
 
 static void setup_memwin_rdma(struct adapter *adap)
@@ -3147,7 +3081,7 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
        c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
                               FW_CMD_REQUEST_F | FW_CMD_READ_F);
        c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
-       ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
+       ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), c);
        if (ret < 0)
                return ret;
 
@@ -3163,18 +3097,18 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
        }
        c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
                               FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
-       ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
+       ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), NULL);
        if (ret < 0)
                return ret;
 
-       ret = t4_config_glbl_rss(adap, adap->fn,
+       ret = t4_config_glbl_rss(adap, adap->pf,
                                 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
                                 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F |
                                 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F);
        if (ret < 0)
                return ret;
 
-       ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, adap->sge.egr_sz, 64,
+       ret = t4_cfg_pfvf(adap, adap->mbox, adap->pf, 0, adap->sge.egr_sz, 64,
                          MAX_INGQ, 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF,
                          FW_CMD_CAP_PF);
        if (ret < 0)
@@ -3218,7 +3152,7 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
        }
 
        /* get basic stuff going */
-       return t4_early_init(adap, adap->fn);
+       return t4_early_init(adap, adap->pf);
 }
 
 /*
@@ -3274,6 +3208,142 @@ static int adap_init0_tweaks(struct adapter *adapter)
        return 0;
 }
 
+/* 10Gb/s-BT PHY support.  Chip-external 10Gb/s-BT PHYs are complex chips
+ * unto themselves, and they contain their own firmware to perform their
+ * tasks ...
+ */
+static int phy_aq1202_version(const u8 *phy_fw_data,
+                             size_t phy_fw_size)
+{
+       int offset;
+
+       /* At offset 0x8 you're looking for the primary image's
+        * starting offset, which is 3 bytes wide.
+        *
+        * At offset 0xa of the primary image, you look for the offset
+        * of the DRAM segment, which is 3 bytes wide.
+        *
+        * The FW version is at offset 0x27e of the DRAM segment and is
+        * 2 bytes wide.
+        */
+       #define be16(__p) (((__p)[0] << 8) | (__p)[1])
+       #define le16(__p) ((__p)[0] | ((__p)[1] << 8))
+       #define le24(__p) (le16(__p) | ((__p)[2] << 16))
+
+       offset = le24(phy_fw_data + 0x8) << 12;
+       offset = le24(phy_fw_data + offset + 0xa);
+       return be16(phy_fw_data + offset + 0x27e);
+
+       #undef be16
+       #undef le16
+       #undef le24
+}
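
phy_aq1202_version() chains three lookups: the 3-byte little-endian word at 0x8, shifted up by 12 bits, locates the primary image; the 3-byte word at image offset 0xa locates the DRAM segment; and the big-endian halfword at DRAM offset 0x27e is the version. A standalone walk-through on a synthetic buffer (all layout values below are invented for illustration):

    #include <stdint.h>
    #include <stdio.h>

    #define be16(p) (((p)[0] << 8) | (p)[1])
    #define le24(p) ((p)[0] | ((p)[1] << 8) | ((p)[2] << 16))

    int main(void)
    {
            uint8_t fw[0x380] = { 0 };

            /* word at 0x8 left as 0 -> primary image at offset 0x0 */
            fw[0xa] = 0x00;                 /* DRAM segment at 0x100, */
            fw[0xb] = 0x01;                 /* little-endian 3-byte word */
            fw[0x100 + 0x27e] = 0x01;       /* version 0x0102, big-endian */
            fw[0x100 + 0x27f] = 0x02;

            uint32_t img = le24(fw + 0x8) << 12;
            uint32_t dram = le24(fw + img + 0xa);

            printf("PHY FW version: %#x\n", be16(fw + dram + 0x27e));
            return 0;
    }
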
+
+static struct info_10gbt_phy_fw {
+       unsigned int phy_fw_id;         /* PCI Device ID */
+       char *phy_fw_file;              /* /lib/firmware/ PHY Firmware file */
+       int (*phy_fw_version)(const u8 *phy_fw_data, size_t phy_fw_size);
+       int phy_flash;                  /* Has FLASH for PHY Firmware */
+} phy_info_array[] = {
+       {
+               PHY_AQ1202_DEVICEID,
+               PHY_AQ1202_FIRMWARE,
+               phy_aq1202_version,
+               1,
+       },
+       {
+               PHY_BCM84834_DEVICEID,
+               PHY_BCM84834_FIRMWARE,
+               NULL,
+               0,
+       },
+       { 0, NULL, NULL },
+};
+
+static struct info_10gbt_phy_fw *find_phy_info(int devid)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(phy_info_array); i++) {
+               if (phy_info_array[i].phy_fw_id == devid)
+                       return &phy_info_array[i];
+       }
+       return NULL;
+}
+
+/* Handle updating of chip-external 10Gb/s-BT PHY firmware.  This needs to
+ * happen after the FW_RESET_CMD but before the FW_INITIALIZE_CMD.  On error
+ * we return a negative error number.  If we transfer new firmware we return 1
+ * (from t4_load_phy_fw()).  If we don't do anything we return 0.
+ */
+static int adap_init0_phy(struct adapter *adap)
+{
+       const struct firmware *phyf;
+       int ret;
+       struct info_10gbt_phy_fw *phy_info;
+
+       /* Use the device ID to determine which PHY file to flash.
+        */
+       phy_info = find_phy_info(adap->pdev->device);
+       if (!phy_info) {
+               dev_warn(adap->pdev_dev,
+                        "No PHY Firmware file found for this PHY\n");
+               return -EOPNOTSUPP;
+       }
+
+       /* If we have a T4 PHY firmware file under /lib/firmware/cxgb4/, then
+        * use that. The adapter firmware provides us with a memory buffer
+        * where we can load a PHY firmware file from the host if we want to
+        * override the PHY firmware file in flash.
+        */
+       ret = request_firmware_direct(&phyf, phy_info->phy_fw_file,
+                                     adap->pdev_dev);
+       if (ret < 0) {
+               /* For adapters without FLASH attached to PHY for their
+                * firmware, it's obviously a fatal error if we can't get the
+                * firmware to the adapter.  For adapters with PHY firmware
+                * FLASH storage, it's worth a warning if we can't find the
+                * PHY Firmware, but we'll suppress the error ...
+                */
+               dev_err(adap->pdev_dev,
+                       "unable to find PHY Firmware image /lib/firmware/%s, error %d\n",
+                       phy_info->phy_fw_file, -ret);
+               if (phy_info->phy_flash) {
+                       int cur_phy_fw_ver = 0;
+
+                       t4_phy_fw_ver(adap, &cur_phy_fw_ver);
+                       dev_warn(adap->pdev_dev, "continuing with, on-adapter "
+                                "FLASH copy, version %#x\n", cur_phy_fw_ver);
+                       ret = 0;
+               }
+
+               return ret;
+       }
+
+       /* Load PHY Firmware onto adapter.
+        */
+       ret = t4_load_phy_fw(adap, MEMWIN_NIC, &adap->win0_lock,
+                            phy_info->phy_fw_version,
+                            (u8 *)phyf->data, phyf->size);
+       if (ret < 0)
+               dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n",
+                       -ret);
+       else if (ret > 0) {
+               int new_phy_fw_ver = 0;
+
+               if (phy_info->phy_fw_version)
+                       new_phy_fw_ver = phy_info->phy_fw_version(phyf->data,
+                                                                 phyf->size);
+               dev_info(adap->pdev_dev, "Successfully transferred PHY "
+                        "Firmware /lib/firmware/%s, version %#x\n",
+                        phy_info->phy_fw_file, new_phy_fw_ver);
+       }
+
+       release_firmware(phyf);
+
+       return ret;
+}
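
adap_init0_phy() is gated by is_10gbt_device() in the caller below; that helper is defined outside these hunks. A minimal sketch of its likely shape (the PCI Device IDs here are placeholders; the real list lives with the other device-ID helpers in cxgb4.h):

	static inline int is_10gbt_device(int device)
	{
		/* placeholder IDs for the 10GBASE-T adapter SKUs */
		switch (device) {
		case 0x4409:
		case 0x4486:
			return 1;
		default:
			return 0;
		}
	}
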
+
 /*
  * Attempt to initialize the adapter via a Firmware Configuration File.
  */
@@ -3298,6 +3368,16 @@ static int adap_init0_config(struct adapter *adapter, int reset)
                        goto bye;
        }
 
+       /* If this is a 10Gb/s-BT adapter make sure the chip-external
+        * 10Gb/s-BT PHYs have up-to-date firmware.  Note that this step needs
+        * to be performed after any global adapter RESET above since some
+        * PHYs only have local RAM copies of the PHY firmware.
+        */
+       if (is_10gbt_device(adapter->pdev->device)) {
+               ret = adap_init0_phy(adapter);
+               if (ret < 0)
+                       goto bye;
+       }
        /*
         * If we have a T4 configuration file under /lib/firmware/cxgb4/,
         * then use that.  Otherwise, use the configuration file stored
@@ -3310,6 +3390,9 @@ static int adap_init0_config(struct adapter *adapter, int reset)
        case CHELSIO_T5:
                fw_config_file = FW5_CFNAME;
                break;
+       case CHELSIO_T6:
+               fw_config_file = FW6_CFNAME;
+               break;
        default:
                dev_err(adapter->pdev_dev, "Device %d is not supported\n",
                       adapter->pdev->device);
@@ -3335,7 +3418,7 @@ static int adap_init0_config(struct adapter *adapter, int reset)
                        params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
                             FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
                        ret = t4_query_params(adapter, adapter->mbox,
-                                             adapter->fn, 0, 1, params, val);
+                                             adapter->pf, 0, 1, params, val);
                        if (ret == 0) {
                                /*
                                 * For t4_memory_rw() below addresses and
@@ -3506,7 +3589,24 @@ static struct fw_info fw_info_array[] = {
                        .intfver_iscsi = FW_INTFVER(T5, ISCSI),
                        .intfver_fcoe = FW_INTFVER(T5, FCOE),
                },
+       }, {
+               .chip = CHELSIO_T6,
+               .fs_name = FW6_CFNAME,
+               .fw_mod_name = FW6_FNAME,
+               .fw_hdr = {
+                       .chip = FW_HDR_CHIP_T6,
+                       .fw_ver = __cpu_to_be32(FW_VERSION(T6)),
+                       .intfver_nic = FW_INTFVER(T6, NIC),
+                       .intfver_vnic = FW_INTFVER(T6, VNIC),
+                       .intfver_ofld = FW_INTFVER(T6, OFLD),
+                       .intfver_ri = FW_INTFVER(T6, RI),
+                       .intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU),
+                       .intfver_iscsi = FW_INTFVER(T6, ISCSI),
+                       .intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU),
+                       .intfver_fcoe = FW_INTFVER(T6, FCOE),
+               },
        }
+
 };
 
 static struct fw_info *find_fw_info(int chip)
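
The body of find_fw_info() falls between hunks, but it presumably walks fw_info_array[] the same way find_phy_info() walks phy_info_array[] above. A sketch:

	static struct fw_info *find_fw_info(int chip)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
			if (fw_info_array[i].chip == chip)
				return &fw_info_array[i];
		}
		return NULL;
	}
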
@@ -3612,7 +3712,7 @@ static int adap_init0(struct adapter *adap)
         * the firmware.  On the other hand, we need these fairly early on
         * so we do this right after getting ahold of the firmware.
         */
-       ret = get_vpd_params(adap, &adap->params.vpd);
+       ret = t4_get_vpd_params(adap, &adap->params.vpd);
        if (ret < 0)
                goto bye;
 
@@ -3624,7 +3724,7 @@ static int adap_init0(struct adapter *adap)
        v =
            FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
            FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
-       ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec);
+       ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
        if (ret < 0)
                goto bye;
 
@@ -3647,7 +3747,7 @@ static int adap_init0(struct adapter *adap)
                 */
                params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
                             FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
-               ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
+               ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
                                      params, val);
 
                /* If the firmware doesn't support Configuration Files,
@@ -3706,7 +3806,7 @@ static int adap_init0(struct adapter *adap)
        params[3] = FW_PARAM_PFVF(FILTER_START);
        params[4] = FW_PARAM_PFVF(FILTER_END);
        params[5] = FW_PARAM_PFVF(IQFLINT_START);
-       ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val);
+       ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, val);
        if (ret < 0)
                goto bye;
        adap->sge.egr_start = val[0];
@@ -3724,7 +3824,7 @@ static int adap_init0(struct adapter *adap)
         */
        params[0] = FW_PARAM_PFVF(EQ_END);
        params[1] = FW_PARAM_PFVF(IQFLINT_END);
-       ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
+       ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
        if (ret < 0)
                goto bye;
        adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1;
@@ -3745,7 +3845,7 @@ static int adap_init0(struct adapter *adap)
        }
 
        /* Allocate the memory for the various egress queue bitmaps
-        * ie starving_fl and txq_maperr.
+        * i.e. starving_fl, txq_maperr and blocked_fl.
         */
        adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
                                        sizeof(long), GFP_KERNEL);
@@ -3761,9 +3861,18 @@ static int adap_init0(struct adapter *adap)
                goto bye;
        }
 
+#ifdef CONFIG_DEBUG_FS
+       adap->sge.blocked_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
+                                      sizeof(long), GFP_KERNEL);
+       if (!adap->sge.blocked_fl) {
+               ret = -ENOMEM;
+               goto bye;
+       }
+#endif
+
        params[0] = FW_PARAM_PFVF(CLIP_START);
        params[1] = FW_PARAM_PFVF(CLIP_END);
-       ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
+       ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
        if (ret < 0)
                goto bye;
        adap->clipt_start = val[0];
@@ -3772,7 +3881,7 @@ static int adap_init0(struct adapter *adap)
        /* query params related to active filter region */
        params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
        params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
-       ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
+       ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
        /* If Active filter size is set we enable establishing
         * offload connection through firmware work request
         */
@@ -3789,7 +3898,7 @@ static int adap_init0(struct adapter *adap)
         */
        params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
        val[0] = 1;
-       (void) t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val);
+       (void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val);
 
        /*
         * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
@@ -3801,7 +3910,7 @@ static int adap_init0(struct adapter *adap)
                adap->params.ulptx_memwrite_dsgl = false;
        } else {
                params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
-               ret = t4_query_params(adap, adap->mbox, adap->fn, 0,
+               ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
                                      1, params, val);
                adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
        }
@@ -3827,7 +3936,7 @@ static int adap_init0(struct adapter *adap)
                params[3] = FW_PARAM_PFVF(TDDP_START);
                params[4] = FW_PARAM_PFVF(TDDP_END);
                params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
-               ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
+               ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
                                      params, val);
                if (ret < 0)
                        goto bye;
@@ -3865,7 +3974,7 @@ static int adap_init0(struct adapter *adap)
                params[3] = FW_PARAM_PFVF(RQ_END);
                params[4] = FW_PARAM_PFVF(PBL_START);
                params[5] = FW_PARAM_PFVF(PBL_END);
-               ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
+               ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
                                      params, val);
                if (ret < 0)
                        goto bye;
@@ -3882,7 +3991,7 @@ static int adap_init0(struct adapter *adap)
                params[3] = FW_PARAM_PFVF(CQ_END);
                params[4] = FW_PARAM_PFVF(OCQ_START);
                params[5] = FW_PARAM_PFVF(OCQ_END);
-               ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params,
+               ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params,
                                      val);
                if (ret < 0)
                        goto bye;
@@ -3895,7 +4004,7 @@ static int adap_init0(struct adapter *adap)
 
                params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
                params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
-               ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params,
+               ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params,
                                      val);
                if (ret < 0) {
                        adap->params.max_ordird_qp = 8;
@@ -3913,7 +4022,7 @@ static int adap_init0(struct adapter *adap)
        if (caps_cmd.iscsicaps) {
                params[0] = FW_PARAM_PFVF(ISCSI_START);
                params[1] = FW_PARAM_PFVF(ISCSI_END);
-               ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
+               ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
                                      params, val);
                if (ret < 0)
                        goto bye;
@@ -3959,8 +4068,8 @@ static int adap_init0(struct adapter *adap)
                             adap->params.b_wnd);
        }
        t4_init_sge_params(adap);
-       t4_init_tp_params(adap);
        adap->flags |= FW_OK;
+       t4_init_tp_params(adap);
        return 0;
 
        /*
@@ -3973,6 +4082,9 @@ bye:
        kfree(adap->sge.ingr_map);
        kfree(adap->sge.starving_fl);
        kfree(adap->sge.txq_maperr);
+#ifdef CONFIG_DEBUG_FS
+       kfree(adap->sge.blocked_fl);
+#endif
        if (ret != -ETIMEDOUT && ret != -EIO)
                t4_fw_bye(adap, adap->mbox);
        return ret;
@@ -4040,7 +4152,7 @@ static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
 
        if (t4_wait_dev_ready(adap->regs) < 0)
                return PCI_ERS_RESULT_DISCONNECT;
-       if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL) < 0)
+       if (t4_fw_hello(adap, adap->mbox, adap->pf, MASTER_MUST, NULL) < 0)
                return PCI_ERS_RESULT_DISCONNECT;
        adap->flags |= FW_OK;
        if (adap_init1(adap, &c))
@@ -4049,7 +4161,7 @@ static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
        for_each_port(adap, i) {
                struct port_info *p = adap2pinfo(adap, i);
 
-               ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
+               ret = t4_alloc_vi(adap, adap->mbox, p->tx_chan, adap->pf, 0, 1,
                                  NULL, NULL);
                if (ret < 0)
                        return PCI_ERS_RESULT_DISCONNECT;
@@ -4340,7 +4452,12 @@ static int enable_msix(struct adapter *adap)
 
 static int init_rss(struct adapter *adap)
 {
-       unsigned int i, j;
+       unsigned int i;
+       int err;
+
+       err = t4_init_rss_mode(adap, adap->mbox);
+       if (err)
+               return err;
 
        for_each_port(adap, i) {
                struct port_info *pi = adap2pinfo(adap, i);
@@ -4348,8 +4465,6 @@ static int init_rss(struct adapter *adap)
                pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
                if (!pi->rss)
                        return -ENOMEM;
-               for (j = 0; j < pi->rss_size; j++)
-                       pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
        }
        return 0;
 }
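
For reference, the default spreading that the deleted loop performed comes from ethtool_rxfh_indir_default() in include/linux/ethtool.h, which simply round-robins indirection-table entries across the RX rings:

	/* include/linux/ethtool.h */
	static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings)
	{
		return index % n_rx_rings;
	}
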
@@ -4413,15 +4528,23 @@ static void free_some_resources(struct adapter *adapter)
        kfree(adapter->sge.ingr_map);
        kfree(adapter->sge.starving_fl);
        kfree(adapter->sge.txq_maperr);
+#ifdef CONFIG_DEBUG_FS
+       kfree(adapter->sge.blocked_fl);
+#endif
        disable_msi(adapter);
 
        for_each_port(adapter, i)
                if (adapter->port[i]) {
+                       struct port_info *pi = adap2pinfo(adapter, i);
+
+                       if (pi->viid != 0)
+                               t4_free_vi(adapter, adapter->mbox, adapter->pf,
+                                          0, pi->viid);
                        kfree(adap2pinfo(adapter, i)->rss);
                        free_netdev(adapter->port[i]);
                }
        if (adapter->flags & FW_OK)
-               t4_fw_bye(adapter, adapter->fn);
+               t4_fw_bye(adapter, adapter->pf);
 }
 
 #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
@@ -4512,7 +4635,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        adapter->pdev = pdev;
        adapter->pdev_dev = &pdev->dev;
        adapter->mbox = func;
-       adapter->fn = func;
+       adapter->pf = func;
        adapter->msg_enable = dflt_msg_enable;
        memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
 
@@ -4532,7 +4655,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (!is_t4(adapter->params.chip)) {
                s_qpp = (QUEUESPERPAGEPF0_S +
                        (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) *
-                       adapter->fn);
+                       adapter->pf);
                qpp = 1 << QUEUESPERPAGEPF0_G(t4_read_reg(adapter,
                      SGE_EGRESS_QUEUES_PER_PAGE_PF_A) >> s_qpp);
                num_seg = PAGE_SIZE / SEGMENT_SIZE;
@@ -4555,10 +4678,15 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                        err = -ENOMEM;
                        goto out_free_adapter;
                }
+               t4_write_reg(adapter, SGE_STAT_CFG_A,
+                            STATSOURCE_T5_V(7) | STATMODE_V(0));
        }
 
        setup_memwin(adapter);
        err = adap_init0(adapter);
+#ifdef CONFIG_DEBUG_FS
+       bitmap_zero(adapter->sge.blocked_fl, adapter->sge.egr_sz);
+#endif
        setup_memwin_rdma(adapter);
        if (err)
                goto out_unmap_bar;
@@ -4607,10 +4735,25 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                err = t4_port_init(adapter, func, func, 0);
                if (err)
                        goto out_free_dev;
+       } else if (adapter->params.nports == 1) {
+               /* If we don't have a connection to the firmware -- possibly
+                * because of an error -- grab the raw VPD parameters so we
+                * can set the proper MAC Address on the debug network
+                * interface that we've created.
+                */
+               u8 hw_addr[ETH_ALEN];
+               u8 *na = adapter->params.vpd.na;
+
+               err = t4_get_raw_vpd_params(adapter, &adapter->params.vpd);
+               if (!err) {
+                       for (i = 0; i < ETH_ALEN; i++)
+                               hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
+                                             hex2val(na[2 * i + 1]));
+                       t4_set_hw_addr(adapter, 0, hw_addr);
+               }
        }
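
	/* hex2val() is a driver-local helper defined outside this hunk; the
	 * conversion loop above is equivalent to this sketch built on the
	 * kernel's hex_to_bin() (error handling omitted; assumes the VPD
	 * "na" field holds valid ASCII hex):
	 *
	 *	for (i = 0; i < ETH_ALEN; i++)
	 *		hw_addr[i] = (hex_to_bin(na[2 * i + 0]) << 4) |
	 *			      hex_to_bin(na[2 * i + 1]);
	 */
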
 
-       /*
-        * Configure queues and allocate tables now, they can be needed as
+       /* Configure queues and allocate tables now, they can be needed as
         * soon as the first register_netdev completes.
         */
        cfg_queues(adapter);
index 78ab4d406ce277509eb6da61780903129596e7ee..14e8110b5dbbd23273aeaa2b8721d9831d1a3664 100644 (file)
@@ -264,6 +264,7 @@ struct cxgb4_lld_info {
        unsigned int max_ordird_qp;          /* Max ORD/IRD depth per RDMA QP */
        unsigned int max_ird_adapter;        /* Max IRD memory per adapter */
        bool ulptx_memwrite_dsgl;            /* use of T5 DSGL allowed */
+       int nodeid;                          /* device numa node id */
 };
 
 struct cxgb4_uld_info {
@@ -297,8 +298,6 @@ struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
                                   unsigned int skb_len, unsigned int pull_len);
 int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx, u16 size);
 int cxgb4_flush_eq_cache(struct net_device *dev);
-void cxgb4_disable_db_coalescing(struct net_device *dev);
-void cxgb4_enable_db_coalescing(struct net_device *dev);
 int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte);
 u64 cxgb4_read_sge_timestamp(struct net_device *dev);
 
index 0d2eddab04efbf7b2a0e1054ea46848273c97933..6b7c37fd0252f23d0ea79abc422ace16f8e198b5 100644 (file)
  */
 #define TX_QCHECK_PERIOD (HZ / 2)
 
-/* SGE Hung Ingress DMA Threshold Warning time (in Hz) and Warning Repeat Rate
- * (in RX_QCHECK_PERIOD multiples).  If we find one of the SGE Ingress DMA
- * State Machines in the same state for this amount of time (in HZ) then we'll
- * issue a warning about a potential hang.  We'll repeat the warning as the
- * SGE Ingress DMA Channel appears to be hung every N RX_QCHECK_PERIODs till
- * the situation clears.  If the situation clears, we'll note that as well.
- */
-#define SGE_IDMA_WARN_THRESH (1 * HZ)
-#define SGE_IDMA_WARN_REPEAT (20 * RX_QCHECK_PERIOD)
-
 /*
  * Max number of Tx descriptors to be reclaimed by the Tx timer.
  */
@@ -532,14 +522,17 @@ static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
 
 static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
 {
-       u32 val;
        if (q->pend_cred >= 8) {
+               u32 val = adap->params.arch.sge_fl_db;
+
                if (is_t4(adap->params.chip))
-                       val = PIDX_V(q->pend_cred / 8);
+                       val |= PIDX_V(q->pend_cred / 8);
                else
-                       val = PIDX_T5_V(q->pend_cred / 8) |
-                               DBTYPE_F;
-               val |= DBPRIO_F;
+                       val |= PIDX_T5_V(q->pend_cred / 8);
+
+               /* Make sure all memory writes to the Free List queue are
+                * committed before we tell the hardware about them.
+                */
                wmb();
 
                /* If we don't have access to the new User Doorbell (T5+), use
@@ -594,6 +587,11 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
        struct rx_sw_desc *sd = &q->sdesc[q->pidx];
        int node;
 
+#ifdef CONFIG_DEBUG_FS
+       if (test_bit(q->cntxt_id - adap->sge.egr_start, adap->sge.blocked_fl))
+               goto out;
+#endif
+
        gfp |= __GFP_NOWARN;
        node = dev_to_node(adap->pdev_dev);
 
@@ -930,7 +928,10 @@ static void cxgb_pio_copy(u64 __iomem *dst, u64 *src)
  */
 static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
 {
-       wmb();            /* write descriptors before telling HW */
+       /* Make sure that all writes to the TX Descriptors are committed
+        * before we tell the hardware about them.
+        */
+       wmb();
 
        /* If we don't have access to the new User Doorbell (T5+), use the old
         * doorbell mechanism; otherwise use the new BAR2 mechanism.
@@ -1032,7 +1033,7 @@ static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
  * Figure out what HW csum a packet wants and return the appropriate control
  * bits.
  */
-static u64 hwcsum(const struct sk_buff *skb)
+static u64 hwcsum(enum chip_type chip, const struct sk_buff *skb)
 {
        int csum_type;
        const struct iphdr *iph = ip_hdr(skb);
@@ -1047,7 +1048,7 @@ nocsum:                   /*
                         * unknown protocol, disable HW csum
                         * and hope a bad packet is detected
                         */
-                       return TXPKT_L4CSUM_DIS;
+                       return TXPKT_L4CSUM_DIS_F;
                }
        } else {
                /*
@@ -1063,15 +1064,21 @@ nocsum:                 /*
                        goto nocsum;
        }
 
-       if (likely(csum_type >= TX_CSUM_TCPIP))
-               return TXPKT_CSUM_TYPE(csum_type) |
-                       TXPKT_IPHDR_LEN(skb_network_header_len(skb)) |
-                       TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN);
-       else {
+       if (likely(csum_type >= TX_CSUM_TCPIP)) {
+               u64 hdr_len = TXPKT_IPHDR_LEN_V(skb_network_header_len(skb));
+               int eth_hdr_len = skb_network_offset(skb) - ETH_HLEN;
+
+               if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
+                       hdr_len |= TXPKT_ETHHDR_LEN_V(eth_hdr_len);
+               else
+                       hdr_len |= T6_TXPKT_ETHHDR_LEN_V(eth_hdr_len);
+               return TXPKT_CSUM_TYPE_V(csum_type) | hdr_len;
+       } else {
                int start = skb_transport_offset(skb);
 
-               return TXPKT_CSUM_TYPE(csum_type) | TXPKT_CSUM_START(start) |
-                       TXPKT_CSUM_LOC(start + skb->csum_offset);
+               return TXPKT_CSUM_TYPE_V(csum_type) |
+                       TXPKT_CSUM_START_V(start) |
+                       TXPKT_CSUM_LOC_V(start + skb->csum_offset);
        }
 }
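
The TXPKT_*, LSO_* and RSPD_* renames in the surrounding hunks follow the driver's newer register-field macro convention: each field gets _S (shift) and _M (mask) constants, a _V(x) inserter, a _G(x) extractor, and, for single-bit fields, an _F flag. A sketch of the pattern; the shift positions and widths below are illustrative, not the real hardware layout:

	#define TXPKT_CSUM_TYPE_S    40			/* illustrative */
	#define TXPKT_CSUM_TYPE_M    0xfULL
	#define TXPKT_CSUM_TYPE_V(x) ((u64)(x) << TXPKT_CSUM_TYPE_S)
	#define TXPKT_CSUM_TYPE_G(x) (((x) >> TXPKT_CSUM_TYPE_S) & TXPKT_CSUM_TYPE_M)

	#define TXPKT_L4CSUM_DIS_S    57		/* illustrative */
	#define TXPKT_L4CSUM_DIS_V(x) ((u64)(x) << TXPKT_L4CSUM_DIS_S)
	#define TXPKT_L4CSUM_DIS_F    TXPKT_L4CSUM_DIS_V(1ULL)
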
 
@@ -1112,11 +1119,11 @@ cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap,
                return -ENOTSUPP;
 
        /* FC CRC offload */
-       *cntrl = TXPKT_CSUM_TYPE(TX_CSUM_FCOE) |
-                    TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS |
-                    TXPKT_CSUM_START(CXGB_FCOE_TXPKT_CSUM_START) |
-                    TXPKT_CSUM_END(CXGB_FCOE_TXPKT_CSUM_END) |
-                    TXPKT_CSUM_LOC(CXGB_FCOE_TXPKT_CSUM_END);
+       *cntrl = TXPKT_CSUM_TYPE_V(TX_CSUM_FCOE) |
+                    TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F |
+                    TXPKT_CSUM_START_V(CXGB_FCOE_TXPKT_CSUM_START) |
+                    TXPKT_CSUM_END_V(CXGB_FCOE_TXPKT_CSUM_END) |
+                    TXPKT_CSUM_LOC_V(CXGB_FCOE_TXPKT_CSUM_END);
        return 0;
 }
 #endif /* CONFIG_CHELSIO_T4_FCOE */
@@ -1130,7 +1137,6 @@ cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap,
  */
 netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 {
-       int len;
        u32 wr_mid;
        u64 cntrl, *end;
        int qidx, credits;
@@ -1143,6 +1149,7 @@ netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
        const struct skb_shared_info *ssi;
        dma_addr_t addr[MAX_SKB_FRAGS + 1];
        bool immediate = false;
+       int len, max_pkt_len;
 #ifdef CONFIG_CHELSIO_T4_FCOE
        int err;
 #endif /* CONFIG_CHELSIO_T4_FCOE */
@@ -1156,13 +1163,20 @@ out_free:       dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }
 
+       /* Discard the packet if the length is greater than mtu */
+       max_pkt_len = ETH_HLEN + dev->mtu;
+       if (skb_vlan_tag_present(skb))
+               max_pkt_len += VLAN_HLEN;
+       if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
+               goto out_free;
+
        pi = netdev_priv(dev);
        adap = pi->adapter;
        qidx = skb_get_queue_mapping(skb);
        q = &adap->sge.ethtxq[qidx + pi->first_qset];
 
        reclaim_completed_tx(adap, &q->q, true);
-       cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS;
+       cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
 
 #ifdef CONFIG_CHELSIO_T4_FCOE
        err = cxgb_fcoe_offload(skb, adap, pi, &cntrl);
@@ -1213,23 +1227,29 @@ out_free:       dev_kfree_skb_any(skb);
                len += sizeof(*lso);
                wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
                                       FW_WR_IMMDLEN_V(len));
-               lso->c.lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) |
-                                       LSO_FIRST_SLICE | LSO_LAST_SLICE |
-                                       LSO_IPV6(v6) |
-                                       LSO_ETHHDR_LEN(eth_xtra_len / 4) |
-                                       LSO_IPHDR_LEN(l3hdr_len / 4) |
-                                       LSO_TCPHDR_LEN(tcp_hdr(skb)->doff));
+               lso->c.lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
+                                       LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F |
+                                       LSO_IPV6_V(v6) |
+                                       LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
+                                       LSO_IPHDR_LEN_V(l3hdr_len / 4) |
+                                       LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
                lso->c.ipid_ofst = htons(0);
                lso->c.mss = htons(ssi->gso_size);
                lso->c.seqno_offset = htonl(0);
                if (is_t4(adap->params.chip))
                        lso->c.len = htonl(skb->len);
                else
-                       lso->c.len = htonl(LSO_T5_XFER_SIZE(skb->len));
+                       lso->c.len = htonl(LSO_T5_XFER_SIZE_V(skb->len));
                cpl = (void *)(lso + 1);
-               cntrl = TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
-                       TXPKT_IPHDR_LEN(l3hdr_len) |
-                       TXPKT_ETHHDR_LEN(eth_xtra_len);
+
+               if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
+                       cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+               else
+                       cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+
+               cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
+                                          TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
+                        TXPKT_IPHDR_LEN_V(l3hdr_len);
                q->tso++;
                q->tx_cso += ssi->gso_segs;
        } else {
@@ -1238,23 +1258,25 @@ out_free:       dev_kfree_skb_any(skb);
                                       FW_WR_IMMDLEN_V(len));
                cpl = (void *)(wr + 1);
                if (skb->ip_summed == CHECKSUM_PARTIAL) {
-                       cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS;
+                       cntrl = hwcsum(adap->params.chip, skb) |
+                               TXPKT_IPCSUM_DIS_F;
                        q->tx_cso++;
                }
        }
 
        if (skb_vlan_tag_present(skb)) {
                q->vlan_ins++;
-               cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(skb_vlan_tag_get(skb));
+               cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
 #ifdef CONFIG_CHELSIO_T4_FCOE
                if (skb->protocol == htons(ETH_P_FCOE))
-                       cntrl |= TXPKT_VLAN(
+                       cntrl |= TXPKT_VLAN_V(
                                 ((skb->priority & 0x7) << VLAN_PRIO_SHIFT));
 #endif /* CONFIG_CHELSIO_T4_FCOE */
        }
 
-       cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) |
-                          TXPKT_INTF(pi->tx_chan) | TXPKT_PF(adap->fn));
+       cpl->ctrl0 = htonl(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
+                          TXPKT_INTF_V(pi->tx_chan) |
+                          TXPKT_PF_V(adap->pf));
        cpl->pack = htons(0);
        cpl->len = htons(skb->len);
        cpl->ctrl1 = cpu_to_be64(cntrl);
@@ -1964,7 +1986,7 @@ static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q,
 static inline bool is_new_response(const struct rsp_ctrl *r,
                                   const struct sge_rspq *q)
 {
-       return RSPD_GEN(r->type_gen) == q->gen;
+       return (r->type_gen >> RSPD_GEN_S) == q->gen;
 }
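
The rewritten test spells out the generation-bit handshake: hardware stamps a generation bit into each response descriptor it writes, and the driver keeps a software copy in q->gen that it flips on every ring wraparound, so a descriptor is new exactly while the two bits agree. A sketch of the consumer side (the wrap handling lives in the ring-advance helper):

	/* sketch: advance the response queue, flipping the gen bit on wrap */
	if (unlikely(++q->cidx == q->size)) {
		q->cidx = 0;
		q->gen ^= 1;	/* descriptors now valid with the other gen */
		q->cur_desc = q->desc;
	}
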
 
 /**
@@ -2011,19 +2033,19 @@ static int process_responses(struct sge_rspq *q, int budget)
                        break;
 
                dma_rmb();
-               rsp_type = RSPD_TYPE(rc->type_gen);
-               if (likely(rsp_type == RSP_TYPE_FLBUF)) {
+               rsp_type = RSPD_TYPE_G(rc->type_gen);
+               if (likely(rsp_type == RSPD_TYPE_FLBUF_X)) {
                        struct page_frag *fp;
                        struct pkt_gl si;
                        const struct rx_sw_desc *rsd;
                        u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags;
 
-                       if (len & RSPD_NEWBUF) {
+                       if (len & RSPD_NEWBUF_F) {
                                if (likely(q->offset > 0)) {
                                        free_rx_bufs(q->adap, &rxq->fl, 1);
                                        q->offset = 0;
                                }
-                               len = RSPD_LEN(len);
+                               len = RSPD_LEN_G(len);
                        }
                        si.tot_len = len;
 
@@ -2058,7 +2080,7 @@ static int process_responses(struct sge_rspq *q, int budget)
                                q->offset += ALIGN(fp->size, s->fl_align);
                        else
                                restore_rx_bufs(&si, &rxq->fl, frags);
-               } else if (likely(rsp_type == RSP_TYPE_CPL)) {
+               } else if (likely(rsp_type == RSPD_TYPE_CPL_X)) {
                        ret = q->handler(q, q->cur_desc, NULL);
                } else {
                        ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
@@ -2066,7 +2088,7 @@ static int process_responses(struct sge_rspq *q, int budget)
 
                if (unlikely(ret)) {
                        /* couldn't process descriptor, back off for recovery */
-                       q->next_intr_params = QINTR_TIMER_IDX(NOMEM_TMR_IDX);
+                       q->next_intr_params = QINTR_TIMER_IDX_V(NOMEM_TMR_IDX);
                        break;
                }
 
@@ -2090,7 +2112,7 @@ int cxgb_busy_poll(struct napi_struct *napi)
                return LL_FLUSH_BUSY;
 
        work_done = process_responses(q, 4);
-       params = QINTR_TIMER_IDX(TIMERREG_COUNTER0_X) | QINTR_CNT_EN;
+       params = QINTR_TIMER_IDX_V(TIMERREG_COUNTER0_X) | QINTR_CNT_EN_V(1);
        q->next_intr_params = params;
        val = CIDXINC_V(work_done) | SEINTARM_V(params);
 
@@ -2137,7 +2159,7 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
                int timer_index;
 
                napi_complete(napi);
-               timer_index = QINTR_TIMER_IDX_GET(q->next_intr_params);
+               timer_index = QINTR_TIMER_IDX_G(q->next_intr_params);
 
                if (q->adaptive_rx) {
                        if (work_done > max(timer_pkt_quota[timer_index],
@@ -2147,15 +2169,16 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
                                timer_index = timer_index - 1;
 
                        timer_index = clamp(timer_index, 0, SGE_TIMERREGS - 1);
-                       q->next_intr_params = QINTR_TIMER_IDX(timer_index) |
-                                                             V_QINTR_CNT_EN;
+                       q->next_intr_params =
+                                       QINTR_TIMER_IDX_V(timer_index) |
+                                       QINTR_CNT_EN_V(0);
                        params = q->next_intr_params;
                } else {
                        params = q->next_intr_params;
                        q->next_intr_params = q->intr_params;
                }
        } else
-               params = QINTR_TIMER_IDX(7);
+               params = QINTR_TIMER_IDX_V(7);
 
        val = CIDXINC_V(work_done) | SEINTARM_V(params);
 
@@ -2203,7 +2226,7 @@ static unsigned int process_intrq(struct adapter *adap)
                        break;
 
                dma_rmb();
-               if (RSPD_TYPE(rc->type_gen) == RSP_TYPE_INTR) {
+               if (RSPD_TYPE_G(rc->type_gen) == RSPD_TYPE_INTR_X) {
                        unsigned int qid = ntohl(rc->pldbuflen_qid);
 
                        qid -= adap->sge.ingr_start;
@@ -2279,7 +2302,7 @@ irq_handler_t t4_intr_handler(struct adapter *adap)
 static void sge_rx_timer_cb(unsigned long data)
 {
        unsigned long m;
-       unsigned int i, idma_same_state_cnt[2];
+       unsigned int i;
        struct adapter *adap = (struct adapter *)data;
        struct sge *s = &adap->sge;
 
@@ -2300,67 +2323,16 @@ static void sge_rx_timer_cb(unsigned long data)
                                        set_bit(id, s->starving_fl);
                        }
                }
+       /* The remainder of the SGE RX Timer Callback routine is dedicated to
+        * global Master PF activities like checking for chip ingress stalls,
+        * etc.
+        */
+       if (!(adap->flags & MASTER_PF))
+               goto done;
 
-       t4_write_reg(adap, SGE_DEBUG_INDEX_A, 13);
-       idma_same_state_cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH_A);
-       idma_same_state_cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW_A);
-
-       for (i = 0; i < 2; i++) {
-               u32 debug0, debug11;
-
-               /* If the Ingress DMA Same State Counter ("timer") is less
-                * than 1s, then we can reset our synthesized Stall Timer and
-                * continue.  If we have previously emitted warnings about a
-                * potential stalled Ingress Queue, issue a note indicating
-                * that the Ingress Queue has resumed forward progress.
-                */
-               if (idma_same_state_cnt[i] < s->idma_1s_thresh) {
-                       if (s->idma_stalled[i] >= SGE_IDMA_WARN_THRESH)
-                               CH_WARN(adap, "SGE idma%d, queue%u,resumed after %d sec\n",
-                                       i, s->idma_qid[i],
-                                       s->idma_stalled[i]/HZ);
-                       s->idma_stalled[i] = 0;
-                       continue;
-               }
-
-               /* Synthesize an SGE Ingress DMA Same State Timer in the Hz
-                * domain.  The first time we get here it'll be because we
-                * passed the 1s Threshold; each additional time it'll be
-                * because the RX Timer Callback is being fired on its regular
-                * schedule.
-                *
-                * If the stall is below our Potential Hung Ingress Queue
-                * Warning Threshold, continue.
-                */
-               if (s->idma_stalled[i] == 0)
-                       s->idma_stalled[i] = HZ;
-               else
-                       s->idma_stalled[i] += RX_QCHECK_PERIOD;
-
-               if (s->idma_stalled[i] < SGE_IDMA_WARN_THRESH)
-                       continue;
-
-               /* We'll issue a warning every SGE_IDMA_WARN_REPEAT Hz */
-               if (((s->idma_stalled[i] - HZ) % SGE_IDMA_WARN_REPEAT) != 0)
-                       continue;
-
-               /* Read and save the SGE IDMA State and Queue ID information.
-                * We do this every time in case it changes across time ...
-                */
-               t4_write_reg(adap, SGE_DEBUG_INDEX_A, 0);
-               debug0 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW_A);
-               s->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
-
-               t4_write_reg(adap, SGE_DEBUG_INDEX_A, 11);
-               debug11 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW_A);
-               s->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
-
-               CH_WARN(adap, "SGE idma%u, queue%u, maybe stuck state%u %dsecs (debug0=%#x, debug11=%#x)\n",
-                       i, s->idma_qid[i], s->idma_state[i],
-                       s->idma_stalled[i]/HZ, debug0, debug11);
-               t4_sge_decode_idma_state(adap, s->idma_state[i]);
-       }
+       t4_idma_monitor(adap, &s->idma_monitor, HZ, RX_QCHECK_PERIOD);
 
+done:
        mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
 }
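
The per-channel stall bookkeeping deleted above (idma_stalled[], idma_state[], idma_qid[]) presumably moves into a monitor state object owned by the common code and passed to t4_idma_monitor(); a sketch of what such state would carry (field names assumed, not confirmed by these hunks):

	struct sge_idma_monitor_state {
		unsigned int idma_1s_thresh;	/* 1s threshold in Core Clock ticks */
		unsigned int idma_stalled[2];	/* synthesized stall timers, in HZ */
		unsigned int idma_state[2];	/* IDMA state at the last check */
		unsigned int idma_qid[2];	/* Ingress Queue ID of a hung channel */
	};
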
 
@@ -2429,7 +2401,7 @@ static void __iomem *bar2_address(struct adapter *adapter,
        u64 bar2_qoffset;
        int ret;
 
-       ret = cxgb4_t4_bar2_sge_qregs(adapter, qid, qtype,
+       ret = t4_bar2_sge_qregs(adapter, qid, qtype,
                                &bar2_qoffset, pbar2_qid);
        if (ret)
                return NULL;
@@ -2437,9 +2409,12 @@ static void __iomem *bar2_address(struct adapter *adapter,
        return adapter->bar2 + bar2_qoffset;
 }
 
+/* @intr_idx: MSI/MSI-X vector if >=0, -(absolute qid + 1) if < 0
+ * @cong: < 0 -> no congestion feedback, >= 0 -> congestion channel map
+ */
 int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
                     struct net_device *dev, int intr_idx,
-                    struct sge_fl *fl, rspq_handler_t hnd)
+                    struct sge_fl *fl, rspq_handler_t hnd, int cong)
 {
        int ret, flsz = 0;
        struct fw_iq_cmd c;
@@ -2457,12 +2432,13 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
        memset(&c, 0, sizeof(c));
        c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
                            FW_CMD_WRITE_F | FW_CMD_EXEC_F |
-                           FW_IQ_CMD_PFN_V(adap->fn) | FW_IQ_CMD_VFN_V(0));
+                           FW_IQ_CMD_PFN_V(adap->pf) | FW_IQ_CMD_VFN_V(0));
        c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC_F | FW_IQ_CMD_IQSTART_F |
                                 FW_LEN16(c));
        c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) |
                FW_IQ_CMD_IQASYNCH_V(fwevtq) | FW_IQ_CMD_VIID_V(pi->viid) |
-               FW_IQ_CMD_IQANDST_V(intr_idx < 0) | FW_IQ_CMD_IQANUD_V(1) |
+               FW_IQ_CMD_IQANDST_V(intr_idx < 0) |
+               FW_IQ_CMD_IQANUD_V(UPDATEDELIVERY_INTERRUPT_X) |
                FW_IQ_CMD_IQANDSTINDEX_V(intr_idx >= 0 ? intr_idx :
                                                        -intr_idx - 1));
        c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH_V(pi->tx_chan) |
@@ -2471,8 +2447,21 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
                FW_IQ_CMD_IQESIZE_V(ilog2(iq->iqe_len) - 4));
        c.iqsize = htons(iq->size);
        c.iqaddr = cpu_to_be64(iq->phys_addr);
+       if (cong >= 0)
+               c.iqns_to_fl0congen = htonl(FW_IQ_CMD_IQFLINTCONGEN_F);
 
        if (fl) {
+               enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
+
+               /* Allocate the ring for the hardware free list (with space
+                * for its status page) along with the associated software
+                * descriptor ring.  The free list size needs to be a multiple
+                * of the Egress Queue Unit and at least 2 Egress Units larger
+                * than the SGE's Egress Congestion Threshold
+                * (fl_starve_thres - 1).
+                */
+               if (fl->size < s->fl_starve_thres - 1 + 2 * 8)
+                       fl->size = s->fl_starve_thres - 1 + 2 * 8;
                fl->size = roundup(fl->size, 8);
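
	/* Illustrative arithmetic (threshold value invented): with
	 * s->fl_starve_thres = 65 the floor above is 65 - 1 + 2 * 8 = 80
	 * entries, i.e. the Egress Congestion Threshold term
	 * (fl_starve_thres - 1) plus two 8-descriptor Egress Queue Units;
	 * 80 is already a multiple of 8, so the roundup() above leaves it
	 * unchanged.
	 */
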
                fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
                                      sizeof(struct rx_sw_desc), &fl->addr,
@@ -2481,17 +2470,25 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
                        goto fl_nomem;
 
                flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
-               c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN_F |
-                                           FW_IQ_CMD_FL0FETCHRO_F |
-                                           FW_IQ_CMD_FL0DATARO_F |
-                                           FW_IQ_CMD_FL0PADEN_F);
-               c.fl0dcaen_to_fl0cidxfthresh = htons(FW_IQ_CMD_FL0FBMIN_V(2) |
-                               FW_IQ_CMD_FL0FBMAX_V(3));
+               c.iqns_to_fl0congen |= htonl(FW_IQ_CMD_FL0PACKEN_F |
+                                            FW_IQ_CMD_FL0FETCHRO_F |
+                                            FW_IQ_CMD_FL0DATARO_F |
+                                            FW_IQ_CMD_FL0PADEN_F);
+               if (cong >= 0)
+                       c.iqns_to_fl0congen |=
+                               htonl(FW_IQ_CMD_FL0CNGCHMAP_V(cong) |
+                                     FW_IQ_CMD_FL0CONGCIF_F |
+                                     FW_IQ_CMD_FL0CONGEN_F);
+               c.fl0dcaen_to_fl0cidxfthresh =
+                       htons(FW_IQ_CMD_FL0FBMIN_V(FETCHBURSTMIN_64B_X) |
+                             FW_IQ_CMD_FL0FBMAX_V((chip <= CHELSIO_T5) ?
+                                                  FETCHBURSTMAX_512B_X :
+                                                  FETCHBURSTMAX_256B_X));
                c.fl0size = htons(flsz);
                c.fl0addr = cpu_to_be64(fl->addr);
        }
 
-       ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
+       ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
        if (ret)
                goto err;
 
@@ -2532,6 +2529,41 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
                                             &fl->bar2_qid);
                refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL);
        }
+
+       /* For T5 and later we attempt to set up the Congestion Manager values
+        * of the new RX Ethernet Queue.  This should really be handled by
+        * firmware because it's more complex than any host driver wants to
+        * get involved with, it's different per chip, and this is almost
+        * certainly wrong.  Firmware would be wrong as well, but it would be
+        * a lot easier to fix in one place ...  For now we do something very
+        * simple (and hopefully less wrong).
+        */
+       if (!is_t4(adap->params.chip) && cong >= 0) {
+               u32 param, val;
+               int i;
+
+               param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
+                        FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
+                        FW_PARAMS_PARAM_YZ_V(iq->cntxt_id));
+               if (cong == 0) {
+                       val = CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_QUEUE_X);
+               } else {
+                       val =
+                           CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_CHANNEL_X);
+                       for (i = 0; i < 4; i++) {
+                               if (cong & (1 << i))
+                                       val |=
+                                            CONMCTXT_CNGCHMAP_V(1 << (i << 2));
+                       }
+               }
+               ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
+                                   &param, &val);
+               if (ret)
+                       dev_warn(adap->pdev_dev, "Failed to set Congestion"
+                                " Manager Context for Ingress Queue %d: %d\n",
+                                iq->cntxt_id, -ret);
+       }
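
	/* Worked example: cong = 0x5 requests congestion feedback from
	 * channels 0 and 2, so the loop above ORs in CNGCHMAP bits
	 * 1 << 0 and 1 << 8; each channel owns a 4-bit nibble in the map,
	 * hence the "1 << (i << 2)".
	 */
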
+
        return 0;
 
 fl_nomem:
@@ -2589,23 +2621,24 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
        memset(&c, 0, sizeof(c));
        c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F |
                            FW_CMD_WRITE_F | FW_CMD_EXEC_F |
-                           FW_EQ_ETH_CMD_PFN_V(adap->fn) |
+                           FW_EQ_ETH_CMD_PFN_V(adap->pf) |
                            FW_EQ_ETH_CMD_VFN_V(0));
        c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC_F |
                                 FW_EQ_ETH_CMD_EQSTART_F | FW_LEN16(c));
        c.viid_pkd = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
                           FW_EQ_ETH_CMD_VIID_V(pi->viid));
-       c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(2) |
-                                  FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) |
-                                  FW_EQ_ETH_CMD_FETCHRO_V(1) |
-                                  FW_EQ_ETH_CMD_IQID_V(iqid));
-       c.dcaen_to_eqsize = htonl(FW_EQ_ETH_CMD_FBMIN_V(2) |
-                                 FW_EQ_ETH_CMD_FBMAX_V(3) |
-                                 FW_EQ_ETH_CMD_CIDXFTHRESH_V(5) |
-                                 FW_EQ_ETH_CMD_EQSIZE_V(nentries));
+       c.fetchszm_to_iqid =
+               htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
+                     FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) |
+                     FW_EQ_ETH_CMD_FETCHRO_F | FW_EQ_ETH_CMD_IQID_V(iqid));
+       c.dcaen_to_eqsize =
+               htonl(FW_EQ_ETH_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) |
+                     FW_EQ_ETH_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
+                     FW_EQ_ETH_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
+                     FW_EQ_ETH_CMD_EQSIZE_V(nentries));
        c.eqaddr = cpu_to_be64(txq->q.phys_addr);
 
-       ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
+       ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
        if (ret) {
                kfree(txq->q.sdesc);
                txq->q.sdesc = NULL;
@@ -2637,29 +2670,30 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
 
        txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
                                 sizeof(struct tx_desc), 0, &txq->q.phys_addr,
-                                NULL, 0, NUMA_NO_NODE);
+                                NULL, 0, dev_to_node(adap->pdev_dev));
        if (!txq->q.desc)
                return -ENOMEM;
 
        c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F |
                            FW_CMD_WRITE_F | FW_CMD_EXEC_F |
-                           FW_EQ_CTRL_CMD_PFN_V(adap->fn) |
+                           FW_EQ_CTRL_CMD_PFN_V(adap->pf) |
                            FW_EQ_CTRL_CMD_VFN_V(0));
        c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC_F |
                                 FW_EQ_CTRL_CMD_EQSTART_F | FW_LEN16(c));
        c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID_V(cmplqid));
        c.physeqid_pkd = htonl(0);
-       c.fetchszm_to_iqid = htonl(FW_EQ_CTRL_CMD_HOSTFCMODE_V(2) |
-                                  FW_EQ_CTRL_CMD_PCIECHN_V(pi->tx_chan) |
-                                  FW_EQ_CTRL_CMD_FETCHRO_F |
-                                  FW_EQ_CTRL_CMD_IQID_V(iqid));
-       c.dcaen_to_eqsize = htonl(FW_EQ_CTRL_CMD_FBMIN_V(2) |
-                                 FW_EQ_CTRL_CMD_FBMAX_V(3) |
-                                 FW_EQ_CTRL_CMD_CIDXFTHRESH_V(5) |
-                                 FW_EQ_CTRL_CMD_EQSIZE_V(nentries));
+       c.fetchszm_to_iqid =
+               htonl(FW_EQ_CTRL_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
+                     FW_EQ_CTRL_CMD_PCIECHN_V(pi->tx_chan) |
+                     FW_EQ_CTRL_CMD_FETCHRO_F | FW_EQ_CTRL_CMD_IQID_V(iqid));
+       c.dcaen_to_eqsize =
+               htonl(FW_EQ_CTRL_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) |
+                     FW_EQ_CTRL_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
+                     FW_EQ_CTRL_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
+                     FW_EQ_CTRL_CMD_EQSIZE_V(nentries));
        c.eqaddr = cpu_to_be64(txq->q.phys_addr);
 
-       ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
+       ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
        if (ret) {
                dma_free_coherent(adap->pdev_dev,
                                  nentries * sizeof(struct tx_desc),
@@ -2697,21 +2731,22 @@ int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
        memset(&c, 0, sizeof(c));
        c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST_F |
                            FW_CMD_WRITE_F | FW_CMD_EXEC_F |
-                           FW_EQ_OFLD_CMD_PFN_V(adap->fn) |
+                           FW_EQ_OFLD_CMD_PFN_V(adap->pf) |
                            FW_EQ_OFLD_CMD_VFN_V(0));
        c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC_F |
                                 FW_EQ_OFLD_CMD_EQSTART_F | FW_LEN16(c));
-       c.fetchszm_to_iqid = htonl(FW_EQ_OFLD_CMD_HOSTFCMODE_V(2) |
-                                  FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) |
-                                  FW_EQ_OFLD_CMD_FETCHRO_F |
-                                  FW_EQ_OFLD_CMD_IQID_V(iqid));
-       c.dcaen_to_eqsize = htonl(FW_EQ_OFLD_CMD_FBMIN_V(2) |
-                                 FW_EQ_OFLD_CMD_FBMAX_V(3) |
-                                 FW_EQ_OFLD_CMD_CIDXFTHRESH_V(5) |
-                                 FW_EQ_OFLD_CMD_EQSIZE_V(nentries));
+       c.fetchszm_to_iqid =
+               htonl(FW_EQ_OFLD_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
+                     FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) |
+                     FW_EQ_OFLD_CMD_FETCHRO_F | FW_EQ_OFLD_CMD_IQID_V(iqid));
+       c.dcaen_to_eqsize =
+               htonl(FW_EQ_OFLD_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) |
+                     FW_EQ_OFLD_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
+                     FW_EQ_OFLD_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
+                     FW_EQ_OFLD_CMD_EQSIZE_V(nentries));
        c.eqaddr = cpu_to_be64(txq->q.phys_addr);
 
-       ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
+       ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
        if (ret) {
                kfree(txq->q.sdesc);
                txq->q.sdesc = NULL;
@@ -2750,7 +2785,7 @@ static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
        unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;
 
        adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL;
-       t4_iq_free(adap, adap->fn, adap->fn, 0, FW_IQ_TYPE_FL_INT_CAP,
+       t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
                   rq->cntxt_id, fl_id, 0xffff);
        dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
                          rq->desc, rq->phys_addr);
@@ -2805,7 +2840,7 @@ void t4_free_sge_resources(struct adapter *adap)
                        free_rspq_fl(adap, &eq->rspq,
                                     eq->fl.size ? &eq->fl : NULL);
                if (etq->q.desc) {
-                       t4_eth_eq_free(adap, adap->fn, adap->fn, 0,
+                       t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
                                       etq->q.cntxt_id);
                        free_tx_desc(adap, &etq->q, etq->q.in_use, true);
                        kfree(etq->q.sdesc);
@@ -2824,7 +2859,7 @@ void t4_free_sge_resources(struct adapter *adap)
 
                if (q->q.desc) {
                        tasklet_kill(&q->qresume_tsk);
-                       t4_ofld_eq_free(adap, adap->fn, adap->fn, 0,
+                       t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0,
                                        q->q.cntxt_id);
                        free_tx_desc(adap, &q->q, q->q.in_use, false);
                        kfree(q->q.sdesc);
@@ -2839,7 +2874,7 @@ void t4_free_sge_resources(struct adapter *adap)
 
                if (cq->q.desc) {
                        tasklet_kill(&cq->qresume_tsk);
-                       t4_ctrl_eq_free(adap, adap->fn, adap->fn, 0,
+                       t4_ctrl_eq_free(adap, adap->mbox, adap->pf, 0,
                                        cq->q.cntxt_id);
                        __skb_queue_purge(&cq->sendq);
                        free_txq(adap, &cq->q);
@@ -3023,7 +3058,11 @@ int t4_sge_init(struct adapter *adap)
         * Packing Boundary.  T5 introduced the ability to specify these
         * separately.  The actual Ingress Packet Data alignment boundary
         * within Packed Buffer Mode is the maximum of these two
-        * specifications.
+        * specifications.  (Note that it makes no real practical sense to
+        * have the Padding Boundary be larger than the Packing Boundary but
+        * you could set the chip up that way and, in fact, legacy T4 code
+        * would end up doing this because it would initialize the Padding
+        * Boundary and leave the Packing Boundary initialized to 0 (16 bytes).)
         */
        ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) +
                               INGPADBOUNDARY_SHIFT_X);
@@ -3067,11 +3106,14 @@ int t4_sge_init(struct adapter *adap)
                egress_threshold = EGRTHRESHOLDPACKING_G(sge_conm_ctrl);
        s->fl_starve_thres = 2*egress_threshold + 1;
 
+       t4_idma_monitor_init(adap, &s->idma_monitor);
+
+       /* Set up timers used for recurring callbacks to process RX and TX
+        * administrative tasks.
+        */
        setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
        setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap);
-       s->idma_1s_thresh = core_ticks_per_usec(adap) * 1000000;  /* 1 s */
-       s->idma_stalled[0] = 0;
-       s->idma_stalled[1] = 0;
+
        spin_lock_init(&s->intrq_lock);
 
        return 0;
index e8578a742f2a29b14a2eaec01216a8e47a68e12a..fdda0f8c5a190555c133832895c95842e735b7be 100644 (file)
@@ -150,7 +150,12 @@ void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
  */
 void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
 {
-       u32 req = ENABLE_F | FUNCTION_V(adap->fn) | REGISTER_V(reg);
+       u32 req = FUNCTION_V(adap->pf) | REGISTER_V(reg);
+
+       if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
+               req |= ENABLE_F;
+       else
+               req |= T6_ENABLE_F;
 
        if (is_t4(adap->params.chip))
                req |= LOCALCFG_F;
@@ -214,8 +219,8 @@ static void fw_asrt(struct adapter *adap, u32 mbox_addr)
        get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
        dev_alert(adap->pdev_dev,
                  "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
-                 asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
-                 ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
+                 asrt.u.assert.filename_0_7, be32_to_cpu(asrt.u.assert.line),
+                 be32_to_cpu(asrt.u.assert.x), be32_to_cpu(asrt.u.assert.y));
 }
 
 static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
@@ -233,13 +238,14 @@ static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
 }
 
 /**
- *     t4_wr_mbox_meat - send a command to FW through the given mailbox
+ *     t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
  *     @adap: the adapter
  *     @mbox: index of the mailbox to use
  *     @cmd: the command to write
  *     @size: command length in bytes
  *     @rpl: where to optionally store the reply
  *     @sleep_ok: if true we may sleep while awaiting command completion
+ *     @timeout: time to wait for command to finish before timing out
  *
  *     Sends the given command to FW through the selected mailbox and waits
  *     for the FW to execute the command.  If @rpl is not %NULL it is used to
@@ -254,8 +260,8 @@ static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
  *     command or FW executes it but signals an error.  In the latter case
  *     the return value is the error code indicated by FW (negated).
  */
-int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
-                   void *rpl, bool sleep_ok)
+int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
+                           int size, void *rpl, bool sleep_ok, int timeout)
 {
        static const int delay[] = {
                1, 1, 3, 5, 10, 10, 20, 50, 100, 200
@@ -294,7 +300,7 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
        delay_idx = 0;
        ms = delay[0];
 
-       for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
+       for (i = 0; i < timeout; i += ms) {
                if (sleep_ok) {
                        ms = delay[delay_idx];  /* last element may repeat */
                        if (delay_idx < ARRAY_SIZE(delay) - 1)
@@ -332,114 +338,11 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
        return -ETIMEDOUT;
 }
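
The polling loop above waits with delays drawn from the static delay[] table,
so the wait backs off from 1 ms up to a capped 200 ms while still honoring the
caller-supplied timeout. A userspace sketch of the same capped-backoff pattern
(backoff_poll() and check() are illustrative names, not driver API):

    #include <stdbool.h>
    #include <stddef.h>
    #include <unistd.h>

    static const int delay_ms[] = { 1, 1, 3, 5, 10, 10, 20, 50, 100, 200 };

    /* Poll check() until it succeeds or roughly timeout_ms has elapsed. */
    static bool backoff_poll(bool (*check)(void), int timeout_ms)
    {
            size_t idx = 0;
            int elapsed, ms;

            for (elapsed = 0, ms = delay_ms[0]; elapsed < timeout_ms;
                 elapsed += ms) {
                    ms = delay_ms[idx];             /* last element repeats */
                    if (idx < sizeof(delay_ms) / sizeof(delay_ms[0]) - 1)
                            idx++;
                    usleep(ms * 1000);
                    if (check())
                            return true;
            }
            return false;                           /* timed out */
    }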
 
-/**
- *     t4_mc_read - read from MC through backdoor accesses
- *     @adap: the adapter
- *     @addr: address of first byte requested
- *     @idx: which MC to access
- *     @data: 64 bytes of data containing the requested address
- *     @ecc: where to store the corresponding 64-bit ECC word
- *
- *     Read 64 bytes of data from MC starting at a 64-byte-aligned address
- *     that covers the requested address @addr.  If @parity is not %NULL it
- *     is assigned the 64-bit ECC word for the read data.
- */
-int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
-{
-       int i;
-       u32 mc_bist_cmd, mc_bist_cmd_addr, mc_bist_cmd_len;
-       u32 mc_bist_status_rdata, mc_bist_data_pattern;
-
-       if (is_t4(adap->params.chip)) {
-               mc_bist_cmd = MC_BIST_CMD_A;
-               mc_bist_cmd_addr = MC_BIST_CMD_ADDR_A;
-               mc_bist_cmd_len = MC_BIST_CMD_LEN_A;
-               mc_bist_status_rdata = MC_BIST_STATUS_RDATA_A;
-               mc_bist_data_pattern = MC_BIST_DATA_PATTERN_A;
-       } else {
-               mc_bist_cmd = MC_REG(MC_P_BIST_CMD_A, idx);
-               mc_bist_cmd_addr = MC_REG(MC_P_BIST_CMD_ADDR_A, idx);
-               mc_bist_cmd_len = MC_REG(MC_P_BIST_CMD_LEN_A, idx);
-               mc_bist_status_rdata = MC_REG(MC_P_BIST_STATUS_RDATA_A, idx);
-               mc_bist_data_pattern = MC_REG(MC_P_BIST_DATA_PATTERN_A, idx);
-       }
-
-       if (t4_read_reg(adap, mc_bist_cmd) & START_BIST_F)
-               return -EBUSY;
-       t4_write_reg(adap, mc_bist_cmd_addr, addr & ~0x3fU);
-       t4_write_reg(adap, mc_bist_cmd_len, 64);
-       t4_write_reg(adap, mc_bist_data_pattern, 0xc);
-       t4_write_reg(adap, mc_bist_cmd, BIST_OPCODE_V(1) | START_BIST_F |
-                    BIST_CMD_GAP_V(1));
-       i = t4_wait_op_done(adap, mc_bist_cmd, START_BIST_F, 0, 10, 1);
-       if (i)
-               return i;
-
-#define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata, i)
-
-       for (i = 15; i >= 0; i--)
-               *data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
-       if (ecc)
-               *ecc = t4_read_reg64(adap, MC_DATA(16));
-#undef MC_DATA
-       return 0;
-}
-
-/**
- *     t4_edc_read - read from EDC through backdoor accesses
- *     @adap: the adapter
- *     @idx: which EDC to access
- *     @addr: address of first byte requested
- *     @data: 64 bytes of data containing the requested address
- *     @ecc: where to store the corresponding 64-bit ECC word
- *
- *     Read 64 bytes of data from EDC starting at a 64-byte-aligned address
- *     that covers the requested address @addr.  If @parity is not %NULL it
- *     is assigned the 64-bit ECC word for the read data.
- */
-int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
+int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
+                   void *rpl, bool sleep_ok)
 {
-       int i;
-       u32 edc_bist_cmd, edc_bist_cmd_addr, edc_bist_cmd_len;
-       u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata;
-
-       if (is_t4(adap->params.chip)) {
-               edc_bist_cmd = EDC_REG(EDC_BIST_CMD_A, idx);
-               edc_bist_cmd_addr = EDC_REG(EDC_BIST_CMD_ADDR_A, idx);
-               edc_bist_cmd_len = EDC_REG(EDC_BIST_CMD_LEN_A, idx);
-               edc_bist_cmd_data_pattern = EDC_REG(EDC_BIST_DATA_PATTERN_A,
-                                                   idx);
-               edc_bist_status_rdata = EDC_REG(EDC_BIST_STATUS_RDATA_A,
-                                               idx);
-       } else {
-               edc_bist_cmd = EDC_REG_T5(EDC_H_BIST_CMD_A, idx);
-               edc_bist_cmd_addr = EDC_REG_T5(EDC_H_BIST_CMD_ADDR_A, idx);
-               edc_bist_cmd_len = EDC_REG_T5(EDC_H_BIST_CMD_LEN_A, idx);
-               edc_bist_cmd_data_pattern =
-                       EDC_REG_T5(EDC_H_BIST_DATA_PATTERN_A, idx);
-               edc_bist_status_rdata =
-                        EDC_REG_T5(EDC_H_BIST_STATUS_RDATA_A, idx);
-       }
-
-       if (t4_read_reg(adap, edc_bist_cmd) & START_BIST_F)
-               return -EBUSY;
-       t4_write_reg(adap, edc_bist_cmd_addr, addr & ~0x3fU);
-       t4_write_reg(adap, edc_bist_cmd_len, 64);
-       t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
-       t4_write_reg(adap, edc_bist_cmd,
-                    BIST_OPCODE_V(1) | BIST_CMD_GAP_V(1) | START_BIST_F);
-       i = t4_wait_op_done(adap, edc_bist_cmd, START_BIST_F, 0, 10, 1);
-       if (i)
-               return i;
-
-#define EDC_DATA(i) (EDC_BIST_STATUS_REG(edc_bist_status_rdata, i))
-
-       for (i = 15; i >= 0; i--)
-               *data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
-       if (ecc)
-               *ecc = t4_read_reg64(adap, EDC_DATA(16));
-#undef EDC_DATA
-       return 0;
+       return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, sleep_ok,
+                                      FW_CMD_MAX_TIMEOUT);
 }
 
 /**
@@ -483,9 +386,8 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
        /* Offset into the region of memory which is being accessed
         * MEM_EDC0 = 0
         * MEM_EDC1 = 1
-        * MEM_MC   = 2 -- T4
-        * MEM_MC0  = 2 -- For T5
-        * MEM_MC1  = 3 -- For T5
+        * MEM_MC   = 2 -- MEM_MC for chips with only 1 memory controller
+        * MEM_MC1  = 3 -- for chips with 2 memory controllers (e.g. T5)
         */
        edc_size  = EDRAM0_SIZE_G(t4_read_reg(adap, MA_EDRAM0_BAR_A));
        if (mtype != MEM_MC1)
@@ -514,7 +416,7 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
        mem_base = PCIEOFST_G(mem_reg) << PCIEOFST_SHIFT_X;
        if (is_t4(adap->params.chip))
                mem_base -= adap->t4_bar0;
-       win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->fn);
+       win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->pf);
 
        /* Calculate our initial PCI-E Memory Window Position and Offset into
         * that Window.
@@ -625,6 +527,102 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
        return 0;
 }
 
+/* Return the specified PCI-E Configuration Space register from our Physical
+ * Function.  We try first via a Firmware LDST Command since we prefer to let
+ * the firmware own all of these registers, but if that fails we go for it
+ * directly ourselves.
+ */
+u32 t4_read_pcie_cfg4(struct adapter *adap, int reg)
+{
+       u32 val, ldst_addrspace;
+
+       /* If fw_attach != 0, construct and send the Firmware LDST Command to
+        * retrieve the specified PCI-E Configuration Space register.
+        */
+       struct fw_ldst_cmd ldst_cmd;
+       int ret;
+
+       memset(&ldst_cmd, 0, sizeof(ldst_cmd));
+       ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FUNC_PCIE);
+       ldst_cmd.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
+                                              FW_CMD_REQUEST_F |
+                                              FW_CMD_READ_F |
+                                              ldst_addrspace);
+       ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
+       ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS_V(1);
+       ldst_cmd.u.pcie.ctrl_to_fn =
+               (FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(adap->pf));
+       ldst_cmd.u.pcie.r = reg;
+
+       /* If the LDST Command succeeds, return the result, otherwise
+        * fall through to reading it directly ourselves ...
+        */
+       ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
+                        &ldst_cmd);
+       if (ret == 0)
+               val = be32_to_cpu(ldst_cmd.u.pcie.data[0]);
+       else
+               /* Read the desired Configuration Space register via the PCI-E
+                * Backdoor mechanism.
+                */
+               t4_hw_pci_read_cfg4(adap, reg, &val);
+       return val;
+}
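
The access strategy here generalizes: prefer the firmware-mediated path so the
firmware keeps ownership of the registers, and only drop to the hardware
backdoor when the mailbox command fails. A self-contained sketch of that
fallback shape; fw_read_cfg4() and backdoor_read_cfg4() are hypothetical
stand-ins, not driver API:

    #include <stdint.h>

    /* Stand-in for the LDST mailbox command; returns 0 on success. */
    static int fw_read_cfg4(int reg, uint32_t *val)
    {
            (void)reg;
            (void)val;
            return -1;              /* pretend the firmware path failed */
    }

    /* Stand-in for the direct backdoor read, which cannot fail. */
    static uint32_t backdoor_read_cfg4(int reg)
    {
            (void)reg;
            return 0;
    }

    static uint32_t read_cfg4(int reg)
    {
            uint32_t val;

            if (fw_read_cfg4(reg, &val) == 0)
                    return val;     /* firmware owns the register */
            return backdoor_read_cfg4(reg);
    }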
+
+/* Get the window position based on the base address passed to it.
+ * The window aperture is currently unhandled, but there is no use case
+ * for it right now.
+ */
+static u32 t4_get_window(struct adapter *adap, u32 pci_base, u64 pci_mask,
+                        u32 memwin_base)
+{
+       u32 ret;
+
+       if (is_t4(adap->params.chip)) {
+               u32 bar0;
+
+               /* Truncation intentional: we only read the bottom 32-bits of
+                * the 64-bit BAR0/BAR1 ...  We use the hardware backdoor
+                * mechanism to read BAR0 instead of using
+                * pci_resource_start() because we could be operating from
+                * within a Virtual Machine which is trapping our accesses to
+                * our Configuration Space and we need to set up the PCI-E
+                * Memory Window decoders with the actual addresses which will
+                * be coming across the PCI-E link.
+                */
+               bar0 = t4_read_pcie_cfg4(adap, pci_base);
+               bar0 &= pci_mask;
+               adap->t4_bar0 = bar0;
+
+               ret = bar0 + memwin_base;
+       } else {
+               /* For T5, only relative offset inside the PCIe BAR is passed */
+               ret = memwin_base;
+       }
+       return ret;
+}
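
The distinction being handled: T4 window decoders are programmed with absolute
PCI-E bus addresses, so the (masked) BAR0 value must be folded in, while T5 and
later take an offset relative to the BAR. A sketch of just that computation,
assuming bar0 already has PCI_BASE_ADDRESS_MEM_MASK applied:

    #include <stdbool.h>
    #include <stdint.h>

    static uint32_t window_position(bool chip_is_t4, uint32_t bar0,
                                    uint32_t memwin_base)
    {
            /* T4: absolute bus address; T5+: offset within the BAR. */
            return chip_is_t4 ? bar0 + memwin_base : memwin_base;
    }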
+
+/* Get the default utility window (win0) used by everyone */
+u32 t4_get_util_window(struct adapter *adap)
+{
+       return t4_get_window(adap, PCI_BASE_ADDRESS_0,
+                            PCI_BASE_ADDRESS_MEM_MASK, MEMWIN0_BASE);
+}
+
+/* Set up memory window for accessing adapter memory ranges.  (Read
+ * back MA register to ensure that changes propagate before we attempt
+ * to use the new values.)
+ */
+void t4_setup_memwin(struct adapter *adap, u32 memwin_base, u32 window)
+{
+       t4_write_reg(adap,
+                    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, window),
+                    memwin_base | BIR_V(0) |
+                    WINDOW_V(ilog2(MEMWIN0_APERTURE) - WINDOW_SHIFT_X));
+       t4_read_reg(adap,
+                   PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, window));
+}
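
The trailing t4_read_reg() is the standard MMIO flush idiom: PCI writes are
posted, so reading the register back guarantees the new window setting has
reached the device before anything relies on it. A generic sketch of the
idiom:

    #include <stdint.h>

    static void write_then_flush(volatile uint32_t *reg, uint32_t val)
    {
            *reg = val;     /* posted write may still be buffered */
            (void)*reg;     /* read-back forces it out to the device */
    }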
+
 /**
  *     t4_get_regs_len - return the size of the chips register set
  *     @adapter: the adapter
@@ -640,6 +638,7 @@ unsigned int t4_get_regs_len(struct adapter *adapter)
                return T4_REGMAP_SIZE;
 
        case CHELSIO_T5:
+       case CHELSIO_T6:
                return T5_REGMAP_SIZE;
        }
 
@@ -666,7 +665,8 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
                0x11fc, 0x123c,
                0x1300, 0x173c,
                0x1800, 0x18fc,
-               0x3000, 0x30d8,
+               0x3000, 0x305c,
+               0x3068, 0x30d8,
                0x30e0, 0x5924,
                0x5960, 0x59d4,
                0x5a00, 0x5af8,
@@ -729,7 +729,7 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
                0x19238, 0x1924c,
                0x193f8, 0x19474,
                0x19490, 0x194f8,
-               0x19800, 0x19f30,
+               0x19800, 0x19f4c,
                0x1a000, 0x1a06c,
                0x1a0b0, 0x1a120,
                0x1a128, 0x1a138,
@@ -878,7 +878,7 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
                0x27780, 0x2778c,
                0x27800, 0x27c38,
                0x27c80, 0x27d7c,
-               0x27e00, 0x27e04
+               0x27e00, 0x27e04,
        };
 
        static const unsigned int t5_reg_ranges[] = {
@@ -888,7 +888,7 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
                0x1280, 0x173c,
                0x1800, 0x18fc,
                0x3000, 0x3028,
-               0x3060, 0x30d8,
+               0x3068, 0x30d8,
                0x30e0, 0x30fc,
                0x3140, 0x357c,
                0x35a8, 0x35cc,
@@ -900,7 +900,7 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
                0x5940, 0x59dc,
                0x59fc, 0x5a18,
                0x5a60, 0x5a9c,
-               0x5b9c, 0x5bfc,
+               0x5b94, 0x5bfc,
                0x6000, 0x6040,
                0x6058, 0x614c,
                0x7700, 0x7798,
@@ -1014,27 +1014,30 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
                0x30800, 0x30834,
                0x308c0, 0x30908,
                0x30910, 0x309ac,
-               0x30a00, 0x30a04,
-               0x30a0c, 0x30a2c,
+               0x30a00, 0x30a2c,
                0x30a44, 0x30a50,
                0x30a74, 0x30c24,
+               0x30d00, 0x30d00,
                0x30d08, 0x30d14,
                0x30d1c, 0x30d20,
                0x30d3c, 0x30d50,
                0x31200, 0x3120c,
                0x31220, 0x31220,
                0x31240, 0x31240,
-               0x31600, 0x31600,
-               0x31608, 0x3160c,
+               0x31600, 0x3160c,
                0x31a00, 0x31a1c,
-               0x31e04, 0x31e20,
+               0x31e00, 0x31e20,
                0x31e38, 0x31e3c,
                0x31e80, 0x31e80,
                0x31e88, 0x31ea8,
                0x31eb0, 0x31eb4,
                0x31ec8, 0x31ed4,
                0x31fb8, 0x32004,
-               0x32208, 0x3223c,
+               0x32200, 0x32200,
+               0x32208, 0x32240,
+               0x32248, 0x32280,
+               0x32288, 0x322c0,
+               0x322c8, 0x322fc,
                0x32600, 0x32630,
                0x32a00, 0x32abc,
                0x32b00, 0x32b70,
@@ -1074,27 +1077,30 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
                0x34800, 0x34834,
                0x348c0, 0x34908,
                0x34910, 0x349ac,
-               0x34a00, 0x34a04,
-               0x34a0c, 0x34a2c,
+               0x34a00, 0x34a2c,
                0x34a44, 0x34a50,
                0x34a74, 0x34c24,
+               0x34d00, 0x34d00,
                0x34d08, 0x34d14,
                0x34d1c, 0x34d20,
                0x34d3c, 0x34d50,
                0x35200, 0x3520c,
                0x35220, 0x35220,
                0x35240, 0x35240,
-               0x35600, 0x35600,
-               0x35608, 0x3560c,
+               0x35600, 0x3560c,
                0x35a00, 0x35a1c,
-               0x35e04, 0x35e20,
+               0x35e00, 0x35e20,
                0x35e38, 0x35e3c,
                0x35e80, 0x35e80,
                0x35e88, 0x35ea8,
                0x35eb0, 0x35eb4,
                0x35ec8, 0x35ed4,
                0x35fb8, 0x36004,
-               0x36208, 0x3623c,
+               0x36200, 0x36200,
+               0x36208, 0x36240,
+               0x36248, 0x36280,
+               0x36288, 0x362c0,
+               0x362c8, 0x362fc,
                0x36600, 0x36630,
                0x36a00, 0x36abc,
                0x36b00, 0x36b70,
@@ -1134,27 +1140,30 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
                0x38800, 0x38834,
                0x388c0, 0x38908,
                0x38910, 0x389ac,
-               0x38a00, 0x38a04,
-               0x38a0c, 0x38a2c,
+               0x38a00, 0x38a2c,
                0x38a44, 0x38a50,
                0x38a74, 0x38c24,
+               0x38d00, 0x38d00,
                0x38d08, 0x38d14,
                0x38d1c, 0x38d20,
                0x38d3c, 0x38d50,
                0x39200, 0x3920c,
                0x39220, 0x39220,
                0x39240, 0x39240,
-               0x39600, 0x39600,
-               0x39608, 0x3960c,
+               0x39600, 0x3960c,
                0x39a00, 0x39a1c,
-               0x39e04, 0x39e20,
+               0x39e00, 0x39e20,
                0x39e38, 0x39e3c,
                0x39e80, 0x39e80,
                0x39e88, 0x39ea8,
                0x39eb0, 0x39eb4,
                0x39ec8, 0x39ed4,
                0x39fb8, 0x3a004,
-               0x3a208, 0x3a23c,
+               0x3a200, 0x3a200,
+               0x3a208, 0x3a240,
+               0x3a248, 0x3a280,
+               0x3a288, 0x3a2c0,
+               0x3a2c8, 0x3a2fc,
                0x3a600, 0x3a630,
                0x3aa00, 0x3aabc,
                0x3ab00, 0x3ab70,
@@ -1194,27 +1203,30 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
                0x3c800, 0x3c834,
                0x3c8c0, 0x3c908,
                0x3c910, 0x3c9ac,
-               0x3ca00, 0x3ca04,
-               0x3ca0c, 0x3ca2c,
+               0x3ca00, 0x3ca2c,
                0x3ca44, 0x3ca50,
                0x3ca74, 0x3cc24,
+               0x3cd00, 0x3cd00,
                0x3cd08, 0x3cd14,
                0x3cd1c, 0x3cd20,
                0x3cd3c, 0x3cd50,
                0x3d200, 0x3d20c,
                0x3d220, 0x3d220,
                0x3d240, 0x3d240,
-               0x3d600, 0x3d600,
-               0x3d608, 0x3d60c,
+               0x3d600, 0x3d60c,
                0x3da00, 0x3da1c,
-               0x3de04, 0x3de20,
+               0x3de00, 0x3de20,
                0x3de38, 0x3de3c,
                0x3de80, 0x3de80,
                0x3de88, 0x3dea8,
                0x3deb0, 0x3deb4,
                0x3dec8, 0x3ded4,
                0x3dfb8, 0x3e004,
-               0x3e208, 0x3e23c,
+               0x3e200, 0x3e200,
+               0x3e208, 0x3e240,
+               0x3e248, 0x3e280,
+               0x3e288, 0x3e2c0,
+               0x3e2c8, 0x3e2fc,
                0x3e600, 0x3e630,
                0x3ea00, 0x3eabc,
                0x3eb00, 0x3eb70,
@@ -1247,7 +1259,7 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
                0x3fcf0, 0x3fcfc,
                0x40000, 0x4000c,
                0x40040, 0x40068,
-               0x40080, 0x40144,
+               0x4007c, 0x40144,
                0x40180, 0x4018c,
                0x40200, 0x40298,
                0x402ac, 0x4033c,
@@ -1275,7 +1287,7 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
                0x47800, 0x47814,
                0x48000, 0x4800c,
                0x48040, 0x48068,
-               0x48080, 0x48144,
+               0x4807c, 0x48144,
                0x48180, 0x4818c,
                0x48200, 0x48298,
                0x482ac, 0x4833c,
@@ -1309,6 +1321,344 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
                0x51300, 0x51308,
        };
 
+       static const unsigned int t6_reg_ranges[] = {
+               0x1008, 0x114c,
+               0x1180, 0x11b4,
+               0x11fc, 0x1250,
+               0x1280, 0x133c,
+               0x1800, 0x18fc,
+               0x3000, 0x302c,
+               0x3060, 0x30d8,
+               0x30e0, 0x30fc,
+               0x3140, 0x357c,
+               0x35a8, 0x35cc,
+               0x35ec, 0x35ec,
+               0x3600, 0x5624,
+               0x56cc, 0x575c,
+               0x580c, 0x5814,
+               0x5890, 0x58bc,
+               0x5940, 0x595c,
+               0x5980, 0x598c,
+               0x59b0, 0x59dc,
+               0x59fc, 0x5a18,
+               0x5a60, 0x5a6c,
+               0x5a80, 0x5a9c,
+               0x5b94, 0x5bfc,
+               0x5c10, 0x5ec0,
+               0x5ec8, 0x5ec8,
+               0x6000, 0x6040,
+               0x6058, 0x6154,
+               0x7700, 0x7798,
+               0x77c0, 0x7880,
+               0x78cc, 0x78fc,
+               0x7b00, 0x7c54,
+               0x7d00, 0x7efc,
+               0x8dc0, 0x8de0,
+               0x8df8, 0x8e84,
+               0x8ea0, 0x8f88,
+               0x8fb8, 0x911c,
+               0x9400, 0x9470,
+               0x9600, 0x971c,
+               0x9800, 0x9808,
+               0x9820, 0x983c,
+               0x9850, 0x9864,
+               0x9c00, 0x9c6c,
+               0x9c80, 0x9cec,
+               0x9d00, 0x9d6c,
+               0x9d80, 0x9dec,
+               0x9e00, 0x9e6c,
+               0x9e80, 0x9eec,
+               0x9f00, 0x9f6c,
+               0x9f80, 0xa020,
+               0xd004, 0xd03c,
+               0xdfc0, 0xdfe0,
+               0xe000, 0xf008,
+               0x11000, 0x11014,
+               0x11048, 0x11110,
+               0x11118, 0x1117c,
+               0x11190, 0x11260,
+               0x11300, 0x1130c,
+               0x12000, 0x1205c,
+               0x19040, 0x1906c,
+               0x19078, 0x19080,
+               0x1908c, 0x19124,
+               0x19150, 0x191b0,
+               0x191d0, 0x191e8,
+               0x19238, 0x192b8,
+               0x193f8, 0x19474,
+               0x19490, 0x194cc,
+               0x194f0, 0x194f8,
+               0x19c00, 0x19c80,
+               0x19c94, 0x19cbc,
+               0x19ce4, 0x19d28,
+               0x19d50, 0x19d78,
+               0x19d94, 0x19dc8,
+               0x19df0, 0x19e10,
+               0x19e50, 0x19e6c,
+               0x19ea0, 0x19f34,
+               0x19f40, 0x19f50,
+               0x19f90, 0x19fac,
+               0x19fc4, 0x19fe4,
+               0x1a000, 0x1a06c,
+               0x1a0b0, 0x1a120,
+               0x1a128, 0x1a138,
+               0x1a190, 0x1a1c4,
+               0x1a1fc, 0x1a1fc,
+               0x1e008, 0x1e00c,
+               0x1e040, 0x1e04c,
+               0x1e284, 0x1e290,
+               0x1e2c0, 0x1e2c0,
+               0x1e2e0, 0x1e2e0,
+               0x1e300, 0x1e384,
+               0x1e3c0, 0x1e3c8,
+               0x1e408, 0x1e40c,
+               0x1e440, 0x1e44c,
+               0x1e684, 0x1e690,
+               0x1e6c0, 0x1e6c0,
+               0x1e6e0, 0x1e6e0,
+               0x1e700, 0x1e784,
+               0x1e7c0, 0x1e7c8,
+               0x1e808, 0x1e80c,
+               0x1e840, 0x1e84c,
+               0x1ea84, 0x1ea90,
+               0x1eac0, 0x1eac0,
+               0x1eae0, 0x1eae0,
+               0x1eb00, 0x1eb84,
+               0x1ebc0, 0x1ebc8,
+               0x1ec08, 0x1ec0c,
+               0x1ec40, 0x1ec4c,
+               0x1ee84, 0x1ee90,
+               0x1eec0, 0x1eec0,
+               0x1eee0, 0x1eee0,
+               0x1ef00, 0x1ef84,
+               0x1efc0, 0x1efc8,
+               0x1f008, 0x1f00c,
+               0x1f040, 0x1f04c,
+               0x1f284, 0x1f290,
+               0x1f2c0, 0x1f2c0,
+               0x1f2e0, 0x1f2e0,
+               0x1f300, 0x1f384,
+               0x1f3c0, 0x1f3c8,
+               0x1f408, 0x1f40c,
+               0x1f440, 0x1f44c,
+               0x1f684, 0x1f690,
+               0x1f6c0, 0x1f6c0,
+               0x1f6e0, 0x1f6e0,
+               0x1f700, 0x1f784,
+               0x1f7c0, 0x1f7c8,
+               0x1f808, 0x1f80c,
+               0x1f840, 0x1f84c,
+               0x1fa84, 0x1fa90,
+               0x1fac0, 0x1fac0,
+               0x1fae0, 0x1fae0,
+               0x1fb00, 0x1fb84,
+               0x1fbc0, 0x1fbc8,
+               0x1fc08, 0x1fc0c,
+               0x1fc40, 0x1fc4c,
+               0x1fe84, 0x1fe90,
+               0x1fec0, 0x1fec0,
+               0x1fee0, 0x1fee0,
+               0x1ff00, 0x1ff84,
+               0x1ffc0, 0x1ffc8,
+               0x30000, 0x30070,
+               0x30100, 0x3015c,
+               0x30190, 0x301d0,
+               0x30200, 0x30318,
+               0x30400, 0x3052c,
+               0x30540, 0x3061c,
+               0x30800, 0x3088c,
+               0x308c0, 0x30908,
+               0x30910, 0x309b8,
+               0x30a00, 0x30a04,
+               0x30a0c, 0x30a2c,
+               0x30a44, 0x30a50,
+               0x30a74, 0x30c24,
+               0x30d00, 0x30d3c,
+               0x30d44, 0x30d7c,
+               0x30de0, 0x30de0,
+               0x30e00, 0x30ed4,
+               0x30f00, 0x30fa4,
+               0x30fc0, 0x30fc4,
+               0x31000, 0x31004,
+               0x31080, 0x310fc,
+               0x31208, 0x31220,
+               0x3123c, 0x31254,
+               0x31300, 0x31300,
+               0x31308, 0x3131c,
+               0x31338, 0x3133c,
+               0x31380, 0x31380,
+               0x31388, 0x313a8,
+               0x313b4, 0x313b4,
+               0x31400, 0x31420,
+               0x31438, 0x3143c,
+               0x31480, 0x31480,
+               0x314a8, 0x314a8,
+               0x314b0, 0x314b4,
+               0x314c8, 0x314d4,
+               0x31a40, 0x31a4c,
+               0x31af0, 0x31b20,
+               0x31b38, 0x31b3c,
+               0x31b80, 0x31b80,
+               0x31ba8, 0x31ba8,
+               0x31bb0, 0x31bb4,
+               0x31bc8, 0x31bd4,
+               0x32140, 0x3218c,
+               0x321f0, 0x32200,
+               0x32218, 0x32218,
+               0x32400, 0x32400,
+               0x32408, 0x3241c,
+               0x32618, 0x32620,
+               0x32664, 0x32664,
+               0x326a8, 0x326a8,
+               0x326ec, 0x326ec,
+               0x32a00, 0x32abc,
+               0x32b00, 0x32b78,
+               0x32c00, 0x32c00,
+               0x32c08, 0x32c3c,
+               0x32e00, 0x32e2c,
+               0x32f00, 0x32f2c,
+               0x33000, 0x330ac,
+               0x330c0, 0x331ac,
+               0x331c0, 0x332c4,
+               0x332e4, 0x333c4,
+               0x333e4, 0x334ac,
+               0x334c0, 0x335ac,
+               0x335c0, 0x336c4,
+               0x336e4, 0x337c4,
+               0x337e4, 0x337fc,
+               0x33814, 0x33814,
+               0x33854, 0x33868,
+               0x33880, 0x3388c,
+               0x338c0, 0x338d0,
+               0x338e8, 0x338ec,
+               0x33900, 0x339ac,
+               0x339c0, 0x33ac4,
+               0x33ae4, 0x33b10,
+               0x33b24, 0x33b50,
+               0x33bf0, 0x33c10,
+               0x33c24, 0x33c50,
+               0x33cf0, 0x33cfc,
+               0x34000, 0x34070,
+               0x34100, 0x3415c,
+               0x34190, 0x341d0,
+               0x34200, 0x34318,
+               0x34400, 0x3452c,
+               0x34540, 0x3461c,
+               0x34800, 0x3488c,
+               0x348c0, 0x34908,
+               0x34910, 0x349b8,
+               0x34a00, 0x34a04,
+               0x34a0c, 0x34a2c,
+               0x34a44, 0x34a50,
+               0x34a74, 0x34c24,
+               0x34d00, 0x34d3c,
+               0x34d44, 0x34d7c,
+               0x34de0, 0x34de0,
+               0x34e00, 0x34ed4,
+               0x34f00, 0x34fa4,
+               0x34fc0, 0x34fc4,
+               0x35000, 0x35004,
+               0x35080, 0x350fc,
+               0x35208, 0x35220,
+               0x3523c, 0x35254,
+               0x35300, 0x35300,
+               0x35308, 0x3531c,
+               0x35338, 0x3533c,
+               0x35380, 0x35380,
+               0x35388, 0x353a8,
+               0x353b4, 0x353b4,
+               0x35400, 0x35420,
+               0x35438, 0x3543c,
+               0x35480, 0x35480,
+               0x354a8, 0x354a8,
+               0x354b0, 0x354b4,
+               0x354c8, 0x354d4,
+               0x35a40, 0x35a4c,
+               0x35af0, 0x35b20,
+               0x35b38, 0x35b3c,
+               0x35b80, 0x35b80,
+               0x35ba8, 0x35ba8,
+               0x35bb0, 0x35bb4,
+               0x35bc8, 0x35bd4,
+               0x36140, 0x3618c,
+               0x361f0, 0x36200,
+               0x36218, 0x36218,
+               0x36400, 0x36400,
+               0x36408, 0x3641c,
+               0x36618, 0x36620,
+               0x36664, 0x36664,
+               0x366a8, 0x366a8,
+               0x366ec, 0x366ec,
+               0x36a00, 0x36abc,
+               0x36b00, 0x36b78,
+               0x36c00, 0x36c00,
+               0x36c08, 0x36c3c,
+               0x36e00, 0x36e2c,
+               0x36f00, 0x36f2c,
+               0x37000, 0x370ac,
+               0x370c0, 0x371ac,
+               0x371c0, 0x372c4,
+               0x372e4, 0x373c4,
+               0x373e4, 0x374ac,
+               0x374c0, 0x375ac,
+               0x375c0, 0x376c4,
+               0x376e4, 0x377c4,
+               0x377e4, 0x377fc,
+               0x37814, 0x37814,
+               0x37854, 0x37868,
+               0x37880, 0x3788c,
+               0x378c0, 0x378d0,
+               0x378e8, 0x378ec,
+               0x37900, 0x379ac,
+               0x379c0, 0x37ac4,
+               0x37ae4, 0x37b10,
+               0x37b24, 0x37b50,
+               0x37bf0, 0x37c10,
+               0x37c24, 0x37c50,
+               0x37cf0, 0x37cfc,
+               0x40040, 0x40040,
+               0x40080, 0x40084,
+               0x40100, 0x40100,
+               0x40140, 0x401bc,
+               0x40200, 0x40214,
+               0x40228, 0x40228,
+               0x40240, 0x40258,
+               0x40280, 0x40280,
+               0x40304, 0x40304,
+               0x40330, 0x4033c,
+               0x41304, 0x413dc,
+               0x41400, 0x4141c,
+               0x41480, 0x414d0,
+               0x44000, 0x4407c,
+               0x440c0, 0x4427c,
+               0x442c0, 0x4447c,
+               0x444c0, 0x4467c,
+               0x446c0, 0x4487c,
+               0x448c0, 0x44a7c,
+               0x44ac0, 0x44c7c,
+               0x44cc0, 0x44e7c,
+               0x44ec0, 0x4507c,
+               0x450c0, 0x451fc,
+               0x45800, 0x45868,
+               0x45880, 0x45884,
+               0x458a0, 0x458b0,
+               0x45a00, 0x45a68,
+               0x45a80, 0x45a84,
+               0x45aa0, 0x45ab0,
+               0x460c0, 0x460e4,
+               0x47000, 0x4708c,
+               0x47200, 0x47250,
+               0x47400, 0x47420,
+               0x47600, 0x47618,
+               0x47800, 0x4782c,
+               0x50000, 0x500cc,
+               0x50400, 0x50400,
+               0x50800, 0x508cc,
+               0x50c00, 0x50c00,
+               0x51000, 0x510b0,
+               0x51300, 0x51324,
+       };
+
        u32 *buf_end = (u32 *)((char *)buf + buf_size);
        const unsigned int *reg_ranges;
        int reg_ranges_size, range;
@@ -1328,6 +1678,11 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
                reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
                break;
 
+       case CHELSIO_T6:
+               reg_ranges = t6_reg_ranges;
+               reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
+               break;
+
        default:
                dev_err(adap->pdev_dev,
                        "Unsupported chip version %d\n", chip_version);
@@ -1374,17 +1729,16 @@ int t4_seeprom_wp(struct adapter *adapter, bool enable)
 }
 
 /**
- *     get_vpd_params - read VPD parameters from VPD EEPROM
+ *     t4_get_raw_vpd_params - read VPD parameters from VPD EEPROM
  *     @adapter: adapter to read
  *     @p: where to store the parameters
  *
  *     Reads card parameters stored in VPD EEPROM.
  */
-int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
+int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
 {
-       u32 cclk_param, cclk_val;
-       int i, ret, addr;
-       int ec, sn, pn;
+       int i, ret = 0, addr;
+       int ec, sn, pn, na;
        u8 *vpd, csum;
        unsigned int vpdr_len, kw_offset, id_len;
 
@@ -1392,6 +1746,9 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
        if (!vpd)
                return -ENOMEM;
 
+       /* Card information normally starts at VPD_BASE but early cards had
+        * it at 0.
+        */
        ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd);
        if (ret < 0)
                goto out;
@@ -1457,6 +1814,7 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
        FIND_VPD_KW(ec, "EC");
        FIND_VPD_KW(sn, "SN");
        FIND_VPD_KW(pn, "PN");
+       FIND_VPD_KW(na, "NA");
 #undef FIND_VPD_KW
 
        memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
@@ -1469,18 +1827,42 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
        i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE);
        memcpy(p->pn, vpd + pn, min(i, PN_LEN));
        strim(p->pn);
+       i = pci_vpd_info_field_size(vpd + na - PCI_VPD_INFO_FLD_HDR_SIZE);
+       memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
+       strim((char *)p->na);
 
-       /*
-        * Ask firmware for the Core Clock since it knows how to translate the
+out:
+       vfree(vpd);
+       return ret;
+}
+
+/**
+ *     t4_get_vpd_params - read VPD parameters & retrieve Core Clock
+ *     @adapter: adapter to read
+ *     @p: where to store the parameters
+ *
+ *     Reads card parameters stored in VPD EEPROM and retrieves the Core
+ *     Clock.  This can only be called after a connection to the firmware
+ *     is established.
+ */
+int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p)
+{
+       u32 cclk_param, cclk_val;
+       int ret;
+
+       /* Grab the raw VPD parameters. */
+       ret = t4_get_raw_vpd_params(adapter, p);
+       if (ret)
+               return ret;
+
+       /* Ask firmware for the Core Clock since it knows how to translate the
         * Reference Clock ('V2') VPD field into a Core Clock value ...
         */
        cclk_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
                      FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK));
-       ret = t4_query_params(adapter, adapter->mbox, 0, 0,
+       ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
                              1, &cclk_param, &cclk_val);
 
-out:
-       vfree(vpd);
        if (ret)
                return ret;
        p->cclk = cclk_val;
@@ -1618,7 +2000,7 @@ int t4_read_flash(struct adapter *adapter, unsigned int addr,
                if (ret)
                        return ret;
                if (byte_oriented)
-                       *data = (__force __u32) (htonl(*data));
+                       *data = (__force __u32)(cpu_to_be32(*data));
        }
        return 0;
 }
@@ -1941,7 +2323,8 @@ static bool t4_fw_matches_chip(const struct adapter *adap,
         * which will keep us "honest" in the future ...
         */
        if ((is_t4(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T4) ||
-           (is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5))
+           (is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5) ||
+           (is_t6(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T6))
                return true;
 
        dev_err(adap->pdev_dev,
@@ -1979,7 +2362,7 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
                        "FW image size not multiple of 512 bytes\n");
                return -EINVAL;
        }
-       if (ntohs(hdr->len512) * 512 != size) {
+       if ((unsigned int)be16_to_cpu(hdr->len512) * 512 != size) {
                dev_err(adap->pdev_dev,
                        "FW image size differs from size in FW header\n");
                return -EINVAL;
@@ -1993,7 +2376,7 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
                return -EINVAL;
 
        for (csum = 0, i = 0; i < size / sizeof(csum); i++)
-               csum += ntohl(p[i]);
+               csum += be32_to_cpu(p[i]);
 
        if (csum != 0xffffffff) {
                dev_err(adap->pdev_dev,
@@ -2012,7 +2395,7 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
         * first page with a bad version.
         */
        memcpy(first_page, fw_data, SF_PAGE_SIZE);
-       ((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
+       ((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
        ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
        if (ret)
                goto out;
@@ -2038,6 +2421,147 @@ out:
        return ret;
 }
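
The integrity rule enforced in t4_load_fw() above: summing every big-endian
32-bit word of a valid image, the header's own checksum field included, must
yield 0xffffffff. A userspace sketch of the check, assuming the image buffer
is 4-byte aligned:

    #include <arpa/inet.h>  /* ntohl() */
    #include <stddef.h>
    #include <stdint.h>

    static int fw_image_csum_ok(const void *img, size_t size)
    {
            const uint32_t *p = img;
            uint32_t csum = 0;
            size_t i;

            for (i = 0; i < size / sizeof(uint32_t); i++)
                    csum += ntohl(p[i]);    /* words are stored big-endian */

            return csum == 0xffffffff;
    }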
 
+/**
+ *     t4_phy_fw_ver - return current PHY firmware version
+ *     @adap: the adapter
+ *     @phy_fw_ver: return value buffer for PHY firmware version
+ *
+ *     Returns the current version of external PHY firmware on the
+ *     adapter.
+ */
+int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver)
+{
+       u32 param, val;
+       int ret;
+
+       param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+                FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
+                FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
+                FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_VERSION));
+       ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
+                             &param, &val);
+       if (ret < 0)
+               return ret;
+       *phy_fw_ver = val;
+       return 0;
+}
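
The parameter id handed to t4_query_params() packs four sub-fields into one
32-bit word via the FW_PARAMS_*_V() macros. A sketch of that composition,
assuming the conventional layout (mnemonic in bits 31:24, then X, Y, Z one
byte each); the offsets are an assumption, not read out of this diff:

    #include <stdint.h>

    static uint32_t fw_param_id(uint32_t mnem, uint32_t x, uint32_t y,
                                uint32_t z)
    {
            return (mnem << 24) | (x << 16) | (y << 8) | z;
    }

In t4_phy_fw_ver() above, Y carries the port vector and Z selects the
PHYFW_VERSION sub-parameter.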
+
+/**
+ *     t4_load_phy_fw - download port PHY firmware
+ *     @adap: the adapter
+ *     @win: the PCI-E Memory Window index to use for t4_memory_rw()
+ *     @win_lock: the lock to use to guard the memory copy
+ *     @phy_fw_version: function to check PHY firmware versions
+ *     @phy_fw_data: the PHY firmware image to write
+ *     @phy_fw_size: image size
+ *
+ *     Transfer the specified PHY firmware to the adapter.  If a non-NULL
+ *     @phy_fw_version is supplied, then it will be used to determine if
+ *     it's necessary to perform the transfer by comparing the version
+ *     of any existing adapter PHY firmware with that of the passed in
+ *     PHY firmware image.  If @win_lock is non-NULL then it will be used
+ *     around the call to t4_memory_rw() which transfers the PHY firmware
+ *     to the adapter.
+ *
+ *     A negative error number will be returned if an error occurs.  If
+ *     version number support is available and there's no need to upgrade
+ *     the firmware, 0 will be returned.  If firmware is successfully
+ *     transferred to the adapter, 1 will be returned.
+ *
+ *     NOTE: some adapters only have local RAM to store the PHY firmware.  As
+ *     a result, a RESET of the adapter would cause that RAM to lose its
+ *     contents.  Thus, loading PHY firmware on such adapters must happen
+ *     after any FW_RESET_CMDs ...
+ */
+int t4_load_phy_fw(struct adapter *adap,
+                  int win, spinlock_t *win_lock,
+                  int (*phy_fw_version)(const u8 *, size_t),
+                  const u8 *phy_fw_data, size_t phy_fw_size)
+{
+       unsigned long mtype = 0, maddr = 0;
+       u32 param, val;
+       int cur_phy_fw_ver = 0, new_phy_fw_vers = 0;
+       int ret;
+
+       /* If we have version number support, then check to see if the adapter
+        * already has up-to-date PHY firmware loaded.
+        */
+       if (phy_fw_version) {
+               new_phy_fw_vers = phy_fw_version(phy_fw_data, phy_fw_size);
+               ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
+               if (ret < 0)
+                       return ret;
+
+               if (cur_phy_fw_ver >= new_phy_fw_vers) {
+                       CH_WARN(adap, "PHY Firmware already up-to-date, "
+                               "version %#x\n", cur_phy_fw_ver);
+                       return 0;
+               }
+       }
+
+       /* Ask the firmware where it wants us to copy the PHY firmware image.
+        * The size of the file requires a special version of the READ command
+        * which will pass the file size via the values field in PARAMS_CMD and
+        * retrieve the return value from the firmware, placed in the same
+        * values buffer.
+        */
+       param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+                FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
+                FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
+                FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
+       val = phy_fw_size;
+       ret = t4_query_params_rw(adap, adap->mbox, adap->pf, 0, 1,
+                                &param, &val, 1);
+       if (ret < 0)
+               return ret;
+       mtype = val >> 8;
+       maddr = (val & 0xff) << 16;
+
+       /* Copy the supplied PHY Firmware image to the adapter memory location
+        * allocated by the adapter firmware.
+        */
+       if (win_lock)
+               spin_lock_bh(win_lock);
+       ret = t4_memory_rw(adap, win, mtype, maddr,
+                          phy_fw_size, (__be32 *)phy_fw_data,
+                          T4_MEMORY_WRITE);
+       if (win_lock)
+               spin_unlock_bh(win_lock);
+       if (ret)
+               return ret;
+
+       /* Tell the firmware that the PHY firmware image has been written to
+        * RAM and it can now start copying it over to the PHYs.  The chip
+        * firmware will RESET the affected PHYs as part of this operation
+        * leaving them running the new PHY firmware image.
+        */
+       param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+                FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
+                FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
+                FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
+       ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
+                                   &param, &val, 30000);
+
+       /* If we have version number support, then check to see that the new
+        * firmware got loaded properly.
+        */
+       if (phy_fw_version) {
+               ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
+               if (ret < 0)
+                       return ret;
+
+               if (cur_phy_fw_ver != new_phy_fw_vers) {
+                       CH_WARN(adap, "PHY Firmware did not update: "
+                               "version on adapter %#x, "
+                               "version flashed %#x\n",
+                               cur_phy_fw_ver, new_phy_fw_vers);
+                       return -ENXIO;
+               }
+       }
+
+       return 1;
+}
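
One detail easy to miss in t4_load_phy_fw(): the firmware answers the DOWNLOAD
query with a single 32-bit value packing the staging location, and the shifts
above imply the memory type sits in the upper bits with the address, in 64 KB
units, in the low byte. A sketch of that decode (the interpretation is
inferred from the code, not from a hardware manual):

    #include <stdint.h>

    static void decode_phyfw_loc(uint32_t val, unsigned long *mtype,
                                 unsigned long *maddr)
    {
            *mtype = val >> 8;              /* memory type selector */
            *maddr = (val & 0xff) << 16;    /* address in 64 KB units */
    }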
+
 /**
  *     t4_fwcache - firmware cache operation
  *     @adap: the adapter
@@ -2051,7 +2575,7 @@ int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
        c.op_to_vfn =
                cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
                            FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
-                           FW_PARAMS_CMD_PFN_V(adap->fn) |
+                           FW_PARAMS_CMD_PFN_V(adap->pf) |
                            FW_PARAMS_CMD_VFN_V(0));
        c.retval_len16 = cpu_to_be32(FW_LEN16(c));
        c.param[0].mnem =
@@ -2082,7 +2606,7 @@ void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
                     FW_PORT_CAP_ANEG)
 
 /**
- *     t4_link_start - apply link configuration to MAC/PHY
+ *     t4_link_l1cfg - apply link configuration to MAC/PHY
  *     @phy: the PHY to setup
  *     @mac: the MAC to setup
  *     @lc: the requested link configuration
@@ -2094,7 +2618,7 @@ void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
  *     - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
  *       otherwise do it later based on the outcome of auto-negotiation.
  */
-int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
+int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
                  struct link_config *lc)
 {
        struct fw_port_cmd c;
@@ -2107,19 +2631,22 @@ int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
                fc |= FW_PORT_CAP_FC_TX;
 
        memset(&c, 0, sizeof(c));
-       c.op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) | FW_CMD_REQUEST_F |
-                              FW_CMD_EXEC_F | FW_PORT_CMD_PORTID_V(port));
-       c.action_to_len16 = htonl(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
-                                 FW_LEN16(c));
+       c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
+                                    FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
+                                    FW_PORT_CMD_PORTID_V(port));
+       c.action_to_len16 =
+               cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
+                           FW_LEN16(c));
 
        if (!(lc->supported & FW_PORT_CAP_ANEG)) {
-               c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
+               c.u.l1cfg.rcap = cpu_to_be32((lc->supported & ADVERT_MASK) |
+                                            fc);
                lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
        } else if (lc->autoneg == AUTONEG_DISABLE) {
-               c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
+               c.u.l1cfg.rcap = cpu_to_be32(lc->requested_speed | fc | mdi);
                lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
        } else
-               c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);
+               c.u.l1cfg.rcap = cpu_to_be32(lc->advertising | fc | mdi);
 
        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 }
@@ -2137,11 +2664,13 @@ int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
        struct fw_port_cmd c;
 
        memset(&c, 0, sizeof(c));
-       c.op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) | FW_CMD_REQUEST_F |
-                              FW_CMD_EXEC_F | FW_PORT_CMD_PORTID_V(port));
-       c.action_to_len16 = htonl(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
-                                 FW_LEN16(c));
-       c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
+       c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
+                                    FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
+                                    FW_PORT_CMD_PORTID_V(port));
+       c.action_to_len16 =
+               cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
+                           FW_LEN16(c));
+       c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG);
        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 }
 
@@ -2335,6 +2864,7 @@ static void tp_intr_handler(struct adapter *adapter)
 static void sge_intr_handler(struct adapter *adapter)
 {
        u64 v;
+       u32 err;
 
        static const struct intr_info sge_intr_info[] = {
                { ERR_CPL_EXCEED_IQE_SIZE_F,
@@ -2343,8 +2873,6 @@ static void sge_intr_handler(struct adapter *adapter)
                  "SGE GTS CIDX increment too large", -1, 0 },
                { ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 },
                { DBFIFO_LP_INT_F, NULL, -1, 0, t4_db_full },
-               { DBFIFO_HP_INT_F, NULL, -1, 0, t4_db_full },
-               { ERR_DROPPED_DB_F, NULL, -1, 0, t4_db_dropped },
                { ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F,
                  "SGE IQID > 1023 received CPL for FL", -1, 0 },
                { ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1,
@@ -2357,10 +2885,16 @@ static void sge_intr_handler(struct adapter *adapter)
                  0 },
                { ERR_ING_CTXT_PRIO_F,
                  "SGE too many priority ingress contexts", -1, 0 },
+               { INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 },
+               { EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 },
+               { 0 }
+       };
+
+       static struct intr_info t4t5_sge_intr_info[] = {
+               { ERR_DROPPED_DB_F, NULL, -1, 0, t4_db_dropped },
+               { DBFIFO_HP_INT_F, NULL, -1, 0, t4_db_full },
                { ERR_EGR_CTXT_PRIO_F,
                  "SGE too many priority egress contexts", -1, 0 },
-               { INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 },
-               { EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 },
                { 0 }
        };
 
@@ -2373,8 +2907,23 @@ static void sge_intr_handler(struct adapter *adapter)
                t4_write_reg(adapter, SGE_INT_CAUSE2_A, v >> 32);
        }
 
-       if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A, sge_intr_info) ||
-           v != 0)
+       v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A, sge_intr_info);
+       if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
+               v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A,
+                                          t4t5_sge_intr_info);
+
+       err = t4_read_reg(adapter, SGE_ERROR_STATS_A);
+       if (err & ERROR_QID_VALID_F) {
+               dev_err(adapter->pdev_dev, "SGE error for queue %u\n",
+                       ERROR_QID_G(err));
+               if (err & UNCAPTURED_ERROR_F)
+                       dev_err(adapter->pdev_dev,
+                               "SGE UNCAPTURED_ERROR set (clearing)\n");
+               t4_write_reg(adapter, SGE_ERROR_STATS_A, ERROR_QID_VALID_F |
+                            UNCAPTURED_ERROR_F);
+       }
+
+       if (v != 0)
                t4_fatal_err(adapter);
 }
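
Splitting the descriptions into a common table and a T4/T5-only table keeps
the decode generic: t4_handle_intr_status() just walks { mask, message }
entries against the cause register. A userspace sketch of that table-driven
decode; struct irq_desc and decode_cause() are illustrative names, not the
driver's types:

    #include <stdint.h>
    #include <stdio.h>

    struct irq_desc {
            uint32_t mask;          /* cause bit(s) this entry covers */
            const char *msg;        /* what to report when they fire */
    };

    /* Walk a zero-terminated table; return nonzero if anything matched. */
    static int decode_cause(uint32_t cause, const struct irq_desc *tbl)
    {
            int hit = 0;

            for (; tbl->mask; tbl++)
                    if (cause & tbl->mask) {
                            puts(tbl->msg);
                            hit = 1;
                    }
            return hit;
    }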
 
@@ -2547,6 +3096,7 @@ static void cplsw_intr_handler(struct adapter *adapter)
  */
 static void le_intr_handler(struct adapter *adap)
 {
+       enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
        static const struct intr_info le_intr_info[] = {
                { LIPMISS_F, "LE LIP miss", -1, 0 },
                { LIP0_F, "LE 0 LIP error", -1, 0 },
@@ -2556,7 +3106,18 @@ static void le_intr_handler(struct adapter *adap)
                { 0 }
        };
 
-       if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE_A, le_intr_info))
+       static struct intr_info t6_le_intr_info[] = {
+               { T6_LIPMISS_F, "LE LIP miss", -1, 0 },
+               { T6_LIP0_F, "LE 0 LIP error", -1, 0 },
+               { TCAMINTPERR_F, "LE parity error", -1, 1 },
+               { T6_UNKNOWNCMD_F, "LE unknown command", -1, 1 },
+               { SSRAMINTPERR_F, "LE request queue parity error", -1, 1 },
+               { 0 }
+       };
+
+       if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE_A,
+                                 (chip <= CHELSIO_T5) ?
+                                 le_intr_info : t6_le_intr_info))
                t4_fatal_err(adap);
 }
 
@@ -2825,7 +3386,7 @@ int t4_slow_intr_handler(struct adapter *adapter)
                pcie_intr_handler(adapter);
        if (cause & MC_F)
                mem_intr_handler(adapter, MEM_MC);
-       if (!is_t4(adapter->params.chip) && (cause & MC1_S))
+       if (is_t5(adapter->params.chip) && (cause & MC1_F))
                mem_intr_handler(adapter, MEM_MC1);
        if (cause & EDC0_F)
                mem_intr_handler(adapter, MEM_EDC0);
@@ -2871,17 +3432,18 @@ int t4_slow_intr_handler(struct adapter *adapter)
  */
 void t4_intr_enable(struct adapter *adapter)
 {
+       u32 val = 0;
        u32 pf = SOURCEPF_G(t4_read_reg(adapter, PL_WHOAMI_A));
 
+       if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
+               val = ERR_DROPPED_DB_F | ERR_EGR_CTXT_PRIO_F | DBFIFO_HP_INT_F;
        t4_write_reg(adapter, SGE_INT_ENABLE3_A, ERR_CPL_EXCEED_IQE_SIZE_F |
                     ERR_INVALID_CIDX_INC_F | ERR_CPL_OPCODE_0_F |
-                    ERR_DROPPED_DB_F | ERR_DATA_CPL_ON_HIGH_QID1_F |
+                    ERR_DATA_CPL_ON_HIGH_QID1_F | INGRESS_SIZE_ERR_F |
                     ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F |
                     ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F |
                     ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F |
-                    ERR_EGR_CTXT_PRIO_F | INGRESS_SIZE_ERR_F |
-                    DBFIFO_HP_INT_F | DBFIFO_LP_INT_F |
-                    EGRESS_SIZE_ERR_F);
+                    DBFIFO_LP_INT_F | EGRESS_SIZE_ERR_F | val);
        t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), PF_INTR_MASK);
        t4_set_reg_field(adapter, PL_INT_MAP0_A, 0, 1 << pf);
 }
@@ -2945,18 +3507,18 @@ int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
        struct fw_rss_ind_tbl_cmd cmd;
 
        memset(&cmd, 0, sizeof(cmd));
-       cmd.op_to_viid = htonl(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) |
+       cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) |
                               FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
                               FW_RSS_IND_TBL_CMD_VIID_V(viid));
-       cmd.retval_len16 = htonl(FW_LEN16(cmd));
+       cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
 
        /* each fw_rss_ind_tbl_cmd takes up to 32 entries */
        while (n > 0) {
                int nq = min(n, 32);
                __be32 *qp = &cmd.iq0_to_iq2;
 
-               cmd.niqid = htons(nq);
-               cmd.startidx = htons(start);
+               cmd.niqid = cpu_to_be16(nq);
+               cmd.startidx = cpu_to_be16(start);
 
                start += nq;
                n -= nq;
@@ -2974,7 +3536,7 @@ int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
                        if (++rsp >= rsp_end)
                                rsp = rspq;
 
-                       *qp++ = htonl(v);
+                       *qp++ = cpu_to_be32(v);
                        nq -= 3;
                }
 
@@ -3000,20 +3562,46 @@ int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
        struct fw_rss_glb_config_cmd c;
 
        memset(&c, 0, sizeof(c));
-       c.op_to_write = htonl(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) |
-                             FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
-       c.retval_len16 = htonl(FW_LEN16(c));
+       c.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) |
+                                   FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
+       c.retval_len16 = cpu_to_be32(FW_LEN16(c));
        if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
-               c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
+               c.u.manual.mode_pkd =
+                       cpu_to_be32(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
        } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
                c.u.basicvirtual.mode_pkd =
-                       htonl(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
-               c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
+                       cpu_to_be32(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
+               c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
        } else
                return -EINVAL;
        return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
 }
 
+/**
+ *     t4_config_vi_rss - configure per VI RSS settings
+ *     @adapter: the adapter
+ *     @mbox: mbox to use for the FW command
+ *     @viid: the VI id
+ *     @flags: RSS flags
+ *     @defq: id of the default RSS queue for the VI.
+ *
+ *     Configures VI-specific RSS properties.
+ */
+int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
+                    unsigned int flags, unsigned int defq)
+{
+       struct fw_rss_vi_config_cmd c;
+
+       memset(&c, 0, sizeof(c));
+       c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
+                                  FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+                                  FW_RSS_VI_CONFIG_CMD_VIID_V(viid));
+       c.retval_len16 = cpu_to_be32(FW_LEN16(c));
+       c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
+                                       FW_RSS_VI_CONFIG_CMD_DEFAULTQ_V(defq));
+       return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
+}
+
 /* Read an RSS table row */
 static int rd_rss_row(struct adapter *adap, int row, u32 *val)
 {
@@ -3044,6 +3632,40 @@ int t4_read_rss(struct adapter *adapter, u16 *map)
        return 0;
 }
 
+/**
+ *     t4_fw_tp_pio_rw - Access TP PIO through LDST
+ *     @adap: the adapter
+ *     @vals: where the indirect register values are stored/written
+ *     @nregs: how many indirect registers to read/write
+ *     @start_index: index of first indirect register to read/write
+ *     @rw: Read (1) or Write (0)
+ *
+ *     Access TP PIO registers through LDST
+ */
+static void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs,
+                           unsigned int start_index, unsigned int rw)
+{
+       int ret, i;
+       int cmd = FW_LDST_ADDRSPC_TP_PIO;
+       struct fw_ldst_cmd c;
+
+       for (i = 0; i < nregs; i++) {
+               memset(&c, 0, sizeof(c));
+               c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
+                                               FW_CMD_REQUEST_F |
+                                               (rw ? FW_CMD_READ_F :
+                                                     FW_CMD_WRITE_F) |
+                                               FW_LDST_CMD_ADDRSPACE_V(cmd));
+               c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
+
+               c.u.addrval.addr = cpu_to_be32(start_index + i);
+               c.u.addrval.val  = rw ? 0 : cpu_to_be32(vals[i]);
+               ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
+               if (!ret && rw)
+                       vals[i] = be32_to_cpu(c.u.addrval.val);
+       }
+}
+
 /**
  *     t4_read_rss_key - read the global RSS key
  *     @adap: the adapter
@@ -3053,8 +3675,11 @@ int t4_read_rss(struct adapter *adapter, u16 *map)
  */
 void t4_read_rss_key(struct adapter *adap, u32 *key)
 {
-       t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
-                        TP_RSS_SECRET_KEY0_A);
+       if (adap->flags & FW_OK)
+               t4_fw_tp_pio_rw(adap, key, 10, TP_RSS_SECRET_KEY0_A, 1);
+       else
+               t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
+                                TP_RSS_SECRET_KEY0_A);
 }
 
 /**
@@ -3069,11 +3694,32 @@ void t4_read_rss_key(struct adapter *adap, u32 *key)
  */
 void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
 {
-       t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
-                         TP_RSS_SECRET_KEY0_A);
-       if (idx >= 0 && idx < 16)
-               t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
-                            KEYWRADDR_V(idx) | KEYWREN_F);
+       u8 rss_key_addr_cnt = 16;
+       u32 vrt = t4_read_reg(adap, TP_RSS_CONFIG_VRT_A);
+
+       /* T6 and later: KeyMode 3 (per-VF and per-VF scramble) allows
+        * access to key addresses 16-63 by using KeyWrAddrX as
+        * index[5:4] (the upper two bits) into the key table.
+        */
+       if ((CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) &&
+           (vrt & KEYEXTEND_F) && (KEYMODE_G(vrt) == 3))
+               rss_key_addr_cnt = 32;
+
+       if (adap->flags & FW_OK)
+               t4_fw_tp_pio_rw(adap, (void *)key, 10, TP_RSS_SECRET_KEY0_A, 0);
+       else
+               t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
+                                 TP_RSS_SECRET_KEY0_A);
+
+       if (idx >= 0 && idx < rss_key_addr_cnt) {
+               if (rss_key_addr_cnt > 16)
+                       t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
+                                    KEYWRADDRX_V(idx >> 4) |
+                                    T6_VFWRADDR_V(idx) | KEYWREN_F);
+               else
+                       t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
+                                    KEYWRADDR_V(idx) | KEYWREN_F);
+       }
 }
 
 /**
@@ -3088,8 +3734,12 @@ void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
                           u32 *valp)
 {
-       t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
-                        valp, 1, TP_RSS_PF0_CONFIG_A + index);
+       if (adapter->flags & FW_OK)
+               t4_fw_tp_pio_rw(adapter, valp, 1,
+                               TP_RSS_PF0_CONFIG_A + index, 1);
+       else
+               t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+                                valp, 1, TP_RSS_PF0_CONFIG_A + index);
 }
 
 /**
@@ -3107,8 +3757,13 @@ void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
 {
        u32 vrt, mask, data;
 
-       mask = VFWRADDR_V(VFWRADDR_M);
-       data = VFWRADDR_V(index);
+       if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) {
+               mask = VFWRADDR_V(VFWRADDR_M);
+               data = VFWRADDR_V(index);
+       } else {
+               mask = T6_VFWRADDR_V(T6_VFWRADDR_M);
+               data = T6_VFWRADDR_V(index);
+       }
 
        /* Request that the index'th VF Table values be read into VFL/VFH.
         */
@@ -3119,10 +3774,15 @@ void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
 
        /* Grab the VFL/VFH values ...
         */
-       t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
-                        vfl, 1, TP_RSS_VFL_CONFIG_A);
-       t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
-                        vfh, 1, TP_RSS_VFH_CONFIG_A);
+       if (adapter->flags & FW_OK) {
+               t4_fw_tp_pio_rw(adapter, vfl, 1, TP_RSS_VFL_CONFIG_A, 1);
+               t4_fw_tp_pio_rw(adapter, vfh, 1, TP_RSS_VFH_CONFIG_A, 1);
+       } else {
+               t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+                                vfl, 1, TP_RSS_VFL_CONFIG_A);
+               t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+                                vfh, 1, TP_RSS_VFH_CONFIG_A);
+       }
 }
 
 /**
@@ -3135,8 +3795,11 @@ u32 t4_read_rss_pf_map(struct adapter *adapter)
 {
        u32 pfmap;
 
-       t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
-                        &pfmap, 1, TP_RSS_PF_MAP_A);
+       if (adapter->flags & FW_OK)
+               t4_fw_tp_pio_rw(adapter, &pfmap, 1, TP_RSS_PF_MAP_A, 1);
+       else
+               t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+                                &pfmap, 1, TP_RSS_PF_MAP_A);
        return pfmap;
 }
 
@@ -3150,8 +3813,11 @@ u32 t4_read_rss_pf_mask(struct adapter *adapter)
 {
        u32 pfmask;
 
-       t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
-                        &pfmask, 1, TP_RSS_PF_MSK_A);
+       if (adapter->flags & FW_OK)
+               t4_fw_tp_pio_rw(adapter, &pfmask, 1, TP_RSS_PF_MSK_A, 1);
+       else
+               t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+                                &pfmask, 1, TP_RSS_PF_MSK_A);
        return pfmask;
 }
 
@@ -3176,24 +3842,148 @@ void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
        if (v4) {
                t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
                                 ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST_A);
-               v4->tcpOutRsts = STAT(OUT_RST);
-               v4->tcpInSegs  = STAT64(IN_SEG);
-               v4->tcpOutSegs = STAT64(OUT_SEG);
-               v4->tcpRetransSegs = STAT64(RXT_SEG);
+               v4->tcp_out_rsts = STAT(OUT_RST);
+               v4->tcp_in_segs  = STAT64(IN_SEG);
+               v4->tcp_out_segs = STAT64(OUT_SEG);
+               v4->tcp_retrans_segs = STAT64(RXT_SEG);
        }
        if (v6) {
                t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
                                 ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST_A);
-               v6->tcpOutRsts = STAT(OUT_RST);
-               v6->tcpInSegs  = STAT64(IN_SEG);
-               v6->tcpOutSegs = STAT64(OUT_SEG);
-               v6->tcpRetransSegs = STAT64(RXT_SEG);
+               v6->tcp_out_rsts = STAT(OUT_RST);
+               v6->tcp_in_segs  = STAT64(IN_SEG);
+               v6->tcp_out_segs = STAT64(OUT_SEG);
+               v6->tcp_retrans_segs = STAT64(RXT_SEG);
        }
 #undef STAT64
 #undef STAT
 #undef STAT_IDX
 }
 
+/**
+ *     t4_tp_get_err_stats - read TP's error MIB counters
+ *     @adap: the adapter
+ *     @st: holds the counter values
+ *
+ *     Returns the values of TP's error counters.
+ */
+void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
+{
+       /* T4/T5 have 4 channels (nchan == NCHAN); T6 and later have 2 */
+       if (adap->params.arch.nchan == NCHAN) {
+               t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+                                st->mac_in_errs, 12, TP_MIB_MAC_IN_ERR_0_A);
+               t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+                                st->tnl_cong_drops, 8,
+                                TP_MIB_TNL_CNG_DROP_0_A);
+               t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+                                st->tnl_tx_drops, 4,
+                                TP_MIB_TNL_DROP_0_A);
+               t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+                                st->ofld_vlan_drops, 4,
+                                TP_MIB_OFD_VLN_DROP_0_A);
+               t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+                                st->tcp6_in_errs, 4,
+                                TP_MIB_TCP_V6IN_ERR_0_A);
+       } else {
+               t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+                                st->mac_in_errs, 2, TP_MIB_MAC_IN_ERR_0_A);
+               t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+                                st->hdr_in_errs, 2, TP_MIB_HDR_IN_ERR_0_A);
+               t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+                                st->tcp_in_errs, 2, TP_MIB_TCP_IN_ERR_0_A);
+               t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+                                st->tnl_cong_drops, 2,
+                                TP_MIB_TNL_CNG_DROP_0_A);
+               t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+                                st->ofld_chan_drops, 2,
+                                TP_MIB_OFD_CHN_DROP_0_A);
+               t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+                                st->tnl_tx_drops, 2, TP_MIB_TNL_DROP_0_A);
+               t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+                                st->ofld_vlan_drops, 2,
+                                TP_MIB_OFD_VLN_DROP_0_A);
+               t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+                                st->tcp6_in_errs, 2, TP_MIB_TCP_V6IN_ERR_0_A);
+       }
+       t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+                        &st->ofld_no_neigh, 2, TP_MIB_OFD_ARP_DROP_A);
+}
+
+/**
+ *     t4_tp_get_cpl_stats - read TP's CPL MIB counters
+ *     @adap: the adapter
+ *     @st: holds the counter values
+ *
+ *     Returns the values of TP's CPL counters.
+ */
+void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st)
+{
+       /* T4/T5 have 4 channels (nchan == NCHAN); T6 and later have 2 */
+       if (adap->params.arch.nchan == NCHAN) {
+               t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->req,
+                                8, TP_MIB_CPL_IN_REQ_0_A);
+       } else {
+               t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->req,
+                                2, TP_MIB_CPL_IN_REQ_0_A);
+               t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->rsp,
+                                2, TP_MIB_CPL_OUT_RSP_0_A);
+       }
+}
+
+/**
+ *     t4_tp_get_rdma_stats - read TP's RDMA MIB counters
+ *     @adap: the adapter
+ *     @st: holds the counter values
+ *
+ *     Returns the values of TP's RDMA counters.
+ */
+void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st)
+{
+       t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, &st->rqe_dfr_pkt,
+                        2, TP_MIB_RQE_DFR_PKT_A);
+}
+
+/**
+ *     t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
+ *     @adap: the adapter
+ *     @idx: the port index
+ *     @st: holds the counter values
+ *
+ *     Returns the values of TP's FCoE counters for the selected port.
+ */
+void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
+                      struct tp_fcoe_stats *st)
+{
+       u32 val[2];
+
+       t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, &st->frames_ddp,
+                        1, TP_MIB_FCOE_DDP_0_A + idx);
+       t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, &st->frames_drop,
+                        1, TP_MIB_FCOE_DROP_0_A + idx);
+       t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
+                        2, TP_MIB_FCOE_BYTE_0_HI_A + 2 * idx);
+       st->octets_ddp = ((u64)val[0] << 32) | val[1];
+}
+
+/**
+ *     t4_get_usm_stats - read TP's non-TCP DDP MIB counters
+ *     @adap: the adapter
+ *     @st: holds the counter values
+ *
+ *     Returns the values of TP's counters for non-TCP directly-placed packets.
+ */
+void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st)
+{
+       u32 val[4];
+
+       t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val, 4,
+                        TP_MIB_USM_PKTS_A);
+       st->frames = val[0];
+       st->drops = val[1];
+       st->octets = ((u64)val[2] << 32) | val[3];
+}
+
 /**
  *     t4_read_mtu_tbl - returns the values in the HW path MTU table
  *     @adap: the adapter
@@ -3401,7 +4191,7 @@ void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
 }
 
 /**
- *     get_mps_bg_map - return the buffer groups associated with a port
+ *     t4_get_mps_bg_map - return the buffer groups associated with a port
  *     @adap: the adapter
  *     @idx: the port index
  *
@@ -3409,7 +4199,7 @@ void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
  *     with the given port.  Bit i is set if buffer group i is used by the
  *     port.
  */
-static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
+unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx)
 {
        u32 n = NUMPORTS_G(t4_read_reg(adap, MPS_CMN_CTL_A));
 
@@ -3450,6 +4240,28 @@ const char *t4_get_port_type_description(enum fw_port_type port_type)
        return "UNKNOWN";
 }
 
+/**
+ *      t4_get_port_stats_offset - collect port stats relative to a previous
+ *                                 snapshot
+ *      @adap: the adapter
+ *      @idx: the port index
+ *      @stats: current stats to fill
+ *      @offset: previous stats snapshot
+ */
+void t4_get_port_stats_offset(struct adapter *adap, int idx,
+                             struct port_stats *stats,
+                             struct port_stats *offset)
+{
+       u64 *s, *o;
+       int i;
+
+       t4_get_port_stats(adap, idx, stats);
+       for (i = 0, s = (u64 *)stats, o = (u64 *)offset;
+                       i < (sizeof(struct port_stats) / sizeof(u64));
+                       i++, s++, o++)
+               *s -= *o;
+}
+
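The intended pattern, sketched with hypothetical names: take one snapshot
early, then let the _offset variant subtract it from every later read.

	struct port_stats snap, delta;

	t4_get_port_stats(adap, pi->port_id, &snap);	/* snapshot once */
	/* ... traffic runs ... */
	t4_get_port_stats_offset(adap, pi->port_id, &delta, &snap);
	/* delta now holds only what accumulated since the snapshot */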
 /**
  *     t4_get_port_stats - collect port statistics
  *     @adap: the adapter
@@ -3460,7 +4272,7 @@ const char *t4_get_port_type_description(enum fw_port_type port_type)
  */
 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
 {
-       u32 bgmap = get_mps_bg_map(adap, idx);
+       u32 bgmap = t4_get_mps_bg_map(adap, idx);
 
 #define GET_STAT(name) \
        t4_read_reg64(adap, \
@@ -3534,103 +4346,51 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
 }
 
 /**
- *     t4_wol_magic_enable - enable/disable magic packet WoL
- *     @adap: the adapter
- *     @port: the physical port index
- *     @addr: MAC address expected in magic packets, %NULL to disable
- *
- *     Enables/disables magic packet wake-on-LAN for the selected port.
- */
-void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
-                        const u8 *addr)
-{
-       u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;
-
-       if (is_t4(adap->params.chip)) {
-               mag_id_reg_l = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO);
-               mag_id_reg_h = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI);
-               port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2_A);
-       } else {
-               mag_id_reg_l = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_LO);
-               mag_id_reg_h = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_HI);
-               port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2_A);
-       }
-
-       if (addr) {
-               t4_write_reg(adap, mag_id_reg_l,
-                            (addr[2] << 24) | (addr[3] << 16) |
-                            (addr[4] << 8) | addr[5]);
-               t4_write_reg(adap, mag_id_reg_h,
-                            (addr[0] << 8) | addr[1]);
-       }
-       t4_set_reg_field(adap, port_cfg_reg, MAGICEN_F,
-                        addr ? MAGICEN_F : 0);
-}
-
-/**
- *     t4_wol_pat_enable - enable/disable pattern-based WoL
+ *     t4_get_lb_stats - collect loopback port statistics
  *     @adap: the adapter
- *     @port: the physical port index
- *     @map: bitmap of which HW pattern filters to set
- *     @mask0: byte mask for bytes 0-63 of a packet
- *     @mask1: byte mask for bytes 64-127 of a packet
- *     @crc: Ethernet CRC for selected bytes
- *     @enable: enable/disable switch
+ *     @idx: the loopback port index
+ *     @p: the stats structure to fill
  *
- *     Sets the pattern filters indicated in @map to mask out the bytes
- *     specified in @mask0/@mask1 in received packets and compare the CRC of
- *     the resulting packet against @crc.  If @enable is %true pattern-based
- *     WoL is enabled, otherwise disabled.
+ *     Return HW statistics for the given loopback port.
  */
-int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
-                     u64 mask0, u64 mask1, unsigned int crc, bool enable)
+void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
 {
-       int i;
-       u32 port_cfg_reg;
-
-       if (is_t4(adap->params.chip))
-               port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2_A);
-       else
-               port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2_A);
-
-       if (!enable) {
-               t4_set_reg_field(adap, port_cfg_reg, PATEN_F, 0);
-               return 0;
-       }
-       if (map > 0xff)
-               return -EINVAL;
+       u32 bgmap = t4_get_mps_bg_map(adap, idx);
 
-#define EPIO_REG(name) \
+#define GET_STAT(name) \
+       t4_read_reg64(adap, \
        (is_t4(adap->params.chip) ? \
-        PORT_REG(port, XGMAC_PORT_EPIO_##name##_A) : \
-        T5_PORT_REG(port, MAC_PORT_EPIO_##name##_A))
-
-       t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
-       t4_write_reg(adap, EPIO_REG(DATA2), mask1);
-       t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
-
-       for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
-               if (!(map & 1))
-                       continue;
+       PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L) : \
+       T5_PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L)))
+#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
 
-               /* write byte masks */
-               t4_write_reg(adap, EPIO_REG(DATA0), mask0);
-               t4_write_reg(adap, EPIO_REG(OP), ADDRESS_V(i) | EPIOWR_F);
-               t4_read_reg(adap, EPIO_REG(OP));                /* flush */
-               if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY_F)
-                       return -ETIMEDOUT;
-
-               /* write CRC */
-               t4_write_reg(adap, EPIO_REG(DATA0), crc);
-               t4_write_reg(adap, EPIO_REG(OP), ADDRESS_V(i + 32) | EPIOWR_F);
-               t4_read_reg(adap, EPIO_REG(OP));                /* flush */
-               if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY_F)
-                       return -ETIMEDOUT;
-       }
-#undef EPIO_REG
+       p->octets           = GET_STAT(BYTES);
+       p->frames           = GET_STAT(FRAMES);
+       p->bcast_frames     = GET_STAT(BCAST);
+       p->mcast_frames     = GET_STAT(MCAST);
+       p->ucast_frames     = GET_STAT(UCAST);
+       p->error_frames     = GET_STAT(ERROR);
+
+       p->frames_64        = GET_STAT(64B);
+       p->frames_65_127    = GET_STAT(65B_127B);
+       p->frames_128_255   = GET_STAT(128B_255B);
+       p->frames_256_511   = GET_STAT(256B_511B);
+       p->frames_512_1023  = GET_STAT(512B_1023B);
+       p->frames_1024_1518 = GET_STAT(1024B_1518B);
+       p->frames_1519_max  = GET_STAT(1519B_MAX);
+       p->drop             = GET_STAT(DROP_FRAMES);
+
+       p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
+       p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
+       p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
+       p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
+       p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
+       p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
+       p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
+       p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
 
-       t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2_A), 0, PATEN_F);
-       return 0;
+#undef GET_STAT
+#undef GET_STAT_COM
 }
 
 /*     t4_mk_filtdelwr - create a delete filter WR
@@ -3644,33 +4404,38 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
 {
        memset(wr, 0, sizeof(*wr));
-       wr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
-       wr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*wr) / 16));
-       wr->tid_to_iq = htonl(FW_FILTER_WR_TID_V(ftid) |
-                       FW_FILTER_WR_NOREPLY_V(qid < 0));
-       wr->del_filter_to_l2tix = htonl(FW_FILTER_WR_DEL_FILTER_F);
+       wr->op_pkd = cpu_to_be32(FW_WR_OP_V(FW_FILTER_WR));
+       wr->len16_pkd = cpu_to_be32(FW_WR_LEN16_V(sizeof(*wr) / 16));
+       wr->tid_to_iq = cpu_to_be32(FW_FILTER_WR_TID_V(ftid) |
+                                   FW_FILTER_WR_NOREPLY_V(qid < 0));
+       wr->del_filter_to_l2tix = cpu_to_be32(FW_FILTER_WR_DEL_FILTER_F);
        if (qid >= 0)
-               wr->rx_chan_rx_rpl_iq = htons(FW_FILTER_WR_RX_RPL_IQ_V(qid));
+               wr->rx_chan_rx_rpl_iq =
+                       cpu_to_be16(FW_FILTER_WR_RX_RPL_IQ_V(qid));
 }
 
 #define INIT_CMD(var, cmd, rd_wr) do { \
-       (var).op_to_write = htonl(FW_CMD_OP_V(FW_##cmd##_CMD) | \
-                                 FW_CMD_REQUEST_F | FW_CMD_##rd_wr##_F); \
-       (var).retval_len16 = htonl(FW_LEN16(var)); \
+       (var).op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_##cmd##_CMD) | \
+                                       FW_CMD_REQUEST_F | \
+                                       FW_CMD_##rd_wr##_F); \
+       (var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
 } while (0)
 
 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
                          u32 addr, u32 val)
 {
+       u32 ldst_addrspace;
        struct fw_ldst_cmd c;
 
        memset(&c, 0, sizeof(c));
-       c.op_to_addrspace = htonl(FW_CMD_OP_V(FW_LDST_CMD) | FW_CMD_REQUEST_F |
-                           FW_CMD_WRITE_F |
-                           FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FIRMWARE));
-       c.cycles_to_len16 = htonl(FW_LEN16(c));
-       c.u.addrval.addr = htonl(addr);
-       c.u.addrval.val = htonl(val);
+       ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FIRMWARE);
+       c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
+                                       FW_CMD_REQUEST_F |
+                                       FW_CMD_WRITE_F |
+                                       ldst_addrspace);
+       c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
+       c.u.addrval.addr = cpu_to_be32(addr);
+       c.u.addrval.val = cpu_to_be32(val);
 
        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 }
@@ -3690,19 +4455,22 @@ int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
               unsigned int mmd, unsigned int reg, u16 *valp)
 {
        int ret;
+       u32 ldst_addrspace;
        struct fw_ldst_cmd c;
 
        memset(&c, 0, sizeof(c));
-       c.op_to_addrspace = htonl(FW_CMD_OP_V(FW_LDST_CMD) | FW_CMD_REQUEST_F |
-               FW_CMD_READ_F | FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO));
-       c.cycles_to_len16 = htonl(FW_LEN16(c));
-       c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR_V(phy_addr) |
-                                  FW_LDST_CMD_MMD_V(mmd));
-       c.u.mdio.raddr = htons(reg);
+       ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO);
+       c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
+                                       FW_CMD_REQUEST_F | FW_CMD_READ_F |
+                                       ldst_addrspace);
+       c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
+       c.u.mdio.paddr_mmd = cpu_to_be16(FW_LDST_CMD_PADDR_V(phy_addr) |
+                                        FW_LDST_CMD_MMD_V(mmd));
+       c.u.mdio.raddr = cpu_to_be16(reg);
 
        ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
        if (ret == 0)
-               *valp = ntohs(c.u.mdio.rval);
+               *valp = be16_to_cpu(c.u.mdio.rval);
        return ret;
 }
 
@@ -3720,16 +4488,19 @@ int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
               unsigned int mmd, unsigned int reg, u16 val)
 {
+       u32 ldst_addrspace;
        struct fw_ldst_cmd c;
 
        memset(&c, 0, sizeof(c));
-       c.op_to_addrspace = htonl(FW_CMD_OP_V(FW_LDST_CMD) | FW_CMD_REQUEST_F |
-               FW_CMD_WRITE_F | FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO));
-       c.cycles_to_len16 = htonl(FW_LEN16(c));
-       c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR_V(phy_addr) |
-                                  FW_LDST_CMD_MMD_V(mmd));
-       c.u.mdio.raddr = htons(reg);
-       c.u.mdio.rval = htons(val);
+       ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO);
+       c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
+                                       FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+                                       ldst_addrspace);
+       c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
+       c.u.mdio.paddr_mmd = cpu_to_be16(FW_LDST_CMD_PADDR_V(phy_addr) |
+                                        FW_LDST_CMD_MMD_V(mmd));
+       c.u.mdio.raddr = cpu_to_be16(reg);
+       c.u.mdio.rval = cpu_to_be16(val);
 
        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 }
@@ -3840,6 +4611,32 @@ void t4_sge_decode_idma_state(struct adapter *adapter, int state)
                        sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
 }
 
+/**
+ *      t4_sge_ctxt_flush - flush the SGE context cache
+ *      @adap: the adapter
+ *      @mbox: mailbox to use for the FW command
+ *
+ *      Issues a FW command through the given mailbox to flush the
+ *      SGE context cache.
+ */
+int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
+{
+       int ret;
+       u32 ldst_addrspace;
+       struct fw_ldst_cmd c;
+
+       memset(&c, 0, sizeof(c));
+       ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_SGE_EGRC);
+       c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
+                                       FW_CMD_REQUEST_F | FW_CMD_READ_F |
+                                       ldst_addrspace);
+       c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
+       c.u.idctxt.msg_ctxtflush = cpu_to_be32(FW_LDST_CMD_CTXTFLUSH_F);
+
+       ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+       return ret;
+}
+
 /**
  *      t4_fw_hello - establish communication with FW
  *      @adap: the adapter
@@ -3863,11 +4660,11 @@ int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
 retry:
        memset(&c, 0, sizeof(c));
        INIT_CMD(c, HELLO, WRITE);
-       c.err_to_clearinit = htonl(
+       c.err_to_clearinit = cpu_to_be32(
                FW_HELLO_CMD_MASTERDIS_V(master == MASTER_CANT) |
                FW_HELLO_CMD_MASTERFORCE_V(master == MASTER_MUST) |
-               FW_HELLO_CMD_MBMASTER_V(master == MASTER_MUST ? mbox :
-                                     FW_HELLO_CMD_MBMASTER_M) |
+               FW_HELLO_CMD_MBMASTER_V(master == MASTER_MUST ?
+                                       mbox : FW_HELLO_CMD_MBMASTER_M) |
                FW_HELLO_CMD_MBASYNCNOT_V(evt_mbox) |
                FW_HELLO_CMD_STAGE_V(fw_hello_cmd_stage_os) |
                FW_HELLO_CMD_CLEARINIT_F);
@@ -3888,7 +4685,7 @@ retry:
                return ret;
        }
 
-       v = ntohl(c.err_to_clearinit);
+       v = be32_to_cpu(c.err_to_clearinit);
        master_mbox = FW_HELLO_CMD_MBMASTER_G(v);
        if (state) {
                if (v & FW_HELLO_CMD_ERR_F)
@@ -4017,7 +4814,7 @@ int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
 
        memset(&c, 0, sizeof(c));
        INIT_CMD(c, RESET, WRITE);
-       c.val = htonl(reset);
+       c.val = cpu_to_be32(reset);
        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 }
 
@@ -4050,8 +4847,8 @@ static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
 
                memset(&c, 0, sizeof(c));
                INIT_CMD(c, RESET, WRITE);
-               c.val = htonl(PIORST_F | PIORSTMODE_F);
-               c.halt_pkd = htonl(FW_RESET_CMD_HALT_F);
+               c.val = cpu_to_be32(PIORST_F | PIORSTMODE_F);
+               c.halt_pkd = cpu_to_be32(FW_RESET_CMD_HALT_F);
                ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
        }
 
@@ -4190,7 +4987,7 @@ int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
         * the newly loaded firmware will handle this right by checking
         * its header flags to see if it advertises the capability.
         */
-       reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
+       reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
        return t4_fw_restart(adap, mbox, reset);
 }
 
@@ -4321,7 +5118,7 @@ int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
 }
 
 /**
- *     t4_query_params - query FW or device parameters
+ *     t4_query_params_rw - query FW or device parameters
  *     @adap: the adapter
  *     @mbox: mailbox to use for the FW command
  *     @pf: the PF
@@ -4329,13 +5126,14 @@ int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
  *     @nparams: the number of parameters
  *     @params: the parameter names
  *     @val: the parameter values
+ *     @rw: when set, @val supplies values to write along with the query
  *
  *     Reads the value of FW or device parameters.  Up to 7 parameters can be
  *     queried at once.
  */
-int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
-                   unsigned int vf, unsigned int nparams, const u32 *params,
-                   u32 *val)
+int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
+                      unsigned int vf, unsigned int nparams, const u32 *params,
+                      u32 *val, int rw)
 {
        int i, ret;
        struct fw_params_cmd c;
@@ -4345,22 +5143,35 @@ int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
                return -EINVAL;
 
        memset(&c, 0, sizeof(c));
-       c.op_to_vfn = htonl(FW_CMD_OP_V(FW_PARAMS_CMD) | FW_CMD_REQUEST_F |
-                           FW_CMD_READ_F | FW_PARAMS_CMD_PFN_V(pf) |
-                           FW_PARAMS_CMD_VFN_V(vf));
-       c.retval_len16 = htonl(FW_LEN16(c));
-       for (i = 0; i < nparams; i++, p += 2)
-               *p = htonl(*params++);
+       c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
+                                 FW_CMD_REQUEST_F | FW_CMD_READ_F |
+                                 FW_PARAMS_CMD_PFN_V(pf) |
+                                 FW_PARAMS_CMD_VFN_V(vf));
+       c.retval_len16 = cpu_to_be32(FW_LEN16(c));
+
+       for (i = 0; i < nparams; i++) {
+               *p++ = cpu_to_be32(*params++);
+               if (rw)
+                       *p = cpu_to_be32(*(val + i));
+               p++;
+       }
 
        ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
        if (ret == 0)
                for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
-                       *val++ = ntohl(*p);
+                       *val++ = be32_to_cpu(*p);
        return ret;
 }
 
+int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
+                   unsigned int vf, unsigned int nparams, const u32 *params,
+                   u32 *val)
+{
+       return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0);
+}
+
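Existing callers go through the unchanged wrapper; for reference, a typical
single-parameter query (FW_PARAM_DEV is the driver's existing convenience
macro, and the parameter picked here is only an example):

	u32 param = FW_PARAM_DEV(CCLK);		/* core clock, in kHz */
	u32 val;
	int ret;

	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
	if (ret == 0)
		adap->params.vpd.cclk = val;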
 /**
- *      t4_set_params_nosleep - sets FW or device parameters
+ *      t4_set_params_timeout - sets FW or device parameters
  *      @adap: the adapter
  *      @mbox: mailbox to use for the FW command
  *      @pf: the PF
@@ -4368,15 +5179,15 @@ int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
  *      @nparams: the number of parameters
  *      @params: the parameter names
  *      @val: the parameter values
+ *      @timeout: the mailbox command timeout (in ms)
  *
- *      Does not ever sleep
  *      Sets the value of FW or device parameters.  Up to 7 parameters can be
  *      specified at once.
  */
-int t4_set_params_nosleep(struct adapter *adap, unsigned int mbox,
+int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
                          unsigned int pf, unsigned int vf,
                          unsigned int nparams, const u32 *params,
-                         const u32 *val)
+                         const u32 *val, int timeout)
 {
        struct fw_params_cmd c;
        __be32 *p = &c.param[0].mnem;
@@ -4386,9 +5197,9 @@ int t4_set_params_nosleep(struct adapter *adap, unsigned int mbox,
 
        memset(&c, 0, sizeof(c));
        c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
-                               FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
-                               FW_PARAMS_CMD_PFN_V(pf) |
-                               FW_PARAMS_CMD_VFN_V(vf));
+                                 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+                                 FW_PARAMS_CMD_PFN_V(pf) |
+                                 FW_PARAMS_CMD_VFN_V(vf));
        c.retval_len16 = cpu_to_be32(FW_LEN16(c));
 
        while (nparams--) {
@@ -4396,7 +5207,7 @@ int t4_set_params_nosleep(struct adapter *adap, unsigned int mbox,
                *p++ = cpu_to_be32(*val++);
        }
 
-       return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
+       return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
 }
 
 /**
@@ -4416,23 +5227,8 @@ int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
                  unsigned int vf, unsigned int nparams, const u32 *params,
                  const u32 *val)
 {
-       struct fw_params_cmd c;
-       __be32 *p = &c.param[0].mnem;
-
-       if (nparams > 7)
-               return -EINVAL;
-
-       memset(&c, 0, sizeof(c));
-       c.op_to_vfn = htonl(FW_CMD_OP_V(FW_PARAMS_CMD) | FW_CMD_REQUEST_F |
-                           FW_CMD_WRITE_F | FW_PARAMS_CMD_PFN_V(pf) |
-                           FW_PARAMS_CMD_VFN_V(vf));
-       c.retval_len16 = htonl(FW_LEN16(c));
-       while (nparams--) {
-               *p++ = htonl(*params++);
-               *p++ = htonl(*val++);
-       }
-
-       return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+       return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
+                                    FW_CMD_MAX_TIMEOUT);
 }
 
 /**
@@ -4465,20 +5261,21 @@ int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
        struct fw_pfvf_cmd c;
 
        memset(&c, 0, sizeof(c));
-       c.op_to_vfn = htonl(FW_CMD_OP_V(FW_PFVF_CMD) | FW_CMD_REQUEST_F |
-                           FW_CMD_WRITE_F | FW_PFVF_CMD_PFN_V(pf) |
-                           FW_PFVF_CMD_VFN_V(vf));
-       c.retval_len16 = htonl(FW_LEN16(c));
-       c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT_V(rxqi) |
-                              FW_PFVF_CMD_NIQ_V(rxq));
-       c.type_to_neq = htonl(FW_PFVF_CMD_CMASK_V(cmask) |
-                              FW_PFVF_CMD_PMASK_V(pmask) |
-                              FW_PFVF_CMD_NEQ_V(txq));
-       c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC_V(tc) | FW_PFVF_CMD_NVI_V(vi) |
-                               FW_PFVF_CMD_NEXACTF_V(nexact));
-       c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS_V(rcaps) |
-                                    FW_PFVF_CMD_WX_CAPS_V(wxcaps) |
-                                    FW_PFVF_CMD_NETHCTRL_V(txq_eth_ctrl));
+       c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) | FW_CMD_REQUEST_F |
+                                 FW_CMD_WRITE_F | FW_PFVF_CMD_PFN_V(pf) |
+                                 FW_PFVF_CMD_VFN_V(vf));
+       c.retval_len16 = cpu_to_be32(FW_LEN16(c));
+       c.niqflint_niq = cpu_to_be32(FW_PFVF_CMD_NIQFLINT_V(rxqi) |
+                                    FW_PFVF_CMD_NIQ_V(rxq));
+       c.type_to_neq = cpu_to_be32(FW_PFVF_CMD_CMASK_V(cmask) |
+                                   FW_PFVF_CMD_PMASK_V(pmask) |
+                                   FW_PFVF_CMD_NEQ_V(txq));
+       c.tc_to_nexactf = cpu_to_be32(FW_PFVF_CMD_TC_V(tc) |
+                                     FW_PFVF_CMD_NVI_V(vi) |
+                                     FW_PFVF_CMD_NEXACTF_V(nexact));
+       c.r_caps_to_nethctrl = cpu_to_be32(FW_PFVF_CMD_R_CAPS_V(rcaps) |
+                                       FW_PFVF_CMD_WX_CAPS_V(wxcaps) |
+                                       FW_PFVF_CMD_NETHCTRL_V(txq_eth_ctrl));
        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 }
 
@@ -4507,10 +5304,10 @@ int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
        struct fw_vi_cmd c;
 
        memset(&c, 0, sizeof(c));
-       c.op_to_vfn = htonl(FW_CMD_OP_V(FW_VI_CMD) | FW_CMD_REQUEST_F |
-                           FW_CMD_WRITE_F | FW_CMD_EXEC_F |
-                           FW_VI_CMD_PFN_V(pf) | FW_VI_CMD_VFN_V(vf));
-       c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC_F | FW_LEN16(c));
+       c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) | FW_CMD_REQUEST_F |
+                                 FW_CMD_WRITE_F | FW_CMD_EXEC_F |
+                                 FW_VI_CMD_PFN_V(pf) | FW_VI_CMD_VFN_V(vf));
+       c.alloc_to_len16 = cpu_to_be32(FW_VI_CMD_ALLOC_F | FW_LEN16(c));
        c.portid_pkd = FW_VI_CMD_PORTID_V(port);
        c.nmac = nmac - 1;
 
@@ -4532,8 +5329,35 @@ int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
                }
        }
        if (rss_size)
-               *rss_size = FW_VI_CMD_RSSSIZE_G(ntohs(c.rsssize_pkd));
-       return FW_VI_CMD_VIID_G(ntohs(c.type_viid));
+               *rss_size = FW_VI_CMD_RSSSIZE_G(be16_to_cpu(c.rsssize_pkd));
+       return FW_VI_CMD_VIID_G(be16_to_cpu(c.type_viid));
+}
+
+/**
+ *     t4_free_vi - free a virtual interface
+ *     @adap: the adapter
+ *     @mbox: mailbox to use for the FW command
+ *     @pf: the PF owning the VI
+ *     @vf: the VF owning the VI
+ *     @viid: virtual interface identifier
+ *
+ *     Free a previously allocated virtual interface.
+ */
+int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
+              unsigned int vf, unsigned int viid)
+{
+       struct fw_vi_cmd c;
+
+       memset(&c, 0, sizeof(c));
+       c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
+                                 FW_CMD_REQUEST_F |
+                                 FW_CMD_EXEC_F |
+                                 FW_VI_CMD_PFN_V(pf) |
+                                 FW_VI_CMD_VFN_V(vf));
+       c.alloc_to_len16 = cpu_to_be32(FW_VI_CMD_FREE_F | FW_LEN16(c));
+       c.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(viid));
+
+       return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
 }
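A teardown sketch (hypothetical caller; assumes the VI was allocated by
t4_alloc_vi through the same mailbox/PF/VF):

	ret = t4_free_vi(adap, adap->mbox, adap->pf, 0, pi->viid);
	if (ret)
		dev_err(adap->pdev_dev, "failed to free VI %u: %d\n",
			pi->viid, ret);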
 
 /**
@@ -4569,14 +5393,16 @@ int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
                vlanex = FW_VI_RXMODE_CMD_VLANEXEN_M;
 
        memset(&c, 0, sizeof(c));
-       c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST_F |
-                            FW_CMD_WRITE_F | FW_VI_RXMODE_CMD_VIID_V(viid));
-       c.retval_len16 = htonl(FW_LEN16(c));
-       c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU_V(mtu) |
-                                 FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) |
-                                 FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) |
-                                 FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) |
-                                 FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex));
+       c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_RXMODE_CMD) |
+                                  FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+                                  FW_VI_RXMODE_CMD_VIID_V(viid));
+       c.retval_len16 = cpu_to_be32(FW_LEN16(c));
+       c.mtu_to_vlanexen =
+               cpu_to_be32(FW_VI_RXMODE_CMD_MTU_V(mtu) |
+                           FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) |
+                           FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) |
+                           FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) |
+                           FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex));
        return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
 }
 
@@ -4606,43 +5432,71 @@ int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
                      unsigned int viid, bool free, unsigned int naddr,
                      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
 {
-       int i, ret;
+       int offset, ret = 0;
        struct fw_vi_mac_cmd c;
-       struct fw_vi_mac_exact *p;
-       unsigned int max_naddr = is_t4(adap->params.chip) ?
-                                      NUM_MPS_CLS_SRAM_L_INSTANCES :
-                                      NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+       unsigned int nfilters = 0;
+       unsigned int max_naddr = adap->params.arch.mps_tcam_size;
+       unsigned int rem = naddr;
 
-       if (naddr > 7)
+       if (naddr > max_naddr)
                return -EINVAL;
 
-       memset(&c, 0, sizeof(c));
-       c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_MAC_CMD) | FW_CMD_REQUEST_F |
-                            FW_CMD_WRITE_F | (free ? FW_CMD_EXEC_F : 0) |
-                            FW_VI_MAC_CMD_VIID_V(viid));
-       c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS_V(free) |
-                                   FW_CMD_LEN16_V((naddr + 2) / 2));
-
-       for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
-               p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID_F |
-                                     FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_ADD_MAC));
-               memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
-       }
+       for (offset = 0; offset < naddr; /* offset advanced in body */) {
+               unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact) ?
+                                        rem : ARRAY_SIZE(c.u.exact));
+               size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
+                                                    u.exact[fw_naddr]), 16);
+               struct fw_vi_mac_exact *p;
+               int i;
 
-       ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
-       if (ret)
-               return ret;
+               memset(&c, 0, sizeof(c));
+               c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
+                                          FW_CMD_REQUEST_F |
+                                          FW_CMD_WRITE_F |
+                                          FW_CMD_EXEC_V(free) |
+                                          FW_VI_MAC_CMD_VIID_V(viid));
+               c.freemacs_to_len16 =
+                       cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(free) |
+                                   FW_CMD_LEN16_V(len16));
+
+               for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
+                       p->valid_to_idx =
+                               cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
+                                           FW_VI_MAC_CMD_IDX_V(
+                                                   FW_VI_MAC_ADD_MAC));
+                       memcpy(p->macaddr, addr[offset + i],
+                              sizeof(p->macaddr));
+               }
+
+               /* It's okay if we run out of space in our MAC address arena.
+                * Some of the addresses we submit may get stored so we need
+                * to run through the reply to see what the results were ...
+                */
+               ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
+               if (ret && ret != -FW_ENOMEM)
+                       break;
 
-       for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
-               u16 index = FW_VI_MAC_CMD_IDX_G(ntohs(p->valid_to_idx));
+               for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
+                       u16 index = FW_VI_MAC_CMD_IDX_G(
+                                       be16_to_cpu(p->valid_to_idx));
+
+                       if (idx)
+                               idx[offset + i] = (index >= max_naddr ?
+                                                  0xffff : index);
+                       if (index < max_naddr)
+                               nfilters++;
+                       else if (hash)
+                               *hash |= (1ULL <<
+                                         hash_mac_addr(addr[offset + i]));
+               }
 
-               if (idx)
-                       idx[i] = index >= max_naddr ? 0xffff : index;
-               if (index < max_naddr)
-                       ret++;
-               else if (hash)
-                       *hash |= (1ULL << hash_mac_addr(addr[i]));
+               free = false;
+               offset += fw_naddr;
+               rem -= fw_naddr;
        }
+
+       if (ret == 0 || ret == -FW_ENOMEM)
+               ret = nfilters;
        return ret;
 }
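With the chunked loop a caller may now submit more than seven addresses in
one call; a sketch under hypothetical names:

	/* Program naddr unicast addresses; whatever the exact-match TCAM
	 * cannot hold falls back into the hash vector.
	 */
	u64 hash = 0;
	int nfilters;

	nfilters = t4_alloc_mac_filt(adap, adap->mbox, pi->viid,
				     true, naddr, addr, NULL, &hash, true);
	if (nfilters < 0)
		return nfilters;	/* hard mailbox failure */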
 
@@ -4671,26 +5525,25 @@ int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
        int ret, mode;
        struct fw_vi_mac_cmd c;
        struct fw_vi_mac_exact *p = c.u.exact;
-       unsigned int max_mac_addr = is_t4(adap->params.chip) ?
-                                   NUM_MPS_CLS_SRAM_L_INSTANCES :
-                                   NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+       unsigned int max_mac_addr = adap->params.arch.mps_tcam_size;
 
        if (idx < 0)                             /* new allocation */
                idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
        mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
 
        memset(&c, 0, sizeof(c));
-       c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_MAC_CMD) | FW_CMD_REQUEST_F |
-                            FW_CMD_WRITE_F | FW_VI_MAC_CMD_VIID_V(viid));
-       c.freemacs_to_len16 = htonl(FW_CMD_LEN16_V(1));
-       p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID_F |
-                               FW_VI_MAC_CMD_SMAC_RESULT_V(mode) |
-                               FW_VI_MAC_CMD_IDX_V(idx));
+       c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
+                                  FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+                                  FW_VI_MAC_CMD_VIID_V(viid));
+       c.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16_V(1));
+       p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
+                                     FW_VI_MAC_CMD_SMAC_RESULT_V(mode) |
+                                     FW_VI_MAC_CMD_IDX_V(idx));
        memcpy(p->macaddr, addr, sizeof(p->macaddr));
 
        ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
        if (ret == 0) {
-               ret = FW_VI_MAC_CMD_IDX_G(ntohs(p->valid_to_idx));
+               ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
                if (ret >= max_mac_addr)
                        ret = -ENOMEM;
        }
@@ -4714,11 +5567,12 @@ int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
        struct fw_vi_mac_cmd c;
 
        memset(&c, 0, sizeof(c));
-       c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_MAC_CMD) | FW_CMD_REQUEST_F |
-                            FW_CMD_WRITE_F | FW_VI_ENABLE_CMD_VIID_V(viid));
-       c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN_F |
-                                   FW_VI_MAC_CMD_HASHUNIEN_V(ucast) |
-                                   FW_CMD_LEN16_V(1));
+       c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
+                                  FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+                                  FW_VI_ENABLE_CMD_VIID_V(viid));
+       c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN_F |
+                                         FW_VI_MAC_CMD_HASHUNIEN_V(ucast) |
+                                         FW_CMD_LEN16_V(1));
        c.u.hash.hashvec = cpu_to_be64(vec);
        return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
 }
@@ -4741,12 +5595,13 @@ int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
        struct fw_vi_enable_cmd c;
 
        memset(&c, 0, sizeof(c));
-       c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST_F |
-                            FW_CMD_EXEC_F | FW_VI_ENABLE_CMD_VIID_V(viid));
-
-       c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN_V(rx_en) |
-                              FW_VI_ENABLE_CMD_EEN_V(tx_en) | FW_LEN16(c) |
-                              FW_VI_ENABLE_CMD_DCB_INFO_V(dcb_en));
+       c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
+                                  FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
+                                  FW_VI_ENABLE_CMD_VIID_V(viid));
+       c.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN_V(rx_en) |
+                                    FW_VI_ENABLE_CMD_EEN_V(tx_en) |
+                                    FW_VI_ENABLE_CMD_DCB_INFO_V(dcb_en) |
+                                    FW_LEN16(c));
        return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
 }
 
@@ -4781,10 +5636,11 @@ int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
        struct fw_vi_enable_cmd c;
 
        memset(&c, 0, sizeof(c));
-       c.op_to_viid = htonl(FW_CMD_OP_V(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST_F |
-                            FW_CMD_EXEC_F | FW_VI_ENABLE_CMD_VIID_V(viid));
-       c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED_F | FW_LEN16(c));
-       c.blinkdur = htons(nblinks);
+       c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
+                                  FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
+                                  FW_VI_ENABLE_CMD_VIID_V(viid));
+       c.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED_F | FW_LEN16(c));
+       c.blinkdur = cpu_to_be16(nblinks);
        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 }
 
@@ -4808,14 +5664,14 @@ int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
        struct fw_iq_cmd c;
 
        memset(&c, 0, sizeof(c));
-       c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
-                           FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
-                           FW_IQ_CMD_VFN_V(vf));
-       c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE_F | FW_LEN16(c));
-       c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(iqtype));
-       c.iqid = htons(iqid);
-       c.fl0id = htons(fl0id);
-       c.fl1id = htons(fl1id);
+       c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
+                                 FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
+                                 FW_IQ_CMD_VFN_V(vf));
+       c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE_F | FW_LEN16(c));
+       c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
+       c.iqid = cpu_to_be16(iqid);
+       c.fl0id = cpu_to_be16(fl0id);
+       c.fl1id = cpu_to_be16(fl1id);
        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 }
 
@@ -4835,11 +5691,12 @@ int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
        struct fw_eq_eth_cmd c;
 
        memset(&c, 0, sizeof(c));
-       c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F |
-                           FW_CMD_EXEC_F | FW_EQ_ETH_CMD_PFN_V(pf) |
-                           FW_EQ_ETH_CMD_VFN_V(vf));
-       c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE_F | FW_LEN16(c));
-       c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID_V(eqid));
+       c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) |
+                                 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
+                                 FW_EQ_ETH_CMD_PFN_V(pf) |
+                                 FW_EQ_ETH_CMD_VFN_V(vf));
+       c.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE_F | FW_LEN16(c));
+       c.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID_V(eqid));
        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 }
 
@@ -4859,11 +5716,12 @@ int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
        struct fw_eq_ctrl_cmd c;
 
        memset(&c, 0, sizeof(c));
-       c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F |
-                           FW_CMD_EXEC_F | FW_EQ_CTRL_CMD_PFN_V(pf) |
-                           FW_EQ_CTRL_CMD_VFN_V(vf));
-       c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE_F | FW_LEN16(c));
-       c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID_V(eqid));
+       c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_CTRL_CMD) |
+                                 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
+                                 FW_EQ_CTRL_CMD_PFN_V(pf) |
+                                 FW_EQ_CTRL_CMD_VFN_V(vf));
+       c.alloc_to_len16 = cpu_to_be32(FW_EQ_CTRL_CMD_FREE_F | FW_LEN16(c));
+       c.cmpliqid_eqid = cpu_to_be32(FW_EQ_CTRL_CMD_EQID_V(eqid));
        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 }
 
@@ -4883,11 +5741,12 @@ int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
        struct fw_eq_ofld_cmd c;
 
        memset(&c, 0, sizeof(c));
-       c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST_F |
-                           FW_CMD_EXEC_F | FW_EQ_OFLD_CMD_PFN_V(pf) |
-                           FW_EQ_OFLD_CMD_VFN_V(vf));
-       c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE_F | FW_LEN16(c));
-       c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID_V(eqid));
+       c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_OFLD_CMD) |
+                                 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
+                                 FW_EQ_OFLD_CMD_PFN_V(pf) |
+                                 FW_EQ_OFLD_CMD_VFN_V(vf));
+       c.alloc_to_len16 = cpu_to_be32(FW_EQ_OFLD_CMD_FREE_F | FW_LEN16(c));
+       c.eqid_pkd = cpu_to_be32(FW_EQ_OFLD_CMD_EQID_V(eqid));
        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 }
 
@@ -4905,11 +5764,11 @@ int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
        if (opcode == FW_PORT_CMD) {    /* link/module state change message */
                int speed = 0, fc = 0;
                const struct fw_port_cmd *p = (void *)rpl;
-               int chan = FW_PORT_CMD_PORTID_G(ntohl(p->op_to_portid));
+               int chan = FW_PORT_CMD_PORTID_G(be32_to_cpu(p->op_to_portid));
                int port = adap->chan_map[chan];
                struct port_info *pi = adap2pinfo(adap, port);
                struct link_config *lc = &pi->link_cfg;
-               u32 stat = ntohl(p->u.info.lstatus_to_modtype);
+               u32 stat = be32_to_cpu(p->u.info.lstatus_to_modtype);
                int link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0;
                u32 mod = FW_PORT_CMD_MODTYPE_G(stat);
 
@@ -5043,6 +5902,22 @@ static int get_flash_params(struct adapter *adap)
        return 0;
 }
 
+static void set_pcie_completion_timeout(struct adapter *adapter, u8 range)
+{
+       u16 val;
+       u32 pcie_cap;
+
+       pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
+       if (pcie_cap) {
+               pci_read_config_word(adapter->pdev,
+                                    pcie_cap + PCI_EXP_DEVCTL2, &val);
+               val &= ~PCI_EXP_DEVCTL2_COMP_TIMEOUT;
+               val |= range;
+               pci_write_config_word(adapter->pdev,
+                                     pcie_cap + PCI_EXP_DEVCTL2, val);
+       }
+}
+
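The open-coded read-modify-write above could equally use the PCI core
helper; shown only as an equivalent sketch, not as part of this patch:

	pcie_capability_clear_and_set_word(adapter->pdev, PCI_EXP_DEVCTL2,
					   PCI_EXP_DEVCTL2_COMP_TIMEOUT,
					   range);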
 /**
  *     t4_prep_adapter - prepare SW and HW for operation
  *     @adapter: the adapter
@@ -5075,9 +5950,30 @@ int t4_prep_adapter(struct adapter *adapter)
        switch (ver) {
        case CHELSIO_T4:
                adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
+               adapter->params.arch.sge_fl_db = DBPRIO_F;
+               adapter->params.arch.mps_tcam_size =
+                                NUM_MPS_CLS_SRAM_L_INSTANCES;
+               adapter->params.arch.mps_rplc_size = 128;
+               adapter->params.arch.nchan = NCHAN;
+               adapter->params.arch.vfcount = 128;
                break;
        case CHELSIO_T5:
                adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
+               adapter->params.arch.sge_fl_db = DBPRIO_F | DBTYPE_F;
+               adapter->params.arch.mps_tcam_size =
+                                NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+               adapter->params.arch.mps_rplc_size = 128;
+               adapter->params.arch.nchan = NCHAN;
+               adapter->params.arch.vfcount = 128;
+               break;
+       case CHELSIO_T6:
+               adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
+               adapter->params.arch.sge_fl_db = 0;
+               adapter->params.arch.mps_tcam_size =
+                                NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+               adapter->params.arch.mps_rplc_size = 256;
+               adapter->params.arch.nchan = 2;
+               adapter->params.arch.vfcount = 256;
                break;
        default:
                dev_err(adapter->pdev_dev, "Device %d is not supported\n",
@@ -5094,11 +5990,14 @@ int t4_prep_adapter(struct adapter *adapter)
        adapter->params.nports = 1;
        adapter->params.portvec = 1;
        adapter->params.vpd.cclk = 50000;
+
+       /* Set pci completion timeout value to 4 seconds. */
+       set_pcie_completion_timeout(adapter, 0xd);
        return 0;
 }
 
 /**
- *     cxgb4_t4_bar2_sge_qregs - return BAR2 SGE Queue register information
+ *     t4_bar2_sge_qregs - return BAR2 SGE Queue register information
  *     @adapter: the adapter
  *     @qid: the Queue ID
  *     @qtype: the Ingress or Egress type for @qid
@@ -5122,7 +6021,7 @@ int t4_prep_adapter(struct adapter *adapter)
  *     Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
  *     then these "Inferred Queue ID" registers may not be used.
  */
-int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter,
+int t4_bar2_sge_qregs(struct adapter *adapter,
                      unsigned int qid,
                      enum t4_bar2_qtype qtype,
                      u64 *pbar2_qoffset,
@@ -5154,7 +6053,7 @@ int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter,
         *  o The BAR2 Queue ID.
         *  o The BAR2 Queue ID Offset into the BAR2 page.
         */
-       bar2_page_offset = ((qid >> qpp_shift) << page_shift);
+       bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
        bar2_qid = qid & qpp_mask;
        bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
 
@@ -5223,18 +6122,19 @@ int t4_init_devlog_params(struct adapter *adap)
        /* Otherwise, ask the firmware for its Device Log Parameters.
         */
        memset(&devlog_cmd, 0, sizeof(devlog_cmd));
-       devlog_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_DEVLOG_CMD) |
-                                      FW_CMD_REQUEST_F | FW_CMD_READ_F);
-       devlog_cmd.retval_len16 = htonl(FW_LEN16(devlog_cmd));
+       devlog_cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_DEVLOG_CMD) |
+                                            FW_CMD_REQUEST_F | FW_CMD_READ_F);
+       devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
        ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
                         &devlog_cmd);
        if (ret)
                return ret;
 
-       devlog_meminfo = ntohl(devlog_cmd.memtype_devlog_memaddr16_devlog);
+       devlog_meminfo =
+               be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog);
        dparams->memtype = FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo);
        dparams->start = FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4;
-       dparams->size = ntohl(devlog_cmd.memsize_devlog);
+       dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog);
 
        return 0;
 }
@@ -5255,13 +6155,13 @@ int t4_init_sge_params(struct adapter *adapter)
         */
        hps = t4_read_reg(adapter, SGE_HOST_PAGE_SIZE_A);
        s_hps = (HOSTPAGESIZEPF0_S +
-                (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * adapter->fn);
+                (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * adapter->pf);
        sge_params->hps = ((hps >> s_hps) & HOSTPAGESIZEPF0_M);
 
        /* Extract the SGE Egress and Ingress Queues Per Page for our PF.
         */
        s_qpp = (QUEUESPERPAGEPF0_S +
-               (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * adapter->fn);
+               (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * adapter->pf);
        qpp = t4_read_reg(adapter, SGE_EGRESS_QUEUES_PER_PAGE_PF_A);
        sge_params->eq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
        qpp = t4_read_reg(adapter, SGE_INGRESS_QUEUES_PER_PAGE_PF_A);
@@ -5292,12 +6192,19 @@ int t4_init_tp_params(struct adapter *adap)
        /* Cache the adapter's Compressed Filter Mode and global Ingress
         * Configuration.
         */
-       t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
-                        &adap->params.tp.vlan_pri_map, 1,
-                        TP_VLAN_PRI_MAP_A);
-       t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
-                        &adap->params.tp.ingress_config, 1,
-                        TP_INGRESS_CONFIG_A);
+       if (adap->flags & FW_OK) {
+               t4_fw_tp_pio_rw(adap, &adap->params.tp.vlan_pri_map, 1,
+                               TP_VLAN_PRI_MAP_A, 1);
+               t4_fw_tp_pio_rw(adap, &adap->params.tp.ingress_config, 1,
+                               TP_INGRESS_CONFIG_A, 1);
+       } else {
+               t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+                                &adap->params.tp.vlan_pri_map, 1,
+                                TP_VLAN_PRI_MAP_A);
+               t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
+                                &adap->params.tp.ingress_config, 1,
+                                TP_INGRESS_CONFIG_A);
+       }
 
        /* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
         * shift positions of several elements of the Compressed Filter Tuple
@@ -5373,6 +6280,29 @@ int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
        return field_shift;
 }
 
+int t4_init_rss_mode(struct adapter *adap, int mbox)
+{
+       int i, ret;
+       struct fw_rss_vi_config_cmd rvc;
+
+       memset(&rvc, 0, sizeof(rvc));
+
+       for_each_port(adap, i) {
+               struct port_info *p = adap2pinfo(adap, i);
+
+               rvc.op_to_viid =
+                       cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
+                                   FW_CMD_REQUEST_F | FW_CMD_READ_F |
+                                   FW_RSS_VI_CONFIG_CMD_VIID_V(p->viid));
+               rvc.retval_len16 = cpu_to_be32(FW_LEN16(rvc));
+               ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
+               if (ret)
+                       return ret;
+               p->rss_mode = be32_to_cpu(rvc.u.basicvirtual.defaultq_to_udpen);
+       }
+       return 0;
+}
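
t4_init_rss_mode() simply snapshots each Virtual Interface's RSS configuration so later paths can consult it without another mailbox round trip. A hedged sketch of a caller; the UDPEN flag is the real FW_RSS_VI_CONFIG_CMD_UDPEN_F from t4fw_api.h, but the wrapper function itself is illustrative:

static int cache_and_report_rss(struct adapter *adap)
{
        int ret = t4_init_rss_mode(adap, adap->mbox);

        if (ret)
                return ret;
        /* Example use of the cached mode: does port 0 hash UDP flows? */
        if (adap2pinfo(adap, 0)->rss_mode & FW_RSS_VI_CONFIG_CMD_UDPEN_F)
                dev_info(adap->pdev_dev, "port 0: UDP RSS enabled\n");
        return 0;
}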
+
 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
 {
        u8 addr[6];
@@ -5390,10 +6320,10 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
                while ((adap->params.portvec & (1 << j)) == 0)
                        j++;
 
-               c.op_to_portid = htonl(FW_CMD_OP_V(FW_PORT_CMD) |
-                                      FW_CMD_REQUEST_F | FW_CMD_READ_F |
-                                      FW_PORT_CMD_PORTID_V(j));
-               c.action_to_len16 = htonl(
+               c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
+                                            FW_CMD_REQUEST_F | FW_CMD_READ_F |
+                                            FW_PORT_CMD_PORTID_V(j));
+               c.action_to_len16 = cpu_to_be32(
                        FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) |
                        FW_LEN16(c));
                ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
@@ -5411,22 +6341,23 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
                memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
                adap->port[i]->dev_port = j;
 
-               ret = ntohl(c.u.info.lstatus_to_modtype);
+               ret = be32_to_cpu(c.u.info.lstatus_to_modtype);
                p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP_F) ?
                        FW_PORT_CMD_MDIOADDR_G(ret) : -1;
                p->port_type = FW_PORT_CMD_PTYPE_G(ret);
                p->mod_type = FW_PORT_MOD_TYPE_NA;
 
-               rvc.op_to_viid = htonl(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
-                                      FW_CMD_REQUEST_F | FW_CMD_READ_F |
-                                      FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
-               rvc.retval_len16 = htonl(FW_LEN16(rvc));
+               rvc.op_to_viid =
+                       cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
+                                   FW_CMD_REQUEST_F | FW_CMD_READ_F |
+                                   FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
+               rvc.retval_len16 = cpu_to_be32(FW_LEN16(rvc));
                ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
                if (ret)
                        return ret;
-               p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen);
+               p->rss_mode = be32_to_cpu(rvc.u.basicvirtual.defaultq_to_udpen);
 
-               init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
+               init_link_config(&p->link_cfg, be16_to_cpu(c.u.info.pcap));
                j++;
        }
        return 0;
@@ -5717,3 +6648,130 @@ void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
                t4_write_reg(adap, TP_DBG_LA_CONFIG_A,
                             cfg | adap->params.tp.la_mask);
 }
+
+/* SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
+ * seconds).  If we find one of the SGE Ingress DMA State Machines in the same
+ * state for more than the Warning Threshold then we'll issue a warning about
+ * a potential hang.  We'll repeat the warning every Warning Repeat seconds
+ * for as long as the SGE Ingress DMA Channel appears to be hung.
+ * If the situation clears, we'll note that as well.
+ */
+#define SGE_IDMA_WARN_THRESH 1
+#define SGE_IDMA_WARN_REPEAT 300
+
+/**
+ *     t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
+ *     @adapter: the adapter
+ *     @idma: the adapter IDMA Monitor state
+ *
+ *     Initialize the state of an SGE Ingress DMA Monitor.
+ */
+void t4_idma_monitor_init(struct adapter *adapter,
+                         struct sge_idma_monitor_state *idma)
+{
+       /* Initialize the state variables for detecting an SGE Ingress DMA
+        * hang.  The SGE has internal counters which count up on each clock
+        * tick whenever the SGE finds its Ingress DMA State Engines in the
+        * same state they were on the previous clock tick.  The clock used is
+        * the Core Clock so we have a limit on the maximum "time" they can
+        * record; typically a very small number of seconds.  For instance,
+        * with a 600MHz Core Clock, we can only count up to a bit more than
+        * 7s.  So we'll synthesize a larger counter in order to not run the
+        * risk of having the "timers" overflow and give us the flexibility to
+        * maintain a Hung SGE State Machine of our own which operates across
+        * a longer time frame.
+        */
+       idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
+       idma->idma_stalled[0] = 0;
+       idma->idma_stalled[1] = 0;
+}
+
+/**
+ *     t4_idma_monitor - monitor SGE Ingress DMA state
+ *     @adapter: the adapter
+ *     @idma: the adapter IDMA Monitor state
+ *     @hz: number of ticks/second
+ *     @ticks: number of ticks since the last IDMA Monitor call
+ */
+void t4_idma_monitor(struct adapter *adapter,
+                    struct sge_idma_monitor_state *idma,
+                    int hz, int ticks)
+{
+       int i, idma_same_state_cnt[2];
+
+       /* Read the SGE Debug Ingress DMA Same State Count registers.  These
+        * are counters inside the SGE which count up on each clock when the
+        * SGE finds its Ingress DMA State Engines in the same states they
+        * were in the previous clock.  The counters will peg out at
+        * 0xffffffff without wrapping around so once they pass the 1s
+        * threshold they'll stay above that till the IDMA state changes.
+        */
+       t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 13);
+       idma_same_state_cnt[0] = t4_read_reg(adapter, SGE_DEBUG_DATA_HIGH_A);
+       idma_same_state_cnt[1] = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
+
+       for (i = 0; i < 2; i++) {
+               u32 debug0, debug11;
+
+               /* If the Ingress DMA Same State Counter ("timer") is less
+                * than 1s, then we can reset our synthesized Stall Timer and
+                * continue.  If we have previously emitted warnings about a
+                * potential stalled Ingress Queue, issue a note indicating
+                * that the Ingress Queue has resumed forward progress.
+                */
+               if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
+                       if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH * hz)
+                               dev_warn(adapter->pdev_dev, "SGE idma%d, queue %u, "
+                                        "resumed after %d seconds\n",
+                                        i, idma->idma_qid[i],
+                                        idma->idma_stalled[i] / hz);
+                       idma->idma_stalled[i] = 0;
+                       continue;
+               }
+
+               /* Synthesize an SGE Ingress DMA Same State Timer in the Hz
+                * domain.  The first time we get here it'll be because we
+                * passed the 1s Threshold; each additional time it'll be
+                * because the RX Timer Callback is being fired on its regular
+                * schedule.
+                *
+                * If the stall is below our Potential Hung Ingress Queue
+                * Warning Threshold, continue.
+                */
+               if (idma->idma_stalled[i] == 0) {
+                       idma->idma_stalled[i] = hz;
+                       idma->idma_warn[i] = 0;
+               } else {
+                       idma->idma_stalled[i] += ticks;
+                       idma->idma_warn[i] -= ticks;
+               }
+
+               if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH * hz)
+                       continue;
+
+               /* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
+                */
+               if (idma->idma_warn[i] > 0)
+                       continue;
+               idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT * hz;
+
+               /* Read and save the SGE IDMA State and Queue ID information.
+                * We do this every time in case it changes across time ...
+                * can't be too careful ...
+                */
+               t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 0);
+               debug0 = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
+               idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
+
+               t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 11);
+               debug11 = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
+               idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
+
+               dev_warn(adapter->pdev_dev, "SGE idma%u, queue %u, potentially stuck in "
+                        "state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
+                        i, idma->idma_qid[i], idma->idma_state[i],
+                        idma->idma_stalled[i] / hz,
+                        debug0, debug11);
+               t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
+       }
+}
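
Both monitor routines are meant to be driven from the driver's periodic RX timer: initialize once at setup, then call t4_idma_monitor() on every timer pass with the elapsed jiffies. A minimal sketch of that call pattern; the idma_monitor and idma_stamp members are illustrative names for state the caller would keep:

static void rx_timer_idma_check(struct adapter *adap)
{
        unsigned long now = jiffies;

        /* HZ is the ticks-per-second base the stall thresholds scale by;
         * the last argument is how many jiffies elapsed since the
         * previous check.
         */
        t4_idma_monitor(adap, &adap->sge.idma_monitor, HZ,
                        now - adap->sge.idma_stamp);
        adap->sge.idma_stamp = now;
}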
index 380b15c0417a142c2d3dd9af6d5bc5767927aaa6..f9a2cb164737dac11fba1b96dbf3aef21537bf47 100644 (file)
@@ -52,8 +52,6 @@ enum {
        MBOX_LEN       = 64,    /* mailbox size in bytes */
        TRACE_LEN      = 112,   /* length of trace data and mask */
        FILTER_OPT_LEN = 36,    /* filter tuple width for optional components */
-       NWOL_PAT       = 8,     /* # of WoL patterns */
-       WOL_PAT_LEN    = 128,   /* length of WoL patterns */
 };
 
 enum {
@@ -152,17 +150,33 @@ struct rsp_ctrl {
        };
 };
 
-#define RSPD_NEWBUF 0x80000000U
-#define RSPD_LEN(x) (((x) >> 0) & 0x7fffffffU)
-#define RSPD_QID(x) RSPD_LEN(x)
+#define RSPD_NEWBUF_S    31
+#define RSPD_NEWBUF_V(x) ((x) << RSPD_NEWBUF_S)
+#define RSPD_NEWBUF_F    RSPD_NEWBUF_V(1U)
 
-#define RSPD_GEN(x)  ((x) >> 7)
-#define RSPD_TYPE(x) (((x) >> 4) & 3)
+#define RSPD_LEN_S    0
+#define RSPD_LEN_M    0x7fffffff
+#define RSPD_LEN_G(x) (((x) >> RSPD_LEN_S) & RSPD_LEN_M)
 
-#define V_QINTR_CNT_EN    0x0
-#define QINTR_CNT_EN       0x1
-#define QINTR_TIMER_IDX(x) ((x) << 1)
-#define QINTR_TIMER_IDX_GET(x) (((x) >> 1) & 0x7)
+#define RSPD_QID_S    RSPD_LEN_S
+#define RSPD_QID_M    RSPD_LEN_M
+#define RSPD_QID_G(x) RSPD_LEN_G(x)
+
+#define RSPD_GEN_S    7
+
+#define RSPD_TYPE_S    4
+#define RSPD_TYPE_M    0x3
+#define RSPD_TYPE_G(x) (((x) >> RSPD_TYPE_S) & RSPD_TYPE_M)
+
+/* Rx queue interrupt deferral fields: counter enable and timer index */
+#define QINTR_CNT_EN_S    0
+#define QINTR_CNT_EN_V(x) ((x) << QINTR_CNT_EN_S)
+#define QINTR_CNT_EN_F    QINTR_CNT_EN_V(1U)
+
+#define QINTR_TIMER_IDX_S    1
+#define QINTR_TIMER_IDX_M    0x7
+#define QINTR_TIMER_IDX_V(x) ((x) << QINTR_TIMER_IDX_S)
+#define QINTR_TIMER_IDX_G(x) (((x) >> QINTR_TIMER_IDX_S) & QINTR_TIMER_IDX_M)
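
The replacements follow the _S/_M/_V/_F/_G suffix convention used throughout these headers: shift, mask, make-value, single-bit flag, and get-value. A standalone sketch of how the pieces compose, compilable outside the kernel with the definitions copied verbatim:

#include <assert.h>
#include <stdint.h>

#define QINTR_CNT_EN_S       0
#define QINTR_CNT_EN_V(x)    ((x) << QINTR_CNT_EN_S)
#define QINTR_CNT_EN_F       QINTR_CNT_EN_V(1U)

#define QINTR_TIMER_IDX_S    1
#define QINTR_TIMER_IDX_M    0x7
#define QINTR_TIMER_IDX_V(x) ((x) << QINTR_TIMER_IDX_S)
#define QINTR_TIMER_IDX_G(x) (((x) >> QINTR_TIMER_IDX_S) & QINTR_TIMER_IDX_M)

int main(void)
{
        /* Pack timer index 5 with the interrupt counter enabled ... */
        uint32_t intr_params = QINTR_TIMER_IDX_V(5) | QINTR_CNT_EN_F;

        /* ... and recover both fields. */
        assert(QINTR_TIMER_IDX_G(intr_params) == 5);
        assert(intr_params & QINTR_CNT_EN_F);
        return 0;
}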
 
 /*
  * Flash layout.
index 30a2f56e99c297c735ec3af54de0fbf1eb0a2ed0..132cb8fc0bf7167703bab62109e384281ba33836 100644 (file)
@@ -634,26 +634,9 @@ struct cpl_tid_release {
 
 struct cpl_tx_pkt_core {
        __be32 ctrl0;
-#define TXPKT_VF(x)        ((x) << 0)
-#define TXPKT_PF(x)        ((x) << 8)
-#define TXPKT_VF_VLD       (1 << 11)
-#define TXPKT_OVLAN_IDX(x) ((x) << 12)
-#define TXPKT_INTF(x)      ((x) << 16)
-#define TXPKT_INS_OVLAN    (1 << 21)
-#define TXPKT_OPCODE(x)    ((x) << 24)
        __be16 pack;
        __be16 len;
        __be64 ctrl1;
-#define TXPKT_CSUM_END(x)   ((x) << 12)
-#define TXPKT_CSUM_START(x) ((x) << 20)
-#define TXPKT_IPHDR_LEN(x)  ((u64)(x) << 20)
-#define TXPKT_CSUM_LOC(x)   ((u64)(x) << 30)
-#define TXPKT_ETHHDR_LEN(x) ((u64)(x) << 34)
-#define TXPKT_CSUM_TYPE(x)  ((u64)(x) << 40)
-#define TXPKT_VLAN(x)       ((u64)(x) << 44)
-#define TXPKT_VLAN_VLD      (1ULL << 60)
-#define TXPKT_IPCSUM_DIS    (1ULL << 62)
-#define TXPKT_L4CSUM_DIS    (1ULL << 63)
 };
 
 struct cpl_tx_pkt {
@@ -663,16 +646,69 @@ struct cpl_tx_pkt {
 
 #define cpl_tx_pkt_xt cpl_tx_pkt
 
+/* cpl_tx_pkt_core.ctrl0 fields */
+#define TXPKT_VF_S    0
+#define TXPKT_VF_V(x) ((x) << TXPKT_VF_S)
+
+#define TXPKT_PF_S    8
+#define TXPKT_PF_V(x) ((x) << TXPKT_PF_S)
+
+#define TXPKT_VF_VLD_S    11
+#define TXPKT_VF_VLD_V(x) ((x) << TXPKT_VF_VLD_S)
+#define TXPKT_VF_VLD_F    TXPKT_VF_VLD_V(1U)
+
+#define TXPKT_OVLAN_IDX_S    12
+#define TXPKT_OVLAN_IDX_V(x) ((x) << TXPKT_OVLAN_IDX_S)
+
+#define TXPKT_INTF_S    16
+#define TXPKT_INTF_V(x) ((x) << TXPKT_INTF_S)
+
+#define TXPKT_INS_OVLAN_S    21
+#define TXPKT_INS_OVLAN_V(x) ((x) << TXPKT_INS_OVLAN_S)
+#define TXPKT_INS_OVLAN_F    TXPKT_INS_OVLAN_V(1U)
+
+#define TXPKT_OPCODE_S    24
+#define TXPKT_OPCODE_V(x) ((x) << TXPKT_OPCODE_S)
+
+/* cpl_tx_pkt_core.ctrl1 fields */
+#define TXPKT_CSUM_END_S    12
+#define TXPKT_CSUM_END_V(x) ((x) << TXPKT_CSUM_END_S)
+
+#define TXPKT_CSUM_START_S    20
+#define TXPKT_CSUM_START_V(x) ((x) << TXPKT_CSUM_START_S)
+
+#define TXPKT_IPHDR_LEN_S    20
+#define TXPKT_IPHDR_LEN_V(x) ((__u64)(x) << TXPKT_IPHDR_LEN_S)
+
+#define TXPKT_CSUM_LOC_S    30
+#define TXPKT_CSUM_LOC_V(x) ((__u64)(x) << TXPKT_CSUM_LOC_S)
+
+#define TXPKT_ETHHDR_LEN_S    34
+#define TXPKT_ETHHDR_LEN_V(x) ((__u64)(x) << TXPKT_ETHHDR_LEN_S)
+
+#define T6_TXPKT_ETHHDR_LEN_S    32
+#define T6_TXPKT_ETHHDR_LEN_V(x) ((__u64)(x) << T6_TXPKT_ETHHDR_LEN_S)
+
+#define TXPKT_CSUM_TYPE_S    40
+#define TXPKT_CSUM_TYPE_V(x) ((__u64)(x) << TXPKT_CSUM_TYPE_S)
+
+#define TXPKT_VLAN_S    44
+#define TXPKT_VLAN_V(x) ((__u64)(x) << TXPKT_VLAN_S)
+
+#define TXPKT_VLAN_VLD_S    60
+#define TXPKT_VLAN_VLD_V(x) ((__u64)(x) << TXPKT_VLAN_VLD_S)
+#define TXPKT_VLAN_VLD_F    TXPKT_VLAN_VLD_V(1ULL)
+
+#define TXPKT_IPCSUM_DIS_S    62
+#define TXPKT_IPCSUM_DIS_V(x) ((__u64)(x) << TXPKT_IPCSUM_DIS_S)
+#define TXPKT_IPCSUM_DIS_F    TXPKT_IPCSUM_DIS_V(1ULL)
+
+#define TXPKT_L4CSUM_DIS_S    63
+#define TXPKT_L4CSUM_DIS_V(x) ((__u64)(x) << TXPKT_L4CSUM_DIS_S)
+#define TXPKT_L4CSUM_DIS_F    TXPKT_L4CSUM_DIS_V(1ULL)
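
The ctrl1 field macros from TXPKT_IPHDR_LEN upward cast to __u64 before shifting, because the packed word is 64 bits wide and the shifted values can extend past bit 31, where a plain 32-bit shift would truncate or overflow. A standalone illustration with made-up offsets:

#include <assert.h>
#include <stdint.h>

#define TXPKT_CSUM_START_S    20
#define TXPKT_CSUM_START_V(x) ((x) << TXPKT_CSUM_START_S)

#define TXPKT_CSUM_LOC_S    30
#define TXPKT_CSUM_LOC_V(x) ((uint64_t)(x) << TXPKT_CSUM_LOC_S)

#define TXPKT_L4CSUM_DIS_S    63
#define TXPKT_L4CSUM_DIS_V(x) ((uint64_t)(x) << TXPKT_L4CSUM_DIS_S)
#define TXPKT_L4CSUM_DIS_F    TXPKT_L4CSUM_DIS_V(1ULL)

int main(void)
{
        int start = 34;         /* illustrative transport-header offset */

        /* Checksumming starts at "start"; the result lands 6 bytes in. */
        uint64_t ctrl1 = TXPKT_CSUM_START_V(start) |
                         TXPKT_CSUM_LOC_V(start + 6);

        /* The disable flag lives in bit 63, hence the 64-bit casts. */
        assert(!(ctrl1 & TXPKT_L4CSUM_DIS_F));
        return 0;
}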
+
 struct cpl_tx_pkt_lso_core {
        __be32 lso_ctrl;
-#define LSO_TCPHDR_LEN(x) ((x) << 0)
-#define LSO_IPHDR_LEN(x)  ((x) << 4)
-#define LSO_ETHHDR_LEN(x) ((x) << 16)
-#define LSO_IPV6(x)       ((x) << 20)
-#define LSO_LAST_SLICE    (1 << 22)
-#define LSO_FIRST_SLICE   (1 << 23)
-#define LSO_OPCODE(x)     ((x) << 24)
-#define LSO_T5_XFER_SIZE(x) ((x) << 0)
        __be16 ipid_ofst;
        __be16 mss;
        __be32 seqno_offset;
index 326674b19983825af5631993b4427e0132ed6ba6..af3462db5adbbaed1aa2c2e7d992df224d01755f 100644 (file)
 #define SGE_INGRESS_QUEUES_PER_PAGE_PF_A 0x10f4
 #define SGE_INGRESS_QUEUES_PER_PAGE_VF_A 0x10f8
 
+#define SGE_ERROR_STATS_A 0x1100
+
+#define UNCAPTURED_ERROR_S    18
+#define UNCAPTURED_ERROR_V(x) ((x) << UNCAPTURED_ERROR_S)
+#define UNCAPTURED_ERROR_F    UNCAPTURED_ERROR_V(1U)
+
+#define ERROR_QID_VALID_S    17
+#define ERROR_QID_VALID_V(x) ((x) << ERROR_QID_VALID_S)
+#define ERROR_QID_VALID_F    ERROR_QID_VALID_V(1U)
+
+#define ERROR_QID_S    0
+#define ERROR_QID_M    0x1ffffU
+#define ERROR_QID_G(x) (((x) >> ERROR_QID_S) & ERROR_QID_M)
+
 #define HP_INT_THRESH_S    28
 #define HP_INT_THRESH_M    0xfU
 #define HP_INT_THRESH_V(x) ((x) << HP_INT_THRESH_S)
 #define SGE_STAT_MATCH_A       0x10e8
 #define SGE_STAT_CFG_A         0x10ec
 
+#define STATMODE_S    2
+#define STATMODE_V(x) ((x) << STATMODE_S)
+
 #define STATSOURCE_T5_S    9
+#define STATSOURCE_T5_M    0xfU
 #define STATSOURCE_T5_V(x) ((x) << STATSOURCE_T5_S)
+#define STATSOURCE_T5_G(x) (((x) >> STATSOURCE_T5_S) & STATSOURCE_T5_M)
 
 #define SGE_DBFIFO_STATUS2_A 0x1118
 
 #define REGISTER_S    0
 #define REGISTER_V(x) ((x) << REGISTER_S)
 
+#define T6_ENABLE_S    31
+#define T6_ENABLE_V(x) ((x) << T6_ENABLE_S)
+#define T6_ENABLE_F    T6_ENABLE_V(1U)
+
 #define PFNUM_S    0
 #define PFNUM_V(x) ((x) << PFNUM_S)
 
 #define CSUM_HAS_PSEUDO_HDR_F    CSUM_HAS_PSEUDO_HDR_V(1U)
 
 #define TP_MIB_MAC_IN_ERR_0_A  0x0
+#define TP_MIB_HDR_IN_ERR_0_A  0x4
+#define TP_MIB_TCP_IN_ERR_0_A  0x8
 #define TP_MIB_TCP_OUT_RST_A   0xc
 #define TP_MIB_TCP_IN_SEG_HI_A 0x10
 #define TP_MIB_TCP_IN_SEG_LO_A 0x11
 #define TP_MIB_TCP_RXT_SEG_HI_A        0x14
 #define TP_MIB_TCP_RXT_SEG_LO_A        0x15
 #define TP_MIB_TNL_CNG_DROP_0_A 0x18
+#define TP_MIB_OFD_CHN_DROP_0_A 0x1c
 #define TP_MIB_TCP_V6IN_ERR_0_A 0x28
 #define TP_MIB_TCP_V6OUT_RST_A 0x2c
 #define TP_MIB_OFD_ARP_DROP_A  0x36
+#define TP_MIB_CPL_IN_REQ_0_A  0x38
+#define TP_MIB_CPL_OUT_RSP_0_A 0x3c
 #define TP_MIB_TNL_DROP_0_A    0x44
+#define TP_MIB_FCOE_DDP_0_A    0x48
+#define TP_MIB_FCOE_DROP_0_A   0x4c
+#define TP_MIB_FCOE_BYTE_0_HI_A        0x50
 #define TP_MIB_OFD_VLN_DROP_0_A        0x58
+#define TP_MIB_USM_PKTS_A      0x5c
+#define TP_MIB_RQE_DFR_PKT_A   0x64
 
 #define ULP_TX_INT_CAUSE_A     0x8dcc
 
 #define MPS_PORT_STAT_LB_PORT_1519B_MAX_L 0x520
 #define MPS_PORT_STAT_LB_PORT_1519B_MAX_H 0x524
 #define MPS_PORT_STAT_LB_PORT_DROP_FRAMES 0x528
+#define MPS_PORT_STAT_LB_PORT_DROP_FRAMES_L 0x528
 #define MPS_PORT_STAT_RX_PORT_BYTES_L 0x540
 #define MPS_PORT_STAT_RX_PORT_BYTES_H 0x544
 #define MPS_PORT_STAT_RX_PORT_FRAMES_L 0x548
 #define VFLKPIDX_M    0xffU
 #define VFLKPIDX_G(x) (((x) >> VFLKPIDX_S) & VFLKPIDX_M)
 
+#define T6_VFWRADDR_S    8
+#define T6_VFWRADDR_M    0xffU
+#define T6_VFWRADDR_V(x) ((x) << T6_VFWRADDR_S)
+#define T6_VFWRADDR_G(x) (((x) >> T6_VFWRADDR_S) & T6_VFWRADDR_M)
+
 #define TP_RSS_CONFIG_CNG_A 0x7e04
 #define TP_RSS_SECRET_KEY0_A 0x40
 #define TP_RSS_PF0_CONFIG_A 0x30
 #define MPS_RX_PERR_INT_CAUSE_A 0x11074
 
 #define MPS_CLS_TCAM_Y_L_A 0xf000
+#define MPS_CLS_TCAM_DATA0_A 0xf000
+#define MPS_CLS_TCAM_DATA1_A 0xf004
+
+#define DMACH_S    0
+#define DMACH_M    0xffffU
+#define DMACH_G(x) (((x) >> DMACH_S) & DMACH_M)
+
 #define MPS_CLS_TCAM_X_L_A 0xf008
+#define MPS_CLS_TCAM_DATA2_CTL_A 0xf008
+
+#define CTLCMDTYPE_S    31
+#define CTLCMDTYPE_V(x) ((x) << CTLCMDTYPE_S)
+#define CTLCMDTYPE_F    CTLCMDTYPE_V(1U)
+
+#define CTLTCAMSEL_S    25
+#define CTLTCAMSEL_V(x) ((x) << CTLTCAMSEL_S)
+
+#define CTLTCAMINDEX_S    17
+#define CTLTCAMINDEX_V(x) ((x) << CTLTCAMINDEX_S)
+
+#define CTLXYBITSEL_S    16
+#define CTLXYBITSEL_V(x) ((x) << CTLXYBITSEL_S)
 
 #define MPS_CLS_TCAM_Y_L(idx) (MPS_CLS_TCAM_Y_L_A + (idx) * 16)
 #define NUM_MPS_CLS_TCAM_Y_L_INSTANCES 512
 #define NUM_MPS_CLS_TCAM_X_L_INSTANCES 512
 
 #define MPS_CLS_SRAM_L_A 0xe000
+
+#define T6_MULTILISTEN0_S    26
+
+#define T6_SRAM_PRIO3_S    23
+#define T6_SRAM_PRIO3_M    0x7U
+#define T6_SRAM_PRIO3_G(x) (((x) >> T6_SRAM_PRIO3_S) & T6_SRAM_PRIO3_M)
+
+#define T6_SRAM_PRIO2_S    20
+#define T6_SRAM_PRIO2_M    0x7U
+#define T6_SRAM_PRIO2_G(x) (((x) >> T6_SRAM_PRIO2_S) & T6_SRAM_PRIO2_M)
+
+#define T6_SRAM_PRIO1_S    17
+#define T6_SRAM_PRIO1_M    0x7U
+#define T6_SRAM_PRIO1_G(x) (((x) >> T6_SRAM_PRIO1_S) & T6_SRAM_PRIO1_M)
+
+#define T6_SRAM_PRIO0_S    14
+#define T6_SRAM_PRIO0_M    0x7U
+#define T6_SRAM_PRIO0_G(x) (((x) >> T6_SRAM_PRIO0_S) & T6_SRAM_PRIO0_M)
+
+#define T6_SRAM_VLD_S    13
+#define T6_SRAM_VLD_V(x) ((x) << T6_SRAM_VLD_S)
+#define T6_SRAM_VLD_F    T6_SRAM_VLD_V(1U)
+
+#define T6_REPLICATE_S    12
+#define T6_REPLICATE_V(x) ((x) << T6_REPLICATE_S)
+#define T6_REPLICATE_F    T6_REPLICATE_V(1U)
+
+#define T6_PF_S    9
+#define T6_PF_M    0x7U
+#define T6_PF_G(x) (((x) >> T6_PF_S) & T6_PF_M)
+
+#define T6_VF_VALID_S    8
+#define T6_VF_VALID_V(x) ((x) << T6_VF_VALID_S)
+#define T6_VF_VALID_F    T6_VF_VALID_V(1U)
+
+#define T6_VF_S    0
+#define T6_VF_M    0xffU
+#define T6_VF_G(x) (((x) >> T6_VF_S) & T6_VF_M)
+
 #define MPS_CLS_SRAM_H_A 0xe004
 
 #define MPS_CLS_SRAM_L(idx) (MPS_CLS_SRAM_L_A + (idx) * 8)
 #define CIM_F    CIM_V(1U)
 
 #define MC1_S    31
+#define MC1_V(x) ((x) << MC1_S)
+#define MC1_F    MC1_V(1U)
 
 #define PL_INT_ENABLE_A 0x19410
 #define PL_INT_MAP0_A 0x19414
 #define REV_V(x) ((x) << REV_S)
 #define REV_G(x) (((x) >> REV_S) & REV_M)
 
+#define T6_UNKNOWNCMD_S    3
+#define T6_UNKNOWNCMD_V(x) ((x) << T6_UNKNOWNCMD_S)
+#define T6_UNKNOWNCMD_F    T6_UNKNOWNCMD_V(1U)
+
+#define T6_LIP0_S    2
+#define T6_LIP0_V(x) ((x) << T6_LIP0_S)
+#define T6_LIP0_F    T6_LIP0_V(1U)
+
+#define T6_LIPMISS_S    1
+#define T6_LIPMISS_V(x) ((x) << T6_LIPMISS_S)
+#define T6_LIPMISS_F    T6_LIPMISS_V(1U)
+
 #define LE_DB_INT_CAUSE_A 0x19c3c
 
 #define REQQPARERR_S    16
 #define LIP0_V(x) ((x) << LIP0_S)
 #define LIP0_F    LIP0_V(1U)
 
+#define TCAMINTPERR_S    13
+#define TCAMINTPERR_V(x) ((x) << TCAMINTPERR_S)
+#define TCAMINTPERR_F    TCAMINTPERR_V(1U)
+
+#define SSRAMINTPERR_S    10
+#define SSRAMINTPERR_V(x) ((x) << SSRAMINTPERR_S)
+#define SSRAMINTPERR_F    SSRAMINTPERR_V(1U)
+
 #define NCSI_INT_CAUSE_A 0x1a0d8
 
 #define CIM_DM_PRTY_ERR_S    8
index 19b2dcf6acdebbd841c3869e3340de17c750a505..7bdee3bf75ec200743604069ae7be5c928430577 100644 (file)
 #define SGE_TIMERREGS                  6
 #define TIMERREG_COUNTER0_X            0
 
+#define FETCHBURSTMIN_64B_X            2
+
+#define FETCHBURSTMAX_256B_X           2
+#define FETCHBURSTMAX_512B_X           3
+
+#define HOSTFCMODE_STATUS_PAGE_X       2
+
+#define CIDXFLUSHTHRESH_32_X           5
+
+#define UPDATEDELIVERY_INTERRUPT_X     1
+
+#define RSPD_TYPE_FLBUF_X              0
+#define RSPD_TYPE_CPL_X                        1
+#define RSPD_TYPE_INTR_X               2
+
+/* Congestion Manager Definitions.
+ */
+#define CONMCTXT_CNGTPMODE_S           19
+#define CONMCTXT_CNGTPMODE_V(x)                ((x) << CONMCTXT_CNGTPMODE_S)
+#define CONMCTXT_CNGCHMAP_S            0
+#define CONMCTXT_CNGCHMAP_V(x)         ((x) << CONMCTXT_CNGCHMAP_S)
+#define CONMCTXT_CNGTPMODE_CHANNEL_X   2
+#define CONMCTXT_CNGTPMODE_QUEUE_X     1
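
These pair with the FW_PARAMS_PARAM_DMAQ_CONM_CTXT firmware parameter added further down in t4fw_api.h. A loosely hedged sketch of how a Congestion Manager context might be programmed for an ingress queue; the FW_PARAMS_* mnemonic macros and t4_set_params() are real, but the queue variable and channel-map value are illustrative only:

u32 param, val;
int ret;

param = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
        FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
        FW_PARAMS_PARAM_YZ_V(iq->cntxt_id);
/* Back-pressure in channel mode, congestion channel map bit 0. */
val = CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_CHANNEL_X) |
      CONMCTXT_CNGCHMAP_V(1);
ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
if (ret)
        dev_warn(adap->pdev_dev, "CONM context setup failed: %d\n", ret);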
+
 /* T5 and later support a new BAR2-based doorbell mechanism for Egress Queues.
  * The User Doorbells are each 128 bytes in length with a Simple Doorbell at
  * offsets 8x and a Write Combining single 64-byte Egress Queue Unit
index 03fbfd1fb3dff35f5cef20f84a574df09e285116..ab4674684acc27fb18f7c9b6fb114eb9dfa2b0d0 100644 (file)
@@ -772,7 +772,7 @@ struct fw_ldst_cmd {
                } addrval;
                struct fw_ldst_idctxt {
                        __be32 physid;
-                       __be32 msg_pkd;
+                       __be32 msg_ctxtflush;
                        __be32 ctxt_data7;
                        __be32 ctxt_data6;
                        __be32 ctxt_data5;
@@ -788,15 +788,27 @@ struct fw_ldst_cmd {
                        __be16 vctl;
                        __be16 rval;
                } mdio;
-               struct fw_ldst_mps {
-                       __be16 fid_ctl;
-                       __be16 rplcpf_pkd;
-                       __be32 rplc127_96;
-                       __be32 rplc95_64;
-                       __be32 rplc63_32;
-                       __be32 rplc31_0;
-                       __be32 atrb;
-                       __be16 vlan[16];
+               union fw_ldst_mps {
+                       struct fw_ldst_mps_rplc {
+                               __be16 fid_idx;
+                               __be16 rplcpf_pkd;
+                               __be32 rplc255_224;
+                               __be32 rplc223_192;
+                               __be32 rplc191_160;
+                               __be32 rplc159_128;
+                               __be32 rplc127_96;
+                               __be32 rplc95_64;
+                               __be32 rplc63_32;
+                               __be32 rplc31_0;
+                       } rplc;
+                       struct fw_ldst_mps_atrb {
+                               __be16 fid_mpsid;
+                               __be16 r2[3];
+                               __be32 r3[2];
+                               __be32 r4;
+                               __be32 atrb;
+                               __be16 vlan[16];
+                       } atrb;
                } mps;
                struct fw_ldst_func {
                        u8 access_ctl;
@@ -822,6 +834,10 @@ struct fw_ldst_cmd {
 #define FW_LDST_CMD_MSG_S       31
 #define FW_LDST_CMD_MSG_V(x)   ((x) << FW_LDST_CMD_MSG_S)
 
+#define FW_LDST_CMD_CTXTFLUSH_S                30
+#define FW_LDST_CMD_CTXTFLUSH_V(x)     ((x) << FW_LDST_CMD_CTXTFLUSH_S)
+#define FW_LDST_CMD_CTXTFLUSH_F                FW_LDST_CMD_CTXTFLUSH_V(1U)
+
 #define FW_LDST_CMD_PADDR_S     8
 #define FW_LDST_CMD_PADDR_V(x) ((x) << FW_LDST_CMD_PADDR_S)
 
@@ -831,8 +847,8 @@ struct fw_ldst_cmd {
 #define FW_LDST_CMD_FID_S       15
 #define FW_LDST_CMD_FID_V(x)   ((x) << FW_LDST_CMD_FID_S)
 
-#define FW_LDST_CMD_CTL_S       0
-#define FW_LDST_CMD_CTL_V(x)   ((x) << FW_LDST_CMD_CTL_S)
+#define FW_LDST_CMD_IDX_S      0
+#define FW_LDST_CMD_IDX_V(x)   ((x) << FW_LDST_CMD_IDX_S)
 
 #define FW_LDST_CMD_RPLCPF_S    0
 #define FW_LDST_CMD_RPLCPF_V(x)        ((x) << FW_LDST_CMD_RPLCPF_S)
@@ -1061,6 +1077,7 @@ enum fw_params_param_dev {
        FW_PARAMS_PARAM_DEV_FWREV = 0x0B,
        FW_PARAMS_PARAM_DEV_TPREV = 0x0C,
        FW_PARAMS_PARAM_DEV_CF = 0x0D,
+       FW_PARAMS_PARAM_DEV_PHYFW = 0x0F,
        FW_PARAMS_PARAM_DEV_DIAG = 0x11,
        FW_PARAMS_PARAM_DEV_MAXORDIRD_QP = 0x13, /* max supported QP IRD/ORD */
        FW_PARAMS_PARAM_DEV_MAXIRD_ADAPTER = 0x14, /* max supported adap IRD */
@@ -1123,6 +1140,12 @@ enum fw_params_param_dmaq {
        FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL = 0x11,
        FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH = 0x12,
        FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH = 0x13,
+       FW_PARAMS_PARAM_DMAQ_CONM_CTXT = 0x20,
+};
+
+enum fw_params_param_dev_phyfw {
+       FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD = 0x00,
+       FW_PARAMS_PARAM_DEV_PHYFW_VERSION = 0x01,
 };
 
 enum fw_params_param_dev_diag {
@@ -1377,6 +1400,7 @@ struct fw_iq_cmd {
 
 #define FW_IQ_CMD_IQFLINTCONGEN_S      27
 #define FW_IQ_CMD_IQFLINTCONGEN_V(x)   ((x) << FW_IQ_CMD_IQFLINTCONGEN_S)
+#define FW_IQ_CMD_IQFLINTCONGEN_F      FW_IQ_CMD_IQFLINTCONGEN_V(1U)
 
 #define FW_IQ_CMD_IQFLINTISCSIC_S      26
 #define FW_IQ_CMD_IQFLINTISCSIC_V(x)   ((x) << FW_IQ_CMD_IQFLINTISCSIC_S)
@@ -1399,6 +1423,7 @@ struct fw_iq_cmd {
 
 #define FW_IQ_CMD_FL0CONGCIF_S         11
 #define FW_IQ_CMD_FL0CONGCIF_V(x)      ((x) << FW_IQ_CMD_FL0CONGCIF_S)
+#define FW_IQ_CMD_FL0CONGCIF_F         FW_IQ_CMD_FL0CONGCIF_V(1U)
 
 #define FW_IQ_CMD_FL0ONCHIP_S          10
 #define FW_IQ_CMD_FL0ONCHIP_V(x)       ((x) << FW_IQ_CMD_FL0ONCHIP_S)
@@ -1589,6 +1614,7 @@ struct fw_eq_eth_cmd {
 
 #define FW_EQ_ETH_CMD_FETCHRO_S                22
 #define FW_EQ_ETH_CMD_FETCHRO_V(x)     ((x) << FW_EQ_ETH_CMD_FETCHRO_S)
+#define FW_EQ_ETH_CMD_FETCHRO_F                FW_EQ_ETH_CMD_FETCHRO_V(1U)
 
 #define FW_EQ_ETH_CMD_HOSTFCMODE_S     20
 #define FW_EQ_ETH_CMD_HOSTFCMODE_V(x)  ((x) << FW_EQ_ETH_CMD_HOSTFCMODE_S)
@@ -2526,13 +2552,8 @@ enum fw_port_mod_sub_type {
        FW_PORT_MOD_SUB_TYPE_TWINAX_7 = 0xC,
 };
 
-/* port stats */
-#define FW_NUM_PORT_STATS 50
-#define FW_NUM_PORT_TX_STATS 23
-#define FW_NUM_PORT_RX_STATS 27
-
 enum fw_port_stats_tx_index {
-       FW_STAT_TX_PORT_BYTES_IX,
+       FW_STAT_TX_PORT_BYTES_IX = 0,
        FW_STAT_TX_PORT_FRAMES_IX,
        FW_STAT_TX_PORT_BCAST_IX,
        FW_STAT_TX_PORT_MCAST_IX,
@@ -2554,11 +2575,12 @@ enum fw_port_stats_tx_index {
        FW_STAT_TX_PORT_PPP4_IX,
        FW_STAT_TX_PORT_PPP5_IX,
        FW_STAT_TX_PORT_PPP6_IX,
-       FW_STAT_TX_PORT_PPP7_IX
+       FW_STAT_TX_PORT_PPP7_IX,
+       FW_NUM_PORT_TX_STATS
 };
 
 enum fw_port_stat_rx_index {
-       FW_STAT_RX_PORT_BYTES_IX,
+       FW_STAT_RX_PORT_BYTES_IX = 0,
        FW_STAT_RX_PORT_FRAMES_IX,
        FW_STAT_RX_PORT_BCAST_IX,
        FW_STAT_RX_PORT_MCAST_IX,
@@ -2584,9 +2606,14 @@ enum fw_port_stat_rx_index {
        FW_STAT_RX_PORT_PPP5_IX,
        FW_STAT_RX_PORT_PPP6_IX,
        FW_STAT_RX_PORT_PPP7_IX,
-       FW_STAT_RX_PORT_LESS_64B_IX
+       FW_STAT_RX_PORT_LESS_64B_IX,
+       FW_STAT_RX_PORT_MAC_ERROR_IX,
+       FW_NUM_PORT_RX_STATS
 };
 
+/* port stats */
+#define FW_NUM_PORT_STATS (FW_NUM_PORT_TX_STATS + FW_NUM_PORT_RX_STATS)
+
 struct fw_port_stats_cmd {
        __be32 op_to_portid;
        __be32 retval_len16;
@@ -3015,7 +3042,8 @@ struct fw_hdr {
 
 enum fw_hdr_chip {
        FW_HDR_CHIP_T4,
-       FW_HDR_CHIP_T5
+       FW_HDR_CHIP_T5,
+       FW_HDR_CHIP_T6
 };
 
 #define FW_HDR_FW_VER_MAJOR_S  24
index b9d1cbac0eee3c97e76cff147df732601b83f714..32b213559b02291ddccb252daaae4e63fb5c68a0 100644 (file)
@@ -45,4 +45,9 @@
 #define T5FW_VERSION_MICRO 0x20
 #define T5FW_VERSION_BUILD 0x00
 
+#define T6FW_VERSION_MAJOR 0x01
+#define T6FW_VERSION_MINOR 0x0D
+#define T6FW_VERSION_MICRO 0x2D
+#define T6FW_VERSION_BUILD 0x00
+
 #endif
index 1d893b0b7ddfde0b4db9083176c5b636285f023a..b2b5e5bbe04c5f3b307a79ba9371bd017d9b1147 100644 (file)
@@ -1021,7 +1021,7 @@ static int closest_thres(const struct sge *s, int thres)
 static unsigned int qtimer_val(const struct adapter *adapter,
                               const struct sge_rspq *rspq)
 {
-       unsigned int timer_idx = QINTR_TIMER_IDX_GET(rspq->intr_params);
+       unsigned int timer_idx = QINTR_TIMER_IDX_G(rspq->intr_params);
 
        return timer_idx < SGE_NTIMERS
                ? adapter->sge.timer_val[timer_idx]
@@ -1086,8 +1086,8 @@ static int set_rxq_intr_params(struct adapter *adapter, struct sge_rspq *rspq,
         * Update the response queue's interrupt coalescing parameters and
         * return success.
         */
-       rspq->intr_params = (QINTR_TIMER_IDX(timer_idx) |
-                            (cnt > 0 ? QINTR_CNT_EN : 0));
+       rspq->intr_params = (QINTR_TIMER_IDX_V(timer_idx) |
+                            QINTR_CNT_EN_V(cnt > 0));
        return 0;
 }
 
@@ -1439,7 +1439,7 @@ static int cxgb4vf_get_coalesce(struct net_device *dev,
 
        coalesce->rx_coalesce_usecs = qtimer_val(adapter, rspq);
        coalesce->rx_max_coalesced_frames =
-               ((rspq->intr_params & QINTR_CNT_EN)
+               ((rspq->intr_params & QINTR_CNT_EN_F)
                 ? adapter->sge.counter_val[rspq->pktcnt_idx]
                 : 0);
        return 0;
@@ -2393,8 +2393,9 @@ static inline void init_rspq(struct sge_rspq *rspq, u8 timer_idx,
                             u8 pkt_cnt_idx, unsigned int size,
                             unsigned int iqe_size)
 {
-       rspq->intr_params = (QINTR_TIMER_IDX(timer_idx) |
-                            (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0));
+       rspq->intr_params = (QINTR_TIMER_IDX_V(timer_idx) |
+                            (pkt_cnt_idx < SGE_NCOUNTERS ?
+                             QINTR_CNT_EN_F : 0));
        rspq->pktcnt_idx = (pkt_cnt_idx < SGE_NCOUNTERS
                            ? pkt_cnt_idx
                            : 0);
index 482f6de6817d47e77e0b3ef648d0239554ef9561..ad53e5ad2acd05afa1b94c09f7e2c1be8f4599a6 100644 (file)
@@ -524,7 +524,7 @@ static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl)
  */
 static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
 {
-       u32 val;
+       u32 val = adapter->params.arch.sge_fl_db;
 
        /* The SGE keeps track of its Producer and Consumer Indices in terms
         * of Egress Queue Units so we can only tell it about integral numbers
@@ -532,11 +532,9 @@ static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
         */
        if (fl->pend_cred >= FL_PER_EQ_UNIT) {
                if (is_t4(adapter->params.chip))
-                       val = PIDX_V(fl->pend_cred / FL_PER_EQ_UNIT);
+                       val |= PIDX_V(fl->pend_cred / FL_PER_EQ_UNIT);
                else
-                       val = PIDX_T5_V(fl->pend_cred / FL_PER_EQ_UNIT) |
-                             DBTYPE_F;
-               val |= DBPRIO_F;
+                       val |= PIDX_T5_V(fl->pend_cred / FL_PER_EQ_UNIT);
 
                /* Make sure all memory writes to the Free List queue are
                 * committed before we tell the hardware about them.
@@ -1084,7 +1082,7 @@ static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *tq,
  * Figure out what HW csum a packet wants and return the appropriate control
  * bits.
  */
-static u64 hwcsum(const struct sk_buff *skb)
+static u64 hwcsum(enum chip_type chip, const struct sk_buff *skb)
 {
        int csum_type;
        const struct iphdr *iph = ip_hdr(skb);
@@ -1100,7 +1098,7 @@ nocsum:
                         * unknown protocol, disable HW csum
                         * and hope a bad packet is detected
                         */
-                       return TXPKT_L4CSUM_DIS;
+                       return TXPKT_L4CSUM_DIS_F;
                }
        } else {
                /*
@@ -1116,16 +1114,21 @@ nocsum:
                        goto nocsum;
        }
 
-       if (likely(csum_type >= TX_CSUM_TCPIP))
-               return TXPKT_CSUM_TYPE(csum_type) |
-                       TXPKT_IPHDR_LEN(skb_network_header_len(skb)) |
-                       TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN);
-       else {
+       if (likely(csum_type >= TX_CSUM_TCPIP)) {
+               u64 hdr_len = TXPKT_IPHDR_LEN_V(skb_network_header_len(skb));
+               int eth_hdr_len = skb_network_offset(skb) - ETH_HLEN;
+
+               if (chip <= CHELSIO_T5)
+                       hdr_len |= TXPKT_ETHHDR_LEN_V(eth_hdr_len);
+               else
+                       hdr_len |= T6_TXPKT_ETHHDR_LEN_V(eth_hdr_len);
+               return TXPKT_CSUM_TYPE_V(csum_type) | hdr_len;
+       } else {
                int start = skb_transport_offset(skb);
 
-               return TXPKT_CSUM_TYPE(csum_type) |
-                       TXPKT_CSUM_START(start) |
-                       TXPKT_CSUM_LOC(start + skb->csum_offset);
+               return TXPKT_CSUM_TYPE_V(csum_type) |
+                       TXPKT_CSUM_START_V(start) |
+                       TXPKT_CSUM_LOC_V(start + skb->csum_offset);
        }
 }
 
@@ -1160,7 +1163,7 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        u32 wr_mid;
        u64 cntrl, *end;
-       int qidx, credits;
+       int qidx, credits, max_pkt_len;
        unsigned int flits, ndesc;
        struct adapter *adapter;
        struct sge_eth_txq *txq;
@@ -1183,6 +1186,13 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
        if (unlikely(skb->len < fw_hdr_copy_len))
                goto out_free;
 
+       /* Discard the packet if the length is greater than mtu */
+       max_pkt_len = ETH_HLEN + dev->mtu;
+       if (skb_vlan_tag_present(skb))
+               max_pkt_len += VLAN_HLEN;
+       if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
+               goto out_free;
+
        /*
         * Figure out which TX Queue we're going to use.
         */
@@ -1281,29 +1291,35 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
                 * Fill in the LSO CPL message.
                 */
                lso->lso_ctrl =
-                       cpu_to_be32(LSO_OPCODE(CPL_TX_PKT_LSO) |
-                                   LSO_FIRST_SLICE |
-                                   LSO_LAST_SLICE |
-                                   LSO_IPV6(v6) |
-                                   LSO_ETHHDR_LEN(eth_xtra_len/4) |
-                                   LSO_IPHDR_LEN(l3hdr_len/4) |
-                                   LSO_TCPHDR_LEN(tcp_hdr(skb)->doff));
+                       cpu_to_be32(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
+                                   LSO_FIRST_SLICE_F |
+                                   LSO_LAST_SLICE_F |
+                                   LSO_IPV6_V(v6) |
+                                   LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
+                                   LSO_IPHDR_LEN_V(l3hdr_len / 4) |
+                                   LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
                lso->ipid_ofst = cpu_to_be16(0);
                lso->mss = cpu_to_be16(ssi->gso_size);
                lso->seqno_offset = cpu_to_be32(0);
                if (is_t4(adapter->params.chip))
                        lso->len = cpu_to_be32(skb->len);
                else
-                       lso->len = cpu_to_be32(LSO_T5_XFER_SIZE(skb->len));
+                       lso->len = cpu_to_be32(LSO_T5_XFER_SIZE_V(skb->len));
 
                /*
                 * Set up TX Packet CPL pointer, control word and perform
                 * accounting.
                 */
                cpl = (void *)(lso + 1);
-               cntrl = (TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
-                        TXPKT_IPHDR_LEN(l3hdr_len) |
-                        TXPKT_ETHHDR_LEN(eth_xtra_len));
+
+               if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
+                       cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+               else
+                       cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+
+               cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
+                                          TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
+                        TXPKT_IPHDR_LEN_V(l3hdr_len);
                txq->tso++;
                txq->tx_cso += ssi->gso_segs;
        } else {
@@ -1320,10 +1336,11 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
                 */
                cpl = (void *)(wr + 1);
                if (skb->ip_summed == CHECKSUM_PARTIAL) {
-                       cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS;
+                       cntrl = hwcsum(adapter->params.chip, skb) |
+                               TXPKT_IPCSUM_DIS_F;
                        txq->tx_cso++;
                } else
-                       cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS;
+                       cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
        }
 
        /*
@@ -1332,15 +1349,15 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
         */
        if (skb_vlan_tag_present(skb)) {
                txq->vlan_ins++;
-               cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(skb_vlan_tag_get(skb));
+               cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
        }
 
        /*
         * Fill in the TX Packet CPL message header.
         */
-       cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE(CPL_TX_PKT_XT) |
-                                TXPKT_INTF(pi->port_id) |
-                                TXPKT_PF(0));
+       cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
+                                TXPKT_INTF_V(pi->port_id) |
+                                TXPKT_PF_V(0));
        cpl->pack = cpu_to_be16(0);
        cpl->len = cpu_to_be16(skb->len);
        cpl->ctrl1 = cpu_to_be64(cntrl);
@@ -1663,7 +1680,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
 static inline bool is_new_response(const struct rsp_ctrl *rc,
                                   const struct sge_rspq *rspq)
 {
-       return RSPD_GEN(rc->type_gen) == rspq->gen;
+       return ((rc->type_gen >> RSPD_GEN_S) & 0x1) == rspq->gen;
 }
 
 /**
@@ -1752,8 +1769,8 @@ static int process_responses(struct sge_rspq *rspq, int budget)
                 * SGE.
                 */
                dma_rmb();
-               rsp_type = RSPD_TYPE(rc->type_gen);
-               if (likely(rsp_type == RSP_TYPE_FLBUF)) {
+               rsp_type = RSPD_TYPE_G(rc->type_gen);
+               if (likely(rsp_type == RSPD_TYPE_FLBUF_X)) {
                        struct page_frag *fp;
                        struct pkt_gl gl;
                        const struct rx_sw_desc *sdesc;
@@ -1764,7 +1781,7 @@ static int process_responses(struct sge_rspq *rspq, int budget)
                         * If we get a "new buffer" message from the SGE we
                         * need to move on to the next Free List buffer.
                         */
-                       if (len & RSPD_NEWBUF) {
+                       if (len & RSPD_NEWBUF_F) {
                                /*
                                 * We get one "new buffer" message when we
                                 * first start up a queue so we need to ignore
@@ -1775,7 +1792,7 @@ static int process_responses(struct sge_rspq *rspq, int budget)
                                                     1);
                                        rspq->offset = 0;
                                }
-                               len = RSPD_LEN(len);
+                               len = RSPD_LEN_G(len);
                        }
                        gl.tot_len = len;
 
@@ -1818,10 +1835,10 @@ static int process_responses(struct sge_rspq *rspq, int budget)
                                rspq->offset += ALIGN(fp->size, s->fl_align);
                        else
                                restore_rx_bufs(&gl, &rxq->fl, frag);
-               } else if (likely(rsp_type == RSP_TYPE_CPL)) {
+               } else if (likely(rsp_type == RSPD_TYPE_CPL_X)) {
                        ret = rspq->handler(rspq, rspq->cur_desc, NULL);
                } else {
-                       WARN_ON(rsp_type > RSP_TYPE_CPL);
+                       WARN_ON(rsp_type > RSPD_TYPE_CPL_X);
                        ret = 0;
                }
 
@@ -1833,7 +1850,7 @@ static int process_responses(struct sge_rspq *rspq, int budget)
                         */
                        const int NOMEM_TIMER_IDX = SGE_NTIMERS-1;
                        rspq->next_intr_params =
-                               QINTR_TIMER_IDX(NOMEM_TIMER_IDX);
+                               QINTR_TIMER_IDX_V(NOMEM_TIMER_IDX);
                        break;
                }
 
@@ -1875,7 +1892,7 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
                intr_params = rspq->next_intr_params;
                rspq->next_intr_params = rspq->intr_params;
        } else
-               intr_params = QINTR_TIMER_IDX(SGE_TIMER_UPD_CIDX);
+               intr_params = QINTR_TIMER_IDX_V(SGE_TIMER_UPD_CIDX);
 
        if (unlikely(work_done == 0))
                rspq->unhandled_irqs++;
@@ -1936,10 +1953,10 @@ static unsigned int process_intrq(struct adapter *adapter)
                 * never happen ...
                 */
                dma_rmb();
-               if (unlikely(RSPD_TYPE(rc->type_gen) != RSP_TYPE_INTR)) {
+               if (unlikely(RSPD_TYPE_G(rc->type_gen) != RSPD_TYPE_INTR_X)) {
                        dev_err(adapter->pdev_dev,
                                "Unexpected INTRQ response type %d\n",
-                               RSPD_TYPE(rc->type_gen));
+                               RSPD_TYPE_G(rc->type_gen));
                        continue;
                }
 
@@ -1951,7 +1968,7 @@ static unsigned int process_intrq(struct adapter *adapter)
                 * want to either make them fatal and/or conditionalized under
                 * DEBUG.
                 */
-               qid = RSPD_QID(be32_to_cpu(rc->pldbuflen_qid));
+               qid = RSPD_QID_G(be32_to_cpu(rc->pldbuflen_qid));
                iq_idx = IQ_IDX(s, qid);
                if (unlikely(iq_idx >= MAX_INGQ)) {
                        dev_err(adapter->pdev_dev,
@@ -2154,8 +2171,8 @@ static void __iomem *bar2_address(struct adapter *adapter,
        u64 bar2_qoffset;
        int ret;
 
-       ret = t4_bar2_sge_qregs(adapter, qid, qtype,
-                               &bar2_qoffset, pbar2_qid);
+       ret = t4vf_bar2_sge_qregs(adapter, qid, qtype,
+                                 &bar2_qoffset, pbar2_qid);
        if (ret)
                return NULL;
 
@@ -2239,12 +2256,18 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
        cmd.iqaddr = cpu_to_be64(rspq->phys_addr);
 
        if (fl) {
+               enum chip_type chip =
+                       CHELSIO_CHIP_VERSION(adapter->params.chip);
                /*
                 * Allocate the ring for the hardware free list (with space
                 * for its status page) along with the associated software
                 * descriptor ring.  The free list size needs to be a multiple
-                * of the Egress Queue Unit.
+                * of the Egress Queue Unit and at least 2 Egress Units larger
+                * than the SGE's Egress Congestion Threshold
+                * (fl_starve_thres - 1).
                 */
+               if (fl->size < s->fl_starve_thres - 1 + 2 * FL_PER_EQ_UNIT)
+                       fl->size = s->fl_starve_thres - 1 + 2 * FL_PER_EQ_UNIT;
                fl->size = roundup(fl->size, FL_PER_EQ_UNIT);
                fl->desc = alloc_ring(adapter->pdev_dev, fl->size,
                                      sizeof(__be64), sizeof(struct rx_sw_desc),
@@ -2274,7 +2297,9 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
                cmd.fl0dcaen_to_fl0cidxfthresh =
                        cpu_to_be16(
                                FW_IQ_CMD_FL0FBMIN_V(SGE_FETCHBURSTMIN_64B) |
-                               FW_IQ_CMD_FL0FBMAX_V(SGE_FETCHBURSTMAX_512B));
+                               FW_IQ_CMD_FL0FBMAX_V((chip <= CHELSIO_T5) ?
+                                                    FETCHBURSTMAX_512B_X :
+                                                    FETCHBURSTMAX_256B_X));
                cmd.fl0size = cpu_to_be16(flsz);
                cmd.fl0addr = cpu_to_be64(fl->addr);
        }
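
The free-list sizing rule added above (never smaller than fl_starve_thres - 1 plus two Egress Queue Units, then rounded up to a whole unit) is easy to sanity-check in isolation. A standalone sketch with illustrative numbers; FL_PER_EQ_UNIT is 8 when a 64-byte Egress Queue Unit holds 8-byte free-list pointers:

#include <stdio.h>

#define FL_PER_EQ_UNIT 8        /* 64B EQ unit / 8B free-list pointer */

static unsigned int roundup_to(unsigned int x, unsigned int unit)
{
        return ((x + unit - 1) / unit) * unit;
}

int main(void)
{
        unsigned int fl_starve_thres = 68;      /* illustrative threshold */
        unsigned int size = 64;                 /* requested FL entries */

        if (size < fl_starve_thres - 1 + 2 * FL_PER_EQ_UNIT)
                size = fl_starve_thres - 1 + 2 * FL_PER_EQ_UNIT;  /* 83 */
        size = roundup_to(size, FL_PER_EQ_UNIT);                  /* 88 */

        printf("free-list size = %u entries\n", size);
        return 0;
}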
index b9debb4f29a355a54b6304ea39fa717a3b33cc14..88b8981b47517ecf302667751ee86e8495d118ba 100644 (file)
@@ -51,6 +51,7 @@
  */
 #define CHELSIO_T4             0x4
 #define CHELSIO_T5             0x5
+#define CHELSIO_T6             0x6
 
 enum chip_type {
        T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1),
@@ -156,6 +157,12 @@ struct vpd_params {
        u32 cclk;                       /* Core Clock (KHz) */
 };
 
+/* Stores chip specific parameters */
+struct arch_specific_params {
+       u32 sge_fl_db;
+       u16 mps_tcam_size;
+};
+
 /*
  * Global Receive Side Scaling (RSS) parameters in host-native format.
  */
@@ -215,6 +222,7 @@ struct adapter_params {
        struct vpd_params vpd;          /* Vital Product Data */
        struct rss_params rss;          /* Receive Side Scaling */
        struct vf_resources vfres;      /* Virtual Function Resource limits */
+       struct arch_specific_params arch; /* chip specific params */
        enum chip_type chip;            /* chip code */
        u8 nports;                      /* # of Ethernet "ports" */
 };
@@ -284,11 +292,11 @@ int t4vf_fw_reset(struct adapter *);
 int t4vf_set_params(struct adapter *, unsigned int, const u32 *, const u32 *);
 
 enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS };
-int t4_bar2_sge_qregs(struct adapter *adapter,
-                     unsigned int qid,
-                     enum t4_bar2_qtype qtype,
-                     u64 *pbar2_qoffset,
-                     unsigned int *pbar2_qid);
+int t4vf_bar2_sge_qregs(struct adapter *adapter,
+                       unsigned int qid,
+                       enum t4_bar2_qtype qtype,
+                       u64 *pbar2_qoffset,
+                       unsigned int *pbar2_qid);
 
 int t4vf_get_sge_params(struct adapter *);
 int t4vf_get_vpd_params(struct adapter *);
index 966ee900ed00bdad79d24fdd6653665230272e87..0db6dc9e9ed25f41273f9767ab470033be40ec5e 100644 (file)
@@ -428,7 +428,7 @@ int t4vf_set_params(struct adapter *adapter, unsigned int nparams,
 }
 
 /**
- *     t4_bar2_sge_qregs - return BAR2 SGE Queue register information
+ *     t4vf_bar2_sge_qregs - return BAR2 SGE Queue register information
  *     @adapter: the adapter
  *     @qid: the Queue ID
  *     @qtype: the Ingress or Egress type for @qid
@@ -452,11 +452,11 @@ int t4vf_set_params(struct adapter *adapter, unsigned int nparams,
  *     Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
  *     then these "Inferred Queue ID" register may not be used.
  *     then these "Inferred Queue ID" registers may not be used.
-int t4_bar2_sge_qregs(struct adapter *adapter,
-                     unsigned int qid,
-                     enum t4_bar2_qtype qtype,
-                     u64 *pbar2_qoffset,
-                     unsigned int *pbar2_qid)
+int t4vf_bar2_sge_qregs(struct adapter *adapter,
+                       unsigned int qid,
+                       enum t4_bar2_qtype qtype,
+                       u64 *pbar2_qoffset,
+                       unsigned int *pbar2_qid)
 {
        unsigned int page_shift, page_size, qpp_shift, qpp_mask;
        u64 bar2_page_offset, bar2_qoffset;
@@ -1191,9 +1191,7 @@ int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free,
        unsigned nfilters = 0;
        unsigned int rem = naddr;
        struct fw_vi_mac_cmd cmd, rpl;
-       unsigned int max_naddr = is_t4(adapter->params.chip) ?
-                                NUM_MPS_CLS_SRAM_L_INSTANCES :
-                                NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+       unsigned int max_naddr = adapter->params.arch.mps_tcam_size;
 
        if (naddr > max_naddr)
                return -EINVAL;
@@ -1285,9 +1283,7 @@ int t4vf_change_mac(struct adapter *adapter, unsigned int viid,
        struct fw_vi_mac_exact *p = &cmd.u.exact[0];
        size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
                                             u.exact[1]), 16);
-       unsigned int max_naddr = is_t4(adapter->params.chip) ?
-                                NUM_MPS_CLS_SRAM_L_INSTANCES :
-                                NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+       unsigned int max_mac_addr = adapter->params.arch.mps_tcam_size;
 
        /*
         * If this is a new allocation, determine whether it should be
@@ -1310,7 +1306,7 @@ int t4vf_change_mac(struct adapter *adapter, unsigned int viid,
        if (ret == 0) {
                p = &rpl.u.exact[0];
                ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
-               if (ret >= max_naddr)
+               if (ret >= max_mac_addr)
                        ret = -ENOMEM;
        }
        return ret;
@@ -1590,11 +1586,25 @@ int t4vf_prep_adapter(struct adapter *adapter)
        switch (CHELSIO_PCI_ID_VER(adapter->pdev->device)) {
        case CHELSIO_T4:
                adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, 0);
+               adapter->params.arch.sge_fl_db = DBPRIO_F;
+               adapter->params.arch.mps_tcam_size =
+                               NUM_MPS_CLS_SRAM_L_INSTANCES;
                break;
 
        case CHELSIO_T5:
                chipid = REV_G(t4_read_reg(adapter, PL_VF_REV_A));
                adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, chipid);
+               adapter->params.arch.sge_fl_db = DBPRIO_F | DBTYPE_F;
+               adapter->params.arch.mps_tcam_size =
+                               NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+               break;
+
+       case CHELSIO_T6:
+               chipid = REV_G(t4_read_reg(adapter, PL_VF_REV_A));
+               adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, chipid);
+               adapter->params.arch.sge_fl_db = 0;
+               adapter->params.arch.mps_tcam_size =
+                               NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
                break;
        }
 
index 0be6850be8a2383e11240093bd421dfbd2595595..d106186f4f4abfbcf5b662bc80870b4ae4ab4216 100644 (file)
@@ -5,7 +5,7 @@
 #include <linux/in.h>
 #include <linux/types.h>
 #include <linux/skbuff.h>
-#include <net/flow_keys.h>
+#include <net/flow_dissector.h>
 #include "enic_res.h"
 #include "enic_clsf.h"
 
  *     @rq: rq number to steer to
  *
  * This function returns the filter_id (hardware_id) of the filter
- * added. In case of error it returns an negative number.
+ * added. In case of error it returns a negative number.
  */
 int enic_addfltr_5t(struct enic *enic, struct flow_keys *keys, u16 rq)
 {
        int res;
        struct filter data;
 
-       switch (keys->ip_proto) {
+       switch (keys->basic.ip_proto) {
        case IPPROTO_TCP:
                data.u.ipv4.protocol = PROTO_TCP;
                break;
@@ -33,10 +33,10 @@ int enic_addfltr_5t(struct enic *enic, struct flow_keys *keys, u16 rq)
                return -EPROTONOSUPPORT;
        };
        data.type = FILTER_IPV4_5TUPLE;
-       data.u.ipv4.src_addr = ntohl(keys->src);
-       data.u.ipv4.dst_addr = ntohl(keys->dst);
-       data.u.ipv4.src_port = ntohs(keys->port16[0]);
-       data.u.ipv4.dst_port = ntohs(keys->port16[1]);
+       data.u.ipv4.src_addr = ntohl(keys->addrs.v4addrs.src);
+       data.u.ipv4.dst_addr = ntohl(keys->addrs.v4addrs.dst);
+       data.u.ipv4.src_port = ntohs(keys->ports.src);
+       data.u.ipv4.dst_port = ntohs(keys->ports.dst);
        data.u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
 
        spin_lock_bh(&enic->devcmd_lock);
@@ -158,11 +158,11 @@ static struct enic_rfs_fltr_node *htbl_key_search(struct hlist_head *h,
        struct enic_rfs_fltr_node *tpos;
 
        hlist_for_each_entry(tpos, h, node)
-               if (tpos->keys.src == k->src &&
-                   tpos->keys.dst == k->dst &&
-                   tpos->keys.ports == k->ports &&
-                   tpos->keys.ip_proto == k->ip_proto &&
-                   tpos->keys.n_proto == k->n_proto)
+               if (tpos->keys.addrs.v4addrs.src == k->addrs.v4addrs.src &&
+                   tpos->keys.addrs.v4addrs.dst == k->addrs.v4addrs.dst &&
+                   tpos->keys.ports.ports == k->ports.ports &&
+                   tpos->keys.basic.ip_proto == k->basic.ip_proto &&
+                   tpos->keys.basic.n_proto == k->basic.n_proto)
                        return tpos;
        return NULL;
 }
@@ -177,9 +177,10 @@ int enic_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
        int res, i;
 
        enic = netdev_priv(dev);
-       res = skb_flow_dissect(skb, &keys);
-       if (!res || keys.n_proto != htons(ETH_P_IP) ||
-           (keys.ip_proto != IPPROTO_TCP && keys.ip_proto != IPPROTO_UDP))
+       res = skb_flow_dissect_flow_keys(skb, &keys);
+       if (!res || keys.basic.n_proto != htons(ETH_P_IP) ||
+           (keys.basic.ip_proto != IPPROTO_TCP &&
+            keys.basic.ip_proto != IPPROTO_UDP))
                return -EPROTONOSUPPORT;
 
        tbl_idx = skb_get_hash_raw(skb) & ENIC_RFS_FLW_MASK;
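
The enic hunks above track the core flow-dissector rework: the flat struct flow_keys of net/flow_keys.h becomes the grouped layout in net/flow_dissector.h (members move under .basic, .addrs.v4addrs, and .ports), and skb_flow_dissect() becomes skb_flow_dissect_flow_keys(). A kernel-context sketch of the new call shape, assuming the two-argument signature used in this series; it builds only in-tree:

#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/skbuff.h>
#include <net/flow_dissector.h>

/* Mirror of the enic check above: IPv4 carrying TCP or UDP only. */
static bool flow_is_tcp_udp_v4(const struct sk_buff *skb)
{
	struct flow_keys keys;

	if (!skb_flow_dissect_flow_keys(skb, &keys))
		return false;			/* could not dissect */

	/* the old flat keys.n_proto/keys.ip_proto now live in .basic */
	return keys.basic.n_proto == htons(ETH_P_IP) &&
	       (keys.basic.ip_proto == IPPROTO_TCP ||
		keys.basic.ip_proto == IPPROTO_UDP);
}
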
index 28d9ca675a274f9876473bcce7e6995a14e1289e..73874b2575bf1e85983feee3fa4b67ff67431d8c 100644 (file)
@@ -334,7 +334,7 @@ static int enic_grxclsrule(struct enic *enic, struct ethtool_rxnfc *cmd)
        n = htbl_fltr_search(enic, (u16)fsp->location);
        if (!n)
                return -EINVAL;
-       switch (n->keys.ip_proto) {
+       switch (n->keys.basic.ip_proto) {
        case IPPROTO_TCP:
                fsp->flow_type = TCP_V4_FLOW;
                break;
@@ -346,16 +346,16 @@ static int enic_grxclsrule(struct enic *enic, struct ethtool_rxnfc *cmd)
                break;
        }
 
-       fsp->h_u.tcp_ip4_spec.ip4src = n->keys.src;
+       fsp->h_u.tcp_ip4_spec.ip4src = flow_get_u32_src(&n->keys);
        fsp->m_u.tcp_ip4_spec.ip4src = (__u32)~0;
 
-       fsp->h_u.tcp_ip4_spec.ip4dst = n->keys.dst;
+       fsp->h_u.tcp_ip4_spec.ip4dst = flow_get_u32_dst(&n->keys);
        fsp->m_u.tcp_ip4_spec.ip4dst = (__u32)~0;
 
-       fsp->h_u.tcp_ip4_spec.psrc = n->keys.port16[0];
+       fsp->h_u.tcp_ip4_spec.psrc = n->keys.ports.src;
        fsp->m_u.tcp_ip4_spec.psrc = (__u16)~0;
 
-       fsp->h_u.tcp_ip4_spec.pdst = n->keys.port16[1];
+       fsp->h_u.tcp_ip4_spec.pdst = n->keys.ports.dst;
        fsp->m_u.tcp_ip4_spec.pdst = (__u16)~0;
 
        fsp->ring_cookie = n->rq_id;
index badff181e719692a9a94b2a5ba1c792fc2bf18fc..8966f3159bb2b1da3640e7fe9fd2b579b97457e4 100644 (file)
@@ -5189,16 +5189,16 @@ de4x5_parse_params(struct net_device *dev)
        if (strstr(p, "fdx") || strstr(p, "FDX")) lp->params.fdx = true;
 
        if (strstr(p, "autosense") || strstr(p, "AUTOSENSE")) {
-           if (strstr(p, "TP")) {
-               lp->params.autosense = TP;
-           } else if (strstr(p, "TP_NW")) {
+           if (strstr(p, "TP_NW")) {
                lp->params.autosense = TP_NW;
+           } else if (strstr(p, "TP")) {
+               lp->params.autosense = TP;
+           } else if (strstr(p, "BNC_AUI")) {
+               lp->params.autosense = BNC;
            } else if (strstr(p, "BNC")) {
                lp->params.autosense = BNC;
            } else if (strstr(p, "AUI")) {
                lp->params.autosense = AUI;
-           } else if (strstr(p, "BNC_AUI")) {
-               lp->params.autosense = BNC;
            } else if (strstr(p, "10Mb")) {
                lp->params.autosense = _10Mb;
            } else if (strstr(p, "100Mb")) {
index 2c30c0c83f984a2d41204c637bb9f2dbe797bc00..447d09272ab7ab6d3485073ffcee608dbbf622eb 100644 (file)
@@ -1115,7 +1115,7 @@ static void uli526x_timer(unsigned long data)
                                netif_carrier_off(dev);
                        }
                }
-               db->init=0;
+       db->init = 0;
 
        /* Timer active again */
        db->timer.expires = ULI526X_TIMER_WUT;
index 1274b6fdac8aace34559bb1fd1cae3ccca314a2e..cf0a5fcdaaaf06e59772d5b4530547e11c7b4819 100644 (file)
@@ -463,10 +463,8 @@ rio_open (struct net_device *dev)
                dw32(MACCtrl, dr32(MACCtrl) | AutoVLANuntagging);
        }
 
-       init_timer (&np->timer);
+       setup_timer(&np->timer, rio_timer, (unsigned long)dev);
        np->timer.expires = jiffies + 1*HZ;
-       np->timer.data = (unsigned long) dev;
-       np->timer.function = rio_timer;
        add_timer (&np->timer);
 
        /* Start Tx/Rx */
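
The dl2k hunk above folds the three-line init_timer()/.data/.function sequence into a single setup_timer() call. A plain-C model of what the helper bundles; the struct and names below are illustrative, not the kernel's timer_list:

#include <stdio.h>

/* Toy model: callback plus context stored in one call instead of
 * three separate field writes. */
struct toy_timer {
	void (*function)(unsigned long);
	unsigned long data;
};

static void toy_setup_timer(struct toy_timer *t,
			    void (*fn)(unsigned long), unsigned long data)
{
	t->function = fn;
	t->data = data;
}

static void rio_timer_cb(unsigned long data)
{
	printf("timer fired, dev=%#lx\n", data);
}

int main(void)
{
	struct toy_timer t;

	toy_setup_timer(&t, rio_timer_cb, 0x1000UL);
	t.function(t.data);	/* simulate the timer expiring */
	return 0;
}
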
index ea94a8eb6b35421e3c846ed412099cda0b460463..7108563260ae45b88e09922c6300faa26438dd7c 100644 (file)
@@ -5,6 +5,15 @@ config BE2NET
          This driver implements the NIC functionality for ServerEngines'
          10Gbps network adapter - BladeEngine.
 
+config BE2NET_HWMON
+       bool "HWMON support for be2net driver"
+       depends on BE2NET && HWMON
+       depends on !(BE2NET=y && HWMON=m)
+       default y
+       ---help---
+         Say Y here if you want to expose thermal sensor data on the
+         be2net network adapter.
+
 config BE2NET_VXLAN
         bool "VXLAN offload support on be2net driver"
         default y
index 1bf1cdce74ac3591d4a2011e6be9399c4a5cdf57..8d12b41b3b1990af468da5a38c4759fc005ba3d3 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2005 - 2014 Emulex
+ * Copyright (C) 2005 - 2015 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
 #include <linux/slab.h>
 #include <linux/u64_stats_sync.h>
 #include <linux/cpumask.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
 
 #include "be_hw.h"
 #include "be_roce.h"
 
-#define DRV_VER                        "10.6.0.1"
+#define DRV_VER                        "10.6.0.2"
 #define DRV_NAME               "be2net"
 #define BE_NAME                        "Emulex BladeEngine2"
 #define BE3_NAME               "Emulex BladeEngine3"
@@ -314,7 +316,6 @@ struct be_rx_obj {
 } ____cacheline_aligned_in_smp;
 
 struct be_drv_stats {
-       u32 be_on_die_temperature;
        u32 eth_red_drops;
        u32 dma_map_errors;
        u32 rx_drops_no_pbuf;
@@ -366,6 +367,7 @@ struct be_vf_cfg {
        u32 tx_rate;
        u32 plink_tracking;
        u32 privileges;
+       bool spoofchk;
 };
 
 enum vf_state {
@@ -382,6 +384,7 @@ enum vf_state {
 #define BE_FLAGS_SETUP_DONE                    BIT(9)
 #define BE_FLAGS_EVT_INCOMPATIBLE_SFP          BIT(10)
 #define BE_FLAGS_ERR_DETECTION_SCHEDULED       BIT(11)
+#define BE_FLAGS_OS2BMC                                BIT(12)
 
 #define BE_UC_PMAC_COUNT                       30
 #define BE_VF_UC_PMAC_COUNT                    2
@@ -426,6 +429,8 @@ struct be_resources {
        u32 vf_if_cap_flags;    /* VF if capability flags */
 };
 
+#define be_is_os2bmc_enabled(adapter) (adapter->flags & BE_FLAGS_OS2BMC)
+
 struct rss_info {
        u64 rss_flags;
        u8 rsstable[RSS_INDIR_TABLE_LEN];
@@ -433,6 +438,12 @@ struct rss_info {
        u8 rss_hkey[RSS_HASH_KEY_LEN];
 };
 
+#define BE_INVALID_DIE_TEMP    0xFF
+struct be_hwmon {
+       struct device *hwmon_dev;
+       u8 be_on_die_temp;  /* Unit: millidegree Celsius */
+};
+
 /* Macros to read/write the 'features' word of be_wrb_params structure.
  */
 #define        BE_WRB_F_BIT(name)                      BE_WRB_F_##name##_BIT
@@ -453,7 +464,8 @@ enum {
        BE_WRB_F_LSO_BIT,               /* LSO */
        BE_WRB_F_LSO6_BIT,              /* LSO6 */
        BE_WRB_F_VLAN_BIT,              /* VLAN */
-       BE_WRB_F_VLAN_SKIP_HW_BIT       /* Skip VLAN tag (workaround) */
+       BE_WRB_F_VLAN_SKIP_HW_BIT,      /* Skip VLAN tag (workaround) */
+       BE_WRB_F_OS2BMC_BIT             /* Send packet to the management ring */
 };
 
 /* The structure below provides a HW-agnostic abstraction of WRB params
@@ -514,6 +526,7 @@ struct be_adapter {
        u16 work_counter;
 
        struct delayed_work be_err_detection_work;
+       u8 err_flags;
        u32 flags;
        u32 cmd_privileges;
        /* Ethtool knobs and info */
@@ -572,8 +585,11 @@ struct be_adapter {
        u16 qnq_vid;
        u32 msg_enable;
        int be_get_temp_freq;
+       struct be_hwmon hwmon_info;
        u8 pf_number;
        struct rss_info rss_info;
+       /* Filters for packets that need to be sent to BMC */
+       u32 bmc_filt_mask;
 };
 
 #define be_physfn(adapter)             (!adapter->virtfn)
@@ -772,26 +788,36 @@ static inline bool is_ipv4_pkt(struct sk_buff *skb)
        return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
 }
 
-static inline bool be_multi_rxq(const struct be_adapter *adapter)
+#define BE_ERROR_EEH           1
+#define BE_ERROR_UE            BIT(1)
+#define BE_ERROR_FW            BIT(2)
+#define BE_ERROR_HW            (BE_ERROR_EEH | BE_ERROR_UE)
+#define BE_ERROR_ANY           (BE_ERROR_EEH | BE_ERROR_UE | BE_ERROR_FW)
+#define BE_CLEAR_ALL           0xFF
+
+static inline u8 be_check_error(struct be_adapter *adapter, u32 err_type)
 {
-       return adapter->num_rx_qs > 1;
+       return (adapter->err_flags & err_type);
 }
 
-static inline bool be_error(struct be_adapter *adapter)
+static inline void be_set_error(struct be_adapter *adapter, int err_type)
 {
-       return adapter->eeh_error || adapter->hw_error || adapter->fw_timeout;
+       struct net_device *netdev = adapter->netdev;
+
+       adapter->err_flags |= err_type;
+       netif_carrier_off(netdev);
+
+       dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
 }
 
-static inline bool be_hw_error(struct be_adapter *adapter)
+static inline void be_clear_error(struct be_adapter *adapter, int err_type)
 {
-       return adapter->eeh_error || adapter->hw_error;
+       adapter->err_flags &= ~err_type;
 }
 
-static inline void  be_clear_all_error(struct be_adapter *adapter)
+static inline bool be_multi_rxq(const struct be_adapter *adapter)
 {
-       adapter->eeh_error = false;
-       adapter->hw_error = false;
-       adapter->fw_timeout = false;
+       return adapter->num_rx_qs > 1;
 }
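
The be.h rewrite above collapses three booleans (eeh_error, hw_error, fw_timeout) into one err_flags bitmask with composable masks, so a single be_check_error(adapter, BE_ERROR_ANY) replaces ORing separate flags. A standalone model of the scheme, with userspace stand-ins for the kernel helpers:

#include <stdio.h>

#define BIT(n)		(1U << (n))
#define ERR_EEH		1		/* matches BE_ERROR_EEH */
#define ERR_UE		BIT(1)
#define ERR_FW		BIT(2)
#define ERR_HW		(ERR_EEH | ERR_UE)
#define ERR_ANY		(ERR_EEH | ERR_UE | ERR_FW)

int main(void)
{
	unsigned int err_flags = 0;

	err_flags |= ERR_FW;		/* models be_set_error(.., BE_ERROR_FW) */
	printf("any error? %u\n", err_flags & ERR_ANY);	/* non-zero */
	printf("hw error?  %u\n", err_flags & ERR_HW);	/* zero: FW only */

	err_flags &= ~ERR_FW;		/* models be_clear_error() */
	printf("cleared:   %u\n", err_flags & ERR_ANY);	/* zero again */
	return 0;
}
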
 
 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
@@ -804,6 +830,7 @@ bool be_pause_supported(struct be_adapter *adapter);
 u32 be_get_fw_log_level(struct be_adapter *adapter);
 int be_update_queues(struct be_adapter *adapter);
 int be_poll(struct napi_struct *napi, int budget);
+void be_eqd_update(struct be_adapter *adapter, bool force_update);
 
 /*
  * Internal function to initialize/cleanup the roce device.
index c5e1d0ac75f909f843dd0397ad41b85eeb26a164..9eac3227d2cabc15c2d21a4baafafc3761372560 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2005 - 2014 Emulex
+ * Copyright (C) 2005 - 2015 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -93,7 +93,7 @@ static void be_mcc_notify(struct be_adapter *adapter)
        struct be_queue_info *mccq = &adapter->mcc_obj.q;
        u32 val = 0;
 
-       if (be_error(adapter))
+       if (be_check_error(adapter, BE_ERROR_ANY))
                return;
 
        val |= mccq->id & DB_MCCQ_RING_ID_MASK;
@@ -140,6 +140,7 @@ static bool be_skip_err_log(u8 opcode, u16 base_status, u16 addl_status)
        if (base_status == MCC_STATUS_NOT_SUPPORTED ||
            base_status == MCC_STATUS_ILLEGAL_REQUEST ||
            addl_status == MCC_ADDL_STATUS_TOO_MANY_INTERFACES ||
+           addl_status == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
            (opcode == OPCODE_COMMON_WRITE_FLASHROM &&
            (base_status == MCC_STATUS_ILLEGAL_FIELD ||
             addl_status == MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH)))
@@ -191,10 +192,12 @@ static void be_async_cmd_process(struct be_adapter *adapter,
                if (base_status == MCC_STATUS_SUCCESS) {
                        struct be_cmd_resp_get_cntl_addnl_attribs *resp =
                                                        (void *)resp_hdr;
-                       adapter->drv_stats.be_on_die_temperature =
+                       adapter->hwmon_info.be_on_die_temp =
                                                resp->on_die_temperature;
                } else {
                        adapter->be_get_temp_freq = 0;
+                       adapter->hwmon_info.be_on_die_temp =
+                                               BE_INVALID_DIE_TEMP;
                }
                return;
        }
@@ -330,6 +333,21 @@ static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
        }
 }
 
+#define MGMT_ENABLE_MASK       0x4
+static void be_async_grp5_fw_control_process(struct be_adapter *adapter,
+                                            struct be_mcc_compl *compl)
+{
+       struct be_async_fw_control *evt = (struct be_async_fw_control *)compl;
+       u32 evt_dw1 = le32_to_cpu(evt->event_data_word1);
+
+       if (evt_dw1 & MGMT_ENABLE_MASK) {
+               adapter->flags |= BE_FLAGS_OS2BMC;
+               adapter->bmc_filt_mask = le32_to_cpu(evt->event_data_word2);
+       } else {
+               adapter->flags &= ~BE_FLAGS_OS2BMC;
+       }
+}
+
 static void be_async_grp5_evt_process(struct be_adapter *adapter,
                                      struct be_mcc_compl *compl)
 {
@@ -346,6 +364,10 @@ static void be_async_grp5_evt_process(struct be_adapter *adapter,
        case ASYNC_EVENT_PVID_STATE:
                be_async_grp5_pvid_state_process(adapter, compl);
                break;
+       /* Async event to disable/enable os2bmc and/or mac-learning */
+       case ASYNC_EVENT_FW_CONTROL:
+               be_async_grp5_fw_control_process(adapter, compl);
+               break;
        default:
                break;
        }
@@ -486,7 +508,7 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
        struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
 
        for (i = 0; i < mcc_timeout; i++) {
-               if (be_error(adapter))
+               if (be_check_error(adapter, BE_ERROR_ANY))
                        return -EIO;
 
                local_bh_disable();
@@ -499,7 +521,7 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
        }
        if (i == mcc_timeout) {
                dev_err(&adapter->pdev->dev, "FW not responding\n");
-               adapter->fw_timeout = true;
+               be_set_error(adapter, BE_ERROR_FW);
                return -EIO;
        }
        return status;
@@ -538,7 +560,7 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
        u32 ready;
 
        do {
-               if (be_error(adapter))
+               if (be_check_error(adapter, BE_ERROR_ANY))
                        return -EIO;
 
                ready = ioread32(db);
@@ -551,7 +573,7 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
 
                if (msecs > 4000) {
                        dev_err(&adapter->pdev->dev, "FW not responding\n");
-                       adapter->fw_timeout = true;
+                       be_set_error(adapter, BE_ERROR_FW);
                        be_detect_error(adapter);
                        return -1;
                }
@@ -1457,7 +1479,7 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
                *if_handle = le32_to_cpu(resp->interface_id);
 
                /* Hack to retrieve VF's pmac-id on BE3 */
-               if (BE3_chip(adapter) && !be_physfn(adapter))
+               if (BE3_chip(adapter) && be_virtfn(adapter))
                        adapter->pmac_id[0] = le32_to_cpu(resp->pmac_id);
        }
        return status;
@@ -3156,7 +3178,7 @@ int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom)
 }
 
 int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
-                         u32 domain, u16 intf_id, u16 hsw_mode)
+                         u32 domain, u16 intf_id, u16 hsw_mode, u8 spoofchk)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_set_hsw_config *req;
@@ -3192,6 +3214,14 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
                              ctxt, hsw_mode);
        }
 
+       /* Enable/disable both mac and vlan spoof checking */
+       if (!BEx_chip(adapter) && spoofchk) {
+               AMAP_SET_BITS(struct amap_set_hsw_context, mac_spoofchk,
+                             ctxt, spoofchk);
+               AMAP_SET_BITS(struct amap_set_hsw_context, vlan_spoofchk,
+                             ctxt, spoofchk);
+       }
+
        be_dws_cpu_to_le(req->context, sizeof(req->context));
        status = be_mcc_notify_wait(adapter);
 
@@ -3202,7 +3232,7 @@ err:
 
 /* Get Hyper switch config */
 int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
-                         u32 domain, u16 intf_id, u8 *mode)
+                         u32 domain, u16 intf_id, u8 *mode, bool *spoofchk)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_get_hsw_config *req;
@@ -3250,6 +3280,10 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
                if (mode)
                        *mode = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
                                              port_fwd_type, &resp->context);
+               if (spoofchk)
+                       *spoofchk =
+                               AMAP_GET_BITS(struct amap_get_hsw_resp_context,
+                                             spoofchk, &resp->context);
        }
 
 err:
@@ -3261,7 +3295,7 @@ static bool be_is_wol_excluded(struct be_adapter *adapter)
 {
        struct pci_dev *pdev = adapter->pdev;
 
-       if (!be_physfn(adapter))
+       if (be_virtfn(adapter))
                return true;
 
        switch (pdev->subsystem_device) {
index 1ec22300e2542f3f2382c8830153f917f19f6d35..2716e6f30d9a0949633b40dc9864196c7465fa3a 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2005 - 2014 Emulex
+ * Copyright (C) 2005 - 2015 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -65,7 +65,8 @@ enum mcc_base_status {
 enum mcc_addl_status {
        MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES = 0x16,
        MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH = 0x4d,
-       MCC_ADDL_STATUS_TOO_MANY_INTERFACES = 0x4a
+       MCC_ADDL_STATUS_TOO_MANY_INTERFACES = 0x4a,
+       MCC_ADDL_STATUS_INSUFFICIENT_VLANS = 0xab
 };
 
 #define CQE_BASE_STATUS_MASK           0xFFFF
@@ -104,6 +105,7 @@ struct be_mcc_compl {
 #define ASYNC_DEBUG_EVENT_TYPE_QNQ     1
 #define ASYNC_EVENT_CODE_SLIPORT       0x11
 #define ASYNC_EVENT_PORT_MISCONFIG     0x9
+#define ASYNC_EVENT_FW_CONTROL         0x5
 
 enum {
        LINK_DOWN       = 0x0,
@@ -180,6 +182,22 @@ struct be_async_event_misconfig_port {
        u32 flags;
 } __packed;
 
+#define BMC_FILT_BROADCAST_ARP                         BIT(0)
+#define BMC_FILT_BROADCAST_DHCP_CLIENT                 BIT(1)
+#define BMC_FILT_BROADCAST_DHCP_SERVER                 BIT(2)
+#define BMC_FILT_BROADCAST_NET_BIOS                    BIT(3)
+#define BMC_FILT_BROADCAST                             BIT(7)
+#define BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER            BIT(8)
+#define BMC_FILT_MULTICAST_IPV6_RA                     BIT(9)
+#define BMC_FILT_MULTICAST_IPV6_RAS                    BIT(10)
+#define BMC_FILT_MULTICAST                             BIT(15)
+struct be_async_fw_control {
+       u32 event_data_word1;
+       u32 event_data_word2;
+       u32 evt_tag;
+       u32 event_data_word4;
+} __packed;
+
 struct be_mcc_mailbox {
        struct be_mcc_wrb wrb;
        struct be_mcc_compl compl;
@@ -1109,10 +1127,6 @@ struct be_cmd_req_query_fw_cfg {
        u32 rsvd[31];
 };
 
-/* ASIC revisions */
-#define ASIC_REV_B0            0x10
-#define ASIC_REV_P2            0x11
-
 struct be_cmd_resp_query_fw_cfg {
        struct be_cmd_resp_hdr hdr;
        u32 be_config_number;
@@ -1745,18 +1759,24 @@ struct be_cmd_req_set_mac_list {
 #define PORT_FWD_TYPE_VEPA             0x3
 #define PORT_FWD_TYPE_VEB              0x2
 
+#define ENABLE_MAC_SPOOFCHK            0x2
+#define DISABLE_MAC_SPOOFCHK           0x3
+
 struct amap_set_hsw_context {
        u8 interface_id[16];
-       u8 rsvd0[14];
+       u8 rsvd0[8];
+       u8 mac_spoofchk[2];
+       u8 rsvd1[4];
        u8 pvid_valid;
        u8 pport;
-       u8 rsvd1[6];
+       u8 rsvd2[6];
        u8 port_fwd_type[3];
-       u8 rsvd2[7];
+       u8 rsvd3[5];
+       u8 vlan_spoofchk[2];
        u8 pvid[16];
-       u8 rsvd3[32];
        u8 rsvd4[32];
        u8 rsvd5[32];
+       u8 rsvd6[32];
 } __packed;
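
In the amap_* layouts above, each u8 array element stands for a single bit, so widening mac_spoofchk/vlan_spoofchk meant shrinking the neighboring rsvd fields to keep the total width constant. A standalone model of the bit-offset addressing that AMAP_SET_BITS performs; the helper and offsets below are illustrative, and fields crossing a dword boundary are not handled:

#include <stdint.h>
#include <stdio.h>

/* Model: write a value of a given bit width at a bit offset inside a
 * dword array, the way AMAP_SET_BITS addresses amap_* layouts. */
static void set_bits(uint32_t *ctxt, unsigned int off,
		     unsigned int width, uint32_t val)
{
	uint32_t mask = (width == 32) ? ~0U : ((1U << width) - 1);

	ctxt[off / 32] &= ~(mask << (off % 32));
	ctxt[off / 32] |= (val & mask) << (off % 32);
}

int main(void)
{
	uint32_t ctxt[8] = { 0 };

	/* mac_spoofchk sits at bit 24, width 2, per the struct above:
	 * interface_id[16] + rsvd0[8] precede it. */
	set_bits(ctxt, 24, 2, 0x2 /* ENABLE_MAC_SPOOFCHK */);
	printf("dword0 = 0x%08x\n", ctxt[0]);	/* 0x02000000 */
	return 0;
}
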
 
 struct be_cmd_req_set_hsw_config {
@@ -1774,11 +1794,13 @@ struct amap_get_hsw_req_context {
 struct amap_get_hsw_resp_context {
        u8 rsvd0[6];
        u8 port_fwd_type[3];
-       u8 rsvd1[7];
+       u8 rsvd1[5];
+       u8 spoofchk;
+       u8 rsvd2;
        u8 pvid[16];
-       u8 rsvd2[32];
        u8 rsvd3[32];
        u8 rsvd4[32];
+       u8 rsvd5[32];
 } __packed;
 
 struct be_cmd_req_get_hsw_config {
@@ -2334,9 +2356,9 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, u8 mac_count,
                        u32 domain);
 int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom);
 int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, u32 domain,
-                         u16 intf_id, u16 hsw_mode);
+                         u16 intf_id, u16 hsw_mode, u8 spoofchk);
 int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, u32 domain,
-                         u16 intf_id, u8 *mode);
+                         u16 intf_id, u8 *mode, bool *spoofchk);
 int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter);
 int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level);
 int be_cmd_get_fw_log_level(struct be_adapter *adapter);
index 2835dee5dc3930cc5d1d09ec958bd2557228a2cd..b2476dbfd103120affb5e216a31d304dda570a67 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2005 - 2014 Emulex
+ * Copyright (C) 2005 - 2015 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -123,7 +123,6 @@ static const struct be_ethtool_stat et_stats[] = {
        {DRVSTAT_INFO(dma_map_errors)},
        /* Number of packets dropped due to random early drop function */
        {DRVSTAT_INFO(eth_red_drops)},
-       {DRVSTAT_INFO(be_on_die_temperature)},
        {DRVSTAT_INFO(rx_roce_bytes_lsd)},
        {DRVSTAT_INFO(rx_roce_bytes_msd)},
        {DRVSTAT_INFO(rx_roce_frames)},
@@ -368,6 +367,14 @@ static int be_set_coalesce(struct net_device *netdev,
                aic++;
        }
 
+       /* For Skyhawk, the EQD setting happens via EQ_DB when AIC is enabled.
+        * When AIC is disabled, persistently force-set the EQD value via the
+        * FW cmd, so that we don't have to calculate the delay multiplier
+        * encoding value each time EQ_DB is rung.
+        */
+       if (!et->use_adaptive_rx_coalesce && skyhawk_chip(adapter))
+               be_eqd_update(adapter, true);
+
        return 0;
 }
 
index 48840889db6226325bb0f05de610842b255efc82..c684bb32b487e95c6bf88f253f335817452de442 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2005 - 2014 Emulex
+ * Copyright (C) 2005 - 2015 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
 #define DB_EQ_NUM_POPPED_SHIFT         (16)    /* bits 16 - 28 */
 /* Rearm bit */
 #define DB_EQ_REARM_SHIFT              (29)    /* bit 29 */
+/* Rearm to interrupt delay encoding */
+#define DB_EQ_R2I_DLY_SHIFT            (30)    /* bits 30 - 31 */
+
+/* Rearm to interrupt (R2I) delay multiplier encoding represents 3 different
+ * values configured in the CEV_REARM2IRPT_DLY_MULT_CSR register. This value
+ * is programmed by the host driver while ringing an EQ doorbell (EQ_DB) if a
+ * delay between rearming the EQ and the next interrupt on this EQ is desired.
+ */
+#define        R2I_DLY_ENC_0                   0       /* No delay */
+#define        R2I_DLY_ENC_1                   1       /* maps to 160us EQ delay */
+#define        R2I_DLY_ENC_2                   2       /* maps to 96us EQ delay */
+#define        R2I_DLY_ENC_3                   3       /* maps to 48us EQ delay */
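
Given the shifts defined above, the R2I encoding rides in bits 30-31 of the same 32-bit EQ doorbell word as the ring id, num-popped count, and rearm bit. A standalone sketch of how be_eq_notify() composes that word; the qid handling is simplified (the real code also applies DB_EQ_RING_ID_MASK and an extended-id field):

#include <stdint.h>
#include <stdio.h>

#define DB_EQ_NUM_POPPED_SHIFT	16
#define DB_EQ_REARM_SHIFT	29
#define DB_EQ_R2I_DLY_SHIFT	30

/* Compose an EQ doorbell value per the layout above. */
static uint32_t eq_db(uint16_t qid, int rearm,
		      uint16_t num_popped, uint32_t dly_enc)
{
	uint32_t val = qid;	/* ring id in the low bits (simplified) */

	if (rearm)
		val |= 1U << DB_EQ_REARM_SHIFT;
	val |= (uint32_t)num_popped << DB_EQ_NUM_POPPED_SHIFT;
	val |= dly_enc << DB_EQ_R2I_DLY_SHIFT;	/* R2I_DLY_ENC_0..3 */
	return val;
}

int main(void)
{
	/* rearm EQ 5, 3 events popped, ~96us delay encoding */
	printf("0x%08x\n", eq_db(5, 1, 3, 2 /* R2I_DLY_ENC_2 */));
	return 0;
}
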
 
 /********* Compl Q door bell *************/
 #define DB_CQ_OFFSET                   0x120
index e43cc8a73ea7e85a927443c077c18ce6c673751a..c0f34845cf5912630a56f753d0c5e103d8f14daf 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2005 - 2014 Emulex
+ * Copyright (C) 2005 - 2015 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -179,7 +179,7 @@ static void be_intr_set(struct be_adapter *adapter, bool enable)
        if (lancer_chip(adapter))
                return;
 
-       if (adapter->eeh_error)
+       if (be_check_error(adapter, BE_ERROR_EEH))
                return;
 
        status = be_cmd_intr_set(adapter, enable);
@@ -191,6 +191,9 @@ static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
 {
        u32 val = 0;
 
+       if (be_check_error(adapter, BE_ERROR_HW))
+               return;
+
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;
 
@@ -203,6 +206,9 @@ static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
 {
        u32 val = 0;
 
+       if (be_check_error(adapter, BE_ERROR_HW))
+               return;
+
        val |= txo->q.id & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
 
@@ -211,14 +217,15 @@ static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
 }
 
 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
-                        bool arm, bool clear_int, u16 num_popped)
+                        bool arm, bool clear_int, u16 num_popped,
+                        u32 eq_delay_mult_enc)
 {
        u32 val = 0;
 
        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
 
-       if (adapter->eeh_error)
+       if (be_check_error(adapter, BE_ERROR_HW))
                return;
 
        if (arm)
@@ -227,6 +234,7 @@ static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
+       val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
 }
 
@@ -238,7 +246,7 @@ void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);
 
-       if (adapter->eeh_error)
+       if (be_check_error(adapter, BE_ERROR_HW))
                return;
 
        if (arm)
@@ -662,6 +670,8 @@ void be_link_status_update(struct be_adapter *adapter, u8 link_status)
                netif_carrier_on(netdev);
        else
                netif_carrier_off(netdev);
+
+       netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
 }
 
 static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
@@ -810,6 +820,8 @@ static void wrb_fill_hdr(struct be_adapter *adapter,
 
        SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
        SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
+       SET_TX_WRB_HDR_BITS(mgmt, hdr,
+                           BE_WRB_F_GET(wrb_params->features, OS2BMC));
 }
 
 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
@@ -1146,6 +1158,130 @@ static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
        txo->pend_wrb_cnt = 0;
 }
 
+/* OS2BMC related */
+
+#define DHCP_CLIENT_PORT       68
+#define DHCP_SERVER_PORT       67
+#define NET_BIOS_PORT1         137
+#define NET_BIOS_PORT2         138
+#define DHCPV6_RAS_PORT                547
+
+#define is_mc_allowed_on_bmc(adapter, eh)      \
+       (!is_multicast_filt_enabled(adapter) && \
+        is_multicast_ether_addr(eh->h_dest) && \
+        !is_broadcast_ether_addr(eh->h_dest))
+
+#define is_bc_allowed_on_bmc(adapter, eh)      \
+       (!is_broadcast_filt_enabled(adapter) && \
+        is_broadcast_ether_addr(eh->h_dest))
+
+#define is_arp_allowed_on_bmc(adapter, skb)    \
+       (is_arp(skb) && is_arp_filt_enabled(adapter))
+
+#define is_broadcast_packet(eh, adapter)       \
+               (is_multicast_ether_addr(eh->h_dest) && \
+               !compare_ether_addr(eh->h_dest, adapter->netdev->broadcast))
+
+#define is_arp(skb)    (skb->protocol == htons(ETH_P_ARP))
+
+#define is_arp_filt_enabled(adapter)   \
+               (adapter->bmc_filt_mask & (BMC_FILT_BROADCAST_ARP))
+
+#define is_dhcp_client_filt_enabled(adapter)   \
+               (adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_CLIENT)
+
+#define is_dhcp_srvr_filt_enabled(adapter)     \
+               (adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_SERVER)
+
+#define is_nbios_filt_enabled(adapter) \
+               (adapter->bmc_filt_mask & BMC_FILT_BROADCAST_NET_BIOS)
+
+#define is_ipv6_na_filt_enabled(adapter)       \
+               (adapter->bmc_filt_mask &       \
+                       BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER)
+
+#define is_ipv6_ra_filt_enabled(adapter)       \
+               (adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RA)
+
+#define is_ipv6_ras_filt_enabled(adapter)      \
+               (adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RAS)
+
+#define is_broadcast_filt_enabled(adapter)     \
+               (adapter->bmc_filt_mask & BMC_FILT_BROADCAST)
+
+#define is_multicast_filt_enabled(adapter)     \
+               (adapter->bmc_filt_mask & BMC_FILT_MULTICAST)
+
+static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
+                              struct sk_buff **skb)
+{
+       struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
+       bool os2bmc = false;
+
+       if (!be_is_os2bmc_enabled(adapter))
+               goto done;
+
+       if (!is_multicast_ether_addr(eh->h_dest))
+               goto done;
+
+       if (is_mc_allowed_on_bmc(adapter, eh) ||
+           is_bc_allowed_on_bmc(adapter, eh) ||
+           is_arp_allowed_on_bmc(adapter, (*skb))) {
+               os2bmc = true;
+               goto done;
+       }
+
+       if ((*skb)->protocol == htons(ETH_P_IPV6)) {
+               struct ipv6hdr *hdr = ipv6_hdr((*skb));
+               u8 nexthdr = hdr->nexthdr;
+
+               if (nexthdr == IPPROTO_ICMPV6) {
+                       struct icmp6hdr *icmp6 = icmp6_hdr((*skb));
+
+                       switch (icmp6->icmp6_type) {
+                       case NDISC_ROUTER_ADVERTISEMENT:
+                               os2bmc = is_ipv6_ra_filt_enabled(adapter);
+                               goto done;
+                       case NDISC_NEIGHBOUR_ADVERTISEMENT:
+                               os2bmc = is_ipv6_na_filt_enabled(adapter);
+                               goto done;
+                       default:
+                               break;
+                       }
+               }
+       }
+
+       if (is_udp_pkt((*skb))) {
+               struct udphdr *udp = udp_hdr((*skb));
+
+               switch (udp->dest) {
+               case DHCP_CLIENT_PORT:
+                       os2bmc = is_dhcp_client_filt_enabled(adapter);
+                       goto done;
+               case DHCP_SERVER_PORT:
+                       os2bmc = is_dhcp_srvr_filt_enabled(adapter);
+                       goto done;
+               case NET_BIOS_PORT1:
+               case NET_BIOS_PORT2:
+                       os2bmc = is_nbios_filt_enabled(adapter);
+                       goto done;
+               case DHCPV6_RAS_PORT:
+                       os2bmc = is_ipv6_ras_filt_enabled(adapter);
+                       goto done;
+               default:
+                       break;
+               }
+       }
+done:
+       /* For packets over a VLAN that are destined to the BMC, the ASIC
+        * expects the VLAN tag to be inline in the packet.
+        */
+       if (os2bmc)
+               *skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);
+
+       return os2bmc;
+}
+
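
Because the same skb is enqueued twice when it also matches the BMC filters, be_xmit() below takes an extra reference with skb_get() so that whichever TX completion finishes second does not free a buffer the first one already released. A toy refcount model of that invariant; names with a trailing underscore are local stand-ins, not kernel APIs:

#include <stdio.h>

/* Each TX completion drops one reference; skb_get() adds one, so the
 * second enqueue of the same buffer is safe. */
struct skb { int users; };

static void skb_get_(struct skb *s)  { s->users++; }
static void consume_(struct skb *s)
{
	if (--s->users == 0)
		puts("freed");
}

int main(void)
{
	struct skb skb = { .users = 1 };

	skb_get_(&skb);		/* enqueue #1: BMC copy (mgmt bit set) */
	/* enqueue #2: wire copy reuses the original reference */
	consume_(&skb);		/* BMC completion: users 2 -> 1 */
	consume_(&skb);		/* wire completion: users 1 -> 0, "freed" */
	return 0;
}
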
 static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
@@ -1167,6 +1303,18 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
                goto drop;
        }
 
+       /* If os2bmc is enabled and the pkt is destined to the BMC,
+        * enqueue the pkt a second time with the mgmt bit set.
+        */
+       if (be_send_pkt_to_bmc(adapter, &skb)) {
+               BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
+               wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
+               if (unlikely(!wrb_cnt))
+                       goto drop;
+               else
+                       skb_get(skb);
+       }
+
        if (be_is_txq_full(txo)) {
                netif_stop_subqueue(netdev, q_idx);
                tx_stats(txo)->tx_stops++;
@@ -1265,7 +1413,8 @@ static int be_vid_config(struct be_adapter *adapter)
        if (status) {
                dev_err(dev, "Setting HW VLAN filtering failed\n");
                /* Set to VLAN promisc mode as setting VLAN filter failed */
-               if (addl_status(status) ==
+               if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
+                   addl_status(status) ==
                                MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
                        return be_set_vlan_promisc(adapter);
        } else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
@@ -1466,6 +1615,7 @@ static int be_get_vf_config(struct net_device *netdev, int vf,
        vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
        memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
        vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
+       vi->spoofchk = adapter->vf_cfg[vf].spoofchk;
 
        return 0;
 }
@@ -1478,7 +1628,7 @@ static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
        int status;
 
        /* Enable Transparent VLAN Tagging */
-       status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0);
+       status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
        if (status)
                return status;
 
@@ -1507,7 +1657,7 @@ static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
 
        /* Reset Transparent VLAN Tagging. */
        status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
-                                      vf_cfg->if_handle, 0);
+                                      vf_cfg->if_handle, 0, 0);
        if (status)
                return status;
 
@@ -1642,6 +1792,39 @@ static int be_set_vf_link_state(struct net_device *netdev, int vf,
        return 0;
 }
 
+static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
+{
+       struct be_adapter *adapter = netdev_priv(netdev);
+       struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
+       u8 spoofchk;
+       int status;
+
+       if (!sriov_enabled(adapter))
+               return -EPERM;
+
+       if (vf >= adapter->num_vfs)
+               return -EINVAL;
+
+       if (BEx_chip(adapter))
+               return -EOPNOTSUPP;
+
+       if (enable == vf_cfg->spoofchk)
+               return 0;
+
+       spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;
+
+       status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
+                                      0, spoofchk);
+       if (status) {
+               dev_err(&adapter->pdev->dev,
+                       "Spoofchk change on VF %d failed: %#x\n", vf, status);
+               return be_cmd_status(status);
+       }
+
+       vf_cfg->spoofchk = enable;
+       return 0;
+}
+
 static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
                          ulong now)
 {
@@ -1650,61 +1833,110 @@ static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
        aic->jiffies = now;
 }
 
-static void be_eqd_update(struct be_adapter *adapter)
+static int be_get_new_eqd(struct be_eq_obj *eqo)
 {
-       struct be_set_eqd set_eqd[MAX_EVT_QS];
-       int eqd, i, num = 0, start;
+       struct be_adapter *adapter = eqo->adapter;
+       int eqd, start;
        struct be_aic_obj *aic;
-       struct be_eq_obj *eqo;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
-       u64 rx_pkts, tx_pkts;
+       u64 rx_pkts = 0, tx_pkts = 0;
        ulong now;
        u32 pps, delta;
+       int i;
 
-       for_all_evt_queues(adapter, eqo, i) {
-               aic = &adapter->aic_obj[eqo->idx];
-               if (!aic->enable) {
-                       if (aic->jiffies)
-                               aic->jiffies = 0;
-                       eqd = aic->et_eqd;
-                       goto modify_eqd;
-               }
+       aic = &adapter->aic_obj[eqo->idx];
+       if (!aic->enable) {
+               if (aic->jiffies)
+                       aic->jiffies = 0;
+               eqd = aic->et_eqd;
+               return eqd;
+       }
 
-               rxo = &adapter->rx_obj[eqo->idx];
+       for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
                do {
                        start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
-                       rx_pkts = rxo->stats.rx_pkts;
+                       rx_pkts += rxo->stats.rx_pkts;
                } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
+       }
 
-               txo = &adapter->tx_obj[eqo->idx];
+       for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
                do {
                        start = u64_stats_fetch_begin_irq(&txo->stats.sync);
-                       tx_pkts = txo->stats.tx_reqs;
+                       tx_pkts += txo->stats.tx_reqs;
                } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
+       }
 
-               /* Skip, if wrapped around or first calculation */
-               now = jiffies;
-               if (!aic->jiffies || time_before(now, aic->jiffies) ||
-                   rx_pkts < aic->rx_pkts_prev ||
-                   tx_pkts < aic->tx_reqs_prev) {
-                       be_aic_update(aic, rx_pkts, tx_pkts, now);
-                       continue;
-               }
+       /* Skip, if wrapped around or first calculation */
+       now = jiffies;
+       if (!aic->jiffies || time_before(now, aic->jiffies) ||
+           rx_pkts < aic->rx_pkts_prev ||
+           tx_pkts < aic->tx_reqs_prev) {
+               be_aic_update(aic, rx_pkts, tx_pkts, now);
+               return aic->prev_eqd;
+       }
 
-               delta = jiffies_to_msecs(now - aic->jiffies);
-               pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
-                       (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
-               eqd = (pps / 15000) << 2;
+       delta = jiffies_to_msecs(now - aic->jiffies);
+       if (delta == 0)
+               return aic->prev_eqd;
 
-               if (eqd < 8)
-                       eqd = 0;
-               eqd = min_t(u32, eqd, aic->max_eqd);
-               eqd = max_t(u32, eqd, aic->min_eqd);
+       pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
+               (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
+       eqd = (pps / 15000) << 2;
 
-               be_aic_update(aic, rx_pkts, tx_pkts, now);
-modify_eqd:
-               if (eqd != aic->prev_eqd) {
+       if (eqd < 8)
+               eqd = 0;
+       eqd = min_t(u32, eqd, aic->max_eqd);
+       eqd = max_t(u32, eqd, aic->min_eqd);
+
+       be_aic_update(aic, rx_pkts, tx_pkts, now);
+
+       return eqd;
+}
+
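
A worked pass through be_get_new_eqd()'s formula above, using a hypothetical 600k combined rx+tx packets per second over the sampling window:

#include <stdio.h>

/* Worked instance of the eqd computation above. */
int main(void)
{
	unsigned int pps = 600000;		/* rx_pkts + tx_reqs rate */
	int eqd = (pps / 15000) << 2;		/* 40 << 2 = 160 */

	if (eqd < 8)
		eqd = 0;
	/* The driver then clamps eqd to [aic->min_eqd, aic->max_eqd];
	 * at 160 the Skyhawk path below picks R2I_DLY_ENC_1 (eqd > 100). */
	printf("eqd = %d\n", eqd);
	return 0;
}
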
+/* For Skyhawk-R only */
+static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
+{
+       struct be_adapter *adapter = eqo->adapter;
+       struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
+       ulong now = jiffies;
+       int eqd;
+       u32 mult_enc;
+
+       if (!aic->enable)
+               return 0;
+
+       if (time_before_eq(now, aic->jiffies) ||
+           jiffies_to_msecs(now - aic->jiffies) < 1)
+               eqd = aic->prev_eqd;
+       else
+               eqd = be_get_new_eqd(eqo);
+
+       if (eqd > 100)
+               mult_enc = R2I_DLY_ENC_1;
+       else if (eqd > 60)
+               mult_enc = R2I_DLY_ENC_2;
+       else if (eqd > 20)
+               mult_enc = R2I_DLY_ENC_3;
+       else
+               mult_enc = R2I_DLY_ENC_0;
+
+       aic->prev_eqd = eqd;
+
+       return mult_enc;
+}
+
+void be_eqd_update(struct be_adapter *adapter, bool force_update)
+{
+       struct be_set_eqd set_eqd[MAX_EVT_QS];
+       struct be_aic_obj *aic;
+       struct be_eq_obj *eqo;
+       int i, num = 0, eqd;
+
+       for_all_evt_queues(adapter, eqo, i) {
+               aic = &adapter->aic_obj[eqo->idx];
+               eqd = be_get_new_eqd(eqo);
+               if (force_update || eqd != aic->prev_eqd) {
                        set_eqd[num].delay_multiplier = (eqd * 65)/100;
                        set_eqd[num].eq_id = eqo->q.id;
                        aic->prev_eqd = eqd;
@@ -2212,7 +2444,7 @@ static void be_eq_clean(struct be_eq_obj *eqo)
 {
        int num = events_get(eqo);
 
-       be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
+       be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
 }
 
 static void be_rx_cq_clean(struct be_rx_obj *rxo)
@@ -2236,7 +2468,9 @@ static void be_rx_cq_clean(struct be_rx_obj *rxo)
                        if (lancer_chip(adapter))
                                break;
 
-                       if (flush_wait++ > 10 || be_hw_error(adapter)) {
+                       if (flush_wait++ > 50 ||
+                           be_check_error(adapter,
+                                          BE_ERROR_HW)) {
                                dev_warn(&adapter->pdev->dev,
                                         "did not receive flush compl\n");
                                break;
@@ -2297,7 +2531,8 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
                                pending_txqs--;
                }
 
-               if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
+               if (pending_txqs == 0 || ++timeo > 10 ||
+                   be_check_error(adapter, BE_ERROR_HW))
                        break;
 
                mdelay(1);
@@ -2573,7 +2808,7 @@ static irqreturn_t be_intx(int irq, void *dev)
                if (num_evts)
                        eqo->spurious_intr = 0;
        }
-       be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
+       be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);
 
        /* Return IRQ_HANDLED only for the first spurious intr
         * after a valid intr to stop the kernel from branding
@@ -2589,7 +2824,7 @@ static irqreturn_t be_msix(int irq, void *dev)
 {
        struct be_eq_obj *eqo = dev;
 
-       be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
+       be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
        napi_schedule(&eqo->napi);
        return IRQ_HANDLED;
 }
@@ -2838,6 +3073,7 @@ int be_poll(struct napi_struct *napi, int budget)
        int max_work = 0, work, i, num_evts;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
+       u32 mult_enc = 0;
 
        num_evts = events_get(eqo);
 
@@ -2863,10 +3099,18 @@ int be_poll(struct napi_struct *napi, int budget)
 
        if (max_work < budget) {
                napi_complete(napi);
-               be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
+
+               /* Skyhawk EQ_DB has a provision to set the rearm-to-interrupt
+                * delay via a delay multiplier encoding value.
+                */
+               if (skyhawk_chip(adapter))
+                       mult_enc = be_get_eq_delay_mult_enc(eqo);
+
+               be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
+                            mult_enc);
        } else {
                /* As we'll continue in polling mode, count and clear events */
-               be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
+               be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
        }
        return max_work;
 }
@@ -2898,22 +3142,19 @@ void be_detect_error(struct be_adapter *adapter)
        u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
        u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
        u32 i;
-       bool error_detected = false;
        struct device *dev = &adapter->pdev->dev;
-       struct net_device *netdev = adapter->netdev;
 
-       if (be_hw_error(adapter))
+       if (be_check_error(adapter, BE_ERROR_HW))
                return;
 
        if (lancer_chip(adapter)) {
                sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
                if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
+                       be_set_error(adapter, BE_ERROR_UE);
                        sliport_err1 = ioread32(adapter->db +
                                                SLIPORT_ERROR1_OFFSET);
                        sliport_err2 = ioread32(adapter->db +
                                                SLIPORT_ERROR2_OFFSET);
-                       adapter->hw_error = true;
-                       error_detected = true;
                        /* Do not log error messages if it's a FW reset */
                        if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
                            sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
@@ -2945,12 +3186,12 @@ void be_detect_error(struct be_adapter *adapter)
                 */
 
                if (ue_lo || ue_hi) {
-                       error_detected = true;
                        dev_err(dev,
                                "Unrecoverable Error detected in the adapter");
                        dev_err(dev, "Please reboot server to recover");
                        if (skyhawk_chip(adapter))
-                               adapter->hw_error = true;
+                               be_set_error(adapter, BE_ERROR_UE);
+
                        for (i = 0; ue_lo; ue_lo >>= 1, i++) {
                                if (ue_lo & 1)
                                        dev_err(dev, "UE: %s bit set\n",
@@ -2963,8 +3204,6 @@ void be_detect_error(struct be_adapter *adapter)
                        }
                }
        }
-       if (error_detected)
-               netif_carrier_off(netdev);
 }
 
 static void be_msix_disable(struct be_adapter *adapter)
@@ -3015,7 +3254,7 @@ fail:
        dev_warn(dev, "MSIx enable failed\n");
 
        /* INTx is not supported in VFs, so fail probe if enable_msix fails */
-       if (!be_physfn(adapter))
+       if (be_virtfn(adapter))
                return num_vec;
        return 0;
 }
@@ -3062,7 +3301,7 @@ static int be_irq_register(struct be_adapter *adapter)
                if (status == 0)
                        goto done;
                /* INTx is not supported for VF */
-               if (!be_physfn(adapter))
+               if (be_virtfn(adapter))
                        return status;
        }
 
@@ -3229,9 +3468,12 @@ static int be_rx_qs_create(struct be_adapter *adapter)
 
        memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
 
-       /* First time posting */
+       /* Post 1 less than RXQ-len to avoid the head being equal to the tail,
+        * which is the queue-empty condition.
+        */
        for_all_rx_queues(adapter, rxo, i)
-               be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
+               be_post_rx_frags(rxo, GFP_KERNEL, RX_Q_LEN - 1);
+
        return 0;
 }
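
Posting RX_Q_LEN - 1 buffers sidesteps the classic ring ambiguity the comment above refers to: with every slot used, head == tail means both full and empty. A runnable illustration with a power-of-two ring; the sizes are illustrative:

#include <stdio.h>

#define QLEN 8U

/* Occupancy of a power-of-two ring; valid while at most QLEN-1
 * entries are ever outstanding. */
static unsigned int occupancy(unsigned int head, unsigned int tail)
{
	return (head - tail) & (QLEN - 1);
}

int main(void)
{
	/* post QLEN-1 entries: occupancy is unambiguous */
	printf("posted %u -> occupancy %u\n", QLEN - 1, occupancy(QLEN - 1, 0));
	/* posting QLEN wraps head onto tail: a full ring looks empty */
	printf("posted %u -> occupancy %u (aliases empty)\n",
	       QLEN, occupancy(QLEN, 0));
	return 0;
}
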
 
@@ -3263,7 +3505,7 @@ static int be_open(struct net_device *netdev)
        for_all_evt_queues(adapter, eqo, i) {
                napi_enable(&eqo->napi);
                be_enable_busy_poll(eqo);
-               be_eq_notify(adapter, eqo->q.id, true, true, 0);
+               be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
        }
        adapter->flags |= BE_FLAGS_NAPI_ENABLED;
 
@@ -3563,7 +3805,7 @@ static int be_vfs_if_create(struct be_adapter *adapter)
 
        /* If a FW profile exists, then cap_flags are updated */
        cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
-                   BE_IF_FLAGS_MULTICAST;
+                   BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
 
        for_all_vfs(adapter, vf_cfg, vf) {
                if (!BE3_chip(adapter)) {
@@ -3610,6 +3852,7 @@ static int be_vf_setup(struct be_adapter *adapter)
        struct device *dev = &adapter->pdev->dev;
        struct be_vf_cfg *vf_cfg;
        int status, old_vfs, vf;
+       bool spoofchk;
 
        old_vfs = pci_num_vf(adapter->pdev);
 
@@ -3657,6 +3900,12 @@ static int be_vf_setup(struct be_adapter *adapter)
                if (!old_vfs)
                        be_cmd_config_qos(adapter, 0, 0, vf + 1);
 
+               status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
+                                              vf_cfg->if_handle, NULL,
+                                              &spoofchk);
+               if (!status)
+                       vf_cfg->spoofchk = spoofchk;
+
                if (!old_vfs) {
                        be_cmd_enable_vf(adapter, vf + 1);
                        be_cmd_set_logical_link_config(adapter,
@@ -3733,8 +3982,9 @@ static void BEx_get_resources(struct be_adapter *adapter,
         *    *only* if it is RSS-capable.
         */
        if (BE2_chip(adapter) || use_sriov ||  (adapter->port_num > 1) ||
-           !be_physfn(adapter) || (be_is_mc(adapter) &&
-           !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
+           be_virtfn(adapter) ||
+           (be_is_mc(adapter) &&
+            !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
                res->max_tx_qs = 1;
        } else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
                struct be_resources super_nic_res = {0};
@@ -4075,7 +4325,7 @@ static int be_func_init(struct be_adapter *adapter)
                msleep(100);
 
                /* We can clear all errors when function reset succeeds */
-               be_clear_all_error(adapter);
+               be_clear_error(adapter, BE_CLEAR_ALL);
        }
 
        /* Tell FW we're ready to fire cmds */
@@ -4182,7 +4432,7 @@ static void be_netpoll(struct net_device *netdev)
        int i;
 
        for_all_evt_queues(adapter, eqo, i) {
-               be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
+               be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
                napi_schedule(&eqo->napi);
        }
 }
@@ -4666,14 +4916,11 @@ static int lancer_fw_download(struct be_adapter *adapter,
        return 0;
 }
 
-#define BE2_UFI                2
-#define BE3_UFI                3
-#define BE3R_UFI       10
-#define SH_UFI         4
-#define SH_P2_UFI      11
-
-static int be_get_ufi_type(struct be_adapter *adapter,
-                          struct flash_file_hdr_g3 *fhdr)
+/* Check if the flash image file is compatible with the adapter that
+ * is being flashed.
+ */
+static bool be_check_ufi_compatibility(struct be_adapter *adapter,
+                                      struct flash_file_hdr_g3 *fhdr)
 {
        if (!fhdr) {
                dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
@@ -4685,43 +4932,22 @@ static int be_get_ufi_type(struct be_adapter *adapter,
         */
        switch (fhdr->build[0]) {
        case BLD_STR_UFI_TYPE_SH:
-               return (fhdr->asic_type_rev == ASIC_REV_P2) ? SH_P2_UFI :
-                                                               SH_UFI;
+               if (!skyhawk_chip(adapter))
+                       return false;
+               break;
        case BLD_STR_UFI_TYPE_BE3:
-               return (fhdr->asic_type_rev == ASIC_REV_B0) ? BE3R_UFI :
-                                                               BE3_UFI;
+               if (!BE3_chip(adapter))
+                       return false;
+               break;
        case BLD_STR_UFI_TYPE_BE2:
-               return BE2_UFI;
-       default:
-               return -1;
-       }
-}
-
-/* Check if the flash image file is compatible with the adapter that
- * is being flashed.
- * BE3 chips with asic-rev B0 must be flashed only with BE3R_UFI type.
- * Skyhawk chips with asic-rev P2 must be flashed only with SH_P2_UFI type.
- */
-static bool be_check_ufi_compatibility(struct be_adapter *adapter,
-                                      struct flash_file_hdr_g3 *fhdr)
-{
-       int ufi_type = be_get_ufi_type(adapter, fhdr);
-
-       switch (ufi_type) {
-       case SH_P2_UFI:
-               return skyhawk_chip(adapter);
-       case SH_UFI:
-               return (skyhawk_chip(adapter) &&
-                       adapter->asic_rev < ASIC_REV_P2);
-       case BE3R_UFI:
-               return BE3_chip(adapter);
-       case BE3_UFI:
-               return (BE3_chip(adapter) && adapter->asic_rev < ASIC_REV_B0);
-       case BE2_UFI:
-               return BE2_chip(adapter);
+               if (!BE2_chip(adapter))
+                       return false;
+               break;
        default:
                return false;
        }
+
+       return (fhdr->asic_type_rev >= adapter->asic_rev);
 }
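
The rewrite above replaces the per-revision UFI type table with two checks: the image's build string must name the right chip family, and fhdr->asic_type_rev must be at least the adapter's asic_rev. A condensed standalone sketch of that rule; the enum and values are illustrative:

#include <stdbool.h>
#include <stdio.h>

enum chip { CHIP_BE2, CHIP_BE3, CHIP_SKYHAWK };

/* Right family, and an image rev not older than the running silicon. */
static bool ufi_compatible(enum chip adapter_chip, int adapter_rev,
			   enum chip image_chip, int image_rev)
{
	if (image_chip != adapter_chip)
		return false;
	return image_rev >= adapter_rev;
}

int main(void)
{
	printf("%d\n", ufi_compatible(CHIP_BE3, 0x10, CHIP_BE3, 0x10)); /* 1 */
	printf("%d\n", ufi_compatible(CHIP_BE3, 0x10, CHIP_BE3, 0x00)); /* 0: older image */
	return 0;
}
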
 
 static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
@@ -4829,7 +5055,7 @@ static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
                                               adapter->if_handle,
                                               mode == BRIDGE_MODE_VEPA ?
                                               PORT_FWD_TYPE_VEPA :
-                                              PORT_FWD_TYPE_VEB);
+                                              PORT_FWD_TYPE_VEB, 0);
                if (status)
                        goto err;
 
@@ -4861,7 +5087,8 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
                hsw_mode = PORT_FWD_TYPE_VEB;
        } else {
                status = be_cmd_get_hsw_config(adapter, NULL, 0,
-                                              adapter->if_handle, &hsw_mode);
+                                              adapter->if_handle, &hsw_mode,
+                                              NULL);
                if (status)
                        return 0;
        }
@@ -5014,6 +5241,7 @@ static const struct net_device_ops be_netdev_ops = {
        .ndo_set_vf_rate        = be_set_vf_tx_rate,
        .ndo_get_vf_config      = be_get_vf_config,
        .ndo_set_vf_link_state  = be_set_vf_link_state,
+       .ndo_set_vf_spoofchk    = be_set_vf_spoofchk,
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = be_netpoll,
 #endif
@@ -5118,7 +5346,7 @@ static void be_err_detection_task(struct work_struct *work)
 
        be_detect_error(adapter);
 
-       if (adapter->hw_error) {
+       if (be_check_error(adapter, BE_ERROR_HW)) {
                be_cleanup(adapter);
 
                /* As of now error recovery support is in Lancer only */
@@ -5182,7 +5410,9 @@ static void be_worker(struct work_struct *work)
                        be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
        }
 
-       be_eqd_update(adapter);
+       /* EQ-delay update for Skyhawk is done while notifying EQ */
+       if (!skyhawk_chip(adapter))
+               be_eqd_update(adapter, false);
 
        if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
                be_log_sfp_info(adapter);
@@ -5202,7 +5432,7 @@ static void be_unmap_pci_bars(struct be_adapter *adapter)
 
 static int db_bar(struct be_adapter *adapter)
 {
-       if (lancer_chip(adapter) || !be_physfn(adapter))
+       if (lancer_chip(adapter) || be_virtfn(adapter))
                return 0;
        else
                return 4;
@@ -5381,6 +5611,30 @@ static void be_remove(struct pci_dev *pdev)
        free_netdev(adapter->netdev);
 }
 
+static ssize_t be_hwmon_show_temp(struct device *dev,
+                                 struct device_attribute *dev_attr,
+                                 char *buf)
+{
+       struct be_adapter *adapter = dev_get_drvdata(dev);
+
+       /* Unit: millidegree Celsius */
+       if (adapter->hwmon_info.be_on_die_temp == BE_INVALID_DIE_TEMP)
+               return -EIO;
+       else
+               return sprintf(buf, "%u\n",
+                              adapter->hwmon_info.be_on_die_temp * 1000);
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
+                         be_hwmon_show_temp, NULL, 1);
+
+static struct attribute *be_hwmon_attrs[] = {
+       &sensor_dev_attr_temp1_input.dev_attr.attr,
+       NULL
+};
+
+ATTRIBUTE_GROUPS(be_hwmon);
+
 static char *mc_name(struct be_adapter *adapter)
 {
        char *str = ""; /* default */
@@ -5500,6 +5754,16 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
 
        be_schedule_err_detection(adapter);
 
+       /* On Die temperature not supported for VF. */
+       if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
+               adapter->hwmon_info.hwmon_dev =
+                       devm_hwmon_device_register_with_groups(&pdev->dev,
+                                                              DRV_NAME,
+                                                              adapter,
+                                                              be_hwmon_groups);
+               adapter->hwmon_info.be_on_die_temp = BE_INVALID_DIE_TEMP;
+       }
+
        dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
                 func_name(adapter), mc_name(adapter), adapter->port_name);
 
@@ -5592,8 +5856,8 @@ static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
 
        dev_err(&adapter->pdev->dev, "EEH error detected\n");
 
-       if (!adapter->eeh_error) {
-               adapter->eeh_error = true;
+       if (!be_check_error(adapter, BE_ERROR_EEH)) {
+               be_set_error(adapter, BE_ERROR_EEH);
 
                be_cancel_err_detection(adapter);
 
@@ -5640,7 +5904,7 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
                return PCI_ERS_RESULT_DISCONNECT;
 
        pci_cleanup_aer_uncorrect_error_status(pdev);
-       be_clear_all_error(adapter);
+       be_clear_error(adapter, BE_CLEAR_ALL);
        return PCI_ERS_RESULT_RECOVERED;
 }
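
The hunks above replace be2net's scattered error booleans (adapter->hw_error, adapter->eeh_error) with be_set_error()/be_check_error()/be_clear_error() helpers. Their definitions live in be.h, outside this excerpt; a plausible minimal sketch of such a bit-flag scheme, with the field name and flag values assumed rather than copied from the driver:

	/* Hedged sketch only -- BE_ERROR_* values and the err_flags field
	 * name are assumptions inferred from the call sites above, not
	 * taken from be.h.
	 */
	#define BE_ERROR_EEH	BIT(0)
	#define BE_ERROR_HW	BIT(1)
	#define BE_CLEAR_ALL	~0U

	static inline void be_set_error(struct be_adapter *adapter, u32 err_type)
	{
		adapter->err_flags |= err_type;
	}

	static inline bool be_check_error(struct be_adapter *adapter, u32 err_type)
	{
		return !!(adapter->err_flags & err_type);
	}

	static inline void be_clear_error(struct be_adapter *adapter, u32 err_type)
	{
		adapter->err_flags &= ~err_type;
	}

Compared with independent booleans, a single mask lets be_eeh_reset() drop every error source in one call (BE_CLEAR_ALL) instead of resetting each flag by hand.
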
 
index 132866433a250cbbee21320aa587e0e947a8d85e..60368207bf584188b4d0293d9f2a8d7235180a8d 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2005 - 2014 Emulex
+ * Copyright (C) 2005 - 2015 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
index e6f7eb1a7d879b23ba57e8e03f301f38b7c33d12..cde6ef905ec481dce0436858fb1fd78252471146 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2005 - 2014 Emulex
+ * Copyright (C) 2005 - 2015 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
index 66d47e448e4d175aeefecddacc53f8f858f0085b..bf4cf3fbb5f2e8c05ac347e4c9bdd39b380fc184 100644 (file)
@@ -2118,6 +2118,82 @@ static void fec_enet_get_drvinfo(struct net_device *ndev,
        strlcpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info));
 }
 
+static int fec_enet_get_regs_len(struct net_device *ndev)
+{
+       struct fec_enet_private *fep = netdev_priv(ndev);
+       struct resource *r;
+       int s = 0;
+
+       r = platform_get_resource(fep->pdev, IORESOURCE_MEM, 0);
+       if (r)
+               s = resource_size(r);
+
+       return s;
+}
+
+/* List of registers that can safely be read and dumped with ethtool */
+#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
+       defined(CONFIG_M520x) || defined(CONFIG_M532x) ||               \
+       defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
+static u32 fec_enet_register_offset[] = {
+       FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
+       FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
+       FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_TXIC1,
+       FEC_TXIC2, FEC_RXIC0, FEC_RXIC1, FEC_RXIC2, FEC_HASH_TABLE_HIGH,
+       FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW,
+       FEC_X_WMRK, FEC_R_BOUND, FEC_R_FSTART, FEC_R_DES_START_1,
+       FEC_X_DES_START_1, FEC_R_BUFF_SIZE_1, FEC_R_DES_START_2,
+       FEC_X_DES_START_2, FEC_R_BUFF_SIZE_2, FEC_R_DES_START_0,
+       FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM,
+       FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC, FEC_RCMR_1, FEC_RCMR_2,
+       FEC_DMA_CFG_1, FEC_DMA_CFG_2, FEC_R_DES_ACTIVE_1, FEC_X_DES_ACTIVE_1,
+       FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_2, FEC_QOS_SCHEME,
+       RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT,
+       RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG,
+       RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255,
+       RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047,
+       RMON_T_P_GTE2048, RMON_T_OCTETS,
+       IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF,
+       IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE,
+       IEEE_T_FDXFC, IEEE_T_OCTETS_OK,
+       RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN,
+       RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB,
+       RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255,
+       RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047,
+       RMON_R_P_GTE2048, RMON_R_OCTETS,
+       IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR,
+       IEEE_R_FDXFC, IEEE_R_OCTETS_OK
+};
+#else
+static u32 fec_enet_register_offset[] = {
+       FEC_ECNTRL, FEC_IEVENT, FEC_IMASK, FEC_IVEC, FEC_R_DES_ACTIVE_0,
+       FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_0,
+       FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2, FEC_MII_DATA, FEC_MII_SPEED,
+       FEC_R_BOUND, FEC_R_FSTART, FEC_X_WMRK, FEC_X_FSTART, FEC_R_CNTRL,
+       FEC_MAX_FRM_LEN, FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH,
+       FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW, FEC_R_DES_START_0,
+       FEC_R_DES_START_1, FEC_R_DES_START_2, FEC_X_DES_START_0,
+       FEC_X_DES_START_1, FEC_X_DES_START_2, FEC_R_BUFF_SIZE_0,
+       FEC_R_BUFF_SIZE_1, FEC_R_BUFF_SIZE_2
+};
+#endif
+
+static void fec_enet_get_regs(struct net_device *ndev,
+                             struct ethtool_regs *regs, void *regbuf)
+{
+       struct fec_enet_private *fep = netdev_priv(ndev);
+       u32 __iomem *theregs = (u32 __iomem *)fep->hwp;
+       u32 *buf = (u32 *)regbuf;
+       u32 i, off;
+
+       memset(buf, 0, regs->len);
+
+       for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) {
+               off = fec_enet_register_offset[i] / 4;
+               buf[off] = readl(&theregs[off]);
+       }
+}
+
 static int fec_enet_get_ts_info(struct net_device *ndev,
                                struct ethtool_ts_info *info)
 {
@@ -2515,6 +2591,8 @@ static const struct ethtool_ops fec_enet_ethtool_ops = {
        .get_settings           = fec_enet_get_settings,
        .set_settings           = fec_enet_set_settings,
        .get_drvinfo            = fec_enet_get_drvinfo,
+       .get_regs_len           = fec_enet_get_regs_len,
+       .get_regs               = fec_enet_get_regs,
        .nway_reset             = fec_enet_nway_reset,
        .get_link               = ethtool_op_get_link,
        .get_coalesce           = fec_enet_get_coalesce,
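
With get_regs_len/get_regs wired into fec_enet_ethtool_ops, the dump becomes reachable through the standard SIOCETHTOOL ioctl, the same path "ethtool -d ethN" uses. A self-contained userspace sketch with error handling abbreviated (the interface name is a placeholder):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	int main(int argc, char **argv)
	{
		struct ethtool_drvinfo info = { .cmd = ETHTOOL_GDRVINFO };
		struct ethtool_regs *regs;
		struct ifreq ifr;
		unsigned int i;
		int fd;

		fd = socket(AF_INET, SOCK_DGRAM, 0);
		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, argc > 1 ? argv[1] : "eth0", IFNAMSIZ - 1);

		ifr.ifr_data = (char *)&info;
		ioctl(fd, SIOCETHTOOL, &ifr);	/* fills info.regdump_len */

		regs = calloc(1, sizeof(*regs) + info.regdump_len);
		regs->cmd = ETHTOOL_GREGS;
		regs->len = info.regdump_len;
		ifr.ifr_data = (char *)regs;
		ioctl(fd, SIOCETHTOOL, &ifr);	/* driver copies the registers */

		for (i = 0; i < regs->len / 4; i++)
			printf("0x%03x: 0x%08x\n", i * 4,
			       ((unsigned int *)regs->data)[i]);
		free(regs);
		return 0;
	}
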
index a583d89b13c457d84a3158ab02adceb349fa7978..a15663ad7f5e98c64a1f6e0912163bee578c938b 100644 (file)
@@ -353,6 +353,7 @@ static int fec_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
        tmp = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_MASK;
        tmp |= corr_ns << FEC_T_INC_CORR_OFFSET;
        writel(tmp, fep->hwp + FEC_ATIME_INC);
+       corr_period = corr_period > 1 ? corr_period - 1 : corr_period;
        writel(corr_period, fep->hwp + FEC_ATIME_CORR);
        /* dummy read to update the timer. */
        timecounter_read(&fep->tc);
index 4ee080d49bc000a6bdb0d4a3184d6421aaae2ffc..ff875028fdff5e1723c618f721658971cd051603 100644 (file)
@@ -516,6 +516,15 @@ static struct net_device_stats *gfar_get_stats(struct net_device *dev)
        return &dev->stats;
 }
 
+static int gfar_set_mac_addr(struct net_device *dev, void *p)
+{
+       eth_mac_addr(dev, p);
+
+       gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
+
+       return 0;
+}
+
 static const struct net_device_ops gfar_netdev_ops = {
        .ndo_open = gfar_enet_open,
        .ndo_start_xmit = gfar_start_xmit,
@@ -526,7 +535,7 @@ static const struct net_device_ops gfar_netdev_ops = {
        .ndo_tx_timeout = gfar_timeout,
        .ndo_do_ioctl = gfar_ioctl,
        .ndo_get_stats = gfar_get_stats,
-       .ndo_set_mac_address = eth_mac_addr,
+       .ndo_set_mac_address = gfar_set_mac_addr,
        .ndo_validate_addr = eth_validate_addr,
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = gfar_netpoll,
@@ -1411,6 +1420,8 @@ static int gfar_probe(struct platform_device *ofdev)
                dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
        }
 
+       dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+
        gfar_init_addr_hash_table(priv);
 
        /* Insert receive time stamps into padding alignment bytes */
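
Setting IFF_LIVE_ADDR_CHANGE is what makes the new gfar_set_mac_addr() reachable while the interface is up: without the flag, the core rejects a MAC change on a running device before the driver ever sees it. The relevant core-side gate, paraphrased from eth_prepare_mac_addr_change() as a sketch (not part of this patch):

	int eth_prepare_mac_addr_change(struct net_device *dev, void *p)
	{
		struct sockaddr *addr = p;

		/* live changes are refused unless the driver opted in */
		if (!(dev->priv_flags & IFF_LIVE_ADDR_CHANGE) && netif_running(dev))
			return -EBUSY;
		if (!is_valid_ether_addr(addr->sa_data))
			return -EADDRNOTAVAIL;
		return 0;
	}
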
@@ -2254,7 +2265,6 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
        int i, rq = 0;
        int do_tstamp, do_csum, do_vlan;
        u32 bufaddr;
-       unsigned long flags;
        unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;
 
        rq = skb->queue_mapping;
@@ -2434,19 +2444,6 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        netdev_tx_sent_queue(txq, bytes_sent);
 
-       /* We can work in parallel with gfar_clean_tx_ring(), except
-        * when modifying num_txbdfree. Note that we didn't grab the lock
-        * when we were reading the num_txbdfree and checking for available
-        * space, that's because outside of this function it can only grow,
-        * and once we've got needed space, it cannot suddenly disappear.
-        *
-        * The lock also protects us from gfar_error(), which can modify
-        * regs->tstat and thus retrigger the transfers, which is why we
-        * also must grab the lock before setting ready bit for the first
-        * to be transmitted BD.
-        */
-       spin_lock_irqsave(&tx_queue->txlock, flags);
-
        gfar_wmb();
 
        txbdp_start->lstatus = cpu_to_be32(lstatus);
@@ -2463,8 +2460,15 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
 
+       /* We can work in parallel with gfar_clean_tx_ring(), except
+        * when modifying num_txbdfree. Note that we didn't grab the lock
+        * when we were reading the num_txbdfree and checking for available
+        * space, that's because outside of this function it can only grow.
+        */
+       spin_lock_bh(&tx_queue->txlock);
        /* reduce TxBD free count */
        tx_queue->num_txbdfree -= (nr_txbds);
+       spin_unlock_bh(&tx_queue->txlock);
 
        /* If the next BD still needs to be cleaned up, then the bds
         * are full.  We need to tell the kernel to stop sending us stuff.
@@ -2478,9 +2482,6 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
        /* Tell the DMA to go go go */
        gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
 
-       /* Unlock priv */
-       spin_unlock_irqrestore(&tx_queue->txlock, flags);
-
        return NETDEV_TX_OK;
 
 dma_map_err:
@@ -2622,7 +2623,6 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
        skb_dirtytx = tx_queue->skb_dirtytx;
 
        while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
-               unsigned long flags;
 
                frags = skb_shinfo(skb)->nr_frags;
 
@@ -2686,9 +2686,9 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
                              TX_RING_MOD_MASK(tx_ring_size);
 
                howmany++;
-               spin_lock_irqsave(&tx_queue->txlock, flags);
+               spin_lock(&tx_queue->txlock);
                tx_queue->num_txbdfree += nr_txbds;
-               spin_unlock_irqrestore(&tx_queue->txlock, flags);
+               spin_unlock(&tx_queue->txlock);
        }
 
        /* If we freed a buffer, we can restart transmission, if necessary */
@@ -3411,21 +3411,12 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
                if (events & IEVENT_CRL)
                        dev->stats.tx_aborted_errors++;
                if (events & IEVENT_XFUN) {
-                       unsigned long flags;
-
                        netif_dbg(priv, tx_err, dev,
                                  "TX FIFO underrun, packet dropped\n");
                        dev->stats.tx_dropped++;
                        atomic64_inc(&priv->extra_stats.tx_underrun);
 
-                       local_irq_save(flags);
-                       lock_tx_qs(priv);
-
-                       /* Reactivate the Tx Queues */
-                       gfar_write(&regs->tstat, gfargrp->tstat);
-
-                       unlock_tx_qs(priv);
-                       local_irq_restore(flags);
+                       schedule_work(&priv->reset_task);
                }
                netif_dbg(priv, tx_err, dev, "Transmit Error\n");
        }
index 3b39fdddeb57140c2d00003a0c473a869f768252..d49bee38cd319a0a8c7afd2cad7f1cb1ac7f2ed3 100644 (file)
@@ -798,7 +798,7 @@ static void hip04_free_ring(struct net_device *ndev, struct device *d)
 
        for (i = 0; i < RX_DESC_NUM; i++)
                if (priv->rx_buf[i])
-                       put_page(virt_to_head_page(priv->rx_buf[i]));
+                       skb_free_frag(priv->rx_buf[i]);
 
        for (i = 0; i < TX_DESC_NUM; i++)
                if (priv->tx_skb[i])
index 0ffdcd381fddde21f3d56edeeaeeef22f589ff4c..a5e077eac99a3b8292fd5355c142733dccbf8f89 100644 (file)
@@ -500,7 +500,6 @@ static int hix5hd2_rx(struct net_device *dev, int limit)
                napi_gro_receive(&priv->napi, skb);
                dev->stats.rx_packets++;
                dev->stats.rx_bytes += skb->len;
-               dev->last_rx = jiffies;
 next:
                pos = dma_ring_incr(pos, RX_DESC_NUM);
        }
index 18134766a11409c6c976f00ac0431de748c03073..29bbb628d712b38e8b17529626b85c3532d7942f 100644 (file)
@@ -58,7 +58,7 @@ static struct kobj_type ktype_veth_pool;
 
 static const char ibmveth_driver_name[] = "ibmveth";
 static const char ibmveth_driver_string[] = "IBM Power Virtual Ethernet Driver";
-#define ibmveth_driver_version "1.04"
+#define ibmveth_driver_version "1.05"
 
 MODULE_AUTHOR("Santiago Leon <santil@linux.vnet.ibm.com>");
 MODULE_DESCRIPTION("IBM Power Virtual Ethernet Driver");
@@ -100,6 +100,8 @@ struct ibmveth_stat ibmveth_stats[] = {
        { "tx_send_failed", IBMVETH_STAT_OFF(tx_send_failed) },
        { "fw_enabled_ipv4_csum", IBMVETH_STAT_OFF(fw_ipv4_csum_support) },
        { "fw_enabled_ipv6_csum", IBMVETH_STAT_OFF(fw_ipv6_csum_support) },
+       { "tx_large_packets", IBMVETH_STAT_OFF(tx_large_packets) },
+       { "rx_large_packets", IBMVETH_STAT_OFF(rx_large_packets) }
 };
 
 /* simple methods of getting data from the current rxq entry */
@@ -852,6 +854,10 @@ static int ibmveth_set_features(struct net_device *dev,
        struct ibmveth_adapter *adapter = netdev_priv(dev);
        int rx_csum = !!(features & NETIF_F_RXCSUM);
        int rc;
+       netdev_features_t changed = features ^ dev->features;
+
+       if (features & NETIF_F_TSO & changed)
+               netdev_info(dev, "TSO feature requires all partitions to have an updated driver\n");
 
        if (rx_csum == adapter->rx_csum)
                return 0;
@@ -1035,6 +1041,15 @@ retry_bounce:
                descs[i+1].fields.address = dma_addr;
        }
 
+       if (skb_is_gso(skb) && !skb_is_gso_v6(skb)) {
+               /* Put -1 in the IP checksum to tell phyp it
+                *  is a largesend packet and put the mss in the TCP checksum.
+                * is a largesend packet and put the MSS in the TCP checksum.
+               ip_hdr(skb)->check = 0xffff;
+               tcp_hdr(skb)->check = cpu_to_be16(skb_shinfo(skb)->gso_size);
+               adapter->tx_large_packets++;
+       }
+
        if (ibmveth_send(adapter, descs)) {
                adapter->tx_send_failed++;
                netdev->stats.tx_dropped++;
@@ -1080,6 +1095,7 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
        struct net_device *netdev = adapter->netdev;
        int frames_processed = 0;
        unsigned long lpar_rc;
+       struct iphdr *iph;
 
 restart_poll:
        while (frames_processed < budget) {
@@ -1122,10 +1138,23 @@ restart_poll:
                        skb_put(skb, length);
                        skb->protocol = eth_type_trans(skb, netdev);
 
-                       if (csum_good)
+                       if (csum_good) {
                                skb->ip_summed = CHECKSUM_UNNECESSARY;
+                               if (be16_to_cpu(skb->protocol) == ETH_P_IP) {
+                                       iph = (struct iphdr *)skb->data;
+
+                                       /* If the IP checksum is not offloaded and if the packet
+                                        * is a largesend packet, the checksum must be rebuilt.
+                                        */
+                                       if (iph->check == 0xffff) {
+                                               iph->check = 0;
+                                               iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+                                               adapter->rx_large_packets++;
+                                       }
+                               }
+                       }
 
-                       netif_receive_skb(skb); /* send it up */
+                       napi_gro_receive(napi, skb);    /* send it up */
 
                        netdev->stats.rx_packets++;
                        netdev->stats.rx_bytes += length;
@@ -1422,8 +1451,14 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
        netdev->features |= netdev->hw_features;
 
+       /* TSO is disabled by default */
+       netdev->hw_features |= NETIF_F_TSO;
+
        memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN);
 
+       if (firmware_has_feature(FW_FEATURE_CMO))
+               memcpy(pool_count, pool_count_cmo, sizeof(pool_count));
+
        for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
                struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
                int error;
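
The ibmveth TSO hunks above rely on an in-band convention with the hypervisor, since the transmit descriptors carry no GSO metadata. Summarized as a sketch, with the convention inferred from the two code paths above rather than from any phyp documentation:

	/* Annotation, not part of the patch:
	 *
	 * transmit (ibmveth_start_xmit):
	 *   ip_hdr(skb)->check  = 0xffff;                 marks "largesend"
	 *   tcp_hdr(skb)->check = cpu_to_be16(gso_size);  carries the MSS
	 *
	 * receive (ibmveth_poll):
	 *   an IP checksum of 0xffff flags a large-receive frame, so the
	 *   real header checksum is rebuilt with ip_fast_csum() before the
	 *   skb is handed to napi_gro_receive().
	 */
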
index 1f37499d43981d260c61cdc1c089d7328a7942c9..41dedb1fb2ae7403d89f3feed38b4f87197de067 100644 (file)
@@ -104,7 +104,8 @@ static inline long h_illan_attributes(unsigned long unit_address,
 
 static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 };
 static int pool_count[] = { 256, 512, 256, 256, 256 };
-static int pool_active[] = { 1, 1, 0, 0, 0};
+static int pool_count_cmo[] = { 256, 512, 256, 256, 64 };
+static int pool_active[] = { 1, 1, 0, 0, 1};
 
 #define IBM_VETH_INVALID_MAP ((u16)0xffff)
 
@@ -160,6 +161,8 @@ struct ibmveth_adapter {
     u64 rx_no_buffer;
     u64 tx_map_failed;
     u64 tx_send_failed;
+    u64 tx_large_packets;
+    u64 rx_large_packets;
 };
 
 /*
index 1a450f4b6b125d2234ea159022666b5ca381d328..d2657a412768839145b57c656a2349cb750a146a 100644 (file)
@@ -874,7 +874,7 @@ static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
 {
        struct cb *cb;
        unsigned long flags;
-       int err = 0;
+       int err;
 
        spin_lock_irqsave(&nic->cb_lock, flags);
 
@@ -2922,9 +2922,7 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        pci_set_master(pdev);
 
-       init_timer(&nic->watchdog);
-       nic->watchdog.function = e100_watchdog;
-       nic->watchdog.data = (unsigned long)nic;
+       setup_timer(&nic->watchdog, e100_watchdog, (unsigned long)nic);
 
        INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);
 
index 983eb4e6f7aa184deb48f526316c3254622c6756..74dc150559711f2dd8f3b9b707288cb5bd77319a 100644 (file)
@@ -2079,11 +2079,6 @@ static void *e1000_alloc_frag(const struct e1000_adapter *a)
        return data;
 }
 
-static void e1000_free_frag(const void *data)
-{
-       put_page(virt_to_head_page(data));
-}
-
 /**
  * e1000_clean_rx_ring - Free Rx Buffers per Queue
  * @adapter: board private structure
@@ -2107,7 +2102,7 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
                                                 adapter->rx_buffer_len,
                                                 DMA_FROM_DEVICE);
                        if (buffer_info->rxbuf.data) {
-                               e1000_free_frag(buffer_info->rxbuf.data);
+                               skb_free_frag(buffer_info->rxbuf.data);
                                buffer_info->rxbuf.data = NULL;
                        }
                } else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
@@ -4594,28 +4589,28 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
                        data = e1000_alloc_frag(adapter);
                        /* Failed allocation, critical failure */
                        if (!data) {
-                               e1000_free_frag(olddata);
+                               skb_free_frag(olddata);
                                adapter->alloc_rx_buff_failed++;
                                break;
                        }
 
                        if (!e1000_check_64k_bound(adapter, data, bufsz)) {
                                /* give up */
-                               e1000_free_frag(data);
-                               e1000_free_frag(olddata);
+                               skb_free_frag(data);
+                               skb_free_frag(olddata);
                                adapter->alloc_rx_buff_failed++;
                                break;
                        }
 
                        /* Use new allocation */
-                       e1000_free_frag(olddata);
+                       skb_free_frag(olddata);
                }
                buffer_info->dma = dma_map_single(&pdev->dev,
                                                  data,
                                                  adapter->rx_buffer_len,
                                                  DMA_FROM_DEVICE);
                if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
-                       e1000_free_frag(data);
+                       skb_free_frag(data);
                        buffer_info->dma = 0;
                        adapter->alloc_rx_buff_failed++;
                        break;
@@ -4637,7 +4632,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
                                         adapter->rx_buffer_len,
                                         DMA_FROM_DEVICE);
 
-                       e1000_free_frag(data);
+                       skb_free_frag(data);
                        buffer_info->rxbuf.data = NULL;
                        buffer_info->dma = 0;
 
index 08f22f348800ddd37aaf2d2d87eb4ce0e9c7f3b3..2af603f3e4183d5209ec9cb762892b7cac185206 100644 (file)
@@ -1,5 +1,5 @@
 /* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
index 535a9430976df7653671e23d62adc1140973579e..a2162e11673eef45b72e631cf1b30a0748064718 100644 (file)
@@ -1,5 +1,5 @@
 /* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
index dc79ed85030b73e8ccbbdc57c05ebe09085a4bee..5f7016442ec4faeb827c78a074dbf6bef3eb0fec 100644 (file)
@@ -1,5 +1,5 @@
 /* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -2010,7 +2010,7 @@ const struct e1000_info e1000_82573_info = {
        .flags2                 = FLAG2_DISABLE_ASPM_L1
                                  | FLAG2_DISABLE_ASPM_L0S,
        .pba                    = 20,
-       .max_hw_frame_size      = ETH_FRAME_LEN + ETH_FCS_LEN,
+       .max_hw_frame_size      = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN,
        .get_variants           = e1000_get_variants_82571,
        .mac_ops                = &e82571_mac_ops,
        .phy_ops                = &e82_phy_ops_m88,
index 2e758f796d6099bc3dfb05107ea3a6ebc81dec09..abc6a9abff980227e6936c3ae2efa7e62aae96fb 100644 (file)
@@ -1,5 +1,5 @@
 /* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
index 0570c668ec3dd6f89cdb4c8ac7aa8edc271b710b..133d4074dbe48d8fd148662b2c57d0d41afd4dbe 100644 (file)
@@ -1,5 +1,5 @@
 /* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
index 0abc942c966e4a377af222c1d876af6983edb91a..0b748d1959d93162b79dea9f38bc2335bf666b92 100644 (file)
@@ -1,5 +1,5 @@
 /* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -98,6 +98,8 @@ struct e1000_info;
 #define DEFAULT_RADV                   8
 #define BURST_RDTR                     0x20
 #define BURST_RADV                     0x20
+#define PCICFG_DESC_RING_STATUS                0xe4
+#define FLUSH_DESC_REQUIRED            0x100
 
 /* in the case of WTHRESH, it appears at least the 82571/2 hardware
  * writes back 4 descriptors when WTHRESH=5, and 3 descriptors when
@@ -384,6 +386,10 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca);
 #define INCVALUE_SHIFT_25MHz   18
 #define INCPERIOD_25MHz                1
 
+#define INCVALUE_24MHz         125
+#define INCVALUE_SHIFT_24MHz   14
+#define INCPERIOD_24MHz                3
+
 /* Another drawback of scaling the incvalue by a large factor is the
  * 64-bit SYSTIM register overflows more quickly.  This is dealt with
  * by simply reading the clock before it overflows.
index 11f486e4ff7b37ec2d96cb2272dad3bd3d09b68d..ad6daa656d3e9f8b21a9bd3e43e40346711238b6 100644 (file)
@@ -1,5 +1,5 @@
 /* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -1516,8 +1516,19 @@ static int e1000_set_es2lan_mac_loopback(struct e1000_adapter *adapter)
 static int e1000_setup_loopback_test(struct e1000_adapter *adapter)
 {
        struct e1000_hw *hw = &adapter->hw;
-       u32 rctl;
-
+       u32 rctl, fext_nvm11, tarc0;
+
+       if (hw->mac.type == e1000_pch_spt) {
+               fext_nvm11 = er32(FEXTNVM11);
+               fext_nvm11 |= E1000_FEXTNVM11_DISABLE_MULR_FIX;
+               ew32(FEXTNVM11, fext_nvm11);
+               tarc0 = er32(TARC(0));
+               /* clear bits 28 & 29 (control of MULR concurrent requests) */
+               tarc0 &= 0xcfffffff;
+               /* set bit 29 (value of MULR requests is now 2) */
+               tarc0 |= 0x20000000;
+               ew32(TARC(0), tarc0);
+       }
        if (hw->phy.media_type == e1000_media_type_fiber ||
            hw->phy.media_type == e1000_media_type_internal_serdes) {
                switch (hw->mac.type) {
@@ -1542,7 +1553,7 @@ static int e1000_setup_loopback_test(struct e1000_adapter *adapter)
 static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
 {
        struct e1000_hw *hw = &adapter->hw;
-       u32 rctl;
+       u32 rctl, fext_nvm11, tarc0;
        u16 phy_reg;
 
        rctl = er32(RCTL);
@@ -1550,6 +1561,16 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
        ew32(RCTL, rctl);
 
        switch (hw->mac.type) {
+       case e1000_pch_spt:
+               fext_nvm11 = er32(FEXTNVM11);
+               fext_nvm11 &= ~E1000_FEXTNVM11_DISABLE_MULR_FIX;
+               ew32(FEXTNVM11, fext_nvm11);
+               tarc0 = er32(TARC(0));
+               /* clear bits 28 & 29 (control of MULR concurrent requests) */
+               /* set bit 29 (value of MULR requests is now 0) */
+               tarc0 &= 0xcfffffff;
+               ew32(TARC(0), tarc0);
+               /* fall through */
        case e1000_80003es2lan:
                if (hw->phy.media_type == e1000_media_type_fiber ||
                    hw->phy.media_type == e1000_media_type_internal_serdes) {
index 19e8c487db06d5c6c8ca0b330edaeb751bc4c364..c9da4654e9cad64b64ee305ea5a2aa37c0906338 100644 (file)
@@ -1,5 +1,5 @@
 /* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
index 9d81c03174334be84a3d605ff03417effc4c7ccd..b074b9a667b32cceae00965a0031632431e972fe 100644 (file)
@@ -1,5 +1,5 @@
 /* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -1014,8 +1014,7 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
                u16 speed, duplex, scale = 0;
                u16 max_snoop, max_nosnoop;
                u16 max_ltr_enc;        /* max LTR latency encoded */
-               s64 lat_ns;     /* latency (ns) */
-               s64 value;
+               u64 value;
                u32 rxa;
 
                if (!hw->adapter->max_frame_size) {
@@ -1040,14 +1039,11 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
                 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
                 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
                 */
-               lat_ns = ((s64)rxa * 1024 -
-                         (2 * (s64)hw->adapter->max_frame_size)) * 8 * 1000;
-               if (lat_ns < 0)
-                       lat_ns = 0;
-               else
-                       do_div(lat_ns, speed);
+               rxa *= 512;
+               value = (rxa > hw->adapter->max_frame_size) ?
+                       (rxa - hw->adapter->max_frame_size) * (16000 / speed) :
+                       0;
 
-               value = lat_ns;
                while (value > PCI_LTR_VALUE_MASK) {
                        scale++;
                        value = DIV_ROUND_UP(value, (1 << 5));
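
The rewritten latency computation above is an algebraic simplification of the deleted lat_ns expression rather than a behavioural change; checking the arithmetic (annotation, not part of the patch):

	/* With rxa the receive buffer allocation in KB (per the surrounding
	 * code) and mfs = hw->adapter->max_frame_size in bytes:
	 *
	 *   old: lat_ns = (rxa * 1024 - 2 * mfs) * 8 * 1000 / speed
	 *   new: value  = (rxa * 512  -     mfs) * (16000 / speed)
	 *
	 * Factoring 2 out of the old numerator:
	 *   (rxa * 1024 - 2 * mfs) * 8000 = 2 * (rxa * 512 - mfs) * 8000
	 *                                 = (rxa * 512 - mfs) * 16000
	 *
	 * and 16000/speed is exact for the 10/100/1000 Mb/s values used
	 * here, so the two forms agree; the ternary reproduces the old
	 * "clamp negative latency to 0" branch.
	 */
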
@@ -1563,7 +1559,7 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
            ((adapter->hw.mac.type >= e1000_pch2lan) &&
             (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LSECCK)))) {
                adapter->flags &= ~FLAG_HAS_JUMBO_FRAMES;
-               adapter->max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN;
+               adapter->max_hw_frame_size = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
 
                hw->mac.ops.blink_led = NULL;
        }
@@ -5681,7 +5677,7 @@ const struct e1000_info e1000_ich8_info = {
                                  | FLAG_HAS_FLASH
                                  | FLAG_APME_IN_WUC,
        .pba                    = 8,
-       .max_hw_frame_size      = ETH_FRAME_LEN + ETH_FCS_LEN,
+       .max_hw_frame_size      = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN,
        .get_variants           = e1000_get_variants_ich8lan,
        .mac_ops                = &ich8_mac_ops,
        .phy_ops                = &ich8_phy_ops,
@@ -5754,7 +5750,7 @@ const struct e1000_info e1000_pch2_info = {
        .flags2                 = FLAG2_HAS_PHY_STATS
                                  | FLAG2_HAS_EEE,
        .pba                    = 26,
-       .max_hw_frame_size      = 9018,
+       .max_hw_frame_size      = 9022,
        .get_variants           = e1000_get_variants_ich8lan,
        .mac_ops                = &ich8_mac_ops,
        .phy_ops                = &ich8_phy_ops,
@@ -5774,7 +5770,7 @@ const struct e1000_info e1000_pch_lpt_info = {
        .flags2                 = FLAG2_HAS_PHY_STATS
                                  | FLAG2_HAS_EEE,
        .pba                    = 26,
-       .max_hw_frame_size      = 9018,
+       .max_hw_frame_size      = 9022,
        .get_variants           = e1000_get_variants_ich8lan,
        .mac_ops                = &ich8_mac_ops,
        .phy_ops                = &ich8_phy_ops,
@@ -5794,7 +5790,7 @@ const struct e1000_info e1000_pch_spt_info = {
        .flags2                 = FLAG2_HAS_PHY_STATS
                                  | FLAG2_HAS_EEE,
        .pba                    = 26,
-       .max_hw_frame_size      = 9018,
+       .max_hw_frame_size      = 9022,
        .get_variants           = e1000_get_variants_ich8lan,
        .mac_ops                = &ich8_mac_ops,
        .phy_ops                = &ich8_phy_ops,
index 770a573b9eea6c7dd1302ff06996c4025b2854f7..26459853c6be2133917a69c3fdba66d890a00ef7 100644 (file)
@@ -1,5 +1,5 @@
 /* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
 #define E1000_FEXTNVM6_K1_OFF_ENABLE   0x80000000
 /* bit for disabling packet buffer read */
 #define E1000_FEXTNVM7_DISABLE_PB_READ 0x00040000
-
+#define E1000_FEXTNVM7_SIDE_CLK_UNGATE 0x00000004
 #define E1000_FEXTNVM7_DISABLE_SMB_PERST       0x00000020
+#define E1000_FEXTNVM9_IOSFSB_CLKGATE_DIS      0x00000800
+#define E1000_FEXTNVM9_IOSFSB_CLKREQ_DIS       0x00001000
+#define E1000_FEXTNVM11_DISABLE_PB_READ                0x00000200
+#define E1000_FEXTNVM11_DISABLE_MULR_FIX       0x00002000
+
+/* bit24: RXDCTL thresholds granularity: 0 - cache lines, 1 - descriptors */
+#define E1000_RXDCTL_THRESH_UNIT_DESC 0x01000000
 
 #define K1_ENTRY_LATENCY       0
 #define K1_MIN_TIME            1
index 30b74d590bee461663291d803bbfd06e042ba255..e59d7c283cd405daaaea4bb210305c207656bdf3 100644 (file)
@@ -1,5 +1,5 @@
 /* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
index 0513d90cdeeaa980b3fb226eaac6feebe481c4f3..8284618af9ff2dbd75b8e19b6377415778b0f532 100644 (file)
@@ -1,5 +1,5 @@
 /* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
index 06edfca1a35e74752af584a53f8857083f4273f3..cc9b3befc2bc0596ea3ae13da69b5afc29fd459d 100644 (file)
@@ -1,5 +1,5 @@
 /* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
index a8c27f98f7b05e15d9548ba78df62c1c351ac91d..0b9ea5952b0719c0517f44443de2ef96199456d5 100644 (file)
@@ -1,5 +1,5 @@
 /* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
index c509a5c900f5253973b24c9f966cb95f2fe1a2bc..e62b9dcb91fe51309ff7280a20fcbd4d37000548 100644 (file)
@@ -1,5 +1,5 @@
 /* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -48,7 +48,7 @@
 
 #define DRV_EXTRAVERSION "-k"
 
-#define DRV_VERSION "2.3.2" DRV_EXTRAVERSION
+#define DRV_VERSION "3.2.5" DRV_EXTRAVERSION
 char e1000e_driver_name[] = "e1000e";
 const char e1000e_driver_version[] = DRV_VERSION;
 
@@ -3525,22 +3525,30 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
        switch (hw->mac.type) {
        case e1000_pch2lan:
        case e1000_pch_lpt:
-       case e1000_pch_spt:
-               /* On I217, I218 and I219, the clock frequency is 25MHz
-                * or 96MHz as indicated by the System Clock Frequency
-                * Indication
-                */
-               if (((hw->mac.type != e1000_pch_lpt) &&
-                    (hw->mac.type != e1000_pch_spt)) ||
-                   (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) {
+               if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
                        /* Stable 96MHz frequency */
                        incperiod = INCPERIOD_96MHz;
                        incvalue = INCVALUE_96MHz;
                        shift = INCVALUE_SHIFT_96MHz;
                        adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHz;
+               } else {
+                       /* Stable 25MHz frequency */
+                       incperiod = INCPERIOD_25MHz;
+                       incvalue = INCVALUE_25MHz;
+                       shift = INCVALUE_SHIFT_25MHz;
+                       adapter->cc.shift = shift;
+               }
+               break;
+       case e1000_pch_spt:
+               if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
+                       /* Stable 24MHz frequency */
+                       incperiod = INCPERIOD_24MHz;
+                       incvalue = INCVALUE_24MHz;
+                       shift = INCVALUE_SHIFT_24MHz;
+                       adapter->cc.shift = shift;
                        break;
                }
-               /* fall-through */
+               return -EINVAL;
        case e1000_82574:
        case e1000_82583:
                /* Stable 25MHz frequency */
@@ -3787,6 +3795,108 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter)
                adapter->hw.phy.ops.power_down(&adapter->hw);
 }
 
+/**
+ * e1000_flush_tx_ring - remove all descriptors from the tx_ring
+ *
+ * We want to clear all pending descriptors from the TX ring.
+ * Zeroing happens when the HW reads the regs. We assign the ring itself as
+ * the data of the next descriptor. We don't care about the data: we are
+ * about to reset the HW anyway.
+ */
+static void e1000_flush_tx_ring(struct e1000_adapter *adapter)
+{
+       struct e1000_hw *hw = &adapter->hw;
+       struct e1000_ring *tx_ring = adapter->tx_ring;
+       struct e1000_tx_desc *tx_desc = NULL;
+       u32 tdt, tctl, txd_lower = E1000_TXD_CMD_IFCS;
+       u16 size = 512;
+
+       tctl = er32(TCTL);
+       ew32(TCTL, tctl | E1000_TCTL_EN);
+       tdt = er32(TDT(0));
+       BUG_ON(tdt != tx_ring->next_to_use);
+       tx_desc = E1000_TX_DESC(*tx_ring, tx_ring->next_to_use);
+       tx_desc->buffer_addr = tx_ring->dma;
+
+       tx_desc->lower.data = cpu_to_le32(txd_lower | size);
+       tx_desc->upper.data = 0;
+       /* flush descriptors to memory before notifying the HW */
+       wmb();
+       tx_ring->next_to_use++;
+       if (tx_ring->next_to_use == tx_ring->count)
+               tx_ring->next_to_use = 0;
+       ew32(TDT(0), tx_ring->next_to_use);
+       mmiowb();
+       usleep_range(200, 250);
+}
+
+/**
+ * e1000_flush_rx_ring - remove all descriptors from the rx_ring
+ *
+ * Mark all descriptors in the RX ring as consumed and disable the rx ring
+ */
+static void e1000_flush_rx_ring(struct e1000_adapter *adapter)
+{
+       u32 rctl, rxdctl;
+       struct e1000_hw *hw = &adapter->hw;
+
+       rctl = er32(RCTL);
+       ew32(RCTL, rctl & ~E1000_RCTL_EN);
+       e1e_flush();
+       usleep_range(100, 150);
+
+       rxdctl = er32(RXDCTL(0));
+       /* zero the lower 14 bits (prefetch and host thresholds) */
+       rxdctl &= 0xffffc000;
+
+       /* update thresholds: prefetch threshold to 31, host threshold to 1
+        * and make sure the granularity is "descriptors" and not "cache lines"
+        */
+       rxdctl |= (0x1F | (1 << 8) | E1000_RXDCTL_THRESH_UNIT_DESC);
+
+       ew32(RXDCTL(0), rxdctl);
+       /* momentarily enable the RX ring for the changes to take effect */
+       ew32(RCTL, rctl | E1000_RCTL_EN);
+       e1e_flush();
+       usleep_range(100, 150);
+       ew32(RCTL, rctl & ~E1000_RCTL_EN);
+}
+
+/**
+ * e1000_flush_desc_rings - remove all descriptors from the descriptor rings
+ *
+ * In i219, the descriptor rings must be emptied before resetting the HW
+ * or before changing the device state to D3 during runtime (runtime PM).
+ *
+ * Failure to do this will cause the HW to enter a unit hang state which can
+ * only be released by PCI reset on the device
+ *
+ */
+
+static void e1000_flush_desc_rings(struct e1000_adapter *adapter)
+{
+       u16 hang_state;
+       u32 fext_nvm11, tdlen;
+       struct e1000_hw *hw = &adapter->hw;
+
+       /* First, disable MULR fix in FEXTNVM11 */
+       fext_nvm11 = er32(FEXTNVM11);
+       fext_nvm11 |= E1000_FEXTNVM11_DISABLE_MULR_FIX;
+       ew32(FEXTNVM11, fext_nvm11);
+       /* do nothing if we're not in a faulty state, or if the queue is empty */
+       tdlen = er32(TDLEN(0));
+       pci_read_config_word(adapter->pdev, PCICFG_DESC_RING_STATUS,
+                            &hang_state);
+       if (!(hang_state & FLUSH_DESC_REQUIRED) || !tdlen)
+               return;
+       e1000_flush_tx_ring(adapter);
+       /* recheck, maybe the fault is caused by the rx ring */
+       pci_read_config_word(adapter->pdev, PCICFG_DESC_RING_STATUS,
+                            &hang_state);
+       if (hang_state & FLUSH_DESC_REQUIRED)
+               e1000_flush_rx_ring(adapter);
+}
+
 /**
  * e1000e_reset - bring the hardware into a known good state
  *
@@ -3807,7 +3917,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
        /* reset Packet Buffer Allocation to default */
        ew32(PBA, pba);
 
-       if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
+       if (adapter->max_frame_size > (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)) {
                /* To maintain wire speed transmits, the Tx FIFO should be
                 * large enough to accommodate two full transmit packets,
                 * rounded up to the next 1KB and expressed in KB.  Likewise,
@@ -3943,6 +4053,8 @@ void e1000e_reset(struct e1000_adapter *adapter)
                }
        }
 
+       if (hw->mac.type == e1000_pch_spt)
+               e1000_flush_desc_rings(adapter);
        /* Allow time for pending master requests to run */
        mac->ops.reset_hw(hw);
 
@@ -4016,6 +4128,20 @@ void e1000e_reset(struct e1000_adapter *adapter)
                phy_data &= ~IGP02E1000_PM_SPD;
                e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
        }
+       if (hw->mac.type == e1000_pch_spt && adapter->int_mode == 0) {
+               u32 reg;
+
+               /* Fextnvm7 @ 0xe4[2] = 1 */
+               reg = er32(FEXTNVM7);
+               reg |= E1000_FEXTNVM7_SIDE_CLK_UNGATE;
+               ew32(FEXTNVM7, reg);
+               /* Fextnvm9 @ 0x5bb4[13:12] = 11 */
+               reg = er32(FEXTNVM9);
+               reg |= E1000_FEXTNVM9_IOSFSB_CLKGATE_DIS |
+                      E1000_FEXTNVM9_IOSFSB_CLKREQ_DIS;
+               ew32(FEXTNVM9, reg);
+       }
+
 }
 
 int e1000e_up(struct e1000_adapter *adapter)
@@ -4115,8 +4241,6 @@ void e1000e_down(struct e1000_adapter *adapter, bool reset)
        spin_unlock(&adapter->stats64_lock);
 
        e1000e_flush_descriptors(adapter);
-       e1000_clean_tx_ring(adapter->tx_ring);
-       e1000_clean_rx_ring(adapter->rx_ring);
 
        adapter->link_speed = 0;
        adapter->link_duplex = 0;
@@ -4127,8 +4251,14 @@ void e1000e_down(struct e1000_adapter *adapter, bool reset)
            e1000_lv_jumbo_workaround_ich8lan(hw, false))
                e_dbg("failed to disable jumbo frame workaround mode\n");
 
-       if (reset && !pci_channel_offline(adapter->pdev))
-               e1000e_reset(adapter);
+       if (!pci_channel_offline(adapter->pdev)) {
+               if (reset)
+                       e1000e_reset(adapter);
+               else if (hw->mac.type == e1000_pch_spt)
+                       e1000_flush_desc_rings(adapter);
+       }
+       e1000_clean_tx_ring(adapter->tx_ring);
+       e1000_clean_rx_ring(adapter->rx_ring);
 }
 
 void e1000e_reinit_locked(struct e1000_adapter *adapter)
@@ -4151,9 +4281,16 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)
                                                     cc);
        struct e1000_hw *hw = &adapter->hw;
        cycle_t systim, systim_next;
+       /* SYSTIMH latching upon SYSTIML read does not work well. To fix that
+        * we don't want to allow overflow of SYSTIML and a change to SYSTIMH
+        * to occur between reads, so if we read a value close to overflow, we
+        * wait for the overflow to occur and read both registers when it's safe.
+        */
+       u32 systim_overflow_latch_fix = 0x3FFFFFFF;
 
-       /* latch SYSTIMH on read of SYSTIML */
-       systim = (cycle_t)er32(SYSTIML);
+       do {
+               systim = (cycle_t)er32(SYSTIML);
+       } while (systim > systim_overflow_latch_fix);
        systim |= (cycle_t)er32(SYSTIMH) << 32;
 
        if ((hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82583)) {
@@ -4196,9 +4333,9 @@ static int e1000_sw_init(struct e1000_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
 
-       adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
+       adapter->rx_buffer_len = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
        adapter->rx_ps_bsize0 = 128;
-       adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+       adapter->max_frame_size = netdev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
        adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
        adapter->tx_ring_count = E1000_DEFAULT_TXD;
        adapter->rx_ring_count = E1000_DEFAULT_RXD;
@@ -5781,17 +5918,17 @@ struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
 static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
-       int max_frame = new_mtu + VLAN_HLEN + ETH_HLEN + ETH_FCS_LEN;
+       int max_frame = new_mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
 
        /* Jumbo frame support */
-       if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
+       if ((max_frame > (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)) &&
            !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
                e_err("Jumbo Frames not supported.\n");
                return -EINVAL;
        }
 
        /* Supported frame sizes */
-       if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) ||
+       if ((new_mtu < (VLAN_ETH_ZLEN + ETH_FCS_LEN)) ||
            (max_frame > adapter->max_hw_frame_size)) {
                e_err("Unsupported MTU setting\n");
                return -EINVAL;
@@ -5831,10 +5968,8 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
                adapter->rx_buffer_len = 4096;
 
        /* adjust allocation if LPE protects us, and we aren't using SBP */
-       if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
-           (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
-               adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN
-                   + ETH_FCS_LEN;
+       if (max_frame <= (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN))
+               adapter->rx_buffer_len = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
 
        if (netif_running(netdev))
                e1000e_up(adapter);
@@ -6678,6 +6813,19 @@ static void e1000_eeprom_checks(struct e1000_adapter *adapter)
        }
 }
 
+static netdev_features_t e1000_fix_features(struct net_device *netdev,
+                                           netdev_features_t features)
+{
+       struct e1000_adapter *adapter = netdev_priv(netdev);
+       struct e1000_hw *hw = &adapter->hw;
+
+       /* Jumbo frame workaround on 82579 and newer requires CRC be stripped */
+       if ((hw->mac.type >= e1000_pch2lan) && (netdev->mtu > ETH_DATA_LEN))
+               features &= ~NETIF_F_RXFCS;
+
+       return features;
+}
+
 static int e1000_set_features(struct net_device *netdev,
                              netdev_features_t features)
 {
@@ -6734,6 +6882,7 @@ static const struct net_device_ops e1000e_netdev_ops = {
        .ndo_poll_controller    = e1000_netpoll,
 #endif
        .ndo_set_features = e1000_set_features,
+       .ndo_fix_features = e1000_fix_features,
 };
 
 /**
@@ -7289,7 +7438,7 @@ static int __init e1000_init_module(void)
 
        pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
                e1000e_driver_version);
-       pr_info("Copyright(c) 1999 - 2014 Intel Corporation.\n");
+       pr_info("Copyright(c) 1999 - 2015 Intel Corporation.\n");
        ret = pci_register_driver(&e1000_driver);
 
        return ret;
index fa6b1036a327d318333f24a28781cdad697b6451..49f205c023bfc9025fe06c857a85d80ffaf94a80 100644 (file)
@@ -1,5 +1,5 @@
 /* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
index 342bf69efab545efcd460eaeb3d50198a22af408..5d46967e0d1f47b4f8782d0518bdec959fd88feb 100644 (file)
@@ -1,5 +1,5 @@
 /* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
index aa1923f7ebdd2e56dd0ebd436feb69164426ad7c..6d8c39abee1676980e7abfd6acbde7545f4ee528 100644 (file)
@@ -1,5 +1,5 @@
 /* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
index b2005e13fb01583a10f58aa37339feb2336191bd..de13aeacae97c85b34bffe28720a584d6ccbc709 100644 (file)
@@ -1,5 +1,5 @@
 /* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
index 537d2780b408b3cdc9e0fc546d45c04444cc86e2..55bfe473514da920177f49fc7c0660445fb31598 100644 (file)
@@ -1,5 +1,5 @@
 /* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
index 8d7b21dc7e19955a2e87868b6c1b4b340feb5236..25a0ad5102d633de71b11b45bdbc8912c3f133a5 100644 (file)
@@ -1,5 +1,5 @@
 /* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
index 85eefc4832ba1172cadca45a5f97ff6c2d5dd9af..b24e5fee17f2d5f7a514213e882bae67b43962c0 100644 (file)
@@ -1,5 +1,5 @@
 /* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -38,6 +38,8 @@
 #define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */
 #define E1000_FEXTNVM6 0x00010 /* Future Extended NVM 6 - RW */
 #define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */
+#define E1000_FEXTNVM9 0x5BB4  /* Future Extended NVM 9 - RW */
+#define E1000_FEXTNVM11        0x5BBC  /* Future Extended NVM 11 - RW */
 #define E1000_PCIEANACFG       0x00F18 /* PCIE Analog Config */
 #define E1000_FCT      0x00030 /* Flow Control Type - RW */
 #define E1000_VET      0x00038 /* VLAN Ether Type - RW */
index 5d47307121abbe413cd259ff74f9aa2ee68e6c45..ec76c3fa3a041158dcb5c21872afd5dd8352b9aa 100644 (file)
@@ -182,6 +182,7 @@ struct i40e_lump_tracking {
 enum i40e_fd_stat_idx {
        I40E_FD_STAT_ATR,
        I40E_FD_STAT_SB,
+       I40E_FD_STAT_ATR_TUNNEL,
        I40E_FD_STAT_PF_COUNT
 };
 #define I40E_FD_STAT_PF_IDX(pf_id) ((pf_id) * I40E_FD_STAT_PF_COUNT)
@@ -189,6 +190,8 @@ enum i40e_fd_stat_idx {
                        (I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_ATR)
 #define I40E_FD_SB_STAT_IDX(pf_id)  \
                        (I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_SB)
+#define I40E_FD_ATR_TUNNEL_STAT_IDX(pf_id) \
+                       (I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_ATR_TUNNEL)
 
 struct i40e_fdir_filter {
        struct hlist_node fdir_node;
@@ -263,8 +266,6 @@ struct i40e_pf {
 
        struct hlist_head fdir_filter_list;
        u16 fdir_pf_active_filters;
-       u16 fd_sb_cnt_idx;
-       u16 fd_atr_cnt_idx;
        unsigned long fd_flush_timestamp;
        u32 fd_flush_cnt;
        u32 fd_add_err;
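
The cached fd_sb_cnt_idx/fd_atr_cnt_idx fields removed above become redundant once the counter index is a pure function of the PF id. A worked example of the layout the new macros define (annotation, not part of the patch):

	/* With the three-entry enum, the per-PF counter blocks are laid out
	 * contiguously; e.g. for pf_id == 2:
	 *
	 *   I40E_FD_STAT_PF_IDX(2)         = 2 * I40E_FD_STAT_PF_COUNT   = 6
	 *   I40E_FD_ATR_STAT_IDX(2)        = 6 + I40E_FD_STAT_ATR        = 6
	 *   I40E_FD_SB_STAT_IDX(2)         = 6 + I40E_FD_STAT_SB         = 7
	 *   I40E_FD_ATR_TUNNEL_STAT_IDX(2) = 6 + I40E_FD_STAT_ATR_TUNNEL = 8
	 *
	 * which is the I40E_GLQF_PCNT() register index the stats-update
	 * code below plugs in directly.
	 */
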
index 4cbaaeb902c47737274010d2070d6fb95c5637ce..9a68c65b17ea03bd00642aab5fe3b2e5a5066765 100644 (file)
@@ -147,6 +147,7 @@ static struct i40e_stats i40e_gstrings_stats[] = {
        I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
        I40E_PF_STAT("fdir_flush_cnt", fd_flush_cnt),
        I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match),
+       I40E_PF_STAT("fdir_atr_tunnel_match", stats.fd_atr_tunnel_match),
        I40E_PF_STAT("fdir_sb_match", stats.fd_sb_match),
 
        /* LPI stats */
@@ -1548,6 +1549,17 @@ static int i40e_loopback_test(struct net_device *netdev, u64 *data)
        return *data;
 }
 
+static inline bool i40e_active_vfs(struct i40e_pf *pf)
+{
+       struct i40e_vf *vfs = pf->vf;
+       int i;
+
+       for (i = 0; i < pf->num_alloc_vfs; i++)
+               if (vfs[i].vf_states & I40E_VF_STAT_ACTIVE)
+                       return true;
+       return false;
+}
+
 static void i40e_diag_test(struct net_device *netdev,
                           struct ethtool_test *eth_test, u64 *data)
 {
@@ -1560,6 +1572,20 @@ static void i40e_diag_test(struct net_device *netdev,
                netif_info(pf, drv, netdev, "offline testing starting\n");
 
                set_bit(__I40E_TESTING, &pf->state);
+
+               if (i40e_active_vfs(pf)) {
+                       dev_warn(&pf->pdev->dev,
+                                "Please take active VFs offline and restart the adapter before running NIC diagnostics\n");
+                       data[I40E_ETH_TEST_REG]         = 1;
+                       data[I40E_ETH_TEST_EEPROM]      = 1;
+                       data[I40E_ETH_TEST_INTR]        = 1;
+                       data[I40E_ETH_TEST_LOOPBACK]    = 1;
+                       data[I40E_ETH_TEST_LINK]        = 1;
+                       eth_test->flags |= ETH_TEST_FL_FAILED;
+                       clear_bit(__I40E_TESTING, &pf->state);
+                       goto skip_ol_tests;
+               }
+
                /* If the device is online then take it offline */
                if (if_running)
                        /* indicate we're in test mode */
@@ -1605,6 +1631,8 @@ static void i40e_diag_test(struct net_device *netdev,
                data[I40E_ETH_TEST_LOOPBACK] = 0;
        }
 
+skip_ol_tests:
+
        netif_info(pf, drv, netdev, "testing finished\n");
 }
 
@@ -2265,7 +2293,7 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
        input->pctype = 0;
        input->dest_vsi = vsi->id;
        input->fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID;
-       input->cnt_index  = pf->fd_sb_cnt_idx;
+       input->cnt_index  = I40E_FD_SB_STAT_IDX(pf->hw.pf_id);
        input->flow_type = fsp->flow_type;
        input->ip4_proto = fsp->h_u.usr_ip4_spec.proto;
 
index 1803afeef23ede81ed906b5400e9f3164234a4de..c8b621e0e7cda622c5a0fa9e795a898e53886cf5 100644 (file)
@@ -118,7 +118,7 @@ static inline int i40e_fcoe_fc_eof(struct sk_buff *skb, u8 *eof)
  *
  * The FC EOF is converted to the value understood by HW for descriptor
  * programming. Never call this w/o calling i40e_fcoe_eof_is_supported()
- * first.
+ * first, which already checks for all supported valid EOF values.
  **/
 static inline u32 i40e_fcoe_ctxt_eof(u8 eof)
 {
@@ -132,9 +132,12 @@ static inline u32 i40e_fcoe_ctxt_eof(u8 eof)
        case FC_EOF_A:
                return I40E_TX_DESC_CMD_L4T_EOFT_EOF_A;
        default:
-               /* FIXME: still returns 0 */
-               pr_err("Unrecognized EOF %x\n", eof);
-               return 0;
+               /* The supported EOF values have already been checked by
+                * calling i40e_fcoe_eof_is_supported() first, so this
+                * default case should never be hit.
+                */
+               WARN_ON(1);
+               return -EINVAL;
        }
 }
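
Note that i40e_fcoe_ctxt_eof() is declared to return u32, so the new -EINVAL comes back as a large positive value; that is only safe because, as the updated comment says, callers are expected to validate the EOF first. A hedged sketch of the intended caller pattern:

    /* validate before converting; the WARN_ON default case then never hits */
    if (!i40e_fcoe_eof_is_supported(eof))
            return -EINVAL;              /* drop the frame */
    ctxt_eof = i40e_fcoe_ctxt_eof(eof);  /* guaranteed to match a case label */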
 
index 5b5bea159bd53c8684d0a69b310e492bc797c8b6..52d7d8b8f1f97542f78147ce7ae5a4d4508015e8 100644 (file)
@@ -39,7 +39,7 @@ static const char i40e_driver_string[] =
 
 #define DRV_VERSION_MAJOR 1
 #define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 2
+#define DRV_VERSION_BUILD 4
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
             __stringify(DRV_VERSION_MINOR) "." \
             __stringify(DRV_VERSION_BUILD)    DRV_KERN
@@ -772,9 +772,8 @@ static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
 
        dcb_cfg = &hw->local_dcbx_config;
 
-       /* See if DCB enabled with PFC TC */
-       if (!(pf->flags & I40E_FLAG_DCB_ENABLED) ||
-           !(dcb_cfg->pfc.pfcenable)) {
+       /* Collect Link XOFF stats when PFC is disabled */
+       if (!dcb_cfg->pfc.pfcenable) {
                i40e_update_link_xoff_rx(pf);
                return;
        }
@@ -1097,12 +1096,18 @@ static void i40e_update_pf_stats(struct i40e_pf *pf)
                           &osd->rx_jabber, &nsd->rx_jabber);
 
        /* FDIR stats */
-       i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_atr_cnt_idx),
+       i40e_stat_update32(hw,
+                          I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(pf->hw.pf_id)),
                           pf->stat_offsets_loaded,
                           &osd->fd_atr_match, &nsd->fd_atr_match);
-       i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_sb_cnt_idx),
+       i40e_stat_update32(hw,
+                          I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(pf->hw.pf_id)),
                           pf->stat_offsets_loaded,
                           &osd->fd_sb_match, &nsd->fd_sb_match);
+       i40e_stat_update32(hw,
+                     I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id)),
+                     pf->stat_offsets_loaded,
+                     &osd->fd_atr_tunnel_match, &nsd->fd_atr_tunnel_match);
 
        val = rd32(hw, I40E_PRTPM_EEE_STAT);
        nsd->tx_lpi_status =
@@ -4739,7 +4744,8 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
                pf->fd_add_err = pf->fd_atr_cnt = 0;
                if (pf->fd_tcp_rule > 0) {
                        pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
-                       dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
+                       if (I40E_DEBUG_FD & pf->hw.debug_mask)
+                               dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
                        pf->fd_tcp_rule = 0;
                }
                i40e_fdir_filter_restore(vsi);
@@ -5428,7 +5434,8 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
                if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
                    (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
                        pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
-                       dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
+                       if (I40E_DEBUG_FD & pf->hw.debug_mask)
+                               dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
                }
        }
        /* Wait for some more space to be available to turn on ATR */
@@ -5436,7 +5443,8 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
                if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
                    (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
                        pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
-                       dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
+                       if (I40E_DEBUG_FD & pf->hw.debug_mask)
+                               dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
                }
        }
 }
@@ -5469,7 +5477,8 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
 
                if (!(time_after(jiffies, min_flush_time)) &&
                    (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
-                       dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
+                       if (I40E_DEBUG_FD & pf->hw.debug_mask)
+                               dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
                        disable_atr = true;
                }
 
@@ -5496,7 +5505,8 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
                        if (!disable_atr)
                                pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
                        clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
-                       dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
+                       if (I40E_DEBUG_FD & pf->hw.debug_mask)
+                               dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
                }
        }
 }
@@ -7680,12 +7690,8 @@ static int i40e_sw_init(struct i40e_pf *pf)
            (pf->hw.func_caps.fd_filters_best_effort > 0)) {
                pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
                pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
-               /* Setup a counter for fd_atr per PF */
-               pf->fd_atr_cnt_idx = I40E_FD_ATR_STAT_IDX(pf->hw.pf_id);
                if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
                        pf->flags |= I40E_FLAG_FD_SB_ENABLED;
-                       /* Setup a counter for fd_sb per PF */
-                       pf->fd_sb_cnt_idx = I40E_FD_SB_STAT_IDX(pf->hw.pf_id);
                } else {
                        dev_info(&pf->pdev->dev,
                                 "Flow Director Sideband mode Disabled in MFP mode\n");
@@ -7775,7 +7781,8 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
                pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0;
                pf->fdir_pf_active_filters = 0;
                pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
-               dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
+               if (I40E_DEBUG_FD & pf->hw.debug_mask)
+                       dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
                /* if ATR was auto disabled it can be re-enabled. */
                if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
                    (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
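
The recurring edit in this file moves flow-director log chatter behind the FD debug class, so the messages only appear when that bit is set in the hardware struct's debug mask. The guard, reduced to its essentials (the message text below is a placeholder):

    if (I40E_DEBUG_FD & pf->hw.debug_mask)
            dev_info(&pf->pdev->dev, "FD state change ...\n");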
index 9d95042d5a0f5805824d53ecc847ff76a9909444..9a4f2bc70cd2cb5494576f5530a7447502cd3c91 100644 (file)
@@ -165,9 +165,6 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
        tx_desc->cmd_type_offset_bsz =
                build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);
 
-       /* set the timestamp */
-       tx_buf->time_stamp = jiffies;
-
        /* Force memory writes to complete before letting h/w
         * know there are new descriptors to fetch.
         */
@@ -283,7 +280,8 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
        if (add) {
                pf->fd_tcp_rule++;
                if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
-                       dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
+                       if (I40E_DEBUG_FD & pf->hw.debug_mask)
+                               dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
                        pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
                }
        } else {
@@ -291,7 +289,8 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
                                  (pf->fd_tcp_rule - 1) : 0;
                if (pf->fd_tcp_rule == 0) {
                        pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
-                       dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
+                       if (I40E_DEBUG_FD & pf->hw.debug_mask)
+                               dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
                }
        }
 
@@ -501,7 +500,8 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
                        if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
                            !(pf->auto_disable_flags &
                                     I40E_FLAG_FD_SB_ENABLED)) {
-                               dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
+                               if (I40E_DEBUG_FD & pf->hw.debug_mask)
+                                       dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
                                pf->auto_disable_flags |=
                                                        I40E_FLAG_FD_SB_ENABLED;
                        }
@@ -807,10 +807,6 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
                         tx_ring->vsi->seid,
                         tx_ring->queue_index,
                         tx_ring->next_to_use, i);
-               dev_info(tx_ring->dev, "tx_bi[next_to_clean]\n"
-                        "  time_stamp           <%lx>\n"
-                        "  jiffies              <%lx>\n",
-                        tx_ring->tx_bi[i].time_stamp, jiffies);
 
                netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
 
@@ -1653,9 +1649,6 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
                /* ERR_MASK will only have valid bits if EOP set */
                if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
                        dev_kfree_skb_any(skb);
-                       /* TODO: shouldn't we increment a counter indicating the
-                        * drop?
-                        */
                        continue;
                }
 
@@ -1688,7 +1681,6 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
                skb_mark_napi_id(skb, &rx_ring->q_vector->napi);
                i40e_receive_skb(rx_ring, skb, vlan_tag);
 
-               rx_ring->netdev->last_rx = jiffies;
                rx_desc->wb.qword1.status_error_len = 0;
 
        } while (likely(total_rx_packets < budget));
@@ -1821,7 +1813,6 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
 #endif
                i40e_receive_skb(rx_ring, skb, vlan_tag);
 
-               rx_ring->netdev->last_rx = jiffies;
                rx_desc->wb.qword1.status_error_len = 0;
        } while (likely(total_rx_packets < budget));
 
@@ -1925,11 +1916,11 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
  * i40e_atr - Add a Flow Director ATR filter
  * @tx_ring:  ring to add programming descriptor to
  * @skb:      send buffer
- * @flags:    send flags
+ * @tx_flags: send tx flags
  * @protocol: wire protocol
  **/
 static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
-                    u32 flags, __be16 protocol)
+                    u32 tx_flags, __be16 protocol)
 {
        struct i40e_filter_program_desc *fdir_desc;
        struct i40e_pf *pf = tx_ring->vsi->back;
@@ -1954,25 +1945,38 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
        if (!tx_ring->atr_sample_rate)
                return;
 
-       /* snag network header to get L4 type and address */
-       hdr.network = skb_network_header(skb);
+       if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
+               return;
 
-       /* Currently only IPv4/IPv6 with TCP is supported */
-       if (protocol == htons(ETH_P_IP)) {
-               if (hdr.ipv4->protocol != IPPROTO_TCP)
-                       return;
+       if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL)) {
+               /* snag network header to get L4 type and address */
+               hdr.network = skb_network_header(skb);
 
-               /* access ihl as a u8 to avoid unaligned access on ia64 */
-               hlen = (hdr.network[0] & 0x0F) << 2;
-       } else if (protocol == htons(ETH_P_IPV6)) {
-               if (hdr.ipv6->nexthdr != IPPROTO_TCP)
+               /* Currently only IPv4/IPv6 with TCP is supported;
+                * access ihl as a u8 to avoid unaligned access on ia64.
+                */
+               if (tx_flags & I40E_TX_FLAGS_IPV4)
+                       hlen = (hdr.network[0] & 0x0F) << 2;
+               else if (protocol == htons(ETH_P_IPV6))
+                       hlen = sizeof(struct ipv6hdr);
+               else
                        return;
-
-               hlen = sizeof(struct ipv6hdr);
        } else {
-               return;
+               hdr.network = skb_inner_network_header(skb);
+               hlen = skb_inner_network_header_len(skb);
        }
 
+       /* Currently only IPv4/IPv6 with TCP is supported.
+        * Note: tx_flags is modified to reflect the inner protocols in
+        * the tx_enable_csum function when encapsulation is enabled.
+        */
+       if ((tx_flags & I40E_TX_FLAGS_IPV4) &&
+           (hdr.ipv4->protocol != IPPROTO_TCP))
+               return;
+       else if ((tx_flags & I40E_TX_FLAGS_IPV6) &&
+                (hdr.ipv6->nexthdr != IPPROTO_TCP))
+               return;
+
        th = (struct tcphdr *)(hdr.network + hlen);
 
        /* Due to lack of space, no more new filters can be programmed */
@@ -2022,9 +2026,16 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
                     I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
 
        dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
-       dtype_cmd |=
-               ((u32)pf->fd_atr_cnt_idx << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
-               I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
+       if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL))
+               dtype_cmd |=
+                       ((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
+                       I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
+                       I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
+       else
+               dtype_cmd |=
+                       ((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
+                       I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
+                       I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
 
        fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
        fdir_desc->rsvd = cpu_to_le32(0);
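
The two branches above differ only in which stat-index macro feeds the CNTINDEX field, which is how tunnel ATR hits land in the new fd_atr_tunnel_match counter. An equivalent, slightly more compact form (a sketch, not the committed code):

    u32 cnt_idx = (tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL) ?
                  I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) :
                  I40E_FD_ATR_STAT_IDX(pf->hw.pf_id);

    dtype_cmd |= ((u32)cnt_idx << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
                 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;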
@@ -2045,13 +2056,13 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
  * otherwise returns 0 to indicate the flags have been set properly.
  **/
 #ifdef I40E_FCOE
-int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
-                              struct i40e_ring *tx_ring,
-                              u32 *flags)
-#else
-static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
+inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
                                      struct i40e_ring *tx_ring,
                                      u32 *flags)
+#else
+static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
+                                            struct i40e_ring *tx_ring,
+                                            u32 *flags)
 #endif
 {
        __be16 protocol = skb->protocol;
@@ -2119,16 +2130,14 @@ out:
  * i40e_tso - set up the tso context descriptor
  * @tx_ring:  ptr to the ring to send
  * @skb:      ptr to the skb we're sending
- * @tx_flags: the collected send information
- * @protocol: the send protocol
  * @hdr_len:  ptr to the size of the packet header
  * @cd_tunneling: ptr to context descriptor bits
  *
  * Returns 0 if no TSO can happen, 1 if tso is going, or error
  **/
 static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
-                   u32 tx_flags, __be16 protocol, u8 *hdr_len,
-                   u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling)
+                   u8 *hdr_len, u64 *cd_type_cmd_tso_mss,
+                   u32 *cd_tunneling)
 {
        u32 cd_cmd, cd_tso_len, cd_mss;
        struct ipv6hdr *ipv6h;
@@ -2220,12 +2229,12 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
 /**
  * i40e_tx_enable_csum - Enable Tx checksum offloads
  * @skb: send buffer
- * @tx_flags: Tx flags currently set
+ * @tx_flags: pointer to Tx flags currently set
  * @td_cmd: Tx descriptor command bits to set
  * @td_offset: Tx descriptor header offsets to set
  * @cd_tunneling: ptr to context desc bits
  **/
-static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
+static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
                                u32 *td_cmd, u32 *td_offset,
                                struct i40e_ring *tx_ring,
                                u32 *cd_tunneling)
@@ -2241,6 +2250,7 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
                switch (ip_hdr(skb)->protocol) {
                case IPPROTO_UDP:
                        l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
+                       *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
                        break;
                default:
                        return;
@@ -2250,18 +2260,17 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
                this_ipv6_hdr = inner_ipv6_hdr(skb);
                this_tcp_hdrlen = inner_tcp_hdrlen(skb);
 
-               if (tx_flags & I40E_TX_FLAGS_IPV4) {
-
-                       if (tx_flags & I40E_TX_FLAGS_TSO) {
+               if (*tx_flags & I40E_TX_FLAGS_IPV4) {
+                       if (*tx_flags & I40E_TX_FLAGS_TSO) {
                                *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
                                ip_hdr(skb)->check = 0;
                        } else {
                                *cd_tunneling |=
                                         I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
                        }
-               } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+               } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
                        *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
-                       if (tx_flags & I40E_TX_FLAGS_TSO)
+                       if (*tx_flags & I40E_TX_FLAGS_TSO)
                                ip_hdr(skb)->check = 0;
                }
 
@@ -2273,8 +2282,8 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
                                        skb_transport_offset(skb)) >> 1) <<
                                   I40E_TXD_CTX_QW0_NATLEN_SHIFT;
                if (this_ip_hdr->version == 6) {
-                       tx_flags &= ~I40E_TX_FLAGS_IPV4;
-                       tx_flags |= I40E_TX_FLAGS_IPV6;
+                       *tx_flags &= ~I40E_TX_FLAGS_IPV4;
+                       *tx_flags |= I40E_TX_FLAGS_IPV6;
                }
        } else {
                network_hdr_len = skb_network_header_len(skb);
@@ -2284,12 +2293,12 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
        }
 
        /* Enable IP checksum offloads */
-       if (tx_flags & I40E_TX_FLAGS_IPV4) {
+       if (*tx_flags & I40E_TX_FLAGS_IPV4) {
                l4_hdr = this_ip_hdr->protocol;
                /* the stack computes the IP header already, the only time we
                 * need the hardware to recompute it is in the case of TSO.
                 */
-               if (tx_flags & I40E_TX_FLAGS_TSO) {
+               if (*tx_flags & I40E_TX_FLAGS_TSO) {
                        *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
                        this_ip_hdr->check = 0;
                } else {
@@ -2298,7 +2307,7 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
                /* Now set the td_offset for IP header length */
                *td_offset = (network_hdr_len >> 2) <<
                              I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
-       } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+       } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
                l4_hdr = this_ipv6_hdr->nexthdr;
                *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
                /* Now set the td_offset for IP header length */
@@ -2396,9 +2405,9 @@ static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
  * Returns 0 if stop is not needed
  **/
 #ifdef I40E_FCOE
-int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
 #else
-static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
 #endif
 {
        if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
@@ -2473,13 +2482,13 @@ linearize_chk_done:
  * @td_offset: offset for checksum or crc
  **/
 #ifdef I40E_FCOE
-void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
-                struct i40e_tx_buffer *first, u32 tx_flags,
-                const u8 hdr_len, u32 td_cmd, u32 td_offset)
-#else
-static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
+inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
                        struct i40e_tx_buffer *first, u32 tx_flags,
                        const u8 hdr_len, u32 td_cmd, u32 td_offset)
+#else
+static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
+                              struct i40e_tx_buffer *first, u32 tx_flags,
+                              const u8 hdr_len, u32 td_cmd, u32 td_offset)
 #endif
 {
        unsigned int data_len = skb->data_len;
@@ -2585,9 +2594,6 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
                                                 tx_ring->queue_index),
                             first->bytecount);
 
-       /* set the timestamp */
-       first->time_stamp = jiffies;
-
        /* Force memory writes to complete before letting h/w
         * know there are new descriptors to fetch.  (Only
         * applicable for weak-ordered memory model archs,
@@ -2640,11 +2646,11 @@ dma_error:
  * one descriptor.
  **/
 #ifdef I40E_FCOE
-int i40e_xmit_descriptor_count(struct sk_buff *skb,
-                              struct i40e_ring *tx_ring)
-#else
-static int i40e_xmit_descriptor_count(struct sk_buff *skb,
+inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
                                      struct i40e_ring *tx_ring)
+#else
+static inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
+                                            struct i40e_ring *tx_ring)
 #endif
 {
        unsigned int f;
@@ -2706,7 +2712,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
        else if (protocol == htons(ETH_P_IPV6))
                tx_flags |= I40E_TX_FLAGS_IPV6;
 
-       tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len,
+       tso = i40e_tso(tx_ring, skb, &hdr_len,
                       &cd_type_cmd_tso_mss, &cd_tunneling);
 
        if (tso < 0)
@@ -2732,7 +2738,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                tx_flags |= I40E_TX_FLAGS_CSUM;
 
-               i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset,
+               i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
                                    tx_ring, &cd_tunneling);
        }
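
Taken together, these i40e_txrx.c hunks thread one piece of state through the transmit path: i40e_tx_enable_csum() now receives tx_flags by pointer so it can record that the frame is VXLAN-encapsulated, and i40e_atr() consumes that flag to parse the inner headers and select the tunnel stat counter. A condensed data-flow sketch of the hot path:

    u32 tx_flags = 0;

    /* may set I40E_TX_FLAGS_VXLAN_TUNNEL for UDP-encapsulated frames */
    i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
                        tx_ring, &cd_tunneling);

    /* reads the flag: inner headers + tunnel ATR counter when set */
    i40e_atr(tx_ring, skb, tx_flags, protocol);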
 
index 4b0b8102cdc39c2529f49c18d6b1cbc61c48c341..0dc48dc9ca61922a4b11bd0b7624f07c153c603a 100644 (file)
@@ -139,6 +139,7 @@ enum i40e_dyn_idx_t {
 #define I40E_TX_FLAGS_FSO              (u32)(1 << 7)
 #define I40E_TX_FLAGS_TSYN             (u32)(1 << 8)
 #define I40E_TX_FLAGS_FD_SB            (u32)(1 << 9)
+#define I40E_TX_FLAGS_VXLAN_TUNNEL     (u32)(1 << 10)
 #define I40E_TX_FLAGS_VLAN_MASK                0xffff0000
 #define I40E_TX_FLAGS_VLAN_PRIO_MASK   0xe0000000
 #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT  29
@@ -146,7 +147,6 @@ enum i40e_dyn_idx_t {
 
 struct i40e_tx_buffer {
        struct i40e_tx_desc *next_to_watch;
-       unsigned long time_stamp;
        union {
                struct sk_buff *skb;
                void *raw_buf;
index 568e855da0f3e16b29a81702766070974dd3da91..9a5a75b1e2bc053b50bec13adde2fd3aa4848595 100644 (file)
@@ -1133,6 +1133,7 @@ struct i40e_hw_port_stats {
        /* flow director stats */
        u64 fd_atr_match;
        u64 fd_sb_match;
+       u64 fd_atr_tunnel_match;
        /* EEE LPI */
        u32 tx_lpi_status;
        u32 rx_lpi_status;
index 4e9376da051829969de7750c2dc7a66acc5e5f40..23f95cdbdfcc2c20d5913fbab3a2b71a1bb61064 100644 (file)
@@ -980,6 +980,13 @@ static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
        int pre_existing_vfs = pci_num_vf(pdev);
        int err = 0;
 
+       if (test_bit(__I40E_TESTING, &pf->state)) {
+               dev_warn(&pdev->dev,
+                        "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
+               err = -EPERM;
+               goto err_out;
+       }
+
        dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
        if (pre_existing_vfs && pre_existing_vfs != num_vfs)
                i40e_free_vfs(pf);
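
pf->state is a bitmap indexed by the __I40E_* enum values (note the set_bit()/clear_bit() calls in the ethtool hunks above), so __I40E_TESTING is a bit number rather than a mask and must be read through test_bit(). Minimal illustration of the convention:

    set_bit(__I40E_TESTING, &pf->state);        /* how the flag is raised */
    if (test_bit(__I40E_TESTING, &pf->state))   /* how it must be checked */
            return -EPERM;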
index 458fbb421090772d0bbc1620277624339e0cd757..f54996f196293d8cf0c1942effe40c2e0e77b77e 100644 (file)
@@ -322,10 +322,6 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
                         tx_ring->vsi->seid,
                         tx_ring->queue_index,
                         tx_ring->next_to_use, i);
-               dev_info(tx_ring->dev, "tx_bi[next_to_clean]\n"
-                        "  time_stamp           <%lx>\n"
-                        "  jiffies              <%lx>\n",
-                        tx_ring->tx_bi[i].time_stamp, jiffies);
 
                netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
 
@@ -1128,9 +1124,6 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
                /* ERR_MASK will only have valid bits if EOP set */
                if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
                        dev_kfree_skb_any(skb);
-                       /* TODO: shouldn't we increment a counter indicating the
-                        * drop?
-                        */
                        continue;
                }
 
@@ -1156,7 +1149,6 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
                skb_mark_napi_id(skb, &rx_ring->q_vector->napi);
                i40e_receive_skb(rx_ring, skb, vlan_tag);
 
-               rx_ring->netdev->last_rx = jiffies;
                rx_desc->wb.qword1.status_error_len = 0;
 
        } while (likely(total_rx_packets < budget));
@@ -1271,7 +1263,6 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
                         : 0;
                i40e_receive_skb(rx_ring, skb, vlan_tag);
 
-               rx_ring->netdev->last_rx = jiffies;
                rx_desc->wb.qword1.status_error_len = 0;
        } while (likely(total_rx_packets < budget));
 
@@ -1352,7 +1343,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
 }
 
 /**
- * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
+ * i40evf_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
  * @skb:     send buffer
  * @tx_ring: ring to send buffer on
  * @flags:   the tx flags to be set
@@ -1363,9 +1354,9 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
  * Returns an error code to indicate the frame should be dropped upon error, and
  * otherwise returns 0 to indicate the flags have been set properly.
  **/
-static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
-                                     struct i40e_ring *tx_ring,
-                                     u32 *flags)
+static inline int i40evf_tx_prepare_vlan_flags(struct sk_buff *skb,
+                                              struct i40e_ring *tx_ring,
+                                              u32 *flags)
 {
        __be16 protocol = skb->protocol;
        u32  tx_flags = 0;
@@ -1408,16 +1399,14 @@ out:
  * i40e_tso - set up the tso context descriptor
  * @tx_ring:  ptr to the ring to send
  * @skb:      ptr to the skb we're sending
- * @tx_flags: the collected send information
- * @protocol: the send protocol
  * @hdr_len:  ptr to the size of the packet header
  * @cd_tunneling: ptr to context descriptor bits
  *
  * Returns 0 if no TSO can happen, 1 if tso is going, or error
  **/
 static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
-                   u32 tx_flags, __be16 protocol, u8 *hdr_len,
-                   u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling)
+                   u8 *hdr_len, u64 *cd_type_cmd_tso_mss,
+                   u32 *cd_tunneling)
 {
        u32 cd_cmd, cd_tso_len, cd_mss;
        struct ipv6hdr *ipv6h;
@@ -1468,12 +1457,12 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
 /**
  * i40e_tx_enable_csum - Enable Tx checksum offloads
  * @skb: send buffer
- * @tx_flags: Tx flags currently set
+ * @tx_flags: pointer to Tx flags currently set
  * @td_cmd: Tx descriptor command bits to set
  * @td_offset: Tx descriptor header offsets to set
  * @cd_tunneling: ptr to context desc bits
  **/
-static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
+static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
                                u32 *td_cmd, u32 *td_offset,
                                struct i40e_ring *tx_ring,
                                u32 *cd_tunneling)
@@ -1489,6 +1478,7 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
                switch (ip_hdr(skb)->protocol) {
                case IPPROTO_UDP:
                        l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
+                       *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
                        break;
                default:
                        return;
@@ -1498,18 +1488,17 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
                this_ipv6_hdr = inner_ipv6_hdr(skb);
                this_tcp_hdrlen = inner_tcp_hdrlen(skb);
 
-               if (tx_flags & I40E_TX_FLAGS_IPV4) {
-
-                       if (tx_flags & I40E_TX_FLAGS_TSO) {
+               if (*tx_flags & I40E_TX_FLAGS_IPV4) {
+                       if (*tx_flags & I40E_TX_FLAGS_TSO) {
                                *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
                                ip_hdr(skb)->check = 0;
                        } else {
                                *cd_tunneling |=
                                         I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
                        }
-               } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+               } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
                        *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
-                       if (tx_flags & I40E_TX_FLAGS_TSO)
+                       if (*tx_flags & I40E_TX_FLAGS_TSO)
                                ip_hdr(skb)->check = 0;
                }
 
@@ -1521,8 +1510,8 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
                                        skb_transport_offset(skb)) >> 1) <<
                                   I40E_TXD_CTX_QW0_NATLEN_SHIFT;
                if (this_ip_hdr->version == 6) {
-                       tx_flags &= ~I40E_TX_FLAGS_IPV4;
-                       tx_flags |= I40E_TX_FLAGS_IPV6;
+                       *tx_flags &= ~I40E_TX_FLAGS_IPV4;
+                       *tx_flags |= I40E_TX_FLAGS_IPV6;
                }
 
 
@@ -1534,12 +1523,12 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
        }
 
        /* Enable IP checksum offloads */
-       if (tx_flags & I40E_TX_FLAGS_IPV4) {
+       if (*tx_flags & I40E_TX_FLAGS_IPV4) {
                l4_hdr = this_ip_hdr->protocol;
                /* the stack computes the IP header already, the only time we
                 * need the hardware to recompute it is in the case of TSO.
                 */
-               if (tx_flags & I40E_TX_FLAGS_TSO) {
+               if (*tx_flags & I40E_TX_FLAGS_TSO) {
                        *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
                        this_ip_hdr->check = 0;
                } else {
@@ -1548,7 +1537,7 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
                /* Now set the td_offset for IP header length */
                *td_offset = (network_hdr_len >> 2) <<
                              I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
-       } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+       } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
                l4_hdr = this_ipv6_hdr->nexthdr;
                *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
                /* Now set the td_offset for IP header length */
@@ -1672,7 +1661,44 @@ linearize_chk_done:
 }
 
 /**
- * i40e_tx_map - Build the Tx descriptor
+ * __i40evf_maybe_stop_tx - 2nd level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size:    the size buffer we want to assure is available
+ *
+ * Returns -EBUSY if a stop is needed, else 0
+ **/
+static inline int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
+       netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+       /* Memory barrier before checking head and tail */
+       smp_mb();
+
+       /* Check again in case another CPU has just made room available. */
+       if (likely(I40E_DESC_UNUSED(tx_ring) < size))
+               return -EBUSY;
+
+       /* A reprieve! - use start_queue because it doesn't call schedule */
+       netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
+       ++tx_ring->tx_stats.restart_queue;
+       return 0;
+}
+
+/**
+ * i40evf_maybe_stop_tx - 1st level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size:    the size of the buffer we want to ensure is available
+ *
+ * Returns 0 if stop is not needed
+ **/
+static inline int i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
+       if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
+               return 0;
+       return __i40evf_maybe_stop_tx(tx_ring, size);
+}
+
+/**
+ * i40evf_tx_map - Build the Tx descriptor
  * @tx_ring:  ring to send buffer on
  * @skb:      send buffer
  * @first:    first buffer info buffer to use
@@ -1681,9 +1707,9 @@ linearize_chk_done:
  * @td_cmd:   the command field in the descriptor
  * @td_offset: offset for checksum or crc
  **/
-static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
-                       struct i40e_tx_buffer *first, u32 tx_flags,
-                       const u8 hdr_len, u32 td_cmd, u32 td_offset)
+static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
+                                struct i40e_tx_buffer *first, u32 tx_flags,
+                                const u8 hdr_len, u32 td_cmd, u32 td_offset)
 {
        unsigned int data_len = skb->data_len;
        unsigned int size = skb_headlen(skb);
@@ -1789,9 +1815,6 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
                                                 tx_ring->queue_index),
                             first->bytecount);
 
-       /* set the timestamp */
-       first->time_stamp = jiffies;
-
        /* Force memory writes to complete before letting h/w
         * know there are new descriptors to fetch.  (Only
         * applicable for weak-ordered memory model archs,
@@ -1808,8 +1831,12 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 
        tx_ring->next_to_use = i;
 
+       i40evf_maybe_stop_tx(tx_ring, DESC_NEEDED);
        /* notify HW of packet */
-       writel(i, tx_ring->tail);
+       if (!skb->xmit_more ||
+           netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
+                                                  tx_ring->queue_index)))
+               writel(i, tx_ring->tail);
 
        return;
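
The tail write is the MMIO doorbell that tells the hardware to fetch newly queued descriptors. Honoring skb->xmit_more lets the stack post several descriptors behind a single doorbell; the netif_xmit_stopped() clause keeps a stopped queue from deferring the kick indefinitely. The logic in isolation:

    /* ring the doorbell only on the last skb of a batch, or when the
     * queue has stopped and pending work must be flushed anyway
     */
    if (!skb->xmit_more ||
        netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
                                               tx_ring->queue_index)))
            writel(i, tx_ring->tail);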
 
@@ -1831,44 +1858,7 @@ dma_error:
 }
 
 /**
- * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
- * @tx_ring: the ring to be checked
- * @size:    the size buffer we want to assure is available
- *
- * Returns -EBUSY if a stop is needed, else 0
- **/
-static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
-{
-       netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
-       /* Memory barrier before checking head and tail */
-       smp_mb();
-
-       /* Check again in a case another CPU has just made room available. */
-       if (likely(I40E_DESC_UNUSED(tx_ring) < size))
-               return -EBUSY;
-
-       /* A reprieve! - use start_queue because it doesn't call schedule */
-       netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
-       ++tx_ring->tx_stats.restart_queue;
-       return 0;
-}
-
-/**
- * i40e_maybe_stop_tx - 1st level check for tx stop conditions
- * @tx_ring: the ring to be checked
- * @size:    the size buffer we want to assure is available
- *
- * Returns 0 if stop is not needed
- **/
-static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
-{
-       if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
-               return 0;
-       return __i40e_maybe_stop_tx(tx_ring, size);
-}
-
-/**
- * i40e_xmit_descriptor_count - calculate number of tx descriptors needed
+ * i40evf_xmit_descriptor_count - calculate number of tx descriptors needed
  * @skb:     send buffer
  * @tx_ring: ring to send buffer on
  *
@@ -1876,8 +1866,8 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
  * there are not enough descriptors available in this ring, since we need at least
  * one descriptor.
  **/
-static int i40e_xmit_descriptor_count(struct sk_buff *skb,
-                                     struct i40e_ring *tx_ring)
+static inline int i40evf_xmit_descriptor_count(struct sk_buff *skb,
+                                              struct i40e_ring *tx_ring)
 {
        unsigned int f;
        int count = 0;
@@ -1892,7 +1882,7 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb,
                count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
 
        count += TXD_USE_COUNT(skb_headlen(skb));
-       if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
+       if (i40evf_maybe_stop_tx(tx_ring, count + 4 + 1)) {
                tx_ring->tx_stats.tx_busy++;
                return 0;
        }
@@ -1918,11 +1908,11 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
        u32 td_cmd = 0;
        u8 hdr_len = 0;
        int tso;
-       if (0 == i40e_xmit_descriptor_count(skb, tx_ring))
+       if (0 == i40evf_xmit_descriptor_count(skb, tx_ring))
                return NETDEV_TX_BUSY;
 
        /* prepare the xmit flags */
-       if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
+       if (i40evf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
                goto out_drop;
 
        /* obtain protocol of skb */
@@ -1937,7 +1927,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
        else if (protocol == htons(ETH_P_IPV6))
                tx_flags |= I40E_TX_FLAGS_IPV6;
 
-       tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len,
+       tso = i40e_tso(tx_ring, skb, &hdr_len,
                       &cd_type_cmd_tso_mss, &cd_tunneling);
 
        if (tso < 0)
@@ -1958,17 +1948,15 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                tx_flags |= I40E_TX_FLAGS_CSUM;
 
-               i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset,
+               i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
                                    tx_ring, &cd_tunneling);
        }
 
        i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
                           cd_tunneling, cd_l2tag2);
 
-       i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
-                   td_cmd, td_offset);
-
-       i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
+       i40evf_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
+                     td_cmd, td_offset);
 
        return NETDEV_TX_OK;
 
index 1e49bb1fbac1f0de59444626cc9645b72aeac0da..e7a34f899f2cbb8150495a31e0690a95e90efc1a 100644 (file)
@@ -138,6 +138,7 @@ enum i40e_dyn_idx_t {
 #define I40E_TX_FLAGS_FCCRC            (u32)(1 << 6)
 #define I40E_TX_FLAGS_FSO              (u32)(1 << 7)
 #define I40E_TX_FLAGS_FD_SB            (u32)(1 << 9)
+#define I40E_TX_FLAGS_VXLAN_TUNNEL     (u32)(1 << 10)
 #define I40E_TX_FLAGS_VLAN_MASK                0xffff0000
 #define I40E_TX_FLAGS_VLAN_PRIO_MASK   0xe0000000
 #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT  29
@@ -145,7 +146,6 @@ enum i40e_dyn_idx_t {
 
 struct i40e_tx_buffer {
        struct i40e_tx_desc *next_to_watch;
-       unsigned long time_stamp;
        union {
                struct sk_buff *skb;
                void *raw_buf;
index ec9d83a9337944f20f934cf6217467ee61ec7303..c463ec41579c708ffbe606ea0b31ca0485ea4c58 100644 (file)
@@ -1108,6 +1108,7 @@ struct i40e_hw_port_stats {
        /* flow director stats */
        u64 fd_atr_match;
        u64 fd_sb_match;
+       u64 fd_atr_tunnel_match;
        /* EEE LPI */
        u32 tx_lpi_status;
        u32 rx_lpi_status;
index a0a9b1fcb5e8efcf4f7ebfe980459f64056e896f..f287186192bb655ba2dc1a205fb251351d593e98 100644 (file)
@@ -1836,31 +1836,19 @@ void igb_reinit_locked(struct igb_adapter *adapter)
  *
  * @adapter: adapter struct
  **/
-static s32 igb_enable_mas(struct igb_adapter *adapter)
+static void igb_enable_mas(struct igb_adapter *adapter)
 {
        struct e1000_hw *hw = &adapter->hw;
-       u32 connsw;
-       s32 ret_val = 0;
-
-       connsw = rd32(E1000_CONNSW);
-       if (!(hw->phy.media_type == e1000_media_type_copper))
-               return ret_val;
+       u32 connsw = rd32(E1000_CONNSW);
 
        /* configure for SerDes media detect */
-       if (!(connsw & E1000_CONNSW_SERDESD)) {
+       if ((hw->phy.media_type == e1000_media_type_copper) &&
+           (!(connsw & E1000_CONNSW_SERDESD))) {
                connsw |= E1000_CONNSW_ENRGSRC;
                connsw |= E1000_CONNSW_AUTOSENSE_EN;
                wr32(E1000_CONNSW, connsw);
                wrfl();
-       } else if (connsw & E1000_CONNSW_SERDESD) {
-               /* already SerDes, no need to enable anything */
-               return ret_val;
-       } else {
-               netdev_info(adapter->netdev,
-                       "MAS: Unable to configure feature, disabling..\n");
-               adapter->flags &= ~IGB_FLAG_MAS_ENABLE;
        }
-       return ret_val;
 }
 
 void igb_reset(struct igb_adapter *adapter)
@@ -1980,10 +1968,9 @@ void igb_reset(struct igb_adapter *adapter)
                adapter->ei.get_invariants(hw);
                adapter->flags &= ~IGB_FLAG_MEDIA_RESET;
        }
-       if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
-               if (igb_enable_mas(adapter))
-                       dev_err(&pdev->dev,
-                               "Error enabling Media Auto Sense\n");
+       if ((mac->type == e1000_82575) &&
+           (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
+               igb_enable_mas(adapter);
        }
        if (hw->mac.ops.init_hw(hw))
                dev_err(&pdev->dev, "Hardware Error\n");
@@ -4989,6 +4976,7 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
        struct igb_tx_buffer *first;
        int tso;
        u32 tx_flags = 0;
+       unsigned short f;
        u16 count = TXD_USE_COUNT(skb_headlen(skb));
        __be16 protocol = vlan_get_protocol(skb);
        u8 hdr_len = 0;
@@ -4999,14 +4987,8 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
         *       + 1 desc for context descriptor,
         * otherwise try next time
         */
-       if (NETDEV_FRAG_PAGE_MAX_SIZE > IGB_MAX_DATA_PER_TXD) {
-               unsigned short f;
-
-               for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
-                       count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
-       } else {
-               count += skb_shinfo(skb)->nr_frags;
-       }
+       for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+               count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
 
        if (igb_maybe_stop_tx(tx_ring, count + 3)) {
                /* this is a hard error */
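
The removed fast path assumed any fragment no larger than NETDEV_FRAG_PAGE_MAX_SIZE fits one descriptor; always running fragments through TXD_USE_COUNT() is the safe count when a fragment can exceed the per-descriptor data limit. A worked example, assuming igb's usual 32 KiB (1 << IGB_MAX_TXD_PWR) limit:

    /* TXD_USE_COUNT(S) rounds S up to whole descriptors:
     *   a 64 KiB fragment -> DIV_ROUND_UP(65536, 32768) == 2 descriptors
     *   a 4 KiB page frag -> 1 descriptor
     */
    for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
            count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);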
index eafa9ec802bab921fee1fc28b927d02e7d48bcae..9a1d0f142b091c3cecb06ce65ffac1d3c8e6bf1a 100644 (file)
@@ -2594,18 +2594,35 @@ static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
        struct ixgbe_hw *hw = &adapter->hw;
        struct ixgbe_fdir_filter *input;
        union ixgbe_atr_input mask;
+       u8 queue;
        int err;
 
        if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
                return -EOPNOTSUPP;
 
-       /*
-        * Don't allow programming if the action is a queue greater than
-        * the number of online Rx queues.
+       /* ring_cookie is masked into a set of queues and ixgbe pools, or
+        * we use the drop index.
         */
-       if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
-           (fsp->ring_cookie >= adapter->num_rx_queues))
-               return -EINVAL;
+       if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
+               queue = IXGBE_FDIR_DROP_QUEUE;
+       } else {
+               u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
+               u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);
+
+               if (!vf && (ring >= adapter->num_rx_queues))
+                       return -EINVAL;
+               else if (vf &&
+                        ((vf > adapter->num_vfs) ||
+                          ring >= adapter->num_rx_queues_per_pool))
+                       return -EINVAL;
+
+               /* Map the ring onto the absolute queue index */
+               if (!vf)
+                       queue = adapter->rx_ring[ring]->reg_idx;
+               else
+                       queue = ((vf - 1) *
+                               adapter->num_rx_queues_per_pool) + ring;
+       }
 
        /* Don't allow indexes to exist outside of available space */
        if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) {
@@ -2683,10 +2700,7 @@ static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
 
        /* program filters to filter memory */
        err = ixgbe_fdir_write_perfect_filter_82599(hw,
-                               &input->filter, input->sw_idx,
-                               (input->action == IXGBE_FDIR_DROP_QUEUE) ?
-                               IXGBE_FDIR_DROP_QUEUE :
-                               adapter->rx_ring[input->action]->reg_idx);
+                               &input->filter, input->sw_idx, queue);
        if (err)
                goto err_out_w_lock;
 
@@ -3053,7 +3067,7 @@ static int ixgbe_get_module_info(struct net_device *dev,
 {
        struct ixgbe_adapter *adapter = netdev_priv(dev);
        struct ixgbe_hw *hw = &adapter->hw;
-       u32 status;
+       s32 status;
        u8 sff8472_rev, addr_mode;
        bool page_swap = false;
 
@@ -3061,14 +3075,14 @@ static int ixgbe_get_module_info(struct net_device *dev,
        status = hw->phy.ops.read_i2c_eeprom(hw,
                                             IXGBE_SFF_SFF_8472_COMP,
                                             &sff8472_rev);
-       if (status != 0)
+       if (status)
                return -EIO;
 
        /* addressing mode is not supported */
        status = hw->phy.ops.read_i2c_eeprom(hw,
                                             IXGBE_SFF_SFF_8472_SWAP,
                                             &addr_mode);
-       if (status != 0)
+       if (status)
                return -EIO;
 
        if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) {
@@ -3095,7 +3109,7 @@ static int ixgbe_get_module_eeprom(struct net_device *dev,
 {
        struct ixgbe_adapter *adapter = netdev_priv(dev);
        struct ixgbe_hw *hw = &adapter->hw;
-       u32 status = IXGBE_ERR_PHY_ADDR_INVALID;
+       s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
        u8 databyte = 0xFF;
        int i = 0;
 
@@ -3112,7 +3126,7 @@ static int ixgbe_get_module_eeprom(struct net_device *dev,
                else
                        status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte);
 
-               if (status != 0)
+               if (status)
                        return -EIO;
 
                data[i - ee->offset] = databyte;
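
ethtool packs the flow-spec destination into the 64-bit ring_cookie; ethtool_get_flow_spec_ring() extracts the ring and ethtool_get_flow_spec_ring_vf() a 1-based VF index (0 meaning the PF). A worked example of the mapping above, assuming 4 Rx queues per pool:

    /* vf == 0: the PF owns the ring, use its register index directly.
     * vf == 2, ring == 1: queue = (2 - 1) * 4 + 1 == 5
     */
    u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
    u8  vf   = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);
    u8  queue = vf ? (vf - 1) * adapter->num_rx_queues_per_pool + ring
                   : adapter->rx_ring[ring]->reg_idx;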
index 5be12a00e1f447744f2497131cea1a70e313fd1f..23d82b34314e110ccd5dca1d52b330b25a708126 100644 (file)
@@ -4757,7 +4757,7 @@ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
 {
        u32 speed;
        bool autoneg, link_up = false;
-       u32 ret = IXGBE_ERR_LINK_SETUP;
+       int ret = IXGBE_ERR_LINK_SETUP;
 
        if (hw->mac.ops.check_link)
                ret = hw->mac.ops.check_link(hw, &speed, &link_up, false);
@@ -8022,7 +8022,7 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
                return -EINVAL;
 
        nla_for_each_nested(attr, br_spec, rem) {
-               u32 status;
+               int status;
                __u16 mode;
 
                if (nla_type(attr) != IFLA_BRIDGE_MODE)
index 8a2be444113dd65044d30f7831a81ba1d118f58f..af828f89419f8dc94ccde1b2c18c5ec64f7e3bd8 100644 (file)
@@ -317,14 +317,14 @@ bool ixgbe_check_reset_blocked(struct ixgbe_hw *hw)
  **/
 static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
 {
-       u32 status;
+       s32 status;
        u16 phy_id_high = 0;
        u16 phy_id_low = 0;
 
        status = hw->phy.ops.read_reg(hw, MDIO_DEVID1, MDIO_MMD_PMAPMD,
                                      &phy_id_high);
 
-       if (status == 0) {
+       if (!status) {
                hw->phy.id = (u32)(phy_id_high << 16);
                status = hw->phy.ops.read_reg(hw, MDIO_DEVID2, MDIO_MMD_PMAPMD,
                                              &phy_id_low);
index f5f948d08b436147ca20c076ad80622544cd11a3..0a8b5e42e1a9b2dbd66e776962cacbb47ec021d2 100644 (file)
@@ -696,14 +696,14 @@ static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw)
 
        /* Release both semaphores by writing 0 to the bits REGSMP and SMBI */
 
-       swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
-       swsm &= ~IXGBE_SWSM_SMBI;
-       IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
-
        swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
        swsm &= ~IXGBE_SWFW_REGSMP;
        IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swsm);
 
+       swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+       swsm &= ~IXGBE_SWSM_SMBI;
+       IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
+
        IXGBE_WRITE_FLUSH(hw);
 }
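
The swap makes the release path the mirror image of acquisition: the SMBI semaphore is taken before REGSMP, so it must be dropped after it; releasing in acquisition order could briefly hand SMBI to another agent while this one still holds REGSMP. Sketched pairing (acquisition mechanics elided):

    /* acquire: take SWSM.SMBI, then set SWFW_SYNC.REGSMP
     * release: clear SWFW_SYNC.REGSMP, then clear SWSM.SMBI
     */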
 
index cf5cf819a6b890bdce1da9d8eecc4acbe0d479bb..b0236985e9156e3c66dee931e3a20adc3989bc55 100644 (file)
@@ -103,6 +103,39 @@ static s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
        return 0;
 }
 
+/**
+ * ixgbe_iosf_wait - Wait for IOSF command completion
+ * @hw: pointer to hardware structure
+ * @ctrl: pointer to location to receive final IOSF control value
+ *
+ * Return: failing status on timeout
+ *
+ * Note: ctrl can be NULL if the IOSF control register value is not needed
+ */
+static s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl)
+{
+       u32 i, command;
+
+       /* Check every 10 usec to see if the address cycle completed.
+        * The SB IOSF BUSY bit will clear when the operation is
+        * complete.
+        */
+       for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+               command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
+               if (!(command & IXGBE_SB_IOSF_CTRL_BUSY))
+                       break;
+               usleep_range(10, 20);
+       }
+       if (ctrl)
+               *ctrl = command;
+       if (i == IXGBE_MDIO_COMMAND_TIMEOUT) {
+               hw_dbg(hw, "IOSF wait timed out\n");
+               return IXGBE_ERR_PHY;
+       }
+
+       return 0;
+}
+
 /** ixgbe_read_iosf_sb_reg_x550 - Writes a value to specified register of the
  *  IOSF device
  *  @hw: pointer to hardware structure
@@ -113,7 +146,17 @@ static s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
 static s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
                                       u32 device_type, u32 *data)
 {
-       u32 i, command, error;
+       u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
+       u32 command, error;
+       s32 ret;
+
+       ret = hw->mac.ops.acquire_swfw_sync(hw, gssr);
+       if (ret)
+               return ret;
+
+       ret = ixgbe_iosf_wait(hw, NULL);
+       if (ret)
+               goto out;
 
        command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
                   (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
@@ -121,17 +164,7 @@ static s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
        /* Write IOSF control register */
        IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
 
-       /* Check every 10 usec to see if the address cycle completed.
-        * The SB IOSF BUSY bit will clear when the operation is
-        * complete
-        */
-       for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
-               usleep_range(10, 20);
-
-               command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
-               if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0)
-                       break;
-       }
+       ret = ixgbe_iosf_wait(hw, &command);
 
        if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
                error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
@@ -140,14 +173,12 @@ static s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
                return IXGBE_ERR_PHY;
        }
 
-       if (i == IXGBE_MDIO_COMMAND_TIMEOUT) {
-               hw_dbg(hw, "Read timed out\n");
-               return IXGBE_ERR_PHY;
-       }
-
-       *data = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA);
+       if (!ret)
+               *data = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA);
 
-       return 0;
+out:
+       hw->mac.ops.release_swfw_sync(hw, gssr);
+       return ret;
 }
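
The new helper centralizes the busy-poll that the read and write paths previously open-coded, and both paths now bracket the whole transaction with the PHY software semaphores. Usage shape, condensed from the read path above (the write path below mirrors it); note that the RESP_STAT error branch still returns IXGBE_ERR_PHY directly, which appears to bypass the release_swfw_sync() in the out: path:

    ret = hw->mac.ops.acquire_swfw_sync(hw, gssr);
    if (ret)
            return ret;

    ret = ixgbe_iosf_wait(hw, NULL);      /* bus idle before issuing */
    if (ret)
            goto out;

    IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command);
    ret = ixgbe_iosf_wait(hw, &command);  /* completion, ctrl word captured */
    out:
            hw->mac.ops.release_swfw_sync(hw, gssr);
            return ret;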
 
 /** ixgbe_read_ee_hostif_data_X550 - Read EEPROM word using a host interface
@@ -789,7 +820,17 @@ static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
 static s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
                                        u32 device_type, u32 data)
 {
-       u32 i, command, error;
+       u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM;
+       u32 command, error;
+       s32 ret;
+
+       ret = hw->mac.ops.acquire_swfw_sync(hw, gssr);
+       if (ret)
+               return ret;
+
+       ret = ixgbe_iosf_wait(hw, NULL);
+       if (ret)
+               goto out;
 
        command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) |
                   (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT));
@@ -800,17 +841,7 @@ static s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
        /* Write IOSF data register */
        IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA, data);
 
-       /* Check every 10 usec to see if the address cycle completed.
-        * The SB IOSF BUSY bit will clear when the operation is
-        * complete
-        */
-       for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
-               usleep_range(10, 20);
-
-               command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL);
-               if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0)
-                       break;
-       }
+       ret = ixgbe_iosf_wait(hw, &command);
 
        if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) {
                error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >>
@@ -819,12 +850,9 @@ static s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
-               return IXGBE_ERR_PHY;
+               ret = IXGBE_ERR_PHY;
        }
 
-       if (i == IXGBE_MDIO_COMMAND_TIMEOUT) {
-               hw_dbg(hw, "Write timed out\n");
-               return IXGBE_ERR_PHY;
-       }
-
-       return 0;
+out:
+       hw->mac.ops.release_swfw_sync(hw, gssr);
+       return ret;
 }
 
 /** ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode.
@@ -1035,7 +1063,7 @@ static s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
  **/
 static s32 ixgbe_setup_internal_phy_x550em(struct ixgbe_hw *hw)
 {
-       u32 status;
+       s32 status;
        u16 lasi, autoneg_status, speed;
        ixgbe_link_speed force_speed;
 
@@ -1177,7 +1205,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
  **/
 static s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
 {
-       u32 status;
+       s32 status;
        u16 reg;
        u32 retries = 2;
 
index 1c75829eb1668fe094af3a9049fb53bb0c8b4bb5..d52639bc491f7a1aa76f8bd127df8b9e68be0332 100644 (file)
@@ -3125,9 +3125,8 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
 
        mib_counters_clear(mp);
 
-       init_timer(&mp->mib_counters_timer);
-       mp->mib_counters_timer.data = (unsigned long)mp;
-       mp->mib_counters_timer.function = mib_counters_timer_wrapper;
+       setup_timer(&mp->mib_counters_timer, mib_counters_timer_wrapper,
+                   (unsigned long)mp);
        mp->mib_counters_timer.expires = jiffies + 30 * HZ;
 
        spin_lock_init(&mp->mib_counters_lock);
@@ -3136,9 +3135,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
 
        netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, NAPI_POLL_WEIGHT);
 
-       init_timer(&mp->rx_oom);
-       mp->rx_oom.data = (unsigned long)mp;
-       mp->rx_oom.function = oom_timer_wrapper;
+       setup_timer(&mp->rx_oom, oom_timer_wrapper, (unsigned long)mp);
 
 
        res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
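/* The timer conversions above are mechanical: setup_timer(t, fn, data)
 * collapses the init_timer() + .data/.function assignments into one call.
 * A minimal sketch of the equivalence, using hypothetical my_timer and
 * my_callback names:
 */
static struct timer_list my_timer;

static void my_callback(unsigned long data);

static void my_timer_init_old(unsigned long ctx)
{
        init_timer(&my_timer);          /* three-step form being removed */
        my_timer.data = ctx;
        my_timer.function = my_callback;
}

static void my_timer_init_new(unsigned long ctx)
{
        setup_timer(&my_timer, my_callback, ctx);       /* equivalent */
}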
index ce5f7f9cff060868db1ad0ba3bde378d65222793..ecce8261ce3b3ccdb37e0966ea35f8608a9a2af3 100644 (file)
@@ -1359,7 +1359,7 @@ static void *mvneta_frag_alloc(const struct mvneta_port *pp)
 static void mvneta_frag_free(const struct mvneta_port *pp, void *data)
 {
        if (likely(pp->frag_size <= PAGE_SIZE))
-               put_page(virt_to_head_page(data));
+               skb_free_frag(data);
        else
                kfree(data);
 }
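/* skb_free_frag() frees a page fragment obtained from netdev_alloc_frag()/
 * napi_alloc_frag(), subsuming the put_page(virt_to_head_page(data)) call
 * the driver previously open-coded.
 */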
index 529ef0594b902ebaf2838cf478ef914a0b69d5b7..68ae765873a9617becd0804749718a5ae3529b11 100644 (file)
@@ -882,7 +882,7 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
 {
        struct ib_smp *smp = inbox->buf;
        u32 index;
-       u8 port;
+       u8 port, slave_port;
        u8 opcode_modifier;
        u16 *table;
        int err;
@@ -894,7 +894,8 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
        __be32 slave_cap_mask;
        __be64 slave_node_guid;
 
-       port = vhcr->in_modifier;
+       slave_port = vhcr->in_modifier;
+       port = mlx4_slave_convert_port(dev, slave, slave_port);
 
        /* network-view bit is for driver use only, and should not be passed to FW */
        opcode_modifier = vhcr->op_modifier & ~0x8; /* clear netw view bit */
@@ -930,8 +931,9 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
                        if (smp->attr_id == IB_SMP_ATTR_PORT_INFO) {
                                /* get the slave-specific caps */
                                /* do the command */
+                               smp->attr_mod = cpu_to_be32(port);
                                err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
-                                           vhcr->in_modifier, opcode_modifier,
+                                           port, opcode_modifier,
                                            vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
                                /* modify the response for slaves */
                                if (!err && slave != mlx4_master_func_num(dev)) {
@@ -975,7 +977,7 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
                        }
                        if (smp->attr_id == IB_SMP_ATTR_NODE_INFO) {
                                err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
-                                            vhcr->in_modifier, opcode_modifier,
+                                            port, opcode_modifier,
                                             vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
                                if (!err) {
                                        slave_node_guid =  mlx4_get_slave_node_guid(dev, slave);
@@ -2915,7 +2917,7 @@ int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
        port = mlx4_slaves_closest_port(dev, slave, port);
        s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
        s_info->mac = mac;
-       mlx4_info(dev, "default mac on vf %d port %d to %llX will take afect only after vf restart\n",
+       mlx4_info(dev, "default mac on vf %d port %d to %llX will take effect only after vf restart\n",
                  vf, port, s_info->mac);
        return 0;
 }
@@ -3197,6 +3199,12 @@ int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port,
                                 int enabled)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
+                       &priv->dev, slave);
+       int min_port = find_first_bit(actv_ports.ports,
+                                     priv->dev.caps.num_ports) + 1;
+       int max_port = min_port - 1 +
+               bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);
 
        if (slave == mlx4_master_func_num(dev))
                return 0;
@@ -3206,6 +3214,11 @@ int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port,
            enabled < 0 || enabled > 1)
                return -EINVAL;
 
+       if (min_port == max_port && dev->caps.num_ports > 1) {
+               mlx4_info(dev, "SMI access disallowed for single ported VFs\n");
+               return -EPROTONOSUPPORT;
+       }
+
        priv->mfunc.master.vf_admin[slave].enable_smi[port] = enabled;
        return 0;
 }
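/* A worked example of the min_port/max_port arithmetic above, assuming a
 * 2-port HCA whose VF owns only port 2: actv_ports.ports = 0b10, so
 * find_first_bit() = 1 and min_port = 2; bitmap_weight() = 1, so
 * max_port = 2 - 1 + 1 = 2.  min_port == max_port while num_ports > 1
 * identifies a single-ported VF, and SMI access is refused with
 * -EPROTONOSUPPORT.
 */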
index e71f31387ac6c73b843fef3f023cf6150327fe09..3348e646db705f41ff1cb3923d4d1533aea80e2d 100644 (file)
@@ -292,7 +292,7 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
        u64 mtt_addr;
        int err;
 
-       if (vector > dev->caps.num_comp_vectors + dev->caps.comp_pool)
+       if (vector >= dev->caps.num_comp_vectors)
                return -EINVAL;
 
        cq->vector = vector;
@@ -319,7 +319,7 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
                cq_context->flags  |= cpu_to_be32(1 << 19);
 
        cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index);
-       cq_context->comp_eqn        = priv->eq_table.eq[vector].eqn;
+       cq_context->comp_eqn        = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].eqn;
        cq_context->log_page_size   = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
 
        mtt_addr = mlx4_mtt_addr(dev, mtt);
@@ -339,11 +339,11 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
        init_completion(&cq->free);
        cq->comp = mlx4_add_cq_to_tasklet;
        cq->tasklet_ctx.priv =
-               &priv->eq_table.eq[cq->vector].tasklet_ctx;
+               &priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].tasklet_ctx;
        INIT_LIST_HEAD(&cq->tasklet_ctx.list);
 
 
-       cq->irq = priv->eq_table.eq[cq->vector].irq;
+       cq->irq = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].irq;
        return 0;
 
 err_radix:
@@ -368,7 +368,10 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
        if (err)
                mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);
 
-       synchronize_irq(priv->eq_table.eq[cq->vector].irq);
+       synchronize_irq(priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq);
+       if (priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq !=
+           priv->eq_table.eq[MLX4_EQ_ASYNC].irq)
+               synchronize_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
 
        spin_lock_irq(&cq_table->lock);
        radix_tree_delete(&cq_table->tree, cq->cqn);
index 22da4d0d0f05511dfc89a360e6df6871e96b6e7a..63769df872a42be81784eff223ef41a9e5a639e6 100644 (file)
@@ -66,6 +66,7 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
 
        cq->ring = ring;
        cq->is_tx = mode;
+       cq->vector = mdev->dev->caps.num_comp_vectors;
 
        /* Allocate HW buffers on provided NUMA node.
         * dev->numa_node is used in mtt range allocation flow.
@@ -101,12 +102,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
        int err = 0;
-       char name[25];
        int timestamp_en = 0;
-       struct cpu_rmap *rmap =
-#ifdef CONFIG_RFS_ACCEL
-               priv->dev->rx_cpu_rmap;
-#else
-               NULL;
-#endif
+       bool assigned_eq = false;
 
        cq->dev = mdev->pndev[priv->port];
        cq->mcq.set_ci_db  = cq->wqres.db.db;
@@ -116,23 +112,19 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
        memset(cq->buf, 0, cq->buf_size);
 
        if (cq->is_tx == RX) {
-               if (mdev->dev->caps.comp_pool) {
-                       if (!cq->vector) {
-                               sprintf(name, "%s-%d", priv->dev->name,
-                                       cq->ring);
-                               /* Set IRQ for specific name (per ring) */
-                               if (mlx4_assign_eq(mdev->dev, name, rmap,
-                                                  &cq->vector)) {
-                                       cq->vector = (cq->ring + 1 + priv->port)
-                                           % mdev->dev->caps.num_comp_vectors;
-                                       mlx4_warn(mdev, "Failed assigning an EQ to %s, falling back to legacy EQ's\n",
-                                                 name);
-                               }
-
+               if (!mlx4_is_eq_vector_valid(mdev->dev, priv->port,
+                                            cq->vector)) {
+                       cq->vector = cpumask_first(priv->rx_ring[cq->ring]->affinity_mask);
+
+                       err = mlx4_assign_eq(mdev->dev, priv->port,
+                                            &cq->vector);
+                       if (err) {
+                               mlx4_err(mdev, "Failed assigning an EQ to CQ %d\n",
+                                        cq->ring);
+                               goto free_eq;
                        }
-               } else {
-                       cq->vector = (cq->ring + 1 + priv->port) %
-                               mdev->dev->caps.num_comp_vectors;
+
+                       assigned_eq = true;
                }
 
                cq->irq_desc =
@@ -159,7 +151,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
                            &mdev->priv_uar, cq->wqres.db.dma, &cq->mcq,
                            cq->vector, 0, timestamp_en);
        if (err)
-               return err;
+               goto free_eq;
 
        cq->mcq.comp  = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
        cq->mcq.event = mlx4_en_cq_event;
@@ -168,13 +160,6 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
                netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq,
                               NAPI_POLL_WEIGHT);
        } else {
-               struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];
-
-               err = irq_set_affinity_hint(cq->mcq.irq,
-                                           ring->affinity_mask);
-               if (err)
-                       mlx4_warn(mdev, "Failed setting affinity hint\n");
-
                netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
                napi_hash_add(&cq->napi);
        }
@@ -182,6 +167,12 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
        napi_enable(&cq->napi);
 
        return 0;
+
+free_eq:
+       if (assigned_eq)
+               mlx4_release_eq(mdev->dev, cq->vector);
+       cq->vector = mdev->dev->caps.num_comp_vectors;
+       return err;
 }
 
 void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
@@ -191,9 +182,9 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
 
        mlx4_en_unmap_buffer(&cq->wqres.buf);
        mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
-       if (priv->mdev->dev->caps.comp_pool && cq->vector) {
+       if (mlx4_is_eq_vector_valid(mdev->dev, priv->port, cq->vector) &&
+           cq->is_tx == RX)
                mlx4_release_eq(priv->mdev->dev, cq->vector);
-       }
        cq->vector = 0;
        cq->buf_size = 0;
        cq->buf = NULL;
@@ -207,7 +198,6 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
        if (!cq->is_tx) {
                napi_hash_del(&cq->napi);
                synchronize_rcu();
-               irq_set_affinity_hint(cq->mcq.irq, NULL);
        }
        netif_napi_del(&cq->napi);
 
index cf467a9f6cc78c0c8a53b9120cec2795888f4904..98efb5842fca326b47a03e79dd82a96baa7bba25 100644 (file)
@@ -1954,7 +1954,6 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
        int i;
 
 #ifdef CONFIG_RFS_ACCEL
-       free_irq_cpu_rmap(priv->dev->rx_cpu_rmap);
        priv->dev->rx_cpu_rmap = NULL;
 #endif
 
@@ -2012,11 +2011,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
        }
 
 #ifdef CONFIG_RFS_ACCEL
-       if (priv->mdev->dev->caps.comp_pool) {
-               priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->mdev->dev->caps.comp_pool);
-               if (!priv->dev->rx_cpu_rmap)
-                       goto err;
-       }
+       priv->dev->rx_cpu_rmap = mlx4_get_cpu_rmap(priv->mdev->dev, priv->port);
 #endif
 
        return 0;
index 2a77a6b191216b19059c89fa8ad386252684806c..35f726c17e48c80bdadfc07ba6a43974619c6938 100644 (file)
@@ -337,15 +337,10 @@ void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev)
        struct mlx4_dev *dev = mdev->dev;
 
        mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
-               if (!dev->caps.comp_pool)
-                       num_of_eqs = max_t(int, MIN_RX_RINGS,
-                                          min_t(int,
-                                                dev->caps.num_comp_vectors,
-                                                DEF_RX_RINGS));
-               else
-                       num_of_eqs = min_t(int, MAX_MSIX_P_PORT,
-                                          dev->caps.comp_pool/
-                                          dev->caps.num_ports) - 1;
+               num_of_eqs = max_t(int, MIN_RX_RINGS,
+                                  min_t(int,
+                                        mlx4_get_eqs_per_port(mdev->dev, i),
+                                        DEF_RX_RINGS));
 
                num_rx_rings = mlx4_low_memory_profile() ? MIN_RX_RINGS :
                        min_t(int, num_of_eqs,
index 2619c9fbf42dfb952473e4779a2ee8d6ebfd2c65..aae13adfb492b885bcf2fba03b042b949c3f5575 100644 (file)
@@ -145,7 +145,7 @@ void mlx4_gen_slave_eqe(struct work_struct *work)
        struct mlx4_slave_event_eq *slave_eq = &mfunc->master.slave_eq;
        struct mlx4_eqe *eqe;
        u8 slave;
-       int i;
+       int i, phys_port, slave_port;
 
        for (eqe = next_slave_event_eqe(slave_eq); eqe;
              eqe = next_slave_event_eqe(slave_eq)) {
@@ -154,9 +154,20 @@ void mlx4_gen_slave_eqe(struct work_struct *work)
                /* All active slaves need to receive the event */
                if (slave == ALL_SLAVES) {
                        for (i = 0; i <= dev->persist->num_vfs; i++) {
+                               phys_port = 0;
+                               if (eqe->type == MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT &&
+                                   eqe->subtype == MLX4_DEV_PMC_SUBTYPE_PORT_INFO) {
+                                       phys_port  = eqe->event.port_mgmt_change.port;
+                                       slave_port = mlx4_phys_to_slave_port(dev, i, phys_port);
+                                       if (slave_port < 0) /* VF doesn't have this port */
+                                               continue;
+                                       eqe->event.port_mgmt_change.port = slave_port;
+                               }
                                if (mlx4_GEN_EQE(dev, i, eqe))
                                        mlx4_warn(dev, "Failed to generate event for slave %d\n",
                                                  i);
+                               if (phys_port)
+                                       eqe->event.port_mgmt_change.port = phys_port;
                        }
                } else {
                        if (mlx4_GEN_EQE(dev, slave, eqe))
@@ -210,6 +221,22 @@ static void mlx4_slave_event(struct mlx4_dev *dev, int slave,
        slave_event(dev, slave, eqe);
 }
 
+#if defined(CONFIG_SMP)
+static void mlx4_set_eq_affinity_hint(struct mlx4_priv *priv, int vec)
+{
+       int hint_err;
+       struct mlx4_dev *dev = &priv->dev;
+       struct mlx4_eq *eq = &priv->eq_table.eq[vec];
+
+       if (!eq->affinity_mask || cpumask_empty(eq->affinity_mask))
+               return;
+
+       hint_err = irq_set_affinity_hint(eq->irq, eq->affinity_mask);
+       if (hint_err)
+               mlx4_warn(dev, "irq_set_affinity_hint failed, err %d\n", hint_err);
+}
+#endif
+
 int mlx4_gen_pkey_eqe(struct mlx4_dev *dev, int slave, u8 port)
 {
        struct mlx4_eqe eqe;
@@ -224,7 +251,7 @@ int mlx4_gen_pkey_eqe(struct mlx4_dev *dev, int slave, u8 port)
 
        eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
        eqe.subtype = MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE;
-       eqe.event.port_mgmt_change.port = port;
+       eqe.event.port_mgmt_change.port = mlx4_phys_to_slave_port(dev, slave, port);
 
        return mlx4_GEN_EQE(dev, slave, &eqe);
 }
@@ -241,7 +268,7 @@ int mlx4_gen_guid_change_eqe(struct mlx4_dev *dev, int slave, u8 port)
 
        eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
        eqe.subtype = MLX4_DEV_PMC_SUBTYPE_GUID_INFO;
-       eqe.event.port_mgmt_change.port = port;
+       eqe.event.port_mgmt_change.port = mlx4_phys_to_slave_port(dev, slave, port);
 
        return mlx4_GEN_EQE(dev, slave, &eqe);
 }
@@ -251,6 +278,7 @@ int mlx4_gen_port_state_change_eqe(struct mlx4_dev *dev, int slave, u8 port,
                                   u8 port_subtype_change)
 {
        struct mlx4_eqe eqe;
+       u8 slave_port = mlx4_phys_to_slave_port(dev, slave, port);
 
        /* don't send if we don't have that slave */
        if (dev->persist->num_vfs < slave)
@@ -259,7 +287,7 @@ int mlx4_gen_port_state_change_eqe(struct mlx4_dev *dev, int slave, u8 port,
 
        eqe.type = MLX4_EVENT_TYPE_PORT_CHANGE;
        eqe.subtype = port_subtype_change;
-       eqe.event.port_change.port = cpu_to_be32(port << 28);
+       eqe.event.port_change.port = cpu_to_be32(slave_port << 28);
 
        mlx4_dbg(dev, "%s: sending: %d to slave: %d on port: %d\n", __func__,
                 port_subtype_change, slave, port);
@@ -589,6 +617,10 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
                                                if (SLAVE_PORT_GEN_EVENT_DOWN ==  gen_event) {
                                                        if (i == mlx4_master_func_num(dev))
                                                                continue;
+                                                       eqe->event.port_change.port =
+                                                               cpu_to_be32(
+                                                               (be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF)
+                                                               | (mlx4_phys_to_slave_port(dev, i, port) << 28));
                                                        mlx4_slave_event(dev, i, eqe);
                                                }
                                        }
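/* The port_change EQE keeps the port number in bits 31:28 of its big-endian
 * 'port' field; the 0xFFFFFFF mask above preserves the low 28 bits while the
 * slave's view of the port is substituted.  A sketch of that bit surgery as
 * a helper (hypothetical name, not part of this patch):
 */
static inline __be32 example_repack_eqe_port(__be32 field, u32 slave_port)
{
        u32 v = be32_to_cpu(field);

        return cpu_to_be32((v & 0x0FFFFFFF) | (slave_port << 28));
}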
@@ -879,8 +911,8 @@ static int mlx4_num_eq_uar(struct mlx4_dev *dev)
         * we need to map, take the difference of highest index and
         * the lowest index we'll use and add 1.
         */
-       return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs +
-                dev->caps.comp_pool)/4 - dev->caps.reserved_eqs/4 + 1;
+       return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs) / 4 -
+               dev->caps.reserved_eqs / 4 + 1;
 }
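/* A worked example with assumed numbers: each UAR page maps the doorbells of
 * 4 EQs.  With reserved_eqs = 16 and num_comp_vectors = 7, the formula above
 * evaluates to (7 + 1 + 16) / 4 - 16 / 4 + 1 = 6 - 4 + 1 = 3 UAR pages.
 */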
 
 static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
@@ -1069,32 +1101,21 @@ static void mlx4_free_eq(struct mlx4_dev *dev,
 static void mlx4_free_irqs(struct mlx4_dev *dev)
 {
        struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table;
-       struct mlx4_priv *priv = mlx4_priv(dev);
-       int     i, vec;
+       int     i;
 
        if (eq_table->have_irq)
                free_irq(dev->persist->pdev->irq, dev);
 
        for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
                if (eq_table->eq[i].have_irq) {
+                       free_cpumask_var(eq_table->eq[i].affinity_mask);
+#if defined(CONFIG_SMP)
+                       irq_set_affinity_hint(eq_table->eq[i].irq, NULL);
+#endif
                        free_irq(eq_table->eq[i].irq, eq_table->eq + i);
                        eq_table->eq[i].have_irq = 0;
                }
 
-       for (i = 0; i < dev->caps.comp_pool; i++) {
-               /*
-                * Freeing the assigned irq's
-                * all bits should be 0, but we need to validate
-                */
-               if (priv->msix_ctl.pool_bm & 1ULL << i) {
-                       /* NO need protecting*/
-                       vec = dev->caps.num_comp_vectors + 1 + i;
-                       free_irq(priv->eq_table.eq[vec].irq,
-                                &priv->eq_table.eq[vec]);
-               }
-       }
-
-
        kfree(eq_table->irq_names);
 }
 
@@ -1175,76 +1196,73 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
        }
 
        priv->eq_table.irq_names =
-               kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1 +
-                                            dev->caps.comp_pool),
+               kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1),
                        GFP_KERNEL);
        if (!priv->eq_table.irq_names) {
                err = -ENOMEM;
-               goto err_out_bitmap;
+               goto err_out_clr_int;
        }
 
-       for (i = 0; i < dev->caps.num_comp_vectors; ++i) {
-               err = mlx4_create_eq(dev, dev->caps.num_cqs -
-                                         dev->caps.reserved_cqs +
-                                         MLX4_NUM_SPARE_EQE,
-                                    (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
-                                    &priv->eq_table.eq[i]);
-               if (err) {
-                       --i;
-                       goto err_out_unmap;
-               }
-       }
-
-       err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
-                            (dev->flags & MLX4_FLAG_MSI_X) ? dev->caps.num_comp_vectors : 0,
-                            &priv->eq_table.eq[dev->caps.num_comp_vectors]);
-       if (err)
-               goto err_out_comp;
-
-       /*if additional completion vectors poolsize is 0 this loop will not run*/
-       for (i = dev->caps.num_comp_vectors + 1;
-             i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i) {
+       for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) {
+               if (i == MLX4_EQ_ASYNC) {
+                       err = mlx4_create_eq(dev,
+                                            MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
+                                            0, &priv->eq_table.eq[MLX4_EQ_ASYNC]);
+               } else {
+                       struct mlx4_eq  *eq = &priv->eq_table.eq[i];
+#ifdef CONFIG_RFS_ACCEL
+                       int port = find_first_bit(eq->actv_ports.ports,
+                                                 dev->caps.num_ports) + 1;
+
+                       if (port <= dev->caps.num_ports) {
+                               struct mlx4_port_info *info =
+                                       &mlx4_priv(dev)->port[port];
+
+                               if (!info->rmap) {
+                                       info->rmap = alloc_irq_cpu_rmap(
+                                               mlx4_get_eqs_per_port(dev, port));
+                                       if (!info->rmap) {
+                                               mlx4_warn(dev, "Failed to allocate cpu rmap\n");
+                                               err = -ENOMEM;
+                                               goto err_out_unmap;
+                                       }
+                               }
 
-               err = mlx4_create_eq(dev, dev->caps.num_cqs -
-                                         dev->caps.reserved_cqs +
-                                         MLX4_NUM_SPARE_EQE,
-                                    (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
-                                    &priv->eq_table.eq[i]);
-               if (err) {
-                       --i;
-                       goto err_out_unmap;
+                               err = irq_cpu_rmap_add(
+                                       info->rmap, eq->irq);
+                               if (err)
+                                       mlx4_warn(dev, "Failed adding irq rmap\n");
+                       }
+#endif
+                       err = mlx4_create_eq(dev, dev->caps.num_cqs -
+                                                 dev->caps.reserved_cqs +
+                                                 MLX4_NUM_SPARE_EQE,
+                                            (dev->flags & MLX4_FLAG_MSI_X) ?
+                                            i + 1 - !!(i > MLX4_EQ_ASYNC) : 0,
+                                            eq);
                }
+               if (err)
+                       goto err_out_unmap;
        }
 
-
        if (dev->flags & MLX4_FLAG_MSI_X) {
                const char *eq_name;
 
-               for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) {
-                       if (i < dev->caps.num_comp_vectors) {
-                               snprintf(priv->eq_table.irq_names +
-                                        i * MLX4_IRQNAME_SIZE,
-                                        MLX4_IRQNAME_SIZE,
-                                        "mlx4-comp-%d@pci:%s", i,
-                                        pci_name(dev->persist->pdev));
-                       } else {
-                               snprintf(priv->eq_table.irq_names +
-                                        i * MLX4_IRQNAME_SIZE,
-                                        MLX4_IRQNAME_SIZE,
-                                        "mlx4-async@pci:%s",
-                                        pci_name(dev->persist->pdev));
-                       }
+               snprintf(priv->eq_table.irq_names +
+                        MLX4_EQ_ASYNC * MLX4_IRQNAME_SIZE,
+                        MLX4_IRQNAME_SIZE,
+                        "mlx4-async@pci:%s",
+                        pci_name(dev->persist->pdev));
+               eq_name = priv->eq_table.irq_names +
+                       MLX4_EQ_ASYNC * MLX4_IRQNAME_SIZE;
 
-                       eq_name = priv->eq_table.irq_names +
-                                 i * MLX4_IRQNAME_SIZE;
-                       err = request_irq(priv->eq_table.eq[i].irq,
-                                         mlx4_msi_x_interrupt, 0, eq_name,
-                                         priv->eq_table.eq + i);
-                       if (err)
-                               goto err_out_async;
+               err = request_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq,
+                                 mlx4_msi_x_interrupt, 0, eq_name,
+                                 priv->eq_table.eq + MLX4_EQ_ASYNC);
+               if (err)
+                       goto err_out_unmap;
 
-                       priv->eq_table.eq[i].have_irq = 1;
-               }
+               priv->eq_table.eq[MLX4_EQ_ASYNC].have_irq = 1;
        } else {
                snprintf(priv->eq_table.irq_names,
                         MLX4_IRQNAME_SIZE,
@@ -1253,36 +1271,38 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
                err = request_irq(dev->persist->pdev->irq, mlx4_interrupt,
                                  IRQF_SHARED, priv->eq_table.irq_names, dev);
                if (err)
-                       goto err_out_async;
+                       goto err_out_unmap;
 
                priv->eq_table.have_irq = 1;
        }
 
        err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
-                         priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
+                         priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
        if (err)
                mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
-                          priv->eq_table.eq[dev->caps.num_comp_vectors].eqn, err);
+                          priv->eq_table.eq[MLX4_EQ_ASYNC].eqn, err);
 
-       for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
-               eq_set_ci(&priv->eq_table.eq[i], 1);
+       /* arm ASYNC eq */
+       eq_set_ci(&priv->eq_table.eq[MLX4_EQ_ASYNC], 1);
 
        return 0;
 
-err_out_async:
-       mlx4_free_eq(dev, &priv->eq_table.eq[dev->caps.num_comp_vectors]);
-
-err_out_comp:
-       i = dev->caps.num_comp_vectors - 1;
-
 err_out_unmap:
-       while (i >= 0) {
-               mlx4_free_eq(dev, &priv->eq_table.eq[i]);
-               --i;
+       while (i >= 0)
+               mlx4_free_eq(dev, &priv->eq_table.eq[i--]);
+#ifdef CONFIG_RFS_ACCEL
+       for (i = 1; i <= dev->caps.num_ports; i++) {
+               if (mlx4_priv(dev)->port[i].rmap) {
+                       free_irq_cpu_rmap(mlx4_priv(dev)->port[i].rmap);
+                       mlx4_priv(dev)->port[i].rmap = NULL;
+               }
        }
+#endif
+       mlx4_free_irqs(dev);
+
+err_out_clr_int:
        if (!mlx4_is_slave(dev))
                mlx4_unmap_clr_int(dev);
-       mlx4_free_irqs(dev);
 
 err_out_bitmap:
        mlx4_unmap_uar(dev);
@@ -1300,11 +1320,19 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
        int i;
 
        mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 1,
-                   priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
+                   priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
 
+#ifdef CONFIG_RFS_ACCEL
+       for (i = 1; i <= dev->caps.num_ports; i++) {
+               if (mlx4_priv(dev)->port[i].rmap) {
+                       free_irq_cpu_rmap(mlx4_priv(dev)->port[i].rmap);
+                       mlx4_priv(dev)->port[i].rmap = NULL;
+               }
+       }
+#endif
        mlx4_free_irqs(dev);
 
-       for (i = 0; i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i)
+       for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
                mlx4_free_eq(dev, &priv->eq_table.eq[i]);
 
        if (!mlx4_is_slave(dev))
@@ -1355,87 +1383,169 @@ int mlx4_test_interrupts(struct mlx4_dev *dev)
 
        /* Return to default */
        mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
-                   priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
+                   priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
        return err;
 }
 EXPORT_SYMBOL(mlx4_test_interrupts);
 
-int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
-                  int *vector)
+bool mlx4_is_eq_vector_valid(struct mlx4_dev *dev, u8 port, int vector)
 {
+       struct mlx4_priv *priv = mlx4_priv(dev);
+
+       vector = MLX4_CQ_TO_EQ_VECTOR(vector);
+       if (vector < 0 || (vector >= dev->caps.num_comp_vectors + 1) ||
+           (vector == MLX4_EQ_ASYNC))
+               return false;
+
+       return test_bit(port - 1, priv->eq_table.eq[vector].actv_ports.ports);
+}
+EXPORT_SYMBOL(mlx4_is_eq_vector_valid);
 
+u32 mlx4_get_eqs_per_port(struct mlx4_dev *dev, u8 port)
+{
        struct mlx4_priv *priv = mlx4_priv(dev);
-       int vec = 0, err = 0, i;
+       unsigned int i;
+       unsigned int sum = 0;
+
+       for (i = 0; i < dev->caps.num_comp_vectors + 1; i++)
+               sum += !!test_bit(port - 1,
+                                 priv->eq_table.eq[i].actv_ports.ports);
+
+       return sum;
+}
+EXPORT_SYMBOL(mlx4_get_eqs_per_port);
+
+int mlx4_is_eq_shared(struct mlx4_dev *dev, int vector)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+
+       vector = MLX4_CQ_TO_EQ_VECTOR(vector);
+       if (vector <= 0 || (vector >= dev->caps.num_comp_vectors + 1))
+               return -EINVAL;
+
+       return !!(bitmap_weight(priv->eq_table.eq[vector].actv_ports.ports,
+                               dev->caps.num_ports) > 1);
+}
+EXPORT_SYMBOL(mlx4_is_eq_shared);
+
+struct cpu_rmap *mlx4_get_cpu_rmap(struct mlx4_dev *dev, int port)
+{
+       return mlx4_priv(dev)->port[port].rmap;
+}
+EXPORT_SYMBOL(mlx4_get_cpu_rmap);
+
+int mlx4_assign_eq(struct mlx4_dev *dev, u8 port, int *vector)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       int err = 0, i = 0;
+       u32 min_ref_count_val = (u32)-1;
+       int requested_vector = MLX4_CQ_TO_EQ_VECTOR(*vector);
+       int *prequested_vector = NULL;
+
 
        mutex_lock(&priv->msix_ctl.pool_lock);
-       for (i = 0; !vec && i < dev->caps.comp_pool; i++) {
-               if (~priv->msix_ctl.pool_bm & 1ULL << i) {
-                       priv->msix_ctl.pool_bm |= 1ULL << i;
-                       vec = dev->caps.num_comp_vectors + 1 + i;
-                       snprintf(priv->eq_table.irq_names +
-                                       vec * MLX4_IRQNAME_SIZE,
-                                       MLX4_IRQNAME_SIZE, "%s", name);
-#ifdef CONFIG_RFS_ACCEL
-                       if (rmap) {
-                               err = irq_cpu_rmap_add(rmap,
-                                                      priv->eq_table.eq[vec].irq);
-                               if (err)
-                                       mlx4_warn(dev, "Failed adding irq rmap\n");
+       if (requested_vector < (dev->caps.num_comp_vectors + 1) &&
+           (requested_vector >= 0) &&
+           (requested_vector != MLX4_EQ_ASYNC)) {
+               if (test_bit(port - 1,
+                            priv->eq_table.eq[requested_vector].actv_ports.ports)) {
+                       prequested_vector = &requested_vector;
+               } else {
+                       struct mlx4_eq *eq;
+
+                       for (i = 1; i < port;
+                            requested_vector += mlx4_get_eqs_per_port(dev, i++))
+                               ;
+
+                       eq = &priv->eq_table.eq[requested_vector];
+                       if (requested_vector < dev->caps.num_comp_vectors + 1 &&
+                           test_bit(port - 1, eq->actv_ports.ports)) {
+                               prequested_vector = &requested_vector;
                        }
-#endif
-                       err = request_irq(priv->eq_table.eq[vec].irq,
-                                         mlx4_msi_x_interrupt, 0,
-                                         &priv->eq_table.irq_names[vec<<5],
-                                         priv->eq_table.eq + vec);
-                       if (err) {
-                               /*zero out bit by fliping it*/
-                               priv->msix_ctl.pool_bm ^= 1 << i;
-                               vec = 0;
-                               continue;
-                               /*we dont want to break here*/
+               }
+       }
+
+       if (!prequested_vector) {
+               requested_vector = -1;
+               for (i = 0; min_ref_count_val && i < dev->caps.num_comp_vectors + 1;
+                    i++) {
+                       struct mlx4_eq *eq = &priv->eq_table.eq[i];
+
+                       if (min_ref_count_val > eq->ref_count &&
+                           test_bit(port - 1, eq->actv_ports.ports)) {
+                               min_ref_count_val = eq->ref_count;
+                               requested_vector = i;
                        }
+               }
+
+               if (requested_vector < 0) {
+                       err = -ENOSPC;
+                       goto err_unlock;
+               }
+
+               prequested_vector = &requested_vector;
+       }
+
+       if (!test_bit(*prequested_vector, priv->msix_ctl.pool_bm) &&
+           dev->flags & MLX4_FLAG_MSI_X) {
+               set_bit(*prequested_vector, priv->msix_ctl.pool_bm);
+               snprintf(priv->eq_table.irq_names +
+                        *prequested_vector * MLX4_IRQNAME_SIZE,
+                        MLX4_IRQNAME_SIZE, "mlx4-%d@%s",
+                        *prequested_vector, dev_name(&dev->persist->pdev->dev));
 
-                       eq_set_ci(&priv->eq_table.eq[vec], 1);
+               err = request_irq(priv->eq_table.eq[*prequested_vector].irq,
+                                 mlx4_msi_x_interrupt, 0,
+                                 &priv->eq_table.irq_names[*prequested_vector << 5],
+                                 priv->eq_table.eq + *prequested_vector);
+
+               if (err) {
+                       clear_bit(*prequested_vector, priv->msix_ctl.pool_bm);
+                       *prequested_vector = -1;
+               } else {
+#if defined(CONFIG_SMP)
+                       mlx4_set_eq_affinity_hint(priv, *prequested_vector);
+#endif
+                       eq_set_ci(&priv->eq_table.eq[*prequested_vector], 1);
+                       priv->eq_table.eq[*prequested_vector].have_irq = 1;
                }
        }
+
+       if (!err && *prequested_vector >= 0)
+               priv->eq_table.eq[*prequested_vector].ref_count++;
+
+err_unlock:
        mutex_unlock(&priv->msix_ctl.pool_lock);
 
-       if (vec) {
-               *vector = vec;
-       } else {
+       if (!err && *prequested_vector >= 0)
+               *vector = MLX4_EQ_TO_CQ_VECTOR(*prequested_vector);
+       else
                *vector = 0;
-               err = (i == dev->caps.comp_pool) ? -ENOSPC : err;
-       }
+
        return err;
 }
 EXPORT_SYMBOL(mlx4_assign_eq);
 
-int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec)
+int mlx4_eq_get_irq(struct mlx4_dev *dev, int cq_vec)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
 
-       return priv->eq_table.eq[vec].irq;
+       return priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq_vec)].irq;
 }
 EXPORT_SYMBOL(mlx4_eq_get_irq);
 
 void mlx4_release_eq(struct mlx4_dev *dev, int vec)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
-       /*bm index*/
-       int i = vec - dev->caps.num_comp_vectors - 1;
-
-       if (likely(i >= 0)) {
-               /*sanity check , making sure were not trying to free irq's
-                 Belonging to a legacy EQ*/
-               mutex_lock(&priv->msix_ctl.pool_lock);
-               if (priv->msix_ctl.pool_bm & 1ULL << i) {
-                       free_irq(priv->eq_table.eq[vec].irq,
-                                &priv->eq_table.eq[vec]);
-                       priv->msix_ctl.pool_bm &= ~(1ULL << i);
-               }
-               mutex_unlock(&priv->msix_ctl.pool_lock);
-       }
+       int eq_vec = MLX4_CQ_TO_EQ_VECTOR(vec);
 
+       mutex_lock(&priv->msix_ctl.pool_lock);
+       priv->eq_table.eq[eq_vec].ref_count--;
+
+       /* Once an EQ has been allocated, we don't release it because it
+        * might be bound to a cpu_rmap.
+        */
+       mutex_unlock(&priv->msix_ctl.pool_lock);
 }
 EXPORT_SYMBOL(mlx4_release_eq);
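/* A minimal, hedged sketch of the reworked EQ API from a consumer's point of
 * view, mirroring mlx4_en_activate_cq() earlier in this patch; the function
 * name and the trimmed error handling are illustrative only:
 */
static int example_bind_cq_vector(struct mlx4_dev *dev, u8 port, int *vector)
{
        int err;

        if (!mlx4_is_eq_vector_valid(dev, port, *vector)) {
                /* pick the least-referenced EQ serving this port;
                 * request_irq() happens lazily on first assignment
                 */
                err = mlx4_assign_eq(dev, port, vector);
                if (err)
                        return err;
        }

        /* ... pass *vector to mlx4_cq_alloc() ... */

        mlx4_release_eq(dev, *vector);  /* only decrements ref_count */
        return 0;
}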
 
index ced5ecab5aa754ad44ae055464608bba66d6b137..7d57777e65c53625c5edd924072cbceacb58340e 100644 (file)
@@ -2364,11 +2364,11 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
        if (err) {
                if (dev->flags & MLX4_FLAG_MSI_X) {
                        mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt IRQ %d)\n",
-                                 priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
+                                 priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
                        mlx4_warn(dev, "Trying again without MSI-X\n");
                } else {
                        mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n",
-                                priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
+                                priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
                        mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
                }
 
@@ -2481,14 +2481,45 @@ err_uar_table_free:
        return err;
 }
 
+static int mlx4_init_affinity_hint(struct mlx4_dev *dev, int port, int eqn)
+{
+       int requested_cpu = 0;
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_eq *eq;
+       int off = 0;
+       int i;
+
+       if (eqn > dev->caps.num_comp_vectors)
+               return -EINVAL;
+
+       for (i = 1; i < port; i++)
+               off += mlx4_get_eqs_per_port(dev, i);
+
+       requested_cpu = eqn - off - !!(eqn > MLX4_EQ_ASYNC);
+
+       /* Meaning EQs are shared, and this call comes from the second port */
+       if (requested_cpu < 0)
+               return 0;
+
+       eq = &priv->eq_table.eq[eqn];
+
+       if (!zalloc_cpumask_var(&eq->affinity_mask, GFP_KERNEL))
+               return -ENOMEM;
+
+       cpumask_set_cpu(requested_cpu, eq->affinity_mask);
+
+       return 0;
+}
+
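/* A worked example with assumed numbers for the hint arithmetic above: with
 * two ports and 4 EQs per port, port 2's EQs follow port 1's, so off = 4.
 * For port = 2 and eqn = 6, requested_cpu = 6 - 4 - 1 = 1, the second CPU
 * serving that port; eqn = 4 would yield -1, the shared-EQ case in which no
 * hint is set.
 */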
 static void mlx4_enable_msi_x(struct mlx4_dev *dev)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct msix_entry *entries;
        int i;
+       int port = 0;
 
        if (msi_x) {
-               int nreq = dev->caps.num_ports * num_online_cpus() + MSIX_LEGACY_SZ;
+               int nreq = dev->caps.num_ports * num_online_cpus() + 1;
 
                nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
                             nreq);
@@ -2503,20 +2534,55 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
                nreq = pci_enable_msix_range(dev->persist->pdev, entries, 2,
                                             nreq);
 
-               if (nreq < 0) {
+               if (nreq < 0 || nreq < MLX4_EQ_ASYNC) {
                        kfree(entries);
                        goto no_msi;
-               } else if (nreq < MSIX_LEGACY_SZ +
-                          dev->caps.num_ports * MIN_MSIX_P_PORT) {
-                       /*Working in legacy mode , all EQ's shared*/
-                       dev->caps.comp_pool           = 0;
-                       dev->caps.num_comp_vectors = nreq - 1;
-               } else {
-                       dev->caps.comp_pool           = nreq - MSIX_LEGACY_SZ;
-                       dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1;
                }
-               for (i = 0; i < nreq; ++i)
-                       priv->eq_table.eq[i].irq = entries[i].vector;
+               /* 1 is reserved for events (asynchronous EQ) */
+               dev->caps.num_comp_vectors = nreq - 1;
+
+               priv->eq_table.eq[MLX4_EQ_ASYNC].irq = entries[0].vector;
+               bitmap_zero(priv->eq_table.eq[MLX4_EQ_ASYNC].actv_ports.ports,
+                           dev->caps.num_ports);
+
+               for (i = 0; i < dev->caps.num_comp_vectors + 1; i++) {
+                       if (i == MLX4_EQ_ASYNC)
+                               continue;
+
+                       priv->eq_table.eq[i].irq =
+                               entries[i + 1 - !!(i > MLX4_EQ_ASYNC)].vector;
+
+                       if (MLX4_IS_LEGACY_EQ_MODE(dev->caps)) {
+                               bitmap_fill(priv->eq_table.eq[i].actv_ports.ports,
+                                           dev->caps.num_ports);
+                               /* We don't set affinity hint when there
+                                * aren't enough EQs
+                                */
+                       } else {
+                               set_bit(port,
+                                       priv->eq_table.eq[i].actv_ports.ports);
+                               if (mlx4_init_affinity_hint(dev, port + 1, i))
+                                       mlx4_warn(dev, "Couldn't init hint cpumask for EQ %d\n",
+                                                 i);
+                       }
+                       /* We divide the EQs evenly between the ports.
+                        * (dev->caps.num_comp_vectors / dev->caps.num_ports)
+                        * is the number of EQs per port (i.e. eqs_per_port).
+                        * Ideally we would advance the port whenever
+                        * (i + 1) % eqs_per_port == 0; however, since EQ 0
+                        * is the asynchronous EQ, we must skip over it by
+                        * comparing the remainder to
+                        * !!((i + 1) > MLX4_EQ_ASYNC) instead.
+                        */
+                       if ((dev->caps.num_comp_vectors > dev->caps.num_ports) &&
+                           ((i + 1) %
+                            (dev->caps.num_comp_vectors / dev->caps.num_ports)) ==
+                           !!((i + 1) > MLX4_EQ_ASYNC))
+                               /* If dev->caps.num_comp_vectors < dev->caps.num_ports,
+                                * everything is shared anyway.
+                                */
+                               port++;
+               }
 
                dev->flags |= MLX4_FLAG_MSI_X;
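/* A worked example of the round-robin above with assumed numbers: for
 * num_ports = 2 and num_comp_vectors = 8, eqs_per_port = 4.  EQ 0 is the
 * async EQ and is skipped; EQs 1..4 are marked for port 0, and at i = 4 the
 * remainder (i + 1) % 4 == 1 matches, advancing to port 1 for EQs 5..8.
 */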
 
@@ -2526,10 +2592,15 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
 
 no_msi:
        dev->caps.num_comp_vectors = 1;
-       dev->caps.comp_pool        = 0;
 
-       for (i = 0; i < 2; ++i)
+       BUG_ON(MLX4_EQ_ASYNC >= 2);
+       for (i = 0; i < 2; ++i) {
                priv->eq_table.eq[i].irq = dev->persist->pdev->irq;
+               if (i != MLX4_EQ_ASYNC) {
+                       bitmap_fill(priv->eq_table.eq[i].actv_ports.ports,
+                                   dev->caps.num_ports);
+               }
+       }
 }
 
 static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
@@ -2594,6 +2665,10 @@ static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
        device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr);
        device_remove_file(&info->dev->persist->pdev->dev,
                           &info->port_mtu_attr);
+#ifdef CONFIG_RFS_ACCEL
+       free_irq_cpu_rmap(info->rmap);
+       info->rmap = NULL;
+#endif
 }
 
 static int mlx4_init_steering(struct mlx4_dev *dev)
@@ -2749,6 +2824,7 @@ disable_sriov:
 free_mem:
        dev->persist->num_vfs = 0;
        kfree(dev->dev_vfs);
+       dev->dev_vfs = NULL;
        return dev_flags & ~MLX4_FLAG_MASTER;
 }
 
@@ -2900,6 +2976,7 @@ slave_start:
                                                                  existing_vfs,
                                                                  reset_flow);
 
+                               mlx4_close_fw(dev);
                                mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
                                dev->flags = dev_flags;
                                if (!SRIOV_VALID_STATE(dev->flags)) {
@@ -2988,18 +3065,6 @@ slave_start:
        /* In master functions, the communication channel must be initialized
         * after obtaining its address from fw */
        if (mlx4_is_master(dev)) {
-               int ib_ports = 0;
-
-               mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
-                       ib_ports++;
-
-               if (ib_ports &&
-                   (num_vfs_argc > 1 || probe_vfs_argc > 1)) {
-                       mlx4_err(dev,
-                                "Invalid syntax of num_vfs/probe_vfs with IB port - single port VFs syntax is only supported when all ports are configured as ethernet\n");
-                       err = -EINVAL;
-                       goto err_close;
-               }
                if (dev->caps.num_ports < 2 &&
                    num_vfs_argc > 1) {
                        err = -EINVAL;
@@ -3036,7 +3101,7 @@ slave_start:
        if (err)
                goto err_master_mfunc;
 
-       priv->msix_ctl.pool_bm = 0;
+       bitmap_zero(priv->msix_ctl.pool_bm, MAX_MSIX);
        mutex_init(&priv->msix_ctl.pool_lock);
 
        mlx4_enable_msi_x(dev);
@@ -3058,7 +3123,6 @@ slave_start:
            !mlx4_is_mfunc(dev)) {
                dev->flags &= ~MLX4_FLAG_MSI_X;
                dev->caps.num_comp_vectors = 1;
-               dev->caps.comp_pool        = 0;
                pci_disable_msix(pdev);
                err = mlx4_setup_hca(dev);
        }
index 502d3dd2c888528e71af1cbf1ed276b10d058c81..f424900d23a65d8ad549927ae52bc6d262f9d37c 100644 (file)
@@ -287,6 +287,12 @@ struct mlx4_icm_table {
 #define MLX4_CQE_SIZE_MASK_STRIDE      0x3
 #define MLX4_EQE_SIZE_MASK_STRIDE      0x30
 
+#define MLX4_EQ_ASYNC                  0
+#define MLX4_EQ_TO_CQ_VECTOR(vector)   ((vector) - \
+                                        !!((int)(vector) >= MLX4_EQ_ASYNC))
+#define MLX4_CQ_TO_EQ_VECTOR(vector)   ((vector) + \
+                                        !!((int)(vector) >= MLX4_EQ_ASYNC))
+
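/* With MLX4_EQ_ASYNC == 0, these macros simply shift completion vectors past
 * the async EQ: MLX4_CQ_TO_EQ_VECTOR(0) == 1, MLX4_CQ_TO_EQ_VECTOR(n) ==
 * n + 1, and MLX4_EQ_TO_CQ_VECTOR() inverts the mapping.
 */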
 /*
  * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
  */
@@ -391,6 +397,9 @@ struct mlx4_eq {
        struct mlx4_buf_list   *page_list;
        struct mlx4_mtt         mtt;
        struct mlx4_eq_tasklet  tasklet_ctx;
+       struct mlx4_active_ports actv_ports;
+       u32                     ref_count;
+       cpumask_var_t           affinity_mask;
 };
 
 struct mlx4_slave_eqe {
@@ -808,6 +817,7 @@ struct mlx4_port_info {
        struct mlx4_vlan_table  vlan_table;
        struct mlx4_roce_gid_table gid_table;
        int                     base_qpn;
+       struct cpu_rmap         *rmap;
 };
 
 struct mlx4_sense {
@@ -818,7 +828,7 @@ struct mlx4_sense {
 };
 
 struct mlx4_msix_ctl {
-       u64             pool_bm;
+       DECLARE_BITMAP(pool_bm, MAX_MSIX);
        struct mutex    pool_lock;
 };
 
index d021f079f181b06bb6ec73250ea8493ad87d1cee..edd8fd69ec9a8d2133fcb27c44cca13c73551fa1 100644 (file)
@@ -338,7 +338,7 @@ struct mlx4_en_cq {
        struct napi_struct      napi;
        int size;
        int buf_size;
-       unsigned vector;
+       int vector;
        enum cq_type is_tx;
        u16 moder_time;
        u16 moder_cnt;
index bafe2180cf0c413c4d971f8043e401a018dc8100..ab48386bfefcd4ea18c3173cc151af0100a016f1 100644 (file)
@@ -2703,6 +2703,10 @@ static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
        context->qkey = cpu_to_be32(qkey);
 }
 
+static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
+                                struct mlx4_qp_context *qpc,
+                                struct mlx4_cmd_mailbox *inbox);
+
 int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
                             struct mlx4_vhcr *vhcr,
                             struct mlx4_cmd_mailbox *inbox,
@@ -2725,6 +2729,10 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
        struct res_srq *srq;
        int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
 
+       err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
+       if (err)
+               return err;
+
        err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
        if (err)
                return err;
@@ -3526,8 +3534,8 @@ static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
        pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |
                          ((port & 1) << 6);
 
-       if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH ||
-           mlx4_is_eth(dev, port + 1)) {
+       if (optpar & (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH | MLX4_QP_OPTPAR_SCHED_QUEUE) ||
+           qpc->pri_path.sched_queue || mlx4_is_eth(dev, port + 1)) {
                qpc->pri_path.sched_queue = pri_sched_queue;
        }
 
@@ -3965,6 +3973,22 @@ static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
        return 0;
 }
 
+static void handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
+                                        struct _rule_hw *eth_header)
+{
+       if (is_multicast_ether_addr(eth_header->eth.dst_mac) ||
+           is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
+               struct mlx4_net_trans_rule_hw_eth *eth =
+                       (struct mlx4_net_trans_rule_hw_eth *)eth_header;
+               struct _rule_hw *next_rule = (struct _rule_hw *)(eth + 1);
+               bool last_rule = next_rule->size == 0 && next_rule->id == 0 &&
+                       next_rule->rsvd == 0;
+
+               if (last_rule)
+                       ctrl->prio = cpu_to_be16(MLX4_DOMAIN_NIC);
+       }
+}
+
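/* The helper above bumps multicast/broadcast L2 rules to MLX4_DOMAIN_NIC
 * priority, but only when the Ethernet segment is the last rule segment,
 * which is detected by the all-zero terminator that follows it.
 */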
 /*
  * In case of missing eth header, append eth header with a MAC address
  * assigned to the VF.
@@ -4117,6 +4141,12 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
        rule_header = (struct _rule_hw *)(ctrl + 1);
        header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
 
+       if (header_id == MLX4_NET_TRANS_RULE_ID_ETH)
+               handle_eth_header_mcast_prio(ctrl, rule_header);
+
+       if (slave == dev->caps.function)
+               goto execute;
+
        switch (header_id) {
        case MLX4_NET_TRANS_RULE_ID_ETH:
                if (validate_eth_header_mac(slave, rule_header, rlist)) {
@@ -4143,6 +4173,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
                goto err_put;
        }
 
+execute:
        err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
                           vhcr->in_modifier, 0,
                           MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
index 8ff57e8e3e91601bc503e5f501ac2ef1da956296..158c88c69ef91ce9e18db6441e0be176c0aaeba3 100644 (file)
@@ -3,6 +3,18 @@
 #
 
 config MLX5_CORE
-       tristate
+       tristate "Mellanox Technologies ConnectX-4 and Connect-IB core driver"
        depends on PCI
        default n
+       ---help---
+         Core driver for low level functionality of the ConnectX-4 and
+         Connect-IB cards by Mellanox Technologies.
+
+config MLX5_CORE_EN
+       bool "Mellanox Technologies ConnectX-4 Ethernet support"
+       depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE
+       default n
+       ---help---
+         Ethernet support in Mellanox Technologies ConnectX-4 NIC.
+         Ethernet and Infiniband support in ConnectX-4 are currently mutually
+         exclusive.
index 105780bb980b051e9dafaaa060ee51d41aa7e3ba..26a68b8af2c5c6ad0886711ee1f972a39b5c29ab 100644 (file)
@@ -2,4 +2,7 @@ obj-$(CONFIG_MLX5_CORE)         += mlx5_core.o
 
 mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
                health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o   \
-               mad.o
+               mad.o transobj.o vport.o
+mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o flow_table.o \
+               en_main.o en_flow_table.o en_ethtool.o en_tx.o en_rx.o \
+               en_txrx.o
index ac0f7bf4be958bef168c0281f05108f6287304f4..0715b497511f6c861f5ac027341960fdc0acfab5 100644 (file)
 #include "mlx5_core.h"
 
 /* Handling for queue buffers -- we allocate a bunch of memory and
- * register it in a memory region at HCA virtual address 0.  If the
- * requested size is > max_direct, we split the allocation into
- * multiple pages, so we don't require too much contiguous memory.
+ * register it in a memory region at HCA virtual address 0.
  */
 
-int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct,
-                  struct mlx5_buf *buf)
+int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf)
 {
        dma_addr_t t;
 
        buf->size = size;
-       if (size <= max_direct) {
-               buf->nbufs        = 1;
-               buf->npages       = 1;
-               buf->page_shift   = (u8)get_order(size) + PAGE_SHIFT;
-               buf->direct.buf   = dma_zalloc_coherent(&dev->pdev->dev,
-                                                       size, &t, GFP_KERNEL);
-               if (!buf->direct.buf)
-                       return -ENOMEM;
-
-               buf->direct.map = t;
-
-               while (t & ((1 << buf->page_shift) - 1)) {
-                       --buf->page_shift;
-                       buf->npages *= 2;
-               }
-       } else {
-               int i;
-
-               buf->direct.buf  = NULL;
-               buf->nbufs       = (size + PAGE_SIZE - 1) / PAGE_SIZE;
-               buf->npages      = buf->nbufs;
-               buf->page_shift  = PAGE_SHIFT;
-               buf->page_list   = kcalloc(buf->nbufs, sizeof(*buf->page_list),
-                                          GFP_KERNEL);
-               if (!buf->page_list)
-                       return -ENOMEM;
-
-               for (i = 0; i < buf->nbufs; i++) {
-                       buf->page_list[i].buf =
-                               dma_zalloc_coherent(&dev->pdev->dev, PAGE_SIZE,
-                                                   &t, GFP_KERNEL);
-                       if (!buf->page_list[i].buf)
-                               goto err_free;
-
-                       buf->page_list[i].map = t;
-               }
-
-               if (BITS_PER_LONG == 64) {
-                       struct page **pages;
-                       pages = kmalloc(sizeof(*pages) * buf->nbufs, GFP_KERNEL);
-                       if (!pages)
-                               goto err_free;
-                       for (i = 0; i < buf->nbufs; i++)
-                               pages[i] = virt_to_page(buf->page_list[i].buf);
-                       buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
-                       kfree(pages);
-                       if (!buf->direct.buf)
-                               goto err_free;
-               }
-       }
+       buf->npages       = 1;
+       buf->page_shift   = (u8)get_order(size) + PAGE_SHIFT;
+       buf->direct.buf   = dma_zalloc_coherent(&dev->pdev->dev,
+                                               size, &t, GFP_KERNEL);
+       if (!buf->direct.buf)
+               return -ENOMEM;
 
-       return 0;
+       buf->direct.map = t;
 
-err_free:
-       mlx5_buf_free(dev, buf);
+       while (t & ((1 << buf->page_shift) - 1)) {
+               --buf->page_shift;
+               buf->npages *= 2;
+       }
 
-       return -ENOMEM;
+       return 0;
 }
 EXPORT_SYMBOL_GPL(mlx5_buf_alloc);
 
 void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
 {
-       int i;
-
-       if (buf->nbufs == 1)
-               dma_free_coherent(&dev->pdev->dev, buf->size, buf->direct.buf,
-                                 buf->direct.map);
-       else {
-               if (BITS_PER_LONG == 64)
-                       vunmap(buf->direct.buf);
-
-               for (i = 0; i < buf->nbufs; i++)
-                       if (buf->page_list[i].buf)
-                               dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
-                                                 buf->page_list[i].buf,
-                                                 buf->page_list[i].map);
-               kfree(buf->page_list);
-       }
+       dma_free_coherent(&dev->pdev->dev, buf->size, buf->direct.buf,
+                         buf->direct.map);
 }
 EXPORT_SYMBOL_GPL(mlx5_buf_free);
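With max_direct gone, a queue buffer is always a single coherent DMA allocation. A minimal usage sketch under that assumption (dev is a live mlx5_core_dev; error handling trimmed):

	struct mlx5_buf buf;
	int err;

	err = mlx5_buf_alloc(dev, 8 * PAGE_SIZE, &buf);
	if (err)
		return err;
	/* ... hand buf.direct.map / buf.npages to the HCA ... */
	mlx5_buf_free(dev, &buf);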
 
@@ -230,10 +171,7 @@ void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas)
        int i;
 
        for (i = 0; i < buf->npages; i++) {
-               if (buf->nbufs == 1)
-                       addr = buf->direct.map + (i << buf->page_shift);
-               else
-                       addr = buf->page_list[i].map;
+               addr = buf->direct.map + (i << buf->page_shift);
 
                pas[i] = cpu_to_be64(addr);
        }
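With only the direct mapping left, the page array becomes a plain arithmetic progression; e.g., assuming page_shift == 12 and npages == 4, the loop above yields:

	/* sketch of the resulting PAS entries:
	 *   pas[0] = cpu_to_be64(buf->direct.map + 0x0000);
	 *   pas[1] = cpu_to_be64(buf->direct.map + 0x1000);
	 *   pas[2] = cpu_to_be64(buf->direct.map + 0x2000);
	 *   pas[3] = cpu_to_be64(buf->direct.map + 0x3000);
	 */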
index e3273faf4568945cb494e6598dbc013e61b11919..75ff58dc1ff5f9d9af725f7e5e3285e338b1be8c 100644 (file)
@@ -75,25 +75,6 @@ enum {
        MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR            = 0x10,
 };
 
-enum {
-       MLX5_CMD_STAT_OK                        = 0x0,
-       MLX5_CMD_STAT_INT_ERR                   = 0x1,
-       MLX5_CMD_STAT_BAD_OP_ERR                = 0x2,
-       MLX5_CMD_STAT_BAD_PARAM_ERR             = 0x3,
-       MLX5_CMD_STAT_BAD_SYS_STATE_ERR         = 0x4,
-       MLX5_CMD_STAT_BAD_RES_ERR               = 0x5,
-       MLX5_CMD_STAT_RES_BUSY                  = 0x6,
-       MLX5_CMD_STAT_LIM_ERR                   = 0x8,
-       MLX5_CMD_STAT_BAD_RES_STATE_ERR         = 0x9,
-       MLX5_CMD_STAT_IX_ERR                    = 0xa,
-       MLX5_CMD_STAT_NO_RES_ERR                = 0xf,
-       MLX5_CMD_STAT_BAD_INP_LEN_ERR           = 0x50,
-       MLX5_CMD_STAT_BAD_OUTP_LEN_ERR          = 0x51,
-       MLX5_CMD_STAT_BAD_QP_STATE_ERR          = 0x10,
-       MLX5_CMD_STAT_BAD_PKT_ERR               = 0x30,
-       MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR    = 0x40,
-};
-
 static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd,
                                           struct mlx5_cmd_msg *in,
                                           struct mlx5_cmd_msg *out,
@@ -390,8 +371,17 @@ const char *mlx5_command_str(int command)
        case MLX5_CMD_OP_ARM_RQ:
                return "ARM_RQ";
 
-       case MLX5_CMD_OP_RESIZE_SRQ:
-               return "RESIZE_SRQ";
+       case MLX5_CMD_OP_CREATE_XRC_SRQ:
+               return "CREATE_XRC_SRQ";
+
+       case MLX5_CMD_OP_DESTROY_XRC_SRQ:
+               return "DESTROY_XRC_SRQ";
+
+       case MLX5_CMD_OP_QUERY_XRC_SRQ:
+               return "QUERY_XRC_SRQ";
+
+       case MLX5_CMD_OP_ARM_XRC_SRQ:
+               return "ARM_XRC_SRQ";
 
        case MLX5_CMD_OP_ALLOC_PD:
                return "ALLOC_PD";
@@ -408,8 +398,8 @@ const char *mlx5_command_str(int command)
        case MLX5_CMD_OP_ATTACH_TO_MCG:
                return "ATTACH_TO_MCG";
 
-       case MLX5_CMD_OP_DETACH_FROM_MCG:
-               return "DETACH_FROM_MCG";
+       case MLX5_CMD_OP_DETTACH_FROM_MCG:
+               return "DETTACH_FROM_MCG";
 
        case MLX5_CMD_OP_ALLOC_XRCD:
                return "ALLOC_XRCD";
index eb0cf81f5f4518a06579a6c52e191b72ad1d0e50..04ab7e445eae080b0888d74022af1222479ea36b 100644 (file)
@@ -219,6 +219,24 @@ int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
 }
 EXPORT_SYMBOL(mlx5_core_modify_cq);
 
+int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
+                                  struct mlx5_core_cq *cq,
+                                  u16 cq_period,
+                                  u16 cq_max_count)
+{
+       struct mlx5_modify_cq_mbox_in in;
+
+       memset(&in, 0, sizeof(in));
+
+       in.cqn              = cpu_to_be32(cq->cqn);
+       in.ctx.cq_period    = cpu_to_be16(cq_period);
+       in.ctx.cq_max_count = cpu_to_be16(cq_max_count);
+       in.field_select     = cpu_to_be32(MLX5_CQ_MODIFY_PERIOD |
+                                         MLX5_CQ_MODIFY_COUNT);
+
+       return mlx5_core_modify_cq(dev, cq, &in, sizeof(in));
+}
+
 int mlx5_init_cq_table(struct mlx5_core_dev *dev)
 {
        struct mlx5_cq_table *table = &dev->priv.cq_table;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
new file mode 100644 (file)
index 0000000..e9edb72
--- /dev/null
@@ -0,0 +1,520 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/if_vlan.h>
+#include <linux/etherdevice.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/qp.h>
+#include <linux/mlx5/cq.h>
+#include <linux/mlx5/vport.h>
+#include "wq.h"
+#include "transobj.h"
+#include "mlx5_core.h"
+
+#define MLX5E_MAX_NUM_TC       8
+
+#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE                0x7
+#define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE                0xa
+#define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE                0xd
+
+#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE                0x7
+#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE                0xa
+#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE                0xd
+
+#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ                 (16 * 1024)
+#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC      0x10
+#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS      0x20
+#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC      0x10
+#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS      0x20
+#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES                0x80
+#define MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ         0x7
+#define MLX5E_PARAMS_MIN_MTU                            46
+
+#define MLX5E_TX_CQ_POLL_BUDGET        128
+#define MLX5E_UPDATE_STATS_INTERVAL    200 /* msecs */
+
+static const char vport_strings[][ETH_GSTRING_LEN] = {
+       /* vport statistics */
+       "rx_packets",
+       "rx_bytes",
+       "tx_packets",
+       "tx_bytes",
+       "rx_error_packets",
+       "rx_error_bytes",
+       "tx_error_packets",
+       "tx_error_bytes",
+       "rx_unicast_packets",
+       "rx_unicast_bytes",
+       "tx_unicast_packets",
+       "tx_unicast_bytes",
+       "rx_multicast_packets",
+       "rx_multicast_bytes",
+       "tx_multicast_packets",
+       "tx_multicast_bytes",
+       "rx_broadcast_packets",
+       "rx_broadcast_bytes",
+       "tx_broadcast_packets",
+       "tx_broadcast_bytes",
+
+       /* SW counters */
+       "tso_packets",
+       "tso_bytes",
+       "lro_packets",
+       "lro_bytes",
+       "rx_csum_good",
+       "rx_csum_none",
+       "tx_csum_offload",
+       "tx_queue_stopped",
+       "tx_queue_wake",
+       "tx_queue_dropped",
+       "rx_wqe_err",
+};
+
+struct mlx5e_vport_stats {
+       /* HW counters */
+       u64 rx_packets;
+       u64 rx_bytes;
+       u64 tx_packets;
+       u64 tx_bytes;
+       u64 rx_error_packets;
+       u64 rx_error_bytes;
+       u64 tx_error_packets;
+       u64 tx_error_bytes;
+       u64 rx_unicast_packets;
+       u64 rx_unicast_bytes;
+       u64 tx_unicast_packets;
+       u64 tx_unicast_bytes;
+       u64 rx_multicast_packets;
+       u64 rx_multicast_bytes;
+       u64 tx_multicast_packets;
+       u64 tx_multicast_bytes;
+       u64 rx_broadcast_packets;
+       u64 rx_broadcast_bytes;
+       u64 tx_broadcast_packets;
+       u64 tx_broadcast_bytes;
+
+       /* SW counters */
+       u64 tso_packets;
+       u64 tso_bytes;
+       u64 lro_packets;
+       u64 lro_bytes;
+       u64 rx_csum_good;
+       u64 rx_csum_none;
+       u64 tx_csum_offload;
+       u64 tx_queue_stopped;
+       u64 tx_queue_wake;
+       u64 tx_queue_dropped;
+       u64 rx_wqe_err;
+
+#define NUM_VPORT_COUNTERS     31
+};
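mlx5e_get_ethtool_stats() below walks this struct as a flat u64 array, so NUM_VPORT_COUNTERS must stay equal to the field count (20 HW + 11 SW = 31). A compile-time guard one could add in any init function (sketch, not in the patch):

	BUILD_BUG_ON(NUM_VPORT_COUNTERS * sizeof(u64) !=
		     sizeof(struct mlx5e_vport_stats));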
+
+static const char rq_stats_strings[][ETH_GSTRING_LEN] = {
+       "packets",
+       "csum_none",
+       "lro_packets",
+       "lro_bytes",
+       "wqe_err"
+};
+
+struct mlx5e_rq_stats {
+       u64 packets;
+       u64 csum_none;
+       u64 lro_packets;
+       u64 lro_bytes;
+       u64 wqe_err;
+#define NUM_RQ_STATS 5
+};
+
+static const char sq_stats_strings[][ETH_GSTRING_LEN] = {
+       "packets",
+       "tso_packets",
+       "tso_bytes",
+       "csum_offload_none",
+       "stopped",
+       "wake",
+       "dropped",
+       "nop"
+};
+
+struct mlx5e_sq_stats {
+       u64 packets;
+       u64 tso_packets;
+       u64 tso_bytes;
+       u64 csum_offload_none;
+       u64 stopped;
+       u64 wake;
+       u64 dropped;
+       u64 nop;
+#define NUM_SQ_STATS 8
+};
+
+struct mlx5e_stats {
+       struct mlx5e_vport_stats   vport;
+};
+
+struct mlx5e_params {
+       u8  log_sq_size;
+       u8  log_rq_size;
+       u16 num_channels;
+       u8  default_vlan_prio;
+       u8  num_tc;
+       u16 rx_cq_moderation_usec;
+       u16 rx_cq_moderation_pkts;
+       u16 tx_cq_moderation_usec;
+       u16 tx_cq_moderation_pkts;
+       u16 min_rx_wqes;
+       u16 rx_hash_log_tbl_sz;
+       bool lro_en;
+       u32 lro_wqe_sz;
+};
+
+enum {
+       MLX5E_RQ_STATE_POST_WQES_ENABLE,
+};
+
+enum cq_flags {
+       MLX5E_CQ_HAS_CQES = 1,
+};
+
+struct mlx5e_cq {
+       /* data path - accessed per cqe */
+       struct mlx5_cqwq           wq;
+       void                      *sqrq;
+       unsigned long              flags;
+
+       /* data path - accessed per napi poll */
+       struct napi_struct        *napi;
+       struct mlx5_core_cq        mcq;
+       struct mlx5e_channel      *channel;
+
+       /* control */
+       struct mlx5_wq_ctrl        wq_ctrl;
+} ____cacheline_aligned_in_smp;
+
+struct mlx5e_rq {
+       /* data path */
+       struct mlx5_wq_ll      wq;
+       u32                    wqe_sz;
+       struct sk_buff       **skb;
+
+       struct device         *pdev;
+       struct net_device     *netdev;
+       struct mlx5e_rq_stats  stats;
+       struct mlx5e_cq        cq;
+
+       unsigned long          state;
+       int                    ix;
+
+       /* control */
+       struct mlx5_wq_ctrl    wq_ctrl;
+       u32                    rqn;
+       struct mlx5e_channel  *channel;
+} ____cacheline_aligned_in_smp;
+
+struct mlx5e_tx_skb_cb {
+       u32 num_bytes;
+       u8  num_wqebbs;
+       u8  num_dma;
+};
+
+#define MLX5E_TX_SKB_CB(__skb) ((struct mlx5e_tx_skb_cb *)(__skb)->cb)
+
+struct mlx5e_sq_dma {
+       dma_addr_t addr;
+       u32        size;
+};
+
+enum {
+       MLX5E_SQ_STATE_WAKE_TXQ_ENABLE,
+};
+
+struct mlx5e_sq {
+       /* data path */
+
+       /* dirtied @completion */
+       u16                        cc;
+       u32                        dma_fifo_cc;
+
+       /* dirtied @xmit */
+       u16                        pc ____cacheline_aligned_in_smp;
+       u32                        dma_fifo_pc;
+       u32                        bf_offset;
+       struct mlx5e_sq_stats      stats;
+
+       struct mlx5e_cq            cq;
+
+       /* pointers to per packet info: write@xmit, read@completion */
+       struct sk_buff           **skb;
+       struct mlx5e_sq_dma       *dma_fifo;
+
+       /* read only */
+       struct mlx5_wq_cyc         wq;
+       u32                        dma_fifo_mask;
+       void __iomem              *uar_map;
+       struct netdev_queue       *txq;
+       u32                        sqn;
+       u32                        bf_buf_size;
+       struct device             *pdev;
+       __be32                     mkey_be;
+       unsigned long              state;
+
+       /* control path */
+       struct mlx5_wq_ctrl        wq_ctrl;
+       struct mlx5_uar            uar;
+       struct mlx5e_channel      *channel;
+       int                        tc;
+} ____cacheline_aligned_in_smp;
+
+static inline bool mlx5e_sq_has_room_for(struct mlx5e_sq *sq, u16 n)
+{
+       return (((sq->wq.sz_m1 & (sq->cc - sq->pc)) >= n) ||
+               (sq->cc  == sq->pc));
+}
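The room test relies on the counters being free-running: cc - pc masked by sz_m1 is the free-slot count, with cc == pc special-cased as an empty ring. A worked example, assuming a 64-entry SQ (sz_m1 == 63):

	/* sketch: pc == 70, cc == 10 (u16, free-running)
	 *   sq->cc - sq->pc           = 10 - 70  -> 0xffc4 on wrap
	 *   sz_m1 & (sq->cc - sq->pc) = 63 & 0xffc4 = 4 free WQEBBs
	 * so mlx5e_sq_has_room_for(sq, n) holds here only for n <= 4.
	 */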
+
+enum channel_flags {
+       MLX5E_CHANNEL_NAPI_SCHED = 1,
+};
+
+struct mlx5e_channel {
+       /* data path */
+       struct mlx5e_rq            rq;
+       struct mlx5e_sq            sq[MLX5E_MAX_NUM_TC];
+       struct napi_struct         napi;
+       struct device             *pdev;
+       struct net_device         *netdev;
+       __be32                     mkey_be;
+       u8                         num_tc;
+       unsigned long              flags;
+
+       /* control */
+       struct mlx5e_priv         *priv;
+       int                        ix;
+       int                        cpu;
+};
+
+enum mlx5e_traffic_types {
+       MLX5E_TT_IPV4_TCP = 0,
+       MLX5E_TT_IPV6_TCP = 1,
+       MLX5E_TT_IPV4_UDP = 2,
+       MLX5E_TT_IPV6_UDP = 3,
+       MLX5E_TT_IPV4     = 4,
+       MLX5E_TT_IPV6     = 5,
+       MLX5E_TT_ANY      = 6,
+       MLX5E_NUM_TT      = 7,
+};
+
+enum {
+       MLX5E_RQT_SPREADING  = 0,
+       MLX5E_RQT_DEFAULT_RQ = 1,
+       MLX5E_NUM_RQT        = 2,
+};
+
+struct mlx5e_eth_addr_info {
+       u8  addr[ETH_ALEN + 2];
+       u32 tt_vec;
+       u32 ft_ix[MLX5E_NUM_TT]; /* flow table index per traffic type */
+};
+
+#define MLX5E_ETH_ADDR_HASH_SIZE (1 << BITS_PER_BYTE)
+
+struct mlx5e_eth_addr_db {
+       struct hlist_head          netdev_uc[MLX5E_ETH_ADDR_HASH_SIZE];
+       struct hlist_head          netdev_mc[MLX5E_ETH_ADDR_HASH_SIZE];
+       struct mlx5e_eth_addr_info broadcast;
+       struct mlx5e_eth_addr_info allmulti;
+       struct mlx5e_eth_addr_info promisc;
+       bool                       broadcast_enabled;
+       bool                       allmulti_enabled;
+       bool                       promisc_enabled;
+};
+
+enum {
+       MLX5E_STATE_ASYNC_EVENTS_ENABLE,
+       MLX5E_STATE_OPENED,
+};
+
+struct mlx5e_vlan_db {
+       unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+       u32           active_vlans_ft_ix[VLAN_N_VID];
+       u32           untagged_rule_ft_ix;
+       u32           any_vlan_rule_ft_ix;
+       bool          filter_disabled;
+};
+
+struct mlx5e_flow_table {
+       void *vlan;
+       void *main;
+};
+
+struct mlx5e_priv {
+       /* priv data path fields - start */
+       int                        order_base_2_num_channels;
+       int                        queue_mapping_channel_mask;
+       int                        num_tc;
+       int                        default_vlan_prio;
+       /* priv data path fields - end */
+
+       unsigned long              state;
+       struct mutex               state_lock; /* Protects Interface state */
+       struct mlx5_uar            cq_uar;
+       u32                        pdn;
+       struct mlx5_core_mr        mr;
+
+       struct mlx5e_channel     **channel;
+       u32                        tisn[MLX5E_MAX_NUM_TC];
+       u32                        rqtn;
+       u32                        tirn[MLX5E_NUM_TT];
+
+       struct mlx5e_flow_table    ft;
+       struct mlx5e_eth_addr_db   eth_addr;
+       struct mlx5e_vlan_db       vlan;
+
+       struct mlx5e_params        params;
+       spinlock_t                 async_events_spinlock; /* sync hw events */
+       struct work_struct         update_carrier_work;
+       struct work_struct         set_rx_mode_work;
+       struct delayed_work        update_stats_work;
+
+       struct mlx5_core_dev      *mdev;
+       struct net_device         *netdev;
+       struct mlx5e_stats         stats;
+};
+
+#define MLX5E_NET_IP_ALIGN 2
+
+struct mlx5e_tx_wqe {
+       struct mlx5_wqe_ctrl_seg ctrl;
+       struct mlx5_wqe_eth_seg  eth;
+};
+
+struct mlx5e_rx_wqe {
+       struct mlx5_wqe_srq_next_seg  next;
+       struct mlx5_wqe_data_seg      data;
+};
+
+enum mlx5e_link_mode {
+       MLX5E_1000BASE_CX_SGMII  = 0,
+       MLX5E_1000BASE_KX        = 1,
+       MLX5E_10GBASE_CX4        = 2,
+       MLX5E_10GBASE_KX4        = 3,
+       MLX5E_10GBASE_KR         = 4,
+       MLX5E_20GBASE_KR2        = 5,
+       MLX5E_40GBASE_CR4        = 6,
+       MLX5E_40GBASE_KR4        = 7,
+       MLX5E_56GBASE_R4         = 8,
+       MLX5E_10GBASE_CR         = 12,
+       MLX5E_10GBASE_SR         = 13,
+       MLX5E_10GBASE_ER         = 14,
+       MLX5E_40GBASE_SR4        = 15,
+       MLX5E_40GBASE_LR4        = 16,
+       MLX5E_100GBASE_CR4       = 20,
+       MLX5E_100GBASE_SR4       = 21,
+       MLX5E_100GBASE_KR4       = 22,
+       MLX5E_100GBASE_LR4       = 23,
+       MLX5E_100BASE_TX         = 24,
+       MLX5E_100BASE_T          = 25,
+       MLX5E_10GBASE_T          = 26,
+       MLX5E_25GBASE_CR         = 27,
+       MLX5E_25GBASE_KR         = 28,
+       MLX5E_25GBASE_SR         = 29,
+       MLX5E_50GBASE_CR2        = 30,
+       MLX5E_50GBASE_KR2        = 31,
+       MLX5E_LINK_MODES_NUMBER,
+};
+
+#define MLX5E_PROT_MASK(link_mode) (1 << (link_mode))
+
+u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
+                      void *accel_priv, select_queue_fallback_t fallback);
+netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
+netdev_tx_t mlx5e_xmit_multi_tc(struct sk_buff *skb, struct net_device *dev);
+
+void mlx5e_completion_event(struct mlx5_core_cq *mcq);
+void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
+int mlx5e_napi_poll(struct napi_struct *napi, int budget);
+bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq);
+bool mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
+bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
+struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq);
+
+void mlx5e_update_stats(struct mlx5e_priv *priv);
+
+int mlx5e_open_flow_table(struct mlx5e_priv *priv);
+void mlx5e_close_flow_table(struct mlx5e_priv *priv);
+void mlx5e_init_eth_addr(struct mlx5e_priv *priv);
+void mlx5e_set_rx_mode_core(struct mlx5e_priv *priv);
+void mlx5e_set_rx_mode_work(struct work_struct *work);
+
+int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
+                         u16 vid);
+int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
+                          u16 vid);
+void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv);
+void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
+int mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv);
+void mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv);
+
+int mlx5e_open_locked(struct net_device *netdev);
+int mlx5e_close_locked(struct net_device *netdev);
+int mlx5e_update_priv_params(struct mlx5e_priv *priv,
+                            struct mlx5e_params *new_params);
+
+static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
+                                     struct mlx5e_tx_wqe *wqe)
+{
+       /* ensure wqe is visible to device before updating doorbell record */
+       dma_wmb();
+
+       *sq->wq.db = cpu_to_be32(sq->pc);
+
+       /* ensure doorbell record is visible to device before ringing the
+        * doorbell
+        */
+       wmb();
+
+       mlx5_write64((__be32 *)&wqe->ctrl,
+                    sq->uar_map + MLX5_BF_OFFSET + sq->bf_offset,
+                    NULL);
+
+       sq->bf_offset ^= sq->bf_buf_size;
+}
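The final XOR alternates between the two BlueFlame buffers of the UAR page, so back-to-back doorbells never reuse a slot the HCA may still be reading. Assuming bf_buf_size == 256, successive rings go:

	/* sketch of the bf_offset sequence with bf_buf_size == 256:
	 *   ring 1: uar_map + MLX5_BF_OFFSET + 0
	 *   ring 2: uar_map + MLX5_BF_OFFSET + 256
	 *   ring 3: uar_map + MLX5_BF_OFFSET + 0   (and so on)
	 */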
+
+static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
+{
+       struct mlx5_core_cq *mcq;
+
+       mcq = &cq->mcq;
+       mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, NULL, cq->wq.cc);
+}
+
+extern const struct ethtool_ops mlx5e_ethtool_ops;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
new file mode 100644 (file)
index 0000000..3889384
--- /dev/null
@@ -0,0 +1,679 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "en.h"
+
+static void mlx5e_get_drvinfo(struct net_device *dev,
+                             struct ethtool_drvinfo *drvinfo)
+{
+       struct mlx5e_priv *priv = netdev_priv(dev);
+       struct mlx5_core_dev *mdev = priv->mdev;
+
+       strlcpy(drvinfo->driver, DRIVER_NAME, sizeof(drvinfo->driver));
+       strlcpy(drvinfo->version, DRIVER_VERSION " (" DRIVER_RELDATE ")",
+               sizeof(drvinfo->version));
+       snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+                "%d.%d.%d",
+                fw_rev_maj(mdev), fw_rev_min(mdev), fw_rev_sub(mdev));
+       strlcpy(drvinfo->bus_info, pci_name(mdev->pdev),
+               sizeof(drvinfo->bus_info));
+}
+
+static const struct {
+       u32 supported;
+       u32 advertised;
+       u32 speed;
+} ptys2ethtool_table[MLX5E_LINK_MODES_NUMBER] = {
+       [MLX5E_1000BASE_CX_SGMII] = {
+               .supported  = SUPPORTED_1000baseKX_Full,
+               .advertised = ADVERTISED_1000baseKX_Full,
+               .speed      = 1000,
+       },
+       [MLX5E_1000BASE_KX] = {
+               .supported  = SUPPORTED_1000baseKX_Full,
+               .advertised = ADVERTISED_1000baseKX_Full,
+               .speed      = 1000,
+       },
+       [MLX5E_10GBASE_CX4] = {
+               .supported  = SUPPORTED_10000baseKX4_Full,
+               .advertised = ADVERTISED_10000baseKX4_Full,
+               .speed      = 10000,
+       },
+       [MLX5E_10GBASE_KX4] = {
+               .supported  = SUPPORTED_10000baseKX4_Full,
+               .advertised = ADVERTISED_10000baseKX4_Full,
+               .speed      = 10000,
+       },
+       [MLX5E_10GBASE_KR] = {
+               .supported  = SUPPORTED_10000baseKR_Full,
+               .advertised = ADVERTISED_10000baseKR_Full,
+               .speed      = 10000,
+       },
+       [MLX5E_20GBASE_KR2] = {
+               .supported  = SUPPORTED_20000baseKR2_Full,
+               .advertised = ADVERTISED_20000baseKR2_Full,
+               .speed      = 20000,
+       },
+       [MLX5E_40GBASE_CR4] = {
+               .supported  = SUPPORTED_40000baseCR4_Full,
+               .advertised = ADVERTISED_40000baseCR4_Full,
+               .speed      = 40000,
+       },
+       [MLX5E_40GBASE_KR4] = {
+               .supported  = SUPPORTED_40000baseKR4_Full,
+               .advertised = ADVERTISED_40000baseKR4_Full,
+               .speed      = 40000,
+       },
+       [MLX5E_56GBASE_R4] = {
+               .supported  = SUPPORTED_56000baseKR4_Full,
+               .advertised = ADVERTISED_56000baseKR4_Full,
+               .speed      = 56000,
+       },
+       [MLX5E_10GBASE_CR] = {
+               .supported  = SUPPORTED_10000baseKR_Full,
+               .advertised = ADVERTISED_10000baseKR_Full,
+               .speed      = 10000,
+       },
+       [MLX5E_10GBASE_SR] = {
+               .supported  = SUPPORTED_10000baseKR_Full,
+               .advertised = ADVERTISED_10000baseKR_Full,
+               .speed      = 10000,
+       },
+       [MLX5E_10GBASE_ER] = {
+               .supported  = SUPPORTED_10000baseKR_Full,
+               .advertised = ADVERTISED_10000baseKR_Full,
+               .speed      = 10000,
+       },
+       [MLX5E_40GBASE_SR4] = {
+               .supported  = SUPPORTED_40000baseSR4_Full,
+               .advertised = ADVERTISED_40000baseSR4_Full,
+               .speed      = 40000,
+       },
+       [MLX5E_40GBASE_LR4] = {
+               .supported  = SUPPORTED_40000baseLR4_Full,
+               .advertised = ADVERTISED_40000baseLR4_Full,
+               .speed      = 40000,
+       },
+       [MLX5E_100GBASE_CR4] = {
+               .speed      = 100000,
+       },
+       [MLX5E_100GBASE_SR4] = {
+               .speed      = 100000,
+       },
+       [MLX5E_100GBASE_KR4] = {
+               .speed      = 100000,
+       },
+       [MLX5E_100GBASE_LR4] = {
+               .speed      = 100000,
+       },
+       [MLX5E_100BASE_TX]   = {
+               .speed      = 100,
+       },
+       [MLX5E_100BASE_T]    = {
+               .supported  = SUPPORTED_100baseT_Full,
+               .advertised = ADVERTISED_100baseT_Full,
+               .speed      = 100,
+       },
+       [MLX5E_10GBASE_T]    = {
+               .supported  = SUPPORTED_10000baseT_Full,
+               .advertised = ADVERTISED_10000baseT_Full,
+               .speed      = 10000,
+       },
+       [MLX5E_25GBASE_CR]   = {
+               .speed      = 25000,
+       },
+       [MLX5E_25GBASE_KR]   = {
+               .speed      = 25000,
+       },
+       [MLX5E_25GBASE_SR]   = {
+               .speed      = 25000,
+       },
+       [MLX5E_50GBASE_CR2]  = {
+               .speed      = 50000,
+       },
+       [MLX5E_50GBASE_KR2]  = {
+               .speed      = 50000,
+       },
+};
+
+static int mlx5e_get_sset_count(struct net_device *dev, int sset)
+{
+       struct mlx5e_priv *priv = netdev_priv(dev);
+
+       switch (sset) {
+       case ETH_SS_STATS:
+               return NUM_VPORT_COUNTERS +
+                      priv->params.num_channels * NUM_RQ_STATS +
+                      priv->params.num_channels * priv->num_tc *
+                                                  NUM_SQ_STATS;
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static void mlx5e_get_strings(struct net_device *dev,
+                             uint32_t stringset, uint8_t *data)
+{
+       int i, j, tc, idx = 0;
+       struct mlx5e_priv *priv = netdev_priv(dev);
+
+       switch (stringset) {
+       case ETH_SS_PRIV_FLAGS:
+               break;
+
+       case ETH_SS_TEST:
+               break;
+
+       case ETH_SS_STATS:
+               /* VPORT counters */
+               for (i = 0; i < NUM_VPORT_COUNTERS; i++)
+                       strcpy(data + (idx++) * ETH_GSTRING_LEN,
+                              vport_strings[i]);
+
+               /* per channel counters */
+               for (i = 0; i < priv->params.num_channels; i++)
+                       for (j = 0; j < NUM_RQ_STATS; j++)
+                               sprintf(data + (idx++) * ETH_GSTRING_LEN,
+                                       "rx%d_%s", i, rq_stats_strings[j]);
+
+               for (i = 0; i < priv->params.num_channels; i++)
+                       for (tc = 0; tc < priv->num_tc; tc++)
+                               for (j = 0; j < NUM_SQ_STATS; j++)
+                                       sprintf(data +
+                                               (idx++) * ETH_GSTRING_LEN,
+                                               "tx%d_%d_%s", i, tc,
+                                               sq_stats_strings[j]);
+               break;
+       }
+}
+
+static void mlx5e_get_ethtool_stats(struct net_device *dev,
+                                   struct ethtool_stats *stats, u64 *data)
+{
+       struct mlx5e_priv *priv = netdev_priv(dev);
+       int i, j, tc, idx = 0;
+
+       if (!data)
+               return;
+
+       mutex_lock(&priv->state_lock);
+       if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+               mlx5e_update_stats(priv);
+       mutex_unlock(&priv->state_lock);
+
+       for (i = 0; i < NUM_VPORT_COUNTERS; i++)
+               data[idx++] = ((u64 *)&priv->stats.vport)[i];
+
+       /* per channel counters */
+       for (i = 0; i < priv->params.num_channels; i++)
+               for (j = 0; j < NUM_RQ_STATS; j++)
+                       data[idx++] = !test_bit(MLX5E_STATE_OPENED,
+                                               &priv->state) ? 0 :
+                                      ((u64 *)&priv->channel[i]->rq.stats)[j];
+
+       for (i = 0; i < priv->params.num_channels; i++)
+               for (tc = 0; tc < priv->num_tc; tc++)
+                       for (j = 0; j < NUM_SQ_STATS; j++)
+                               data[idx++] = !test_bit(MLX5E_STATE_OPENED,
+                                                       &priv->state) ? 0 :
+                               ((u64 *)&priv->channel[i]->sq[tc].stats)[j];
+}
+
+static void mlx5e_get_ringparam(struct net_device *dev,
+                               struct ethtool_ringparam *param)
+{
+       struct mlx5e_priv *priv = netdev_priv(dev);
+
+       param->rx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE;
+       param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE;
+       param->rx_pending     = 1 << priv->params.log_rq_size;
+       param->tx_pending     = 1 << priv->params.log_sq_size;
+}
+
+static int mlx5e_set_ringparam(struct net_device *dev,
+                              struct ethtool_ringparam *param)
+{
+       struct mlx5e_priv *priv = netdev_priv(dev);
+       struct mlx5e_params new_params;
+       u16 min_rx_wqes;
+       u8 log_rq_size;
+       u8 log_sq_size;
+       int err = 0;
+
+       if (param->rx_jumbo_pending) {
+               netdev_info(dev, "%s: rx_jumbo_pending not supported\n",
+                           __func__);
+               return -EINVAL;
+       }
+       if (param->rx_mini_pending) {
+               netdev_info(dev, "%s: rx_mini_pending not supported\n",
+                           __func__);
+               return -EINVAL;
+       }
+       if (param->rx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE)) {
+               netdev_info(dev, "%s: rx_pending (%d) < min (%d)\n",
+                           __func__, param->rx_pending,
+                           1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE);
+               return -EINVAL;
+       }
+       if (param->rx_pending > (1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE)) {
+               netdev_info(dev, "%s: rx_pending (%d) > max (%d)\n",
+                           __func__, param->rx_pending,
+                           1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE);
+               return -EINVAL;
+       }
+       if (param->tx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)) {
+               netdev_info(dev, "%s: tx_pending (%d) < min (%d)\n",
+                           __func__, param->tx_pending,
+                           1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE);
+               return -EINVAL;
+       }
+       if (param->tx_pending > (1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE)) {
+               netdev_info(dev, "%s: tx_pending (%d) > max (%d)\n",
+                           __func__, param->tx_pending,
+                           1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE);
+               return -EINVAL;
+       }
+
+       log_rq_size = order_base_2(param->rx_pending);
+       log_sq_size = order_base_2(param->tx_pending);
+       min_rx_wqes = min_t(u16, param->rx_pending - 1,
+                           MLX5E_PARAMS_DEFAULT_MIN_RX_WQES);
+
+       if (log_rq_size == priv->params.log_rq_size &&
+           log_sq_size == priv->params.log_sq_size &&
+           min_rx_wqes == priv->params.min_rx_wqes)
+               return 0;
+
+       mutex_lock(&priv->state_lock);
+       new_params = priv->params;
+       new_params.log_rq_size = log_rq_size;
+       new_params.log_sq_size = log_sq_size;
+       new_params.min_rx_wqes = min_rx_wqes;
+       err = mlx5e_update_priv_params(priv, &new_params);
+       mutex_unlock(&priv->state_lock);
+
+       return err;
+}
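Note that order_base_2() rounds a request up to the next power of two before the parameters are compared and applied; a worked example (values hypothetical):

	/* sketch: userspace asks for rx 1000 / tx 1000
	 *   log_rq_size = order_base_2(1000) = 10  ->  1024 RX WQEs
	 *   log_sq_size = order_base_2(1000) = 10  ->  1024 TX WQEs
	 *   min_rx_wqes = min(1000 - 1, 0x80)       ->  128
	 */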
+
+static void mlx5e_get_channels(struct net_device *dev,
+                              struct ethtool_channels *ch)
+{
+       struct mlx5e_priv *priv = netdev_priv(dev);
+       int ncv = priv->mdev->priv.eq_table.num_comp_vectors;
+
+       ch->max_combined   = ncv;
+       ch->combined_count = priv->params.num_channels;
+}
+
+static int mlx5e_set_channels(struct net_device *dev,
+                             struct ethtool_channels *ch)
+{
+       struct mlx5e_priv *priv = netdev_priv(dev);
+       int ncv = priv->mdev->priv.eq_table.num_comp_vectors;
+       unsigned int count = ch->combined_count;
+       struct mlx5e_params new_params;
+       int err = 0;
+
+       if (!count) {
+               netdev_info(dev, "%s: combined_count=0 not supported\n",
+                           __func__);
+               return -EINVAL;
+       }
+       if (ch->rx_count || ch->tx_count) {
+               netdev_info(dev, "%s: separate rx/tx count not supported\n",
+                           __func__);
+               return -EINVAL;
+       }
+       if (count > ncv) {
+               netdev_info(dev, "%s: count (%d) > max (%d)\n",
+                           __func__, count, ncv);
+               return -EINVAL;
+       }
+
+       if (priv->params.num_channels == count)
+               return 0;
+
+       mutex_lock(&priv->state_lock);
+       new_params = priv->params;
+       new_params.num_channels = count;
+       err = mlx5e_update_priv_params(priv, &new_params);
+       mutex_unlock(&priv->state_lock);
+
+       return err;
+}
+
+static int mlx5e_get_coalesce(struct net_device *netdev,
+                             struct ethtool_coalesce *coal)
+{
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+
+       coal->rx_coalesce_usecs       = priv->params.rx_cq_moderation_usec;
+       coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation_pkts;
+       coal->tx_coalesce_usecs       = priv->params.tx_cq_moderation_usec;
+       coal->tx_max_coalesced_frames = priv->params.tx_cq_moderation_pkts;
+
+       return 0;
+}
+
+static int mlx5e_set_coalesce(struct net_device *netdev,
+                             struct ethtool_coalesce *coal)
+{
+       struct mlx5e_priv *priv    = netdev_priv(netdev);
+       struct mlx5_core_dev *mdev = priv->mdev;
+       struct mlx5e_channel *c;
+       int tc;
+       int i;
+
+       priv->params.tx_cq_moderation_usec = coal->tx_coalesce_usecs;
+       priv->params.tx_cq_moderation_pkts = coal->tx_max_coalesced_frames;
+       priv->params.rx_cq_moderation_usec = coal->rx_coalesce_usecs;
+       priv->params.rx_cq_moderation_pkts = coal->rx_max_coalesced_frames;
+
+       for (i = 0; i < priv->params.num_channels; ++i) {
+               c = priv->channel[i];
+
+               for (tc = 0; tc < c->num_tc; tc++) {
+                       mlx5_core_modify_cq_moderation(mdev,
+                                               &c->sq[tc].cq.mcq,
+                                               coal->tx_coalesce_usecs,
+                                               coal->tx_max_coalesced_frames);
+               }
+
+               mlx5_core_modify_cq_moderation(mdev, &c->rq.cq.mcq,
+                                              coal->rx_coalesce_usecs,
+                                              coal->rx_max_coalesced_frames);
+       }
+
+       return 0;
+}
+
+static u32 ptys2ethtool_supported_link(u32 eth_proto_cap)
+{
+       int i;
+       u32 supported_modes = 0;
+
+       for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
+               if (eth_proto_cap & MLX5E_PROT_MASK(i))
+                       supported_modes |= ptys2ethtool_table[i].supported;
+       }
+       return supported_modes;
+}
+
+static u32 ptys2ethtool_adver_link(u32 eth_proto_cap)
+{
+       int i;
+       u32 advertising_modes = 0;
+
+       for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
+               if (eth_proto_cap & MLX5E_PROT_MASK(i))
+                       advertising_modes |= ptys2ethtool_table[i].advertised;
+       }
+       return advertising_modes;
+}
+
+static u32 ptys2ethtool_supported_port(u32 eth_proto_cap)
+{
+       if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_10GBASE_CR)
+                          | MLX5E_PROT_MASK(MLX5E_10GBASE_SR)
+                          | MLX5E_PROT_MASK(MLX5E_40GBASE_CR4)
+                          | MLX5E_PROT_MASK(MLX5E_40GBASE_SR4)
+                          | MLX5E_PROT_MASK(MLX5E_100GBASE_SR4)
+                          | MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII))) {
+               return SUPPORTED_FIBRE;
+       }
+
+       if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_100GBASE_KR4)
+                          | MLX5E_PROT_MASK(MLX5E_40GBASE_KR4)
+                          | MLX5E_PROT_MASK(MLX5E_10GBASE_KR)
+                          | MLX5E_PROT_MASK(MLX5E_10GBASE_KX4)
+                          | MLX5E_PROT_MASK(MLX5E_1000BASE_KX))) {
+               return SUPPORTED_Backplane;
+       }
+       return 0;
+}
+
+static void get_speed_duplex(struct net_device *netdev,
+                            u32 eth_proto_oper,
+                            struct ethtool_cmd *cmd)
+{
+       int i;
+       u32 speed = SPEED_UNKNOWN;
+       u8 duplex = DUPLEX_UNKNOWN;
+
+       if (!netif_carrier_ok(netdev))
+               goto out;
+
+       for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
+               if (eth_proto_oper & MLX5E_PROT_MASK(i)) {
+                       speed = ptys2ethtool_table[i].speed;
+                       duplex = DUPLEX_FULL;
+                       break;
+               }
+       }
+out:
+       ethtool_cmd_speed_set(cmd, speed);
+       cmd->duplex = duplex;
+}
+
+static void get_supported(u32 eth_proto_cap, u32 *supported)
+{
+       *supported |= ptys2ethtool_supported_port(eth_proto_cap);
+       *supported |= ptys2ethtool_supported_link(eth_proto_cap);
+       *supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+}
+
+static void get_advertising(u32 eth_proto_cap, u8 tx_pause,
+                           u8 rx_pause, u32 *advertising)
+{
+       *advertising |= ptys2ethtool_adver_link(eth_proto_cap);
+       *advertising |= tx_pause ? ADVERTISED_Pause : 0;
+       *advertising |= (tx_pause ^ rx_pause) ? ADVERTISED_Asym_Pause : 0;
+}
+
+static u8 get_connector_port(u32 eth_proto)
+{
+       if (eth_proto & (MLX5E_PROT_MASK(MLX5E_10GBASE_SR)
+                        | MLX5E_PROT_MASK(MLX5E_40GBASE_SR4)
+                        | MLX5E_PROT_MASK(MLX5E_100GBASE_SR4)
+                        | MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII))) {
+                       return PORT_FIBRE;
+       }
+
+       if (eth_proto & (MLX5E_PROT_MASK(MLX5E_40GBASE_CR4)
+                        | MLX5E_PROT_MASK(MLX5E_10GBASE_CR)
+                        | MLX5E_PROT_MASK(MLX5E_100GBASE_CR4))) {
+                       return PORT_DA;
+       }
+
+       if (eth_proto & (MLX5E_PROT_MASK(MLX5E_10GBASE_KX4)
+                        | MLX5E_PROT_MASK(MLX5E_10GBASE_KR)
+                        | MLX5E_PROT_MASK(MLX5E_40GBASE_KR4)
+                        | MLX5E_PROT_MASK(MLX5E_100GBASE_KR4))) {
+                       return PORT_NONE;
+       }
+
+       return PORT_OTHER;
+}
+
+static void get_lp_advertising(u32 eth_proto_lp, u32 *lp_advertising)
+{
+       *lp_advertising = ptys2ethtool_adver_link(eth_proto_lp);
+}
+
+static int mlx5e_get_settings(struct net_device *netdev,
+                             struct ethtool_cmd *cmd)
+{
+       struct mlx5e_priv *priv    = netdev_priv(netdev);
+       struct mlx5_core_dev *mdev = priv->mdev;
+       u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+       u32 eth_proto_cap;
+       u32 eth_proto_admin;
+       u32 eth_proto_lp;
+       u32 eth_proto_oper;
+       int err;
+
+       err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1);
+
+       if (err) {
+               netdev_err(netdev, "%s: query port ptys failed: %d\n",
+                          __func__, err);
+               goto err_query_ptys;
+       }
+
+       eth_proto_cap   = MLX5_GET(ptys_reg, out, eth_proto_capability);
+       eth_proto_admin = MLX5_GET(ptys_reg, out, eth_proto_admin);
+       eth_proto_oper  = MLX5_GET(ptys_reg, out, eth_proto_oper);
+       eth_proto_lp    = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise);
+
+       cmd->supported   = 0;
+       cmd->advertising = 0;
+
+       get_supported(eth_proto_cap, &cmd->supported);
+       get_advertising(eth_proto_admin, 0, 0, &cmd->advertising);
+       get_speed_duplex(netdev, eth_proto_oper, cmd);
+
+       eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
+
+       cmd->port = get_connector_port(eth_proto_oper);
+       get_lp_advertising(eth_proto_lp, &cmd->lp_advertising);
+
+       cmd->transceiver = XCVR_INTERNAL;
+
+err_query_ptys:
+       return err;
+}
+
+static u32 mlx5e_ethtool2ptys_adver_link(u32 link_modes)
+{
+       u32 i, ptys_modes = 0;
+
+       for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
+               if (ptys2ethtool_table[i].advertised & link_modes)
+                       ptys_modes |= MLX5E_PROT_MASK(i);
+       }
+
+       return ptys_modes;
+}
+
+static u32 mlx5e_ethtool2ptys_speed_link(u32 speed)
+{
+       u32 i, speed_links = 0;
+
+       for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
+               if (ptys2ethtool_table[i].speed == speed)
+                       speed_links |= MLX5E_PROT_MASK(i);
+       }
+
+       return speed_links;
+}
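For autoneg-off requests the forced speed fans out to every table entry carrying that speed; mlx5e_set_settings() below then intersects the result with eth_proto_cap. A worked example:

	/* sketch: mlx5e_ethtool2ptys_speed_link(40000)
	 *   == MLX5E_PROT_MASK(MLX5E_40GBASE_CR4)   bit 6
	 *    | MLX5E_PROT_MASK(MLX5E_40GBASE_KR4)   bit 7
	 *    | MLX5E_PROT_MASK(MLX5E_40GBASE_SR4)   bit 15
	 *    | MLX5E_PROT_MASK(MLX5E_40GBASE_LR4)   bit 16
	 */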
+
+static int mlx5e_set_settings(struct net_device *netdev,
+                             struct ethtool_cmd *cmd)
+{
+       struct mlx5e_priv *priv    = netdev_priv(netdev);
+       struct mlx5_core_dev *mdev = priv->mdev;
+       u32 link_modes;
+       u32 speed;
+       u32 eth_proto_cap, eth_proto_admin;
+       u8 port_status;
+       int err;
+
+       speed = ethtool_cmd_speed(cmd);
+
+       link_modes = cmd->autoneg == AUTONEG_ENABLE ?
+               mlx5e_ethtool2ptys_adver_link(cmd->advertising) :
+               mlx5e_ethtool2ptys_speed_link(speed);
+
+       err = mlx5_query_port_proto_cap(mdev, &eth_proto_cap, MLX5_PTYS_EN);
+       if (err) {
+               netdev_err(netdev, "%s: query port eth proto cap failed: %d\n",
+                          __func__, err);
+               goto out;
+       }
+
+       link_modes = link_modes & eth_proto_cap;
+       if (!link_modes) {
+               netdev_err(netdev, "%s: unsupported link mode(s) requested\n",
+                          __func__);
+               err = -EINVAL;
+               goto out;
+       }
+
+       err = mlx5_query_port_proto_admin(mdev, &eth_proto_admin, MLX5_PTYS_EN);
+       if (err) {
+               netdev_err(netdev, "%s: query port eth proto admin failed: %d\n",
+                          __func__, err);
+               goto out;
+       }
+
+       if (link_modes == eth_proto_admin)
+               goto out;
+
+       err = mlx5_set_port_proto(mdev, link_modes, MLX5_PTYS_EN);
+       if (err) {
+               netdev_err(netdev, "%s: set port eth proto admin failed: %d\n",
+                          __func__, err);
+               goto out;
+       }
+
+       err = mlx5_query_port_status(mdev, &port_status);
+       if (err)
+               goto out;
+
+       if (port_status == MLX5_PORT_DOWN)
+               return 0;
+
+       err = mlx5_set_port_status(mdev, MLX5_PORT_DOWN);
+       if (err)
+               goto out;
+       err = mlx5_set_port_status(mdev, MLX5_PORT_UP);
+out:
+       return err;
+}
+
+const struct ethtool_ops mlx5e_ethtool_ops = {
+       .get_drvinfo       = mlx5e_get_drvinfo,
+       .get_link          = ethtool_op_get_link,
+       .get_strings       = mlx5e_get_strings,
+       .get_sset_count    = mlx5e_get_sset_count,
+       .get_ethtool_stats = mlx5e_get_ethtool_stats,
+       .get_ringparam     = mlx5e_get_ringparam,
+       .set_ringparam     = mlx5e_set_ringparam,
+       .get_channels      = mlx5e_get_channels,
+       .set_channels      = mlx5e_set_channels,
+       .get_coalesce      = mlx5e_get_coalesce,
+       .set_coalesce      = mlx5e_set_coalesce,
+       .get_settings      = mlx5e_get_settings,
+       .set_settings      = mlx5e_set_settings,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c
new file mode 100644 (file)
index 0000000..120db80
--- /dev/null
@@ -0,0 +1,860 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/list.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/mlx5/flow_table.h>
+#include "en.h"
+
+enum {
+       MLX5E_FULLMATCH = 0,
+       MLX5E_ALLMULTI  = 1,
+       MLX5E_PROMISC   = 2,
+};
+
+enum {
+       MLX5E_UC        = 0,
+       MLX5E_MC_IPV4   = 1,
+       MLX5E_MC_IPV6   = 2,
+       MLX5E_MC_OTHER  = 3,
+};
+
+enum {
+       MLX5E_ACTION_NONE = 0,
+       MLX5E_ACTION_ADD  = 1,
+       MLX5E_ACTION_DEL  = 2,
+};
+
+struct mlx5e_eth_addr_hash_node {
+       struct hlist_node          hlist;
+       u8                         action;
+       struct mlx5e_eth_addr_info ai;
+};
+
+static inline int mlx5e_hash_eth_addr(u8 *addr)
+{
+       return addr[5];
+}
+
+static void mlx5e_add_eth_addr_to_hash(struct hlist_head *hash, u8 *addr)
+{
+       struct mlx5e_eth_addr_hash_node *hn;
+       int ix = mlx5e_hash_eth_addr(addr);
+       int found = 0;
+
+       hlist_for_each_entry(hn, &hash[ix], hlist)
+               if (ether_addr_equal_64bits(hn->ai.addr, addr)) {
+                       found = 1;
+                       break;
+               }
+
+       if (found) {
+               hn->action = MLX5E_ACTION_NONE;
+               return;
+       }
+
+       hn = kzalloc(sizeof(*hn), GFP_ATOMIC);
+       if (!hn)
+               return;
+
+       ether_addr_copy(hn->ai.addr, addr);
+       hn->action = MLX5E_ACTION_ADD;
+
+       hlist_add_head(&hn->hlist, &hash[ix]);
+}
+
+static void mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn)
+{
+       hlist_del(&hn->hlist);
+       kfree(hn);
+}
+
+static void mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
+                                              struct mlx5e_eth_addr_info *ai)
+{
+       void *ft = priv->ft.main;
+
+       if (ai->tt_vec & (1 << MLX5E_TT_IPV6_TCP))
+               mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_TCP]);
+
+       if (ai->tt_vec & (1 << MLX5E_TT_IPV4_TCP))
+               mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_TCP]);
+
+       if (ai->tt_vec & (1 << MLX5E_TT_IPV6_UDP))
+               mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_UDP]);
+
+       if (ai->tt_vec & (1 << MLX5E_TT_IPV4_UDP))
+               mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_UDP]);
+
+       if (ai->tt_vec & (1 << MLX5E_TT_IPV6))
+               mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6]);
+
+       if (ai->tt_vec & (1 << MLX5E_TT_IPV4))
+               mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4]);
+
+       if (ai->tt_vec & (1 << MLX5E_TT_ANY))
+               mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_ANY]);
+}
+
+static int mlx5e_get_eth_addr_type(u8 *addr)
+{
+       if (is_unicast_ether_addr(addr))
+               return MLX5E_UC;
+
+       if ((addr[0] == 0x01) &&
+           (addr[1] == 0x00) &&
+           (addr[2] == 0x5e) &&
+          !(addr[3] &  0x80))
+               return MLX5E_MC_IPV4;
+
+       if ((addr[0] == 0x33) &&
+           (addr[1] == 0x33))
+               return MLX5E_MC_IPV6;
+
+       return MLX5E_MC_OTHER;
+}
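Classification follows the standard multicast MAC prefixes; a few hypothetical inputs:

	/* sketch: mlx5e_get_eth_addr_type() examples
	 *   00:11:22:33:44:55 -> MLX5E_UC       (unicast)
	 *   01:00:5e:01:02:03 -> MLX5E_MC_IPV4  (01:00:5e, addr[3] < 0x80)
	 *   33:33:ff:00:00:01 -> MLX5E_MC_IPV6  (33:33 prefix)
	 *   01:80:c2:00:00:00 -> MLX5E_MC_OTHER (other multicast)
	 */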
+
+static u32 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
+{
+       int eth_addr_type;
+       u32 ret;
+
+       switch (type) {
+       case MLX5E_FULLMATCH:
+               eth_addr_type = mlx5e_get_eth_addr_type(ai->addr);
+               switch (eth_addr_type) {
+               case MLX5E_UC:
+                       ret =
+                               (1 << MLX5E_TT_IPV4_TCP) |
+                               (1 << MLX5E_TT_IPV6_TCP) |
+                               (1 << MLX5E_TT_IPV4_UDP) |
+                               (1 << MLX5E_TT_IPV6_UDP) |
+                               (1 << MLX5E_TT_IPV4)     |
+                               (1 << MLX5E_TT_IPV6)     |
+                               (1 << MLX5E_TT_ANY)      |
+                               0;
+                       break;
+
+               case MLX5E_MC_IPV4:
+                       ret =
+                               (1 << MLX5E_TT_IPV4_UDP) |
+                               (1 << MLX5E_TT_IPV4)     |
+                               0;
+                       break;
+
+               case MLX5E_MC_IPV6:
+                       ret =
+                               (1 << MLX5E_TT_IPV6_UDP) |
+                               (1 << MLX5E_TT_IPV6)     |
+                               0;
+                       break;
+
+               case MLX5E_MC_OTHER:
+                       ret =
+                               (1 << MLX5E_TT_ANY)      |
+                               0;
+                       break;
+               }
+
+               break;
+
+       case MLX5E_ALLMULTI:
+               ret =
+                       (1 << MLX5E_TT_IPV4_UDP) |
+                       (1 << MLX5E_TT_IPV6_UDP) |
+                       (1 << MLX5E_TT_IPV4)     |
+                       (1 << MLX5E_TT_IPV6)     |
+                       (1 << MLX5E_TT_ANY)      |
+                       0;
+               break;
+
+       default: /* MLX5E_PROMISC */
+               ret =
+                       (1 << MLX5E_TT_IPV4_TCP) |
+                       (1 << MLX5E_TT_IPV6_TCP) |
+                       (1 << MLX5E_TT_IPV4_UDP) |
+                       (1 << MLX5E_TT_IPV6_UDP) |
+                       (1 << MLX5E_TT_IPV4)     |
+                       (1 << MLX5E_TT_IPV6)     |
+                       (1 << MLX5E_TT_ANY)      |
+                       0;
+               break;
+       }
+
+       return ret;
+}
+
+static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
+                                    struct mlx5e_eth_addr_info *ai, int type,
+                                    void *flow_context, void *match_criteria)
+{
+       u8 match_criteria_enable = 0;
+       void *match_value;
+       void *dest;
+       u8   *dmac;
+       u8   *match_criteria_dmac;
+       void *ft   = priv->ft.main;
+       u32  *tirn = priv->tirn;
+       u32  tt_vec;
+       int  err;
+
+       match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
+       dmac = MLX5_ADDR_OF(fte_match_param, match_value,
+                           outer_headers.dmac_47_16);
+       match_criteria_dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
+                                          outer_headers.dmac_47_16);
+       dest = MLX5_ADDR_OF(flow_context, flow_context, destination);
+
+       MLX5_SET(flow_context, flow_context, action,
+                MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
+       MLX5_SET(flow_context, flow_context, destination_list_size, 1);
+       MLX5_SET(dest_format_struct, dest, destination_type,
+                MLX5_FLOW_CONTEXT_DEST_TYPE_TIR);
+
+       switch (type) {
+       case MLX5E_FULLMATCH:
+               match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+               memset(match_criteria_dmac, 0xff, ETH_ALEN);
+               ether_addr_copy(dmac, ai->addr);
+               break;
+
+       case MLX5E_ALLMULTI:
+               match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+               match_criteria_dmac[0] = 0x01;
+               dmac[0] = 0x01;
+               break;
+
+       case MLX5E_PROMISC:
+               break;
+       }
+
+       tt_vec = mlx5e_get_tt_vec(ai, type);
+
+       if (tt_vec & (1 << MLX5E_TT_ANY)) {
+               MLX5_SET(dest_format_struct, dest, destination_id,
+                        tirn[MLX5E_TT_ANY]);
+               err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+                                               match_criteria, flow_context,
+                                               &ai->ft_ix[MLX5E_TT_ANY]);
+               if (err) {
+                       mlx5e_del_eth_addr_from_flow_table(priv, ai);
+                       return err;
+               }
+               ai->tt_vec |= (1 << MLX5E_TT_ANY);
+       }
+
+       match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+       MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+                        outer_headers.ethertype);
+
+       if (tt_vec & (1 << MLX5E_TT_IPV4)) {
+               MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+                        ETH_P_IP);
+               MLX5_SET(dest_format_struct, dest, destination_id,
+                        tirn[MLX5E_TT_IPV4]);
+               err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+                                               match_criteria, flow_context,
+                                               &ai->ft_ix[MLX5E_TT_IPV4]);
+               if (err) {
+                       mlx5e_del_eth_addr_from_flow_table(priv, ai);
+                       return err;
+               }
+               ai->tt_vec |= (1 << MLX5E_TT_IPV4);
+       }
+
+       if (tt_vec & (1 << MLX5E_TT_IPV6)) {
+               MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+                        ETH_P_IPV6);
+               MLX5_SET(dest_format_struct, dest, destination_id,
+                        tirn[MLX5E_TT_IPV6]);
+               err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+                                               match_criteria, flow_context,
+                                               &ai->ft_ix[MLX5E_TT_IPV6]);
+               if (err) {
+                       mlx5e_del_eth_addr_from_flow_table(priv, ai);
+                       return err;
+               }
+               ai->tt_vec |= (1 << MLX5E_TT_IPV6);
+       }
+
+       MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+                        outer_headers.ip_protocol);
+       MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
+                IPPROTO_UDP);
+
+       if (tt_vec & (1 << MLX5E_TT_IPV4_UDP)) {
+               MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+                        ETH_P_IP);
+               MLX5_SET(dest_format_struct, dest, destination_id,
+                        tirn[MLX5E_TT_IPV4_UDP]);
+               err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+                                               match_criteria, flow_context,
+                                               &ai->ft_ix[MLX5E_TT_IPV4_UDP]);
+               if (err) {
+                       mlx5e_del_eth_addr_from_flow_table(priv, ai);
+                       return err;
+               }
+               ai->tt_vec |= (1 << MLX5E_TT_IPV4_UDP);
+       }
+
+       if (tt_vec & (1 << MLX5E_TT_IPV6_UDP)) {
+               MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+                        ETH_P_IPV6);
+               MLX5_SET(dest_format_struct, dest, destination_id,
+                        tirn[MLX5E_TT_IPV6_UDP]);
+               err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+                                               match_criteria, flow_context,
+                                               &ai->ft_ix[MLX5E_TT_IPV6_UDP]);
+               if (err) {
+                       mlx5e_del_eth_addr_from_flow_table(priv, ai);
+                       return err;
+               }
+               ai->tt_vec |= (1 << MLX5E_TT_IPV6_UDP);
+       }
+
+       MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
+                IPPROTO_TCP);
+
+       if (tt_vec & (1 << MLX5E_TT_IPV4_TCP)) {
+               MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+                        ETH_P_IP);
+               MLX5_SET(dest_format_struct, dest, destination_id,
+                        tirn[MLX5E_TT_IPV4_TCP]);
+               err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+                                               match_criteria, flow_context,
+                                               &ai->ft_ix[MLX5E_TT_IPV4_TCP]);
+               if (err) {
+                       mlx5e_del_eth_addr_from_flow_table(priv, ai);
+                       return err;
+               }
+               ai->tt_vec |= (1 << MLX5E_TT_IPV4_TCP);
+       }
+
+       if (tt_vec & (1 << MLX5E_TT_IPV6_TCP)) {
+               MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+                        ETH_P_IPV6);
+               MLX5_SET(dest_format_struct, dest, destination_id,
+                        tirn[MLX5E_TT_IPV6_TCP]);
+               err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+                                               match_criteria, flow_context,
+                                               &ai->ft_ix[MLX5E_TT_IPV6_TCP]);
+               if (err) {
+                       mlx5e_del_eth_addr_from_flow_table(priv, ai);
+                       return err;
+               }
+               ai->tt_vec |= (1 << MLX5E_TT_IPV6_TCP);
+       }
+
+       return 0;
+}
+
+static int mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
+                                  struct mlx5e_eth_addr_info *ai, int type)
+{
+       u32 *flow_context;
+       u32 *match_criteria;
+       int err;
+
+       flow_context   = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) +
+                                     MLX5_ST_SZ_BYTES(dest_format_struct));
+       match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+       if (!flow_context || !match_criteria) {
+               netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
+               err = -ENOMEM;
+               goto add_eth_addr_rule_out;
+       }
+
+       err = __mlx5e_add_eth_addr_rule(priv, ai, type, flow_context,
+                                       match_criteria);
+       if (err)
+               netdev_err(priv->netdev, "%s: failed\n", __func__);
+
+add_eth_addr_rule_out:
+       kvfree(match_criteria);
+       kvfree(flow_context);
+       return err;
+}
+
+enum mlx5e_vlan_rule_type {
+       MLX5E_VLAN_RULE_TYPE_UNTAGGED,
+       MLX5E_VLAN_RULE_TYPE_ANY_VID,
+       MLX5E_VLAN_RULE_TYPE_MATCH_VID,
+};
+
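+/* All VLAN rules forward to the main flow table, where DMAC/ethertype
+ * classification takes place.
+ */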
+static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
+                              enum mlx5e_vlan_rule_type rule_type, u16 vid)
+{
+       u8 match_criteria_enable = 0;
+       u32 *flow_context;
+       void *match_value;
+       void *dest;
+       u32 *match_criteria;
+       u32 *ft_ix;
+       int err;
+
+       flow_context   = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) +
+                                     MLX5_ST_SZ_BYTES(dest_format_struct));
+       match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+       if (!flow_context || !match_criteria) {
+               netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
+               err = -ENOMEM;
+               goto add_vlan_rule_out;
+       }
+       match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
+       dest = MLX5_ADDR_OF(flow_context, flow_context, destination);
+
+       MLX5_SET(flow_context, flow_context, action,
+                MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
+       MLX5_SET(flow_context, flow_context, destination_list_size, 1);
+       MLX5_SET(dest_format_struct, dest, destination_type,
+                MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE);
+       MLX5_SET(dest_format_struct, dest, destination_id,
+                mlx5_get_flow_table_id(priv->ft.main));
+
+       match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+       MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+                        outer_headers.vlan_tag);
+
+       switch (rule_type) {
+       case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
+               ft_ix = &priv->vlan.untagged_rule_ft_ix;
+               break;
+       case MLX5E_VLAN_RULE_TYPE_ANY_VID:
+               ft_ix = &priv->vlan.any_vlan_rule_ft_ix;
+               MLX5_SET(fte_match_param, match_value, outer_headers.vlan_tag,
+                        1);
+               break;
+       default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
+               ft_ix = &priv->vlan.active_vlans_ft_ix[vid];
+               MLX5_SET(fte_match_param, match_value, outer_headers.vlan_tag,
+                        1);
+               MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+                                outer_headers.first_vid);
+               MLX5_SET(fte_match_param, match_value, outer_headers.first_vid,
+                        vid);
+               break;
+       }
+
+       err = mlx5_add_flow_table_entry(priv->ft.vlan, match_criteria_enable,
+                                       match_criteria, flow_context, ft_ix);
+       if (err)
+               netdev_err(priv->netdev, "%s: failed\n", __func__);
+
+add_vlan_rule_out:
+       kvfree(match_criteria);
+       kvfree(flow_context);
+       return err;
+}
+
+static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
+                               enum mlx5e_vlan_rule_type rule_type, u16 vid)
+{
+       switch (rule_type) {
+       case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
+               mlx5_del_flow_table_entry(priv->ft.vlan,
+                                         priv->vlan.untagged_rule_ft_ix);
+               break;
+       case MLX5E_VLAN_RULE_TYPE_ANY_VID:
+               mlx5_del_flow_table_entry(priv->ft.vlan,
+                                         priv->vlan.any_vlan_rule_ft_ix);
+               break;
+       case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
+               mlx5_del_flow_table_entry(priv->ft.vlan,
+                                         priv->vlan.active_vlans_ft_ix[vid]);
+               break;
+       }
+}
+
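+/* VLAN filtering is implemented by removing (enable) or adding (disable)
+ * the catch-all "any VID" rule; per-VID rules stay in place either way.
+ */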
+void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
+{
+       WARN_ON(!mutex_is_locked(&priv->state_lock));
+
+       if (priv->vlan.filter_disabled) {
+               priv->vlan.filter_disabled = false;
+               if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+                       mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
+                                           0);
+       }
+}
+
+void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
+{
+       WARN_ON(!mutex_is_locked(&priv->state_lock));
+
+       if (!priv->vlan.filter_disabled) {
+               priv->vlan.filter_disabled = true;
+               if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+                       mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
+                                           0);
+       }
+}
+
+int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
+                         u16 vid)
+{
+       struct mlx5e_priv *priv = netdev_priv(dev);
+       int err = 0;
+
+       mutex_lock(&priv->state_lock);
+
+       set_bit(vid, priv->vlan.active_vlans);
+       if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+               err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID,
+                                         vid);
+
+       mutex_unlock(&priv->state_lock);
+
+       return err;
+}
+
+int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
+                          u16 vid)
+{
+       struct mlx5e_priv *priv = netdev_priv(dev);
+
+       mutex_lock(&priv->state_lock);
+
+       clear_bit(vid, priv->vlan.active_vlans);
+       if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+               mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
+
+       mutex_unlock(&priv->state_lock);
+
+       return 0;
+}
+
+int mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv)
+{
+       u16 vid;
+       int err;
+
+       for_each_set_bit(vid, priv->vlan.active_vlans, VLAN_N_VID) {
+               err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID,
+                                         vid);
+               if (err)
+                       return err;
+       }
+
+       err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
+       if (err)
+               return err;
+
+       if (priv->vlan.filter_disabled) {
+               err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
+                                         0);
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
+
+void mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv)
+{
+       u16 vid;
+
+       if (priv->vlan.filter_disabled)
+               mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
+
+       mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
+
+       for_each_set_bit(vid, priv->vlan.active_vlans, VLAN_N_VID)
+               mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
+}
+
+#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
+       for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \
+               hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
+
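+/* Apply the pending action recorded on a hash node: ADD programs the
+ * address into the flow table, DEL removes its rules and drops the node
+ * from the hash.
+ */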
+static void mlx5e_execute_action(struct mlx5e_priv *priv,
+                                struct mlx5e_eth_addr_hash_node *hn)
+{
+       switch (hn->action) {
+       case MLX5E_ACTION_ADD:
+               mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH);
+               hn->action = MLX5E_ACTION_NONE;
+               break;
+
+       case MLX5E_ACTION_DEL:
+               mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai);
+               mlx5e_del_eth_addr_from_hash(hn);
+               break;
+       }
+}
+
+static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
+{
+       struct net_device *netdev = priv->netdev;
+       struct netdev_hw_addr *ha;
+
+       netif_addr_lock_bh(netdev);
+
+       mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc,
+                                  priv->netdev->dev_addr);
+
+       netdev_for_each_uc_addr(ha, netdev)
+               mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc, ha->addr);
+
+       netdev_for_each_mc_addr(ha, netdev)
+               mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_mc, ha->addr);
+
+       netif_addr_unlock_bh(netdev);
+}
+
+static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv)
+{
+       struct mlx5e_eth_addr_hash_node *hn;
+       struct hlist_node *tmp;
+       int i;
+
+       mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i)
+               mlx5e_execute_action(priv, hn);
+
+       mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i)
+               mlx5e_execute_action(priv, hn);
+}
+
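+/* Mark and sweep: mark every known address for deletion, re-sync from the
+ * netdev UC/MC lists (which re-marks still-present addresses), then apply
+ * the resulting per-address actions.
+ */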
+static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
+{
+       struct mlx5e_eth_addr_hash_node *hn;
+       struct hlist_node *tmp;
+       int i;
+
+       mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i)
+               hn->action = MLX5E_ACTION_DEL;
+       mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i)
+               hn->action = MLX5E_ACTION_DEL;
+
+       if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+               mlx5e_sync_netdev_addr(priv);
+
+       mlx5e_apply_netdev_addr(priv);
+}
+
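+/* Compute the desired promisc/allmulti/broadcast state from the netdev
+ * flags and program only the deltas against what is currently enabled.
+ */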
+void mlx5e_set_rx_mode_core(struct mlx5e_priv *priv)
+{
+       struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
+       struct net_device *ndev = priv->netdev;
+
+       bool rx_mode_enable    = test_bit(MLX5E_STATE_OPENED, &priv->state);
+       bool promisc_enabled   = rx_mode_enable && (ndev->flags & IFF_PROMISC);
+       bool allmulti_enabled  = rx_mode_enable && (ndev->flags & IFF_ALLMULTI);
+       bool broadcast_enabled = rx_mode_enable;
+
+       bool enable_promisc    = !ea->promisc_enabled   &&  promisc_enabled;
+       bool disable_promisc   =  ea->promisc_enabled   && !promisc_enabled;
+       bool enable_allmulti   = !ea->allmulti_enabled  &&  allmulti_enabled;
+       bool disable_allmulti  =  ea->allmulti_enabled  && !allmulti_enabled;
+       bool enable_broadcast  = !ea->broadcast_enabled &&  broadcast_enabled;
+       bool disable_broadcast =  ea->broadcast_enabled && !broadcast_enabled;
+
+       if (enable_promisc)
+               mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC);
+       if (enable_allmulti)
+               mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
+       if (enable_broadcast)
+               mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);
+
+       mlx5e_handle_netdev_addr(priv);
+
+       if (disable_broadcast)
+               mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast);
+       if (disable_allmulti)
+               mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti);
+       if (disable_promisc)
+               mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc);
+
+       ea->promisc_enabled   = promisc_enabled;
+       ea->allmulti_enabled  = allmulti_enabled;
+       ea->broadcast_enabled = broadcast_enabled;
+}
+
+void mlx5e_set_rx_mode_work(struct work_struct *work)
+{
+       struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
+                                              set_rx_mode_work);
+
+       mutex_lock(&priv->state_lock);
+       if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+               mlx5e_set_rx_mode_core(priv);
+       mutex_unlock(&priv->state_lock);
+}
+
+void mlx5e_init_eth_addr(struct mlx5e_priv *priv)
+{
+       ether_addr_copy(priv->eth_addr.broadcast.addr, priv->netdev->broadcast);
+}
+
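+/* The main flow table is split into nine match groups: promiscuous-style
+ * rules with no DMAC match (g[0]-g[2]), full DMAC matches (g[3]-g[5]) and
+ * multicast-prefix matches on dmac[0] == 0x01 (g[6]-g[8]), each at three
+ * levels of specificity: ethertype + IP protocol, ethertype only, any.
+ */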
+static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv)
+{
+       struct mlx5_flow_table_group *g;
+       u8 *dmac;
+
+       g = kcalloc(9, sizeof(*g), GFP_KERNEL);
+       if (!g)
+               return -ENOMEM;
+
+       g[0].log_sz = 2;
+       g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+       MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
+                        outer_headers.ethertype);
+       MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
+                        outer_headers.ip_protocol);
+
+       g[1].log_sz = 1;
+       g[1].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+       MLX5_SET_TO_ONES(fte_match_param, g[1].match_criteria,
+                        outer_headers.ethertype);
+
+       g[2].log_sz = 0;
+
+       g[3].log_sz = 14;
+       g[3].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+       dmac = MLX5_ADDR_OF(fte_match_param, g[3].match_criteria,
+                           outer_headers.dmac_47_16);
+       memset(dmac, 0xff, ETH_ALEN);
+       MLX5_SET_TO_ONES(fte_match_param, g[3].match_criteria,
+                        outer_headers.ethertype);
+       MLX5_SET_TO_ONES(fte_match_param, g[3].match_criteria,
+                        outer_headers.ip_protocol);
+
+       g[4].log_sz = 13;
+       g[4].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+       dmac = MLX5_ADDR_OF(fte_match_param, g[4].match_criteria,
+                           outer_headers.dmac_47_16);
+       memset(dmac, 0xff, ETH_ALEN);
+       MLX5_SET_TO_ONES(fte_match_param, g[4].match_criteria,
+                        outer_headers.ethertype);
+
+       g[5].log_sz = 11;
+       g[5].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+       dmac = MLX5_ADDR_OF(fte_match_param, g[5].match_criteria,
+                           outer_headers.dmac_47_16);
+       memset(dmac, 0xff, ETH_ALEN);
+
+       g[6].log_sz = 2;
+       g[6].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+       dmac = MLX5_ADDR_OF(fte_match_param, g[6].match_criteria,
+                           outer_headers.dmac_47_16);
+       dmac[0] = 0x01;
+       MLX5_SET_TO_ONES(fte_match_param, g[6].match_criteria,
+                        outer_headers.ethertype);
+       MLX5_SET_TO_ONES(fte_match_param, g[6].match_criteria,
+                        outer_headers.ip_protocol);
+
+       g[7].log_sz = 1;
+       g[7].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+       dmac = MLX5_ADDR_OF(fte_match_param, g[7].match_criteria,
+                           outer_headers.dmac_47_16);
+       dmac[0] = 0x01;
+       MLX5_SET_TO_ONES(fte_match_param, g[7].match_criteria,
+                        outer_headers.ethertype);
+
+       g[8].log_sz = 0;
+       g[8].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+       dmac = MLX5_ADDR_OF(fte_match_param, g[8].match_criteria,
+                           outer_headers.dmac_47_16);
+       dmac[0] = 0x01;
+       priv->ft.main = mlx5_create_flow_table(priv->mdev, 1,
+                                              MLX5_FLOW_TABLE_TYPE_NIC_RCV,
+                                              9, g);
+       kfree(g);
+
+       return priv->ft.main ? 0 : -ENOMEM;
+}
+
+static void mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv)
+{
+       mlx5_destroy_flow_table(priv->ft.main);
+}
+
+static int mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
+{
+       struct mlx5_flow_table_group *g;
+
+       g = kcalloc(2, sizeof(*g), GFP_KERNEL);
+       if (!g)
+               return -ENOMEM;
+
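+       /* tagged, match on first vid */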
+       g[0].log_sz = 12;
+       g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+       MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
+                        outer_headers.vlan_tag);
+       MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
+                        outer_headers.first_vid);
+
+       /* untagged + any vlan id */
+       g[1].log_sz = 1;
+       g[1].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+       MLX5_SET_TO_ONES(fte_match_param, g[1].match_criteria,
+                        outer_headers.vlan_tag);
+
+       priv->ft.vlan = mlx5_create_flow_table(priv->mdev, 0,
+                                              MLX5_FLOW_TABLE_TYPE_NIC_RCV,
+                                              2, g);
+
+       kfree(g);
+       return priv->ft.vlan ? 0 : -ENOMEM;
+}
+
+static void mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv)
+{
+       mlx5_destroy_flow_table(priv->ft.vlan);
+}
+
+int mlx5e_open_flow_table(struct mlx5e_priv *priv)
+{
+       int err;
+
+       err = mlx5e_create_main_flow_table(priv);
+       if (err)
+               return err;
+
+       err = mlx5e_create_vlan_flow_table(priv);
+       if (err)
+               goto err_destroy_main_flow_table;
+
+       return 0;
+
+err_destroy_main_flow_table:
+       mlx5e_destroy_main_flow_table(priv);
+
+       return err;
+}
+
+void mlx5e_close_flow_table(struct mlx5e_priv *priv)
+{
+       mlx5e_destroy_vlan_flow_table(priv);
+       mlx5e_destroy_main_flow_table(priv);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
new file mode 100644 (file)
index 0000000..7348c51
--- /dev/null
@@ -0,0 +1,1899 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mlx5/flow_table.h>
+#include "en.h"
+
+struct mlx5e_rq_param {
+       u32                        rqc[MLX5_ST_SZ_DW(rqc)];
+       struct mlx5_wq_param       wq;
+};
+
+struct mlx5e_sq_param {
+       u32                        sqc[MLX5_ST_SZ_DW(sqc)];
+       struct mlx5_wq_param       wq;
+};
+
+struct mlx5e_cq_param {
+       u32                        cqc[MLX5_ST_SZ_DW(cqc)];
+       struct mlx5_wq_param       wq;
+       u16                        eq_ix;
+};
+
+struct mlx5e_channel_param {
+       struct mlx5e_rq_param      rq;
+       struct mlx5e_sq_param      sq;
+       struct mlx5e_cq_param      rx_cq;
+       struct mlx5e_cq_param      tx_cq;
+};
+
+static void mlx5e_update_carrier(struct mlx5e_priv *priv)
+{
+       struct mlx5_core_dev *mdev = priv->mdev;
+       u8 port_state;
+
+       port_state = mlx5_query_vport_state(mdev,
+               MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT);
+
+       if (port_state == VPORT_STATE_UP)
+               netif_carrier_on(priv->netdev);
+       else
+               netif_carrier_off(priv->netdev);
+}
+
+static void mlx5e_update_carrier_work(struct work_struct *work)
+{
+       struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
+                                              update_carrier_work);
+
+       mutex_lock(&priv->state_lock);
+       if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+               mlx5e_update_carrier(priv);
+       mutex_unlock(&priv->state_lock);
+}
+
+void mlx5e_update_stats(struct mlx5e_priv *priv)
+{
+       struct mlx5_core_dev *mdev = priv->mdev;
+       struct mlx5e_vport_stats *s = &priv->stats.vport;
+       struct mlx5e_rq_stats *rq_stats;
+       struct mlx5e_sq_stats *sq_stats;
+       u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
+       u32 *out;
+       int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
+       u64 tx_offload_none;
+       int i, j;
+
+       out = mlx5_vzalloc(outlen);
+       if (!out)
+               return;
+
+       /* Collect the SW counters first, then the HW counters, for consistency */
+       s->tso_packets          = 0;
+       s->tso_bytes            = 0;
+       s->tx_queue_stopped     = 0;
+       s->tx_queue_wake        = 0;
+       s->tx_queue_dropped     = 0;
+       tx_offload_none         = 0;
+       s->lro_packets          = 0;
+       s->lro_bytes            = 0;
+       s->rx_csum_none         = 0;
+       s->rx_wqe_err           = 0;
+       for (i = 0; i < priv->params.num_channels; i++) {
+               rq_stats = &priv->channel[i]->rq.stats;
+
+               s->lro_packets  += rq_stats->lro_packets;
+               s->lro_bytes    += rq_stats->lro_bytes;
+               s->rx_csum_none += rq_stats->csum_none;
+               s->rx_wqe_err   += rq_stats->wqe_err;
+
+               for (j = 0; j < priv->num_tc; j++) {
+                       sq_stats = &priv->channel[i]->sq[j].stats;
+
+                       s->tso_packets          += sq_stats->tso_packets;
+                       s->tso_bytes            += sq_stats->tso_bytes;
+                       s->tx_queue_stopped     += sq_stats->stopped;
+                       s->tx_queue_wake        += sq_stats->wake;
+                       s->tx_queue_dropped     += sq_stats->dropped;
+                       tx_offload_none         += sq_stats->csum_offload_none;
+               }
+       }
+
+       /* HW counters */
+       memset(in, 0, sizeof(in));
+
+       MLX5_SET(query_vport_counter_in, in, opcode,
+                MLX5_CMD_OP_QUERY_VPORT_COUNTER);
+       MLX5_SET(query_vport_counter_in, in, op_mod, 0);
+       MLX5_SET(query_vport_counter_in, in, other_vport, 0);
+
+       memset(out, 0, outlen);
+
+       if (mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen))
+               goto free_out;
+
+#define MLX5_GET_CTR(p, x) \
+       MLX5_GET64(query_vport_counter_out, p, x)
+
+       s->rx_error_packets     =
+               MLX5_GET_CTR(out, received_errors.packets);
+       s->rx_error_bytes       =
+               MLX5_GET_CTR(out, received_errors.octets);
+       s->tx_error_packets     =
+               MLX5_GET_CTR(out, transmit_errors.packets);
+       s->tx_error_bytes       =
+               MLX5_GET_CTR(out, transmit_errors.octets);
+
+       s->rx_unicast_packets   =
+               MLX5_GET_CTR(out, received_eth_unicast.packets);
+       s->rx_unicast_bytes     =
+               MLX5_GET_CTR(out, received_eth_unicast.octets);
+       s->tx_unicast_packets   =
+               MLX5_GET_CTR(out, transmitted_eth_unicast.packets);
+       s->tx_unicast_bytes     =
+               MLX5_GET_CTR(out, transmitted_eth_unicast.octets);
+
+       s->rx_multicast_packets =
+               MLX5_GET_CTR(out, received_eth_multicast.packets);
+       s->rx_multicast_bytes   =
+               MLX5_GET_CTR(out, received_eth_multicast.octets);
+       s->tx_multicast_packets =
+               MLX5_GET_CTR(out, transmitted_eth_multicast.packets);
+       s->tx_multicast_bytes   =
+               MLX5_GET_CTR(out, transmitted_eth_multicast.octets);
+
+       s->rx_broadcast_packets =
+               MLX5_GET_CTR(out, received_eth_broadcast.packets);
+       s->rx_broadcast_bytes   =
+               MLX5_GET_CTR(out, received_eth_broadcast.octets);
+       s->tx_broadcast_packets =
+               MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
+       s->tx_broadcast_bytes   =
+               MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);
+
+       s->rx_packets =
+               s->rx_unicast_packets +
+               s->rx_multicast_packets +
+               s->rx_broadcast_packets;
+       s->rx_bytes =
+               s->rx_unicast_bytes +
+               s->rx_multicast_bytes +
+               s->rx_broadcast_bytes;
+       s->tx_packets =
+               s->tx_unicast_packets +
+               s->tx_multicast_packets +
+               s->tx_broadcast_packets;
+       s->tx_bytes =
+               s->tx_unicast_bytes +
+               s->tx_multicast_bytes +
+               s->tx_broadcast_bytes;
+
+       /* Update calculated offload counters */
+       s->tx_csum_offload = s->tx_packets - tx_offload_none;
+       s->rx_csum_good    = s->rx_packets - s->rx_csum_none;
+
+free_out:
+       kvfree(out);
+}
+
+static void mlx5e_update_stats_work(struct work_struct *work)
+{
+       struct delayed_work *dwork = to_delayed_work(work);
+       struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv,
+                                              update_stats_work);
+
+       mutex_lock(&priv->state_lock);
+       if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+               mlx5e_update_stats(priv);
+               schedule_delayed_work(dwork,
+                                     msecs_to_jiffies(
+                                             MLX5E_UPDATE_STATS_INTERVAL));
+       }
+       mutex_unlock(&priv->state_lock);
+}
+
+static void __mlx5e_async_event(struct mlx5e_priv *priv,
+                               enum mlx5_dev_event event)
+{
+       switch (event) {
+       case MLX5_DEV_EVENT_PORT_UP:
+       case MLX5_DEV_EVENT_PORT_DOWN:
+               schedule_work(&priv->update_carrier_work);
+               break;
+
+       default:
+               break;
+       }
+}
+
+static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
+                             enum mlx5_dev_event event, unsigned long param)
+{
+       struct mlx5e_priv *priv = vpriv;
+
+       spin_lock(&priv->async_events_spinlock);
+       if (test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state))
+               __mlx5e_async_event(priv, event);
+       spin_unlock(&priv->async_events_spinlock);
+}
+
+static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
+{
+       set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
+}
+
+static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
+{
+       spin_lock_irq(&priv->async_events_spinlock);
+       clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
+       spin_unlock_irq(&priv->async_events_spinlock);
+}
+
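+/* Post a NOP WQE with CQE generation enabled; the resulting completion
+ * fires the channel's NAPI, e.g. to start posting RX WQEs.
+ */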
+static void mlx5e_send_nop(struct mlx5e_sq *sq)
+{
+       struct mlx5_wq_cyc                *wq  = &sq->wq;
+
+       u16 pi = sq->pc & wq->sz_m1;
+       struct mlx5e_tx_wqe              *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);
+
+       struct mlx5_wqe_ctrl_seg         *cseg = &wqe->ctrl;
+
+       memset(cseg, 0, sizeof(*cseg));
+
+       cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
+       cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | 0x01);
+       cseg->fm_ce_se         = MLX5_WQE_CTRL_CQ_UPDATE;
+
+       sq->skb[pi] = NULL;
+       sq->pc++;
+       mlx5e_tx_notify_hw(sq, wqe);
+}
+
+static int mlx5e_create_rq(struct mlx5e_channel *c,
+                          struct mlx5e_rq_param *param,
+                          struct mlx5e_rq *rq)
+{
+       struct mlx5e_priv *priv = c->priv;
+       struct mlx5_core_dev *mdev = priv->mdev;
+       void *rqc = param->rqc;
+       void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
+       int wq_sz;
+       int err;
+       int i;
+
+       err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
+                               &rq->wq_ctrl);
+       if (err)
+               return err;
+
+       rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];
+
+       wq_sz = mlx5_wq_ll_get_size(&rq->wq);
+       rq->skb = kzalloc_node(wq_sz * sizeof(*rq->skb), GFP_KERNEL,
+                              cpu_to_node(c->cpu));
+       if (!rq->skb) {
+               err = -ENOMEM;
+               goto err_rq_wq_destroy;
+       }
+
+       rq->wqe_sz = (priv->params.lro_en) ? priv->params.lro_wqe_sz :
+                               priv->netdev->mtu + ETH_HLEN + VLAN_HLEN;
+
+       for (i = 0; i < wq_sz; i++) {
+               struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
+
+               wqe->data.lkey       = c->mkey_be;
+               wqe->data.byte_count = cpu_to_be32(rq->wqe_sz);
+       }
+
+       rq->pdev    = c->pdev;
+       rq->netdev  = c->netdev;
+       rq->channel = c;
+       rq->ix      = c->ix;
+
+       return 0;
+
+err_rq_wq_destroy:
+       mlx5_wq_destroy(&rq->wq_ctrl);
+
+       return err;
+}
+
+static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
+{
+       kfree(rq->skb);
+       mlx5_wq_destroy(&rq->wq_ctrl);
+}
+
+static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
+{
+       struct mlx5e_channel *c = rq->channel;
+       struct mlx5e_priv *priv = c->priv;
+       struct mlx5_core_dev *mdev = priv->mdev;
+
+       void *in;
+       void *rqc;
+       void *wq;
+       int inlen;
+       int err;
+
+       inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
+               sizeof(u64) * rq->wq_ctrl.buf.npages;
+       in = mlx5_vzalloc(inlen);
+       if (!in)
+               return -ENOMEM;
+
+       rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
+       wq  = MLX5_ADDR_OF(rqc, rqc, wq);
+
+       memcpy(rqc, param->rqc, sizeof(param->rqc));
+
+       MLX5_SET(rqc,  rqc, cqn,                c->rq.cq.mcq.cqn);
+       MLX5_SET(rqc,  rqc, state,              MLX5_RQC_STATE_RST);
+       MLX5_SET(rqc,  rqc, flush_in_error_en,  1);
+       MLX5_SET(wq,   wq,  wq_type,            MLX5_WQ_TYPE_LINKED_LIST);
+       MLX5_SET(wq,   wq,  log_wq_pg_sz,       rq->wq_ctrl.buf.page_shift -
+                                               PAGE_SHIFT);
+       MLX5_SET64(wq, wq,  dbr_addr,           rq->wq_ctrl.db.dma);
+
+       mlx5_fill_page_array(&rq->wq_ctrl.buf,
+                            (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
+
+       err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);
+
+       kvfree(in);
+
+       return err;
+}
+
+static int mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state)
+{
+       struct mlx5e_channel *c = rq->channel;
+       struct mlx5e_priv *priv = c->priv;
+       struct mlx5_core_dev *mdev = priv->mdev;
+
+       void *in;
+       void *rqc;
+       int inlen;
+       int err;
+
+       inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
+       in = mlx5_vzalloc(inlen);
+       if (!in)
+               return -ENOMEM;
+
+       rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
+
+       MLX5_SET(modify_rq_in, in, rq_state, curr_state);
+       MLX5_SET(rqc, rqc, state, next_state);
+
+       err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
+
+       kvfree(in);
+
+       return err;
+}
+
+static void mlx5e_disable_rq(struct mlx5e_rq *rq)
+{
+       struct mlx5e_channel *c = rq->channel;
+       struct mlx5e_priv *priv = c->priv;
+       struct mlx5_core_dev *mdev = priv->mdev;
+
+       mlx5_core_destroy_rq(mdev, rq->rqn);
+}
+
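+/* Poll for up to 20 seconds (1000 x 20ms) until the RQ holds at least
+ * min_rx_wqes posted receive WQEs.
+ */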
+static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
+{
+       struct mlx5e_channel *c = rq->channel;
+       struct mlx5e_priv *priv = c->priv;
+       struct mlx5_wq_ll *wq = &rq->wq;
+       int i;
+
+       for (i = 0; i < 1000; i++) {
+               if (wq->cur_sz >= priv->params.min_rx_wqes)
+                       return 0;
+
+               msleep(20);
+       }
+
+       return -ETIMEDOUT;
+}
+
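+/* Open an RQ: allocate the SW queue, create the HW object, move it from
+ * RST to RDY, then kick the first SQ with a NOP so NAPI starts refilling.
+ */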
+static int mlx5e_open_rq(struct mlx5e_channel *c,
+                        struct mlx5e_rq_param *param,
+                        struct mlx5e_rq *rq)
+{
+       int err;
+
+       err = mlx5e_create_rq(c, param, rq);
+       if (err)
+               return err;
+
+       err = mlx5e_enable_rq(rq, param);
+       if (err)
+               goto err_destroy_rq;
+
+       err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
+       if (err)
+               goto err_disable_rq;
+
+       set_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
+       mlx5e_send_nop(&c->sq[0]); /* trigger mlx5e_post_rx_wqes() */
+
+       return 0;
+
+err_disable_rq:
+       mlx5e_disable_rq(rq);
+err_destroy_rq:
+       mlx5e_destroy_rq(rq);
+
+       return err;
+}
+
+static void mlx5e_close_rq(struct mlx5e_rq *rq)
+{
+       clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
+       napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
+
+       mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
+       while (!mlx5_wq_ll_is_empty(&rq->wq))
+               msleep(20);
+
+       /* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */
+       napi_synchronize(&rq->channel->napi);
+
+       mlx5e_disable_rq(rq);
+       mlx5e_destroy_rq(rq);
+}
+
+static void mlx5e_free_sq_db(struct mlx5e_sq *sq)
+{
+       kfree(sq->dma_fifo);
+       kfree(sq->skb);
+}
+
+static int mlx5e_alloc_sq_db(struct mlx5e_sq *sq, int numa)
+{
+       int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
+       int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
+
+       sq->skb = kzalloc_node(wq_sz * sizeof(*sq->skb), GFP_KERNEL, numa);
+       sq->dma_fifo = kzalloc_node(df_sz * sizeof(*sq->dma_fifo), GFP_KERNEL,
+                                   numa);
+
+       if (!sq->skb || !sq->dma_fifo) {
+               mlx5e_free_sq_db(sq);
+               return -ENOMEM;
+       }
+
+       sq->dma_fifo_mask = df_sz - 1;
+
+       return 0;
+}
+
+static int mlx5e_create_sq(struct mlx5e_channel *c,
+                          int tc,
+                          struct mlx5e_sq_param *param,
+                          struct mlx5e_sq *sq)
+{
+       struct mlx5e_priv *priv = c->priv;
+       struct mlx5_core_dev *mdev = priv->mdev;
+
+       void *sqc = param->sqc;
+       void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
+       int err;
+
+       err = mlx5_alloc_map_uar(mdev, &sq->uar);
+       if (err)
+               return err;
+
+       err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
+                                &sq->wq_ctrl);
+       if (err)
+               goto err_unmap_free_uar;
+
+       sq->wq.db       = &sq->wq.db[MLX5_SND_DBR];
+       sq->uar_map     = sq->uar.map;
+       sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
+
+       err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu));
+       if (err)
+               goto err_sq_wq_destroy;
+
+       sq->txq = netdev_get_tx_queue(priv->netdev,
+                                     c->ix + tc * priv->params.num_channels);
+
+       sq->pdev    = c->pdev;
+       sq->mkey_be = c->mkey_be;
+       sq->channel = c;
+       sq->tc      = tc;
+
+       return 0;
+
+err_sq_wq_destroy:
+       mlx5_wq_destroy(&sq->wq_ctrl);
+
+err_unmap_free_uar:
+       mlx5_unmap_free_uar(mdev, &sq->uar);
+
+       return err;
+}
+
+static void mlx5e_destroy_sq(struct mlx5e_sq *sq)
+{
+       struct mlx5e_channel *c = sq->channel;
+       struct mlx5e_priv *priv = c->priv;
+
+       mlx5e_free_sq_db(sq);
+       mlx5_wq_destroy(&sq->wq_ctrl);
+       mlx5_unmap_free_uar(priv->mdev, &sq->uar);
+}
+
+static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
+{
+       struct mlx5e_channel *c = sq->channel;
+       struct mlx5e_priv *priv = c->priv;
+       struct mlx5_core_dev *mdev = priv->mdev;
+
+       void *in;
+       void *sqc;
+       void *wq;
+       int inlen;
+       int err;
+
+       inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
+               sizeof(u64) * sq->wq_ctrl.buf.npages;
+       in = mlx5_vzalloc(inlen);
+       if (!in)
+               return -ENOMEM;
+
+       sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
+       wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+       memcpy(sqc, param->sqc, sizeof(param->sqc));
+
+       MLX5_SET(sqc,  sqc, user_index,         sq->tc);
+       MLX5_SET(sqc,  sqc, tis_num_0,          priv->tisn[sq->tc]);
+       MLX5_SET(sqc,  sqc, cqn,                c->sq[sq->tc].cq.mcq.cqn);
+       MLX5_SET(sqc,  sqc, state,              MLX5_SQC_STATE_RST);
+       MLX5_SET(sqc,  sqc, tis_lst_sz,         1);
+       MLX5_SET(sqc,  sqc, flush_in_error_en,  1);
+
+       MLX5_SET(wq,   wq, wq_type,       MLX5_WQ_TYPE_CYCLIC);
+       MLX5_SET(wq,   wq, uar_page,      sq->uar.index);
+       MLX5_SET(wq,   wq, log_wq_pg_sz,  sq->wq_ctrl.buf.page_shift -
+                                         PAGE_SHIFT);
+       MLX5_SET64(wq, wq, dbr_addr,      sq->wq_ctrl.db.dma);
+
+       mlx5_fill_page_array(&sq->wq_ctrl.buf,
+                            (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
+
+       err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);
+
+       kvfree(in);
+
+       return err;
+}
+
+static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state)
+{
+       struct mlx5e_channel *c = sq->channel;
+       struct mlx5e_priv *priv = c->priv;
+       struct mlx5_core_dev *mdev = priv->mdev;
+
+       void *in;
+       void *sqc;
+       int inlen;
+       int err;
+
+       inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
+       in = mlx5_vzalloc(inlen);
+       if (!in)
+               return -ENOMEM;
+
+       sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
+
+       MLX5_SET(modify_sq_in, in, sq_state, curr_state);
+       MLX5_SET(sqc, sqc, state, next_state);
+
+       err = mlx5_core_modify_sq(mdev, sq->sqn, in, inlen);
+
+       kvfree(in);
+
+       return err;
+}
+
+static void mlx5e_disable_sq(struct mlx5e_sq *sq)
+{
+       struct mlx5e_channel *c = sq->channel;
+       struct mlx5e_priv *priv = c->priv;
+       struct mlx5_core_dev *mdev = priv->mdev;
+
+       mlx5_core_destroy_sq(mdev, sq->sqn);
+}
+
+static int mlx5e_open_sq(struct mlx5e_channel *c,
+                        int tc,
+                        struct mlx5e_sq_param *param,
+                        struct mlx5e_sq *sq)
+{
+       int err;
+
+       err = mlx5e_create_sq(c, tc, param, sq);
+       if (err)
+               return err;
+
+       err = mlx5e_enable_sq(sq, param);
+       if (err)
+               goto err_destroy_sq;
+
+       err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY);
+       if (err)
+               goto err_disable_sq;
+
+       set_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
+       netdev_tx_reset_queue(sq->txq);
+       netif_tx_start_queue(sq->txq);
+
+       return 0;
+
+err_disable_sq:
+       mlx5e_disable_sq(sq);
+err_destroy_sq:
+       mlx5e_destroy_sq(sq);
+
+       return err;
+}
+
+static inline void netif_tx_disable_queue(struct netdev_queue *txq)
+{
+       __netif_tx_lock_bh(txq);
+       netif_tx_stop_queue(txq);
+       __netif_tx_unlock_bh(txq);
+}
+
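+/* Drain the SQ before destroying it: stop the txq, post a final NOP so the
+ * doorbell covers all pending WQEs, move the SQ to error state and wait
+ * until the consumer counter catches up with the producer counter.
+ */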
+static void mlx5e_close_sq(struct mlx5e_sq *sq)
+{
+       clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
+       napi_synchronize(&sq->channel->napi); /* prevent netif_tx_wake_queue */
+       netif_tx_disable_queue(sq->txq);
+
+       /* ensure hw is notified of all pending wqes */
+       if (mlx5e_sq_has_room_for(sq, 1))
+               mlx5e_send_nop(sq);
+
+       mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
+       while (sq->cc != sq->pc) /* wait till sq is empty */
+               msleep(20);
+
+       /* avoid destroying sq before mlx5e_poll_tx_cq() is done with it */
+       napi_synchronize(&sq->channel->napi);
+
+       mlx5e_disable_sq(sq);
+       mlx5e_destroy_sq(sq);
+}
+
+static int mlx5e_create_cq(struct mlx5e_channel *c,
+                          struct mlx5e_cq_param *param,
+                          struct mlx5e_cq *cq)
+{
+       struct mlx5e_priv *priv = c->priv;
+       struct mlx5_core_dev *mdev = priv->mdev;
+       struct mlx5_core_cq *mcq = &cq->mcq;
+       int eqn_not_used;
+       int irqn;
+       int err;
+       u32 i;
+
+       param->wq.numa = cpu_to_node(c->cpu);
+       param->eq_ix   = c->ix;
+
+       err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
+                              &cq->wq_ctrl);
+       if (err)
+               return err;
+
+       mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
+
+       cq->napi        = &c->napi;
+
+       mcq->cqe_sz     = 64;
+       mcq->set_ci_db  = cq->wq_ctrl.db.db;
+       mcq->arm_db     = cq->wq_ctrl.db.db + 1;
+       *mcq->set_ci_db = 0;
+       *mcq->arm_db    = 0;
+       mcq->vector     = param->eq_ix;
+       mcq->comp       = mlx5e_completion_event;
+       mcq->event      = mlx5e_cq_error_event;
+       mcq->irqn       = irqn;
+       mcq->uar        = &priv->cq_uar;
+
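+       /* stamp CQEs with an invalid opcode/ownership value so none is
+        * mistaken for a valid completion before HW writes it
+        */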
+       for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
+               struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
+
+               cqe->op_own = 0xf1;
+       }
+
+       cq->channel = c;
+
+       return 0;
+}
+
+static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
+{
+       mlx5_wq_destroy(&cq->wq_ctrl);
+}
+
+static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
+{
+       struct mlx5e_channel *c = cq->channel;
+       struct mlx5e_priv *priv = c->priv;
+       struct mlx5_core_dev *mdev = priv->mdev;
+       struct mlx5_core_cq *mcq = &cq->mcq;
+
+       void *in;
+       void *cqc;
+       int inlen;
+       int irqn_not_used;
+       int eqn;
+       int err;
+
+       inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
+               sizeof(u64) * cq->wq_ctrl.buf.npages;
+       in = mlx5_vzalloc(inlen);
+       if (!in)
+               return -ENOMEM;
+
+       cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
+
+       memcpy(cqc, param->cqc, sizeof(param->cqc));
+
+       mlx5_fill_page_array(&cq->wq_ctrl.buf,
+                            (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
+
+       mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
+
+       MLX5_SET(cqc,   cqc, c_eqn,         eqn);
+       MLX5_SET(cqc,   cqc, uar_page,      mcq->uar->index);
+       MLX5_SET(cqc,   cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
+                                           PAGE_SHIFT);
+       MLX5_SET64(cqc, cqc, dbr_addr,      cq->wq_ctrl.db.dma);
+
+       err = mlx5_core_create_cq(mdev, mcq, in, inlen);
+
+       kvfree(in);
+
+       if (err)
+               return err;
+
+       mlx5e_cq_arm(cq);
+
+       return 0;
+}
+
+static void mlx5e_disable_cq(struct mlx5e_cq *cq)
+{
+       struct mlx5e_channel *c = cq->channel;
+       struct mlx5e_priv *priv = c->priv;
+       struct mlx5_core_dev *mdev = priv->mdev;
+
+       mlx5_core_destroy_cq(mdev, &cq->mcq);
+}
+
+static int mlx5e_open_cq(struct mlx5e_channel *c,
+                        struct mlx5e_cq_param *param,
+                        struct mlx5e_cq *cq,
+                        u16 moderation_usecs,
+                        u16 moderation_frames)
+{
+       int err;
+       struct mlx5e_priv *priv = c->priv;
+       struct mlx5_core_dev *mdev = priv->mdev;
+
+       err = mlx5e_create_cq(c, param, cq);
+       if (err)
+               return err;
+
+       err = mlx5e_enable_cq(cq, param);
+       if (err)
+               goto err_destroy_cq;
+
+       err = mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
+                                            moderation_usecs,
+                                            moderation_frames);
+       if (err)
+               goto err_destroy_cq;
+
+       return 0;
+
+err_destroy_cq:
+       mlx5e_destroy_cq(cq);
+
+       return err;
+}
+
+static void mlx5e_close_cq(struct mlx5e_cq *cq)
+{
+       mlx5e_disable_cq(cq);
+       mlx5e_destroy_cq(cq);
+}
+
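+/* First CPU in the affinity mask of the channel's IRQ vector. */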
+static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
+{
+       return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
+}
+
+static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
+                            struct mlx5e_channel_param *cparam)
+{
+       struct mlx5e_priv *priv = c->priv;
+       int err;
+       int tc;
+
+       for (tc = 0; tc < c->num_tc; tc++) {
+               err = mlx5e_open_cq(c, &cparam->tx_cq, &c->sq[tc].cq,
+                                   priv->params.tx_cq_moderation_usec,
+                                   priv->params.tx_cq_moderation_pkts);
+               if (err)
+                       goto err_close_tx_cqs;
+
+               c->sq[tc].cq.sqrq = &c->sq[tc];
+       }
+
+       return 0;
+
+err_close_tx_cqs:
+       for (tc--; tc >= 0; tc--)
+               mlx5e_close_cq(&c->sq[tc].cq);
+
+       return err;
+}
+
+static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
+{
+       int tc;
+
+       for (tc = 0; tc < c->num_tc; tc++)
+               mlx5e_close_cq(&c->sq[tc].cq);
+}
+
+static int mlx5e_open_sqs(struct mlx5e_channel *c,
+                         struct mlx5e_channel_param *cparam)
+{
+       int err;
+       int tc;
+
+       for (tc = 0; tc < c->num_tc; tc++) {
+               err = mlx5e_open_sq(c, tc, &cparam->sq, &c->sq[tc]);
+               if (err)
+                       goto err_close_sqs;
+       }
+
+       return 0;
+
+err_close_sqs:
+       for (tc--; tc >= 0; tc--)
+               mlx5e_close_sq(&c->sq[tc]);
+
+       return err;
+}
+
+static void mlx5e_close_sqs(struct mlx5e_channel *c)
+{
+       int tc;
+
+       for (tc = 0; tc < c->num_tc; tc++)
+               mlx5e_close_sq(&c->sq[tc]);
+}
+
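+/* A channel bundles one RQ, one SQ per TC, their CQs and a NAPI context,
+ * with memory allocated on the node of the CPU that services its IRQ.
+ */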
+static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
+                             struct mlx5e_channel_param *cparam,
+                             struct mlx5e_channel **cp)
+{
+       struct net_device *netdev = priv->netdev;
+       int cpu = mlx5e_get_cpu(priv, ix);
+       struct mlx5e_channel *c;
+       int err;
+
+       c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
+       if (!c)
+               return -ENOMEM;
+
+       c->priv     = priv;
+       c->ix       = ix;
+       c->cpu      = cpu;
+       c->pdev     = &priv->mdev->pdev->dev;
+       c->netdev   = priv->netdev;
+       c->mkey_be  = cpu_to_be32(priv->mr.key);
+       c->num_tc   = priv->num_tc;
+
+       netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
+
+       err = mlx5e_open_tx_cqs(c, cparam);
+       if (err)
+               goto err_napi_del;
+
+       err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq,
+                           priv->params.rx_cq_moderation_usec,
+                           priv->params.rx_cq_moderation_pkts);
+       if (err)
+               goto err_close_tx_cqs;
+       c->rq.cq.sqrq = &c->rq;
+
+       napi_enable(&c->napi);
+
+       err = mlx5e_open_sqs(c, cparam);
+       if (err)
+               goto err_disable_napi;
+
+       err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
+       if (err)
+               goto err_close_sqs;
+
+       netif_set_xps_queue(netdev, get_cpu_mask(c->cpu), ix);
+       *cp = c;
+
+       return 0;
+
+err_close_sqs:
+       mlx5e_close_sqs(c);
+
+err_disable_napi:
+       napi_disable(&c->napi);
+       mlx5e_close_cq(&c->rq.cq);
+
+err_close_tx_cqs:
+       mlx5e_close_tx_cqs(c);
+
+err_napi_del:
+       netif_napi_del(&c->napi);
+       kfree(c);
+
+       return err;
+}
+
+static void mlx5e_close_channel(struct mlx5e_channel *c)
+{
+       mlx5e_close_rq(&c->rq);
+       mlx5e_close_sqs(c);
+       napi_disable(&c->napi);
+       mlx5e_close_cq(&c->rq.cq);
+       mlx5e_close_tx_cqs(c);
+       netif_napi_del(&c->napi);
+       kfree(c);
+}
+
+static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
+                                struct mlx5e_rq_param *param)
+{
+       void *rqc = param->rqc;
+       void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
+
+       MLX5_SET(wq, wq, wq_type,          MLX5_WQ_TYPE_LINKED_LIST);
+       MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
+       MLX5_SET(wq, wq, log_wq_stride,    ilog2(sizeof(struct mlx5e_rx_wqe)));
+       MLX5_SET(wq, wq, log_wq_sz,        priv->params.log_rq_size);
+       MLX5_SET(wq, wq, pd,               priv->pdn);
+
+       param->wq.numa   = dev_to_node(&priv->mdev->pdev->dev);
+       param->wq.linear = 1;
+}
+
+static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
+                                struct mlx5e_sq_param *param)
+{
+       void *sqc = param->sqc;
+       void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+       MLX5_SET(wq, wq, log_wq_sz,     priv->params.log_sq_size);
+       MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
+       MLX5_SET(wq, wq, pd,            priv->pdn);
+
+       param->wq.numa = dev_to_node(&priv->mdev->pdev->dev);
+}
+
+static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
+                                       struct mlx5e_cq_param *param)
+{
+       void *cqc = param->cqc;
+
+       MLX5_SET(cqc, cqc, uar_page, priv->cq_uar.index);
+}
+
+static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
+                                   struct mlx5e_cq_param *param)
+{
+       void *cqc = param->cqc;
+
+       MLX5_SET(cqc, cqc, log_cq_size,  priv->params.log_rq_size);
+
+       mlx5e_build_common_cq_param(priv, param);
+}
+
+static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
+                                   struct mlx5e_cq_param *param)
+{
+       void *cqc = param->cqc;
+
+       MLX5_SET(cqc, cqc, log_cq_size,  priv->params.log_sq_size);
+
+       mlx5e_build_common_cq_param(priv, param);
+}
+
+static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
+                                     struct mlx5e_channel_param *cparam)
+{
+       memset(cparam, 0, sizeof(*cparam));
+
+       mlx5e_build_rq_param(priv, &cparam->rq);
+       mlx5e_build_sq_param(priv, &cparam->sq);
+       mlx5e_build_rx_cq_param(priv, &cparam->rx_cq);
+       mlx5e_build_tx_cq_param(priv, &cparam->tx_cq);
+}
+
+static int mlx5e_open_channels(struct mlx5e_priv *priv)
+{
+       struct mlx5e_channel_param cparam;
+       int err;
+       int i;
+       int j;
+
+       priv->channel = kcalloc(priv->params.num_channels,
+                               sizeof(struct mlx5e_channel *), GFP_KERNEL);
+       if (!priv->channel)
+               return -ENOMEM;
+
+       mlx5e_build_channel_param(priv, &cparam);
+       for (i = 0; i < priv->params.num_channels; i++) {
+               err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]);
+               if (err)
+                       goto err_close_channels;
+       }
+
+       for (j = 0; j < priv->params.num_channels; j++) {
+               err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j]->rq);
+               if (err)
+                       goto err_close_channels;
+       }
+
+       return 0;
+
+err_close_channels:
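+       /* unwind in reverse: close only the channels opened successfully */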
+       for (i--; i >= 0; i--)
+               mlx5e_close_channel(priv->channel[i]);
+
+       kfree(priv->channel);
+
+       return err;
+}
+
+static void mlx5e_close_channels(struct mlx5e_priv *priv)
+{
+       int i;
+
+       for (i = 0; i < priv->params.num_channels; i++)
+               mlx5e_close_channel(priv->channel[i]);
+
+       kfree(priv->channel);
+}
+
+static int mlx5e_open_tis(struct mlx5e_priv *priv, int tc)
+{
+       struct mlx5_core_dev *mdev = priv->mdev;
+       u32 in[MLX5_ST_SZ_DW(create_tis_in)];
+       void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
+
+       memset(in, 0, sizeof(in));
+
+       MLX5_SET(tisc, tisc, prio,  tc);
+
+       return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
+}
+
+static void mlx5e_close_tis(struct mlx5e_priv *priv, int tc)
+{
+       mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
+}
+
+static int mlx5e_open_tises(struct mlx5e_priv *priv)
+{
+       int num_tc = priv->num_tc;
+       int err;
+       int tc;
+
+       for (tc = 0; tc < num_tc; tc++) {
+               err = mlx5e_open_tis(priv, tc);
+               if (err)
+                       goto err_close_tises;
+       }
+
+       return 0;
+
+err_close_tises:
+       for (tc--; tc >= 0; tc--)
+               mlx5e_close_tis(priv, tc);
+
+       return err;
+}
+
+static void mlx5e_close_tises(struct mlx5e_priv *priv)
+{
+       int num_tc = priv->num_tc;
+       int tc;
+
+       for (tc = 0; tc < num_tc; tc++)
+               mlx5e_close_tis(priv, tc);
+}
+
+static int mlx5e_open_rqt(struct mlx5e_priv *priv)
+{
+       struct mlx5_core_dev *mdev = priv->mdev;
+       u32 *in;
+       u32 out[MLX5_ST_SZ_DW(create_rqt_out)];
+       void *rqtc;
+       int inlen;
+       int err;
+       int sz;
+       int i;
+
+       sz = 1 << priv->params.rx_hash_log_tbl_sz;
+
+       inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
+       in = mlx5_vzalloc(inlen);
+       if (!in)
+               return -ENOMEM;
+
+       rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
+
+       MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
+       MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
+
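+       /* spread the RQs across the indirection table entries round-robin */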
+       for (i = 0; i < sz; i++) {
+               int ix = i % priv->params.num_channels;
+
+               MLX5_SET(rqtc, rqtc, rq_num[i], priv->channel[ix]->rq.rqn);
+       }
+
+       MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
+
+       memset(out, 0, sizeof(out));
+       err = mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out));
+       if (!err)
+               priv->rqtn = MLX5_GET(create_rqt_out, out, rqtn);
+
+       kvfree(in);
+
+       return err;
+}
+
+static void mlx5e_close_rqt(struct mlx5e_priv *priv)
+{
+       u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)];
+       u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)];
+
+       memset(in, 0, sizeof(in));
+
+       MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
+       MLX5_SET(destroy_rqt_in, in, rqtn, priv->rqtn);
+
+       mlx5_cmd_exec_check_status(priv->mdev, in, sizeof(in), out,
+                                  sizeof(out));
+}
+
+static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
+{
+       void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+
+#define ROUGH_MAX_L2_L3_HDR_SZ 256
+
+#define MLX5_HASH_IP     (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+                         MLX5_HASH_FIELD_SEL_DST_IP)
+
+#define MLX5_HASH_ALL    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+                         MLX5_HASH_FIELD_SEL_DST_IP   |\
+                         MLX5_HASH_FIELD_SEL_L4_SPORT |\
+                         MLX5_HASH_FIELD_SEL_L4_DPORT)
+
+       if (priv->params.lro_en) {
+               MLX5_SET(tirc, tirc, lro_enable_mask,
+                        MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
+                        MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
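+               /* the field is programmed in 256-byte units, hence the >> 8 */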
+               MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
+                        (priv->params.lro_wqe_sz -
+                         ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
+               MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
+                        MLX5_CAP_ETH(priv->mdev,
+                                     lro_timer_supported_periods[3]));
+       }
+
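+       /* MLX5E_TT_ANY dispatches straight to channel 0's RQ; every other
+        * traffic type goes through the RSS indirection table with a
+        * symmetric Toeplitz hash
+        */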
+       switch (tt) {
+       case MLX5E_TT_ANY:
+               MLX5_SET(tirc, tirc, disp_type,
+                        MLX5_TIRC_DISP_TYPE_DIRECT);
+               MLX5_SET(tirc, tirc, inline_rqn,
+                        priv->channel[0]->rq.rqn);
+               break;
+       default:
+               MLX5_SET(tirc, tirc, disp_type,
+                        MLX5_TIRC_DISP_TYPE_INDIRECT);
+               MLX5_SET(tirc, tirc, indirect_table,
+                        priv->rqtn);
+               MLX5_SET(tirc, tirc, rx_hash_fn,
+                        MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ);
+               MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
+               netdev_rss_key_fill(MLX5_ADDR_OF(tirc, tirc,
+                                                rx_hash_toeplitz_key),
+                                   MLX5_FLD_SZ_BYTES(tirc,
+                                                     rx_hash_toeplitz_key));
+               break;
+       }
+
+       switch (tt) {
+       case MLX5E_TT_IPV4_TCP:
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV4);
+               MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+                        MLX5_L4_PROT_TYPE_TCP);
+               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                        MLX5_HASH_ALL);
+               break;
+
+       case MLX5E_TT_IPV6_TCP:
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV6);
+               MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+                        MLX5_L4_PROT_TYPE_TCP);
+               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                        MLX5_HASH_ALL);
+               break;
+
+       case MLX5E_TT_IPV4_UDP:
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV4);
+               MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+                        MLX5_L4_PROT_TYPE_UDP);
+               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                        MLX5_HASH_ALL);
+               break;
+
+       case MLX5E_TT_IPV6_UDP:
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV6);
+               MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+                        MLX5_L4_PROT_TYPE_UDP);
+               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                        MLX5_HASH_ALL);
+               break;
+
+       case MLX5E_TT_IPV4:
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV4);
+               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                        MLX5_HASH_IP);
+               break;
+
+       case MLX5E_TT_IPV6:
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV6);
+               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                        MLX5_HASH_IP);
+               break;
+       }
+}
+
+static int mlx5e_open_tir(struct mlx5e_priv *priv, int tt)
+{
+       struct mlx5_core_dev *mdev = priv->mdev;
+       u32 *in;
+       void *tirc;
+       int inlen;
+       int err;
+
+       inlen = MLX5_ST_SZ_BYTES(create_tir_in);
+       in = mlx5_vzalloc(inlen);
+       if (!in)
+               return -ENOMEM;
+
+       tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+
+       mlx5e_build_tir_ctx(priv, tirc, tt);
+
+       err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]);
+
+       kvfree(in);
+
+       return err;
+}
+
+static void mlx5e_close_tir(struct mlx5e_priv *priv, int tt)
+{
+       mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]);
+}
+
+static int mlx5e_open_tirs(struct mlx5e_priv *priv)
+{
+       int err;
+       int i;
+
+       for (i = 0; i < MLX5E_NUM_TT; i++) {
+               err = mlx5e_open_tir(priv, i);
+               if (err)
+                       goto err_close_tirs;
+       }
+
+       return 0;
+
+err_close_tirs:
+       for (i--; i >= 0; i--)
+               mlx5e_close_tir(priv, i);
+
+       return err;
+}
+
+static void mlx5e_close_tirs(struct mlx5e_priv *priv)
+{
+       int i;
+
+       for (i = 0; i < MLX5E_NUM_TT; i++)
+               mlx5e_close_tir(priv, i);
+}
+
+int mlx5e_open_locked(struct net_device *netdev)
+{
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+       struct mlx5_core_dev *mdev = priv->mdev;
+       int actual_mtu;
+       int num_txqs;
+       int err;
+
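+       /* tx queues are laid out as num_tc blocks of pow2(num_channels), so
+        * the tc index can occupy the high bits of the queue mapping
+        * (see mlx5e_select_queue)
+        */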
+       num_txqs = roundup_pow_of_two(priv->params.num_channels) *
+                  priv->params.num_tc;
+       netif_set_real_num_tx_queues(netdev, num_txqs);
+       netif_set_real_num_rx_queues(netdev, priv->params.num_channels);
+
+       err = mlx5_set_port_mtu(mdev, netdev->mtu);
+       if (err) {
+               netdev_err(netdev, "%s: mlx5_set_port_mtu failed %d\n",
+                          __func__, err);
+               return err;
+       }
+
+       err = mlx5_query_port_oper_mtu(mdev, &actual_mtu, 1);
+       if (err) {
+               netdev_err(netdev, "%s: mlx5_query_port_oper_mtu failed %d\n",
+                          __func__, err);
+               return err;
+       }
+
+       if (actual_mtu != netdev->mtu)
+               netdev_warn(netdev, "%s: Failed to set MTU to %d\n",
+                           __func__, netdev->mtu);
+
+       netdev->mtu = actual_mtu;
+
+       err = mlx5e_open_tises(priv);
+       if (err) {
+               netdev_err(netdev, "%s: mlx5e_open_tises failed, %d\n",
+                          __func__, err);
+               return err;
+       }
+
+       err = mlx5e_open_channels(priv);
+       if (err) {
+               netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
+                          __func__, err);
+               goto err_close_tises;
+       }
+
+       err = mlx5e_open_rqt(priv);
+       if (err) {
+               netdev_err(netdev, "%s: mlx5e_open_rqt failed, %d\n",
+                          __func__, err);
+               goto err_close_channels;
+       }
+
+       err = mlx5e_open_tirs(priv);
+       if (err) {
+               netdev_err(netdev, "%s: mlx5e_open_tirs failed, %d\n",
+                          __func__, err);
+               goto err_close_rqt;
+       }
+
+       err = mlx5e_open_flow_table(priv);
+       if (err) {
+               netdev_err(netdev, "%s: mlx5e_open_flow_table failed, %d\n",
+                          __func__, err);
+               goto err_close_tirs;
+       }
+
+       err = mlx5e_add_all_vlan_rules(priv);
+       if (err) {
+               netdev_err(netdev, "%s: mlx5e_add_all_vlan_rules failed, %d\n",
+                          __func__, err);
+               goto err_close_flow_table;
+       }
+
+       mlx5e_init_eth_addr(priv);
+
+       set_bit(MLX5E_STATE_OPENED, &priv->state);
+
+       mlx5e_update_carrier(priv);
+       mlx5e_set_rx_mode_core(priv);
+
+       schedule_delayed_work(&priv->update_stats_work, 0);
+       return 0;
+
+err_close_flow_table:
+       mlx5e_close_flow_table(priv);
+
+err_close_tirs:
+       mlx5e_close_tirs(priv);
+
+err_close_rqt:
+       mlx5e_close_rqt(priv);
+
+err_close_channels:
+       mlx5e_close_channels(priv);
+
+err_close_tises:
+       mlx5e_close_tises(priv);
+
+       return err;
+}
+
+static int mlx5e_open(struct net_device *netdev)
+{
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+       int err;
+
+       mutex_lock(&priv->state_lock);
+       err = mlx5e_open_locked(netdev);
+       mutex_unlock(&priv->state_lock);
+
+       return err;
+}
+
+int mlx5e_close_locked(struct net_device *netdev)
+{
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+
+       clear_bit(MLX5E_STATE_OPENED, &priv->state);
+
+       mlx5e_set_rx_mode_core(priv);
+       mlx5e_del_all_vlan_rules(priv);
+       netif_carrier_off(priv->netdev);
+       mlx5e_close_flow_table(priv);
+       mlx5e_close_tirs(priv);
+       mlx5e_close_rqt(priv);
+       mlx5e_close_channels(priv);
+       mlx5e_close_tises(priv);
+
+       return 0;
+}
+
+static int mlx5e_close(struct net_device *netdev)
+{
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+       int err;
+
+       mutex_lock(&priv->state_lock);
+       err = mlx5e_close_locked(netdev);
+       mutex_unlock(&priv->state_lock);
+
+       return err;
+}
+
+int mlx5e_update_priv_params(struct mlx5e_priv *priv,
+                            struct mlx5e_params *new_params)
+{
+       int err = 0;
+       int was_opened;
+
+       WARN_ON(!mutex_is_locked(&priv->state_lock));
+
+       was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+       if (was_opened)
+               mlx5e_close_locked(priv->netdev);
+
+       priv->params = *new_params;
+
+       if (was_opened)
+               err = mlx5e_open_locked(priv->netdev);
+
+       return err;
+}
+
+static struct rtnl_link_stats64 *
+mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+       struct mlx5e_priv *priv = netdev_priv(dev);
+       struct mlx5e_vport_stats *vstats = &priv->stats.vport;
+
+       stats->rx_packets = vstats->rx_packets;
+       stats->rx_bytes   = vstats->rx_bytes;
+       stats->tx_packets = vstats->tx_packets;
+       stats->tx_bytes   = vstats->tx_bytes;
+       stats->multicast  = vstats->rx_multicast_packets +
+                           vstats->tx_multicast_packets;
+       stats->tx_errors  = vstats->tx_error_packets;
+       stats->rx_errors  = vstats->rx_error_packets;
+       stats->tx_dropped = vstats->tx_queue_dropped;
+       stats->rx_crc_errors = 0;
+       stats->rx_length_errors = 0;
+
+       return stats;
+}
+
+static void mlx5e_set_rx_mode(struct net_device *dev)
+{
+       struct mlx5e_priv *priv = netdev_priv(dev);
+
+       schedule_work(&priv->set_rx_mode_work);
+}
+
+static int mlx5e_set_mac(struct net_device *netdev, void *addr)
+{
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+       struct sockaddr *saddr = addr;
+
+       if (!is_valid_ether_addr(saddr->sa_data))
+               return -EADDRNOTAVAIL;
+
+       netif_addr_lock_bh(netdev);
+       ether_addr_copy(netdev->dev_addr, saddr->sa_data);
+       netif_addr_unlock_bh(netdev);
+
+       schedule_work(&priv->set_rx_mode_work);
+
+       return 0;
+}
+
+static int mlx5e_set_features(struct net_device *netdev,
+                             netdev_features_t features)
+{
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+       netdev_features_t changes = features ^ netdev->features;
+       struct mlx5e_params new_params;
+       bool update_params = false;
+
+       mutex_lock(&priv->state_lock);
+       new_params = priv->params;
+
+       if (changes & NETIF_F_LRO) {
+               new_params.lro_en = !!(features & NETIF_F_LRO);
+               update_params = true;
+       }
+
+       if (update_params)
+               mlx5e_update_priv_params(priv, &new_params);
+
+       if (changes & NETIF_F_HW_VLAN_CTAG_FILTER) {
+               if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
+                       mlx5e_enable_vlan_filter(priv);
+               else
+                       mlx5e_disable_vlan_filter(priv);
+       }
+
+       mutex_unlock(&priv->state_lock);
+
+       return 0;
+}
+
+static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
+{
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+       struct mlx5_core_dev *mdev = priv->mdev;
+       int max_mtu;
+       int err = 0;
+
+       err = mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
+       if (err)
+               return err;
+
+       if (new_mtu > max_mtu || new_mtu < MLX5E_PARAMS_MIN_MTU) {
+               netdev_err(netdev, "%s: Bad MTU size, mtu must be [%d-%d]\n",
+                          __func__, MLX5E_PARAMS_MIN_MTU, max_mtu);
+               return -EINVAL;
+       }
+
+       mutex_lock(&priv->state_lock);
+       netdev->mtu = new_mtu;
+       err = mlx5e_update_priv_params(priv, &priv->params);
+       mutex_unlock(&priv->state_lock);
+
+       return err;
+}
+
+static struct net_device_ops mlx5e_netdev_ops = {
+       .ndo_open                = mlx5e_open,
+       .ndo_stop                = mlx5e_close,
+       .ndo_start_xmit          = mlx5e_xmit,
+       .ndo_get_stats64         = mlx5e_get_stats,
+       .ndo_set_rx_mode         = mlx5e_set_rx_mode,
+       .ndo_set_mac_address     = mlx5e_set_mac,
+       .ndo_vlan_rx_add_vid     = mlx5e_vlan_rx_add_vid,
+       .ndo_vlan_rx_kill_vid    = mlx5e_vlan_rx_kill_vid,
+       .ndo_set_features        = mlx5e_set_features,
+       .ndo_change_mtu          = mlx5e_change_mtu,
+};
+
+static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
+{
+       if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+               return -ENOTSUPP;
+       if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
+           !MLX5_CAP_GEN(mdev, nic_flow_table) ||
+           !MLX5_CAP_ETH(mdev, csum_cap) ||
+           !MLX5_CAP_ETH(mdev, max_lso_cap) ||
+           !MLX5_CAP_ETH(mdev, vlan_cap) ||
+           !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap)) {
+               mlx5_core_warn(mdev,
+                              "Not creating net device, some required device capabilities are missing\n");
+               return -ENOTSUPP;
+       }
+       return 0;
+}
+
+static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
+                                   struct net_device *netdev,
+                                   int num_comp_vectors)
+{
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+
+       priv->params.log_sq_size           =
+               MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
+       priv->params.log_rq_size           =
+               MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
+       priv->params.rx_cq_moderation_usec =
+               MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
+       priv->params.rx_cq_moderation_pkts =
+               MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
+       priv->params.tx_cq_moderation_usec =
+               MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
+       priv->params.tx_cq_moderation_pkts =
+               MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
+       priv->params.min_rx_wqes           =
+               MLX5E_PARAMS_DEFAULT_MIN_RX_WQES;
+       priv->params.rx_hash_log_tbl_sz    =
+               (order_base_2(num_comp_vectors) >
+                MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ) ?
+               order_base_2(num_comp_vectors)           :
+               MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ;
+       priv->params.num_tc                = 1;
+       priv->params.default_vlan_prio     = 0;
+
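+       /* LRO stays off by default for now; the "false &&" keeps the
+        * capability check in place for when it is enabled
+        */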
+       priv->params.lro_en = false && !!MLX5_CAP_ETH(priv->mdev, lro_cap);
+       priv->params.lro_wqe_sz            =
+               MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
+
+       priv->mdev                         = mdev;
+       priv->netdev                       = netdev;
+       priv->params.num_channels          = num_comp_vectors;
+       priv->order_base_2_num_channels    = order_base_2(num_comp_vectors);
+       priv->queue_mapping_channel_mask   =
+               roundup_pow_of_two(num_comp_vectors) - 1;
+       priv->num_tc                       = priv->params.num_tc;
+       priv->default_vlan_prio            = priv->params.default_vlan_prio;
+
+       spin_lock_init(&priv->async_events_spinlock);
+       mutex_init(&priv->state_lock);
+
+       INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
+       INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
+       INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
+}
+
+static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
+{
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+
+       mlx5_query_nic_vport_mac_address(priv->mdev, netdev->dev_addr);
+}
+
+static void mlx5e_build_netdev(struct net_device *netdev)
+{
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+       struct mlx5_core_dev *mdev = priv->mdev;
+
+       SET_NETDEV_DEV(netdev, &mdev->pdev->dev);
+
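+       /* note: mlx5e_netdev_ops is a shared static, so these callbacks,
+        * once set, apply to all mlx5e netdevs
+        */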
+       if (priv->num_tc > 1) {
+               mlx5e_netdev_ops.ndo_select_queue = mlx5e_select_queue;
+               mlx5e_netdev_ops.ndo_start_xmit   = mlx5e_xmit_multi_tc;
+       }
+
+       netdev->netdev_ops        = &mlx5e_netdev_ops;
+       netdev->watchdog_timeo    = 15 * HZ;
+
+       netdev->ethtool_ops       = &mlx5e_ethtool_ops;
+
+       netdev->vlan_features    |= NETIF_F_IP_CSUM;
+       netdev->vlan_features    |= NETIF_F_IPV6_CSUM;
+       netdev->vlan_features    |= NETIF_F_GRO;
+       netdev->vlan_features    |= NETIF_F_TSO;
+       netdev->vlan_features    |= NETIF_F_TSO6;
+       netdev->vlan_features    |= NETIF_F_RXCSUM;
+       netdev->vlan_features    |= NETIF_F_RXHASH;
+
+       if (!!MLX5_CAP_ETH(mdev, lro_cap))
+               netdev->vlan_features    |= NETIF_F_LRO;
+
+       netdev->hw_features       = netdev->vlan_features;
+       netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_TX;
+       netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_RX;
+       netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
+       netdev->features          = netdev->hw_features;
+       if (!priv->params.lro_en)
+               netdev->features  &= ~NETIF_F_LRO;
+
+       netdev->features         |= NETIF_F_HIGHDMA;
+
+       netdev->priv_flags       |= IFF_UNICAST_FLT;
+
+       mlx5e_set_netdev_dev_addr(netdev);
+}
+
+static int mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn,
+                            struct mlx5_core_mr *mr)
+{
+       struct mlx5_core_dev *mdev = priv->mdev;
+       struct mlx5_create_mkey_mbox_in *in;
+       int err;
+
+       in = mlx5_vzalloc(sizeof(*in));
+       if (!in)
+               return -ENOMEM;
+
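+       /* PA-mode mkey; MLX5_MKEY_LEN64 makes it span the whole address
+        * space for this PD
+        */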
+       in->seg.flags = MLX5_PERM_LOCAL_WRITE |
+                       MLX5_PERM_LOCAL_READ  |
+                       MLX5_ACCESS_MODE_PA;
+       in->seg.flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64);
+       in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
+
+       err = mlx5_core_create_mkey(mdev, mr, in, sizeof(*in), NULL, NULL,
+                                   NULL);
+
+       kvfree(in);
+
+       return err;
+}
+
+static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
+{
+       struct net_device *netdev;
+       struct mlx5e_priv *priv;
+       int ncv = mdev->priv.eq_table.num_comp_vectors;
+       int err;
+
+       if (mlx5e_check_required_hca_cap(mdev))
+               return NULL;
+
+       netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
+                                   roundup_pow_of_two(ncv) * MLX5E_MAX_NUM_TC,
+                                   ncv);
+       if (!netdev) {
+               mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
+               return NULL;
+       }
+
+       mlx5e_build_netdev_priv(mdev, netdev, ncv);
+       mlx5e_build_netdev(netdev);
+
+       netif_carrier_off(netdev);
+
+       priv = netdev_priv(netdev);
+
+       err = mlx5_alloc_map_uar(mdev, &priv->cq_uar);
+       if (err) {
+               netdev_err(netdev, "%s: mlx5_alloc_map_uar failed, %d\n",
+                          __func__, err);
+               goto err_free_netdev;
+       }
+
+       err = mlx5_core_alloc_pd(mdev, &priv->pdn);
+       if (err) {
+               netdev_err(netdev, "%s: mlx5_core_alloc_pd failed, %d\n",
+                          __func__, err);
+               goto err_unmap_free_uar;
+       }
+
+       err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr);
+       if (err) {
+               netdev_err(netdev, "%s: mlx5e_create_mkey failed, %d\n",
+                          __func__, err);
+               goto err_dealloc_pd;
+       }
+
+       err = register_netdev(netdev);
+       if (err) {
+               netdev_err(netdev, "%s: register_netdev failed, %d\n",
+                          __func__, err);
+               goto err_destroy_mkey;
+       }
+
+       mlx5e_enable_async_events(priv);
+
+       return priv;
+
+err_destroy_mkey:
+       mlx5_core_destroy_mkey(mdev, &priv->mr);
+
+err_dealloc_pd:
+       mlx5_core_dealloc_pd(mdev, priv->pdn);
+
+err_unmap_free_uar:
+       mlx5_unmap_free_uar(mdev, &priv->cq_uar);
+
+err_free_netdev:
+       free_netdev(netdev);
+
+       return NULL;
+}
+
+static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
+{
+       struct mlx5e_priv *priv = vpriv;
+       struct net_device *netdev = priv->netdev;
+
+       unregister_netdev(netdev);
+       mlx5_core_destroy_mkey(priv->mdev, &priv->mr);
+       mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
+       mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
+       mlx5e_disable_async_events(priv);
+       flush_scheduled_work();
+       free_netdev(netdev);
+}
+
+static void *mlx5e_get_netdev(void *vpriv)
+{
+       struct mlx5e_priv *priv = vpriv;
+
+       return priv->netdev;
+}
+
+static struct mlx5_interface mlx5e_interface = {
+       .add       = mlx5e_create_netdev,
+       .remove    = mlx5e_destroy_netdev,
+       .event     = mlx5e_async_event,
+       .protocol  = MLX5_INTERFACE_PROTOCOL_ETH,
+       .get_dev   = mlx5e_get_netdev,
+};
+
+void mlx5e_init(void)
+{
+       mlx5_register_interface(&mlx5e_interface);
+}
+
+void mlx5e_cleanup(void)
+{
+       mlx5_unregister_interface(&mlx5e_interface);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
new file mode 100644
index 0000000..ce1317c
--- /dev/null
@@ -0,0 +1,249 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include "en.h"
+
+static inline int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq,
+                                    struct mlx5e_rx_wqe *wqe, u16 ix)
+{
+       struct sk_buff *skb;
+       dma_addr_t dma_addr;
+
+       skb = netdev_alloc_skb(rq->netdev, rq->wqe_sz);
+       if (unlikely(!skb))
+               return -ENOMEM;
+
+       skb_reserve(skb, MLX5E_NET_IP_ALIGN);
+
+       dma_addr = dma_map_single(rq->pdev,
+                                 /* hw start padding */
+                                 skb->data - MLX5E_NET_IP_ALIGN,
+                                 /* hw   end padding */
+                                 rq->wqe_sz,
+                                 DMA_FROM_DEVICE);
+
+       if (unlikely(dma_mapping_error(rq->pdev, dma_addr)))
+               goto err_free_skb;
+
+       *((dma_addr_t *)skb->cb) = dma_addr;
+       wqe->data.addr = cpu_to_be64(dma_addr + MLX5E_NET_IP_ALIGN);
+
+       rq->skb[ix] = skb;
+
+       return 0;
+
+err_free_skb:
+       dev_kfree_skb(skb);
+
+       return -ENOMEM;
+}
+
+bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
+{
+       struct mlx5_wq_ll *wq = &rq->wq;
+
+       if (unlikely(!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state)))
+               return false;
+
+       while (!mlx5_wq_ll_is_full(wq)) {
+               struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);
+
+               if (unlikely(mlx5e_alloc_rx_wqe(rq, wqe, wq->head)))
+                       break;
+
+               mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
+       }
+
+       /* ensure wqes are visible to device before updating doorbell record */
+       dma_wmb();
+
+       mlx5_wq_ll_update_db_record(wq);
+
+       return !mlx5_wq_ll_is_full(wq);
+}
+
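+/* rebuild the coalesced packet's IP and TCP headers from the LRO fields in
+ * the CQE so the stack sees one consistent large frame
+ */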
+static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe)
+{
+       struct ethhdr   *eth    = (struct ethhdr *)(skb->data);
+       struct iphdr    *ipv4   = (struct iphdr *)(skb->data + ETH_HLEN);
+       struct ipv6hdr  *ipv6   = (struct ipv6hdr *)(skb->data + ETH_HLEN);
+       struct tcphdr   *tcp;
+
+       u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
+       int tcp_ack = ((CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA  == l4_hdr_type) ||
+                      (CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA == l4_hdr_type));
+
+       u16 tot_len = be32_to_cpu(cqe->byte_cnt) - ETH_HLEN;
+
+       if (eth->h_proto == htons(ETH_P_IP)) {
+               tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
+                                       sizeof(struct iphdr));
+               ipv6 = NULL;
+       } else {
+               tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
+                                       sizeof(struct ipv6hdr));
+               ipv4 = NULL;
+       }
+
+       if (get_cqe_lro_tcppsh(cqe))
+               tcp->psh                = 1;
+
+       if (tcp_ack) {
+               tcp->ack                = 1;
+               tcp->ack_seq            = cqe->lro_ack_seq_num;
+               tcp->window             = cqe->lro_tcp_win;
+       }
+
+       if (ipv4) {
+               ipv4->ttl               = cqe->lro_min_ttl;
+               ipv4->tot_len           = cpu_to_be16(tot_len);
+               ipv4->check             = 0;
+               ipv4->check             = ip_fast_csum((unsigned char *)ipv4,
+                                                      ipv4->ihl);
+       } else {
+               ipv6->hop_limit         = cqe->lro_min_ttl;
+               ipv6->payload_len       = cpu_to_be16(tot_len -
+                                                     sizeof(struct ipv6hdr));
+       }
+}
+
+static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe,
+                                     struct sk_buff *skb)
+{
+       u8 cht = cqe->rss_hash_type;
+       int ht = (cht & CQE_RSS_HTYPE_L4) ? PKT_HASH_TYPE_L4 :
+                (cht & CQE_RSS_HTYPE_IP) ? PKT_HASH_TYPE_L3 :
+                                           PKT_HASH_TYPE_NONE;
+       skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht);
+}
+
+static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
+                                     struct mlx5e_rq *rq,
+                                     struct sk_buff *skb)
+{
+       struct net_device *netdev = rq->netdev;
+       u32 cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
+       int lro_num_seg;
+
+       skb_put(skb, cqe_bcnt);
+
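+       /* the top byte of the CQE srqn field carries the LRO segment count */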
+       lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
+       if (lro_num_seg > 1) {
+               mlx5e_lro_update_hdr(skb, cqe);
+               skb_shinfo(skb)->gso_size = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
+               rq->stats.lro_packets++;
+               rq->stats.lro_bytes += cqe_bcnt;
+       }
+
+       if (likely(netdev->features & NETIF_F_RXCSUM) &&
+           (cqe->hds_ip_ext & CQE_L2_OK) &&
+           (cqe->hds_ip_ext & CQE_L3_OK) &&
+           (cqe->hds_ip_ext & CQE_L4_OK)) {
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
+       } else {
+               skb->ip_summed = CHECKSUM_NONE;
+               rq->stats.csum_none++;
+       }
+
+       skb->protocol = eth_type_trans(skb, netdev);
+
+       skb_record_rx_queue(skb, rq->ix);
+
+       if (likely(netdev->features & NETIF_F_RXHASH))
+               mlx5e_skb_set_hash(cqe, skb);
+
+       if (cqe_has_vlan(cqe))
+               __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+                                      be16_to_cpu(cqe->vlan_info));
+}
+
+bool mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
+{
+       struct mlx5e_rq *rq = cq->sqrq;
+       int i;
+
+       /* avoid accessing cq (dma coherent memory) if not needed */
+       if (!test_and_clear_bit(MLX5E_CQ_HAS_CQES, &cq->flags))
+               return false;
+
+       for (i = 0; i < budget; i++) {
+               struct mlx5e_rx_wqe *wqe;
+               struct mlx5_cqe64 *cqe;
+               struct sk_buff *skb;
+               __be16 wqe_counter_be;
+               u16 wqe_counter;
+
+               cqe = mlx5e_get_cqe(cq);
+               if (!cqe)
+                       break;
+
+               wqe_counter_be = cqe->wqe_counter;
+               wqe_counter    = be16_to_cpu(wqe_counter_be);
+               wqe            = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
+               skb            = rq->skb[wqe_counter];
+               rq->skb[wqe_counter] = NULL;
+
+               dma_unmap_single(rq->pdev,
+                                *((dma_addr_t *)skb->cb),
+                                skb_end_offset(skb),
+                                DMA_FROM_DEVICE);
+
+               if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
+                       rq->stats.wqe_err++;
+                       dev_kfree_skb(skb);
+                       goto wq_ll_pop;
+               }
+
+               mlx5e_build_rx_skb(cqe, rq, skb);
+               rq->stats.packets++;
+               napi_gro_receive(cq->napi, skb);
+
+wq_ll_pop:
+               mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
+                              &wqe->next.next_wqe_index);
+       }
+
+       mlx5_cqwq_update_db_record(&cq->wq);
+
+       /* ensure cq space is freed before enabling more cqes */
+       wmb();
+
+       if (i == budget) {
+               set_bit(MLX5E_CQ_HAS_CQES, &cq->flags);
+               return true;
+       }
+
+       return false;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
new file mode 100644
index 0000000..8020986
--- /dev/null
@@ -0,0 +1,344 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/tcp.h>
+#include <linux/if_vlan.h>
+#include "en.h"
+
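+/* the SQ keeps a power-of-two fifo of (addr, size) dma mappings: pushed at
+ * xmit time, popped on completion or on xmit error unwind
+ */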
+static void mlx5e_dma_pop_last_pushed(struct mlx5e_sq *sq, dma_addr_t *addr,
+                                     u32 *size)
+{
+       sq->dma_fifo_pc--;
+       *addr = sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr;
+       *size = sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size;
+}
+
+static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, struct sk_buff *skb)
+{
+       dma_addr_t addr;
+       u32 size;
+       int i;
+
+       for (i = 0; i < MLX5E_TX_SKB_CB(skb)->num_dma; i++) {
+               mlx5e_dma_pop_last_pushed(sq, &addr, &size);
+               dma_unmap_single(sq->pdev, addr, size, DMA_TO_DEVICE);
+       }
+}
+
+static inline void mlx5e_dma_push(struct mlx5e_sq *sq, dma_addr_t addr,
+                                 u32 size)
+{
+       sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr = addr;
+       sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size = size;
+       sq->dma_fifo_pc++;
+}
+
+static inline void mlx5e_dma_get(struct mlx5e_sq *sq, u32 i, dma_addr_t *addr,
+                                u32 *size)
+{
+       *addr = sq->dma_fifo[i & sq->dma_fifo_mask].addr;
+       *size = sq->dma_fifo[i & sq->dma_fifo_mask].size;
+}
+
+u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
+                      void *accel_priv, select_queue_fallback_t fallback)
+{
+       struct mlx5e_priv *priv = netdev_priv(dev);
+       int channel_ix = fallback(dev, skb);
+       int up = skb_vlan_tag_present(skb)        ?
+                skb->vlan_tci >> VLAN_PRIO_SHIFT :
+                priv->default_vlan_prio;
+       int tc = netdev_get_prio_tc_map(dev, up);
+
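+       /* pack tc into the high bits and the channel into the low bits,
+        * matching the txq layout set up in mlx5e_open_locked
+        */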
+       return (tc << priv->order_base_2_num_channels) | channel_ix;
+}
+
+static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
+                                           struct sk_buff *skb)
+{
+#define MLX5E_MIN_INLINE 16 /* eth header with vlan (w/o next ethertype) */
+       return MLX5E_MIN_INLINE;
+}
+
+static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
+{
+       struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start;
+       int cpy1_sz = 2 * ETH_ALEN;
+       int cpy2_sz = ihs - cpy1_sz - VLAN_HLEN;
+
+       skb_copy_from_linear_data(skb, vhdr, cpy1_sz);
+       skb_pull_inline(skb, cpy1_sz);
+       vhdr->h_vlan_proto = skb->vlan_proto;
+       vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
+       skb_copy_from_linear_data(skb, &vhdr->h_vlan_encapsulated_proto,
+                                 cpy2_sz);
+       skb_pull_inline(skb, cpy2_sz);
+}
+
+static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
+{
+       struct mlx5_wq_cyc       *wq   = &sq->wq;
+
+       u16 pi = sq->pc & wq->sz_m1;
+       struct mlx5e_tx_wqe      *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);
+
+       struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
+       struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;
+       struct mlx5_wqe_data_seg *dseg;
+
+       u8  opcode = MLX5_OPCODE_SEND;
+       dma_addr_t dma_addr = 0;
+       u16 headlen;
+       u16 ds_cnt;
+       u16 ihs;
+       int i;
+
+       memset(wqe, 0, sizeof(*wqe));
+
+       if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
+               eseg->cs_flags  = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
+       else
+               sq->stats.csum_offload_none++;
+
+       if (skb_is_gso(skb)) {
+               u32 payload_len;
+               int num_pkts;
+
+               eseg->mss    = cpu_to_be16(skb_shinfo(skb)->gso_size);
+               opcode       = MLX5_OPCODE_LSO;
+               ihs          = skb_transport_offset(skb) + tcp_hdrlen(skb);
+               payload_len  = skb->len - ihs;
+               num_pkts     =    (payload_len / skb_shinfo(skb)->gso_size) +
+                               !!(payload_len % skb_shinfo(skb)->gso_size);
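+               /* BQL accounting: skb->len already counts one copy of the
+                * headers, add one more per extra LSO segment on the wire
+                */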
+               MLX5E_TX_SKB_CB(skb)->num_bytes = skb->len +
+                                                 (num_pkts - 1) * ihs;
+               sq->stats.tso_packets++;
+               sq->stats.tso_bytes += payload_len;
+       } else {
+               ihs             = mlx5e_get_inline_hdr_size(sq, skb);
+               MLX5E_TX_SKB_CB(skb)->num_bytes = max_t(unsigned int, skb->len,
+                                                       ETH_ZLEN);
+       }
+
+       if (skb_vlan_tag_present(skb)) {
+               mlx5e_insert_vlan(eseg->inline_hdr_start, skb, ihs);
+       } else {
+               skb_copy_from_linear_data(skb, eseg->inline_hdr_start, ihs);
+               skb_pull_inline(skb, ihs);
+       }
+
+       eseg->inline_hdr_sz     = cpu_to_be16(ihs);
+
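+       /* count 16-byte data segments: the base WQE plus whatever part of
+        * the inlined headers spills past inline_hdr_start
+        */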
+       ds_cnt  = sizeof(*wqe) / MLX5_SEND_WQE_DS;
+       ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr_start),
+                              MLX5_SEND_WQE_DS);
+       dseg    = (struct mlx5_wqe_data_seg *)cseg + ds_cnt;
+
+       MLX5E_TX_SKB_CB(skb)->num_dma = 0;
+
+       headlen = skb_headlen(skb);
+       if (headlen) {
+               dma_addr = dma_map_single(sq->pdev, skb->data, headlen,
+                                         DMA_TO_DEVICE);
+               if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
+                       goto dma_unmap_wqe_err;
+
+               dseg->addr       = cpu_to_be64(dma_addr);
+               dseg->lkey       = sq->mkey_be;
+               dseg->byte_count = cpu_to_be32(headlen);
+
+               mlx5e_dma_push(sq, dma_addr, headlen);
+               MLX5E_TX_SKB_CB(skb)->num_dma++;
+
+               dseg++;
+       }
+
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+               struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+               int fsz = skb_frag_size(frag);
+
+               dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
+                                           DMA_TO_DEVICE);
+               if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
+                       goto dma_unmap_wqe_err;
+
+               dseg->addr       = cpu_to_be64(dma_addr);
+               dseg->lkey       = sq->mkey_be;
+               dseg->byte_count = cpu_to_be32(fsz);
+
+               mlx5e_dma_push(sq, dma_addr, fsz);
+               MLX5E_TX_SKB_CB(skb)->num_dma++;
+
+               dseg++;
+       }
+
+       ds_cnt += MLX5E_TX_SKB_CB(skb)->num_dma;
+
+       cseg->opmod_idx_opcode  = cpu_to_be32((sq->pc << 8) | opcode);
+       cseg->qpn_ds            = cpu_to_be32((sq->sqn << 8) | ds_cnt);
+       cseg->fm_ce_se          = MLX5_WQE_CTRL_CQ_UPDATE;
+
+       sq->skb[pi] = skb;
+
+       MLX5E_TX_SKB_CB(skb)->num_wqebbs = DIV_ROUND_UP(ds_cnt,
+                                                       MLX5_SEND_WQEBB_NUM_DS);
+       sq->pc += MLX5E_TX_SKB_CB(skb)->num_wqebbs;
+
+       netdev_tx_sent_queue(sq->txq, MLX5E_TX_SKB_CB(skb)->num_bytes);
+
+       if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5_SEND_WQE_MAX_WQEBBS))) {
+               netif_tx_stop_queue(sq->txq);
+               sq->stats.stopped++;
+       }
+
+       if (!skb->xmit_more || netif_xmit_stopped(sq->txq))
+               mlx5e_tx_notify_hw(sq, wqe);
+
+       sq->stats.packets++;
+       return NETDEV_TX_OK;
+
+dma_unmap_wqe_err:
+       sq->stats.dropped++;
+       mlx5e_dma_unmap_wqe_err(sq, skb);
+
+       dev_kfree_skb_any(skb);
+
+       return NETDEV_TX_OK;
+}
+
+netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       struct mlx5e_priv *priv = netdev_priv(dev);
+       int ix = skb->queue_mapping;
+       int tc = 0;
+       struct mlx5e_channel *c = priv->channel[ix];
+       struct mlx5e_sq *sq = &c->sq[tc];
+
+       return mlx5e_sq_xmit(sq, skb);
+}
+
+netdev_tx_t mlx5e_xmit_multi_tc(struct sk_buff *skb, struct net_device *dev)
+{
+       struct mlx5e_priv *priv = netdev_priv(dev);
+       int ix = skb->queue_mapping & priv->queue_mapping_channel_mask;
+       int tc = skb->queue_mapping >> priv->order_base_2_num_channels;
+       struct mlx5e_channel *c = priv->channel[ix];
+       struct mlx5e_sq *sq = &c->sq[tc];
+
+       return mlx5e_sq_xmit(sq, skb);
+}
+
+bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq)
+{
+       struct mlx5e_sq *sq;
+       u32 dma_fifo_cc;
+       u32 nbytes;
+       u16 npkts;
+       u16 sqcc;
+       int i;
+
+       /* avoid accessing cq (dma coherent memory) if not needed */
+       if (!test_and_clear_bit(MLX5E_CQ_HAS_CQES, &cq->flags))
+               return false;
+
+       sq = cq->sqrq;
+
+       npkts = 0;
+       nbytes = 0;
+
+       /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
+        * otherwise a cq overrun may occur
+        */
+       sqcc = sq->cc;
+
+       /* avoid dirtying sq cache line every cqe */
+       dma_fifo_cc = sq->dma_fifo_cc;
+
+       for (i = 0; i < MLX5E_TX_CQ_POLL_BUDGET; i++) {
+               struct mlx5_cqe64 *cqe;
+               struct sk_buff *skb;
+               u16 ci;
+               int j;
+
+               cqe = mlx5e_get_cqe(cq);
+               if (!cqe)
+                       break;
+
+               ci = sqcc & sq->wq.sz_m1;
+               skb = sq->skb[ci];
+
+               if (unlikely(!skb)) { /* nop WQE: no skb was posted */
+                       sq->stats.nop++;
+                       sqcc++;
+                       goto free_skb;
+               }
+
+               for (j = 0; j < MLX5E_TX_SKB_CB(skb)->num_dma; j++) {
+                       dma_addr_t addr;
+                       u32 size;
+
+                       mlx5e_dma_get(sq, dma_fifo_cc, &addr, &size);
+                       dma_fifo_cc++;
+                       dma_unmap_single(sq->pdev, addr, size, DMA_TO_DEVICE);
+               }
+
+               npkts++;
+               nbytes += MLX5E_TX_SKB_CB(skb)->num_bytes;
+               sqcc += MLX5E_TX_SKB_CB(skb)->num_wqebbs;
+
+free_skb:
+               dev_kfree_skb(skb);
+       }
+
+       mlx5_cqwq_update_db_record(&cq->wq);
+
+       /* ensure cq space is freed before enabling more cqes */
+       wmb();
+
+       sq->dma_fifo_cc = dma_fifo_cc;
+       sq->cc = sqcc;
+
+       netdev_tx_completed_queue(sq->txq, npkts, nbytes);
+
+       if (netif_tx_queue_stopped(sq->txq) &&
+           mlx5e_sq_has_room_for(sq, MLX5_SEND_WQE_MAX_WQEBBS) &&
+           likely(test_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state))) {
+               netif_tx_wake_queue(sq->txq);
+               sq->stats.wake++;
+       }
+
+       if (i == MLX5E_TX_CQ_POLL_BUDGET) {
+               set_bit(MLX5E_CQ_HAS_CQES, &cq->flags);
+               return true;
+       }
+
+       return false;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
new file mode 100644
index 0000000..088bc42
--- /dev/null
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "en.h"
+
+struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq)
+{
+       struct mlx5_cqwq *wq = &cq->wq;
+       u32 ci = mlx5_cqwq_get_ci(wq);
+       struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);
+       int cqe_ownership_bit = cqe->op_own & MLX5_CQE_OWNER_MASK;
+       int sw_ownership_val = mlx5_cqwq_get_wrap_cnt(wq) & 1;
+
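+       /* hw toggles the ownership bit on each pass through the cq, so a
+        * mismatch with the sw wrap-count parity means no new cqe
+        */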
+       if (cqe_ownership_bit != sw_ownership_val)
+               return NULL;
+
+       mlx5_cqwq_pop(wq);
+
+       /* ensure cqe content is read after cqe ownership bit */
+       rmb();
+
+       return cqe;
+}
+
+int mlx5e_napi_poll(struct napi_struct *napi, int budget)
+{
+       struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
+                                              napi);
+       bool busy = false;
+       int i;
+
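+       /* clear before polling so an event arriving mid-poll re-sets the
+        * bit and is caught by the test below
+        */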
+       clear_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags);
+
+       for (i = 0; i < c->num_tc; i++)
+               busy |= mlx5e_poll_tx_cq(&c->sq[i].cq);
+
+       busy |= mlx5e_poll_rx_cq(&c->rq.cq, budget);
+
+       busy |= mlx5e_post_rx_wqes(c->rq.cq.sqrq);
+
+       if (busy)
+               return budget;
+
+       napi_complete(napi);
+
+       /* avoid losing completion event during/after polling cqs */
+       if (test_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags)) {
+               napi_schedule(napi);
+               return 0;
+       }
+
+       for (i = 0; i < c->num_tc; i++)
+               mlx5e_cq_arm(&c->sq[i].cq);
+       mlx5e_cq_arm(&c->rq.cq);
+
+       return 0;
+}
+
+void mlx5e_completion_event(struct mlx5_core_cq *mcq)
+{
+       struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
+
+       set_bit(MLX5E_CQ_HAS_CQES, &cq->flags);
+       set_bit(MLX5E_CHANNEL_NAPI_SCHED, &cq->channel->flags);
+       barrier();
+       napi_schedule(cq->napi);
+}
+
+void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event)
+{
+       struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
+       struct mlx5e_channel *c = cq->channel;
+       struct mlx5e_priv *priv = c->priv;
+       struct net_device *netdev = priv->netdev;
+
+       netdev_err(netdev, "%s: cqn=0x%.6x event=0x%.2x\n",
+                  __func__, mcq->cqn, event);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 58800e4f39585c2fd30d76e8de4a21bfb6f8bf66..a40b96d4c6621231987ece6fb5364384c4714e4c 100644
@@ -339,15 +339,14 @@ static void init_eq_buf(struct mlx5_eq *eq)
 int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
                       int nent, u64 mask, const char *name, struct mlx5_uar *uar)
 {
-       struct mlx5_eq_table *table = &dev->priv.eq_table;
+       struct mlx5_priv *priv = &dev->priv;
        struct mlx5_create_eq_mbox_in *in;
        struct mlx5_create_eq_mbox_out out;
        int err;
        int inlen;
 
        eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);
-       err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, 2 * PAGE_SIZE,
-                            &eq->buf);
+       err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, &eq->buf);
        if (err)
                return err;
 
@@ -378,14 +377,15 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
                goto err_in;
        }
 
-       snprintf(eq->name, MLX5_MAX_EQ_NAME, "%s@pci:%s",
+       snprintf(priv->irq_info[vecidx].name, MLX5_MAX_IRQ_NAME, "%s@pci:%s",
                 name, pci_name(dev->pdev));
+
        eq->eqn = out.eq_number;
        eq->irqn = vecidx;
        eq->dev = dev;
        eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET;
-       err = request_irq(table->msix_arr[vecidx].vector, mlx5_msix_handler, 0,
-                         eq->name, eq);
+       err = request_irq(priv->msix_arr[vecidx].vector, mlx5_msix_handler, 0,
+                         priv->irq_info[vecidx].name, eq);
        if (err)
                goto err_eq;
 
@@ -401,7 +401,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
        return 0;
 
 err_irq:
-       free_irq(table->msix_arr[vecidx].vector, eq);
+       free_irq(priv->msix_arr[vecidx].vector, eq);
 
 err_eq:
        mlx5_cmd_destroy_eq(dev, eq->eqn);
@@ -417,16 +417,15 @@ EXPORT_SYMBOL_GPL(mlx5_create_map_eq);
 
 int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
 {
-       struct mlx5_eq_table *table = &dev->priv.eq_table;
        int err;
 
        mlx5_debug_eq_remove(dev, eq);
-       free_irq(table->msix_arr[eq->irqn].vector, eq);
+       free_irq(dev->priv.msix_arr[eq->irqn].vector, eq);
        err = mlx5_cmd_destroy_eq(dev, eq->eqn);
        if (err)
                mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
                               eq->eqn);
-       synchronize_irq(table->msix_arr[eq->irqn].vector);
+       synchronize_irq(dev->priv.msix_arr[eq->irqn].vector);
        mlx5_buf_free(dev, &eq->buf);
 
        return err;
@@ -456,7 +455,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
        u32 async_event_mask = MLX5_ASYNC_EVENT_MASK;
        int err;
 
-       if (dev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG)
+       if (MLX5_CAP_GEN(dev, pg))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_PAGE_FAULT);
 
        err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
@@ -479,7 +478,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
 
        err = mlx5_create_map_eq(dev, &table->pages_eq,
                                 MLX5_EQ_VEC_PAGES,
-                                dev->caps.gen.max_vf + 1,
+                                /* TODO: sriov max_vf + */ 1,
                                 1 << MLX5_EVENT_TYPE_PAGE_REQUEST, "mlx5_pages_eq",
                                 &dev->priv.uuari.uars[0]);
        if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/flow_table.c b/drivers/net/ethernet/mellanox/mlx5/core/flow_table.c
new file mode 100644 (file)
index 0000000..ca90b9b
--- /dev/null
@@ -0,0 +1,422 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/flow_table.h>
+#include "mlx5_core.h"
+
+struct mlx5_ftg {
+       struct mlx5_flow_table_group    g;
+       u32                             id;
+       u32                             start_ix;
+};
+
+struct mlx5_flow_table {
+       struct mlx5_core_dev    *dev;
+       u8                      level;
+       u8                      type;
+       u32                     id;
+       struct mutex            mutex; /* sync bitmap alloc */
+       u16                     num_groups;
+       struct mlx5_ftg         *group;
+       unsigned long           *bitmap;
+       u32                     size;
+};
+
+static int mlx5_set_flow_entry_cmd(struct mlx5_flow_table *ft, u32 group_ix,
+                                  u32 flow_index, void *flow_context)
+{
+       u32 out[MLX5_ST_SZ_DW(set_fte_out)];
+       u32 *in;
+       void *in_flow_context;
+       int fcdls =
+               MLX5_GET(flow_context, flow_context, destination_list_size) *
+               MLX5_ST_SZ_BYTES(dest_format_struct);
+       int inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fcdls;
+       int err;
+
+       in = mlx5_vzalloc(inlen);
+       if (!in) {
+               mlx5_core_warn(ft->dev, "failed to allocate inbox\n");
+               return -ENOMEM;
+       }
+
+       MLX5_SET(set_fte_in, in, table_type, ft->type);
+       MLX5_SET(set_fte_in, in, table_id,   ft->id);
+       MLX5_SET(set_fte_in, in, flow_index, flow_index);
+       MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
+
+       in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
+       memcpy(in_flow_context, flow_context,
+              MLX5_ST_SZ_BYTES(flow_context) + fcdls);
+
+       MLX5_SET(flow_context, in_flow_context, group_id,
+                ft->group[group_ix].id);
+
+       memset(out, 0, sizeof(out));
+       err = mlx5_cmd_exec_check_status(ft->dev, in, inlen, out,
+                                        sizeof(out));
+       kvfree(in);
+
+       return err;
+}
+
+static void mlx5_del_flow_entry_cmd(struct mlx5_flow_table *ft, u32 flow_index)
+{
+       u32 in[MLX5_ST_SZ_DW(delete_fte_in)];
+       u32 out[MLX5_ST_SZ_DW(delete_fte_out)];
+
+       memset(in, 0, sizeof(in));
+       memset(out, 0, sizeof(out));
+
+#define MLX5_SET_DFTEI(p, x, v) MLX5_SET(delete_fte_in, p, x, v)
+       MLX5_SET_DFTEI(in, table_type, ft->type);
+       MLX5_SET_DFTEI(in, table_id,   ft->id);
+       MLX5_SET_DFTEI(in, flow_index, flow_index);
+       MLX5_SET_DFTEI(in, opcode,     MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
+
+       mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out));
+}
+
+static void mlx5_destroy_flow_group_cmd(struct mlx5_flow_table *ft, int i)
+{
+       u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)];
+       u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)];
+
+       memset(in, 0, sizeof(in));
+       memset(out, 0, sizeof(out));
+
+#define MLX5_SET_DFGI(p, x, v) MLX5_SET(destroy_flow_group_in, p, x, v)
+       MLX5_SET_DFGI(in, table_type, ft->type);
+       MLX5_SET_DFGI(in, table_id,   ft->id);
+       MLX5_SET_DFGI(in, opcode, MLX5_CMD_OP_DESTROY_FLOW_GROUP);
+       MLX5_SET_DFGI(in, group_id, ft->group[i].id);
+       mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out));
+}
+
+static int mlx5_create_flow_group_cmd(struct mlx5_flow_table *ft, int i)
+{
+       u32 out[MLX5_ST_SZ_DW(create_flow_group_out)];
+       u32 *in;
+       void *in_match_criteria;
+       int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+       struct mlx5_flow_table_group *g = &ft->group[i].g;
+       u32 start_ix = ft->group[i].start_ix;
+       u32 end_ix = start_ix + (1 << g->log_sz) - 1;
+       int err;
+
+       in = mlx5_vzalloc(inlen);
+       if (!in) {
+               mlx5_core_warn(ft->dev, "failed to allocate inbox\n");
+               return -ENOMEM;
+       }
+       in_match_criteria = MLX5_ADDR_OF(create_flow_group_in, in,
+                                        match_criteria);
+
+       memset(out, 0, sizeof(out));
+
+#define MLX5_SET_CFGI(p, x, v) MLX5_SET(create_flow_group_in, p, x, v)
+       MLX5_SET_CFGI(in, table_type,            ft->type);
+       MLX5_SET_CFGI(in, table_id,              ft->id);
+       MLX5_SET_CFGI(in, opcode,                MLX5_CMD_OP_CREATE_FLOW_GROUP);
+       MLX5_SET_CFGI(in, start_flow_index,      start_ix);
+       MLX5_SET_CFGI(in, end_flow_index,        end_ix);
+       MLX5_SET_CFGI(in, match_criteria_enable, g->match_criteria_enable);
+
+       memcpy(in_match_criteria, g->match_criteria,
+              MLX5_ST_SZ_BYTES(fte_match_param));
+
+       err = mlx5_cmd_exec_check_status(ft->dev, in, inlen, out,
+                                        sizeof(out));
+       if (!err)
+               ft->group[i].id = MLX5_GET(create_flow_group_out, out,
+                                          group_id);
+
+       kvfree(in);
+
+       return err;
+}
+
+static void mlx5_destroy_flow_table_groups(struct mlx5_flow_table *ft)
+{
+       int i;
+
+       for (i = 0; i < ft->num_groups; i++)
+               mlx5_destroy_flow_group_cmd(ft, i);
+}
+
+static int mlx5_create_flow_table_groups(struct mlx5_flow_table *ft)
+{
+       int err;
+       int i;
+
+       for (i = 0; i < ft->num_groups; i++) {
+               err = mlx5_create_flow_group_cmd(ft, i);
+               if (err)
+                       goto err_destroy_flow_table_groups;
+       }
+
+       return 0;
+
+err_destroy_flow_table_groups:
+       for (i--; i >= 0; i--)
+               mlx5_destroy_flow_group_cmd(ft, i);
+
+       return err;
+}
+
+static int mlx5_create_flow_table_cmd(struct mlx5_flow_table *ft)
+{
+       u32 in[MLX5_ST_SZ_DW(create_flow_table_in)];
+       u32 out[MLX5_ST_SZ_DW(create_flow_table_out)];
+       int err;
+
+       memset(in, 0, sizeof(in));
+
+       MLX5_SET(create_flow_table_in, in, table_type, ft->type);
+       MLX5_SET(create_flow_table_in, in, level,      ft->level);
+       MLX5_SET(create_flow_table_in, in, log_size,   order_base_2(ft->size));
+
+       MLX5_SET(create_flow_table_in, in, opcode,
+                MLX5_CMD_OP_CREATE_FLOW_TABLE);
+
+       memset(out, 0, sizeof(out));
+       err = mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out,
+                                        sizeof(out));
+       if (err)
+               return err;
+
+       ft->id = MLX5_GET(create_flow_table_out, out, table_id);
+
+       return 0;
+}
+
+static void mlx5_destroy_flow_table_cmd(struct mlx5_flow_table *ft)
+{
+       u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)];
+       u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)];
+
+       memset(in, 0, sizeof(in));
+       memset(out, 0, sizeof(out));
+
+#define MLX5_SET_DFTI(p, x, v) MLX5_SET(destroy_flow_table_in, p, x, v)
+       MLX5_SET_DFTI(in, table_type, ft->type);
+       MLX5_SET_DFTI(in, table_id,   ft->id);
+       MLX5_SET_DFTI(in, opcode, MLX5_CMD_OP_DESTROY_FLOW_TABLE);
+
+       mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out));
+}
+
+static int mlx5_find_group(struct mlx5_flow_table *ft, u8 match_criteria_enable,
+                          u32 *match_criteria, int *group_ix)
+{
+       void *mc_outer = MLX5_ADDR_OF(fte_match_param, match_criteria,
+                                     outer_headers);
+       void *mc_misc  = MLX5_ADDR_OF(fte_match_param, match_criteria,
+                                     misc_parameters);
+       void *mc_inner = MLX5_ADDR_OF(fte_match_param, match_criteria,
+                                     inner_headers);
+       int mc_outer_sz = MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4);
+       int mc_misc_sz  = MLX5_ST_SZ_BYTES(fte_match_set_misc);
+       int mc_inner_sz = MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4);
+       int i;
+
+       for (i = 0; i < ft->num_groups; i++) {
+               struct mlx5_flow_table_group *g = &ft->group[i].g;
+               void *gmc_outer = MLX5_ADDR_OF(fte_match_param,
+                                              g->match_criteria,
+                                              outer_headers);
+               void *gmc_misc  = MLX5_ADDR_OF(fte_match_param,
+                                              g->match_criteria,
+                                              misc_parameters);
+               void *gmc_inner = MLX5_ADDR_OF(fte_match_param,
+                                              g->match_criteria,
+                                              inner_headers);
+
+               if (g->match_criteria_enable != match_criteria_enable)
+                       continue;
+
+               if (match_criteria_enable & MLX5_MATCH_OUTER_HEADERS)
+                       if (memcmp(mc_outer, gmc_outer, mc_outer_sz))
+                               continue;
+
+               if (match_criteria_enable & MLX5_MATCH_MISC_PARAMETERS)
+                       if (memcmp(mc_misc, gmc_misc, mc_misc_sz))
+                               continue;
+
+               if (match_criteria_enable & MLX5_MATCH_INNER_HEADERS)
+                       if (memcmp(mc_inner, gmc_inner, mc_inner_sz))
+                               continue;
+
+               *group_ix = i;
+               return 0;
+       }
+
+       return -EINVAL;
+}
+
+static int alloc_flow_index(struct mlx5_flow_table *ft, int group_ix, u32 *ix)
+{
+       struct mlx5_ftg *g = &ft->group[group_ix];
+       int err = 0;
+
+       mutex_lock(&ft->mutex);
+
+       *ix = find_next_zero_bit(ft->bitmap, ft->size, g->start_ix);
+       if (*ix >= (g->start_ix + (1 << g->g.log_sz)))
+               err = -ENOSPC;
+       else
+               __set_bit(*ix, ft->bitmap);
+
+       mutex_unlock(&ft->mutex);
+
+       return err;
+}
+
+static void mlx5_free_flow_index(struct mlx5_flow_table *ft, u32 ix)
+{
+       __clear_bit(ix, ft->bitmap);
+}
+
+int mlx5_add_flow_table_entry(void *flow_table, u8 match_criteria_enable,
+                             void *match_criteria, void *flow_context,
+                             u32 *flow_index)
+{
+       struct mlx5_flow_table *ft = flow_table;
+       int group_ix;
+       int err;
+
+       err = mlx5_find_group(ft, match_criteria_enable, match_criteria,
+                             &group_ix);
+       if (err) {
+               mlx5_core_warn(ft->dev, "mlx5_find_group failed\n");
+               return err;
+       }
+
+       err = alloc_flow_index(ft, group_ix, flow_index);
+       if (err) {
+               mlx5_core_warn(ft->dev, "alloc_flow_index failed\n");
+               return err;
+       }
+
+       return mlx5_set_flow_entry_cmd(ft, group_ix, *flow_index, flow_context);
+}
+EXPORT_SYMBOL(mlx5_add_flow_table_entry);
+
+void mlx5_del_flow_table_entry(void *flow_table, u32 flow_index)
+{
+       struct mlx5_flow_table *ft = flow_table;
+
+       mlx5_del_flow_entry_cmd(ft, flow_index);
+       mlx5_free_flow_index(ft, flow_index);
+}
+EXPORT_SYMBOL(mlx5_del_flow_table_entry);
+
+void *mlx5_create_flow_table(struct mlx5_core_dev *dev, u8 level, u8 table_type,
+                            u16 num_groups,
+                            struct mlx5_flow_table_group *group)
+{
+       struct mlx5_flow_table *ft;
+       u32 start_ix = 0;
+       u32 ft_size = 0;
+       void *gr;
+       void *bm;
+       int err;
+       int i;
+
+       for (i = 0; i < num_groups; i++)
+               ft_size += (1 << group[i].log_sz);
+
+       ft = kzalloc(sizeof(*ft), GFP_KERNEL);
+       gr = kcalloc(num_groups, sizeof(struct mlx5_ftg), GFP_KERNEL);
+       bm = kcalloc(BITS_TO_LONGS(ft_size), sizeof(uintptr_t), GFP_KERNEL);
+       if (!ft || !gr || !bm)
+               goto err_free_ft;
+
+       ft->group       = gr;
+       ft->bitmap      = bm;
+       ft->num_groups  = num_groups;
+       ft->level       = level;
+       ft->type        = table_type;
+       ft->size        = ft_size;
+       ft->dev         = dev;
+       mutex_init(&ft->mutex);
+
+       for (i = 0; i < ft->num_groups; i++) {
+               memcpy(&ft->group[i].g, &group[i], sizeof(*group));
+               ft->group[i].start_ix = start_ix;
+               start_ix += 1 << group[i].log_sz;
+       }
+
+       err = mlx5_create_flow_table_cmd(ft);
+       if (err)
+               goto err_free_ft;
+
+       err = mlx5_create_flow_table_groups(ft);
+       if (err)
+               goto err_destroy_flow_table_cmd;
+
+       return ft;
+
+err_destroy_flow_table_cmd:
+       mlx5_destroy_flow_table_cmd(ft);
+
+err_free_ft:
+       mlx5_core_warn(dev, "failed to alloc flow table\n");
+       kfree(bm);
+       kfree(gr);
+       kfree(ft);
+
+       return NULL;
+}
+EXPORT_SYMBOL(mlx5_create_flow_table);
+
+void mlx5_destroy_flow_table(void *flow_table)
+{
+       struct mlx5_flow_table *ft = flow_table;
+
+       mlx5_destroy_flow_table_groups(ft);
+       mlx5_destroy_flow_table_cmd(ft);
+       kfree(ft->bitmap);
+       kfree(ft->group);
+       kfree(ft);
+}
+EXPORT_SYMBOL(mlx5_destroy_flow_table);
+
+u32 mlx5_get_flow_table_id(void *flow_table)
+{
+       struct mlx5_flow_table *ft = flow_table;
+
+       return ft->id;
+}
+EXPORT_SYMBOL(mlx5_get_flow_table_id);
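
Taken together, the new file exports an opaque-handle API: mlx5_create_flow_table() and mlx5_destroy_flow_table() bracket a table, mlx5_add_flow_table_entry() and mlx5_del_flow_table_entry() manage entries within its match groups, and mlx5_get_flow_table_id() exposes the firmware table id. A minimal caller might look like the sketch below; the level, table type, and group sizing are illustrative assumptions, and a real flow context must be filled in per the mlx5_ifc layout before the entry would do anything useful.

/* Hedged usage sketch for the flow table API above; values are illustrative. */
static int example_flow_table_user(struct mlx5_core_dev *dev)
{
	u32 mc[MLX5_ST_SZ_DW(fte_match_param)] = {};
	u32 fc[MLX5_ST_SZ_DW(flow_context)] = {};	/* no destinations set */
	struct mlx5_flow_table_group group = {
		.log_sz			= 4,	/* room for 16 entries */
		.match_criteria_enable	= MLX5_MATCH_OUTER_HEADERS,
	};
	u32 flow_index;
	void *ft;
	int err;

	/* fill group.match_criteria and mc with the fields to match on */

	ft = mlx5_create_flow_table(dev, 0 /* level */, 0 /* table type */,
				    1, &group);
	if (!ft)
		return -ENOMEM;

	err = mlx5_add_flow_table_entry(ft, MLX5_MATCH_OUTER_HEADERS,
					mc, fc, &flow_index);
	if (!err)
		mlx5_del_flow_table_entry(ft, flow_index);

	mlx5_destroy_flow_table(ft);
	return err;
}
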
index 4b4cda3bcc5fa1eecf99b6ed5265d41cc40dfedc..9335e5ae18ccee954b4cc08eff41a01871b41b2e 100644
 #include <linux/module.h>
 #include "mlx5_core.h"
 
-int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev)
+static int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev, u32 *out,
+                                 int outlen)
 {
-       struct mlx5_cmd_query_adapter_mbox_out *out;
-       struct mlx5_cmd_query_adapter_mbox_in in;
+       u32 in[MLX5_ST_SZ_DW(query_adapter_in)];
+
+       memset(in, 0, sizeof(in));
+
+       MLX5_SET(query_adapter_in, in, opcode, MLX5_CMD_OP_QUERY_ADAPTER);
+
+       return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen);
+}
+
+int mlx5_query_board_id(struct mlx5_core_dev *dev)
+{
+       u32 *out;
+       int outlen = MLX5_ST_SZ_BYTES(query_adapter_out);
        int err;
 
-       out = kzalloc(sizeof(*out), GFP_KERNEL);
+       out = kzalloc(outlen, GFP_KERNEL);
        if (!out)
                return -ENOMEM;
 
-       memset(&in, 0, sizeof(in));
-       in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_ADAPTER);
-       err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out));
+       err = mlx5_cmd_query_adapter(dev, out, outlen);
        if (err)
-               goto out_out;
-
-       if (out->hdr.status) {
-               err = mlx5_cmd_status_to_err(&out->hdr);
-               goto out_out;
-       }
+               goto out;
 
-       memcpy(dev->board_id, out->vsd_psid, sizeof(out->vsd_psid));
+       memcpy(dev->board_id,
+              MLX5_ADDR_OF(query_adapter_out, out,
+                           query_adapter_struct.vsd_contd_psid),
+              MLX5_FLD_SZ_BYTES(query_adapter_out,
+                                query_adapter_struct.vsd_contd_psid));
 
-out_out:
+out:
        kfree(out);
-
        return err;
 }
 
-int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev, struct mlx5_caps *caps)
+int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id)
 {
-       return mlx5_core_get_caps(dev, caps, HCA_CAP_OPMOD_GET_CUR);
+       u32 *out;
+       int outlen = MLX5_ST_SZ_BYTES(query_adapter_out);
+       int err;
+
+       out = kzalloc(outlen, GFP_KERNEL);
+       if (!out)
+               return -ENOMEM;
+
+       err = mlx5_cmd_query_adapter(mdev, out, outlen);
+       if (err)
+               goto out;
+
+       *vendor_id = MLX5_GET(query_adapter_out, out,
+                             query_adapter_struct.ieee_vendor_id);
+out:
+       kfree(out);
+       return err;
 }
+EXPORT_SYMBOL(mlx5_core_query_vendor_id);
 
-int mlx5_query_odp_caps(struct mlx5_core_dev *dev, struct mlx5_odp_caps *caps)
+int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
 {
-       u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
-       int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
-       void *out;
        int err;
 
-       if (!(dev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG))
-               return -ENOTSUPP;
+       err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_CUR);
+       if (err)
+               return err;
 
-       memset(in, 0, sizeof(in));
-       out = kzalloc(out_sz, GFP_KERNEL);
-       if (!out)
-               return -ENOMEM;
-       MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
-       MLX5_SET(query_hca_cap_in, in, op_mod, HCA_CAP_OPMOD_GET_ODP_CUR);
-       err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
+       err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_MAX);
        if (err)
-               goto out;
+               return err;
 
-       err = mlx5_cmd_status_to_err_v2(out);
-       if (err) {
-               mlx5_core_warn(dev, "query cur hca ODP caps failed, %d\n", err);
-               goto out;
+       if (MLX5_CAP_GEN(dev, eth_net_offloads)) {
+               err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS,
+                                        HCA_CAP_OPMOD_GET_CUR);
+               if (err)
+                       return err;
+               err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS,
+                                        HCA_CAP_OPMOD_GET_MAX);
+               if (err)
+                       return err;
        }
 
-       memcpy(caps, MLX5_ADDR_OF(query_hca_cap_out, out, capability_struct),
-              sizeof(*caps));
+       if (MLX5_CAP_GEN(dev, pg)) {
+               err = mlx5_core_get_caps(dev, MLX5_CAP_ODP,
+                                        HCA_CAP_OPMOD_GET_CUR);
+               if (err)
+                       return err;
+               err = mlx5_core_get_caps(dev, MLX5_CAP_ODP,
+                                        HCA_CAP_OPMOD_GET_MAX);
+               if (err)
+                       return err;
+       }
 
-       mlx5_core_dbg(dev, "on-demand paging capabilities:\nrc: %08x\nuc: %08x\nud: %08x\n",
-               be32_to_cpu(caps->per_transport_caps.rc_odp_caps),
-               be32_to_cpu(caps->per_transport_caps.uc_odp_caps),
-               be32_to_cpu(caps->per_transport_caps.ud_odp_caps));
+       if (MLX5_CAP_GEN(dev, atomic)) {
+               err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC,
+                                        HCA_CAP_OPMOD_GET_CUR);
+               if (err)
+                       return err;
+               err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC,
+                                        HCA_CAP_OPMOD_GET_MAX);
+               if (err)
+                       return err;
+       }
 
-out:
-       kfree(out);
-       return err;
+       if (MLX5_CAP_GEN(dev, roce)) {
+               err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE,
+                                        HCA_CAP_OPMOD_GET_CUR);
+               if (err)
+                       return err;
+               err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE,
+                                        HCA_CAP_OPMOD_GET_MAX);
+               if (err)
+                       return err;
+       }
+
+       if (MLX5_CAP_GEN(dev, nic_flow_table)) {
+               err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE,
+                                        HCA_CAP_OPMOD_GET_CUR);
+               if (err)
+                       return err;
+               err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE,
+                                        HCA_CAP_OPMOD_GET_MAX);
+               if (err)
+                       return err;
+       }
+       return 0;
 }
-EXPORT_SYMBOL(mlx5_query_odp_caps);
 
 int mlx5_cmd_init_hca(struct mlx5_core_dev *dev)
 {
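
mlx5_query_hca_caps() above fetches every supported capability type twice, once for the current and once for the maximum values, with the firmware opmod derived as (cap_type << 1) | cap_mode (see the mlx5_core_get_caps() change below). The repetition could be folded into a helper; a sketch using only symbols this patch introduces:

/* Sketch: fetch both the CUR and MAX pages for one capability type. */
static int mlx5_get_caps_cur_max(struct mlx5_core_dev *dev,
				 enum mlx5_cap_type type)
{
	int err;

	err = mlx5_core_get_caps(dev, type, HCA_CAP_OPMOD_GET_CUR);
	if (err)
		return err;
	return mlx5_core_get_caps(dev, type, HCA_CAP_OPMOD_GET_MAX);
}
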
index 28425e5ea91f871670e84721bb865c1725472e80..afad529838de748efc9f9253c6fde42abbe954a3 100644
@@ -38,6 +38,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/slab.h>
 #include <linux/io-mapping.h>
+#include <linux/interrupt.h>
 #include <linux/mlx5/driver.h>
 #include <linux/mlx5/cq.h>
 #include <linux/mlx5/qp.h>
 #include <linux/mlx5/mlx5_ifc.h>
 #include "mlx5_core.h"
 
-#define DRIVER_NAME "mlx5_core"
-#define DRIVER_VERSION "3.0"
-#define DRIVER_RELDATE  "January 2015"
-
 MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
 MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver");
 MODULE_LICENSE("Dual BSD/GPL");
@@ -208,24 +205,28 @@ static void release_bar(struct pci_dev *pdev)
 
 static int mlx5_enable_msix(struct mlx5_core_dev *dev)
 {
-       struct mlx5_eq_table *table = &dev->priv.eq_table;
-       int num_eqs = 1 << dev->caps.gen.log_max_eq;
+       struct mlx5_priv *priv = &dev->priv;
+       struct mlx5_eq_table *table = &priv->eq_table;
+       int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq);
        int nvec;
        int i;
 
-       nvec = dev->caps.gen.num_ports * num_online_cpus() + MLX5_EQ_VEC_COMP_BASE;
+       nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
+              MLX5_EQ_VEC_COMP_BASE;
        nvec = min_t(int, nvec, num_eqs);
        if (nvec <= MLX5_EQ_VEC_COMP_BASE)
                return -ENOMEM;
 
-       table->msix_arr = kzalloc(nvec * sizeof(*table->msix_arr), GFP_KERNEL);
-       if (!table->msix_arr)
-               return -ENOMEM;
+       priv->msix_arr = kcalloc(nvec, sizeof(*priv->msix_arr), GFP_KERNEL);
+
+       priv->irq_info = kcalloc(nvec, sizeof(*priv->irq_info), GFP_KERNEL);
+       if (!priv->msix_arr || !priv->irq_info)
+               goto err_free_msix;
 
        for (i = 0; i < nvec; i++)
-               table->msix_arr[i].entry = i;
+               priv->msix_arr[i].entry = i;
 
-       nvec = pci_enable_msix_range(dev->pdev, table->msix_arr,
+       nvec = pci_enable_msix_range(dev->pdev, priv->msix_arr,
                                     MLX5_EQ_VEC_COMP_BASE + 1, nvec);
        if (nvec < 0)
                return nvec;
@@ -233,14 +234,20 @@ static int mlx5_enable_msix(struct mlx5_core_dev *dev)
        table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;
 
        return 0;
+
+err_free_msix:
+       kfree(priv->irq_info);
+       kfree(priv->msix_arr);
+       return -ENOMEM;
 }
 
 static void mlx5_disable_msix(struct mlx5_core_dev *dev)
 {
-       struct mlx5_eq_table *table = &dev->priv.eq_table;
+       struct mlx5_priv *priv = &dev->priv;
 
        pci_disable_msix(dev->pdev);
-       kfree(table->msix_arr);
+       kfree(priv->irq_info);
+       kfree(priv->msix_arr);
 }
 
 struct mlx5_reg_host_endianess {
@@ -277,98 +284,20 @@ static u16 to_fw_pkey_sz(u32 size)
        }
 }
 
-/* selectively copy writable fields clearing any reserved area
- */
-static void copy_rw_fields(void *to, struct mlx5_caps *from)
-{
-       __be64 *flags_off = (__be64 *)MLX5_ADDR_OF(cmd_hca_cap, to, reserved_22);
-       u64 v64;
-
-       MLX5_SET(cmd_hca_cap, to, log_max_qp, from->gen.log_max_qp);
-       MLX5_SET(cmd_hca_cap, to, log_max_ra_req_qp, from->gen.log_max_ra_req_qp);
-       MLX5_SET(cmd_hca_cap, to, log_max_ra_res_qp, from->gen.log_max_ra_res_qp);
-       MLX5_SET(cmd_hca_cap, to, pkey_table_size, from->gen.pkey_table_size);
-       MLX5_SET(cmd_hca_cap, to, pkey_table_size, to_fw_pkey_sz(from->gen.pkey_table_size));
-       MLX5_SET(cmd_hca_cap, to, log_uar_page_sz, PAGE_SHIFT - 12);
-       v64 = from->gen.flags & MLX5_CAP_BITS_RW_MASK;
-       *flags_off = cpu_to_be64(v64);
-}
-
-static u16 get_pkey_table_size(int pkey)
-{
-       if (pkey > MLX5_MAX_LOG_PKEY_TABLE)
-               return 0;
-
-       return MLX5_MIN_PKEY_TABLE_SIZE << pkey;
-}
-
-static void fw2drv_caps(struct mlx5_caps *caps, void *out)
-{
-       struct mlx5_general_caps *gen = &caps->gen;
-
-       gen->max_srq_wqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_srq_sz);
-       gen->max_wqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_qp_sz);
-       gen->log_max_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_qp);
-       gen->log_max_strq = MLX5_GET_PR(cmd_hca_cap, out, log_max_strq_sz);
-       gen->log_max_srq = MLX5_GET_PR(cmd_hca_cap, out, log_max_srqs);
-       gen->max_cqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_cq_sz);
-       gen->log_max_cq = MLX5_GET_PR(cmd_hca_cap, out, log_max_cq);
-       gen->max_eqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_eq_sz);
-       gen->log_max_mkey = MLX5_GET_PR(cmd_hca_cap, out, log_max_mkey);
-       gen->log_max_eq = MLX5_GET_PR(cmd_hca_cap, out, log_max_eq);
-       gen->max_indirection = MLX5_GET_PR(cmd_hca_cap, out, max_indirection);
-       gen->log_max_mrw_sz = MLX5_GET_PR(cmd_hca_cap, out, log_max_mrw_sz);
-       gen->log_max_bsf_list_size = MLX5_GET_PR(cmd_hca_cap, out, log_max_bsf_list_size);
-       gen->log_max_klm_list_size = MLX5_GET_PR(cmd_hca_cap, out, log_max_klm_list_size);
-       gen->log_max_ra_req_dc = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_req_dc);
-       gen->log_max_ra_res_dc = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_res_dc);
-       gen->log_max_ra_req_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_req_qp);
-       gen->log_max_ra_res_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_res_qp);
-       gen->max_qp_counters = MLX5_GET_PR(cmd_hca_cap, out, max_qp_cnt);
-       gen->pkey_table_size = get_pkey_table_size(MLX5_GET_PR(cmd_hca_cap, out, pkey_table_size));
-       gen->local_ca_ack_delay = MLX5_GET_PR(cmd_hca_cap, out, local_ca_ack_delay);
-       gen->num_ports = MLX5_GET_PR(cmd_hca_cap, out, num_ports);
-       gen->log_max_msg = MLX5_GET_PR(cmd_hca_cap, out, log_max_msg);
-       gen->stat_rate_support = MLX5_GET_PR(cmd_hca_cap, out, stat_rate_support);
-       gen->flags = be64_to_cpu(*(__be64 *)MLX5_ADDR_OF(cmd_hca_cap, out, reserved_22));
-       pr_debug("flags = 0x%llx\n", gen->flags);
-       gen->uar_sz = MLX5_GET_PR(cmd_hca_cap, out, uar_sz);
-       gen->min_log_pg_sz = MLX5_GET_PR(cmd_hca_cap, out, log_pg_sz);
-       gen->bf_reg_size = MLX5_GET_PR(cmd_hca_cap, out, bf);
-       gen->bf_reg_size = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_bf_reg_size);
-       gen->max_sq_desc_sz = MLX5_GET_PR(cmd_hca_cap, out, max_wqe_sz_sq);
-       gen->max_rq_desc_sz = MLX5_GET_PR(cmd_hca_cap, out, max_wqe_sz_rq);
-       gen->max_dc_sq_desc_sz = MLX5_GET_PR(cmd_hca_cap, out, max_wqe_sz_sq_dc);
-       gen->max_qp_mcg = MLX5_GET_PR(cmd_hca_cap, out, max_qp_mcg);
-       gen->log_max_pd = MLX5_GET_PR(cmd_hca_cap, out, log_max_pd);
-       gen->log_max_xrcd = MLX5_GET_PR(cmd_hca_cap, out, log_max_xrcd);
-       gen->log_uar_page_sz = MLX5_GET_PR(cmd_hca_cap, out, log_uar_page_sz);
-}
-
-static const char *caps_opmod_str(u16 opmod)
-{
-       switch (opmod) {
-       case HCA_CAP_OPMOD_GET_MAX:
-               return "GET_MAX";
-       case HCA_CAP_OPMOD_GET_CUR:
-               return "GET_CUR";
-       default:
-               return "Invalid";
-       }
-}
-
-int mlx5_core_get_caps(struct mlx5_core_dev *dev, struct mlx5_caps *caps,
-                      u16 opmod)
+int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type,
+                      enum mlx5_cap_mode cap_mode)
 {
        u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
        int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
-       void *out;
+       void *out, *hca_caps;
+       u16 opmod = (cap_type << 1) | (cap_mode & 0x01);
        int err;
 
        memset(in, 0, sizeof(in));
        out = kzalloc(out_sz, GFP_KERNEL);
        if (!out)
                return -ENOMEM;
+
        MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
        MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
        err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
@@ -377,12 +306,30 @@ int mlx5_core_get_caps(struct mlx5_core_dev *dev, struct mlx5_caps *caps,
 
        err = mlx5_cmd_status_to_err_v2(out);
        if (err) {
-               mlx5_core_warn(dev, "query max hca cap failed, %d\n", err);
+               mlx5_core_warn(dev,
+                              "QUERY_HCA_CAP : type(%x) opmode(%x) Failed(%d)\n",
+                              cap_type, cap_mode, err);
                goto query_ex;
        }
-       mlx5_core_dbg(dev, "%s\n", caps_opmod_str(opmod));
-       fw2drv_caps(caps, MLX5_ADDR_OF(query_hca_cap_out, out, capability_struct));
 
+       hca_caps =  MLX5_ADDR_OF(query_hca_cap_out, out, capability);
+
+       switch (cap_mode) {
+       case HCA_CAP_OPMOD_GET_MAX:
+               memcpy(dev->hca_caps_max[cap_type], hca_caps,
+                      MLX5_UN_SZ_BYTES(hca_cap_union));
+               break;
+       case HCA_CAP_OPMOD_GET_CUR:
+               memcpy(dev->hca_caps_cur[cap_type], hca_caps,
+                      MLX5_UN_SZ_BYTES(hca_cap_union));
+               break;
+       default:
+               mlx5_core_warn(dev,
+                              "Tried to query dev cap type(%x) with wrong opmode(%x)\n",
+                              cap_type, cap_mode);
+               err = -EINVAL;
+               break;
+       }
 query_ex:
        kfree(out);
        return err;
@@ -409,49 +356,45 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
 {
        void *set_ctx = NULL;
        struct mlx5_profile *prof = dev->profile;
-       struct mlx5_caps *cur_caps = NULL;
-       struct mlx5_caps *max_caps = NULL;
        int err = -ENOMEM;
        int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
+       void *set_hca_cap;
 
        set_ctx = kzalloc(set_sz, GFP_KERNEL);
        if (!set_ctx)
                goto query_ex;
 
-       max_caps = kzalloc(sizeof(*max_caps), GFP_KERNEL);
-       if (!max_caps)
-               goto query_ex;
-
-       cur_caps = kzalloc(sizeof(*cur_caps), GFP_KERNEL);
-       if (!cur_caps)
-               goto query_ex;
-
-       err = mlx5_core_get_caps(dev, max_caps, HCA_CAP_OPMOD_GET_MAX);
+       err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_MAX);
        if (err)
                goto query_ex;
 
-       err = mlx5_core_get_caps(dev, cur_caps, HCA_CAP_OPMOD_GET_CUR);
+       err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_CUR);
        if (err)
                goto query_ex;
 
+       set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
+                                  capability);
+       memcpy(set_hca_cap, dev->hca_caps_cur[MLX5_CAP_GENERAL],
+              MLX5_ST_SZ_BYTES(cmd_hca_cap));
+
+       mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n",
+                     mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)),
+                     128);
        /* we limit the size of the pkey table to 128 entries for now */
-       cur_caps->gen.pkey_table_size = 128;
+       MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size,
+                to_fw_pkey_sz(128));
 
        if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
-               cur_caps->gen.log_max_qp = prof->log_max_qp;
+               MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
+                        prof->log_max_qp);
 
-       /* disable checksum */
-       cur_caps->gen.flags &= ~MLX5_DEV_CAP_FLAG_CMDIF_CSUM;
+       /* disable cmdif checksum */
+       MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);
 
-       copy_rw_fields(MLX5_ADDR_OF(set_hca_cap_in, set_ctx, hca_capability_struct),
-                      cur_caps);
        err = set_caps(dev, set_ctx, set_sz);
 
 query_ex:
-       kfree(cur_caps);
-       kfree(max_caps);
        kfree(set_ctx);
-
        return err;
 }
 
@@ -507,6 +450,74 @@ static int mlx5_core_disable_hca(struct mlx5_core_dev *dev)
        return 0;
 }
 
+static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
+{
+       struct mlx5_priv *priv  = &mdev->priv;
+       struct msix_entry *msix = priv->msix_arr;
+       int irq                 = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;
+       int numa_node           = dev_to_node(&mdev->pdev->dev);
+       int err;
+
+       if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
+               mlx5_core_warn(mdev, "zalloc_cpumask_var failed");
+               return -ENOMEM;
+       }
+
+       cpumask_set_cpu(cpumask_local_spread(i, numa_node),
+                       priv->irq_info[i].mask);
+
+       err = irq_set_affinity_hint(irq, priv->irq_info[i].mask);
+       if (err) {
+               mlx5_core_warn(mdev, "irq_set_affinity_hint failed,irq 0x%.4x",
+                              irq);
+               goto err_clear_mask;
+       }
+
+       return 0;
+
+err_clear_mask:
+       free_cpumask_var(priv->irq_info[i].mask);
+       return err;
+}
+
+static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i)
+{
+       struct mlx5_priv *priv  = &mdev->priv;
+       struct msix_entry *msix = priv->msix_arr;
+       int irq                 = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;
+
+       irq_set_affinity_hint(irq, NULL);
+       free_cpumask_var(priv->irq_info[i].mask);
+}
+
+static int mlx5_irq_set_affinity_hints(struct mlx5_core_dev *mdev)
+{
+       int err;
+       int i;
+
+       for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++) {
+               err = mlx5_irq_set_affinity_hint(mdev, i);
+               if (err)
+                       goto err_out;
+       }
+
+       return 0;
+
+err_out:
+       for (i--; i >= 0; i--)
+               mlx5_irq_clear_affinity_hint(mdev, i);
+
+       return err;
+}
+
+static void mlx5_irq_clear_affinity_hints(struct mlx5_core_dev *mdev)
+{
+       int i;
+
+       for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++)
+               mlx5_irq_clear_affinity_hint(mdev, i);
+}
+
 int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn)
 {
        struct mlx5_eq_table *table = &dev->priv.eq_table;
@@ -549,7 +560,7 @@ static void free_comp_eqs(struct mlx5_core_dev *dev)
 static int alloc_comp_eqs(struct mlx5_core_dev *dev)
 {
        struct mlx5_eq_table *table = &dev->priv.eq_table;
-       char name[MLX5_MAX_EQ_NAME];
+       char name[MLX5_MAX_IRQ_NAME];
        struct mlx5_eq *eq;
        int ncomp_vec;
        int nent;
@@ -566,7 +577,7 @@ static int alloc_comp_eqs(struct mlx5_core_dev *dev)
                        goto clean;
                }
 
-               snprintf(name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i);
+               snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i);
                err = mlx5_create_map_eq(dev, eq,
                                         i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
                                         name, &dev->priv.uuari.uars[0]);
@@ -588,6 +599,61 @@ clean:
        return err;
 }
 
+#ifdef CONFIG_MLX5_CORE_EN
+static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
+{
+       u32 query_in[MLX5_ST_SZ_DW(query_issi_in)];
+       u32 query_out[MLX5_ST_SZ_DW(query_issi_out)];
+       u32 set_in[MLX5_ST_SZ_DW(set_issi_in)];
+       u32 set_out[MLX5_ST_SZ_DW(set_issi_out)];
+       int err;
+       u32 sup_issi;
+
+       memset(query_in, 0, sizeof(query_in));
+       memset(query_out, 0, sizeof(query_out));
+
+       MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI);
+
+       err = mlx5_cmd_exec_check_status(dev, query_in, sizeof(query_in),
+                                        query_out, sizeof(query_out));
+       if (err) {
+               if (((struct mlx5_outbox_hdr *)query_out)->status ==
+                   MLX5_CMD_STAT_BAD_OP_ERR) {
+                       pr_debug("Only ISSI 0 is supported\n");
+                       return 0;
+               }
+
+               pr_err("failed to query ISSI\n");
+               return err;
+       }
+
+       sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0);
+
+       if (sup_issi & (1 << 1)) {
+               memset(set_in, 0, sizeof(set_in));
+               memset(set_out, 0, sizeof(set_out));
+
+               MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI);
+               MLX5_SET(set_issi_in, set_in, current_issi, 1);
+
+               err = mlx5_cmd_exec_check_status(dev, set_in, sizeof(set_in),
+                                                set_out, sizeof(set_out));
+               if (err) {
+                       pr_err("failed to set ISSI=1\n");
+                       return err;
+               }
+
+               dev->issi = 1;
+
+               return 0;
+       } else if (sup_issi & (1 << 0) || !sup_issi) {
+               return 0;
+       }
+
+       return -ENOTSUPP;
+}
+#endif
+
 static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
 {
        struct mlx5_priv *priv = &dev->priv;
@@ -650,6 +716,14 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
                goto err_pagealloc_cleanup;
        }
 
+#ifdef CONFIG_MLX5_CORE_EN
+       err = mlx5_core_set_issi(dev);
+       if (err) {
+               dev_err(&pdev->dev, "failed to set issi\n");
+               goto err_disable_hca;
+       }
+#endif
+
        err = mlx5_satisfy_startup_pages(dev, 1);
        if (err) {
                dev_err(&pdev->dev, "failed to allocate boot pages\n");
@@ -688,15 +762,15 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
 
        mlx5_start_health_poll(dev);
 
-       err = mlx5_cmd_query_hca_cap(dev, &dev->caps);
+       err = mlx5_query_hca_caps(dev);
        if (err) {
                dev_err(&pdev->dev, "query hca failed\n");
                goto err_stop_poll;
        }
 
-       err = mlx5_cmd_query_adapter(dev);
+       err = mlx5_query_board_id(dev);
        if (err) {
-               dev_err(&pdev->dev, "query adapter failed\n");
+               dev_err(&pdev->dev, "query board id failed\n");
                goto err_stop_poll;
        }
 
@@ -730,6 +804,12 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
                goto err_stop_eqs;
        }
 
+       err = mlx5_irq_set_affinity_hints(dev);
+       if (err) {
+               dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n");
+               goto err_free_comp_eqs;
+       }
+
        MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);
 
        mlx5_init_cq_table(dev);
@@ -739,6 +819,9 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
 
        return 0;
 
+err_free_comp_eqs:
+       free_comp_eqs(dev);
+
 err_stop_eqs:
        mlx5_stop_eqs(dev);
 
@@ -793,6 +876,7 @@ static void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
        mlx5_cleanup_srq_table(dev);
        mlx5_cleanup_qp_table(dev);
        mlx5_cleanup_cq_table(dev);
+       mlx5_irq_clear_affinity_hints(dev);
        free_comp_eqs(dev);
        mlx5_stop_eqs(dev);
        mlx5_free_uuars(dev, &priv->uuari);
@@ -1048,6 +1132,10 @@ static int __init init(void)
        if (err)
                goto err_health;
 
+#ifdef CONFIG_MLX5_CORE_EN
+       mlx5e_init();
+#endif
+
        return 0;
 
 err_health:
@@ -1060,6 +1148,9 @@ err_debug:
 
 static void __exit cleanup(void)
 {
+#ifdef CONFIG_MLX5_CORE_EN
+       mlx5e_cleanup();
+#endif
        pci_unregister_driver(&mlx5_core_driver);
        mlx5_health_cleanup();
        destroy_workqueue(mlx5_core_wq);
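
The ISSI (interface step sequence id) handshake added to main.c reduces to one decision over the supported-versions bitmask returned by QUERY_ISSI; restated as a sketch:

/* Sketch: the negotiation rule implemented by mlx5_core_set_issi() above. */
static int pick_issi(u32 sup_issi)
{
	if (sup_issi & (1 << 1))		/* device supports ISSI 1 */
		return 1;			/* SET_ISSI to 1 */
	if (!sup_issi || (sup_issi & (1 << 0)))
		return 0;			/* stay at ISSI 0 */
	return -ENOTSUPP;			/* no common interface version */
}
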
index d79fd85d1dd50c6e991eb9659d426839e4e013a8..d5a0c2d61a18f8949869753cf3580129f11b4a5e 100644
@@ -91,7 +91,7 @@ int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn)
 
        memset(&in, 0, sizeof(in));
        memset(&out, 0, sizeof(out));
-       in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DETACH_FROM_MCG);
+       in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DETTACH_FROM_MCG);
        memcpy(in.gid, mgid, sizeof(*mgid));
        in.qpn = cpu_to_be32(qpn);
        err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
index a051b906afdf1a3fb8059c9567fe8378b4818cf9..fc88ecaecb4b4307f2d5c796cc91c548e856f845 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
 #include <linux/kernel.h>
 #include <linux/sched.h>
 
+#define DRIVER_NAME "mlx5_core"
+#define DRIVER_VERSION "3.0-1"
+#define DRIVER_RELDATE  "January 2015"
+
 extern int mlx5_core_debug_mask;
 
 #define mlx5_core_dbg(dev, format, ...)                                        \
@@ -65,11 +69,20 @@ enum {
        MLX5_CMD_TIME, /* print command execution time */
 };
 
+static inline int mlx5_cmd_exec_check_status(struct mlx5_core_dev *dev, u32 *in,
+                                            int in_size, u32 *out,
+                                            int out_size)
+{
+       int err;
+
+       err = mlx5_cmd_exec(dev, in, in_size, out, out_size);
+       if (err)
+               return err;
+
+       return mlx5_cmd_status_to_err((struct mlx5_outbox_hdr *)out);
+}
 
-int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev,
-                          struct mlx5_caps *caps);
-int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev);
+int mlx5_query_hca_caps(struct mlx5_core_dev *dev);
+int mlx5_query_board_id(struct mlx5_core_dev *dev);
 int mlx5_cmd_init_hca(struct mlx5_core_dev *dev);
 int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev);
 
+void mlx5e_init(void);
+void mlx5e_cleanup(void);
+
 #endif /* __MLX5_CORE_H__ */
index 49e90f2612d8c0b8803e635acfb4c92b51044c8a..619d3baf19eac890f94bcb0b9f74cef8b9363813 100644
@@ -102,3 +102,235 @@ int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps)
        return err;
 }
 EXPORT_SYMBOL_GPL(mlx5_set_port_caps);
+
+int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
+                        int ptys_size, int proto_mask, u8 local_port)
+{
+       u32 in[MLX5_ST_SZ_DW(ptys_reg)];
+       int err;
+
+       memset(in, 0, sizeof(in));
+       MLX5_SET(ptys_reg, in, local_port, local_port);
+       MLX5_SET(ptys_reg, in, proto_mask, proto_mask);
+
+       err = mlx5_core_access_reg(dev, in, sizeof(in), ptys,
+                                  ptys_size, MLX5_REG_PTYS, 0, 0);
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_ptys);
+
+int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev,
+                             u32 *proto_cap, int proto_mask)
+{
+       u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+       int err;
+
+       err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, 1);
+       if (err)
+               return err;
+
+       if (proto_mask == MLX5_PTYS_EN)
+               *proto_cap = MLX5_GET(ptys_reg, out, eth_proto_capability);
+       else
+               *proto_cap = MLX5_GET(ptys_reg, out, ib_proto_capability);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_proto_cap);
+
+int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev,
+                               u32 *proto_admin, int proto_mask)
+{
+       u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+       int err;
+
+       err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, 1);
+       if (err)
+               return err;
+
+       if (proto_mask == MLX5_PTYS_EN)
+               *proto_admin = MLX5_GET(ptys_reg, out, eth_proto_admin);
+       else
+               *proto_admin = MLX5_GET(ptys_reg, out, ib_proto_admin);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_proto_admin);
+
+int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
+                                   u8 *link_width_oper, u8 local_port)
+{
+       u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+       int err;
+
+       err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_IB, local_port);
+       if (err)
+               return err;
+
+       *link_width_oper = MLX5_GET(ptys_reg, out, ib_link_width_oper);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_link_width_oper);
+
+int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev,
+                              u8 *proto_oper, int proto_mask,
+                              u8 local_port)
+{
+       u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+       int err;
+
+       err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, local_port);
+       if (err)
+               return err;
+
+       if (proto_mask == MLX5_PTYS_EN)
+               *proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
+       else
+               *proto_oper = MLX5_GET(ptys_reg, out, ib_proto_oper);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_proto_oper);
+
+int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
+                       int proto_mask)
+{
+       u32 in[MLX5_ST_SZ_DW(ptys_reg)];
+       u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+       int err;
+
+       memset(in, 0, sizeof(in));
+
+       MLX5_SET(ptys_reg, in, local_port, 1);
+       MLX5_SET(ptys_reg, in, proto_mask, proto_mask);
+       if (proto_mask == MLX5_PTYS_EN)
+               MLX5_SET(ptys_reg, in, eth_proto_admin, proto_admin);
+       else
+               MLX5_SET(ptys_reg, in, ib_proto_admin, proto_admin);
+
+       err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+                                  sizeof(out), MLX5_REG_PTYS, 0, 1);
+       return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_set_port_proto);
+
+int mlx5_set_port_status(struct mlx5_core_dev *dev,
+                        enum mlx5_port_status status)
+{
+       u32 in[MLX5_ST_SZ_DW(paos_reg)];
+       u32 out[MLX5_ST_SZ_DW(paos_reg)];
+
+       memset(in, 0, sizeof(in));
+
+       MLX5_SET(paos_reg, in, admin_status, status);
+       MLX5_SET(paos_reg, in, ase, 1);
+
+       return mlx5_core_access_reg(dev, in, sizeof(in), out,
+                                   sizeof(out), MLX5_REG_PAOS, 0, 1);
+}
+
+int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status)
+{
+       u32 in[MLX5_ST_SZ_DW(paos_reg)];
+       u32 out[MLX5_ST_SZ_DW(paos_reg)];
+       int err;
+
+       memset(in, 0, sizeof(in));
+
+       err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+                                  sizeof(out), MLX5_REG_PAOS, 0, 0);
+       if (err)
+               return err;
+
+       *status = MLX5_GET(paos_reg, out, oper_status);
+       return err;
+}
+
+static int mlx5_query_port_mtu(struct mlx5_core_dev *dev,
+                              int *admin_mtu, int *max_mtu, int *oper_mtu,
+                              u8 local_port)
+{
+       u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
+       u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
+       int err;
+
+       memset(in, 0, sizeof(in));
+
+       MLX5_SET(pmtu_reg, in, local_port, local_port);
+
+       err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+                                  sizeof(out), MLX5_REG_PMTU, 0, 0);
+       if (err)
+               return err;
+
+       if (max_mtu)
+               *max_mtu  = MLX5_GET(pmtu_reg, out, max_mtu);
+       if (oper_mtu)
+               *oper_mtu = MLX5_GET(pmtu_reg, out, oper_mtu);
+       if (admin_mtu)
+               *admin_mtu = MLX5_GET(pmtu_reg, out, admin_mtu);
+
+       return 0;
+}
+
+int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu)
+{
+       u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
+       u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
+
+       memset(in, 0, sizeof(in));
+
+       MLX5_SET(pmtu_reg, in, admin_mtu, mtu);
+       MLX5_SET(pmtu_reg, in, local_port, 1);
+
+       return mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
+                                   MLX5_REG_PMTU, 0, 1);
+}
+EXPORT_SYMBOL_GPL(mlx5_set_port_mtu);
+
+int mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu,
+                           u8 local_port)
+{
+       return mlx5_query_port_mtu(dev, NULL, max_mtu, NULL, local_port);
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_max_mtu);
+
+int mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
+                            u8 local_port)
+{
+       return mlx5_query_port_mtu(dev, NULL, NULL, oper_mtu, local_port);
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_oper_mtu);
+
+static int mlx5_query_port_pvlc(struct mlx5_core_dev *dev, u32 *pvlc,
+                               int pvlc_size,  u8 local_port)
+{
+       u32 in[MLX5_ST_SZ_DW(pvlc_reg)];
+       int err;
+
+       memset(in, 0, sizeof(in));
+       MLX5_SET(pvlc_reg, in, local_port, local_port);
+
+       err = mlx5_core_access_reg(dev, in, sizeof(in), pvlc,
+                                  pvlc_size, MLX5_REG_PVLC, 0, 0);
+
+       return err;
+}
+
+int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
+                             u8 *vl_hw_cap, u8 local_port)
+{
+       u32 out[MLX5_ST_SZ_DW(pvlc_reg)];
+       int err;
+
+       err = mlx5_query_port_pvlc(dev, out, sizeof(out), local_port);
+       if (err)
+               return err;
+
+       *vl_hw_cap = MLX5_GET(pvlc_reg, out, vl_hw_cap);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_vl_hw_cap);
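
For reference, a minimal consumer of the new port helpers; port number 1 and the Ethernet overhead arithmetic are illustrative assumptions about how a caller might derive a hardware MTU from a software one:

/* Sketch: query the port's max MTU, then program a value within it.
 * The +18 (14-byte header + 4-byte FCS) is an assumed L2 allowance.
 */
static int example_program_mtu(struct mlx5_core_dev *dev, int sw_mtu)
{
	int max_mtu;
	int err;

	err = mlx5_query_port_max_mtu(dev, &max_mtu, 1);
	if (err)
		return err;

	if (sw_mtu + 18 > max_mtu)
		return -EINVAL;

	return mlx5_set_port_mtu(dev, sw_mtu + 18);
}
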
index dc7dbf7e9d98f28d83d55b6632ec44fe4275d9ce..8b494b5622631f3ecd5a625be835cd8116c29d50 100644
@@ -187,10 +187,17 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
        struct mlx5_destroy_qp_mbox_in din;
        struct mlx5_destroy_qp_mbox_out dout;
        int err;
+       void *qpc;
 
        memset(&out, 0, sizeof(out));
        in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_QP);
 
+       if (dev->issi) {
+               qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+               /* 0xffffff means we ask to work with cqe version 0 */
+               MLX5_SET(qpc, qpc, user_index, 0xffffff);
+       }
+
        err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
        if (err) {
                mlx5_core_warn(dev, "ret %d\n", err);
index f9d25dcd03c1e2616be6434cae3a9146d1e83df5..c48f504ccbeba67198dc74696110c7cde5e471c7 100644
@@ -37,6 +37,7 @@
 #include <linux/mlx5/srq.h>
 #include <rdma/ib_verbs.h>
 #include "mlx5_core.h"
+#include "transobj.h"
 
 void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type)
 {
@@ -62,6 +63,74 @@ void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type)
                complete(&srq->free);
 }
 
+static int get_pas_size(void *srqc)
+{
+       u32 log_page_size = MLX5_GET(srqc, srqc, log_page_size) + 12;
+       u32 log_srq_size  = MLX5_GET(srqc, srqc, log_srq_size);
+       u32 log_rq_stride = MLX5_GET(srqc, srqc, log_rq_stride);
+       u32 page_offset   = MLX5_GET(srqc, srqc, page_offset);
+       u32 po_quanta     = 1 << (log_page_size - 6);
+       u32 rq_sz         = 1 << (log_srq_size + 4 + log_rq_stride);
+       u32 page_size     = 1 << log_page_size;
+       u32 rq_sz_po      = rq_sz + (page_offset * po_quanta);
+       u32 rq_num_pas    = (rq_sz_po + page_size - 1) / page_size;
+
+       return rq_num_pas * sizeof(u64);
+}
+
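/*
 * Worked example for get_pas_size() above, with illustrative values:
 *   log_page_size field = 0              -> page_size = 1 << (0 + 12) = 4096 B
 *   log_srq_size = 10, log_rq_stride = 2 -> rq_sz = 1 << (10 + 4 + 2) = 65536 B
 *   page_offset = 0                      -> rq_sz_po = 65536
 *   rq_num_pas = (65536 + 4095) / 4096 = 16 -> PAS array = 16 * 8 = 128 bytes
 */
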
+static void rmpc_srqc_reformat(void *srqc, void *rmpc, bool srqc_to_rmpc)
+{
+       void *wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
+
+       if (srqc_to_rmpc) {
+               switch (MLX5_GET(srqc, srqc, state)) {
+               case MLX5_SRQC_STATE_GOOD:
+                       MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
+                       break;
+               case MLX5_SRQC_STATE_ERROR:
+                       MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_ERR);
+                       break;
+               default:
+                       pr_warn("%s: %d: Unknown srq state = 0x%x\n", __func__,
+                               __LINE__, MLX5_GET(srqc, srqc, state));
+                       MLX5_SET(rmpc, rmpc, state, MLX5_GET(srqc, srqc, state));
+               }
+
+               MLX5_SET(wq,   wq, wq_signature,  MLX5_GET(srqc,  srqc, wq_signature));
+               MLX5_SET(wq,   wq, log_wq_pg_sz,  MLX5_GET(srqc,  srqc, log_page_size));
+               MLX5_SET(wq,   wq, log_wq_stride, MLX5_GET(srqc,  srqc, log_rq_stride) + 4);
+               MLX5_SET(wq,   wq, log_wq_sz,     MLX5_GET(srqc,  srqc, log_srq_size));
+               MLX5_SET(wq,   wq, page_offset,   MLX5_GET(srqc,  srqc, page_offset));
+               MLX5_SET(wq,   wq, lwm,           MLX5_GET(srqc,  srqc, lwm));
+               MLX5_SET(wq,   wq, pd,            MLX5_GET(srqc,  srqc, pd));
+               MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(srqc,     srqc, dbr_addr));
+       } else {
+               switch (MLX5_GET(rmpc, rmpc, state)) {
+               case MLX5_RMPC_STATE_RDY:
+                       MLX5_SET(srqc, srqc, state, MLX5_SRQC_STATE_GOOD);
+                       break;
+               case MLX5_RMPC_STATE_ERR:
+                       MLX5_SET(srqc, srqc, state, MLX5_SRQC_STATE_ERROR);
+                       break;
+               default:
+                       pr_warn("%s: %d: Unknown rmp state = 0x%x\n",
+                               __func__, __LINE__,
+                               MLX5_GET(rmpc, rmpc, state));
+                       MLX5_SET(srqc, srqc, state,
+                                MLX5_GET(rmpc, rmpc, state));
+               }
+
+               MLX5_SET(srqc,   srqc, wq_signature,   MLX5_GET(wq,   wq, wq_signature));
+               MLX5_SET(srqc,   srqc, log_page_size,  MLX5_GET(wq,   wq, log_wq_pg_sz));
+               MLX5_SET(srqc,   srqc, log_rq_stride,  MLX5_GET(wq,   wq, log_wq_stride) - 4);
+               MLX5_SET(srqc,   srqc, log_srq_size,   MLX5_GET(wq,   wq, log_wq_sz));
+               MLX5_SET(srqc,   srqc, page_offset,    MLX5_GET(wq,   wq, page_offset));
+               MLX5_SET(srqc,   srqc, lwm,            MLX5_GET(wq,   wq, lwm));
+               MLX5_SET(srqc,   srqc, pd,             MLX5_GET(wq,   wq, pd));
+               MLX5_SET64(srqc, srqc, dbr_addr,       MLX5_GET64(wq, wq, dbr_addr));
+       }
+}
+
 struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn)
 {
        struct mlx5_srq_table *table = &dev->priv.srq_table;
@@ -79,26 +148,311 @@ struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn)
 }
 EXPORT_SYMBOL(mlx5_core_get_srq);
 
-int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
-                        struct mlx5_create_srq_mbox_in *in, int inlen)
+static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+                         struct mlx5_create_srq_mbox_in *in, int inlen)
 {
        struct mlx5_create_srq_mbox_out out;
-       struct mlx5_srq_table *table = &dev->priv.srq_table;
-       struct mlx5_destroy_srq_mbox_in din;
-       struct mlx5_destroy_srq_mbox_out dout;
        int err;
 
        memset(&out, 0, sizeof(out));
+
        in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_SRQ);
-       err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
-       if (err)
-               return err;
 
-       if (out.hdr.status)
-               return mlx5_cmd_status_to_err(&out.hdr);
+       err = mlx5_cmd_exec_check_status(dev, (u32 *)in, inlen, (u32 *)(&out),
+                                        sizeof(out));
 
        srq->srqn = be32_to_cpu(out.srqn) & 0xffffff;
 
+       return err;
+}
+
+static int destroy_srq_cmd(struct mlx5_core_dev *dev,
+                          struct mlx5_core_srq *srq)
+{
+       struct mlx5_destroy_srq_mbox_in in;
+       struct mlx5_destroy_srq_mbox_out out;
+
+       memset(&in, 0, sizeof(in));
+       memset(&out, 0, sizeof(out));
+       in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ);
+       in.srqn = cpu_to_be32(srq->srqn);
+
+       return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), sizeof(in),
+                                         (u32 *)(&out), sizeof(out));
+}
+
+static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+                      u16 lwm, int is_srq)
+{
+       struct mlx5_arm_srq_mbox_in     in;
+       struct mlx5_arm_srq_mbox_out    out;
+
+       memset(&in, 0, sizeof(in));
+       memset(&out, 0, sizeof(out));
+
+       in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ARM_RQ);
+       in.hdr.opmod = cpu_to_be16(!!is_srq);
+       in.srqn = cpu_to_be32(srq->srqn);
+       in.lwm = cpu_to_be16(lwm);
+
+       return mlx5_cmd_exec_check_status(dev, (u32 *)(&in),
+                                         sizeof(in), (u32 *)(&out),
+                                         sizeof(out));
+}
+
+static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+                        struct mlx5_query_srq_mbox_out *out)
+{
+       struct mlx5_query_srq_mbox_in in;
+
+       memset(&in, 0, sizeof(in));
+
+       in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SRQ);
+       in.srqn = cpu_to_be32(srq->srqn);
+
+       return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), sizeof(in),
+                                         (u32 *)out, sizeof(*out));
+}
+
+static int create_xrc_srq_cmd(struct mlx5_core_dev *dev,
+                             struct mlx5_core_srq *srq,
+                             struct mlx5_create_srq_mbox_in *in,
+                             int srq_inlen)
+{
+       u32 create_out[MLX5_ST_SZ_DW(create_xrc_srq_out)];
+       void *create_in;
+       void *srqc;
+       void *xrc_srqc;
+       void *pas;
+       int pas_size;
+       int inlen;
+       int err;
+
+       srqc      = MLX5_ADDR_OF(create_srq_in, in, srq_context_entry);
+       pas_size  = get_pas_size(srqc);
+       inlen     = MLX5_ST_SZ_BYTES(create_xrc_srq_in) + pas_size;
+       create_in = mlx5_vzalloc(inlen);
+       if (!create_in)
+               return -ENOMEM;
+
+       xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, create_in,
+                               xrc_srq_context_entry);
+       pas      = MLX5_ADDR_OF(create_xrc_srq_in, create_in, pas);
+
+       memcpy(xrc_srqc, srqc, MLX5_ST_SZ_BYTES(srqc));
+       memcpy(pas, in->pas, pas_size);
+       /* 0xffffff means we ask to work with cqe version 0 */
+       MLX5_SET(xrc_srqc,          xrc_srqc,  user_index, 0xffffff);
+       MLX5_SET(create_xrc_srq_in, create_in, opcode,
+                MLX5_CMD_OP_CREATE_XRC_SRQ);
+
+       memset(create_out, 0, sizeof(create_out));
+       err = mlx5_cmd_exec_check_status(dev, create_in, inlen, create_out,
+                                        sizeof(create_out));
+       if (err)
+               goto out;
+
+       srq->srqn = MLX5_GET(create_xrc_srq_out, create_out, xrc_srqn);
+out:
+       kvfree(create_in);
+       return err;
+}
+
+static int destroy_xrc_srq_cmd(struct mlx5_core_dev *dev,
+                              struct mlx5_core_srq *srq)
+{
+       u32 xrcsrq_in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)];
+       u32 xrcsrq_out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)];
+
+       memset(xrcsrq_in, 0, sizeof(xrcsrq_in));
+       memset(xrcsrq_out, 0, sizeof(xrcsrq_out));
+
+       MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, opcode,
+                MLX5_CMD_OP_DESTROY_XRC_SRQ);
+       MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
+
+       return mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in),
+                                         xrcsrq_out, sizeof(xrcsrq_out));
+}
+
+static int arm_xrc_srq_cmd(struct mlx5_core_dev *dev,
+                          struct mlx5_core_srq *srq, u16 lwm)
+{
+       u32 xrcsrq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)];
+       u32 xrcsrq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)];
+
+       memset(xrcsrq_in, 0, sizeof(xrcsrq_in));
+       memset(xrcsrq_out, 0, sizeof(xrcsrq_out));
+
+       MLX5_SET(arm_xrc_srq_in, xrcsrq_in, opcode,   MLX5_CMD_OP_ARM_XRC_SRQ);
+       MLX5_SET(arm_xrc_srq_in, xrcsrq_in, op_mod,
+                MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
+       MLX5_SET(arm_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
+       MLX5_SET(arm_xrc_srq_in, xrcsrq_in, lwm,      lwm);
+
+       return mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in),
+                                         xrcsrq_out, sizeof(xrcsrq_out));
+}
+
+static int query_xrc_srq_cmd(struct mlx5_core_dev *dev,
+                            struct mlx5_core_srq *srq,
+                            struct mlx5_query_srq_mbox_out *out)
+{
+       u32 xrcsrq_in[MLX5_ST_SZ_DW(query_xrc_srq_in)];
+       u32 *xrcsrq_out;
+       void *srqc;
+       void *xrc_srqc;
+       int err;
+
+       xrcsrq_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_xrc_srq_out));
+       if (!xrcsrq_out)
+               return -ENOMEM;
+       memset(xrcsrq_in, 0, sizeof(xrcsrq_in));
+
+       MLX5_SET(query_xrc_srq_in, xrcsrq_in, opcode,
+                MLX5_CMD_OP_QUERY_XRC_SRQ);
+       MLX5_SET(query_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
+       err = mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in),
+                                        xrcsrq_out,
+                                        MLX5_ST_SZ_BYTES(query_xrc_srq_out));
+       if (err)
+               goto out;
+
+       xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, xrcsrq_out,
+                               xrc_srq_context_entry);
+       srqc = MLX5_ADDR_OF(query_srq_out, out, srq_context_entry);
+       memcpy(srqc, xrc_srqc, MLX5_ST_SZ_BYTES(srqc));
+
+out:
+       kvfree(xrcsrq_out);
+       return err;
+}
+
+static int create_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+                         struct mlx5_create_srq_mbox_in *in, int srq_inlen)
+{
+       void *create_in;
+       void *rmpc;
+       void *srqc;
+       int pas_size;
+       int inlen;
+       int err;
+
+       srqc = MLX5_ADDR_OF(create_srq_in, in, srq_context_entry);
+       pas_size = get_pas_size(srqc);
+       inlen = MLX5_ST_SZ_BYTES(create_rmp_in) + pas_size;
+       create_in = mlx5_vzalloc(inlen);
+       if (!create_in)
+               return -ENOMEM;
+
+       rmpc = MLX5_ADDR_OF(create_rmp_in, create_in, ctx);
+
+       memcpy(MLX5_ADDR_OF(rmpc, rmpc, wq.pas), in->pas, pas_size);
+       rmpc_srqc_reformat(srqc, rmpc, true);
+
+       err = mlx5_core_create_rmp(dev, create_in, inlen, &srq->srqn);
+
+       kvfree(create_in);
+       return err;
+}
+
+static int destroy_rmp_cmd(struct mlx5_core_dev *dev,
+                          struct mlx5_core_srq *srq)
+{
+       return mlx5_core_destroy_rmp(dev, srq->srqn);
+}
+
+static int arm_rmp_cmd(struct mlx5_core_dev *dev,
+                      struct mlx5_core_srq *srq,
+                      u16 lwm)
+{
+       void *in;
+       void *rmpc;
+       void *wq;
+       void *bitmask;
+       int err;
+
+       in = mlx5_vzalloc(MLX5_ST_SZ_BYTES(modify_rmp_in));
+       if (!in)
+               return -ENOMEM;
+
+       rmpc    = MLX5_ADDR_OF(modify_rmp_in,   in,   ctx);
+       bitmask = MLX5_ADDR_OF(modify_rmp_in,   in,   bitmask);
+       wq      = MLX5_ADDR_OF(rmpc,            rmpc, wq);
+
+       MLX5_SET(modify_rmp_in, in,      rmp_state, MLX5_RMPC_STATE_RDY);
+       MLX5_SET(modify_rmp_in, in,      rmpn,      srq->srqn);
+       MLX5_SET(wq,            wq,      lwm,       lwm);
+       MLX5_SET(rmp_bitmask,   bitmask, lwm,       1);
+       MLX5_SET(rmpc,          rmpc,    state,     MLX5_RMPC_STATE_RDY);
+
+       err = mlx5_core_modify_rmp(dev, in, MLX5_ST_SZ_BYTES(modify_rmp_in));
+
+       kvfree(in);
+       return err;
+}
+
+static int query_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+                        struct mlx5_query_srq_mbox_out *out)
+{
+       u32 *rmp_out;
+       void *rmpc;
+       void *srqc;
+       int err;
+
+       rmp_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_rmp_out));
+       if (!rmp_out)
+               return -ENOMEM;
+
+       err = mlx5_core_query_rmp(dev, srq->srqn, rmp_out);
+       if (err)
+               goto out;
+
+       srqc = MLX5_ADDR_OF(query_srq_out, out,     srq_context_entry);
+       rmpc = MLX5_ADDR_OF(query_rmp_out, rmp_out, rmp_context);
+       rmpc_srqc_reformat(srqc, rmpc, false);
+
+out:
+       kvfree(rmp_out);
+       return err;
+}
+
+static int create_srq_split(struct mlx5_core_dev *dev,
+                           struct mlx5_core_srq *srq,
+                           struct mlx5_create_srq_mbox_in *in,
+                           int inlen, int is_xrc)
+{
+       if (!dev->issi)
+               return create_srq_cmd(dev, srq, in, inlen);
+       else if (srq->common.res == MLX5_RES_XSRQ)
+               return create_xrc_srq_cmd(dev, srq, in, inlen);
+       else
+               return create_rmp_cmd(dev, srq, in, inlen);
+}
+
+static int destroy_srq_split(struct mlx5_core_dev *dev,
+                            struct mlx5_core_srq *srq)
+{
+       if (!dev->issi)
+               return destroy_srq_cmd(dev, srq);
+       else if (srq->common.res == MLX5_RES_XSRQ)
+               return destroy_xrc_srq_cmd(dev, srq);
+       else
+               return destroy_rmp_cmd(dev, srq);
+}
+
+int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+                        struct mlx5_create_srq_mbox_in *in, int inlen,
+                        int is_xrc)
+{
+       int err;
+       struct mlx5_srq_table *table = &dev->priv.srq_table;
+
+       srq->common.res = is_xrc ? MLX5_RES_XSRQ : MLX5_RES_SRQ;
+
+       err = create_srq_split(dev, srq, in, inlen, is_xrc);
+       if (err)
+               return err;
+
        atomic_set(&srq->refcount, 1);
        init_completion(&srq->free);
 
@@ -107,25 +461,20 @@ int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
        spin_unlock_irq(&table->lock);
        if (err) {
                mlx5_core_warn(dev, "err %d, srqn 0x%x\n", err, srq->srqn);
-               goto err_cmd;
+               goto err_destroy_srq_split;
        }
 
        return 0;
 
-err_cmd:
-       memset(&din, 0, sizeof(din));
-       memset(&dout, 0, sizeof(dout));
-       din.srqn = cpu_to_be32(srq->srqn);
-       din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ);
-       mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout));
+err_destroy_srq_split:
+       destroy_srq_split(dev, srq);
+
        return err;
 }
 EXPORT_SYMBOL(mlx5_core_create_srq);
 
 int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
 {
-       struct mlx5_destroy_srq_mbox_in in;
-       struct mlx5_destroy_srq_mbox_out out;
        struct mlx5_srq_table *table = &dev->priv.srq_table;
        struct mlx5_core_srq *tmp;
        int err;
@@ -142,17 +491,10 @@ int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
                return -EINVAL;
        }
 
-       memset(&in, 0, sizeof(in));
-       memset(&out, 0, sizeof(out));
-       in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ);
-       in.srqn = cpu_to_be32(srq->srqn);
-       err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+       err = destroy_srq_split(dev, srq);
        if (err)
                return err;
 
-       if (out.hdr.status)
-               return mlx5_cmd_status_to_err(&out.hdr);
-
        if (atomic_dec_and_test(&srq->refcount))
                complete(&srq->free);
        wait_for_completion(&srq->free);
@@ -164,48 +506,24 @@ EXPORT_SYMBOL(mlx5_core_destroy_srq);
 int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
                        struct mlx5_query_srq_mbox_out *out)
 {
-       struct mlx5_query_srq_mbox_in in;
-       int err;
-
-       memset(&in, 0, sizeof(in));
-       memset(out, 0, sizeof(*out));
-
-       in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SRQ);
-       in.srqn = cpu_to_be32(srq->srqn);
-       err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out));
-       if (err)
-               return err;
-
-       if (out->hdr.status)
-               return mlx5_cmd_status_to_err(&out->hdr);
-
-       return err;
+       if (!dev->issi)
+               return query_srq_cmd(dev, srq, out);
+       else if (srq->common.res == MLX5_RES_XSRQ)
+               return query_xrc_srq_cmd(dev, srq, out);
+       else
+               return query_rmp_cmd(dev, srq, out);
 }
 EXPORT_SYMBOL(mlx5_core_query_srq);
 
 int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
                      u16 lwm, int is_srq)
 {
-       struct mlx5_arm_srq_mbox_in     in;
-       struct mlx5_arm_srq_mbox_out    out;
-       int err;
-
-       memset(&in, 0, sizeof(in));
-       memset(&out, 0, sizeof(out));
-
-       in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ARM_RQ);
-       in.hdr.opmod = cpu_to_be16(!!is_srq);
-       in.srqn = cpu_to_be32(srq->srqn);
-       in.lwm = cpu_to_be16(lwm);
-
-       err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
-       if (err)
-               return err;
-
-       if (out.hdr.status)
-               return mlx5_cmd_status_to_err(&out.hdr);
-
-       return err;
+       if (!dev->issi)
+               return arm_srq_cmd(dev, srq, lwm, is_srq);
+       else if (srq->common.res == MLX5_RES_XSRQ)
+               return arm_xrc_srq_cmd(dev, srq, lwm);
+       else
+               return arm_rmp_cmd(dev, srq, lwm);
 }
 EXPORT_SYMBOL(mlx5_core_arm_srq);
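The three verb dispatchers above give one entry point per operation; whether the firmware sees a legacy SRQ mailbox command, an XRC_SRQ command, or an RMP emulation is decided once, from dev->issi and the resource type. A minimal caller sketch follows (mlx5_fill_srq_in() is a hypothetical mailbox builder; real callers also append PAS entries to the input):

/* Sketch only: a plain (non-XRC) SRQ driven through the split API. */
static int srq_lifecycle_example(struct mlx5_core_dev *dev,
				 struct mlx5_core_srq *srq)
{
	struct mlx5_create_srq_mbox_in *in;
	int inlen = sizeof(*in);		/* + PAS array in real code */
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	mlx5_fill_srq_in(in);			/* hypothetical helper */
	err = mlx5_core_create_srq(dev, srq, in, inlen, 0 /* !XRC */);
	kvfree(in);
	if (err)
		return err;

	err = mlx5_core_arm_srq(dev, srq, 16, 1);	/* lwm = 16 WQEs */
	if (err)
		mlx5_core_destroy_srq(dev, srq);

	return err;
}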
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
new file mode 100644 (file)
index 0000000..7a12028
--- /dev/null
@@ -0,0 +1,325 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mlx5/driver.h>
+#include "mlx5_core.h"
+#include "transobj.h"
+
+int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqn)
+{
+       u32 out[MLX5_ST_SZ_DW(create_rq_out)];
+       int err;
+
+       MLX5_SET(create_rq_in, in, opcode, MLX5_CMD_OP_CREATE_RQ);
+
+       memset(out, 0, sizeof(out));
+       err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+       if (!err)
+               *rqn = MLX5_GET(create_rq_out, out, rqn);
+
+       return err;
+}
+
+int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen)
+{
+       u32 out[MLX5_ST_SZ_DW(modify_rq_out)];
+
+       MLX5_SET(modify_rq_in, in, rqn, rqn);
+       MLX5_SET(modify_rq_in, in, opcode, MLX5_CMD_OP_MODIFY_RQ);
+
+       memset(out, 0, sizeof(out));
+       return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+}
+
+void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn)
+{
+       u32 in[MLX5_ST_SZ_DW(destroy_rq_in)];
+       u32 out[MLX5_ST_SZ_DW(destroy_rq_out)];
+
+       memset(in, 0, sizeof(in));
+
+       MLX5_SET(destroy_rq_in, in, opcode, MLX5_CMD_OP_DESTROY_RQ);
+       MLX5_SET(destroy_rq_in, in, rqn, rqn);
+
+       mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn)
+{
+       u32 out[MLX5_ST_SZ_DW(create_sq_out)];
+       int err;
+
+       MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ);
+
+       memset(out, 0, sizeof(out));
+       err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+       if (!err)
+               *sqn = MLX5_GET(create_sq_out, out, sqn);
+
+       return err;
+}
+
+int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen)
+{
+       u32 out[MLX5_ST_SZ_DW(modify_sq_out)];
+
+       MLX5_SET(modify_sq_in, in, sqn, sqn);
+       MLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ);
+
+       memset(out, 0, sizeof(out));
+       return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+}
+
+void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn)
+{
+       u32 in[MLX5_ST_SZ_DW(destroy_sq_in)];
+       u32 out[MLX5_ST_SZ_DW(destroy_sq_out)];
+
+       memset(in, 0, sizeof(in));
+
+       MLX5_SET(destroy_sq_in, in, opcode, MLX5_CMD_OP_DESTROY_SQ);
+       MLX5_SET(destroy_sq_in, in, sqn, sqn);
+
+       mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+}
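Every create/modify/destroy helper in this file follows the same contract: the caller fills the object context, the helper stamps the opcode, executes the mailbox, checks status, and (for create) hands back the object number. A hedged sketch of the usual RST-to-RDY transition right after creation; the modify_rq_in/rqc field names and MLX5_RQC_STATE_* constants follow the mlx5_ifc layout and are assumptions here:

/* Sketch: create an RQ and move it from RST to RDY.  rq_in is the
 * caller-built create_rq mailbox (WQ pages, CQN, etc. already set). */
static int rq_to_ready(struct mlx5_core_dev *dev, u32 *rq_in, int inlen)
{
	u32 in[MLX5_ST_SZ_DW(modify_rq_in)] = {0};
	void *rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
	u32 rqn;
	int err;

	err = mlx5_core_create_rq(dev, rq_in, inlen, &rqn);
	if (err)
		return err;

	MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RST);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);

	err = mlx5_core_modify_rq(dev, rqn, in, sizeof(in));
	if (err)
		mlx5_core_destroy_rq(dev, rqn);

	return err;
}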
+
+int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
+                        u32 *tirn)
+{
+       u32 out[MLX5_ST_SZ_DW(create_tir_out)];
+       int err;
+
+       MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
+
+       memset(out, 0, sizeof(out));
+       err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+       if (!err)
+               *tirn = MLX5_GET(create_tir_out, out, tirn);
+
+       return err;
+}
+
+void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn)
+{
+       u32 in[MLX5_ST_SZ_DW(destroy_tir_in)];
+       u32 out[MLX5_ST_SZ_DW(destroy_tir_out)];
+
+       memset(in, 0, sizeof(in));
+
+       MLX5_SET(destroy_tir_in, in, opcode, MLX5_CMD_OP_DESTROY_TIR);
+       MLX5_SET(destroy_tir_in, in, tirn, tirn);
+
+       mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen,
+                        u32 *tisn)
+{
+       u32 out[MLX5_ST_SZ_DW(create_tis_out)];
+       int err;
+
+       MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);
+
+       memset(out, 0, sizeof(out));
+       err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+       if (!err)
+               *tisn = MLX5_GET(create_tis_out, out, tisn);
+
+       return err;
+}
+
+void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn)
+{
+       u32 in[MLX5_ST_SZ_DW(destroy_tis_in)];
+       u32 out[MLX5_ST_SZ_DW(destroy_tis_out)];
+
+       memset(in, 0, sizeof(in));
+
+       MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);
+       MLX5_SET(destroy_tis_in, in, tisn, tisn);
+
+       mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+}
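TIR and TIS objects pair the same way. Illustrative only, assuming the tisc.prio field name from mlx5_ifc:

/* Sketch: allocate a TIS for traffic class 0, then release it. */
static int tis_example(struct mlx5_core_dev *dev)
{
	u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
	u32 tisn;
	int err;

	MLX5_SET(tisc, MLX5_ADDR_OF(create_tis_in, in, ctx), prio, 0);

	err = mlx5_core_create_tis(dev, in, sizeof(in), &tisn);
	if (err)
		return err;

	/* ... attach SQs to tisn ... */

	mlx5_core_destroy_tis(dev, tisn);
	return 0;
}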
+
+int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen,
+                        u32 *rmpn)
+{
+       u32 out[MLX5_ST_SZ_DW(create_rmp_out)];
+       int err;
+
+       MLX5_SET(create_rmp_in, in, opcode, MLX5_CMD_OP_CREATE_RMP);
+
+       memset(out, 0, sizeof(out));
+       err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+       if (!err)
+               *rmpn = MLX5_GET(create_rmp_out, out, rmpn);
+
+       return err;
+}
+
+int mlx5_core_modify_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen)
+{
+       u32 out[MLX5_ST_SZ_DW(modify_rmp_out)];
+
+       MLX5_SET(modify_rmp_in, in, opcode, MLX5_CMD_OP_MODIFY_RMP);
+
+       memset(out, 0, sizeof(out));
+       return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+}
+
+int mlx5_core_destroy_rmp(struct mlx5_core_dev *dev, u32 rmpn)
+{
+       u32 in[MLX5_ST_SZ_DW(destroy_rmp_in)];
+       u32 out[MLX5_ST_SZ_DW(destroy_rmp_out)];
+
+       memset(in, 0, sizeof(in));
+
+       MLX5_SET(destroy_rmp_in, in, opcode, MLX5_CMD_OP_DESTROY_RMP);
+       MLX5_SET(destroy_rmp_in, in, rmpn, rmpn);
+
+       return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
+                                         sizeof(out));
+}
+
+int mlx5_core_query_rmp(struct mlx5_core_dev *dev, u32 rmpn, u32 *out)
+{
+       u32 in[MLX5_ST_SZ_DW(query_rmp_in)];
+       int outlen = MLX5_ST_SZ_BYTES(query_rmp_out);
+
+       memset(in, 0, sizeof(in));
+       MLX5_SET(query_rmp_in, in, opcode, MLX5_CMD_OP_QUERY_RMP);
+       MLX5_SET(query_rmp_in, in, rmpn,   rmpn);
+
+       return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen);
+}
+
+int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm)
+{
+       void *in;
+       void *rmpc;
+       void *wq;
+       void *bitmask;
+       int  err;
+
+       in = mlx5_vzalloc(MLX5_ST_SZ_BYTES(modify_rmp_in));
+       if (!in)
+               return -ENOMEM;
+
+       rmpc    = MLX5_ADDR_OF(modify_rmp_in,   in,   ctx);
+       bitmask = MLX5_ADDR_OF(modify_rmp_in,   in,   bitmask);
+       wq      = MLX5_ADDR_OF(rmpc,            rmpc, wq);
+
+       MLX5_SET(modify_rmp_in, in,      rmp_state, MLX5_RMPC_STATE_RDY);
+       MLX5_SET(modify_rmp_in, in,      rmpn,      rmpn);
+       MLX5_SET(wq,            wq,      lwm,       lwm);
+       MLX5_SET(rmp_bitmask,   bitmask, lwm,       1);
+       MLX5_SET(rmpc,          rmpc,    state,     MLX5_RMPC_STATE_RDY);
+
+       err = mlx5_core_modify_rmp(dev, in, MLX5_ST_SZ_BYTES(modify_rmp_in));
+
+       kvfree(in);
+
+       return err;
+}
+
+int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen,
+                         u32 *xsrqn)
+{
+       u32 out[MLX5_ST_SZ_DW(create_xrc_srq_out)];
+       int err;
+
+       MLX5_SET(create_xrc_srq_in, in, opcode, MLX5_CMD_OP_CREATE_XRC_SRQ);
+
+       memset(out, 0, sizeof(out));
+       err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+       if (!err)
+               *xsrqn = MLX5_GET(create_xrc_srq_out, out, xrc_srqn);
+
+       return err;
+}
+
+int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 xsrqn)
+{
+       u32 in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)];
+       u32 out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)];
+
+       memset(in, 0, sizeof(in));
+       memset(out, 0, sizeof(out));
+
+       MLX5_SET(destroy_xrc_srq_in, in, opcode,   MLX5_CMD_OP_DESTROY_XRC_SRQ);
+       MLX5_SET(destroy_xrc_srq_in, in, xrc_srqn, xsrqn);
+
+       return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
+                                         sizeof(out));
+}
+
+int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u32 *out)
+{
+       u32 in[MLX5_ST_SZ_DW(query_xrc_srq_in)];
+       void *srqc;
+       void *xrc_srqc;
+       int err;
+
+       memset(in, 0, sizeof(in));
+       MLX5_SET(query_xrc_srq_in, in, opcode,   MLX5_CMD_OP_QUERY_XRC_SRQ);
+       MLX5_SET(query_xrc_srq_in, in, xrc_srqn, xsrqn);
+
+       err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
+                                        MLX5_ST_SZ_BYTES(query_xrc_srq_out));
+       if (!err) {
+               xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, out,
+                                       xrc_srq_context_entry);
+               srqc = MLX5_ADDR_OF(query_srq_out, out, srq_context_entry);
+               memcpy(srqc, xrc_srqc, MLX5_ST_SZ_BYTES(srqc));
+       }
+
+       return err;
+}
+
+int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u16 lwm)
+{
+       u32 in[MLX5_ST_SZ_DW(arm_xrc_srq_in)];
+       u32 out[MLX5_ST_SZ_DW(arm_xrc_srq_out)];
+
+       memset(in, 0, sizeof(in));
+       memset(out, 0, sizeof(out));
+
+       MLX5_SET(arm_xrc_srq_in, in, opcode,   MLX5_CMD_OP_ARM_XRC_SRQ);
+       MLX5_SET(arm_xrc_srq_in, in, xrc_srqn, xsrqn);
+       MLX5_SET(arm_xrc_srq_in, in, lwm,      lwm);
+       MLX5_SET(arm_xrc_srq_in, in, op_mod,
+                MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
+
+       return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
+                                         sizeof(out));
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.h b/drivers/net/ethernet/mellanox/mlx5/core/transobj.h
new file mode 100644 (file)
index 0000000..90322c1
--- /dev/null
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __TRANSOBJ_H__
+#define __TRANSOBJ_H__
+
+int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen,
+                       u32 *rqn);
+int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen);
+void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn);
+int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen,
+                       u32 *sqn);
+int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen);
+void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn);
+int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
+                        u32 *tirn);
+void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn);
+int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen,
+                        u32 *tisn);
+void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn);
+int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen,
+                        u32 *rmpn);
+int mlx5_core_modify_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen);
+int mlx5_core_destroy_rmp(struct mlx5_core_dev *dev, u32 rmpn);
+int mlx5_core_query_rmp(struct mlx5_core_dev *dev, u32 rmpn, u32 *out);
+int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm);
+int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen,
+                         u32 *rmpn);
+int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 rmpn);
+int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u32 *out);
+int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm);
+
+#endif /* __TRANSOBJ_H__ */
index 5a89bb1d678a8e5ae6002a6ec9122bbd97d19085..9ef85873ceea8203655c8e63cdc4601d15088157 100644 (file)
@@ -175,12 +175,13 @@ int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari)
        for (i = 0; i < tot_uuars; i++) {
                bf = &uuari->bfs[i];
 
-               bf->buf_size = dev->caps.gen.bf_reg_size / 2;
+               bf->buf_size = (1 << MLX5_CAP_GEN(dev, log_bf_reg_size)) / 2;
                bf->uar = &uuari->uars[i / MLX5_BF_REGS_PER_PAGE];
                bf->regreg = uuari->uars[i / MLX5_BF_REGS_PER_PAGE].map;
                bf->reg = NULL; /* Add WC support */
-               bf->offset = (i % MLX5_BF_REGS_PER_PAGE) * dev->caps.gen.bf_reg_size +
-                       MLX5_BF_OFFSET;
+               bf->offset = (i % MLX5_BF_REGS_PER_PAGE) *
+                            (1 << MLX5_CAP_GEN(dev, log_bf_reg_size)) +
+                            MLX5_BF_OFFSET;
                bf->need_lock = need_uuar_lock(i);
                spin_lock_init(&bf->lock);
                spin_lock_init(&bf->lock32);
@@ -223,3 +224,40 @@ int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari)
 
        return 0;
 }
+
+int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
+{
+       phys_addr_t pfn;
+       phys_addr_t uar_bar_start;
+       int err;
+
+       err = mlx5_cmd_alloc_uar(mdev, &uar->index);
+       if (err) {
+               mlx5_core_warn(mdev, "mlx5_cmd_alloc_uar() failed, %d\n", err);
+               return err;
+       }
+
+       uar_bar_start = pci_resource_start(mdev->pdev, 0);
+       pfn           = (uar_bar_start >> PAGE_SHIFT) + uar->index;
+       uar->map      = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
+       if (!uar->map) {
+               err = -ENOMEM;
+               mlx5_core_warn(mdev, "ioremap() failed, %d\n", err);
+               goto err_free_uar;
+       }
+
+       return 0;
+
+err_free_uar:
+       mlx5_cmd_free_uar(mdev, uar->index);
+
+       return err;
+}
+EXPORT_SYMBOL(mlx5_alloc_map_uar);
+
+void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
+{
+       iounmap(uar->map);
+       mlx5_cmd_free_uar(mdev, uar->index);
+}
+EXPORT_SYMBOL(mlx5_unmap_free_uar);
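The two exports are strict inverses and are meant to be paired; a minimal sketch:

/* Sketch of the intended pairing. */
struct mlx5_uar uar;
int err;

err = mlx5_alloc_map_uar(mdev, &uar);
if (err)
	return err;

/* ... ring doorbells / blueflame through uar.map ... */

mlx5_unmap_free_uar(mdev, &uar);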
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
new file mode 100644 (file)
index 0000000..b94177e
--- /dev/null
@@ -0,0 +1,345 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include <linux/etherdevice.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/vport.h>
+#include "mlx5_core.h"
+
+u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod)
+{
+       u32 in[MLX5_ST_SZ_DW(query_vport_state_in)];
+       u32 out[MLX5_ST_SZ_DW(query_vport_state_out)];
+       int err;
+
+       memset(in, 0, sizeof(in));
+       memset(out, 0, sizeof(out));
+
+       MLX5_SET(query_vport_state_in, in, opcode,
+                MLX5_CMD_OP_QUERY_VPORT_STATE);
+       MLX5_SET(query_vport_state_in, in, op_mod, opmod);
+
+       err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out,
+                                        sizeof(out));
+       if (err)
+               mlx5_core_warn(mdev, "MLX5_CMD_OP_QUERY_VPORT_STATE failed\n");
+
+       return MLX5_GET(query_vport_state_out, out, state);
+}
+EXPORT_SYMBOL(mlx5_query_vport_state);
+
+void mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, u8 *addr)
+{
+       u32  in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
+       u32 *out;
+       int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
+       u8 *out_addr;
+
+       out = mlx5_vzalloc(outlen);
+       if (!out)
+               return;
+
+       out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out,
+                               nic_vport_context.permanent_address);
+
+       memset(in, 0, sizeof(in));
+
+       MLX5_SET(query_nic_vport_context_in, in, opcode,
+                MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
+
+       mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
+
+       ether_addr_copy(addr, &out_addr[2]);
+
+       kvfree(out);
+}
+EXPORT_SYMBOL(mlx5_query_nic_vport_mac_address);
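A hedged usage sketch for the two queries above; the op_mod constant name is taken from mlx5_ifc and should be treated as an assumption:

/* Sketch: fetch the NIC vport's link state and permanent MAC. */
u8 mac[ETH_ALEN];
u8 state;

state = mlx5_query_vport_state(mdev,
			       MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT);
mlx5_query_nic_vport_mac_address(mdev, mac);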
+
+int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
+                            u8 port_num, u16  vf_num, u16 gid_index,
+                            union ib_gid *gid)
+{
+       int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_in);
+       int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
+       int is_group_manager;
+       void *out = NULL;
+       void *in = NULL;
+       union ib_gid *tmp;
+       int tbsz;
+       int nout;
+       int err;
+
+       is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
+       tbsz = mlx5_get_gid_table_len(MLX5_CAP_GEN(dev, gid_table_size));
+       mlx5_core_dbg(dev, "vf_num %d, index %d, gid_table_size %d\n",
+                     vf_num, gid_index, tbsz);
+
+       if (gid_index > tbsz && gid_index != 0xffff)
+               return -EINVAL;
+
+       if (gid_index == 0xffff)
+               nout = tbsz;
+       else
+               nout = 1;
+
+       out_sz += nout * sizeof(*gid);
+
+       in = kzalloc(in_sz, GFP_KERNEL);
+       out = kzalloc(out_sz, GFP_KERNEL);
+       if (!in || !out) {
+               err = -ENOMEM;
+               goto out;
+       }
+
+       MLX5_SET(query_hca_vport_gid_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_GID);
+       if (other_vport) {
+               if (is_group_manager) {
+                       MLX5_SET(query_hca_vport_gid_in, in, vport_number, vf_num);
+                       MLX5_SET(query_hca_vport_gid_in, in, other_vport, 1);
+               } else {
+                       err = -EPERM;
+                       goto out;
+               }
+       }
+       MLX5_SET(query_hca_vport_gid_in, in, gid_index, gid_index);
+
+       if (MLX5_CAP_GEN(dev, num_ports) == 2)
+               MLX5_SET(query_hca_vport_gid_in, in, port_num, port_num);
+
+       err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
+       if (err)
+               goto out;
+
+       err = mlx5_cmd_status_to_err_v2(out);
+       if (err)
+               goto out;
+
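+       /* GID entries are appended after the fixed part of the mailbox */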
+       tmp = out + MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
+       gid->global.subnet_prefix = tmp->global.subnet_prefix;
+       gid->global.interface_id = tmp->global.interface_id;
+
+out:
+       kfree(in);
+       kfree(out);
+       return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid);
+
+int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
+                             u8 port_num, u16 vf_num, u16 pkey_index,
+                             u16 *pkey)
+{
+       int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_in);
+       int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_out);
+       int is_group_manager;
+       void *out = NULL;
+       void *in = NULL;
+       void *pkarr;
+       int nout;
+       int tbsz;
+       int err;
+       int i;
+
+       is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
+
+       tbsz = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size));
+       if (pkey_index > tbsz && pkey_index != 0xffff)
+               return -EINVAL;
+
+       if (pkey_index == 0xffff)
+               nout = tbsz;
+       else
+               nout = 1;
+
+       out_sz += nout * MLX5_ST_SZ_BYTES(pkey);
+
+       in = kzalloc(in_sz, GFP_KERNEL);
+       out = kzalloc(out_sz, GFP_KERNEL);
+       if (!in || !out) {
+               err = -ENOMEM;
+               goto out;
+       }
+
+       MLX5_SET(query_hca_vport_pkey_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY);
+       if (other_vport) {
+               if (is_group_manager) {
+                       MLX5_SET(query_hca_vport_pkey_in, in, vport_number, vf_num);
+                       MLX5_SET(query_hca_vport_pkey_in, in, other_vport, 1);
+               } else {
+                       err = -EPERM;
+                       goto out;
+               }
+       }
+       MLX5_SET(query_hca_vport_pkey_in, in, pkey_index, pkey_index);
+
+       if (MLX5_CAP_GEN(dev, num_ports) == 2)
+               MLX5_SET(query_hca_vport_pkey_in, in, port_num, port_num);
+
+       err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
+       if (err)
+               goto out;
+
+       err = mlx5_cmd_status_to_err_v2(out);
+       if (err)
+               goto out;
+
+       pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey);
+       for (i = 0; i < nout; i++, pkey++, pkarr += MLX5_ST_SZ_BYTES(pkey))
+               *pkey = MLX5_GET_PR(pkey, pkarr, pkey);
+
+out:
+       kfree(in);
+       kfree(out);
+       return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey);
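A short usage sketch: reading P_Key index 0 of the caller's own vport on port 1 (other_vport = 0, so vf_num is ignored):

u16 pkey;
int err;

err = mlx5_query_hca_vport_pkey(dev, 0, 1, 0, 0, &pkey);
if (err)
	return err;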
+
+int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
+                                u8 other_vport, u8 port_num,
+                                u16 vf_num,
+                                struct mlx5_hca_vport_context *rep)
+{
+       int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
+       u32 in[MLX5_ST_SZ_DW(query_hca_vport_context_in)];
+       int is_group_manager;
+       void *out;
+       void *ctx;
+       int err;
+
+       is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
+
+       memset(in, 0, sizeof(in));
+       out = kzalloc(out_sz, GFP_KERNEL);
+       if (!out)
+               return -ENOMEM;
+
+       MLX5_SET(query_hca_vport_context_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT);
+
+       if (other_vport) {
+               if (is_group_manager) {
+                       MLX5_SET(query_hca_vport_context_in, in, other_vport, 1);
+                       MLX5_SET(query_hca_vport_context_in, in, vport_number, vf_num);
+               } else {
+                       err = -EPERM;
+                       goto ex;
+               }
+       }
+
+       if (MLX5_CAP_GEN(dev, num_ports) == 2)
+               MLX5_SET(query_hca_vport_context_in, in, port_num, port_num);
+
+       err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
+       if (err)
+               goto ex;
+       err = mlx5_cmd_status_to_err_v2(out);
+       if (err)
+               goto ex;
+
+       ctx = MLX5_ADDR_OF(query_hca_vport_context_out, out, hca_vport_context);
+       rep->field_select = MLX5_GET_PR(hca_vport_context, ctx, field_select);
+       rep->sm_virt_aware = MLX5_GET_PR(hca_vport_context, ctx, sm_virt_aware);
+       rep->has_smi = MLX5_GET_PR(hca_vport_context, ctx, has_smi);
+       rep->has_raw = MLX5_GET_PR(hca_vport_context, ctx, has_raw);
+       rep->policy = MLX5_GET_PR(hca_vport_context, ctx, vport_state_policy);
+       rep->phys_state = MLX5_GET_PR(hca_vport_context, ctx,
+                                     port_physical_state);
+       rep->vport_state = MLX5_GET_PR(hca_vport_context, ctx, vport_state);
+       rep->port_physical_state = MLX5_GET_PR(hca_vport_context, ctx,
+                                              port_physical_state);
+       rep->port_guid = MLX5_GET64_PR(hca_vport_context, ctx, port_guid);
+       rep->node_guid = MLX5_GET64_PR(hca_vport_context, ctx, node_guid);
+       rep->cap_mask1 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask1);
+       rep->cap_mask1_perm = MLX5_GET_PR(hca_vport_context, ctx,
+                                         cap_mask1_field_select);
+       rep->cap_mask2 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask2);
+       rep->cap_mask2_perm = MLX5_GET_PR(hca_vport_context, ctx,
+                                         cap_mask2_field_select);
+       rep->lid = MLX5_GET_PR(hca_vport_context, ctx, lid);
+       rep->init_type_reply = MLX5_GET_PR(hca_vport_context, ctx,
+                                          init_type_reply);
+       rep->lmc = MLX5_GET_PR(hca_vport_context, ctx, lmc);
+       rep->subnet_timeout = MLX5_GET_PR(hca_vport_context, ctx,
+                                         subnet_timeout);
+       rep->sm_lid = MLX5_GET_PR(hca_vport_context, ctx, sm_lid);
+       rep->sm_sl = MLX5_GET_PR(hca_vport_context, ctx, sm_sl);
+       rep->qkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
+                                                 qkey_violation_counter);
+       rep->pkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
+                                                 pkey_violation_counter);
+       rep->grh_required = MLX5_GET_PR(hca_vport_context, ctx, grh_required);
+       rep->sys_image_guid = MLX5_GET64_PR(hca_vport_context, ctx,
+                                           system_image_guid);
+
+ex:
+       kfree(out);
+       return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_context);
+
+int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev,
+                                          u64 *sys_image_guid)
+{
+       struct mlx5_hca_vport_context *rep;
+       int err;
+
+       rep = kzalloc(sizeof(*rep), GFP_KERNEL);
+       if (!rep)
+               return -ENOMEM;
+
+       err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep);
+       if (!err)
+               *sys_image_guid = rep->sys_image_guid;
+
+       kfree(rep);
+       return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid);
+
+int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev,
+                                  u64 *node_guid)
+{
+       struct mlx5_hca_vport_context *rep;
+       int err;
+
+       rep = kzalloc(sizeof(*rep), GFP_KERNEL);
+       if (!rep)
+               return -ENOMEM;
+
+       err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep);
+       if (!err)
+               *node_guid = rep->node_guid;
+
+       kfree(rep);
+       return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid);
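Both wrappers are convenience self-queries (other_vport = 0, port 1) over mlx5_query_hca_vport_context(); a sketch:

u64 sys_guid, node_guid;
int err;

err = mlx5_query_hca_vport_system_image_guid(dev, &sys_guid);
if (!err)
	err = mlx5_query_hca_vport_node_guid(dev, &node_guid);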
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
new file mode 100644 (file)
index 0000000..8388411
--- /dev/null
@@ -0,0 +1,183 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mlx5/driver.h>
+#include "wq.h"
+#include "mlx5_core.h"
+
+u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq)
+{
+       return (u32)wq->sz_m1 + 1;
+}
+
+u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
+{
+       return wq->sz_m1 + 1;
+}
+
+u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq)
+{
+       return (u32)wq->sz_m1 + 1;
+}
+
+static u32 mlx5_wq_cyc_get_byte_size(struct mlx5_wq_cyc *wq)
+{
+       return mlx5_wq_cyc_get_size(wq) << wq->log_stride;
+}
+
+static u32 mlx5_cqwq_get_byte_size(struct mlx5_cqwq *wq)
+{
+       return mlx5_cqwq_get_size(wq) << wq->log_stride;
+}
+
+static u32 mlx5_wq_ll_get_byte_size(struct mlx5_wq_ll *wq)
+{
+       return mlx5_wq_ll_get_size(wq) << wq->log_stride;
+}
+
+int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+                      void *wqc, struct mlx5_wq_cyc *wq,
+                      struct mlx5_wq_ctrl *wq_ctrl)
+{
+       int err;
+
+       wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride);
+       wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1;
+
+       err = mlx5_db_alloc(mdev, &wq_ctrl->db);
+       if (err) {
+               mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
+               return err;
+       }
+
+       err = mlx5_buf_alloc(mdev, mlx5_wq_cyc_get_byte_size(wq), &wq_ctrl->buf);
+       if (err) {
+               mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
+               goto err_db_free;
+       }
+
+       wq->buf = wq_ctrl->buf.direct.buf;
+       wq->db  = wq_ctrl->db.db;
+
+       wq_ctrl->mdev = mdev;
+
+       return 0;
+
+err_db_free:
+       mlx5_db_free(mdev, &wq_ctrl->db);
+
+       return err;
+}
+
+int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+                    void *cqc, struct mlx5_cqwq *wq,
+                    struct mlx5_wq_ctrl *wq_ctrl)
+{
+       int err;
+
+       wq->log_stride = 6 + MLX5_GET(cqc, cqc, cqe_sz);
+       wq->log_sz = MLX5_GET(cqc, cqc, log_cq_size);
+       wq->sz_m1 = (1 << wq->log_sz) - 1;
+
+       err = mlx5_db_alloc(mdev, &wq_ctrl->db);
+       if (err) {
+               mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
+               return err;
+       }
+
+       err = mlx5_buf_alloc(mdev, mlx5_cqwq_get_byte_size(wq), &wq_ctrl->buf);
+       if (err) {
+               mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
+               goto err_db_free;
+       }
+
+       wq->buf = wq_ctrl->buf.direct.buf;
+       wq->db  = wq_ctrl->db.db;
+
+       wq_ctrl->mdev = mdev;
+
+       return 0;
+
+err_db_free:
+       mlx5_db_free(mdev, &wq_ctrl->db);
+
+       return err;
+}
+
+int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+                     void *wqc, struct mlx5_wq_ll *wq,
+                     struct mlx5_wq_ctrl *wq_ctrl)
+{
+       struct mlx5_wqe_srq_next_seg *next_seg;
+       int err;
+       int i;
+
+       wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride);
+       wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1;
+
+       err = mlx5_db_alloc(mdev, &wq_ctrl->db);
+       if (err) {
+               mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
+               return err;
+       }
+
+       err = mlx5_buf_alloc(mdev, mlx5_wq_ll_get_byte_size(wq), &wq_ctrl->buf);
+       if (err) {
+               mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
+               goto err_db_free;
+       }
+
+       wq->buf = wq_ctrl->buf.direct.buf;
+       wq->db  = wq_ctrl->db.db;
+
+       for (i = 0; i < wq->sz_m1; i++) {
+               next_seg = mlx5_wq_ll_get_wqe(wq, i);
+               next_seg->next_wqe_index = cpu_to_be16(i + 1);
+       }
+       next_seg = mlx5_wq_ll_get_wqe(wq, i);
+       wq->tail_next = &next_seg->next_wqe_index;
+
+       wq_ctrl->mdev = mdev;
+
+       return 0;
+
+err_db_free:
+       mlx5_db_free(mdev, &wq_ctrl->db);
+
+       return err;
+}
+
+void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl)
+{
+       mlx5_buf_free(wq_ctrl->mdev, &wq_ctrl->buf);
+       mlx5_db_free(wq_ctrl->mdev, &wq_ctrl->db);
+}
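A sketch of the intended pairing for the cyclic variant; wqc points at the wq sub-struct of an SQ/RQ create mailbox, already carrying log_wq_stride and log_wq_sz (param and mailbox setup elided):

struct mlx5_wq_cyc wq;
struct mlx5_wq_ctrl wq_ctrl;
int err;

err = mlx5_wq_cyc_create(mdev, &param, wqc, &wq, &wq_ctrl);
if (err)
	return err;

/* ... build WQEs via mlx5_wq_cyc_get_wqe(&wq, ix) ... */

mlx5_wq_destroy(&wq_ctrl);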
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
new file mode 100644 (file)
index 0000000..e0ddd69
--- /dev/null
@@ -0,0 +1,171 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __MLX5_WQ_H__
+#define __MLX5_WQ_H__
+
+#include <linux/mlx5/mlx5_ifc.h>
+
+struct mlx5_wq_param {
+       int             linear;
+       int             numa;
+};
+
+struct mlx5_wq_ctrl {
+       struct mlx5_core_dev    *mdev;
+       struct mlx5_buf         buf;
+       struct mlx5_db          db;
+};
+
+struct mlx5_wq_cyc {
+       void                    *buf;
+       __be32                  *db;
+       u16                     sz_m1;
+       u8                      log_stride;
+};
+
+struct mlx5_cqwq {
+       void                    *buf;
+       __be32                  *db;
+       u32                     sz_m1;
+       u32                     cc; /* consumer counter */
+       u8                      log_sz;
+       u8                      log_stride;
+};
+
+struct mlx5_wq_ll {
+       void                    *buf;
+       __be32                  *db;
+       __be16                  *tail_next;
+       u16                     sz_m1;
+       u16                     head;
+       u16                     wqe_ctr;
+       u16                     cur_sz;
+       u8                      log_stride;
+};
+
+int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+                      void *wqc, struct mlx5_wq_cyc *wq,
+                      struct mlx5_wq_ctrl *wq_ctrl);
+u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq);
+
+int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+                    void *cqc, struct mlx5_cqwq *wq,
+                    struct mlx5_wq_ctrl *wq_ctrl);
+u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq);
+
+int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+                     void *wqc, struct mlx5_wq_ll *wq,
+                     struct mlx5_wq_ctrl *wq_ctrl);
+u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq);
+
+void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl);
+
+static inline u16 mlx5_wq_cyc_ctr2ix(struct mlx5_wq_cyc *wq, u16 ctr)
+{
+       return ctr & wq->sz_m1;
+}
+
+static inline void *mlx5_wq_cyc_get_wqe(struct mlx5_wq_cyc *wq, u16 ix)
+{
+       return wq->buf + (ix << wq->log_stride);
+}
+
+static inline int mlx5_wq_cyc_cc_bigger(u16 cc1, u16 cc2)
+{
+       int equal   = (cc1 == cc2);
+       int smaller = 0x8000 & (cc1 - cc2);
+
+       return !equal && !smaller;
+}
+
+static inline u32 mlx5_cqwq_get_ci(struct mlx5_cqwq *wq)
+{
+       return wq->cc & wq->sz_m1;
+}
+
+static inline void *mlx5_cqwq_get_wqe(struct mlx5_cqwq *wq, u32 ix)
+{
+       return wq->buf + (ix << wq->log_stride);
+}
+
+static inline u32 mlx5_cqwq_get_wrap_cnt(struct mlx5_cqwq *wq)
+{
+       return wq->cc >> wq->log_sz;
+}
+
+static inline void mlx5_cqwq_pop(struct mlx5_cqwq *wq)
+{
+       wq->cc++;
+}
+
+static inline void mlx5_cqwq_update_db_record(struct mlx5_cqwq *wq)
+{
+       *wq->db = cpu_to_be32(wq->cc & 0xffffff);
+}
+
+static inline int mlx5_wq_ll_is_full(struct mlx5_wq_ll *wq)
+{
+       return wq->cur_sz == wq->sz_m1;
+}
+
+static inline int mlx5_wq_ll_is_empty(struct mlx5_wq_ll *wq)
+{
+       return !wq->cur_sz;
+}
+
+static inline void *mlx5_wq_ll_get_wqe(struct mlx5_wq_ll *wq, u16 ix)
+{
+       return wq->buf + (ix << wq->log_stride);
+}
+
+static inline void mlx5_wq_ll_push(struct mlx5_wq_ll *wq, u16 head_next)
+{
+       wq->head = head_next;
+       wq->wqe_ctr++;
+       wq->cur_sz++;
+}
+
+static inline void mlx5_wq_ll_pop(struct mlx5_wq_ll *wq, __be16 ix,
+                                 __be16 *next_tail_next)
+{
+       *wq->tail_next = ix;
+       wq->tail_next = next_tail_next;
+       wq->cur_sz--;
+}
+
+static inline void mlx5_wq_ll_update_db_record(struct mlx5_wq_ll *wq)
+{
+       *wq->db = cpu_to_be32(wq->wqe_ctr);
+}
+
+#endif /* __MLX5_WQ_H__ */
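Because every WQ size is a power of two, index math reduces to masking with sz_m1, and the CQ doorbell record publishes only the low 24 bits of the consumer counter. A consumer-loop sketch; cqe_sw_owned() is a hypothetical ownership test against the wrap count:

/* Poll completions until ownership flips back to hardware. */
while (cqe_sw_owned(mlx5_cqwq_get_wqe(&wq, mlx5_cqwq_get_ci(&wq)),
		    mlx5_cqwq_get_wrap_cnt(&wq))) {
	/* ... consume the CQE ... */
	mlx5_cqwq_pop(&wq);
}
mlx5_cqwq_update_db_record(&wq);	/* publish low 24 bits of cc */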
index 6f332ebdf3b5a812dd34be59a072e497af380975..75dc46c5fca29229b5734110526137fc2c9786cb 100644 (file)
@@ -6664,7 +6664,7 @@ static void mib_read_work(struct work_struct *work)
                                wake_up_interruptible(
                                        &hw_priv->counter[i].counter);
                        }
-               } else if (jiffies >= hw_priv->counter[i].time) {
+               } else if (time_after_eq(jiffies, hw_priv->counter[i].time)) {
                        /* Only read MIB counters when the port is connected. */
                        if (media_connected == mib->state)
                                hw_priv->counter[i].read = 1;
@@ -6689,7 +6689,7 @@ static void mib_monitor(unsigned long ptr)
 
        /* This is used to verify Wake-on-LAN is working. */
        if (hw_priv->pme_wait) {
-               if (hw_priv->pme_wait <= jiffies) {
+               if (time_is_before_eq_jiffies(hw_priv->pme_wait)) {
                        hw_clr_wol_pme_status(&hw_priv->hw);
                        hw_priv->pme_wait = 0;
                }
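Plain relational operators on jiffies give wrong answers once the counter wraps; the time_after_eq() family compares via a signed difference instead, which is why the two hunks above switch idioms. A minimal illustration (handle_timeout() is hypothetical):

unsigned long deadline = jiffies + msecs_to_jiffies(100);

/* still correct even if jiffies overflows before the test runs */
if (time_after_eq(jiffies, deadline))
	handle_timeout();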
index 81d0f1c86d6dee1243d5d65a7e499767698844eb..becbb5f1f5a7af9a22c20761da1dcf1f7db34ec1 100644 (file)
@@ -244,7 +244,6 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
                napi_gro_receive(&priv->napi, skb);
                rx++;
 
-               ndev->last_rx = jiffies;
                priv->stats.rx_packets++;
                priv->stats.rx_bytes += len;
                if (desc0 & RX_DESC0_MULTICAST)
index 1e0f72b65459027059cd85a3cece83ae6c24a1be..c28111749e1f9ba95c8b49231da95f97278f31e8 100644 (file)
@@ -5308,7 +5308,8 @@ static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
 
 /**
  * s2io_ethtool_sset - Sets different link parameters.
- * @sp : private member of the device structure, which is a pointer to the  * s2io_nic structure.
+ * @sp : private member of the device structure, which is a pointer to the
+ * s2io_nic structure.
  * @info: pointer to the structure with parameters given by ethtool to set
  * link information.
  * Description:
@@ -5793,7 +5794,8 @@ static void s2io_vpd_read(struct s2io_nic *nic)
 
 /**
  *  s2io_ethtool_geeprom  - reads the value stored in the Eeprom.
- *  @sp : private member of the device structure, which is a pointer to the *       s2io_nic structure.
+ *  @sp : private member of the device structure, which is a pointer to the
+ *  s2io_nic structure.
  *  @eeprom : pointer to the user level structure provided by ethtool,
  *  containing all relevant information.
  *  @data_buf : user defined value to be written into Eeprom.
index f221126a5c4e6789cb2630a07dc58b02f0676239..055f3763e577baf1fb4edb4d86f919f24edb95da 100644 (file)
@@ -1326,9 +1326,6 @@ struct qlcnic_eswitch {
 };
 
 
-/* Return codes for Error handling */
-#define QL_STATUS_INVALID_PARAM        -1
-
 #define MAX_BW                 100     /* % of link speed */
 #define MIN_BW                 1       /* % of link speed */
 #define MAX_VLAN_ID            4095
index 367f3976df5690d71ba845214423296913aa07dd..2f6cc423ab1dff21cf810c5e6737af58dbae0686 100644 (file)
@@ -1031,7 +1031,7 @@ int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
                pfn = pci_info[i].id;
 
                if (pfn >= ahw->max_vnic_func) {
-                       ret = QL_STATUS_INVALID_PARAM;
+                       ret = -EINVAL;
                        dev_err(&adapter->pdev->dev, "%s: Invalid function 0x%x, max 0x%x\n",
                                __func__, pfn, ahw->max_vnic_func);
                        goto err_eswitch;
index 59a721fba018249679bf15d0984b90e4835c155e..05c28f2c6df702ff5ccb7d8bf026f73a80cafe67 100644 (file)
@@ -24,8 +24,6 @@
 #include <linux/hwmon-sysfs.h>
 #endif
 
-#define QLC_STATUS_UNSUPPORTED_CMD     -2
-
 int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
 {
        return -EOPNOTSUPP;
@@ -166,7 +164,7 @@ static int qlcnic_82xx_store_beacon(struct qlcnic_adapter *adapter,
        u8 b_state, b_rate;
 
        if (len != sizeof(u16))
-               return QL_STATUS_INVALID_PARAM;
+               return -EINVAL;
 
        memcpy(&beacon, buf, sizeof(u16));
        err = qlcnic_validate_beacon(adapter, beacon, &b_state, &b_rate);
@@ -383,17 +381,17 @@ static int validate_pm_config(struct qlcnic_adapter *adapter,
                dest_pci_func = pm_cfg[i].dest_npar;
                src_index = qlcnic_is_valid_nic_func(adapter, src_pci_func);
                if (src_index < 0)
-                       return QL_STATUS_INVALID_PARAM;
+                       return -EINVAL;
 
                dest_index = qlcnic_is_valid_nic_func(adapter, dest_pci_func);
                if (dest_index < 0)
-                       return QL_STATUS_INVALID_PARAM;
+                       return -EINVAL;
 
                s_esw_id = adapter->npars[src_index].phy_port;
                d_esw_id = adapter->npars[dest_index].phy_port;
 
                if (s_esw_id != d_esw_id)
-                       return QL_STATUS_INVALID_PARAM;
+                       return -EINVAL;
        }
 
        return 0;
@@ -414,7 +412,7 @@ static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
        count   = size / sizeof(struct qlcnic_pm_func_cfg);
        rem     = size % sizeof(struct qlcnic_pm_func_cfg);
        if (rem)
-               return QL_STATUS_INVALID_PARAM;
+               return -EINVAL;
 
        qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
        pm_cfg = (struct qlcnic_pm_func_cfg *)buf;
@@ -427,7 +425,7 @@ static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
                action = !!pm_cfg[i].action;
                index = qlcnic_is_valid_nic_func(adapter, pci_func);
                if (index < 0)
-                       return QL_STATUS_INVALID_PARAM;
+                       return -EINVAL;
 
                id = adapter->npars[index].phy_port;
                ret = qlcnic_config_port_mirroring(adapter, id,
@@ -440,7 +438,7 @@ static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
                pci_func = pm_cfg[i].pci_func;
                index = qlcnic_is_valid_nic_func(adapter, pci_func);
                if (index < 0)
-                       return QL_STATUS_INVALID_PARAM;
+                       return -EINVAL;
                id = adapter->npars[index].phy_port;
                adapter->npars[index].enable_pm = !!pm_cfg[i].action;
                adapter->npars[index].dest_npar = id;
@@ -499,11 +497,11 @@ static int validate_esw_config(struct qlcnic_adapter *adapter,
        for (i = 0; i < count; i++) {
                pci_func = esw_cfg[i].pci_func;
                if (pci_func >= ahw->max_vnic_func)
-                       return QL_STATUS_INVALID_PARAM;
+                       return -EINVAL;
 
                if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
                        if (qlcnic_is_valid_nic_func(adapter, pci_func) < 0)
-                               return QL_STATUS_INVALID_PARAM;
+                               return -EINVAL;
 
                switch (esw_cfg[i].op_mode) {
                case QLCNIC_PORT_DEFAULTS:
@@ -517,25 +515,25 @@ static int validate_esw_config(struct qlcnic_adapter *adapter,
 
                        if (ret != QLCNIC_NON_PRIV_FUNC) {
                                if (esw_cfg[i].mac_anti_spoof != 0)
-                                       return QL_STATUS_INVALID_PARAM;
+                                       return -EINVAL;
                                if (esw_cfg[i].mac_override != 1)
-                                       return QL_STATUS_INVALID_PARAM;
+                                       return -EINVAL;
                                if (esw_cfg[i].promisc_mode != 1)
-                                       return QL_STATUS_INVALID_PARAM;
+                                       return -EINVAL;
                        }
                        break;
                case QLCNIC_ADD_VLAN:
                        if (!IS_VALID_VLAN(esw_cfg[i].vlan_id))
-                               return QL_STATUS_INVALID_PARAM;
+                               return -EINVAL;
                        if (!esw_cfg[i].op_type)
-                               return QL_STATUS_INVALID_PARAM;
+                               return -EINVAL;
                        break;
                case QLCNIC_DEL_VLAN:
                        if (!esw_cfg[i].op_type)
-                               return QL_STATUS_INVALID_PARAM;
+                               return -EINVAL;
                        break;
                default:
-                       return QL_STATUS_INVALID_PARAM;
+                       return -EINVAL;
                }
        }
 
@@ -559,7 +557,7 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct file *file,
        count   = size / sizeof(struct qlcnic_esw_func_cfg);
        rem     = size % sizeof(struct qlcnic_esw_func_cfg);
        if (rem)
-               return QL_STATUS_INVALID_PARAM;
+               return -EINVAL;
 
        qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
        esw_cfg = (struct qlcnic_esw_func_cfg *)buf;
@@ -570,7 +568,7 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct file *file,
        for (i = 0; i < count; i++) {
                if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
                        if (qlcnic_config_switch_port(adapter, &esw_cfg[i]))
-                               return QL_STATUS_INVALID_PARAM;
+                               return -EINVAL;
 
                if (adapter->ahw->pci_func != esw_cfg[i].pci_func)
                        continue;
@@ -604,7 +602,7 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct file *file,
                pci_func = esw_cfg[i].pci_func;
                index = qlcnic_is_valid_nic_func(adapter, pci_func);
                if (index < 0)
-                       return QL_STATUS_INVALID_PARAM;
+                       return -EINVAL;
                npar = &adapter->npars[index];
                switch (esw_cfg[i].op_mode) {
                case QLCNIC_PORT_DEFAULTS:
@@ -654,7 +652,7 @@ static ssize_t qlcnic_sysfs_read_esw_config(struct file *file,
 
                esw_cfg[pci_func].pci_func = pci_func;
                if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[pci_func]))
-                       return QL_STATUS_INVALID_PARAM;
+                       return -EINVAL;
        }
        qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
        return size;
@@ -669,11 +667,11 @@ static int validate_npar_config(struct qlcnic_adapter *adapter,
        for (i = 0; i < count; i++) {
                pci_func = np_cfg[i].pci_func;
                if (qlcnic_is_valid_nic_func(adapter, pci_func) < 0)
-                       return QL_STATUS_INVALID_PARAM;
+                       return -EINVAL;
 
                if (!IS_VALID_BW(np_cfg[i].min_bw) ||
                    !IS_VALID_BW(np_cfg[i].max_bw))
-                       return QL_STATUS_INVALID_PARAM;
+                       return -EINVAL;
        }
        return 0;
 }
@@ -694,7 +692,7 @@ static ssize_t qlcnic_sysfs_write_npar_config(struct file *file,
        count   = size / sizeof(struct qlcnic_npar_func_cfg);
        rem     = size % sizeof(struct qlcnic_npar_func_cfg);
        if (rem)
-               return QL_STATUS_INVALID_PARAM;
+               return -EINVAL;
 
        qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
        np_cfg = (struct qlcnic_npar_func_cfg *)buf;
@@ -717,7 +715,7 @@ static ssize_t qlcnic_sysfs_write_npar_config(struct file *file,
                        return ret;
                index = qlcnic_is_valid_nic_func(adapter, pci_func);
                if (index < 0)
-                       return QL_STATUS_INVALID_PARAM;
+                       return -EINVAL;
                adapter->npars[index].min_bw = nic_info.min_tx_bw;
                adapter->npars[index].max_bw = nic_info.max_tx_bw;
        }
@@ -784,13 +782,13 @@ static ssize_t qlcnic_sysfs_get_port_stats(struct file *file,
        int ret;
 
        if (qlcnic_83xx_check(adapter))
-               return QLC_STATUS_UNSUPPORTED_CMD;
+               return -EOPNOTSUPP;
 
        if (size != sizeof(struct qlcnic_esw_statistics))
-               return QL_STATUS_INVALID_PARAM;
+               return -EINVAL;
 
        if (offset >= adapter->ahw->max_vnic_func)
-               return QL_STATUS_INVALID_PARAM;
+               return -EINVAL;
 
        memset(&port_stats, 0, size);
        ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
@@ -819,13 +817,13 @@ static ssize_t qlcnic_sysfs_get_esw_stats(struct file *file,
        int ret;
 
        if (qlcnic_83xx_check(adapter))
-               return QLC_STATUS_UNSUPPORTED_CMD;
+               return -EOPNOTSUPP;
 
        if (size != sizeof(struct qlcnic_esw_statistics))
-               return QL_STATUS_INVALID_PARAM;
+               return -EINVAL;
 
        if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
-               return QL_STATUS_INVALID_PARAM;
+               return -EINVAL;
 
        memset(&esw_stats, 0, size);
        ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
@@ -853,10 +851,10 @@ static ssize_t qlcnic_sysfs_clear_esw_stats(struct file *file,
        int ret;
 
        if (qlcnic_83xx_check(adapter))
-               return QLC_STATUS_UNSUPPORTED_CMD;
+               return -EOPNOTSUPP;
 
        if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
-               return QL_STATUS_INVALID_PARAM;
+               return -EINVAL;
 
        ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
                                     QLCNIC_QUERY_RX_COUNTER);
@@ -883,10 +881,10 @@ static ssize_t qlcnic_sysfs_clear_port_stats(struct file *file,
        int ret;
 
        if (qlcnic_83xx_check(adapter))
-               return QLC_STATUS_UNSUPPORTED_CMD;
+               return -EOPNOTSUPP;
 
        if (offset >= adapter->ahw->max_vnic_func)
-               return QL_STATUS_INVALID_PARAM;
+               return -EINVAL;
 
        ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
                                     QLCNIC_QUERY_RX_COUNTER);
@@ -953,9 +951,7 @@ static ssize_t qlcnic_83xx_sysfs_flash_read_handler(struct file *filp,
        struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
 
        if (!size)
-               return QL_STATUS_INVALID_PARAM;
-       if (!buf)
-               return QL_STATUS_INVALID_PARAM;
+               return -EINVAL;
 
        count = size / sizeof(u32);
 
@@ -1132,9 +1128,6 @@ static ssize_t qlcnic_83xx_sysfs_flash_write_handler(struct file *filp,
        struct device *dev = container_of(kobj, struct device, kobj);
        struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
 
-       if (!buf)
-               return QL_STATUS_INVALID_PARAM;
-
        ret = kstrtoul(buf, 16, &data);
 
        switch (data) {
index 25800a1dedcb9fbe0635e80386521dc789575fba..02b7115b6aaa64add8632094f6f0dfe354a89b7d 100644 (file)
@@ -3871,9 +3871,6 @@ static int ql_adapter_reset(struct ql_adapter *qdev)
                return status;
        }
 
-       end_jiffies = jiffies +
-               max((unsigned long)1, usecs_to_jiffies(30));
-
        /* If the bit is set, skip the mailbox command and clear the
         * bit; otherwise we are in the normal reset process.
         */
@@ -3888,6 +3885,7 @@ static int ql_adapter_reset(struct ql_adapter *qdev)
 
        ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
 
+       end_jiffies = jiffies + usecs_to_jiffies(30);
        do {
                value = ql_read32(qdev, RST_FO);
                if ((value & RST_FO_FR) == 0)
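
Two things happen in this qlge hunk: the deadline is now computed immediately before the polling loop, so the optional mailbox command above can no longer eat into the 30 usec budget before polling even starts, and the max() with 1 is dropped because usecs_to_jiffies() already rounds any nonzero delay up to at least one jiffy. A self-contained sketch of the compute-deadline-then-poll shape, with a stand-in for the RST_FO register read:

    #include <stdio.h>
    #include <time.h>

    /* Stand-in for ql_read32(qdev, RST_FO); the bit clears on poll 3. */
    static unsigned int fake_read_rst_fo(void)
    {
        static int polls;
        return ++polls < 3 ? 0x8000 : 0;    /* RST_FO_FR-like bit */
    }

    int main(void)
    {
        struct timespec now, deadline;

        /* Compute the deadline right before polling, as the patch does,
         * so earlier work cannot consume the timeout budget.
         */
        clock_gettime(CLOCK_MONOTONIC, &deadline);
        deadline.tv_nsec += 30 * 1000;      /* 30 usec budget */
        if (deadline.tv_nsec >= 1000000000L) {
            deadline.tv_nsec -= 1000000000L;
            deadline.tv_sec++;
        }

        do {
            if (!(fake_read_rst_fo() & 0x8000)) {
                puts("reset completed");
                return 0;
            }
            clock_gettime(CLOCK_MONOTONIC, &now);
        } while (now.tv_sec < deadline.tv_sec ||
                 (now.tv_sec == deadline.tv_sec &&
                  now.tv_nsec < deadline.tv_nsec));

        puts("reset timed out");
        return 1;
    }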
index 6af028d5f9bcbdcc3aae303a114fd64d87986eed..2f87909f51866f1b219552d3c3f062f9338a32fc 100644 (file)
@@ -839,7 +839,7 @@ static const struct of_device_id qca_spi_of_match[] = {
 MODULE_DEVICE_TABLE(of, qca_spi_of_match);
 
 static int
-qca_spi_probe(struct spi_device *spi_device)
+qca_spi_probe(struct spi_device *spi)
 {
        struct qcaspi *qca = NULL;
        struct net_device *qcaspi_devs = NULL;
@@ -847,52 +847,52 @@ qca_spi_probe(struct spi_device *spi_device)
        u16 signature;
        const char *mac;
 
-       if (!spi_device->dev.of_node) {
-               dev_err(&spi_device->dev, "Missing device tree\n");
+       if (!spi->dev.of_node) {
+               dev_err(&spi->dev, "Missing device tree\n");
                return -EINVAL;
        }
 
-       legacy_mode = of_property_read_bool(spi_device->dev.of_node,
+       legacy_mode = of_property_read_bool(spi->dev.of_node,
                                            "qca,legacy-mode");
 
        if (qcaspi_clkspeed == 0) {
-               if (spi_device->max_speed_hz)
-                       qcaspi_clkspeed = spi_device->max_speed_hz;
+               if (spi->max_speed_hz)
+                       qcaspi_clkspeed = spi->max_speed_hz;
                else
                        qcaspi_clkspeed = QCASPI_CLK_SPEED;
        }
 
        if ((qcaspi_clkspeed < QCASPI_CLK_SPEED_MIN) ||
            (qcaspi_clkspeed > QCASPI_CLK_SPEED_MAX)) {
-               dev_info(&spi_device->dev, "Invalid clkspeed: %d\n",
+               dev_info(&spi->dev, "Invalid clkspeed: %d\n",
                         qcaspi_clkspeed);
                return -EINVAL;
        }
 
        if ((qcaspi_burst_len < QCASPI_BURST_LEN_MIN) ||
            (qcaspi_burst_len > QCASPI_BURST_LEN_MAX)) {
-               dev_info(&spi_device->dev, "Invalid burst len: %d\n",
+               dev_info(&spi->dev, "Invalid burst len: %d\n",
                         qcaspi_burst_len);
                return -EINVAL;
        }
 
        if ((qcaspi_pluggable < QCASPI_PLUGGABLE_MIN) ||
            (qcaspi_pluggable > QCASPI_PLUGGABLE_MAX)) {
-               dev_info(&spi_device->dev, "Invalid pluggable: %d\n",
+               dev_info(&spi->dev, "Invalid pluggable: %d\n",
                         qcaspi_pluggable);
                return -EINVAL;
        }
 
-       dev_info(&spi_device->dev, "ver=%s, clkspeed=%d, burst_len=%d, pluggable=%d\n",
+       dev_info(&spi->dev, "ver=%s, clkspeed=%d, burst_len=%d, pluggable=%d\n",
                 QCASPI_DRV_VERSION,
                 qcaspi_clkspeed,
                 qcaspi_burst_len,
                 qcaspi_pluggable);
 
-       spi_device->mode = SPI_MODE_3;
-       spi_device->max_speed_hz = qcaspi_clkspeed;
-       if (spi_setup(spi_device) < 0) {
-               dev_err(&spi_device->dev, "Unable to setup SPI device\n");
+       spi->mode = SPI_MODE_3;
+       spi->max_speed_hz = qcaspi_clkspeed;
+       if (spi_setup(spi) < 0) {
+               dev_err(&spi->dev, "Unable to setup SPI device\n");
                return -EFAULT;
        }
 
@@ -905,23 +905,23 @@ qca_spi_probe(struct spi_device *spi_device)
        qca = netdev_priv(qcaspi_devs);
        if (!qca) {
                free_netdev(qcaspi_devs);
-               dev_err(&spi_device->dev, "Fail to retrieve private structure\n");
+               dev_err(&spi->dev, "Failed to retrieve private structure\n");
                return -ENOMEM;
        }
        qca->net_dev = qcaspi_devs;
-       qca->spi_dev = spi_device;
+       qca->spi_dev = spi;
        qca->legacy_mode = legacy_mode;
 
-       spi_set_drvdata(spi_device, qcaspi_devs);
+       spi_set_drvdata(spi, qcaspi_devs);
 
-       mac = of_get_mac_address(spi_device->dev.of_node);
+       mac = of_get_mac_address(spi->dev.of_node);
 
        if (mac)
                ether_addr_copy(qca->net_dev->dev_addr, mac);
 
        if (!is_valid_ether_addr(qca->net_dev->dev_addr)) {
                eth_hw_addr_random(qca->net_dev);
-               dev_info(&spi_device->dev, "Using random MAC address: %pM\n",
+               dev_info(&spi->dev, "Using random MAC address: %pM\n",
                         qca->net_dev->dev_addr);
        }
 
@@ -932,7 +932,7 @@ qca_spi_probe(struct spi_device *spi_device)
                qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature);
 
                if (signature != QCASPI_GOOD_SIGNATURE) {
-                       dev_err(&spi_device->dev, "Invalid signature (0x%04X)\n",
+                       dev_err(&spi->dev, "Invalid signature (0x%04X)\n",
                                signature);
                        free_netdev(qcaspi_devs);
                        return -EFAULT;
@@ -940,7 +940,7 @@ qca_spi_probe(struct spi_device *spi_device)
        }
 
        if (register_netdev(qcaspi_devs)) {
-               dev_info(&spi_device->dev, "Unable to register net device %s\n",
+               dev_info(&spi->dev, "Unable to register net device %s\n",
                         qcaspi_devs->name);
                free_netdev(qcaspi_devs);
                return -EFAULT;
@@ -952,9 +952,9 @@ qca_spi_probe(struct spi_device *spi_device)
 }
 
 static int
-qca_spi_remove(struct spi_device *spi_device)
+qca_spi_remove(struct spi_device *spi)
 {
-       struct net_device *qcaspi_devs = spi_get_drvdata(spi_device);
+       struct net_device *qcaspi_devs = spi_get_drvdata(spi);
        struct qcaspi *qca = netdev_priv(qcaspi_devs);
 
        qcaspi_remove_device_debugfs(qca);
index cf98cc9bbc8dc9d57545bbbe25592f6878fcf324..819289e5be4f726c13ce30c5c5d8df2f45e6a32a 100644 (file)
@@ -181,7 +181,7 @@ struct rocker_desc_info {
        size_t data_size;
        size_t tlv_size;
        struct rocker_desc *desc;
-       DEFINE_DMA_UNMAP_ADDR(mapaddr);
+       dma_addr_t mapaddr;
 };
 
 struct rocker_dma_ring_info {
@@ -225,6 +225,7 @@ struct rocker_port {
        struct napi_struct napi_rx;
        struct rocker_dma_ring_info tx_ring;
        struct rocker_dma_ring_info rx_ring;
+       struct list_head trans_mem;
 };
 
 struct rocker {
@@ -236,21 +237,21 @@ struct rocker {
        struct {
                u64 id;
        } hw;
-       spinlock_t cmd_ring_lock;
+       spinlock_t cmd_ring_lock;               /* for cmd ring accesses */
        struct rocker_dma_ring_info cmd_ring;
        struct rocker_dma_ring_info event_ring;
        DECLARE_HASHTABLE(flow_tbl, 16);
-       spinlock_t flow_tbl_lock;
+       spinlock_t flow_tbl_lock;               /* for flow tbl accesses */
        u64 flow_tbl_next_cookie;
        DECLARE_HASHTABLE(group_tbl, 16);
-       spinlock_t group_tbl_lock;
+       spinlock_t group_tbl_lock;              /* for group tbl accesses */
        DECLARE_HASHTABLE(fdb_tbl, 16);
-       spinlock_t fdb_tbl_lock;
+       spinlock_t fdb_tbl_lock;                /* for fdb tbl accesses */
        unsigned long internal_vlan_bitmap[ROCKER_INTERNAL_VLAN_BITMAP_LEN];
        DECLARE_HASHTABLE(internal_vlan_tbl, 8);
-       spinlock_t internal_vlan_tbl_lock;
+       spinlock_t internal_vlan_tbl_lock;      /* for vlan tbl accesses */
        DECLARE_HASHTABLE(neigh_tbl, 16);
-       spinlock_t neigh_tbl_lock;
+       spinlock_t neigh_tbl_lock;              /* for neigh tbl accesses */
        u32 neigh_tbl_next_index;
 };
 
@@ -294,7 +295,7 @@ static bool rocker_vlan_id_is_internal(__be16 vlan_id)
        return (_vlan_id >= start && _vlan_id <= end);
 }
 
-static __be16 rocker_port_vid_to_vlan(struct rocker_port *rocker_port,
+static __be16 rocker_port_vid_to_vlan(const struct rocker_port *rocker_port,
                                      u16 vid, bool *pop_vlan)
 {
        __be16 vlan_id;
@@ -311,7 +312,7 @@ static __be16 rocker_port_vid_to_vlan(struct rocker_port *rocker_port,
        return vlan_id;
 }
 
-static u16 rocker_port_vlan_to_vid(struct rocker_port *rocker_port,
+static u16 rocker_port_vlan_to_vid(const struct rocker_port *rocker_port,
                                   __be16 vlan_id)
 {
        if (rocker_vlan_id_is_internal(vlan_id))
@@ -320,21 +321,87 @@ static u16 rocker_port_vlan_to_vid(struct rocker_port *rocker_port,
        return ntohs(vlan_id);
 }
 
-static bool rocker_port_is_bridged(struct rocker_port *rocker_port)
+static bool rocker_port_is_bridged(const struct rocker_port *rocker_port)
 {
        return !!rocker_port->bridge_dev;
 }
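
A theme running through the rest of this rocker patch begins here: helpers that only read the structures they are handed, like the bridged-port test above, gain const-qualified parameters, so the compiler rejects accidental writes and the helpers become callable from code that itself holds only a const pointer. A minimal stand-alone illustration (types and names are illustrative, not rocker's):

    #include <stdbool.h>
    #include <stddef.h>

    struct example_port {
        void *bridge_dev;
        int mtu;
    };

    /* Read-only helper: const both documents and enforces that the
     * function never mutates the port it is given.
     */
    static bool example_port_is_bridged(const struct example_port *port)
    {
        return port->bridge_dev != NULL;
    }

    /* A writer keeps the non-const signature. */
    static void example_port_set_mtu(struct example_port *port, int mtu)
    {
        port->mtu = mtu;    /* would not compile on a const pointer */
    }

    int main(void)
    {
        struct example_port p = { .bridge_dev = NULL, .mtu = 1500 };

        example_port_set_mtu(&p, 9000);
        return example_port_is_bridged(&p) ? 1 : 0;
    }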
 
+static void *__rocker_port_mem_alloc(struct rocker_port *rocker_port,
+                                    enum switchdev_trans trans, size_t size)
+{
+       struct list_head *elem = NULL;
+
+       /* If in transaction prepare phase, allocate the memory
+        * and enqueue it on a per-port list.  If in transaction
+        * commit phase, dequeue the memory from the per-port list
+        * rather than re-allocating the memory.  The idea is the
+        * driver code paths for prepare and commit are identical,
+        * so the memory allocated in the prepare phase is the
+        * memory used in the commit phase.
+        */
+
+       switch (trans) {
+       case SWITCHDEV_TRANS_PREPARE:
+               elem = kzalloc(size + sizeof(*elem), GFP_KERNEL);
+               if (!elem)
+                       return NULL;
+               list_add_tail(elem, &rocker_port->trans_mem);
+               break;
+       case SWITCHDEV_TRANS_COMMIT:
+               BUG_ON(list_empty(&rocker_port->trans_mem));
+               elem = rocker_port->trans_mem.next;
+               list_del_init(elem);
+               break;
+       case SWITCHDEV_TRANS_NONE:
+               elem = kzalloc(size + sizeof(*elem), GFP_KERNEL);
+               if (elem)
+                       INIT_LIST_HEAD(elem);
+               break;
+       default:
+               break;
+       }
+
+       return elem ? elem + 1 : NULL;
+}
+
+static void *rocker_port_kzalloc(struct rocker_port *rocker_port,
+                                enum switchdev_trans trans, size_t size)
+{
+       return __rocker_port_mem_alloc(rocker_port, trans, size);
+}
+
+static void *rocker_port_kcalloc(struct rocker_port *rocker_port,
+                                enum switchdev_trans trans, size_t n,
+                                size_t size)
+{
+       return __rocker_port_mem_alloc(rocker_port, trans, n * size);
+}
+
+static void rocker_port_kfree(enum switchdev_trans trans, const void *mem)
+{
+       struct list_head *elem;
+
+       /* Frees are ignored if in transaction prepare phase.  The
+        * memory remains on the per-port list until freed in the
+        * commit phase.
+        */
+
+       if (trans == SWITCHDEV_TRANS_PREPARE)
+               return;
+
+       elem = (struct list_head *)mem - 1;
+       BUG_ON(!list_empty(elem));
+       kfree(elem);
+}
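
This allocator is the core of the switchdev prepare/commit support: PREPARE allocates and queues each element on rocker_port->trans_mem, COMMIT dequeues instead of allocating, and rocker_port_kfree() deliberately ignores frees during PREPARE so the memory survives into the commit pass. Because both phases execute the same code path, the n-th allocation during COMMIT receives exactly the memory obtained by the n-th allocation during PREPARE, which is why the commit pass can no longer fail with -ENOMEM. The same queue-then-replay idea, reduced to a self-contained userspace sketch with illustrative names:

    #include <stdio.h>
    #include <stdlib.h>

    enum trans_phase { TRANS_NONE, TRANS_PREPARE, TRANS_COMMIT };

    struct mem_elem {
        struct mem_elem *next;
        /* caller's payload follows the header */
    };

    /* FIFO standing in for rocker_port->trans_mem (list_add_tail + next). */
    static struct mem_elem *trans_head;
    static struct mem_elem **trans_tail = &trans_head;

    static void *trans_alloc(enum trans_phase phase, size_t size)
    {
        struct mem_elem *elem;

        if (phase == TRANS_COMMIT) {
            /* Replay: dequeue what the PREPARE pass queued.  The
             * driver BUG_ONs if the list is empty at this point.
             */
            elem = trans_head;
            trans_head = elem->next;
            if (!trans_head)
                trans_tail = &trans_head;
            return elem + 1;
        }

        elem = calloc(1, sizeof(*elem) + size);
        if (!elem)
            return NULL;
        if (phase == TRANS_PREPARE) {    /* queue for the commit pass */
            *trans_tail = elem;
            trans_tail = &elem->next;
        }
        return elem + 1;
    }

    static void trans_free(enum trans_phase phase, void *mem)
    {
        if (phase == TRANS_PREPARE)      /* keep it alive for COMMIT */
            return;
        free((struct mem_elem *)mem - 1);
    }

    int main(void)
    {
        void *a = trans_alloc(TRANS_PREPARE, 32);   /* may fail */
        void *b = trans_alloc(TRANS_COMMIT, 32);    /* cannot fail */

        printf("commit reuses prepare's memory: %s\n",
               a == b ? "yes" : "no");
        trans_free(TRANS_COMMIT, b);
        return 0;
    }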
+
 struct rocker_wait {
        wait_queue_head_t wait;
        bool done;
-       bool nowait;
 };
 
 static void rocker_wait_reset(struct rocker_wait *wait)
 {
        wait->done = false;
-       wait->nowait = false;
 }
 
 static void rocker_wait_init(struct rocker_wait *wait)
@@ -343,20 +410,22 @@ static void rocker_wait_init(struct rocker_wait *wait)
        rocker_wait_reset(wait);
 }
 
-static struct rocker_wait *rocker_wait_create(gfp_t gfp)
+static struct rocker_wait *rocker_wait_create(struct rocker_port *rocker_port,
+                                             enum switchdev_trans trans)
 {
        struct rocker_wait *wait;
 
-       wait = kmalloc(sizeof(*wait), gfp);
+       wait = rocker_port_kzalloc(rocker_port, trans, sizeof(*wait));
        if (!wait)
                return NULL;
        rocker_wait_init(wait);
        return wait;
 }
 
-static void rocker_wait_destroy(struct rocker_wait *work)
+static void rocker_wait_destroy(enum switchdev_trans trans,
+                               struct rocker_wait *wait)
 {
-       kfree(work);
+       rocker_port_kfree(trans, wait);
 }
 
 static bool rocker_wait_event_timeout(struct rocker_wait *wait,
@@ -374,18 +443,18 @@ static void rocker_wait_wake_up(struct rocker_wait *wait)
        wake_up(&wait->wait);
 }
 
-static u32 rocker_msix_vector(struct rocker *rocker, unsigned int vector)
+static u32 rocker_msix_vector(const struct rocker *rocker, unsigned int vector)
 {
        return rocker->msix_entries[vector].vector;
 }
 
-static u32 rocker_msix_tx_vector(struct rocker_port *rocker_port)
+static u32 rocker_msix_tx_vector(const struct rocker_port *rocker_port)
 {
        return rocker_msix_vector(rocker_port->rocker,
                                  ROCKER_MSIX_VEC_TX(rocker_port->port_number));
 }
 
-static u32 rocker_msix_rx_vector(struct rocker_port *rocker_port)
+static u32 rocker_msix_rx_vector(const struct rocker_port *rocker_port)
 {
        return rocker_msix_vector(rocker_port->rocker,
                                  ROCKER_MSIX_VEC_RX(rocker_port->port_number));
@@ -404,9 +473,9 @@ static u32 rocker_msix_rx_vector(struct rocker_port *rocker_port)
  * HW basic testing functions
  *****************************/
 
-static int rocker_reg_test(struct rocker *rocker)
+static int rocker_reg_test(const struct rocker *rocker)
 {
-       struct pci_dev *pdev = rocker->pdev;
+       const struct pci_dev *pdev = rocker->pdev;
        u64 test_reg;
        u64 rnd;
 
@@ -434,12 +503,12 @@ static int rocker_reg_test(struct rocker *rocker)
        return 0;
 }
 
-static int rocker_dma_test_one(struct rocker *rocker, struct rocker_wait *wait,
-                              u32 test_type, dma_addr_t dma_handle,
-                              unsigned char *buf, unsigned char *expect,
-                              size_t size)
+static int rocker_dma_test_one(const struct rocker *rocker,
+                              struct rocker_wait *wait, u32 test_type,
+                              dma_addr_t dma_handle, const unsigned char *buf,
+                              const unsigned char *expect, size_t size)
 {
-       struct pci_dev *pdev = rocker->pdev;
+       const struct pci_dev *pdev = rocker->pdev;
        int i;
 
        rocker_wait_reset(wait);
@@ -463,7 +532,7 @@ static int rocker_dma_test_one(struct rocker *rocker, struct rocker_wait *wait,
 #define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4)
 #define ROCKER_TEST_DMA_FILL_PATTERN 0x96
 
-static int rocker_dma_test_offset(struct rocker *rocker,
+static int rocker_dma_test_offset(const struct rocker *rocker,
                                  struct rocker_wait *wait, int offset)
 {
        struct pci_dev *pdev = rocker->pdev;
@@ -523,7 +592,8 @@ free_alloc:
        return err;
 }
 
-static int rocker_dma_test(struct rocker *rocker, struct rocker_wait *wait)
+static int rocker_dma_test(const struct rocker *rocker,
+                          struct rocker_wait *wait)
 {
        int i;
        int err;
@@ -545,9 +615,9 @@ static irqreturn_t rocker_test_irq_handler(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-static int rocker_basic_hw_test(struct rocker *rocker)
+static int rocker_basic_hw_test(const struct rocker *rocker)
 {
-       struct pci_dev *pdev = rocker->pdev;
+       const struct pci_dev *pdev = rocker->pdev;
        struct rocker_wait wait;
        int err;
 
@@ -680,7 +750,7 @@ static u64 rocker_tlv_get_u64(const struct rocker_tlv *tlv)
        return *(u64 *) rocker_tlv_data(tlv);
 }
 
-static void rocker_tlv_parse(struct rocker_tlv **tb, int maxtype,
+static void rocker_tlv_parse(const struct rocker_tlv **tb, int maxtype,
                             const char *buf, int buf_len)
 {
        const struct rocker_tlv *tlv;
@@ -693,19 +763,19 @@ static void rocker_tlv_parse(struct rocker_tlv **tb, int maxtype,
                u32 type = rocker_tlv_type(tlv);
 
                if (type > 0 && type <= maxtype)
-                       tb[type] = (struct rocker_tlv *) tlv;
+                       tb[type] = tlv;
        }
 }
 
-static void rocker_tlv_parse_nested(struct rocker_tlv **tb, int maxtype,
+static void rocker_tlv_parse_nested(const struct rocker_tlv **tb, int maxtype,
                                    const struct rocker_tlv *tlv)
 {
        rocker_tlv_parse(tb, maxtype, rocker_tlv_data(tlv),
                         rocker_tlv_len(tlv));
 }
 
-static void rocker_tlv_parse_desc(struct rocker_tlv **tb, int maxtype,
-                                 struct rocker_desc_info *desc_info)
+static void rocker_tlv_parse_desc(const struct rocker_tlv **tb, int maxtype,
+                                 const struct rocker_desc_info *desc_info)
 {
        rocker_tlv_parse(tb, maxtype, desc_info->data,
                         desc_info->desc->tlv_size);
@@ -790,9 +860,9 @@ static void rocker_tlv_nest_end(struct rocker_desc_info *desc_info,
 }
 
 static void rocker_tlv_nest_cancel(struct rocker_desc_info *desc_info,
-                                  struct rocker_tlv *start)
+                                  const struct rocker_tlv *start)
 {
-       desc_info->tlv_size = (char *) start - desc_info->data;
+       desc_info->tlv_size = (const char *) start - desc_info->data;
 }
 
 /******************************************
@@ -804,7 +874,7 @@ static u32 __pos_inc(u32 pos, size_t limit)
        return ++pos == limit ? 0 : pos;
 }
 
-static int rocker_desc_err(struct rocker_desc_info *desc_info)
+static int rocker_desc_err(const struct rocker_desc_info *desc_info)
 {
        int err = desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN;
 
@@ -832,31 +902,31 @@ static int rocker_desc_err(struct rocker_desc_info *desc_info)
        return -EINVAL;
 }
 
-static void rocker_desc_gen_clear(struct rocker_desc_info *desc_info)
+static void rocker_desc_gen_clear(const struct rocker_desc_info *desc_info)
 {
        desc_info->desc->comp_err &= ~ROCKER_DMA_DESC_COMP_ERR_GEN;
 }
 
-static bool rocker_desc_gen(struct rocker_desc_info *desc_info)
+static bool rocker_desc_gen(const struct rocker_desc_info *desc_info)
 {
        u32 comp_err = desc_info->desc->comp_err;
 
        return comp_err & ROCKER_DMA_DESC_COMP_ERR_GEN ? true : false;
 }
 
-static void *rocker_desc_cookie_ptr_get(struct rocker_desc_info *desc_info)
+static void *rocker_desc_cookie_ptr_get(const struct rocker_desc_info *desc_info)
 {
        return (void *)(uintptr_t)desc_info->desc->cookie;
 }
 
-static void rocker_desc_cookie_ptr_set(struct rocker_desc_info *desc_info,
+static void rocker_desc_cookie_ptr_set(const struct rocker_desc_info *desc_info,
                                       void *ptr)
 {
        desc_info->desc->cookie = (uintptr_t) ptr;
 }
 
 static struct rocker_desc_info *
-rocker_desc_head_get(struct rocker_dma_ring_info *info)
+rocker_desc_head_get(const struct rocker_dma_ring_info *info)
 {
        static struct rocker_desc_info *desc_info;
        u32 head = __pos_inc(info->head, info->size);
@@ -868,15 +938,15 @@ rocker_desc_head_get(struct rocker_dma_ring_info *info)
        return desc_info;
 }
 
-static void rocker_desc_commit(struct rocker_desc_info *desc_info)
+static void rocker_desc_commit(const struct rocker_desc_info *desc_info)
 {
        desc_info->desc->buf_size = desc_info->data_size;
        desc_info->desc->tlv_size = desc_info->tlv_size;
 }
 
-static void rocker_desc_head_set(struct rocker *rocker,
+static void rocker_desc_head_set(const struct rocker *rocker,
                                 struct rocker_dma_ring_info *info,
-                                struct rocker_desc_info *desc_info)
+                                const struct rocker_desc_info *desc_info)
 {
        u32 head = __pos_inc(info->head, info->size);
 
@@ -901,8 +971,8 @@ rocker_desc_tail_get(struct rocker_dma_ring_info *info)
        return desc_info;
 }
 
-static void rocker_dma_ring_credits_set(struct rocker *rocker,
-                                       struct rocker_dma_ring_info *info,
+static void rocker_dma_ring_credits_set(const struct rocker *rocker,
+                                       const struct rocker_dma_ring_info *info,
                                        u32 credits)
 {
        if (credits)
@@ -915,7 +985,7 @@ static unsigned long rocker_dma_ring_size_fix(size_t size)
                   min(roundup_pow_of_two(size), ROCKER_DMA_SIZE_MAX));
 }
 
-static int rocker_dma_ring_create(struct rocker *rocker,
+static int rocker_dma_ring_create(const struct rocker *rocker,
                                  unsigned int type,
                                  size_t size,
                                  struct rocker_dma_ring_info *info)
@@ -951,8 +1021,8 @@ static int rocker_dma_ring_create(struct rocker *rocker,
        return 0;
 }
 
-static void rocker_dma_ring_destroy(struct rocker *rocker,
-                                   struct rocker_dma_ring_info *info)
+static void rocker_dma_ring_destroy(const struct rocker *rocker,
+                                   const struct rocker_dma_ring_info *info)
 {
        rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0);
 
@@ -962,7 +1032,7 @@ static void rocker_dma_ring_destroy(struct rocker *rocker,
        kfree(info->desc_info);
 }
 
-static void rocker_dma_ring_pass_to_producer(struct rocker *rocker,
+static void rocker_dma_ring_pass_to_producer(const struct rocker *rocker,
                                             struct rocker_dma_ring_info *info)
 {
        int i;
@@ -977,8 +1047,8 @@ static void rocker_dma_ring_pass_to_producer(struct rocker *rocker,
        rocker_desc_commit(&info->desc_info[i]);
 }
 
-static int rocker_dma_ring_bufs_alloc(struct rocker *rocker,
-                                     struct rocker_dma_ring_info *info,
+static int rocker_dma_ring_bufs_alloc(const struct rocker *rocker,
+                                     const struct rocker_dma_ring_info *info,
                                      int direction, size_t buf_size)
 {
        struct pci_dev *pdev = rocker->pdev;
@@ -1015,7 +1085,7 @@ static int rocker_dma_ring_bufs_alloc(struct rocker *rocker,
 
 rollback:
        for (i--; i >= 0; i--) {
-               struct rocker_desc_info *desc_info = &info->desc_info[i];
+               const struct rocker_desc_info *desc_info = &info->desc_info[i];
 
                pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
                                 desc_info->data_size, direction);
@@ -1024,15 +1094,15 @@ rollback:
        return err;
 }
 
-static void rocker_dma_ring_bufs_free(struct rocker *rocker,
-                                     struct rocker_dma_ring_info *info,
+static void rocker_dma_ring_bufs_free(const struct rocker *rocker,
+                                     const struct rocker_dma_ring_info *info,
                                      int direction)
 {
        struct pci_dev *pdev = rocker->pdev;
        int i;
 
        for (i = 0; i < info->size; i++) {
-               struct rocker_desc_info *desc_info = &info->desc_info[i];
+               const struct rocker_desc_info *desc_info = &info->desc_info[i];
                struct rocker_desc *desc = &info->desc[i];
 
                desc->buf_addr = 0;
@@ -1045,7 +1115,7 @@ static void rocker_dma_ring_bufs_free(struct rocker *rocker,
 
 static int rocker_dma_rings_init(struct rocker *rocker)
 {
-       struct pci_dev *pdev = rocker->pdev;
+       const struct pci_dev *pdev = rocker->pdev;
        int err;
 
        err = rocker_dma_ring_create(rocker, ROCKER_DMA_CMD,
@@ -1102,11 +1172,11 @@ static void rocker_dma_rings_fini(struct rocker *rocker)
        rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
 }
 
-static int rocker_dma_rx_ring_skb_map(struct rocker *rocker,
-                                     struct rocker_port *rocker_port,
+static int rocker_dma_rx_ring_skb_map(const struct rocker_port *rocker_port,
                                      struct rocker_desc_info *desc_info,
                                      struct sk_buff *skb, size_t buf_len)
 {
+       const struct rocker *rocker = rocker_port->rocker;
        struct pci_dev *pdev = rocker->pdev;
        dma_addr_t dma_handle;
 
@@ -1126,13 +1196,12 @@ tlv_put_failure:
        return -EMSGSIZE;
 }
 
-static size_t rocker_port_rx_buf_len(struct rocker_port *rocker_port)
+static size_t rocker_port_rx_buf_len(const struct rocker_port *rocker_port)
 {
        return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
 }
 
-static int rocker_dma_rx_ring_skb_alloc(struct rocker *rocker,
-                                       struct rocker_port *rocker_port,
+static int rocker_dma_rx_ring_skb_alloc(const struct rocker_port *rocker_port,
                                        struct rocker_desc_info *desc_info)
 {
        struct net_device *dev = rocker_port->dev;
@@ -1149,8 +1218,7 @@ static int rocker_dma_rx_ring_skb_alloc(struct rocker *rocker,
        skb = netdev_alloc_skb_ip_align(dev, buf_len);
        if (!skb)
                return -ENOMEM;
-       err = rocker_dma_rx_ring_skb_map(rocker, rocker_port, desc_info,
-                                        skb, buf_len);
+       err = rocker_dma_rx_ring_skb_map(rocker_port, desc_info, skb, buf_len);
        if (err) {
                dev_kfree_skb_any(skb);
                return err;
@@ -1159,8 +1227,8 @@ static int rocker_dma_rx_ring_skb_alloc(struct rocker *rocker,
        return 0;
 }
 
-static void rocker_dma_rx_ring_skb_unmap(struct rocker *rocker,
-                                        struct rocker_tlv **attrs)
+static void rocker_dma_rx_ring_skb_unmap(const struct rocker *rocker,
+                                        const struct rocker_tlv **attrs)
 {
        struct pci_dev *pdev = rocker->pdev;
        dma_addr_t dma_handle;
@@ -1174,10 +1242,10 @@ static void rocker_dma_rx_ring_skb_unmap(struct rocker *rocker,
        pci_unmap_single(pdev, dma_handle, len, PCI_DMA_FROMDEVICE);
 }
 
-static void rocker_dma_rx_ring_skb_free(struct rocker *rocker,
-                                       struct rocker_desc_info *desc_info)
+static void rocker_dma_rx_ring_skb_free(const struct rocker *rocker,
+                                       const struct rocker_desc_info *desc_info)
 {
-       struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
+       const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
        struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
 
        if (!skb)
@@ -1187,15 +1255,15 @@ static void rocker_dma_rx_ring_skb_free(struct rocker *rocker,
        dev_kfree_skb_any(skb);
 }
 
-static int rocker_dma_rx_ring_skbs_alloc(struct rocker *rocker,
-                                        struct rocker_port *rocker_port)
+static int rocker_dma_rx_ring_skbs_alloc(const struct rocker_port *rocker_port)
 {
-       struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
+       const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
+       const struct rocker *rocker = rocker_port->rocker;
        int i;
        int err;
 
        for (i = 0; i < rx_ring->size; i++) {
-               err = rocker_dma_rx_ring_skb_alloc(rocker, rocker_port,
+               err = rocker_dma_rx_ring_skb_alloc(rocker_port,
                                                   &rx_ring->desc_info[i]);
                if (err)
                        goto rollback;
@@ -1208,10 +1276,10 @@ rollback:
        return err;
 }
 
-static void rocker_dma_rx_ring_skbs_free(struct rocker *rocker,
-                                        struct rocker_port *rocker_port)
+static void rocker_dma_rx_ring_skbs_free(const struct rocker_port *rocker_port)
 {
-       struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
+       const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
+       const struct rocker *rocker = rocker_port->rocker;
        int i;
 
        for (i = 0; i < rx_ring->size; i++)
@@ -1257,7 +1325,7 @@ static int rocker_port_dma_rings_init(struct rocker_port *rocker_port)
                goto err_dma_rx_ring_bufs_alloc;
        }
 
-       err = rocker_dma_rx_ring_skbs_alloc(rocker, rocker_port);
+       err = rocker_dma_rx_ring_skbs_alloc(rocker_port);
        if (err) {
                netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n");
                goto err_dma_rx_ring_skbs_alloc;
@@ -1283,7 +1351,7 @@ static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port)
 {
        struct rocker *rocker = rocker_port->rocker;
 
-       rocker_dma_rx_ring_skbs_free(rocker, rocker_port);
+       rocker_dma_rx_ring_skbs_free(rocker_port);
        rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
                                  PCI_DMA_BIDIRECTIONAL);
        rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
@@ -1292,7 +1360,8 @@ static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port)
        rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
 }
 
-static void rocker_port_set_enable(struct rocker_port *rocker_port, bool enable)
+static void rocker_port_set_enable(const struct rocker_port *rocker_port,
+                                  bool enable)
 {
        u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);
 
@@ -1310,19 +1379,14 @@ static void rocker_port_set_enable(struct rocker_port *rocker_port, bool enable)
 static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id)
 {
        struct rocker *rocker = dev_id;
-       struct rocker_desc_info *desc_info;
+       const struct rocker_desc_info *desc_info;
        struct rocker_wait *wait;
        u32 credits = 0;
 
        spin_lock(&rocker->cmd_ring_lock);
        while ((desc_info = rocker_desc_tail_get(&rocker->cmd_ring))) {
                wait = rocker_desc_cookie_ptr_get(desc_info);
-               if (wait->nowait) {
-                       rocker_desc_gen_clear(desc_info);
-                       rocker_wait_destroy(wait);
-               } else {
-                       rocker_wait_wake_up(wait);
-               }
+               rocker_wait_wake_up(wait);
                credits++;
        }
        spin_unlock(&rocker->cmd_ring_lock);
@@ -1331,22 +1395,22 @@ static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
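
With the nowait mode gone, every command descriptor's cookie is a rocker_wait that this handler simply wakes; the special-case free of fire-and-forget waits disappears because the one former nowait user, FDB learning, moves to process context later in this patch. The done flag plus wait queue is the classic completion handshake; a self-contained pthread sketch of the same shape (names are illustrative):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Userspace analogue of struct rocker_wait: a flag guarded by a
     * lock, with a condition variable standing in for the wait queue.
     */
    struct demo_wait {
        pthread_mutex_t lock;
        pthread_cond_t cond;
        bool done;
    };

    static struct demo_wait w = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, false
    };

    /* Plays the role of rocker_cmd_irq_handler() waking the submitter. */
    static void *irq_thread(void *arg)
    {
        usleep(1000);                   /* "hardware" latency */
        pthread_mutex_lock(&w.lock);
        w.done = true;                  /* rocker_wait_wake_up() */
        pthread_cond_signal(&w.cond);
        pthread_mutex_unlock(&w.lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, irq_thread, NULL);

        /* rocker_wait_event_timeout(), minus the timeout for brevity */
        pthread_mutex_lock(&w.lock);
        while (!w.done)
            pthread_cond_wait(&w.cond, &w.lock);
        pthread_mutex_unlock(&w.lock);

        puts("command completed");
        return pthread_join(t, NULL);
    }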
 
-static void rocker_port_link_up(struct rocker_port *rocker_port)
+static void rocker_port_link_up(const struct rocker_port *rocker_port)
 {
        netif_carrier_on(rocker_port->dev);
        netdev_info(rocker_port->dev, "Link is up\n");
 }
 
-static void rocker_port_link_down(struct rocker_port *rocker_port)
+static void rocker_port_link_down(const struct rocker_port *rocker_port)
 {
        netif_carrier_off(rocker_port->dev);
        netdev_info(rocker_port->dev, "Link is down\n");
 }
 
-static int rocker_event_link_change(struct rocker *rocker,
+static int rocker_event_link_change(const struct rocker *rocker,
                                    const struct rocker_tlv *info)
 {
-       struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1];
+       const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1];
        unsigned int port_number;
        bool link_up;
        struct rocker_port *rocker_port;
@@ -1374,22 +1438,44 @@ static int rocker_event_link_change(struct rocker *rocker,
 }
 
 #define ROCKER_OP_FLAG_REMOVE          BIT(0)
-#define ROCKER_OP_FLAG_NOWAIT          BIT(1)
-#define ROCKER_OP_FLAG_LEARNED         BIT(2)
-#define ROCKER_OP_FLAG_REFRESH         BIT(3)
+#define ROCKER_OP_FLAG_LEARNED         BIT(1)
+#define ROCKER_OP_FLAG_REFRESH         BIT(2)
 
 static int rocker_port_fdb(struct rocker_port *rocker_port,
+                          enum switchdev_trans trans,
                           const unsigned char *addr,
                           __be16 vlan_id, int flags);
 
-static int rocker_event_mac_vlan_seen(struct rocker *rocker,
+struct rocker_mac_vlan_seen_work {
+       struct work_struct work;
+       struct rocker_port *rocker_port;
+       int flags;
+       unsigned char addr[ETH_ALEN];
+       __be16 vlan_id;
+};
+
+static void rocker_event_mac_vlan_seen_work(struct work_struct *work)
+{
+       const struct rocker_mac_vlan_seen_work *sw =
+               container_of(work, struct rocker_mac_vlan_seen_work, work);
+
+       rtnl_lock();
+       rocker_port_fdb(sw->rocker_port, SWITCHDEV_TRANS_NONE,
+                       sw->addr, sw->vlan_id, sw->flags);
+       rtnl_unlock();
+
+       kfree(work);
+}
+
+static int rocker_event_mac_vlan_seen(const struct rocker *rocker,
                                      const struct rocker_tlv *info)
 {
-       struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1];
+       struct rocker_mac_vlan_seen_work *sw;
+       const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1];
        unsigned int port_number;
        struct rocker_port *rocker_port;
-       unsigned char *addr;
-       int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_LEARNED;
+       const unsigned char *addr;
+       int flags = ROCKER_OP_FLAG_LEARNED;
        __be16 vlan_id;
 
        rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info);
@@ -1411,14 +1497,27 @@ static int rocker_event_mac_vlan_seen(struct rocker *rocker,
            rocker_port->stp_state != BR_STATE_FORWARDING)
                return 0;
 
-       return rocker_port_fdb(rocker_port, addr, vlan_id, flags);
+       sw = kmalloc(sizeof(*sw), GFP_ATOMIC);
+       if (!sw)
+               return -ENOMEM;
+
+       INIT_WORK(&sw->work, rocker_event_mac_vlan_seen_work);
+
+       sw->rocker_port = rocker_port;
+       sw->flags = flags;
+       ether_addr_copy(sw->addr, addr);
+       sw->vlan_id = vlan_id;
+
+       schedule_work(&sw->work);
+
+       return 0;
 }
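
The machinery above exists because rocker_event_mac_vlan_seen() runs in hard-irq context, where it may neither sleep nor take rtnl_lock, while the FDB update it wants to make now allocates transaction memory and expects rtnl protection. So the handler only copies the learned address into a GFP_ATOMIC work item and lets the scheduled work perform the update under rtnl_lock in process context. The hand-off pattern, reduced to a self-contained userspace sketch (names are illustrative):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Analogue of rocker_mac_vlan_seen_work: the no-sleep context copies
     * what it needs into a small heap object and defers the real work.
     */
    struct mac_seen_work {
        unsigned char addr[6];
        unsigned short vlan_id;
    };

    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

    static void *fdb_update_worker(void *arg)
    {
        struct mac_seen_work *sw = arg;

        pthread_mutex_lock(&table_lock);    /* rtnl_lock() in the driver */
        printf("fdb add %02x:...:%02x vlan %u\n",
               sw->addr[0], sw->addr[5], sw->vlan_id);
        pthread_mutex_unlock(&table_lock);
        free(sw);                           /* kfree(work) */
        return NULL;
    }

    /* Called from the no-sleep context; only copies and hands off. */
    static int mac_vlan_seen(const unsigned char *addr, unsigned short vlan_id)
    {
        struct mac_seen_work *sw = malloc(sizeof(*sw)); /* GFP_ATOMIC-ish */
        pthread_t t;

        if (!sw)
            return -1;
        memcpy(sw->addr, addr, sizeof(sw->addr));
        sw->vlan_id = vlan_id;
        if (pthread_create(&t, NULL, fdb_update_worker, sw)) {
            free(sw);                       /* schedule failed */
            return -1;
        }
        return pthread_detach(t);           /* schedule_work() */
    }

    int main(void)
    {
        const unsigned char mac[6] = { 0x52, 0x54, 0, 0x12, 0x34, 0x56 };

        mac_vlan_seen(mac, 100);
        pthread_exit(NULL);    /* let the detached worker finish */
    }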
 
-static int rocker_event_process(struct rocker *rocker,
-                               struct rocker_desc_info *desc_info)
+static int rocker_event_process(const struct rocker *rocker,
+                               const struct rocker_desc_info *desc_info)
 {
-       struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1];
-       struct rocker_tlv *info;
+       const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1];
+       const struct rocker_tlv *info;
        u16 type;
 
        rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info);
@@ -1442,8 +1541,8 @@ static int rocker_event_process(struct rocker *rocker,
 static irqreturn_t rocker_event_irq_handler(int irq, void *dev_id)
 {
        struct rocker *rocker = dev_id;
-       struct pci_dev *pdev = rocker->pdev;
-       struct rocker_desc_info *desc_info;
+       const struct pci_dev *pdev = rocker->pdev;
+       const struct rocker_desc_info *desc_info;
        u32 credits = 0;
        int err;
 
@@ -1487,65 +1586,70 @@ static irqreturn_t rocker_rx_irq_handler(int irq, void *dev_id)
  * Command interface
  ********************/
 
-typedef int (*rocker_cmd_cb_t)(struct rocker *rocker,
-                              struct rocker_port *rocker_port,
-                              struct rocker_desc_info *desc_info,
-                              void *priv);
+typedef int (*rocker_cmd_prep_cb_t)(const struct rocker_port *rocker_port,
+                                   struct rocker_desc_info *desc_info,
+                                   void *priv);
+
+typedef int (*rocker_cmd_proc_cb_t)(const struct rocker_port *rocker_port,
+                                   const struct rocker_desc_info *desc_info,
+                                   void *priv);
 
-static int rocker_cmd_exec(struct rocker *rocker,
-                          struct rocker_port *rocker_port,
-                          rocker_cmd_cb_t prepare, void *prepare_priv,
-                          rocker_cmd_cb_t process, void *process_priv,
-                          bool nowait)
+static int rocker_cmd_exec(struct rocker_port *rocker_port,
+                          enum switchdev_trans trans,
+                          rocker_cmd_prep_cb_t prepare, void *prepare_priv,
+                          rocker_cmd_proc_cb_t process, void *process_priv)
 {
+       struct rocker *rocker = rocker_port->rocker;
        struct rocker_desc_info *desc_info;
        struct rocker_wait *wait;
        unsigned long flags;
        int err;
 
-       wait = rocker_wait_create(nowait ? GFP_ATOMIC : GFP_KERNEL);
+       wait = rocker_wait_create(rocker_port, trans);
        if (!wait)
                return -ENOMEM;
-       wait->nowait = nowait;
 
        spin_lock_irqsave(&rocker->cmd_ring_lock, flags);
+
        desc_info = rocker_desc_head_get(&rocker->cmd_ring);
        if (!desc_info) {
                spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags);
                err = -EAGAIN;
                goto out;
        }
-       err = prepare(rocker, rocker_port, desc_info, prepare_priv);
+
+       err = prepare(rocker_port, desc_info, prepare_priv);
        if (err) {
                spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags);
                goto out;
        }
+
        rocker_desc_cookie_ptr_set(desc_info, wait);
-       rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info);
-       spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags);
 
-       if (nowait)
-               return 0;
+       if (trans != SWITCHDEV_TRANS_PREPARE)
+               rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info);
 
-       if (!rocker_wait_event_timeout(wait, HZ / 10))
-               return -EIO;
+       spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags);
+
+       if (trans != SWITCHDEV_TRANS_PREPARE)
+               if (!rocker_wait_event_timeout(wait, HZ / 10))
+                       return -EIO;
 
        err = rocker_desc_err(desc_info);
        if (err)
                return err;
 
        if (process)
-               err = process(rocker, rocker_port, desc_info, process_priv);
+               err = process(rocker_port, desc_info, process_priv);
 
        rocker_desc_gen_clear(desc_info);
 out:
-       rocker_wait_destroy(wait);
+       rocker_wait_destroy(trans, wait);
        return err;
 }
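
Worth noting in the reworked rocker_cmd_exec(): under SWITCHDEV_TRANS_PREPARE the descriptor is fully built and the wait allocated, but rocker_desc_head_set() is never called, so nothing is posted to the hardware and nothing is waited on; allocation and TLV-space failures therefore surface during prepare, and the commit phase replays the identical path for real. A toy sketch of how a caller drives the two phases (not the switchdev core's actual code):

    #include <stdio.h>

    enum switchdev_trans { TRANS_PREPARE, TRANS_COMMIT };

    /* Stand-in for rocker_cmd_exec(): PREPARE only validates and
     * reserves; COMMIT actually "touches hardware".
     */
    static int cmd_exec(enum switchdev_trans trans, const char *what)
    {
        if (trans == TRANS_PREPARE) {
            printf("prepare %s: reserve resources only\n", what);
            return 0;    /* any -ENOMEM/-EMSGSIZE surfaces here */
        }
        printf("commit %s: post descriptor to hardware\n", what);
        return 0;
    }

    /* The switchdev core calls the same handler for both phases. */
    static int port_attr_set(enum switchdev_trans trans)
    {
        return cmd_exec(trans, "port learning");
    }

    int main(void)
    {
        if (port_attr_set(TRANS_PREPARE))    /* may fail, HW untouched */
            return 1;
        return port_attr_set(TRANS_COMMIT);  /* expected not to fail */
    }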
 
 static int
-rocker_cmd_get_port_settings_prep(struct rocker *rocker,
-                                 struct rocker_port *rocker_port,
+rocker_cmd_get_port_settings_prep(const struct rocker_port *rocker_port,
                                  struct rocker_desc_info *desc_info,
                                  void *priv)
 {
@@ -1565,14 +1669,13 @@ rocker_cmd_get_port_settings_prep(struct rocker *rocker,
 }
 
 static int
-rocker_cmd_get_port_settings_ethtool_proc(struct rocker *rocker,
-                                         struct rocker_port *rocker_port,
-                                         struct rocker_desc_info *desc_info,
+rocker_cmd_get_port_settings_ethtool_proc(const struct rocker_port *rocker_port,
+                                         const struct rocker_desc_info *desc_info,
                                          void *priv)
 {
        struct ethtool_cmd *ecmd = priv;
-       struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
-       struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
+       const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
+       const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
        u32 speed;
        u8 duplex;
        u8 autoneg;
@@ -1604,15 +1707,14 @@ rocker_cmd_get_port_settings_ethtool_proc(struct rocker *rocker,
 }
 
 static int
-rocker_cmd_get_port_settings_macaddr_proc(struct rocker *rocker,
-                                         struct rocker_port *rocker_port,
-                                         struct rocker_desc_info *desc_info,
+rocker_cmd_get_port_settings_macaddr_proc(const struct rocker_port *rocker_port,
+                                         const struct rocker_desc_info *desc_info,
                                          void *priv)
 {
        unsigned char *macaddr = priv;
-       struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
-       struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
-       struct rocker_tlv *attr;
+       const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
+       const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
+       const struct rocker_tlv *attr;
 
        rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
        if (!attrs[ROCKER_TLV_CMD_INFO])
@@ -1637,17 +1739,16 @@ struct port_name {
 };
 
 static int
-rocker_cmd_get_port_settings_phys_name_proc(struct rocker *rocker,
-                                           struct rocker_port *rocker_port,
-                                           struct rocker_desc_info *desc_info,
+rocker_cmd_get_port_settings_phys_name_proc(const struct rocker_port *rocker_port,
+                                           const struct rocker_desc_info *desc_info,
                                            void *priv)
 {
-       struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
-       struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
+       const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
+       const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
        struct port_name *name = priv;
-       struct rocker_tlv *attr;
+       const struct rocker_tlv *attr;
        size_t i, j, len;
-       char *str;
+       const char *str;
 
        rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
        if (!attrs[ROCKER_TLV_CMD_INFO])
@@ -1679,8 +1780,7 @@ rocker_cmd_get_port_settings_phys_name_proc(struct rocker *rocker,
 }
 
 static int
-rocker_cmd_set_port_settings_ethtool_prep(struct rocker *rocker,
-                                         struct rocker_port *rocker_port,
+rocker_cmd_set_port_settings_ethtool_prep(const struct rocker_port *rocker_port,
                                          struct rocker_desc_info *desc_info,
                                          void *priv)
 {
@@ -1710,12 +1810,11 @@ rocker_cmd_set_port_settings_ethtool_prep(struct rocker *rocker,
 }
 
 static int
-rocker_cmd_set_port_settings_macaddr_prep(struct rocker *rocker,
-                                         struct rocker_port *rocker_port,
+rocker_cmd_set_port_settings_macaddr_prep(const struct rocker_port *rocker_port,
                                          struct rocker_desc_info *desc_info,
                                          void *priv)
 {
-       unsigned char *macaddr = priv;
+       const unsigned char *macaddr = priv;
        struct rocker_tlv *cmd_info;
 
        if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
@@ -1735,8 +1834,7 @@ rocker_cmd_set_port_settings_macaddr_prep(struct rocker *rocker,
 }
 
 static int
-rocker_cmd_set_port_learning_prep(struct rocker *rocker,
-                                 struct rocker_port *rocker_port,
+rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port,
                                  struct rocker_desc_info *desc_info,
                                  void *priv)
 {
@@ -1761,46 +1859,48 @@ rocker_cmd_set_port_learning_prep(struct rocker *rocker,
 static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
                                                struct ethtool_cmd *ecmd)
 {
-       return rocker_cmd_exec(rocker_port->rocker, rocker_port,
+       return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE,
                               rocker_cmd_get_port_settings_prep, NULL,
                               rocker_cmd_get_port_settings_ethtool_proc,
-                              ecmd, false);
+                              ecmd);
 }
 
 static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port,
                                                unsigned char *macaddr)
 {
-       return rocker_cmd_exec(rocker_port->rocker, rocker_port,
+       return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE,
                               rocker_cmd_get_port_settings_prep, NULL,
                               rocker_cmd_get_port_settings_macaddr_proc,
-                              macaddr, false);
+                              macaddr);
 }
 
 static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
                                                struct ethtool_cmd *ecmd)
 {
-       return rocker_cmd_exec(rocker_port->rocker, rocker_port,
+       return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE,
                               rocker_cmd_set_port_settings_ethtool_prep,
-                              ecmd, NULL, NULL, false);
+                              ecmd, NULL, NULL);
 }
 
 static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
                                                unsigned char *macaddr)
 {
-       return rocker_cmd_exec(rocker_port->rocker, rocker_port,
+       return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE,
                               rocker_cmd_set_port_settings_macaddr_prep,
-                              macaddr, NULL, NULL, false);
+                              macaddr, NULL, NULL);
 }
 
-static int rocker_port_set_learning(struct rocker_port *rocker_port)
+static int rocker_port_set_learning(struct rocker_port *rocker_port,
+                                   enum switchdev_trans trans)
 {
-       return rocker_cmd_exec(rocker_port->rocker, rocker_port,
+       return rocker_cmd_exec(rocker_port, trans,
                               rocker_cmd_set_port_learning_prep,
-                              NULL, NULL, NULL, false);
+                              NULL, NULL, NULL);
 }
 
-static int rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
-                                          struct rocker_flow_tbl_entry *entry)
+static int
+rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
+                               const struct rocker_flow_tbl_entry *entry)
 {
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
                               entry->key.ig_port.in_pport))
@@ -1815,8 +1915,9 @@ static int rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
        return 0;
 }
 
-static int rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
-                                       struct rocker_flow_tbl_entry *entry)
+static int
+rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
+                            const struct rocker_flow_tbl_entry *entry)
 {
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
                               entry->key.vlan.in_pport))
@@ -1838,8 +1939,9 @@ static int rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
        return 0;
 }
 
-static int rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
-                                           struct rocker_flow_tbl_entry *entry)
+static int
+rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
+                                const struct rocker_flow_tbl_entry *entry)
 {
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
                               entry->key.term_mac.in_pport))
@@ -1875,7 +1977,7 @@ static int rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
 
 static int
 rocker_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
-                                     struct rocker_flow_tbl_entry *entry)
+                                     const struct rocker_flow_tbl_entry *entry)
 {
        if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
                                entry->key.ucast_routing.eth_type))
@@ -1896,8 +1998,9 @@ rocker_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
        return 0;
 }
 
-static int rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
-                                         struct rocker_flow_tbl_entry *entry)
+static int
+rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
+                              const struct rocker_flow_tbl_entry *entry)
 {
        if (entry->key.bridge.has_eth_dst &&
            rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
@@ -1929,8 +2032,9 @@ static int rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
        return 0;
 }
 
-static int rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
-                                      struct rocker_flow_tbl_entry *entry)
+static int
+rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
+                           const struct rocker_flow_tbl_entry *entry)
 {
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
                               entry->key.acl.in_pport))
@@ -1995,12 +2099,11 @@ static int rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
        return 0;
 }
 
-static int rocker_cmd_flow_tbl_add(struct rocker *rocker,
-                                  struct rocker_port *rocker_port,
+static int rocker_cmd_flow_tbl_add(const struct rocker_port *rocker_port,
                                   struct rocker_desc_info *desc_info,
                                   void *priv)
 {
-       struct rocker_flow_tbl_entry *entry = priv;
+       const struct rocker_flow_tbl_entry *entry = priv;
        struct rocker_tlv *cmd_info;
        int err = 0;
 
@@ -2053,8 +2156,7 @@ static int rocker_cmd_flow_tbl_add(struct rocker *rocker,
        return 0;
 }
 
-static int rocker_cmd_flow_tbl_del(struct rocker *rocker,
-                                  struct rocker_port *rocker_port,
+static int rocker_cmd_flow_tbl_del(const struct rocker_port *rocker_port,
                                   struct rocker_desc_info *desc_info,
                                   void *priv)
 {
@@ -2090,7 +2192,7 @@ rocker_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
 
 static int
 rocker_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
-                                   struct rocker_group_tbl_entry *entry)
+                                   const struct rocker_group_tbl_entry *entry)
 {
        if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
                               entry->l2_rewrite.group_id))
@@ -2113,7 +2215,7 @@ rocker_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
 
 static int
 rocker_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
-                                  struct rocker_group_tbl_entry *entry)
+                                  const struct rocker_group_tbl_entry *entry)
 {
        int i;
        struct rocker_tlv *group_ids;
@@ -2139,7 +2241,7 @@ rocker_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
 
 static int
 rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
-                                   struct rocker_group_tbl_entry *entry)
+                                   const struct rocker_group_tbl_entry *entry)
 {
        if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
            rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
@@ -2163,8 +2265,7 @@ rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
        return 0;
 }
 
-static int rocker_cmd_group_tbl_add(struct rocker *rocker,
-                                   struct rocker_port *rocker_port,
+static int rocker_cmd_group_tbl_add(const struct rocker_port *rocker_port,
                                    struct rocker_desc_info *desc_info,
                                    void *priv)
 {
@@ -2209,8 +2310,7 @@ static int rocker_cmd_group_tbl_add(struct rocker *rocker,
        return 0;
 }
 
-static int rocker_cmd_group_tbl_del(struct rocker *rocker,
-                                   struct rocker_port *rocker_port,
+static int rocker_cmd_group_tbl_del(const struct rocker_port *rocker_port,
                                    struct rocker_desc_info *desc_info,
                                    void *priv)
 {
@@ -2293,7 +2393,8 @@ static void rocker_free_tbls(struct rocker *rocker)
 }
 
 static struct rocker_flow_tbl_entry *
-rocker_flow_tbl_find(struct rocker *rocker, struct rocker_flow_tbl_entry *match)
+rocker_flow_tbl_find(const struct rocker *rocker,
+                    const struct rocker_flow_tbl_entry *match)
 {
        struct rocker_flow_tbl_entry *found;
        size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
@@ -2308,8 +2409,8 @@ rocker_flow_tbl_find(struct rocker *rocker, struct rocker_flow_tbl_entry *match)
 }
 
 static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
-                              struct rocker_flow_tbl_entry *match,
-                              bool nowait)
+                              enum switchdev_trans trans,
+                              struct rocker_flow_tbl_entry *match)
 {
        struct rocker *rocker = rocker_port->rocker;
        struct rocker_flow_tbl_entry *found;
@@ -2324,8 +2425,9 @@ static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
 
        if (found) {
                match->cookie = found->cookie;
-               hash_del(&found->entry);
-               kfree(found);
+               if (trans != SWITCHDEV_TRANS_PREPARE)
+                       hash_del(&found->entry);
+               rocker_port_kfree(trans, found);
                found = match;
                found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
        } else {
@@ -2334,18 +2436,18 @@ static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
                found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
        }
 
-       hash_add(rocker->flow_tbl, &found->entry, found->key_crc32);
+       if (trans != SWITCHDEV_TRANS_PREPARE)
+               hash_add(rocker->flow_tbl, &found->entry, found->key_crc32);
 
        spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
 
-       return rocker_cmd_exec(rocker, rocker_port,
-                              rocker_cmd_flow_tbl_add,
-                              found, NULL, NULL, nowait);
+       return rocker_cmd_exec(rocker_port, trans, rocker_cmd_flow_tbl_add,
+                              found, NULL, NULL);
 }
 
 static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
-                              struct rocker_flow_tbl_entry *match,
-                              bool nowait)
+                              enum switchdev_trans trans,
+                              struct rocker_flow_tbl_entry *match)
 {
        struct rocker *rocker = rocker_port->rocker;
        struct rocker_flow_tbl_entry *found;
@@ -2360,47 +2462,43 @@ static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
        found = rocker_flow_tbl_find(rocker, match);
 
        if (found) {
-               hash_del(&found->entry);
+               if (trans != SWITCHDEV_TRANS_PREPARE)
+                       hash_del(&found->entry);
                found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
        }
 
        spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
 
-       kfree(match);
+       rocker_port_kfree(trans, match);
 
        if (found) {
-               err = rocker_cmd_exec(rocker, rocker_port,
+               err = rocker_cmd_exec(rocker_port, trans,
                                      rocker_cmd_flow_tbl_del,
-                                     found, NULL, NULL, nowait);
-               kfree(found);
+                                     found, NULL, NULL);
+               rocker_port_kfree(trans, found);
        }
 
        return err;
 }
 
-static gfp_t rocker_op_flags_gfp(int flags)
-{
-       return flags & ROCKER_OP_FLAG_NOWAIT ? GFP_ATOMIC : GFP_KERNEL;
-}
-
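rocker_op_flags_gfp() goes away because the NOWAIT/GFP_ATOMIC case is gone along with the nowait plumbing; allocation behavior is instead keyed off the transaction phase via rocker_port_kzalloc()/rocker_port_kcalloc()/rocker_port_kfree(). A hedged sketch of how such helpers can work, where the per-port list name (trans_mem) and the header layout are illustrative assumptions rather than quotes of this driver:

	/* Illustrative sketch: prepare-phase allocations are parked on a
	 * per-port list and handed back, in order, during the commit
	 * phase, so the identical code path cannot hit -ENOMEM at commit.
	 */
	static void *rocker_port_kzalloc_sketch(struct rocker_port *rocker_port,
						enum switchdev_trans trans,
						size_t size)
	{
		struct list_head *elem;

		switch (trans) {
		case SWITCHDEV_TRANS_PREPARE:
			elem = kzalloc(size + sizeof(*elem), GFP_KERNEL);
			if (!elem)
				return NULL;
			list_add_tail(elem, &rocker_port->trans_mem);
			break;
		case SWITCHDEV_TRANS_COMMIT:
			/* Reuse the allocation queued during prepare; assumes
			 * prepare ran first and queued one entry per call.
			 */
			elem = rocker_port->trans_mem.next;
			list_del_init(elem);
			break;
		default:	/* SWITCHDEV_TRANS_NONE */
			elem = kzalloc(size + sizeof(*elem), GFP_KERNEL);
			if (elem)
				INIT_LIST_HEAD(elem);
			break;
		}
		return elem ? (void *)(elem + 1) : NULL;
	}

Under this model every allocation in the hunks below becomes safe to run twice, once per phase, with the same arguments.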
 static int rocker_flow_tbl_do(struct rocker_port *rocker_port,
-                             int flags, struct rocker_flow_tbl_entry *entry)
+                             enum switchdev_trans trans, int flags,
+                             struct rocker_flow_tbl_entry *entry)
 {
-       bool nowait = flags & ROCKER_OP_FLAG_NOWAIT;
-
        if (flags & ROCKER_OP_FLAG_REMOVE)
-               return rocker_flow_tbl_del(rocker_port, entry, nowait);
+               return rocker_flow_tbl_del(rocker_port, trans, entry);
        else
-               return rocker_flow_tbl_add(rocker_port, entry, nowait);
+               return rocker_flow_tbl_add(rocker_port, trans, entry);
 }
 
 static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port,
-                                  int flags, u32 in_pport, u32 in_pport_mask,
+                                  enum switchdev_trans trans, int flags,
+                                  u32 in_pport, u32 in_pport_mask,
                                   enum rocker_of_dpa_table_id goto_tbl)
 {
        struct rocker_flow_tbl_entry *entry;
 
-       entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+       entry = rocker_port_kzalloc(rocker_port, trans, sizeof(*entry));
        if (!entry)
                return -ENOMEM;
 
@@ -2410,18 +2508,19 @@ static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port,
        entry->key.ig_port.in_pport_mask = in_pport_mask;
        entry->key.ig_port.goto_tbl = goto_tbl;
 
-       return rocker_flow_tbl_do(rocker_port, flags, entry);
+       return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
 }
 
 static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port,
-                               int flags, u32 in_pport,
-                               __be16 vlan_id, __be16 vlan_id_mask,
+                               enum switchdev_trans trans, int flags,
+                               u32 in_pport, __be16 vlan_id,
+                               __be16 vlan_id_mask,
                                enum rocker_of_dpa_table_id goto_tbl,
                                bool untagged, __be16 new_vlan_id)
 {
        struct rocker_flow_tbl_entry *entry;
 
-       entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+       entry = rocker_port_kzalloc(rocker_port, trans, sizeof(*entry));
        if (!entry)
                return -ENOMEM;
 
@@ -2435,10 +2534,11 @@ static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port,
        entry->key.vlan.untagged = untagged;
        entry->key.vlan.new_vlan_id = new_vlan_id;
 
-       return rocker_flow_tbl_do(rocker_port, flags, entry);
+       return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
 }
 
 static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port,
+                                   enum switchdev_trans trans,
                                    u32 in_pport, u32 in_pport_mask,
                                    __be16 eth_type, const u8 *eth_dst,
                                    const u8 *eth_dst_mask, __be16 vlan_id,
@@ -2447,7 +2547,7 @@ static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port,
 {
        struct rocker_flow_tbl_entry *entry;
 
-       entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+       entry = rocker_port_kzalloc(rocker_port, trans, sizeof(*entry));
        if (!entry)
                return -ENOMEM;
 
@@ -2471,11 +2571,11 @@ static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port,
        entry->key.term_mac.vlan_id_mask = vlan_id_mask;
        entry->key.term_mac.copy_to_cpu = copy_to_cpu;
 
-       return rocker_flow_tbl_do(rocker_port, flags, entry);
+       return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
 }
 
 static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
-                                 int flags,
+                                 enum switchdev_trans trans, int flags,
                                  const u8 *eth_dst, const u8 *eth_dst_mask,
                                  __be16 vlan_id, u32 tunnel_id,
                                  enum rocker_of_dpa_table_id goto_tbl,
@@ -2487,7 +2587,7 @@ static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
        bool dflt = !eth_dst || (eth_dst && eth_dst_mask);
        bool wild = false;
 
-       entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+       entry = rocker_port_kzalloc(rocker_port, trans, sizeof(*entry));
        if (!entry)
                return -ENOMEM;
 
@@ -2500,7 +2600,7 @@ static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
        if (eth_dst_mask) {
                entry->key.bridge.has_eth_dst_mask = 1;
                ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
-               if (memcmp(eth_dst_mask, ff_mac, ETH_ALEN))
+               if (!ether_addr_equal(eth_dst_mask, ff_mac))
                        wild = true;
        }
 
@@ -2525,10 +2625,11 @@ static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
        entry->key.bridge.group_id = group_id;
        entry->key.bridge.copy_to_cpu = copy_to_cpu;
 
-       return rocker_flow_tbl_do(rocker_port, flags, entry);
+       return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
 }
 
 static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port,
+                                         enum switchdev_trans trans,
                                          __be16 eth_type, __be32 dst,
                                          __be32 dst_mask, u32 priority,
                                          enum rocker_of_dpa_table_id goto_tbl,
@@ -2536,7 +2637,7 @@ static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port,
 {
        struct rocker_flow_tbl_entry *entry;
 
-       entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+       entry = rocker_port_kzalloc(rocker_port, trans, sizeof(*entry));
        if (!entry)
                return -ENOMEM;
 
@@ -2550,30 +2651,29 @@ static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port,
        entry->key_len = offsetof(struct rocker_flow_tbl_key,
                                  ucast_routing.group_id);
 
-       return rocker_flow_tbl_do(rocker_port, flags, entry);
+       return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
 }
 
 static int rocker_flow_tbl_acl(struct rocker_port *rocker_port,
-                              int flags, u32 in_pport,
-                              u32 in_pport_mask,
+                              enum switchdev_trans trans, int flags,
+                              u32 in_pport, u32 in_pport_mask,
                               const u8 *eth_src, const u8 *eth_src_mask,
                               const u8 *eth_dst, const u8 *eth_dst_mask,
-                              __be16 eth_type,
-                              __be16 vlan_id, __be16 vlan_id_mask,
-                              u8 ip_proto, u8 ip_proto_mask,
-                              u8 ip_tos, u8 ip_tos_mask,
+                              __be16 eth_type, __be16 vlan_id,
+                              __be16 vlan_id_mask, u8 ip_proto,
+                              u8 ip_proto_mask, u8 ip_tos, u8 ip_tos_mask,
                               u32 group_id)
 {
        u32 priority;
        struct rocker_flow_tbl_entry *entry;
 
-       entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+       entry = rocker_port_kzalloc(rocker_port, trans, sizeof(*entry));
        if (!entry)
                return -ENOMEM;
 
        priority = ROCKER_PRIORITY_ACL_NORMAL;
        if (eth_dst && eth_dst_mask) {
-               if (memcmp(eth_dst_mask, mcast_mac, ETH_ALEN) == 0)
+               if (ether_addr_equal(eth_dst_mask, mcast_mac))
                        priority = ROCKER_PRIORITY_ACL_DFLT;
                else if (is_link_local_ether_addr(eth_dst))
                        priority = ROCKER_PRIORITY_ACL_CTRL;
@@ -2602,12 +2702,12 @@ static int rocker_flow_tbl_acl(struct rocker_port *rocker_port,
        entry->key.acl.ip_tos_mask = ip_tos_mask;
        entry->key.acl.group_id = group_id;
 
-       return rocker_flow_tbl_do(rocker_port, flags, entry);
+       return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
 }
 
 static struct rocker_group_tbl_entry *
-rocker_group_tbl_find(struct rocker *rocker,
-                     struct rocker_group_tbl_entry *match)
+rocker_group_tbl_find(const struct rocker *rocker,
+                     const struct rocker_group_tbl_entry *match)
 {
        struct rocker_group_tbl_entry *found;
 
@@ -2620,22 +2720,23 @@ rocker_group_tbl_find(struct rocker *rocker,
        return NULL;
 }
 
-static void rocker_group_tbl_entry_free(struct rocker_group_tbl_entry *entry)
+static void rocker_group_tbl_entry_free(enum switchdev_trans trans,
+                                       struct rocker_group_tbl_entry *entry)
 {
        switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
        case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
        case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
-               kfree(entry->group_ids);
+               rocker_port_kfree(trans, entry->group_ids);
                break;
        default:
                break;
        }
-       kfree(entry);
+       rocker_port_kfree(trans, entry);
 }
 
 static int rocker_group_tbl_add(struct rocker_port *rocker_port,
-                               struct rocker_group_tbl_entry *match,
-                               bool nowait)
+                               enum switchdev_trans trans,
+                               struct rocker_group_tbl_entry *match)
 {
        struct rocker *rocker = rocker_port->rocker;
        struct rocker_group_tbl_entry *found;
@@ -2646,8 +2747,9 @@ static int rocker_group_tbl_add(struct rocker_port *rocker_port,
        found = rocker_group_tbl_find(rocker, match);
 
        if (found) {
-               hash_del(&found->entry);
-               rocker_group_tbl_entry_free(found);
+               if (trans != SWITCHDEV_TRANS_PREPARE)
+                       hash_del(&found->entry);
+               rocker_group_tbl_entry_free(trans, found);
                found = match;
                found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
        } else {
@@ -2655,18 +2757,18 @@ static int rocker_group_tbl_add(struct rocker_port *rocker_port,
                found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
        }
 
-       hash_add(rocker->group_tbl, &found->entry, found->group_id);
+       if (trans != SWITCHDEV_TRANS_PREPARE)
+               hash_add(rocker->group_tbl, &found->entry, found->group_id);
 
        spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
 
-       return rocker_cmd_exec(rocker, rocker_port,
-                              rocker_cmd_group_tbl_add,
-                              found, NULL, NULL, nowait);
+       return rocker_cmd_exec(rocker_port, trans, rocker_cmd_group_tbl_add,
+                              found, NULL, NULL);
 }
 
 static int rocker_group_tbl_del(struct rocker_port *rocker_port,
-                               struct rocker_group_tbl_entry *match,
-                               bool nowait)
+                               enum switchdev_trans trans,
+                               struct rocker_group_tbl_entry *match)
 {
        struct rocker *rocker = rocker_port->rocker;
        struct rocker_group_tbl_entry *found;
@@ -2678,93 +2780,95 @@ static int rocker_group_tbl_del(struct rocker_port *rocker_port,
        found = rocker_group_tbl_find(rocker, match);
 
        if (found) {
-               hash_del(&found->entry);
+               if (trans != SWITCHDEV_TRANS_PREPARE)
+                       hash_del(&found->entry);
                found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
        }
 
        spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
 
-       rocker_group_tbl_entry_free(match);
+       rocker_group_tbl_entry_free(trans, match);
 
        if (found) {
-               err = rocker_cmd_exec(rocker, rocker_port,
+               err = rocker_cmd_exec(rocker_port, trans,
                                      rocker_cmd_group_tbl_del,
-                                     found, NULL, NULL, nowait);
-               rocker_group_tbl_entry_free(found);
+                                     found, NULL, NULL);
+               rocker_group_tbl_entry_free(trans, found);
        }
 
        return err;
 }
 
 static int rocker_group_tbl_do(struct rocker_port *rocker_port,
-                              int flags, struct rocker_group_tbl_entry *entry)
+                              enum switchdev_trans trans, int flags,
+                              struct rocker_group_tbl_entry *entry)
 {
-       bool nowait = flags & ROCKER_OP_FLAG_NOWAIT;
-
        if (flags & ROCKER_OP_FLAG_REMOVE)
-               return rocker_group_tbl_del(rocker_port, entry, nowait);
+               return rocker_group_tbl_del(rocker_port, trans, entry);
        else
-               return rocker_group_tbl_add(rocker_port, entry, nowait);
+               return rocker_group_tbl_add(rocker_port, trans, entry);
 }
 
 static int rocker_group_l2_interface(struct rocker_port *rocker_port,
-                                    int flags, __be16 vlan_id,
-                                    u32 out_pport, int pop_vlan)
+                                    enum switchdev_trans trans, int flags,
+                                    __be16 vlan_id, u32 out_pport,
+                                    int pop_vlan)
 {
        struct rocker_group_tbl_entry *entry;
 
-       entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+       entry = rocker_port_kzalloc(rocker_port, trans, sizeof(*entry));
        if (!entry)
                return -ENOMEM;
 
        entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
        entry->l2_interface.pop_vlan = pop_vlan;
 
-       return rocker_group_tbl_do(rocker_port, flags, entry);
+       return rocker_group_tbl_do(rocker_port, trans, flags, entry);
 }
 
 static int rocker_group_l2_fan_out(struct rocker_port *rocker_port,
+                                  enum switchdev_trans trans,
                                   int flags, u8 group_count,
-                                  u32 *group_ids, u32 group_id)
+                                  const u32 *group_ids, u32 group_id)
 {
        struct rocker_group_tbl_entry *entry;
 
-       entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+       entry = rocker_port_kzalloc(rocker_port, trans, sizeof(*entry));
        if (!entry)
                return -ENOMEM;
 
        entry->group_id = group_id;
        entry->group_count = group_count;
 
-       entry->group_ids = kcalloc(group_count, sizeof(u32),
-                                  rocker_op_flags_gfp(flags));
+       entry->group_ids = rocker_port_kcalloc(rocker_port, trans, group_count,
+                                              sizeof(u32));
        if (!entry->group_ids) {
-               kfree(entry);
+               rocker_port_kfree(trans, entry);
                return -ENOMEM;
        }
        memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));
 
-       return rocker_group_tbl_do(rocker_port, flags, entry);
+       return rocker_group_tbl_do(rocker_port, trans, flags, entry);
 }
 
 static int rocker_group_l2_flood(struct rocker_port *rocker_port,
-                                int flags, __be16 vlan_id,
-                                u8 group_count, u32 *group_ids,
-                                u32 group_id)
+                                enum switchdev_trans trans, int flags,
+                                __be16 vlan_id, u8 group_count,
+                                const u32 *group_ids, u32 group_id)
 {
-       return rocker_group_l2_fan_out(rocker_port, flags,
+       return rocker_group_l2_fan_out(rocker_port, trans, flags,
                                       group_count, group_ids,
                                       group_id);
 }
 
 static int rocker_group_l3_unicast(struct rocker_port *rocker_port,
-                                  int flags, u32 index, u8 *src_mac,
-                                  u8 *dst_mac, __be16 vlan_id,
-                                  bool ttl_check, u32 pport)
+                                  enum switchdev_trans trans, int flags,
+                                  u32 index, const u8 *src_mac,
+                                  const u8 *dst_mac, __be16 vlan_id,
+                                  bool ttl_check, u32 pport)
 {
        struct rocker_group_tbl_entry *entry;
 
-       entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+       entry = rocker_port_kzalloc(rocker_port, trans, sizeof(*entry));
        if (!entry)
                return -ENOMEM;
 
@@ -2777,11 +2881,11 @@ static int rocker_group_l3_unicast(struct rocker_port *rocker_port,
        entry->l3_unicast.ttl_check = ttl_check;
        entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport);
 
-       return rocker_group_tbl_do(rocker_port, flags, entry);
+       return rocker_group_tbl_do(rocker_port, trans, flags, entry);
 }
 
 static struct rocker_neigh_tbl_entry *
-       rocker_neigh_tbl_find(struct rocker *rocker, __be32 ip_addr)
+rocker_neigh_tbl_find(const struct rocker *rocker, __be32 ip_addr)
 {
        struct rocker_neigh_tbl_entry *found;
 
@@ -2794,37 +2898,44 @@ static struct rocker_neigh_tbl_entry *
 }
 
 static void _rocker_neigh_add(struct rocker *rocker,
+                             enum switchdev_trans trans,
                              struct rocker_neigh_tbl_entry *entry)
 {
-       entry->index = rocker->neigh_tbl_next_index++;
+       entry->index = rocker->neigh_tbl_next_index;
+       if (trans == SWITCHDEV_TRANS_PREPARE)
+               return;
+       rocker->neigh_tbl_next_index++;
        entry->ref_count++;
        hash_add(rocker->neigh_tbl, &entry->entry,
                 be32_to_cpu(entry->ip_addr));
 }
 
-static void _rocker_neigh_del(struct rocker *rocker,
+static void _rocker_neigh_del(enum switchdev_trans trans,
                              struct rocker_neigh_tbl_entry *entry)
 {
+       if (trans == SWITCHDEV_TRANS_PREPARE)
+               return;
        if (--entry->ref_count == 0) {
                hash_del(&entry->entry);
-               kfree(entry);
+               rocker_port_kfree(trans, entry);
        }
 }
 
-static void _rocker_neigh_update(struct rocker *rocker,
-                                struct rocker_neigh_tbl_entry *entry,
-                                u8 *eth_dst, bool ttl_check)
+static void _rocker_neigh_update(struct rocker_neigh_tbl_entry *entry,
+                                enum switchdev_trans trans,
+                                const u8 *eth_dst, bool ttl_check)
 {
        if (eth_dst) {
                ether_addr_copy(entry->eth_dst, eth_dst);
                entry->ttl_check = ttl_check;
-       } else {
+       } else if (trans != SWITCHDEV_TRANS_PREPARE) {
                entry->ref_count++;
        }
 }
 
 static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
-                                 int flags, __be32 ip_addr, u8 *eth_dst)
+                                 enum switchdev_trans trans,
+                                 int flags, __be32 ip_addr, const u8 *eth_dst)
 {
        struct rocker *rocker = rocker_port->rocker;
        struct rocker_neigh_tbl_entry *entry;
@@ -2840,7 +2951,7 @@ static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
        bool removing;
        int err = 0;
 
-       entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+       entry = rocker_port_kzalloc(rocker_port, trans, sizeof(*entry));
        if (!entry)
                return -ENOMEM;
 
@@ -2857,12 +2968,12 @@ static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
                entry->dev = rocker_port->dev;
                ether_addr_copy(entry->eth_dst, eth_dst);
                entry->ttl_check = true;
-               _rocker_neigh_add(rocker, entry);
+               _rocker_neigh_add(rocker, trans, entry);
        } else if (removing) {
                memcpy(entry, found, sizeof(*entry));
-               _rocker_neigh_del(rocker, found);
+               _rocker_neigh_del(trans, found);
        } else if (updating) {
-               _rocker_neigh_update(rocker, found, eth_dst, true);
+               _rocker_neigh_update(found, trans, eth_dst, true);
                memcpy(entry, found, sizeof(*entry));
        } else {
                err = -ENOENT;
@@ -2879,7 +2990,7 @@ static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
         * other routes' nexthops.
         */
 
-       err = rocker_group_l3_unicast(rocker_port, flags,
+       err = rocker_group_l3_unicast(rocker_port, trans, flags,
                                      entry->index,
                                      rocker_port->dev->dev_addr,
                                      entry->eth_dst,
@@ -2895,7 +3006,7 @@ static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
 
        if (adding || removing) {
                group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
-               err = rocker_flow_tbl_ucast4_routing(rocker_port,
+               err = rocker_flow_tbl_ucast4_routing(rocker_port, trans,
                                                     eth_type, ip_addr,
                                                     inet_make_mask(32),
                                                     priority, goto_tbl,
@@ -2909,13 +3020,13 @@ static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
 
 err_out:
        if (!adding)
-               kfree(entry);
+               rocker_port_kfree(trans, entry);
 
        return err;
 }
 
 static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
-                                   __be32 ip_addr)
+                                   enum switchdev_trans trans, __be32 ip_addr)
 {
        struct net_device *dev = rocker_port->dev;
        struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
@@ -2933,7 +3044,8 @@ static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
         */
 
        if (n->nud_state & NUD_VALID)
-               err = rocker_port_ipv4_neigh(rocker_port, 0, ip_addr, n->ha);
+               err = rocker_port_ipv4_neigh(rocker_port, trans, 0,
+                                            ip_addr, n->ha);
        else
                neigh_event_send(n, NULL);
 
@@ -2941,7 +3053,8 @@ static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
        return err;
 }
 
-static int rocker_port_ipv4_nh(struct rocker_port *rocker_port, int flags,
+static int rocker_port_ipv4_nh(struct rocker_port *rocker_port,
+                              enum switchdev_trans trans, int flags,
                               __be32 ip_addr, u32 *index)
 {
        struct rocker *rocker = rocker_port->rocker;
@@ -2954,7 +3067,7 @@ static int rocker_port_ipv4_nh(struct rocker_port *rocker_port, int flags,
        bool resolved = true;
        int err = 0;
 
-       entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+       entry = rocker_port_kzalloc(rocker_port, trans, sizeof(*entry));
        if (!entry)
                return -ENOMEM;
 
@@ -2971,13 +3084,13 @@ static int rocker_port_ipv4_nh(struct rocker_port *rocker_port, int flags,
        if (adding) {
                entry->ip_addr = ip_addr;
                entry->dev = rocker_port->dev;
-               _rocker_neigh_add(rocker, entry);
+               _rocker_neigh_add(rocker, trans, entry);
                *index = entry->index;
                resolved = false;
        } else if (removing) {
-               _rocker_neigh_del(rocker, found);
+               _rocker_neigh_del(trans, found);
        } else if (updating) {
-               _rocker_neigh_update(rocker, found, NULL, false);
+               _rocker_neigh_update(found, trans, NULL, false);
                resolved = !is_zero_ether_addr(found->eth_dst);
        } else {
                err = -ENOENT;
@@ -2986,7 +3099,7 @@ static int rocker_port_ipv4_nh(struct rocker_port *rocker_port, int flags,
        spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);
 
        if (!adding)
-               kfree(entry);
+               rocker_port_kfree(trans, entry);
 
        if (err)
                return err;
@@ -2994,24 +3107,25 @@ static int rocker_port_ipv4_nh(struct rocker_port *rocker_port, int flags,
        /* Resolved means neigh ip_addr is resolved to neigh mac. */
 
        if (!resolved)
-               err = rocker_port_ipv4_resolve(rocker_port, ip_addr);
+               err = rocker_port_ipv4_resolve(rocker_port, trans, ip_addr);
 
        return err;
 }
 
 static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
+                                       enum switchdev_trans trans,
                                        int flags, __be16 vlan_id)
 {
        struct rocker_port *p;
-       struct rocker *rocker = rocker_port->rocker;
+       const struct rocker *rocker = rocker_port->rocker;
        u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
        u32 *group_ids;
        u8 group_count = 0;
        int err = 0;
        int i;
 
-       group_ids = kcalloc(rocker->port_count, sizeof(u32),
-                           rocker_op_flags_gfp(flags));
+       group_ids = rocker_port_kcalloc(rocker_port, trans, rocker->port_count,
+                                       sizeof(u32));
        if (!group_ids)
                return -ENOMEM;
 
@@ -3022,6 +3136,8 @@ static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
 
        for (i = 0; i < rocker->port_count; i++) {
                p = rocker->ports[i];
+               if (!p)
+                       continue;
                if (!rocker_port_is_bridged(p))
                        continue;
                if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
@@ -3034,23 +3150,22 @@ static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
        if (group_count == 0)
                goto no_ports_in_vlan;
 
-       err = rocker_group_l2_flood(rocker_port, flags, vlan_id,
-                                   group_count, group_ids,
-                                   group_id);
+       err = rocker_group_l2_flood(rocker_port, trans, flags, vlan_id,
+                                   group_count, group_ids, group_id);
        if (err)
                netdev_err(rocker_port->dev,
                           "Error (%d) port VLAN l2 flood group\n", err);
 
 no_ports_in_vlan:
-       kfree(group_ids);
+       rocker_port_kfree(trans, group_ids);
        return err;
 }
 
 static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
-                                     int flags, __be16 vlan_id,
-                                     bool pop_vlan)
+                                     enum switchdev_trans trans, int flags,
+                                     __be16 vlan_id, bool pop_vlan)
 {
-       struct rocker *rocker = rocker_port->rocker;
+       const struct rocker *rocker = rocker_port->rocker;
        struct rocker_port *p;
        bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
        u32 out_pport;
@@ -3065,9 +3180,8 @@ static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
        if (rocker_port->stp_state == BR_STATE_LEARNING ||
            rocker_port->stp_state == BR_STATE_FORWARDING) {
                out_pport = rocker_port->pport;
-               err = rocker_group_l2_interface(rocker_port, flags,
-                                               vlan_id, out_pport,
-                                               pop_vlan);
+               err = rocker_group_l2_interface(rocker_port, trans, flags,
+                                               vlan_id, out_pport, pop_vlan);
                if (err) {
                        netdev_err(rocker_port->dev,
                                   "Error (%d) port VLAN l2 group for pport %d\n",
@@ -3083,7 +3197,7 @@ static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
 
        for (i = 0; i < rocker->port_count; i++) {
                p = rocker->ports[i];
-               if (test_bit(ntohs(vlan_id), p->vlan_bitmap))
+               if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap))
                        ref++;
        }
 
@@ -3091,9 +3205,8 @@ static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
                return 0;
 
        out_pport = 0;
-       err = rocker_group_l2_interface(rocker_port, flags,
-                                       vlan_id, out_pport,
-                                       pop_vlan);
+       err = rocker_group_l2_interface(rocker_port, trans, flags,
+                                       vlan_id, out_pport, pop_vlan);
        if (err) {
                netdev_err(rocker_port->dev,
                           "Error (%d) port VLAN l2 group for CPU port\n", err);
@@ -3149,14 +3262,14 @@ static struct rocker_ctrl {
 };
 
 static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
-                                    int flags, struct rocker_ctrl *ctrl,
-                                    __be16 vlan_id)
+                                    enum switchdev_trans trans, int flags,
+                                    const struct rocker_ctrl *ctrl,
+                                    __be16 vlan_id)
 {
        u32 in_pport = rocker_port->pport;
        u32 in_pport_mask = 0xffffffff;
        u32 out_pport = 0;
-       u8 *eth_src = NULL;
-       u8 *eth_src_mask = NULL;
+       const u8 *eth_src = NULL;
+       const u8 *eth_src_mask = NULL;
        __be16 vlan_id_mask = htons(0xffff);
        u8 ip_proto = 0;
        u8 ip_proto_mask = 0;
@@ -3165,7 +3278,7 @@ static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
        u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
        int err;
 
-       err = rocker_flow_tbl_acl(rocker_port, flags,
+       err = rocker_flow_tbl_acl(rocker_port, trans, flags,
                                  in_pport, in_pport_mask,
                                  eth_src, eth_src_mask,
                                  ctrl->eth_dst, ctrl->eth_dst_mask,
@@ -3182,7 +3295,8 @@ static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
 }
 
 static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port,
-                                       int flags, struct rocker_ctrl *ctrl,
+                                       enum switchdev_trans trans, int flags,
+                                       const struct rocker_ctrl *ctrl,
                                        __be16 vlan_id)
 {
        enum rocker_of_dpa_table_id goto_tbl =
@@ -3194,7 +3308,7 @@ static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port,
        if (!rocker_port_is_bridged(rocker_port))
                return 0;
 
-       err = rocker_flow_tbl_bridge(rocker_port, flags,
+       err = rocker_flow_tbl_bridge(rocker_port, trans, flags,
                                     ctrl->eth_dst, ctrl->eth_dst_mask,
                                     vlan_id, tunnel_id,
                                     goto_tbl, group_id, ctrl->copy_to_cpu);
@@ -3206,8 +3320,8 @@ static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port,
 }
 
 static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
-                                     int flags, struct rocker_ctrl *ctrl,
-                                     __be16 vlan_id)
+                                     enum switchdev_trans trans, int flags,
+                                     const struct rocker_ctrl *ctrl,
+                                     __be16 vlan_id)
 {
        u32 in_pport_mask = 0xffffffff;
        __be16 vlan_id_mask = htons(0xffff);
@@ -3216,7 +3330,7 @@ static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
        if (ntohs(vlan_id) == 0)
                vlan_id = rocker_port->internal_vlan_id;
 
-       err = rocker_flow_tbl_term_mac(rocker_port,
+       err = rocker_flow_tbl_term_mac(rocker_port, trans,
                                       rocker_port->pport, in_pport_mask,
                                       ctrl->eth_type, ctrl->eth_dst,
                                       ctrl->eth_dst_mask, vlan_id,
@@ -3229,32 +3343,34 @@ static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
        return err;
 }
 
-static int rocker_port_ctrl_vlan(struct rocker_port *rocker_port, int flags,
-                                struct rocker_ctrl *ctrl, __be16 vlan_id)
+static int rocker_port_ctrl_vlan(struct rocker_port *rocker_port,
+                                enum switchdev_trans trans, int flags,
+                                const struct rocker_ctrl *ctrl, __be16 vlan_id)
 {
        if (ctrl->acl)
-               return rocker_port_ctrl_vlan_acl(rocker_port, flags,
+               return rocker_port_ctrl_vlan_acl(rocker_port, trans, flags,
                                                 ctrl, vlan_id);
        if (ctrl->bridge)
-               return rocker_port_ctrl_vlan_bridge(rocker_port, flags,
+               return rocker_port_ctrl_vlan_bridge(rocker_port, trans, flags,
                                                    ctrl, vlan_id);
 
        if (ctrl->term)
-               return rocker_port_ctrl_vlan_term(rocker_port, flags,
+               return rocker_port_ctrl_vlan_term(rocker_port, trans, flags,
                                                  ctrl, vlan_id);
 
        return -EOPNOTSUPP;
 }
 
 static int rocker_port_ctrl_vlan_add(struct rocker_port *rocker_port,
-                                    int flags, __be16 vlan_id)
+                                    enum switchdev_trans trans, int flags,
+                                    __be16 vlan_id)
 {
        int err = 0;
        int i;
 
        for (i = 0; i < ROCKER_CTRL_MAX; i++) {
                if (rocker_port->ctrls[i]) {
-                       err = rocker_port_ctrl_vlan(rocker_port, flags,
+                       err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
                                                    &rocker_ctrls[i], vlan_id);
                        if (err)
                                return err;
@@ -3264,8 +3380,9 @@ static int rocker_port_ctrl_vlan_add(struct rocker_port *rocker_port,
        return err;
 }
 
-static int rocker_port_ctrl(struct rocker_port *rocker_port, int flags,
-                           struct rocker_ctrl *ctrl)
+static int rocker_port_ctrl(struct rocker_port *rocker_port,
+                           enum switchdev_trans trans, int flags,
+                           const struct rocker_ctrl *ctrl)
 {
        u16 vid;
        int err = 0;
@@ -3273,7 +3390,7 @@ static int rocker_port_ctrl(struct rocker_port *rocker_port, int flags,
        for (vid = 1; vid < VLAN_N_VID; vid++) {
                if (!test_bit(vid, rocker_port->vlan_bitmap))
                        continue;
-               err = rocker_port_ctrl_vlan(rocker_port, flags,
+               err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
                                            ctrl, htons(vid));
                if (err)
                        break;
@@ -3282,8 +3399,8 @@ static int rocker_port_ctrl(struct rocker_port *rocker_port, int flags,
        return err;
 }
 
-static int rocker_port_vlan(struct rocker_port *rocker_port, int flags,
-                           u16 vid)
+static int rocker_port_vlan(struct rocker_port *rocker_port,
+                           enum switchdev_trans trans, int flags, u16 vid)
 {
        enum rocker_of_dpa_table_id goto_tbl =
                ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
@@ -3297,50 +3414,57 @@ static int rocker_port_vlan(struct rocker_port *rocker_port, int flags,
 
        internal_vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, &untagged);
 
-       if (adding && test_and_set_bit(ntohs(internal_vlan_id),
-                                      rocker_port->vlan_bitmap))
+       if (adding && test_bit(ntohs(internal_vlan_id),
+                              rocker_port->vlan_bitmap))
                        return 0; /* already added */
-       else if (!adding && !test_and_clear_bit(ntohs(internal_vlan_id),
-                                               rocker_port->vlan_bitmap))
+       else if (!adding && !test_bit(ntohs(internal_vlan_id),
+                                     rocker_port->vlan_bitmap))
                        return 0; /* already removed */
 
+       change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);
+
        if (adding) {
-               err = rocker_port_ctrl_vlan_add(rocker_port, flags,
+               err = rocker_port_ctrl_vlan_add(rocker_port, trans, flags,
                                                internal_vlan_id);
                if (err) {
                        netdev_err(rocker_port->dev,
                                   "Error (%d) port ctrl vlan add\n", err);
-                       return err;
+                       goto err_out;
                }
        }
 
-       err = rocker_port_vlan_l2_groups(rocker_port, flags,
+       err = rocker_port_vlan_l2_groups(rocker_port, trans, flags,
                                         internal_vlan_id, untagged);
        if (err) {
                netdev_err(rocker_port->dev,
                           "Error (%d) port VLAN l2 groups\n", err);
-               return err;
+               goto err_out;
        }
 
-       err = rocker_port_vlan_flood_group(rocker_port, flags,
+       err = rocker_port_vlan_flood_group(rocker_port, trans, flags,
                                           internal_vlan_id);
        if (err) {
                netdev_err(rocker_port->dev,
                           "Error (%d) port VLAN l2 flood group\n", err);
-               return err;
+               goto err_out;
        }
 
-       err = rocker_flow_tbl_vlan(rocker_port, flags,
+       err = rocker_flow_tbl_vlan(rocker_port, trans, flags,
                                   in_pport, vlan_id, vlan_id_mask,
                                   goto_tbl, untagged, internal_vlan_id);
        if (err)
                netdev_err(rocker_port->dev,
                           "Error (%d) port VLAN table\n", err);
 
+err_out:
+       if (trans == SWITCHDEV_TRANS_PREPARE)
+               change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);
+
        return err;
 }
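Note how test_and_set_bit()/test_and_clear_bit() above became test_bit() plus an explicit change_bit(): the bit must be flipped so the rest of the call chain sees the new VLAN state, but a PREPARE pass flips it back at err_out so the dry run leaves the port untouched. The two-phase calling convention this supports looks roughly like the following (illustrative helper, not driver code):

	/* Illustrative caller: switchdev core runs the same op twice. */
	static int example_vlan_add_two_phase(struct rocker_port *rocker_port,
					      u16 vid)
	{
		int err;

		/* Phase 1: dry run - validate and preallocate, no state change. */
		err = rocker_port_vlan(rocker_port, SWITCHDEV_TRANS_PREPARE, 0, vid);
		if (err)
			return err;

		/* Phase 2: reuses memory queued in phase 1; expected not to fail. */
		return rocker_port_vlan(rocker_port, SWITCHDEV_TRANS_COMMIT, 0, vid);
	}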
 
-static int rocker_port_ig_tbl(struct rocker_port *rocker_port, int flags)
+static int rocker_port_ig_tbl(struct rocker_port *rocker_port,
+                             enum switchdev_trans trans, int flags)
 {
        enum rocker_of_dpa_table_id goto_tbl;
        u32 in_pport;
@@ -3355,7 +3479,7 @@ static int rocker_port_ig_tbl(struct rocker_port *rocker_port, int flags)
        in_pport_mask = 0xffff0000;
        goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;
 
-       err = rocker_flow_tbl_ig_port(rocker_port, flags,
+       err = rocker_flow_tbl_ig_port(rocker_port, trans, flags,
                                      in_pport, in_pport_mask,
                                      goto_tbl);
        if (err)
@@ -3367,7 +3491,8 @@ static int rocker_port_ig_tbl(struct rocker_port *rocker_port, int flags)
 
 struct rocker_fdb_learn_work {
        struct work_struct work;
-       struct net_device *dev;
+       struct rocker_port *rocker_port;
+       enum switchdev_trans trans;
        int flags;
        u8 addr[ETH_ALEN];
        u16 vid;
@@ -3375,27 +3500,28 @@ struct rocker_fdb_learn_work {
 
 static void rocker_port_fdb_learn_work(struct work_struct *work)
 {
-       struct rocker_fdb_learn_work *lw =
+       const struct rocker_fdb_learn_work *lw =
                container_of(work, struct rocker_fdb_learn_work, work);
        bool removing = (lw->flags & ROCKER_OP_FLAG_REMOVE);
        bool learned = (lw->flags & ROCKER_OP_FLAG_LEARNED);
-       struct netdev_switch_notifier_fdb_info info;
+       struct switchdev_notifier_fdb_info info;
 
        info.addr = lw->addr;
        info.vid = lw->vid;
 
        if (learned && removing)
-               call_netdev_switch_notifiers(NETDEV_SWITCH_FDB_DEL,
-                                            lw->dev, &info.info);
+               call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
+                                        lw->rocker_port->dev, &info.info);
        else if (learned && !removing)
-               call_netdev_switch_notifiers(NETDEV_SWITCH_FDB_ADD,
-                                            lw->dev, &info.info);
+               call_switchdev_notifiers(SWITCHDEV_FDB_ADD,
+                                        lw->rocker_port->dev, &info.info);
 
-       kfree(work);
+       rocker_port_kfree(lw->trans, work);
 }
 
 static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
-                                int flags, const u8 *addr, __be16 vlan_id)
+                                enum switchdev_trans trans, int flags,
+                                const u8 *addr, __be16 vlan_id)
 {
        struct rocker_fdb_learn_work *lw;
        enum rocker_of_dpa_table_id goto_tbl =
@@ -3411,8 +3537,8 @@ static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
                group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
 
        if (!(flags & ROCKER_OP_FLAG_REFRESH)) {
-               err = rocker_flow_tbl_bridge(rocker_port, flags, addr, NULL,
-                                            vlan_id, tunnel_id, goto_tbl,
+               err = rocker_flow_tbl_bridge(rocker_port, trans, flags, addr,
+                                            NULL, vlan_id, tunnel_id, goto_tbl,
                                             group_id, copy_to_cpu);
                if (err)
                        return err;
@@ -3424,24 +3550,29 @@ static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
        if (!rocker_port_is_bridged(rocker_port))
                return 0;
 
-       lw = kmalloc(sizeof(*lw), rocker_op_flags_gfp(flags));
+       lw = rocker_port_kzalloc(rocker_port, trans, sizeof(*lw));
        if (!lw)
                return -ENOMEM;
 
        INIT_WORK(&lw->work, rocker_port_fdb_learn_work);
 
-       lw->dev = rocker_port->dev;
+       lw->rocker_port = rocker_port;
+       lw->trans = trans;
        lw->flags = flags;
        ether_addr_copy(lw->addr, addr);
        lw->vid = rocker_port_vlan_to_vid(rocker_port, vlan_id);
 
-       schedule_work(&lw->work);
+       if (trans == SWITCHDEV_TRANS_PREPARE)
+               rocker_port_kfree(trans, lw);
+       else
+               schedule_work(&lw->work);
 
        return 0;
 }
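The PREPARE branch above can "free" the work item it just allocated because, under the allocator model sketched earlier, a prepare-phase free is deliberately a no-op: the memory stays queued for reuse at commit, and only the commit/none paths actually schedule the work (firing the FDB notifier from a dry run would be a visible side effect). A hedged sketch of the matching free helper, using the same illustrative header layout as before:

	static void rocker_port_kfree_sketch(enum switchdev_trans trans, void *mem)
	{
		struct list_head *elem;

		if (trans == SWITCHDEV_TRANS_PREPARE)
			return;		/* keep it queued for the commit pass */
		if (!mem)
			return;
		elem = (struct list_head *)mem - 1;
		kfree(elem);
	}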
 
 static struct rocker_fdb_tbl_entry *
-rocker_fdb_tbl_find(struct rocker *rocker, struct rocker_fdb_tbl_entry *match)
+rocker_fdb_tbl_find(const struct rocker *rocker,
+                   const struct rocker_fdb_tbl_entry *match)
 {
        struct rocker_fdb_tbl_entry *found;
 
@@ -3453,6 +3584,7 @@ rocker_fdb_tbl_find(struct rocker *rocker, struct rocker_fdb_tbl_entry *match)
 }
 
 static int rocker_port_fdb(struct rocker_port *rocker_port,
+                          enum switchdev_trans trans,
                           const unsigned char *addr,
                           __be16 vlan_id, int flags)
 {
@@ -3462,7 +3594,7 @@ static int rocker_port_fdb(struct rocker_port *rocker_port,
        bool removing = (flags & ROCKER_OP_FLAG_REMOVE);
        unsigned long lock_flags;
 
-       fdb = kzalloc(sizeof(*fdb), rocker_op_flags_gfp(flags));
+       fdb = rocker_port_kzalloc(rocker_port, trans, sizeof(*fdb));
        if (!fdb)
                return -ENOMEM;
 
@@ -3477,32 +3609,35 @@ static int rocker_port_fdb(struct rocker_port *rocker_port,
        found = rocker_fdb_tbl_find(rocker, fdb);
 
        if (removing && found) {
-               kfree(fdb);
-               hash_del(&found->entry);
+               rocker_port_kfree(trans, fdb);
+               if (trans != SWITCHDEV_TRANS_PREPARE)
+                       hash_del(&found->entry);
        } else if (!removing && !found) {
-               hash_add(rocker->fdb_tbl, &fdb->entry, fdb->key_crc32);
+               if (trans != SWITCHDEV_TRANS_PREPARE)
+                       hash_add(rocker->fdb_tbl, &fdb->entry, fdb->key_crc32);
        }
 
        spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
 
        /* Check if adding and already exists, or removing and can't find */
        if (!found != !removing) {
-               kfree(fdb);
+               rocker_port_kfree(trans, fdb);
                if (!found && removing)
                        return 0;
                /* Refreshing existing to update aging timers */
                flags |= ROCKER_OP_FLAG_REFRESH;
        }
 
-       return rocker_port_fdb_learn(rocker_port, flags, addr, vlan_id);
+       return rocker_port_fdb_learn(rocker_port, trans, flags, addr, vlan_id);
 }
 
-static int rocker_port_fdb_flush(struct rocker_port *rocker_port)
+static int rocker_port_fdb_flush(struct rocker_port *rocker_port,
+                                enum switchdev_trans trans)
 {
        struct rocker *rocker = rocker_port->rocker;
        struct rocker_fdb_tbl_entry *found;
        unsigned long lock_flags;
-       int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE;
+       int flags = ROCKER_OP_FLAG_REMOVE;
        struct hlist_node *tmp;
        int bkt;
        int err = 0;
@@ -3518,12 +3653,13 @@ static int rocker_port_fdb_flush(struct rocker_port *rocker_port)
                        continue;
                if (!found->learned)
                        continue;
-               err = rocker_port_fdb_learn(rocker_port, flags,
+               err = rocker_port_fdb_learn(rocker_port, trans, flags,
                                            found->key.addr,
                                            found->key.vlan_id);
                if (err)
                        goto err_out;
-               hash_del(&found->entry);
+               if (trans != SWITCHDEV_TRANS_PREPARE)
+                       hash_del(&found->entry);
        }
 
 err_out:
@@ -3533,7 +3669,8 @@ err_out:
 }
 
 static int rocker_port_router_mac(struct rocker_port *rocker_port,
-                                 int flags, __be16 vlan_id)
+                                 enum switchdev_trans trans, int flags,
+                                 __be16 vlan_id)
 {
        u32 in_pport_mask = 0xffffffff;
        __be16 eth_type;
@@ -3546,7 +3683,7 @@ static int rocker_port_router_mac(struct rocker_port *rocker_port,
                vlan_id = rocker_port->internal_vlan_id;
 
        eth_type = htons(ETH_P_IP);
-       err = rocker_flow_tbl_term_mac(rocker_port,
+       err = rocker_flow_tbl_term_mac(rocker_port, trans,
                                       rocker_port->pport, in_pport_mask,
                                       eth_type, rocker_port->dev->dev_addr,
                                       dst_mac_mask, vlan_id, vlan_id_mask,
@@ -3555,7 +3692,7 @@ static int rocker_port_router_mac(struct rocker_port *rocker_port,
                return err;
 
        eth_type = htons(ETH_P_IPV6);
-       err = rocker_flow_tbl_term_mac(rocker_port,
+       err = rocker_flow_tbl_term_mac(rocker_port, trans,
                                       rocker_port->pport, in_pport_mask,
                                       eth_type, rocker_port->dev->dev_addr,
                                       dst_mac_mask, vlan_id, vlan_id_mask,
@@ -3564,13 +3701,14 @@ static int rocker_port_router_mac(struct rocker_port *rocker_port,
        return err;
 }
 
-static int rocker_port_fwding(struct rocker_port *rocker_port)
+static int rocker_port_fwding(struct rocker_port *rocker_port,
+                             enum switchdev_trans trans)
 {
        bool pop_vlan;
        u32 out_pport;
        __be16 vlan_id;
        u16 vid;
-       int flags = ROCKER_OP_FLAG_NOWAIT;
+       int flags = 0;
        int err;
 
        /* Port will be forwarding-enabled if its STP state is LEARNING
@@ -3590,9 +3728,8 @@ static int rocker_port_fwding(struct rocker_port *rocker_port)
                        continue;
                vlan_id = htons(vid);
                pop_vlan = rocker_vlan_id_is_internal(vlan_id);
-               err = rocker_group_l2_interface(rocker_port, flags,
-                                               vlan_id, out_pport,
-                                               pop_vlan);
+               err = rocker_group_l2_interface(rocker_port, trans, flags,
+                                               vlan_id, out_pport, pop_vlan);
                if (err) {
                        netdev_err(rocker_port->dev,
                                   "Error (%d) port VLAN l2 group for pport %d\n",
@@ -3604,13 +3741,21 @@ static int rocker_port_fwding(struct rocker_port *rocker_port)
        return 0;
 }
 
-static int rocker_port_stp_update(struct rocker_port *rocker_port, u8 state)
+static int rocker_port_stp_update(struct rocker_port *rocker_port,
+                                 enum switchdev_trans trans, u8 state)
 {
        bool want[ROCKER_CTRL_MAX] = { 0, };
+       bool prev_ctrls[ROCKER_CTRL_MAX];
+       u8 prev_state;
        int flags;
        int err;
        int i;
 
+       if (trans == SWITCHDEV_TRANS_PREPARE) {
+               memcpy(prev_ctrls, rocker_port->ctrls, sizeof(prev_ctrls));
+               prev_state = rocker_port->stp_state;
+       }
+
        if (rocker_port->stp_state == state)
                return 0;
 
@@ -3638,45 +3783,54 @@ static int rocker_port_stp_update(struct rocker_port *rocker_port, u8 state)
 
        for (i = 0; i < ROCKER_CTRL_MAX; i++) {
                if (want[i] != rocker_port->ctrls[i]) {
-                       flags = ROCKER_OP_FLAG_NOWAIT |
-                               (want[i] ? 0 : ROCKER_OP_FLAG_REMOVE);
-                       err = rocker_port_ctrl(rocker_port, flags,
+                       flags = (want[i] ? 0 : ROCKER_OP_FLAG_REMOVE);
+                       err = rocker_port_ctrl(rocker_port, trans, flags,
                                               &rocker_ctrls[i]);
                        if (err)
-                               return err;
+                               goto err_out;
                        rocker_port->ctrls[i] = want[i];
                }
        }
 
-       err = rocker_port_fdb_flush(rocker_port);
+       err = rocker_port_fdb_flush(rocker_port, trans);
        if (err)
-               return err;
+               goto err_out;
+
+       err = rocker_port_fwding(rocker_port, trans);
 
-       return rocker_port_fwding(rocker_port);
+err_out:
+       if (trans == SWITCHDEV_TRANS_PREPARE) {
+               memcpy(rocker_port->ctrls, prev_ctrls, sizeof(prev_ctrls));
+               rocker_port->stp_state = prev_state;
+       }
+
+       return err;
 }
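
    The snapshot-and-restore added to rocker_port_stp_update() above is the
    heart of the new prepare-commit model: on SWITCHDEV_TRANS_PREPARE the op
    runs end to end, exercising allocations and error paths, but the software
    state it touched is rolled back so the later commit starts from the
    original state. A minimal sketch of the pattern, with illustrative names
    (only the SWITCHDEV_TRANS_* values come from this series):

        enum switchdev_trans {
                SWITCHDEV_TRANS_NONE,
                SWITCHDEV_TRANS_PREPARE,
                SWITCHDEV_TRANS_ABORT,
                SWITCHDEV_TRANS_COMMIT,
        };

        struct port_state {                     /* illustrative */
                int stp_state;
        };

        static int port_op(struct port_state *p,
                           enum switchdev_trans trans, int state)
        {
                int prev_state = p->stp_state;  /* snapshot first */
                int err = 0;

                p->stp_state = state;
                /* ... program hardware, allocate resources, etc. ... */

                if (trans == SWITCHDEV_TRANS_PREPARE)
                        p->stp_state = prev_state;      /* dry run: undo */

                return err;
        }
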
 
-static int rocker_port_fwd_enable(struct rocker_port *rocker_port)
+static int rocker_port_fwd_enable(struct rocker_port *rocker_port,
+                                 enum switchdev_trans trans)
 {
        if (rocker_port_is_bridged(rocker_port))
                /* bridge STP will enable port */
                return 0;
 
        /* port is not bridged, so simulate going to FORWARDING state */
-       return rocker_port_stp_update(rocker_port, BR_STATE_FORWARDING);
+       return rocker_port_stp_update(rocker_port, trans, BR_STATE_FORWARDING);
 }
 
-static int rocker_port_fwd_disable(struct rocker_port *rocker_port)
+static int rocker_port_fwd_disable(struct rocker_port *rocker_port,
+                                  enum switchdev_trans trans)
 {
        if (rocker_port_is_bridged(rocker_port))
                /* bridge STP will disable port */
                return 0;
 
        /* port is not bridged, so simulate going to DISABLED state */
-       return rocker_port_stp_update(rocker_port, BR_STATE_DISABLED);
+       return rocker_port_stp_update(rocker_port, trans, BR_STATE_DISABLED);
 }
 
 static struct rocker_internal_vlan_tbl_entry *
-rocker_internal_vlan_tbl_find(struct rocker *rocker, int ifindex)
+rocker_internal_vlan_tbl_find(const struct rocker *rocker, int ifindex)
 {
        struct rocker_internal_vlan_tbl_entry *found;
 
@@ -3731,8 +3885,9 @@ found:
        return found->vlan_id;
 }
 
-static void rocker_port_internal_vlan_id_put(struct rocker_port *rocker_port,
-                                            int ifindex)
+static void
+rocker_port_internal_vlan_id_put(const struct rocker_port *rocker_port,
+                                int ifindex)
 {
        struct rocker *rocker = rocker_port->rocker;
        struct rocker_internal_vlan_tbl_entry *found;
@@ -3760,11 +3915,12 @@ not_found:
        spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
 }
 
-static int rocker_port_fib_ipv4(struct rocker_port *rocker_port, __be32 dst,
-                               int dst_len, struct fib_info *fi, u32 tb_id,
-                               int flags)
+static int rocker_port_fib_ipv4(struct rocker_port *rocker_port,
+                               enum switchdev_trans trans, __be32 dst,
+                               int dst_len, const struct fib_info *fi,
+                               u32 tb_id, int flags)
 {
-       struct fib_nh *nh;
+       const struct fib_nh *nh;
        __be16 eth_type = htons(ETH_P_IP);
        __be32 dst_mask = inet_make_mask(dst_len);
        __be16 internal_vlan_id = rocker_port->internal_vlan_id;
@@ -3784,7 +3940,7 @@ static int rocker_port_fib_ipv4(struct rocker_port *rocker_port, __be32 dst,
        has_gw = !!nh->nh_gw;
 
        if (has_gw && nh_on_port) {
-               err = rocker_port_ipv4_nh(rocker_port, flags,
+               err = rocker_port_ipv4_nh(rocker_port, trans, flags,
                                          nh->nh_gw, &index);
                if (err)
                        return err;
@@ -3795,7 +3951,7 @@ static int rocker_port_fib_ipv4(struct rocker_port *rocker_port, __be32 dst,
                group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0);
        }
 
-       err = rocker_flow_tbl_ucast4_routing(rocker_port, eth_type, dst,
+       err = rocker_flow_tbl_ucast4_routing(rocker_port, trans, eth_type, dst,
                                             dst_mask, priority, goto_tbl,
                                             group_id, flags);
        if (err)
@@ -3834,7 +3990,7 @@ static int rocker_port_open(struct net_device *dev)
                goto err_request_rx_irq;
        }
 
-       err = rocker_port_fwd_enable(rocker_port);
+       err = rocker_port_fwd_enable(rocker_port, SWITCHDEV_TRANS_NONE);
        if (err)
                goto err_fwd_enable;
 
@@ -3861,7 +4017,7 @@ static int rocker_port_stop(struct net_device *dev)
        rocker_port_set_enable(rocker_port, false);
        napi_disable(&rocker_port->napi_rx);
        napi_disable(&rocker_port->napi_tx);
-       rocker_port_fwd_disable(rocker_port);
+       rocker_port_fwd_disable(rocker_port, SWITCHDEV_TRANS_NONE);
        free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
        free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
        rocker_port_dma_rings_fini(rocker_port);
@@ -3869,12 +4025,12 @@ static int rocker_port_stop(struct net_device *dev)
        return 0;
 }
 
-static void rocker_tx_desc_frags_unmap(struct rocker_port *rocker_port,
-                                      struct rocker_desc_info *desc_info)
+static void rocker_tx_desc_frags_unmap(const struct rocker_port *rocker_port,
+                                      const struct rocker_desc_info *desc_info)
 {
-       struct rocker *rocker = rocker_port->rocker;
+       const struct rocker *rocker = rocker_port->rocker;
        struct pci_dev *pdev = rocker->pdev;
-       struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1];
+       const struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1];
        struct rocker_tlv *attr;
        int rem;
 
@@ -3882,7 +4038,7 @@ static void rocker_tx_desc_frags_unmap(struct rocker_port *rocker_port,
        if (!attrs[ROCKER_TLV_TX_FRAGS])
                return;
        rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) {
-               struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1];
+               const struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1];
                dma_addr_t dma_handle;
                size_t len;
 
@@ -3899,11 +4055,11 @@ static void rocker_tx_desc_frags_unmap(struct rocker_port *rocker_port,
        }
 }
 
-static int rocker_tx_desc_frag_map_put(struct rocker_port *rocker_port,
+static int rocker_tx_desc_frag_map_put(const struct rocker_port *rocker_port,
                                       struct rocker_desc_info *desc_info,
                                       char *buf, size_t buf_len)
 {
-       struct rocker *rocker = rocker_port->rocker;
+       const struct rocker *rocker = rocker_port->rocker;
        struct pci_dev *pdev = rocker->pdev;
        dma_addr_t dma_handle;
        struct rocker_tlv *frag;
@@ -4008,269 +4164,333 @@ static int rocker_port_set_mac_address(struct net_device *dev, void *p)
        return 0;
 }
 
-static int rocker_port_vlan_rx_add_vid(struct net_device *dev,
-                                      __be16 proto, u16 vid)
+static int rocker_port_get_phys_port_name(struct net_device *dev,
+                                         char *buf, size_t len)
 {
        struct rocker_port *rocker_port = netdev_priv(dev);
+       struct port_name name = { .buf = buf, .len = len };
        int err;
 
-       err = rocker_port_vlan(rocker_port, 0, vid);
-       if (err)
-               return err;
+       err = rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE,
+                             rocker_cmd_get_port_settings_prep, NULL,
+                             rocker_cmd_get_port_settings_phys_name_proc,
+                             &name);
 
-       return rocker_port_router_mac(rocker_port, 0, htons(vid));
+       return err ? -EOPNOTSUPP : 0;
 }
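
    Visible across these hunks: rocker_cmd_exec() lost its explicit struct
    rocker argument (recoverable from rocker_port) and its trailing "nowait"
    bool, and gained the transaction phase. Its new signature, inferred from
    the call sites (the callback typedef names here are assumptions):

        static int rocker_cmd_exec(struct rocker_port *rocker_port,
                                   enum switchdev_trans trans,
                                   rocker_cmd_prep_cb_t prepare,
                                   void *prepare_priv,
                                   rocker_cmd_proc_cb_t process,
                                   void *process_priv);
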
 
-static int rocker_port_vlan_rx_kill_vid(struct net_device *dev,
-                                       __be16 proto, u16 vid)
+static const struct net_device_ops rocker_port_netdev_ops = {
+       .ndo_open                       = rocker_port_open,
+       .ndo_stop                       = rocker_port_stop,
+       .ndo_start_xmit                 = rocker_port_xmit,
+       .ndo_set_mac_address            = rocker_port_set_mac_address,
+       .ndo_bridge_getlink             = switchdev_port_bridge_getlink,
+       .ndo_bridge_setlink             = switchdev_port_bridge_setlink,
+       .ndo_bridge_dellink             = switchdev_port_bridge_dellink,
+       .ndo_fdb_add                    = switchdev_port_fdb_add,
+       .ndo_fdb_del                    = switchdev_port_fdb_del,
+       .ndo_fdb_dump                   = switchdev_port_fdb_dump,
+       .ndo_get_phys_port_name         = rocker_port_get_phys_port_name,
+};
+
+/********************
+ * switchdev interface
+ ********************/
+
+static int rocker_port_attr_get(struct net_device *dev,
+                               struct switchdev_attr *attr)
 {
-       struct rocker_port *rocker_port = netdev_priv(dev);
-       int err;
+       const struct rocker_port *rocker_port = netdev_priv(dev);
+       const struct rocker *rocker = rocker_port->rocker;
 
-       err = rocker_port_router_mac(rocker_port, ROCKER_OP_FLAG_REMOVE,
-                                    htons(vid));
-       if (err)
-               return err;
+       switch (attr->id) {
+       case SWITCHDEV_ATTR_PORT_PARENT_ID:
+               attr->u.ppid.id_len = sizeof(rocker->hw.id);
+               memcpy(&attr->u.ppid.id, &rocker->hw.id, attr->u.ppid.id_len);
+               break;
+       case SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS:
+               attr->u.brport_flags = rocker_port->brport_flags;
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
 
-       return rocker_port_vlan(rocker_port, ROCKER_OP_FLAG_REMOVE, vid);
+       return 0;
 }
 
-static int rocker_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
-                              struct net_device *dev,
-                              const unsigned char *addr, u16 vid,
-                              u16 nlm_flags)
+static void rocker_port_trans_abort(const struct rocker_port *rocker_port)
 {
-       struct rocker_port *rocker_port = netdev_priv(dev);
-       __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, NULL);
-       int flags = 0;
+       struct list_head *mem, *tmp;
 
-       if (!rocker_port_is_bridged(rocker_port))
-               return -EINVAL;
-
-       return rocker_port_fdb(rocker_port, addr, vlan_id, flags);
+       list_for_each_safe(mem, tmp, &rocker_port->trans_mem) {
+               list_del(mem);
+               kfree(mem);
+       }
 }
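
    rocker_port_trans_abort() above is the teardown half of PREPARE-phase
    memory handling: allocations made during the prepare pass are queued on
    rocker_port->trans_mem so an ABORT can drop them in one pass. The matching
    allocator is not in this hunk; a plausible sketch of it, assuming the same
    list_head-prefix layout the abort loop frees, would be:

        static void *rocker_port_trans_alloc(struct list_head *trans_mem,
                                             size_t size, gfp_t flags)
        {
                struct list_head *elem;

                /* Prefix the allocation with a link so ABORT can free it. */
                elem = kzalloc(size + sizeof(*elem), flags);
                if (!elem)
                        return NULL;
                list_add_tail(elem, trans_mem);
                return elem + 1;        /* caller's memory starts after the link */
        }
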
 
-static int rocker_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
-                              struct net_device *dev,
-                              const unsigned char *addr, u16 vid)
+static int rocker_port_brport_flags_set(struct rocker_port *rocker_port,
+                                       enum switchdev_trans trans,
+                                       unsigned long brport_flags)
 {
-       struct rocker_port *rocker_port = netdev_priv(dev);
-       __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, NULL);
-       int flags = ROCKER_OP_FLAG_REMOVE;
+       unsigned long orig_flags;
+       int err = 0;
 
-       if (!rocker_port_is_bridged(rocker_port))
-               return -EINVAL;
+       orig_flags = rocker_port->brport_flags;
+       rocker_port->brport_flags = brport_flags;
+       if ((orig_flags ^ rocker_port->brport_flags) & BR_LEARNING)
+               err = rocker_port_set_learning(rocker_port, trans);
 
-       return rocker_port_fdb(rocker_port, addr, vlan_id, flags);
+       if (trans == SWITCHDEV_TRANS_PREPARE)
+               rocker_port->brport_flags = orig_flags;
+
+       return err;
 }
 
-static int rocker_fdb_fill_info(struct sk_buff *skb,
-                               struct rocker_port *rocker_port,
-                               const unsigned char *addr, u16 vid,
-                               u32 portid, u32 seq, int type,
-                               unsigned int flags)
+static int rocker_port_attr_set(struct net_device *dev,
+                               struct switchdev_attr *attr)
 {
-       struct nlmsghdr *nlh;
-       struct ndmsg *ndm;
+       struct rocker_port *rocker_port = netdev_priv(dev);
+       int err = 0;
 
-       nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
-       if (!nlh)
-               return -EMSGSIZE;
+       switch (attr->trans) {
+       case SWITCHDEV_TRANS_PREPARE:
+               BUG_ON(!list_empty(&rocker_port->trans_mem));
+               break;
+       case SWITCHDEV_TRANS_ABORT:
+               rocker_port_trans_abort(rocker_port);
+               return 0;
+       default:
+               break;
+       }
 
-       ndm = nlmsg_data(nlh);
-       ndm->ndm_family  = AF_BRIDGE;
-       ndm->ndm_pad1    = 0;
-       ndm->ndm_pad2    = 0;
-       ndm->ndm_flags   = NTF_SELF;
-       ndm->ndm_type    = 0;
-       ndm->ndm_ifindex = rocker_port->dev->ifindex;
-       ndm->ndm_state   = NUD_REACHABLE;
+       switch (attr->id) {
+       case SWITCHDEV_ATTR_PORT_STP_STATE:
+               err = rocker_port_stp_update(rocker_port, attr->trans,
+                                            attr->u.stp_state);
+               break;
+       case SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS:
+               err = rocker_port_brport_flags_set(rocker_port, attr->trans,
+                                                  attr->u.brport_flags);
+               break;
+       default:
+               err = -EOPNOTSUPP;
+               break;
+       }
 
-       if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
-               goto nla_put_failure;
+       return err;
+}
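
    For context, the switchdev core (assumed behaviour, not shown in this
    patch) drives this handler in two passes, roughly as sketched below; the
    BUG_ON above asserts that no prepare-phase memory is still queued when a
    new transaction starts:

        static int switchdev_attr_set_two_phase(struct net_device *dev,
                                                struct switchdev_attr *attr)
        {
                const struct switchdev_ops *ops = dev->switchdev_ops;
                int err;

                attr->trans = SWITCHDEV_TRANS_PREPARE;
                err = ops->switchdev_port_attr_set(dev, attr);
                if (err) {
                        /* Driver must drop anything it queued. */
                        attr->trans = SWITCHDEV_TRANS_ABORT;
                        ops->switchdev_port_attr_set(dev, attr);
                        return err;
                }

                attr->trans = SWITCHDEV_TRANS_COMMIT;
                return ops->switchdev_port_attr_set(dev, attr);
        }
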
 
-       if (vid && nla_put_u16(skb, NDA_VLAN, vid))
-               goto nla_put_failure;
+static int rocker_port_vlan_add(struct rocker_port *rocker_port,
+                               enum switchdev_trans trans, u16 vid, u16 flags)
+{
+       int err;
 
-       nlmsg_end(skb, nlh);
-       return 0;
+       /* XXX deal with flags for PVID and untagged */
 
-nla_put_failure:
-       nlmsg_cancel(skb, nlh);
-       return -EMSGSIZE;
+       err = rocker_port_vlan(rocker_port, trans, 0, vid);
+       if (err)
+               return err;
+
+       err = rocker_port_router_mac(rocker_port, trans, 0, htons(vid));
+       if (err)
+               rocker_port_vlan(rocker_port, trans,
+                                ROCKER_OP_FLAG_REMOVE, vid);
+
+       return err;
 }
 
-static int rocker_port_fdb_dump(struct sk_buff *skb,
-                               struct netlink_callback *cb,
-                               struct net_device *dev,
-                               struct net_device *filter_dev,
-                               int idx)
+static int rocker_port_vlans_add(struct rocker_port *rocker_port,
+                                enum switchdev_trans trans,
+                                const struct switchdev_obj_vlan *vlan)
 {
-       struct rocker_port *rocker_port = netdev_priv(dev);
-       struct rocker *rocker = rocker_port->rocker;
-       struct rocker_fdb_tbl_entry *found;
-       struct hlist_node *tmp;
-       int bkt;
-       unsigned long lock_flags;
-       const unsigned char *addr;
        u16 vid;
        int err;
 
-       spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
-       hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
-               if (found->key.pport != rocker_port->pport)
-                       continue;
-               if (idx < cb->args[0])
-                       goto skip;
-               addr = found->key.addr;
-               vid = rocker_port_vlan_to_vid(rocker_port, found->key.vlan_id);
-               err = rocker_fdb_fill_info(skb, rocker_port, addr, vid,
-                                          NETLINK_CB(cb->skb).portid,
-                                          cb->nlh->nlmsg_seq,
-                                          RTM_NEWNEIGH, NLM_F_MULTI);
-               if (err < 0)
-                       break;
-skip:
-               ++idx;
+       for (vid = vlan->vid_start; vid <= vlan->vid_end; vid++) {
+               err = rocker_port_vlan_add(rocker_port, trans,
+                                          vid, vlan->flags);
+               if (err)
+                       return err;
        }
-       spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
-       return idx;
+
+       return 0;
 }
 
-static int rocker_port_bridge_setlink(struct net_device *dev,
-                                     struct nlmsghdr *nlh, u16 flags)
+static int rocker_port_fdb_add(struct rocker_port *rocker_port,
+                              enum switchdev_trans trans,
+                              const struct switchdev_obj_fdb *fdb)
 {
-       struct rocker_port *rocker_port = netdev_priv(dev);
-       struct nlattr *protinfo;
-       struct nlattr *attr;
-       int err;
+       __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
+       int flags = 0;
 
-       protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
-                                  IFLA_PROTINFO);
-       if (protinfo) {
-               attr = nla_find_nested(protinfo, IFLA_BRPORT_LEARNING);
-               if (attr) {
-                       if (nla_len(attr) < sizeof(u8))
-                               return -EINVAL;
-
-                       if (nla_get_u8(attr))
-                               rocker_port->brport_flags |= BR_LEARNING;
-                       else
-                               rocker_port->brport_flags &= ~BR_LEARNING;
-                       err = rocker_port_set_learning(rocker_port);
-                       if (err)
-                               return err;
-               }
-               attr = nla_find_nested(protinfo, IFLA_BRPORT_LEARNING_SYNC);
-               if (attr) {
-                       if (nla_len(attr) < sizeof(u8))
-                               return -EINVAL;
-
-                       if (nla_get_u8(attr))
-                               rocker_port->brport_flags |= BR_LEARNING_SYNC;
-                       else
-                               rocker_port->brport_flags &= ~BR_LEARNING_SYNC;
-               }
-       }
+       if (!rocker_port_is_bridged(rocker_port))
+               return -EINVAL;
 
-       return 0;
+       return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
 }
 
-static int rocker_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
-                                     struct net_device *dev,
-                                     u32 filter_mask, int nlflags)
+static int rocker_port_obj_add(struct net_device *dev,
+                              struct switchdev_obj *obj)
 {
        struct rocker_port *rocker_port = netdev_priv(dev);
-       u16 mode = BRIDGE_MODE_UNDEF;
-       u32 mask = BR_LEARNING | BR_LEARNING_SYNC;
+       const struct switchdev_obj_ipv4_fib *fib4;
+       int err = 0;
+
+       switch (obj->trans) {
+       case SWITCHDEV_TRANS_PREPARE:
+               BUG_ON(!list_empty(&rocker_port->trans_mem));
+               break;
+       case SWITCHDEV_TRANS_ABORT:
+               rocker_port_trans_abort(rocker_port);
+               return 0;
+       default:
+               break;
+       }
+
+       switch (obj->id) {
+       case SWITCHDEV_OBJ_PORT_VLAN:
+               err = rocker_port_vlans_add(rocker_port, obj->trans,
+                                           &obj->u.vlan);
+               break;
+       case SWITCHDEV_OBJ_IPV4_FIB:
+               fib4 = &obj->u.ipv4_fib;
+               err = rocker_port_fib_ipv4(rocker_port, obj->trans,
+                                          htonl(fib4->dst), fib4->dst_len,
+                                          fib4->fi, fib4->tb_id, 0);
+               break;
+       case SWITCHDEV_OBJ_PORT_FDB:
+               err = rocker_port_fdb_add(rocker_port, obj->trans, &obj->u.fdb);
+               break;
+       default:
+               err = -EOPNOTSUPP;
+               break;
+       }
 
-       return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode,
-                                      rocker_port->brport_flags, mask,
-                                      nlflags);
+       return err;
 }
 
-static int rocker_port_get_phys_port_name(struct net_device *dev,
-                                         char *buf, size_t len)
+static int rocker_port_vlan_del(struct rocker_port *rocker_port,
+                               u16 vid, u16 flags)
 {
-       struct rocker_port *rocker_port = netdev_priv(dev);
-       struct port_name name = { .buf = buf, .len = len };
        int err;
 
-       err = rocker_cmd_exec(rocker_port->rocker, rocker_port,
-                             rocker_cmd_get_port_settings_prep, NULL,
-                             rocker_cmd_get_port_settings_phys_name_proc,
-                             &name, false);
+       err = rocker_port_router_mac(rocker_port, SWITCHDEV_TRANS_NONE,
+                                    ROCKER_OP_FLAG_REMOVE, htons(vid));
+       if (err)
+               return err;
 
-       return err ? -EOPNOTSUPP : 0;
+       return rocker_port_vlan(rocker_port, SWITCHDEV_TRANS_NONE,
+                               ROCKER_OP_FLAG_REMOVE, vid);
 }
 
-static const struct net_device_ops rocker_port_netdev_ops = {
-       .ndo_open                       = rocker_port_open,
-       .ndo_stop                       = rocker_port_stop,
-       .ndo_start_xmit                 = rocker_port_xmit,
-       .ndo_set_mac_address            = rocker_port_set_mac_address,
-       .ndo_vlan_rx_add_vid            = rocker_port_vlan_rx_add_vid,
-       .ndo_vlan_rx_kill_vid           = rocker_port_vlan_rx_kill_vid,
-       .ndo_fdb_add                    = rocker_port_fdb_add,
-       .ndo_fdb_del                    = rocker_port_fdb_del,
-       .ndo_fdb_dump                   = rocker_port_fdb_dump,
-       .ndo_bridge_setlink             = rocker_port_bridge_setlink,
-       .ndo_bridge_getlink             = rocker_port_bridge_getlink,
-       .ndo_get_phys_port_name         = rocker_port_get_phys_port_name,
-};
+static int rocker_port_vlans_del(struct rocker_port *rocker_port,
+                                const struct switchdev_obj_vlan *vlan)
+{
+       u16 vid;
+       int err;
 
-/********************
- * swdev interface
- ********************/
+       for (vid = vlan->vid_start; vid <= vlan->vid_end; vid++) {
+               err = rocker_port_vlan_del(rocker_port, vid, vlan->flags);
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
 
-static int rocker_port_swdev_parent_id_get(struct net_device *dev,
-                                          struct netdev_phys_item_id *psid)
+static int rocker_port_fdb_del(struct rocker_port *rocker_port,
+                              enum switchdev_trans trans,
+                              const struct switchdev_obj_fdb *fdb)
 {
-       struct rocker_port *rocker_port = netdev_priv(dev);
-       struct rocker *rocker = rocker_port->rocker;
+       __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
+       int flags = ROCKER_OP_FLAG_REMOVE;
 
-       psid->id_len = sizeof(rocker->hw.id);
-       memcpy(&psid->id, &rocker->hw.id, psid->id_len);
-       return 0;
+       if (!rocker_port_is_bridged(rocker_port))
+               return -EINVAL;
+
+       return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
 }
 
-static int rocker_port_swdev_port_stp_update(struct net_device *dev, u8 state)
+static int rocker_port_obj_del(struct net_device *dev,
+                              struct switchdev_obj *obj)
 {
        struct rocker_port *rocker_port = netdev_priv(dev);
+       const struct switchdev_obj_ipv4_fib *fib4;
+       int err = 0;
 
-       return rocker_port_stp_update(rocker_port, state);
+       switch (obj->id) {
+       case SWITCHDEV_OBJ_PORT_VLAN:
+               err = rocker_port_vlans_del(rocker_port, &obj->u.vlan);
+               break;
+       case SWITCHDEV_OBJ_IPV4_FIB:
+               fib4 = &obj->u.ipv4_fib;
+               err = rocker_port_fib_ipv4(rocker_port, SWITCHDEV_TRANS_NONE,
+                                          htonl(fib4->dst), fib4->dst_len,
+                                          fib4->fi, fib4->tb_id,
+                                          ROCKER_OP_FLAG_REMOVE);
+               break;
+       case SWITCHDEV_OBJ_PORT_FDB:
+               err = rocker_port_fdb_del(rocker_port, obj->trans, &obj->u.fdb);
+               break;
+       default:
+               err = -EOPNOTSUPP;
+               break;
+       }
+
+       return err;
 }
 
-static int rocker_port_swdev_fib_ipv4_add(struct net_device *dev,
-                                         __be32 dst, int dst_len,
-                                         struct fib_info *fi,
-                                         u8 tos, u8 type,
-                                         u32 nlflags, u32 tb_id)
+static int rocker_port_fdb_dump(const struct rocker_port *rocker_port,
+                               struct switchdev_obj *obj)
 {
-       struct rocker_port *rocker_port = netdev_priv(dev);
-       int flags = 0;
+       struct rocker *rocker = rocker_port->rocker;
+       struct switchdev_obj_fdb *fdb = &obj->u.fdb;
+       struct rocker_fdb_tbl_entry *found;
+       struct hlist_node *tmp;
+       unsigned long lock_flags;
+       int bkt;
+       int err = 0;
+
+       spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
+       hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
+               if (found->key.pport != rocker_port->pport)
+                       continue;
+               fdb->addr = found->key.addr;
+               fdb->vid = rocker_port_vlan_to_vid(rocker_port,
+                                                  found->key.vlan_id);
+               err = obj->cb(rocker_port->dev, obj);
+               if (err)
+                       break;
+       }
+       spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
 
-       return rocker_port_fib_ipv4(rocker_port, dst, dst_len,
-                                   fi, tb_id, flags);
+       return err;
 }
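
    The dump now hands each entry to a callback supplied in the switchdev
    object rather than building netlink messages itself. Note the callback
    runs under fdb_tbl_lock (a spinlock taken with IRQs disabled), so it must
    not sleep. A hypothetical consumer, for illustration only:

        static int fdb_dump_cb(struct net_device *dev,
                               struct switchdev_obj *obj)
        {
                const struct switchdev_obj_fdb *fdb = &obj->u.fdb;

                netdev_dbg(dev, "fdb entry %pM vid %u\n", fdb->addr, fdb->vid);
                return 0;       /* non-zero stops the walk */
        }
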
 
-static int rocker_port_swdev_fib_ipv4_del(struct net_device *dev,
-                                         __be32 dst, int dst_len,
-                                         struct fib_info *fi,
-                                         u8 tos, u8 type, u32 tb_id)
+static int rocker_port_obj_dump(struct net_device *dev,
+                               struct switchdev_obj *obj)
 {
-       struct rocker_port *rocker_port = netdev_priv(dev);
-       int flags = ROCKER_OP_FLAG_REMOVE;
+       const struct rocker_port *rocker_port = netdev_priv(dev);
+       int err = 0;
 
-       return rocker_port_fib_ipv4(rocker_port, dst, dst_len,
-                                   fi, tb_id, flags);
+       switch (obj->id) {
+       case SWITCHDEV_OBJ_PORT_FDB:
+               err = rocker_port_fdb_dump(rocker_port, obj);
+               break;
+       default:
+               err = -EOPNOTSUPP;
+               break;
+       }
+
+       return err;
 }
 
-static const struct swdev_ops rocker_port_swdev_ops = {
-       .swdev_parent_id_get            = rocker_port_swdev_parent_id_get,
-       .swdev_port_stp_update          = rocker_port_swdev_port_stp_update,
-       .swdev_fib_ipv4_add             = rocker_port_swdev_fib_ipv4_add,
-       .swdev_fib_ipv4_del             = rocker_port_swdev_fib_ipv4_del,
+static const struct switchdev_ops rocker_port_switchdev_ops = {
+       .switchdev_port_attr_get        = rocker_port_attr_get,
+       .switchdev_port_attr_set        = rocker_port_attr_set,
+       .switchdev_port_obj_add         = rocker_port_obj_add,
+       .switchdev_port_obj_del         = rocker_port_obj_del,
+       .switchdev_port_obj_dump        = rocker_port_obj_dump,
 };
 
 /********************
@@ -4334,8 +4554,7 @@ static void rocker_port_get_strings(struct net_device *netdev, u32 stringset,
 }
 
 static int
-rocker_cmd_get_port_stats_prep(struct rocker *rocker,
-                              struct rocker_port *rocker_port,
+rocker_cmd_get_port_stats_prep(const struct rocker_port *rocker_port,
                               struct rocker_desc_info *desc_info,
                               void *priv)
 {
@@ -4359,14 +4578,13 @@ rocker_cmd_get_port_stats_prep(struct rocker *rocker,
 }
 
 static int
-rocker_cmd_get_port_stats_ethtool_proc(struct rocker *rocker,
-                                      struct rocker_port *rocker_port,
-                                      struct rocker_desc_info *desc_info,
+rocker_cmd_get_port_stats_ethtool_proc(const struct rocker_port *rocker_port,
+                                      const struct rocker_desc_info *desc_info,
                                       void *priv)
 {
-       struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
-       struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1];
-       struct rocker_tlv *pattr;
+       const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
+       const struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1];
+       const struct rocker_tlv *pattr;
        u32 pport;
        u64 *data = priv;
        int i;
@@ -4400,10 +4618,10 @@ rocker_cmd_get_port_stats_ethtool_proc(struct rocker *rocker,
 static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port,
                                             void *priv)
 {
-       return rocker_cmd_exec(rocker_port->rocker, rocker_port,
+       return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE,
                               rocker_cmd_get_port_stats_prep, NULL,
                               rocker_cmd_get_port_stats_ethtool_proc,
-                              priv, false);
+                              priv);
 }
 
 static void rocker_port_get_stats(struct net_device *dev,
@@ -4417,8 +4635,6 @@ static void rocker_port_get_stats(struct net_device *dev,
                for (i = 0; i < ARRAY_SIZE(rocker_port_stats); ++i)
                        data[i] = 0;
        }
-
-       return;
 }
 
 static int rocker_port_get_sset_count(struct net_device *netdev, int sset)
@@ -4453,8 +4669,8 @@ static struct rocker_port *rocker_port_napi_tx_get(struct napi_struct *napi)
 static int rocker_port_poll_tx(struct napi_struct *napi, int budget)
 {
        struct rocker_port *rocker_port = rocker_port_napi_tx_get(napi);
-       struct rocker *rocker = rocker_port->rocker;
-       struct rocker_desc_info *desc_info;
+       const struct rocker *rocker = rocker_port->rocker;
+       const struct rocker_desc_info *desc_info;
        u32 credits = 0;
        int err;
 
@@ -4472,8 +4688,9 @@ static int rocker_port_poll_tx(struct napi_struct *napi, int budget)
                if (err == 0) {
                        rocker_port->dev->stats.tx_packets++;
                        rocker_port->dev->stats.tx_bytes += skb->len;
-               } else
+               } else {
                        rocker_port->dev->stats.tx_errors++;
+               }
 
                dev_kfree_skb_any(skb);
                credits++;
@@ -4488,11 +4705,11 @@ static int rocker_port_poll_tx(struct napi_struct *napi, int budget)
        return 0;
 }
 
-static int rocker_port_rx_proc(struct rocker *rocker,
-                              struct rocker_port *rocker_port,
+static int rocker_port_rx_proc(const struct rocker *rocker,
+                              const struct rocker_port *rocker_port,
                               struct rocker_desc_info *desc_info)
 {
-       struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
+       const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
        struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
        size_t rx_len;
 
@@ -4514,7 +4731,7 @@ static int rocker_port_rx_proc(struct rocker *rocker,
 
        netif_receive_skb(skb);
 
-       return rocker_dma_rx_ring_skb_alloc(rocker, rocker_port, desc_info);
+       return rocker_dma_rx_ring_skb_alloc(rocker_port, desc_info);
 }
 
 static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi)
@@ -4525,7 +4742,7 @@ static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi)
 static int rocker_port_poll_rx(struct napi_struct *napi, int budget)
 {
        struct rocker_port *rocker_port = rocker_port_napi_rx_get(napi);
-       struct rocker *rocker = rocker_port->rocker;
+       const struct rocker *rocker = rocker_port->rocker;
        struct rocker_desc_info *desc_info;
        u32 credits = 0;
        int err;
@@ -4565,9 +4782,9 @@ static int rocker_port_poll_rx(struct napi_struct *napi, int budget)
  * PCI driver ops
  *****************/
 
-static void rocker_carrier_init(struct rocker_port *rocker_port)
+static void rocker_carrier_init(const struct rocker_port *rocker_port)
 {
-       struct rocker *rocker = rocker_port->rocker;
+       const struct rocker *rocker = rocker_port->rocker;
        u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS);
        bool link_up;
 
@@ -4578,23 +4795,24 @@ static void rocker_carrier_init(struct rocker_port *rocker_port)
                netif_carrier_off(rocker_port->dev);
 }
 
-static void rocker_remove_ports(struct rocker *rocker)
+static void rocker_remove_ports(const struct rocker *rocker)
 {
        struct rocker_port *rocker_port;
        int i;
 
        for (i = 0; i < rocker->port_count; i++) {
                rocker_port = rocker->ports[i];
-               rocker_port_ig_tbl(rocker_port, ROCKER_OP_FLAG_REMOVE);
+               rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE,
+                                  ROCKER_OP_FLAG_REMOVE);
                unregister_netdev(rocker_port->dev);
        }
        kfree(rocker->ports);
 }
 
-static void rocker_port_dev_addr_init(struct rocker *rocker,
-                                     struct rocker_port *rocker_port)
+static void rocker_port_dev_addr_init(struct rocker_port *rocker_port)
 {
-       struct pci_dev *pdev = rocker->pdev;
+       const struct rocker *rocker = rocker_port->rocker;
+       const struct pci_dev *pdev = rocker->pdev;
        int err;
 
        err = rocker_cmd_get_port_settings_macaddr(rocker_port,
@@ -4607,9 +4825,10 @@ static void rocker_port_dev_addr_init(struct rocker *rocker,
 
 static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
 {
-       struct pci_dev *pdev = rocker->pdev;
+       const struct pci_dev *pdev = rocker->pdev;
        struct rocker_port *rocker_port;
        struct net_device *dev;
+       u16 untagged_vid = 0;
        int err;
 
        dev = alloc_etherdev(sizeof(struct rocker_port));
@@ -4621,20 +4840,19 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
        rocker_port->port_number = port_number;
        rocker_port->pport = port_number + 1;
        rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC;
+       INIT_LIST_HEAD(&rocker_port->trans_mem);
 
-       rocker_port_dev_addr_init(rocker, rocker_port);
+       rocker_port_dev_addr_init(rocker_port);
        dev->netdev_ops = &rocker_port_netdev_ops;
        dev->ethtool_ops = &rocker_port_ethtool_ops;
-       dev->swdev_ops = &rocker_port_swdev_ops;
+       dev->switchdev_ops = &rocker_port_switchdev_ops;
        netif_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx,
                       NAPI_POLL_WEIGHT);
        netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx,
                       NAPI_POLL_WEIGHT);
        rocker_carrier_init(rocker_port);
 
-       dev->features |= NETIF_F_NETNS_LOCAL |
-                        NETIF_F_HW_VLAN_CTAG_FILTER |
-                        NETIF_F_HW_SWITCH_OFFLOAD;
+       dev->features |= NETIF_F_NETNS_LOCAL;
 
        err = register_netdev(dev);
        if (err) {
@@ -4643,18 +4861,29 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
        }
        rocker->ports[port_number] = rocker_port;
 
-       rocker_port_set_learning(rocker_port);
+       rocker_port_set_learning(rocker_port, SWITCHDEV_TRANS_NONE);
 
-       rocker_port->internal_vlan_id =
-               rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex);
-       err = rocker_port_ig_tbl(rocker_port, 0);
+       err = rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE, 0);
        if (err) {
                dev_err(&pdev->dev, "install ig port table failed\n");
                goto err_port_ig_tbl;
        }
 
+       rocker_port->internal_vlan_id =
+               rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex);
+
+       err = rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE,
+                                  untagged_vid, 0);
+       if (err) {
+               netdev_err(rocker_port->dev, "install untagged VLAN failed\n");
+               goto err_untagged_vlan;
+       }
+
        return 0;
 
+err_untagged_vlan:
+       rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE,
+                          ROCKER_OP_FLAG_REMOVE);
 err_port_ig_tbl:
        unregister_netdev(dev);
 err_register_netdev:
@@ -4669,7 +4898,7 @@ static int rocker_probe_ports(struct rocker *rocker)
        int err;
 
        alloc_size = sizeof(struct rocker_port *) * rocker->port_count;
-       rocker->ports = kmalloc(alloc_size, GFP_KERNEL);
+       rocker->ports = kzalloc(alloc_size, GFP_KERNEL);
        if (!rocker->ports)
                return -ENOMEM;
        for (i = 0; i < rocker->port_count; i++) {
@@ -4718,7 +4947,7 @@ err_enable_msix:
        return err;
 }
 
-static void rocker_msix_fini(struct rocker *rocker)
+static void rocker_msix_fini(const struct rocker *rocker)
 {
        pci_disable_msix(rocker->pdev);
        kfree(rocker->msix_entries);
@@ -4884,7 +5113,7 @@ static struct pci_driver rocker_pci_driver = {
  * Net device notifier event handler
  ************************************/
 
-static bool rocker_port_dev_check(struct net_device *dev)
+static bool rocker_port_dev_check(const struct net_device *dev)
 {
        return dev->netdev_ops == &rocker_port_netdev_ops;
 }
@@ -4892,45 +5121,54 @@ static bool rocker_port_dev_check(struct net_device *dev)
 static int rocker_port_bridge_join(struct rocker_port *rocker_port,
                                   struct net_device *bridge)
 {
+       u16 untagged_vid = 0;
        int err;
 
+       /* Port is joining bridge, so the internal VLAN for the
+        * port is going to change to the bridge internal VLAN.
+        * Let's remove untagged VLAN (vid=0) from port and
+        * re-add once internal VLAN has changed.
+        */
+
+       err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
+       if (err)
+               return err;
+
        rocker_port_internal_vlan_id_put(rocker_port,
                                         rocker_port->dev->ifindex);
+       rocker_port->internal_vlan_id =
+               rocker_port_internal_vlan_id_get(rocker_port, bridge->ifindex);
 
        rocker_port->bridge_dev = bridge;
 
-       /* Use bridge internal VLAN ID for untagged pkts */
-       err = rocker_port_vlan(rocker_port, ROCKER_OP_FLAG_REMOVE, 0);
-       if (err)
-               return err;
-       rocker_port->internal_vlan_id =
-               rocker_port_internal_vlan_id_get(rocker_port,
-                                                bridge->ifindex);
-       return rocker_port_vlan(rocker_port, 0, 0);
+       return rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE,
+                                   untagged_vid, 0);
 }
 
 static int rocker_port_bridge_leave(struct rocker_port *rocker_port)
 {
+       u16 untagged_vid = 0;
        int err;
 
-       rocker_port_internal_vlan_id_put(rocker_port,
-                                        rocker_port->bridge_dev->ifindex);
-
-       rocker_port->bridge_dev = NULL;
-
-       /* Use port internal VLAN ID for untagged pkts */
-       err = rocker_port_vlan(rocker_port, ROCKER_OP_FLAG_REMOVE, 0);
+       err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
        if (err)
                return err;
+
+       rocker_port_internal_vlan_id_put(rocker_port,
+                                        rocker_port->bridge_dev->ifindex);
        rocker_port->internal_vlan_id =
                rocker_port_internal_vlan_id_get(rocker_port,
                                                 rocker_port->dev->ifindex);
-       err = rocker_port_vlan(rocker_port, 0, 0);
+
+       rocker_port->bridge_dev = NULL;
+
+       err = rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE,
+                                  untagged_vid, 0);
        if (err)
                return err;
 
        if (rocker_port->dev->flags & IFF_UP)
-               err = rocker_port_fwd_enable(rocker_port);
+               err = rocker_port_fwd_enable(rocker_port, SWITCHDEV_TRANS_NONE);
 
        return err;
 }
@@ -4992,7 +5230,8 @@ static int rocker_neigh_update(struct net_device *dev, struct neighbour *n)
        int flags = (n->nud_state & NUD_VALID) ? 0 : ROCKER_OP_FLAG_REMOVE;
        __be32 ip_addr = *(__be32 *)n->primary_key;
 
-       return rocker_port_ipv4_neigh(rocker_port, flags, ip_addr, n->ha);
+       return rocker_port_ipv4_neigh(rocker_port, SWITCHDEV_TRANS_NONE,
+                                     flags, ip_addr, n->ha);
 }
 
 static int rocker_netevent_event(struct notifier_block *unused,
index a4e9591d7457f3a5cb52b9b8c9201a4c77530907..c61fbf968036a3fe4a57f8afbef704bcffa37dc7 100644 (file)
@@ -65,9 +65,9 @@ enum {
 #define ROCKER_TEST_DMA_CTRL           0x0034
 
 /* Rocker test register ctrl */
-#define ROCKER_TEST_DMA_CTRL_CLEAR     (1 << 0)
-#define ROCKER_TEST_DMA_CTRL_FILL      (1 << 1)
-#define ROCKER_TEST_DMA_CTRL_INVERT    (1 << 2)
+#define ROCKER_TEST_DMA_CTRL_CLEAR     BIT(0)
+#define ROCKER_TEST_DMA_CTRL_FILL      BIT(1)
+#define ROCKER_TEST_DMA_CTRL_INVERT    BIT(2)
 
 /* Rocker DMA ring register offsets */
 #define ROCKER_DMA_DESC_ADDR(x)                (0x1000 + (x) * 32)  /* 8-byte */
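
    The BIT() conversions in this header (here and in the hunks below) are
    cosmetic; BIT() is the standard <linux/bitops.h> helper, so each flag
    keeps its value, merely widened to unsigned long:

        #define BIT(nr) (1UL << (nr))  /* as defined in <linux/bitops.h> */
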
@@ -79,7 +79,7 @@ enum {
 #define ROCKER_DMA_DESC_RES1(x)                (0x101c + (x) * 32)
 
 /* Rocker dma ctrl register bits */
-#define ROCKER_DMA_DESC_CTRL_RESET     (1 << 0)
+#define ROCKER_DMA_DESC_CTRL_RESET     BIT(0)
 
 /* Rocker DMA ring types */
 enum rocker_dma_type {
@@ -111,7 +111,7 @@ struct rocker_desc {
        u16 comp_err;
 };
 
-#define ROCKER_DMA_DESC_COMP_ERR_GEN   (1 << 15)
+#define ROCKER_DMA_DESC_COMP_ERR_GEN   BIT(15)
 
 /* Rocker DMA TLV struct */
 struct rocker_tlv {
@@ -237,14 +237,14 @@ enum {
        ROCKER_TLV_RX_MAX = __ROCKER_TLV_RX_MAX - 1,
 };
 
-#define ROCKER_RX_FLAGS_IPV4                   (1 << 0)
-#define ROCKER_RX_FLAGS_IPV6                   (1 << 1)
-#define ROCKER_RX_FLAGS_CSUM_CALC              (1 << 2)
-#define ROCKER_RX_FLAGS_IPV4_CSUM_GOOD         (1 << 3)
-#define ROCKER_RX_FLAGS_IP_FRAG                        (1 << 4)
-#define ROCKER_RX_FLAGS_TCP                    (1 << 5)
-#define ROCKER_RX_FLAGS_UDP                    (1 << 6)
-#define ROCKER_RX_FLAGS_TCP_UDP_CSUM_GOOD      (1 << 7)
+#define ROCKER_RX_FLAGS_IPV4                   BIT(0)
+#define ROCKER_RX_FLAGS_IPV6                   BIT(1)
+#define ROCKER_RX_FLAGS_CSUM_CALC              BIT(2)
+#define ROCKER_RX_FLAGS_IPV4_CSUM_GOOD         BIT(3)
+#define ROCKER_RX_FLAGS_IP_FRAG                        BIT(4)
+#define ROCKER_RX_FLAGS_TCP                    BIT(5)
+#define ROCKER_RX_FLAGS_UDP                    BIT(6)
+#define ROCKER_RX_FLAGS_TCP_UDP_CSUM_GOOD      BIT(7)
 
 enum {
        ROCKER_TLV_TX_UNSPEC,
@@ -460,6 +460,6 @@ enum rocker_of_dpa_overlay_type {
 #define ROCKER_SWITCH_ID               0x0320 /* 8-byte */
 
 /* Rocker control bits */
-#define ROCKER_CONTROL_RESET           (1 << 0)
+#define ROCKER_CONTROL_RESET           BIT(0)
 
 #endif
index 0889212944486f9978c2c9f404bd3c87b4b5777f..4dd92b7b80f41d2e2c0591f9d3930a24ed6325be 100644 (file)
@@ -36,3 +36,12 @@ config SFC_SRIOV
          This enables support for the SFC9000 I/O Virtualization
          features, allowing accelerated network performance in
          virtualized environments.
+config SFC_MCDI_LOGGING
+       bool "Solarflare SFC9000/SFC9100-family MCDI logging support"
+       depends on SFC
+       default y
+       ---help---
+         This enables support for tracing of MCDI (Management-Controller-to-
+         Driver-Interface) commands and responses, allowing debugging of
+         driver/firmware interaction.  Tracing is enabled at runtime via
+         the 'mcdi_logging' sysfs file under the PCI device.
index 3a83c0dca8e614de4ed467fe0cd16f5601e20720..ce8470fe79d5524f8ac0b1536f70d04e5ede2007 100644 (file)
@@ -3,6 +3,6 @@ sfc-y                   += efx.o nic.o farch.o falcon.o siena.o ef10.o tx.o \
                           tenxpress.o txc43128_phy.o falcon_boards.o \
                           mcdi.o mcdi_port.o mcdi_mon.o ptp.o
 sfc-$(CONFIG_SFC_MTD)  += mtd.o
-sfc-$(CONFIG_SFC_SRIOV)        += siena_sriov.o
+sfc-$(CONFIG_SFC_SRIOV)        += sriov.o siena_sriov.o ef10_sriov.o
 
 obj-$(CONFIG_SFC)      += sfc.o
index fbb6cfa0f5f1d634c22eee11d3d62ca52bfef96e..84764345546829a02bd1cf8650d16544c5fe968e 100644 (file)
@@ -15,6 +15,7 @@
 #include "nic.h"
 #include "workarounds.h"
 #include "selftest.h"
+#include "ef10_sriov.h"
 #include <linux/in.h>
 #include <linux/jhash.h>
 #include <linux/wait.h>
@@ -30,6 +31,9 @@ enum {
 
 /* The reserved RSS context value */
 #define EFX_EF10_RSS_CONTEXT_INVALID   0xffffffff
+/* The maximum size of a shared RSS context */
+/* TODO: this should really be from the mcdi protocol export */
+#define EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE 64UL
 
 /* The filter table(s) are managed by firmware and we have write-only
  * access.  When removing filters we must identify them to the
@@ -77,7 +81,6 @@ struct efx_ef10_filter_table {
 /* An arbitrary search limit for the software hash table */
 #define EFX_EF10_FILTER_SEARCH_LIMIT 200
 
-static void efx_ef10_rx_push_rss_config(struct efx_nic *efx);
 static void efx_ef10_rx_free_indir_table(struct efx_nic *efx);
 static void efx_ef10_filter_table_remove(struct efx_nic *efx);
 
@@ -92,8 +95,49 @@ static int efx_ef10_get_warm_boot_count(struct efx_nic *efx)
 
 static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx)
 {
-       return resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]);
+       int bar;
+
+       bar = efx->type->mem_bar;
+       return resource_size(&efx->pci_dev->resource[bar]);
+}
+
+static int efx_ef10_get_pf_index(struct efx_nic *efx)
+{
+       MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
+       struct efx_ef10_nic_data *nic_data = efx->nic_data;
+       size_t outlen;
+       int rc;
+
+       rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
+                         sizeof(outbuf), &outlen);
+       if (rc)
+               return rc;
+       if (outlen < sizeof(outbuf))
+               return -EIO;
+
+       nic_data->pf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_PF);
+       return 0;
+}
+
+#ifdef CONFIG_SFC_SRIOV
+static int efx_ef10_get_vf_index(struct efx_nic *efx)
+{
+       MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
+       struct efx_ef10_nic_data *nic_data = efx->nic_data;
+       size_t outlen;
+       int rc;
+
+       rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
+                         sizeof(outbuf), &outlen);
+       if (rc)
+               return rc;
+       if (outlen < sizeof(outbuf))
+               return -EIO;
+
+       nic_data->vf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_VF);
+       return 0;
 }
+#endif
 
 static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
 {
@@ -117,6 +161,13 @@ static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
        nic_data->datapath_caps =
                MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);
 
+       /* Record the DPCPU firmware IDs to determine VEB vswitching support.
+        */
+       nic_data->rx_dpcpu_fw_id =
+               MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID);
+       nic_data->tx_dpcpu_fw_id =
+               MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID);
+
        if (!(nic_data->datapath_caps &
              (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))) {
                netif_err(efx, drv, efx->net_dev,
@@ -147,7 +198,7 @@ static int efx_ef10_get_sysclk_freq(struct efx_nic *efx)
        return rc > 0 ? rc : -ERANGE;
 }
 
-static int efx_ef10_get_mac_address(struct efx_nic *efx, u8 *mac_address)
+static int efx_ef10_get_mac_address_pf(struct efx_nic *efx, u8 *mac_address)
 {
        MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN);
        size_t outlen;
@@ -167,9 +218,66 @@ static int efx_ef10_get_mac_address(struct efx_nic *efx, u8 *mac_address)
        return 0;
 }
 
+static int efx_ef10_get_mac_address_vf(struct efx_nic *efx, u8 *mac_address)
+{
+       MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN);
+       MCDI_DECLARE_BUF(outbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX);
+       size_t outlen;
+       int num_addrs, rc;
+
+       MCDI_SET_DWORD(inbuf, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID,
+                      EVB_PORT_ID_ASSIGNED);
+       rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_GET_MAC_ADDRESSES, inbuf,
+                         sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
+
+       if (rc)
+               return rc;
+       if (outlen < MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN)
+               return -EIO;
+
+       num_addrs = MCDI_DWORD(outbuf,
+                              VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT);
+
+       WARN_ON(num_addrs != 1);
+
+       ether_addr_copy(mac_address,
+                       MCDI_PTR(outbuf, VPORT_GET_MAC_ADDRESSES_OUT_MACADDR));
+
+       return 0;
+}
+
+static ssize_t efx_ef10_show_link_control_flag(struct device *dev,
+                                              struct device_attribute *attr,
+                                              char *buf)
+{
+       struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+
+       return sprintf(buf, "%d\n",
+                      ((efx->mcdi->fn_flags) &
+                       (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL))
+                      ? 1 : 0);
+}
+
+static ssize_t efx_ef10_show_primary_flag(struct device *dev,
+                                         struct device_attribute *attr,
+                                         char *buf)
+{
+       struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+
+       return sprintf(buf, "%d\n",
+                      ((efx->mcdi->fn_flags) &
+                       (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY))
+                      ? 1 : 0);
+}
+
+static DEVICE_ATTR(link_control_flag, 0444, efx_ef10_show_link_control_flag,
+                  NULL);
+static DEVICE_ATTR(primary_flag, 0444, efx_ef10_show_primary_flag, NULL);
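
    Mode 0444 makes both attributes read-only. Had the show routines been
    named link_control_flag_show() and primary_flag_show(), the same tables
    could use the DEVICE_ATTR_RO() shorthand (an equivalent spelling, not
    what this patch does):

        static DEVICE_ATTR_RO(link_control_flag);
        static DEVICE_ATTR_RO(primary_flag);
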
+
 static int efx_ef10_probe(struct efx_nic *efx)
 {
        struct efx_ef10_nic_data *nic_data;
+       struct net_device *net_dev = efx->net_dev;
        int i, rc;
 
        /* We can have one VI for each 8K region.  However, until we
@@ -178,7 +286,7 @@ static int efx_ef10_probe(struct efx_nic *efx)
        efx->max_channels =
                min_t(unsigned int,
                      EFX_MAX_CHANNELS,
-                     resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]) /
+                     efx_ef10_mem_map_size(efx) /
                      (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES));
        if (WARN_ON(efx->max_channels == 0))
                return -EIO;
@@ -188,6 +296,9 @@ static int efx_ef10_probe(struct efx_nic *efx)
                return -ENOMEM;
        efx->nic_data = nic_data;
 
+       /* we assume later that we can copy from this buffer in dwords */
+       BUILD_BUG_ON(MCDI_CTL_SDU_LEN_MAX_V2 % 4);
+
        rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf,
                                  8 + MCDI_CTL_SDU_LEN_MAX_V2, GFP_KERNEL);
        if (rc)
@@ -209,6 +320,8 @@ static int efx_ef10_probe(struct efx_nic *efx)
 
        nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
 
+       nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
+
        /* In case we're recovering from a crash (kexec), we want to
         * cancel any outstanding request by the previous user of this
         * function.  We send a special message using the least
@@ -230,45 +343,85 @@ static int efx_ef10_probe(struct efx_nic *efx)
        if (rc)
                goto fail3;
 
+       rc = device_create_file(&efx->pci_dev->dev,
+                               &dev_attr_link_control_flag);
+       if (rc)
+               goto fail3;
+
+       rc = device_create_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
+       if (rc)
+               goto fail4;
+
+       rc = efx_ef10_get_pf_index(efx);
+       if (rc)
+               goto fail5;
+
        rc = efx_ef10_init_datapath_caps(efx);
        if (rc < 0)
-               goto fail3;
+               goto fail5;
 
        efx->rx_packet_len_offset =
                ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE;
 
        rc = efx_mcdi_port_get_number(efx);
        if (rc < 0)
-               goto fail3;
+               goto fail5;
        efx->port_num = rc;
+       net_dev->dev_port = rc;
 
-       rc = efx_ef10_get_mac_address(efx, efx->net_dev->perm_addr);
+       rc = efx->type->get_mac_address(efx, efx->net_dev->perm_addr);
        if (rc)
-               goto fail3;
+               goto fail5;
 
        rc = efx_ef10_get_sysclk_freq(efx);
        if (rc < 0)
-               goto fail3;
+               goto fail5;
        efx->timer_quantum_ns = 1536000 / rc; /* 1536 cycles */
 
-       /* Check whether firmware supports bug 35388 workaround */
+       /* Check whether firmware supports bug 35388 workaround.
+        * First try to enable it, then if we get EPERM, just
+        * ask if it's already enabled
+        */
        rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true);
-       if (rc == 0)
+       if (rc == 0) {
                nic_data->workaround_35388 = true;
-       else if (rc != -ENOSYS && rc != -ENOENT)
-               goto fail3;
+       } else if (rc == -EPERM) {
+               unsigned int enabled;
+
+               rc = efx_mcdi_get_workarounds(efx, NULL, &enabled);
+               if (rc)
+                       goto fail5;
+               nic_data->workaround_35388 = enabled &
+                       MC_CMD_GET_WORKAROUNDS_OUT_BUG35388;
+       } else if (rc != -ENOSYS && rc != -ENOENT) {
+               goto fail5;
+       }
        netif_dbg(efx, probe, efx->net_dev,
                  "workaround for bug 35388 is %sabled\n",
                  nic_data->workaround_35388 ? "en" : "dis");
 
        rc = efx_mcdi_mon_probe(efx);
-       if (rc)
-               goto fail3;
+       if (rc && rc != -EPERM)
+               goto fail5;
 
        efx_ptp_probe(efx, NULL);
 
+#ifdef CONFIG_SFC_SRIOV
+       if ((efx->pci_dev->physfn) && (!efx->pci_dev->is_physfn)) {
+               struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
+               struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
+
+               efx_pf->type->get_mac_address(efx_pf, nic_data->port_id);
+       } else
+#endif
+               ether_addr_copy(nic_data->port_id, efx->net_dev->perm_addr);
+
        return 0;
 
+fail5:
+       device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
+fail4:
+       device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag);
 fail3:
        efx_mcdi_fini(efx);
 fail2:
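
The probe error path above now unwinds through a ladder of labels: each newly acquired resource (the link_control_flag file, then the primary_flag file) adds one label, later failures jump to the deepest label reached, and the labels fall through in reverse acquisition order. A minimal standalone sketch of the idiom (illustrative names, not driver code):

    #include <stdio.h>

    /* Stand-ins for real acquisitions (MCDI setup, sysfs files, ...). */
    static int acquire(const char *what) { printf("acquire %s\n", what); return 0; }
    static void release(const char *what) { printf("release %s\n", what); }

    static int probe(void)
    {
            int rc;

            rc = acquire("A");
            if (rc)
                    goto fail1;
            rc = acquire("B");
            if (rc)
                    goto fail2;     /* undo A only */
            rc = acquire("C");
            if (rc)
                    goto fail3;     /* undo B, then fall through to undo A */
            /* Every failure from here on must jump to the deepest label,
             * or whatever was acquired above it leaks.
             */
            return 0;

    fail3:
            release("B");
    fail2:
            release("A");
    fail1:
            return rc;
    }

    int main(void) { return probe(); }
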
@@ -281,7 +434,7 @@ fail1:
 
 static int efx_ef10_free_vis(struct efx_nic *efx)
 {
-       MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, 0);
+       MCDI_DECLARE_BUF_ERR(outbuf);
        size_t outlen;
        int rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FREE_VIS, NULL, 0,
                                    outbuf, sizeof(outbuf), &outlen);
@@ -352,9 +505,9 @@ static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
 static int efx_ef10_link_piobufs(struct efx_nic *efx)
 {
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
-       MCDI_DECLARE_BUF(inbuf,
-                        max(MC_CMD_LINK_PIOBUF_IN_LEN,
-                            MC_CMD_UNLINK_PIOBUF_IN_LEN));
+       _MCDI_DECLARE_BUF(inbuf,
+                         max(MC_CMD_LINK_PIOBUF_IN_LEN,
+                             MC_CMD_UNLINK_PIOBUF_IN_LEN));
        struct efx_channel *channel;
        struct efx_tx_queue *tx_queue;
        unsigned int offset, index;
@@ -363,6 +516,8 @@ static int efx_ef10_link_piobufs(struct efx_nic *efx)
        BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_OUT_LEN != 0);
        BUILD_BUG_ON(MC_CMD_UNLINK_PIOBUF_OUT_LEN != 0);
 
+       memset(inbuf, 0, sizeof(inbuf));
+
        /* Link a buffer to each VI in the write-combining mapping */
        for (index = 0; index < nic_data->n_piobufs; ++index) {
                MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_PIOBUF_HANDLE,
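
MCDI_DECLARE_BUF zero-initialises the buffer it declares; the switch to the underscore variant plus an explicit memset() here is most plausibly because the kernel's max() is a statement-expression macro, so the array bound is no longer an integer constant expression and the array (now formally variable-length) cannot carry an initialiser. A compilable stand-in for the effect:

    #include <string.h>

    /* GNU-style max() via a statement expression, as the kernel defines it;
     * the result is not a constant expression as far as the compiler cares.
     */
    #define my_max(a, b) ({ __typeof__(a) _a = (a); \
                            __typeof__(b) _b = (b); \
                            _a > _b ? _a : _b; })

    void demo(void)
    {
            /* char buf[my_max(8, 16)] = {0};  <-- rejected: a variable-
             * length array may not be initialised */
            char buf[my_max(8, 16)];        /* accepted, but not zeroed */

            memset(buf, 0, sizeof(buf));    /* so zero it explicitly */
    }
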
@@ -475,6 +630,25 @@ static void efx_ef10_remove(struct efx_nic *efx)
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
        int rc;
 
+#ifdef CONFIG_SFC_SRIOV
+       struct efx_ef10_nic_data *nic_data_pf;
+       struct pci_dev *pci_dev_pf;
+       struct efx_nic *efx_pf;
+       struct ef10_vf *vf;
+
+       if (efx->pci_dev->is_virtfn) {
+               pci_dev_pf = efx->pci_dev->physfn;
+               if (pci_dev_pf) {
+                       efx_pf = pci_get_drvdata(pci_dev_pf);
+                       nic_data_pf = efx_pf->nic_data;
+                       vf = nic_data_pf->vf + nic_data->vf_index;
+                       vf->efx = NULL;
+               } else
+                       netif_info(efx, drv, efx->net_dev,
+                                  "Could not get the PF id from VF\n");
+       }
+#endif
+
        efx_ptp_remove(efx);
 
        efx_mcdi_mon_remove(efx);
@@ -490,11 +664,78 @@ static void efx_ef10_remove(struct efx_nic *efx)
        if (!nic_data->must_restore_piobufs)
                efx_ef10_free_piobufs(efx);
 
+       device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
+       device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag);
+
        efx_mcdi_fini(efx);
        efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
        kfree(nic_data);
 }
 
+static int efx_ef10_probe_pf(struct efx_nic *efx)
+{
+       return efx_ef10_probe(efx);
+}
+
+#ifdef CONFIG_SFC_SRIOV
+static int efx_ef10_probe_vf(struct efx_nic *efx)
+{
+       int rc;
+       struct pci_dev *pci_dev_pf;
+
+       /* If the parent PF has no VF data structure, it doesn't know about this
+        * VF so fail probe.  The VF needs to be re-created.  This can happen
+        * if the PF driver is unloaded while the VF is assigned to a guest.
+        */
+       pci_dev_pf = efx->pci_dev->physfn;
+       if (pci_dev_pf) {
+               struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
+               struct efx_ef10_nic_data *nic_data_pf = efx_pf->nic_data;
+
+               if (!nic_data_pf->vf) {
+                       netif_info(efx, drv, efx->net_dev,
+                                  "The VF cannot link to its parent PF; "
+                                  "please destroy and re-create the VF\n");
+                       return -EBUSY;
+               }
+       }
+
+       rc = efx_ef10_probe(efx);
+       if (rc)
+               return rc;
+
+       rc = efx_ef10_get_vf_index(efx);
+       if (rc)
+               goto fail;
+
+       if (efx->pci_dev->is_virtfn) {
+               if (efx->pci_dev->physfn) {
+                       struct efx_nic *efx_pf =
+                               pci_get_drvdata(efx->pci_dev->physfn);
+                       struct efx_ef10_nic_data *nic_data_p = efx_pf->nic_data;
+                       struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+                       nic_data_p->vf[nic_data->vf_index].efx = efx;
+                       nic_data_p->vf[nic_data->vf_index].pci_dev =
+                               efx->pci_dev;
+               } else
+                       netif_info(efx, drv, efx->net_dev,
+                                  "Could not get the PF id from VF\n");
+       }
+
+       return 0;
+
+fail:
+       efx_ef10_remove(efx);
+       return rc;
+}
+#else
+static int efx_ef10_probe_vf(struct efx_nic *efx __attribute__ ((unused)))
+{
+       return 0;
+}
+#endif
+
 static int efx_ef10_alloc_vis(struct efx_nic *efx,
                              unsigned int min_vis, unsigned int max_vis)
 {
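
Both the remove path and the VF probe path navigate from a VF's pci_dev to the parent PF's driver state via pci_dev->physfn and pci_get_drvdata(). The pointer is only trustworthy under the conditions checked above: the device really is a VF, and the PF is visible and still bound (it may not be when the VF has been handed to a guest). A condensed sketch of the walk, assuming a kernel build with SR-IOV support (physfn only exists there):

    #include <linux/pci.h>

    /* Illustrative only: returns the PF driver's private data for a VF,
     * or NULL when there is no usable parent (VF passed through to a
     * guest, or PF driver unbound).
     */
    static void *vf_parent_drvdata(struct pci_dev *vf_dev)
    {
            struct pci_dev *pf_dev;

            if (!vf_dev->is_virtfn)
                    return NULL;            /* not a VF at all */

            pf_dev = vf_dev->physfn;
            if (!pf_dev)
                    return NULL;            /* parent PF not visible */

            return pci_get_drvdata(pf_dev); /* NULL if PF never bound */
    }
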
@@ -687,7 +928,9 @@ static int efx_ef10_init_nic(struct efx_nic *efx)
                nic_data->must_restore_piobufs = false;
        }
 
-       efx_ef10_rx_push_rss_config(efx);
+       /* don't fail init if RSS setup doesn't work */
+       efx->type->rx_push_rss_config(efx, false, efx->rx_indir_table);
+
        return 0;
 }
 
@@ -702,6 +945,14 @@ static void efx_ef10_reset_mc_allocations(struct efx_nic *efx)
        nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
 }
 
+static enum reset_type efx_ef10_map_reset_reason(enum reset_type reason)
+{
+       if (reason == RESET_TYPE_MC_FAILURE)
+               return RESET_TYPE_DATAPATH;
+
+       return efx_mcdi_map_reset_reason(reason);
+}
+
 static int efx_ef10_map_reset_flags(u32 *flags)
 {
        enum {
@@ -760,93 +1011,112 @@ static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type)
        [GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 }
 
 static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
-       EF10_DMA_STAT(tx_bytes, TX_BYTES),
-       EF10_DMA_STAT(tx_packets, TX_PKTS),
-       EF10_DMA_STAT(tx_pause, TX_PAUSE_PKTS),
-       EF10_DMA_STAT(tx_control, TX_CONTROL_PKTS),
-       EF10_DMA_STAT(tx_unicast, TX_UNICAST_PKTS),
-       EF10_DMA_STAT(tx_multicast, TX_MULTICAST_PKTS),
-       EF10_DMA_STAT(tx_broadcast, TX_BROADCAST_PKTS),
-       EF10_DMA_STAT(tx_lt64, TX_LT64_PKTS),
-       EF10_DMA_STAT(tx_64, TX_64_PKTS),
-       EF10_DMA_STAT(tx_65_to_127, TX_65_TO_127_PKTS),
-       EF10_DMA_STAT(tx_128_to_255, TX_128_TO_255_PKTS),
-       EF10_DMA_STAT(tx_256_to_511, TX_256_TO_511_PKTS),
-       EF10_DMA_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS),
-       EF10_DMA_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS),
-       EF10_DMA_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS),
-       EF10_DMA_STAT(rx_bytes, RX_BYTES),
-       EF10_DMA_INVIS_STAT(rx_bytes_minus_good_bytes, RX_BAD_BYTES),
-       EF10_OTHER_STAT(rx_good_bytes),
-       EF10_OTHER_STAT(rx_bad_bytes),
-       EF10_DMA_STAT(rx_packets, RX_PKTS),
-       EF10_DMA_STAT(rx_good, RX_GOOD_PKTS),
-       EF10_DMA_STAT(rx_bad, RX_BAD_FCS_PKTS),
-       EF10_DMA_STAT(rx_pause, RX_PAUSE_PKTS),
-       EF10_DMA_STAT(rx_control, RX_CONTROL_PKTS),
-       EF10_DMA_STAT(rx_unicast, RX_UNICAST_PKTS),
-       EF10_DMA_STAT(rx_multicast, RX_MULTICAST_PKTS),
-       EF10_DMA_STAT(rx_broadcast, RX_BROADCAST_PKTS),
-       EF10_DMA_STAT(rx_lt64, RX_UNDERSIZE_PKTS),
-       EF10_DMA_STAT(rx_64, RX_64_PKTS),
-       EF10_DMA_STAT(rx_65_to_127, RX_65_TO_127_PKTS),
-       EF10_DMA_STAT(rx_128_to_255, RX_128_TO_255_PKTS),
-       EF10_DMA_STAT(rx_256_to_511, RX_256_TO_511_PKTS),
-       EF10_DMA_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS),
-       EF10_DMA_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS),
-       EF10_DMA_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS),
-       EF10_DMA_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS),
-       EF10_DMA_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS),
-       EF10_DMA_STAT(rx_overflow, RX_OVERFLOW_PKTS),
-       EF10_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS),
-       EF10_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS),
-       EF10_DMA_STAT(rx_nodesc_drops, RX_NODESC_DROPS),
+       EF10_DMA_STAT(port_tx_bytes, TX_BYTES),
+       EF10_DMA_STAT(port_tx_packets, TX_PKTS),
+       EF10_DMA_STAT(port_tx_pause, TX_PAUSE_PKTS),
+       EF10_DMA_STAT(port_tx_control, TX_CONTROL_PKTS),
+       EF10_DMA_STAT(port_tx_unicast, TX_UNICAST_PKTS),
+       EF10_DMA_STAT(port_tx_multicast, TX_MULTICAST_PKTS),
+       EF10_DMA_STAT(port_tx_broadcast, TX_BROADCAST_PKTS),
+       EF10_DMA_STAT(port_tx_lt64, TX_LT64_PKTS),
+       EF10_DMA_STAT(port_tx_64, TX_64_PKTS),
+       EF10_DMA_STAT(port_tx_65_to_127, TX_65_TO_127_PKTS),
+       EF10_DMA_STAT(port_tx_128_to_255, TX_128_TO_255_PKTS),
+       EF10_DMA_STAT(port_tx_256_to_511, TX_256_TO_511_PKTS),
+       EF10_DMA_STAT(port_tx_512_to_1023, TX_512_TO_1023_PKTS),
+       EF10_DMA_STAT(port_tx_1024_to_15xx, TX_1024_TO_15XX_PKTS),
+       EF10_DMA_STAT(port_tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS),
+       EF10_DMA_STAT(port_rx_bytes, RX_BYTES),
+       EF10_DMA_INVIS_STAT(port_rx_bytes_minus_good_bytes, RX_BAD_BYTES),
+       EF10_OTHER_STAT(port_rx_good_bytes),
+       EF10_OTHER_STAT(port_rx_bad_bytes),
+       EF10_DMA_STAT(port_rx_packets, RX_PKTS),
+       EF10_DMA_STAT(port_rx_good, RX_GOOD_PKTS),
+       EF10_DMA_STAT(port_rx_bad, RX_BAD_FCS_PKTS),
+       EF10_DMA_STAT(port_rx_pause, RX_PAUSE_PKTS),
+       EF10_DMA_STAT(port_rx_control, RX_CONTROL_PKTS),
+       EF10_DMA_STAT(port_rx_unicast, RX_UNICAST_PKTS),
+       EF10_DMA_STAT(port_rx_multicast, RX_MULTICAST_PKTS),
+       EF10_DMA_STAT(port_rx_broadcast, RX_BROADCAST_PKTS),
+       EF10_DMA_STAT(port_rx_lt64, RX_UNDERSIZE_PKTS),
+       EF10_DMA_STAT(port_rx_64, RX_64_PKTS),
+       EF10_DMA_STAT(port_rx_65_to_127, RX_65_TO_127_PKTS),
+       EF10_DMA_STAT(port_rx_128_to_255, RX_128_TO_255_PKTS),
+       EF10_DMA_STAT(port_rx_256_to_511, RX_256_TO_511_PKTS),
+       EF10_DMA_STAT(port_rx_512_to_1023, RX_512_TO_1023_PKTS),
+       EF10_DMA_STAT(port_rx_1024_to_15xx, RX_1024_TO_15XX_PKTS),
+       EF10_DMA_STAT(port_rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS),
+       EF10_DMA_STAT(port_rx_gtjumbo, RX_GTJUMBO_PKTS),
+       EF10_DMA_STAT(port_rx_bad_gtjumbo, RX_JABBER_PKTS),
+       EF10_DMA_STAT(port_rx_overflow, RX_OVERFLOW_PKTS),
+       EF10_DMA_STAT(port_rx_align_error, RX_ALIGN_ERROR_PKTS),
+       EF10_DMA_STAT(port_rx_length_error, RX_LENGTH_ERROR_PKTS),
+       EF10_DMA_STAT(port_rx_nodesc_drops, RX_NODESC_DROPS),
        GENERIC_SW_STAT(rx_nodesc_trunc),
        GENERIC_SW_STAT(rx_noskb_drops),
-       EF10_DMA_STAT(rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW),
-       EF10_DMA_STAT(rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW),
-       EF10_DMA_STAT(rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL),
-       EF10_DMA_STAT(rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL),
-       EF10_DMA_STAT(rx_pm_trunc_qbb, PM_TRUNC_QBB),
-       EF10_DMA_STAT(rx_pm_discard_qbb, PM_DISCARD_QBB),
-       EF10_DMA_STAT(rx_pm_discard_mapping, PM_DISCARD_MAPPING),
-       EF10_DMA_STAT(rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS),
-       EF10_DMA_STAT(rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS),
-       EF10_DMA_STAT(rx_dp_streaming_packets, RXDP_STREAMING_PKTS),
-       EF10_DMA_STAT(rx_dp_hlb_fetch, RXDP_EMERGENCY_FETCH_CONDITIONS),
-       EF10_DMA_STAT(rx_dp_hlb_wait, RXDP_EMERGENCY_WAIT_CONDITIONS),
+       EF10_DMA_STAT(port_rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW),
+       EF10_DMA_STAT(port_rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW),
+       EF10_DMA_STAT(port_rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL),
+       EF10_DMA_STAT(port_rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL),
+       EF10_DMA_STAT(port_rx_pm_trunc_qbb, PM_TRUNC_QBB),
+       EF10_DMA_STAT(port_rx_pm_discard_qbb, PM_DISCARD_QBB),
+       EF10_DMA_STAT(port_rx_pm_discard_mapping, PM_DISCARD_MAPPING),
+       EF10_DMA_STAT(port_rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS),
+       EF10_DMA_STAT(port_rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS),
+       EF10_DMA_STAT(port_rx_dp_streaming_packets, RXDP_STREAMING_PKTS),
+       EF10_DMA_STAT(port_rx_dp_hlb_fetch, RXDP_HLB_FETCH_CONDITIONS),
+       EF10_DMA_STAT(port_rx_dp_hlb_wait, RXDP_HLB_WAIT_CONDITIONS),
+       EF10_DMA_STAT(rx_unicast, VADAPTER_RX_UNICAST_PACKETS),
+       EF10_DMA_STAT(rx_unicast_bytes, VADAPTER_RX_UNICAST_BYTES),
+       EF10_DMA_STAT(rx_multicast, VADAPTER_RX_MULTICAST_PACKETS),
+       EF10_DMA_STAT(rx_multicast_bytes, VADAPTER_RX_MULTICAST_BYTES),
+       EF10_DMA_STAT(rx_broadcast, VADAPTER_RX_BROADCAST_PACKETS),
+       EF10_DMA_STAT(rx_broadcast_bytes, VADAPTER_RX_BROADCAST_BYTES),
+       EF10_DMA_STAT(rx_bad, VADAPTER_RX_BAD_PACKETS),
+       EF10_DMA_STAT(rx_bad_bytes, VADAPTER_RX_BAD_BYTES),
+       EF10_DMA_STAT(rx_overflow, VADAPTER_RX_OVERFLOW),
+       EF10_DMA_STAT(tx_unicast, VADAPTER_TX_UNICAST_PACKETS),
+       EF10_DMA_STAT(tx_unicast_bytes, VADAPTER_TX_UNICAST_BYTES),
+       EF10_DMA_STAT(tx_multicast, VADAPTER_TX_MULTICAST_PACKETS),
+       EF10_DMA_STAT(tx_multicast_bytes, VADAPTER_TX_MULTICAST_BYTES),
+       EF10_DMA_STAT(tx_broadcast, VADAPTER_TX_BROADCAST_PACKETS),
+       EF10_DMA_STAT(tx_broadcast_bytes, VADAPTER_TX_BROADCAST_BYTES),
+       EF10_DMA_STAT(tx_bad, VADAPTER_TX_BAD_PACKETS),
+       EF10_DMA_STAT(tx_bad_bytes, VADAPTER_TX_BAD_BYTES),
+       EF10_DMA_STAT(tx_overflow, VADAPTER_TX_OVERFLOW),
 };
 
-#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_tx_bytes) |          \
-                              (1ULL << EF10_STAT_tx_packets) |         \
-                              (1ULL << EF10_STAT_tx_pause) |           \
-                              (1ULL << EF10_STAT_tx_unicast) |         \
-                              (1ULL << EF10_STAT_tx_multicast) |       \
-                              (1ULL << EF10_STAT_tx_broadcast) |       \
-                              (1ULL << EF10_STAT_rx_bytes) |           \
-                              (1ULL << EF10_STAT_rx_bytes_minus_good_bytes) | \
-                              (1ULL << EF10_STAT_rx_good_bytes) |      \
-                              (1ULL << EF10_STAT_rx_bad_bytes) |       \
-                              (1ULL << EF10_STAT_rx_packets) |         \
-                              (1ULL << EF10_STAT_rx_good) |            \
-                              (1ULL << EF10_STAT_rx_bad) |             \
-                              (1ULL << EF10_STAT_rx_pause) |           \
-                              (1ULL << EF10_STAT_rx_control) |         \
-                              (1ULL << EF10_STAT_rx_unicast) |         \
-                              (1ULL << EF10_STAT_rx_multicast) |       \
-                              (1ULL << EF10_STAT_rx_broadcast) |       \
-                              (1ULL << EF10_STAT_rx_lt64) |            \
-                              (1ULL << EF10_STAT_rx_64) |              \
-                              (1ULL << EF10_STAT_rx_65_to_127) |       \
-                              (1ULL << EF10_STAT_rx_128_to_255) |      \
-                              (1ULL << EF10_STAT_rx_256_to_511) |      \
-                              (1ULL << EF10_STAT_rx_512_to_1023) |     \
-                              (1ULL << EF10_STAT_rx_1024_to_15xx) |    \
-                              (1ULL << EF10_STAT_rx_15xx_to_jumbo) |   \
-                              (1ULL << EF10_STAT_rx_gtjumbo) |         \
-                              (1ULL << EF10_STAT_rx_bad_gtjumbo) |     \
-                              (1ULL << EF10_STAT_rx_overflow) |        \
-                              (1ULL << EF10_STAT_rx_nodesc_drops) |    \
+#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_port_tx_bytes) |     \
+                              (1ULL << EF10_STAT_port_tx_packets) |    \
+                              (1ULL << EF10_STAT_port_tx_pause) |      \
+                              (1ULL << EF10_STAT_port_tx_unicast) |    \
+                              (1ULL << EF10_STAT_port_tx_multicast) |  \
+                              (1ULL << EF10_STAT_port_tx_broadcast) |  \
+                              (1ULL << EF10_STAT_port_rx_bytes) |      \
+                              (1ULL <<                                 \
+                               EF10_STAT_port_rx_bytes_minus_good_bytes) | \
+                              (1ULL << EF10_STAT_port_rx_good_bytes) | \
+                              (1ULL << EF10_STAT_port_rx_bad_bytes) |  \
+                              (1ULL << EF10_STAT_port_rx_packets) |    \
+                              (1ULL << EF10_STAT_port_rx_good) |       \
+                              (1ULL << EF10_STAT_port_rx_bad) |        \
+                              (1ULL << EF10_STAT_port_rx_pause) |      \
+                              (1ULL << EF10_STAT_port_rx_control) |    \
+                              (1ULL << EF10_STAT_port_rx_unicast) |    \
+                              (1ULL << EF10_STAT_port_rx_multicast) |  \
+                              (1ULL << EF10_STAT_port_rx_broadcast) |  \
+                              (1ULL << EF10_STAT_port_rx_lt64) |       \
+                              (1ULL << EF10_STAT_port_rx_64) |         \
+                              (1ULL << EF10_STAT_port_rx_65_to_127) |  \
+                              (1ULL << EF10_STAT_port_rx_128_to_255) | \
+                              (1ULL << EF10_STAT_port_rx_256_to_511) | \
+                              (1ULL << EF10_STAT_port_rx_512_to_1023) |\
+                              (1ULL << EF10_STAT_port_rx_1024_to_15xx) |\
+                              (1ULL << EF10_STAT_port_rx_15xx_to_jumbo) |\
+                              (1ULL << EF10_STAT_port_rx_gtjumbo) |    \
+                              (1ULL << EF10_STAT_port_rx_bad_gtjumbo) |\
+                              (1ULL << EF10_STAT_port_rx_overflow) |   \
+                              (1ULL << EF10_STAT_port_rx_nodesc_drops) |\
                               (1ULL << GENERIC_STAT_rx_nodesc_trunc) | \
                               (1ULL << GENERIC_STAT_rx_noskb_drops))
 
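
The bulk rename above prefixes every MAC-block counter with port_ so the generic names (rx_unicast, tx_bytes, ...) are free for the new per-function vadaptor counters. The enum ordering is load-bearing: the mask code further down assumes the vadaptor group is contiguous and starts at EF10_STAT_rx_unicast. A toy layout showing that assumption, with illustrative names:

    /* Port-level (MAC) stats first, vadaptor stats contiguous at the
     * end; one shift then produces the whole vadaptor group's mask.
     */
    enum {
            STAT_port_tx_bytes,
            STAT_port_rx_bytes,
            /* ... remaining port and generic stats ... */
            STAT_rx_unicast,        /* first vadaptor stat */
            STAT_rx_unicast_bytes,
            /* ... remaining vadaptor stats ... */
            STAT_COUNT
    };

    /* Everything at or above the first vadaptor stat, in one word: */
    #define VADAPTOR_MASK (~((1ULL << STAT_rx_unicast) - 1))
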
@@ -854,39 +1124,39 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
  * switchable port we do not expose these because they might not
  * include all the packets they should.
  */
-#define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_tx_control) |      \
-                                (1ULL << EF10_STAT_tx_lt64) |          \
-                                (1ULL << EF10_STAT_tx_64) |            \
-                                (1ULL << EF10_STAT_tx_65_to_127) |     \
-                                (1ULL << EF10_STAT_tx_128_to_255) |    \
-                                (1ULL << EF10_STAT_tx_256_to_511) |    \
-                                (1ULL << EF10_STAT_tx_512_to_1023) |   \
-                                (1ULL << EF10_STAT_tx_1024_to_15xx) |  \
-                                (1ULL << EF10_STAT_tx_15xx_to_jumbo))
+#define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_port_tx_control) | \
+                                (1ULL << EF10_STAT_port_tx_lt64) |     \
+                                (1ULL << EF10_STAT_port_tx_64) |       \
+                                (1ULL << EF10_STAT_port_tx_65_to_127) |\
+                                (1ULL << EF10_STAT_port_tx_128_to_255) |\
+                                (1ULL << EF10_STAT_port_tx_256_to_511) |\
+                                (1ULL << EF10_STAT_port_tx_512_to_1023) |\
+                                (1ULL << EF10_STAT_port_tx_1024_to_15xx) |\
+                                (1ULL << EF10_STAT_port_tx_15xx_to_jumbo))
 
 /* These statistics are only provided by the 40G MAC.  For a 10G/40G
  * switchable port we do expose these because the errors will otherwise
  * be silent.
  */
-#define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_rx_align_error) | \
-                                 (1ULL << EF10_STAT_rx_length_error))
+#define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_port_rx_align_error) |\
+                                 (1ULL << EF10_STAT_port_rx_length_error))
 
 /* These statistics are only provided if the firmware supports the
  * capability PM_AND_RXDP_COUNTERS.
  */
 #define HUNT_PM_AND_RXDP_STAT_MASK (                                   \
-       (1ULL << EF10_STAT_rx_pm_trunc_bb_overflow) |                   \
-       (1ULL << EF10_STAT_rx_pm_discard_bb_overflow) |                 \
-       (1ULL << EF10_STAT_rx_pm_trunc_vfifo_full) |                    \
-       (1ULL << EF10_STAT_rx_pm_discard_vfifo_full) |                  \
-       (1ULL << EF10_STAT_rx_pm_trunc_qbb) |                           \
-       (1ULL << EF10_STAT_rx_pm_discard_qbb) |                         \
-       (1ULL << EF10_STAT_rx_pm_discard_mapping) |                     \
-       (1ULL << EF10_STAT_rx_dp_q_disabled_packets) |                  \
-       (1ULL << EF10_STAT_rx_dp_di_dropped_packets) |                  \
-       (1ULL << EF10_STAT_rx_dp_streaming_packets) |                   \
-       (1ULL << EF10_STAT_rx_dp_hlb_fetch) |                           \
-       (1ULL << EF10_STAT_rx_dp_hlb_wait))
+       (1ULL << EF10_STAT_port_rx_pm_trunc_bb_overflow) |              \
+       (1ULL << EF10_STAT_port_rx_pm_discard_bb_overflow) |            \
+       (1ULL << EF10_STAT_port_rx_pm_trunc_vfifo_full) |               \
+       (1ULL << EF10_STAT_port_rx_pm_discard_vfifo_full) |             \
+       (1ULL << EF10_STAT_port_rx_pm_trunc_qbb) |                      \
+       (1ULL << EF10_STAT_port_rx_pm_discard_qbb) |                    \
+       (1ULL << EF10_STAT_port_rx_pm_discard_mapping) |                \
+       (1ULL << EF10_STAT_port_rx_dp_q_disabled_packets) |             \
+       (1ULL << EF10_STAT_port_rx_dp_di_dropped_packets) |             \
+       (1ULL << EF10_STAT_port_rx_dp_streaming_packets) |              \
+       (1ULL << EF10_STAT_port_rx_dp_hlb_fetch) |                      \
+       (1ULL << EF10_STAT_port_rx_dp_hlb_wait))
 
 static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
 {
@@ -894,6 +1164,10 @@ static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
        u32 port_caps = efx_mcdi_phy_get_caps(efx);
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
 
+       if (!(efx->mcdi->fn_flags &
+             1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL))
+               return 0;
+
        if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
                raw_mask |= HUNT_40G_EXTRA_STAT_MASK;
        else
@@ -908,13 +1182,28 @@ static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
 
 static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask)
 {
-       u64 raw_mask = efx_ef10_raw_stat_mask(efx);
+       struct efx_ef10_nic_data *nic_data = efx->nic_data;
+       u64 raw_mask[2];
+
+       raw_mask[0] = efx_ef10_raw_stat_mask(efx);
+
+       /* Only show vadaptor stats when EVB capability is present */
+       if (nic_data->datapath_caps &
+           (1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN)) {
+               raw_mask[0] |= ~((1ULL << EF10_STAT_rx_unicast) - 1);
+               raw_mask[1] = (1ULL << (EF10_STAT_COUNT - 64)) - 1;
+       } else {
+               raw_mask[1] = 0;
+       }
 
 #if BITS_PER_LONG == 64
-       mask[0] = raw_mask;
+       mask[0] = raw_mask[0];
+       mask[1] = raw_mask[1];
 #else
-       mask[0] = raw_mask & 0xffffffff;
-       mask[1] = raw_mask >> 32;
+       mask[0] = raw_mask[0] & 0xffffffff;
+       mask[1] = raw_mask[0] >> 32;
+       mask[2] = raw_mask[1] & 0xffffffff;
+       mask[3] = raw_mask[1] >> 32;
 #endif
 }
 
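
With the vadaptor counters included, the stat mask no longer fits in one u64, so the raw mask becomes a two-word array that must be folded into the unsigned-long bitmap expected by for_each_set_bit() on both word sizes. A portable stand-in for the two branches above:

    #include <limits.h>
    #include <stdint.h>

    /* Fold a 128-bit raw mask into an unsigned-long bitmap; mirrors the
     * BITS_PER_LONG == 64 / 32 branches above.
     */
    static void mask_to_bitmap(const uint64_t raw[2], unsigned long *mask)
    {
    #if ULONG_MAX > 0xffffffffUL            /* 64-bit longs */
            mask[0] = raw[0];
            mask[1] = raw[1];
    #else                                   /* 32-bit longs */
            mask[0] = raw[0] & 0xffffffff;
            mask[1] = raw[0] >> 32;
            mask[2] = raw[1] & 0xffffffff;
            mask[3] = raw[1] >> 32;
    #endif
    }
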
@@ -927,7 +1216,51 @@ static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names)
                                      mask, names);
 }
 
-static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
+static size_t efx_ef10_update_stats_common(struct efx_nic *efx, u64 *full_stats,
+                                          struct rtnl_link_stats64 *core_stats)
+{
+       DECLARE_BITMAP(mask, EF10_STAT_COUNT);
+       struct efx_ef10_nic_data *nic_data = efx->nic_data;
+       u64 *stats = nic_data->stats;
+       size_t stats_count = 0, index;
+
+       efx_ef10_get_stat_mask(efx, mask);
+
+       if (full_stats) {
+               for_each_set_bit(index, mask, EF10_STAT_COUNT) {
+                       if (efx_ef10_stat_desc[index].name) {
+                               *full_stats++ = stats[index];
+                               ++stats_count;
+                       }
+               }
+       }
+
+       if (core_stats) {
+               core_stats->rx_packets = stats[EF10_STAT_rx_unicast] +
+                                        stats[EF10_STAT_rx_multicast] +
+                                        stats[EF10_STAT_rx_broadcast];
+               core_stats->tx_packets = stats[EF10_STAT_tx_unicast] +
+                                        stats[EF10_STAT_tx_multicast] +
+                                        stats[EF10_STAT_tx_broadcast];
+               core_stats->rx_bytes = stats[EF10_STAT_rx_unicast_bytes] +
+                                      stats[EF10_STAT_rx_multicast_bytes] +
+                                      stats[EF10_STAT_rx_broadcast_bytes];
+               core_stats->tx_bytes = stats[EF10_STAT_tx_unicast_bytes] +
+                                      stats[EF10_STAT_tx_multicast_bytes] +
+                                      stats[EF10_STAT_tx_broadcast_bytes];
+               core_stats->rx_dropped = stats[GENERIC_STAT_rx_nodesc_trunc] +
+                                        stats[GENERIC_STAT_rx_noskb_drops];
+               core_stats->multicast = stats[EF10_STAT_rx_multicast];
+               core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad];
+               core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow];
+               core_stats->rx_errors = core_stats->rx_crc_errors;
+               core_stats->tx_errors = stats[EF10_STAT_tx_bad];
+       }
+
+       return stats_count;
+}
+
+static int efx_ef10_try_update_nic_stats_pf(struct efx_nic *efx)
 {
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
        DECLARE_BITMAP(mask, EF10_STAT_COUNT);
@@ -952,67 +1285,114 @@ static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
                return -EAGAIN;
 
        /* Update derived statistics */
-       efx_nic_fix_nodesc_drop_stat(efx, &stats[EF10_STAT_rx_nodesc_drops]);
-       stats[EF10_STAT_rx_good_bytes] =
-               stats[EF10_STAT_rx_bytes] -
-               stats[EF10_STAT_rx_bytes_minus_good_bytes];
-       efx_update_diff_stat(&stats[EF10_STAT_rx_bad_bytes],
-                            stats[EF10_STAT_rx_bytes_minus_good_bytes]);
+       efx_nic_fix_nodesc_drop_stat(efx,
+                                    &stats[EF10_STAT_port_rx_nodesc_drops]);
+       stats[EF10_STAT_port_rx_good_bytes] =
+               stats[EF10_STAT_port_rx_bytes] -
+               stats[EF10_STAT_port_rx_bytes_minus_good_bytes];
+       efx_update_diff_stat(&stats[EF10_STAT_port_rx_bad_bytes],
+                            stats[EF10_STAT_port_rx_bytes_minus_good_bytes]);
        efx_update_sw_stats(efx, stats);
        return 0;
 }
 
 
-static size_t efx_ef10_update_stats(struct efx_nic *efx, u64 *full_stats,
-                                   struct rtnl_link_stats64 *core_stats)
+static size_t efx_ef10_update_stats_pf(struct efx_nic *efx, u64 *full_stats,
+                                      struct rtnl_link_stats64 *core_stats)
 {
-       DECLARE_BITMAP(mask, EF10_STAT_COUNT);
-       struct efx_ef10_nic_data *nic_data = efx->nic_data;
-       u64 *stats = nic_data->stats;
-       size_t stats_count = 0, index;
        int retry;
 
-       efx_ef10_get_stat_mask(efx, mask);
-
        /* If we're unlucky enough to read statistics during the DMA, wait
         * up to 10ms for it to finish (typically takes <500us)
         */
        for (retry = 0; retry < 100; ++retry) {
-               if (efx_ef10_try_update_nic_stats(efx) == 0)
+               if (efx_ef10_try_update_nic_stats_pf(efx) == 0)
                        break;
                udelay(100);
        }
 
-       if (full_stats) {
-               for_each_set_bit(index, mask, EF10_STAT_COUNT) {
-                       if (efx_ef10_stat_desc[index].name) {
-                               *full_stats++ = stats[index];
-                               ++stats_count;
-                       }
-               }
+       return efx_ef10_update_stats_common(efx, full_stats, core_stats);
+}
+
+static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx)
+{
+       MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
+       struct efx_ef10_nic_data *nic_data = efx->nic_data;
+       DECLARE_BITMAP(mask, EF10_STAT_COUNT);
+       __le64 generation_start, generation_end;
+       u64 *stats = nic_data->stats;
+       u32 dma_len = MC_CMD_MAC_NSTATS * sizeof(u64);
+       struct efx_buffer stats_buf;
+       __le64 *dma_stats;
+       int rc;
+
+       spin_unlock_bh(&efx->stats_lock);
+
+       if (in_interrupt()) {
+               /* If in atomic context, cannot update stats.  Just update the
+                * software stats and return so the caller can continue.
+                */
+               spin_lock_bh(&efx->stats_lock);
+               efx_update_sw_stats(efx, stats);
+               return 0;
        }
 
-       if (core_stats) {
-               core_stats->rx_packets = stats[EF10_STAT_rx_packets];
-               core_stats->tx_packets = stats[EF10_STAT_tx_packets];
-               core_stats->rx_bytes = stats[EF10_STAT_rx_bytes];
-               core_stats->tx_bytes = stats[EF10_STAT_tx_bytes];
-               core_stats->rx_dropped = stats[EF10_STAT_rx_nodesc_drops] +
-                                        stats[GENERIC_STAT_rx_nodesc_trunc] +
-                                        stats[GENERIC_STAT_rx_noskb_drops];
-               core_stats->multicast = stats[EF10_STAT_rx_multicast];
-               core_stats->rx_length_errors =
-                       stats[EF10_STAT_rx_gtjumbo] +
-                       stats[EF10_STAT_rx_length_error];
-               core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad];
-               core_stats->rx_frame_errors = stats[EF10_STAT_rx_align_error];
-               core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow];
-               core_stats->rx_errors = (core_stats->rx_length_errors +
-                                        core_stats->rx_crc_errors +
-                                        core_stats->rx_frame_errors);
+       efx_ef10_get_stat_mask(efx, mask);
+
+       rc = efx_nic_alloc_buffer(efx, &stats_buf, dma_len, GFP_ATOMIC);
+       if (rc) {
+               spin_lock_bh(&efx->stats_lock);
+               return rc;
        }
 
-       return stats_count;
+       dma_stats = stats_buf.addr;
+       dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID;
+
+       MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, stats_buf.dma_addr);
+       MCDI_POPULATE_DWORD_1(inbuf, MAC_STATS_IN_CMD,
+                             MAC_STATS_IN_DMA, 1);
+       MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
+       MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
+
+       rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
+                               NULL, 0, NULL);
+       spin_lock_bh(&efx->stats_lock);
+       if (rc) {
+               /* Expect ENOENT if DMA queues have not been set up */
+               if (rc != -ENOENT || atomic_read(&efx->active_queues))
+                       efx_mcdi_display_error(efx, MC_CMD_MAC_STATS,
+                                              sizeof(inbuf), NULL, 0, rc);
+               goto out;
+       }
+
+       generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
+       if (generation_end == EFX_MC_STATS_GENERATION_INVALID) {
+               WARN_ON_ONCE(1);
+               goto out;
+       }
+       rmb();
+       efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask,
+                            stats, stats_buf.addr, false);
+       rmb();
+       generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
+       if (generation_end != generation_start) {
+               rc = -EAGAIN;
+               goto out;
+       }
+
+       efx_update_sw_stats(efx, stats);
+out:
+       efx_nic_free_buffer(efx, &stats_buf);
+       return rc;
+}
+
+static size_t efx_ef10_update_stats_vf(struct efx_nic *efx, u64 *full_stats,
+                                      struct rtnl_link_stats64 *core_stats)
+{
+       if (efx_ef10_try_update_nic_stats_vf(efx))
+               return 0;
+
+       return efx_ef10_update_stats_common(efx, full_stats, core_stats);
 }
 
 static void efx_ef10_push_irq_moderation(struct efx_channel *channel)
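
The VF statistics read relies on the same generation-count protocol as the PF path: firmware bumps GENERATION_START before rewriting the DMA block and GENERATION_END after it, so a reader that samples the end marker, copies the counters, and then (after a read barrier) samples the start marker can detect a torn snapshot and retry, which is what the -EAGAIN path does. A userspace-flavoured sketch of that seqlock-style read, with an illustrative layout:

    #include <stdatomic.h>
    #include <string.h>

    struct dma_stats {
            volatile unsigned long long gen_start;  /* device writes first */
            unsigned long long counters[8];
            volatile unsigned long long gen_end;    /* device writes last */
    };

    /* Returns 0 with a consistent copy in out[], or -1 if the device was
     * mid-update and the caller should retry.
     */
    static int read_snapshot(const struct dma_stats *src,
                             unsigned long long out[8])
    {
            unsigned long long end, start;

            end = src->gen_end;
            atomic_thread_fence(memory_order_acquire);      /* ~ rmb() */
            memcpy(out, src->counters, sizeof(src->counters));
            atomic_thread_fence(memory_order_acquire);      /* ~ rmb() */
            start = src->gen_start;

            return start == end ? 0 : -1;
    }
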
@@ -1044,6 +1424,14 @@ static void efx_ef10_push_irq_moderation(struct efx_channel *channel)
        }
 }
 
+static void efx_ef10_get_wol_vf(struct efx_nic *efx,
+                               struct ethtool_wolinfo *wol) {}
+
+static int efx_ef10_set_wol_vf(struct efx_nic *efx, u32 type)
+{
+       return -EOPNOTSUPP;
+}
+
 static void efx_ef10_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
 {
        wol->supported = 0;
@@ -1123,13 +1511,17 @@ static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
        /* All our allocations have been reset */
        efx_ef10_reset_mc_allocations(efx);
 
+       /* Driver-created vswitches and vports must be re-created */
+       nic_data->must_probe_vswitching = true;
+       nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
+
        /* The datapath firmware might have been changed */
        nic_data->must_check_datapath_caps = true;
 
        /* MAC statistics have been cleared on the NIC; clear the local
         * statistic that we update with efx_update_diff_stat().
         */
-       nic_data->stats[EF10_STAT_rx_bad_bytes] = 0;
+       nic_data->stats[EF10_STAT_port_rx_bad_bytes] = 0;
 
        return -EIO;
 }
@@ -1232,16 +1624,17 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
 {
        MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
                                                       EFX_BUF_SIZE));
-       MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_TXQ_OUT_LEN);
        bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
        size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE;
        struct efx_channel *channel = tx_queue->channel;
        struct efx_nic *efx = tx_queue->efx;
-       size_t inlen, outlen;
+       struct efx_ef10_nic_data *nic_data = efx->nic_data;
+       size_t inlen;
        dma_addr_t dma_addr;
        efx_qword_t *txd;
        int rc;
        int i;
+       BUILD_BUG_ON(MC_CMD_INIT_TXQ_OUT_LEN != 0);
 
        MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1);
        MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel);
@@ -1251,7 +1644,7 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
                              INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload,
                              INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload);
        MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0);
-       MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
+       MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, nic_data->vport_id);
 
        dma_addr = tx_queue->txd.buf.dma_addr;
 
@@ -1266,7 +1659,7 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
        inlen = MC_CMD_INIT_TXQ_IN_LEN(entries);
 
        rc = efx_mcdi_rpc(efx, MC_CMD_INIT_TXQ, inbuf, inlen,
-                         outbuf, sizeof(outbuf), &outlen);
+                         NULL, 0, NULL);
        if (rc)
                goto fail;
 
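
Several commands touched here (INIT_TXQ, INIT_RXQ) have no response payload, so the dummy response buffers are dropped in favour of passing NULL, with a BUILD_BUG_ON() pinning the zero-length assumption at compile time. The guard costs nothing at runtime; a simplified form of the trick:

    /* Simplified BUILD_BUG_ON: a negative array size fails the build. */
    #define MY_BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

    #define CMD_OUT_LEN 0   /* assumed: the command returns no payload */

    static void init_queue(void)
    {
            MY_BUILD_BUG_ON(CMD_OUT_LEN != 0);  /* breaks the build if the
                                                 * response ever grows */
            /* ... issue the request with a NULL response buffer ... */
    }
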
@@ -1299,7 +1692,7 @@ fail:
 static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue)
 {
        MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN);
-       MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_TXQ_OUT_LEN);
+       MCDI_DECLARE_BUF_ERR(outbuf);
        struct efx_nic *efx = tx_queue->efx;
        size_t outlen;
        int rc;
@@ -1378,19 +1771,33 @@ static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue)
        }
 }
 
-static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context)
+static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context,
+                                     bool exclusive, unsigned *context_size)
 {
        MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN);
        MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN);
+       struct efx_ef10_nic_data *nic_data = efx->nic_data;
        size_t outlen;
        int rc;
+       u32 alloc_type = exclusive ?
+                               MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE :
+                               MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED;
+       unsigned rss_spread = exclusive ?
+                               efx->rss_spread :
+                               min(rounddown_pow_of_two(efx->rss_spread),
+                                   EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE);
+
+       if (!exclusive && rss_spread == 1) {
+               *context = EFX_EF10_RSS_CONTEXT_INVALID;
+               if (context_size)
+                       *context_size = 1;
+               return 0;
+       }
 
        MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
-                      EVB_PORT_ID_ASSIGNED);
-       MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE,
-                      MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE);
-       MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES,
-                      EFX_MAX_CHANNELS);
+                      nic_data->vport_id);
+       MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, alloc_type);
+       MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, rss_spread);
 
        rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf),
                outbuf, sizeof(outbuf), &outlen);
@@ -1402,6 +1809,9 @@ static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context)
 
        *context = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID);
 
+       if (context_size)
+               *context_size = rss_spread;
+
        return 0;
 }
 
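
Shared RSS contexts cannot spread over an arbitrary queue count: the spread is rounded down to a power of two and capped, while an exclusive context gets the full spread. A standalone sketch of that computation (the cap of 64 is an assumed stand-in for EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE, whose value is not shown in this hunk):

    #include <stdio.h>

    static unsigned int rounddown_pow2(unsigned int x)
    {
            unsigned int p = 1;

            while (p <= x / 2)
                    p *= 2;
            return p;
    }

    static unsigned int rss_spread(unsigned int wanted, int exclusive)
    {
            const unsigned int shared_max = 64;     /* assumed cap */
            unsigned int shared = rounddown_pow2(wanted);

            if (exclusive)
                    return wanted;
            return shared < shared_max ? shared : shared_max;
    }

    int main(void)
    {
            printf("%u\n", rss_spread(24, 0));      /* prints 16 */
            return 0;
    }
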
@@ -1418,7 +1828,8 @@ static void efx_ef10_free_rss_context(struct efx_nic *efx, u32 context)
        WARN_ON(rc != 0);
 }
 
-static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context)
+static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context,
+                                      const u32 *rx_indir_table)
 {
        MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN);
        MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN);
@@ -1432,7 +1843,7 @@ static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context)
        for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); ++i)
                MCDI_PTR(tablebuf,
                         RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] =
-                               (u8) efx->rx_indir_table[i];
+                               (u8) rx_indir_table[i];
 
        rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, tablebuf,
                          sizeof(tablebuf), NULL, 0, NULL);
@@ -1460,27 +1871,119 @@ static void efx_ef10_rx_free_indir_table(struct efx_nic *efx)
        nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
 }
 
-static void efx_ef10_rx_push_rss_config(struct efx_nic *efx)
+static int efx_ef10_rx_push_shared_rss_config(struct efx_nic *efx,
+                                             unsigned *context_size)
 {
+       u32 new_rx_rss_context;
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
-       int rc;
+       int rc = efx_ef10_alloc_rss_context(efx, &new_rx_rss_context,
+                                           false, context_size);
 
-       netif_dbg(efx, drv, efx->net_dev, "pushing RSS config\n");
+       if (rc != 0)
+               return rc;
 
-       if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID) {
-               rc = efx_ef10_alloc_rss_context(efx, &nic_data->rx_rss_context);
-               if (rc != 0)
-                       goto fail;
+       nic_data->rx_rss_context = new_rx_rss_context;
+       nic_data->rx_rss_context_exclusive = false;
+       efx_set_default_rx_indir_table(efx);
+       return 0;
+}
+
+static int efx_ef10_rx_push_exclusive_rss_config(struct efx_nic *efx,
+                                                const u32 *rx_indir_table)
+{
+       struct efx_ef10_nic_data *nic_data = efx->nic_data;
+       int rc;
+       u32 new_rx_rss_context;
+
+       if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID ||
+           !nic_data->rx_rss_context_exclusive) {
+               rc = efx_ef10_alloc_rss_context(efx, &new_rx_rss_context,
+                                               true, NULL);
+               if (rc == -EOPNOTSUPP)
+                       return rc;
+               else if (rc != 0)
+                       goto fail1;
+       } else {
+               new_rx_rss_context = nic_data->rx_rss_context;
        }
 
-       rc = efx_ef10_populate_rss_table(efx, nic_data->rx_rss_context);
+       rc = efx_ef10_populate_rss_table(efx, new_rx_rss_context,
+                                        rx_indir_table);
        if (rc != 0)
-               goto fail;
+               goto fail2;
 
-       return;
+       if (nic_data->rx_rss_context != new_rx_rss_context)
+               efx_ef10_rx_free_indir_table(efx);
+       nic_data->rx_rss_context = new_rx_rss_context;
+       nic_data->rx_rss_context_exclusive = true;
+       if (rx_indir_table != efx->rx_indir_table)
+               memcpy(efx->rx_indir_table, rx_indir_table,
+                      sizeof(efx->rx_indir_table));
+       return 0;
 
-fail:
+fail2:
+       if (new_rx_rss_context != nic_data->rx_rss_context)
+               efx_ef10_free_rss_context(efx, new_rx_rss_context);
+fail1:
        netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+       return rc;
+}
+
+static int efx_ef10_pf_rx_push_rss_config(struct efx_nic *efx, bool user,
+                                         const u32 *rx_indir_table)
+{
+       int rc;
+
+       if (efx->rss_spread == 1)
+               return 0;
+
+       rc = efx_ef10_rx_push_exclusive_rss_config(efx, rx_indir_table);
+
+       if (rc == -ENOBUFS && !user) {
+               unsigned context_size;
+               bool mismatch = false;
+               size_t i;
+
+               for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table) && !mismatch;
+                    i++)
+                       mismatch = rx_indir_table[i] !=
+                               ethtool_rxfh_indir_default(i, efx->rss_spread);
+
+               rc = efx_ef10_rx_push_shared_rss_config(efx, &context_size);
+               if (rc == 0) {
+                       if (context_size != efx->rss_spread)
+                               netif_warn(efx, probe, efx->net_dev,
+                                          "Could not allocate an exclusive RSS"
+                                          " context; allocated a shared one of"
+                                          " different size."
+                                          " Wanted %u, got %u.\n",
+                                          efx->rss_spread, context_size);
+                       else if (mismatch)
+                               netif_warn(efx, probe, efx->net_dev,
+                                          "Could not allocate an exclusive RSS"
+                                          " context; allocated a shared one but"
+                                          " could not apply custom"
+                                          " indirection.\n");
+                       else
+                               netif_info(efx, probe, efx->net_dev,
+                                          "Could not allocate an exclusive RSS"
+                                          " context; allocated a shared one.\n");
+               }
+       }
+       return rc;
+}
+
+static int efx_ef10_vf_rx_push_rss_config(struct efx_nic *efx, bool user,
+                                         const u32 *rx_indir_table
+                                         __attribute__ ((unused)))
+{
+       struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+       if (user)
+               return -EOPNOTSUPP;
+       if (nic_data->rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID)
+               return 0;
+       return efx_ef10_rx_push_shared_rss_config(efx, NULL);
 }
 
 static int efx_ef10_rx_probe(struct efx_rx_queue *rx_queue)
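
The PF push above tries for an exclusive context first and only degrades to a shared one when the firmware is out of resources (-ENOBUFS) and the table was not set explicitly by the user; a user-requested indirection table is never silently downgraded. The three log levels then reflect how much of the request survived. Reduced to its decision, the policy is:

    #include <errno.h>

    enum rss_outcome { RSS_EXCLUSIVE, RSS_SHARED, RSS_FAIL };

    /* Condensed policy of the function above (illustrative). */
    static enum rss_outcome choose_rss(int exclusive_rc, int user_requested)
    {
            if (exclusive_rc == 0)
                    return RSS_EXCLUSIVE;
            if (exclusive_rc == -ENOBUFS && !user_requested)
                    return RSS_SHARED;      /* fall back, warn as needed */
            return RSS_FAIL;
    }
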
@@ -1496,14 +1999,15 @@ static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
        MCDI_DECLARE_BUF(inbuf,
                         MC_CMD_INIT_RXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
                                                EFX_BUF_SIZE));
-       MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_RXQ_OUT_LEN);
        struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
        size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE;
        struct efx_nic *efx = rx_queue->efx;
-       size_t inlen, outlen;
+       struct efx_ef10_nic_data *nic_data = efx->nic_data;
+       size_t inlen;
        dma_addr_t dma_addr;
        int rc;
        int i;
+       BUILD_BUG_ON(MC_CMD_INIT_RXQ_OUT_LEN != 0);
 
        rx_queue->scatter_n = 0;
        rx_queue->scatter_len = 0;
@@ -1517,7 +2021,7 @@ static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
                              INIT_RXQ_IN_FLAG_PREFIX, 1,
                              INIT_RXQ_IN_FLAG_TIMESTAMP, 1);
        MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);
-       MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
+       MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, nic_data->vport_id);
 
        dma_addr = rx_queue->rxd.buf.dma_addr;
 
@@ -1532,7 +2036,7 @@ static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
        inlen = MC_CMD_INIT_RXQ_IN_LEN(entries);
 
        rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen,
-                         outbuf, sizeof(outbuf), &outlen);
+                         NULL, 0, NULL);
        if (rc)
                netdev_WARN(efx->net_dev, "failed to initialise RXQ %d\n",
                            efx_rx_queue_index(rx_queue));
@@ -1541,7 +2045,7 @@ static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
 static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue)
 {
        MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN);
-       MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_RXQ_OUT_LEN);
+       MCDI_DECLARE_BUF_ERR(outbuf);
        struct efx_nic *efx = rx_queue->efx;
        size_t outlen;
        int rc;
@@ -1703,7 +2207,7 @@ static int efx_ef10_ev_init(struct efx_channel *channel)
 static void efx_ef10_ev_fini(struct efx_channel *channel)
 {
        MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
-       MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_EVQ_OUT_LEN);
+       MCDI_DECLARE_BUF_ERR(outbuf);
        struct efx_nic *efx = channel->efx;
        size_t outlen;
        int rc;
@@ -2286,11 +2790,12 @@ static void efx_ef10_filter_push_prep(struct efx_nic *efx,
                               match_fields);
        }
 
-       MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
+       MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, nic_data->vport_id);
        MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST,
                       spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
                       MC_CMD_FILTER_OP_IN_RX_DEST_DROP :
                       MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
+       MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DOMAIN, 0);
        MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST,
                       MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);
        MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE,
@@ -3055,6 +3560,9 @@ fail:
        return rc;
 }
 
+/* Caller must hold efx->filter_sem for read if race against
+ * efx_ef10_filter_table_remove() is possible
+ */
 static void efx_ef10_filter_table_restore(struct efx_nic *efx)
 {
        struct efx_ef10_filter_table *table = efx->filter_state;
@@ -3064,9 +3572,14 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx)
        bool failed = false;
        int rc;
 
+       WARN_ON(!rwsem_is_locked(&efx->filter_sem));
+
        if (!nic_data->must_restore_filters)
                return;
 
+       if (!table)
+               return;
+
        spin_lock_bh(&efx->filter_lock);
 
        for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
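
The new comments codify a reader/writer discipline around efx->filter_sem: paths that merely walk the filter table (restore after reset, rx-mode sync) hold it shared, while table teardown holds it exclusively and NULLs efx->filter_state first, which is why the readers can safely bail out on a NULL table. The shape of that discipline, assuming kernel context:

    #include <linux/rwsem.h>

    /* Illustrative only -- not driver code. */
    static void walk_filters(struct rw_semaphore *filter_sem, void **state)
    {
            down_read(filter_sem);
            if (*state) {
                    /* table cannot be freed while we hold the read lock */
            }
            up_read(filter_sem);
    }

    static void teardown_filters(struct rw_semaphore *filter_sem, void **state)
    {
            down_write(filter_sem);
            *state = NULL;          /* readers now see "no table" */
            /* free the old table here */
            up_write(filter_sem);
    }
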
@@ -3102,6 +3615,7 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx)
                nic_data->must_restore_filters = false;
 }
 
+/* Caller must hold efx->filter_sem for write */
 static void efx_ef10_filter_table_remove(struct efx_nic *efx)
 {
        struct efx_ef10_filter_table *table = efx->filter_state;
@@ -3110,6 +3624,10 @@ static void efx_ef10_filter_table_remove(struct efx_nic *efx)
        unsigned int filter_idx;
        int rc;
 
+       efx->filter_state = NULL;
+       if (!table)
+               return;
+
        for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
                spec = efx_ef10_filter_entry_spec(table, filter_idx);
                if (!spec)
@@ -3135,6 +3653,9 @@ static void efx_ef10_filter_table_remove(struct efx_nic *efx)
        kfree(table);
 }
 
+/* Caller must hold efx->filter_sem for read if race against
+ * efx_ef10_filter_table_remove() is possible
+ */
 static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
 {
        struct efx_ef10_filter_table *table = efx->filter_state;
@@ -3149,6 +3670,9 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
        if (!efx_dev_registered(efx))
                return;
 
+       if (!table)
+               return;
+
        /* Mark old filters that may need to be removed */
        spin_lock_bh(&efx->filter_lock);
        n = table->dev_uc_count < 0 ? 1 : table->dev_uc_count;
@@ -3280,6 +3804,78 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
        WARN_ON(remove_failed);
 }
 
+static int efx_ef10_set_mac_address(struct efx_nic *efx)
+{
+       MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN);
+       struct efx_ef10_nic_data *nic_data = efx->nic_data;
+       bool was_enabled = efx->port_enabled;
+       int rc;
+
+       efx_device_detach_sync(efx);
+       efx_net_stop(efx->net_dev);
+       down_write(&efx->filter_sem);
+       efx_ef10_filter_table_remove(efx);
+
+       ether_addr_copy(MCDI_PTR(inbuf, VADAPTOR_SET_MAC_IN_MACADDR),
+                       efx->net_dev->dev_addr);
+       MCDI_SET_DWORD(inbuf, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID,
+                      nic_data->vport_id);
+       rc = efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf,
+                         sizeof(inbuf), NULL, 0, NULL);
+
+       efx_ef10_filter_table_probe(efx);
+       up_write(&efx->filter_sem);
+       if (was_enabled)
+               efx_net_open(efx->net_dev);
+       netif_device_attach(efx->net_dev);
+
+#if !defined(CONFIG_SFC_SRIOV)
+       if (rc == -EPERM)
+               netif_err(efx, drv, efx->net_dev,
+                         "Cannot change MAC address; use sfboot to enable mac-spoofing"
+                         " on this interface\n");
+#else
+       if (rc == -EPERM) {
+               struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
+
+               /* Switch to PF and change MAC address on vport */
+               if (efx->pci_dev->is_virtfn && pci_dev_pf) {
+                       struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
+
+                       if (!efx_ef10_sriov_set_vf_mac(efx_pf,
+                                                      nic_data->vf_index,
+                                                      efx->net_dev->dev_addr))
+                               return 0;
+               }
+               netif_err(efx, drv, efx->net_dev,
+                         "Cannot change MAC address; use sfboot to enable mac-spoofing"
+                         " on this interface\n");
+       } else if (efx->pci_dev->is_virtfn) {
+               /* Successfully changed by VF (with MAC spoofing), so update the
+                * parent PF if possible.
+                */
+               struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
+
+               if (pci_dev_pf) {
+                       struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
+                       struct efx_ef10_nic_data *nic_data = efx_pf->nic_data;
+                       unsigned int i;
+
+                       for (i = 0; i < efx_pf->vf_count; ++i) {
+                               struct ef10_vf *vf = nic_data->vf + i;
+
+                               if (vf->efx == efx) {
+                                       ether_addr_copy(vf->mac,
+                                                       efx->net_dev->dev_addr);
+                                       return 0;
+                               }
+                       }
+               }
+       }
+#endif
+       return rc;
+}
+
 static int efx_ef10_mac_reconfigure(struct efx_nic *efx)
 {
        efx_ef10_filter_sync_rx_mode(efx);
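
efx_ef10_set_mac_address() has to change the vadaptor MAC while no filter references the old address, which dictates the ordering: detach and stop the device, take filter_sem for write, drop the filter table, issue MC_CMD_VADAPTOR_SET_MAC, then rebuild and restart in reverse order. Condensed (error handling elided; a sketch of the function above, not a drop-in replacement):

    static int change_mac_sequence(struct efx_nic *efx)
    {
            int rc;

            efx_device_detach_sync(efx);        /* 1. stop new transmits   */
            efx_net_stop(efx->net_dev);         /* 2. quiesce the datapath */
            down_write(&efx->filter_sem);
            efx_ef10_filter_table_remove(efx);  /* 3. no stale-MAC filters */

            rc = 0; /* 4. MC_CMD_VADAPTOR_SET_MAC via MCDI goes here */

            efx_ef10_filter_table_probe(efx);   /* 5. rebuild filters      */
            up_write(&efx->filter_sem);
            efx_net_open(efx->net_dev);         /* 6. restart              */
            netif_device_attach(efx->net_dev);
            return rc;
    }
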
@@ -3287,6 +3883,13 @@ static int efx_ef10_mac_reconfigure(struct efx_nic *efx)
        return efx_mcdi_set_mac(efx);
 }
 
+static int efx_ef10_mac_reconfigure_vf(struct efx_nic *efx)
+{
+       efx_ef10_filter_sync_rx_mode(efx);
+
+       return 0;
+}
+
 static int efx_ef10_start_bist(struct efx_nic *efx, u32 bist_type)
 {
        MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN);
@@ -3494,6 +4097,9 @@ static void efx_ef10_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
        _efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD);
 }
 
+static void efx_ef10_ptp_write_host_time_vf(struct efx_nic *efx,
+                                           u32 host_time) {}
+
 static int efx_ef10_rx_enable_timestamping(struct efx_channel *channel,
                                           bool temp)
 {
@@ -3571,6 +4177,12 @@ static int efx_ef10_ptp_set_ts_sync_events(struct efx_nic *efx, bool en,
        return 0;
 }
 
+static int efx_ef10_ptp_set_ts_config_vf(struct efx_nic *efx,
+                                        struct hwtstamp_config *init)
+{
+       return -EOPNOTSUPP;
+}
+
 static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx,
                                      struct hwtstamp_config *init)
 {
@@ -3607,14 +4219,118 @@ static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx,
        }
 }
 
+const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
+       .is_vf = true,
+       .mem_bar = EFX_MEM_VF_BAR,
+       .mem_map_size = efx_ef10_mem_map_size,
+       .probe = efx_ef10_probe_vf,
+       .remove = efx_ef10_remove,
+       .dimension_resources = efx_ef10_dimension_resources,
+       .init = efx_ef10_init_nic,
+       .fini = efx_port_dummy_op_void,
+       .map_reset_reason = efx_ef10_map_reset_reason,
+       .map_reset_flags = efx_ef10_map_reset_flags,
+       .reset = efx_ef10_reset,
+       .probe_port = efx_mcdi_port_probe,
+       .remove_port = efx_mcdi_port_remove,
+       .fini_dmaq = efx_ef10_fini_dmaq,
+       .prepare_flr = efx_ef10_prepare_flr,
+       .finish_flr = efx_port_dummy_op_void,
+       .describe_stats = efx_ef10_describe_stats,
+       .update_stats = efx_ef10_update_stats_vf,
+       .start_stats = efx_port_dummy_op_void,
+       .pull_stats = efx_port_dummy_op_void,
+       .stop_stats = efx_port_dummy_op_void,
+       .set_id_led = efx_mcdi_set_id_led,
+       .push_irq_moderation = efx_ef10_push_irq_moderation,
+       .reconfigure_mac = efx_ef10_mac_reconfigure_vf,
+       .check_mac_fault = efx_mcdi_mac_check_fault,
+       .reconfigure_port = efx_mcdi_port_reconfigure,
+       .get_wol = efx_ef10_get_wol_vf,
+       .set_wol = efx_ef10_set_wol_vf,
+       .resume_wol = efx_port_dummy_op_void,
+       .mcdi_request = efx_ef10_mcdi_request,
+       .mcdi_poll_response = efx_ef10_mcdi_poll_response,
+       .mcdi_read_response = efx_ef10_mcdi_read_response,
+       .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
+       .irq_enable_master = efx_port_dummy_op_void,
+       .irq_test_generate = efx_ef10_irq_test_generate,
+       .irq_disable_non_ev = efx_port_dummy_op_void,
+       .irq_handle_msi = efx_ef10_msi_interrupt,
+       .irq_handle_legacy = efx_ef10_legacy_interrupt,
+       .tx_probe = efx_ef10_tx_probe,
+       .tx_init = efx_ef10_tx_init,
+       .tx_remove = efx_ef10_tx_remove,
+       .tx_write = efx_ef10_tx_write,
+       .rx_push_rss_config = efx_ef10_vf_rx_push_rss_config,
+       .rx_probe = efx_ef10_rx_probe,
+       .rx_init = efx_ef10_rx_init,
+       .rx_remove = efx_ef10_rx_remove,
+       .rx_write = efx_ef10_rx_write,
+       .rx_defer_refill = efx_ef10_rx_defer_refill,
+       .ev_probe = efx_ef10_ev_probe,
+       .ev_init = efx_ef10_ev_init,
+       .ev_fini = efx_ef10_ev_fini,
+       .ev_remove = efx_ef10_ev_remove,
+       .ev_process = efx_ef10_ev_process,
+       .ev_read_ack = efx_ef10_ev_read_ack,
+       .ev_test_generate = efx_ef10_ev_test_generate,
+       .filter_table_probe = efx_ef10_filter_table_probe,
+       .filter_table_restore = efx_ef10_filter_table_restore,
+       .filter_table_remove = efx_ef10_filter_table_remove,
+       .filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter,
+       .filter_insert = efx_ef10_filter_insert,
+       .filter_remove_safe = efx_ef10_filter_remove_safe,
+       .filter_get_safe = efx_ef10_filter_get_safe,
+       .filter_clear_rx = efx_ef10_filter_clear_rx,
+       .filter_count_rx_used = efx_ef10_filter_count_rx_used,
+       .filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit,
+       .filter_get_rx_ids = efx_ef10_filter_get_rx_ids,
+#ifdef CONFIG_RFS_ACCEL
+       .filter_rfs_insert = efx_ef10_filter_rfs_insert,
+       .filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one,
+#endif
+#ifdef CONFIG_SFC_MTD
+       .mtd_probe = efx_port_dummy_op_int,
+#endif
+       .ptp_write_host_time = efx_ef10_ptp_write_host_time_vf,
+       .ptp_set_ts_config = efx_ef10_ptp_set_ts_config_vf,
+#ifdef CONFIG_SFC_SRIOV
+       .vswitching_probe = efx_ef10_vswitching_probe_vf,
+       .vswitching_restore = efx_ef10_vswitching_restore_vf,
+       .vswitching_remove = efx_ef10_vswitching_remove_vf,
+       .sriov_get_phys_port_id = efx_ef10_sriov_get_phys_port_id,
+#endif
+       .get_mac_address = efx_ef10_get_mac_address_vf,
+       .set_mac_address = efx_ef10_set_mac_address,
+
+       .revision = EFX_REV_HUNT_A0,
+       .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
+       .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
+       .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
+       .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST,
+       .can_rx_scatter = true,
+       .always_rx_scatter = true,
+       .max_interrupt_mode = EFX_INT_MODE_MSIX,
+       .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
+       .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+                            NETIF_F_RXHASH | NETIF_F_NTUPLE),
+       .mcdi_max_ver = 2,
+       .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
+       .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
+                           1 << HWTSTAMP_FILTER_ALL,
+};
+
 const struct efx_nic_type efx_hunt_a0_nic_type = {
+       .is_vf = false,
+       .mem_bar = EFX_MEM_BAR,
        .mem_map_size = efx_ef10_mem_map_size,
-       .probe = efx_ef10_probe,
+       .probe = efx_ef10_probe_pf,
        .remove = efx_ef10_remove,
        .dimension_resources = efx_ef10_dimension_resources,
        .init = efx_ef10_init_nic,
        .fini = efx_port_dummy_op_void,
-       .map_reset_reason = efx_mcdi_map_reset_reason,
+       .map_reset_reason = efx_ef10_map_reset_reason,
        .map_reset_flags = efx_ef10_map_reset_flags,
        .reset = efx_ef10_reset,
        .probe_port = efx_mcdi_port_probe,
@@ -3623,7 +4339,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
        .prepare_flr = efx_ef10_prepare_flr,
        .finish_flr = efx_port_dummy_op_void,
        .describe_stats = efx_ef10_describe_stats,
-       .update_stats = efx_ef10_update_stats,
+       .update_stats = efx_ef10_update_stats_pf,
        .start_stats = efx_mcdi_mac_start_stats,
        .pull_stats = efx_mcdi_mac_pull_stats,
        .stop_stats = efx_mcdi_mac_stop_stats,
@@ -3650,7 +4366,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
        .tx_init = efx_ef10_tx_init,
        .tx_remove = efx_ef10_tx_remove,
        .tx_write = efx_ef10_tx_write,
-       .rx_push_rss_config = efx_ef10_rx_push_rss_config,
+       .rx_push_rss_config = efx_ef10_pf_rx_push_rss_config,
        .rx_probe = efx_ef10_rx_probe,
        .rx_init = efx_ef10_rx_init,
        .rx_remove = efx_ef10_rx_remove,
@@ -3689,11 +4405,24 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
        .ptp_write_host_time = efx_ef10_ptp_write_host_time,
        .ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events,
        .ptp_set_ts_config = efx_ef10_ptp_set_ts_config,
+#ifdef CONFIG_SFC_SRIOV
+       .sriov_configure = efx_ef10_sriov_configure,
        .sriov_init = efx_ef10_sriov_init,
        .sriov_fini = efx_ef10_sriov_fini,
-       .sriov_mac_address_changed = efx_ef10_sriov_mac_address_changed,
        .sriov_wanted = efx_ef10_sriov_wanted,
        .sriov_reset = efx_ef10_sriov_reset,
+       .sriov_flr = efx_ef10_sriov_flr,
+       .sriov_set_vf_mac = efx_ef10_sriov_set_vf_mac,
+       .sriov_set_vf_vlan = efx_ef10_sriov_set_vf_vlan,
+       .sriov_set_vf_spoofchk = efx_ef10_sriov_set_vf_spoofchk,
+       .sriov_get_vf_config = efx_ef10_sriov_get_vf_config,
+       .sriov_set_vf_link_state = efx_ef10_sriov_set_vf_link_state,
+       .vswitching_probe = efx_ef10_vswitching_probe_pf,
+       .vswitching_restore = efx_ef10_vswitching_restore_pf,
+       .vswitching_remove = efx_ef10_vswitching_remove_pf,
+#endif
+       .get_mac_address = efx_ef10_get_mac_address_pf,
+       .set_mac_address = efx_ef10_set_mac_address,
 
        .revision = EFX_REV_HUNT_A0,
        .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c
new file mode 100644 (file)
index 0000000..6c9b6e4
--- /dev/null
@@ -0,0 +1,783 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2015 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+#include <linux/pci.h>
+#include <linux/module.h>
+#include "net_driver.h"
+#include "ef10_sriov.h"
+#include "efx.h"
+#include "nic.h"
+#include "mcdi_pcol.h"
+
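+/* Bind (or, with EVB_PORT_ID_NULL, unbind) a vport to a VF, identified by
+ * this PF's index plus the VF number within it.
+ */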
+static int efx_ef10_evb_port_assign(struct efx_nic *efx, unsigned int port_id,
+                                   unsigned int vf_fn)
+{
+       MCDI_DECLARE_BUF(inbuf, MC_CMD_EVB_PORT_ASSIGN_IN_LEN);
+       struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+       MCDI_SET_DWORD(inbuf, EVB_PORT_ASSIGN_IN_PORT_ID, port_id);
+       MCDI_POPULATE_DWORD_2(inbuf, EVB_PORT_ASSIGN_IN_FUNCTION,
+                             EVB_PORT_ASSIGN_IN_PF, nic_data->pf_index,
+                             EVB_PORT_ASSIGN_IN_VF, vf_fn);
+
+       return efx_mcdi_rpc(efx, MC_CMD_EVB_PORT_ASSIGN, inbuf, sizeof(inbuf),
+                           NULL, 0, NULL);
+}
+
+static int efx_ef10_vport_add_mac(struct efx_nic *efx,
+                                 unsigned int port_id, u8 *mac)
+{
+       MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN);
+
+       MCDI_SET_DWORD(inbuf, VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID, port_id);
+       ether_addr_copy(MCDI_PTR(inbuf, VPORT_ADD_MAC_ADDRESS_IN_MACADDR), mac);
+
+       return efx_mcdi_rpc(efx, MC_CMD_VPORT_ADD_MAC_ADDRESS, inbuf,
+                           sizeof(inbuf), NULL, 0, NULL);
+}
+
+static int efx_ef10_vport_del_mac(struct efx_nic *efx,
+                                 unsigned int port_id, u8 *mac)
+{
+       MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN);
+
+       MCDI_SET_DWORD(inbuf, VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID, port_id);
+       ether_addr_copy(MCDI_PTR(inbuf, VPORT_DEL_MAC_ADDRESS_IN_MACADDR), mac);
+
+       return efx_mcdi_rpc(efx, MC_CMD_VPORT_DEL_MAC_ADDRESS, inbuf,
+                           sizeof(inbuf), NULL, 0, NULL);
+}
+
+static int efx_ef10_vswitch_alloc(struct efx_nic *efx, unsigned int port_id,
+                                 unsigned int vswitch_type)
+{
+       MCDI_DECLARE_BUF(inbuf, MC_CMD_VSWITCH_ALLOC_IN_LEN);
+       int rc;
+
+       MCDI_SET_DWORD(inbuf, VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
+       MCDI_SET_DWORD(inbuf, VSWITCH_ALLOC_IN_TYPE, vswitch_type);
+       MCDI_SET_DWORD(inbuf, VSWITCH_ALLOC_IN_NUM_VLAN_TAGS, 2);
+       MCDI_POPULATE_DWORD_1(inbuf, VSWITCH_ALLOC_IN_FLAGS,
+                             VSWITCH_ALLOC_IN_FLAG_AUTO_PORT, 0);
+
+       /* Quietly try to allocate 2 VLAN tags */
+       rc = efx_mcdi_rpc_quiet(efx, MC_CMD_VSWITCH_ALLOC, inbuf, sizeof(inbuf),
+                               NULL, 0, NULL);
+
+       /* If 2 VLAN tags are too many, retry with a single VLAN tag */
+       if (rc == -EPROTO) {
+               MCDI_SET_DWORD(inbuf, VSWITCH_ALLOC_IN_NUM_VLAN_TAGS, 1);
+               rc = efx_mcdi_rpc(efx, MC_CMD_VSWITCH_ALLOC, inbuf,
+                                 sizeof(inbuf), NULL, 0, NULL);
+       } else if (rc) {
+               efx_mcdi_display_error(efx, MC_CMD_VSWITCH_ALLOC,
+                                      MC_CMD_VSWITCH_ALLOC_IN_LEN,
+                                      NULL, 0, rc);
+       }
+       return rc;
+}
+
+static int efx_ef10_vswitch_free(struct efx_nic *efx, unsigned int port_id)
+{
+       MCDI_DECLARE_BUF(inbuf, MC_CMD_VSWITCH_FREE_IN_LEN);
+
+       MCDI_SET_DWORD(inbuf, VSWITCH_FREE_IN_UPSTREAM_PORT_ID, port_id);
+
+       return efx_mcdi_rpc(efx, MC_CMD_VSWITCH_FREE, inbuf, sizeof(inbuf),
+                           NULL, 0, NULL);
+}
+
+static int efx_ef10_vport_alloc(struct efx_nic *efx,
+                               unsigned int port_id_in,
+                               unsigned int vport_type,
+                               u16 vlan,
+                               unsigned int *port_id_out)
+{
+       MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ALLOC_IN_LEN);
+       MCDI_DECLARE_BUF(outbuf, MC_CMD_VPORT_ALLOC_OUT_LEN);
+       size_t outlen;
+       int rc;
+
+       EFX_WARN_ON_PARANOID(!port_id_out);
+
+       MCDI_SET_DWORD(inbuf, VPORT_ALLOC_IN_UPSTREAM_PORT_ID, port_id_in);
+       MCDI_SET_DWORD(inbuf, VPORT_ALLOC_IN_TYPE, vport_type);
+       MCDI_SET_DWORD(inbuf, VPORT_ALLOC_IN_NUM_VLAN_TAGS,
+                      (vlan != EFX_EF10_NO_VLAN));
+       MCDI_POPULATE_DWORD_1(inbuf, VPORT_ALLOC_IN_FLAGS,
+                             VPORT_ALLOC_IN_FLAG_AUTO_PORT, 0);
+       if (vlan != EFX_EF10_NO_VLAN)
+               MCDI_POPULATE_DWORD_1(inbuf, VPORT_ALLOC_IN_VLAN_TAGS,
+                                     VPORT_ALLOC_IN_VLAN_TAG_0, vlan);
+
+       rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_ALLOC, inbuf, sizeof(inbuf),
+                         outbuf, sizeof(outbuf), &outlen);
+       if (rc)
+               return rc;
+       if (outlen < MC_CMD_VPORT_ALLOC_OUT_LEN)
+               return -EIO;
+
+       *port_id_out = MCDI_DWORD(outbuf, VPORT_ALLOC_OUT_VPORT_ID);
+       return 0;
+}
+
+static int efx_ef10_vport_free(struct efx_nic *efx, unsigned int port_id)
+{
+       MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_FREE_IN_LEN);
+
+       MCDI_SET_DWORD(inbuf, VPORT_FREE_IN_VPORT_ID, port_id);
+
+       return efx_mcdi_rpc(efx, MC_CMD_VPORT_FREE, inbuf, sizeof(inbuf),
+                           NULL, 0, NULL);
+}
+
+static int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id)
+{
+       MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN);
+
+       MCDI_SET_DWORD(inbuf, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
+       return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_ALLOC, inbuf, sizeof(inbuf),
+                           NULL, 0, NULL);
+}
+
+static int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id)
+{
+       MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_FREE_IN_LEN);
+
+       MCDI_SET_DWORD(inbuf, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id);
+       return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_FREE, inbuf, sizeof(inbuf),
+                           NULL, 0, NULL);
+}
+
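+/* Tear down each VF's vport state in reverse order of allocation: unbind
+ * the EVB port, remove the MAC from the vport, then free the vport itself.
+ * VFs still assigned to a guest are skipped.
+ */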
+static void efx_ef10_sriov_free_vf_vports(struct efx_nic *efx)
+{
+       struct efx_ef10_nic_data *nic_data = efx->nic_data;
+       unsigned int i;
+
+       if (!nic_data->vf)
+               return;
+
+       for (i = 0; i < efx->vf_count; i++) {
+               struct ef10_vf *vf = nic_data->vf + i;
+
+               /* If the VF is assigned to a guest, do not free the vport */
+               if (vf->pci_dev &&
+                   vf->pci_dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
+                       continue;
+
+               if (vf->vport_assigned) {
+                       efx_ef10_evb_port_assign(efx, EVB_PORT_ID_NULL, i);
+                       vf->vport_assigned = 0;
+               }
+
+               if (!is_zero_ether_addr(vf->mac)) {
+                       efx_ef10_vport_del_mac(efx, vf->vport_id, vf->mac);
+                       eth_zero_addr(vf->mac);
+               }
+
+               if (vf->vport_id) {
+                       efx_ef10_vport_free(efx, vf->vport_id);
+                       vf->vport_id = 0;
+               }
+
+               vf->efx = NULL;
+       }
+}
+
+static void efx_ef10_sriov_free_vf_vswitching(struct efx_nic *efx)
+{
+       struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+       efx_ef10_sriov_free_vf_vports(efx);
+       kfree(nic_data->vf);
+       nic_data->vf = NULL;
+}
+
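+/* Allocate a vport carrying the VF's VLAN, attach the VF's MAC address to
+ * it, then bind it to the VF's EVB port.
+ */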
+static int efx_ef10_sriov_assign_vf_vport(struct efx_nic *efx,
+                                         unsigned int vf_i)
+{
+       struct efx_ef10_nic_data *nic_data = efx->nic_data;
+       struct ef10_vf *vf = nic_data->vf + vf_i;
+       int rc;
+
+       if (WARN_ON_ONCE(!nic_data->vf))
+               return -EOPNOTSUPP;
+
+       rc = efx_ef10_vport_alloc(efx, EVB_PORT_ID_ASSIGNED,
+                                 MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL,
+                                 vf->vlan, &vf->vport_id);
+       if (rc)
+               return rc;
+
+       rc = efx_ef10_vport_add_mac(efx, vf->vport_id, vf->mac);
+       if (rc) {
+               eth_zero_addr(vf->mac);
+               return rc;
+       }
+
+       rc = efx_ef10_evb_port_assign(efx, vf->vport_id, vf_i);
+       if (rc)
+               return rc;
+
+       vf->vport_assigned = 1;
+       return 0;
+}
+
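+/* Allocate the per-VF bookkeeping array and give every VF a vport with a
+ * randomly generated MAC address.
+ */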
+static int efx_ef10_sriov_alloc_vf_vswitching(struct efx_nic *efx)
+{
+       struct efx_ef10_nic_data *nic_data = efx->nic_data;
+       unsigned int i;
+       int rc;
+
+       nic_data->vf = kcalloc(efx->vf_count, sizeof(struct ef10_vf),
+                              GFP_KERNEL);
+       if (!nic_data->vf)
+               return -ENOMEM;
+
+       for (i = 0; i < efx->vf_count; i++) {
+               eth_random_addr(nic_data->vf[i].mac);
+               nic_data->vf[i].efx = NULL;
+               nic_data->vf[i].vlan = EFX_EF10_NO_VLAN;
+
+               rc = efx_ef10_sriov_assign_vf_vport(efx, i);
+               if (rc)
+                       goto fail;
+       }
+
+       return 0;
+fail:
+       efx_ef10_sriov_free_vf_vports(efx);
+       kfree(nic_data->vf);
+       nic_data->vf = NULL;
+       return rc;
+}
+
+static int efx_ef10_sriov_restore_vf_vswitching(struct efx_nic *efx)
+{
+       unsigned int i;
+       int rc;
+
+       for (i = 0; i < efx->vf_count; i++) {
+               rc = efx_ef10_sriov_assign_vf_vport(efx, i);
+               if (rc)
+                       goto fail;
+       }
+
+       return 0;
+fail:
+       efx_ef10_sriov_free_vf_vswitching(efx);
+       return rc;
+}
+
+/* On top of the default firmware vswitch setup, create a VEB vswitch and
+ * expansion vport for use by this PCI function (the PF).
+ */
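+/* The resulting layout is roughly (a sketch, not normative):
+ *
+ *   physical port -- VEB vswitch -+- PF expansion vport (+ vadaptor)
+ *                                 +- one vport per VF
+ */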
+int efx_ef10_vswitching_probe_pf(struct efx_nic *efx)
+{
+       struct efx_ef10_nic_data *nic_data = efx->nic_data;
+       struct net_device *net_dev = efx->net_dev;
+       int rc;
+
+       if (pci_sriov_get_totalvfs(efx->pci_dev) <= 0) {
+               /* vswitch not needed as we have no VFs */
+               return efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
+       }
+
+       rc = efx_ef10_vswitch_alloc(efx, EVB_PORT_ID_ASSIGNED,
+                                   MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB);
+       if (rc)
+               goto fail1;
+
+       rc = efx_ef10_vport_alloc(efx, EVB_PORT_ID_ASSIGNED,
+                                 MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL,
+                                 EFX_EF10_NO_VLAN, &nic_data->vport_id);
+       if (rc)
+               goto fail2;
+
+       rc = efx_ef10_vport_add_mac(efx, nic_data->vport_id, net_dev->dev_addr);
+       if (rc)
+               goto fail3;
+       ether_addr_copy(nic_data->vport_mac, net_dev->dev_addr);
+
+       rc = efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
+       if (rc)
+               goto fail4;
+
+       return 0;
+fail4:
+       efx_ef10_vport_del_mac(efx, nic_data->vport_id, nic_data->vport_mac);
+       eth_zero_addr(nic_data->vport_mac);
+fail3:
+       efx_ef10_vport_free(efx, nic_data->vport_id);
+       nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
+fail2:
+       efx_ef10_vswitch_free(efx, EVB_PORT_ID_ASSIGNED);
+fail1:
+       return rc;
+}
+
+int efx_ef10_vswitching_probe_vf(struct efx_nic *efx)
+{
+       struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+       return efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
+}
+
+int efx_ef10_vswitching_restore_pf(struct efx_nic *efx)
+{
+       struct efx_ef10_nic_data *nic_data = efx->nic_data;
+       int rc;
+
+       if (!nic_data->must_probe_vswitching)
+               return 0;
+
+       rc = efx_ef10_vswitching_probe_pf(efx);
+       if (rc)
+               goto fail;
+
+       rc = efx_ef10_sriov_restore_vf_vswitching(efx);
+       if (rc)
+               goto fail;
+
+       nic_data->must_probe_vswitching = false;
+fail:
+       return rc;
+}
+
+int efx_ef10_vswitching_restore_vf(struct efx_nic *efx)
+{
+       struct efx_ef10_nic_data *nic_data = efx->nic_data;
+       int rc;
+
+       if (!nic_data->must_probe_vswitching)
+               return 0;
+
+       rc = efx_ef10_vadaptor_free(efx, EVB_PORT_ID_ASSIGNED);
+       if (rc)
+               return rc;
+
+       nic_data->must_probe_vswitching = false;
+       return 0;
+}
+
+void efx_ef10_vswitching_remove_pf(struct efx_nic *efx)
+{
+       struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+       efx_ef10_sriov_free_vf_vswitching(efx);
+
+       efx_ef10_vadaptor_free(efx, nic_data->vport_id);
+
+       if (nic_data->vport_id == EVB_PORT_ID_ASSIGNED)
+               return; /* No vswitch was ever created */
+
+       if (!is_zero_ether_addr(nic_data->vport_mac)) {
+               efx_ef10_vport_del_mac(efx, nic_data->vport_id,
+                                      nic_data->vport_mac);
+               eth_zero_addr(nic_data->vport_mac);
+       }
+       efx_ef10_vport_free(efx, nic_data->vport_id);
+       nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
+
+       /* Only free the vswitch if no VFs are assigned */
+       if (!pci_vfs_assigned(efx->pci_dev))
+               efx_ef10_vswitch_free(efx, nic_data->vport_id);
+}
+
+void efx_ef10_vswitching_remove_vf(struct efx_nic *efx)
+{
+       efx_ef10_vadaptor_free(efx, EVB_PORT_ID_ASSIGNED);
+}
+
+static int efx_ef10_pci_sriov_enable(struct efx_nic *efx, int num_vfs)
+{
+       int rc = 0;
+       struct pci_dev *dev = efx->pci_dev;
+
+       efx->vf_count = num_vfs;
+
+       rc = efx_ef10_sriov_alloc_vf_vswitching(efx);
+       if (rc)
+               goto fail1;
+
+       rc = pci_enable_sriov(dev, num_vfs);
+       if (rc)
+               goto fail2;
+
+       return 0;
+fail2:
+       efx_ef10_sriov_free_vf_vswitching(efx);
+fail1:
+       efx->vf_count = 0;
+       netif_err(efx, probe, efx->net_dev,
+                 "Failed to enable SR-IOV VFs\n");
+       return rc;
+}
+
+static int efx_ef10_pci_sriov_disable(struct efx_nic *efx, bool force)
+{
+       struct pci_dev *dev = efx->pci_dev;
+       unsigned int vfs_assigned = pci_vfs_assigned(dev);
+
+       if (vfs_assigned && !force) {
+               netif_info(efx, drv, efx->net_dev, "VFs are assigned to guests; "
+                          "please detach them before disabling SR-IOV\n");
+               return -EBUSY;
+       }
+
+       if (!vfs_assigned)
+               pci_disable_sriov(dev);
+
+       efx_ef10_sriov_free_vf_vswitching(efx);
+       efx->vf_count = 0;
+       return 0;
+}
+
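+/* Entry point for the PCI core's sriov_configure hook; userspace reaches it
+ * through the standard sysfs knob, e.g. (device address assumed):
+ *   echo 4 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs
+ *   echo 0 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs
+ */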
+int efx_ef10_sriov_configure(struct efx_nic *efx, int num_vfs)
+{
+       if (num_vfs == 0)
+               return efx_ef10_pci_sriov_disable(efx, false);
+       else
+               return efx_ef10_pci_sriov_enable(efx, num_vfs);
+}
+
+int efx_ef10_sriov_init(struct efx_nic *efx)
+{
+       return 0;
+}
+
+void efx_ef10_sriov_fini(struct efx_nic *efx)
+{
+       struct efx_ef10_nic_data *nic_data = efx->nic_data;
+       unsigned int i;
+       int rc;
+
+       if (!nic_data->vf) {
+               /* Remove any orphaned VFs that are not assigned to a guest */
+               if (pci_num_vf(efx->pci_dev) && !pci_vfs_assigned(efx->pci_dev))
+                       pci_disable_sriov(efx->pci_dev);
+               return;
+       }
+
+       /* Remove any VFs in the host */
+       for (i = 0; i < efx->vf_count; ++i) {
+               struct efx_nic *vf_efx = nic_data->vf[i].efx;
+
+               if (vf_efx)
+                       vf_efx->pci_dev->driver->remove(vf_efx->pci_dev);
+       }
+
+       rc = efx_ef10_pci_sriov_disable(efx, true);
+       if (rc)
+               netif_dbg(efx, drv, efx->net_dev,
+                         "Disabling SR-IOV was not successful; rc=%d\n", rc);
+       else
+               netif_dbg(efx, drv, efx->net_dev, "SR-IOV disabled\n");
+}
+
+static int efx_ef10_vport_del_vf_mac(struct efx_nic *efx, unsigned int port_id,
+                                    u8 *mac)
+{
+       MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN);
+       MCDI_DECLARE_BUF_ERR(outbuf);
+       size_t outlen;
+       int rc;
+
+       MCDI_SET_DWORD(inbuf, VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID, port_id);
+       ether_addr_copy(MCDI_PTR(inbuf, VPORT_DEL_MAC_ADDRESS_IN_MACADDR), mac);
+
+       rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_DEL_MAC_ADDRESS, inbuf,
+                         sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
+
+       return rc;
+}
+
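+/* A VF's MAC cannot be swapped while its vport is live, so this quiesces
+ * the VF (detach the netdev, drop filters and the vadaptor), unbinds the
+ * EVB port, swaps the MAC on the vport, and then rebuilds everything.
+ */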
+int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, u8 *mac)
+{
+       struct efx_ef10_nic_data *nic_data = efx->nic_data;
+       struct ef10_vf *vf;
+       int rc;
+
+       if (!nic_data->vf)
+               return -EOPNOTSUPP;
+
+       if (vf_i >= efx->vf_count)
+               return -EINVAL;
+       vf = nic_data->vf + vf_i;
+
+       if (vf->efx) {
+               efx_device_detach_sync(vf->efx);
+               efx_net_stop(vf->efx->net_dev);
+
+               down_write(&vf->efx->filter_sem);
+               vf->efx->type->filter_table_remove(vf->efx);
+
+               rc = efx_ef10_vadaptor_free(vf->efx, EVB_PORT_ID_ASSIGNED);
+               if (rc) {
+                       up_write(&vf->efx->filter_sem);
+                       return rc;
+               }
+       }
+
+       rc = efx_ef10_evb_port_assign(efx, EVB_PORT_ID_NULL, vf_i);
+       if (rc)
+               return rc;
+
+       if (!is_zero_ether_addr(vf->mac)) {
+               rc = efx_ef10_vport_del_vf_mac(efx, vf->vport_id, vf->mac);
+               if (rc)
+                       return rc;
+       }
+
+       if (!is_zero_ether_addr(mac)) {
+               rc = efx_ef10_vport_add_mac(efx, vf->vport_id, mac);
+               if (rc) {
+                       eth_zero_addr(vf->mac);
+                       goto fail;
+               }
+               if (vf->efx)
+                       ether_addr_copy(vf->efx->net_dev->dev_addr, mac);
+       }
+
+       ether_addr_copy(vf->mac, mac);
+
+       rc = efx_ef10_evb_port_assign(efx, vf->vport_id, vf_i);
+       if (rc)
+               goto fail;
+
+       if (vf->efx) {
+               /* The VF cannot use the vport_id that the PF created; its
+                * vadaptor is always attached to EVB_PORT_ID_ASSIGNED.
+                */
+               rc = efx_ef10_vadaptor_alloc(vf->efx, EVB_PORT_ID_ASSIGNED);
+               if (rc) {
+                       up_write(&vf->efx->filter_sem);
+                       return rc;
+               }
+               vf->efx->type->filter_table_probe(vf->efx);
+               up_write(&vf->efx->filter_sem);
+               efx_net_open(vf->efx->net_dev);
+               netif_device_attach(vf->efx->net_dev);
+       }
+
+       return 0;
+
+fail:
+       eth_zero_addr(vf->mac);
+       return rc;
+}
+
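+/* A vport's VLAN tags are fixed at VPORT_ALLOC time, so changing the VLAN
+ * means freeing the VF's vport and reallocating it; the restore_* labels
+ * below rebuild state in reverse order after the swap.
+ */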
+int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, u16 vlan,
+                              u8 qos)
+{
+       struct efx_ef10_nic_data *nic_data = efx->nic_data;
+       struct ef10_vf *vf;
+       u16 old_vlan, new_vlan;
+       int rc = 0, rc2 = 0;
+
+       if (vf_i >= efx->vf_count)
+               return -EINVAL;
+       if (qos != 0)
+               return -EINVAL;
+
+       vf = nic_data->vf + vf_i;
+
+       new_vlan = (vlan == 0) ? EFX_EF10_NO_VLAN : vlan;
+       if (new_vlan == vf->vlan)
+               return 0;
+
+       if (vf->efx) {
+               efx_device_detach_sync(vf->efx);
+               efx_net_stop(vf->efx->net_dev);
+
+               down_write(&vf->efx->filter_sem);
+               vf->efx->type->filter_table_remove(vf->efx);
+
+               rc = efx_ef10_vadaptor_free(vf->efx, EVB_PORT_ID_ASSIGNED);
+               if (rc)
+                       goto restore_filters;
+       }
+
+       if (vf->vport_assigned) {
+               rc = efx_ef10_evb_port_assign(efx, EVB_PORT_ID_NULL, vf_i);
+               if (rc) {
+                       netif_warn(efx, drv, efx->net_dev,
+                                  "Failed to change VLAN on VF %d.\n", vf_i);
+                       netif_warn(efx, drv, efx->net_dev,
+                                  "This is likely because the VF is bound to a driver in a VM.\n");
+                       netif_warn(efx, drv, efx->net_dev,
+                                  "Please unload the driver in the VM.\n");
+                       goto restore_vadaptor;
+               }
+               vf->vport_assigned = 0;
+       }
+
+       if (!is_zero_ether_addr(vf->mac)) {
+               rc = efx_ef10_vport_del_mac(efx, vf->vport_id, vf->mac);
+               if (rc)
+                       goto restore_evb_port;
+       }
+
+       if (vf->vport_id) {
+               rc = efx_ef10_vport_free(efx, vf->vport_id);
+               if (rc)
+                       goto restore_mac;
+               vf->vport_id = 0;
+       }
+
+       /* Do the actual VLAN change */
+       old_vlan = vf->vlan;
+       vf->vlan = new_vlan;
+
+       /* Restore everything in reverse order */
+       rc = efx_ef10_vport_alloc(efx, EVB_PORT_ID_ASSIGNED,
+                                 MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL,
+                                 vf->vlan, &vf->vport_id);
+       if (rc)
+               goto reset_nic;
+
+restore_mac:
+       if (!is_zero_ether_addr(vf->mac)) {
+               rc2 = efx_ef10_vport_add_mac(efx, vf->vport_id, vf->mac);
+               if (rc2) {
+                       eth_zero_addr(vf->mac);
+                       goto reset_nic;
+               }
+       }
+
+restore_evb_port:
+       rc2 = efx_ef10_evb_port_assign(efx, vf->vport_id, vf_i);
+       if (rc2)
+               goto reset_nic;
+       vf->vport_assigned = 1;
+
+restore_vadaptor:
+       if (vf->efx) {
+               rc2 = efx_ef10_vadaptor_alloc(vf->efx, EVB_PORT_ID_ASSIGNED);
+               if (rc2)
+                       goto reset_nic;
+       }
+
+restore_filters:
+       if (vf->efx) {
+               rc2 = vf->efx->type->filter_table_probe(vf->efx);
+               if (rc2)
+                       goto reset_nic;
+
+               up_write(&vf->efx->filter_sem);
+
+               rc2 = efx_net_open(vf->efx->net_dev);
+               if (rc2)
+                       goto reset_nic;
+
+               netif_device_attach(vf->efx->net_dev);
+       }
+       return rc;
+
+reset_nic:
+       if (vf->efx) {
+               up_write(&vf->efx->filter_sem);
+               netif_err(efx, drv, efx->net_dev,
+                         "Failed to restore VF - scheduling reset.\n");
+               efx_schedule_reset(vf->efx, RESET_TYPE_DATAPATH);
+       } else {
+               netif_err(efx, drv, efx->net_dev,
+                         "Failed to restore the VF and cannot reset the VF "
+                         "- VF is not functional.\n");
+               netif_err(efx, drv, efx->net_dev,
+                         "Please reload the driver attached to the VF.\n");
+       }
+
+       return rc ? rc : rc2;
+}
+
+int efx_ef10_sriov_set_vf_spoofchk(struct efx_nic *efx, int vf_i,
+                                  bool spoofchk)
+{
+       return spoofchk ? -EOPNOTSUPP : 0;
+}
+
+int efx_ef10_sriov_set_vf_link_state(struct efx_nic *efx, int vf_i,
+                                    int link_state)
+{
+       MCDI_DECLARE_BUF(inbuf, MC_CMD_LINK_STATE_MODE_IN_LEN);
+       struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+       BUILD_BUG_ON(IFLA_VF_LINK_STATE_AUTO !=
+                    MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO);
+       BUILD_BUG_ON(IFLA_VF_LINK_STATE_ENABLE !=
+                    MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP);
+       BUILD_BUG_ON(IFLA_VF_LINK_STATE_DISABLE !=
+                    MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN);
+       MCDI_POPULATE_DWORD_2(inbuf, LINK_STATE_MODE_IN_FUNCTION,
+                             LINK_STATE_MODE_IN_FUNCTION_PF,
+                             nic_data->pf_index,
+                             LINK_STATE_MODE_IN_FUNCTION_VF, vf_i);
+       MCDI_SET_DWORD(inbuf, LINK_STATE_MODE_IN_NEW_MODE, link_state);
+       return efx_mcdi_rpc(efx, MC_CMD_LINK_STATE_MODE, inbuf, sizeof(inbuf),
+                           NULL, 0, NULL); /* don't care what old mode was */
+}
+
+int efx_ef10_sriov_get_vf_config(struct efx_nic *efx, int vf_i,
+                                struct ifla_vf_info *ivf)
+{
+       MCDI_DECLARE_BUF(inbuf, MC_CMD_LINK_STATE_MODE_IN_LEN);
+       MCDI_DECLARE_BUF(outbuf, MC_CMD_LINK_STATE_MODE_OUT_LEN);
+
+       struct efx_ef10_nic_data *nic_data = efx->nic_data;
+       struct ef10_vf *vf;
+       size_t outlen;
+       int rc;
+
+       if (vf_i >= efx->vf_count)
+               return -EINVAL;
+
+       if (!nic_data->vf)
+               return -EOPNOTSUPP;
+
+       vf = nic_data->vf + vf_i;
+
+       ivf->vf = vf_i;
+       ivf->min_tx_rate = 0;
+       ivf->max_tx_rate = 0;
+       ether_addr_copy(ivf->mac, vf->mac);
+       ivf->vlan = (vf->vlan == EFX_EF10_NO_VLAN) ? 0 : vf->vlan;
+       ivf->qos = 0;
+
+       MCDI_POPULATE_DWORD_2(inbuf, LINK_STATE_MODE_IN_FUNCTION,
+                             LINK_STATE_MODE_IN_FUNCTION_PF,
+                             nic_data->pf_index,
+                             LINK_STATE_MODE_IN_FUNCTION_VF, vf_i);
+       MCDI_SET_DWORD(inbuf, LINK_STATE_MODE_IN_NEW_MODE,
+                      MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE);
+       rc = efx_mcdi_rpc(efx, MC_CMD_LINK_STATE_MODE, inbuf, sizeof(inbuf),
+                         outbuf, sizeof(outbuf), &outlen);
+       if (rc)
+               return rc;
+       if (outlen < MC_CMD_LINK_STATE_MODE_OUT_LEN)
+               return -EIO;
+       ivf->linkstate = MCDI_DWORD(outbuf, LINK_STATE_MODE_OUT_OLD_MODE);
+
+       return 0;
+}
+
+int efx_ef10_sriov_get_phys_port_id(struct efx_nic *efx,
+                                   struct netdev_phys_item_id *ppid)
+{
+       struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+       if (!is_valid_ether_addr(nic_data->port_id))
+               return -EOPNOTSUPP;
+
+       ppid->id_len = ETH_ALEN;
+       memcpy(ppid->id, nic_data->port_id, ppid->id_len);
+
+       return 0;
+}
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.h b/drivers/net/ethernet/sfc/ef10_sriov.h
new file mode 100644 (file)
index 0000000..db4ef53
--- /dev/null
@@ -0,0 +1,69 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2015 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EF10_SRIOV_H
+#define EF10_SRIOV_H
+
+#include "net_driver.h"
+
+/**
+ * struct ef10_vf - PF's store of VF data
+ * @efx: efx_nic struct for the current VF
+ * @pci_dev: the pci_dev struct for the VF, retained while the VF is assigned
+ * @vport_id: vport ID for the VF
+ * @vport_assigned: record whether the vport is currently assigned to the VF
+ * @mac: MAC address for the VF, zero when address is removed from the vport
+ * @vlan: Default VLAN for the VF or #EFX_EF10_NO_VLAN
+ */
+struct ef10_vf {
+       struct efx_nic *efx;
+       struct pci_dev *pci_dev;
+       unsigned int vport_id;
+       unsigned int vport_assigned;
+       u8 mac[ETH_ALEN];
+       u16 vlan;
+#define EFX_EF10_NO_VLAN       0
+};
+
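+/* EF10 VFs are created on demand via sriov_configure rather than being
+ * reserved at probe time, so sriov_wanted reports false and the PF never
+ * constrains its RSS layout for VFs up front.
+ */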
+static inline bool efx_ef10_sriov_wanted(struct efx_nic *efx)
+{
+       return false;
+}
+
+int efx_ef10_sriov_configure(struct efx_nic *efx, int num_vfs);
+int efx_ef10_sriov_init(struct efx_nic *efx);
+static inline void efx_ef10_sriov_reset(struct efx_nic *efx) {}
+void efx_ef10_sriov_fini(struct efx_nic *efx);
+static inline void efx_ef10_sriov_flr(struct efx_nic *efx, unsigned int vf_i) {}
+
+int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf, u8 *mac);
+
+int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i,
+                              u16 vlan, u8 qos);
+
+int efx_ef10_sriov_set_vf_spoofchk(struct efx_nic *efx, int vf,
+                                  bool spoofchk);
+
+int efx_ef10_sriov_get_vf_config(struct efx_nic *efx, int vf_i,
+                                struct ifla_vf_info *ivf);
+
+int efx_ef10_sriov_set_vf_link_state(struct efx_nic *efx, int vf_i,
+                                    int link_state);
+
+int efx_ef10_sriov_get_phys_port_id(struct efx_nic *efx,
+                                   struct netdev_phys_item_id *ppid);
+
+int efx_ef10_vswitching_probe_pf(struct efx_nic *efx);
+int efx_ef10_vswitching_probe_vf(struct efx_nic *efx);
+int efx_ef10_vswitching_restore_pf(struct efx_nic *efx);
+int efx_ef10_vswitching_restore_vf(struct efx_nic *efx);
+void efx_ef10_vswitching_remove_pf(struct efx_nic *efx);
+void efx_ef10_vswitching_remove_vf(struct efx_nic *efx);
+
+#endif /* EF10_SRIOV_H */
index 4b00545a3ace7784b3b3e668ccd68b4334c1500b..0c42ed9c9e4c56f7556ecbde4347556ca893550c 100644 (file)
@@ -26,6 +26,7 @@
 #include "efx.h"
 #include "nic.h"
 #include "selftest.h"
+#include "sriov.h"
 
 #include "mcdi.h"
 #include "workarounds.h"
@@ -76,6 +77,7 @@ const char *const efx_reset_type_names[] = {
        [RESET_TYPE_RECOVER_OR_ALL]     = "RECOVER_OR_ALL",
        [RESET_TYPE_WORLD]              = "WORLD",
        [RESET_TYPE_RECOVER_OR_DISABLE] = "RECOVER_OR_DISABLE",
+       [RESET_TYPE_DATAPATH]           = "DATAPATH",
        [RESET_TYPE_MC_BIST]            = "MC_BIST",
        [RESET_TYPE_DISABLE]            = "DISABLE",
        [RESET_TYPE_TX_WATCHDOG]        = "TX_WATCHDOG",
@@ -948,6 +950,16 @@ void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc)
 
 static void efx_fini_port(struct efx_nic *efx);
 
+/* We assume that efx->type->reconfigure_mac will always try to sync the RX
+ * filters, so it must hold a read lock on the filter table to guard against
+ * the table being freed underneath it.
+ */
+void efx_mac_reconfigure(struct efx_nic *efx)
+{
+       down_read(&efx->filter_sem);
+       efx->type->reconfigure_mac(efx);
+       up_read(&efx->filter_sem);
+}
+
 /* Push loopback/power/transmit disable settings to the PHY, and reconfigure
  * the MAC appropriately. All other PHY configuration changes are pushed
  * through phy_op->set_settings(), and pushed asynchronously to the MAC
@@ -1001,7 +1013,7 @@ static void efx_mac_work(struct work_struct *data)
 
        mutex_lock(&efx->mac_lock);
        if (efx->port_enabled)
-               efx->type->reconfigure_mac(efx);
+               efx_mac_reconfigure(efx);
        mutex_unlock(&efx->mac_lock);
 }
 
@@ -1041,11 +1053,11 @@ static int efx_init_port(struct efx_nic *efx)
 
        /* Reconfigure the MAC before creating dma queues (required for
         * Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */
-       efx->type->reconfigure_mac(efx);
+       efx_mac_reconfigure(efx);
 
        /* Ensure the PHY advertises the correct flow control settings */
        rc = efx->phy_op->reconfigure(efx);
-       if (rc)
+       if (rc && rc != -EPERM)
                goto fail2;
 
        mutex_unlock(&efx->mac_lock);
@@ -1067,7 +1079,7 @@ static void efx_start_port(struct efx_nic *efx)
        efx->port_enabled = true;
 
        /* Ensure MAC ingress/egress is enabled */
-       efx->type->reconfigure_mac(efx);
+       efx_mac_reconfigure(efx);
 
        mutex_unlock(&efx->mac_lock);
 }
@@ -1200,10 +1212,12 @@ static int efx_init_io(struct efx_nic *efx)
        struct pci_dev *pci_dev = efx->pci_dev;
        dma_addr_t dma_mask = efx->type->max_dma_mask;
        unsigned int mem_map_size = efx->type->mem_map_size(efx);
-       int rc;
+       int rc, bar;
 
        netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
 
+       bar = efx->type->mem_bar;
+
        rc = pci_enable_device(pci_dev);
        if (rc) {
                netif_err(efx, probe, efx->net_dev,
@@ -1234,8 +1248,8 @@ static int efx_init_io(struct efx_nic *efx)
        netif_dbg(efx, probe, efx->net_dev,
                  "using DMA mask %llx\n", (unsigned long long) dma_mask);
 
-       efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR);
-       rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc");
+       efx->membase_phys = pci_resource_start(efx->pci_dev, bar);
+       rc = pci_request_region(pci_dev, bar, "sfc");
        if (rc) {
                netif_err(efx, probe, efx->net_dev,
                          "request for memory BAR failed\n");
@@ -1258,7 +1272,7 @@ static int efx_init_io(struct efx_nic *efx)
        return 0;
 
  fail4:
-       pci_release_region(efx->pci_dev, EFX_MEM_BAR);
+       pci_release_region(efx->pci_dev, bar);
  fail3:
        efx->membase_phys = 0;
  fail2:
@@ -1269,6 +1283,8 @@ static int efx_init_io(struct efx_nic *efx)
 
 static void efx_fini_io(struct efx_nic *efx)
 {
+       int bar;
+
        netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");
 
        if (efx->membase) {
@@ -1277,11 +1293,23 @@ static void efx_fini_io(struct efx_nic *efx)
        }
 
        if (efx->membase_phys) {
-               pci_release_region(efx->pci_dev, EFX_MEM_BAR);
+               bar = efx->type->mem_bar;
+               pci_release_region(efx->pci_dev, bar);
                efx->membase_phys = 0;
        }
 
-       pci_disable_device(efx->pci_dev);
+       /* Don't disable bus-mastering if VFs are assigned */
+       if (!pci_vfs_assigned(efx->pci_dev))
+               pci_disable_device(efx->pci_dev);
+}
+
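+/* Spread the default RX indirection table evenly across rss_spread
+ * channels, using the same default distribution ethtool would.
+ */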
+void efx_set_default_rx_indir_table(struct efx_nic *efx)
+{
+       size_t i;
+
+       for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
+               efx->rx_indir_table[i] =
+                       ethtool_rxfh_indir_default(i, efx->rss_spread);
 }
 
 static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
@@ -1314,15 +1342,19 @@ static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
        /* If RSS is requested for the PF *and* VFs then we can't write RSS
         * table entries that are inaccessible to VFs
         */
-       if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
-           count > efx_vf_size(efx)) {
-               netif_warn(efx, probe, efx->net_dev,
-                          "Reducing number of RSS channels from %u to %u for "
-                          "VF support. Increase vf-msix-limit to use more "
-                          "channels on the PF.\n",
-                          count, efx_vf_size(efx));
-               count = efx_vf_size(efx);
+#ifdef CONFIG_SFC_SRIOV
+       if (efx->type->sriov_wanted) {
+               if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
+                   count > efx_vf_size(efx)) {
+                       netif_warn(efx, probe, efx->net_dev,
+                                  "Reducing number of RSS channels from %u to %u for "
+                                  "VF support. Increase vf-msix-limit to use more "
+                                  "channels on the PF.\n",
+                                  count, efx_vf_size(efx));
+                       count = efx_vf_size(efx);
+               }
        }
+#endif
 
        return count;
 }
@@ -1426,10 +1458,15 @@ static int efx_probe_interrupts(struct efx_nic *efx)
        }
 
        /* RSS might be usable on VFs even if it is disabled on the PF */
-
-       efx->rss_spread = ((efx->n_rx_channels > 1 ||
-                           !efx->type->sriov_wanted(efx)) ?
-                          efx->n_rx_channels : efx_vf_size(efx));
+#ifdef CONFIG_SFC_SRIOV
+       if (efx->type->sriov_wanted) {
+               efx->rss_spread = ((efx->n_rx_channels > 1 ||
+                                   !efx->type->sriov_wanted(efx)) ?
+                                  efx->n_rx_channels : efx_vf_size(efx));
+               return 0;
+       }
+#endif
+       efx->rss_spread = efx->n_rx_channels;
 
        return 0;
 }
@@ -1593,7 +1630,6 @@ static void efx_set_channels(struct efx_nic *efx)
 
 static int efx_probe_nic(struct efx_nic *efx)
 {
-       size_t i;
        int rc;
 
        netif_dbg(efx, probe, efx->net_dev, "creating NIC\n");
@@ -1616,10 +1652,9 @@ static int efx_probe_nic(struct efx_nic *efx)
                goto fail2;
 
        if (efx->n_channels > 1)
-               netdev_rss_key_fill(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
-       for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
-               efx->rx_indir_table[i] =
-                       ethtool_rxfh_indir_default(i, efx->rss_spread);
+               netdev_rss_key_fill(&efx->rx_hash_key,
+                                   sizeof(efx->rx_hash_key));
+       efx_set_default_rx_indir_table(efx);
 
        netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
        netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
@@ -1650,10 +1685,11 @@ static int efx_probe_filters(struct efx_nic *efx)
        int rc;
 
        spin_lock_init(&efx->filter_lock);
-
+       init_rwsem(&efx->filter_sem);
+       down_write(&efx->filter_sem);
        rc = efx->type->filter_table_probe(efx);
        if (rc)
-               return rc;
+               goto out_unlock;
 
 #ifdef CONFIG_RFS_ACCEL
        if (efx->type->offload_features & NETIF_F_NTUPLE) {
@@ -1662,12 +1698,14 @@ static int efx_probe_filters(struct efx_nic *efx)
                                           GFP_KERNEL);
                if (!efx->rps_flow_id) {
                        efx->type->filter_table_remove(efx);
-                       return -ENOMEM;
+                       rc = -ENOMEM;
+                       goto out_unlock;
                }
        }
 #endif
-
-       return 0;
+out_unlock:
+       up_write(&efx->filter_sem);
+       return rc;
 }
 
 static void efx_remove_filters(struct efx_nic *efx)
@@ -1675,12 +1713,16 @@ static void efx_remove_filters(struct efx_nic *efx)
 #ifdef CONFIG_RFS_ACCEL
        kfree(efx->rps_flow_id);
 #endif
+       down_write(&efx->filter_sem);
        efx->type->filter_table_remove(efx);
+       up_write(&efx->filter_sem);
 }
 
 static void efx_restore_filters(struct efx_nic *efx)
 {
+       down_read(&efx->filter_sem);
        efx->type->filter_table_restore(efx);
+       up_read(&efx->filter_sem);
 }
 
 /**************************************************************************
@@ -1712,21 +1754,33 @@ static int efx_probe_all(struct efx_nic *efx)
        }
        efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;
 
+#ifdef CONFIG_SFC_SRIOV
+       rc = efx->type->vswitching_probe(efx);
+       if (rc) /* not fatal; the PF will still work fine */
+               netif_warn(efx, probe, efx->net_dev,
+                          "failed to set up vswitching (rc=%d);"
+                          " VFs may not function\n", rc);
+#endif
+
        rc = efx_probe_filters(efx);
        if (rc) {
                netif_err(efx, probe, efx->net_dev,
                          "failed to create filter tables\n");
-               goto fail3;
+               goto fail4;
        }
 
        rc = efx_probe_channels(efx);
        if (rc)
-               goto fail4;
+               goto fail5;
 
        return 0;
 
- fail4:
+ fail5:
        efx_remove_filters(efx);
+ fail4:
+#ifdef CONFIG_SFC_SRIOV
+       efx->type->vswitching_remove(efx);
+#endif
  fail3:
        efx_remove_port(efx);
  fail2:
@@ -1816,6 +1870,9 @@ static void efx_remove_all(struct efx_nic *efx)
 {
        efx_remove_channels(efx);
        efx_remove_filters(efx);
+#ifdef CONFIG_SFC_SRIOV
+       efx->type->vswitching_remove(efx);
+#endif
        efx_remove_port(efx);
        efx_remove_nic(efx);
 }
@@ -2059,7 +2116,7 @@ static int efx_busy_poll(struct napi_struct *napi)
  *************************************************************************/
 
 /* Context: process, rtnl_lock() held. */
-static int efx_net_open(struct net_device *net_dev)
+int efx_net_open(struct net_device *net_dev)
 {
        struct efx_nic *efx = netdev_priv(net_dev);
        int rc;
@@ -2088,7 +2145,7 @@ static int efx_net_open(struct net_device *net_dev)
  * Note that the kernel will ignore our return code; this method
  * should really be a void.
  */
-static int efx_net_stop(struct net_device *net_dev)
+int efx_net_stop(struct net_device *net_dev)
 {
        struct efx_nic *efx = netdev_priv(net_dev);
 
@@ -2146,7 +2203,7 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
 
        mutex_lock(&efx->mac_lock);
        net_dev->mtu = new_mtu;
-       efx->type->reconfigure_mac(efx);
+       efx_mac_reconfigure(efx);
        mutex_unlock(&efx->mac_lock);
 
        efx_start_all(efx);
@@ -2159,6 +2216,8 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
        struct efx_nic *efx = netdev_priv(net_dev);
        struct sockaddr *addr = data;
        u8 *new_addr = addr->sa_data;
+       u8 old_addr[ETH_ALEN];
+       int rc;
 
        if (!is_valid_ether_addr(new_addr)) {
                netif_err(efx, drv, efx->net_dev,
@@ -2167,12 +2226,20 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
                return -EADDRNOTAVAIL;
        }
 
+       /* save the old address so it can be restored if the update fails */
+       ether_addr_copy(old_addr, net_dev->dev_addr);
        ether_addr_copy(net_dev->dev_addr, new_addr);
-       efx->type->sriov_mac_address_changed(efx);
+       if (efx->type->set_mac_address) {
+               rc = efx->type->set_mac_address(efx);
+               if (rc) {
+                       ether_addr_copy(net_dev->dev_addr, old_addr);
+                       return rc;
+               }
+       }
 
        /* Reconfigure the MAC */
        mutex_lock(&efx->mac_lock);
-       efx->type->reconfigure_mac(efx);
+       efx_mac_reconfigure(efx);
        mutex_unlock(&efx->mac_lock);
 
        return 0;
@@ -2199,7 +2266,7 @@ static int efx_set_features(struct net_device *net_dev, netdev_features_t data)
        return 0;
 }
 
-static const struct net_device_ops efx_farch_netdev_ops = {
+static const struct net_device_ops efx_netdev_ops = {
        .ndo_open               = efx_net_open,
        .ndo_stop               = efx_net_stop,
        .ndo_get_stats64        = efx_net_stats,
@@ -2212,10 +2279,12 @@ static const struct net_device_ops efx_farch_netdev_ops = {
        .ndo_set_rx_mode        = efx_set_rx_mode,
        .ndo_set_features       = efx_set_features,
 #ifdef CONFIG_SFC_SRIOV
-       .ndo_set_vf_mac         = efx_siena_sriov_set_vf_mac,
-       .ndo_set_vf_vlan        = efx_siena_sriov_set_vf_vlan,
-       .ndo_set_vf_spoofchk    = efx_siena_sriov_set_vf_spoofchk,
-       .ndo_get_vf_config      = efx_siena_sriov_get_vf_config,
+       .ndo_set_vf_mac         = efx_sriov_set_vf_mac,
+       .ndo_set_vf_vlan        = efx_sriov_set_vf_vlan,
+       .ndo_set_vf_spoofchk    = efx_sriov_set_vf_spoofchk,
+       .ndo_get_vf_config      = efx_sriov_get_vf_config,
+       .ndo_set_vf_link_state  = efx_sriov_set_vf_link_state,
+       .ndo_get_phys_port_id   = efx_sriov_get_phys_port_id,
 #endif
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = efx_netpoll,
@@ -2229,29 +2298,6 @@ static const struct net_device_ops efx_farch_netdev_ops = {
 #endif
 };
 
-static const struct net_device_ops efx_ef10_netdev_ops = {
-       .ndo_open               = efx_net_open,
-       .ndo_stop               = efx_net_stop,
-       .ndo_get_stats64        = efx_net_stats,
-       .ndo_tx_timeout         = efx_watchdog,
-       .ndo_start_xmit         = efx_hard_start_xmit,
-       .ndo_validate_addr      = eth_validate_addr,
-       .ndo_do_ioctl           = efx_ioctl,
-       .ndo_change_mtu         = efx_change_mtu,
-       .ndo_set_mac_address    = efx_set_mac_address,
-       .ndo_set_rx_mode        = efx_set_rx_mode,
-       .ndo_set_features       = efx_set_features,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = efx_netpoll,
-#endif
-#ifdef CONFIG_NET_RX_BUSY_POLL
-       .ndo_busy_poll          = efx_busy_poll,
-#endif
-#ifdef CONFIG_RFS_ACCEL
-       .ndo_rx_flow_steer      = efx_filter_rfs,
-#endif
-};
-
 static void efx_update_name(struct efx_nic *efx)
 {
        strcpy(efx->name, efx->net_dev->name);
@@ -2264,8 +2310,7 @@ static int efx_netdev_event(struct notifier_block *this,
 {
        struct net_device *net_dev = netdev_notifier_info_to_dev(ptr);
 
-       if ((net_dev->netdev_ops == &efx_farch_netdev_ops ||
-            net_dev->netdev_ops == &efx_ef10_netdev_ops) &&
+       if ((net_dev->netdev_ops == &efx_netdev_ops) &&
            event == NETDEV_CHANGENAME)
                efx_update_name(netdev_priv(net_dev));
 
@@ -2284,6 +2329,28 @@ show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
 }
 static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL);
 
+#ifdef CONFIG_SFC_MCDI_LOGGING
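+/* "mcdi_logging" sysfs attribute: reads back 0/1; any write not starting
+ * with '0' enables logging of MCDI commands, e.g. (device path assumed):
+ *   echo 1 > /sys/bus/pci/devices/0000:01:00.0/mcdi_logging
+ */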
+static ssize_t show_mcdi_log(struct device *dev, struct device_attribute *attr,
+                            char *buf)
+{
+       struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+       struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+
+       return scnprintf(buf, PAGE_SIZE, "%d\n", mcdi->logging_enabled);
+}
+static ssize_t set_mcdi_log(struct device *dev, struct device_attribute *attr,
+                           const char *buf, size_t count)
+{
+       struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+       struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+       bool enable = count > 0 && *buf != '0';
+
+       mcdi->logging_enabled = enable;
+       return count;
+}
+static DEVICE_ATTR(mcdi_logging, 0644, show_mcdi_log, set_mcdi_log);
+#endif
+
 static int efx_register_netdev(struct efx_nic *efx)
 {
        struct net_device *net_dev = efx->net_dev;
@@ -2292,12 +2359,9 @@ static int efx_register_netdev(struct efx_nic *efx)
 
        net_dev->watchdog_timeo = 5 * HZ;
        net_dev->irq = efx->pci_dev->irq;
-       if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) {
-               net_dev->netdev_ops = &efx_ef10_netdev_ops;
+       net_dev->netdev_ops = &efx_netdev_ops;
+       if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
                net_dev->priv_flags |= IFF_UNICAST_FLT;
-       } else {
-               net_dev->netdev_ops = &efx_farch_netdev_ops;
-       }
        net_dev->ethtool_ops = &efx_ethtool_ops;
        net_dev->gso_max_segs = EFX_TSO_MAX_SEGS;
 
@@ -2344,9 +2408,21 @@ static int efx_register_netdev(struct efx_nic *efx)
                          "failed to init net dev attributes\n");
                goto fail_registered;
        }
+#ifdef CONFIG_SFC_MCDI_LOGGING
+       rc = device_create_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
+       if (rc) {
+               netif_err(efx, drv, efx->net_dev,
+                         "failed to init mcdi_logging attribute\n");
+               goto fail_attr_mcdi_logging;
+       }
+#endif
 
        return 0;
 
+#ifdef CONFIG_SFC_MCDI_LOGGING
+fail_attr_mcdi_logging:
+       device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
+#endif
 fail_registered:
        rtnl_lock();
        efx_dissociate(efx);
@@ -2365,13 +2441,14 @@ static void efx_unregister_netdev(struct efx_nic *efx)
 
        BUG_ON(netdev_priv(efx->net_dev) != efx);
 
-       strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
-       device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
-
-       rtnl_lock();
-       unregister_netdevice(efx->net_dev);
-       efx->state = STATE_UNINIT;
-       rtnl_unlock();
+       if (efx_dev_registered(efx)) {
+               strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
+#ifdef CONFIG_SFC_MCDI_LOGGING
+               device_remove_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
+#endif
+               device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
+               unregister_netdev(efx->net_dev);
+       }
 }
 
 /**************************************************************************
@@ -2393,7 +2470,8 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
        efx_disable_interrupts(efx);
 
        mutex_lock(&efx->mac_lock);
-       if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
+       if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
+           method != RESET_TYPE_DATAPATH)
                efx->phy_op->fini(efx);
        efx->type->fini(efx);
 }
@@ -2422,11 +2500,13 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
        if (!ok)
                goto fail;
 
-       if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) {
+       if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
+           method != RESET_TYPE_DATAPATH) {
                rc = efx->phy_op->init(efx);
                if (rc)
                        goto fail;
-               if (efx->phy_op->reconfigure(efx))
+               rc = efx->phy_op->reconfigure(efx);
+               if (rc && rc != -EPERM)
                        netif_err(efx, drv, efx->net_dev,
                                  "could not restore PHY settings\n");
        }
@@ -2434,8 +2514,20 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
        rc = efx_enable_interrupts(efx);
        if (rc)
                goto fail;
+
+#ifdef CONFIG_SFC_SRIOV
+       rc = efx->type->vswitching_restore(efx);
+       if (rc) /* not fatal; the PF will still work fine */
+               netif_warn(efx, probe, efx->net_dev,
+                          "failed to restore vswitching (rc=%d);"
+                          " VFs may not function\n", rc);
+#endif
+
+       down_read(&efx->filter_sem);
        efx_restore_filters(efx);
-       efx->type->sriov_reset(efx);
+       up_read(&efx->filter_sem);
+       if (efx->type->sriov_reset)
+               efx->type->sriov_reset(efx);
 
        mutex_unlock(&efx->mac_lock);
 
@@ -2605,6 +2697,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
        case RESET_TYPE_WORLD:
        case RESET_TYPE_DISABLE:
        case RESET_TYPE_RECOVER_OR_DISABLE:
+       case RESET_TYPE_DATAPATH:
        case RESET_TYPE_MC_BIST:
        case RESET_TYPE_MCDI_TIMEOUT:
                method = type;
@@ -2655,6 +2748,8 @@ static const struct pci_device_id efx_pci_table[] = {
         .driver_data = (unsigned long) &siena_a0_nic_type},
        {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0903),  /* SFC9120 PF */
         .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
+       {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1903),  /* SFC9120 VF */
+        .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
        {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0923),  /* SFC9140 PF */
         .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
        {0}                     /* end of list */
@@ -2809,7 +2904,8 @@ static void efx_pci_remove_main(struct efx_nic *efx)
 }
 
 /* Final NIC shutdown
- * This is called only at module unload (or hotplug removal).
+ * This is called only at module unload (or hotplug removal).  A PF can call
+ * this on its VFs to ensure they are unbound first.
  */
 static void efx_pci_remove(struct pci_dev *pci_dev)
 {
@@ -2826,7 +2922,9 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
        efx_disable_interrupts(efx);
        rtnl_unlock();
 
-       efx->type->sriov_fini(efx);
+       if (efx->type->sriov_fini)
+               efx->type->sriov_fini(efx);
+
        efx_unregister_netdev(efx);
 
        efx_mtd_remove(efx);
@@ -3008,7 +3106,8 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
        netif_info(efx, probe, efx->net_dev,
                   "Solarflare NIC detected\n");
 
-       efx_probe_vpd_strings(efx);
+       if (!efx->type->is_vf)
+               efx_probe_vpd_strings(efx);
 
        /* Set up basic I/O (BAR mappings etc) */
        rc = efx_init_io(efx);
@@ -3023,10 +3122,12 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
        if (rc)
                goto fail4;
 
-       rc = efx->type->sriov_init(efx);
-       if (rc)
-               netif_err(efx, probe, efx->net_dev,
-                         "SR-IOV can't be enabled rc %d\n", rc);
+       if (efx->type->sriov_init) {
+               rc = efx->type->sriov_init(efx);
+               if (rc)
+                       netif_err(efx, probe, efx->net_dev,
+                                 "SR-IOV can't be enabled rc %d\n", rc);
+       }
 
        netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");
 
@@ -3058,6 +3159,26 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
        return rc;
 }
 
+/* efx_pci_sriov_configure returns the actual number of Virtual Functions
+ * enabled on success
+ */
+#ifdef CONFIG_SFC_SRIOV
+static int efx_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
+{
+       int rc;
+       struct efx_nic *efx = pci_get_drvdata(dev);
+
+       if (efx->type->sriov_configure) {
+               rc = efx->type->sriov_configure(efx, num_vfs);
+               if (rc)
+                       return rc;
+               else
+                       return num_vfs;
+       } else
+               return -EOPNOTSUPP;
+}
+#endif
+
 static int efx_pm_freeze(struct device *dev)
 {
        struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
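The efx_pci_sriov_configure() helper above is wired into efx_pci_driver just below as the standard PCI-core .sriov_configure callback behind the sriov_numvfs sysfs attribute. Roughly, writing N to /sys/bus/pci/devices/<bdf>/sriov_numvfs reaches the driver like this (a sketch of the core's dispatch, not the literal kernel source):

	static int sriov_numvfs_dispatch_sketch(struct pci_dev *pdev,
						int num_vfs)
	{
		if (!pdev->driver || !pdev->driver->sriov_configure)
			return -ENOSYS;
		/* 0 disables SR-IOV; on success the callback returns the
		 * VF count actually enabled, which is why
		 * efx_pci_sriov_configure() maps a successful
		 * sriov_configure() to num_vfs.
		 */
		return pdev->driver->sriov_configure(pdev, num_vfs);
	}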
@@ -3280,6 +3401,9 @@ static struct pci_driver efx_pci_driver = {
        .remove         = efx_pci_remove,
        .driver.pm      = &efx_pm_ops,
        .err_handler    = &efx_err_handlers,
+#ifdef CONFIG_SFC_SRIOV
+       .sriov_configure = efx_pci_sriov_configure,
+#endif
 };
 
 /**************************************************************************
@@ -3302,9 +3426,11 @@ static int __init efx_init_module(void)
        if (rc)
                goto err_notifier;
 
+#ifdef CONFIG_SFC_SRIOV
        rc = efx_init_sriov();
        if (rc)
                goto err_sriov;
+#endif
 
        reset_workqueue = create_singlethread_workqueue("sfc_reset");
        if (!reset_workqueue) {
@@ -3321,8 +3447,10 @@ static int __init efx_init_module(void)
  err_pci:
        destroy_workqueue(reset_workqueue);
  err_reset:
+#ifdef CONFIG_SFC_SRIOV
        efx_fini_sriov();
  err_sriov:
+#endif
        unregister_netdevice_notifier(&efx_netdev_notifier);
  err_notifier:
        return rc;
@@ -3334,7 +3462,9 @@ static void __exit efx_exit_module(void)
 
        pci_unregister_driver(&efx_pci_driver);
        destroy_workqueue(reset_workqueue);
+#ifdef CONFIG_SFC_SRIOV
        efx_fini_sriov();
+#endif
        unregister_netdevice_notifier(&efx_netdev_notifier);
 
 }
index 2587c582a8216eb9ec5bafc6310eb1eb9c94531f..acb1e0718485708aa3ca5a3533f95a6a3fee0f13 100644 (file)
 #include "filter.h"
 
 /* All controllers use BAR 0 for I/O space and BAR 2(&3) for memory */
+/* All VFs use BAR 0/1 for memory */
 #define EFX_MEM_BAR 2
+#define EFX_MEM_VF_BAR 0
+
+int efx_net_open(struct net_device *net_dev);
+int efx_net_stop(struct net_device *net_dev);
 
 /* TX */
 int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
@@ -32,6 +37,7 @@ unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
 extern unsigned int efx_piobuf_size;
 
 /* RX */
+void efx_set_default_rx_indir_table(struct efx_nic *efx);
 void efx_rx_config_page_split(struct efx_nic *efx);
 int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
 void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
@@ -71,6 +77,8 @@ void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
 
 /* Filters */
 
+void efx_mac_reconfigure(struct efx_nic *efx);
+
 /**
  * efx_filter_insert_filter - add or replace a filter
  * @efx: NIC in which to insert the filter
@@ -220,6 +228,13 @@ static inline void efx_mtd_rename(struct efx_nic *efx) {}
 static inline void efx_mtd_remove(struct efx_nic *efx) {}
 #endif
 
+#ifdef CONFIG_SFC_SRIOV
+static inline unsigned int efx_vf_size(struct efx_nic *efx)
+{
+       return 1 << efx->vi_scale;
+}
+#endif
+
 static inline void efx_schedule_channel(struct efx_channel *channel)
 {
        netif_vdbg(channel->efx, intr, channel->efx->net_dev,
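efx_vf_size() gives the number of VIs reserved per VF, a power of two set by vi_scale; the buffer-table sizing in the farch.c hunk below multiplies it into entries_per_vf. A worked example with an assumed vi_scale of 2 (illustrative values only):

	/* assumed: efx->vi_scale == 2 */
	unsigned int vis_per_vf = efx_vf_size(efx);	/* 1 << 2 == 4 */
	/* 16 VFs would then occupy 16 * 4 == 64 VIs beyond the PF's own */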
index d1dbb5fb31bb515b6926d981bf8eab0aafa6deae..c94f56271dd451eba1990720034ccc217cdf5ed2 100644 (file)
@@ -143,6 +143,7 @@ enum efx_loopback_mode {
  * @RESET_TYPE_WORLD: Reset as much as possible
  * @RESET_TYPE_RECOVER_OR_DISABLE: Try to recover. Apply RESET_TYPE_DISABLE if
  * unsuccessful.
+ * @RESET_TYPE_DATAPATH: Reset datapath only.
  * @RESET_TYPE_MC_BIST: MC entering BIST mode.
  * @RESET_TYPE_DISABLE: Reset datapath, MAC and PHY; leave NIC disabled
  * @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog
@@ -159,6 +160,7 @@ enum reset_type {
        RESET_TYPE_ALL,
        RESET_TYPE_WORLD,
        RESET_TYPE_RECOVER_OR_DISABLE,
+       RESET_TYPE_DATAPATH,
        RESET_TYPE_MC_BIST,
        RESET_TYPE_DISABLE,
        RESET_TYPE_MAX_METHOD,
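The new reset type slots into the existing scheduling path; a caller that only needs the datapath rebuilt requests it like any other method (usage sketch; efx_schedule_reset() and the case added above are both in this patch):

	/* Datapath-only reset: efx_reset_down()/efx_reset_up() skip the
	 * PHY fini/init (see the efx.c hunks above) and efx_mcdi_reset()
	 * returns before rebooting the MC (see the mcdi.c hunk below).
	 */
	efx_schedule_reset(efx, RESET_TYPE_DATAPATH);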
index 4835bc0d0de87a125cbe1d1b0ece01d4f3bd280f..034797661f96462b73910661bb813575f3a2e330 100644 (file)
@@ -734,7 +734,7 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
        /* Reconfigure the MAC. The PHY *may* generate a link state change event
         * if the user just changed the advertised capabilities, but there's no
         * harm doing this twice */
-       efx->type->reconfigure_mac(efx);
+       efx_mac_reconfigure(efx);
 
 out:
        mutex_unlock(&efx->mac_lock);
@@ -1109,9 +1109,8 @@ static int efx_ethtool_set_rxfh(struct net_device *net_dev, const u32 *indir,
                return -EOPNOTSUPP;
        if (!indir)
                return 0;
-       memcpy(efx->rx_indir_table, indir, sizeof(efx->rx_indir_table));
-       efx->type->rx_push_rss_config(efx);
-       return 0;
+
+       return efx->type->rx_push_rss_config(efx, true, indir);
 }
 
 static int efx_ethtool_get_ts_info(struct net_device *net_dev,
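The rx_push_rss_config op now owns both copying the indirection table and pushing it to hardware, returning an errno that set_rxfh propagates directly. A conforming implementation follows the shape of the falcon_b0 version rewritten below (sketch only; the function name is illustrative):

	static int example_rx_push_rss_config(struct efx_nic *efx, bool user,
					      const u32 *rx_indir_table)
	{
		/* keep the software copy in sync, then program hardware */
		memcpy(efx->rx_indir_table, rx_indir_table,
		       sizeof(efx->rx_indir_table));
		/* ... write the table to the NIC here, returning a
		 * negative errno on failure ...
		 */
		return 0;
	}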
index f166c8ef38a3073a70fa436b2ba4865fb7575d1f..80e69af21642620df7607cbd0b2b882032a7abfd 100644 (file)
@@ -477,16 +477,29 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
  *
  **************************************************************************
  */
+static int dummy_rx_push_rss_config(struct efx_nic *efx, bool user,
+                                   const u32 *rx_indir_table)
+{
+       (void) efx;
+       (void) user;
+       (void) rx_indir_table;
+       return -ENOSYS;
+}
 
-static void falcon_b0_rx_push_rss_config(struct efx_nic *efx)
+static int falcon_b0_rx_push_rss_config(struct efx_nic *efx, bool user,
+                                       const u32 *rx_indir_table)
 {
        efx_oword_t temp;
 
+       (void) user;
        /* Set hash key for IPv4 */
        memcpy(&temp, efx->rx_hash_key, sizeof(temp));
        efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);
 
+       memcpy(efx->rx_indir_table, rx_indir_table,
+              sizeof(efx->rx_indir_table));
        efx_farch_rx_push_indir_table(efx);
+       return 0;
 }
 
 /**************************************************************************
@@ -2507,7 +2520,7 @@ static int falcon_init_nic(struct efx_nic *efx)
        falcon_init_rx_cfg(efx);
 
        if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
-               falcon_b0_rx_push_rss_config(efx);
+               falcon_b0_rx_push_rss_config(efx, false, efx->rx_indir_table);
 
                /* Set destination of both TX and RX Flush events */
                EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
@@ -2687,6 +2700,8 @@ static int falcon_set_wol(struct efx_nic *efx, u32 type)
  */
 
 const struct efx_nic_type falcon_a1_nic_type = {
+       .is_vf = false,
+       .mem_bar = EFX_MEM_BAR,
        .mem_map_size = falcon_a1_mem_map_size,
        .probe = falcon_probe_nic,
        .remove = falcon_remove_nic,
@@ -2729,7 +2744,7 @@ const struct efx_nic_type falcon_a1_nic_type = {
        .tx_init = efx_farch_tx_init,
        .tx_remove = efx_farch_tx_remove,
        .tx_write = efx_farch_tx_write,
-       .rx_push_rss_config = efx_port_dummy_op_void,
+       .rx_push_rss_config = dummy_rx_push_rss_config,
        .rx_probe = efx_farch_rx_probe,
        .rx_init = efx_farch_rx_init,
        .rx_remove = efx_farch_rx_remove,
@@ -2766,11 +2781,6 @@ const struct efx_nic_type falcon_a1_nic_type = {
        .mtd_write = falcon_mtd_write,
        .mtd_sync = falcon_mtd_sync,
 #endif
-       .sriov_init = efx_falcon_sriov_init,
-       .sriov_fini = efx_falcon_sriov_fini,
-       .sriov_mac_address_changed = efx_falcon_sriov_mac_address_changed,
-       .sriov_wanted = efx_falcon_sriov_wanted,
-       .sriov_reset = efx_falcon_sriov_reset,
 
        .revision = EFX_REV_FALCON_A1,
        .txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER,
@@ -2788,6 +2798,8 @@ const struct efx_nic_type falcon_a1_nic_type = {
 };
 
 const struct efx_nic_type falcon_b0_nic_type = {
+       .is_vf = false,
+       .mem_bar = EFX_MEM_BAR,
        .mem_map_size = falcon_b0_mem_map_size,
        .probe = falcon_probe_nic,
        .remove = falcon_remove_nic,
@@ -2867,11 +2879,6 @@ const struct efx_nic_type falcon_b0_nic_type = {
        .mtd_write = falcon_mtd_write,
        .mtd_sync = falcon_mtd_sync,
 #endif
-       .sriov_init = efx_falcon_sriov_init,
-       .sriov_fini = efx_falcon_sriov_fini,
-       .sriov_mac_address_changed = efx_falcon_sriov_mac_address_changed,
-       .sriov_wanted = efx_falcon_sriov_wanted,
-       .sriov_reset = efx_falcon_sriov_reset,
 
        .revision = EFX_REV_FALCON_B0,
        .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
index bb89e96a125eab7bf0e5d8569f6119adb720dbb5..f08266f0eca2363ec11138d15598caaf5a800e4a 100644 (file)
@@ -20,6 +20,8 @@
 #include "efx.h"
 #include "nic.h"
 #include "farch_regs.h"
+#include "sriov.h"
+#include "siena_sriov.h"
 #include "io.h"
 #include "workarounds.h"
 
@@ -1198,13 +1200,17 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
                netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
                           channel->channel, ev_sub_data);
                efx_farch_handle_tx_flush_done(efx, event);
+#ifdef CONFIG_SFC_SRIOV
                efx_siena_sriov_tx_flush_done(efx, event);
+#endif
                break;
        case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
                netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
                           channel->channel, ev_sub_data);
                efx_farch_handle_rx_flush_done(efx, event);
+#ifdef CONFIG_SFC_SRIOV
                efx_siena_sriov_rx_flush_done(efx, event);
+#endif
                break;
        case FSE_AZ_EVQ_INIT_DONE_EV:
                netif_dbg(efx, hw, efx->net_dev,
@@ -1242,8 +1248,11 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
                                  " RX Q %d is disabled.\n", ev_sub_data,
                                  ev_sub_data);
                        efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
-               } else
+               }
+#ifdef CONFIG_SFC_SRIOV
+               else
                        efx_siena_sriov_desc_fetch_err(efx, ev_sub_data);
+#endif
                break;
        case FSE_BZ_TX_DSC_ERROR_EV:
                if (ev_sub_data < EFX_VI_BASE) {
@@ -1252,8 +1261,11 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
                                  " TX Q %d is disabled.\n", ev_sub_data,
                                  ev_sub_data);
                        efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
-               } else
+               }
+#ifdef CONFIG_SFC_SRIOV
+               else
                        efx_siena_sriov_desc_fetch_err(efx, ev_sub_data);
+#endif
                break;
        default:
                netif_vdbg(efx, hw, efx->net_dev,
@@ -1317,9 +1329,11 @@ int efx_farch_ev_process(struct efx_channel *channel, int budget)
                case FSE_AZ_EV_CODE_DRIVER_EV:
                        efx_farch_handle_driver_event(channel, &event);
                        break;
+#ifdef CONFIG_SFC_SRIOV
                case FSE_CZ_EV_CODE_USER_EV:
                        efx_siena_sriov_event(channel, &event);
                        break;
+#endif
                case FSE_CZ_EV_CODE_MCDI_EV:
                        efx_mcdi_process_event(channel, &event);
                        break;
@@ -1685,28 +1699,32 @@ void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
        vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
 
 #ifdef CONFIG_SFC_SRIOV
-       if (efx->type->sriov_wanted(efx)) {
-               unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit;
-
-               nic_data->vf_buftbl_base = buftbl_min;
-
-               vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
-               vi_count = max(vi_count, EFX_VI_BASE);
-               buftbl_free = (sram_lim_qw - buftbl_min -
-                              vi_count * vi_dc_entries);
-
-               entries_per_vf = ((vi_dc_entries + EFX_VF_BUFTBL_PER_VI) *
-                                 efx_vf_size(efx));
-               vf_limit = min(buftbl_free / entries_per_vf,
-                              (1024U - EFX_VI_BASE) >> efx->vi_scale);
-
-               if (efx->vf_count > vf_limit) {
-                       netif_err(efx, probe, efx->net_dev,
-                                 "Reducing VF count from from %d to %d\n",
-                                 efx->vf_count, vf_limit);
-                       efx->vf_count = vf_limit;
+       if (efx->type->sriov_wanted) {
+               if (efx->type->sriov_wanted(efx)) {
+                       unsigned vi_dc_entries, buftbl_free;
+                       unsigned entries_per_vf, vf_limit;
+
+                       nic_data->vf_buftbl_base = buftbl_min;
+
+                       vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
+                       vi_count = max(vi_count, EFX_VI_BASE);
+                       buftbl_free = (sram_lim_qw - buftbl_min -
+                                      vi_count * vi_dc_entries);
+
+                       entries_per_vf = ((vi_dc_entries +
+                                          EFX_VF_BUFTBL_PER_VI) *
+                                         efx_vf_size(efx));
+                       vf_limit = min(buftbl_free / entries_per_vf,
+                                      (1024U - EFX_VI_BASE) >> efx->vi_scale);
+
+                       if (efx->vf_count > vf_limit) {
+                               netif_err(efx, probe, efx->net_dev,
+                                         "Reducing VF count from %d to %d\n",
+                                         efx->vf_count, vf_limit);
+                               efx->vf_count = vf_limit;
+                       }
+                       vi_count += efx->vf_count * efx_vf_size(efx);
                }
-               vi_count += efx->vf_count * efx_vf_size(efx);
        }
 #endif
 
index d37928f01949d1473938011a23635a8e5a71a8a8..81640f8bb811b099f6b2afb55cbb55f004c1096e 100644 (file)
@@ -8,6 +8,7 @@
  */
 
 #include <linux/delay.h>
+#include <linux/moduleparam.h>
 #include <asm/cmpxchg.h>
 #include "net_driver.h"
 #include "nic.h"
@@ -54,18 +55,32 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
 static bool efx_mcdi_poll_once(struct efx_nic *efx);
 static void efx_mcdi_abandon(struct efx_nic *efx);
 
+#ifdef CONFIG_SFC_MCDI_LOGGING
+static bool mcdi_logging_default;
+module_param(mcdi_logging_default, bool, 0644);
+MODULE_PARM_DESC(mcdi_logging_default,
+                "Enable MCDI logging on newly-probed functions");
+#endif
+
 int efx_mcdi_init(struct efx_nic *efx)
 {
        struct efx_mcdi_iface *mcdi;
        bool already_attached;
-       int rc;
+       int rc = -ENOMEM;
 
        efx->mcdi = kzalloc(sizeof(*efx->mcdi), GFP_KERNEL);
        if (!efx->mcdi)
-               return -ENOMEM;
+               goto fail;
 
        mcdi = efx_mcdi(efx);
        mcdi->efx = efx;
+#ifdef CONFIG_SFC_MCDI_LOGGING
+       /* consuming code assumes buffer is page-sized */
+       mcdi->logging_buffer = (char *)__get_free_page(GFP_KERNEL);
+       if (!mcdi->logging_buffer)
+               goto fail1;
+       mcdi->logging_enabled = mcdi_logging_default;
+#endif
        init_waitqueue_head(&mcdi->wq);
        spin_lock_init(&mcdi->iface_lock);
        mcdi->state = MCDI_STATE_QUIESCENT;
@@ -81,7 +96,7 @@ int efx_mcdi_init(struct efx_nic *efx)
        /* Recover from a failed assertion before probing */
        rc = efx_mcdi_handle_assertion(efx);
        if (rc)
-               return rc;
+               goto fail2;
 
        /* Let the MC (and BMC, if this is a LOM) know that the driver
         * is loaded. We should do this before we reset the NIC.
@@ -90,7 +105,7 @@ int efx_mcdi_init(struct efx_nic *efx)
        if (rc) {
                netif_err(efx, probe, efx->net_dev,
                          "Unable to register driver with MCPU\n");
-               return rc;
+               goto fail2;
        }
        if (already_attached)
                /* Not a fatal error */
@@ -102,6 +117,15 @@ int efx_mcdi_init(struct efx_nic *efx)
                efx->primary = efx;
 
        return 0;
+fail2:
+#ifdef CONFIG_SFC_MCDI_LOGGING
+       free_page((unsigned long)mcdi->logging_buffer);
+fail1:
+#endif
+       kfree(efx->mcdi);
+       efx->mcdi = NULL;
+fail:
+       return rc;
 }
 
 void efx_mcdi_fini(struct efx_nic *efx)
@@ -114,6 +138,10 @@ void efx_mcdi_fini(struct efx_nic *efx)
        /* Relinquish the device (back to the BMC, if this is a LOM) */
        efx_mcdi_drv_attach(efx, false, NULL);
 
+#ifdef CONFIG_SFC_MCDI_LOGGING
+       free_page((unsigned long)efx->mcdi->iface.logging_buffer);
+#endif
+
        kfree(efx->mcdi);
 }
 
@@ -121,6 +149,9 @@ static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd,
                                  const efx_dword_t *inbuf, size_t inlen)
 {
        struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+#ifdef CONFIG_SFC_MCDI_LOGGING
+       char *buf = mcdi->logging_buffer; /* page-sized */
+#endif
        efx_dword_t hdr[2];
        size_t hdr_len;
        u32 xflags, seqno;
@@ -165,6 +196,31 @@ static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd,
                hdr_len = 8;
        }
 
+#ifdef CONFIG_SFC_MCDI_LOGGING
+       if (mcdi->logging_enabled && !WARN_ON_ONCE(!buf)) {
+               int bytes = 0;
+               int i;
+               /* Lengths should always be a whole number of dwords, so scream
+                * if they're not.
+                */
+               WARN_ON_ONCE(hdr_len % 4);
+               WARN_ON_ONCE(inlen % 4);
+
+               /* We own the logging buffer, as only one MCDI can be in
+                * progress on a NIC at any one time.  So no need for locking.
+                */
+               for (i = 0; i < hdr_len / 4 && bytes < PAGE_SIZE; i++)
+                       bytes += snprintf(buf + bytes, PAGE_SIZE - bytes,
+                                         " %08x", le32_to_cpu(hdr[i].u32[0]));
+
+               for (i = 0; i < inlen / 4 && bytes < PAGE_SIZE; i++)
+                       bytes += snprintf(buf + bytes, PAGE_SIZE - bytes,
+                                         " %08x", le32_to_cpu(inbuf[i].u32[0]));
+
+               netif_info(efx, hw, efx->net_dev, "MCDI RPC REQ:%s\n", buf);
+       }
+#endif
+
        efx->type->mcdi_request(efx, hdr, hdr_len, inbuf, inlen);
 
        mcdi->new_epoch = false;
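One subtlety in the logging loop above: snprintf() returns the length that would have been written, not the length actually written, so bytes can legitimately step past PAGE_SIZE on a truncated write; the bytes < PAGE_SIZE loop guards are what stop further output. A small userspace demonstration of that snprintf property:

	#include <stdio.h>

	int main(void)
	{
		char buf[8];
		int bytes = 0;

		/* " %08x" wants 9 chars plus NUL; only 7 plus NUL fit */
		bytes += snprintf(buf + bytes, sizeof(buf) - bytes,
				  " %08x", 0x12345678u);
		printf("bytes=%d buf=\"%s\"\n", bytes, buf);
		/* prints: bytes=9 buf=" 123456" */
		return 0;
	}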
@@ -206,6 +262,9 @@ static void efx_mcdi_read_response_header(struct efx_nic *efx)
 {
        struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
        unsigned int respseq, respcmd, error;
+#ifdef CONFIG_SFC_MCDI_LOGGING
+       char *buf = mcdi->logging_buffer; /* page-sized */
+#endif
        efx_dword_t hdr;
 
        efx->type->mcdi_read_response(efx, &hdr, 0, 4);
@@ -223,6 +282,39 @@ static void efx_mcdi_read_response_header(struct efx_nic *efx)
                        EFX_DWORD_FIELD(hdr, MC_CMD_V2_EXTN_IN_ACTUAL_LEN);
        }
 
+#ifdef CONFIG_SFC_MCDI_LOGGING
+       if (mcdi->logging_enabled && !WARN_ON_ONCE(!buf)) {
+               size_t hdr_len, data_len;
+               int bytes = 0;
+               int i;
+
+               WARN_ON_ONCE(mcdi->resp_hdr_len % 4);
+               hdr_len = mcdi->resp_hdr_len / 4;
+               /* MCDI_DECLARE_BUF ensures that the underlying buffer is padded
+                * to dword size, and the MCDI buffer is always dword size
+                */
+               data_len = DIV_ROUND_UP(mcdi->resp_data_len, 4);
+
+               /* We own the logging buffer, as only one MCDI can be in
+                * progress on a NIC at any one time.  So no need for locking.
+                */
+               for (i = 0; i < hdr_len && bytes < PAGE_SIZE; i++) {
+                       efx->type->mcdi_read_response(efx, &hdr, (i * 4), 4);
+                       bytes += snprintf(buf + bytes, PAGE_SIZE - bytes,
+                                         " %08x", le32_to_cpu(hdr.u32[0]));
+               }
+
+               for (i = 0; i < data_len && bytes < PAGE_SIZE; i++) {
+                       efx->type->mcdi_read_response(efx, &hdr,
+                                       mcdi->resp_hdr_len + (i * 4), 4);
+                       bytes += snprintf(buf + bytes, PAGE_SIZE - bytes,
+                                         " %08x", le32_to_cpu(hdr.u32[0]));
+               }
+
+               netif_info(efx, hw, efx->net_dev, "MCDI RPC RESP:%s\n", buf);
+       }
+#endif
+
        if (error && mcdi->resp_data_len == 0) {
                netif_err(efx, hw, efx->net_dev, "MC rebooted\n");
                mcdi->resprc = -EIO;
@@ -406,7 +498,7 @@ static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout)
        struct efx_mcdi_async_param *async;
        size_t hdr_len, data_len, err_len;
        efx_dword_t *outbuf;
-       MCDI_DECLARE_BUF_OUT_OR_ERR(errbuf, 0);
+       MCDI_DECLARE_BUF_ERR(errbuf);
        int rc;
 
        if (cmpxchg(&mcdi->state,
@@ -534,7 +626,7 @@ static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
                                size_t *outlen_actual, bool quiet)
 {
        struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
-       MCDI_DECLARE_BUF_OUT_OR_ERR(errbuf, 0);
+       MCDI_DECLARE_BUF_ERR(errbuf);
        int rc;
 
        if (mcdi->mode == MCDI_MODE_POLL)
@@ -1035,7 +1127,9 @@ void efx_mcdi_process_event(struct efx_channel *channel,
                /* MAC stats are gathered lazily.  We can ignore this. */
                break;
        case MCDI_EVENT_CODE_FLR:
-               efx_siena_sriov_flr(efx, MCDI_EVENT_FIELD(*event, FLR_VF));
+               if (efx->type->sriov_flr)
+                       efx->type->sriov_flr(efx,
+                                            MCDI_EVENT_FIELD(*event, FLR_VF));
                break;
        case MCDI_EVENT_CODE_PTP_RX:
        case MCDI_EVENT_CODE_PTP_FAULT:
@@ -1081,9 +1175,7 @@ void efx_mcdi_process_event(struct efx_channel *channel,
 
 void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
 {
-       MCDI_DECLARE_BUF(outbuf,
-                        max(MC_CMD_GET_VERSION_OUT_LEN,
-                            MC_CMD_GET_CAPABILITIES_OUT_LEN));
+       MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VERSION_OUT_LEN);
        size_t outlength;
        const __le16 *ver_words;
        size_t offset;
@@ -1108,19 +1200,11 @@ void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
         * single version.  Report which variants are running.
         */
        if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) {
-               BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0);
-               rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
-                                 outbuf, sizeof(outbuf), &outlength);
-               if (rc || outlength < MC_CMD_GET_CAPABILITIES_OUT_LEN)
-                       offset += snprintf(
-                               buf + offset, len - offset, " rx? tx?");
-               else
-                       offset += snprintf(
-                               buf + offset, len - offset, " rx%x tx%x",
-                               MCDI_WORD(outbuf,
-                                         GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID),
-                               MCDI_WORD(outbuf,
-                                         GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID));
+               struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+               offset += snprintf(buf + offset, len - offset, " rx%x tx%x",
+                                  nic_data->rx_dpcpu_fw_id,
+                                  nic_data->tx_dpcpu_fw_id);
 
                /* It's theoretically possible for the string to exceed 31
                 * characters, though in practice the first three version
@@ -1150,10 +1234,26 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
        MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1);
        MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_LOW_LATENCY);
 
-       rc = efx_mcdi_rpc(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf),
-                         outbuf, sizeof(outbuf), &outlen);
-       if (rc)
+       rc = efx_mcdi_rpc_quiet(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf),
+                               outbuf, sizeof(outbuf), &outlen);
+       /* If we're not the primary PF, trying to ATTACH with a FIRMWARE_ID
+        * specified will fail with EPERM, and we have to tell the MC we don't
+        * care what firmware we get.
+        */
+       if (rc == -EPERM) {
+               netif_dbg(efx, probe, efx->net_dev,
+                         "efx_mcdi_drv_attach with fw-variant setting failed EPERM, trying without it\n");
+               MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID,
+                              MC_CMD_FW_DONT_CARE);
+               rc = efx_mcdi_rpc_quiet(efx, MC_CMD_DRV_ATTACH, inbuf,
+                                       sizeof(inbuf), outbuf, sizeof(outbuf),
+                                       &outlen);
+       }
+       if (rc) {
+               efx_mcdi_display_error(efx, MC_CMD_DRV_ATTACH, sizeof(inbuf),
+                                      outbuf, outlen, rc);
                goto fail;
+       }
        if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) {
                rc = -EIO;
                goto fail;
@@ -1178,16 +1278,6 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
         * and are completely trusted by firmware.  Abort probing
         * if that's not true for this function.
         */
-       if (driver_operating &&
-           (efx->mcdi->fn_flags &
-            (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
-             1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) !=
-           (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
-            1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) {
-               netif_err(efx, probe, efx->net_dev,
-                         "This driver version only supports one function per port\n");
-               return -ENODEV;
-       }
 
        if (was_attached != NULL)
                *was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE);
@@ -1385,10 +1475,13 @@ fail1:
        return rc;
 }
 
+/* Returns 1 if an assertion was read, 0 if no assertion had fired,
+ * negative on error.
+ */
 static int efx_mcdi_read_assertion(struct efx_nic *efx)
 {
        MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ASSERTS_IN_LEN);
-       MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN);
+       MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN);
        unsigned int flags, index;
        const char *reason;
        size_t outlen;
@@ -1406,6 +1499,8 @@ static int efx_mcdi_read_assertion(struct efx_nic *efx)
                rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_ASSERTS,
                                        inbuf, MC_CMD_GET_ASSERTS_IN_LEN,
                                        outbuf, sizeof(outbuf), &outlen);
+               if (rc == -EPERM)
+                       return 0;
        } while ((rc == -EINTR || rc == -EIO) && retry-- > 0);
 
        if (rc) {
@@ -1443,24 +1538,31 @@ static int efx_mcdi_read_assertion(struct efx_nic *efx)
                          MCDI_ARRAY_DWORD(outbuf, GET_ASSERTS_OUT_GP_REGS_OFFS,
                                           index));
 
-       return 0;
+       return 1;
 }
 
-static void efx_mcdi_exit_assertion(struct efx_nic *efx)
+static int efx_mcdi_exit_assertion(struct efx_nic *efx)
 {
        MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
+       int rc;
 
        /* If the MC is running debug firmware, it might now be
         * waiting for a debugger to attach, but we just want it to
         * reboot.  We set a flag that makes the command a no-op if it
-        * has already done so.  We don't know what return code to
-        * expect (0 or -EIO), so ignore it.
+        * has already done so.
+        * The MCDI will thus return either 0 or -EIO.
         */
        BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
        MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS,
                       MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
-       (void) efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN,
-                           NULL, 0, NULL);
+       rc = efx_mcdi_rpc_quiet(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN,
+                               NULL, 0, NULL);
+       if (rc == -EIO)
+               rc = 0;
+       if (rc)
+               efx_mcdi_display_error(efx, MC_CMD_REBOOT, MC_CMD_REBOOT_IN_LEN,
+                                      NULL, 0, rc);
+       return rc;
 }
 
 int efx_mcdi_handle_assertion(struct efx_nic *efx)
@@ -1468,12 +1570,10 @@ int efx_mcdi_handle_assertion(struct efx_nic *efx)
        int rc;
 
        rc = efx_mcdi_read_assertion(efx);
-       if (rc)
+       if (rc <= 0)
                return rc;
 
-       efx_mcdi_exit_assertion(efx);
-
-       return 0;
+       return efx_mcdi_exit_assertion(efx);
 }
 
 void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
@@ -1550,7 +1650,9 @@ int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method)
        if (rc)
                return rc;
 
-       if (method == RESET_TYPE_WORLD)
+       if (method == RESET_TYPE_DATAPATH)
+               return 0;
+       else if (method == RESET_TYPE_WORLD)
                return efx_mcdi_reset_mc(efx);
        else
                return efx_mcdi_reset_func(efx);
@@ -1688,6 +1790,36 @@ int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled)
                            NULL, 0, NULL);
 }
 
+int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out,
+                            unsigned int *enabled_out)
+{
+       MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_WORKAROUNDS_OUT_LEN);
+       size_t outlen;
+       int rc;
+
+       rc = efx_mcdi_rpc(efx, MC_CMD_GET_WORKAROUNDS, NULL, 0,
+                         outbuf, sizeof(outbuf), &outlen);
+       if (rc)
+               goto fail;
+
+       if (outlen < MC_CMD_GET_WORKAROUNDS_OUT_LEN) {
+               rc = -EIO;
+               goto fail;
+       }
+
+       if (impl_out)
+               *impl_out = MCDI_DWORD(outbuf, GET_WORKAROUNDS_OUT_IMPLEMENTED);
+
+       if (enabled_out)
+               *enabled_out = MCDI_DWORD(outbuf, GET_WORKAROUNDS_OUT_ENABLED);
+
+       return 0;
+
+fail:
+       netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+       return rc;
+}
+
 #ifdef CONFIG_SFC_MTD
 
 #define EFX_MCDI_NVRAM_LEN_MAX 128
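A usage sketch for the new helper (efx_mcdi_get_workarounds() and the BUG35388 bit are both added by this patch, the latter in the mcdi_pcol.h hunk below; the wrapper function itself is illustrative):

	static bool efx_bug35388_workaround_enabled(struct efx_nic *efx)
	{
		unsigned int implemented, enabled;

		if (efx_mcdi_get_workarounds(efx, &implemented, &enabled))
			return false;	/* treat a failed query as "off" */
		return enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388;
	}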
index 56465f7465a22b09c9a5fdd1c1dd8b1fa90da399..1838afe2da920c59f7ca43744c7a27a0f4e62a6c 100644 (file)
@@ -58,6 +58,8 @@ enum efx_mcdi_mode {
  *     enabled
  * @async_list: Queue of asynchronous requests
  * @async_timer: Timer for asynchronous request timeout
+ * @logging_buffer: buffer that may be used to build MCDI tracing messages
+ * @logging_enabled: whether to trace MCDI
  */
 struct efx_mcdi_iface {
        struct efx_nic *efx;
@@ -74,6 +76,10 @@ struct efx_mcdi_iface {
        spinlock_t async_lock;
        struct list_head async_list;
        struct timer_list async_timer;
+#ifdef CONFIG_SFC_MCDI_LOGGING
+       char *logging_buffer;
+       bool logging_enabled;
+#endif
 };
 
 struct efx_mcdi_mon {
@@ -176,10 +182,12 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
  * 32-bit-aligned.  Also, on Siena we must copy to the MC shared
  * memory strictly 32 bits at a time, so add any necessary padding.
  */
-#define MCDI_DECLARE_BUF(_name, _len)                                  \
+#define _MCDI_DECLARE_BUF(_name, _len)                                 \
        efx_dword_t _name[DIV_ROUND_UP(_len, 4)]
-#define MCDI_DECLARE_BUF_OUT_OR_ERR(_name, _len)                       \
-       MCDI_DECLARE_BUF(_name, max_t(size_t, _len, 8))
+#define MCDI_DECLARE_BUF(_name, _len)                                  \
+       _MCDI_DECLARE_BUF(_name, _len) = {{{0}}}
+#define MCDI_DECLARE_BUF_ERR(_name)                                    \
+       MCDI_DECLARE_BUF(_name, 8)
 #define _MCDI_PTR(_buf, _offset)                                       \
        ((u8 *)(_buf) + (_offset))
 #define MCDI_PTR(_buf, _field)                                         \
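With the reshuffle above, every MCDI buffer is zero-initialised at its declaration. For example (direct expansion of the new macros):

	/* MCDI_DECLARE_BUF(inbuf, 12) expands via _MCDI_DECLARE_BUF to: */
	efx_dword_t inbuf[DIV_ROUND_UP(12, 4)] = {{{0}}};

	/* and MCDI_DECLARE_BUF_ERR(errbuf) is just an 8-byte (2-dword)
	 * zeroed buffer, replacing the old MCDI_DECLARE_BUF_OUT_OR_ERR(_, 0)
	 * callers converted in mcdi.c above.
	 */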
@@ -339,6 +347,8 @@ bool efx_mcdi_mac_check_fault(struct efx_nic *efx);
 enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason);
 int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method);
 int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled);
+int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out,
+                            unsigned int *enabled_out);
 
 #ifdef CONFIG_SFC_MCDI_MON
 int efx_mcdi_mon_probe(struct efx_nic *efx);
index e028de10e1b743d2e9adf6334d0ad3cbdb176001..45fca9fc66b7c9b2512abef1d3363dfbf1ab5344 100644 (file)
  */
 #define MC_CMD_READ32 0x1
 
+#define MC_CMD_0x1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_READ32_IN msgrequest */
 #define    MC_CMD_READ32_IN_LEN 8
 #define       MC_CMD_READ32_IN_ADDR_OFST 0
  */
 #define MC_CMD_WRITE32 0x2
 
+#define MC_CMD_0x2_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_WRITE32_IN msgrequest */
 #define    MC_CMD_WRITE32_IN_LENMIN 8
 #define    MC_CMD_WRITE32_IN_LENMAX 252
  */
 #define MC_CMD_COPYCODE 0x3
 
+#define MC_CMD_0x3_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_COPYCODE_IN msgrequest */
 #define    MC_CMD_COPYCODE_IN_LEN 16
 /* Source address */
  */
 #define MC_CMD_SET_FUNC 0x4
 
+#define MC_CMD_0x4_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_SET_FUNC_IN msgrequest */
 #define    MC_CMD_SET_FUNC_IN_LEN 4
 /* Set function */
  */
 #define MC_CMD_GET_BOOT_STATUS 0x5
 
+#define MC_CMD_0x5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_GET_BOOT_STATUS_IN msgrequest */
 #define    MC_CMD_GET_BOOT_STATUS_IN_LEN 0
 
  */
 #define MC_CMD_GET_ASSERTS 0x6
 
+#define MC_CMD_0x6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_GET_ASSERTS_IN msgrequest */
 #define    MC_CMD_GET_ASSERTS_IN_LEN 4
 /* Set to clear assertion */
  */
 #define MC_CMD_LOG_CTRL 0x7
 
+#define MC_CMD_0x7_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_LOG_CTRL_IN msgrequest */
 #define    MC_CMD_LOG_CTRL_IN_LEN 8
 /* Log destination */
  */
 #define MC_CMD_GET_VERSION 0x8
 
+#define MC_CMD_0x8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_GET_VERSION_IN msgrequest */
 #define    MC_CMD_GET_VERSION_IN_LEN 0
 
  */
 #define MC_CMD_PTP 0xb
 
+#define MC_CMD_0xb_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_PTP_IN msgrequest */
 #define    MC_CMD_PTP_IN_LEN 1
 /* PTP operation code */
  */
 #define MC_CMD_CSR_READ32 0xc
 
+#define MC_CMD_0xc_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_CSR_READ32_IN msgrequest */
 #define    MC_CMD_CSR_READ32_IN_LEN 12
 /* Address */
  */
 #define MC_CMD_CSR_WRITE32 0xd
 
+#define MC_CMD_0xd_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_CSR_WRITE32_IN msgrequest */
 #define    MC_CMD_CSR_WRITE32_IN_LENMIN 12
 #define    MC_CMD_CSR_WRITE32_IN_LENMAX 252
  */
 #define MC_CMD_HP 0x54
 
+#define MC_CMD_0x54_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_HP_IN msgrequest */
 #define    MC_CMD_HP_IN_LEN 16
 /* HP OCSD sub-command. When address is not NULL, request activation of OCSD at
  */
 #define MC_CMD_STACKINFO 0xf
 
+#define MC_CMD_0xf_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_STACKINFO_IN msgrequest */
 #define    MC_CMD_STACKINFO_IN_LEN 0
 
  */
 #define MC_CMD_MDIO_READ 0x10
 
+#define MC_CMD_0x10_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_MDIO_READ_IN msgrequest */
 #define    MC_CMD_MDIO_READ_IN_LEN 16
 /* Bus number; there are two MDIO buses: one for the internal PHY, and one for
  */
 #define MC_CMD_MDIO_WRITE 0x11
 
+#define MC_CMD_0x11_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_MDIO_WRITE_IN msgrequest */
 #define    MC_CMD_MDIO_WRITE_IN_LEN 20
 /* Bus number; there are two MDIO buses: one for the internal PHY, and one for
  */
 #define MC_CMD_DBI_WRITE 0x12
 
+#define MC_CMD_0x12_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_DBI_WRITE_IN msgrequest */
 #define    MC_CMD_DBI_WRITE_IN_LENMIN 12
 #define    MC_CMD_DBI_WRITE_IN_LENMAX 252
  */
 #define MC_CMD_GET_BOARD_CFG 0x18
 
+#define MC_CMD_0x18_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_GET_BOARD_CFG_IN msgrequest */
 #define    MC_CMD_GET_BOARD_CFG_IN_LEN 0
 
  */
 #define MC_CMD_DBI_READX 0x19
 
+#define MC_CMD_0x19_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_DBI_READX_IN msgrequest */
 #define    MC_CMD_DBI_READX_IN_LENMIN 8
 #define    MC_CMD_DBI_READX_IN_LENMAX 248
  */
 #define MC_CMD_SET_RAND_SEED 0x1a
 
+#define MC_CMD_0x1a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_SET_RAND_SEED_IN msgrequest */
 #define    MC_CMD_SET_RAND_SEED_IN_LEN 16
 /* Seed value. */
  */
 #define MC_CMD_DRV_ATTACH 0x1c
 
+#define MC_CMD_0x1c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_DRV_ATTACH_IN msgrequest */
 #define    MC_CMD_DRV_ATTACH_IN_LEN 12
 /* new state (0=detached, 1=attached) to set if UPDATE=1 */
 #define          MC_CMD_FW_FULL_FEATURED 0x0
 /* enum: Prefer to use firmware with fewer features but lower latency */
 #define          MC_CMD_FW_LOW_LATENCY 0x1
+/* enum: Only this option is allowed for non-admin functions */
+#define          MC_CMD_FW_DONT_CARE  0xffffffff
 
 /* MC_CMD_DRV_ATTACH_OUT msgresponse */
 #define    MC_CMD_DRV_ATTACH_OUT_LEN 4
  */
 #define MC_CMD_PORT_RESET 0x20
 
+#define MC_CMD_0x20_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_PORT_RESET_IN msgrequest */
 #define    MC_CMD_PORT_RESET_IN_LEN 0
 
  * extended version of the deprecated MC_CMD_PORT_RESET with added fields.
  */
 #define MC_CMD_ENTITY_RESET 0x20
+/*      MC_CMD_0x20_PRIVILEGE_CTG SRIOV_CTG_GENERAL */
 
 /* MC_CMD_ENTITY_RESET_IN msgrequest */
 #define    MC_CMD_ENTITY_RESET_IN_LEN 4
  */
 #define MC_CMD_PUTS 0x23
 
+#define MC_CMD_0x23_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_PUTS_IN msgrequest */
 #define    MC_CMD_PUTS_IN_LENMIN 13
 #define    MC_CMD_PUTS_IN_LENMAX 252
  */
 #define MC_CMD_GET_PHY_CFG 0x24
 
+#define MC_CMD_0x24_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_GET_PHY_CFG_IN msgrequest */
 #define    MC_CMD_GET_PHY_CFG_IN_LEN 0
 
  */
 #define MC_CMD_START_BIST 0x25
 
+#define MC_CMD_0x25_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_START_BIST_IN msgrequest */
 #define    MC_CMD_START_BIST_IN_LEN 4
 /* Type of test. */
  */
 #define MC_CMD_POLL_BIST 0x26
 
+#define MC_CMD_0x26_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_POLL_BIST_IN msgrequest */
 #define    MC_CMD_POLL_BIST_IN_LEN 0
 
  */
 #define MC_CMD_GET_LOOPBACK_MODES 0x28
 
+#define MC_CMD_0x28_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_GET_LOOPBACK_MODES_IN msgrequest */
 #define    MC_CMD_GET_LOOPBACK_MODES_IN_LEN 0
 
  */
 #define MC_CMD_GET_LINK 0x29
 
+#define MC_CMD_0x29_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_GET_LINK_IN msgrequest */
 #define    MC_CMD_GET_LINK_IN_LEN 0
 
  */
 #define MC_CMD_SET_LINK 0x2a
 
+#define MC_CMD_0x2a_PRIVILEGE_CTG SRIOV_CTG_LINK
+
 /* MC_CMD_SET_LINK_IN msgrequest */
 #define    MC_CMD_SET_LINK_IN_LEN 16
 /* ??? */
  */
 #define MC_CMD_SET_ID_LED 0x2b
 
+#define MC_CMD_0x2b_PRIVILEGE_CTG SRIOV_CTG_LINK
+
 /* MC_CMD_SET_ID_LED_IN msgrequest */
 #define    MC_CMD_SET_ID_LED_IN_LEN 4
 /* Set LED state. */
  */
 #define MC_CMD_SET_MAC 0x2c
 
+#define MC_CMD_0x2c_PRIVILEGE_CTG SRIOV_CTG_LINK
+
 /* MC_CMD_SET_MAC_IN msgrequest */
 #define    MC_CMD_SET_MAC_IN_LEN 24
 /* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of
  */
 #define MC_CMD_PHY_STATS 0x2d
 
+#define MC_CMD_0x2d_PRIVILEGE_CTG SRIOV_CTG_LINK
+
 /* MC_CMD_PHY_STATS_IN msgrequest */
 #define    MC_CMD_PHY_STATS_IN_LEN 8
 /* ??? */
  */
 #define MC_CMD_MAC_STATS 0x2e
 
+#define MC_CMD_0x2e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_MAC_STATS_IN msgrequest */
-#define    MC_CMD_MAC_STATS_IN_LEN 16
+#define    MC_CMD_MAC_STATS_IN_LEN 20
 /* ??? */
 #define       MC_CMD_MAC_STATS_IN_DMA_ADDR_OFST 0
 #define       MC_CMD_MAC_STATS_IN_DMA_ADDR_LEN 8
 #define        MC_CMD_MAC_STATS_IN_PERIOD_MS_LBN 16
 #define        MC_CMD_MAC_STATS_IN_PERIOD_MS_WIDTH 16
 #define       MC_CMD_MAC_STATS_IN_DMA_LEN_OFST 12
+/* port id so vadapter stats can be provided */
+#define       MC_CMD_MAC_STATS_IN_PORT_ID_OFST 16
 
 /* MC_CMD_MAC_STATS_OUT_DMA msgresponse */
 #define    MC_CMD_MAC_STATS_OUT_DMA_LEN 0
 /* enum: RXDP counter: Number of times an emergency descriptor fetch was
  * performed. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
  */
-#define          MC_CMD_MAC_RXDP_EMERGENCY_FETCH_CONDITIONS  0x47
+#define          MC_CMD_MAC_RXDP_HLB_FETCH_CONDITIONS  0x47
 /* enum: RXDP counter: Number of times the DPCPU waited for an existing
  * descriptor fetch. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
  */
-#define          MC_CMD_MAC_RXDP_EMERGENCY_WAIT_CONDITIONS  0x48
+#define          MC_CMD_MAC_RXDP_HLB_WAIT_CONDITIONS  0x48
+#define          MC_CMD_MAC_VADAPTER_RX_DMABUF_START  0x4c /* enum */
+#define          MC_CMD_MAC_VADAPTER_RX_UNICAST_PACKETS  0x4c /* enum */
+#define          MC_CMD_MAC_VADAPTER_RX_UNICAST_BYTES  0x4d /* enum */
+#define          MC_CMD_MAC_VADAPTER_RX_MULTICAST_PACKETS  0x4e /* enum */
+#define          MC_CMD_MAC_VADAPTER_RX_MULTICAST_BYTES  0x4f /* enum */
+#define          MC_CMD_MAC_VADAPTER_RX_BROADCAST_PACKETS  0x50 /* enum */
+#define          MC_CMD_MAC_VADAPTER_RX_BROADCAST_BYTES  0x51 /* enum */
+#define          MC_CMD_MAC_VADAPTER_RX_BAD_PACKETS  0x52 /* enum */
+#define          MC_CMD_MAC_VADAPTER_RX_BAD_BYTES  0x53 /* enum */
+#define          MC_CMD_MAC_VADAPTER_RX_OVERFLOW  0x54 /* enum */
+#define          MC_CMD_MAC_VADAPTER_TX_DMABUF_START  0x57 /* enum */
+#define          MC_CMD_MAC_VADAPTER_TX_UNICAST_PACKETS  0x57 /* enum */
+#define          MC_CMD_MAC_VADAPTER_TX_UNICAST_BYTES  0x58 /* enum */
+#define          MC_CMD_MAC_VADAPTER_TX_MULTICAST_PACKETS  0x59 /* enum */
+#define          MC_CMD_MAC_VADAPTER_TX_MULTICAST_BYTES  0x5a /* enum */
+#define          MC_CMD_MAC_VADAPTER_TX_BROADCAST_PACKETS  0x5b /* enum */
+#define          MC_CMD_MAC_VADAPTER_TX_BROADCAST_BYTES  0x5c /* enum */
+#define          MC_CMD_MAC_VADAPTER_TX_BAD_PACKETS  0x5d /* enum */
+#define          MC_CMD_MAC_VADAPTER_TX_BAD_BYTES  0x5e /* enum */
+#define          MC_CMD_MAC_VADAPTER_TX_OVERFLOW  0x5f /* enum */
 /* enum: Start of GMAC stats buffer space, for Siena only. */
 #define          MC_CMD_GMAC_DMABUF_START  0x40
 /* enum: End of GMAC stats buffer space, for Siena only. */
  */
 #define MC_CMD_WOL_FILTER_SET 0x32
 
+#define MC_CMD_0x32_PRIVILEGE_CTG SRIOV_CTG_LINK
+
 /* MC_CMD_WOL_FILTER_SET_IN msgrequest */
 #define    MC_CMD_WOL_FILTER_SET_IN_LEN 192
 #define       MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0
  */
 #define MC_CMD_WOL_FILTER_REMOVE 0x33
 
+#define MC_CMD_0x33_PRIVILEGE_CTG SRIOV_CTG_LINK
+
 /* MC_CMD_WOL_FILTER_REMOVE_IN msgrequest */
 #define    MC_CMD_WOL_FILTER_REMOVE_IN_LEN 4
 #define       MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_OFST 0
  */
 #define MC_CMD_WOL_FILTER_RESET 0x34
 
+#define MC_CMD_0x34_PRIVILEGE_CTG SRIOV_CTG_LINK
+
 /* MC_CMD_WOL_FILTER_RESET_IN msgrequest */
 #define    MC_CMD_WOL_FILTER_RESET_IN_LEN 4
 #define       MC_CMD_WOL_FILTER_RESET_IN_MASK_OFST 0
  */
 #define MC_CMD_NVRAM_TYPES 0x36
 
+#define MC_CMD_0x36_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_NVRAM_TYPES_IN msgrequest */
 #define    MC_CMD_NVRAM_TYPES_IN_LEN 0
 
  */
 #define MC_CMD_NVRAM_INFO 0x37
 
+#define MC_CMD_0x37_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_NVRAM_INFO_IN msgrequest */
 #define    MC_CMD_NVRAM_INFO_IN_LEN 4
 #define       MC_CMD_NVRAM_INFO_IN_TYPE_OFST 0
  */
 #define MC_CMD_NVRAM_UPDATE_START 0x38
 
+#define MC_CMD_0x38_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_NVRAM_UPDATE_START_IN msgrequest */
 #define    MC_CMD_NVRAM_UPDATE_START_IN_LEN 4
 #define       MC_CMD_NVRAM_UPDATE_START_IN_TYPE_OFST 0
  */
 #define MC_CMD_NVRAM_READ 0x39
 
+#define MC_CMD_0x39_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_NVRAM_READ_IN msgrequest */
 #define    MC_CMD_NVRAM_READ_IN_LEN 12
 #define       MC_CMD_NVRAM_READ_IN_TYPE_OFST 0
  */
 #define MC_CMD_NVRAM_WRITE 0x3a
 
+#define MC_CMD_0x3a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_NVRAM_WRITE_IN msgrequest */
 #define    MC_CMD_NVRAM_WRITE_IN_LENMIN 13
 #define    MC_CMD_NVRAM_WRITE_IN_LENMAX 252
  */
 #define MC_CMD_NVRAM_ERASE 0x3b
 
+#define MC_CMD_0x3b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_NVRAM_ERASE_IN msgrequest */
 #define    MC_CMD_NVRAM_ERASE_IN_LEN 12
 #define       MC_CMD_NVRAM_ERASE_IN_TYPE_OFST 0
  */
 #define MC_CMD_NVRAM_UPDATE_FINISH 0x3c
 
+#define MC_CMD_0x3c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_NVRAM_UPDATE_FINISH_IN msgrequest */
 #define    MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN 8
 #define       MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_OFST 0
  */
 #define MC_CMD_REBOOT 0x3d
 
+#define MC_CMD_0x3d_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_REBOOT_IN msgrequest */
 #define    MC_CMD_REBOOT_IN_LEN 4
 #define       MC_CMD_REBOOT_IN_FLAGS_OFST 0
  */
 #define MC_CMD_REBOOT_MODE 0x3f
 
+#define MC_CMD_0x3f_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_REBOOT_MODE_IN msgrequest */
 #define    MC_CMD_REBOOT_MODE_IN_LEN 4
 #define       MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0
  */
 #define MC_CMD_SENSOR_INFO 0x41
 
+#define MC_CMD_0x41_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_SENSOR_INFO_IN msgrequest */
 #define    MC_CMD_SENSOR_INFO_IN_LEN 0
 
  */
 #define MC_CMD_READ_SENSORS 0x42
 
+#define MC_CMD_0x42_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_READ_SENSORS_IN msgrequest */
 #define    MC_CMD_READ_SENSORS_IN_LEN 8
 /* DMA address of host buffer for sensor readings (must be 4Kbyte aligned). */
  */
 #define MC_CMD_GET_PHY_STATE 0x43
 
+#define MC_CMD_0x43_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_GET_PHY_STATE_IN msgrequest */
 #define    MC_CMD_GET_PHY_STATE_IN_LEN 0
 
  */
 #define MC_CMD_WOL_FILTER_GET 0x45
 
+#define MC_CMD_0x45_PRIVILEGE_CTG SRIOV_CTG_LINK
+
 /* MC_CMD_WOL_FILTER_GET_IN msgrequest */
 #define    MC_CMD_WOL_FILTER_GET_IN_LEN 0
 
  */
 #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD 0x46
 
+#define MC_CMD_0x46_PRIVILEGE_CTG SRIOV_CTG_LINK
+
 /* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN msgrequest */
 #define    MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMIN 8
 #define    MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX 252
  */
 #define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD 0x47
 
+#define MC_CMD_0x47_PRIVILEGE_CTG SRIOV_CTG_LINK
+
 /* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN msgrequest */
 #define    MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_LEN 8
 #define       MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
  */
 #define MC_CMD_TESTASSERT 0x49
 
+#define MC_CMD_0x49_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_TESTASSERT_IN msgrequest */
 #define    MC_CMD_TESTASSERT_IN_LEN 0
 
  */
 #define MC_CMD_WORKAROUND 0x4a
 
+#define MC_CMD_0x4a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_WORKAROUND_IN msgrequest */
 #define    MC_CMD_WORKAROUND_IN_LEN 8
 #define       MC_CMD_WORKAROUND_IN_TYPE_OFST 0
  */
 #define MC_CMD_GET_PHY_MEDIA_INFO 0x4b
 
+#define MC_CMD_0x4b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_GET_PHY_MEDIA_INFO_IN msgrequest */
 #define    MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN 4
 #define       MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_OFST 0
  */
 #define MC_CMD_NVRAM_TEST 0x4c
 
+#define MC_CMD_0x4c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_NVRAM_TEST_IN msgrequest */
 #define    MC_CMD_NVRAM_TEST_IN_LEN 4
 #define       MC_CMD_NVRAM_TEST_IN_TYPE_OFST 0
  */
 #define MC_CMD_SENSOR_SET_LIMS 0x4e
 
+#define MC_CMD_0x4e_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_SENSOR_SET_LIMS_IN msgrequest */
 #define    MC_CMD_SENSOR_SET_LIMS_IN_LEN 20
 #define       MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0
  */
 #define MC_CMD_NVRAM_PARTITIONS 0x51
 
+#define MC_CMD_0x51_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_NVRAM_PARTITIONS_IN msgrequest */
 #define    MC_CMD_NVRAM_PARTITIONS_IN_LEN 0
 
  */
 #define MC_CMD_NVRAM_METADATA 0x52
 
+#define MC_CMD_0x52_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_NVRAM_METADATA_IN msgrequest */
 #define    MC_CMD_NVRAM_METADATA_IN_LEN 4
 /* Partition type ID code */
  */
 #define MC_CMD_GET_MAC_ADDRESSES 0x55
 
+#define MC_CMD_0x55_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_GET_MAC_ADDRESSES_IN msgrequest */
 #define    MC_CMD_GET_MAC_ADDRESSES_IN_LEN 0
 
 #define       LICENSED_APP_ID_ID_WIDTH 32
 
 
+/***********************************/
+/* MC_CMD_GET_WORKAROUNDS
+ * Read the list of all implemented and all currently enabled workarounds. The
+ * enums here must correspond with those in MC_CMD_WORKAROUND.
+ */
+#define MC_CMD_GET_WORKAROUNDS 0x59
+
+/* MC_CMD_GET_WORKAROUNDS_OUT msgresponse */
+#define    MC_CMD_GET_WORKAROUNDS_OUT_LEN 8
+/* Each workaround is represented by a single bit according to the enums below.
+ */
+#define       MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_OFST 0
+#define       MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_OFST 4
+/* enum: Bug 17230 work around. */
+#define          MC_CMD_GET_WORKAROUNDS_OUT_BUG17230 0x2
+/* enum: Bug 35388 work around (unsafe EVQ writes). */
+#define          MC_CMD_GET_WORKAROUNDS_OUT_BUG35388 0x4
+/* enum: Bug 35017 workaround (A64 tables must be identity map) */
+#define          MC_CMD_GET_WORKAROUNDS_OUT_BUG35017 0x8
+
+
+/***********************************/
+/* MC_CMD_LINK_STATE_MODE
+ * Read/set link state mode of a VF
+ */
+#define MC_CMD_LINK_STATE_MODE 0x5c
+
+#define MC_CMD_0x5c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LINK_STATE_MODE_IN msgrequest */
+#define    MC_CMD_LINK_STATE_MODE_IN_LEN 8
+/* The target function to have its link state mode read or set; must be a VF,
+ * e.g. VF 1,3 = 0x00030001
+ */
+#define       MC_CMD_LINK_STATE_MODE_IN_FUNCTION_OFST 0
+#define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_LBN 0
+#define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_WIDTH 16
+#define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_LBN 16
+#define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_WIDTH 16
+/* New link state mode to be set */
+#define       MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_OFST 4
+#define          MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO       0x0 /* enum */
+#define          MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP         0x1 /* enum */
+#define          MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN       0x2 /* enum */
+/* enum: Use this value to just read the existing setting without modifying it.
+ */
+#define          MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE         0xffffffff
+
+/* MC_CMD_LINK_STATE_MODE_OUT msgresponse */
+#define    MC_CMD_LINK_STATE_MODE_OUT_LEN 4
+#define       MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_OFST 0
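A worked example of the FUNCTION encoding: the PF number occupies bits 0..15 and the VF number bits 16..31, so the "VF 1,3" case in the comment above (PF 1, VF 3) packs as:

	u32 function = (3u << 16) | 1u;		/* == 0x00030001 */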
+
+
 /***********************************/
 /* MC_CMD_READ_REGS
  * Get a dump of the MCPU registers
  */
 #define MC_CMD_READ_REGS 0x50
 
+#define MC_CMD_0x50_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_READ_REGS_IN msgrequest */
 #define    MC_CMD_READ_REGS_IN_LEN 0
 
  */
 #define MC_CMD_INIT_EVQ 0x80
 
+#define MC_CMD_0x80_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_INIT_EVQ_IN msgrequest */
 #define    MC_CMD_INIT_EVQ_IN_LENMIN 44
 #define    MC_CMD_INIT_EVQ_IN_LENMAX 548
  */
 #define MC_CMD_INIT_RXQ 0x81
 
+#define MC_CMD_0x81_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_INIT_RXQ_IN msgrequest */
 #define    MC_CMD_INIT_RXQ_IN_LENMIN 36
 #define    MC_CMD_INIT_RXQ_IN_LENMAX 252
  */
 #define MC_CMD_INIT_TXQ 0x82
 
+#define MC_CMD_0x82_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_INIT_TXQ_IN msgrequest */
 #define    MC_CMD_INIT_TXQ_IN_LENMIN 36
 #define    MC_CMD_INIT_TXQ_IN_LENMAX 252
  */
 #define MC_CMD_FINI_EVQ 0x83
 
+#define MC_CMD_0x83_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_FINI_EVQ_IN msgrequest */
 #define    MC_CMD_FINI_EVQ_IN_LEN 4
 /* Instance of EVQ to destroy. Should be the same instance as that previously
  */
 #define MC_CMD_FINI_RXQ 0x84
 
+#define MC_CMD_0x84_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_FINI_RXQ_IN msgrequest */
 #define    MC_CMD_FINI_RXQ_IN_LEN 4
 /* Instance of RXQ to destroy */
  */
 #define MC_CMD_FINI_TXQ 0x85
 
+#define MC_CMD_0x85_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_FINI_TXQ_IN msgrequest */
 #define    MC_CMD_FINI_TXQ_IN_LEN 4
 /* Instance of TXQ to destroy */
  */
 #define MC_CMD_DRIVER_EVENT 0x86
 
+#define MC_CMD_0x86_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_DRIVER_EVENT_IN msgrequest */
 #define    MC_CMD_DRIVER_EVENT_IN_LEN 12
 /* Handle of target EVQ */
  */
 #define MC_CMD_PROXY_CMD 0x5b
 
+#define MC_CMD_0x5b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_PROXY_CMD_IN msgrequest */
 #define    MC_CMD_PROXY_CMD_IN_LEN 4
 /* The handle of the target function. */
  */
 #define MC_CMD_ALLOC_BUFTBL_CHUNK 0x87
 
+#define MC_CMD_0x87_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
 /* MC_CMD_ALLOC_BUFTBL_CHUNK_IN msgrequest */
 #define    MC_CMD_ALLOC_BUFTBL_CHUNK_IN_LEN 8
 /* Owner ID to use */
  */
 #define MC_CMD_PROGRAM_BUFTBL_ENTRIES 0x88
 
+#define MC_CMD_0x88_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
 /* MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN msgrequest */
 #define    MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMIN 20
 #define    MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 268
  */
 #define MC_CMD_FREE_BUFTBL_CHUNK 0x89
 
+#define MC_CMD_0x89_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
 /* MC_CMD_FREE_BUFTBL_CHUNK_IN msgrequest */
 #define    MC_CMD_FREE_BUFTBL_CHUNK_IN_LEN 4
 #define       MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_OFST 0
  */
 #define MC_CMD_FILTER_OP 0x8a
 
+#define MC_CMD_0x8a_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_FILTER_OP_IN msgrequest */
 #define    MC_CMD_FILTER_OP_IN_LEN 108
 /* identifies the type of operation requested */
  */
 #define MC_CMD_GET_PARSER_DISP_INFO 0xe4
 
+#define MC_CMD_0xe4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_GET_PARSER_DISP_INFO_IN msgrequest */
 #define    MC_CMD_GET_PARSER_DISP_INFO_IN_LEN 4
 /* identifies the type of operation requested */
  */
 #define MC_CMD_PARSER_DISP_RW 0xe5
 
+#define MC_CMD_0xe5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_PARSER_DISP_RW_IN msgrequest */
 #define    MC_CMD_PARSER_DISP_RW_IN_LEN 32
 /* identifies the target of the operation */
  */
 #define MC_CMD_GET_PF_COUNT 0xb6
 
+#define MC_CMD_0xb6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_GET_PF_COUNT_IN msgrequest */
 #define    MC_CMD_GET_PF_COUNT_IN_LEN 0
 
  */
 #define MC_CMD_GET_PORT_ASSIGNMENT 0xb8
 
+#define MC_CMD_0xb8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_GET_PORT_ASSIGNMENT_IN msgrequest */
 #define    MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN 0
 
  */
 #define MC_CMD_SET_PORT_ASSIGNMENT 0xb9
 
+#define MC_CMD_0xb9_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_SET_PORT_ASSIGNMENT_IN msgrequest */
 #define    MC_CMD_SET_PORT_ASSIGNMENT_IN_LEN 4
 /* Identifies the port assignment for this function. */
  */
 #define MC_CMD_ALLOC_VIS 0x8b
 
+#define MC_CMD_0x8b_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_ALLOC_VIS_IN msgrequest */
 #define    MC_CMD_ALLOC_VIS_IN_LEN 8
 /* The minimum number of VIs that is acceptable */
  */
 #define MC_CMD_FREE_VIS 0x8c
 
+#define MC_CMD_0x8c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_FREE_VIS_IN msgrequest */
 #define    MC_CMD_FREE_VIS_IN_LEN 0
 
  */
 #define MC_CMD_GET_SRIOV_CFG 0xba
 
+#define MC_CMD_0xba_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_GET_SRIOV_CFG_IN msgrequest */
 #define    MC_CMD_GET_SRIOV_CFG_IN_LEN 0
 
  */
 #define MC_CMD_SET_SRIOV_CFG 0xbb
 
+#define MC_CMD_0xbb_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_SET_SRIOV_CFG_IN msgrequest */
 #define    MC_CMD_SET_SRIOV_CFG_IN_LEN 20
 /* Number of VFs currently enabled. */
  */
 #define MC_CMD_GET_VI_ALLOC_INFO 0x8d
 
+#define MC_CMD_0x8d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_GET_VI_ALLOC_INFO_IN msgrequest */
 #define    MC_CMD_GET_VI_ALLOC_INFO_IN_LEN 0
 
  */
 #define MC_CMD_DUMP_VI_STATE 0x8e
 
+#define MC_CMD_0x8e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_DUMP_VI_STATE_IN msgrequest */
 #define    MC_CMD_DUMP_VI_STATE_IN_LEN 4
 /* The VI number to query. */
  */
 #define MC_CMD_ALLOC_PIOBUF 0x8f
 
+#define MC_CMD_0x8f_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
 /* MC_CMD_ALLOC_PIOBUF_IN msgrequest */
 #define    MC_CMD_ALLOC_PIOBUF_IN_LEN 0
 
  */
 #define MC_CMD_FREE_PIOBUF 0x90
 
+#define MC_CMD_0x90_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
 /* MC_CMD_FREE_PIOBUF_IN msgrequest */
 #define    MC_CMD_FREE_PIOBUF_IN_LEN 4
 /* Handle for allocated push I/O buffer. */
  */
 #define MC_CMD_GET_VI_TLP_PROCESSING 0xb0
 
+#define MC_CMD_0xb0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_GET_VI_TLP_PROCESSING_IN msgrequest */
 #define    MC_CMD_GET_VI_TLP_PROCESSING_IN_LEN 4
 /* VI number to get information for. */
  */
 #define MC_CMD_SET_VI_TLP_PROCESSING 0xb1
 
+#define MC_CMD_0xb1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_SET_VI_TLP_PROCESSING_IN msgrequest */
 #define    MC_CMD_SET_VI_TLP_PROCESSING_IN_LEN 8
 /* VI number to set information for. */
  */
 #define MC_CMD_GET_TLP_PROCESSING_GLOBALS 0xbc
 
+#define MC_CMD_0xbc_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN msgrequest */
 #define    MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_LEN 4
 #define       MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
  */
 #define MC_CMD_SET_TLP_PROCESSING_GLOBALS 0xbd
 
+#define MC_CMD_0xbd_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN msgrequest */
 #define    MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_LEN 8
 #define       MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
  */
 #define MC_CMD_SATELLITE_DOWNLOAD 0x91
 
+#define MC_CMD_0x91_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_SATELLITE_DOWNLOAD_IN msgrequest: The reset requirements for the CPUs
  * are subtle, and so downloads must proceed in a number of phases.
  *
  */
 #define MC_CMD_GET_CAPABILITIES 0xbe
 
+#define MC_CMD_0xbe_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_GET_CAPABILITIES_IN msgrequest */
 #define    MC_CMD_GET_CAPABILITIES_IN_LEN 0
 
 #define        MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_WIDTH 1
 #define        MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN 27
 #define        MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN 30
+#define        MC_CMD_GET_CAPABILITIES_OUT_EVB_WIDTH 1
 /* RxDPCPU firmware id. */
 #define       MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4
 #define       MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2
  */
 #define MC_CMD_TCM_BUCKET_ALLOC 0xb2
 
+#define MC_CMD_0xb2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_TCM_BUCKET_ALLOC_IN msgrequest */
 #define    MC_CMD_TCM_BUCKET_ALLOC_IN_LEN 0
 
  */
 #define MC_CMD_TCM_BUCKET_FREE 0xb3
 
+#define MC_CMD_0xb3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_TCM_BUCKET_FREE_IN msgrequest */
 #define    MC_CMD_TCM_BUCKET_FREE_IN_LEN 4
 /* the bucket id */
  */
 #define MC_CMD_TCM_BUCKET_INIT 0xb4
 
+#define MC_CMD_0xb4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_TCM_BUCKET_INIT_IN msgrequest */
 #define    MC_CMD_TCM_BUCKET_INIT_IN_LEN 8
 /* the bucket id */
  */
 #define MC_CMD_TCM_TXQ_INIT 0xb5
 
+#define MC_CMD_0xb5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_TCM_TXQ_INIT_IN msgrequest */
 #define    MC_CMD_TCM_TXQ_INIT_IN_LEN 28
 /* the txq id */
  */
 #define MC_CMD_LINK_PIOBUF 0x92
 
+#define MC_CMD_0x92_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
 /* MC_CMD_LINK_PIOBUF_IN msgrequest */
 #define    MC_CMD_LINK_PIOBUF_IN_LEN 8
 /* Handle for allocated push I/O buffer. */
  */
 #define MC_CMD_UNLINK_PIOBUF 0x93
 
+#define MC_CMD_0x93_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
 /* MC_CMD_UNLINK_PIOBUF_IN msgrequest */
 #define    MC_CMD_UNLINK_PIOBUF_IN_LEN 4
 /* Function Local Instance (VI) number. */
  */
 #define MC_CMD_VSWITCH_ALLOC 0x94
 
+#define MC_CMD_0x94_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_VSWITCH_ALLOC_IN msgrequest */
 #define    MC_CMD_VSWITCH_ALLOC_IN_LEN 16
 /* The port to connect to the v-switch's upstream port. */
  */
 #define MC_CMD_VSWITCH_FREE 0x95
 
+#define MC_CMD_0x95_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_VSWITCH_FREE_IN msgrequest */
 #define    MC_CMD_VSWITCH_FREE_IN_LEN 4
 /* The port to which the v-switch is connected. */
  */
 #define MC_CMD_VPORT_ALLOC 0x96
 
+#define MC_CMD_0x96_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_VPORT_ALLOC_IN msgrequest */
 #define    MC_CMD_VPORT_ALLOC_IN_LEN 20
 /* The port to which the v-switch is connected. */
  */
 #define MC_CMD_VPORT_FREE 0x97
 
+#define MC_CMD_0x97_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_VPORT_FREE_IN msgrequest */
 #define    MC_CMD_VPORT_FREE_IN_LEN 4
 /* The handle of the v-port */
  */
 #define MC_CMD_VADAPTOR_ALLOC 0x98
 
+#define MC_CMD_0x98_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_VADAPTOR_ALLOC_IN msgrequest */
-#define    MC_CMD_VADAPTOR_ALLOC_IN_LEN 16
+#define    MC_CMD_VADAPTOR_ALLOC_IN_LEN 30
 /* The port to connect to the v-adaptor's port. */
 #define       MC_CMD_VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
 /* Flags controlling v-adaptor creation */
 #define        MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_WIDTH 1
 /* The number of VLAN tags to strip on receive */
 #define       MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_OFST 12
+/* The number of VLAN tags to transparently insert/remove. */
+#define       MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLAN_TAGS_OFST 16
+/* The actual VLAN tags to insert/remove */
+#define       MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAGS_OFST 20
+#define        MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_LBN 0
+#define        MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_WIDTH 16
+#define        MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_LBN 16
+#define        MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_WIDTH 16
+/* The MAC address to assign to this v-adaptor */
+#define       MC_CMD_VADAPTOR_ALLOC_IN_MACADDR_OFST 24
+#define       MC_CMD_VADAPTOR_ALLOC_IN_MACADDR_LEN 6
+/* enum: Derive the MAC address from the upstream port */
+#define          MC_CMD_VADAPTOR_ALLOC_IN_AUTO_MAC  0x0
 
 /* MC_CMD_VADAPTOR_ALLOC_OUT msgresponse */
 #define    MC_CMD_VADAPTOR_ALLOC_OUT_LEN 0
  */
 #define MC_CMD_VADAPTOR_FREE 0x99
 
+#define MC_CMD_0x99_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_VADAPTOR_FREE_IN msgrequest */
 #define    MC_CMD_VADAPTOR_FREE_IN_LEN 4
 /* The port to which the v-adaptor is connected. */
 #define    MC_CMD_VADAPTOR_FREE_OUT_LEN 0
 
 
+/***********************************/
+/* MC_CMD_VADAPTOR_SET_MAC
+ * assign a new MAC address to a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_SET_MAC 0x5d
+
+#define MC_CMD_0x5d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VADAPTOR_SET_MAC_IN msgrequest */
+#define    MC_CMD_VADAPTOR_SET_MAC_IN_LEN 10
+/* The port to which the v-adaptor is connected. */
+#define       MC_CMD_VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID_OFST 0
+/* The new MAC address to assign to this v-adaptor */
+#define       MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_OFST 4
+#define       MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_LEN 6
+
+/* MC_CMD_VADAPTOR_SET_MAC_OUT msgresponse */
+#define    MC_CMD_VADAPTOR_SET_MAC_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_GET_MAC
+ * read the MAC address assigned to a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_GET_MAC 0x5e
+
+#define MC_CMD_0x5e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VADAPTOR_GET_MAC_IN msgrequest */
+#define    MC_CMD_VADAPTOR_GET_MAC_IN_LEN 4
+/* The port to which the v-adaptor is connected. */
+#define       MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_OFST 0
+
+/* MC_CMD_VADAPTOR_GET_MAC_OUT msgresponse */
+#define    MC_CMD_VADAPTOR_GET_MAC_OUT_LEN 6
+/* The MAC address assigned to this v-adaptor */
+#define       MC_CMD_VADAPTOR_GET_MAC_OUT_MACADDR_OFST 0
+#define       MC_CMD_VADAPTOR_GET_MAC_OUT_MACADDR_LEN 6
+
+
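
The two commands above are new in this revision: MC_CMD_VADAPTOR_SET_MAC and MC_CMD_VADAPTOR_GET_MAC let a function manage the MAC address bound to its v-adaptor, presumably in support of the VF MAC handling added elsewhere in this merge. A minimal sketch of a caller, assuming the standard sfc MCDI helpers and the EVB_PORT_ID_ASSIGNED tag defined elsewhere in this header; the wrapper name is hypothetical:

static int example_vadaptor_get_mac(struct efx_nic *efx, u8 *mac)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_GET_MAC_IN_LEN);
        MCDI_DECLARE_BUF(outbuf, MC_CMD_VADAPTOR_GET_MAC_OUT_LEN);
        size_t outlen;
        int rc;

        /* Query the v-adaptor on the port assigned to this function */
        MCDI_SET_DWORD(inbuf, VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID,
                       EVB_PORT_ID_ASSIGNED);
        rc = efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_GET_MAC, inbuf, sizeof(inbuf),
                          outbuf, sizeof(outbuf), &outlen);
        if (rc)
                return rc;
        if (outlen < MC_CMD_VADAPTOR_GET_MAC_OUT_LEN)
                return -EIO;
        ether_addr_copy(mac, MCDI_PTR(outbuf, VADAPTOR_GET_MAC_OUT_MACADDR));
        return 0;
}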
 /***********************************/
 /* MC_CMD_EVB_PORT_ASSIGN
  * assign a port to a PCI function.
  */
 #define MC_CMD_EVB_PORT_ASSIGN 0x9a
 
+#define MC_CMD_0x9a_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_EVB_PORT_ASSIGN_IN msgrequest */
 #define    MC_CMD_EVB_PORT_ASSIGN_IN_LEN 8
 /* The port to assign. */
  */
 #define MC_CMD_RDWR_A64_REGIONS 0x9b
 
+#define MC_CMD_0x9b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_RDWR_A64_REGIONS_IN msgrequest */
 #define    MC_CMD_RDWR_A64_REGIONS_IN_LEN 17
 #define       MC_CMD_RDWR_A64_REGIONS_IN_REGION0_OFST 0
  */
 #define MC_CMD_ONLOAD_STACK_ALLOC 0x9c
 
+#define MC_CMD_0x9c_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
 /* MC_CMD_ONLOAD_STACK_ALLOC_IN msgrequest */
 #define    MC_CMD_ONLOAD_STACK_ALLOC_IN_LEN 4
 /* The handle of the owning upstream port */
  */
 #define MC_CMD_ONLOAD_STACK_FREE 0x9d
 
+#define MC_CMD_0x9d_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
 /* MC_CMD_ONLOAD_STACK_FREE_IN msgrequest */
 #define    MC_CMD_ONLOAD_STACK_FREE_IN_LEN 4
 /* The handle of the Onload stack */
  */
 #define MC_CMD_RSS_CONTEXT_ALLOC 0x9e
 
+#define MC_CMD_0x9e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_RSS_CONTEXT_ALLOC_IN msgrequest */
 #define    MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN 12
 /* The handle of the owning upstream port */
  */
 #define MC_CMD_RSS_CONTEXT_FREE 0x9f
 
+#define MC_CMD_0x9f_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_RSS_CONTEXT_FREE_IN msgrequest */
 #define    MC_CMD_RSS_CONTEXT_FREE_IN_LEN 4
 /* The handle of the RSS context */
  */
 #define MC_CMD_RSS_CONTEXT_SET_KEY 0xa0
 
+#define MC_CMD_0xa0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_RSS_CONTEXT_SET_KEY_IN msgrequest */
 #define    MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN 44
 /* The handle of the RSS context */
  */
 #define MC_CMD_RSS_CONTEXT_GET_KEY 0xa1
 
+#define MC_CMD_0xa1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_RSS_CONTEXT_GET_KEY_IN msgrequest */
 #define    MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN 4
 /* The handle of the RSS context */
  */
 #define MC_CMD_RSS_CONTEXT_SET_TABLE 0xa2
 
+#define MC_CMD_0xa2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_RSS_CONTEXT_SET_TABLE_IN msgrequest */
 #define    MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN 132
 /* The handle of the RSS context */
  */
 #define MC_CMD_RSS_CONTEXT_GET_TABLE 0xa3
 
+#define MC_CMD_0xa3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_RSS_CONTEXT_GET_TABLE_IN msgrequest */
 #define    MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN 4
 /* The handle of the RSS context */
  */
 #define MC_CMD_RSS_CONTEXT_SET_FLAGS 0xe1
 
+#define MC_CMD_0xe1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_RSS_CONTEXT_SET_FLAGS_IN msgrequest */
 #define    MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN 8
 /* The handle of the RSS context */
  */
 #define MC_CMD_RSS_CONTEXT_GET_FLAGS 0xe2
 
+#define MC_CMD_0xe2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_RSS_CONTEXT_GET_FLAGS_IN msgrequest */
 #define    MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN 4
 /* The handle of the RSS context */
  */
 #define MC_CMD_DOT1P_MAPPING_ALLOC 0xa4
 
+#define MC_CMD_0xa4_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_DOT1P_MAPPING_ALLOC_IN msgrequest */
 #define    MC_CMD_DOT1P_MAPPING_ALLOC_IN_LEN 8
 /* The handle of the owning upstream port */
  */
 #define MC_CMD_DOT1P_MAPPING_FREE 0xa5
 
+#define MC_CMD_0xa5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_DOT1P_MAPPING_FREE_IN msgrequest */
 #define    MC_CMD_DOT1P_MAPPING_FREE_IN_LEN 4
 /* The handle of the .1p mapping */
  */
 #define MC_CMD_DOT1P_MAPPING_SET_TABLE 0xa6
 
+#define MC_CMD_0xa6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_DOT1P_MAPPING_SET_TABLE_IN msgrequest */
 #define    MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_LEN 36
 /* The handle of the .1p mapping */
  */
 #define MC_CMD_DOT1P_MAPPING_GET_TABLE 0xa7
 
+#define MC_CMD_0xa7_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_DOT1P_MAPPING_GET_TABLE_IN msgrequest */
 #define    MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_LEN 4
 /* The handle of the .1p mapping */
  */
 #define MC_CMD_GET_VECTOR_CFG 0xbf
 
+#define MC_CMD_0xbf_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_GET_VECTOR_CFG_IN msgrequest */
 #define    MC_CMD_GET_VECTOR_CFG_IN_LEN 0
 
  */
 #define MC_CMD_SET_VECTOR_CFG 0xc0
 
+#define MC_CMD_0xc0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_SET_VECTOR_CFG_IN msgrequest */
 #define    MC_CMD_SET_VECTOR_CFG_IN_LEN 12
 /* Base absolute interrupt vector number, or MC_CMD_RESOURCE_INSTANCE_ANY to
  */
 #define MC_CMD_VPORT_ADD_MAC_ADDRESS 0xa8
 
+#define MC_CMD_0xa8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_VPORT_ADD_MAC_ADDRESS_IN msgrequest */
 #define    MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN 10
 /* The handle of the v-port */
  */
 #define MC_CMD_VPORT_DEL_MAC_ADDRESS 0xa9
 
+#define MC_CMD_0xa9_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_VPORT_DEL_MAC_ADDRESS_IN msgrequest */
 #define    MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN 10
 /* The handle of the v-port */
  */
 #define MC_CMD_VPORT_GET_MAC_ADDRESSES 0xaa
 
+#define MC_CMD_0xaa_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_VPORT_GET_MAC_ADDRESSES_IN msgrequest */
 #define    MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN 4
 /* The handle of the v-port */
  */
 #define MC_CMD_DUMP_BUFTBL_ENTRIES 0xab
 
+#define MC_CMD_0xab_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_DUMP_BUFTBL_ENTRIES_IN msgrequest */
 #define    MC_CMD_DUMP_BUFTBL_ENTRIES_IN_LEN 8
 /* Index of the first buffer table entry. */
  */
 #define MC_CMD_SET_RXDP_CONFIG 0xc1
 
+#define MC_CMD_0xc1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_SET_RXDP_CONFIG_IN msgrequest */
 #define    MC_CMD_SET_RXDP_CONFIG_IN_LEN 4
 #define       MC_CMD_SET_RXDP_CONFIG_IN_DATA_OFST 0
  */
 #define MC_CMD_GET_RXDP_CONFIG 0xc2
 
+#define MC_CMD_0xc2_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_GET_RXDP_CONFIG_IN msgrequest */
 #define    MC_CMD_GET_RXDP_CONFIG_IN_LEN 0
 
  */
 #define MC_CMD_GET_CLOCK 0xac
 
+#define MC_CMD_0xac_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_GET_CLOCK_IN msgrequest */
 #define    MC_CMD_GET_CLOCK_IN_LEN 0
 
  */
 #define MC_CMD_SET_CLOCK 0xad
 
+#define MC_CMD_0xad_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_SET_CLOCK_IN msgrequest */
 #define    MC_CMD_SET_CLOCK_IN_LEN 12
 /* Requested system frequency in MHz; 0 leaves unchanged. */
  */
 #define MC_CMD_DPCPU_RPC 0xae
 
+#define MC_CMD_0xae_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_DPCPU_RPC_IN msgrequest */
 #define    MC_CMD_DPCPU_RPC_IN_LEN 36
 #define       MC_CMD_DPCPU_RPC_IN_CPU_OFST 0
  */
 #define MC_CMD_TRIGGER_INTERRUPT 0xe3
 
+#define MC_CMD_0xe3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_TRIGGER_INTERRUPT_IN msgrequest */
 #define    MC_CMD_TRIGGER_INTERRUPT_IN_LEN 4
 /* Interrupt level relative to base for function. */
  */
 #define MC_CMD_CAP_BLK_READ 0xe7
 
+#define MC_CMD_0xe7_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_CAP_BLK_READ_IN msgrequest */
 #define    MC_CMD_CAP_BLK_READ_IN_LEN 12
 #define       MC_CMD_CAP_BLK_READ_IN_CAP_REG_OFST 0
  */
 #define MC_CMD_DUMP_DO 0xe8
 
+#define MC_CMD_0xe8_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_DUMP_DO_IN msgrequest */
 #define    MC_CMD_DUMP_DO_IN_LEN 52
 #define       MC_CMD_DUMP_DO_IN_PADDING_OFST 0
  */
 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED 0xe9
 
+#define MC_CMD_0xe9_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN msgrequest */
 #define    MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_LEN 52
 #define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_ENABLE_OFST 0
  */
 #define MC_CMD_SET_PSU 0xea
 
+#define MC_CMD_0xea_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_SET_PSU_IN msgrequest */
 #define    MC_CMD_SET_PSU_IN_LEN 12
 #define       MC_CMD_SET_PSU_IN_PARAM_OFST 0
  */
 #define MC_CMD_GET_FUNCTION_INFO 0xec
 
+#define MC_CMD_0xec_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_GET_FUNCTION_INFO_IN msgrequest */
 #define    MC_CMD_GET_FUNCTION_INFO_IN_LEN 0
 
  */
 #define MC_CMD_ENABLE_OFFLINE_BIST 0xed
 
+#define MC_CMD_0xed_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_ENABLE_OFFLINE_BIST_IN msgrequest */
 #define    MC_CMD_ENABLE_OFFLINE_BIST_IN_LEN 0
 
  */
 #define MC_CMD_UART_SEND_DATA 0xee
 
+#define MC_CMD_0xee_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_UART_SEND_DATA_OUT msgrequest */
 #define    MC_CMD_UART_SEND_DATA_OUT_LENMIN 16
 #define    MC_CMD_UART_SEND_DATA_OUT_LENMAX 252
  */
 #define MC_CMD_UART_RECV_DATA 0xef
 
+#define MC_CMD_0xef_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_UART_RECV_DATA_OUT msgrequest */
 #define    MC_CMD_UART_RECV_DATA_OUT_LEN 16
 /* CRC32 over OFFSET, LENGTH, RESERVED */
  */
 #define MC_CMD_READ_FUSES 0xf0
 
+#define MC_CMD_0xf0_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_READ_FUSES_IN msgrequest */
 #define    MC_CMD_READ_FUSES_IN_LEN 8
 /* Offset in OTP to read */
  */
 #define MC_CMD_KR_TUNE 0xf1
 
+#define MC_CMD_0xf1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_KR_TUNE_IN msgrequest */
 #define    MC_CMD_KR_TUNE_IN_LENMIN 4
 #define    MC_CMD_KR_TUNE_IN_LENMAX 252
  */
 #define MC_CMD_PCIE_TUNE 0xf2
 
+#define MC_CMD_0xf2_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_PCIE_TUNE_IN msgrequest */
 #define    MC_CMD_PCIE_TUNE_IN_LENMIN 4
 #define    MC_CMD_PCIE_TUNE_IN_LENMAX 252
  */
 #define MC_CMD_LICENSING 0xf3
 
+#define MC_CMD_0xf3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_LICENSING_IN msgrequest */
 #define    MC_CMD_LICENSING_IN_LEN 4
 /* identifies the type of operation requested */
  */
 #define MC_CMD_MC2MC_PROXY 0xf4
 
+#define MC_CMD_0xf4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_MC2MC_PROXY_IN msgrequest */
 #define    MC_CMD_MC2MC_PROXY_IN_LEN 0
 
  */
 #define MC_CMD_GET_LICENSED_APP_STATE 0xf5
 
+#define MC_CMD_0xf5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_GET_LICENSED_APP_STATE_IN msgrequest */
 #define    MC_CMD_GET_LICENSED_APP_STATE_IN_LEN 4
 /* application ID to query (LICENSED_APP_ID_xxx) */
  */
 #define MC_CMD_LICENSED_APP_OP 0xf6
 
+#define MC_CMD_0xf6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_LICENSED_APP_OP_IN msgrequest */
 #define    MC_CMD_LICENSED_APP_OP_IN_LENMIN 8
 #define    MC_CMD_LICENSED_APP_OP_IN_LENMAX 252
  */
 #define MC_CMD_SET_PORT_SNIFF_CONFIG 0xf7
 
+#define MC_CMD_0xf7_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_SET_PORT_SNIFF_CONFIG_IN msgrequest */
 #define    MC_CMD_SET_PORT_SNIFF_CONFIG_IN_LEN 16
 /* configuration flags */
  */
 #define MC_CMD_GET_PORT_SNIFF_CONFIG 0xf8
 
+#define MC_CMD_0xf8_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_GET_PORT_SNIFF_CONFIG_IN msgrequest */
 #define    MC_CMD_GET_PORT_SNIFF_CONFIG_IN_LEN 0
 
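
Every MCDI command touched above also gains an MC_CMD_0xNN_PRIVILEGE_CTG tag placing it in one of the SR-IOV privilege categories: SRIOV_CTG_GENERAL (callable by any function), SRIOV_CTG_ADMIN (privileged/PF only) or SRIOV_CTG_ONLOAD (functions with Onload rights). A hypothetical gate showing how such a tag could be checked before a command is issued; the helper and its two boolean inputs are assumptions, only the category tags come from this header:

static int example_check_privilege(unsigned int ctg, bool pf_is_admin,
                                   bool has_onload_rights)
{
        if (ctg == SRIOV_CTG_GENERAL)           /* any function may call */
                return 0;
        if (ctg == SRIOV_CTG_ADMIN)             /* privileged/PF only */
                return pf_is_admin ? 0 : -EPERM;
        if (ctg == SRIOV_CTG_ONLOAD)            /* Onload-entitled only */
                return has_onload_rights ? 0 : -EPERM;
        return -EPERM;
}

/* e.g.: example_check_privilege(MC_CMD_0x5d_PRIVILEGE_CTG, true, false) */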
index fb19b70eac0118b6bb1946cf9796d3721baddfe0..7f295c4d7b80cb866c3f2f10f71aeeaefaf3caa5 100644 (file)
@@ -865,6 +865,7 @@ int efx_mcdi_set_mac(struct efx_nic *efx)
 
        BUILD_BUG_ON(MC_CMD_SET_MAC_OUT_LEN != 0);
 
+       /* This has no effect on EF10 */
        ether_addr_copy(MCDI_PTR(cmdbytes, SET_MAC_IN_ADDR),
                        efx->net_dev->dev_addr);
 
@@ -923,6 +924,7 @@ enum efx_stats_action {
 static int efx_mcdi_mac_stats(struct efx_nic *efx,
                              enum efx_stats_action action, int clear)
 {
+       struct efx_ef10_nic_data *nic_data = efx->nic_data;
        MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
        int rc;
        int change = action == EFX_STATS_PULL ? 0 : 1;
@@ -944,9 +946,14 @@ static int efx_mcdi_mac_stats(struct efx_nic *efx,
                              MAC_STATS_IN_PERIODIC_NOEVENT, 1,
                              MAC_STATS_IN_PERIOD_MS, period);
        MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
-
-       rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
-                         NULL, 0, NULL);
+       MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, nic_data->vport_id);
+
+       rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
+                               NULL, 0, NULL);
+       /* Expect ENOENT if DMA queues have not been set up */
+       if (rc && (rc != -ENOENT || atomic_read(&efx->active_queues)))
+               efx_mcdi_display_error(efx, MC_CMD_MAC_STATS, sizeof(inbuf),
+                                      NULL, 0, rc);
        return rc;
 }
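
The rewritten efx_mcdi_mac_stats() illustrates the quiet-RPC idiom used throughout this series: efx_mcdi_rpc_quiet() suppresses the automatic error log so the caller can swallow an expected failure (-ENOENT before any DMA queue exists) and report only genuine ones via efx_mcdi_display_error(). A condensed sketch of the same pattern, with population of inbuf elided:

static int example_mac_stats_rpc(struct efx_nic *efx)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
        int rc;

        /* ... fill inbuf with MCDI_SET_DWORD() as in the hunk above ... */
        rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
                                NULL, 0, NULL);
        if (rc && rc != -ENOENT)        /* -ENOENT expected before queues exist */
                efx_mcdi_display_error(efx, MC_CMD_MAC_STATS, sizeof(inbuf),
                                       NULL, 0, rc);
        return rc;
}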
 
index 325dd94bca465ef99efe00b9d829b63c1abab3a0..d72f522bf9c3b2c0d9df322a7798ec1be6a1aba8 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/highmem.h>
 #include <linux/workqueue.h>
 #include <linux/mutex.h>
+#include <linux/rwsem.h>
 #include <linux/vmalloc.h>
 #include <linux/i2c.h>
 #include <linux/mtd/mtd.h>
@@ -793,7 +794,6 @@ union efx_multicast_hash {
        efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8];
 };
 
-struct efx_vf;
 struct vfdi_status;
 
 /**
@@ -897,7 +897,8 @@ struct vfdi_status;
  * @loopback_mode: Loopback status
  * @loopback_modes: Supported loopback mode bitmask
  * @loopback_selftest: Offline self-test private state
- * @filter_lock: Filter table lock
+ * @filter_sem: Filter table rw_semaphore, for freeing the table
+ * @filter_lock: Filter table lock, for mere content changes
  * @filter_state: Architecture-dependent filter table state
  * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS,
  *     indexed by filter ID
@@ -909,7 +910,6 @@ struct vfdi_status;
  *     completed (either success or failure). Not used when MCDI is used to
  *     flush receive queues.
  * @flush_wq: wait queue used by efx_nic_flush_queues() to wait for flush completions.
- * @vf: Array of &struct efx_vf objects.
  * @vf_count: Number of VFs intended to be enabled.
  * @vf_init_count: Number of VFs that have been fully initialised.
  * @vi_scale: log2 number of vnics per VF.
@@ -1040,6 +1040,7 @@ struct efx_nic {
 
        void *loopback_selftest;
 
+       struct rw_semaphore filter_sem;
        spinlock_t filter_lock;
        void *filter_state;
 #ifdef CONFIG_RFS_ACCEL
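
filter_sem and filter_lock now split two concerns: the rw_semaphore guards the existence of the filter table (taken exclusively to free or replace the whole table), while the spinlock continues to serialise ordinary entry updates. A sketch of the assumed locking order for a content change; the function name is illustrative:

static void example_filter_mod(struct efx_nic *efx)
{
        down_read(&efx->filter_sem);            /* keep the table alive */
        spin_lock_bh(&efx->filter_lock);        /* serialise content changes */
        /* ... modify entries reachable from efx->filter_state ... */
        spin_unlock_bh(&efx->filter_lock);
        up_read(&efx->filter_sem);
}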
@@ -1053,7 +1054,6 @@ struct efx_nic {
        wait_queue_head_t flush_wq;
 
 #ifdef CONFIG_SFC_SRIOV
-       struct efx_vf *vf;
        unsigned vf_count;
        unsigned vf_init_count;
        unsigned vi_scale;
@@ -1092,6 +1092,7 @@ struct efx_mtd_partition {
 
 /**
  * struct efx_nic_type - Efx device type definition
+ * @is_vf: Set for the VF variant of a controller
+ * @mem_bar: Memory BAR number used by this controller
  * @mem_map_size: Get memory BAR mapped size
  * @probe: Probe the controller
  * @remove: Free resources allocated by probe()
@@ -1204,6 +1205,7 @@ struct efx_mtd_partition {
  * @ptp_set_ts_config: Set hardware timestamp configuration.  The flags
  *     and tx_type will already have been validated but this operation
  *     must validate and update rx_filter.
+ * @set_mac_address: Set the MAC address of the device
  * @revision: Hardware architecture revision
  * @txd_ptr_tbl_base: TX descriptor ring base address
  * @rxd_ptr_tbl_base: RX descriptor ring base address
@@ -1226,6 +1228,8 @@ struct efx_mtd_partition {
  * @hwtstamp_filters: Mask of hardware timestamp filter types supported
  */
 struct efx_nic_type {
+       bool is_vf;
+       unsigned int mem_bar;
        unsigned int (*mem_map_size)(struct efx_nic *efx);
        int (*probe)(struct efx_nic *efx);
        void (*remove)(struct efx_nic *efx);
@@ -1277,7 +1281,8 @@ struct efx_nic_type {
        void (*tx_init)(struct efx_tx_queue *tx_queue);
        void (*tx_remove)(struct efx_tx_queue *tx_queue);
        void (*tx_write)(struct efx_tx_queue *tx_queue);
-       void (*rx_push_rss_config)(struct efx_nic *efx);
+       int (*rx_push_rss_config)(struct efx_nic *efx, bool user,
+                                 const u32 *rx_indir_table);
        int (*rx_probe)(struct efx_rx_queue *rx_queue);
        void (*rx_init)(struct efx_rx_queue *rx_queue);
        void (*rx_remove)(struct efx_rx_queue *rx_queue);
@@ -1330,11 +1335,28 @@ struct efx_nic_type {
        int (*ptp_set_ts_sync_events)(struct efx_nic *efx, bool en, bool temp);
        int (*ptp_set_ts_config)(struct efx_nic *efx,
                                 struct hwtstamp_config *init);
+       int (*sriov_configure)(struct efx_nic *efx, int num_vfs);
        int (*sriov_init)(struct efx_nic *efx);
        void (*sriov_fini)(struct efx_nic *efx);
-       void (*sriov_mac_address_changed)(struct efx_nic *efx);
        bool (*sriov_wanted)(struct efx_nic *efx);
        void (*sriov_reset)(struct efx_nic *efx);
+       void (*sriov_flr)(struct efx_nic *efx, unsigned vf_i);
+       int (*sriov_set_vf_mac)(struct efx_nic *efx, int vf_i, u8 *mac);
+       int (*sriov_set_vf_vlan)(struct efx_nic *efx, int vf_i, u16 vlan,
+                                u8 qos);
+       int (*sriov_set_vf_spoofchk)(struct efx_nic *efx, int vf_i,
+                                    bool spoofchk);
+       int (*sriov_get_vf_config)(struct efx_nic *efx, int vf_i,
+                                  struct ifla_vf_info *ivi);
+       int (*sriov_set_vf_link_state)(struct efx_nic *efx, int vf_i,
+                                      int link_state);
+       int (*sriov_get_phys_port_id)(struct efx_nic *efx,
+                                     struct netdev_phys_item_id *ppid);
+       int (*vswitching_probe)(struct efx_nic *efx);
+       int (*vswitching_restore)(struct efx_nic *efx);
+       void (*vswitching_remove)(struct efx_nic *efx);
+       int (*get_mac_address)(struct efx_nic *efx, unsigned char *perm_addr);
+       int (*set_mac_address)(struct efx_nic *efx);
 
        int revision;
        unsigned int txd_ptr_tbl_base;
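
The efx_nic_type additions turn SR-IOV handling into per-architecture ops (previously only Siena had them, called directly). Common code can then dispatch through the table and degrade gracefully where a NIC type leaves a hook NULL; a sketch of such glue, with the wrapper name assumed:

static int example_ndo_set_vf_mac(struct net_device *net_dev, int vf_i,
                                  u8 *mac)
{
        struct efx_nic *efx = netdev_priv(net_dev);

        if (!efx->type->sriov_set_vf_mac)       /* NIC type without SR-IOV */
                return -EOPNOTSUPP;
        return efx->type->sriov_set_vf_mac(efx, vf_i, mac);
}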
index 93d10cbbd1cfa9cd419f419d1420ffa40bc7da26..31ff9084d9a46624d3a2350a10fdddfc18bbfc08 100644 (file)
@@ -381,6 +381,7 @@ enum {
  * @efx: Pointer back to main interface structure
  * @wol_filter_id: Wake-on-LAN packet filter id
  * @stats: Hardware statistics
+ * @vf: Array of &struct siena_vf objects
  * @vf_buftbl_base: The zeroth buffer table index used to back VF queues.
  * @vfdi_status: Common VFDI status page to be dmad to VF address space.
  * @local_addr_list: List of local addresses. Protected by %local_lock.
@@ -394,6 +395,7 @@ struct siena_nic_data {
        int wol_filter_id;
        u64 stats[SIENA_STAT_COUNT];
 #ifdef CONFIG_SFC_SRIOV
+       struct siena_vf *vf;
        struct efx_channel *vfdi_channel;
        unsigned vf_buftbl_base;
        struct efx_buffer vfdi_status;
@@ -405,59 +407,77 @@ struct siena_nic_data {
 };
 
 enum {
-       EF10_STAT_tx_bytes = GENERIC_STAT_COUNT,
-       EF10_STAT_tx_packets,
-       EF10_STAT_tx_pause,
-       EF10_STAT_tx_control,
-       EF10_STAT_tx_unicast,
-       EF10_STAT_tx_multicast,
-       EF10_STAT_tx_broadcast,
-       EF10_STAT_tx_lt64,
-       EF10_STAT_tx_64,
-       EF10_STAT_tx_65_to_127,
-       EF10_STAT_tx_128_to_255,
-       EF10_STAT_tx_256_to_511,
-       EF10_STAT_tx_512_to_1023,
-       EF10_STAT_tx_1024_to_15xx,
-       EF10_STAT_tx_15xx_to_jumbo,
-       EF10_STAT_rx_bytes,
-       EF10_STAT_rx_bytes_minus_good_bytes,
-       EF10_STAT_rx_good_bytes,
-       EF10_STAT_rx_bad_bytes,
-       EF10_STAT_rx_packets,
-       EF10_STAT_rx_good,
-       EF10_STAT_rx_bad,
-       EF10_STAT_rx_pause,
-       EF10_STAT_rx_control,
+       EF10_STAT_port_tx_bytes = GENERIC_STAT_COUNT,
+       EF10_STAT_port_tx_packets,
+       EF10_STAT_port_tx_pause,
+       EF10_STAT_port_tx_control,
+       EF10_STAT_port_tx_unicast,
+       EF10_STAT_port_tx_multicast,
+       EF10_STAT_port_tx_broadcast,
+       EF10_STAT_port_tx_lt64,
+       EF10_STAT_port_tx_64,
+       EF10_STAT_port_tx_65_to_127,
+       EF10_STAT_port_tx_128_to_255,
+       EF10_STAT_port_tx_256_to_511,
+       EF10_STAT_port_tx_512_to_1023,
+       EF10_STAT_port_tx_1024_to_15xx,
+       EF10_STAT_port_tx_15xx_to_jumbo,
+       EF10_STAT_port_rx_bytes,
+       EF10_STAT_port_rx_bytes_minus_good_bytes,
+       EF10_STAT_port_rx_good_bytes,
+       EF10_STAT_port_rx_bad_bytes,
+       EF10_STAT_port_rx_packets,
+       EF10_STAT_port_rx_good,
+       EF10_STAT_port_rx_bad,
+       EF10_STAT_port_rx_pause,
+       EF10_STAT_port_rx_control,
+       EF10_STAT_port_rx_unicast,
+       EF10_STAT_port_rx_multicast,
+       EF10_STAT_port_rx_broadcast,
+       EF10_STAT_port_rx_lt64,
+       EF10_STAT_port_rx_64,
+       EF10_STAT_port_rx_65_to_127,
+       EF10_STAT_port_rx_128_to_255,
+       EF10_STAT_port_rx_256_to_511,
+       EF10_STAT_port_rx_512_to_1023,
+       EF10_STAT_port_rx_1024_to_15xx,
+       EF10_STAT_port_rx_15xx_to_jumbo,
+       EF10_STAT_port_rx_gtjumbo,
+       EF10_STAT_port_rx_bad_gtjumbo,
+       EF10_STAT_port_rx_overflow,
+       EF10_STAT_port_rx_align_error,
+       EF10_STAT_port_rx_length_error,
+       EF10_STAT_port_rx_nodesc_drops,
+       EF10_STAT_port_rx_pm_trunc_bb_overflow,
+       EF10_STAT_port_rx_pm_discard_bb_overflow,
+       EF10_STAT_port_rx_pm_trunc_vfifo_full,
+       EF10_STAT_port_rx_pm_discard_vfifo_full,
+       EF10_STAT_port_rx_pm_trunc_qbb,
+       EF10_STAT_port_rx_pm_discard_qbb,
+       EF10_STAT_port_rx_pm_discard_mapping,
+       EF10_STAT_port_rx_dp_q_disabled_packets,
+       EF10_STAT_port_rx_dp_di_dropped_packets,
+       EF10_STAT_port_rx_dp_streaming_packets,
+       EF10_STAT_port_rx_dp_hlb_fetch,
+       EF10_STAT_port_rx_dp_hlb_wait,
        EF10_STAT_rx_unicast,
+       EF10_STAT_rx_unicast_bytes,
        EF10_STAT_rx_multicast,
+       EF10_STAT_rx_multicast_bytes,
        EF10_STAT_rx_broadcast,
-       EF10_STAT_rx_lt64,
-       EF10_STAT_rx_64,
-       EF10_STAT_rx_65_to_127,
-       EF10_STAT_rx_128_to_255,
-       EF10_STAT_rx_256_to_511,
-       EF10_STAT_rx_512_to_1023,
-       EF10_STAT_rx_1024_to_15xx,
-       EF10_STAT_rx_15xx_to_jumbo,
-       EF10_STAT_rx_gtjumbo,
-       EF10_STAT_rx_bad_gtjumbo,
+       EF10_STAT_rx_broadcast_bytes,
+       EF10_STAT_rx_bad,
+       EF10_STAT_rx_bad_bytes,
        EF10_STAT_rx_overflow,
-       EF10_STAT_rx_align_error,
-       EF10_STAT_rx_length_error,
-       EF10_STAT_rx_nodesc_drops,
-       EF10_STAT_rx_pm_trunc_bb_overflow,
-       EF10_STAT_rx_pm_discard_bb_overflow,
-       EF10_STAT_rx_pm_trunc_vfifo_full,
-       EF10_STAT_rx_pm_discard_vfifo_full,
-       EF10_STAT_rx_pm_trunc_qbb,
-       EF10_STAT_rx_pm_discard_qbb,
-       EF10_STAT_rx_pm_discard_mapping,
-       EF10_STAT_rx_dp_q_disabled_packets,
-       EF10_STAT_rx_dp_di_dropped_packets,
-       EF10_STAT_rx_dp_streaming_packets,
-       EF10_STAT_rx_dp_hlb_fetch,
-       EF10_STAT_rx_dp_hlb_wait,
+       EF10_STAT_tx_unicast,
+       EF10_STAT_tx_unicast_bytes,
+       EF10_STAT_tx_multicast,
+       EF10_STAT_tx_multicast_bytes,
+       EF10_STAT_tx_broadcast,
+       EF10_STAT_tx_broadcast_bytes,
+       EF10_STAT_tx_bad,
+       EF10_STAT_tx_bad_bytes,
+       EF10_STAT_tx_overflow,
        EF10_STAT_COUNT
 };
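
The EF10 statistics enum is reorganised: MAC/port counters gain an EF10_STAT_port_ prefix (they remain PF-only), while the unprefixed tail entries are per-vadaptor counters that a VF can also read. Since the port_ entries stay contiguous in the enum, a simple predicate can tell the two groups apart; illustrative helper, not part of the patch:

static bool example_stat_is_port(unsigned int index)
{
        /* The port_* entries are contiguous in the enum above */
        return index >= EF10_STAT_port_tx_bytes &&
               index <= EF10_STAT_port_rx_dp_hlb_wait;
}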
 
@@ -483,12 +503,21 @@ enum {
  * @must_restore_piobufs: Flag: PIO buffers have yet to be restored after MC
  *     reboot
  * @rx_rss_context: Firmware handle for our RSS context
+ * @rx_rss_context_exclusive: Whether our RSS context is exclusive or shared
  * @stats: Hardware statistics
  * @workaround_35388: Flag: firmware supports workaround for bug 35388
  * @must_check_datapath_caps: Flag: @datapath_caps needs to be revalidated
  *     after MC reboot
  * @datapath_caps: Capabilities of datapath firmware (FLAGS1 field of
  *     %MC_CMD_GET_CAPABILITIES response)
+ * @rx_dpcpu_fw_id: Firmware ID of the RxDPCPU
+ * @tx_dpcpu_fw_id: Firmware ID of the TxDPCPU
+ * @vport_id: The function's vport ID, only relevant for PFs
+ * @must_probe_vswitching: Flag: vswitching has yet to be setup after MC reboot
+ * @pf_index: The number for this PF, or that of the parent PF if this is a VF
+ * @vf: Pointer to the VF data structure (present only in CONFIG_SFC_SRIOV
+ *     builds)
  */
 struct efx_ef10_nic_data {
        struct efx_buffer mcdi_buf;
@@ -503,126 +532,27 @@ struct efx_ef10_nic_data {
        unsigned int piobuf_handle[EF10_TX_PIOBUF_COUNT];
        bool must_restore_piobufs;
        u32 rx_rss_context;
+       bool rx_rss_context_exclusive;
        u64 stats[EF10_STAT_COUNT];
        bool workaround_35388;
        bool must_check_datapath_caps;
        u32 datapath_caps;
-};
-
-/*
- * On the SFC9000 family each port is associated with 1 PCI physical
- * function (PF) handled by sfc and a configurable number of virtual
- * functions (VFs) that may be handled by some other driver, often in
- * a VM guest.  The queue pointer registers are mapped in both PF and
- * VF BARs such that an 8K region provides access to a single RX, TX
- * and event queue (collectively a Virtual Interface, VI or VNIC).
- *
- * The PF has access to all 1024 VIs while VFs are mapped to VIs
- * according to VI_BASE and VI_SCALE: VF i has access to VIs numbered
- * in range [VI_BASE + i << VI_SCALE, VI_BASE + i + 1 << VI_SCALE).
- * The number of VIs and the VI_SCALE value are configurable but must
- * be established at boot time by firmware.
- */
-
-/* Maximum VI_SCALE parameter supported by Siena */
-#define EFX_VI_SCALE_MAX 6
-/* Base VI to use for SR-IOV. Must be aligned to (1 << EFX_VI_SCALE_MAX),
- * so this is the smallest allowed value. */
-#define EFX_VI_BASE 128U
-/* Maximum number of VFs allowed */
-#define EFX_VF_COUNT_MAX 127
-/* Limit EVQs on VFs to be only 8k to reduce buffer table reservation */
-#define EFX_MAX_VF_EVQ_SIZE 8192UL
-/* The number of buffer table entries reserved for each VI on a VF */
-#define EFX_VF_BUFTBL_PER_VI                                   \
-       ((EFX_MAX_VF_EVQ_SIZE + 2 * EFX_MAX_DMAQ_SIZE) *        \
-        sizeof(efx_qword_t) / EFX_BUF_SIZE)
-
+       unsigned int rx_dpcpu_fw_id;
+       unsigned int tx_dpcpu_fw_id;
+       unsigned int vport_id;
+       bool must_probe_vswitching;
+       unsigned int pf_index;
+       u8 port_id[ETH_ALEN];
 #ifdef CONFIG_SFC_SRIOV
-
-/* SIENA */
-static inline bool efx_siena_sriov_wanted(struct efx_nic *efx)
-{
-       return efx->vf_count != 0;
-}
-
-static inline bool efx_siena_sriov_enabled(struct efx_nic *efx)
-{
-       return efx->vf_init_count != 0;
-}
-
-static inline unsigned int efx_vf_size(struct efx_nic *efx)
-{
-       return 1 << efx->vi_scale;
-}
+       unsigned int vf_index;
+       struct ef10_vf *vf;
+#endif
+       u8 vport_mac[ETH_ALEN];
+};
 
 int efx_init_sriov(void);
-void efx_siena_sriov_probe(struct efx_nic *efx);
-int efx_siena_sriov_init(struct efx_nic *efx);
-void efx_siena_sriov_mac_address_changed(struct efx_nic *efx);
-void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event);
-void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event);
-void efx_siena_sriov_event(struct efx_channel *channel, efx_qword_t *event);
-void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq);
-void efx_siena_sriov_flr(struct efx_nic *efx, unsigned flr);
-void efx_siena_sriov_reset(struct efx_nic *efx);
-void efx_siena_sriov_fini(struct efx_nic *efx);
 void efx_fini_sriov(void);
 
-/* EF10 */
-static inline bool efx_ef10_sriov_wanted(struct efx_nic *efx) { return false; }
-static inline int efx_ef10_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; }
-static inline void efx_ef10_sriov_mac_address_changed(struct efx_nic *efx) {}
-static inline void efx_ef10_sriov_reset(struct efx_nic *efx) {}
-static inline void efx_ef10_sriov_fini(struct efx_nic *efx) {}
-
-#else
-
-/* SIENA */
-static inline bool efx_siena_sriov_wanted(struct efx_nic *efx) { return false; }
-static inline bool efx_siena_sriov_enabled(struct efx_nic *efx) { return false; }
-static inline unsigned int efx_vf_size(struct efx_nic *efx) { return 0; }
-static inline int efx_init_sriov(void) { return 0; }
-static inline void efx_siena_sriov_probe(struct efx_nic *efx) {}
-static inline int efx_siena_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; }
-static inline void efx_siena_sriov_mac_address_changed(struct efx_nic *efx) {}
-static inline void efx_siena_sriov_tx_flush_done(struct efx_nic *efx,
-                                                efx_qword_t *event) {}
-static inline void efx_siena_sriov_rx_flush_done(struct efx_nic *efx,
-                                                efx_qword_t *event) {}
-static inline void efx_siena_sriov_event(struct efx_channel *channel,
-                                        efx_qword_t *event) {}
-static inline void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx,
-                                                 unsigned dmaq) {}
-static inline void efx_siena_sriov_flr(struct efx_nic *efx, unsigned flr) {}
-static inline void efx_siena_sriov_reset(struct efx_nic *efx) {}
-static inline void efx_siena_sriov_fini(struct efx_nic *efx) {}
-static inline void efx_fini_sriov(void) {}
-
-/* EF10 */
-static inline bool efx_ef10_sriov_wanted(struct efx_nic *efx) { return false; }
-static inline int efx_ef10_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; }
-static inline void efx_ef10_sriov_mac_address_changed(struct efx_nic *efx) {}
-static inline void efx_ef10_sriov_reset(struct efx_nic *efx) {}
-static inline void efx_ef10_sriov_fini(struct efx_nic *efx) {}
-
-#endif
-
-/* FALCON */
-static inline bool efx_falcon_sriov_wanted(struct efx_nic *efx) { return false; }
-static inline int efx_falcon_sriov_init(struct efx_nic *efx) { return -EOPNOTSUPP; }
-static inline void efx_falcon_sriov_mac_address_changed(struct efx_nic *efx) {}
-static inline void efx_falcon_sriov_reset(struct efx_nic *efx) {}
-static inline void efx_falcon_sriov_fini(struct efx_nic *efx) {}
-
-int efx_siena_sriov_set_vf_mac(struct net_device *dev, int vf, u8 *mac);
-int efx_siena_sriov_set_vf_vlan(struct net_device *dev, int vf,
-                               u16 vlan, u8 qos);
-int efx_siena_sriov_get_vf_config(struct net_device *dev, int vf,
-                                 struct ifla_vf_info *ivf);
-int efx_siena_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf,
-                                   bool spoofchk);
-
 struct ethtool_ts_info;
 int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel);
 void efx_ptp_defer_probe_with_channel(struct efx_nic *efx);
@@ -654,6 +584,7 @@ extern const struct efx_nic_type falcon_a1_nic_type;
 extern const struct efx_nic_type falcon_b0_nic_type;
 extern const struct efx_nic_type siena_a0_nic_type;
 extern const struct efx_nic_type efx_hunt_a0_nic_type;
+extern const struct efx_nic_type efx_hunt_a0_vf_nic_type;
 
 /**************************************************************************
  *
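
The large block of Siena SR-IOV inlines and !CONFIG_SFC_SRIOV stubs removed above does not disappear from the driver: judging by the new #include "siena_sriov.h" lines in the siena.c and siena_sriov.c hunks below, it moves into a Siena-specific header so that nic.h no longer multiplexes every architecture. For reference, the shape of one relocated helper, taken verbatim from the removed lines:

static inline bool efx_siena_sriov_enabled(struct efx_nic *efx)
{
        return efx->vf_init_count != 0;
}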
index a2e9aee05cdde8dc5af6528101862868abe00075..ad62615a93dcfe238fd23cf0487e5c6ea60330b7 100644 (file)
@@ -306,7 +306,7 @@ struct efx_ptp_data {
        struct work_struct pps_work;
        struct workqueue_struct *pps_workwq;
        bool nic_ts_enabled;
-       MCDI_DECLARE_BUF(txbuf, MC_CMD_PTP_IN_TRANSMIT_LENMAX);
+       _MCDI_DECLARE_BUF(txbuf, MC_CMD_PTP_IN_TRANSMIT_LENMAX);
 
        unsigned int good_syncs;
        unsigned int fast_syncs;
@@ -389,11 +389,8 @@ size_t efx_ptp_update_stats(struct efx_nic *efx, u64 *stats)
        MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
        rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
                          outbuf, sizeof(outbuf), NULL);
-       if (rc) {
-               netif_err(efx, hw, efx->net_dev,
-                         "MC_CMD_PTP_OP_STATUS failed (%d)\n", rc);
+       if (rc)
                memset(outbuf, 0, sizeof(outbuf));
-       }
        efx_nic_update_stats(efx_ptp_stat_desc, PTP_STAT_COUNT,
                             efx_ptp_stat_mask,
                             stats, _MCDI_PTR(outbuf, 0), false);
@@ -490,14 +487,20 @@ static int efx_ptp_get_attributes(struct efx_nic *efx)
         */
        MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_GET_ATTRIBUTES);
        MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
-       rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
-                         outbuf, sizeof(outbuf), &out_len);
-       if (rc == 0)
+       rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+                               outbuf, sizeof(outbuf), &out_len);
+       if (rc == 0) {
                fmt = MCDI_DWORD(outbuf, PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT);
-       else if (rc == -EINVAL)
+       } else if (rc == -EINVAL) {
                fmt = MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS;
-       else
+       } else if (rc == -EPERM) {
+               netif_info(efx, probe, efx->net_dev, "no PTP support\n");
+               return rc;
+       } else {
+               efx_mcdi_display_error(efx, MC_CMD_PTP, sizeof(inbuf),
+                                      outbuf, sizeof(outbuf), rc);
                return rc;
+       }
 
        if (fmt == MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION) {
                ptp->ns_to_nic_time = efx_ptp_ns_to_s27;
@@ -541,8 +544,8 @@ static int efx_ptp_get_timestamp_corrections(struct efx_nic *efx)
                       MC_CMD_PTP_OP_GET_TIMESTAMP_CORRECTIONS);
        MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
 
-       rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
-                         outbuf, sizeof(outbuf), NULL);
+       rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+                               outbuf, sizeof(outbuf), NULL);
        if (rc == 0) {
                efx->ptp_data->ts_corrections.tx = MCDI_DWORD(outbuf,
                        PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT);
@@ -558,6 +561,8 @@ static int efx_ptp_get_timestamp_corrections(struct efx_nic *efx)
                efx->ptp_data->ts_corrections.pps_out = 0;
                efx->ptp_data->ts_corrections.pps_in = 0;
        } else {
+               efx_mcdi_display_error(efx, MC_CMD_PTP, sizeof(inbuf), outbuf,
+                                      sizeof(outbuf), rc);
                return rc;
        }
 
@@ -568,7 +573,7 @@ static int efx_ptp_get_timestamp_corrections(struct efx_nic *efx)
 static int efx_ptp_enable(struct efx_nic *efx)
 {
        MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_ENABLE_LEN);
-       MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, 0);
+       MCDI_DECLARE_BUF_ERR(outbuf);
        int rc;
 
        MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ENABLE);
@@ -596,7 +601,7 @@ static int efx_ptp_enable(struct efx_nic *efx)
 static int efx_ptp_disable(struct efx_nic *efx)
 {
        MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_DISABLE_LEN);
-       MCDI_DECLARE_BUF_OUT_OR_ERR(outbuf, 0);
+       MCDI_DECLARE_BUF_ERR(outbuf);
        int rc;
 
        MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_DISABLE);
@@ -604,7 +609,12 @@ static int efx_ptp_disable(struct efx_nic *efx)
        rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
                                outbuf, sizeof(outbuf), NULL);
        rc = (rc == -EALREADY) ? 0 : rc;
-       if (rc)
+       /* If we get ENOSYS or EPERM, the NIC doesn't support PTP, and thus
+        * this function should only have been called during probe.
+        */
+       if (rc == -ENOSYS || rc == -EPERM)
+               netif_info(efx, probe, efx->net_dev, "no PTP support\n");
+       else if (rc)
                efx_mcdi_display_error(efx, MC_CMD_PTP,
                                       MC_CMD_PTP_IN_DISABLE_LEN,
                                       outbuf, sizeof(outbuf), rc);
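
efx_ptp_disable() now distinguishes three outcomes: -EALREADY means the clock was already in the requested state and counts as success, -ENOSYS/-EPERM mean the adapter simply has no PTP support (informational only, since this runs at probe time), and anything else is a real error worth logging. The same triage, folded into an assumed helper:

static int example_ptp_triage(struct efx_nic *efx, int rc)
{
        if (rc == -EALREADY)                    /* already in requested state */
                return 0;
        if (rc == -ENOSYS || rc == -EPERM) {    /* adapter has no PTP */
                netif_info(efx, probe, efx->net_dev, "no PTP support\n");
                return rc;
        }
        return rc;                              /* genuine error: caller logs */
}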
index f12c811938d26b6450c536fcc66d92b3d7d3fa73..b323b9167526f6f48da1e13da8fbc54fd4daa9e5 100644 (file)
@@ -25,6 +25,7 @@
 #include "mcdi.h"
 #include "mcdi_pcol.h"
 #include "selftest.h"
+#include "siena_sriov.h"
 
 /* Hardware control for SFC9000 family including SFL9021 (aka Siena). */
 
@@ -306,7 +307,9 @@ static int siena_probe_nic(struct efx_nic *efx)
        if (rc)
                goto fail5;
 
+#ifdef CONFIG_SFC_SRIOV
        efx_siena_sriov_probe(efx);
+#endif
        efx_ptp_defer_probe_with_channel(efx);
 
        return 0;
@@ -321,7 +324,8 @@ fail1:
        return rc;
 }
 
-static void siena_rx_push_rss_config(struct efx_nic *efx)
+static int siena_rx_push_rss_config(struct efx_nic *efx, bool user,
+                                   const u32 *rx_indir_table)
 {
        efx_oword_t temp;
 
@@ -343,7 +347,11 @@ static void siena_rx_push_rss_config(struct efx_nic *efx)
               FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8);
        efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3);
 
+       memcpy(efx->rx_indir_table, rx_indir_table,
+              sizeof(efx->rx_indir_table));
        efx_farch_rx_push_indir_table(efx);
+
+       return 0;
 }
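
rx_push_rss_config() changes signature across all NIC types: it now receives the indirection table to push plus a user flag marking ethtool-driven updates, and returns an error code (EF10 can fail here, for instance when allocating an exclusive RSS context). Sketch of a caller under the new contract; the wrapper name is illustrative:

static int example_set_rss(struct efx_nic *efx, const u32 *indir)
{
        /* user == true marks an ethtool-driven update; the op may now fail */
        return efx->type->rx_push_rss_config(efx, true, indir);
}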
 
 /* This call performs hardware-specific global initialisation, such as
@@ -386,7 +394,7 @@ static int siena_init_nic(struct efx_nic *efx)
                            EFX_RX_USR_BUF_SIZE >> 5);
        efx_writeo(efx, &temp, FR_AZ_RX_CFG);
 
-       siena_rx_push_rss_config(efx);
+       siena_rx_push_rss_config(efx, false, efx->rx_indir_table);
 
        /* Enable event logging */
        rc = efx_mcdi_log_ctrl(efx, true, false, 0);
@@ -909,6 +917,8 @@ fail:
  */
 
 const struct efx_nic_type siena_a0_nic_type = {
+       .is_vf = false,
+       .mem_bar = EFX_MEM_BAR,
        .mem_map_size = siena_mem_map_size,
        .probe = siena_probe_nic,
        .remove = siena_remove_nic,
@@ -996,11 +1006,22 @@ const struct efx_nic_type siena_a0_nic_type = {
 #endif
        .ptp_write_host_time = siena_ptp_write_host_time,
        .ptp_set_ts_config = siena_ptp_set_ts_config,
+#ifdef CONFIG_SFC_SRIOV
+       .sriov_configure = efx_siena_sriov_configure,
        .sriov_init = efx_siena_sriov_init,
        .sriov_fini = efx_siena_sriov_fini,
-       .sriov_mac_address_changed = efx_siena_sriov_mac_address_changed,
        .sriov_wanted = efx_siena_sriov_wanted,
        .sriov_reset = efx_siena_sriov_reset,
+       .sriov_flr = efx_siena_sriov_flr,
+       .sriov_set_vf_mac = efx_siena_sriov_set_vf_mac,
+       .sriov_set_vf_vlan = efx_siena_sriov_set_vf_vlan,
+       .sriov_set_vf_spoofchk = efx_siena_sriov_set_vf_spoofchk,
+       .sriov_get_vf_config = efx_siena_sriov_get_vf_config,
+       .vswitching_probe = efx_port_dummy_op_int,
+       .vswitching_restore = efx_port_dummy_op_int,
+       .vswitching_remove = efx_port_dummy_op_void,
+       .set_mac_address = efx_siena_sriov_mac_address_changed,
+#endif
 
        .revision = EFX_REV_SIENA_A0,
        .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
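
Siena fills the new vswitching hooks with the driver's existing dummy ops (efx_port_dummy_op_int/efx_port_dummy_op_void), so shared probe and restore paths can call them unconditionally; on Siena, which has no vswitching to set up, they are no-ops that report success. Illustrative caller:

static int example_probe_vswitching(struct efx_nic *efx)
{
        /* On Siena this resolves to efx_port_dummy_op_int and returns 0 */
        return efx->type->vswitching_probe(efx);
}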
index fe83430796fd04f3aa502de8f324f107380a680f..da7b94f34604936c3c3ea945409295fa26beea0a 100644 (file)
@@ -16,6 +16,7 @@
 #include "filter.h"
 #include "mcdi_pcol.h"
 #include "farch_regs.h"
+#include "siena_sriov.h"
 #include "vfdi.h"
 
 /* Number of longs required to track all the VIs in a VF */
@@ -38,7 +39,7 @@ enum efx_vf_tx_filter_mode {
 };
 
 /**
- * struct efx_vf - Back-end resource and protocol state for a PCI VF
+ * struct siena_vf - Back-end resource and protocol state for a PCI VF
  * @efx: The Efx NIC owning this VF
  * @pci_rid: The PCI requester ID for this VF
  * @pci_name: The PCI name (formatted address) of this VF
@@ -83,7 +84,7 @@ enum efx_vf_tx_filter_mode {
  * @rxq_retry_count: Number of receive queues in @rxq_retry_mask.
  * @reset_work: Work item to schedule a VF reset.
  */
-struct efx_vf {
+struct siena_vf {
        struct efx_nic *efx;
        unsigned int pci_rid;
        char pci_name[13]; /* dddd:bb:dd.f */
@@ -189,7 +190,7 @@ MODULE_PARM_DESC(max_vfs,
  */
 static struct workqueue_struct *vfdi_workqueue;
 
-static unsigned abs_index(struct efx_vf *vf, unsigned index)
+static unsigned abs_index(struct siena_vf *vf, unsigned index)
 {
        return EFX_VI_BASE + vf->index * efx_vf_size(vf->efx) + index;
 }
@@ -207,8 +208,8 @@ static int efx_siena_sriov_cmd(struct efx_nic *efx, bool enable,
        MCDI_SET_DWORD(inbuf, SRIOV_IN_VI_BASE, EFX_VI_BASE);
        MCDI_SET_DWORD(inbuf, SRIOV_IN_VF_COUNT, efx->vf_count);
 
-       rc = efx_mcdi_rpc(efx, MC_CMD_SRIOV, inbuf, MC_CMD_SRIOV_IN_LEN,
-                         outbuf, MC_CMD_SRIOV_OUT_LEN, &outlen);
+       rc = efx_mcdi_rpc_quiet(efx, MC_CMD_SRIOV, inbuf, MC_CMD_SRIOV_IN_LEN,
+                               outbuf, MC_CMD_SRIOV_OUT_LEN, &outlen);
        if (rc)
                return rc;
        if (outlen < MC_CMD_SRIOV_OUT_LEN)
@@ -299,7 +300,7 @@ out:
 /* The TX filter is entirely controlled by this driver, and is modified
  * underneath the feet of the VF
  */
-static void efx_siena_sriov_reset_tx_filter(struct efx_vf *vf)
+static void efx_siena_sriov_reset_tx_filter(struct siena_vf *vf)
 {
        struct efx_nic *efx = vf->efx;
        struct efx_filter_spec filter;
@@ -343,7 +344,7 @@ static void efx_siena_sriov_reset_tx_filter(struct efx_vf *vf)
 }
 
 /* The RX filter is managed here on behalf of the VF driver */
-static void efx_siena_sriov_reset_rx_filter(struct efx_vf *vf)
+static void efx_siena_sriov_reset_rx_filter(struct siena_vf *vf)
 {
        struct efx_nic *efx = vf->efx;
        struct efx_filter_spec filter;
@@ -382,7 +383,7 @@ static void efx_siena_sriov_reset_rx_filter(struct efx_vf *vf)
        }
 }
 
-static void __efx_siena_sriov_update_vf_addr(struct efx_vf *vf)
+static void __efx_siena_sriov_update_vf_addr(struct siena_vf *vf)
 {
        struct efx_nic *efx = vf->efx;
        struct siena_nic_data *nic_data = efx->nic_data;
@@ -397,7 +398,7 @@ static void __efx_siena_sriov_update_vf_addr(struct efx_vf *vf)
  * local_page_list, either by acquiring local_lock or by running from
  * efx_siena_sriov_peer_work()
  */
-static void __efx_siena_sriov_push_vf_status(struct efx_vf *vf)
+static void __efx_siena_sriov_push_vf_status(struct siena_vf *vf)
 {
        struct efx_nic *efx = vf->efx;
        struct siena_nic_data *nic_data = efx->nic_data;
@@ -509,8 +510,9 @@ static bool bad_buf_count(unsigned buf_count, unsigned max_entry_count)
  * Optionally set VF index and VI index within the VF.
  */
 static bool map_vi_index(struct efx_nic *efx, unsigned abs_index,
-                        struct efx_vf **vf_out, unsigned *rel_index_out)
+                        struct siena_vf **vf_out, unsigned *rel_index_out)
 {
+       struct siena_nic_data *nic_data = efx->nic_data;
        unsigned vf_i;
 
        if (abs_index < EFX_VI_BASE)
@@ -520,13 +522,13 @@ static bool map_vi_index(struct efx_nic *efx, unsigned abs_index,
                return true;
 
        if (vf_out)
-               *vf_out = efx->vf + vf_i;
+               *vf_out = nic_data->vf + vf_i;
        if (rel_index_out)
                *rel_index_out = abs_index % efx_vf_size(efx);
        return false;
 }
 
-static int efx_vfdi_init_evq(struct efx_vf *vf)
+static int efx_vfdi_init_evq(struct siena_vf *vf)
 {
        struct efx_nic *efx = vf->efx;
        struct vfdi_req *req = vf->buf.addr;
@@ -567,7 +569,7 @@ static int efx_vfdi_init_evq(struct efx_vf *vf)
        return VFDI_RC_SUCCESS;
 }
 
-static int efx_vfdi_init_rxq(struct efx_vf *vf)
+static int efx_vfdi_init_rxq(struct siena_vf *vf)
 {
        struct efx_nic *efx = vf->efx;
        struct vfdi_req *req = vf->buf.addr;
@@ -608,7 +610,7 @@ static int efx_vfdi_init_rxq(struct efx_vf *vf)
        return VFDI_RC_SUCCESS;
 }
 
-static int efx_vfdi_init_txq(struct efx_vf *vf)
+static int efx_vfdi_init_txq(struct siena_vf *vf)
 {
        struct efx_nic *efx = vf->efx;
        struct vfdi_req *req = vf->buf.addr;
@@ -655,7 +657,7 @@ static int efx_vfdi_init_txq(struct efx_vf *vf)
 }
 
 /* Returns true when efx_vfdi_fini_all_queues should wake */
-static bool efx_vfdi_flush_wake(struct efx_vf *vf)
+static bool efx_vfdi_flush_wake(struct siena_vf *vf)
 {
        /* Ensure that all updates are visible to efx_vfdi_fini_all_queues() */
        smp_mb();
@@ -664,7 +666,7 @@ static bool efx_vfdi_flush_wake(struct efx_vf *vf)
                atomic_read(&vf->rxq_retry_count);
 }
 
-static void efx_vfdi_flush_clear(struct efx_vf *vf)
+static void efx_vfdi_flush_clear(struct siena_vf *vf)
 {
        memset(vf->txq_mask, 0, sizeof(vf->txq_mask));
        vf->txq_count = 0;
@@ -674,7 +676,7 @@ static void efx_vfdi_flush_clear(struct efx_vf *vf)
        atomic_set(&vf->rxq_retry_count, 0);
 }
 
-static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
+static int efx_vfdi_fini_all_queues(struct siena_vf *vf)
 {
        struct efx_nic *efx = vf->efx;
        efx_oword_t reg;
@@ -757,7 +759,7 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
        return timeout ? 0 : VFDI_RC_ETIMEDOUT;
 }
 
-static int efx_vfdi_insert_filter(struct efx_vf *vf)
+static int efx_vfdi_insert_filter(struct siena_vf *vf)
 {
        struct efx_nic *efx = vf->efx;
        struct siena_nic_data *nic_data = efx->nic_data;
@@ -789,7 +791,7 @@ static int efx_vfdi_insert_filter(struct efx_vf *vf)
        return VFDI_RC_SUCCESS;
 }
 
-static int efx_vfdi_remove_all_filters(struct efx_vf *vf)
+static int efx_vfdi_remove_all_filters(struct siena_vf *vf)
 {
        struct efx_nic *efx = vf->efx;
        struct siena_nic_data *nic_data = efx->nic_data;
@@ -801,7 +803,7 @@ static int efx_vfdi_remove_all_filters(struct efx_vf *vf)
        return VFDI_RC_SUCCESS;
 }
 
-static int efx_vfdi_set_status_page(struct efx_vf *vf)
+static int efx_vfdi_set_status_page(struct siena_vf *vf)
 {
        struct efx_nic *efx = vf->efx;
        struct siena_nic_data *nic_data = efx->nic_data;
@@ -846,7 +848,7 @@ static int efx_vfdi_set_status_page(struct efx_vf *vf)
        return VFDI_RC_SUCCESS;
 }
 
-static int efx_vfdi_clear_status_page(struct efx_vf *vf)
+static int efx_vfdi_clear_status_page(struct siena_vf *vf)
 {
        mutex_lock(&vf->status_lock);
        vf->status_addr = 0;
@@ -855,7 +857,7 @@ static int efx_vfdi_clear_status_page(struct efx_vf *vf)
        return VFDI_RC_SUCCESS;
 }
 
-typedef int (*efx_vfdi_op_t)(struct efx_vf *vf);
+typedef int (*efx_vfdi_op_t)(struct siena_vf *vf);
 
 static const efx_vfdi_op_t vfdi_ops[VFDI_OP_LIMIT] = {
        [VFDI_OP_INIT_EVQ] = efx_vfdi_init_evq,
@@ -870,7 +872,7 @@ static const efx_vfdi_op_t vfdi_ops[VFDI_OP_LIMIT] = {
 
 static void efx_siena_sriov_vfdi(struct work_struct *work)
 {
-       struct efx_vf *vf = container_of(work, struct efx_vf, req);
+       struct siena_vf *vf = container_of(work, struct siena_vf, req);
        struct efx_nic *efx = vf->efx;
        struct vfdi_req *req = vf->buf.addr;
        struct efx_memcpy_req copy[2];
@@ -936,7 +938,8 @@ static void efx_siena_sriov_vfdi(struct work_struct *work)
  * event ring in guest memory with VFDI reset events, then (re-initialise) the
  * event queue to raise an interrupt. The guest driver will then recover.
  */
-static void efx_siena_sriov_reset_vf(struct efx_vf *vf,
+static void efx_siena_sriov_reset_vf(struct siena_vf *vf,
                                     struct efx_buffer *buffer)
 {
        struct efx_nic *efx = vf->efx;
@@ -1006,7 +1009,7 @@ static void efx_siena_sriov_reset_vf(struct efx_vf *vf,
 
 static void efx_siena_sriov_reset_vf_work(struct work_struct *work)
 {
-       struct efx_vf *vf = container_of(work, struct efx_vf, req);
+       struct siena_vf *vf = container_of(work, struct siena_vf, req);
        struct efx_nic *efx = vf->efx;
        struct efx_buffer buf;
 
@@ -1055,8 +1058,10 @@ void efx_siena_sriov_probe(struct efx_nic *efx)
        if (!max_vfs)
                return;
 
-       if (efx_siena_sriov_cmd(efx, false, &efx->vi_scale, &count))
+       if (efx_siena_sriov_cmd(efx, false, &efx->vi_scale, &count)) {
+               netif_info(efx, probe, efx->net_dev, "no SR-IOV VFs probed\n");
                return;
+       }
        if (count > 0 && count > max_vfs)
                count = max_vfs;
 
@@ -1077,7 +1082,7 @@ static void efx_siena_sriov_peer_work(struct work_struct *data)
                                                       peer_work);
        struct efx_nic *efx = nic_data->efx;
        struct vfdi_status *vfdi_status = nic_data->vfdi_status.addr;
-       struct efx_vf *vf;
+       struct siena_vf *vf;
        struct efx_local_addr *local_addr;
        struct vfdi_endpoint *peer;
        struct efx_endpoint_page *epp;
@@ -1099,7 +1104,7 @@ static void efx_siena_sriov_peer_work(struct work_struct *data)
        peer_space = ARRAY_SIZE(vfdi_status->peers) - 1;
        peer_count = 1;
        for (pos = 0; pos < efx->vf_count; ++pos) {
-               vf = efx->vf + pos;
+               vf = nic_data->vf + pos;
 
                mutex_lock(&vf->status_lock);
                if (vf->rx_filtering && !is_zero_ether_addr(vf->addr.mac_addr)) {
@@ -1155,7 +1160,7 @@ static void efx_siena_sriov_peer_work(struct work_struct *data)
 
        /* Finally, push the pages */
        for (pos = 0; pos < efx->vf_count; ++pos) {
-               vf = efx->vf + pos;
+               vf = nic_data->vf + pos;
 
                mutex_lock(&vf->status_lock);
                if (vf->status_addr)
@@ -1190,14 +1195,16 @@ static void efx_siena_sriov_free_local(struct efx_nic *efx)
 static int efx_siena_sriov_vf_alloc(struct efx_nic *efx)
 {
        unsigned index;
-       struct efx_vf *vf;
+       struct siena_vf *vf;
+       struct siena_nic_data *nic_data = efx->nic_data;
 
-       efx->vf = kzalloc(sizeof(struct efx_vf) * efx->vf_count, GFP_KERNEL);
-       if (!efx->vf)
+       nic_data->vf = kcalloc(efx->vf_count, sizeof(*nic_data->vf),
+                              GFP_KERNEL);
+       if (!nic_data->vf)
                return -ENOMEM;
 
        for (index = 0; index < efx->vf_count; ++index) {
-               vf = efx->vf + index;
+               vf = nic_data->vf + index;
 
                vf->efx = efx;
                vf->index = index;
@@ -1216,11 +1223,12 @@ static int efx_siena_sriov_vf_alloc(struct efx_nic *efx)
 
 static void efx_siena_sriov_vfs_fini(struct efx_nic *efx)
 {
-       struct efx_vf *vf;
+       struct siena_nic_data *nic_data = efx->nic_data;
+       struct siena_vf *vf;
        unsigned int pos;
 
        for (pos = 0; pos < efx->vf_count; ++pos) {
-               vf = efx->vf + pos;
+               vf = nic_data->vf + pos;
 
                efx_nic_free_buffer(efx, &vf->buf);
                kfree(vf->peer_page_addrs);
@@ -1237,7 +1245,7 @@ static int efx_siena_sriov_vfs_init(struct efx_nic *efx)
        struct siena_nic_data *nic_data = efx->nic_data;
        unsigned index, devfn, sriov, buftbl_base;
        u16 offset, stride;
-       struct efx_vf *vf;
+       struct siena_vf *vf;
        int rc;
 
        sriov = pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV);
@@ -1250,7 +1258,7 @@ static int efx_siena_sriov_vfs_init(struct efx_nic *efx)
        buftbl_base = nic_data->vf_buftbl_base;
        devfn = pci_dev->devfn + offset;
        for (index = 0; index < efx->vf_count; ++index) {
-               vf = efx->vf + index;
+               vf = nic_data->vf + index;
 
                /* Reserve buffer entries */
                vf->buftbl_base = buftbl_base;
@@ -1350,7 +1358,7 @@ fail_pci:
 fail_vfs:
        cancel_work_sync(&nic_data->peer_work);
        efx_siena_sriov_free_local(efx);
-       kfree(efx->vf);
+       kfree(nic_data->vf);
 fail_alloc:
        efx_nic_free_buffer(efx, &nic_data->vfdi_status);
 fail_status:
@@ -1361,7 +1369,7 @@ fail_cmd:
 
 void efx_siena_sriov_fini(struct efx_nic *efx)
 {
-       struct efx_vf *vf;
+       struct siena_vf *vf;
        unsigned int pos;
        struct siena_nic_data *nic_data = efx->nic_data;
 
@@ -1377,7 +1385,7 @@ void efx_siena_sriov_fini(struct efx_nic *efx)
 
        /* Flush all reconfiguration work */
        for (pos = 0; pos < efx->vf_count; ++pos) {
-               vf = efx->vf + pos;
+               vf = nic_data->vf + pos;
                cancel_work_sync(&vf->req);
                cancel_work_sync(&vf->reset_work);
        }
@@ -1388,7 +1396,7 @@ void efx_siena_sriov_fini(struct efx_nic *efx)
        /* Tear down back-end state */
        efx_siena_sriov_vfs_fini(efx);
        efx_siena_sriov_free_local(efx);
-       kfree(efx->vf);
+       kfree(nic_data->vf);
        efx_nic_free_buffer(efx, &nic_data->vfdi_status);
        efx_siena_sriov_cmd(efx, false, NULL, NULL);
 }
@@ -1396,7 +1404,7 @@ void efx_siena_sriov_fini(struct efx_nic *efx)
 void efx_siena_sriov_event(struct efx_channel *channel, efx_qword_t *event)
 {
        struct efx_nic *efx = channel->efx;
-       struct efx_vf *vf;
+       struct siena_vf *vf;
        unsigned qid, seq, type, data;
 
        qid = EFX_QWORD_FIELD(*event, FSF_CZ_USER_QID);
@@ -1452,11 +1460,12 @@ error:
 
 void efx_siena_sriov_flr(struct efx_nic *efx, unsigned vf_i)
 {
-       struct efx_vf *vf;
+       struct siena_nic_data *nic_data = efx->nic_data;
+       struct siena_vf *vf;
 
        if (vf_i > efx->vf_init_count)
                return;
-       vf = efx->vf + vf_i;
+       vf = nic_data->vf + vf_i;
        netif_info(efx, hw, efx->net_dev,
                   "FLR on VF %s\n", vf->pci_name);
 
@@ -1467,21 +1476,23 @@ void efx_siena_sriov_flr(struct efx_nic *efx, unsigned vf_i)
        vf->evq0_count = 0;
 }
 
-void efx_siena_sriov_mac_address_changed(struct efx_nic *efx)
+int efx_siena_sriov_mac_address_changed(struct efx_nic *efx)
 {
        struct siena_nic_data *nic_data = efx->nic_data;
        struct vfdi_status *vfdi_status = nic_data->vfdi_status.addr;
 
        if (!efx->vf_init_count)
-               return;
+               return 0;
        ether_addr_copy(vfdi_status->peers[0].mac_addr,
                        efx->net_dev->dev_addr);
        queue_work(vfdi_workqueue, &nic_data->peer_work);
+
+       return 0;
 }
 
 void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
 {
-       struct efx_vf *vf;
+       struct siena_vf *vf;
        unsigned queue, qid;
 
        queue = EFX_QWORD_FIELD(*event,  FSF_AZ_DRIVER_EV_SUBDATA);
@@ -1500,7 +1511,7 @@ void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
 
 void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
 {
-       struct efx_vf *vf;
+       struct siena_vf *vf;
        unsigned ev_failed, queue, qid;
 
        queue = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
@@ -1525,7 +1536,7 @@ void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
 /* Called from napi. Schedule the reset work item */
 void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq)
 {
-       struct efx_vf *vf;
+       struct siena_vf *vf;
        unsigned int rel;
 
        if (map_vi_index(efx, dmaq, &vf, &rel))
@@ -1541,9 +1552,10 @@ void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq)
 /* Reset all VFs */
 void efx_siena_sriov_reset(struct efx_nic *efx)
 {
+       struct siena_nic_data *nic_data = efx->nic_data;
        unsigned int vf_i;
        struct efx_buffer buf;
-       struct efx_vf *vf;
+       struct siena_vf *vf;
 
        ASSERT_RTNL();
 
@@ -1557,7 +1569,7 @@ void efx_siena_sriov_reset(struct efx_nic *efx)
                return;
 
        for (vf_i = 0; vf_i < efx->vf_init_count; ++vf_i) {
-               vf = efx->vf + vf_i;
+               vf = nic_data->vf + vf_i;
                efx_siena_sriov_reset_vf(vf, &buf);
        }
 
@@ -1573,7 +1585,6 @@ int efx_init_sriov(void)
        vfdi_workqueue = create_singlethread_workqueue("sfc_vfdi");
        if (!vfdi_workqueue)
                return -ENOMEM;
-
        return 0;
 }
 
@@ -1582,14 +1593,14 @@ void efx_fini_sriov(void)
        destroy_workqueue(vfdi_workqueue);
 }
 
-int efx_siena_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
+int efx_siena_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, u8 *mac)
 {
-       struct efx_nic *efx = netdev_priv(net_dev);
-       struct efx_vf *vf;
+       struct siena_nic_data *nic_data = efx->nic_data;
+       struct siena_vf *vf;
 
        if (vf_i >= efx->vf_init_count)
                return -EINVAL;
-       vf = efx->vf + vf_i;
+       vf = nic_data->vf + vf_i;
 
        mutex_lock(&vf->status_lock);
        ether_addr_copy(vf->addr.mac_addr, mac);
@@ -1599,16 +1610,16 @@ int efx_siena_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
        return 0;
 }
 
-int efx_siena_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i,
+int efx_siena_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i,
                                u16 vlan, u8 qos)
 {
-       struct efx_nic *efx = netdev_priv(net_dev);
-       struct efx_vf *vf;
+       struct siena_nic_data *nic_data = efx->nic_data;
+       struct siena_vf *vf;
        u16 tci;
 
        if (vf_i >= efx->vf_init_count)
                return -EINVAL;
-       vf = efx->vf + vf_i;
+       vf = nic_data->vf + vf_i;
 
        mutex_lock(&vf->status_lock);
        tci = (vlan & VLAN_VID_MASK) | ((qos & 0x7) << VLAN_PRIO_SHIFT);
@@ -1619,16 +1630,16 @@ int efx_siena_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i,
        return 0;
 }
 
-int efx_siena_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
+int efx_siena_sriov_set_vf_spoofchk(struct efx_nic *efx, int vf_i,
                                    bool spoofchk)
 {
-       struct efx_nic *efx = netdev_priv(net_dev);
-       struct efx_vf *vf;
+       struct siena_nic_data *nic_data = efx->nic_data;
+       struct siena_vf *vf;
        int rc;
 
        if (vf_i >= efx->vf_init_count)
                return -EINVAL;
-       vf = efx->vf + vf_i;
+       vf = nic_data->vf + vf_i;
 
        mutex_lock(&vf->txq_lock);
        if (vf->txq_count == 0) {
@@ -1643,16 +1654,16 @@ int efx_siena_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
        return rc;
 }
 
-int efx_siena_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
+int efx_siena_sriov_get_vf_config(struct efx_nic *efx, int vf_i,
                                  struct ifla_vf_info *ivi)
 {
-       struct efx_nic *efx = netdev_priv(net_dev);
-       struct efx_vf *vf;
+       struct siena_nic_data *nic_data = efx->nic_data;
+       struct siena_vf *vf;
        u16 tci;
 
        if (vf_i >= efx->vf_init_count)
                return -EINVAL;
-       vf = efx->vf + vf_i;
+       vf = nic_data->vf + vf_i;
 
        ivi->vf = vf_i;
        ether_addr_copy(ivi->mac, vf->addr.mac_addr);
@@ -1666,3 +1677,12 @@ int efx_siena_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
        return 0;
 }
 
+bool efx_siena_sriov_wanted(struct efx_nic *efx)
+{
+       return efx->vf_count != 0;
+}
+
+int efx_siena_sriov_configure(struct efx_nic *efx, int num_vfs)
+{
+       return 0;
+}
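
The wanted/configure stubs above round out a per-NIC-type SR-IOV interface,
letting callers dispatch through method pointers instead of calling Siena
functions directly.  A minimal sketch of such a method table, assuming the
efx_nic_type fields mirror the sriov_* callbacks used by sriov.c later in
this diff (hypothetical excerpt; all unrelated fields omitted):

	/* Hypothetical excerpt -- not part of this patch */
	static const struct efx_nic_type siena_sriov_ops_sketch = {
		.sriov_wanted		= efx_siena_sriov_wanted,
		.sriov_configure	= efx_siena_sriov_configure,
		.sriov_set_vf_mac	= efx_siena_sriov_set_vf_mac,
		.sriov_set_vf_vlan	= efx_siena_sriov_set_vf_vlan,
		.sriov_set_vf_spoofchk	= efx_siena_sriov_set_vf_spoofchk,
		.sriov_get_vf_config	= efx_siena_sriov_get_vf_config,
	};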
diff --git a/drivers/net/ethernet/sfc/siena_sriov.h b/drivers/net/ethernet/sfc/siena_sriov.h
new file mode 100644 (file)
index 0000000..d88d4da
--- /dev/null
@@ -0,0 +1,79 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2015 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef SIENA_SRIOV_H
+#define SIENA_SRIOV_H
+
+#include "net_driver.h"
+
+/* On the SFC9000 family each port is associated with 1 PCI physical
+ * function (PF) handled by sfc and a configurable number of virtual
+ * functions (VFs) that may be handled by some other driver, often in
+ * a VM guest.  The queue pointer registers are mapped in both PF and
+ * VF BARs such that an 8K region provides access to a single RX, TX
+ * and event queue (collectively a Virtual Interface, VI or VNIC).
+ *
+ * The PF has access to all 1024 VIs while VFs are mapped to VIs
+ * according to VI_BASE and VI_SCALE: VF i has access to VIs numbered
+ * in the range [VI_BASE + (i << VI_SCALE), VI_BASE + ((i + 1) << VI_SCALE)).
+ * The number of VIs and the VI_SCALE value are configurable but must
+ * be established at boot time by firmware.
+ */
+
+/* Maximum VI_SCALE parameter supported by Siena */
+#define EFX_VI_SCALE_MAX 6
+/* Base VI to use for SR-IOV. Must be aligned to (1 << EFX_VI_SCALE_MAX),
+ * so this is the smallest allowed value.
+ */
+#define EFX_VI_BASE 128U
+/* Maximum number of VFs allowed */
+#define EFX_VF_COUNT_MAX 127
+/* Limit EVQs on VFs to be only 8k to reduce buffer table reservation */
+#define EFX_MAX_VF_EVQ_SIZE 8192UL
+/* The number of buffer table entries reserved for each VI on a VF */
+#define EFX_VF_BUFTBL_PER_VI                                   \
+       ((EFX_MAX_VF_EVQ_SIZE + 2 * EFX_MAX_DMAQ_SIZE) *        \
+        sizeof(efx_qword_t) / EFX_BUF_SIZE)
+
+int efx_siena_sriov_configure(struct efx_nic *efx, int num_vfs);
+int efx_siena_sriov_init(struct efx_nic *efx);
+void efx_siena_sriov_fini(struct efx_nic *efx);
+int efx_siena_sriov_mac_address_changed(struct efx_nic *efx);
+bool efx_siena_sriov_wanted(struct efx_nic *efx);
+void efx_siena_sriov_reset(struct efx_nic *efx);
+void efx_siena_sriov_flr(struct efx_nic *efx, unsigned flr);
+
+int efx_siena_sriov_set_vf_mac(struct efx_nic *efx, int vf, u8 *mac);
+int efx_siena_sriov_set_vf_vlan(struct efx_nic *efx, int vf,
+                               u16 vlan, u8 qos);
+int efx_siena_sriov_set_vf_spoofchk(struct efx_nic *efx, int vf,
+                                   bool spoofchk);
+int efx_siena_sriov_get_vf_config(struct efx_nic *efx, int vf,
+                                 struct ifla_vf_info *ivf);
+
+#ifdef CONFIG_SFC_SRIOV
+
+static inline bool efx_siena_sriov_enabled(struct efx_nic *efx)
+{
+       return efx->vf_init_count != 0;
+}
+#else /* !CONFIG_SFC_SRIOV */
+static inline bool efx_siena_sriov_enabled(struct efx_nic *efx)
+{
+       return false;
+}
+#endif /* CONFIG_SFC_SRIOV */
+
+void efx_siena_sriov_probe(struct efx_nic *efx);
+void efx_siena_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event);
+void efx_siena_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event);
+void efx_siena_sriov_event(struct efx_channel *channel, efx_qword_t *event);
+void efx_siena_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq);
+
+#endif /* SIENA_SRIOV_H */
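
The VI_BASE/VI_SCALE mapping documented above is easiest to verify with
concrete numbers.  A minimal sketch, assuming vi_scale has already been
established by firmware (the helper name is illustrative, not part of this
header):

	/* Illustrative only: first VI owned by VF vf_i.  With
	 * vi_scale == 2, VF 3 owns VIs [128 + 12, 128 + 16),
	 * i.e. VIs 140..143.
	 */
	static inline unsigned int sketch_vf_vi_base(unsigned int vf_i,
						     unsigned int vi_scale)
	{
		return EFX_VI_BASE + (vf_i << vi_scale);
	}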
diff --git a/drivers/net/ethernet/sfc/sriov.c b/drivers/net/ethernet/sfc/sriov.c
new file mode 100644 (file)
index 0000000..816c446
--- /dev/null
@@ -0,0 +1,83 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2014-2015 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+#include <linux/module.h>
+#include "net_driver.h"
+#include "nic.h"
+#include "sriov.h"
+
+int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
+{
+       struct efx_nic *efx = netdev_priv(net_dev);
+
+       if (efx->type->sriov_set_vf_mac)
+               return efx->type->sriov_set_vf_mac(efx, vf_i, mac);
+       else
+               return -EOPNOTSUPP;
+}
+
+int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, u16 vlan,
+                         u8 qos)
+{
+       struct efx_nic *efx = netdev_priv(net_dev);
+
+       if (efx->type->sriov_set_vf_vlan) {
+               if ((vlan & ~VLAN_VID_MASK) ||
+                   (qos & ~(VLAN_PRIO_MASK >> VLAN_PRIO_SHIFT)))
+                       return -EINVAL;
+
+               return efx->type->sriov_set_vf_vlan(efx, vf_i, vlan, qos);
+       } else {
+               return -EOPNOTSUPP;
+       }
+}
+
+int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
+                             bool spoofchk)
+{
+       struct efx_nic *efx = netdev_priv(net_dev);
+
+       if (efx->type->sriov_set_vf_spoofchk)
+               return efx->type->sriov_set_vf_spoofchk(efx, vf_i, spoofchk);
+       else
+               return -EOPNOTSUPP;
+}
+
+int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
+                           struct ifla_vf_info *ivi)
+{
+       struct efx_nic *efx = netdev_priv(net_dev);
+
+       if (efx->type->sriov_get_vf_config)
+               return efx->type->sriov_get_vf_config(efx, vf_i, ivi);
+       else
+               return -EOPNOTSUPP;
+}
+
+int efx_sriov_set_vf_link_state(struct net_device *net_dev, int vf_i,
+                               int link_state)
+{
+       struct efx_nic *efx = netdev_priv(net_dev);
+
+       if (efx->type->sriov_set_vf_link_state)
+               return efx->type->sriov_set_vf_link_state(efx, vf_i,
+                                                         link_state);
+       else
+               return -EOPNOTSUPP;
+}
+
+int efx_sriov_get_phys_port_id(struct net_device *net_dev,
+                              struct netdev_phys_item_id *ppid)
+{
+       struct efx_nic *efx = netdev_priv(net_dev);
+
+       if (efx->type->sriov_get_phys_port_id)
+               return efx->type->sriov_get_phys_port_id(efx, ppid);
+       else
+               return -EOPNOTSUPP;
+}
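
Each wrapper above takes a net_device and falls back to -EOPNOTSUPP when
the bound NIC type leaves the hook NULL, so the functions are usable as
netdev ops directly.  A hedged sketch of that wiring (the ops table is
hypothetical; only the SR-IOV fields are shown):

	/* Hypothetical excerpt -- for illustration only */
	static const struct net_device_ops efx_netdev_ops_sketch = {
		.ndo_set_vf_mac		= efx_sriov_set_vf_mac,
		.ndo_set_vf_vlan	= efx_sriov_set_vf_vlan,
		.ndo_set_vf_spoofchk	= efx_sriov_set_vf_spoofchk,
		.ndo_get_vf_config	= efx_sriov_get_vf_config,
	};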
diff --git a/drivers/net/ethernet/sfc/sriov.h b/drivers/net/ethernet/sfc/sriov.h
new file mode 100644 (file)
index 0000000..400df52
--- /dev/null
@@ -0,0 +1,31 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2014-2015 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_SRIOV_H
+#define EFX_SRIOV_H
+
+#include "net_driver.h"
+
+#ifdef CONFIG_SFC_SRIOV
+
+int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac);
+int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, u16 vlan,
+                         u8 qos);
+int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
+                             bool spoofchk);
+int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
+                           struct ifla_vf_info *ivi);
+int efx_sriov_set_vf_link_state(struct net_device *net_dev, int vf_i,
+                               int link_state);
+int efx_sriov_get_phys_port_id(struct net_device *net_dev,
+                              struct netdev_phys_item_id *ppid);
+
+#endif /* CONFIG_SFC_SRIOV */
+
+#endif /* EFX_SRIOV_H */
index 7d3af190be55d9a4ea49ed903b261bec45a319ce..cec147d1d34f2bb0cd9013f2c58c4fc7642b1b23 100644 (file)
@@ -16,6 +16,7 @@ if STMMAC_ETH
 config STMMAC_PLATFORM
        tristate "STMMAC Platform bus support"
        depends on STMMAC_ETH
+       select MFD_SYSCON
        default y
        ---help---
          This selects the platform specific bus support for the stmmac driver.
@@ -26,6 +27,95 @@ config STMMAC_PLATFORM
 
          If unsure, say N.
 
+if STMMAC_PLATFORM
+
+config DWMAC_GENERIC
+       tristate "Generic driver for DWMAC"
+       default STMMAC_PLATFORM
+       ---help---
+         Generic DWMAC driver for platforms that don't require any
+         platform-specific code to function, or that use platform
+         data for setup.
+
+config DWMAC_IPQ806X
+       tristate "QCA IPQ806x DWMAC support"
+       default ARCH_QCOM
+       depends on OF
+       select MFD_SYSCON
+       help
+         Support for QCA IPQ806X DWMAC Ethernet.
+
+         This selects the IPQ806x SoC glue layer support for the stmmac
+         device driver. This driver does not use any of the hardware
+         acceleration features available on this SoC. Network devices
+         will behave like standard non-accelerated ethernet interfaces.
+
+config DWMAC_LPC18XX
+       tristate "NXP LPC18xx/43xx DWMAC support"
+       default ARCH_LPC18XX
+       depends on OF
+       select MFD_SYSCON
+       ---help---
+         Support for NXP LPC18xx/43xx DWMAC Ethernet.
+
+config DWMAC_MESON
+       tristate "Amlogic Meson dwmac support"
+       default ARCH_MESON
+       depends on OF
+       help
+         Support for Ethernet controller on Amlogic Meson SoCs.
+
+         This selects the Amlogic Meson SoC glue layer support for
+         the stmmac device driver. This driver is used for Meson6 and
+         Meson8 SoCs.
+
+config DWMAC_ROCKCHIP
+       tristate "Rockchip dwmac support"
+       default ARCH_ROCKCHIP
+       depends on OF
+       select MFD_SYSCON
+       help
+         Support for Ethernet controller on Rockchip RK3288 SoC.
+
+         This selects the Rockchip RK3288 SoC glue layer support for
+         the stmmac device driver.
+
+config DWMAC_SOCFPGA
+       tristate "SOCFPGA dwmac support"
+       default ARCH_SOCFPGA
+       depends on OF
+       select MFD_SYSCON
+       help
+         Support for the ethernet controller on Altera SOCFPGA.
+
+         This selects the Altera SOCFPGA SoC glue layer support
+         for the stmmac device driver. This driver is used for
+         Arria V and Cyclone V FPGA SoCs.
+
+config DWMAC_STI
+       tristate "STi GMAC support"
+       default ARCH_STI
+       depends on OF
+       select MFD_SYSCON
+       ---help---
+         Support for the ethernet controller on STi SoCs.
+
+         This selects the STi SoC glue layer support for the stmmac
+         device driver. This driver is used for the GMAC ethernet
+         controller on the STi series SoCs.
+
+config DWMAC_SUNXI
+       tristate "Allwinner GMAC support"
+       default ARCH_SUNXI
+       depends on OF
+       ---help---
+         Support for Allwinner A20/A31 GMAC ethernet controllers.
+
+         This selects the Allwinner SoC glue layer support for the
+         stmmac device driver. This driver is used for the A20/A31
+         GMAC ethernet controllers.
+endif
+
 config STMMAC_PCI
        tristate "STMMAC PCI bus support"
        depends on STMMAC_ETH && PCI
index 73c2715a27f39c9d545d3cead884e67ed5d82a0e..b3901616f4f653073ba0911d1fa6c0814de81bc5 100644 (file)
@@ -4,9 +4,17 @@ stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
              dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o     \
              mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o $(stmmac-y)
 
-obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o
-stmmac-platform-objs:= stmmac_platform.o dwmac-meson.o dwmac-sunxi.o   \
-                      dwmac-sti.o dwmac-socfpga.o dwmac-rk.o
+# Ordering matters. Generic driver must be last.
+obj-$(CONFIG_STMMAC_PLATFORM)  += stmmac-platform.o
+obj-$(CONFIG_DWMAC_IPQ806X)    += dwmac-ipq806x.o
+obj-$(CONFIG_DWMAC_LPC18XX)    += dwmac-lpc18xx.o
+obj-$(CONFIG_DWMAC_MESON)      += dwmac-meson.o
+obj-$(CONFIG_DWMAC_ROCKCHIP)   += dwmac-rk.o
+obj-$(CONFIG_DWMAC_SOCFPGA)    += dwmac-socfpga.o
+obj-$(CONFIG_DWMAC_STI)                += dwmac-sti.o
+obj-$(CONFIG_DWMAC_SUNXI)      += dwmac-sunxi.o
+obj-$(CONFIG_DWMAC_GENERIC)    += dwmac-generic.o
+stmmac-platform-objs:= stmmac_platform.o
 
 obj-$(CONFIG_STMMAC_PCI) += stmmac-pci.o
 stmmac-pci-objs:= stmmac_pci.o
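
The "Ordering matters. Generic driver must be last." comment above is
load-bearing: built-in platform drivers register in link order, and the
match table in dwmac-generic.c (next file) carries catch-all compatibles.
A hedged illustration of the failure mode it prevents (the DT fragment is
hypothetical):

	/* ethernet {
	 *         compatible = "amlogic,meson6-dwmac", "snps,dwmac";
	 * };
	 *
	 * If dwmac-generic were linked (and hence registered) first, its
	 * "snps,dwmac" entry would bind this device before the Meson glue
	 * driver had a chance to match the more specific string.
	 */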
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
new file mode 100644 (file)
index 0000000..e817a1a
--- /dev/null
@@ -0,0 +1,41 @@
+/*
+ * Generic DWMAC platform driver
+ *
+ * Copyright (C) 2007-2011  STMicroelectronics Ltd
+ * Copyright (C) 2015 Joachim Eastwood <manabian@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "stmmac.h"
+#include "stmmac_platform.h"
+
+static const struct of_device_id dwmac_generic_match[] = {
+       { .compatible = "st,spear600-gmac"},
+       { .compatible = "snps,dwmac-3.610"},
+       { .compatible = "snps,dwmac-3.70a"},
+       { .compatible = "snps,dwmac-3.710"},
+       { .compatible = "snps,dwmac"},
+       { }
+};
+MODULE_DEVICE_TABLE(of, dwmac_generic_match);
+
+static struct platform_driver dwmac_generic_driver = {
+       .probe  = stmmac_pltfr_probe,
+       .remove = stmmac_pltfr_remove,
+       .driver = {
+               .name           = STMMAC_RESOURCE_NAME,
+               .pm             = &stmmac_pltfr_pm_ops,
+               .of_match_table = of_match_ptr(dwmac_generic_match),
+       },
+};
+module_platform_driver(dwmac_generic_driver);
+
+MODULE_DESCRIPTION("Generic dwmac driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
new file mode 100644 (file)
index 0000000..7e3129e
--- /dev/null
@@ -0,0 +1,365 @@
+/*
+ * Qualcomm Atheros IPQ806x GMAC glue layer
+ *
+ * Copyright (C) 2015 The Linux Foundation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/phy.h>
+#include <linux/regmap.h>
+#include <linux/clk.h>
+#include <linux/reset.h>
+#include <linux/of_net.h>
+#include <linux/mfd/syscon.h>
+#include <linux/stmmac.h>
+#include <linux/of_mdio.h>
+#include <linux/module.h>
+
+#include "stmmac_platform.h"
+
+#define NSS_COMMON_CLK_GATE                    0x8
+#define NSS_COMMON_CLK_GATE_PTP_EN(x)          BIT(0x10 + x)
+#define NSS_COMMON_CLK_GATE_RGMII_RX_EN(x)     BIT(0x9 + (x * 2))
+#define NSS_COMMON_CLK_GATE_RGMII_TX_EN(x)     BIT(0x8 + (x * 2))
+#define NSS_COMMON_CLK_GATE_GMII_RX_EN(x)      BIT(0x4 + x)
+#define NSS_COMMON_CLK_GATE_GMII_TX_EN(x)      BIT(0x0 + x)
+
+#define NSS_COMMON_CLK_DIV0                    0xC
+#define NSS_COMMON_CLK_DIV_OFFSET(x)           (x * 8)
+#define NSS_COMMON_CLK_DIV_MASK                        0x7f
+
+#define NSS_COMMON_CLK_SRC_CTRL                        0x14
+#define NSS_COMMON_CLK_SRC_CTRL_OFFSET(x)      (1 << x)
+/* The mode is coded on 1 bit, but its meaning depends on the MAC ID:
+ * MAC0: QSGMII=0 RGMII=1
+ * MAC1: QSGMII=0 SGMII=0 RGMII=1
+ * MAC2 & MAC3: QSGMII=0 SGMII=1
+ */
+#define NSS_COMMON_CLK_SRC_CTRL_RGMII(x)       1
+#define NSS_COMMON_CLK_SRC_CTRL_SGMII(x)       ((x >= 2) ? 1 : 0)
+
+#define NSS_COMMON_MACSEC_CTL                  0x28
+#define NSS_COMMON_MACSEC_CTL_EXT_BYPASS_EN(x) (1 << x)
+
+#define NSS_COMMON_GMAC_CTL(x)                 (0x30 + (x * 4))
+#define NSS_COMMON_GMAC_CTL_CSYS_REQ           BIT(19)
+#define NSS_COMMON_GMAC_CTL_PHY_IFACE_SEL      BIT(16)
+#define NSS_COMMON_GMAC_CTL_IFG_LIMIT_OFFSET   8
+#define NSS_COMMON_GMAC_CTL_IFG_OFFSET         0
+#define NSS_COMMON_GMAC_CTL_IFG_MASK           0x3f
+
+#define NSS_COMMON_CLK_DIV_RGMII_1000          1
+#define NSS_COMMON_CLK_DIV_RGMII_100           9
+#define NSS_COMMON_CLK_DIV_RGMII_10            99
+#define NSS_COMMON_CLK_DIV_SGMII_1000          0
+#define NSS_COMMON_CLK_DIV_SGMII_100           4
+#define NSS_COMMON_CLK_DIV_SGMII_10            49
+
+#define QSGMII_PCS_MODE_CTL                    0x68
+#define QSGMII_PCS_MODE_CTL_AUTONEG_EN(x)      BIT((x * 8) + 7)
+
+#define QSGMII_PCS_CAL_LCKDT_CTL               0x120
+#define QSGMII_PCS_CAL_LCKDT_CTL_RST           BIT(19)
+
+/* Only GMAC1/2/3 support SGMII and their CTL registers are not contiguous */
+#define QSGMII_PHY_SGMII_CTL(x)                        ((x == 1) ? 0x134 : \
+                                                (0x13c + (4 * (x - 2))))
+#define QSGMII_PHY_CDR_EN                      BIT(0)
+#define QSGMII_PHY_RX_FRONT_EN                 BIT(1)
+#define QSGMII_PHY_RX_SIGNAL_DETECT_EN         BIT(2)
+#define QSGMII_PHY_TX_DRIVER_EN                        BIT(3)
+#define QSGMII_PHY_QSGMII_EN                   BIT(7)
+#define QSGMII_PHY_PHASE_LOOP_GAIN_OFFSET      12
+#define QSGMII_PHY_PHASE_LOOP_GAIN_MASK                0x7
+#define QSGMII_PHY_RX_DC_BIAS_OFFSET           18
+#define QSGMII_PHY_RX_DC_BIAS_MASK             0x3
+#define QSGMII_PHY_RX_INPUT_EQU_OFFSET         20
+#define QSGMII_PHY_RX_INPUT_EQU_MASK           0x3
+#define QSGMII_PHY_CDR_PI_SLEW_OFFSET          22
+#define QSGMII_PHY_CDR_PI_SLEW_MASK            0x3
+#define QSGMII_PHY_TX_DRV_AMP_OFFSET           28
+#define QSGMII_PHY_TX_DRV_AMP_MASK             0xf
+
+struct ipq806x_gmac {
+       struct platform_device *pdev;
+       struct regmap *nss_common;
+       struct regmap *qsgmii_csr;
+       uint32_t id;
+       struct clk *core_clk;
+       phy_interface_t phy_mode;
+};
+
+static int get_clk_div_sgmii(struct ipq806x_gmac *gmac, unsigned int speed)
+{
+       struct device *dev = &gmac->pdev->dev;
+       int div;
+
+       switch (speed) {
+       case SPEED_1000:
+               div = NSS_COMMON_CLK_DIV_SGMII_1000;
+               break;
+
+       case SPEED_100:
+               div = NSS_COMMON_CLK_DIV_SGMII_100;
+               break;
+
+       case SPEED_10:
+               div = NSS_COMMON_CLK_DIV_SGMII_10;
+               break;
+
+       default:
+               dev_err(dev, "Speed %dMbps not supported in SGMII\n", speed);
+               return -EINVAL;
+       }
+
+       return div;
+}
+
+static int get_clk_div_rgmii(struct ipq806x_gmac *gmac, unsigned int speed)
+{
+       struct device *dev = &gmac->pdev->dev;
+       int div;
+
+       switch (speed) {
+       case SPEED_1000:
+               div = NSS_COMMON_CLK_DIV_RGMII_1000;
+               break;
+
+       case SPEED_100:
+               div = NSS_COMMON_CLK_DIV_RGMII_100;
+               break;
+
+       case SPEED_10:
+               div = NSS_COMMON_CLK_DIV_RGMII_10;
+               break;
+
+       default:
+               dev_err(dev, "Speed %dMbps not supported in RGMII\n", speed);
+               return -EINVAL;
+       }
+
+       return div;
+}
+
+static int ipq806x_gmac_set_speed(struct ipq806x_gmac *gmac, unsigned int speed)
+{
+       uint32_t clk_bits, val;
+       int div;
+
+       switch (gmac->phy_mode) {
+       case PHY_INTERFACE_MODE_RGMII:
+               div = get_clk_div_rgmii(gmac, speed);
+               clk_bits = NSS_COMMON_CLK_GATE_RGMII_RX_EN(gmac->id) |
+                          NSS_COMMON_CLK_GATE_RGMII_TX_EN(gmac->id);
+               break;
+
+       case PHY_INTERFACE_MODE_SGMII:
+               div = get_clk_div_sgmii(gmac, speed);
+               clk_bits = NSS_COMMON_CLK_GATE_GMII_RX_EN(gmac->id) |
+                          NSS_COMMON_CLK_GATE_GMII_TX_EN(gmac->id);
+               break;
+
+       default:
+               dev_err(&gmac->pdev->dev, "Unsupported PHY mode: \"%s\"\n",
+                       phy_modes(gmac->phy_mode));
+               return -EINVAL;
+       }
+
+       /* Disable the clocks */
+       regmap_read(gmac->nss_common, NSS_COMMON_CLK_GATE, &val);
+       val &= ~clk_bits;
+       regmap_write(gmac->nss_common, NSS_COMMON_CLK_GATE, val);
+
+       /* Set the divider */
+       regmap_read(gmac->nss_common, NSS_COMMON_CLK_DIV0, &val);
+       val &= ~(NSS_COMMON_CLK_DIV_MASK
+                << NSS_COMMON_CLK_DIV_OFFSET(gmac->id));
+       val |= div << NSS_COMMON_CLK_DIV_OFFSET(gmac->id);
+       regmap_write(gmac->nss_common, NSS_COMMON_CLK_DIV0, val);
+
+       /* Re-enable the clocks */
+       regmap_read(gmac->nss_common, NSS_COMMON_CLK_GATE, &val);
+       val |= clk_bits;
+       regmap_write(gmac->nss_common, NSS_COMMON_CLK_GATE, val);
+
+       return 0;
+}
+
+static void *ipq806x_gmac_of_parse(struct ipq806x_gmac *gmac)
+{
+       struct device *dev = &gmac->pdev->dev;
+
+       gmac->phy_mode = of_get_phy_mode(dev->of_node);
+       if (gmac->phy_mode < 0) {
+               dev_err(dev, "missing phy mode property\n");
+               return ERR_PTR(-EINVAL);
+       }
+
+       if (of_property_read_u32(dev->of_node, "qcom,id", &gmac->id) < 0) {
+               dev_err(dev, "missing qcom id property\n");
+               return ERR_PTR(-EINVAL);
+       }
+
+       /* The GMACs are called 1 to 4 in the documentation, but to simplify the
+        * code and keep it consistent with the Linux convention, we'll number
+        * them from 0 to 3 here.
+        */
+       if (gmac->id > 3) {
+               dev_err(dev, "invalid gmac id\n");
+               return ERR_PTR(-EINVAL);
+       }
+
+       gmac->core_clk = devm_clk_get(dev, "stmmaceth");
+       if (IS_ERR(gmac->core_clk)) {
+               dev_err(dev, "missing stmmaceth clk property\n");
+               return gmac->core_clk;
+       }
+       clk_set_rate(gmac->core_clk, 266000000);
+
+       /* Setup the register map for the nss common registers */
+       gmac->nss_common = syscon_regmap_lookup_by_phandle(dev->of_node,
+                                                          "qcom,nss-common");
+       if (IS_ERR(gmac->nss_common)) {
+               dev_err(dev, "missing nss-common node\n");
+               return gmac->nss_common;
+       }
+
+       /* Setup the register map for the qsgmii csr registers */
+       gmac->qsgmii_csr = syscon_regmap_lookup_by_phandle(dev->of_node,
+                                                          "qcom,qsgmii-csr");
+       if (IS_ERR(gmac->qsgmii_csr)) {
+               dev_err(dev, "missing qsgmii-csr node\n");
+               return gmac->qsgmii_csr;
+       }
+
+       return NULL;
+}
+
+static void *ipq806x_gmac_setup(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct ipq806x_gmac *gmac;
+       int val;
+       void *err;
+
+       gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
+       if (!gmac)
+               return ERR_PTR(-ENOMEM);
+
+       gmac->pdev = pdev;
+
+       err = ipq806x_gmac_of_parse(gmac);
+       if (err) {
+               dev_err(dev, "device tree parsing error\n");
+               return err;
+       }
+
+       regmap_write(gmac->qsgmii_csr, QSGMII_PCS_CAL_LCKDT_CTL,
+                    QSGMII_PCS_CAL_LCKDT_CTL_RST);
+
+       /* Inter frame gap is set to 12 */
+       val = 12 << NSS_COMMON_GMAC_CTL_IFG_OFFSET |
+             12 << NSS_COMMON_GMAC_CTL_IFG_LIMIT_OFFSET;
+       /* We also initiate an AXI low power exit request */
+       val |= NSS_COMMON_GMAC_CTL_CSYS_REQ;
+       switch (gmac->phy_mode) {
+       case PHY_INTERFACE_MODE_RGMII:
+               val |= NSS_COMMON_GMAC_CTL_PHY_IFACE_SEL;
+               break;
+       case PHY_INTERFACE_MODE_SGMII:
+               val &= ~NSS_COMMON_GMAC_CTL_PHY_IFACE_SEL;
+               break;
+       default:
+               dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n",
+                       phy_modes(gmac->phy_mode));
+               return NULL;
+       }
+       regmap_write(gmac->nss_common, NSS_COMMON_GMAC_CTL(gmac->id), val);
+
+       /* Configure the clock src according to the mode */
+       regmap_read(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, &val);
+       val &= ~NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id);
+       switch (gmac->phy_mode) {
+       case PHY_INTERFACE_MODE_RGMII:
+               val |= NSS_COMMON_CLK_SRC_CTRL_RGMII(gmac->id) <<
+                       NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id);
+               break;
+       case PHY_INTERFACE_MODE_SGMII:
+               val |= NSS_COMMON_CLK_SRC_CTRL_SGMII(gmac->id) <<
+                       NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id);
+               break;
+       default:
+               dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n",
+                       phy_modes(gmac->phy_mode));
+               return NULL;
+       }
+       regmap_write(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, val);
+
+       /* Enable PTP clock */
+       regmap_read(gmac->nss_common, NSS_COMMON_CLK_GATE, &val);
+       val |= NSS_COMMON_CLK_GATE_PTP_EN(gmac->id);
+       regmap_write(gmac->nss_common, NSS_COMMON_CLK_GATE, val);
+
+       if (gmac->phy_mode == PHY_INTERFACE_MODE_SGMII) {
+               regmap_write(gmac->qsgmii_csr, QSGMII_PHY_SGMII_CTL(gmac->id),
+                            QSGMII_PHY_CDR_EN |
+                            QSGMII_PHY_RX_FRONT_EN |
+                            QSGMII_PHY_RX_SIGNAL_DETECT_EN |
+                            QSGMII_PHY_TX_DRIVER_EN |
+                            QSGMII_PHY_QSGMII_EN |
+                            0x4 << QSGMII_PHY_PHASE_LOOP_GAIN_OFFSET |
+                            0x3 << QSGMII_PHY_RX_DC_BIAS_OFFSET |
+                            0x1 << QSGMII_PHY_RX_INPUT_EQU_OFFSET |
+                            0x2 << QSGMII_PHY_CDR_PI_SLEW_OFFSET |
+                            0xC << QSGMII_PHY_TX_DRV_AMP_OFFSET);
+       }
+
+       return gmac;
+}
+
+static void ipq806x_gmac_fix_mac_speed(void *priv, unsigned int speed)
+{
+       struct ipq806x_gmac *gmac = priv;
+
+       ipq806x_gmac_set_speed(gmac, speed);
+}
+
+static const struct stmmac_of_data ipq806x_gmac_data = {
+       .has_gmac       = 1,
+       .setup          = ipq806x_gmac_setup,
+       .fix_mac_speed  = ipq806x_gmac_fix_mac_speed,
+};
+
+static const struct of_device_id ipq806x_gmac_dwmac_match[] = {
+       { .compatible = "qcom,ipq806x-gmac", .data = &ipq806x_gmac_data },
+       { }
+};
+MODULE_DEVICE_TABLE(of, ipq806x_gmac_dwmac_match);
+
+static struct platform_driver ipq806x_gmac_dwmac_driver = {
+       .probe = stmmac_pltfr_probe,
+       .remove = stmmac_pltfr_remove,
+       .driver = {
+               .name           = "ipq806x-gmac-dwmac",
+               .pm             = &stmmac_pltfr_pm_ops,
+               .of_match_table = ipq806x_gmac_dwmac_match,
+       },
+};
+module_platform_driver(ipq806x_gmac_dwmac_driver);
+
+MODULE_AUTHOR("Mathieu Olivari <mathieu@codeaurora.org>");
+MODULE_DESCRIPTION("Qualcomm Atheros IPQ806x DWMAC specific glue layer");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
new file mode 100644 (file)
index 0000000..cb888d3
--- /dev/null
@@ -0,0 +1,99 @@
+/*
+ * DWMAC glue for NXP LPC18xx/LPC43xx Ethernet
+ *
+ * Copyright (C) 2015 Joachim Eastwood <manabian@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/stmmac.h>
+
+#include "stmmac_platform.h"
+
+/* Register defines for CREG syscon */
+#define LPC18XX_CREG_CREG6                     0x12c
+# define LPC18XX_CREG_CREG6_ETHMODE_MASK       0x7
+# define LPC18XX_CREG_CREG6_ETHMODE_MII                0x0
+# define LPC18XX_CREG_CREG6_ETHMODE_RMII       0x4
+
+struct lpc18xx_dwmac_priv_data {
+       struct regmap *reg;
+       int interface;
+};
+
+static void *lpc18xx_dwmac_setup(struct platform_device *pdev)
+{
+       struct lpc18xx_dwmac_priv_data *dwmac;
+
+       dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
+       if (!dwmac)
+               return ERR_PTR(-ENOMEM);
+
+       dwmac->interface = of_get_phy_mode(pdev->dev.of_node);
+       if (dwmac->interface < 0)
+               return ERR_PTR(dwmac->interface);
+
+       dwmac->reg = syscon_regmap_lookup_by_compatible("nxp,lpc1850-creg");
+       if (IS_ERR(dwmac->reg)) {
+               dev_err(&pdev->dev, "Syscon lookup failed\n");
+               return dwmac->reg;
+       }
+
+       return dwmac;
+}
+
+static int lpc18xx_dwmac_init(struct platform_device *pdev, void *priv)
+{
+       struct lpc18xx_dwmac_priv_data *dwmac = priv;
+       u8 ethmode;
+
+       if (dwmac->interface == PHY_INTERFACE_MODE_MII) {
+               ethmode = LPC18XX_CREG_CREG6_ETHMODE_MII;
+       } else if (dwmac->interface == PHY_INTERFACE_MODE_RMII) {
+               ethmode = LPC18XX_CREG_CREG6_ETHMODE_RMII;
+       } else {
+               dev_err(&pdev->dev, "Only MII and RMII mode supported\n");
+               return -EINVAL;
+       }
+
+       regmap_update_bits(dwmac->reg, LPC18XX_CREG_CREG6,
+                          LPC18XX_CREG_CREG6_ETHMODE_MASK, ethmode);
+
+       return 0;
+}
+
+static const struct stmmac_of_data lpc18xx_dwmac_data = {
+       .has_gmac = 1,
+       .setup = lpc18xx_dwmac_setup,
+       .init = lpc18xx_dwmac_init,
+};
+
+static const struct of_device_id lpc18xx_dwmac_match[] = {
+       { .compatible = "nxp,lpc1850-dwmac", .data = &lpc18xx_dwmac_data },
+       { }
+};
+MODULE_DEVICE_TABLE(of, lpc18xx_dwmac_match);
+
+static struct platform_driver lpc18xx_dwmac_driver = {
+       .probe  = stmmac_pltfr_probe,
+       .remove = stmmac_pltfr_remove,
+       .driver = {
+               .name           = "lpc18xx-dwmac",
+               .pm             = &stmmac_pltfr_pm_ops,
+               .of_match_table = lpc18xx_dwmac_match,
+       },
+};
+module_platform_driver(lpc18xx_dwmac_driver);
+
+MODULE_AUTHOR("Joachim Eastwood <manabian@gmail.com>");
+MODULE_DESCRIPTION("DWMAC glue for LPC18xx/43xx Ethernet");
+MODULE_LICENSE("GPL v2");
index cca028d632f611ee99c3396e9f57da3c9360dbe5..61a324a87d09e0b6f06873ce35131adbfbf9826d 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/ethtool.h>
 #include <linux/io.h>
 #include <linux/ioport.h>
+#include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/stmmac.h>
 
@@ -63,7 +64,28 @@ static void *meson6_dwmac_setup(struct platform_device *pdev)
        return dwmac;
 }
 
-const struct stmmac_of_data meson6_dwmac_data = {
+static const struct stmmac_of_data meson6_dwmac_data = {
        .setup          = meson6_dwmac_setup,
        .fix_mac_speed  = meson6_dwmac_fix_mac_speed,
 };
+
+static const struct of_device_id meson6_dwmac_match[] = {
+       { .compatible = "amlogic,meson6-dwmac", .data = &meson6_dwmac_data},
+       { }
+};
+MODULE_DEVICE_TABLE(of, meson6_dwmac_match);
+
+static struct platform_driver meson6_dwmac_driver = {
+       .probe  = stmmac_pltfr_probe,
+       .remove = stmmac_pltfr_remove,
+       .driver = {
+               .name           = "meson6-dwmac",
+               .pm             = &stmmac_pltfr_pm_ops,
+               .of_match_table = meson6_dwmac_match,
+       },
+};
+module_platform_driver(meson6_dwmac_driver);
+
+MODULE_AUTHOR("Beniamino Galvani <b.galvani@gmail.com>");
+MODULE_DESCRIPTION("Amlogic Meson DWMAC glue layer");
+MODULE_LICENSE("GPL v2");
index 6249a4ec08f05c3ddb7939ab4f5386fe75a8f551..30e28f0d9a6003546b8aeb9a5e902369709787d1 100644 (file)
 #include <linux/phy.h>
 #include <linux/of_net.h>
 #include <linux/gpio.h>
+#include <linux/module.h>
 #include <linux/of_gpio.h>
 #include <linux/of_device.h>
+#include <linux/platform_device.h>
 #include <linux/regulator/consumer.h>
 #include <linux/delay.h>
 #include <linux/mfd/syscon.h>
 #include <linux/regmap.h>
 
+#include "stmmac_platform.h"
+
 struct rk_priv_data {
        struct platform_device *pdev;
        int phy_iface;
@@ -428,10 +432,31 @@ static void rk_fix_speed(void *priv, unsigned int speed)
                dev_err(dev, "unsupported interface %d", bsp_priv->phy_iface);
 }
 
-const struct stmmac_of_data rk3288_gmac_data = {
+static const struct stmmac_of_data rk3288_gmac_data = {
        .has_gmac = 1,
        .fix_mac_speed = rk_fix_speed,
        .setup = rk_gmac_setup,
        .init = rk_gmac_init,
        .exit = rk_gmac_exit,
 };
+
+static const struct of_device_id rk_gmac_dwmac_match[] = {
+       { .compatible = "rockchip,rk3288-gmac", .data = &rk3288_gmac_data},
+       { }
+};
+MODULE_DEVICE_TABLE(of, rk_gmac_dwmac_match);
+
+static struct platform_driver rk_gmac_dwmac_driver = {
+       .probe  = stmmac_pltfr_probe,
+       .remove = stmmac_pltfr_remove,
+       .driver = {
+               .name           = "rk_gmac-dwmac",
+               .pm             = &stmmac_pltfr_pm_ops,
+               .of_match_table = rk_gmac_dwmac_match,
+       },
+};
+module_platform_driver(rk_gmac_dwmac_driver);
+
+MODULE_AUTHOR("Chen-Zhi (Roger Chen) <roger.chen@rock-chips.com>");
+MODULE_DESCRIPTION("Rockchip RK3288 DWMAC specific glue layer");
+MODULE_LICENSE("GPL");
index 5a36bd2c7837d3f4c84e9344ba6bc040e872af2c..8141c5b844ae681160fbf44b69e37f359492db85 100644 (file)
@@ -257,9 +257,28 @@ static int socfpga_dwmac_init(struct platform_device *pdev, void *priv)
        return ret;
 }
 
-const struct stmmac_of_data socfpga_gmac_data = {
+static const struct stmmac_of_data socfpga_gmac_data = {
        .setup = socfpga_dwmac_probe,
        .init = socfpga_dwmac_init,
        .exit = socfpga_dwmac_exit,
        .fix_mac_speed = socfpga_dwmac_fix_mac_speed,
 };
+
+static const struct of_device_id socfpga_dwmac_match[] = {
+       { .compatible = "altr,socfpga-stmmac", .data = &socfpga_gmac_data },
+       { }
+};
+MODULE_DEVICE_TABLE(of, socfpga_dwmac_match);
+
+static struct platform_driver socfpga_dwmac_driver = {
+       .probe  = stmmac_pltfr_probe,
+       .remove = stmmac_pltfr_remove,
+       .driver = {
+               .name           = "socfpga-dwmac",
+               .pm             = &stmmac_pltfr_pm_ops,
+               .of_match_table = socfpga_dwmac_match,
+       },
+};
+module_platform_driver(socfpga_dwmac_driver);
+
+MODULE_LICENSE("GPL v2");
index bb6e2dc61bec7dc8baac541e7bf390759caf017f..a2e8111c5d14302ffafb6f7fcd9db9a3db7e00e3 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/stmmac.h>
 #include <linux/phy.h>
 #include <linux/mfd/syscon.h>
+#include <linux/module.h>
 #include <linux/regmap.h>
 #include <linux/clk.h>
 #include <linux/of.h>
@@ -351,16 +352,40 @@ static void *sti_dwmac_setup(struct platform_device *pdev)
        return dwmac;
 }
 
-const struct stmmac_of_data stih4xx_dwmac_data = {
+static const struct stmmac_of_data stih4xx_dwmac_data = {
        .fix_mac_speed = stih4xx_fix_retime_src,
        .setup = sti_dwmac_setup,
        .init = stix4xx_init,
        .exit = sti_dwmac_exit,
 };
 
-const struct stmmac_of_data stid127_dwmac_data = {
+static const struct stmmac_of_data stid127_dwmac_data = {
        .fix_mac_speed = stid127_fix_retime_src,
        .setup = sti_dwmac_setup,
        .init = stid127_init,
        .exit = sti_dwmac_exit,
 };
+
+static const struct of_device_id sti_dwmac_match[] = {
+       { .compatible = "st,stih415-dwmac", .data = &stih4xx_dwmac_data},
+       { .compatible = "st,stih416-dwmac", .data = &stih4xx_dwmac_data},
+       { .compatible = "st,stid127-dwmac", .data = &stid127_dwmac_data},
+       { .compatible = "st,stih407-dwmac", .data = &stih4xx_dwmac_data},
+       { }
+};
+MODULE_DEVICE_TABLE(of, sti_dwmac_match);
+
+static struct platform_driver sti_dwmac_driver = {
+       .probe  = stmmac_pltfr_probe,
+       .remove = stmmac_pltfr_remove,
+       .driver = {
+               .name           = "sti-dwmac",
+               .pm             = &stmmac_pltfr_pm_ops,
+               .of_match_table = sti_dwmac_match,
+       },
+};
+module_platform_driver(sti_dwmac_driver);
+
+MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics DWMAC Specific Glue layer");
+MODULE_LICENSE("GPL");
index c5ea9ab75b03c806a803a5b6b42b9e83920bd768..15048ca397591acb90f746a0e3b7bfbf5b9c9e86 100644 (file)
@@ -18,7 +18,9 @@
 
 #include <linux/stmmac.h>
 #include <linux/clk.h>
+#include <linux/module.h>
 #include <linux/phy.h>
+#include <linux/platform_device.h>
 #include <linux/of_net.h>
 #include <linux/regulator/consumer.h>
 
@@ -132,7 +134,7 @@ static void sun7i_fix_speed(void *priv, unsigned int speed)
 
 /* of_data specifying hardware features and callbacks.
  * hardware features were copied from Allwinner drivers. */
-const struct stmmac_of_data sun7i_gmac_data = {
+static const struct stmmac_of_data sun7i_gmac_data = {
        .has_gmac = 1,
        .tx_coe = 1,
        .fix_mac_speed = sun7i_fix_speed,
@@ -140,3 +142,24 @@ const struct stmmac_of_data sun7i_gmac_data = {
        .init = sun7i_gmac_init,
        .exit = sun7i_gmac_exit,
 };
+
+static const struct of_device_id sun7i_dwmac_match[] = {
+       { .compatible = "allwinner,sun7i-a20-gmac", .data = &sun7i_gmac_data},
+       { }
+};
+MODULE_DEVICE_TABLE(of, sun7i_dwmac_match);
+
+static struct platform_driver sun7i_dwmac_driver = {
+       .probe  = stmmac_pltfr_probe,
+       .remove = stmmac_pltfr_remove,
+       .driver = {
+               .name           = "sun7i-dwmac",
+               .pm             = &stmmac_pltfr_pm_ops,
+               .of_match_table = sun7i_dwmac_match,
+       },
+};
+module_platform_driver(sun7i_dwmac_driver);
+
+MODULE_AUTHOR("Chen-Yu Tsai <wens@csie.org>");
+MODULE_DESCRIPTION("Allwinner sunxi DWMAC specific glue layer");
+MODULE_LICENSE("GPL");
index 73bab983edd96a47169bf4b1957e5fd13c28a3a0..1f3b33a6c6a8f8bd68f1b2313bc9ea93eb470ed4 100644 (file)
 #include <linux/ptp_clock_kernel.h>
 #include <linux/reset.h>
 
+struct stmmac_resources {
+       void __iomem *addr;
+       const char *mac;
+       int wol_irq;
+       int lpi_irq;
+       int irq;
+};
+
 struct stmmac_tx_info {
        dma_addr_t buf;
        bool map_as_page;
@@ -135,9 +143,9 @@ void stmmac_ptp_unregister(struct stmmac_priv *priv);
 int stmmac_resume(struct net_device *ndev);
 int stmmac_suspend(struct net_device *ndev);
 int stmmac_dvr_remove(struct net_device *ndev);
-struct stmmac_priv *stmmac_dvr_probe(struct device *device,
-                                    struct plat_stmmacenet_data *plat_dat,
-                                    void __iomem *addr);
+int stmmac_dvr_probe(struct device *device,
+                    struct plat_stmmacenet_data *plat_dat,
+                    struct stmmac_resources *res);
 void stmmac_disable_eee_mode(struct stmmac_priv *priv);
 bool stmmac_eee_init(struct stmmac_priv *priv);
 
index 2c5ce2baca8712790d51096a53868b84466f7dde..a5156739e1e1d35b1bd6d52bba562f3291eefa68 100644 (file)
@@ -52,6 +52,7 @@
 #include "stmmac_ptp.h"
 #include "stmmac.h"
 #include <linux/reset.h>
+#include <linux/of_mdio.h>
 
 #define STMMAC_ALIGN(x)        L1_CACHE_ALIGN(x)
 
@@ -816,18 +817,25 @@ static int stmmac_init_phy(struct net_device *dev)
        priv->speed = 0;
        priv->oldduplex = -1;
 
-       if (priv->plat->phy_bus_name)
-               snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x",
-                        priv->plat->phy_bus_name, priv->plat->bus_id);
-       else
-               snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
-                        priv->plat->bus_id);
+       if (priv->plat->phy_node) {
+               phydev = of_phy_connect(dev, priv->plat->phy_node,
+                                       &stmmac_adjust_link, 0, interface);
+       } else {
+               if (priv->plat->phy_bus_name)
+                       snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x",
+                                priv->plat->phy_bus_name, priv->plat->bus_id);
+               else
+                       snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
+                                priv->plat->bus_id);
 
-       snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
-                priv->plat->phy_addr);
-       pr_debug("stmmac_init_phy:  trying to attach to %s\n", phy_id_fmt);
+               snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
+                        priv->plat->phy_addr);
+               pr_debug("stmmac_init_phy:  trying to attach to %s\n",
+                        phy_id_fmt);
 
-       phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link, interface);
+               phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
+                                    interface);
+       }
 
        if (IS_ERR(phydev)) {
                pr_err("%s: Could not attach to PHY\n", dev->name);
@@ -848,7 +856,7 @@ static int stmmac_init_phy(struct net_device *dev)
         * device as well.
         * Note: phydev->phy_id is the result of reading the UID PHY registers.
         */
-       if (phydev->phy_id == 0) {
+       if (!priv->plat->phy_node && phydev->phy_id == 0) {
                phy_disconnect(phydev);
                return -ENODEV;
        }
@@ -975,13 +983,11 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
 {
        struct sk_buff *skb;
 
-       skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN,
-                                flags);
+       skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
        if (!skb) {
                pr_err("%s: Rx init fails; skb is NULL\n", __func__);
                return -ENOMEM;
        }
-       skb_reserve(skb, NET_IP_ALIGN);
        priv->rx_skbuff[i] = skb;
        priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
                                                priv->dma_buf_sz,
@@ -2800,16 +2806,15 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
  * stmmac_dvr_probe
  * @device: device pointer
  * @plat_dat: platform data pointer
- * @addr: iobase memory address
+ * @res: stmmac resource pointer
  * Description: this is the main probe function used to
  * call the alloc_etherdev, allocate the priv structure.
  * Return:
- * on success the new private structure is returned, otherwise the error
- * pointer.
+ * 0 on success, a negative errno code on failure.
  */
-struct stmmac_priv *stmmac_dvr_probe(struct device *device,
-                                    struct plat_stmmacenet_data *plat_dat,
-                                    void __iomem *addr)
+int stmmac_dvr_probe(struct device *device,
+                    struct plat_stmmacenet_data *plat_dat,
+                    struct stmmac_resources *res)
 {
        int ret = 0;
        struct net_device *ndev = NULL;
@@ -2817,7 +2822,7 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
 
        ndev = alloc_etherdev(sizeof(struct stmmac_priv));
        if (!ndev)
-               return ERR_PTR(-ENOMEM);
+               return -ENOMEM;
 
        SET_NETDEV_DEV(ndev, device);
 
@@ -2828,8 +2833,17 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
        stmmac_set_ethtool_ops(ndev);
        priv->pause = pause;
        priv->plat = plat_dat;
-       priv->ioaddr = addr;
-       priv->dev->base_addr = (unsigned long)addr;
+       priv->ioaddr = res->addr;
+       priv->dev->base_addr = (unsigned long)res->addr;
+
+       priv->dev->irq = res->irq;
+       priv->wol_irq = res->wol_irq;
+       priv->lpi_irq = res->lpi_irq;
+
+       if (res->mac)
+               memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
+
+       dev_set_drvdata(device, priv);
 
        /* Verify driver arguments */
        stmmac_verify_args();
@@ -2944,7 +2958,7 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
                }
        }
 
-       return priv;
+       return 0;
 
 error_mdio_register:
        unregister_netdev(ndev);
@@ -2957,7 +2971,7 @@ error_pclk_get:
 error_clk_get:
        free_netdev(ndev);
 
-       return ERR_PTR(ret);
+       return ret;
 }
 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
 
index 3bca908716e2a5c52a82ee6e6d8e67b9b10d23c6..d71a721ea61ce8f973d26ac7353ab210e33e9542 100644 (file)
@@ -163,7 +163,7 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
 {
        struct stmmac_pci_info *info = (struct stmmac_pci_info *)id->driver_data;
        struct plat_stmmacenet_data *plat;
-       struct stmmac_priv *priv;
+       struct stmmac_resources res;
        int i;
        int ret;
 
@@ -214,19 +214,12 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
 
        pci_enable_msi(pdev);
 
-       priv = stmmac_dvr_probe(&pdev->dev, plat, pcim_iomap_table(pdev)[i]);
-       if (IS_ERR(priv)) {
-               dev_err(&pdev->dev, "%s: main driver probe failed\n", __func__);
-               return PTR_ERR(priv);
-       }
-       priv->dev->irq = pdev->irq;
-       priv->wol_irq = pdev->irq;
-
-       pci_set_drvdata(pdev, priv->dev);
+       memset(&res, 0, sizeof(res));
+       res.addr = pcim_iomap_table(pdev)[i];
+       res.wol_irq = pdev->irq;
+       res.irq = pdev->irq;
 
-       dev_dbg(&pdev->dev, "STMMAC PCI driver registration completed\n");
-
-       return 0;
+       return stmmac_dvr_probe(&pdev->dev, plat, &res);
 }
 
 /**
index 68aec5c460db46c1378cdf122c3ad8f4eba15e86..f3918c7e7eeb373a6736bb5145c33320acbabc53 100644 (file)
 #include <linux/of.h>
 #include <linux/of_net.h>
 #include <linux/of_device.h>
+#include <linux/of_mdio.h>
 
 #include "stmmac.h"
 #include "stmmac_platform.h"
 
-static const struct of_device_id stmmac_dt_ids[] = {
-       /* SoC specific glue layers should come before generic bindings */
-       { .compatible = "rockchip,rk3288-gmac", .data = &rk3288_gmac_data},
-       { .compatible = "amlogic,meson6-dwmac", .data = &meson6_dwmac_data},
-       { .compatible = "allwinner,sun7i-a20-gmac", .data = &sun7i_gmac_data},
-       { .compatible = "st,stih415-dwmac", .data = &stih4xx_dwmac_data},
-       { .compatible = "st,stih416-dwmac", .data = &stih4xx_dwmac_data},
-       { .compatible = "st,stid127-dwmac", .data = &stid127_dwmac_data},
-       { .compatible = "st,stih407-dwmac", .data = &stih4xx_dwmac_data},
-       { .compatible = "altr,socfpga-stmmac", .data = &socfpga_gmac_data },
-       { .compatible = "st,spear600-gmac"},
-       { .compatible = "snps,dwmac-3.610"},
-       { .compatible = "snps,dwmac-3.70a"},
-       { .compatible = "snps,dwmac-3.710"},
-       { .compatible = "snps,dwmac"},
-       { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, stmmac_dt_ids);
-
 #ifdef CONFIG_OF
 
 /**
@@ -129,14 +111,9 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
        struct device_node *np = pdev->dev.of_node;
        struct stmmac_dma_cfg *dma_cfg;
        const struct of_device_id *device;
+       struct device *dev = &pdev->dev;
 
-       if (!np)
-               return -ENODEV;
-
-       device = of_match_device(stmmac_dt_ids, &pdev->dev);
-       if (!device)
-               return -ENODEV;
-
+       device = of_match_device(dev->driver->of_match_table, dev);
        if (device->data) {
                const struct stmmac_of_data *data = device->data;
                plat->has_gmac = data->has_gmac;
@@ -168,13 +145,24 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
        /* Default to phy auto-detection */
        plat->phy_addr = -1;
 
+       /* If we find a phy-handle property, use it as the PHY */
+       plat->phy_node = of_parse_phandle(np, "phy-handle", 0);
+
+       /* If phy-handle is not specified, check if we have a fixed-phy */
+       if (!plat->phy_node && of_phy_is_fixed_link(np)) {
+               if ((of_phy_register_fixed_link(np) < 0))
+                       return -ENODEV;
+
+               plat->phy_node = of_node_get(np);
+       }
+
        /* "snps,phy-addr" is not a standard property. Mark it as deprecated
         * and warn of its use. Remove this when phy node support is added.
         */
        if (of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0)
                dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n");
 
-       if (plat->phy_bus_name)
+       if (plat->phy_node || plat->phy_bus_name)
                plat->mdio_bus_data = NULL;
        else
                plat->mdio_bus_data =
@@ -232,8 +220,10 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
        if (of_find_property(np, "snps,pbl", NULL)) {
                dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
                                       GFP_KERNEL);
-               if (!dma_cfg)
+               if (!dma_cfg) {
+                       of_node_put(np);
                        return -ENOMEM;
+               }
                plat->dma_cfg = dma_cfg;
                of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl);
                dma_cfg->fixed_burst =
@@ -268,27 +258,26 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
  * the necessary platform resources, invoke custom helper (if required) and
  * invoke the main probe function.
  */
-static int stmmac_pltfr_probe(struct platform_device *pdev)
+int stmmac_pltfr_probe(struct platform_device *pdev)
 {
+       struct stmmac_resources stmmac_res;
        int ret = 0;
        struct resource *res;
        struct device *dev = &pdev->dev;
-       void __iomem *addr = NULL;
-       struct stmmac_priv *priv = NULL;
        struct plat_stmmacenet_data *plat_dat = NULL;
-       const char *mac = NULL;
-       int irq, wol_irq, lpi_irq;
+
+       memset(&stmmac_res, 0, sizeof(stmmac_res));
 
        /* Get IRQ information early to have an ability to ask for deferred
         * probe if needed before we went too far with resource allocation.
         */
-       irq = platform_get_irq_byname(pdev, "macirq");
-       if (irq < 0) {
-               if (irq != -EPROBE_DEFER) {
+       stmmac_res.irq = platform_get_irq_byname(pdev, "macirq");
+       if (stmmac_res.irq < 0) {
+               if (stmmac_res.irq != -EPROBE_DEFER) {
                        dev_err(dev,
                                "MAC IRQ configuration information not found\n");
                }
-               return irq;
+               return stmmac_res.irq;
        }
 
        /* On some platforms e.g. SPEAr the wake up irq differs from the mac irq
@@ -298,21 +287,21 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
         * In case the wake up interrupt is not passed from the platform
         * so the driver will continue to use the mac irq (ndev->irq)
         */
-       wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
-       if (wol_irq < 0) {
-               if (wol_irq == -EPROBE_DEFER)
+       stmmac_res.wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
+       if (stmmac_res.wol_irq < 0) {
+               if (stmmac_res.wol_irq == -EPROBE_DEFER)
                        return -EPROBE_DEFER;
-               wol_irq = irq;
+               stmmac_res.wol_irq = stmmac_res.irq;
        }
 
-       lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
-       if (lpi_irq == -EPROBE_DEFER)
+       stmmac_res.lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
+       if (stmmac_res.lpi_irq == -EPROBE_DEFER)
                return -EPROBE_DEFER;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       addr = devm_ioremap_resource(dev, res);
-       if (IS_ERR(addr))
-               return PTR_ERR(addr);
+       stmmac_res.addr = devm_ioremap_resource(dev, res);
+       if (IS_ERR(stmmac_res.addr))
+               return PTR_ERR(stmmac_res.addr);
 
        plat_dat = dev_get_platdata(&pdev->dev);
 
@@ -332,7 +321,7 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
        plat_dat->unicast_filter_entries = 1;
 
        if (pdev->dev.of_node) {
-               ret = stmmac_probe_config_dt(pdev, plat_dat, &mac);
+               ret = stmmac_probe_config_dt(pdev, plat_dat, &stmmac_res.mac);
                if (ret) {
                        pr_err("%s: main dt probe failed", __func__);
                        return ret;
@@ -353,27 +342,9 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
                        return ret;
        }
 
-       priv = stmmac_dvr_probe(&(pdev->dev), plat_dat, addr);
-       if (IS_ERR(priv)) {
-               pr_err("%s: main driver probe failed", __func__);
-               return PTR_ERR(priv);
-       }
-
-       /* Copy IRQ values to priv structure which is now avaialble */
-       priv->dev->irq = irq;
-       priv->wol_irq = wol_irq;
-       priv->lpi_irq = lpi_irq;
-
-       /* Get MAC address if available (DT) */
-       if (mac)
-               memcpy(priv->dev->dev_addr, mac, ETH_ALEN);
-
-       platform_set_drvdata(pdev, priv->dev);
-
-       pr_debug("STMMAC platform driver registration completed");
-
-       return 0;
+       return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
 }
+EXPORT_SYMBOL_GPL(stmmac_pltfr_probe);
 
 /**
  * stmmac_pltfr_remove
@@ -381,7 +352,7 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
  * Description: this function calls the main to free the net resources
  * and calls the platforms hook and release the resources (e.g. mem).
  */
-static int stmmac_pltfr_remove(struct platform_device *pdev)
+int stmmac_pltfr_remove(struct platform_device *pdev)
 {
        struct net_device *ndev = platform_get_drvdata(pdev);
        struct stmmac_priv *priv = netdev_priv(ndev);
@@ -395,6 +366,7 @@ static int stmmac_pltfr_remove(struct platform_device *pdev)
 
        return ret;
 }
+EXPORT_SYMBOL_GPL(stmmac_pltfr_remove);
 
 #ifdef CONFIG_PM_SLEEP
 /**
@@ -438,21 +410,6 @@ static int stmmac_pltfr_resume(struct device *dev)
 }
 #endif /* CONFIG_PM_SLEEP */
 
-static SIMPLE_DEV_PM_OPS(stmmac_pltfr_pm_ops,
-                        stmmac_pltfr_suspend, stmmac_pltfr_resume);
-
-static struct platform_driver stmmac_pltfr_driver = {
-       .probe = stmmac_pltfr_probe,
-       .remove = stmmac_pltfr_remove,
-       .driver = {
-                  .name = STMMAC_RESOURCE_NAME,
-                  .pm = &stmmac_pltfr_pm_ops,
-                  .of_match_table = of_match_ptr(stmmac_dt_ids),
-       },
-};
-
-module_platform_driver(stmmac_pltfr_driver);
-
-MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PLATFORM driver");
-MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
-MODULE_LICENSE("GPL");
+SIMPLE_DEV_PM_OPS(stmmac_pltfr_pm_ops, stmmac_pltfr_suspend,
+                                      stmmac_pltfr_resume);
+EXPORT_SYMBOL_GPL(stmmac_pltfr_pm_ops);
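
The stmmac_probe_config_dt() changes earlier in this file follow the
usual OF order of PHY discovery: an explicit "phy-handle" wins, a
fixed-link subnode comes next, and only when neither exists does the
driver allocate mdio_bus_data and scan its own MDIO bus. A condensed
sketch of that fallback chain; example_of_get_phy() is hypothetical,
while the of_* helpers are the ones used in the hunk above:

#include <linux/of.h>
#include <linux/of_mdio.h>

static int example_of_get_phy(struct device_node *np,
			      struct plat_stmmacenet_data *plat)
{
	/* 1. An explicit "phy-handle" property wins outright. */
	plat->phy_node = of_parse_phandle(np, "phy-handle", 0);
	if (plat->phy_node)
		return 0;

	/* 2. Next, a fixed-link subnode (PHY-less MAC-to-MAC link);
	 * the MAC node itself then stands in as the PHY node.
	 */
	if (of_phy_is_fixed_link(np)) {
		if (of_phy_register_fixed_link(np) < 0)
			return -ENODEV;
		plat->phy_node = of_node_get(np);
		return 0;
	}

	/* 3. Neither present: leave phy_node NULL so the caller sets
	 * up mdio_bus_data and probes the MDIO bus instead.
	 */
	return 0;
}
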
index 093eb99e5ffd96d2ade4aecaaba357ac8dd8ef7e..71da86d7bd00d9f7feb0a501cce0e6bbb4cb7298 100644 (file)
 #ifndef __STMMAC_PLATFORM_H__
 #define __STMMAC_PLATFORM_H__
 
-extern const struct stmmac_of_data meson6_dwmac_data;
-extern const struct stmmac_of_data sun7i_gmac_data;
-extern const struct stmmac_of_data stih4xx_dwmac_data;
-extern const struct stmmac_of_data stid127_dwmac_data;
-extern const struct stmmac_of_data socfpga_gmac_data;
-extern const struct stmmac_of_data rk3288_gmac_data;
+int stmmac_pltfr_probe(struct platform_device *pdev);
+int stmmac_pltfr_remove(struct platform_device *pdev);
+extern const struct dev_pm_ops stmmac_pltfr_pm_ops;
 
 #endif /* __STMMAC_PLATFORM_H__ */
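
With stmmac_pltfr_probe(), stmmac_pltfr_remove() and
stmmac_pltfr_pm_ops exported, the generic platform driver no longer
registers itself; each DT glue layer becomes a standalone platform
driver built on these three symbols. A hypothetical glue module would
look roughly like this (compatible string and driver name invented for
illustration):

#include <linux/module.h>
#include <linux/platform_device.h>

#include "stmmac_platform.h"

static const struct of_device_id example_dwmac_match[] = {
	{ .compatible = "vendor,example-dwmac" },	/* hypothetical */
	{ }
};
MODULE_DEVICE_TABLE(of, example_dwmac_match);

static struct platform_driver example_dwmac_driver = {
	.probe  = stmmac_pltfr_probe,
	.remove = stmmac_pltfr_remove,
	.driver = {
		.name           = "example-dwmac",
		.pm             = &stmmac_pltfr_pm_ops,
		.of_match_table = example_dwmac_match,
	},
};
module_platform_driver(example_dwmac_driver);

MODULE_LICENSE("GPL");
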
index b536b4c82752a233e7c18d4f5194041f6dc80e20..462820514faea05beaa77f5a7e94ede318c294c6 100644 (file)
@@ -1361,7 +1361,6 @@ static int cpsw_ndo_stop(struct net_device *ndev)
        if (cpsw_common_res_usage_state(priv) <= 1) {
                cpts_unregister(priv->cpts);
                cpsw_intr_disable(priv);
-               cpdma_ctlr_int_ctrl(priv->dma, false);
                cpdma_ctlr_stop(priv->dma);
                cpsw_ale_stop(priv->ale);
        }
@@ -1456,7 +1455,7 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
 
                if (priv->cpts->rx_enable)
                        ctrl |= CTRL_V2_RX_TS_BITS;
-       break;
+               break;
        case CPSW_VERSION_3:
        default:
                ctrl &= ~CTRL_V3_ALL_TS_MASK;
@@ -1466,7 +1465,7 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
 
                if (priv->cpts->rx_enable)
                        ctrl |= CTRL_V3_RX_TS_BITS;
-       break;
+               break;
        }
 
        mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;
@@ -1589,10 +1588,8 @@ static void cpsw_ndo_tx_timeout(struct net_device *ndev)
        cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n");
        ndev->stats.tx_errors++;
        cpsw_intr_disable(priv);
-       cpdma_ctlr_int_ctrl(priv->dma, false);
        cpdma_chan_stop(priv->txch);
        cpdma_chan_start(priv->txch);
-       cpdma_ctlr_int_ctrl(priv->dma, true);
        cpsw_intr_enable(priv);
 }
 
@@ -1629,10 +1626,8 @@ static void cpsw_ndo_poll_controller(struct net_device *ndev)
        struct cpsw_priv *priv = netdev_priv(ndev);
 
        cpsw_intr_disable(priv);
-       cpdma_ctlr_int_ctrl(priv->dma, false);
        cpsw_rx_interrupt(priv->irqs_table[0], priv);
        cpsw_tx_interrupt(priv->irqs_table[1], priv);
-       cpdma_ctlr_int_ctrl(priv->dma, true);
        cpsw_intr_enable(priv);
 }
 #endif
index 6e927b4583aa4b9067433a9a2919ab8ea6a43fa9..43b061bd8e0724f228939a5e7e0e38eb23cc30e5 100644 (file)
@@ -268,39 +268,6 @@ int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid)
 }
 EXPORT_SYMBOL_GPL(cpsw_ale_flush_multicast);
 
-static void cpsw_ale_flush_ucast(struct cpsw_ale *ale, u32 *ale_entry,
-                                int port_mask)
-{
-       int port;
-
-       port = cpsw_ale_get_port_num(ale_entry);
-       if ((BIT(port) & port_mask) == 0)
-               return; /* ports dont intersect, not interested */
-       cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
-}
-
-int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask)
-{
-       u32 ale_entry[ALE_ENTRY_WORDS];
-       int ret, idx;
-
-       for (idx = 0; idx < ale->params.ale_entries; idx++) {
-               cpsw_ale_read(ale, idx, ale_entry);
-               ret = cpsw_ale_get_entry_type(ale_entry);
-               if (ret != ALE_TYPE_ADDR && ret != ALE_TYPE_VLAN_ADDR)
-                       continue;
-
-               if (cpsw_ale_get_mcast(ale_entry))
-                       cpsw_ale_flush_mcast(ale, ale_entry, port_mask);
-               else
-                       cpsw_ale_flush_ucast(ale, ale_entry, port_mask);
-
-               cpsw_ale_write(ale, idx, ale_entry);
-       }
-       return 0;
-}
-EXPORT_SYMBOL_GPL(cpsw_ale_flush);
-
 static inline void cpsw_ale_set_vlan_entry_type(u32 *ale_entry,
                                                int flags, u16 vid)
 {
@@ -752,18 +719,6 @@ static void cpsw_ale_timer(unsigned long arg)
        }
 }
 
-int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout)
-{
-       del_timer_sync(&ale->timer);
-       ale->ageout = ageout * HZ;
-       if (ale->ageout) {
-               ale->timer.expires = jiffies + ale->ageout;
-               add_timer(&ale->timer);
-       }
-       return 0;
-}
-EXPORT_SYMBOL_GPL(cpsw_ale_set_ageout);
-
 void cpsw_ale_start(struct cpsw_ale *ale)
 {
        u32 rev;
index af1e7ecd87c6fbd24b80954c7977e96aa3676a0c..a7001894f3daef3369050714c38e001de88346aa 100644 (file)
@@ -90,8 +90,6 @@ int cpsw_ale_destroy(struct cpsw_ale *ale);
 void cpsw_ale_start(struct cpsw_ale *ale);
 void cpsw_ale_stop(struct cpsw_ale *ale);
 
-int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout);
-int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask);
 int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid);
 int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port,
                       int flags, u16 vid);
index 43efc3a0cda58b26cededf117180f6782fbca76a..5ec4ed3f6c8def7a6a6cf527cd9ac7ca73c09844 100644 (file)
@@ -537,7 +537,7 @@ int netcp_unregister_rxhook(struct netcp_intf *netcp_priv, int order,
 static void netcp_frag_free(bool is_frag, void *ptr)
 {
        if (is_frag)
-               put_page(virt_to_head_page(ptr));
+               skb_free_frag(ptr);
        else
                kfree(ptr);
 }
@@ -698,7 +698,6 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
                }
        }
 
-       netcp->ndev->last_rx = jiffies;
        netcp->ndev->stats.rx_packets++;
        netcp->ndev->stats.rx_bytes += skb->len;
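
netcp_frag_free() now uses skb_free_frag(), the helper that wraps the
open-coded put_page(virt_to_head_page(ptr)) for buffers obtained from
the page-fragment allocator. A minimal sketch of the pairing, with the
buffer lifetime compressed for illustration:

#include <linux/skbuff.h>

static void example_frag_cycle(unsigned int buf_len)
{
	void *buf = netdev_alloc_frag(buf_len);	/* page-fragment backed */

	if (!buf)
		return;
	/* ... hand buf to the hardware, receive into it, then: */
	skb_free_frag(buf);	/* == put_page(virt_to_head_page(buf)) */
}
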
 
index 3d8f60d9643e45c3876c910a59c0c59f5d727a7c..6f0a4495c7f33613fed7c30a76d4ad329b80f9d9 100644 (file)
@@ -721,9 +721,6 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
        if (!hash_default)
                __inv_buffer(buf, len);
 
-       /* ISSUE: Is this needed? */
-       dev->last_rx = jiffies;
-
 #ifdef TILE_NET_DUMP_PACKETS
        dump_packet(buf, len, "rx");
 #endif /* TILE_NET_DUMP_PACKETS */
index 8e9371a3388a00ed78043c1d098b8c31db776ea7..3c54a2cae5dfd09e066205b945ae937a3cfabaee 100644 (file)
@@ -604,8 +604,7 @@ spider_net_set_multi(struct net_device *netdev)
        int i;
        u32 reg;
        struct spider_net_card *card = netdev_priv(netdev);
-       unsigned long bitmask[SPIDER_NET_MULTICAST_HASHES / BITS_PER_LONG] =
-               {0, };
+       DECLARE_BITMAP(bitmask, SPIDER_NET_MULTICAST_HASHES) = {};
 
        spider_net_set_promisc(card);
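
DECLARE_BITMAP() expands to exactly the unsigned long array the old
spider_net code spelled out by hand, with the rounding-up to whole
longs kept inside the macro. A small sketch (the size 256 is
illustrative, not taken from spider_net.h):

#include <linux/bitmap.h>	/* DECLARE_BITMAP(), set_bit(), test_bit() */

static void example_bitmap_use(void)
{
	/* Expands to: unsigned long bitmask[BITS_TO_LONGS(256)]; */
	DECLARE_BITMAP(bitmask, 256) = {};

	set_bit(42, bitmask);		/* mark hash bucket 42 */
	WARN_ON(!test_bit(42, bitmask));
}
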
 
index de2850497c09d87a6e7131e12a82ce68af702dd5..725106f75d425a1e4342c59ef46e993a493d292d 100644 (file)
@@ -472,8 +472,7 @@ struct rhine_private {
 
        /* Frequently used values: keep some adjacent for cache effect. */
        u32 quirks;
-       struct rx_desc *rx_head_desc;
-       unsigned int cur_rx, dirty_rx;  /* Producer/consumer ring indices */
+       unsigned int cur_rx;
        unsigned int cur_tx, dirty_tx;
        unsigned int rx_buf_sz;         /* Based on MTU+slack. */
        struct rhine_stats rx_stats;
@@ -1213,17 +1212,61 @@ static void free_ring(struct net_device* dev)
 
 }
 
-static void alloc_rbufs(struct net_device *dev)
+struct rhine_skb_dma {
+       struct sk_buff *skb;
+       dma_addr_t dma;
+};
+
+static inline int rhine_skb_dma_init(struct net_device *dev,
+                                    struct rhine_skb_dma *sd)
 {
        struct rhine_private *rp = netdev_priv(dev);
        struct device *hwdev = dev->dev.parent;
-       dma_addr_t next;
+       const int size = rp->rx_buf_sz;
+
+       sd->skb = netdev_alloc_skb(dev, size);
+       if (!sd->skb)
+               return -ENOMEM;
+
+       sd->dma = dma_map_single(hwdev, sd->skb->data, size, DMA_FROM_DEVICE);
+       if (unlikely(dma_mapping_error(hwdev, sd->dma))) {
+               netif_err(rp, drv, dev, "Rx DMA mapping failure\n");
+               dev_kfree_skb_any(sd->skb);
+               return -EIO;
+       }
+
+       return 0;
+}
+
+static void rhine_reset_rbufs(struct rhine_private *rp)
+{
        int i;
 
-       rp->dirty_rx = rp->cur_rx = 0;
+       rp->cur_rx = 0;
+
+       for (i = 0; i < RX_RING_SIZE; i++)
+               rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
+}
+
+static inline void rhine_skb_dma_nic_store(struct rhine_private *rp,
+                                          struct rhine_skb_dma *sd, int entry)
+{
+       rp->rx_skbuff_dma[entry] = sd->dma;
+       rp->rx_skbuff[entry] = sd->skb;
+
+       rp->rx_ring[entry].addr = cpu_to_le32(sd->dma);
+       dma_wmb();
+}
+
+static void free_rbufs(struct net_device* dev);
+
+static int alloc_rbufs(struct net_device *dev)
+{
+       struct rhine_private *rp = netdev_priv(dev);
+       dma_addr_t next;
+       int rc, i;
 
        rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
-       rp->rx_head_desc = &rp->rx_ring[0];
        next = rp->rx_ring_dma;
 
        /* Init the ring entries */
@@ -1239,23 +1282,20 @@ static void alloc_rbufs(struct net_device *dev)
 
        /* Fill in the Rx buffers.  Handle allocation failure gracefully. */
        for (i = 0; i < RX_RING_SIZE; i++) {
-               struct sk_buff *skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
-               rp->rx_skbuff[i] = skb;
-               if (skb == NULL)
-                       break;
+               struct rhine_skb_dma sd;
 
-               rp->rx_skbuff_dma[i] =
-                       dma_map_single(hwdev, skb->data, rp->rx_buf_sz,
-                                      DMA_FROM_DEVICE);
-               if (dma_mapping_error(hwdev, rp->rx_skbuff_dma[i])) {
-                       rp->rx_skbuff_dma[i] = 0;
-                       dev_kfree_skb(skb);
-                       break;
+               rc = rhine_skb_dma_init(dev, &sd);
+               if (rc < 0) {
+                       free_rbufs(dev);
+                       goto out;
                }
-               rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
-               rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
+
+               rhine_skb_dma_nic_store(rp, &sd, i);
        }
-       rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
+
+       rhine_reset_rbufs(rp);
+out:
+       return rc;
 }
 
 static void free_rbufs(struct net_device* dev)
@@ -1659,16 +1699,18 @@ static int rhine_open(struct net_device *dev)
 
        rc = request_irq(rp->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev);
        if (rc)
-               return rc;
+               goto out;
 
        netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->irq);
 
        rc = alloc_ring(dev);
-       if (rc) {
-               free_irq(rp->irq, dev);
-               return rc;
-       }
-       alloc_rbufs(dev);
+       if (rc < 0)
+               goto out_free_irq;
+
+       rc = alloc_rbufs(dev);
+       if (rc < 0)
+               goto out_free_ring;
+
        alloc_tbufs(dev);
        rhine_chip_reset(dev);
        rhine_task_enable(rp);
@@ -1680,7 +1722,14 @@ static int rhine_open(struct net_device *dev)
 
        netif_start_queue(dev);
 
-       return 0;
+out:
+       return rc;
+
+out_free_ring:
+       free_ring(dev);
+out_free_irq:
+       free_irq(rp->irq, dev);
+       goto out;
 }
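
rhine_open() now unwinds with the conventional goto ladder: resources
are released in strict reverse order of acquisition and the success
path leaves through the same tail. The skeleton of the pattern, with
hypothetical acquire_*/release_* stand-ins for
request_irq()/alloc_ring()/alloc_rbufs() and their inverses:

static int example_open(struct example_priv *p)
{
	int rc;

	rc = acquire_a(p);		/* request_irq() above */
	if (rc)
		goto out;
	rc = acquire_b(p);		/* alloc_ring() above */
	if (rc)
		goto out_release_a;
	rc = acquire_c(p);		/* alloc_rbufs() above */
	if (rc)
		goto out_release_b;
	return 0;

out_release_b:
	release_b(p);			/* free_ring() above */
out_release_a:
	release_a(p);			/* free_irq() above */
out:
	return rc;
}
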
 
 static void rhine_reset_task(struct work_struct *work)
@@ -1700,9 +1749,9 @@ static void rhine_reset_task(struct work_struct *work)
 
        /* clear all descriptors */
        free_tbufs(dev);
-       free_rbufs(dev);
        alloc_tbufs(dev);
-       alloc_rbufs(dev);
+
+       rhine_reset_rbufs(rp);
 
        /* Reinitialize the hardware. */
        rhine_chip_reset(dev);
@@ -1730,6 +1779,11 @@ static void rhine_tx_timeout(struct net_device *dev)
        schedule_work(&rp->reset_task);
 }
 
+static inline bool rhine_tx_queue_full(struct rhine_private *rp)
+{
+       return (rp->cur_tx - rp->dirty_tx) >= TX_QUEUE_LEN;
+}
+
 static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
                                  struct net_device *dev)
 {
@@ -1800,11 +1854,17 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
 
        netdev_sent_queue(dev, skb->len);
        /* lock eth irq */
-       wmb();
+       dma_wmb();
        rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
        wmb();
 
        rp->cur_tx++;
+       /*
+        * Nobody wants the cur_tx write to rot for ages after the NIC has
+        * seen the transmit request, especially as the transmit completion
+        * handler could miss it.
+        */
+       smp_wmb();
 
        /* Non-x86 Todo: explicitly flush cache lines here. */
 
@@ -1817,8 +1877,14 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
               ioaddr + ChipCmd1);
        IOSYNC;
 
-       if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
+       /* dirty_tx may be pessimistically out-of-sync. See rhine_tx. */
+       if (rhine_tx_queue_full(rp)) {
                netif_stop_queue(dev);
+               smp_rmb();
+               /* Rejuvenate. */
+               if (!rhine_tx_queue_full(rp))
+                       netif_wake_queue(dev);
+       }
 
        netif_dbg(rp, tx_queued, dev, "Transmit frame #%d queued in slot %d\n",
                  rp->cur_tx - 1, entry);
@@ -1866,13 +1932,24 @@ static void rhine_tx(struct net_device *dev)
 {
        struct rhine_private *rp = netdev_priv(dev);
        struct device *hwdev = dev->dev.parent;
-       int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;
        unsigned int pkts_compl = 0, bytes_compl = 0;
+       unsigned int dirty_tx = rp->dirty_tx;
+       unsigned int cur_tx;
        struct sk_buff *skb;
 
+       /*
+        * The race with rhine_start_tx does not matter here as long as the
+        * driver enforces a value of cur_tx that was relevant when the
+        * packet was scheduled to the network chipset.
+        * Executive summary: smp_rmb() balances smp_wmb() in rhine_start_tx.
+        */
+       smp_rmb();
+       cur_tx = rp->cur_tx;
        /* find and cleanup dirty tx descriptors */
-       while (rp->dirty_tx != rp->cur_tx) {
-               txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
+       while (dirty_tx != cur_tx) {
+               unsigned int entry = dirty_tx % TX_RING_SIZE;
+               u32 txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
+
                netif_dbg(rp, tx_done, dev, "Tx scavenge %d status %08x\n",
                          entry, txstatus);
                if (txstatus & DescOwn)
@@ -1921,12 +1998,23 @@ static void rhine_tx(struct net_device *dev)
                pkts_compl++;
                dev_consume_skb_any(skb);
                rp->tx_skbuff[entry] = NULL;
-               entry = (++rp->dirty_tx) % TX_RING_SIZE;
+               dirty_tx++;
        }
 
+       rp->dirty_tx = dirty_tx;
+       /* Pity we can't rely on the nearby BQL completion implicit barrier. */
+       smp_wmb();
+
        netdev_completed_queue(dev, pkts_compl, bytes_compl);
-       if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
+
+       /* cur_tx may be optimistically out-of-sync. See rhine_start_tx. */
+       if (!rhine_tx_queue_full(rp) && netif_queue_stopped(dev)) {
                netif_wake_queue(dev);
+               smp_rmb();
+               /* Rejuvenate. */
+               if (rhine_tx_queue_full(rp))
+                       netif_stop_queue(dev);
+       }
 }
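
The smp_wmb()/smp_rmb() pairs added above implement a lockless
single-producer/single-consumer protocol on the Tx ring: each side
publishes its own index behind a write barrier and re-reads the other
side's index behind a read barrier before trusting a full/empty
decision. Reduced to its essentials (a sketch, not the driver code;
EXAMPLE_QUEUE_LEN mirrors TX_QUEUE_LEN above):

#include <linux/netdevice.h>

struct example_ring {
	unsigned int cur;	/* producer index (start_xmit) */
	unsigned int dirty;	/* consumer index (tx completion) */
};

#define EXAMPLE_QUEUE_LEN 60

static bool example_full(struct example_ring *r)
{
	return (r->cur - r->dirty) >= EXAMPLE_QUEUE_LEN;
}

/* Producer, after queueing descriptor r->cur to the NIC: */
static void example_tx_publish(struct example_ring *r, struct net_device *dev)
{
	r->cur++;
	smp_wmb();		/* publish cur before the consumer reads it */

	if (example_full(r)) {	/* dirty may be pessimistically stale */
		netif_stop_queue(dev);
		smp_rmb();	/* pick up a fresh dirty */
		if (!example_full(r))
			netif_wake_queue(dev);	/* consumer raced us: undo */
	}
}

/* Consumer, after reclaiming completed descriptors: */
static void example_tx_complete(struct example_ring *r, struct net_device *dev,
				unsigned int reclaimed)
{
	smp_rmb();		/* pairs with the producer's smp_wmb() */
	r->dirty += reclaimed;
	smp_wmb();		/* publish dirty before the producer reads it */

	if (!example_full(r) && netif_queue_stopped(dev))
		netif_wake_queue(dev);	/* rhine_tx() re-checks once more */
}
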
 
 /**
@@ -1944,22 +2032,33 @@ static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
        return be16_to_cpup((__be16 *)trailer);
 }
 
+static inline void rhine_rx_vlan_tag(struct sk_buff *skb, struct rx_desc *desc,
+                                    int data_size)
+{
+       dma_rmb();
+       if (unlikely(desc->desc_length & cpu_to_le32(DescTag))) {
+               u16 vlan_tci;
+
+               vlan_tci = rhine_get_vlan_tci(skb, data_size);
+               __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
+       }
+}
+
 /* Process up to limit frames from receive ring */
 static int rhine_rx(struct net_device *dev, int limit)
 {
        struct rhine_private *rp = netdev_priv(dev);
        struct device *hwdev = dev->dev.parent;
-       int count;
        int entry = rp->cur_rx % RX_RING_SIZE;
+       int count;
 
        netif_dbg(rp, rx_status, dev, "%s(), entry %d status %08x\n", __func__,
-                 entry, le32_to_cpu(rp->rx_head_desc->rx_status));
+                 entry, le32_to_cpu(rp->rx_ring[entry].rx_status));
 
        /* If EOP is set on the next entry, it's a new packet. Send it up. */
        for (count = 0; count < limit; ++count) {
-               struct rx_desc *desc = rp->rx_head_desc;
+               struct rx_desc *desc = rp->rx_ring + entry;
                u32 desc_status = le32_to_cpu(desc->rx_status);
-               u32 desc_length = le32_to_cpu(desc->desc_length);
                int data_size = desc_status >> 16;
 
                if (desc_status & DescOwn)
@@ -1975,10 +2074,6 @@ static int rhine_rx(struct net_device *dev, int limit)
        "entry %#x length %d status %08x!\n",
                                            entry, data_size,
                                            desc_status);
-                               netdev_warn(dev,
-                                           "Oversized Ethernet frame %p vs %p\n",
-                                           rp->rx_head_desc,
-                                           &rp->rx_ring[entry]);
                                dev->stats.rx_length_errors++;
                        } else if (desc_status & RxErr) {
                                /* There was a error. */
@@ -2000,16 +2095,17 @@ static int rhine_rx(struct net_device *dev, int limit)
                                }
                        }
                } else {
-                       struct sk_buff *skb = NULL;
                        /* Length should omit the CRC */
                        int pkt_len = data_size - 4;
-                       u16 vlan_tci = 0;
+                       struct sk_buff *skb;
 
                        /* Check if the packet is long enough to accept without
                           copying to a minimally-sized skbuff. */
-                       if (pkt_len < rx_copybreak)
+                       if (pkt_len < rx_copybreak) {
                                skb = netdev_alloc_skb_ip_align(dev, pkt_len);
-                       if (skb) {
+                               if (unlikely(!skb))
+                                       goto drop;
+
                                dma_sync_single_for_cpu(hwdev,
                                                        rp->rx_skbuff_dma[entry],
                                                        rp->rx_buf_sz,
@@ -2018,32 +2114,31 @@ static int rhine_rx(struct net_device *dev, int limit)
                                skb_copy_to_linear_data(skb,
                                                 rp->rx_skbuff[entry]->data,
                                                 pkt_len);
-                               skb_put(skb, pkt_len);
+
                                dma_sync_single_for_device(hwdev,
                                                           rp->rx_skbuff_dma[entry],
                                                           rp->rx_buf_sz,
                                                           DMA_FROM_DEVICE);
                        } else {
+                               struct rhine_skb_dma sd;
+
+                               if (unlikely(rhine_skb_dma_init(dev, &sd) < 0))
+                                       goto drop;
+
                                skb = rp->rx_skbuff[entry];
-                               if (skb == NULL) {
-                                       netdev_err(dev, "Inconsistent Rx descriptor chain\n");
-                                       break;
-                               }
-                               rp->rx_skbuff[entry] = NULL;
-                               skb_put(skb, pkt_len);
+
                                dma_unmap_single(hwdev,
                                                 rp->rx_skbuff_dma[entry],
                                                 rp->rx_buf_sz,
                                                 DMA_FROM_DEVICE);
+                               rhine_skb_dma_nic_store(rp, &sd, entry);
                        }
 
-                       if (unlikely(desc_length & DescTag))
-                               vlan_tci = rhine_get_vlan_tci(skb, data_size);
-
+                       skb_put(skb, pkt_len);
                        skb->protocol = eth_type_trans(skb, dev);
 
-                       if (unlikely(desc_length & DescTag))
-                               __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
+                       rhine_rx_vlan_tag(skb, desc, data_size);
+
                        netif_receive_skb(skb);
 
                        u64_stats_update_begin(&rp->rx_stats.syncp);
@@ -2051,35 +2146,16 @@ static int rhine_rx(struct net_device *dev, int limit)
                        rp->rx_stats.packets++;
                        u64_stats_update_end(&rp->rx_stats.syncp);
                }
+give_descriptor_to_nic:
+               desc->rx_status = cpu_to_le32(DescOwn);
                entry = (++rp->cur_rx) % RX_RING_SIZE;
-               rp->rx_head_desc = &rp->rx_ring[entry];
-       }
-
-       /* Refill the Rx ring buffers. */
-       for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
-               struct sk_buff *skb;
-               entry = rp->dirty_rx % RX_RING_SIZE;
-               if (rp->rx_skbuff[entry] == NULL) {
-                       skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
-                       rp->rx_skbuff[entry] = skb;
-                       if (skb == NULL)
-                               break;  /* Better luck next round. */
-                       rp->rx_skbuff_dma[entry] =
-                               dma_map_single(hwdev, skb->data,
-                                              rp->rx_buf_sz,
-                                              DMA_FROM_DEVICE);
-                       if (dma_mapping_error(hwdev,
-                                             rp->rx_skbuff_dma[entry])) {
-                               dev_kfree_skb(skb);
-                               rp->rx_skbuff_dma[entry] = 0;
-                               break;
-                       }
-                       rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
-               }
-               rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
        }
 
        return count;
+
+drop:
+       dev->stats.rx_dropped++;
+       goto give_descriptor_to_nic;
 }
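
rhine_rx() no longer keeps a dirty_rx index with a separate refill
loop: every path through the loop, including the new drop: label, ends
by flipping DescOwn back to the hardware, so each descriptor is
recycled in place. The per-descriptor decision, condensed (a sketch;
copy_small_frame()/swap_in_new_buffer()/detach_current_skb() are
hypothetical stand-ins for the copybreak and buffer-replacement code
above):

static void example_rx_one(struct net_device *dev, struct rx_desc *desc,
			   int pkt_len)
{
	struct sk_buff *skb = NULL;

	if (pkt_len < rx_copybreak)
		skb = copy_small_frame(dev, desc, pkt_len);	/* keep buffer */
	else if (swap_in_new_buffer(dev, desc) == 0)
		skb = detach_current_skb(dev, desc);	/* replace buffer */

	if (skb)
		netif_receive_skb(skb);
	else
		dev->stats.rx_dropped++;	/* alloc or map failed */

	/* Either way the descriptor goes straight back to the NIC. */
	desc->rx_status = cpu_to_le32(DescOwn);
}
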
 
 static void rhine_restart_tx(struct net_device *dev) {
@@ -2484,9 +2560,8 @@ static int rhine_resume(struct device *device)
        enable_mmio(rp->pioaddr, rp->quirks);
        rhine_power_init(dev);
        free_tbufs(dev);
-       free_rbufs(dev);
        alloc_tbufs(dev);
-       alloc_rbufs(dev);
+       rhine_reset_rbufs(rp);
        rhine_task_enable(rp);
        spin_lock_bh(&rp->lock);
        init_registers(dev);
index af2694dc6f90146fc2afe9073a0dde7058731f59..5a1068df7038c5faf5d67742139ea1d0d53f0845 100644 (file)
 
 u32 temac_ior(struct temac_local *lp, int offset)
 {
-       return in_be32((u32 *)(lp->regs + offset));
+       return in_be32(lp->regs + offset);
 }
 
 void temac_iow(struct temac_local *lp, int offset, u32 value)
 {
-       out_be32((u32 *) (lp->regs + offset), value);
+       out_be32(lp->regs + offset, value);
 }
 
 int temac_indirect_busywait(struct temac_local *lp)
@@ -124,7 +124,7 @@ void temac_indirect_out32(struct temac_local *lp, int reg, u32 value)
  */
 static u32 temac_dma_in32(struct temac_local *lp, int reg)
 {
-       return in_be32((u32 *)(lp->sdma_regs + (reg << 2)));
+       return in_be32(lp->sdma_regs + (reg << 2));
 }
 
 /**
@@ -134,7 +134,7 @@ static u32 temac_dma_in32(struct temac_local *lp, int reg)
  */
 static void temac_dma_out32(struct temac_local *lp, int reg, u32 value)
 {
-       out_be32((u32 *)(lp->sdma_regs + (reg << 2)), value);
+       out_be32(lp->sdma_regs + (reg << 2), value);
 }
 
 /* DMA register access functions can be DCR based or memory mapped.
@@ -400,7 +400,7 @@ static void temac_set_multicast_list(struct net_device *ndev)
        mutex_unlock(&lp->indirect_mutex);
 }
 
-struct temac_option {
+static struct temac_option {
        int flg;
        u32 opt;
        u32 reg;
@@ -587,7 +587,7 @@ static void temac_device_reset(struct net_device *ndev)
        ndev->trans_start = jiffies; /* prevent tx timeout */
 }
 
-void temac_adjust_link(struct net_device *ndev)
+static void temac_adjust_link(struct net_device *ndev)
 {
        struct temac_local *lp = netdev_priv(ndev);
        struct phy_device *phy = lp->phy_dev;
@@ -688,10 +688,8 @@ static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
 
        if (temac_check_tx_bd_space(lp, num_frag)) {
-               if (!netif_queue_stopped(ndev)) {
+               if (!netif_queue_stopped(ndev))
                        netif_stop_queue(ndev);
-                       return NETDEV_TX_BUSY;
-               }
                return NETDEV_TX_BUSY;
        }
 
index 4c9b4fa1d3c1cbed1fc223634adca0a2cb7b5807..7cb9abac95c89c71add32d2b599aef46b0af2b00 100644 (file)
 #include <linux/netdevice.h>
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
+#include <linux/if_vlan.h>
 
 /* Packet size info */
 #define XAE_HDR_SIZE                   14 /* Size of Ethernet header */
-#define XAE_HDR_VLAN_SIZE              18 /* Size of an Ethernet hdr + VLAN */
 #define XAE_TRL_SIZE                    4 /* Size of Ethernet trailer (FCS) */
 #define XAE_MTU                              1500 /* Max MTU of an Ethernet frame */
 #define XAE_JUMBO_MTU                9000 /* Max MTU of a jumbo Eth. frame */
 
 #define XAE_MAX_FRAME_SIZE      (XAE_MTU + XAE_HDR_SIZE + XAE_TRL_SIZE)
-#define XAE_MAX_VLAN_FRAME_SIZE  (XAE_MTU + XAE_HDR_VLAN_SIZE + XAE_TRL_SIZE)
+#define XAE_MAX_VLAN_FRAME_SIZE  (XAE_MTU + VLAN_ETH_HLEN + XAE_TRL_SIZE)
 #define XAE_MAX_JUMBO_FRAME_SIZE (XAE_JUMBO_MTU + XAE_HDR_SIZE + XAE_TRL_SIZE)
 
 /* Configuration options */
 #define XAE_OPTION_FLOW_CONTROL                        (1 << 4)
 
 /* Strip FCS and PAD from incoming frames. Note: PAD from VLAN frames is not
- * stripped. Default: disabled (set) */
+ * stripped. Default: disabled (set)
+ */
 #define XAE_OPTION_FCS_STRIP                   (1 << 5)
 
 /* Generate FCS field and add PAD automatically for outgoing frames.
- * Default: enabled (set) */
+ * Default: enabled (set)
+ */
 #define XAE_OPTION_FCS_INSERT                  (1 << 6)
 
 /* Enable Length/Type error checking for incoming frames. When this option is
  * set, the MAC will filter frames that have a mismatched type/length field
  * and if XAE_OPTION_REPORT_RXERR is set, the user is notified when these
  * types of frames are encountered. When this option is cleared, the MAC will
- * allow these types of frames to be received. Default: enabled (set) */
+ * allow these types of frames to be received. Default: enabled (set)
+ */
 #define XAE_OPTION_LENTYPE_ERR                 (1 << 7)
 
 /* Enable the transmitter. Default: enabled (set) */
 #define XAE_MDIO_MWD_OFFSET    0x00000508 /* MII Management Write Data */
 #define XAE_MDIO_MRD_OFFSET    0x0000050C /* MII Management Read Data */
 #define XAE_MDIO_MIS_OFFSET    0x00000600 /* MII Management Interrupt Status */
-#define XAE_MDIO_MIP_OFFSET    0x00000620 /* MII Mgmt Interrupt Pending
-                                           * register offset */
-#define XAE_MDIO_MIE_OFFSET    0x00000640 /* MII Management Interrupt Enable
-                                           * register offset */
-#define XAE_MDIO_MIC_OFFSET    0x00000660 /* MII Management Interrupt Clear
-                                           * register offset. */
+/* MII Mgmt Interrupt Pending register offset */
+#define XAE_MDIO_MIP_OFFSET    0x00000620
+/* MII Management Interrupt Enable register offset */
+#define XAE_MDIO_MIE_OFFSET    0x00000640
+/* MII Management Interrupt Clear register offset. */
+#define XAE_MDIO_MIC_OFFSET    0x00000660
 #define XAE_UAW0_OFFSET                0x00000700 /* Unicast address word 0 */
 #define XAE_UAW1_OFFSET                0x00000704 /* Unicast address word 1 */
 #define XAE_FMI_OFFSET         0x00000708 /* Filter Mask Index */
 #define XAE_MCAST_TABLE_OFFSET 0x00020000 /* Multicast table address */
 
 /* Bit Masks for Axi Ethernet RAF register */
-#define XAE_RAF_MCSTREJ_MASK           0x00000002 /* Reject receive multicast
-                                                   * destination address */
-#define XAE_RAF_BCSTREJ_MASK           0x00000004 /* Reject receive broadcast
-                                                   * destination address */
+/* Reject receive multicast destination address */
+#define XAE_RAF_MCSTREJ_MASK           0x00000002
+/* Reject receive broadcast destination address */
+#define XAE_RAF_BCSTREJ_MASK           0x00000004
 #define XAE_RAF_TXVTAGMODE_MASK                0x00000018 /* Tx VLAN TAG mode */
 #define XAE_RAF_RXVTAGMODE_MASK                0x00000060 /* Rx VLAN TAG mode */
 #define XAE_RAF_TXVSTRPMODE_MASK       0x00000180 /* Tx VLAN STRIP mode */
 #define XAE_RAF_RXVSTRPMODE_MASK       0x00000600 /* Rx VLAN STRIP mode */
 #define XAE_RAF_NEWFNCENBL_MASK                0x00000800 /* New function mode */
-#define XAE_RAF_EMULTIFLTRENBL_MASK    0x00001000 /* Exteneded Multicast
-                                                   * Filtering mode
-                                                   */
+/* Extended Multicast Filtering mode */
+#define XAE_RAF_EMULTIFLTRENBL_MASK    0x00001000
 #define XAE_RAF_STATSRST_MASK          0x00002000 /* Stats. Counter Reset */
 #define XAE_RAF_RXBADFRMEN_MASK                0x00004000 /* Recv Bad Frame Enable */
 #define XAE_RAF_TXVTAGMODE_SHIFT       3 /* Tx Tag mode shift bits */
 
 /* Bit Masks for Axi Ethernet TPF and IFGP registers */
 #define XAE_TPF_TPFV_MASK              0x0000FFFF /* Tx pause frame value */
-#define XAE_IFGP0_IFGP_MASK            0x0000007F /* Transmit inter-frame
-                                                   * gap adjustment value */
+/* Transmit inter-frame gap adjustment value */
+#define XAE_IFGP0_IFGP_MASK            0x0000007F
 
 /* Bit Masks for Axi Ethernet IS, IE and IP registers, Same masks apply
- * for all 3 registers. */
-#define XAE_INT_HARDACSCMPLT_MASK      0x00000001 /* Hard register access
-                                                   * complete */
-#define XAE_INT_AUTONEG_MASK           0x00000002 /* Auto negotiation
-                                                   * complete */
+ * for all 3 registers.
+ */
+/* Hard register access complete */
+#define XAE_INT_HARDACSCMPLT_MASK      0x00000001
+/* Auto negotiation complete */
+#define XAE_INT_AUTONEG_MASK           0x00000002
 #define XAE_INT_RXCMPIT_MASK           0x00000004 /* Rx complete */
 #define XAE_INT_RXRJECT_MASK           0x00000008 /* Rx frame rejected */
 #define XAE_INT_RXFIFOOVR_MASK         0x00000010 /* Rx fifo overrun */
 #define XAE_INT_PHYRSTCMPLT_MASK       0x00000100 /* Phy Reset complete */
 #define XAE_INT_ALL_MASK               0x0000003F /* All the ints */
 
+/* INT bits that indicate receive errors */
 #define XAE_INT_RECV_ERROR_MASK                                \
-       (XAE_INT_RXRJECT_MASK | XAE_INT_RXFIFOOVR_MASK) /* INT bits that
-                                                        * indicate receive
-                                                        * errors */
+       (XAE_INT_RXRJECT_MASK | XAE_INT_RXFIFOOVR_MASK)
 
 /* Bit masks for Axi Ethernet VLAN TPID Word 0 register */
 #define XAE_TPID_0_MASK                0x0000FFFF /* TPID 0 */
 /* Bit masks for Axi Ethernet RCW1 register */
 #define XAE_RCW1_RST_MASK      0x80000000 /* Reset */
 #define XAE_RCW1_JUM_MASK      0x40000000 /* Jumbo frame enable */
-#define XAE_RCW1_FCS_MASK      0x20000000 /* In-Band FCS enable
-                                           * (FCS not stripped) */
+/* In-Band FCS enable (FCS not stripped) */
+#define XAE_RCW1_FCS_MASK      0x20000000
 #define XAE_RCW1_RX_MASK       0x10000000 /* Receiver enable */
 #define XAE_RCW1_VLAN_MASK     0x08000000 /* VLAN frame enable */
-#define XAE_RCW1_LT_DIS_MASK   0x02000000 /* Length/type field valid check
-                                           * disable */
-#define XAE_RCW1_CL_DIS_MASK   0x01000000 /* Control frame Length check
-                                           * disable */
-#define XAE_RCW1_PAUSEADDR_MASK 0x0000FFFF /* Pause frame source address
-                                           * bits [47:32]. Bits [31:0] are
-                                           * stored in register RCW0 */
+/* Length/type field valid check disable */
+#define XAE_RCW1_LT_DIS_MASK   0x02000000
+/* Control frame Length check disable */
+#define XAE_RCW1_CL_DIS_MASK   0x01000000
+/* Pause frame source address bits [47:32]. Bits [31:0] are
+ * stored in register RCW0
+ */
+#define XAE_RCW1_PAUSEADDR_MASK 0x0000FFFF
 
 /* Bit masks for Axi Ethernet TC register */
 #define XAE_TC_RST_MASK                0x80000000 /* Reset */
 #define XAE_TC_JUM_MASK                0x40000000 /* Jumbo frame enable */
-#define XAE_TC_FCS_MASK                0x20000000 /* In-Band FCS enable
-                                           * (FCS not generated) */
+/* In-Band FCS enable (FCS not generated) */
+#define XAE_TC_FCS_MASK                0x20000000
 #define XAE_TC_TX_MASK         0x10000000 /* Transmitter enable */
 #define XAE_TC_VLAN_MASK       0x08000000 /* VLAN frame enable */
-#define XAE_TC_IFG_MASK                0x02000000 /* Inter-frame gap adjustment
-                                           * enable */
+/* Inter-frame gap adjustment enable */
+#define XAE_TC_IFG_MASK                0x02000000
 
 /* Bit masks for Axi Ethernet FCC register */
 #define XAE_FCC_FCRX_MASK      0x20000000 /* Rx flow control enable */
 #define XAE_MDIO_INT_MIIM_RDY_MASK     0x00000001 /* MIIM Interrupt */
 
 /* Bit masks for Axi Ethernet UAW1 register */
-#define XAE_UAW1_UNICASTADDR_MASK      0x0000FFFF /* Station address bits
-                                                   * [47:32]; Station address
-                                                   * bits [31:0] are stored in
-                                                   * register UAW0 */
+/* Station address bits [47:32]; Station address
+ * bits [31:0] are stored in register UAW0
+ */
+#define XAE_UAW1_UNICASTADDR_MASK      0x0000FFFF
 
 /* Bit masks for Axi Ethernet FMI register */
 #define XAE_FMI_PM_MASK                        0x80000000 /* Promis. mode enable */
 #define XAE_PHY_TYPE_SGMII             4
 #define XAE_PHY_TYPE_1000BASE_X                5
 
-#define XAE_MULTICAST_CAM_TABLE_NUM    4 /* Total number of entries in the
-                                          * hardware multicast table. */
+ /* Total number of entries in the hardware multicast table. */
+#define XAE_MULTICAST_CAM_TABLE_NUM    4
 
 /* Axi Ethernet Synthesis features */
 #define XAE_FEATURE_PARTIAL_RX_CSUM    (1 << 0)
@@ -407,8 +410,11 @@ struct axidma_bd {
  *               Txed/Rxed in the existing hardware. If jumbo option is
  *               supported, the maximum frame size would be 9k. Else it is
  *               1522 bytes (assuming support for basic VLAN)
- * @jumbo_support: Stores hardware configuration for jumbo support. If hardware
- *                can handle jumbo packets, this entry will be 1, else 0.
+ * @rxmem:     Stores rx memory size for jumbo frame handling.
+ * @csum_offload_on_tx_path:   Stores the checksum selection on TX side.
+ * @csum_offload_on_rx_path:   Stores the checksum selection on RX side.
+ * @coalesce_count_rx: Store the irq coalesce on RX side.
+ * @coalesce_count_tx: Store the irq coalesce on TX side.
  */
 struct axienet_local {
        struct net_device *ndev;
@@ -446,7 +452,7 @@ struct axienet_local {
        u32 rx_bd_ci;
 
        u32 max_frm_size;
-       u32 jumbo_support;
+       u32 rxmem;
 
        int csum_offload_on_tx_path;
        int csum_offload_on_rx_path;
@@ -472,7 +478,7 @@ struct axienet_option {
  * @lp:         Pointer to axienet local structure
  * @offset:     Address offset from the base address of Axi Ethernet core
  *
- * returns: The contents of the Axi Ethernet register
+ * Return: The contents of the Axi Ethernet register
  *
  * This function returns the contents of the corresponding register.
  */
index 28b7e7d9c272ae672034d10bb64f855f0d1077e9..4208dd7ef10118aff0fa4bf02cd47e05481c25f9 100644 (file)
@@ -117,7 +117,7 @@ static struct axienet_option axienet_options[] = {
  * @lp:                Pointer to axienet local structure
  * @reg:       Address offset from the base address of the Axi DMA core
  *
- * returns: The contents of the Axi DMA register
+ * Return: The contents of the Axi DMA register
  *
  * This function returns the contents of the corresponding Axi DMA register.
  */
@@ -179,8 +179,7 @@ static void axienet_dma_bd_release(struct net_device *ndev)
  * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
  * @ndev:      Pointer to the net_device structure
  *
- * returns: 0, on success
- *         -ENOMEM, on failure
+ * Return: 0 on success, -ENOMEM on failure
  *
  * This function is called to initialize the Rx and Tx DMA descriptor
  * rings. This initializes the descriptors with required default values
@@ -198,9 +197,7 @@ static int axienet_dma_bd_init(struct net_device *ndev)
        lp->tx_bd_tail = 0;
        lp->rx_bd_ci = 0;
 
-       /*
-        * Allocate the Tx and Rx buffer descriptors.
-        */
+       /* Allocate the Tx and Rx buffer descriptors. */
        lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
                                          sizeof(*lp->tx_bd_v) * TX_BD_NUM,
                                          &lp->tx_bd_p, GFP_KERNEL);
@@ -263,7 +260,8 @@ static int axienet_dma_bd_init(struct net_device *ndev)
        axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
 
        /* Populate the tail pointer and bring the Rx Axi DMA engine out of
-        * halted state. This will make the Rx side ready for reception.*/
+        * halted state. This will make the Rx side ready for reception.
+        */
        axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
        cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
        axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
@@ -273,7 +271,8 @@ static int axienet_dma_bd_init(struct net_device *ndev)
 
        /* Write to the RS (Run-stop) bit in the Tx channel control register.
         * Tx channel is now ready to run. But only after we write to the
-        * tail pointer register that the Tx channel will start transmitting */
+        * tail pointer register that the Tx channel will start transmitting.
+        */
        axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
        cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
        axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
@@ -320,7 +319,7 @@ static void axienet_set_mac_address(struct net_device *ndev, void *address)
  * @ndev:      Pointer to the net_device structure
  * @p:         6 byte Address to be written as MAC address
  *
- * returns: 0 for all conditions. Presently, there is no failure case.
+ * Return: 0 for all conditions. Presently, there is no failure case.
  *
  * This function is called to initialize the MAC address of the Axi Ethernet
  * core. It calls the core specific axienet_set_mac_address. This is the
@@ -354,7 +353,8 @@ static void axienet_set_multicast_list(struct net_device *ndev)
            netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
                /* We must make the kernel realize we had to move into
                 * promiscuous mode. If it was a promiscuous mode request
-                * the flag is already set. If not we set it. */
+                * the flag is already set. If not we set it.
+                */
                ndev->flags |= IFF_PROMISC;
                reg = axienet_ior(lp, XAE_FMI_OFFSET);
                reg |= XAE_FMI_PM_MASK;
@@ -438,14 +438,15 @@ static void __axienet_device_reset(struct axienet_local *lp,
        /* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
         * process of Axi DMA takes a while to complete as all pending
         * commands/transfers will be flushed or completed during this
-        * reset process. */
+        * reset process.
+        */
        axienet_dma_out32(lp, offset, XAXIDMA_CR_RESET_MASK);
        timeout = DELAY_OF_ONE_MILLISEC;
        while (axienet_dma_in32(lp, offset) & XAXIDMA_CR_RESET_MASK) {
                udelay(1);
                if (--timeout == 0) {
-                       dev_err(dev, "axienet_device_reset DMA "
-                               "reset timeout!\n");
+                       netdev_err(lp->ndev, "%s: DMA reset timeout!\n",
+                                  __func__);
                        break;
                }
        }
@@ -471,19 +472,21 @@ static void axienet_device_reset(struct net_device *ndev)
        __axienet_device_reset(lp, &ndev->dev, XAXIDMA_RX_CR_OFFSET);
 
        lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
+       lp->options |= XAE_OPTION_VLAN;
        lp->options &= (~XAE_OPTION_JUMBO);
 
        if ((ndev->mtu > XAE_MTU) &&
-           (ndev->mtu <= XAE_JUMBO_MTU) &&
-           (lp->jumbo_support)) {
-               lp->max_frm_size = ndev->mtu + XAE_HDR_VLAN_SIZE +
-                                  XAE_TRL_SIZE;
-               lp->options |= XAE_OPTION_JUMBO;
+               (ndev->mtu <= XAE_JUMBO_MTU)) {
+               lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
+                                       XAE_TRL_SIZE;
+
+               if (lp->max_frm_size <= lp->rxmem)
+                       lp->options |= XAE_OPTION_JUMBO;
        }
 
        if (axienet_dma_bd_init(ndev)) {
-               dev_err(&ndev->dev, "axienet_device_reset descriptor "
-                       "allocation failed\n");
+               netdev_err(ndev, "%s: descriptor allocation failed\n",
+                          __func__);
        }
 
        axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
@@ -497,7 +500,8 @@ static void axienet_device_reset(struct net_device *ndev)
        axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
 
        /* Sync default options with HW but leave receiver and
-        * transmitter disabled.*/
+        * transmitter disabled.
+        */
        axienet_setoptions(ndev, lp->options &
                           ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
        axienet_set_mac_address(ndev, NULL);
@@ -558,8 +562,8 @@ static void axienet_adjust_link(struct net_device *ndev)
                        lp->last_link = link_state;
                        phy_print_status(phy);
                } else {
-                       dev_err(&ndev->dev, "Error setting Axi Ethernet "
-                               "mac speed\n");
+                       netdev_err(ndev,
+                                  "Error setting Axi Ethernet mac speed\n");
                }
        }
 }
@@ -617,7 +621,7 @@ static void axienet_start_xmit_done(struct net_device *ndev)
  * @lp:                Pointer to the axienet_local structure
  * @num_frag:  The number of BDs to check for
  *
- * returns: 0, on success
+ * Return: 0, on success
  *         NETDEV_TX_BUSY, if any of the descriptors are not free
  *
  * This function is invoked before BDs are allocated and transmission starts.
@@ -640,7 +644,7 @@ static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
  * @skb:       sk_buff pointer that contains data to be Txed.
  * @ndev:      Pointer to net_device structure.
  *
- * returns: NETDEV_TX_OK, on success
+ * Return: NETDEV_TX_OK, on success
  *         NETDEV_TX_BUSY, if any of the descriptors are not free
  *
  * This function is invoked from upper layers to initiate transmission. The
@@ -726,15 +730,15 @@ static void axienet_recv(struct net_device *ndev)
        u32 csumstatus;
        u32 size = 0;
        u32 packets = 0;
-       dma_addr_t tail_p;
+       dma_addr_t tail_p = 0;
        struct axienet_local *lp = netdev_priv(ndev);
        struct sk_buff *skb, *new_skb;
        struct axidma_bd *cur_p;
 
-       tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
        cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
 
        while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
+               tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
                skb = (struct sk_buff *) (cur_p->sw_id_offset);
                length = cur_p->app4 & 0x0000FFFF;
 
@@ -786,7 +790,8 @@ static void axienet_recv(struct net_device *ndev)
        ndev->stats.rx_packets += packets;
        ndev->stats.rx_bytes += size;
 
-       axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
+       if (tail_p)
+               axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
 }
 
 /**
@@ -794,7 +799,7 @@ static void axienet_recv(struct net_device *ndev)
  * @irq:       irq number
  * @_ndev:     net_device pointer
  *
- * returns: IRQ_HANDLED for all cases.
+ * Return: IRQ_HANDLED for all cases.
  *
  * This is the Axi DMA Tx done Isr. It invokes "axienet_start_xmit_done"
  * to complete the BD processing.
@@ -808,6 +813,7 @@ static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
 
        status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
        if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
+               axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
                axienet_start_xmit_done(lp->ndev);
                goto out;
        }
@@ -831,9 +837,9 @@ static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
                axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
 
                tasklet_schedule(&lp->dma_err_tasklet);
+               axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
        }
 out:
-       axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
        return IRQ_HANDLED;
 }
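
Both axienet DMA ISRs used to acknowledge the status register once, on
the way out; the hunks above and below ack it before handling, so a
completion that lands while the handler runs sets the status bit again
and re-raises the interrupt instead of being wiped by the late ack.
The general shape (a sketch; the example_* names are hypothetical
stand-ins for the axienet_dma_in32()/axienet_dma_out32() pairs above):

static irqreturn_t example_isr(int irq, void *data)
{
	struct example_priv *p = data;
	u32 status = example_read_status(p);

	if (status & EXAMPLE_WORK_MASK) {
		/* Ack first: later events set the status bit again
		 * and re-raise the interrupt rather than being lost.
		 */
		example_ack_status(p, status);
		example_process_completions(p);
	}
	return IRQ_HANDLED;
}
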
 
@@ -842,7 +848,7 @@ out:
  * @irq:       irq number
  * @_ndev:     net_device pointer
  *
- * returns: IRQ_HANDLED for all cases.
+ * Return: IRQ_HANDLED for all cases.
  *
  * This is the Axi DMA Rx Isr. It invokes "axienet_recv" to complete the BD
  * processing.
@@ -856,6 +862,7 @@ static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
 
        status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
        if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
+               axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
                axienet_recv(lp->ndev);
                goto out;
        }
@@ -879,9 +886,9 @@ static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
                axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
 
                tasklet_schedule(&lp->dma_err_tasklet);
+               axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
        }
 out:
-       axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
        return IRQ_HANDLED;
 }
 
@@ -891,7 +898,7 @@ static void axienet_dma_err_handler(unsigned long data);
  * axienet_open - Driver open routine.
  * @ndev:      Pointer to net_device structure
  *
- * returns: 0, on success.
+ * Return: 0, on success.
  *         -ENODEV, if PHY cannot be connected to
  *         non-zero error value on failure
  *
@@ -914,7 +921,8 @@ static int axienet_open(struct net_device *ndev)
        /* Disable the MDIO interface till Axi Ethernet Reset is completed.
         * When we do an Axi Ethernet reset, it resets the complete core
         * including the MDIO. If MDIO is not disabled when the reset
-        * process is started, MDIO will be broken afterwards. */
+        * process is started, MDIO will be broken afterwards.
+        */
        axienet_iow(lp, XAE_MDIO_MC_OFFSET,
                    (mdio_mcreg & (~XAE_MDIO_MC_MDIOEN_MASK)));
        axienet_device_reset(ndev);
@@ -925,14 +933,20 @@ static int axienet_open(struct net_device *ndev)
                return ret;
 
        if (lp->phy_node) {
-               lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
+               if (lp->phy_type == XAE_PHY_TYPE_GMII) {
+                       lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
                                             axienet_adjust_link, 0,
                                             PHY_INTERFACE_MODE_GMII);
-               if (!lp->phy_dev) {
-                       dev_err(lp->dev, "of_phy_connect() failed\n");
-                       return -ENODEV;
+               } else if (lp->phy_type == XAE_PHY_TYPE_RGMII_2_0) {
+                       lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
+                                            axienet_adjust_link, 0,
+                                            PHY_INTERFACE_MODE_RGMII_ID);
                }
-               phy_start(lp->phy_dev);
+
+               if (!lp->phy_dev)
+                       dev_err(lp->dev, "of_phy_connect() failed\n");
+               else
+                       phy_start(lp->phy_dev);
        }
 
        /* Enable tasklets for Axi DMA error handling */
@@ -965,7 +979,7 @@ err_tx_irq:
  * axienet_stop - Driver stop routine.
  * @ndev:      Pointer to net_device structure
  *
- * returns: 0, on success.
+ * Return: 0, on success.
  *
  * This is the driver stop routine. It calls phy_disconnect to stop the PHY
  * device. It also removes the interrupt handlers and disables the interrupts.
@@ -1005,7 +1019,7 @@ static int axienet_stop(struct net_device *ndev)
  * @ndev:      Pointer to net_device structure
  * @new_mtu:   New mtu value to be applied
  *
- * returns: Always returns 0 (success).
+ * Return: Always returns 0 (success).
  *
  * This is the change mtu driver routine. It checks if the Axi Ethernet
  * hardware supports jumbo frames before changing the mtu. This can be
@@ -1017,15 +1031,15 @@ static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
 
        if (netif_running(ndev))
                return -EBUSY;
-       if (lp->jumbo_support) {
-               if ((new_mtu > XAE_JUMBO_MTU) || (new_mtu < 64))
-                       return -EINVAL;
-               ndev->mtu = new_mtu;
-       } else {
-               if ((new_mtu > XAE_MTU) || (new_mtu < 64))
-                       return -EINVAL;
-               ndev->mtu = new_mtu;
-       }
+
+       if ((new_mtu + VLAN_ETH_HLEN +
+               XAE_TRL_SIZE) > lp->rxmem)
+               return -EINVAL;
+
+       if ((new_mtu > XAE_JUMBO_MTU) || (new_mtu < 64))
+               return -EINVAL;
+
+       ndev->mtu = new_mtu;
 
        return 0;
 }
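
The single rxmem check above replaces the old jumbo/non-jumbo split: the full on-wire frame, i.e. the MTU plus the VLAN Ethernet header (VLAN_ETH_HLEN, 18 bytes) plus the Axi Ethernet trailer (XAE_TRL_SIZE), must fit in the Rx memory configured via the xlnx,rxmem property. A worked sketch, taking the trailer as the 4 bytes defined in the driver header:

	/* Sketch: with xlnx,rxmem = 0x4000 (16384), a 9000-byte jumbo MTU
	 * needs 9000 + 18 + 4 = 9022 bytes and passes; with rxmem = 0x800
	 * (2048), anything above 2048 - 18 - 4 = 2026 is rejected.
	 */
	static bool mtu_fits_rxmem(u32 new_mtu, u32 rxmem)
	{
		return new_mtu + VLAN_ETH_HLEN + XAE_TRL_SIZE <= rxmem;
	}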
@@ -1072,6 +1086,8 @@ static const struct net_device_ops axienet_netdev_ops = {
  * not be found, the function returns -ENODEV. This function calls the
  * relevant PHY ethtool API to get the PHY settings.
  * Issue "ethtool ethX" under linux prompt to execute this function.
+ *
+ * Return: 0 on success, -ENODEV if PHY doesn't exist
  */
 static int axienet_ethtools_get_settings(struct net_device *ndev,
                                         struct ethtool_cmd *ecmd)
@@ -1093,6 +1109,8 @@ static int axienet_ethtools_get_settings(struct net_device *ndev,
  * relevant PHY ethtool API to set the PHY.
  * Issue e.g. "ethtool -s ethX speed 1000" under linux prompt to execute this
  * function.
+ *
+ * Return: 0 on success, -ENODEV if PHY doesn't exist
  */
 static int axienet_ethtools_set_settings(struct net_device *ndev,
                                         struct ethtool_cmd *ecmd)
@@ -1127,6 +1145,8 @@ static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
  *
  * This implements ethtool command for getting the total register length
  * information.
+ *
+ * Return: the total regs length
  */
 static int axienet_ethtools_get_regs_len(struct net_device *ndev)
 {
@@ -1213,11 +1233,13 @@ axienet_ethtools_get_pauseparam(struct net_device *ndev,
  * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control)
  *                                  settings.
  * @ndev:      Pointer to net_device structure
- * @epauseparam:Pointer to ethtool_pauseparam structure
+ * @epauseparm: Pointer to ethtool_pauseparam structure
  *
  * This implements ethtool command for enabling flow control on Rx and Tx
  * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this
  * function.
+ *
+ * Return: 0 on success, -EFAULT if device is running
  */
 static int
 axienet_ethtools_set_pauseparam(struct net_device *ndev,
@@ -1227,8 +1249,8 @@ axienet_ethtools_set_pauseparam(struct net_device *ndev,
        struct axienet_local *lp = netdev_priv(ndev);
 
        if (netif_running(ndev)) {
-               printk(KERN_ERR "%s: Please stop netif before applying "
-                      "configruation\n", ndev->name);
+               netdev_err(ndev,
+                          "Please stop netif before applying configuration\n");
                return -EFAULT;
        }
 
@@ -1254,6 +1276,8 @@ axienet_ethtools_set_pauseparam(struct net_device *ndev,
  * This implements ethtool command for getting the DMA interrupt coalescing
  * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to
  * execute this function.
+ *
+ * Return: 0 always
  */
 static int axienet_ethtools_get_coalesce(struct net_device *ndev,
                                         struct ethtool_coalesce *ecoalesce)
@@ -1277,6 +1301,8 @@ static int axienet_ethtools_get_coalesce(struct net_device *ndev,
  * This implements ethtool command for setting the DMA interrupt coalescing
  * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux
  * prompt to execute this function.
+ *
+ * Return: 0 on success, non-zero error value on failure.
  */
 static int axienet_ethtools_set_coalesce(struct net_device *ndev,
                                         struct ethtool_coalesce *ecoalesce)
@@ -1284,8 +1310,8 @@ static int axienet_ethtools_set_coalesce(struct net_device *ndev,
        struct axienet_local *lp = netdev_priv(ndev);
 
        if (netif_running(ndev)) {
-               printk(KERN_ERR "%s: Please stop netif before applying "
-                      "configruation\n", ndev->name);
+               netdev_err(ndev,
+                          "Please stop netif before applying configuration\n");
                return -EFAULT;
        }
 
@@ -1354,7 +1380,8 @@ static void axienet_dma_err_handler(unsigned long data)
        /* Disable the MDIO interface till Axi Ethernet Reset is completed.
         * When we do an Axi Ethernet reset, it resets the complete core
         * including the MDIO. So if MDIO is not disabled when the reset
-        * process is started, MDIO will be broken afterwards. */
+        * process is started, MDIO will be broken afterwards.
+        */
        axienet_iow(lp, XAE_MDIO_MC_OFFSET, (mdio_mcreg &
                    ~XAE_MDIO_MC_MDIOEN_MASK));
 
@@ -1425,7 +1452,8 @@ static void axienet_dma_err_handler(unsigned long data)
        axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
 
        /* Populate the tail pointer and bring the Rx Axi DMA engine out of
-        * halted state. This will make the Rx side ready for reception.*/
+        * halted state. This will make the Rx side ready for reception.
+        */
        axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
        cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
        axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
@@ -1435,7 +1463,8 @@ static void axienet_dma_err_handler(unsigned long data)
 
        /* Write to the RS (Run-stop) bit in the Tx channel control register.
         * Tx channel is now ready to run. But only after we write to the
-        * tail pointer register that the Tx channel will start transmitting */
+        * tail pointer register that the Tx channel will start transmitting
+        */
        axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
        cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
        axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
@@ -1451,7 +1480,8 @@ static void axienet_dma_err_handler(unsigned long data)
        axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
 
        /* Sync default options with HW but leave receiver and
-        * transmitter disabled.*/
+        * transmitter disabled.
+        */
        axienet_setoptions(ndev, lp->options &
                           ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
        axienet_set_mac_address(ndev, NULL);
@@ -1460,11 +1490,10 @@ static void axienet_dma_err_handler(unsigned long data)
 }
 
 /**
- * axienet_of_probe - Axi Ethernet probe function.
- * @op:                Pointer to platform device structure.
- * @match:     Pointer to device id structure
+ * axienet_probe - Axi Ethernet probe function.
+ * @pdev:      Pointer to platform device structure.
  *
- * returns: 0, on success
+ * Return: 0, on success
  *         Non-zero error value on failure.
  *
  * This is the probe routine for Axi Ethernet driver. This is called before
@@ -1472,22 +1501,23 @@ static void axienet_dma_err_handler(unsigned long data)
  * device. Parses through device tree and populates fields of
  * axienet_local. It registers the Ethernet device.
  */
-static int axienet_of_probe(struct platform_device *op)
+static int axienet_probe(struct platform_device *pdev)
 {
-       __be32 *p;
-       int size, ret = 0;
+       int ret;
        struct device_node *np;
        struct axienet_local *lp;
        struct net_device *ndev;
-       const void *addr;
+       u8 mac_addr[6];
+       struct resource *ethres, dmares;
+       u32 value;
 
        ndev = alloc_etherdev(sizeof(*lp));
        if (!ndev)
                return -ENOMEM;
 
-       platform_set_drvdata(op, ndev);
+       platform_set_drvdata(pdev, ndev);
 
-       SET_NETDEV_DEV(ndev, &op->dev);
+       SET_NETDEV_DEV(ndev, &pdev->dev);
        ndev->flags &= ~IFF_MULTICAST;  /* clear multicast */
        ndev->features = NETIF_F_SG;
        ndev->netdev_ops = &axienet_netdev_ops;
@@ -1495,21 +1525,23 @@ static int axienet_of_probe(struct platform_device *op)
 
        lp = netdev_priv(ndev);
        lp->ndev = ndev;
-       lp->dev = &op->dev;
+       lp->dev = &pdev->dev;
        lp->options = XAE_OPTION_DEFAULTS;
        /* Map device registers */
-       lp->regs = of_iomap(op->dev.of_node, 0);
+       ethres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       lp->regs = devm_ioremap_resource(&pdev->dev, ethres);
-       if (!lp->regs) {
+       if (IS_ERR(lp->regs)) {
-               dev_err(&op->dev, "could not map Axi Ethernet regs.\n");
+               dev_err(&pdev->dev, "could not map Axi Ethernet regs.\n");
-               ret = -ENOMEM;
+               ret = PTR_ERR(lp->regs);
-               goto nodev;
+               goto free_netdev;
        }
+
        /* Setup checksum offload, but default to off if not specified */
        lp->features = 0;
 
-       p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,txcsum", NULL);
-       if (p) {
-               switch (be32_to_cpup(p)) {
+       ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value);
+       if (!ret) {
+               switch (value) {
                case 1:
                        lp->csum_offload_on_tx_path =
                                XAE_FEATURE_PARTIAL_TX_CSUM;
@@ -1528,9 +1560,9 @@ static int axienet_of_probe(struct platform_device *op)
                        lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD;
                }
        }
-       p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,rxcsum", NULL);
-       if (p) {
-               switch (be32_to_cpup(p)) {
+       ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value);
+       if (!ret) {
+               switch (value) {
                case 1:
                        lp->csum_offload_on_rx_path =
                                XAE_FEATURE_PARTIAL_RX_CSUM;
@@ -1546,82 +1578,77 @@ static int axienet_of_probe(struct platform_device *op)
                }
        }
        /* For supporting jumbo frames, the Axi Ethernet hardware must have
-        * a larger Rx/Tx Memory. Typically, the size must be more than or
-        * equal to 16384 bytes, so that we can enable jumbo option and start
-        * supporting jumbo frames. Here we check for memory allocated for
-        * Rx/Tx in the hardware from the device-tree and accordingly set
-        * flags. */
-       p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,rxmem", NULL);
-       if (p) {
-               if ((be32_to_cpup(p)) >= 0x4000)
-                       lp->jumbo_support = 1;
-       }
-       p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,phy-type", NULL);
-       if (p)
-               lp->phy_type = be32_to_cpup(p);
+        * a larger Rx/Tx Memory. Typically, the size must be large enough
+        * to hold a jumbo frame before the jumbo option can be enabled.
+        * Here we check for memory allocated for Rx/Tx in the hardware from
+        * the device-tree and accordingly set flags.
+        */
+       of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem);
+       of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &lp->phy_type);
 
        /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
-       np = of_parse_phandle(op->dev.of_node, "axistream-connected", 0);
-       if (!np) {
-               dev_err(&op->dev, "could not find DMA node\n");
-               ret = -ENODEV;
-               goto err_iounmap;
+       np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
+       if (!np) {
+               dev_err(&pdev->dev, "could not find DMA node\n");
+               ret = -ENODEV;
+               goto free_netdev;
        }
-       lp->dma_regs = of_iomap(np, 0);
-       if (lp->dma_regs) {
-               dev_dbg(&op->dev, "MEM base: %p\n", lp->dma_regs);
-       } else {
-               dev_err(&op->dev, "unable to map DMA registers\n");
-               of_node_put(np);
+       ret = of_address_to_resource(np, 0, &dmares);
+       if (ret) {
+               dev_err(&pdev->dev, "unable to get DMA resource\n");
+               goto free_netdev;
+       }
+       lp->dma_regs = devm_ioremap_resource(&pdev->dev, &dmares);
+       if (IS_ERR(lp->dma_regs)) {
+               dev_err(&pdev->dev, "could not map DMA regs\n");
+               ret = PTR_ERR(lp->dma_regs);
+               goto free_netdev;
        }
        lp->rx_irq = irq_of_parse_and_map(np, 1);
        lp->tx_irq = irq_of_parse_and_map(np, 0);
        of_node_put(np);
        if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) {
-               dev_err(&op->dev, "could not determine irqs\n");
+               dev_err(&pdev->dev, "could not determine irqs\n");
                ret = -ENOMEM;
-               goto err_iounmap_2;
+               goto free_netdev;
        }
 
        /* Retrieve the MAC address */
-       addr = of_get_property(op->dev.of_node, "local-mac-address", &size);
-       if ((!addr) || (size != 6)) {
-               dev_err(&op->dev, "could not find MAC address\n");
-               ret = -ENODEV;
-               goto err_iounmap_2;
+       ret = of_property_read_u8_array(pdev->dev.of_node,
+                                       "local-mac-address", mac_addr, 6);
+       if (ret) {
+               dev_err(&pdev->dev, "could not find MAC address\n");
+               goto free_netdev;
        }
-       axienet_set_mac_address(ndev, (void *) addr);
+       axienet_set_mac_address(ndev, (void *)mac_addr);
 
        lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
        lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
 
-       lp->phy_node = of_parse_phandle(op->dev.of_node, "phy-handle", 0);
-       ret = axienet_mdio_setup(lp, op->dev.of_node);
-       if (ret)
-               dev_warn(&op->dev, "error registering MDIO bus\n");
+       lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
+       if (lp->phy_node) {
+               ret = axienet_mdio_setup(lp, pdev->dev.of_node);
+               if (ret)
+                       dev_warn(&pdev->dev, "error registering MDIO bus\n");
+       }
 
        ret = register_netdev(lp->ndev);
        if (ret) {
                dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
-               goto err_iounmap_2;
+               goto free_netdev;
        }
 
        return 0;
 
-err_iounmap_2:
-       if (lp->dma_regs)
-               iounmap(lp->dma_regs);
-err_iounmap:
-       iounmap(lp->regs);
-nodev:
+free_netdev:
        free_netdev(ndev);
-       ndev = NULL;
+
        return ret;
 }
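
Throughout the probe conversion, open-coded of_get_property() plus be32_to_cpup() pairs become of_property_read_u32(), which validates the property length and converts endianness in one call, returning 0 on success. A minimal sketch of the pattern; the property name here is a placeholder, not one the binding defines:

	u32 value = 0;	/* default used when the property is absent */

	if (of_property_read_u32(pdev->dev.of_node, "xlnx,example-prop",
				 &value))
		dev_dbg(&pdev->dev, "xlnx,example-prop absent, using %u\n",
			value);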
 
-static int axienet_of_remove(struct platform_device *op)
+static int axienet_remove(struct platform_device *pdev)
 {
-       struct net_device *ndev = platform_get_drvdata(op);
+       struct net_device *ndev = platform_get_drvdata(pdev);
        struct axienet_local *lp = netdev_priv(ndev);
 
        axienet_mdio_teardown(lp);
@@ -1630,24 +1657,21 @@ static int axienet_of_remove(struct platform_device *op)
        of_node_put(lp->phy_node);
        lp->phy_node = NULL;
 
-       iounmap(lp->regs);
-       if (lp->dma_regs)
-               iounmap(lp->dma_regs);
        free_netdev(ndev);
 
        return 0;
 }
 
-static struct platform_driver axienet_of_driver = {
-       .probe = axienet_of_probe,
-       .remove = axienet_of_remove,
+static struct platform_driver axienet_driver = {
+       .probe = axienet_probe,
+       .remove = axienet_remove,
        .driver = {
                 .name = "xilinx_axienet",
                 .of_match_table = axienet_of_match,
        },
 };
 
-module_platform_driver(axienet_of_driver);
+module_platform_driver(axienet_driver);
 
 MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
 MODULE_AUTHOR("Xilinx");
index 3b67d60d43787bb1442c2e7ec1771fd047f461fc..2a5a16834c017c1c32e66591c7618ab47b9f8927 100644 (file)
@@ -37,7 +37,7 @@ int axienet_mdio_wait_until_ready(struct axienet_local *lp)
  * @phy_id:    Address of the PHY device
  * @reg:       PHY register to read
  *
- * returns:    The register contents on success, -ETIMEDOUT on a timeout
+ * Return:     The register contents on success, -ETIMEDOUT on a timeout
  *
  * Reads the contents of the requested register from the requested PHY
  * address by first writing the details into MCR register. After a while
@@ -80,7 +80,7 @@ static int axienet_mdio_read(struct mii_bus *bus, int phy_id, int reg)
  * @reg:       PHY register to write to
  * @val:       Value to be written into the register
  *
- * returns:    0 on success, -ETIMEDOUT on a timeout
+ * Return:     0 on success, -ETIMEDOUT on a timeout
  *
  * Writes the value to the requested register by first writing the value
  * into MWD register. The MCR register is then appropriately setup
@@ -119,7 +119,7 @@ static int axienet_mdio_write(struct mii_bus *bus, int phy_id, int reg,
  * @lp:                Pointer to axienet local data structure.
  * @np:                Pointer to device node
  *
- * returns:    0 on success, -ETIMEDOUT on a timeout, -ENOMEM when
+ * Return:     0 on success, -ETIMEDOUT on a timeout, -ENOMEM when
  *             mdiobus_alloc (to allocate memory for mii bus structure) fails.
  *
  * Sets up the MDIO interface by initializing the MDIO clock and enabling the
@@ -161,19 +161,19 @@ int axienet_mdio_setup(struct axienet_local *lp, struct device_node *np)
 
        np1 = of_find_node_by_name(NULL, "cpu");
        if (!np1) {
-               printk(KERN_WARNING "%s(): Could not find CPU device node.",
-                      __func__);
-               printk(KERN_WARNING "Setting MDIO clock divisor to "
-                      "default %d\n", DEFAULT_CLOCK_DIVISOR);
+               netdev_warn(lp->ndev, "Could not find CPU device node.\n");
+               netdev_warn(lp->ndev,
+                           "Setting MDIO clock divisor to default %d\n",
+                           DEFAULT_CLOCK_DIVISOR);
                clk_div = DEFAULT_CLOCK_DIVISOR;
                goto issue;
        }
        property_p = (u32 *) of_get_property(np1, "clock-frequency", NULL);
        if (!property_p) {
-               printk(KERN_WARNING "%s(): Could not find CPU property: "
-                      "clock-frequency.", __func__);
-               printk(KERN_WARNING "Setting MDIO clock divisor to "
-                      "default %d\n", DEFAULT_CLOCK_DIVISOR);
+               netdev_warn(lp->ndev, "clock-frequency property not found.\n");
+               netdev_warn(lp->ndev,
+                           "Setting MDIO clock divisor to default %d\n",
+                           DEFAULT_CLOCK_DIVISOR);
                clk_div = DEFAULT_CLOCK_DIVISOR;
                of_node_put(np1);
                goto issue;
@@ -183,12 +183,14 @@ int axienet_mdio_setup(struct axienet_local *lp, struct device_node *np)
        clk_div = (host_clock / (MAX_MDIO_FREQ * 2)) - 1;
        /* If there is any remainder from the division of
         * fHOST / (MAX_MDIO_FREQ * 2), then we need to add
-        * 1 to the clock divisor or we will surely be above 2.5 MHz */
+        * 1 to the clock divisor or we will surely be above 2.5 MHz
+        */
        if (host_clock % (MAX_MDIO_FREQ * 2))
                clk_div++;
 
-       printk(KERN_DEBUG "%s(): Setting MDIO clock divisor to %u based "
-              "on %u Hz host clock.\n", __func__, clk_div, host_clock);
+       netdev_dbg(lp->ndev,
+                  "Setting MDIO clock divisor to %u/%u Hz host clock.\n",
+                  clk_div, host_clock);
 
        of_node_put(np1);
 issue:
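
The divisor above is sized so that MDC = host_clock / ((clk_div + 1) * 2) never exceeds MAX_MDIO_FREQ (2.5 MHz in the driver header); the remainder test rounds the divisor up whenever the division truncates. A worked sketch: with a 66 MHz host clock, 66000000 / 5000000 = 13 with a remainder, so clk_div = 13 - 1 + 1 = 13 and MDC = 66 MHz / (2 * 14) ~= 2.36 MHz.

	static u32 mdio_clk_div_sketch(u32 host_clock)
	{
		u32 clk_div = (host_clock / (MAX_MDIO_FREQ * 2)) - 1;

		/* Round up so MDC never lands above the 2.5 MHz limit. */
		if (host_clock % (MAX_MDIO_FREQ * 2))
			clk_div++;
		return clk_div;
	}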
index cc27dea3414e89163e2cb53dd6540fc258ad091c..9956680402de74bbd5ccf04761d797f09489e992 100644 (file)
@@ -414,7 +414,7 @@ static void smt_send_srf(struct s_smc *smc)
        smt->smt_len = SMT_MAX_INFO_LEN - pcon.pc_len ;
        mb->sm_len = smt->smt_len + sizeof(struct smt_header) ;
 
-       DB_SMT("SRF: sending SRF at %x, len %d\n",smt,mb->sm_len) ;
+       DB_SMT("SRF: sending SRF at %p, len %d\n",smt,mb->sm_len) ;
        DB_SMT("SRF: state SR%d Threshold %d\n",
                smc->srf.sr_state,smc->srf.SRThreshold/TICKS_PER_SECOND) ;
 #ifdef DEBUG
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
new file mode 100644 (file)
index 0000000..78d49d1
--- /dev/null
@@ -0,0 +1,523 @@
+/*
+ * GENEVE: Generic Network Virtualization Encapsulation
+ *
+ * Copyright (c) 2015 Red Hat, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/hash.h>
+#include <net/rtnetlink.h>
+#include <net/geneve.h>
+
+#define GENEVE_NETDEV_VER      "0.6"
+
+#define GENEVE_UDP_PORT                6081
+
+#define GENEVE_N_VID           (1u << 24)
+#define GENEVE_VID_MASK                (GENEVE_N_VID - 1)
+
+#define VNI_HASH_BITS          10
+#define VNI_HASH_SIZE          (1<<VNI_HASH_BITS)
+
+static bool log_ecn_error = true;
+module_param(log_ecn_error, bool, 0644);
+MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
+
+/* per-network namespace private data for this module */
+struct geneve_net {
+       struct list_head  geneve_list;
+       struct hlist_head vni_list[VNI_HASH_SIZE];
+};
+
+/* Pseudo network device */
+struct geneve_dev {
+       struct hlist_node  hlist;       /* vni hash table */
+       struct net         *net;        /* netns for packet i/o */
+       struct net_device  *dev;        /* netdev for geneve tunnel */
+       struct geneve_sock *sock;       /* socket used for geneve tunnel */
+       u8                 vni[3];      /* virtual network ID for tunnel */
+       u8                 ttl;         /* TTL override */
+       u8                 tos;         /* TOS override */
+       struct sockaddr_in remote;      /* IPv4 address for link partner */
+       struct list_head   next;        /* geneve's per namespace list */
+};
+
+static int geneve_net_id;
+
+static inline __u32 geneve_net_vni_hash(u8 vni[3])
+{
+       __u32 vnid;
+
+       vnid = (vni[0] << 16) | (vni[1] << 8) | vni[2];
+       return hash_32(vnid, VNI_HASH_BITS);
+}
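
The hash folds the three VNI bytes back into a host-order value before hash_32() spreads it over the 1024 buckets; geneve_newlink() later splits a netlink-supplied u32 into the same big-endian byte layout. A round-trip sketch with hypothetical helper names:

	/* Sketch: pack a host-order VNI into the 3-byte wire layout and back. */
	static void vni_pack(u8 vni[3], __u32 id)
	{
		vni[0] = (id & 0x00ff0000) >> 16;
		vni[1] = (id & 0x0000ff00) >> 8;
		vni[2] =  id & 0x000000ff;
	}

	static __u32 vni_unpack(const u8 vni[3])
	{
		return (vni[0] << 16) | (vni[1] << 8) | vni[2];
	}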
+
+/* geneve receive/decap routine */
+static void geneve_rx(struct geneve_sock *gs, struct sk_buff *skb)
+{
+       struct genevehdr *gnvh = geneve_hdr(skb);
+       struct geneve_dev *dummy, *geneve = NULL;
+       struct geneve_net *gn;
+       struct iphdr *iph = NULL;
+       struct pcpu_sw_netstats *stats;
+       struct hlist_head *vni_list_head;
+       int err = 0;
+       __u32 hash;
+
+       iph = ip_hdr(skb); /* Still outer IP header... */
+
+       gn = gs->rcv_data;
+
+       /* Find the device for this VNI */
+       hash = geneve_net_vni_hash(gnvh->vni);
+       vni_list_head = &gn->vni_list[hash];
+       hlist_for_each_entry_rcu(dummy, vni_list_head, hlist) {
+               if (!memcmp(gnvh->vni, dummy->vni, sizeof(dummy->vni)) &&
+                   iph->saddr == dummy->remote.sin_addr.s_addr) {
+                       geneve = dummy;
+                       break;
+               }
+       }
+       if (!geneve)
+               goto drop;
+
+       /* Drop packets w/ critical options,
+        * since we don't support any...
+        */
+       if (gnvh->critical)
+               goto drop;
+
+       skb_reset_mac_header(skb);
+       skb_scrub_packet(skb, !net_eq(geneve->net, dev_net(geneve->dev)));
+       skb->protocol = eth_type_trans(skb, geneve->dev);
+       skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
+
+       /* Ignore packet loops (and multicast echo) */
+       if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr))
+               goto drop;
+
+       skb_reset_network_header(skb);
+
+       iph = ip_hdr(skb); /* Now inner IP header... */
+       err = IP_ECN_decapsulate(iph, skb);
+
+       if (unlikely(err)) {
+               if (log_ecn_error)
+                       net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
+                                            &iph->saddr, iph->tos);
+               if (err > 1) {
+                       ++geneve->dev->stats.rx_frame_errors;
+                       ++geneve->dev->stats.rx_errors;
+                       goto drop;
+               }
+       }
+
+       stats = this_cpu_ptr(geneve->dev->tstats);
+       u64_stats_update_begin(&stats->syncp);
+       stats->rx_packets++;
+       stats->rx_bytes += skb->len;
+       u64_stats_update_end(&stats->syncp);
+
+       netif_rx(skb);
+
+       return;
+drop:
+       /* Consume bad packet */
+       kfree_skb(skb);
+}
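
The receive path counts packets in lockless per-CPU pcpu_sw_netstats slots; the u64_stats_sync sequence counter only costs anything on 32-bit hosts, where it lets readers detect a torn 64-bit update and retry. The writer side in isolation, as a sketch:

	/* Sketch: account one received skb on the local CPU's counters. */
	static void rx_account_sketch(struct pcpu_sw_netstats *stats,
				      unsigned int len)
	{
		u64_stats_update_begin(&stats->syncp);
		stats->rx_packets++;
		stats->rx_bytes += len;
		u64_stats_update_end(&stats->syncp);
	}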
+
+/* Setup stats when device is created */
+static int geneve_init(struct net_device *dev)
+{
+       dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+       if (!dev->tstats)
+               return -ENOMEM;
+
+       return 0;
+}
+
+static void geneve_uninit(struct net_device *dev)
+{
+       free_percpu(dev->tstats);
+}
+
+static int geneve_open(struct net_device *dev)
+{
+       struct geneve_dev *geneve = netdev_priv(dev);
+       struct net *net = geneve->net;
+       struct geneve_net *gn = net_generic(geneve->net, geneve_net_id);
+       struct geneve_sock *gs;
+
+       gs = geneve_sock_add(net, htons(GENEVE_UDP_PORT), geneve_rx, gn,
+                            false, false);
+       if (IS_ERR(gs))
+               return PTR_ERR(gs);
+
+       geneve->sock = gs;
+
+       return 0;
+}
+
+static int geneve_stop(struct net_device *dev)
+{
+       struct geneve_dev *geneve = netdev_priv(dev);
+       struct geneve_sock *gs = geneve->sock;
+
+       geneve_sock_release(gs);
+
+       return 0;
+}
+
+static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       struct geneve_dev *geneve = netdev_priv(dev);
+       struct geneve_sock *gs = geneve->sock;
+       struct rtable *rt = NULL;
+       const struct iphdr *iip; /* interior IP header */
+       struct flowi4 fl4;
+       int err;
+       __be16 sport;
+       __u8 tos, ttl;
+
+       iip = ip_hdr(skb);
+
+       skb_reset_mac_header(skb);
+
+       /* TODO: port min/max limits should be configurable */
+       sport = udp_flow_src_port(dev_net(dev), skb, 0, 0, true);
+
+       tos = geneve->tos;
+       if (tos == 1)
+               tos = ip_tunnel_get_dsfield(iip, skb);
+
+       memset(&fl4, 0, sizeof(fl4));
+       fl4.flowi4_tos = RT_TOS(tos);
+       fl4.daddr = geneve->remote.sin_addr.s_addr;
+       rt = ip_route_output_key(geneve->net, &fl4);
+       if (IS_ERR(rt)) {
+               netdev_dbg(dev, "no route to %pI4\n", &fl4.daddr);
+               dev->stats.tx_carrier_errors++;
+               goto tx_error;
+       }
+       if (rt->dst.dev == dev) { /* is this necessary? */
+               netdev_dbg(dev, "circular route to %pI4\n", &fl4.daddr);
+               dev->stats.collisions++;
+               goto rt_tx_error;
+       }
+
+       tos = ip_tunnel_ecn_encap(tos, iip, skb);
+
+       ttl = geneve->ttl;
+       if (!ttl && IN_MULTICAST(ntohl(fl4.daddr)))
+               ttl = 1;
+
+       ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
+
+       /* no need to handle local destination and encap bypass...yet... */
+
+       err = geneve_xmit_skb(gs, rt, skb, fl4.saddr, fl4.daddr,
+                             tos, ttl, 0, sport, htons(GENEVE_UDP_PORT), 0,
+                             geneve->vni, 0, NULL, false,
+                             !net_eq(geneve->net, dev_net(geneve->dev)));
+       if (err < 0)
+               ip_rt_put(rt);
+
+       iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
+
+       return NETDEV_TX_OK;
+
+rt_tx_error:
+       ip_rt_put(rt);
+tx_error:
+       dev->stats.tx_errors++;
+       dev_kfree_skb(skb);
+       return NETDEV_TX_OK;
+}
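
geneve_xmit() resolves the outer IPv4 TOS and TTL with the conventions shared by several IP tunnels: a configured TOS of 1 means "inherit from the inner header", and a TTL of 0 falls back to the route's hop limit (capped to 1 for multicast destinations). The resolution in isolation, as a sketch:

	static void outer_tos_ttl_sketch(const struct geneve_dev *geneve,
					 const struct iphdr *iip,
					 struct sk_buff *skb,
					 const struct rtable *rt,
					 __be32 daddr, u8 *tos, u8 *ttl)
	{
		*tos = geneve->tos;
		if (*tos == 1)	/* 1 == inherit inner DSCP/ECN field */
			*tos = ip_tunnel_get_dsfield(iip, skb);

		*ttl = geneve->ttl;
		if (!*ttl && IN_MULTICAST(ntohl(daddr)))
			*ttl = 1;
		if (!*ttl)
			*ttl = ip4_dst_hoplimit(&rt->dst);
	}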
+
+static const struct net_device_ops geneve_netdev_ops = {
+       .ndo_init               = geneve_init,
+       .ndo_uninit             = geneve_uninit,
+       .ndo_open               = geneve_open,
+       .ndo_stop               = geneve_stop,
+       .ndo_start_xmit         = geneve_xmit,
+       .ndo_get_stats64        = ip_tunnel_get_stats64,
+       .ndo_change_mtu         = eth_change_mtu,
+       .ndo_validate_addr      = eth_validate_addr,
+       .ndo_set_mac_address    = eth_mac_addr,
+};
+
+static void geneve_get_drvinfo(struct net_device *dev,
+                              struct ethtool_drvinfo *drvinfo)
+{
+       strlcpy(drvinfo->version, GENEVE_NETDEV_VER, sizeof(drvinfo->version));
+       strlcpy(drvinfo->driver, "geneve", sizeof(drvinfo->driver));
+}
+
+static const struct ethtool_ops geneve_ethtool_ops = {
+       .get_drvinfo    = geneve_get_drvinfo,
+       .get_link       = ethtool_op_get_link,
+};
+
+/* Info for udev: this is a virtual tunnel endpoint */
+static struct device_type geneve_type = {
+       .name = "geneve",
+};
+
+/* Initialize the device structure. */
+static void geneve_setup(struct net_device *dev)
+{
+       ether_setup(dev);
+
+       dev->netdev_ops = &geneve_netdev_ops;
+       dev->ethtool_ops = &geneve_ethtool_ops;
+       dev->destructor = free_netdev;
+
+       SET_NETDEV_DEVTYPE(dev, &geneve_type);
+
+       dev->tx_queue_len = 0;
+       dev->features    |= NETIF_F_LLTX;
+       dev->features    |= NETIF_F_SG | NETIF_F_HW_CSUM;
+       dev->features    |= NETIF_F_RXCSUM;
+       dev->features    |= NETIF_F_GSO_SOFTWARE;
+
+       dev->vlan_features = dev->features;
+       dev->features    |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
+
+       dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
+       dev->hw_features |= NETIF_F_GSO_SOFTWARE;
+       dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
+
+       netif_keep_dst(dev);
+       dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+}
+
+static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = {
+       [IFLA_GENEVE_ID]                = { .type = NLA_U32 },
+       [IFLA_GENEVE_REMOTE]            = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
+       [IFLA_GENEVE_TTL]               = { .type = NLA_U8 },
+       [IFLA_GENEVE_TOS]               = { .type = NLA_U8 },
+};
+
+static int geneve_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+       if (tb[IFLA_ADDRESS]) {
+               if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
+                       return -EINVAL;
+
+               if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
+                       return -EADDRNOTAVAIL;
+       }
+
+       if (!data)
+               return -EINVAL;
+
+       if (data[IFLA_GENEVE_ID]) {
+               __u32 vni = nla_get_u32(data[IFLA_GENEVE_ID]);
+
+               if (vni >= GENEVE_VID_MASK)
+                       return -ERANGE;
+       }
+
+       return 0;
+}
+
+static int geneve_newlink(struct net *net, struct net_device *dev,
+                        struct nlattr *tb[], struct nlattr *data[])
+{
+       struct geneve_net *gn = net_generic(net, geneve_net_id);
+       struct geneve_dev *dummy, *geneve = netdev_priv(dev);
+       struct hlist_head *vni_list_head;
+       struct sockaddr_in remote;      /* IPv4 address for link partner */
+       __u32 vni, hash;
+       int err;
+
+       if (!data[IFLA_GENEVE_ID] || !data[IFLA_GENEVE_REMOTE])
+               return -EINVAL;
+
+       geneve->net = net;
+       geneve->dev = dev;
+
+       vni = nla_get_u32(data[IFLA_GENEVE_ID]);
+       geneve->vni[0] = (vni & 0x00ff0000) >> 16;
+       geneve->vni[1] = (vni & 0x0000ff00) >> 8;
+       geneve->vni[2] =  vni & 0x000000ff;
+
+       geneve->remote.sin_addr.s_addr =
+               nla_get_in_addr(data[IFLA_GENEVE_REMOTE]);
+       if (IN_MULTICAST(ntohl(geneve->remote.sin_addr.s_addr)))
+               return -EINVAL;
+
+       remote = geneve->remote;
+       hash = geneve_net_vni_hash(geneve->vni);
+       vni_list_head = &gn->vni_list[hash];
+       hlist_for_each_entry_rcu(dummy, vni_list_head, hlist) {
+               if (!memcmp(geneve->vni, dummy->vni, sizeof(dummy->vni)) &&
+                   !memcmp(&remote, &dummy->remote, sizeof(dummy->remote)))
+                       return -EBUSY;
+       }
+
+       if (tb[IFLA_ADDRESS] == NULL)
+               eth_hw_addr_random(dev);
+
+       err = register_netdevice(dev);
+       if (err)
+               return err;
+
+       if (data[IFLA_GENEVE_TTL])
+               geneve->ttl = nla_get_u8(data[IFLA_GENEVE_TTL]);
+
+       if (data[IFLA_GENEVE_TOS])
+               geneve->tos = nla_get_u8(data[IFLA_GENEVE_TOS]);
+
+       list_add(&geneve->next, &gn->geneve_list);
+
+       hlist_add_head_rcu(&geneve->hlist, &gn->vni_list[hash]);
+
+       return 0;
+}
+
+static void geneve_dellink(struct net_device *dev, struct list_head *head)
+{
+       struct geneve_dev *geneve = netdev_priv(dev);
+
+       if (!hlist_unhashed(&geneve->hlist))
+               hlist_del_rcu(&geneve->hlist);
+
+       list_del(&geneve->next);
+       unregister_netdevice_queue(dev, head);
+}
+
+static size_t geneve_get_size(const struct net_device *dev)
+{
+       return nla_total_size(sizeof(__u32)) +  /* IFLA_GENEVE_ID */
+               nla_total_size(sizeof(struct in_addr)) + /* IFLA_GENEVE_REMOTE */
+               nla_total_size(sizeof(__u8)) +  /* IFLA_GENEVE_TTL */
+               nla_total_size(sizeof(__u8)) +  /* IFLA_GENEVE_TOS */
+               0;
+}
+
+static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+       struct geneve_dev *geneve = netdev_priv(dev);
+       __u32 vni;
+
+       vni = (geneve->vni[0] << 16) | (geneve->vni[1] << 8) | geneve->vni[2];
+       if (nla_put_u32(skb, IFLA_GENEVE_ID, vni))
+               goto nla_put_failure;
+
+       if (nla_put_in_addr(skb, IFLA_GENEVE_REMOTE,
+                           geneve->remote.sin_addr.s_addr))
+               goto nla_put_failure;
+
+       if (nla_put_u8(skb, IFLA_GENEVE_TTL, geneve->ttl) ||
+           nla_put_u8(skb, IFLA_GENEVE_TOS, geneve->tos))
+               goto nla_put_failure;
+
+       return 0;
+
+nla_put_failure:
+       return -EMSGSIZE;
+}
+
+static struct rtnl_link_ops geneve_link_ops __read_mostly = {
+       .kind           = "geneve",
+       .maxtype        = IFLA_GENEVE_MAX,
+       .policy         = geneve_policy,
+       .priv_size      = sizeof(struct geneve_dev),
+       .setup          = geneve_setup,
+       .validate       = geneve_validate,
+       .newlink        = geneve_newlink,
+       .dellink        = geneve_dellink,
+       .get_size       = geneve_get_size,
+       .fill_info      = geneve_fill_info,
+};
+
+static __net_init int geneve_init_net(struct net *net)
+{
+       struct geneve_net *gn = net_generic(net, geneve_net_id);
+       unsigned int h;
+
+       INIT_LIST_HEAD(&gn->geneve_list);
+
+       for (h = 0; h < VNI_HASH_SIZE; ++h)
+               INIT_HLIST_HEAD(&gn->vni_list[h]);
+
+       return 0;
+}
+
+static void __net_exit geneve_exit_net(struct net *net)
+{
+       struct geneve_net *gn = net_generic(net, geneve_net_id);
+       struct geneve_dev *geneve, *next;
+       struct net_device *dev, *aux;
+       LIST_HEAD(list);
+
+       rtnl_lock();
+
+       /* gather any geneve devices that were moved into this ns */
+       for_each_netdev_safe(net, dev, aux)
+               if (dev->rtnl_link_ops == &geneve_link_ops)
+                       unregister_netdevice_queue(dev, &list);
+
+       /* now gather any other geneve devices that were created in this ns */
+       list_for_each_entry_safe(geneve, next, &gn->geneve_list, next) {
+               /* If geneve->dev is in the same netns, it was already added
+                * to the list by the previous loop.
+                */
+               if (!net_eq(dev_net(geneve->dev), net))
+                       unregister_netdevice_queue(geneve->dev, &list);
+       }
+
+       /* unregister the devices gathered above */
+       unregister_netdevice_many(&list);
+       rtnl_unlock();
+}
+
+static struct pernet_operations geneve_net_ops = {
+       .init = geneve_init_net,
+       .exit = geneve_exit_net,
+       .id   = &geneve_net_id,
+       .size = sizeof(struct geneve_net),
+};
+
+static int __init geneve_init_module(void)
+{
+       int rc;
+
+       rc = register_pernet_subsys(&geneve_net_ops);
+       if (rc)
+               goto out1;
+
+       rc = rtnl_link_register(&geneve_link_ops);
+       if (rc)
+               goto out2;
+
+       return 0;
+out2:
+       unregister_pernet_subsys(&geneve_net_ops);
+out1:
+       return rc;
+}
+late_initcall(geneve_init_module);
+
+static void __exit geneve_cleanup_module(void)
+{
+       rtnl_link_unregister(&geneve_link_ops);
+       unregister_pernet_subsys(&geneve_net_ops);
+}
+module_exit(geneve_cleanup_module);
+
+MODULE_LICENSE("GPL");
+MODULE_VERSION(GENEVE_NETDEV_VER);
+MODULE_AUTHOR("John W. Linville <linville@tuxdriver.com>");
+MODULE_DESCRIPTION("Interface driver for GENEVE encapsulated traffic");
+MODULE_ALIAS_RTNL_LINK("geneve");
index 41071d32bc8e0e1259726aa647bc8a77324ffdd9..dd4544085db321d2f9020d97ebbbb9a8887ed4b9 100644 (file)
@@ -161,6 +161,7 @@ struct netvsc_device_info {
        unsigned char mac_adr[ETH_ALEN];
        bool link_state;        /* 0 - link up, 1 - link down */
        int  ring_size;
+       u32  max_num_vrss_chns;
 };
 
 enum rndis_device_state {
@@ -611,6 +612,12 @@ struct multi_send_data {
        u32 count; /* counter of batched packets */
 };
 
+struct netvsc_stats {
+       u64 packets;
+       u64 bytes;
+       struct u64_stats_sync syncp;
+};
+
 /* The context of the netvsc device  */
 struct net_device_context {
        /* point back to our device context */
@@ -618,6 +625,9 @@ struct net_device_context {
        struct delayed_work dwork;
        struct work_struct work;
        u32 msg_enable; /* debug level */
+
+       struct netvsc_stats __percpu *tx_stats;
+       struct netvsc_stats __percpu *rx_stats;
 };
 
 /* Per netvsc device */
index ea091bc5ff09dad379fde915fbb7ec073c613aa1..06de98a056228261a9df982421c4137827dfda01 100644 (file)
@@ -227,13 +227,18 @@ static int netvsc_init_buf(struct hv_device *device)
        struct netvsc_device *net_device;
        struct nvsp_message *init_packet;
        struct net_device *ndev;
+       int node;
 
        net_device = get_outbound_net_device(device);
        if (!net_device)
                return -ENODEV;
        ndev = net_device->ndev;
 
-       net_device->recv_buf = vzalloc(net_device->recv_buf_size);
+       node = cpu_to_node(device->channel->target_cpu);
+       net_device->recv_buf = vzalloc_node(net_device->recv_buf_size, node);
+       if (!net_device->recv_buf)
+               net_device->recv_buf = vzalloc(net_device->recv_buf_size);
+
        if (!net_device->recv_buf) {
                netdev_err(ndev, "unable to allocate receive "
                        "buffer of size %d\n", net_device->recv_buf_size);
@@ -321,7 +326,9 @@ static int netvsc_init_buf(struct hv_device *device)
 
        /* Now setup the send buffer.
         */
-       net_device->send_buf = vzalloc(net_device->send_buf_size);
+       net_device->send_buf = vzalloc_node(net_device->send_buf_size, node);
+       if (!net_device->send_buf)
+               net_device->send_buf = vzalloc(net_device->send_buf_size);
        if (!net_device->send_buf) {
                netdev_err(ndev, "unable to allocate send "
                           "buffer of size %d\n", net_device->send_buf_size);
@@ -743,6 +750,7 @@ static inline int netvsc_send_pkt(
        u64 req_id;
        int ret;
        struct hv_page_buffer *pgbuf;
+       u32 ring_avail = hv_ringbuf_avail_percent(&out_channel->outbound);
 
        nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
        if (packet->is_data_pkt) {
@@ -769,32 +777,42 @@ static inline int netvsc_send_pkt(
        if (out_channel->rescind)
                return -ENODEV;
 
+       /*
+        * It is possible that once we successfully place this packet
+        * on the ringbuffer, we may stop the queue. In that case, we want
+        * to notify the host independent of the xmit_more flag. We don't
+        * need to be precise here; in the worst case we may signal the host
+        * unnecessarily.
+        */
+       if (ring_avail < (RING_AVAIL_PERCENT_LOWATER + 1))
+               packet->xmit_more = false;
+
        if (packet->page_buf_cnt) {
                pgbuf = packet->cp_partial ? packet->page_buf +
                        packet->rmsg_pgcnt : packet->page_buf;
-               ret = vmbus_sendpacket_pagebuffer(out_channel,
-                                                 pgbuf,
-                                                 packet->page_buf_cnt,
-                                                 &nvmsg,
-                                                 sizeof(struct nvsp_message),
-                                                 req_id);
+               ret = vmbus_sendpacket_pagebuffer_ctl(out_channel,
+                                                     pgbuf,
+                                                     packet->page_buf_cnt,
+                                                     &nvmsg,
+                                                     sizeof(struct nvsp_message),
+                                                     req_id,
+                                                     VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED,
+                                                     !packet->xmit_more);
        } else {
-               ret = vmbus_sendpacket(
-                               out_channel, &nvmsg,
-                               sizeof(struct nvsp_message),
-                               req_id,
-                               VM_PKT_DATA_INBAND,
-                               VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+               ret = vmbus_sendpacket_ctl(out_channel, &nvmsg,
+                                          sizeof(struct nvsp_message),
+                                          req_id,
+                                          VM_PKT_DATA_INBAND,
+                                          VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED,
+                                          !packet->xmit_more);
        }
 
        if (ret == 0) {
                atomic_inc(&net_device->num_outstanding_sends);
                atomic_inc(&net_device->queue_sends[q_idx]);
 
-               if (hv_ringbuf_avail_percent(&out_channel->outbound) <
-                       RING_AVAIL_PERCENT_LOWATER) {
-                       netif_tx_stop_queue(netdev_get_tx_queue(
-                                           ndev, q_idx));
+               if (ring_avail < RING_AVAIL_PERCENT_LOWATER) {
+                       netif_tx_stop_queue(netdev_get_tx_queue(ndev, q_idx));
 
                        if (atomic_read(&net_device->
                                queue_sends[q_idx]) < 1)
index 5993c7e2d723a7e42d6022c90cb8e495420a49ad..358475ed9b5964c53f038c61f7fb8a3996c2a5ab 100644 (file)
@@ -46,6 +46,8 @@ static int ring_size = 128;
 module_param(ring_size, int, S_IRUGO);
 MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
 
+static int max_num_vrss_chns = 8;
+
 static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
                                NETIF_MSG_LINK | NETIF_MSG_IFUP |
                                NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
@@ -196,12 +198,12 @@ static bool netvsc_set_hash(u32 *hash, struct sk_buff *skb)
        struct flow_keys flow;
        int data_len;
 
-       if (!skb_flow_dissect(skb, &flow) ||
-           !(flow.n_proto == htons(ETH_P_IP) ||
-             flow.n_proto == htons(ETH_P_IPV6)))
+       if (!skb_flow_dissect_flow_keys(skb, &flow) ||
+           !(flow.basic.n_proto == htons(ETH_P_IP) ||
+             flow.basic.n_proto == htons(ETH_P_IPV6)))
                return false;
 
-       if (flow.ip_proto == IPPROTO_TCP)
+       if (flow.basic.ip_proto == IPPROTO_TCP)
                data_len = 12;
        else
                data_len = 8;
@@ -391,7 +393,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
        u32 skb_length;
        u32 pkt_sz;
        struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT];
-
+       struct netvsc_stats *tx_stats = this_cpu_ptr(net_device_ctx->tx_stats);
 
        /* We will need at most two pages to describe the rndis
         * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
@@ -580,8 +582,10 @@ do_send:
 
 drop:
        if (ret == 0) {
-               net->stats.tx_bytes += skb_length;
-               net->stats.tx_packets++;
+               u64_stats_update_begin(&tx_stats->syncp);
+               tx_stats->packets++;
+               tx_stats->bytes += skb_length;
+               u64_stats_update_end(&tx_stats->syncp);
        } else {
                if (ret != -EAGAIN) {
                        dev_kfree_skb_any(skb);
@@ -644,13 +648,17 @@ int netvsc_recv_callback(struct hv_device *device_obj,
                                struct ndis_tcp_ip_checksum_info *csum_info)
 {
        struct net_device *net;
+       struct net_device_context *net_device_ctx;
        struct sk_buff *skb;
+       struct netvsc_stats *rx_stats;
 
        net = ((struct netvsc_device *)hv_get_drvdata(device_obj))->ndev;
        if (!net || net->reg_state != NETREG_REGISTERED) {
                packet->status = NVSP_STAT_FAIL;
                return 0;
        }
+       net_device_ctx = netdev_priv(net);
+       rx_stats = this_cpu_ptr(net_device_ctx->rx_stats);
 
        /* Allocate a skb - TODO direct I/O to pages? */
        skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen);
@@ -686,8 +694,10 @@ int netvsc_recv_callback(struct hv_device *device_obj,
        skb_record_rx_queue(skb, packet->channel->
                            offermsg.offer.sub_channel_index);
 
-       net->stats.rx_packets++;
-       net->stats.rx_bytes += packet->total_data_buflen;
+       u64_stats_update_begin(&rx_stats->syncp);
+       rx_stats->packets++;
+       rx_stats->bytes += packet->total_data_buflen;
+       u64_stats_update_end(&rx_stats->syncp);
 
        /*
         * Pass the skb back up. Network stack will deallocate the skb when it
@@ -747,12 +757,53 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
        ndevctx->device_ctx = hdev;
        hv_set_drvdata(hdev, ndev);
        device_info.ring_size = ring_size;
+       device_info.max_num_vrss_chns = max_num_vrss_chns;
        rndis_filter_device_add(hdev, &device_info);
        netif_tx_wake_all_queues(ndev);
 
        return 0;
 }
 
+static struct rtnl_link_stats64 *netvsc_get_stats64(struct net_device *net,
+                                                   struct rtnl_link_stats64 *t)
+{
+       struct net_device_context *ndev_ctx = netdev_priv(net);
+       int cpu;
+
+       for_each_possible_cpu(cpu) {
+               struct netvsc_stats *tx_stats = per_cpu_ptr(ndev_ctx->tx_stats,
+                                                           cpu);
+               struct netvsc_stats *rx_stats = per_cpu_ptr(ndev_ctx->rx_stats,
+                                                           cpu);
+               u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
+               unsigned int start;
+
+               do {
+                       start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
+                       tx_packets = tx_stats->packets;
+                       tx_bytes = tx_stats->bytes;
+               } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
+
+               do {
+                       start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
+                       rx_packets = rx_stats->packets;
+                       rx_bytes = rx_stats->bytes;
+               } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
+
+               t->tx_bytes     += tx_bytes;
+               t->tx_packets   += tx_packets;
+               t->rx_bytes     += rx_bytes;
+               t->rx_packets   += rx_packets;
+       }
+
+       t->tx_dropped   = net->stats.tx_dropped;
+       t->tx_errors    = net->stats.tx_errors;
+
+       t->rx_dropped   = net->stats.rx_dropped;
+       t->rx_errors    = net->stats.rx_errors;
+
+       return t;
+}
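
Because a 64-bit counter cannot be read atomically on 32-bit hosts, the loops above re-snapshot a CPU's counters until the u64_stats sequence count is stable across the read. One per-CPU snapshot, factored out as a sketch:

	static void stats_snapshot_sketch(const struct netvsc_stats *s,
					  u64 *packets, u64 *bytes)
	{
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&s->syncp);
			*packets = s->packets;
			*bytes = s->bytes;
		} while (u64_stats_fetch_retry_irq(&s->syncp, start));
	}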
 
 static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
 {
@@ -804,6 +855,7 @@ static const struct net_device_ops device_ops = {
        .ndo_validate_addr =            eth_validate_addr,
        .ndo_set_mac_address =          netvsc_set_mac_addr,
        .ndo_select_queue =             netvsc_select_queue,
+       .ndo_get_stats64 =              netvsc_get_stats64,
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller =          netvsc_poll_controller,
 #endif
@@ -855,6 +907,14 @@ static void netvsc_link_change(struct work_struct *w)
                netdev_notify_peers(net);
 }
 
+static void netvsc_free_netdev(struct net_device *netdev)
+{
+       struct net_device_context *net_device_ctx = netdev_priv(netdev);
+
+       free_percpu(net_device_ctx->tx_stats);
+       free_percpu(net_device_ctx->rx_stats);
+       free_netdev(netdev);
+}
 
 static int netvsc_probe(struct hv_device *dev,
                        const struct hv_vmbus_device_id *dev_id)
@@ -883,6 +943,18 @@ static int netvsc_probe(struct hv_device *dev,
                netdev_dbg(net, "netvsc msg_enable: %d\n",
                           net_device_ctx->msg_enable);
 
+       net_device_ctx->tx_stats = netdev_alloc_pcpu_stats(struct netvsc_stats);
+       if (!net_device_ctx->tx_stats) {
+               free_netdev(net);
+               return -ENOMEM;
+       }
+       net_device_ctx->rx_stats = netdev_alloc_pcpu_stats(struct netvsc_stats);
+       if (!net_device_ctx->rx_stats) {
+               free_percpu(net_device_ctx->tx_stats);
+               free_netdev(net);
+               return -ENOMEM;
+       }
+
        hv_set_drvdata(dev, net);
        INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
        INIT_WORK(&net_device_ctx->work, do_set_multicast);
@@ -906,10 +978,11 @@ static int netvsc_probe(struct hv_device *dev,
 
        /* Notify the netvsc driver of the new device */
        device_info.ring_size = ring_size;
+       device_info.max_num_vrss_chns = max_num_vrss_chns;
        ret = rndis_filter_device_add(dev, &device_info);
        if (ret != 0) {
                netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
-               free_netdev(net);
+               netvsc_free_netdev(net);
                hv_set_drvdata(dev, NULL);
                return ret;
        }
@@ -923,7 +996,7 @@ static int netvsc_probe(struct hv_device *dev,
        if (ret != 0) {
                pr_err("Unable to register netdev.\n");
                rndis_filter_device_remove(dev);
-               free_netdev(net);
+               netvsc_free_netdev(net);
        } else {
                schedule_delayed_work(&net_device_ctx->dwork, 0);
        }
@@ -962,7 +1035,7 @@ static int netvsc_remove(struct hv_device *dev)
         */
        rndis_filter_device_remove(dev);
 
-       free_netdev(net);
+       netvsc_free_netdev(net);
        return 0;
 }
 
index 9118cea918821cb6bbe83a2f97a71134a58fd5dd..006c1b8c23857a17757366ffab86fd4eb9bad60b 100644 (file)
@@ -1013,6 +1013,9 @@ int rndis_filter_device_add(struct hv_device *dev,
        struct ndis_recv_scale_cap rsscap;
        u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
        u32 mtu, size;
+       u32 num_rss_qs;
+       const struct cpumask *node_cpu_mask;
+       u32 num_possible_rss_qs;
 
        rndis_device = get_rndis_device();
        if (!rndis_device)
@@ -1100,9 +1103,18 @@ int rndis_filter_device_add(struct hv_device *dev,
        if (ret || rsscap.num_recv_que < 2)
                goto out;
 
+       num_rss_qs = min(device_info->max_num_vrss_chns, rsscap.num_recv_que);
+
        net_device->max_chn = rsscap.num_recv_que;
-       net_device->num_chn = (num_online_cpus() < rsscap.num_recv_que) ?
-                              num_online_cpus() : rsscap.num_recv_que;
+
+       /*
+        * We will limit the VRSS channels to the number CPUs in the NUMA node
+        * the primary channel is currently bound to.
+        */
+       node_cpu_mask = cpumask_of_node(cpu_to_node(dev->channel->target_cpu));
+       num_possible_rss_qs = cpumask_weight(node_cpu_mask);
+       net_device->num_chn = min(num_possible_rss_qs, num_rss_qs);
+
        if (net_device->num_chn == 1)
                goto out;
 
index 1a3c3e57aa0b67750eccb0d2fc62eefb4eef0050..1dd5ab8e5054a4a192e75841c9220750f7fa9dec 100644 (file)
@@ -53,3 +53,13 @@ config IEEE802154_CC2520
 
          This driver can also be built as a module. To do so, say M here.
          the module will be called 'cc2520'.
+
+config IEEE802154_ATUSB
+       tristate "ATUSB transceiver driver"
+       depends on IEEE802154_DRIVERS && MAC802154 && USB
+       ---help---
+         Say Y here to enable the ATUSB IEEE 802.15.4 wireless
+         controller.
+
+         This driver can also be built as a module. To do so, say M here.
+         The module will be called 'atusb'.
index d77fa4d77e27d8e19e43f29f2161633c6754dd84..cf1d2a6db023924bb12c9e05ae034665357cfe80 100644 (file)
@@ -2,3 +2,4 @@ obj-$(CONFIG_IEEE802154_FAKELB) += fakelb.o
 obj-$(CONFIG_IEEE802154_AT86RF230) += at86rf230.o
 obj-$(CONFIG_IEEE802154_MRF24J40) += mrf24j40.o
 obj-$(CONFIG_IEEE802154_CC2520) += cc2520.o
+obj-$(CONFIG_IEEE802154_ATUSB) += atusb.o
index 67d00fbc2e0e29e7bd426ed8f7d21b22bf6772fc..2f25a5ed82473b38190c9d873c0651fbdf755760 100644 (file)
@@ -35,6 +35,8 @@
 #include <net/mac802154.h>
 #include <net/cfg802154.h>
 
+#include "at86rf230.h"
+
 struct at86rf230_local;
 /* at86rf2xx chip depend data.
  * All timings are in us.
@@ -50,7 +52,7 @@ struct at86rf2xx_chip_data {
        int rssi_base_val;
 
        int (*set_channel)(struct at86rf230_local *, u8, u8);
-       int (*get_desense_steps)(struct at86rf230_local *, s32);
+       int (*set_txpower)(struct at86rf230_local *, s32);
 };
 
 #define AT86RF2XX_MAX_BUF              (127 + 3)
@@ -102,200 +104,6 @@ struct at86rf230_local {
        struct at86rf230_state_change tx;
 };
 
-#define RG_TRX_STATUS  (0x01)
-#define SR_TRX_STATUS          0x01, 0x1f, 0
-#define SR_RESERVED_01_3       0x01, 0x20, 5
-#define SR_CCA_STATUS          0x01, 0x40, 6
-#define SR_CCA_DONE            0x01, 0x80, 7
-#define RG_TRX_STATE   (0x02)
-#define SR_TRX_CMD             0x02, 0x1f, 0
-#define SR_TRAC_STATUS         0x02, 0xe0, 5
-#define RG_TRX_CTRL_0  (0x03)
-#define SR_CLKM_CTRL           0x03, 0x07, 0
-#define SR_CLKM_SHA_SEL                0x03, 0x08, 3
-#define SR_PAD_IO_CLKM         0x03, 0x30, 4
-#define SR_PAD_IO              0x03, 0xc0, 6
-#define RG_TRX_CTRL_1  (0x04)
-#define SR_IRQ_POLARITY                0x04, 0x01, 0
-#define SR_IRQ_MASK_MODE       0x04, 0x02, 1
-#define SR_SPI_CMD_MODE                0x04, 0x0c, 2
-#define SR_RX_BL_CTRL          0x04, 0x10, 4
-#define SR_TX_AUTO_CRC_ON      0x04, 0x20, 5
-#define SR_IRQ_2_EXT_EN                0x04, 0x40, 6
-#define SR_PA_EXT_EN           0x04, 0x80, 7
-#define RG_PHY_TX_PWR  (0x05)
-#define SR_TX_PWR              0x05, 0x0f, 0
-#define SR_PA_LT               0x05, 0x30, 4
-#define SR_PA_BUF_LT           0x05, 0xc0, 6
-#define RG_PHY_RSSI    (0x06)
-#define SR_RSSI                        0x06, 0x1f, 0
-#define SR_RND_VALUE           0x06, 0x60, 5
-#define SR_RX_CRC_VALID                0x06, 0x80, 7
-#define RG_PHY_ED_LEVEL        (0x07)
-#define SR_ED_LEVEL            0x07, 0xff, 0
-#define RG_PHY_CC_CCA  (0x08)
-#define SR_CHANNEL             0x08, 0x1f, 0
-#define SR_CCA_MODE            0x08, 0x60, 5
-#define SR_CCA_REQUEST         0x08, 0x80, 7
-#define RG_CCA_THRES   (0x09)
-#define SR_CCA_ED_THRES                0x09, 0x0f, 0
-#define SR_RESERVED_09_1       0x09, 0xf0, 4
-#define RG_RX_CTRL     (0x0a)
-#define SR_PDT_THRES           0x0a, 0x0f, 0
-#define SR_RESERVED_0a_1       0x0a, 0xf0, 4
-#define RG_SFD_VALUE   (0x0b)
-#define SR_SFD_VALUE           0x0b, 0xff, 0
-#define RG_TRX_CTRL_2  (0x0c)
-#define SR_OQPSK_DATA_RATE     0x0c, 0x03, 0
-#define SR_SUB_MODE            0x0c, 0x04, 2
-#define SR_BPSK_QPSK           0x0c, 0x08, 3
-#define SR_OQPSK_SUB1_RC_EN    0x0c, 0x10, 4
-#define SR_RESERVED_0c_5       0x0c, 0x60, 5
-#define SR_RX_SAFE_MODE                0x0c, 0x80, 7
-#define RG_ANT_DIV     (0x0d)
-#define SR_ANT_CTRL            0x0d, 0x03, 0
-#define SR_ANT_EXT_SW_EN       0x0d, 0x04, 2
-#define SR_ANT_DIV_EN          0x0d, 0x08, 3
-#define SR_RESERVED_0d_2       0x0d, 0x70, 4
-#define SR_ANT_SEL             0x0d, 0x80, 7
-#define RG_IRQ_MASK    (0x0e)
-#define SR_IRQ_MASK            0x0e, 0xff, 0
-#define RG_IRQ_STATUS  (0x0f)
-#define SR_IRQ_0_PLL_LOCK      0x0f, 0x01, 0
-#define SR_IRQ_1_PLL_UNLOCK    0x0f, 0x02, 1
-#define SR_IRQ_2_RX_START      0x0f, 0x04, 2
-#define SR_IRQ_3_TRX_END       0x0f, 0x08, 3
-#define SR_IRQ_4_CCA_ED_DONE   0x0f, 0x10, 4
-#define SR_IRQ_5_AMI           0x0f, 0x20, 5
-#define SR_IRQ_6_TRX_UR                0x0f, 0x40, 6
-#define SR_IRQ_7_BAT_LOW       0x0f, 0x80, 7
-#define RG_VREG_CTRL   (0x10)
-#define SR_RESERVED_10_6       0x10, 0x03, 0
-#define SR_DVDD_OK             0x10, 0x04, 2
-#define SR_DVREG_EXT           0x10, 0x08, 3
-#define SR_RESERVED_10_3       0x10, 0x30, 4
-#define SR_AVDD_OK             0x10, 0x40, 6
-#define SR_AVREG_EXT           0x10, 0x80, 7
-#define RG_BATMON      (0x11)
-#define SR_BATMON_VTH          0x11, 0x0f, 0
-#define SR_BATMON_HR           0x11, 0x10, 4
-#define SR_BATMON_OK           0x11, 0x20, 5
-#define SR_RESERVED_11_1       0x11, 0xc0, 6
-#define RG_XOSC_CTRL   (0x12)
-#define SR_XTAL_TRIM           0x12, 0x0f, 0
-#define SR_XTAL_MODE           0x12, 0xf0, 4
-#define RG_RX_SYN      (0x15)
-#define SR_RX_PDT_LEVEL                0x15, 0x0f, 0
-#define SR_RESERVED_15_2       0x15, 0x70, 4
-#define SR_RX_PDT_DIS          0x15, 0x80, 7
-#define RG_XAH_CTRL_1  (0x17)
-#define SR_RESERVED_17_8       0x17, 0x01, 0
-#define SR_AACK_PROM_MODE      0x17, 0x02, 1
-#define SR_AACK_ACK_TIME       0x17, 0x04, 2
-#define SR_RESERVED_17_5       0x17, 0x08, 3
-#define SR_AACK_UPLD_RES_FT    0x17, 0x10, 4
-#define SR_AACK_FLTR_RES_FT    0x17, 0x20, 5
-#define SR_CSMA_LBT_MODE       0x17, 0x40, 6
-#define SR_RESERVED_17_1       0x17, 0x80, 7
-#define RG_FTN_CTRL    (0x18)
-#define SR_RESERVED_18_2       0x18, 0x7f, 0
-#define SR_FTN_START           0x18, 0x80, 7
-#define RG_PLL_CF      (0x1a)
-#define SR_RESERVED_1a_2       0x1a, 0x7f, 0
-#define SR_PLL_CF_START                0x1a, 0x80, 7
-#define RG_PLL_DCU     (0x1b)
-#define SR_RESERVED_1b_3       0x1b, 0x3f, 0
-#define SR_RESERVED_1b_2       0x1b, 0x40, 6
-#define SR_PLL_DCU_START       0x1b, 0x80, 7
-#define RG_PART_NUM    (0x1c)
-#define SR_PART_NUM            0x1c, 0xff, 0
-#define RG_VERSION_NUM (0x1d)
-#define SR_VERSION_NUM         0x1d, 0xff, 0
-#define RG_MAN_ID_0    (0x1e)
-#define SR_MAN_ID_0            0x1e, 0xff, 0
-#define RG_MAN_ID_1    (0x1f)
-#define SR_MAN_ID_1            0x1f, 0xff, 0
-#define RG_SHORT_ADDR_0        (0x20)
-#define SR_SHORT_ADDR_0                0x20, 0xff, 0
-#define RG_SHORT_ADDR_1        (0x21)
-#define SR_SHORT_ADDR_1                0x21, 0xff, 0
-#define RG_PAN_ID_0    (0x22)
-#define SR_PAN_ID_0            0x22, 0xff, 0
-#define RG_PAN_ID_1    (0x23)
-#define SR_PAN_ID_1            0x23, 0xff, 0
-#define RG_IEEE_ADDR_0 (0x24)
-#define SR_IEEE_ADDR_0         0x24, 0xff, 0
-#define RG_IEEE_ADDR_1 (0x25)
-#define SR_IEEE_ADDR_1         0x25, 0xff, 0
-#define RG_IEEE_ADDR_2 (0x26)
-#define SR_IEEE_ADDR_2         0x26, 0xff, 0
-#define RG_IEEE_ADDR_3 (0x27)
-#define SR_IEEE_ADDR_3         0x27, 0xff, 0
-#define RG_IEEE_ADDR_4 (0x28)
-#define SR_IEEE_ADDR_4         0x28, 0xff, 0
-#define RG_IEEE_ADDR_5 (0x29)
-#define SR_IEEE_ADDR_5         0x29, 0xff, 0
-#define RG_IEEE_ADDR_6 (0x2a)
-#define SR_IEEE_ADDR_6         0x2a, 0xff, 0
-#define RG_IEEE_ADDR_7 (0x2b)
-#define SR_IEEE_ADDR_7         0x2b, 0xff, 0
-#define RG_XAH_CTRL_0  (0x2c)
-#define SR_SLOTTED_OPERATION   0x2c, 0x01, 0
-#define SR_MAX_CSMA_RETRIES    0x2c, 0x0e, 1
-#define SR_MAX_FRAME_RETRIES   0x2c, 0xf0, 4
-#define RG_CSMA_SEED_0 (0x2d)
-#define SR_CSMA_SEED_0         0x2d, 0xff, 0
-#define RG_CSMA_SEED_1 (0x2e)
-#define SR_CSMA_SEED_1         0x2e, 0x07, 0
-#define SR_AACK_I_AM_COORD     0x2e, 0x08, 3
-#define SR_AACK_DIS_ACK                0x2e, 0x10, 4
-#define SR_AACK_SET_PD         0x2e, 0x20, 5
-#define SR_AACK_FVN_MODE       0x2e, 0xc0, 6
-#define RG_CSMA_BE     (0x2f)
-#define SR_MIN_BE              0x2f, 0x0f, 0
-#define SR_MAX_BE              0x2f, 0xf0, 4
-
-#define CMD_REG                0x80
-#define CMD_REG_MASK   0x3f
-#define CMD_WRITE      0x40
-#define CMD_FB         0x20
-
-#define IRQ_BAT_LOW    (1 << 7)
-#define IRQ_TRX_UR     (1 << 6)
-#define IRQ_AMI                (1 << 5)
-#define IRQ_CCA_ED     (1 << 4)
-#define IRQ_TRX_END    (1 << 3)
-#define IRQ_RX_START   (1 << 2)
-#define IRQ_PLL_UNL    (1 << 1)
-#define IRQ_PLL_LOCK   (1 << 0)
-
-#define IRQ_ACTIVE_HIGH        0
-#define IRQ_ACTIVE_LOW 1
-
-#define STATE_P_ON             0x00    /* BUSY */
-#define STATE_BUSY_RX          0x01
-#define STATE_BUSY_TX          0x02
-#define STATE_FORCE_TRX_OFF    0x03
-#define STATE_FORCE_TX_ON      0x04    /* IDLE */
-/* 0x05 */                             /* INVALID_PARAMETER */
-#define STATE_RX_ON            0x06
-/* 0x07 */                             /* SUCCESS */
-#define STATE_TRX_OFF          0x08
-#define STATE_TX_ON            0x09
-/* 0x0a - 0x0e */                      /* 0x0a - UNSUPPORTED_ATTRIBUTE */
-#define STATE_SLEEP            0x0F
-#define STATE_PREP_DEEP_SLEEP  0x10
-#define STATE_BUSY_RX_AACK     0x11
-#define STATE_BUSY_TX_ARET     0x12
-#define STATE_RX_AACK_ON       0x16
-#define STATE_TX_ARET_ON       0x19
-#define STATE_RX_ON_NOCLK      0x1C
-#define STATE_RX_AACK_ON_NOCLK 0x1D
-#define STATE_BUSY_RX_AACK_NOCLK 0x1E
-#define STATE_TRANSITION_IN_PROGRESS 0x1F
-
-#define TRX_STATE_MASK         (0x1F)
-
 #define AT86RF2XX_NUMREGS 0x3F
 
 static void
@@ -1010,7 +818,7 @@ at86rf230_xmit_start(void *context)
                if (lp->is_tx_from_off) {
                        lp->is_tx_from_off = false;
                        at86rf230_async_state_change(lp, ctx, STATE_TX_ARET_ON,
-                                                    at86rf230_xmit_tx_on,
+                                                    at86rf230_write_frame,
                                                     false);
                } else {
                        at86rf230_async_state_change(lp, ctx, STATE_TX_ON,
@@ -1076,6 +884,50 @@ at86rf23x_set_channel(struct at86rf230_local *lp, u8 page, u8 channel)
        return at86rf230_write_subreg(lp, SR_CHANNEL, channel);
 }
 
+#define AT86RF2XX_MAX_ED_LEVELS 0xF
+static const s32 at86rf23x_ed_levels[AT86RF2XX_MAX_ED_LEVELS + 1] = {
+       -9100, -8900, -8700, -8500, -8300, -8100, -7900, -7700, -7500, -7300,
+       -7100, -6900, -6700, -6500, -6300, -6100,
+};
+
+static const s32 at86rf212_ed_levels_100[AT86RF2XX_MAX_ED_LEVELS + 1] = {
+       -10000, -9800, -9600, -9400, -9200, -9000, -8800, -8600, -8400, -8200,
+       -8000, -7800, -7600, -7400, -7200, -7000,
+};
+
+static const s32 at86rf212_ed_levels_98[AT86RF2XX_MAX_ED_LEVELS + 1] = {
+       -9800, -9600, -9400, -9200, -9000, -8800, -8600, -8400, -8200, -8000,
+       -7800, -7600, -7400, -7200, -7000, -6800,
+};
+
+static inline int
+at86rf212_update_cca_ed_level(struct at86rf230_local *lp, int rssi_base_val)
+{
+       unsigned int cca_ed_thres;
+       int rc;
+
+       rc = at86rf230_read_subreg(lp, SR_CCA_ED_THRES, &cca_ed_thres);
+       if (rc < 0)
+               return rc;
+
+       switch (rssi_base_val) {
+       case -98:
+               lp->hw->phy->supported.cca_ed_levels = at86rf212_ed_levels_98;
+               lp->hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(at86rf212_ed_levels_98);
+               lp->hw->phy->cca_ed_level = at86rf212_ed_levels_98[cca_ed_thres];
+               break;
+       case -100:
+               lp->hw->phy->supported.cca_ed_levels = at86rf212_ed_levels_100;
+               lp->hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(at86rf212_ed_levels_100);
+               lp->hw->phy->cca_ed_level = at86rf212_ed_levels_100[cca_ed_thres];
+               break;
+       default:
+               WARN_ON(1);
+       }
+
+       return 0;
+}
+
 static int
 at86rf212_set_channel(struct at86rf230_local *lp, u8 page, u8 channel)
 {
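
The three ED tables above are linear 2 dB ladders anchored at each chip's
rssi_base_val, so every entry can be cross-checked with a one-line formula.
A hedged sketch (ed_level_mbm() is an editor's illustration, not a driver
helper):

        /* Check: table entry i == rssi_base_val * 100 + 200 * i (in mbm). */
        static inline s32 ed_level_mbm(s32 rssi_base_val, unsigned int i)
        {
                return rssi_base_val * 100 + 200 * i;   /* -91 dBm, i=0 -> -9100 */
        }

For the at86rf23x table this gives -9100 mbm at index 0 and -6100 mbm at
index 15, matching the array contents; the two at86rf212 tables follow the
same ladder from -10000 and -9800 mbm.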
@@ -1098,6 +950,10 @@ at86rf212_set_channel(struct at86rf230_local *lp, u8 page, u8 channel)
        if (rc < 0)
                return rc;
 
+       rc = at86rf212_update_cca_ed_level(lp, lp->data->rssi_base_val);
+       if (rc < 0)
+               return rc;
+
        /* This sets the symbol_duration according to the frequency on the 212.
         * TODO move this handling into the set channel and page path in
         * cfg802154. We can do that; these timings are according to the
         * 802.15.4 standard.
@@ -1193,23 +1049,56 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw,
        return 0;
 }
 
+#define AT86RF23X_MAX_TX_POWERS 0xF
+static const s32 at86rf233_powers[AT86RF23X_MAX_TX_POWERS + 1] = {
+       400, 370, 340, 300, 250, 200, 100, 0, -100, -200, -300, -400, -600,
+       -800, -1200, -1700,
+};
+
+static const s32 at86rf231_powers[AT86RF23X_MAX_TX_POWERS + 1] = {
+       300, 280, 230, 180, 130, 70, 0, -100, -200, -300, -400, -500, -700,
+       -900, -1200, -1700,
+};
+
+#define AT86RF212_MAX_TX_POWERS 0x1F
+static const s32 at86rf212_powers[AT86RF212_MAX_TX_POWERS + 1] = {
+       500, 400, 300, 200, 100, 0, -100, -200, -300, -400, -500, -600, -700,
+       -800, -900, -1000, -1100, -1200, -1300, -1400, -1500, -1600, -1700,
+       -1800, -1900, -2000, -2100, -2200, -2300, -2400, -2500, -2600,
+};
+
+static int
+at86rf23x_set_txpower(struct at86rf230_local *lp, s32 mbm)
+{
+       u32 i;
+
+       for (i = 0; i < lp->hw->phy->supported.tx_powers_size; i++) {
+               if (lp->hw->phy->supported.tx_powers[i] == mbm)
+                       return at86rf230_write_subreg(lp, SR_TX_PWR_23X, i);
+       }
+
+       return -EINVAL;
+}
+
 static int
-at86rf230_set_txpower(struct ieee802154_hw *hw, s8 db)
+at86rf212_set_txpower(struct at86rf230_local *lp, s32 mbm)
 {
-       struct at86rf230_local *lp = hw->priv;
+       u32 i;
 
-       /* typical maximum output is 5dBm with RG_PHY_TX_PWR 0x60, lower five
-        * bits decrease power in 1dB steps. 0x60 represents extra PA gain of
-        * 0dB.
-        * thus, supported values for db range from -26 to 5, for 31dB of
-        * reduction to 0dB of reduction.
-        */
-       if (db > 5 || db < -26)
-               return -EINVAL;
+       for (i = 0; i < lp->hw->phy->supported.tx_powers_size; i++) {
+               if (lp->hw->phy->supported.tx_powers[i] == mbm)
+                       return at86rf230_write_subreg(lp, SR_TX_PWR_212, i);
+       }
 
-       db = -(db - 5);
+       return -EINVAL;
+}
+
+static int
+at86rf230_set_txpower(struct ieee802154_hw *hw, s32 mbm)
+{
+       struct at86rf230_local *lp = hw->priv;
 
-       return __at86rf230_write(lp, RG_PHY_TX_PWR, 0x60 | db);
+       return lp->data->set_txpower(lp, mbm);
 }
 
 static int
@@ -1254,28 +1143,19 @@ at86rf230_set_cca_mode(struct ieee802154_hw *hw,
        return at86rf230_write_subreg(lp, SR_CCA_MODE, val);
 }
 
-static int
-at86rf212_get_desens_steps(struct at86rf230_local *lp, s32 level)
-{
-       return (level - lp->data->rssi_base_val) * 100 / 207;
-}
-
-static int
-at86rf23x_get_desens_steps(struct at86rf230_local *lp, s32 level)
-{
-       return (level - lp->data->rssi_base_val) / 2;
-}
 
 static int
-at86rf230_set_cca_ed_level(struct ieee802154_hw *hw, s32 level)
+at86rf230_set_cca_ed_level(struct ieee802154_hw *hw, s32 mbm)
 {
        struct at86rf230_local *lp = hw->priv;
+       u32 i;
 
-       if (level < lp->data->rssi_base_val || level > 30)
-               return -EINVAL;
+       for (i = 0; i < hw->phy->supported.cca_ed_levels_size; i++) {
+               if (hw->phy->supported.cca_ed_levels[i] == mbm)
+                       return at86rf230_write_subreg(lp, SR_CCA_ED_THRES, i);
+       }
 
-       return at86rf230_write_subreg(lp, SR_CCA_ED_THRES,
-                                     lp->data->get_desense_steps(lp, level));
+       return -EINVAL;
 }
 
 static int
@@ -1365,7 +1245,7 @@ static struct at86rf2xx_chip_data at86rf233_data = {
        .t_p_ack = 545,
        .rssi_base_val = -91,
        .set_channel = at86rf23x_set_channel,
-       .get_desense_steps = at86rf23x_get_desens_steps
+       .set_txpower = at86rf23x_set_txpower,
 };
 
 static struct at86rf2xx_chip_data at86rf231_data = {
@@ -1378,7 +1258,7 @@ static struct at86rf2xx_chip_data at86rf231_data = {
        .t_p_ack = 545,
        .rssi_base_val = -91,
        .set_channel = at86rf23x_set_channel,
-       .get_desense_steps = at86rf23x_get_desens_steps
+       .set_txpower = at86rf23x_set_txpower,
 };
 
 static struct at86rf2xx_chip_data at86rf212_data = {
@@ -1391,7 +1271,7 @@ static struct at86rf2xx_chip_data at86rf212_data = {
        .t_p_ack = 545,
        .rssi_base_val = -100,
        .set_channel = at86rf212_set_channel,
-       .get_desense_steps = at86rf212_get_desens_steps
+       .set_txpower = at86rf212_set_txpower,
 };
 
 static int at86rf230_hw_init(struct at86rf230_local *lp, u8 xtal_trim)
@@ -1564,8 +1444,21 @@ at86rf230_detect_device(struct at86rf230_local *lp)
        }
 
        lp->hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AACK |
-                       IEEE802154_HW_TXPOWER | IEEE802154_HW_ARET |
-                       IEEE802154_HW_AFILT | IEEE802154_HW_PROMISCUOUS;
+                       IEEE802154_HW_CSMA_PARAMS |
+                       IEEE802154_HW_FRAME_RETRIES | IEEE802154_HW_AFILT |
+                       IEEE802154_HW_PROMISCUOUS;
+
+       lp->hw->phy->flags = WPAN_PHY_FLAG_TXPOWER |
+                            WPAN_PHY_FLAG_CCA_ED_LEVEL |
+                            WPAN_PHY_FLAG_CCA_MODE;
+
+       lp->hw->phy->supported.cca_modes = BIT(NL802154_CCA_ENERGY) |
+               BIT(NL802154_CCA_CARRIER) | BIT(NL802154_CCA_ENERGY_CARRIER);
+       lp->hw->phy->supported.cca_opts = BIT(NL802154_CCA_OPT_ENERGY_CARRIER_AND) |
+               BIT(NL802154_CCA_OPT_ENERGY_CARRIER_OR);
+
+       lp->hw->phy->supported.cca_ed_levels = at86rf23x_ed_levels;
+       lp->hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(at86rf23x_ed_levels);
 
        lp->hw->phy->cca.mode = NL802154_CCA_ENERGY;
 
@@ -1573,36 +1466,49 @@ at86rf230_detect_device(struct at86rf230_local *lp)
        case 2:
                chip = "at86rf230";
                rc = -ENOTSUPP;
-               break;
+               goto not_supp;
        case 3:
                chip = "at86rf231";
                lp->data = &at86rf231_data;
-               lp->hw->phy->channels_supported[0] = 0x7FFF800;
+               lp->hw->phy->supported.channels[0] = 0x7FFF800;
                lp->hw->phy->current_channel = 11;
                lp->hw->phy->symbol_duration = 16;
+               lp->hw->phy->supported.tx_powers = at86rf231_powers;
+               lp->hw->phy->supported.tx_powers_size = ARRAY_SIZE(at86rf231_powers);
                break;
        case 7:
                chip = "at86rf212";
                lp->data = &at86rf212_data;
                lp->hw->flags |= IEEE802154_HW_LBT;
-               lp->hw->phy->channels_supported[0] = 0x00007FF;
-               lp->hw->phy->channels_supported[2] = 0x00007FF;
+               lp->hw->phy->supported.channels[0] = 0x00007FF;
+               lp->hw->phy->supported.channels[2] = 0x00007FF;
                lp->hw->phy->current_channel = 5;
                lp->hw->phy->symbol_duration = 25;
+               lp->hw->phy->supported.lbt = NL802154_SUPPORTED_BOOL_BOTH;
+               lp->hw->phy->supported.tx_powers = at86rf212_powers;
+               lp->hw->phy->supported.tx_powers_size = ARRAY_SIZE(at86rf212_powers);
+               lp->hw->phy->supported.cca_ed_levels = at86rf212_ed_levels_100;
+               lp->hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(at86rf212_ed_levels_100);
                break;
        case 11:
                chip = "at86rf233";
                lp->data = &at86rf233_data;
-               lp->hw->phy->channels_supported[0] = 0x7FFF800;
+               lp->hw->phy->supported.channels[0] = 0x7FFF800;
                lp->hw->phy->current_channel = 13;
                lp->hw->phy->symbol_duration = 16;
+               lp->hw->phy->supported.tx_powers = at86rf233_powers;
+               lp->hw->phy->supported.tx_powers_size = ARRAY_SIZE(at86rf233_powers);
                break;
        default:
                chip = "unknown";
                rc = -ENOTSUPP;
-               break;
+               goto not_supp;
        }
 
+       lp->hw->phy->cca_ed_level = lp->hw->phy->supported.cca_ed_levels[7];
+       lp->hw->phy->transmit_power = lp->hw->phy->supported.tx_powers[0];
+
+not_supp:
        dev_info(&lp->spi->dev, "Detected %s chip version %d\n", chip, version);
 
        return rc;
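
The rework above drops the old dB-based txpower interface in favour of
per-chip capability tables expressed in mbm (hundredths of a dBm), so -1700
means -17 dBm. The new set_txpower() callbacks and set_cca_ed_level() share
one lookup-by-value pattern; a minimal kernel-style sketch of it
(lookup_mbm_index() is a hypothetical name, not a driver function):

        /* Scan a capability table for the requested mbm value; the matching
         * index doubles as the register value to program.
         */
        static int lookup_mbm_index(const s32 *table, size_t size, s32 mbm)
        {
                size_t i;

                for (i = 0; i < size; i++)
                        if (table[i] == mbm)
                                return i;       /* index == register value */
                return -EINVAL;                 /* level unsupported */
        }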
diff --git a/drivers/net/ieee802154/at86rf230.h b/drivers/net/ieee802154/at86rf230.h
new file mode 100644 (file)
index 0000000..1e6d1cc
--- /dev/null
@@ -0,0 +1,220 @@
+/*
+ * AT86RF230/RF231 driver
+ *
+ * Copyright (C) 2009-2012 Siemens AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Written by:
+ * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+ * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
+ */
+
+#ifndef _AT86RF230_H
+#define _AT86RF230_H
+
+#define RG_TRX_STATUS  (0x01)
+#define SR_TRX_STATUS          0x01, 0x1f, 0
+#define SR_RESERVED_01_3       0x01, 0x20, 5
+#define SR_CCA_STATUS          0x01, 0x40, 6
+#define SR_CCA_DONE            0x01, 0x80, 7
+#define RG_TRX_STATE   (0x02)
+#define SR_TRX_CMD             0x02, 0x1f, 0
+#define SR_TRAC_STATUS         0x02, 0xe0, 5
+#define RG_TRX_CTRL_0  (0x03)
+#define SR_CLKM_CTRL           0x03, 0x07, 0
+#define SR_CLKM_SHA_SEL                0x03, 0x08, 3
+#define SR_PAD_IO_CLKM         0x03, 0x30, 4
+#define SR_PAD_IO              0x03, 0xc0, 6
+#define RG_TRX_CTRL_1  (0x04)
+#define SR_IRQ_POLARITY                0x04, 0x01, 0
+#define SR_IRQ_MASK_MODE       0x04, 0x02, 1
+#define SR_SPI_CMD_MODE                0x04, 0x0c, 2
+#define SR_RX_BL_CTRL          0x04, 0x10, 4
+#define SR_TX_AUTO_CRC_ON      0x04, 0x20, 5
+#define SR_IRQ_2_EXT_EN                0x04, 0x40, 6
+#define SR_PA_EXT_EN           0x04, 0x80, 7
+#define RG_PHY_TX_PWR  (0x05)
+#define SR_TX_PWR_23X          0x05, 0x0f, 0
+#define SR_PA_LT_230           0x05, 0x30, 4
+#define SR_PA_BUF_LT_230       0x05, 0xc0, 6
+#define SR_TX_PWR_212          0x05, 0x1f, 0
+#define SR_GC_PA_212           0x05, 0x60, 5
+#define SR_PA_BOOST_LT_212     0x05, 0x80, 7
+#define RG_PHY_RSSI    (0x06)
+#define SR_RSSI                        0x06, 0x1f, 0
+#define SR_RND_VALUE           0x06, 0x60, 5
+#define SR_RX_CRC_VALID                0x06, 0x80, 7
+#define RG_PHY_ED_LEVEL        (0x07)
+#define SR_ED_LEVEL            0x07, 0xff, 0
+#define RG_PHY_CC_CCA  (0x08)
+#define SR_CHANNEL             0x08, 0x1f, 0
+#define SR_CCA_MODE            0x08, 0x60, 5
+#define SR_CCA_REQUEST         0x08, 0x80, 7
+#define RG_CCA_THRES   (0x09)
+#define SR_CCA_ED_THRES                0x09, 0x0f, 0
+#define SR_RESERVED_09_1       0x09, 0xf0, 4
+#define RG_RX_CTRL     (0x0a)
+#define SR_PDT_THRES           0x0a, 0x0f, 0
+#define SR_RESERVED_0a_1       0x0a, 0xf0, 4
+#define RG_SFD_VALUE   (0x0b)
+#define SR_SFD_VALUE           0x0b, 0xff, 0
+#define RG_TRX_CTRL_2  (0x0c)
+#define SR_OQPSK_DATA_RATE     0x0c, 0x03, 0
+#define SR_SUB_MODE            0x0c, 0x04, 2
+#define SR_BPSK_QPSK           0x0c, 0x08, 3
+#define SR_OQPSK_SUB1_RC_EN    0x0c, 0x10, 4
+#define SR_RESERVED_0c_5       0x0c, 0x60, 5
+#define SR_RX_SAFE_MODE                0x0c, 0x80, 7
+#define RG_ANT_DIV     (0x0d)
+#define SR_ANT_CTRL            0x0d, 0x03, 0
+#define SR_ANT_EXT_SW_EN       0x0d, 0x04, 2
+#define SR_ANT_DIV_EN          0x0d, 0x08, 3
+#define SR_RESERVED_0d_2       0x0d, 0x70, 4
+#define SR_ANT_SEL             0x0d, 0x80, 7
+#define RG_IRQ_MASK    (0x0e)
+#define SR_IRQ_MASK            0x0e, 0xff, 0
+#define RG_IRQ_STATUS  (0x0f)
+#define SR_IRQ_0_PLL_LOCK      0x0f, 0x01, 0
+#define SR_IRQ_1_PLL_UNLOCK    0x0f, 0x02, 1
+#define SR_IRQ_2_RX_START      0x0f, 0x04, 2
+#define SR_IRQ_3_TRX_END       0x0f, 0x08, 3
+#define SR_IRQ_4_CCA_ED_DONE   0x0f, 0x10, 4
+#define SR_IRQ_5_AMI           0x0f, 0x20, 5
+#define SR_IRQ_6_TRX_UR                0x0f, 0x40, 6
+#define SR_IRQ_7_BAT_LOW       0x0f, 0x80, 7
+#define RG_VREG_CTRL   (0x10)
+#define SR_RESERVED_10_6       0x10, 0x03, 0
+#define SR_DVDD_OK             0x10, 0x04, 2
+#define SR_DVREG_EXT           0x10, 0x08, 3
+#define SR_RESERVED_10_3       0x10, 0x30, 4
+#define SR_AVDD_OK             0x10, 0x40, 6
+#define SR_AVREG_EXT           0x10, 0x80, 7
+#define RG_BATMON      (0x11)
+#define SR_BATMON_VTH          0x11, 0x0f, 0
+#define SR_BATMON_HR           0x11, 0x10, 4
+#define SR_BATMON_OK           0x11, 0x20, 5
+#define SR_RESERVED_11_1       0x11, 0xc0, 6
+#define RG_XOSC_CTRL   (0x12)
+#define SR_XTAL_TRIM           0x12, 0x0f, 0
+#define SR_XTAL_MODE           0x12, 0xf0, 4
+#define RG_RX_SYN      (0x15)
+#define SR_RX_PDT_LEVEL                0x15, 0x0f, 0
+#define SR_RESERVED_15_2       0x15, 0x70, 4
+#define SR_RX_PDT_DIS          0x15, 0x80, 7
+#define RG_XAH_CTRL_1  (0x17)
+#define SR_RESERVED_17_8       0x17, 0x01, 0
+#define SR_AACK_PROM_MODE      0x17, 0x02, 1
+#define SR_AACK_ACK_TIME       0x17, 0x04, 2
+#define SR_RESERVED_17_5       0x17, 0x08, 3
+#define SR_AACK_UPLD_RES_FT    0x17, 0x10, 4
+#define SR_AACK_FLTR_RES_FT    0x17, 0x20, 5
+#define SR_CSMA_LBT_MODE       0x17, 0x40, 6
+#define SR_RESERVED_17_1       0x17, 0x80, 7
+#define RG_FTN_CTRL    (0x18)
+#define SR_RESERVED_18_2       0x18, 0x7f, 0
+#define SR_FTN_START           0x18, 0x80, 7
+#define RG_PLL_CF      (0x1a)
+#define SR_RESERVED_1a_2       0x1a, 0x7f, 0
+#define SR_PLL_CF_START                0x1a, 0x80, 7
+#define RG_PLL_DCU     (0x1b)
+#define SR_RESERVED_1b_3       0x1b, 0x3f, 0
+#define SR_RESERVED_1b_2       0x1b, 0x40, 6
+#define SR_PLL_DCU_START       0x1b, 0x80, 7
+#define RG_PART_NUM    (0x1c)
+#define SR_PART_NUM            0x1c, 0xff, 0
+#define RG_VERSION_NUM (0x1d)
+#define SR_VERSION_NUM         0x1d, 0xff, 0
+#define RG_MAN_ID_0    (0x1e)
+#define SR_MAN_ID_0            0x1e, 0xff, 0
+#define RG_MAN_ID_1    (0x1f)
+#define SR_MAN_ID_1            0x1f, 0xff, 0
+#define RG_SHORT_ADDR_0        (0x20)
+#define SR_SHORT_ADDR_0                0x20, 0xff, 0
+#define RG_SHORT_ADDR_1        (0x21)
+#define SR_SHORT_ADDR_1                0x21, 0xff, 0
+#define RG_PAN_ID_0    (0x22)
+#define SR_PAN_ID_0            0x22, 0xff, 0
+#define RG_PAN_ID_1    (0x23)
+#define SR_PAN_ID_1            0x23, 0xff, 0
+#define RG_IEEE_ADDR_0 (0x24)
+#define SR_IEEE_ADDR_0         0x24, 0xff, 0
+#define RG_IEEE_ADDR_1 (0x25)
+#define SR_IEEE_ADDR_1         0x25, 0xff, 0
+#define RG_IEEE_ADDR_2 (0x26)
+#define SR_IEEE_ADDR_2         0x26, 0xff, 0
+#define RG_IEEE_ADDR_3 (0x27)
+#define SR_IEEE_ADDR_3         0x27, 0xff, 0
+#define RG_IEEE_ADDR_4 (0x28)
+#define SR_IEEE_ADDR_4         0x28, 0xff, 0
+#define RG_IEEE_ADDR_5 (0x29)
+#define SR_IEEE_ADDR_5         0x29, 0xff, 0
+#define RG_IEEE_ADDR_6 (0x2a)
+#define SR_IEEE_ADDR_6         0x2a, 0xff, 0
+#define RG_IEEE_ADDR_7 (0x2b)
+#define SR_IEEE_ADDR_7         0x2b, 0xff, 0
+#define RG_XAH_CTRL_0  (0x2c)
+#define SR_SLOTTED_OPERATION   0x2c, 0x01, 0
+#define SR_MAX_CSMA_RETRIES    0x2c, 0x0e, 1
+#define SR_MAX_FRAME_RETRIES   0x2c, 0xf0, 4
+#define RG_CSMA_SEED_0 (0x2d)
+#define SR_CSMA_SEED_0         0x2d, 0xff, 0
+#define RG_CSMA_SEED_1 (0x2e)
+#define SR_CSMA_SEED_1         0x2e, 0x07, 0
+#define SR_AACK_I_AM_COORD     0x2e, 0x08, 3
+#define SR_AACK_DIS_ACK                0x2e, 0x10, 4
+#define SR_AACK_SET_PD         0x2e, 0x20, 5
+#define SR_AACK_FVN_MODE       0x2e, 0xc0, 6
+#define RG_CSMA_BE     (0x2f)
+#define SR_MIN_BE              0x2f, 0x0f, 0
+#define SR_MAX_BE              0x2f, 0xf0, 4
+
+#define CMD_REG                0x80
+#define CMD_REG_MASK   0x3f
+#define CMD_WRITE      0x40
+#define CMD_FB         0x20
+
+#define IRQ_BAT_LOW    BIT(7)
+#define IRQ_TRX_UR     BIT(6)
+#define IRQ_AMI                BIT(5)
+#define IRQ_CCA_ED     BIT(4)
+#define IRQ_TRX_END    BIT(3)
+#define IRQ_RX_START   BIT(2)
+#define IRQ_PLL_UNL    BIT(1)
+#define IRQ_PLL_LOCK   BIT(0)
+
+#define IRQ_ACTIVE_HIGH        0
+#define IRQ_ACTIVE_LOW 1
+
+#define STATE_P_ON             0x00    /* BUSY */
+#define STATE_BUSY_RX          0x01
+#define STATE_BUSY_TX          0x02
+#define STATE_FORCE_TRX_OFF    0x03
+#define STATE_FORCE_TX_ON      0x04    /* IDLE */
+/* 0x05 */                             /* INVALID_PARAMETER */
+#define STATE_RX_ON            0x06
+/* 0x07 */                             /* SUCCESS */
+#define STATE_TRX_OFF          0x08
+#define STATE_TX_ON            0x09
+/* 0x0a - 0x0e */                      /* 0x0a - UNSUPPORTED_ATTRIBUTE */
+#define STATE_SLEEP            0x0F
+#define STATE_PREP_DEEP_SLEEP  0x10
+#define STATE_BUSY_RX_AACK     0x11
+#define STATE_BUSY_TX_ARET     0x12
+#define STATE_RX_AACK_ON       0x16
+#define STATE_TX_ARET_ON       0x19
+#define STATE_RX_ON_NOCLK      0x1C
+#define STATE_RX_AACK_ON_NOCLK 0x1D
+#define STATE_BUSY_RX_AACK_NOCLK 0x1E
+#define STATE_TRANSITION_IN_PROGRESS 0x1F
+
+#define TRX_STATE_MASK         (0x1F)
+
+#endif /* !_AT86RF230_H */
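
The SR_* macros in this header expand to <address, mask, shift> triples that
the driver's subregister accessors consume as three separate arguments. A
hedged sketch of the read-modify-write such an accessor performs
(read_reg()/write_reg() are placeholders for the real SPI helpers):

        static int write_subreg_sketch(u8 addr, u8 mask, u8 shift, u8 val)
        {
                u8 reg = read_reg(addr);        /* current contents */

                reg = (reg & ~mask) | ((val << shift) & mask);
                return write_reg(addr, reg);    /* splice field back in */
        }

After macro expansion, write_subreg_sketch(SR_CHANNEL, 13) becomes
write_subreg_sketch(0x08, 0x1f, 0, 13), i.e. only the low five bits of
register 0x08 change.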
diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c
new file mode 100644 (file)
index 0000000..5b6bb9a
--- /dev/null
@@ -0,0 +1,699 @@
+/*
+ * atusb.c - Driver for the ATUSB IEEE 802.15.4 dongle
+ *
+ * Written 2013 by Werner Almesberger <werner@almesberger.net>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2
+ *
+ * Based on at86rf230.c and spi_atusb.c.
+ * at86rf230.c is
+ * Copyright (C) 2009 Siemens AG
+ * Written by: Dmitry Eremin-Solenikov <dmitry.baryshkov@siemens.com>
+ *
+ * spi_atusb.c is
+ * Copyright (c) 2011 Richard Sharpe <realrichardsharpe@gmail.com>
+ * Copyright (c) 2011 Stefan Schmidt <stefan@datenfreihafen.org>
+ * Copyright (c) 2011 Werner Almesberger <werner@almesberger.net>
+ *
+ * USB initialization is
+ * Copyright (c) 2013 Alexander Aring <alex.aring@gmail.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/usb.h>
+#include <linux/skbuff.h>
+
+#include <net/cfg802154.h>
+#include <net/mac802154.h>
+
+#include "at86rf230.h"
+#include "atusb.h"
+
+#define ATUSB_JEDEC_ATMEL      0x1f    /* JEDEC manufacturer ID */
+
+#define ATUSB_NUM_RX_URBS      4       /* allow for a bit of local latency */
+#define ATUSB_ALLOC_DELAY_MS   100     /* delay after failed allocation */
+#define ATUSB_TX_TIMEOUT_MS    200     /* on the air timeout */
+
+struct atusb {
+       struct ieee802154_hw *hw;
+       struct usb_device *usb_dev;
+       int shutdown;                   /* non-zero if shutting down */
+       int err;                        /* set by first error */
+
+       /* RX variables */
+       struct delayed_work work;       /* memory allocations */
+       struct usb_anchor idle_urbs;    /* URBs waiting to be submitted */
+       struct usb_anchor rx_urbs;      /* URBs waiting for reception */
+
+       /* TX variables */
+       struct usb_ctrlrequest tx_dr;
+       struct urb *tx_urb;
+       struct sk_buff *tx_skb;
+       uint8_t tx_ack_seq;             /* current TX ACK sequence number */
+};
+
+/* at86rf230.h defines values as <reg, mask, shift> tuples. We use the more
+ * traditional style of having registers and or-able values. SR_REG extracts
+ * the register number. SR_VALUE uses the shift to prepare a value accordingly.
+ */
+
+#define __SR_REG(reg, mask, shift)     (reg)
+#define SR_REG(sr)                     __SR_REG(sr)
+
+#define __SR_VALUE(reg, mask, shift, val)      ((val) << (shift))
+#define SR_VALUE(sr, val)                      __SR_VALUE(sr, (val))
+
+/* ----- USB commands without data ----------------------------------------- */
+
+/* To reduce the number of error checks in the code, we record the first error
+ * in atusb->err and reject all subsequent requests until the error is cleared.
+ */
+
+static int atusb_control_msg(struct atusb *atusb, unsigned int pipe,
+                            __u8 request, __u8 requesttype,
+                            __u16 value, __u16 index,
+                            void *data, __u16 size, int timeout)
+{
+       struct usb_device *usb_dev = atusb->usb_dev;
+       int ret;
+
+       if (atusb->err)
+               return atusb->err;
+
+       ret = usb_control_msg(usb_dev, pipe, request, requesttype,
+                             value, index, data, size, timeout);
+       if (ret < 0) {
+               atusb->err = ret;
+               dev_err(&usb_dev->dev,
+                       "atusb_control_msg: req 0x%02x val 0x%x idx 0x%x, error %d\n",
+                       request, value, index, ret);
+       }
+       return ret;
+}
+
+static int atusb_command(struct atusb *atusb, uint8_t cmd, uint8_t arg)
+{
+       struct usb_device *usb_dev = atusb->usb_dev;
+
+       dev_dbg(&usb_dev->dev, "atusb_command: cmd = 0x%x\n", cmd);
+       return atusb_control_msg(atusb, usb_sndctrlpipe(usb_dev, 0),
+                                cmd, ATUSB_REQ_TO_DEV, arg, 0, NULL, 0, 1000);
+}
+
+static int atusb_write_reg(struct atusb *atusb, uint8_t reg, uint8_t value)
+{
+       struct usb_device *usb_dev = atusb->usb_dev;
+
+       dev_dbg(&usb_dev->dev, "atusb_write_reg: 0x%02x <- 0x%02x\n",
+               reg, value);
+       return atusb_control_msg(atusb, usb_sndctrlpipe(usb_dev, 0),
+                                ATUSB_REG_WRITE, ATUSB_REQ_TO_DEV,
+                                value, reg, NULL, 0, 1000);
+}
+
+static int atusb_read_reg(struct atusb *atusb, uint8_t reg)
+{
+       struct usb_device *usb_dev = atusb->usb_dev;
+       int ret;
+       uint8_t value;
+
+       dev_dbg(&usb_dev->dev, "atusb: reg = 0x%x\n", reg);
+       ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
+                               ATUSB_REG_READ, ATUSB_REQ_FROM_DEV,
+                               0, reg, &value, 1, 1000);
+       return ret >= 0 ? value : ret;
+}
+
+static int atusb_get_and_clear_error(struct atusb *atusb)
+{
+       int err = atusb->err;
+
+       atusb->err = 0;
+       return err;
+}
+
+/* ----- skb allocation ---------------------------------------------------- */
+
+#define MAX_PSDU       127
+#define MAX_RX_XFER    (1 + MAX_PSDU + 2 + 1)  /* PHR+PSDU+CRC+LQI */
+
+#define SKB_ATUSB(skb) (*(struct atusb **)(skb)->cb)
+
+static void atusb_in(struct urb *urb);
+
+static int atusb_submit_rx_urb(struct atusb *atusb, struct urb *urb)
+{
+       struct usb_device *usb_dev = atusb->usb_dev;
+       struct sk_buff *skb = urb->context;
+       int ret;
+
+       if (!skb) {
+               skb = alloc_skb(MAX_RX_XFER, GFP_KERNEL);
+               if (!skb) {
+                       dev_warn_ratelimited(&usb_dev->dev,
+                                            "atusb_in: can't allocate skb\n");
+                       return -ENOMEM;
+               }
+               skb_put(skb, MAX_RX_XFER);
+               SKB_ATUSB(skb) = atusb;
+       }
+
+       usb_fill_bulk_urb(urb, usb_dev, usb_rcvbulkpipe(usb_dev, 1),
+                         skb->data, MAX_RX_XFER, atusb_in, skb);
+       usb_anchor_urb(urb, &atusb->rx_urbs);
+
+       ret = usb_submit_urb(urb, GFP_KERNEL);
+       if (ret) {
+               usb_unanchor_urb(urb);
+               kfree_skb(skb);
+               urb->context = NULL;
+       }
+       return ret;
+}
+
+static void atusb_work_urbs(struct work_struct *work)
+{
+       struct atusb *atusb =
+           container_of(to_delayed_work(work), struct atusb, work);
+       struct usb_device *usb_dev = atusb->usb_dev;
+       struct urb *urb;
+       int ret;
+
+       if (atusb->shutdown)
+               return;
+
+       do {
+               urb = usb_get_from_anchor(&atusb->idle_urbs);
+               if (!urb)
+                       return;
+               ret = atusb_submit_rx_urb(atusb, urb);
+       } while (!ret);
+
+       usb_anchor_urb(urb, &atusb->idle_urbs);
+       dev_warn_ratelimited(&usb_dev->dev,
+                            "atusb_in: can't allocate/submit URB (%d)\n", ret);
+       schedule_delayed_work(&atusb->work,
+                             msecs_to_jiffies(ATUSB_ALLOC_DELAY_MS) + 1);
+}
+
+/* ----- Asynchronous USB -------------------------------------------------- */
+
+static void atusb_tx_done(struct atusb *atusb, uint8_t seq)
+{
+       struct usb_device *usb_dev = atusb->usb_dev;
+       uint8_t expect = atusb->tx_ack_seq;
+
+       dev_dbg(&usb_dev->dev, "atusb_tx_done (0x%02x/0x%02x)\n", seq, expect);
+       if (seq == expect) {
+               /* TODO check for ifs handling in firmware */
+               ieee802154_xmit_complete(atusb->hw, atusb->tx_skb, false);
+       } else {
+               /* TODO This case occurs when atusb raises a tx complete
+                * irq before probing; the firmware should be fixed. It is
+                * unlikely that seq == expect is then true, but it can
+                * happen and would fail with tx_skb == NULL.
+                */
+               ieee802154_wake_queue(atusb->hw);
+               if (atusb->tx_skb)
+                       dev_kfree_skb_irq(atusb->tx_skb);
+       }
+}
+
+static void atusb_in_good(struct urb *urb)
+{
+       struct usb_device *usb_dev = urb->dev;
+       struct sk_buff *skb = urb->context;
+       struct atusb *atusb = SKB_ATUSB(skb);
+       uint8_t len, lqi;
+
+       if (!urb->actual_length) {
+               dev_dbg(&usb_dev->dev, "atusb_in: zero-sized URB?\n");
+               return;
+       }
+
+       len = *skb->data;
+
+       if (urb->actual_length == 1) {
+               atusb_tx_done(atusb, len);
+               return;
+       }
+
+       if (len + 1 > urb->actual_length - 1) {
+               dev_dbg(&usb_dev->dev, "atusb_in: frame len %d+1 > URB %u-1\n",
+                       len, urb->actual_length);
+               return;
+       }
+
+       if (!ieee802154_is_valid_psdu_len(len)) {
+               dev_dbg(&usb_dev->dev, "atusb_in: frame corrupted\n");
+               return;
+       }
+
+       lqi = skb->data[len + 1];
+       dev_dbg(&usb_dev->dev, "atusb_in: rx len %d lqi 0x%02x\n", len, lqi);
+       skb_pull(skb, 1);       /* remove PHR */
+       skb_trim(skb, len);     /* get payload only */
+       ieee802154_rx_irqsafe(atusb->hw, skb, lqi);
+       urb->context = NULL;    /* skb is gone */
+}
+
+static void atusb_in(struct urb *urb)
+{
+       struct usb_device *usb_dev = urb->dev;
+       struct sk_buff *skb = urb->context;
+       struct atusb *atusb = SKB_ATUSB(skb);
+
+       dev_dbg(&usb_dev->dev, "atusb_in: status %d len %d\n",
+               urb->status, urb->actual_length);
+       if (urb->status) {
+               if (urb->status == -ENOENT) { /* being killed */
+                       kfree_skb(skb);
+                       urb->context = NULL;
+                       return;
+               }
+               dev_dbg(&usb_dev->dev, "atusb_in: URB error %d\n", urb->status);
+       } else {
+               atusb_in_good(urb);
+       }
+
+       usb_anchor_urb(urb, &atusb->idle_urbs);
+       if (!atusb->shutdown)
+               schedule_delayed_work(&atusb->work, 0);
+}
+
+/* ----- URB allocation/deallocation --------------------------------------- */
+
+static void atusb_free_urbs(struct atusb *atusb)
+{
+       struct urb *urb;
+
+       while (1) {
+               urb = usb_get_from_anchor(&atusb->idle_urbs);
+               if (!urb)
+                       break;
+               if (urb->context)
+                       kfree_skb(urb->context);
+               usb_free_urb(urb);
+       }
+}
+
+static int atusb_alloc_urbs(struct atusb *atusb, int n)
+{
+       struct urb *urb;
+
+       while (n) {
+               urb = usb_alloc_urb(0, GFP_KERNEL);
+               if (!urb) {
+                       atusb_free_urbs(atusb);
+                       return -ENOMEM;
+               }
+               usb_anchor_urb(urb, &atusb->idle_urbs);
+               n--;
+       }
+       return 0;
+}
+
+/* ----- IEEE 802.15.4 interface operations -------------------------------- */
+
+static void atusb_xmit_complete(struct urb *urb)
+{
+       dev_dbg(&urb->dev->dev, "atusb_xmit urb completed");
+}
+
+static int atusb_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
+{
+       struct atusb *atusb = hw->priv;
+       struct usb_device *usb_dev = atusb->usb_dev;
+       int ret;
+
+       dev_dbg(&usb_dev->dev, "atusb_xmit (%d)\n", skb->len);
+       atusb->tx_skb = skb;
+       atusb->tx_ack_seq++;
+       atusb->tx_dr.wIndex = cpu_to_le16(atusb->tx_ack_seq);
+       atusb->tx_dr.wLength = cpu_to_le16(skb->len);
+
+       usb_fill_control_urb(atusb->tx_urb, usb_dev,
+                            usb_sndctrlpipe(usb_dev, 0),
+                            (unsigned char *)&atusb->tx_dr, skb->data,
+                            skb->len, atusb_xmit_complete, NULL);
+       ret = usb_submit_urb(atusb->tx_urb, GFP_ATOMIC);
+       dev_dbg(&usb_dev->dev, "atusb_xmit done (%d)\n", ret);
+       return ret;
+}
+
+static int atusb_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
+{
+       struct atusb *atusb = hw->priv;
+       int ret;
+
+       /* This implicitly sets the CCA (Clear Channel Assessment) mode to 0,
+        * "Mode 3a, Carrier sense OR energy above threshold".
+        * We should probably make this configurable. @@@
+        */
+       ret = atusb_write_reg(atusb, RG_PHY_CC_CCA, channel);
+       if (ret < 0)
+               return ret;
+       msleep(1);      /* @@@ ugly synchronization */
+       return 0;
+}
+
+static int atusb_ed(struct ieee802154_hw *hw, u8 *level)
+{
+       BUG_ON(!level);
+       *level = 0xbe;
+       return 0;
+}
+
+static int atusb_set_hw_addr_filt(struct ieee802154_hw *hw,
+                                 struct ieee802154_hw_addr_filt *filt,
+                                 unsigned long changed)
+{
+       struct atusb *atusb = hw->priv;
+       struct device *dev = &atusb->usb_dev->dev;
+       uint8_t reg;
+
+       if (changed & IEEE802154_AFILT_SADDR_CHANGED) {
+               u16 addr = le16_to_cpu(filt->short_addr);
+
+               dev_vdbg(dev, "atusb_set_hw_addr_filt called for saddr\n");
+               atusb_write_reg(atusb, RG_SHORT_ADDR_0, addr);
+               atusb_write_reg(atusb, RG_SHORT_ADDR_1, addr >> 8);
+       }
+
+       if (changed & IEEE802154_AFILT_PANID_CHANGED) {
+               u16 pan = le16_to_cpu(filt->pan_id);
+
+               dev_vdbg(dev, "atusb_set_hw_addr_filt called for pan id\n");
+               atusb_write_reg(atusb, RG_PAN_ID_0, pan);
+               atusb_write_reg(atusb, RG_PAN_ID_1, pan >> 8);
+       }
+
+       if (changed & IEEE802154_AFILT_IEEEADDR_CHANGED) {
+               u8 i, addr[IEEE802154_EXTENDED_ADDR_LEN];
+
+               memcpy(addr, &filt->ieee_addr, IEEE802154_EXTENDED_ADDR_LEN);
+               dev_vdbg(dev, "atusb_set_hw_addr_filt called for IEEE addr\n");
+               for (i = 0; i < 8; i++)
+                       atusb_write_reg(atusb, RG_IEEE_ADDR_0 + i, addr[i]);
+       }
+
+       if (changed & IEEE802154_AFILT_PANC_CHANGED) {
+               dev_vdbg(dev,
+                        "atusb_set_hw_addr_filt called for panc change\n");
+               reg = atusb_read_reg(atusb, SR_REG(SR_AACK_I_AM_COORD));
+               if (filt->pan_coord)
+                       reg |= SR_VALUE(SR_AACK_I_AM_COORD, 1);
+               else
+                       reg &= ~SR_VALUE(SR_AACK_I_AM_COORD, 1);
+               atusb_write_reg(atusb, SR_REG(SR_AACK_I_AM_COORD), reg);
+       }
+
+       return atusb_get_and_clear_error(atusb);
+}
+
+static int atusb_start(struct ieee802154_hw *hw)
+{
+       struct atusb *atusb = hw->priv;
+       struct usb_device *usb_dev = atusb->usb_dev;
+       int ret;
+
+       dev_dbg(&usb_dev->dev, "atusb_start\n");
+       schedule_delayed_work(&atusb->work, 0);
+       atusb_command(atusb, ATUSB_RX_MODE, 1);
+       ret = atusb_get_and_clear_error(atusb);
+       if (ret < 0)
+               usb_kill_anchored_urbs(&atusb->idle_urbs);
+       return ret;
+}
+
+static void atusb_stop(struct ieee802154_hw *hw)
+{
+       struct atusb *atusb = hw->priv;
+       struct usb_device *usb_dev = atusb->usb_dev;
+
+       dev_dbg(&usb_dev->dev, "atusb_stop\n");
+       usb_kill_anchored_urbs(&atusb->idle_urbs);
+       atusb_command(atusb, ATUSB_RX_MODE, 0);
+       atusb_get_and_clear_error(atusb);
+}
+
+static struct ieee802154_ops atusb_ops = {
+       .owner                  = THIS_MODULE,
+       .xmit_async             = atusb_xmit,
+       .ed                     = atusb_ed,
+       .set_channel            = atusb_channel,
+       .start                  = atusb_start,
+       .stop                   = atusb_stop,
+       .set_hw_addr_filt       = atusb_set_hw_addr_filt,
+};
+
+/* ----- Firmware and chip version information ----------------------------- */
+
+static int atusb_get_and_show_revision(struct atusb *atusb)
+{
+       struct usb_device *usb_dev = atusb->usb_dev;
+       unsigned char buffer[3];
+       int ret;
+
+       /* Get a couple of the ATMega Firmware values */
+       ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
+                               ATUSB_ID, ATUSB_REQ_FROM_DEV, 0, 0,
+                               buffer, 3, 1000);
+       if (ret >= 0) {
+               dev_info(&usb_dev->dev,
+                        "Firmware: major: %u, minor: %u, hardware type: %u\n",
+                        buffer[0], buffer[1], buffer[2]);
+               if (buffer[0] == 0 && buffer[1] < 2) {
+                       dev_info(&usb_dev->dev,
+                                "Firmware version (%u.%u) predates our first public release.\n",
+                                buffer[0], buffer[1]);
+                       dev_info(&usb_dev->dev, "Please update to version 0.2 or newer\n");
+               }
+       }
+
+       return ret;
+}
+
+static int atusb_get_and_show_build(struct atusb *atusb)
+{
+       struct usb_device *usb_dev = atusb->usb_dev;
+       char build[ATUSB_BUILD_SIZE + 1];
+       int ret;
+
+       ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
+                               ATUSB_BUILD, ATUSB_REQ_FROM_DEV, 0, 0,
+                               build, ATUSB_BUILD_SIZE, 1000);
+       if (ret >= 0) {
+               build[ret] = 0;
+               dev_info(&usb_dev->dev, "Firmware: build %s\n", build);
+       }
+
+       return ret;
+}
+
+static int atusb_get_and_show_chip(struct atusb *atusb)
+{
+       struct usb_device *usb_dev = atusb->usb_dev;
+       uint8_t man_id_0, man_id_1, part_num, version_num;
+
+       man_id_0 = atusb_read_reg(atusb, RG_MAN_ID_0);
+       man_id_1 = atusb_read_reg(atusb, RG_MAN_ID_1);
+       part_num = atusb_read_reg(atusb, RG_PART_NUM);
+       version_num = atusb_read_reg(atusb, RG_VERSION_NUM);
+
+       if (atusb->err)
+               return atusb->err;
+
+       if ((man_id_1 << 8 | man_id_0) != ATUSB_JEDEC_ATMEL) {
+               dev_err(&usb_dev->dev,
+                       "non-Atmel transceiver xxxx%02x%02x\n",
+                       man_id_1, man_id_0);
+               goto fail;
+       }
+       if (part_num != 3 && part_num != 2) {
+               dev_err(&usb_dev->dev,
+                       "unexpected transceiver, part 0x%02x version 0x%02x\n",
+                       part_num, version_num);
+               goto fail;
+       }
+
+       dev_info(&usb_dev->dev, "ATUSB: AT86RF231 version %d\n", version_num);
+
+       return 0;
+
+fail:
+       atusb->err = -ENODEV;
+       return -ENODEV;
+}
+
+/* ----- Setup ------------------------------------------------------------- */
+
+static int atusb_probe(struct usb_interface *interface,
+                      const struct usb_device_id *id)
+{
+       struct usb_device *usb_dev = interface_to_usbdev(interface);
+       struct ieee802154_hw *hw;
+       struct atusb *atusb = NULL;
+       int ret = -ENOMEM;
+
+       hw = ieee802154_alloc_hw(sizeof(struct atusb), &atusb_ops);
+       if (!hw)
+               return -ENOMEM;
+
+       atusb = hw->priv;
+       atusb->hw = hw;
+       atusb->usb_dev = usb_get_dev(usb_dev);
+       usb_set_intfdata(interface, atusb);
+
+       atusb->shutdown = 0;
+       atusb->err = 0;
+       INIT_DELAYED_WORK(&atusb->work, atusb_work_urbs);
+       init_usb_anchor(&atusb->idle_urbs);
+       init_usb_anchor(&atusb->rx_urbs);
+
+       if (atusb_alloc_urbs(atusb, ATUSB_NUM_RX_URBS))
+               goto fail;
+
+       atusb->tx_dr.bRequestType = ATUSB_REQ_TO_DEV;
+       atusb->tx_dr.bRequest = ATUSB_TX;
+       atusb->tx_dr.wValue = cpu_to_le16(0);
+
+       atusb->tx_urb = usb_alloc_urb(0, GFP_ATOMIC);
+       if (!atusb->tx_urb)
+               goto fail;
+
+       hw->parent = &usb_dev->dev;
+       hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AFILT |
+                   IEEE802154_HW_AACK;
+
+       hw->phy->current_page = 0;
+       hw->phy->current_channel = 11;  /* reset default */
+       hw->phy->supported.channels[0] = 0x7FFF800;
+       ieee802154_random_extended_addr(&hw->phy->perm_extended_addr);
+
+       atusb_command(atusb, ATUSB_RF_RESET, 0);
+       atusb_get_and_show_chip(atusb);
+       atusb_get_and_show_revision(atusb);
+       atusb_get_and_show_build(atusb);
+       ret = atusb_get_and_clear_error(atusb);
+       if (ret) {
+               dev_err(&atusb->usb_dev->dev,
+                       "%s: initialization failed, error = %d\n",
+                       __func__, ret);
+               goto fail;
+       }
+
+       ret = ieee802154_register_hw(hw);
+       if (ret)
+               goto fail;
+
+       /* If we just powered on, we're now in P_ON and need to enter TRX_OFF
+        * explicitly. Any resets after that will send us straight to TRX_OFF,
+        * making the command below redundant.
+        */
+       atusb_write_reg(atusb, RG_TRX_STATE, STATE_FORCE_TRX_OFF);
+       msleep(1);      /* reset => TRX_OFF, tTR13 = 37 us */
+
+#if 0
+       /* Calculating the maximum time available to empty the frame buffer
+        * on reception:
+        *
+        * According to [1], the inter-frame gap is
+        * R * 20 * 16 us + 128 us
+        * where R is a random number from 0 to 7. Furthermore, we have 20 bit
+        * times (80 us at 250 kbps) of SHR of the next frame before the
+        * transceiver begins storing data in the frame buffer.
+        *
+        * With R = 0 this yields a minimum of 128 us + 80 us = 208 us between
+        * the last data of a frame and the first data of the next frame. This
+        * time is further
+        * reduced by interrupt latency in the atusb firmware.
+        *
+        * atusb currently needs about 500 us to retrieve a maximum-sized
+        * frame. We therefore have to allow reception of a new frame to begin
+        * while we retrieve the previous frame.
+        *
+        * [1] "JN-AN-1035 Calculating data rates in an IEEE 802.15.4-based
+        *      network", Jennic 2006.
+        *     http://www.jennic.com/download_file.php?supportFile=JN-AN-1035%20Calculating%20802-15-4%20Data%20Rates-1v0.pdf
+        */
+
+       atusb_write_reg(atusb,
+                       SR_REG(SR_RX_SAFE_MODE), SR_VALUE(SR_RX_SAFE_MODE, 1));
+#endif
+       atusb_write_reg(atusb, RG_IRQ_MASK, 0xff);
+
+       ret = atusb_get_and_clear_error(atusb);
+       if (!ret)
+               return 0;
+
+       dev_err(&atusb->usb_dev->dev,
+               "%s: setup failed, error = %d\n",
+               __func__, ret);
+
+       ieee802154_unregister_hw(hw);
+fail:
+       atusb_free_urbs(atusb);
+       usb_kill_urb(atusb->tx_urb);
+       usb_free_urb(atusb->tx_urb);
+       usb_put_dev(usb_dev);
+       ieee802154_free_hw(hw);
+       return ret;
+}
+
+static void atusb_disconnect(struct usb_interface *interface)
+{
+       struct atusb *atusb = usb_get_intfdata(interface);
+
+       dev_dbg(&atusb->usb_dev->dev, "atusb_disconnect\n");
+
+       atusb->shutdown = 1;
+       cancel_delayed_work_sync(&atusb->work);
+
+       usb_kill_anchored_urbs(&atusb->rx_urbs);
+       atusb_free_urbs(atusb);
+       usb_kill_urb(atusb->tx_urb);
+       usb_free_urb(atusb->tx_urb);
+
+       ieee802154_unregister_hw(atusb->hw);
+
+       ieee802154_free_hw(atusb->hw);
+
+       usb_set_intfdata(interface, NULL);
+       usb_put_dev(atusb->usb_dev);
+
+       pr_debug("atusb_disconnect done\n");
+}
+
+/* The devices we work with */
+static const struct usb_device_id atusb_device_table[] = {
+       {
+               .match_flags            = USB_DEVICE_ID_MATCH_DEVICE |
+                                         USB_DEVICE_ID_MATCH_INT_INFO,
+               .idVendor               = ATUSB_VENDOR_ID,
+               .idProduct              = ATUSB_PRODUCT_ID,
+               .bInterfaceClass        = USB_CLASS_VENDOR_SPEC
+       },
+       /* end with null element */
+       {}
+};
+MODULE_DEVICE_TABLE(usb, atusb_device_table);
+
+static struct usb_driver atusb_driver = {
+       .name           = "atusb",
+       .probe          = atusb_probe,
+       .disconnect     = atusb_disconnect,
+       .id_table       = atusb_device_table,
+};
+module_usb_driver(atusb_driver);
+
+MODULE_AUTHOR("Alexander Aring <alex.aring@gmail.com>");
+MODULE_AUTHOR("Richard Sharpe <realrichardsharpe@gmail.com>");
+MODULE_AUTHOR("Stefan Schmidt <stefan@datenfreihafen.org>");
+MODULE_AUTHOR("Werner Almesberger <werner@almesberger.net>");
+MODULE_DESCRIPTION("ATUSB IEEE 802.15.4 Driver");
+MODULE_LICENSE("GPL");
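
A pattern worth noting in atusb_probe() and atusb_set_hw_addr_filt() above:
control transfers are issued without individual checks because
atusb_control_msg() latches the first failure in atusb->err and rejects all
later requests, so a single atusb_get_and_clear_error() at the end suffices.
A self-contained sketch of the idiom (bus_ctx and friends are illustrative
names; hw_status stands in for the real transfer result):

        #include <stdio.h>

        struct bus_ctx { int err; };            /* 0, or first error seen */

        static int bus_write(struct bus_ctx *ctx, int hw_status)
        {
                if (ctx->err)
                        return ctx->err;        /* reject once poisoned */
                if (hw_status < 0)
                        ctx->err = hw_status;   /* latch first failure */
                return hw_status;
        }

        static int bus_get_and_clear_error(struct bus_ctx *ctx)
        {
                int err = ctx->err;

                ctx->err = 0;
                return err;
        }

        int main(void)
        {
                struct bus_ctx ctx = { 0 };

                bus_write(&ctx, 0);     /* ok */
                bus_write(&ctx, -5);    /* fails, latched */
                bus_write(&ctx, 0);     /* short-circuited, still -5 */
                printf("first error: %d\n", bus_get_and_clear_error(&ctx));
                return 0;
        }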
diff --git a/drivers/net/ieee802154/atusb.h b/drivers/net/ieee802154/atusb.h
new file mode 100644 (file)
index 0000000..0690edc
--- /dev/null
@@ -0,0 +1,84 @@
+/*
+ * atusb.h - Definitions shared between kernel and ATUSB firmware
+ *
+ * Written 2013 by Werner Almesberger <werner@almesberger.net>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2, or
+ * (at your option) any later version.
+ *
+ * This file should be identical for kernel and firmware.
+ * Kernel: drivers/net/ieee802154/atusb.h
+ * Firmware: ben-wpan/atusb/fw/include/atusb/atusb.h
+ */
+
+#ifndef _ATUSB_H
+#define _ATUSB_H
+
+#define ATUSB_VENDOR_ID        0x20b7  /* Qi Hardware */
+#define ATUSB_PRODUCT_ID 0x1540        /* 802.15.4, device 0 */
+                               /*     -- -         - */
+
+#define ATUSB_BUILD_SIZE 256   /* maximum build version/date message length */
+
+/* Commands to our device. Make sure this is synced with the firmware */
+enum atusb_requests {
+       ATUSB_ID                        = 0x00, /* system status/control grp */
+       ATUSB_BUILD,
+       ATUSB_RESET,
+       ATUSB_RF_RESET                  = 0x10, /* debug/test group */
+       ATUSB_POLL_INT,
+       ATUSB_TEST,                     /* atusb-sil only */
+       ATUSB_TIMER,
+       ATUSB_GPIO,
+       ATUSB_SLP_TR,
+       ATUSB_GPIO_CLEANUP,
+       ATUSB_REG_WRITE                 = 0x20, /* transceiver group */
+       ATUSB_REG_READ,
+       ATUSB_BUF_WRITE,
+       ATUSB_BUF_READ,
+       ATUSB_SRAM_WRITE,
+       ATUSB_SRAM_READ,
+       ATUSB_SPI_WRITE                 = 0x30, /* SPI group */
+       ATUSB_SPI_READ1,
+       ATUSB_SPI_READ2,
+       ATUSB_SPI_WRITE2_SYNC,
+       ATUSB_RX_MODE                   = 0x40, /* HardMAC group */
+       ATUSB_TX,
+};
+
+/* Direction   bRequest                wValue          wIndex  wLength
+ *
+ * ->host      ATUSB_ID                -               -       3
+ * ->host      ATUSB_BUILD             -               -       #bytes
+ * host->      ATUSB_RESET             -               -       0
+ *
+ * host->      ATUSB_RF_RESET          -               -       0
+ * ->host      ATUSB_POLL_INT          -               -       1
+ * host->      ATUSB_TEST              -               -       0
+ * ->host      ATUSB_TIMER             -               -       #bytes (6)
+ * ->host      ATUSB_GPIO              dir+data        mask+p# 3
+ * host->      ATUSB_SLP_TR            -               -       0
+ * host->      ATUSB_GPIO_CLEANUP      -               -       0
+ *
+ * host->      ATUSB_REG_WRITE         value           addr    0
+ * ->host      ATUSB_REG_READ          -               addr    1
+ * host->      ATUSB_BUF_WRITE         -               -       #bytes
+ * ->host      ATUSB_BUF_READ          -               -       #bytes
+ * host->      ATUSB_SRAM_WRITE        -               addr    #bytes
+ * ->host      ATUSB_SRAM_READ         -               addr    #bytes
+ *
+ * host->      ATUSB_SPI_WRITE         byte0           byte1   #bytes
+ * ->host      ATUSB_SPI_READ1         byte0           -       #bytes
+ * ->host      ATUSB_SPI_READ2         byte0           byte1   #bytes
+ * ->host      ATUSB_SPI_WRITE2_SYNC   byte0           byte1   0/1
+ *
+ * host->      ATUSB_RX_MODE           on              -       0
+ * host->      ATUSB_TX                flags           ack_seq #bytes
+ */
+
+#define ATUSB_REQ_FROM_DEV     (USB_TYPE_VENDOR | USB_DIR_IN)
+#define ATUSB_REQ_TO_DEV       (USB_TYPE_VENDOR | USB_DIR_OUT)
+
+#endif /* !_ATUSB_H */
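
Each row of the direction table above maps one-to-one onto usb_control_msg()
arguments. For example the ATUSB_REG_READ row (wValue unused, wIndex = the
register address, wLength = 1) corresponds to a transfer like this sketch,
mirroring atusb_read_reg() earlier in the commit (usb_dev and reg assumed in
scope, error handling elided):

        uint8_t value;
        int ret = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
                                  ATUSB_REG_READ,       /* bRequest */
                                  ATUSB_REQ_FROM_DEV,   /* bmRequestType */
                                  0,                    /* wValue: unused */
                                  reg,                  /* wIndex: register */
                                  &value, 1,            /* wLength = 1 */
                                  1000);                /* timeout, ms */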
index f833b8bb66634ed0e6057235785c67e7c072f4b7..84b28a05c5a14a289c489d0064c70a637d450b57 100644 (file)
@@ -653,7 +653,7 @@ static int cc2520_register(struct cc2520_private *priv)
        ieee802154_random_extended_addr(&priv->hw->phy->perm_extended_addr);
 
        /* We do support only 2.4 Ghz */
-       priv->hw->phy->channels_supported[0] = 0x7FFF800;
+       priv->hw->phy->supported.channels[0] = 0x7FFF800;
        priv->hw->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AACK |
                          IEEE802154_HW_AFILT;
 
index dc2bfb600b4bd491a8bc2a41e12e84a87e1bff77..9d0da4ec3e8c91a6aab44231c9e701691a814799 100644 (file)
 #include <net/mac802154.h>
 #include <net/cfg802154.h>
 
-static int numlbs = 1;
+static int numlbs = 2;
 
-struct fakelb_dev_priv {
-       struct ieee802154_hw *hw;
+static LIST_HEAD(fakelb_phys);
+static DEFINE_SPINLOCK(fakelb_phys_lock);
 
-       struct list_head list;
-       struct fakelb_priv *fake;
+static LIST_HEAD(fakelb_ifup_phys);
+static DEFINE_RWLOCK(fakelb_ifup_phys_lock);
 
-       spinlock_t lock;
-       bool working;
-};
+struct fakelb_phy {
+       struct ieee802154_hw *hw;
+
+       u8 page;
+       u8 channel;
 
-struct fakelb_priv {
        struct list_head list;
-       rwlock_t lock;
+       struct list_head list_ifup;
 };
 
-static int
-fakelb_hw_ed(struct ieee802154_hw *hw, u8 *level)
+static int fakelb_hw_ed(struct ieee802154_hw *hw, u8 *level)
 {
        BUG_ON(!level);
        *level = 0xbe;
@@ -53,78 +53,63 @@ fakelb_hw_ed(struct ieee802154_hw *hw, u8 *level)
        return 0;
 }
 
-static int
-fakelb_hw_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
+static int fakelb_hw_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
 {
-       pr_debug("set channel to %d\n", channel);
+       struct fakelb_phy *phy = hw->priv;
 
+       write_lock_bh(&fakelb_ifup_phys_lock);
+       phy->page = page;
+       phy->channel = channel;
+       write_unlock_bh(&fakelb_ifup_phys_lock);
        return 0;
 }
 
-static void
-fakelb_hw_deliver(struct fakelb_dev_priv *priv, struct sk_buff *skb)
+static int fakelb_hw_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
 {
-       struct sk_buff *newskb;
+       struct fakelb_phy *current_phy = hw->priv, *phy;
 
-       spin_lock(&priv->lock);
-       if (priv->working) {
-               newskb = pskb_copy(skb, GFP_ATOMIC);
-               ieee802154_rx_irqsafe(priv->hw, newskb, 0xcc);
-       }
-       spin_unlock(&priv->lock);
-}
+       read_lock_bh(&fakelb_ifup_phys_lock);
+       list_for_each_entry(phy, &fakelb_ifup_phys, list_ifup) {
+               if (current_phy == phy)
+                       continue;
 
-static int
-fakelb_hw_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
-{
-       struct fakelb_dev_priv *priv = hw->priv;
-       struct fakelb_priv *fake = priv->fake;
-
-       read_lock_bh(&fake->lock);
-       if (priv->list.next == priv->list.prev) {
-               /* we are the only one device */
-               fakelb_hw_deliver(priv, skb);
-       } else {
-               struct fakelb_dev_priv *dp;
-               list_for_each_entry(dp, &priv->fake->list, list) {
-                       if (dp != priv &&
-                           (dp->hw->phy->current_channel ==
-                            priv->hw->phy->current_channel))
-                               fakelb_hw_deliver(dp, skb);
+               if (current_phy->page == phy->page &&
+                   current_phy->channel == phy->channel) {
+                       struct sk_buff *newskb = pskb_copy(skb, GFP_ATOMIC);
+
+                       if (newskb)
+                               ieee802154_rx_irqsafe(phy->hw, newskb, 0xcc);
                }
        }
-       read_unlock_bh(&fake->lock);
+       read_unlock_bh(&fakelb_ifup_phys_lock);
 
+       ieee802154_xmit_complete(hw, skb, false);
        return 0;
 }
 
-static int
-fakelb_hw_start(struct ieee802154_hw *hw) {
-       struct fakelb_dev_priv *priv = hw->priv;
-       int ret = 0;
+static int fakelb_hw_start(struct ieee802154_hw *hw)
+{
+       struct fakelb_phy *phy = hw->priv;
 
-       spin_lock(&priv->lock);
-       if (priv->working)
-               ret = -EBUSY;
-       else
-               priv->working = 1;
-       spin_unlock(&priv->lock);
+       write_lock_bh(&fakelb_ifup_phys_lock);
+       list_add(&phy->list_ifup, &fakelb_ifup_phys);
+       write_unlock_bh(&fakelb_ifup_phys_lock);
 
-       return ret;
+       return 0;
 }
 
-static void
-fakelb_hw_stop(struct ieee802154_hw *hw) {
-       struct fakelb_dev_priv *priv = hw->priv;
+static void fakelb_hw_stop(struct ieee802154_hw *hw)
+{
+       struct fakelb_phy *phy = hw->priv;
 
-       spin_lock(&priv->lock);
-       priv->working = 0;
-       spin_unlock(&priv->lock);
+       write_lock_bh(&fakelb_ifup_phys_lock);
+       list_del(&phy->list_ifup);
+       write_unlock_bh(&fakelb_ifup_phys_lock);
 }
 
 static const struct ieee802154_ops fakelb_ops = {
        .owner = THIS_MODULE,
-       .xmit_sync = fakelb_hw_xmit,
+       .xmit_async = fakelb_hw_xmit,
        .ed = fakelb_hw_ed,
        .set_channel = fakelb_hw_channel,
        .start = fakelb_hw_start,
@@ -135,54 +120,54 @@ static const struct ieee802154_ops fakelb_ops = {
 module_param(numlbs, int, 0);
 MODULE_PARM_DESC(numlbs, "number of pseudo devices");
 
-static int fakelb_add_one(struct device *dev, struct fakelb_priv *fake)
+static int fakelb_add_one(struct device *dev)
 {
-       struct fakelb_dev_priv *priv;
-       int err;
        struct ieee802154_hw *hw;
+       struct fakelb_phy *phy;
+       int err;
 
-       hw = ieee802154_alloc_hw(sizeof(*priv), &fakelb_ops);
+       hw = ieee802154_alloc_hw(sizeof(*phy), &fakelb_ops);
        if (!hw)
                return -ENOMEM;
 
-       priv = hw->priv;
-       priv->hw = hw;
+       phy = hw->priv;
+       phy->hw = hw;
 
        /* 868 MHz BPSK 802.15.4-2003 */
-       hw->phy->channels_supported[0] |= 1;
+       hw->phy->supported.channels[0] |= 1;
        /* 915 MHz BPSK 802.15.4-2003 */
-       hw->phy->channels_supported[0] |= 0x7fe;
+       hw->phy->supported.channels[0] |= 0x7fe;
        /* 2.4 GHz O-QPSK 802.15.4-2003 */
-       hw->phy->channels_supported[0] |= 0x7FFF800;
+       hw->phy->supported.channels[0] |= 0x7FFF800;
        /* 868 MHz ASK 802.15.4-2006 */
-       hw->phy->channels_supported[1] |= 1;
+       hw->phy->supported.channels[1] |= 1;
        /* 915 MHz ASK 802.15.4-2006 */
-       hw->phy->channels_supported[1] |= 0x7fe;
+       hw->phy->supported.channels[1] |= 0x7fe;
        /* 868 MHz O-QPSK 802.15.4-2006 */
-       hw->phy->channels_supported[2] |= 1;
+       hw->phy->supported.channels[2] |= 1;
        /* 915 MHz O-QPSK 802.15.4-2006 */
-       hw->phy->channels_supported[2] |= 0x7fe;
+       hw->phy->supported.channels[2] |= 0x7fe;
        /* 2.4 GHz CSS 802.15.4a-2007 */
-       hw->phy->channels_supported[3] |= 0x3fff;
+       hw->phy->supported.channels[3] |= 0x3fff;
        /* UWB Sub-gigahertz 802.15.4a-2007 */
-       hw->phy->channels_supported[4] |= 1;
+       hw->phy->supported.channels[4] |= 1;
        /* UWB Low band 802.15.4a-2007 */
-       hw->phy->channels_supported[4] |= 0x1e;
+       hw->phy->supported.channels[4] |= 0x1e;
        /* UWB High band 802.15.4a-2007 */
-       hw->phy->channels_supported[4] |= 0xffe0;
+       hw->phy->supported.channels[4] |= 0xffe0;
        /* 750 MHz O-QPSK 802.15.4c-2009 */
-       hw->phy->channels_supported[5] |= 0xf;
+       hw->phy->supported.channels[5] |= 0xf;
        /* 750 MHz MPSK 802.15.4c-2009 */
-       hw->phy->channels_supported[5] |= 0xf0;
+       hw->phy->supported.channels[5] |= 0xf0;
        /* 950 MHz BPSK 802.15.4d-2009 */
-       hw->phy->channels_supported[6] |= 0x3ff;
+       hw->phy->supported.channels[6] |= 0x3ff;
        /* 950 MHz GFSK 802.15.4d-2009 */
-       hw->phy->channels_supported[6] |= 0x3ffc00;
+       hw->phy->supported.channels[6] |= 0x3ffc00;
 
-       INIT_LIST_HEAD(&priv->list);
-       priv->fake = fake;
-
-       spin_lock_init(&priv->lock);
+       ieee802154_random_extended_addr(&hw->phy->perm_extended_addr);
+       /* use channel 13 as the default for the fake phy */
+       hw->phy->current_channel = 13;
+       phy->channel = hw->phy->current_channel;
 
        hw->parent = dev;
 
@@ -190,67 +175,55 @@ static int fakelb_add_one(struct device *dev, struct fakelb_priv *fake)
        if (err)
                goto err_reg;
 
-       write_lock_bh(&fake->lock);
-       list_add_tail(&priv->list, &fake->list);
-       write_unlock_bh(&fake->lock);
+       spin_lock(&fakelb_phys_lock);
+       list_add_tail(&phy->list, &fakelb_phys);
+       spin_unlock(&fakelb_phys_lock);
 
        return 0;
 
 err_reg:
-       ieee802154_free_hw(priv->hw);
+       ieee802154_free_hw(phy->hw);
        return err;
 }
 
-static void fakelb_del(struct fakelb_dev_priv *priv)
+static void fakelb_del(struct fakelb_phy *phy)
 {
-       write_lock_bh(&priv->fake->lock);
-       list_del(&priv->list);
-       write_unlock_bh(&priv->fake->lock);
+       list_del(&phy->list);
 
-       ieee802154_unregister_hw(priv->hw);
-       ieee802154_free_hw(priv->hw);
+       ieee802154_unregister_hw(phy->hw);
+       ieee802154_free_hw(phy->hw);
 }
 
 static int fakelb_probe(struct platform_device *pdev)
 {
-       struct fakelb_priv *priv;
-       struct fakelb_dev_priv *dp;
-       int err = -ENOMEM;
-       int i;
-
-       priv = devm_kzalloc(&pdev->dev, sizeof(struct fakelb_priv),
-                           GFP_KERNEL);
-       if (!priv)
-               goto err_alloc;
-
-       INIT_LIST_HEAD(&priv->list);
-       rwlock_init(&priv->lock);
+       struct fakelb_phy *phy, *tmp;
+       int err, i;
 
        for (i = 0; i < numlbs; i++) {
-               err = fakelb_add_one(&pdev->dev, priv);
+               err = fakelb_add_one(&pdev->dev);
                if (err < 0)
                        goto err_slave;
        }
 
-       platform_set_drvdata(pdev, priv);
        dev_info(&pdev->dev, "added ieee802154 hardware\n");
        return 0;
 
 err_slave:
-       list_for_each_entry(dp, &priv->list, list)
-               fakelb_del(dp);
-err_alloc:
+       spin_lock(&fakelb_phys_lock);
+       list_for_each_entry_safe(phy, tmp, &fakelb_phys, list)
+               fakelb_del(phy);
+       spin_unlock(&fakelb_phys_lock);
        return err;
 }
 
 static int fakelb_remove(struct platform_device *pdev)
 {
-       struct fakelb_priv *priv = platform_get_drvdata(pdev);
-       struct fakelb_dev_priv *dp, *temp;
-
-       list_for_each_entry_safe(dp, temp, &priv->list, list)
-               fakelb_del(dp);
+       struct fakelb_phy *phy, *tmp;
 
+       spin_lock(&fakelb_phys_lock);
+       list_for_each_entry_safe(phy, tmp, &fakelb_phys, list)
+               fakelb_del(phy);
+       spin_unlock(&fakelb_phys_lock);
        return 0;
 }
 
index fba2dfd910f7372b2aedc355039cc71e2433c008..f2a1bd122a74b4032e7feb997a36652c9683905c 100644 (file)
@@ -750,7 +750,7 @@ static int mrf24j40_probe(struct spi_device *spi)
 
        devrec->hw->priv = devrec;
        devrec->hw->parent = &devrec->spi->dev;
-       devrec->hw->phy->channels_supported[0] = CHANNEL_MASK;
+       devrec->hw->phy->supported.channels[0] = CHANNEL_MASK;
        devrec->hw->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AACK |
                            IEEE802154_HW_AFILT;
 
index 54549a6223dd2f47f493ae112a06f12f3337120f..953a97492fabf46eda9986ad713e4cc9ec4275dd 100644 (file)
@@ -39,6 +39,8 @@
 #define IPVLAN_MAC_FILTER_SIZE (1 << IPVLAN_MAC_FILTER_BITS)
 #define IPVLAN_MAC_FILTER_MASK (IPVLAN_MAC_FILTER_SIZE - 1)
 
+#define IPVLAN_QBACKLOG_LIMIT  1000
+
 typedef enum {
        IPVL_IPV6 = 0,
        IPVL_ICMPV6,
@@ -93,6 +95,8 @@ struct ipvl_port {
        struct hlist_head       hlhead[IPVLAN_HASH_SIZE];
        struct list_head        ipvlans;
        struct rcu_head         rcu;
+       struct work_struct      wq;
+       struct sk_buff_head     backlog;
        int                     count;
        u16                     mode;
 };
@@ -112,6 +116,7 @@ void ipvlan_set_port_mode(struct ipvl_port *port, u32 nval);
 void ipvlan_init_secret(void);
 unsigned int ipvlan_mac_hash(const unsigned char *addr);
 rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb);
+void ipvlan_process_multicast(struct work_struct *work);
 int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev);
 void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr);
 struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
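
The new ipvl_port members pair a bounded sk_buff queue (capped at IPVLAN_QBACKLOG_LIMIT frames) with a work item, and ipvlan_process_multicast() is the standard work_struct callback that recovers its owning structure via container_of(). A self-contained sketch of that retrieval pattern (the struct here is a hypothetical stand-in for ipvl_port):

#include <linux/workqueue.h>

struct example_port {		/* hypothetical stand-in for ipvl_port */
	struct work_struct	wq;
	int			drained;
};

static void example_worker(struct work_struct *work)
{
	/* Recover the embedding structure, as ipvlan_process_multicast()
	 * does with container_of(work, struct ipvl_port, wq). */
	struct example_port *port = container_of(work, struct example_port, wq);

	port->drained++;	/* the real worker drains port->backlog here */
}
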
index c30b5c300c05f6fdd91e48802c84b77103e3a322..8afbedad620d9ed27576dc6426878ae858979a51 100644 (file)
@@ -189,62 +189,69 @@ unsigned int ipvlan_mac_hash(const unsigned char *addr)
        return hash & IPVLAN_MAC_FILTER_MASK;
 }
 
-static void ipvlan_multicast_frame(struct ipvl_port *port, struct sk_buff *skb,
-                                  const struct ipvl_dev *in_dev, bool local)
+void ipvlan_process_multicast(struct work_struct *work)
 {
-       struct ethhdr *eth = eth_hdr(skb);
+       struct ipvl_port *port = container_of(work, struct ipvl_port, wq);
+       struct ethhdr *ethh;
        struct ipvl_dev *ipvlan;
-       struct sk_buff *nskb;
+       struct sk_buff *skb, *nskb;
+       struct sk_buff_head list;
        unsigned int len;
        unsigned int mac_hash;
        int ret;
+       u8 pkt_type;
+       bool hlocal, dlocal;
 
-       if (skb->protocol == htons(ETH_P_PAUSE))
-               return;
-
-       rcu_read_lock();
-       list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) {
-               if (local && (ipvlan == in_dev))
-                       continue;
+       __skb_queue_head_init(&list);
 
-               mac_hash = ipvlan_mac_hash(eth->h_dest);
-               if (!test_bit(mac_hash, ipvlan->mac_filters))
-                       continue;
+       spin_lock_bh(&port->backlog.lock);
+       skb_queue_splice_tail_init(&port->backlog, &list);
+       spin_unlock_bh(&port->backlog.lock);
 
-               ret = NET_RX_DROP;
-               len = skb->len + ETH_HLEN;
-               nskb = skb_clone(skb, GFP_ATOMIC);
-               if (!nskb)
-                       goto mcast_acct;
+       while ((skb = __skb_dequeue(&list)) != NULL) {
+               ethh = eth_hdr(skb);
+               hlocal = ether_addr_equal(ethh->h_source, port->dev->dev_addr);
+               mac_hash = ipvlan_mac_hash(ethh->h_dest);
 
-               if (ether_addr_equal(eth->h_dest, ipvlan->phy_dev->broadcast))
-                       nskb->pkt_type = PACKET_BROADCAST;
+               if (ether_addr_equal(ethh->h_dest, port->dev->broadcast))
+                       pkt_type = PACKET_BROADCAST;
                else
-                       nskb->pkt_type = PACKET_MULTICAST;
-
-               nskb->dev = ipvlan->dev;
-               if (local)
-                       ret = dev_forward_skb(ipvlan->dev, nskb);
-               else
-                       ret = netif_rx(nskb);
-mcast_acct:
-               ipvlan_count_rx(ipvlan, len, ret == NET_RX_SUCCESS, true);
-       }
-       rcu_read_unlock();
-
-       /* Locally generated? ...Forward a copy to the main-device as
-        * well. On the RX side we'll ignore it (wont give it to any
-        * of the virtual devices.
-        */
-       if (local) {
-               nskb = skb_clone(skb, GFP_ATOMIC);
-               if (nskb) {
-                       if (ether_addr_equal(eth->h_dest, port->dev->broadcast))
-                               nskb->pkt_type = PACKET_BROADCAST;
+                       pkt_type = PACKET_MULTICAST;
+
+               dlocal = false;
+               rcu_read_lock();
+               list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) {
+                       if (hlocal && (ipvlan->dev == skb->dev)) {
+                               dlocal = true;
+                               continue;
+                       }
+                       if (!test_bit(mac_hash, ipvlan->mac_filters))
+                               continue;
+
+                       ret = NET_RX_DROP;
+                       len = skb->len + ETH_HLEN;
+                       nskb = skb_clone(skb, GFP_ATOMIC);
+                       if (!nskb)
+                               goto acct;
+
+                       nskb->pkt_type = pkt_type;
+                       nskb->dev = ipvlan->dev;
+                       if (hlocal)
+                               ret = dev_forward_skb(ipvlan->dev, nskb);
                        else
-                               nskb->pkt_type = PACKET_MULTICAST;
-
-                       dev_forward_skb(port->dev, nskb);
+                               ret = netif_rx(nskb);
+acct:
+                       ipvlan_count_rx(ipvlan, len, ret == NET_RX_SUCCESS, true);
+               }
+               rcu_read_unlock();
+
+               if (dlocal) {
+                       /* If the packet originated here, send it out. */
+                       skb->dev = port->dev;
+                       skb->pkt_type = pkt_type;
+                       dev_queue_xmit(skb);
+               } else {
+                       kfree_skb(skb);
                }
        }
 }
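
The rewritten function uses the splice-and-drain idiom: the queue lock is held only long enough to move the whole backlog onto a private list, after which each frame is replicated without the lock. A condensed, free-standing sketch of the idiom (the real code clones a copy to each slave before freeing):

#include <linux/skbuff.h>

static void example_drain(struct sk_buff_head *backlog)
{
	struct sk_buff_head list;
	struct sk_buff *skb;

	__skb_queue_head_init(&list);

	/* Hold the producer-visible lock only for the splice. */
	spin_lock_bh(&backlog->lock);
	skb_queue_splice_tail_init(backlog, &list);
	spin_unlock_bh(&backlog->lock);

	while ((skb = __skb_dequeue(&list)) != NULL)
		kfree_skb(skb);	/* replicate, then free, in the real code */
}
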
@@ -446,6 +453,26 @@ out:
        return ret;
 }
 
+static void ipvlan_multicast_enqueue(struct ipvl_port *port,
+                                    struct sk_buff *skb)
+{
+       if (skb->protocol == htons(ETH_P_PAUSE)) {
+               kfree_skb(skb);
+               return;
+       }
+
+       spin_lock(&port->backlog.lock);
+       if (skb_queue_len(&port->backlog) < IPVLAN_QBACKLOG_LIMIT) {
+               __skb_queue_tail(&port->backlog, skb);
+               spin_unlock(&port->backlog.lock);
+               schedule_work(&port->wq);
+       } else {
+               spin_unlock(&port->backlog.lock);
+               atomic_long_inc(&skb->dev->rx_dropped);
+               kfree_skb(skb);
+       }
+}
+
 static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev)
 {
        const struct ipvl_dev *ipvlan = netdev_priv(dev);
@@ -493,11 +520,8 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
                return dev_forward_skb(ipvlan->phy_dev, skb);
 
        } else if (is_multicast_ether_addr(eth->h_dest)) {
-               u8 ip_summed = skb->ip_summed;
-
-               skb->ip_summed = CHECKSUM_UNNECESSARY;
-               ipvlan_multicast_frame(ipvlan->port, skb, ipvlan, true);
-               skb->ip_summed = ip_summed;
+               ipvlan_multicast_enqueue(ipvlan->port, skb);
+               return NET_XMIT_SUCCESS;
        }
 
        skb->dev = ipvlan->phy_dev;
@@ -581,8 +605,18 @@ static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb,
        int addr_type;
 
        if (is_multicast_ether_addr(eth->h_dest)) {
-               if (ipvlan_external_frame(skb, port))
-                       ipvlan_multicast_frame(port, skb, NULL, false);
+               if (ipvlan_external_frame(skb, port)) {
+                       struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
+
+                       /* External frames are queued for device-local
+                        * distribution, but a copy is handed to the master
+                        * straight away to avoid sending duplicates later
+                        * when the work queue processes this frame. This is
+                        * achieved by returning RX_HANDLER_PASS.
+                        */
+                       if (nskb)
+                               ipvlan_multicast_enqueue(port, nskb);
+               }
        } else {
                struct ipvl_addr *addr;
 
index 77b92a0fe557ade8fea66af377217e9c0f8feded..1acc283160d924e0754f3da99fc120c638c257a2 100644 (file)
@@ -54,6 +54,9 @@ static int ipvlan_port_create(struct net_device *dev)
        for (idx = 0; idx < IPVLAN_HASH_SIZE; idx++)
                INIT_HLIST_HEAD(&port->hlhead[idx]);
 
+       skb_queue_head_init(&port->backlog);
+       INIT_WORK(&port->wq, ipvlan_process_multicast);
+
        err = netdev_rx_handler_register(dev, ipvlan_handle_frame, port);
        if (err)
                goto err;
@@ -72,6 +75,8 @@ static void ipvlan_port_destroy(struct net_device *dev)
 
        dev->priv_flags &= ~IFF_IPVLAN_MASTER;
        netdev_rx_handler_unregister(dev);
+       cancel_work_sync(&port->wq);
+       __skb_queue_purge(&port->backlog);
        kfree_rcu(port, rcu);
 }
 
@@ -213,17 +218,6 @@ static void ipvlan_change_rx_flags(struct net_device *dev, int change)
                dev_set_allmulti(phy_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);
 }
 
-static void ipvlan_set_broadcast_mac_filter(struct ipvl_dev *ipvlan, bool set)
-{
-       struct net_device *dev = ipvlan->dev;
-       unsigned int hashbit = ipvlan_mac_hash(dev->broadcast);
-
-       if (set && !test_bit(hashbit, ipvlan->mac_filters))
-               __set_bit(hashbit, ipvlan->mac_filters);
-       else if (!set && test_bit(hashbit, ipvlan->mac_filters))
-               __clear_bit(hashbit, ipvlan->mac_filters);
-}
-
 static void ipvlan_set_multicast_mac_filter(struct net_device *dev)
 {
        struct ipvl_dev *ipvlan = netdev_priv(dev);
@@ -238,6 +232,12 @@ static void ipvlan_set_multicast_mac_filter(struct net_device *dev)
                netdev_for_each_mc_addr(ha, dev)
                        __set_bit(ipvlan_mac_hash(ha->addr), mc_filters);
 
+       /* Turn on the broadcast bit irrespective of address family;
+        * since broadcast handling is deferred to a work queue, it has
+        * no impact on fast-path processing.
+        */
+               __set_bit(ipvlan_mac_hash(dev->broadcast), mc_filters);
+
                bitmap_copy(ipvlan->mac_filters, mc_filters,
                            IPVLAN_MAC_FILTER_SIZE);
        }
@@ -705,7 +705,6 @@ static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
         */
        if (netif_running(ipvlan->dev))
                ipvlan_ht_addr_add(ipvlan, addr);
-       ipvlan_set_broadcast_mac_filter(ipvlan, true);
 
        return 0;
 }
@@ -722,8 +721,6 @@ static void ipvlan_del_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
        list_del(&addr->anode);
        ipvlan->ipv4cnt--;
        WARN_ON(ipvlan->ipv4cnt < 0);
-       if (!ipvlan->ipv4cnt)
-           ipvlan_set_broadcast_mac_filter(ipvlan, false);
        kfree_rcu(addr, rcu);
 
        return;
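
Taken together, the main.c changes pin down the work item's lifecycle: the queue and work are initialized before the rx handler can enqueue anything, and ipvlan_port_destroy() unregisters the handler first, then cancels the work, then purges leftovers. A minimal ordering sketch (the port type is a hypothetical stand-in for ipvl_port):

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>

struct example_port {			/* hypothetical stand-in for ipvl_port */
	struct work_struct	wq;
	struct sk_buff_head	backlog;
	struct rcu_head		rcu;
};

static void example_port_destroy(struct net_device *dev,
				 struct example_port *port)
{
	netdev_rx_handler_unregister(dev);	/* stop the producer */
	cancel_work_sync(&port->wq);		/* stop the consumer */
	__skb_queue_purge(&port->backlog);	/* drop whatever is left */
	kfree_rcu(port, rcu);
}
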
index f6c916312577193cd32773cf20798f6509509770..25f21968fa5c61506af3ed49fc0d9a7fa4ad15c4 100644 (file)
@@ -848,7 +848,9 @@ static void irda_usb_receive(struct urb *urb)
                 * Jean II */
                self->rx_defer_timer.function = irda_usb_rx_defer_expired;
                self->rx_defer_timer.data = (unsigned long) urb;
-               mod_timer(&self->rx_defer_timer, jiffies + (10 * HZ / 1000));
+               mod_timer(&self->rx_defer_timer,
+                         jiffies + msecs_to_jiffies(10));
+
                return;
        }
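
Besides documenting the unit, msecs_to_jiffies() rounds up where the open-coded 10 * HZ / 1000 truncates, so the deferral can only get longer, never shorter, across HZ choices. A small sketch of the difference (values assume HZ=250):

#include <linux/jiffies.h>

/* With HZ=250: 10 * HZ / 1000 truncates to 2 jiffies (8 ms), while
 * msecs_to_jiffies(10) rounds up to 3 jiffies (12 ms). */
static unsigned long example_ten_ms_from_now(void)
{
	return jiffies + msecs_to_jiffies(10);
}
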
        
index 8c350c5d54adab2556a9a1d7f2a56965b79f16b9..483afb19596ce04bf931ccf2e97aa476fa738082 100644 (file)
@@ -476,7 +476,7 @@ static int macvtap_open(struct inode *inode, struct file *file)
 
        err = -ENOMEM;
        q = (struct macvtap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
-                                            &macvtap_proto);
+                                            &macvtap_proto, 0);
        if (!q)
                goto out;
 
@@ -1006,6 +1006,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
        unsigned int __user *up = argp;
        unsigned short u;
        int __user *sp = argp;
+       struct sockaddr sa;
        int s;
        int ret;
 
@@ -1101,6 +1102,37 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
                rtnl_unlock();
                return ret;
 
+       case SIOCGIFHWADDR:
+               rtnl_lock();
+               vlan = macvtap_get_vlan(q);
+               if (!vlan) {
+                       rtnl_unlock();
+                       return -ENOLINK;
+               }
+               ret = 0;
+               u = vlan->dev->type;
+               if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
+                   copy_to_user(&ifr->ifr_hwaddr.sa_data, vlan->dev->dev_addr, ETH_ALEN) ||
+                   put_user(u, &ifr->ifr_hwaddr.sa_family))
+                       ret = -EFAULT;
+               macvtap_put_vlan(vlan);
+               rtnl_unlock();
+               return ret;
+
+       case SIOCSIFHWADDR:
+               if (copy_from_user(&sa, &ifr->ifr_hwaddr, sizeof(sa)))
+                       return -EFAULT;
+               rtnl_lock();
+               vlan = macvtap_get_vlan(q);
+               if (!vlan) {
+                       rtnl_unlock();
+                       return -ENOLINK;
+               }
+               ret = dev_set_mac_address(vlan->dev, &sa);
+               macvtap_put_vlan(vlan);
+               rtnl_unlock();
+               return ret;
+
        default:
                return -EINVAL;
        }
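
With these two cases a macvtap file descriptor answers the same hardware-address ioctls a tun/tap fd does, so tools no longer need a separate netlink path to learn the MAC. A hedged userspace sketch of the SIOCGIFHWADDR side (error handling trimmed; /dev/tap0 is an illustrative device node):

#include <fcntl.h>
#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	struct ifreq ifr;
	int fd = open("/dev/tap0", O_RDWR);

	memset(&ifr, 0, sizeof(ifr));
	if (fd >= 0 && ioctl(fd, SIOCGIFHWADDR, &ifr) == 0) {
		unsigned char *mac = (unsigned char *)ifr.ifr_hwaddr.sa_data;

		printf("%s: %02x:%02x:%02x:%02x:%02x:%02x\n", ifr.ifr_name,
		       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	}
	if (fd >= 0)
		close(fd);
	return 0;
}
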
index 70641d2c042957e7e154b4a1d265f39fe3ffa386..cf18940f4e84f5352a6dffe7a72548d72ef3a011 100644 (file)
@@ -24,13 +24,6 @@ config AMD_PHY
        ---help---
          Currently supports the am79c874
 
-config AMD_XGBE_PHY
-       tristate "Driver for the AMD 10GbE (amd-xgbe) PHYs"
-       depends on (OF || ACPI) && HAS_IOMEM
-       depends on ARM64 || COMPILE_TEST
-       ---help---
-         Currently supports the AMD 10GbE PHY
-
 config MARVELL_PHY
        tristate "Drivers for Marvell PHYs"
        ---help---
@@ -119,6 +112,11 @@ config MICREL_PHY
        ---help---
          Supports the KSZ9021, VSC8201, KS8001 PHYs.
 
+config DP83867_PHY
+       tristate "Drivers for Texas Instruments DP83867 Gigabit PHY"
+       ---help---
+         Currently supports the DP83867 PHY.
+
 config FIXED_PHY
        tristate "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs"
        depends on PHYLIB
@@ -212,7 +210,6 @@ config MDIO_BCM_UNIMAC
          This hardware can be found in the Broadcom GENET Ethernet MAC
          controllers as well as some Broadcom Ethernet switches such as the
          Starfighter 2 switches.
-
 endif # PHYLIB
 
 config MICREL_KS8995MA
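
The AMD_XGBE_PHY entry disappears because the PHY logic has been folded into the amd-xgbe MAC driver (the full file deletion follows below); DP83867 gains a standard tristate entry, hooked up by the Makefile hunk that comes next. Enabling the new driver is the usual one-line fragment (illustrative):

CONFIG_DP83867_PHY=m
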
index 501ea7699a2df399c6e1998e570eab6c7ae9a645..fcc25a0c45cd01de449f677bb424cbe158a416c7 100644 (file)
@@ -22,6 +22,7 @@ obj-$(CONFIG_MDIO_BITBANG)    += mdio-bitbang.o
 obj-$(CONFIG_MDIO_GPIO)                += mdio-gpio.o
 obj-$(CONFIG_NATIONAL_PHY)     += national.o
 obj-$(CONFIG_DP83640_PHY)      += dp83640.o
+obj-$(CONFIG_DP83867_PHY)      += dp83867.o
 obj-$(CONFIG_STE10XP)          += ste10Xp.o
 obj-$(CONFIG_MICREL_PHY)       += micrel.o
 obj-$(CONFIG_MDIO_OCTEON)      += mdio-octeon.o
@@ -33,5 +34,4 @@ obj-$(CONFIG_MDIO_BUS_MUX_GPIO)       += mdio-mux-gpio.o
 obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o
 obj-$(CONFIG_MDIO_SUN4I)       += mdio-sun4i.o
 obj-$(CONFIG_MDIO_MOXART)      += mdio-moxart.o
-obj-$(CONFIG_AMD_XGBE_PHY)     += amd-xgbe-phy.o
 obj-$(CONFIG_MDIO_BCM_UNIMAC)  += mdio-bcm-unimac.o
diff --git a/drivers/net/phy/amd-xgbe-phy.c b/drivers/net/phy/amd-xgbe-phy.c
deleted file mode 100644 (file)
index 34a75cb..0000000
+++ /dev/null
@@ -1,1901 +0,0 @@
-/*
- * AMD 10Gb Ethernet PHY driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in the
- *       documentation and/or other materials provided with the distribution.
- *     * Neither the name of Advanced Micro Devices, Inc. nor the
- *       names of its contributors may be used to endorse or promote products
- *       derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/kernel.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/unistd.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/workqueue.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/mii.h>
-#include <linux/ethtool.h>
-#include <linux/phy.h>
-#include <linux/mdio.h>
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/of_platform.h>
-#include <linux/of_device.h>
-#include <linux/uaccess.h>
-#include <linux/bitops.h>
-#include <linux/property.h>
-#include <linux/acpi.h>
-#include <linux/jiffies.h>
-
-MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION("1.0.0-a");
-MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
-
-#define XGBE_PHY_ID    0x000162d0
-#define XGBE_PHY_MASK  0xfffffff0
-
-#define XGBE_PHY_SPEEDSET_PROPERTY     "amd,speed-set"
-#define XGBE_PHY_BLWC_PROPERTY         "amd,serdes-blwc"
-#define XGBE_PHY_CDR_RATE_PROPERTY     "amd,serdes-cdr-rate"
-#define XGBE_PHY_PQ_SKEW_PROPERTY      "amd,serdes-pq-skew"
-#define XGBE_PHY_TX_AMP_PROPERTY       "amd,serdes-tx-amp"
-#define XGBE_PHY_DFE_CFG_PROPERTY      "amd,serdes-dfe-tap-config"
-#define XGBE_PHY_DFE_ENA_PROPERTY      "amd,serdes-dfe-tap-enable"
-
-#define XGBE_PHY_SPEEDS                        3
-#define XGBE_PHY_SPEED_1000            0
-#define XGBE_PHY_SPEED_2500            1
-#define XGBE_PHY_SPEED_10000           2
-
-#define XGBE_AN_MS_TIMEOUT             500
-
-#define XGBE_AN_INT_CMPLT              0x01
-#define XGBE_AN_INC_LINK               0x02
-#define XGBE_AN_PG_RCV                 0x04
-#define XGBE_AN_INT_MASK               0x07
-
-#define XNP_MCF_NULL_MESSAGE           0x001
-#define XNP_ACK_PROCESSED              BIT(12)
-#define XNP_MP_FORMATTED               BIT(13)
-#define XNP_NP_EXCHANGE                        BIT(15)
-
-#define XGBE_PHY_RATECHANGE_COUNT      500
-
-#define XGBE_PHY_KR_TRAINING_START     0x01
-#define XGBE_PHY_KR_TRAINING_ENABLE    0x02
-
-#define XGBE_PHY_FEC_ENABLE            0x01
-#define XGBE_PHY_FEC_FORWARD           0x02
-#define XGBE_PHY_FEC_MASK              0x03
-
-#ifndef MDIO_PMA_10GBR_PMD_CTRL
-#define MDIO_PMA_10GBR_PMD_CTRL                0x0096
-#endif
-
-#ifndef MDIO_PMA_10GBR_FEC_ABILITY
-#define MDIO_PMA_10GBR_FEC_ABILITY     0x00aa
-#endif
-
-#ifndef MDIO_PMA_10GBR_FEC_CTRL
-#define MDIO_PMA_10GBR_FEC_CTRL                0x00ab
-#endif
-
-#ifndef MDIO_AN_XNP
-#define MDIO_AN_XNP                    0x0016
-#endif
-
-#ifndef MDIO_AN_LPX
-#define MDIO_AN_LPX                    0x0019
-#endif
-
-#ifndef MDIO_AN_INTMASK
-#define MDIO_AN_INTMASK                        0x8001
-#endif
-
-#ifndef MDIO_AN_INT
-#define MDIO_AN_INT                    0x8002
-#endif
-
-#ifndef MDIO_CTRL1_SPEED1G
-#define MDIO_CTRL1_SPEED1G             (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100)
-#endif
-
-/* SerDes integration register offsets */
-#define SIR0_KR_RT_1                   0x002c
-#define SIR0_STATUS                    0x0040
-#define SIR1_SPEED                     0x0000
-
-/* SerDes integration register entry bit positions and sizes */
-#define SIR0_KR_RT_1_RESET_INDEX       11
-#define SIR0_KR_RT_1_RESET_WIDTH       1
-#define SIR0_STATUS_RX_READY_INDEX     0
-#define SIR0_STATUS_RX_READY_WIDTH     1
-#define SIR0_STATUS_TX_READY_INDEX     8
-#define SIR0_STATUS_TX_READY_WIDTH     1
-#define SIR1_SPEED_CDR_RATE_INDEX      12
-#define SIR1_SPEED_CDR_RATE_WIDTH      4
-#define SIR1_SPEED_DATARATE_INDEX      4
-#define SIR1_SPEED_DATARATE_WIDTH      2
-#define SIR1_SPEED_PLLSEL_INDEX                3
-#define SIR1_SPEED_PLLSEL_WIDTH                1
-#define SIR1_SPEED_RATECHANGE_INDEX    6
-#define SIR1_SPEED_RATECHANGE_WIDTH    1
-#define SIR1_SPEED_TXAMP_INDEX         8
-#define SIR1_SPEED_TXAMP_WIDTH         4
-#define SIR1_SPEED_WORDMODE_INDEX      0
-#define SIR1_SPEED_WORDMODE_WIDTH      3
-
-#define SPEED_10000_BLWC               0
-#define SPEED_10000_CDR                        0x7
-#define SPEED_10000_PLL                        0x1
-#define SPEED_10000_PQ                 0x12
-#define SPEED_10000_RATE               0x0
-#define SPEED_10000_TXAMP              0xa
-#define SPEED_10000_WORD               0x7
-#define SPEED_10000_DFE_TAP_CONFIG     0x1
-#define SPEED_10000_DFE_TAP_ENABLE     0x7f
-
-#define SPEED_2500_BLWC                        1
-#define SPEED_2500_CDR                 0x2
-#define SPEED_2500_PLL                 0x0
-#define SPEED_2500_PQ                  0xa
-#define SPEED_2500_RATE                        0x1
-#define SPEED_2500_TXAMP               0xf
-#define SPEED_2500_WORD                        0x1
-#define SPEED_2500_DFE_TAP_CONFIG      0x3
-#define SPEED_2500_DFE_TAP_ENABLE      0x0
-
-#define SPEED_1000_BLWC                        1
-#define SPEED_1000_CDR                 0x2
-#define SPEED_1000_PLL                 0x0
-#define SPEED_1000_PQ                  0xa
-#define SPEED_1000_RATE                        0x3
-#define SPEED_1000_TXAMP               0xf
-#define SPEED_1000_WORD                        0x1
-#define SPEED_1000_DFE_TAP_CONFIG      0x3
-#define SPEED_1000_DFE_TAP_ENABLE      0x0
-
-/* SerDes RxTx register offsets */
-#define RXTX_REG6                      0x0018
-#define RXTX_REG20                     0x0050
-#define RXTX_REG22                     0x0058
-#define RXTX_REG114                    0x01c8
-#define RXTX_REG129                    0x0204
-
-/* SerDes RxTx register entry bit positions and sizes */
-#define RXTX_REG6_RESETB_RXD_INDEX     8
-#define RXTX_REG6_RESETB_RXD_WIDTH     1
-#define RXTX_REG20_BLWC_ENA_INDEX      2
-#define RXTX_REG20_BLWC_ENA_WIDTH      1
-#define RXTX_REG114_PQ_REG_INDEX       9
-#define RXTX_REG114_PQ_REG_WIDTH       7
-#define RXTX_REG129_RXDFE_CONFIG_INDEX 14
-#define RXTX_REG129_RXDFE_CONFIG_WIDTH 2
-
-/* Bit setting and getting macros
- *  The get macro will extract the current bit field value from within
- *  the variable
- *
- *  The set macro will clear the current bit field value within the
- *  variable and then set the bit field of the variable to the
- *  specified value
- */
-#define GET_BITS(_var, _index, _width)                                 \
-       (((_var) >> (_index)) & ((0x1 << (_width)) - 1))
-
-#define SET_BITS(_var, _index, _width, _val)                           \
-do {                                                                   \
-       (_var) &= ~(((0x1 << (_width)) - 1) << (_index));               \
-       (_var) |= (((_val) & ((0x1 << (_width)) - 1)) << (_index));     \
-} while (0)
-
-#define XSIR_GET_BITS(_var, _prefix, _field)                           \
-       GET_BITS((_var),                                                \
-                _prefix##_##_field##_INDEX,                            \
-                _prefix##_##_field##_WIDTH)
-
-#define XSIR_SET_BITS(_var, _prefix, _field, _val)                     \
-       SET_BITS((_var),                                                \
-                _prefix##_##_field##_INDEX,                            \
-                _prefix##_##_field##_WIDTH, (_val))
-
-/* Macros for reading or writing SerDes integration registers
- *  The ioread macros will get bit fields or full values using the
- *  register definitions formed using the input names
- *
- *  The iowrite macros will set bit fields or full values using the
- *  register definitions formed using the input names
- */
-#define XSIR0_IOREAD(_priv, _reg)                                      \
-       ioread16((_priv)->sir0_regs + _reg)
-
-#define XSIR0_IOREAD_BITS(_priv, _reg, _field)                         \
-       GET_BITS(XSIR0_IOREAD((_priv), _reg),                           \
-                _reg##_##_field##_INDEX,                               \
-                _reg##_##_field##_WIDTH)
-
-#define XSIR0_IOWRITE(_priv, _reg, _val)                               \
-       iowrite16((_val), (_priv)->sir0_regs + _reg)
-
-#define XSIR0_IOWRITE_BITS(_priv, _reg, _field, _val)                  \
-do {                                                                   \
-       u16 reg_val = XSIR0_IOREAD((_priv), _reg);                      \
-       SET_BITS(reg_val,                                               \
-                _reg##_##_field##_INDEX,                               \
-                _reg##_##_field##_WIDTH, (_val));                      \
-       XSIR0_IOWRITE((_priv), _reg, reg_val);                          \
-} while (0)
-
-#define XSIR1_IOREAD(_priv, _reg)                                      \
-       ioread16((_priv)->sir1_regs + _reg)
-
-#define XSIR1_IOREAD_BITS(_priv, _reg, _field)                         \
-       GET_BITS(XSIR1_IOREAD((_priv), _reg),                           \
-                _reg##_##_field##_INDEX,                               \
-                _reg##_##_field##_WIDTH)
-
-#define XSIR1_IOWRITE(_priv, _reg, _val)                               \
-       iowrite16((_val), (_priv)->sir1_regs + _reg)
-
-#define XSIR1_IOWRITE_BITS(_priv, _reg, _field, _val)                  \
-do {                                                                   \
-       u16 reg_val = XSIR1_IOREAD((_priv), _reg);                      \
-       SET_BITS(reg_val,                                               \
-                _reg##_##_field##_INDEX,                               \
-                _reg##_##_field##_WIDTH, (_val));                      \
-       XSIR1_IOWRITE((_priv), _reg, reg_val);                          \
-} while (0)
-
-/* Macros for reading or writing SerDes RxTx registers
- *  The ioread macros will get bit fields or full values using the
- *  register definitions formed using the input names
- *
- *  The iowrite macros will set bit fields or full values using the
- *  register definitions formed using the input names
- */
-#define XRXTX_IOREAD(_priv, _reg)                                      \
-       ioread16((_priv)->rxtx_regs + _reg)
-
-#define XRXTX_IOREAD_BITS(_priv, _reg, _field)                         \
-       GET_BITS(XRXTX_IOREAD((_priv), _reg),                           \
-                _reg##_##_field##_INDEX,                               \
-                _reg##_##_field##_WIDTH)
-
-#define XRXTX_IOWRITE(_priv, _reg, _val)                               \
-       iowrite16((_val), (_priv)->rxtx_regs + _reg)
-
-#define XRXTX_IOWRITE_BITS(_priv, _reg, _field, _val)                  \
-do {                                                                   \
-       u16 reg_val = XRXTX_IOREAD((_priv), _reg);                      \
-       SET_BITS(reg_val,                                               \
-                _reg##_##_field##_INDEX,                               \
-                _reg##_##_field##_WIDTH, (_val));                      \
-       XRXTX_IOWRITE((_priv), _reg, reg_val);                          \
-} while (0)
-
-static const u32 amd_xgbe_phy_serdes_blwc[] = {
-       SPEED_1000_BLWC,
-       SPEED_2500_BLWC,
-       SPEED_10000_BLWC,
-};
-
-static const u32 amd_xgbe_phy_serdes_cdr_rate[] = {
-       SPEED_1000_CDR,
-       SPEED_2500_CDR,
-       SPEED_10000_CDR,
-};
-
-static const u32 amd_xgbe_phy_serdes_pq_skew[] = {
-       SPEED_1000_PQ,
-       SPEED_2500_PQ,
-       SPEED_10000_PQ,
-};
-
-static const u32 amd_xgbe_phy_serdes_tx_amp[] = {
-       SPEED_1000_TXAMP,
-       SPEED_2500_TXAMP,
-       SPEED_10000_TXAMP,
-};
-
-static const u32 amd_xgbe_phy_serdes_dfe_tap_cfg[] = {
-       SPEED_1000_DFE_TAP_CONFIG,
-       SPEED_2500_DFE_TAP_CONFIG,
-       SPEED_10000_DFE_TAP_CONFIG,
-};
-
-static const u32 amd_xgbe_phy_serdes_dfe_tap_ena[] = {
-       SPEED_1000_DFE_TAP_ENABLE,
-       SPEED_2500_DFE_TAP_ENABLE,
-       SPEED_10000_DFE_TAP_ENABLE,
-};
-
-enum amd_xgbe_phy_an {
-       AMD_XGBE_AN_READY = 0,
-       AMD_XGBE_AN_PAGE_RECEIVED,
-       AMD_XGBE_AN_INCOMPAT_LINK,
-       AMD_XGBE_AN_COMPLETE,
-       AMD_XGBE_AN_NO_LINK,
-       AMD_XGBE_AN_ERROR,
-};
-
-enum amd_xgbe_phy_rx {
-       AMD_XGBE_RX_BPA = 0,
-       AMD_XGBE_RX_XNP,
-       AMD_XGBE_RX_COMPLETE,
-       AMD_XGBE_RX_ERROR,
-};
-
-enum amd_xgbe_phy_mode {
-       AMD_XGBE_MODE_KR,
-       AMD_XGBE_MODE_KX,
-};
-
-enum amd_xgbe_phy_speedset {
-       AMD_XGBE_PHY_SPEEDSET_1000_10000 = 0,
-       AMD_XGBE_PHY_SPEEDSET_2500_10000,
-};
-
-struct amd_xgbe_phy_priv {
-       struct platform_device *pdev;
-       struct acpi_device *adev;
-       struct device *dev;
-
-       struct phy_device *phydev;
-
-       /* SerDes related mmio resources */
-       struct resource *rxtx_res;
-       struct resource *sir0_res;
-       struct resource *sir1_res;
-
-       /* SerDes related mmio registers */
-       void __iomem *rxtx_regs;        /* SerDes Rx/Tx CSRs */
-       void __iomem *sir0_regs;        /* SerDes integration registers (1/2) */
-       void __iomem *sir1_regs;        /* SerDes integration registers (2/2) */
-
-       int an_irq;
-       char an_irq_name[IFNAMSIZ + 32];
-       struct work_struct an_irq_work;
-       unsigned int an_irq_allocated;
-
-       unsigned int speed_set;
-
-       /* SerDes UEFI configurable settings.
-        *   Switching between modes/speeds requires new values for some
-        *   SerDes settings.  The values can be supplied as device
-        *   properties in array format.  The first array entry is for
-        *   1GbE, second for 2.5GbE and third for 10GbE
-        */
-       u32 serdes_blwc[XGBE_PHY_SPEEDS];
-       u32 serdes_cdr_rate[XGBE_PHY_SPEEDS];
-       u32 serdes_pq_skew[XGBE_PHY_SPEEDS];
-       u32 serdes_tx_amp[XGBE_PHY_SPEEDS];
-       u32 serdes_dfe_tap_cfg[XGBE_PHY_SPEEDS];
-       u32 serdes_dfe_tap_ena[XGBE_PHY_SPEEDS];
-
-       /* Auto-negotiation state machine support */
-       struct mutex an_mutex;
-       enum amd_xgbe_phy_an an_result;
-       enum amd_xgbe_phy_an an_state;
-       enum amd_xgbe_phy_rx kr_state;
-       enum amd_xgbe_phy_rx kx_state;
-       struct work_struct an_work;
-       struct workqueue_struct *an_workqueue;
-       unsigned int an_supported;
-       unsigned int parallel_detect;
-       unsigned int fec_ability;
-       unsigned long an_start;
-
-       unsigned int lpm_ctrl;          /* CTRL1 for resume */
-};
-
-static int amd_xgbe_an_enable_kr_training(struct phy_device *phydev)
-{
-       int ret;
-
-       ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
-       if (ret < 0)
-               return ret;
-
-       ret |= XGBE_PHY_KR_TRAINING_ENABLE;
-       phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret);
-
-       return 0;
-}
-
-static int amd_xgbe_an_disable_kr_training(struct phy_device *phydev)
-{
-       int ret;
-
-       ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
-       if (ret < 0)
-               return ret;
-
-       ret &= ~XGBE_PHY_KR_TRAINING_ENABLE;
-       phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret);
-
-       return 0;
-}
-
-static int amd_xgbe_phy_pcs_power_cycle(struct phy_device *phydev)
-{
-       int ret;
-
-       ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
-       if (ret < 0)
-               return ret;
-
-       ret |= MDIO_CTRL1_LPOWER;
-       phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
-
-       usleep_range(75, 100);
-
-       ret &= ~MDIO_CTRL1_LPOWER;
-       phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
-
-       return 0;
-}
-
-static void amd_xgbe_phy_serdes_start_ratechange(struct phy_device *phydev)
-{
-       struct amd_xgbe_phy_priv *priv = phydev->priv;
-
-       /* Assert Rx and Tx ratechange */
-       XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, RATECHANGE, 1);
-}
-
-static void amd_xgbe_phy_serdes_complete_ratechange(struct phy_device *phydev)
-{
-       struct amd_xgbe_phy_priv *priv = phydev->priv;
-       unsigned int wait;
-       u16 status;
-
-       /* Release Rx and Tx ratechange */
-       XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, RATECHANGE, 0);
-
-       /* Wait for Rx and Tx ready */
-       wait = XGBE_PHY_RATECHANGE_COUNT;
-       while (wait--) {
-               usleep_range(50, 75);
-
-               status = XSIR0_IOREAD(priv, SIR0_STATUS);
-               if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) &&
-                   XSIR_GET_BITS(status, SIR0_STATUS, TX_READY))
-                       goto rx_reset;
-       }
-
-       netdev_dbg(phydev->attached_dev, "SerDes rx/tx not ready (%#hx)\n",
-                  status);
-
-rx_reset:
-       /* Perform Rx reset for the DFE changes */
-       XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RESETB_RXD, 0);
-       XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RESETB_RXD, 1);
-}
-
-static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev)
-{
-       struct amd_xgbe_phy_priv *priv = phydev->priv;
-       int ret;
-
-       /* Enable KR training */
-       ret = amd_xgbe_an_enable_kr_training(phydev);
-       if (ret < 0)
-               return ret;
-
-       /* Set PCS to KR/10G speed */
-       ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
-       if (ret < 0)
-               return ret;
-
-       ret &= ~MDIO_PCS_CTRL2_TYPE;
-       ret |= MDIO_PCS_CTRL2_10GBR;
-       phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2, ret);
-
-       ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
-       if (ret < 0)
-               return ret;
-
-       ret &= ~MDIO_CTRL1_SPEEDSEL;
-       ret |= MDIO_CTRL1_SPEED10G;
-       phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
-
-       ret = amd_xgbe_phy_pcs_power_cycle(phydev);
-       if (ret < 0)
-               return ret;
-
-       /* Set SerDes to 10G speed */
-       amd_xgbe_phy_serdes_start_ratechange(phydev);
-
-       XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_10000_RATE);
-       XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_10000_WORD);
-       XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_10000_PLL);
-
-       XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, CDR_RATE,
-                          priv->serdes_cdr_rate[XGBE_PHY_SPEED_10000]);
-       XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP,
-                          priv->serdes_tx_amp[XGBE_PHY_SPEED_10000]);
-       XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA,
-                          priv->serdes_blwc[XGBE_PHY_SPEED_10000]);
-       XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
-                          priv->serdes_pq_skew[XGBE_PHY_SPEED_10000]);
-       XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
-                          priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_10000]);
-       XRXTX_IOWRITE(priv, RXTX_REG22,
-                     priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_10000]);
-
-       amd_xgbe_phy_serdes_complete_ratechange(phydev);
-
-       return 0;
-}
-
-static int amd_xgbe_phy_gmii_2500_mode(struct phy_device *phydev)
-{
-       struct amd_xgbe_phy_priv *priv = phydev->priv;
-       int ret;
-
-       /* Disable KR training */
-       ret = amd_xgbe_an_disable_kr_training(phydev);
-       if (ret < 0)
-               return ret;
-
-       /* Set PCS to KX/1G speed */
-       ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
-       if (ret < 0)
-               return ret;
-
-       ret &= ~MDIO_PCS_CTRL2_TYPE;
-       ret |= MDIO_PCS_CTRL2_10GBX;
-       phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2, ret);
-
-       ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
-       if (ret < 0)
-               return ret;
-
-       ret &= ~MDIO_CTRL1_SPEEDSEL;
-       ret |= MDIO_CTRL1_SPEED1G;
-       phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
-
-       ret = amd_xgbe_phy_pcs_power_cycle(phydev);
-       if (ret < 0)
-               return ret;
-
-       /* Set SerDes to 2.5G speed */
-       amd_xgbe_phy_serdes_start_ratechange(phydev);
-
-       XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_2500_RATE);
-       XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_2500_WORD);
-       XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_2500_PLL);
-
-       XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, CDR_RATE,
-                          priv->serdes_cdr_rate[XGBE_PHY_SPEED_2500]);
-       XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP,
-                          priv->serdes_tx_amp[XGBE_PHY_SPEED_2500]);
-       XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA,
-                          priv->serdes_blwc[XGBE_PHY_SPEED_2500]);
-       XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
-                          priv->serdes_pq_skew[XGBE_PHY_SPEED_2500]);
-       XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
-                          priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_2500]);
-       XRXTX_IOWRITE(priv, RXTX_REG22,
-                     priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_2500]);
-
-       amd_xgbe_phy_serdes_complete_ratechange(phydev);
-
-       return 0;
-}
-
-static int amd_xgbe_phy_gmii_mode(struct phy_device *phydev)
-{
-       struct amd_xgbe_phy_priv *priv = phydev->priv;
-       int ret;
-
-       /* Disable KR training */
-       ret = amd_xgbe_an_disable_kr_training(phydev);
-       if (ret < 0)
-               return ret;
-
-       /* Set PCS to KX/1G speed */
-       ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
-       if (ret < 0)
-               return ret;
-
-       ret &= ~MDIO_PCS_CTRL2_TYPE;
-       ret |= MDIO_PCS_CTRL2_10GBX;
-       phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2, ret);
-
-       ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
-       if (ret < 0)
-               return ret;
-
-       ret &= ~MDIO_CTRL1_SPEEDSEL;
-       ret |= MDIO_CTRL1_SPEED1G;
-       phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
-
-       ret = amd_xgbe_phy_pcs_power_cycle(phydev);
-       if (ret < 0)
-               return ret;
-
-       /* Set SerDes to 1G speed */
-       amd_xgbe_phy_serdes_start_ratechange(phydev);
-
-       XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_1000_RATE);
-       XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_1000_WORD);
-       XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_1000_PLL);
-
-       XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, CDR_RATE,
-                          priv->serdes_cdr_rate[XGBE_PHY_SPEED_1000]);
-       XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP,
-                          priv->serdes_tx_amp[XGBE_PHY_SPEED_1000]);
-       XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA,
-                          priv->serdes_blwc[XGBE_PHY_SPEED_1000]);
-       XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
-                          priv->serdes_pq_skew[XGBE_PHY_SPEED_1000]);
-       XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
-                          priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_1000]);
-       XRXTX_IOWRITE(priv, RXTX_REG22,
-                     priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_1000]);
-
-       amd_xgbe_phy_serdes_complete_ratechange(phydev);
-
-       return 0;
-}
-
-static int amd_xgbe_phy_cur_mode(struct phy_device *phydev,
-                                enum amd_xgbe_phy_mode *mode)
-{
-       int ret;
-
-       ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
-       if (ret < 0)
-               return ret;
-
-       if ((ret & MDIO_PCS_CTRL2_TYPE) == MDIO_PCS_CTRL2_10GBR)
-               *mode = AMD_XGBE_MODE_KR;
-       else
-               *mode = AMD_XGBE_MODE_KX;
-
-       return 0;
-}
-
-static bool amd_xgbe_phy_in_kr_mode(struct phy_device *phydev)
-{
-       enum amd_xgbe_phy_mode mode;
-
-       if (amd_xgbe_phy_cur_mode(phydev, &mode))
-               return false;
-
-       return (mode == AMD_XGBE_MODE_KR);
-}
-
-static int amd_xgbe_phy_switch_mode(struct phy_device *phydev)
-{
-       struct amd_xgbe_phy_priv *priv = phydev->priv;
-       int ret;
-
-       /* If we are in KR switch to KX, and vice-versa */
-       if (amd_xgbe_phy_in_kr_mode(phydev)) {
-               if (priv->speed_set == AMD_XGBE_PHY_SPEEDSET_1000_10000)
-                       ret = amd_xgbe_phy_gmii_mode(phydev);
-               else
-                       ret = amd_xgbe_phy_gmii_2500_mode(phydev);
-       } else {
-               ret = amd_xgbe_phy_xgmii_mode(phydev);
-       }
-
-       return ret;
-}
-
-static int amd_xgbe_phy_set_mode(struct phy_device *phydev,
-                                enum amd_xgbe_phy_mode mode)
-{
-       enum amd_xgbe_phy_mode cur_mode;
-       int ret;
-
-       ret = amd_xgbe_phy_cur_mode(phydev, &cur_mode);
-       if (ret)
-               return ret;
-
-       if (mode != cur_mode)
-               ret = amd_xgbe_phy_switch_mode(phydev);
-
-       return ret;
-}
-
-static bool amd_xgbe_phy_use_xgmii_mode(struct phy_device *phydev)
-{
-       if (phydev->autoneg == AUTONEG_ENABLE) {
-               if (phydev->advertising & ADVERTISED_10000baseKR_Full)
-                       return true;
-       } else {
-               if (phydev->speed == SPEED_10000)
-                       return true;
-       }
-
-       return false;
-}
-
-static bool amd_xgbe_phy_use_gmii_2500_mode(struct phy_device *phydev)
-{
-       if (phydev->autoneg == AUTONEG_ENABLE) {
-               if (phydev->advertising & ADVERTISED_2500baseX_Full)
-                       return true;
-       } else {
-               if (phydev->speed == SPEED_2500)
-                       return true;
-       }
-
-       return false;
-}
-
-static bool amd_xgbe_phy_use_gmii_mode(struct phy_device *phydev)
-{
-       if (phydev->autoneg == AUTONEG_ENABLE) {
-               if (phydev->advertising & ADVERTISED_1000baseKX_Full)
-                       return true;
-       } else {
-               if (phydev->speed == SPEED_1000)
-                       return true;
-       }
-
-       return false;
-}
-
-static int amd_xgbe_phy_set_an(struct phy_device *phydev, bool enable,
-                              bool restart)
-{
-       int ret;
-
-       ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
-       if (ret < 0)
-               return ret;
-
-       ret &= ~MDIO_AN_CTRL1_ENABLE;
-
-       if (enable)
-               ret |= MDIO_AN_CTRL1_ENABLE;
-
-       if (restart)
-               ret |= MDIO_AN_CTRL1_RESTART;
-
-       phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, ret);
-
-       return 0;
-}
-
-static int amd_xgbe_phy_restart_an(struct phy_device *phydev)
-{
-       return amd_xgbe_phy_set_an(phydev, true, true);
-}
-
-static int amd_xgbe_phy_disable_an(struct phy_device *phydev)
-{
-       return amd_xgbe_phy_set_an(phydev, false, false);
-}
-
-static enum amd_xgbe_phy_an amd_xgbe_an_tx_training(struct phy_device *phydev,
-                                                   enum amd_xgbe_phy_rx *state)
-{
-       struct amd_xgbe_phy_priv *priv = phydev->priv;
-       int ad_reg, lp_reg, ret;
-
-       *state = AMD_XGBE_RX_COMPLETE;
-
-       /* If we're not in KR mode then we're done */
-       if (!amd_xgbe_phy_in_kr_mode(phydev))
-               return AMD_XGBE_AN_PAGE_RECEIVED;
-
-       /* Enable/Disable FEC */
-       ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
-       if (ad_reg < 0)
-               return AMD_XGBE_AN_ERROR;
-
-       lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA + 2);
-       if (lp_reg < 0)
-               return AMD_XGBE_AN_ERROR;
-
-       ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_CTRL);
-       if (ret < 0)
-               return AMD_XGBE_AN_ERROR;
-
-       ret &= ~XGBE_PHY_FEC_MASK;
-       if ((ad_reg & 0xc000) && (lp_reg & 0xc000))
-               ret |= priv->fec_ability;
-
-       phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_CTRL, ret);
-
-       /* Start KR training */
-       ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
-       if (ret < 0)
-               return AMD_XGBE_AN_ERROR;
-
-       if (ret & XGBE_PHY_KR_TRAINING_ENABLE) {
-               XSIR0_IOWRITE_BITS(priv, SIR0_KR_RT_1, RESET, 1);
-
-               ret |= XGBE_PHY_KR_TRAINING_START;
-               phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL,
-                             ret);
-
-               XSIR0_IOWRITE_BITS(priv, SIR0_KR_RT_1, RESET, 0);
-       }
-
-       return AMD_XGBE_AN_PAGE_RECEIVED;
-}
-
-static enum amd_xgbe_phy_an amd_xgbe_an_tx_xnp(struct phy_device *phydev,
-                                              enum amd_xgbe_phy_rx *state)
-{
-       u16 msg;
-
-       *state = AMD_XGBE_RX_XNP;
-
-       msg = XNP_MCF_NULL_MESSAGE;
-       msg |= XNP_MP_FORMATTED;
-
-       phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP + 2, 0);
-       phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP + 1, 0);
-       phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP, msg);
-
-       return AMD_XGBE_AN_PAGE_RECEIVED;
-}
-
-static enum amd_xgbe_phy_an amd_xgbe_an_rx_bpa(struct phy_device *phydev,
-                                              enum amd_xgbe_phy_rx *state)
-{
-       unsigned int link_support;
-       int ret, ad_reg, lp_reg;
-
-       /* Read Base Ability register 2 first */
-       ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA + 1);
-       if (ret < 0)
-               return AMD_XGBE_AN_ERROR;
-
-       /* Check for a supported mode, otherwise restart in a different one */
-       link_support = amd_xgbe_phy_in_kr_mode(phydev) ? 0x80 : 0x20;
-       if (!(ret & link_support))
-               return AMD_XGBE_AN_INCOMPAT_LINK;
-
-       /* Check Extended Next Page support */
-       ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
-       if (ad_reg < 0)
-               return AMD_XGBE_AN_ERROR;
-
-       lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA);
-       if (lp_reg < 0)
-               return AMD_XGBE_AN_ERROR;
-
-       return ((ad_reg & XNP_NP_EXCHANGE) || (lp_reg & XNP_NP_EXCHANGE)) ?
-              amd_xgbe_an_tx_xnp(phydev, state) :
-              amd_xgbe_an_tx_training(phydev, state);
-}
-
-static enum amd_xgbe_phy_an amd_xgbe_an_rx_xnp(struct phy_device *phydev,
-                                              enum amd_xgbe_phy_rx *state)
-{
-       int ad_reg, lp_reg;
-
-       /* Check Extended Next Page support */
-       ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP);
-       if (ad_reg < 0)
-               return AMD_XGBE_AN_ERROR;
-
-       lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPX);
-       if (lp_reg < 0)
-               return AMD_XGBE_AN_ERROR;
-
-       return ((ad_reg & XNP_NP_EXCHANGE) || (lp_reg & XNP_NP_EXCHANGE)) ?
-              amd_xgbe_an_tx_xnp(phydev, state) :
-              amd_xgbe_an_tx_training(phydev, state);
-}
-
-static enum amd_xgbe_phy_an amd_xgbe_an_page_received(struct phy_device *phydev)
-{
-       struct amd_xgbe_phy_priv *priv = phydev->priv;
-       enum amd_xgbe_phy_rx *state;
-       unsigned long an_timeout;
-       int ret;
-
-       if (!priv->an_start) {
-               priv->an_start = jiffies;
-       } else {
-               an_timeout = priv->an_start +
-                            msecs_to_jiffies(XGBE_AN_MS_TIMEOUT);
-               if (time_after(jiffies, an_timeout)) {
-                       /* Auto-negotiation timed out, reset state */
-                       priv->kr_state = AMD_XGBE_RX_BPA;
-                       priv->kx_state = AMD_XGBE_RX_BPA;
-
-                       priv->an_start = jiffies;
-               }
-       }
-
-       state = amd_xgbe_phy_in_kr_mode(phydev) ? &priv->kr_state
-                                               : &priv->kx_state;
-
-       switch (*state) {
-       case AMD_XGBE_RX_BPA:
-               ret = amd_xgbe_an_rx_bpa(phydev, state);
-               break;
-
-       case AMD_XGBE_RX_XNP:
-               ret = amd_xgbe_an_rx_xnp(phydev, state);
-               break;
-
-       default:
-               ret = AMD_XGBE_AN_ERROR;
-       }
-
-       return ret;
-}
-
-static enum amd_xgbe_phy_an amd_xgbe_an_incompat_link(struct phy_device *phydev)
-{
-       struct amd_xgbe_phy_priv *priv = phydev->priv;
-       int ret;
-
-       /* Be sure we aren't looping trying to negotiate */
-       if (amd_xgbe_phy_in_kr_mode(phydev)) {
-               priv->kr_state = AMD_XGBE_RX_ERROR;
-
-               if (!(phydev->advertising & SUPPORTED_1000baseKX_Full) &&
-                   !(phydev->advertising & SUPPORTED_2500baseX_Full))
-                       return AMD_XGBE_AN_NO_LINK;
-
-               if (priv->kx_state != AMD_XGBE_RX_BPA)
-                       return AMD_XGBE_AN_NO_LINK;
-       } else {
-               priv->kx_state = AMD_XGBE_RX_ERROR;
-
-               if (!(phydev->advertising & SUPPORTED_10000baseKR_Full))
-                       return AMD_XGBE_AN_NO_LINK;
-
-               if (priv->kr_state != AMD_XGBE_RX_BPA)
-                       return AMD_XGBE_AN_NO_LINK;
-       }
-
-       ret = amd_xgbe_phy_disable_an(phydev);
-       if (ret)
-               return AMD_XGBE_AN_ERROR;
-
-       ret = amd_xgbe_phy_switch_mode(phydev);
-       if (ret)
-               return AMD_XGBE_AN_ERROR;
-
-       ret = amd_xgbe_phy_restart_an(phydev);
-       if (ret)
-               return AMD_XGBE_AN_ERROR;
-
-       return AMD_XGBE_AN_INCOMPAT_LINK;
-}
-
-static irqreturn_t amd_xgbe_an_isr(int irq, void *data)
-{
-       struct amd_xgbe_phy_priv *priv = (struct amd_xgbe_phy_priv *)data;
-
-       /* Interrupt reason must be read and cleared outside of IRQ context */
-       disable_irq_nosync(priv->an_irq);
-
-       queue_work(priv->an_workqueue, &priv->an_irq_work);
-
-       return IRQ_HANDLED;
-}
-
-static void amd_xgbe_an_irq_work(struct work_struct *work)
-{
-       struct amd_xgbe_phy_priv *priv = container_of(work,
-                                                     struct amd_xgbe_phy_priv,
-                                                     an_irq_work);
-
-       /* Avoid a race between enabling the IRQ and exiting the work by
-        * waiting for the work to finish and then queueing it
-        */
-       flush_work(&priv->an_work);
-       queue_work(priv->an_workqueue, &priv->an_work);
-}
-
-static void amd_xgbe_an_state_machine(struct work_struct *work)
-{
-       struct amd_xgbe_phy_priv *priv = container_of(work,
-                                                     struct amd_xgbe_phy_priv,
-                                                     an_work);
-       struct phy_device *phydev = priv->phydev;
-       enum amd_xgbe_phy_an cur_state = priv->an_state;
-       int int_reg, int_mask;
-
-       mutex_lock(&priv->an_mutex);
-
-       /* Read the interrupt */
-       int_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT);
-       if (!int_reg)
-               goto out;
-
-next_int:
-       if (int_reg < 0) {
-               priv->an_state = AMD_XGBE_AN_ERROR;
-               int_mask = XGBE_AN_INT_MASK;
-       } else if (int_reg & XGBE_AN_PG_RCV) {
-               priv->an_state = AMD_XGBE_AN_PAGE_RECEIVED;
-               int_mask = XGBE_AN_PG_RCV;
-       } else if (int_reg & XGBE_AN_INC_LINK) {
-               priv->an_state = AMD_XGBE_AN_INCOMPAT_LINK;
-               int_mask = XGBE_AN_INC_LINK;
-       } else if (int_reg & XGBE_AN_INT_CMPLT) {
-               priv->an_state = AMD_XGBE_AN_COMPLETE;
-               int_mask = XGBE_AN_INT_CMPLT;
-       } else {
-               priv->an_state = AMD_XGBE_AN_ERROR;
-               int_mask = 0;
-       }
-
-       /* Clear the interrupt to be processed */
-       int_reg &= ~int_mask;
-       phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, int_reg);
-
-       priv->an_result = priv->an_state;
-
-again:
-       cur_state = priv->an_state;
-
-       switch (priv->an_state) {
-       case AMD_XGBE_AN_READY:
-               priv->an_supported = 0;
-               break;
-
-       case AMD_XGBE_AN_PAGE_RECEIVED:
-               priv->an_state = amd_xgbe_an_page_received(phydev);
-               priv->an_supported++;
-               break;
-
-       case AMD_XGBE_AN_INCOMPAT_LINK:
-               priv->an_supported = 0;
-               priv->parallel_detect = 0;
-               priv->an_state = amd_xgbe_an_incompat_link(phydev);
-               break;
-
-       case AMD_XGBE_AN_COMPLETE:
-               priv->parallel_detect = priv->an_supported ? 0 : 1;
-               netdev_dbg(phydev->attached_dev, "%s successful\n",
-                          priv->an_supported ? "Auto negotiation"
-                                             : "Parallel detection");
-               break;
-
-       case AMD_XGBE_AN_NO_LINK:
-               break;
-
-       default:
-               priv->an_state = AMD_XGBE_AN_ERROR;
-       }
-
-       if (priv->an_state == AMD_XGBE_AN_NO_LINK) {
-               int_reg = 0;
-               phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
-       } else if (priv->an_state == AMD_XGBE_AN_ERROR) {
-               netdev_err(phydev->attached_dev,
-                          "error during auto-negotiation, state=%u\n",
-                          cur_state);
-
-               int_reg = 0;
-               phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
-       }
-
-       if (priv->an_state >= AMD_XGBE_AN_COMPLETE) {
-               priv->an_result = priv->an_state;
-               priv->an_state = AMD_XGBE_AN_READY;
-               priv->kr_state = AMD_XGBE_RX_BPA;
-               priv->kx_state = AMD_XGBE_RX_BPA;
-               priv->an_start = 0;
-       }
-
-       if (cur_state != priv->an_state)
-               goto again;
-
-       if (int_reg)
-               goto next_int;
-
-out:
-       enable_irq(priv->an_irq);
-
-       mutex_unlock(&priv->an_mutex);
-}
-
-static int amd_xgbe_an_init(struct phy_device *phydev)
-{
-       int ret;
-
-       /* Set up Advertisement register 3 first */
-       ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
-       if (ret < 0)
-               return ret;
-
-       if (phydev->advertising & SUPPORTED_10000baseR_FEC)
-               ret |= 0xc000;
-       else
-               ret &= ~0xc000;
-
-       phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2, ret);
-
-       /* Set up Advertisement register 2 next */
-       ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
-       if (ret < 0)
-               return ret;
-
-       if (phydev->advertising & SUPPORTED_10000baseKR_Full)
-               ret |= 0x80;
-       else
-               ret &= ~0x80;
-
-       if ((phydev->advertising & SUPPORTED_1000baseKX_Full) ||
-           (phydev->advertising & SUPPORTED_2500baseX_Full))
-               ret |= 0x20;
-       else
-               ret &= ~0x20;
-
-       phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1, ret);
-
-       /* Set up Advertisement register 1 last */
-       ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
-       if (ret < 0)
-               return ret;
-
-       if (phydev->advertising & SUPPORTED_Pause)
-               ret |= 0x400;
-       else
-               ret &= ~0x400;
-
-       if (phydev->advertising & SUPPORTED_Asym_Pause)
-               ret |= 0x800;
-       else
-               ret &= ~0x800;
-
-       /* We don't intend to perform XNP */
-       ret &= ~XNP_NP_EXCHANGE;
-
-       phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE, ret);
-
-       return 0;
-}
-
-static int amd_xgbe_phy_soft_reset(struct phy_device *phydev)
-{
-       int count, ret;
-
-       ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
-       if (ret < 0)
-               return ret;
-
-       ret |= MDIO_CTRL1_RESET;
-       phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
-
-       count = 50;
-       do {
-               msleep(20);
-               ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
-               if (ret < 0)
-                       return ret;
-       } while ((ret & MDIO_CTRL1_RESET) && --count);
-
-       if (ret & MDIO_CTRL1_RESET)
-               return -ETIMEDOUT;
-
-       /* Disable auto-negotiation for now */
-       ret = amd_xgbe_phy_disable_an(phydev);
-       if (ret < 0)
-               return ret;
-
-       /* Clear auto-negotiation interrupts */
-       phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
-
-       return 0;
-}
-
-static int amd_xgbe_phy_config_init(struct phy_device *phydev)
-{
-       struct amd_xgbe_phy_priv *priv = phydev->priv;
-       struct net_device *netdev = phydev->attached_dev;
-       int ret;
-
-       if (!priv->an_irq_allocated) {
-               /* Allocate the auto-negotiation workqueue and interrupt */
-               snprintf(priv->an_irq_name, sizeof(priv->an_irq_name) - 1,
-                        "%s-pcs", netdev_name(netdev));
-
-               priv->an_workqueue =
-                       create_singlethread_workqueue(priv->an_irq_name);
-               if (!priv->an_workqueue) {
-                       netdev_err(netdev, "phy workqueue creation failed\n");
-                       return -ENOMEM;
-               }
-
-               ret = devm_request_irq(priv->dev, priv->an_irq,
-                                      amd_xgbe_an_isr, 0, priv->an_irq_name,
-                                      priv);
-               if (ret) {
-                       netdev_err(netdev, "phy irq request failed\n");
-                       destroy_workqueue(priv->an_workqueue);
-                       return ret;
-               }
-
-               priv->an_irq_allocated = 1;
-       }
-
-       /* Set initial mode - call the mode setting routines
-        * directly to ensure we are properly configured
-        */
-       if (amd_xgbe_phy_use_xgmii_mode(phydev))
-               ret = amd_xgbe_phy_xgmii_mode(phydev);
-       else if (amd_xgbe_phy_use_gmii_mode(phydev))
-               ret = amd_xgbe_phy_gmii_mode(phydev);
-       else if (amd_xgbe_phy_use_gmii_2500_mode(phydev))
-               ret = amd_xgbe_phy_gmii_2500_mode(phydev);
-       else
-               ret = -EINVAL;
-       if (ret < 0)
-               return ret;
-
-       /* Set up advertisement registers based on current settings */
-       ret = amd_xgbe_an_init(phydev);
-       if (ret)
-               return ret;
-
-       /* Enable auto-negotiation interrupts */
-       phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INTMASK, 0x07);
-
-       return 0;
-}
-
-static int amd_xgbe_phy_setup_forced(struct phy_device *phydev)
-{
-       int ret;
-
-       /* Disable auto-negotiation */
-       ret = amd_xgbe_phy_disable_an(phydev);
-       if (ret < 0)
-               return ret;
-
-       /* Validate/Set specified speed */
-       switch (phydev->speed) {
-       case SPEED_10000:
-               ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KR);
-               break;
-
-       case SPEED_2500:
-       case SPEED_1000:
-               ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KX);
-               break;
-
-       default:
-               ret = -EINVAL;
-       }
-
-       if (ret < 0)
-               return ret;
-
-       /* Validate duplex mode */
-       if (phydev->duplex != DUPLEX_FULL)
-               return -EINVAL;
-
-       phydev->pause = 0;
-       phydev->asym_pause = 0;
-
-       return 0;
-}
-
-static int __amd_xgbe_phy_config_aneg(struct phy_device *phydev)
-{
-       struct amd_xgbe_phy_priv *priv = phydev->priv;
-       u32 mmd_mask = phydev->c45_ids.devices_in_package;
-       int ret;
-
-       if (phydev->autoneg != AUTONEG_ENABLE)
-               return amd_xgbe_phy_setup_forced(phydev);
-
-       /* Make sure we have the AN MMD present */
-       if (!(mmd_mask & MDIO_DEVS_AN))
-               return -EINVAL;
-
-       /* Disable auto-negotiation interrupt */
-       disable_irq(priv->an_irq);
-
-       /* Start auto-negotiation in a supported mode */
-       if (phydev->advertising & SUPPORTED_10000baseKR_Full)
-               ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KR);
-       else if ((phydev->advertising & SUPPORTED_1000baseKX_Full) ||
-                (phydev->advertising & SUPPORTED_2500baseX_Full))
-               ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KX);
-       else
-               ret = -EINVAL;
-       if (ret < 0) {
-               enable_irq(priv->an_irq);
-               return ret;
-       }
-
-       /* Disable and stop any in-progress auto-negotiation */
-       ret = amd_xgbe_phy_disable_an(phydev);
-       if (ret < 0)
-               return ret;
-
-       /* Clear any auto-negotiation interrupts */
-       phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
-
-       priv->an_result = AMD_XGBE_AN_READY;
-       priv->an_state = AMD_XGBE_AN_READY;
-       priv->kr_state = AMD_XGBE_RX_BPA;
-       priv->kx_state = AMD_XGBE_RX_BPA;
-
-       /* Re-enable auto-negotiation interrupt */
-       enable_irq(priv->an_irq);
-
-       /* Set up advertisement registers based on current settings */
-       ret = amd_xgbe_an_init(phydev);
-       if (ret)
-               return ret;
-
-       /* Enable and start auto-negotiation */
-       return amd_xgbe_phy_restart_an(phydev);
-}
-
-static int amd_xgbe_phy_config_aneg(struct phy_device *phydev)
-{
-       struct amd_xgbe_phy_priv *priv = phydev->priv;
-       int ret;
-
-       mutex_lock(&priv->an_mutex);
-
-       ret = __amd_xgbe_phy_config_aneg(phydev);
-
-       mutex_unlock(&priv->an_mutex);
-
-       return ret;
-}
-
-static int amd_xgbe_phy_aneg_done(struct phy_device *phydev)
-{
-       struct amd_xgbe_phy_priv *priv = phydev->priv;
-
-       return (priv->an_result == AMD_XGBE_AN_COMPLETE);
-}
-
-static int amd_xgbe_phy_update_link(struct phy_device *phydev)
-{
-       struct amd_xgbe_phy_priv *priv = phydev->priv;
-       int ret;
-
-       /* If we're doing auto-negotiation, don't report link down */
-       if (priv->an_state != AMD_XGBE_AN_READY) {
-               phydev->link = 1;
-               return 0;
-       }
-
-       /* Link status is latched low, so read once to clear
-        * and then read again to get current state
-        */
-       ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1);
-       if (ret < 0)
-               return ret;
-
-       ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1);
-       if (ret < 0)
-               return ret;
-
-       phydev->link = (ret & MDIO_STAT1_LSTATUS) ? 1 : 0;
-
-       return 0;
-}
-
-static int amd_xgbe_phy_read_status(struct phy_device *phydev)
-{
-       struct amd_xgbe_phy_priv *priv = phydev->priv;
-       u32 mmd_mask = phydev->c45_ids.devices_in_package;
-       int ret, ad_ret, lp_ret;
-
-       ret = amd_xgbe_phy_update_link(phydev);
-       if (ret)
-               return ret;
-
-       if ((phydev->autoneg == AUTONEG_ENABLE) &&
-           !priv->parallel_detect) {
-               if (!(mmd_mask & MDIO_DEVS_AN))
-                       return -EINVAL;
-
-               if (!amd_xgbe_phy_aneg_done(phydev))
-                       return 0;
-
-               /* Compare Advertisement and Link Partner register 1 */
-               ad_ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
-               if (ad_ret < 0)
-                       return ad_ret;
-               lp_ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA);
-               if (lp_ret < 0)
-                       return lp_ret;
-
-               ad_ret &= lp_ret;
-               phydev->pause = (ad_ret & 0x400) ? 1 : 0;
-               phydev->asym_pause = (ad_ret & 0x800) ? 1 : 0;
-
-               /* Compare Advertisement and Link Partner register 2 */
-               ad_ret = phy_read_mmd(phydev, MDIO_MMD_AN,
-                                     MDIO_AN_ADVERTISE + 1);
-               if (ad_ret < 0)
-                       return ad_ret;
-               lp_ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA + 1);
-               if (lp_ret < 0)
-                       return lp_ret;
-
-               ad_ret &= lp_ret;
-               if (ad_ret & 0x80) {
-                       phydev->speed = SPEED_10000;
-                       ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KR);
-                       if (ret)
-                               return ret;
-               } else {
-                       switch (priv->speed_set) {
-                       case AMD_XGBE_PHY_SPEEDSET_1000_10000:
-                               phydev->speed = SPEED_1000;
-                               break;
-
-                       case AMD_XGBE_PHY_SPEEDSET_2500_10000:
-                               phydev->speed = SPEED_2500;
-                               break;
-                       }
-
-                       ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KX);
-                       if (ret)
-                               return ret;
-               }
-
-               phydev->duplex = DUPLEX_FULL;
-       } else {
-               if (amd_xgbe_phy_in_kr_mode(phydev)) {
-                       phydev->speed = SPEED_10000;
-               } else {
-                       switch (priv->speed_set) {
-                       case AMD_XGBE_PHY_SPEEDSET_1000_10000:
-                               phydev->speed = SPEED_1000;
-                               break;
-
-                       case AMD_XGBE_PHY_SPEEDSET_2500_10000:
-                               phydev->speed = SPEED_2500;
-                               break;
-                       }
-               }
-               phydev->duplex = DUPLEX_FULL;
-               phydev->pause = 0;
-               phydev->asym_pause = 0;
-       }
-
-       return 0;
-}
-
-static int amd_xgbe_phy_suspend(struct phy_device *phydev)
-{
-       struct amd_xgbe_phy_priv *priv = phydev->priv;
-       int ret;
-
-       mutex_lock(&phydev->lock);
-
-       ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
-       if (ret < 0)
-               goto unlock;
-
-       priv->lpm_ctrl = ret;
-
-       ret |= MDIO_CTRL1_LPOWER;
-       phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
-
-       ret = 0;
-
-unlock:
-       mutex_unlock(&phydev->lock);
-
-       return ret;
-}
-
-static int amd_xgbe_phy_resume(struct phy_device *phydev)
-{
-       struct amd_xgbe_phy_priv *priv = phydev->priv;
-
-       mutex_lock(&phydev->lock);
-
-       priv->lpm_ctrl &= ~MDIO_CTRL1_LPOWER;
-       phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, priv->lpm_ctrl);
-
-       mutex_unlock(&phydev->lock);
-
-       return 0;
-}
-
-static unsigned int amd_xgbe_phy_resource_count(struct platform_device *pdev,
-                                               unsigned int type)
-{
-       unsigned int count;
-       int i;
-
-       for (i = 0, count = 0; i < pdev->num_resources; i++) {
-               struct resource *r = &pdev->resource[i];
-
-               if (type == resource_type(r))
-                       count++;
-       }
-
-       return count;
-}
-
-static int amd_xgbe_phy_probe(struct phy_device *phydev)
-{
-       struct amd_xgbe_phy_priv *priv;
-       struct platform_device *phy_pdev;
-       struct device *dev, *phy_dev;
-       unsigned int phy_resnum, phy_irqnum;
-       int ret;
-
-       if (!phydev->bus || !phydev->bus->parent)
-               return -EINVAL;
-
-       dev = phydev->bus->parent;
-
-       priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
-       if (!priv)
-               return -ENOMEM;
-
-       priv->pdev = to_platform_device(dev);
-       priv->adev = ACPI_COMPANION(dev);
-       priv->dev = dev;
-       priv->phydev = phydev;
-       mutex_init(&priv->an_mutex);
-       INIT_WORK(&priv->an_irq_work, amd_xgbe_an_irq_work);
-       INIT_WORK(&priv->an_work, amd_xgbe_an_state_machine);
-
-       if (!priv->adev || acpi_disabled) {
-               struct device_node *bus_node;
-               struct device_node *phy_node;
-
-               bus_node = priv->dev->of_node;
-               phy_node = of_parse_phandle(bus_node, "phy-handle", 0);
-               if (!phy_node) {
-                       dev_err(dev, "unable to parse phy-handle\n");
-                       ret = -EINVAL;
-                       goto err_priv;
-               }
-
-               phy_pdev = of_find_device_by_node(phy_node);
-               of_node_put(phy_node);
-
-               if (!phy_pdev) {
-                       dev_err(dev, "unable to obtain phy device\n");
-                       ret = -EINVAL;
-                       goto err_priv;
-               }
-
-               phy_resnum = 0;
-               phy_irqnum = 0;
-       } else {
-               /* In ACPI, the XGBE and PHY resources are grouped
-                * together, with the PHY resources at the end
-                */
-               phy_pdev = priv->pdev;
-               phy_resnum = amd_xgbe_phy_resource_count(phy_pdev,
-                                                        IORESOURCE_MEM) - 3;
-               phy_irqnum = amd_xgbe_phy_resource_count(phy_pdev,
-                                                        IORESOURCE_IRQ) - 1;
-       }
-       phy_dev = &phy_pdev->dev;
-
-       /* Get the device mmio areas */
-       priv->rxtx_res = platform_get_resource(phy_pdev, IORESOURCE_MEM,
-                                              phy_resnum++);
-       priv->rxtx_regs = devm_ioremap_resource(dev, priv->rxtx_res);
-       if (IS_ERR(priv->rxtx_regs)) {
-               dev_err(dev, "rxtx ioremap failed\n");
-               ret = PTR_ERR(priv->rxtx_regs);
-               goto err_put;
-       }
-
-       priv->sir0_res = platform_get_resource(phy_pdev, IORESOURCE_MEM,
-                                              phy_resnum++);
-       priv->sir0_regs = devm_ioremap_resource(dev, priv->sir0_res);
-       if (IS_ERR(priv->sir0_regs)) {
-               dev_err(dev, "sir0 ioremap failed\n");
-               ret = PTR_ERR(priv->sir0_regs);
-               goto err_rxtx;
-       }
-
-       priv->sir1_res = platform_get_resource(phy_pdev, IORESOURCE_MEM,
-                                              phy_resnum++);
-       priv->sir1_regs = devm_ioremap_resource(dev, priv->sir1_res);
-       if (IS_ERR(priv->sir1_regs)) {
-               dev_err(dev, "sir1 ioremap failed\n");
-               ret = PTR_ERR(priv->sir1_regs);
-               goto err_sir0;
-       }
-
-       /* Get the auto-negotiation interrupt */
-       ret = platform_get_irq(phy_pdev, phy_irqnum);
-       if (ret < 0) {
-               dev_err(dev, "platform_get_irq failed\n");
-               goto err_sir1;
-       }
-       priv->an_irq = ret;
-
-       /* Get the device speed set property */
-       ret = device_property_read_u32(phy_dev, XGBE_PHY_SPEEDSET_PROPERTY,
-                                      &priv->speed_set);
-       if (ret) {
-               dev_err(dev, "invalid %s property\n",
-                       XGBE_PHY_SPEEDSET_PROPERTY);
-               goto err_sir1;
-       }
-
-       switch (priv->speed_set) {
-       case AMD_XGBE_PHY_SPEEDSET_1000_10000:
-       case AMD_XGBE_PHY_SPEEDSET_2500_10000:
-               break;
-       default:
-               dev_err(dev, "invalid %s property\n",
-                       XGBE_PHY_SPEEDSET_PROPERTY);
-               ret = -EINVAL;
-               goto err_sir1;
-       }
-
-       if (device_property_present(phy_dev, XGBE_PHY_BLWC_PROPERTY)) {
-               ret = device_property_read_u32_array(phy_dev,
-                                                    XGBE_PHY_BLWC_PROPERTY,
-                                                    priv->serdes_blwc,
-                                                    XGBE_PHY_SPEEDS);
-               if (ret) {
-                       dev_err(dev, "invalid %s property\n",
-                               XGBE_PHY_BLWC_PROPERTY);
-                       goto err_sir1;
-               }
-       } else {
-               memcpy(priv->serdes_blwc, amd_xgbe_phy_serdes_blwc,
-                      sizeof(priv->serdes_blwc));
-       }
-
-       if (device_property_present(phy_dev, XGBE_PHY_CDR_RATE_PROPERTY)) {
-               ret = device_property_read_u32_array(phy_dev,
-                                                    XGBE_PHY_CDR_RATE_PROPERTY,
-                                                    priv->serdes_cdr_rate,
-                                                    XGBE_PHY_SPEEDS);
-               if (ret) {
-                       dev_err(dev, "invalid %s property\n",
-                               XGBE_PHY_CDR_RATE_PROPERTY);
-                       goto err_sir1;
-               }
-       } else {
-               memcpy(priv->serdes_cdr_rate, amd_xgbe_phy_serdes_cdr_rate,
-                      sizeof(priv->serdes_cdr_rate));
-       }
-
-       if (device_property_present(phy_dev, XGBE_PHY_PQ_SKEW_PROPERTY)) {
-               ret = device_property_read_u32_array(phy_dev,
-                                                    XGBE_PHY_PQ_SKEW_PROPERTY,
-                                                    priv->serdes_pq_skew,
-                                                    XGBE_PHY_SPEEDS);
-               if (ret) {
-                       dev_err(dev, "invalid %s property\n",
-                               XGBE_PHY_PQ_SKEW_PROPERTY);
-                       goto err_sir1;
-               }
-       } else {
-               memcpy(priv->serdes_pq_skew, amd_xgbe_phy_serdes_pq_skew,
-                      sizeof(priv->serdes_pq_skew));
-       }
-
-       if (device_property_present(phy_dev, XGBE_PHY_TX_AMP_PROPERTY)) {
-               ret = device_property_read_u32_array(phy_dev,
-                                                    XGBE_PHY_TX_AMP_PROPERTY,
-                                                    priv->serdes_tx_amp,
-                                                    XGBE_PHY_SPEEDS);
-               if (ret) {
-                       dev_err(dev, "invalid %s property\n",
-                               XGBE_PHY_TX_AMP_PROPERTY);
-                       goto err_sir1;
-               }
-       } else {
-               memcpy(priv->serdes_tx_amp, amd_xgbe_phy_serdes_tx_amp,
-                      sizeof(priv->serdes_tx_amp));
-       }
-
-       if (device_property_present(phy_dev, XGBE_PHY_DFE_CFG_PROPERTY)) {
-               ret = device_property_read_u32_array(phy_dev,
-                                                    XGBE_PHY_DFE_CFG_PROPERTY,
-                                                    priv->serdes_dfe_tap_cfg,
-                                                    XGBE_PHY_SPEEDS);
-               if (ret) {
-                       dev_err(dev, "invalid %s property\n",
-                               XGBE_PHY_DFE_CFG_PROPERTY);
-                       goto err_sir1;
-               }
-       } else {
-               memcpy(priv->serdes_dfe_tap_cfg,
-                      amd_xgbe_phy_serdes_dfe_tap_cfg,
-                      sizeof(priv->serdes_dfe_tap_cfg));
-       }
-
-       if (device_property_present(phy_dev, XGBE_PHY_DFE_ENA_PROPERTY)) {
-               ret = device_property_read_u32_array(phy_dev,
-                                                    XGBE_PHY_DFE_ENA_PROPERTY,
-                                                    priv->serdes_dfe_tap_ena,
-                                                    XGBE_PHY_SPEEDS);
-               if (ret) {
-                       dev_err(dev, "invalid %s property\n",
-                               XGBE_PHY_DFE_ENA_PROPERTY);
-                       goto err_sir1;
-               }
-       } else {
-               memcpy(priv->serdes_dfe_tap_ena,
-                      amd_xgbe_phy_serdes_dfe_tap_ena,
-                      sizeof(priv->serdes_dfe_tap_ena));
-       }
-
-       /* Initialize supported features */
-       phydev->supported = SUPPORTED_Autoneg;
-       phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
-       phydev->supported |= SUPPORTED_Backplane;
-       phydev->supported |= SUPPORTED_10000baseKR_Full;
-       switch (priv->speed_set) {
-       case AMD_XGBE_PHY_SPEEDSET_1000_10000:
-               phydev->supported |= SUPPORTED_1000baseKX_Full;
-               break;
-       case AMD_XGBE_PHY_SPEEDSET_2500_10000:
-               phydev->supported |= SUPPORTED_2500baseX_Full;
-               break;
-       }
-
-       ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_ABILITY);
-       if (ret < 0)
-               return ret;
-       priv->fec_ability = ret & XGBE_PHY_FEC_MASK;
-       if (priv->fec_ability & XGBE_PHY_FEC_ENABLE)
-               phydev->supported |= SUPPORTED_10000baseR_FEC;
-
-       phydev->advertising = phydev->supported;
-
-       phydev->priv = priv;
-
-       if (!priv->adev || acpi_disabled)
-               platform_device_put(phy_pdev);
-
-       return 0;
-
-err_sir1:
-       devm_iounmap(dev, priv->sir1_regs);
-       devm_release_mem_region(dev, priv->sir1_res->start,
-                               resource_size(priv->sir1_res));
-
-err_sir0:
-       devm_iounmap(dev, priv->sir0_regs);
-       devm_release_mem_region(dev, priv->sir0_res->start,
-                               resource_size(priv->sir0_res));
-
-err_rxtx:
-       devm_iounmap(dev, priv->rxtx_regs);
-       devm_release_mem_region(dev, priv->rxtx_res->start,
-                               resource_size(priv->rxtx_res));
-
-err_put:
-       if (!priv->adev || acpi_disabled)
-               platform_device_put(phy_pdev);
-
-err_priv:
-       devm_kfree(dev, priv);
-
-       return ret;
-}
-
-static void amd_xgbe_phy_remove(struct phy_device *phydev)
-{
-       struct amd_xgbe_phy_priv *priv = phydev->priv;
-       struct device *dev = priv->dev;
-
-       if (priv->an_irq_allocated) {
-               devm_free_irq(dev, priv->an_irq, priv);
-
-               flush_workqueue(priv->an_workqueue);
-               destroy_workqueue(priv->an_workqueue);
-       }
-
-       /* Release resources */
-       devm_iounmap(dev, priv->sir1_regs);
-       devm_release_mem_region(dev, priv->sir1_res->start,
-                               resource_size(priv->sir1_res));
-
-       devm_iounmap(dev, priv->sir0_regs);
-       devm_release_mem_region(dev, priv->sir0_res->start,
-                               resource_size(priv->sir0_res));
-
-       devm_iounmap(dev, priv->rxtx_regs);
-       devm_release_mem_region(dev, priv->rxtx_res->start,
-                               resource_size(priv->rxtx_res));
-
-       devm_kfree(dev, priv);
-}
-
-static int amd_xgbe_match_phy_device(struct phy_device *phydev)
-{
-       return phydev->c45_ids.device_ids[MDIO_MMD_PCS] == XGBE_PHY_ID;
-}
-
-static struct phy_driver amd_xgbe_phy_driver[] = {
-       {
-               .phy_id                 = XGBE_PHY_ID,
-               .phy_id_mask            = XGBE_PHY_MASK,
-               .name                   = "AMD XGBE PHY",
-               .features               = 0,
-               .flags                  = PHY_IS_INTERNAL,
-               .probe                  = amd_xgbe_phy_probe,
-               .remove                 = amd_xgbe_phy_remove,
-               .soft_reset             = amd_xgbe_phy_soft_reset,
-               .config_init            = amd_xgbe_phy_config_init,
-               .suspend                = amd_xgbe_phy_suspend,
-               .resume                 = amd_xgbe_phy_resume,
-               .config_aneg            = amd_xgbe_phy_config_aneg,
-               .aneg_done              = amd_xgbe_phy_aneg_done,
-               .read_status            = amd_xgbe_phy_read_status,
-               .match_phy_device       = amd_xgbe_match_phy_device,
-               .driver                 = {
-                       .owner = THIS_MODULE,
-               },
-       },
-};
-
-module_phy_driver(amd_xgbe_phy_driver);
-
-static struct mdio_device_id __maybe_unused amd_xgbe_phy_ids[] = {
-       { XGBE_PHY_ID, XGBE_PHY_MASK },
-       { }
-};
-MODULE_DEVICE_TABLE(mdio, amd_xgbe_phy_ids);
index b5dc59de094eef06838d4601cacd9dbeaba04a6a..4dea85bfc545b86d5874031531a9ce4963facbe2 100644 (file)
@@ -136,8 +136,8 @@ static int bcm7xxx_28nm_d0_afe_config_init(struct phy_device *phydev)
        /* AFE_RX_LP_COUNTER, set RX bandwidth to maximum */
        phy_write_misc(phydev, AFE_RX_LP_COUNTER, 0x7fc0);
 
-       /* AFE_TX_CONFIG, set 1000BT Cfeed=110 for all ports */
-       phy_write_misc(phydev, AFE_TX_CONFIG, 0x0061);
+       /* AFE_TX_CONFIG, set 100BT Cfeed=011 to improve rise/fall time */
+       phy_write_misc(phydev, AFE_TX_CONFIG, 0x431);
 
        /* AFE_VDCA_ICTRL_0, set Iq=1101 instead of 0111 for AB symmetry */
        phy_write_misc(phydev, AFE_VDCA_ICTRL_0, 0xa7da);
@@ -167,6 +167,9 @@ static int bcm7xxx_28nm_e0_plus_afe_config_init(struct phy_device *phydev)
        /* AFE_RXCONFIG_1, provide more margin for INL/DNL measurement */
        phy_write_misc(phydev, AFE_RXCONFIG_1, 0x9b2f);
 
+       /* AFE_TX_CONFIG, set 100BT Cfeed=011 to improve rise/fall time */
+       phy_write_misc(phydev, AFE_TX_CONFIG, 0x431);
+
        /* AFE_VDCA_ICTRL_0, set Iq=1101 instead of 0111 for AB symmetry */
        phy_write_misc(phydev, AFE_VDCA_ICTRL_0, 0xa7da);
 
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
new file mode 100644 (file)
index 0000000..c7a12e2
--- /dev/null
@@ -0,0 +1,239 @@
+/*
+ * Driver for the Texas Instruments DP83867 PHY
+ *
+ * Copyright (C) 2015 Texas Instruments Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/ethtool.h>
+#include <linux/kernel.h>
+#include <linux/mii.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy.h>
+
+#include <dt-bindings/net/ti-dp83867.h>
+
+#define DP83867_PHY_ID         0x2000a231
+#define DP83867_DEVADDR                0x1f
+
+#define MII_DP83867_PHYCTRL    0x10
+#define MII_DP83867_MICR       0x12
+#define MII_DP83867_ISR                0x13
+#define DP83867_CTRL           0x1f
+
+/* Extended Registers */
+#define DP83867_RGMIICTL       0x0032
+#define DP83867_RGMIIDCTL      0x0086
+
+#define DP83867_SW_RESET       BIT(15)
+#define DP83867_SW_RESTART     BIT(14)
+
+/* MICR Interrupt bits */
+#define MII_DP83867_MICR_AN_ERR_INT_EN         BIT(15)
+#define MII_DP83867_MICR_SPEED_CHNG_INT_EN     BIT(14)
+#define MII_DP83867_MICR_DUP_MODE_CHNG_INT_EN  BIT(13)
+#define MII_DP83867_MICR_PAGE_RXD_INT_EN       BIT(12)
+#define MII_DP83867_MICR_AUTONEG_COMP_INT_EN   BIT(11)
+#define MII_DP83867_MICR_LINK_STS_CHNG_INT_EN  BIT(10)
+#define MII_DP83867_MICR_FALSE_CARRIER_INT_EN  BIT(8)
+#define MII_DP83867_MICR_SLEEP_MODE_CHNG_INT_EN        BIT(4)
+#define MII_DP83867_MICR_WOL_INT_EN            BIT(3)
+#define MII_DP83867_MICR_XGMII_ERR_INT_EN      BIT(2)
+#define MII_DP83867_MICR_POL_CHNG_INT_EN       BIT(1)
+#define MII_DP83867_MICR_JABBER_INT_EN         BIT(0)
+
+/* RGMIICTL bits */
+#define DP83867_RGMII_TX_CLK_DELAY_EN          BIT(1)
+#define DP83867_RGMII_RX_CLK_DELAY_EN          BIT(0)
+
+/* PHY CTRL bits */
+#define DP83867_PHYCR_FIFO_DEPTH_SHIFT         14
+
+/* RGMIIDCTL bits */
+#define DP83867_RGMII_TX_CLK_DELAY_SHIFT       4
+
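+/* devicetree-provided configuration: the RGMII internal delay selectors
+ * and the FIFO depth are read in dp83867_of_init() and programmed into
+ * the PHY by dp83867_config_init()
+ */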
+struct dp83867_private {
+       u32 rx_id_delay;
+       u32 tx_id_delay;
+       u32 fifo_depth;
+};
+
+static int dp83867_ack_interrupt(struct phy_device *phydev)
+{
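+       /* reading the interrupt status register acks (clears) any
+        * pending interrupts
+        */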
+       int err = phy_read(phydev, MII_DP83867_ISR);
+
+       if (err < 0)
+               return err;
+
+       return 0;
+}
+
+static int dp83867_config_intr(struct phy_device *phydev)
+{
+       int micr_status;
+
+       if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+               micr_status = phy_read(phydev, MII_DP83867_MICR);
+               if (micr_status < 0)
+                       return micr_status;
+
+               micr_status |=
+                       (MII_DP83867_MICR_AN_ERR_INT_EN |
+                       MII_DP83867_MICR_SPEED_CHNG_INT_EN |
+                       MII_DP83867_MICR_DUP_MODE_CHNG_INT_EN |
+                       MII_DP83867_MICR_SLEEP_MODE_CHNG_INT_EN);
+
+               return phy_write(phydev, MII_DP83867_MICR, micr_status);
+       }
+
+       micr_status = 0x0;
+       return phy_write(phydev, MII_DP83867_MICR, micr_status);
+}
+
+#ifdef CONFIG_OF_MDIO
+static int dp83867_of_init(struct phy_device *phydev)
+{
+       struct dp83867_private *dp83867 = phydev->priv;
+       struct device *dev = &phydev->dev;
+       struct device_node *of_node = dev->of_node;
+       int ret;
+
+       if (!of_node && dev->parent->of_node)
+               of_node = dev->parent->of_node;
+
+       if (!of_node)
+               return -ENODEV;
+
+       ret = of_property_read_u32(of_node, "ti,rx-internal-delay",
+                                  &dp83867->rx_id_delay);
+       if (ret)
+               return ret;
+
+       ret = of_property_read_u32(of_node, "ti,tx-internal-delay",
+                                  &dp83867->tx_id_delay);
+       if (ret)
+               return ret;
+
+       ret = of_property_read_u32(of_node, "ti,fifo-depth",
+                                  &dp83867->fifo_depth);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+#else
+static int dp83867_of_init(struct phy_device *phydev)
+{
+       return 0;
+}
+#endif /* CONFIG_OF_MDIO */
+
+static int dp83867_config_init(struct phy_device *phydev)
+{
+       struct dp83867_private *dp83867;
+       int ret;
+       u16 val, delay;
+
+       if (!phydev->priv) {
+               dp83867 = devm_kzalloc(&phydev->dev, sizeof(*dp83867),
+                                      GFP_KERNEL);
+               if (!dp83867)
+                       return -ENOMEM;
+
+               phydev->priv = dp83867;
+               ret = dp83867_of_init(phydev);
+               if (ret)
+                       return ret;
+       } else {
+               dp83867 = (struct dp83867_private *)phydev->priv;
+       }
+
+       if (phy_interface_is_rgmii(phydev)) {
+               ret = phy_write(phydev, MII_DP83867_PHYCTRL,
+                       (dp83867->fifo_depth << DP83867_PHYCR_FIFO_DEPTH_SHIFT));
+               if (ret)
+                       return ret;
+       }
+
+       if ((phydev->interface >= PHY_INTERFACE_MODE_RGMII_ID) &&
+           (phydev->interface <= PHY_INTERFACE_MODE_RGMII_TXID)) {
+               val = phy_read_mmd_indirect(phydev, DP83867_RGMIICTL,
+                                           DP83867_DEVADDR, phydev->addr);
+
+               if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
+                       val |= (DP83867_RGMII_TX_CLK_DELAY_EN | DP83867_RGMII_RX_CLK_DELAY_EN);
+
+               if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
+                       val |= DP83867_RGMII_TX_CLK_DELAY_EN;
+
+               if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
+                       val |= DP83867_RGMII_RX_CLK_DELAY_EN;
+
+               phy_write_mmd_indirect(phydev, DP83867_RGMIICTL,
+                                      DP83867_DEVADDR, phydev->addr, val);
+
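+               /* RGMIIDCTL packs both delay selectors: RX in the low bits,
+                * TX shifted up by DP83867_RGMII_TX_CLK_DELAY_SHIFT
+                */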
+               delay = (dp83867->rx_id_delay |
+                       (dp83867->tx_id_delay << DP83867_RGMII_TX_CLK_DELAY_SHIFT));
+
+               phy_write_mmd_indirect(phydev, DP83867_RGMIIDCTL,
+                                      DP83867_DEVADDR, phydev->addr, delay);
+       }
+
+       return 0;
+}
+
+static int dp83867_phy_reset(struct phy_device *phydev)
+{
+       int err;
+
+       err = phy_write(phydev, DP83867_CTRL, DP83867_SW_RESET);
+       if (err < 0)
+               return err;
+
+       return dp83867_config_init(phydev);
+}
+
+static struct phy_driver dp83867_driver[] = {
+       {
+               .phy_id         = DP83867_PHY_ID,
+               .phy_id_mask    = 0xfffffff0,
+               .name           = "TI DP83867",
+               .features       = PHY_GBIT_FEATURES,
+               .flags          = PHY_HAS_INTERRUPT,
+
+               .config_init    = dp83867_config_init,
+               .soft_reset     = dp83867_phy_reset,
+
+               /* IRQ related */
+               .ack_interrupt  = dp83867_ack_interrupt,
+               .config_intr    = dp83867_config_intr,
+
+               .config_aneg    = genphy_config_aneg,
+               .read_status    = genphy_read_status,
+               .suspend        = genphy_suspend,
+               .resume         = genphy_resume,
+
+               .driver         = {.owner = THIS_MODULE,}
+       },
+};
+module_phy_driver(dp83867_driver);
+
+static struct mdio_device_id __maybe_unused dp83867_tbl[] = {
+       { DP83867_PHY_ID, 0xfffffff0 },
+       { }
+};
+
+MODULE_DEVICE_TABLE(mdio, dp83867_tbl);
+
+MODULE_DESCRIPTION("Texas Instruments DP83867 PHY driver");
+MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>");
+MODULE_LICENSE("GPL");
index 8644f039d92274399fe9d3c1389474c07fe2df1c..0dbc445a5fa0f81b4dd1f3bc4bde663d7208116d 100644 (file)
@@ -139,10 +139,7 @@ static int ip1001_config_init(struct phy_device *phydev)
        if (c < 0)
                return c;
 
-       if ((phydev->interface == PHY_INTERFACE_MODE_RGMII) ||
-           (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) ||
-           (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
-           (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
+       if (phy_interface_is_rgmii(phydev)) {
 
                c = phy_read(phydev, IP10XX_SPEC_CTRL_STATUS);
                if (c < 0)
index 1b1698f98818219c623038c8781dc2e806a3070d..f721444c2b0a9413dd0bac8c7e61099cf6d3789b 100644 (file)
@@ -317,10 +317,7 @@ static int m88e1121_config_aneg(struct phy_device *phydev)
        if (err < 0)
                return err;
 
-       if ((phydev->interface == PHY_INTERFACE_MODE_RGMII) ||
-           (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) ||
-           (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
-           (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
+       if (phy_interface_is_rgmii(phydev)) {
 
                mscr = phy_read(phydev, MII_88E1121_PHY_MSCR_REG) &
                        MII_88E1121_PHY_MSCR_DELAY_MASK;
@@ -469,10 +466,7 @@ static int m88e1111_config_init(struct phy_device *phydev)
        int err;
        int temp;
 
-       if ((phydev->interface == PHY_INTERFACE_MODE_RGMII) ||
-           (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) ||
-           (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
-           (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
+       if (phy_interface_is_rgmii(phydev)) {
 
                temp = phy_read(phydev, MII_M1111_PHY_EXT_CR);
                if (temp < 0)
index daec9b05d168ca4f0f103f3638fcc3259e9ea304..61a543c788cc1b67fafe3eceab17b2b4d20af413 100644 (file)
@@ -165,8 +165,11 @@ static int mdiobb_read(struct mii_bus *bus, int phy, int reg)
 
        ctrl->ops->set_mdio_dir(ctrl, 0);
 
-       /* check the turnaround bit: the PHY should be driving it to zero */
-       if (mdiobb_get_bit(ctrl) != 0) {
+       /* check the turnaround bit: the PHY should drive it to zero; skip
+        * the check if this PHY is listed in phy_ignore_ta_mask as broken-TA
+        */
+       if (mdiobb_get_bit(ctrl) != 0 &&
+           !(bus->phy_ignore_ta_mask & (1 << phy))) {
                /* PHY didn't drive TA low -- flush any bits it
                 * may be trying to send.
                 */
index 53d18150f4e291bb4bb047a18a9877d3a82a08f4..7dc21e56a7aa805c42f1a1624a704d807efa08f5 100644 (file)
@@ -158,6 +158,7 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev,
        new_bus->name = "GPIO Bitbanged MDIO",
 
        new_bus->phy_mask = pdata->phy_mask;
+       new_bus->phy_ignore_ta_mask = pdata->phy_ignore_ta_mask;
        new_bus->irq = pdata->irqs;
        new_bus->parent = dev;
 
index ebdc357c513167515baef710ba56d8b6b8e57cb9..499185eaf413ba08b1447fbebf1f60080c643329 100644 (file)
@@ -288,9 +288,10 @@ static int kszphy_config_init(struct phy_device *phydev)
 }
 
 static int ksz9021_load_values_from_of(struct phy_device *phydev,
-                                      struct device_node *of_node, u16 reg,
-                                      char *field1, char *field2,
-                                      char *field3, char *field4)
+                                      const struct device_node *of_node,
+                                      u16 reg,
+                                      const char *field1, const char *field2,
+                                      const char *field3, const char *field4)
 {
        int val1 = -1;
        int val2 = -2;
@@ -336,8 +337,8 @@ static int ksz9021_load_values_from_of(struct phy_device *phydev,
 
 static int ksz9021_config_init(struct phy_device *phydev)
 {
-       struct device *dev = &phydev->dev;
-       struct device_node *of_node = dev->of_node;
+       const struct device *dev = &phydev->dev;
+       const struct device_node *of_node = dev->of_node;
 
        if (!of_node && dev->parent->of_node)
                of_node = dev->parent->of_node;
@@ -365,6 +366,11 @@ static int ksz9021_config_init(struct phy_device *phydev)
 #define KSZ9031_PS_TO_REG              60
 
 /* Extended registers */
+/* MMD Address 0x0 */
+#define MII_KSZ9031RN_FLP_BURST_TX_LO  3
+#define MII_KSZ9031RN_FLP_BURST_TX_HI  4
+
+/* MMD Address 0x2 */
 #define MII_KSZ9031RN_CONTROL_PAD_SKEW 4
 #define MII_KSZ9031RN_RX_DATA_PAD_SKEW 5
 #define MII_KSZ9031RN_TX_DATA_PAD_SKEW 6
@@ -389,9 +395,9 @@ static int ksz9031_extended_read(struct phy_device *phydev,
 }
 
 static int ksz9031_of_load_skew_values(struct phy_device *phydev,
-                                      struct device_node *of_node,
+                                      const struct device_node *of_node,
                                       u16 reg, size_t field_sz,
-                                      char *field[], u8 numfields)
+                                      const char *field[], u8 numfields)
 {
        int val[4] = {-1, -2, -3, -4};
        int matches = 0;
@@ -425,20 +431,36 @@ static int ksz9031_of_load_skew_values(struct phy_device *phydev,
        return ksz9031_extended_write(phydev, OP_DATA, 2, reg, newval);
 }
 
+static int ksz9031_center_flp_timing(struct phy_device *phydev)
+{
+       int result;
+
+       /* Center KSZ9031RNX FLP timing at 16ms. */
+       result = ksz9031_extended_write(phydev, OP_DATA, 0,
+                                       MII_KSZ9031RN_FLP_BURST_TX_HI, 0x0006);
+       if (result)
+               return result;
+
+       result = ksz9031_extended_write(phydev, OP_DATA, 0,
+                                       MII_KSZ9031RN_FLP_BURST_TX_LO, 0x1A80);
+       if (result)
+               return result;
+
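+       /* restart auto-negotiation so the updated FLP timing takes effect */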
+       return genphy_restart_aneg(phydev);
+}
+
 static int ksz9031_config_init(struct phy_device *phydev)
 {
-       struct device *dev = &phydev->dev;
-       struct device_node *of_node = dev->of_node;
-       char *clk_skews[2] = {"rxc-skew-ps", "txc-skew-ps"};
-       char *rx_data_skews[4] = {
+       const struct device *dev = &phydev->dev;
+       const struct device_node *of_node = dev->of_node;
+       static const char *clk_skews[2] = {"rxc-skew-ps", "txc-skew-ps"};
+       static const char *rx_data_skews[4] = {
                "rxd0-skew-ps", "rxd1-skew-ps",
                "rxd2-skew-ps", "rxd3-skew-ps"
        };
-       char *tx_data_skews[4] = {
+       static const char *tx_data_skews[4] = {
                "txd0-skew-ps", "txd1-skew-ps",
                "txd2-skew-ps", "txd3-skew-ps"
        };
-       char *control_skews[2] = {"txen-skew-ps", "rxdv-skew-ps"};
+       static const char *control_skews[2] = {"txen-skew-ps", "rxdv-skew-ps"};
 
        if (!of_node && dev->parent->of_node)
                of_node = dev->parent->of_node;
@@ -460,7 +482,8 @@ static int ksz9031_config_init(struct phy_device *phydev)
                                MII_KSZ9031RN_TX_DATA_PAD_SKEW, 4,
                                tx_data_skews, 4);
        }
-       return 0;
+
+       return ksz9031_center_flp_timing(phydev);
 }
 
 #define KSZ8873MLL_GLOBAL_CONTROL_4    0x06
@@ -519,7 +542,7 @@ ksz9021_wr_mmd_phyreg(struct phy_device *phydev, int ptrad, int devnum,
 static int kszphy_probe(struct phy_device *phydev)
 {
        const struct kszphy_type *type = phydev->drv->driver_data;
-       struct device_node *np = phydev->dev.of_node;
+       const struct device_node *np = phydev->dev.of_node;
        struct kszphy_priv *priv;
        struct clk *clk;
        int ret;
index 47cd578052fc2328169fcc9df304be79e7af9ac5..b2197b506acbe86f3540d5ae1d8334129c2bbe57 100644 (file)
@@ -58,6 +58,31 @@ static const char *phy_speed_to_str(int speed)
        }
 }
 
+#define PHY_STATE_STR(_state)                  \
+       case PHY_##_state:                      \
+               return __stringify(_state);     \
+
+static const char *phy_state_to_str(enum phy_state st)
+{
+       switch (st) {
+       PHY_STATE_STR(DOWN)
+       PHY_STATE_STR(STARTING)
+       PHY_STATE_STR(READY)
+       PHY_STATE_STR(PENDING)
+       PHY_STATE_STR(UP)
+       PHY_STATE_STR(AN)
+       PHY_STATE_STR(RUNNING)
+       PHY_STATE_STR(NOLINK)
+       PHY_STATE_STR(FORCING)
+       PHY_STATE_STR(CHANGELINK)
+       PHY_STATE_STR(HALTED)
+       PHY_STATE_STR(RESUMING)
+       }
+
+       return NULL;
+}
+
 /**
  * phy_print_status - Convenience function to print out the current phy status
  * @phydev: the phy_device struct
@@ -784,10 +809,13 @@ void phy_state_machine(struct work_struct *work)
        struct phy_device *phydev =
                        container_of(dwork, struct phy_device, state_queue);
        bool needs_aneg = false, do_suspend = false;
+       enum phy_state old_state;
        int err = 0;
 
        mutex_lock(&phydev->lock);
 
+       old_state = phydev->state;
+
        if (phydev->drv->link_change_notify)
                phydev->drv->link_change_notify(phydev);
 
@@ -952,6 +980,9 @@ void phy_state_machine(struct work_struct *work)
        if (err < 0)
                phy_error(phydev);
 
+       dev_dbg(&phydev->dev, "PHY state change %s -> %s\n",
+               phy_state_to_str(old_state), phy_state_to_str(phydev->state));
+
        queue_delayed_work(system_power_efficient_wq, &phydev->state_queue,
                           PHY_STATE_TIME * HZ);
 }
@@ -1062,8 +1093,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
        if ((phydev->duplex == DUPLEX_FULL) &&
            ((phydev->interface == PHY_INTERFACE_MODE_MII) ||
            (phydev->interface == PHY_INTERFACE_MODE_GMII) ||
-           (phydev->interface >= PHY_INTERFACE_MODE_RGMII &&
-            phydev->interface <= PHY_INTERFACE_MODE_RGMII_TXID) ||
+            phy_interface_is_rgmii(phydev) ||
             phy_is_internal(phydev))) {
                int eee_lp, eee_cap, eee_adv;
                u32 lp, cap, adv;
index b62a5e3a1c652d27e2bbb0d2a8a88990c3fef027..3837ae344f63b9d69a5dd958d0e7b7dc202ff316 100644 (file)
@@ -550,11 +550,11 @@ static struct proto pppoe_sk_proto __read_mostly = {
  * Initialize a new struct sock.
  *
  **********************************************************************/
-static int pppoe_create(struct net *net, struct socket *sock)
+static int pppoe_create(struct net *net, struct socket *sock, int kern)
 {
        struct sock *sk;
 
-       sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppoe_sk_proto);
+       sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppoe_sk_proto, kern);
        if (!sk)
                return -ENOMEM;
 
index 2940e9fe351b994029d8f26db30c86114a79e417..0e1b30622477b17b6fd038709eb11c763039fb87 100644 (file)
@@ -118,7 +118,7 @@ static int pppox_create(struct net *net, struct socket *sock, int protocol,
            !try_module_get(pppox_protos[protocol]->owner))
                goto out;
 
-       rc = pppox_protos[protocol]->create(net, sock);
+       rc = pppox_protos[protocol]->create(net, sock, kern);
 
        module_put(pppox_protos[protocol]->owner);
 out:
index e3bfbd4d01367fc32b063d3305b635006d3f9b03..14839bc0aaf5216ee1722b8c4b83ad83c5225858 100644 (file)
@@ -561,14 +561,14 @@ static void pptp_sock_destruct(struct sock *sk)
        skb_queue_purge(&sk->sk_receive_queue);
 }
 
-static int pptp_create(struct net *net, struct socket *sock)
+static int pptp_create(struct net *net, struct socket *sock, int kern)
 {
        int error = -ENOMEM;
        struct sock *sk;
        struct pppox_sock *po;
        struct pptp_opt *opt;
 
-       sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pptp_sk_proto);
+       sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pptp_sk_proto, kern);
        if (!sk)
                goto out;
 
index 6928448f6b7f1a80f5cfd46eeee5992f3bc24cf7..daa054b3ff03ebf58f32c8bc7b1b012240895176 100644 (file)
@@ -1924,7 +1924,7 @@ static netdev_features_t team_fix_features(struct net_device *dev,
        struct team *team = netdev_priv(dev);
        netdev_features_t mask;
 
-       mask = features | NETIF_F_HW_SWITCH_OFFLOAD;
+       mask = features;
        features &= ~NETIF_F_ONE_FOR_ALL;
        features |= NETIF_F_ALL_FOR_ALL;
 
@@ -1977,8 +1977,12 @@ static const struct net_device_ops team_netdev_ops = {
        .ndo_del_slave          = team_del_slave,
        .ndo_fix_features       = team_fix_features,
        .ndo_change_carrier     = team_change_carrier,
-       .ndo_bridge_setlink     = ndo_dflt_netdev_switch_port_bridge_setlink,
-       .ndo_bridge_dellink     = ndo_dflt_netdev_switch_port_bridge_dellink,
+       .ndo_bridge_setlink     = switchdev_port_bridge_setlink,
+       .ndo_bridge_getlink     = switchdev_port_bridge_getlink,
+       .ndo_bridge_dellink     = switchdev_port_bridge_dellink,
+       .ndo_fdb_add            = switchdev_port_fdb_add,
+       .ndo_fdb_del            = switchdev_port_fdb_del,
+       .ndo_fdb_dump           = switchdev_port_fdb_dump,
        .ndo_features_check     = passthru_features_check,
 };
 
index e470ae59d40536fe7530774cb473ebe57000f9a8..1a1c4f7b3ec53d884c0ea3aed996d08d203ce6aa 100644 (file)
@@ -146,7 +146,6 @@ struct tun_file {
        struct socket socket;
        struct socket_wq wq;
        struct tun_struct __rcu *tun;
-       struct net *net;
        struct fasync_struct *fasync;
        /* only used for fasnyc */
        unsigned int flags;
@@ -493,10 +492,7 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
                            tun->dev->reg_state == NETREG_REGISTERED)
                                unregister_netdevice(tun->dev);
                }
-
-               BUG_ON(!test_bit(SOCK_EXTERNALLY_ALLOCATED,
-                                &tfile->socket.flags));
-               sk_release_kernel(&tfile->sk);
+               sock_put(&tfile->sk);
        }
 }
 
@@ -1492,18 +1488,10 @@ out:
        return ret;
 }
 
-static int tun_release(struct socket *sock)
-{
-       if (sock->sk)
-               sock_put(sock->sk);
-       return 0;
-}
-
 /* Ops structure to mimic raw sockets with tun */
 static const struct proto_ops tun_socket_ops = {
        .sendmsg = tun_sendmsg,
        .recvmsg = tun_recvmsg,
-       .release = tun_release,
 };
 
 static struct proto tun_proto = {
@@ -1865,7 +1853,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
        if (cmd == TUNSETIFF && !tun) {
                ifr.ifr_name[IFNAMSIZ-1] = '\0';
 
-               ret = tun_set_iff(tfile->net, file, &ifr);
+               ret = tun_set_iff(sock_net(&tfile->sk), file, &ifr);
 
                if (ret)
                        goto unlock;
@@ -2154,16 +2142,16 @@ out:
 
 static int tun_chr_open(struct inode *inode, struct file * file)
 {
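+       /* the tun socket lives in the opener's netns, not init_net */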
+       struct net *net = current->nsproxy->net_ns;
        struct tun_file *tfile;
 
        DBG1(KERN_INFO, "tunX: tun_chr_open\n");
 
-       tfile = (struct tun_file *)sk_alloc(&init_net, AF_UNSPEC, GFP_KERNEL,
-                                           &tun_proto);
+       tfile = (struct tun_file *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
+                                           &tun_proto, 0);
        if (!tfile)
                return -ENOMEM;
        RCU_INIT_POINTER(tfile->tun, NULL);
-       tfile->net = get_net(current->nsproxy->net_ns);
        tfile->flags = 0;
        tfile->ifindex = 0;
 
@@ -2174,13 +2162,11 @@ static int tun_chr_open(struct inode *inode, struct file * file)
        tfile->socket.ops = &tun_socket_ops;
 
        sock_init_data(&tfile->socket, &tfile->sk);
-       sk_change_net(&tfile->sk, tfile->net);
 
        tfile->sk.sk_write_space = tun_sock_write_space;
        tfile->sk.sk_sndbuf = INT_MAX;
 
        file->private_data = tfile;
-       set_bit(SOCK_EXTERNALLY_ALLOCATED, &tfile->socket.flags);
        INIT_LIST_HEAD(&tfile->next);
 
        sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
@@ -2191,10 +2177,8 @@ static int tun_chr_open(struct inode *inode, struct file * file)
 static int tun_chr_close(struct inode *inode, struct file *file)
 {
        struct tun_file *tfile = file->private_data;
-       struct net *net = tfile->net;
 
        tun_detach(tfile, true);
-       put_net(net);
 
        return 0;
 }
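All of the tun removals above stem from one lifetime change: the tfile socket is now allocated with kern == 0, so the sock itself holds the namespace reference for its whole life. That makes the cached tfile->net, the get_net()/put_net() pair, SOCK_EXTERNALLY_ALLOCATED and sk_release_kernel() redundant; sock_net() recovers the namespace at any time and a single sock_put() tears everything down. A hedged sketch of the resulting model:

    /* Hedged sketch: allocation pins the netns, sock_net() reads it back,
     * sock_put() drops both the sock and its namespace reference.
     */
    sk = sk_alloc(current->nsproxy->net_ns, AF_UNSPEC, GFP_KERNEL,
                  &tun_proto, 0);
    if (!sk)
            return -ENOMEM;
    /* ... later, anywhere: */
    net = sock_net(sk);
    /* ... and on close: */
    sock_put(sk);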
index 21a0fbf1ed947a83506de920f7f61501457bfe68..34c519eb1db5092a6e1bd17e02b9a3e53a5c5cb2 100644 (file)
@@ -336,7 +336,7 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
 
        if (!net_eq(dev_net(vxlan->dev), vxlan->net) &&
            nla_put_s32(skb, NDA_LINK_NETNSID,
-                       peernet2id(dev_net(vxlan->dev), vxlan->net)))
+                       peernet2id_alloc(dev_net(vxlan->dev), vxlan->net)))
                goto nla_put_failure;
 
        if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
@@ -1921,6 +1921,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                memset(&fl4, 0, sizeof(fl4));
                fl4.flowi4_oif = rdst->remote_ifindex;
                fl4.flowi4_tos = RT_TOS(tos);
+               fl4.flowi4_mark = skb->mark;
+               fl4.flowi4_proto = IPPROTO_UDP;
                fl4.daddr = dst->sin.sin_addr.s_addr;
                fl4.saddr = vxlan->saddr.sin.sin_addr.s_addr;
 
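Copying skb->mark and the outer protocol into the flow key lets fwmark-based policy routing and protocol-specific FIB rules steer encapsulated VXLAN traffic just as they would steer plain UDP. A hedged sketch of the lookup these fields feed (the same helper the driver uses):

    /* Hedged sketch: the filled-in flowi4 goes to the standard output
     * route lookup, which now sees the skb's firewall mark.
     */
    rt = ip_route_output_key(vxlan->net, &fl4);
    if (IS_ERR(rt)) {
            netdev_dbg(dev, "no route to %pI4\n", &dst->sin.sin_addr.s_addr);
            goto tx_error;          /* error path as in vxlan_xmit_one() */
    }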
@@ -1981,6 +1983,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                fl6.flowi6_oif = rdst->remote_ifindex;
                fl6.daddr = dst->sin6.sin6_addr;
                fl6.saddr = vxlan->saddr.sin6.sin6_addr;
+               fl6.flowi6_mark = skb->mark;
                fl6.flowi6_proto = IPPROTO_UDP;
 
                if (ipv6_stub->ipv6_dst_lookup(sk, &ndst, &fl6)) {
@@ -2128,9 +2131,10 @@ static void vxlan_cleanup(unsigned long arg)
        if (!netif_running(vxlan->dev))
                return;
 
-       spin_lock_bh(&vxlan->hash_lock);
        for (h = 0; h < FDB_HASH_SIZE; ++h) {
                struct hlist_node *p, *n;
+
+               spin_lock_bh(&vxlan->hash_lock);
                hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
                        struct vxlan_fdb *f
                                = container_of(p, struct vxlan_fdb, hlist);
@@ -2149,8 +2153,8 @@ static void vxlan_cleanup(unsigned long arg)
                        } else if (time_before(timeout, next_timer))
                                next_timer = timeout;
                }
+               spin_unlock_bh(&vxlan->hash_lock);
        }
-       spin_unlock_bh(&vxlan->hash_lock);
 
        mod_timer(&vxlan->age_timer, next_timer);
 }
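Moving the lock acquisition inside the loop bounds the critical section to one hash chain, so FDB learning on the packet path no longer stalls behind a full-table aging sweep. The general shape, as a hedged sketch:

    /* Hedged sketch of per-bucket locking for periodic table scans. */
    for (h = 0; h < FDB_HASH_SIZE; ++h) {
            spin_lock_bh(&vxlan->hash_lock);
            age_one_bucket(&vxlan->fdb_head[h]);    /* hypothetical helper */
            spin_unlock_bh(&vxlan->hash_lock);
    }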
index bcfa01add7cc479ca25d5f2f198f9623fc966415..7193b7304fdd3ed4b69c0125732d4a024d4a4b36 100644 (file)
@@ -517,7 +517,7 @@ static int cosa_probe(int base, int irq, int dma)
                 */
                set_current_state(TASK_INTERRUPTIBLE);
                cosa_putstatus(cosa, SR_TX_INT_ENA);
-               schedule_timeout(30);
+               schedule_timeout(msecs_to_jiffies(300));
                irq = probe_irq_off(irqs);
                /* Disable all IRQs from the card */
                cosa_putstatus(cosa, 0);
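schedule_timeout() counts jiffies, so the bare constants in this driver (and in dscc4 below) slept for HZ-dependent amounts: 30 jiffies is 300 ms at HZ=100 but only 30 ms at HZ=1000. Wrapping them in msecs_to_jiffies() pins the intended wall-clock delay. A standalone C illustration of the arithmetic (msecs_to_jiffies_demo is a simplified stand-in for the kernel helper):

    #include <stdio.h>

    static unsigned long msecs_to_jiffies_demo(unsigned int ms, unsigned int hz)
    {
            return ((unsigned long)ms * hz + 999) / 1000;   /* round up */
    }

    int main(void)
    {
            unsigned int hz;

            for (hz = 100; hz <= 1000; hz *= 10)
                    printf("HZ=%-4u: 30 jiffies = %u ms, 300 ms = %lu jiffies\n",
                           hz, 30u * 1000u / hz, msecs_to_jiffies_demo(300, hz));
            return 0;
    }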
index 08223569cebdf361f7e9ea5408ac56221612874e..7a72407208b161772ba66dc284c7701ea5a85f59 100644 (file)
@@ -551,7 +551,7 @@ static int dscc4_wait_ack_cec(struct dscc4_dev_priv *dpriv,
                               msg, i);
                        goto done;
                }
-               schedule_timeout_uninterruptible(10);
+               schedule_timeout_uninterruptible(msecs_to_jiffies(100));
                rmb();
        } while (++i > 0);
        netdev_err(dev, "%s timeout\n", msg);
@@ -596,7 +596,7 @@ static inline int dscc4_xpr_ack(struct dscc4_dev_priv *dpriv)
                    (dpriv->iqtx[cur] & cpu_to_le32(Xpr)))
                        break;
                smp_rmb();
-               schedule_timeout_uninterruptible(10);
+               schedule_timeout_uninterruptible(msecs_to_jiffies(100));
        } while (++i > 0);
 
        return (i >= 0 ) ? i : -EAGAIN;
@@ -1033,7 +1033,7 @@ static void dscc4_pci_reset(struct pci_dev *pdev, void __iomem *ioaddr)
        /* Flush posted writes */
        readl(ioaddr + GSTAR);
 
-       schedule_timeout_uninterruptible(10);
+       schedule_timeout_uninterruptible(msecs_to_jiffies(100));
 
        for (i = 0; i < 16; i++)
                pci_write_config_dword(pdev, i << 2, dscc4_pci_config_store[i]);
@@ -1046,7 +1046,6 @@ static void dscc4_pci_reset(struct pci_dev *pdev, void __iomem *ioaddr)
 static int dscc4_open(struct net_device *dev)
 {
        struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
-       struct dscc4_pci_priv *ppriv;
        int ret = -EAGAIN;
 
        if ((dscc4_loopback_check(dpriv) < 0))
@@ -1055,8 +1054,6 @@ static int dscc4_open(struct net_device *dev)
        if ((ret = hdlc_open(dev)))
                goto err;
 
-       ppriv = dpriv->pci_priv;
-
        /*
         * Due to various bugs, there is no way to reliably reset a
         * specific port (manufacturer-dependent special PCI #RST wiring
index 16604bdf5197ea81031484c2d3deefa5d0aae020..a63ab2e831054b372b551788e9106ebf29bc925c 100644 (file)
@@ -277,6 +277,7 @@ source "drivers/net/wireless/libertas/Kconfig"
 source "drivers/net/wireless/orinoco/Kconfig"
 source "drivers/net/wireless/p54/Kconfig"
 source "drivers/net/wireless/rt2x00/Kconfig"
+source "drivers/net/wireless/mediatek/Kconfig"
 source "drivers/net/wireless/rtlwifi/Kconfig"
 source "drivers/net/wireless/ti/Kconfig"
 source "drivers/net/wireless/zd1211rw/Kconfig"
index 0c88916867187817ea08f43c4d69cbaf86d28246..6b9e729dd8acbf0af1a545109b0b304d400e93d4 100644 (file)
@@ -45,6 +45,8 @@ obj-$(CONFIG_IWLWIFI) += iwlwifi/
 obj-$(CONFIG_IWLEGACY) += iwlegacy/
 obj-$(CONFIG_RT2X00)   += rt2x00/
 
+obj-$(CONFIG_WL_MEDIATEK)      += mediatek/
+
 obj-$(CONFIG_P54_COMMON)       += p54/
 
 obj-$(CONFIG_ATH_CARDS)                += ath/
index f07a618995457096f8f1d33a0366f6a8b0a17fbe..8c283fcd843d3c33c0fda7f1753d11d54e0a2bbc 100644 (file)
@@ -1098,14 +1098,18 @@ static void adm8211_hw_init(struct ieee80211_hw *dev)
                pci_read_config_byte(priv->pdev, PCI_CACHE_LINE_SIZE, &cline);
 
                switch (cline) {
-               case  0x8: reg |= (0x1 << 14);
-                          break;
-               case 0x16: reg |= (0x2 << 14);
-                          break;
-               case 0x32: reg |= (0x3 << 14);
-                          break;
-                 default: reg |= (0x0 << 14);
-                          break;
+               case  0x8:
+                       reg |= (0x1 << 14);
+                       break;
+               case 0x10:
+                       reg |= (0x2 << 14);
+                       break;
+               case 0x20:
+                       reg |= (0x3 << 14);
+                       break;
+               default:
+                       reg |= (0x0 << 14);
+                       break;
                }
        }
 
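The relabelled cases fix a units bug: the PCI cache line size register counts 32-bit dwords, so 32-, 64- and 128-byte lines read back as 0x08, 0x10 and 0x20. The old 0x16/0x32 labels look like the decimal values 16/32 written with a hex prefix, neither of which corresponds to a common cache line size, so those cases could never match. A standalone illustration of the encoding:

    #include <stdio.h>

    int main(void)
    {
            unsigned int bytes;

            /* the PCI cache line size register is expressed in 4-byte dwords */
            for (bytes = 32; bytes <= 128; bytes *= 2)
                    printf("%3u-byte cache line -> register 0x%02x\n",
                           bytes, bytes / 4);
            return 0;
    }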
@@ -1353,12 +1357,7 @@ static void adm8211_configure_filter(struct ieee80211_hw *dev,
 
        new_flags = 0;
 
-       if (*total_flags & FIF_PROMISC_IN_BSS) {
-               new_flags |= FIF_PROMISC_IN_BSS;
-               priv->nar |= ADM8211_NAR_PR;
-               priv->nar &= ~ADM8211_NAR_MM;
-               mc_filter[1] = mc_filter[0] = ~0;
-       } else if (*total_flags & FIF_ALLMULTI || multicast == ~(0ULL)) {
+       if (*total_flags & FIF_ALLMULTI || multicast == ~(0ULL)) {
                new_flags |= FIF_ALLMULTI;
                priv->nar &= ~ADM8211_NAR_PR;
                priv->nar |= ADM8211_NAR_MM;
index 55090a38ac9549eb546a747144b9f274ae1b2faf..ae03271f878e56ad44c7ec6b87e857418955bbde 100644 (file)
@@ -447,7 +447,7 @@ struct at76_priv {
        int mac80211_registered;
 };
 
-#define AT76_SUPPORTED_FILTERS FIF_PROMISC_IN_BSS
+#define AT76_SUPPORTED_FILTERS 0
 
 #define SCAN_POLL_INTERVAL     (HZ / 4)
 
index 5147ebe4cd05d13d12db78aacbd3081fe83a9727..14937cbeca564ed807c61657bd9eaf84710143bd 100644 (file)
@@ -1319,8 +1319,7 @@ out_unlock:
 
 }
 
-#define AR5523_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
-                                 FIF_ALLMULTI | \
+#define AR5523_SUPPORTED_FILTERS (FIF_ALLMULTI | \
                                  FIF_FCSFAIL | \
                                  FIF_OTHER_BSS)
 
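These wireless hunks all track mac80211's removal of FIF_PROMISC_IN_BSS: a driver's configure_filter() callback advertises what it can honour by masking *total_flags, so retiring the flag amounts to dropping it from each driver's supported-filters constant. A hedged sketch of the callback contract:

    /* Hedged sketch of a mac80211 configure_filter() implementation:
     * clear what the hardware cannot filter, then program the rest.
     */
    static void configure_filter_sketch(struct ieee80211_hw *hw,
                                        unsigned int changed_flags,
                                        unsigned int *total_flags,
                                        u64 multicast)
    {
            *total_flags &= AR5523_SUPPORTED_FILTERS;
            /* write hardware RX filter registers from *total_flags ... */
    }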
index 7e9481099a8e6ba9dd06e13a6f60559993956516..65ef483ebf5053a8f1b79ba0e41dfad19702d530 100644 (file)
@@ -251,6 +251,7 @@ void ath_printk(const char *level, const struct ath_common *common,
  * @ATH_DBG_DFS: radar detection
  * @ATH_DBG_WOW: Wake on Wireless
  * @ATH_DBG_DYNACK: dynack handling
+ * @ATH_DBG_SPECTRAL_SCAN: FFT spectral scan
  * @ATH_DBG_ANY: enable all debugging
  *
  * The debug level is used to control the amount and type of debugging output
@@ -280,6 +281,7 @@ enum ATH_DEBUG {
        ATH_DBG_WOW             = 0x00020000,
        ATH_DBG_CHAN_CTX        = 0x00040000,
        ATH_DBG_DYNACK          = 0x00080000,
+       ATH_DBG_SPECTRAL_SCAN   = 0x00100000,
        ATH_DBG_ANY             = 0xffffffff
 };
 
index f4dbb3e93bf8600aecbdeda7ab0616393b07d4cf..9729e69416358a120ef0c97cd3feb034cb0ca6bb 100644 (file)
@@ -10,13 +10,15 @@ ath10k_core-y += mac.o \
                 wmi.o \
                 wmi-tlv.o \
                 bmi.o \
-                hw.o
+                hw.o \
+                p2p.o
 
 ath10k_core-$(CONFIG_ATH10K_DEBUGFS) += spectral.o
 ath10k_core-$(CONFIG_NL80211_TESTMODE) += testmode.o
 ath10k_core-$(CONFIG_ATH10K_TRACING) += trace.o
 ath10k_core-$(CONFIG_THERMAL) += thermal.o
 ath10k_core-$(CONFIG_MAC80211_DEBUGFS) += debugfs_sta.o
+ath10k_core-$(CONFIG_PM) += wow.o
 
 obj-$(CONFIG_ATH10K_PCI) += ath10k_pci.o
 ath10k_pci-y += pci.o \
index c0e454bb6a8df646b9266afbffa76ed179dff0fc..bcccae19325d8f77da713a37956514ee5f97c1b9 100644 (file)
@@ -387,7 +387,9 @@ static int ath10k_download_and_run_otp(struct ath10k *ar)
 
        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot otp execute result %d\n", result);
 
-       if (!skip_otp && result != 0) {
+       if (!(skip_otp || test_bit(ATH10K_FW_FEATURE_IGNORE_OTP_RESULT,
+                                  ar->fw_features))
+           && result != 0) {
                ath10k_err(ar, "otp calibration failed: %d", result);
                return -EINVAL;
        }
@@ -482,31 +484,79 @@ static int ath10k_fetch_cal_file(struct ath10k *ar)
        return 0;
 }
 
-static int ath10k_core_fetch_firmware_api_1(struct ath10k *ar)
+static int ath10k_core_fetch_spec_board_file(struct ath10k *ar)
 {
-       int ret = 0;
+       char filename[100];
 
-       if (ar->hw_params.fw.fw == NULL) {
-               ath10k_err(ar, "firmware file not defined\n");
-               return -EINVAL;
-       }
+       scnprintf(filename, sizeof(filename), "board-%s-%s.bin",
+                 ath10k_bus_str(ar->hif.bus), ar->spec_board_id);
+
+       ar->board = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, filename);
+       if (IS_ERR(ar->board))
+               return PTR_ERR(ar->board);
+
+       ar->board_data = ar->board->data;
+       ar->board_len = ar->board->size;
+       ar->spec_board_loaded = true;
 
-       if (ar->hw_params.fw.board == NULL) {
-               ath10k_err(ar, "board data file not defined");
+       return 0;
+}
+
+static int ath10k_core_fetch_generic_board_file(struct ath10k *ar)
+{
+       if (!ar->hw_params.fw.board) {
+               ath10k_err(ar, "failed to find board file fw entry\n");
                return -EINVAL;
        }
 
        ar->board = ath10k_fetch_fw_file(ar,
                                         ar->hw_params.fw.dir,
                                         ar->hw_params.fw.board);
-       if (IS_ERR(ar->board)) {
-               ret = PTR_ERR(ar->board);
-               ath10k_err(ar, "could not fetch board data (%d)\n", ret);
-               goto err;
-       }
+       if (IS_ERR(ar->board))
+               return PTR_ERR(ar->board);
 
        ar->board_data = ar->board->data;
        ar->board_len = ar->board->size;
+       ar->spec_board_loaded = false;
+
+       return 0;
+}
+
+static int ath10k_core_fetch_board_file(struct ath10k *ar)
+{
+       int ret;
+
+       if (strlen(ar->spec_board_id) > 0) {
+               ret = ath10k_core_fetch_spec_board_file(ar);
+               if (ret) {
+                       ath10k_info(ar, "failed to load spec board file, falling back to generic: %d\n",
+                                   ret);
+                       goto generic;
+               }
+
+               ath10k_dbg(ar, ATH10K_DBG_BOOT, "found specific board file for %s\n",
+                          ar->spec_board_id);
+               return 0;
+       }
+
+generic:
+       ret = ath10k_core_fetch_generic_board_file(ar);
+       if (ret) {
+               ath10k_err(ar, "failed to fetch generic board data: %d\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int ath10k_core_fetch_firmware_api_1(struct ath10k *ar)
+{
+       int ret = 0;
+
+       if (ar->hw_params.fw.fw == NULL) {
+               ath10k_err(ar, "firmware file not defined\n");
+               return -EINVAL;
+       }
 
        ar->firmware = ath10k_fetch_fw_file(ar,
                                            ar->hw_params.fw.dir,
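The split above prefers a board file named after the bus plus the device's own board ID, and only then falls back to the generic per-chipset file, so one driver binary can carry calibration quirks for specific designs. A standalone sketch of the name construction (bus string and ID are illustrative, not real values):

    #include <stdio.h>

    int main(void)
    {
            char filename[100];

            snprintf(filename, sizeof(filename), "board-%s-%s.bin",
                     "pci", "example-id");      /* hypothetical board ID */
            printf("try %s first, else the generic board file\n", filename);
            return 0;
    }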
@@ -675,6 +725,17 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
                        ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw ie wmi op version %d\n",
                                   ar->wmi.op_version);
                        break;
+               case ATH10K_FW_IE_HTT_OP_VERSION:
+                       if (ie_len != sizeof(u32))
+                               break;
+
+                       version = (__le32 *)data;
+
+                       ar->htt.op_version = le32_to_cpup(version);
+
+                       ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw ie htt op version %d\n",
+                                  ar->htt.op_version);
+                       break;
                default:
                        ath10k_warn(ar, "Unknown FW IE: %u\n",
                                    le32_to_cpu(hdr->id));
@@ -695,27 +756,6 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
                goto err;
        }
 
-       /* now fetch the board file */
-       if (ar->hw_params.fw.board == NULL) {
-               ath10k_err(ar, "board data file not defined");
-               ret = -EINVAL;
-               goto err;
-       }
-
-       ar->board = ath10k_fetch_fw_file(ar,
-                                        ar->hw_params.fw.dir,
-                                        ar->hw_params.fw.board);
-       if (IS_ERR(ar->board)) {
-               ret = PTR_ERR(ar->board);
-               ath10k_err(ar, "could not fetch board data '%s/%s' (%d)\n",
-                          ar->hw_params.fw.dir, ar->hw_params.fw.board,
-                          ret);
-               goto err;
-       }
-
-       ar->board_data = ar->board->data;
-       ar->board_len = ar->board->size;
-
        return 0;
 
 err:
@@ -730,6 +770,19 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
        /* calibration file is optional, don't check for any errors */
        ath10k_fetch_cal_file(ar);
 
+       ret = ath10k_core_fetch_board_file(ar);
+       if (ret) {
+               ath10k_err(ar, "failed to fetch board file: %d\n", ret);
+               return ret;
+       }
+
+       ar->fw_api = 5;
+       ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
+
+       ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API5_FILE);
+       if (ret == 0)
+               goto success;
+
        ar->fw_api = 4;
        ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
 
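Firmware interface probing in ath10k runs newest-first; this hunk simply pushes API 5 onto the front of the chain. A hedged sketch of the overall shape (try_fetch_api_n is a hypothetical stand-in for the per-API fetch above):

    /* Hedged sketch: walk the firmware APIs downwards, first hit wins,
     * then fall back to the legacy API 1 layout.
     */
    for (api = 5; api >= 2; api--) {
            ar->fw_api = api;
            if (try_fetch_api_n(ar, api) == 0)
                    return 0;
    }

    return ath10k_core_fetch_firmware_api_1(ar);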
@@ -958,6 +1011,8 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
                ar->max_num_stations = TARGET_NUM_STATIONS;
                ar->max_num_vdevs = TARGET_NUM_VDEVS;
                ar->htt.max_num_pending_tx = TARGET_NUM_MSDU_DESC;
+               ar->fw_stats_req_mask = WMI_STAT_PDEV | WMI_STAT_VDEV |
+                       WMI_STAT_PEER;
                break;
        case ATH10K_FW_WMI_OP_VERSION_10_1:
        case ATH10K_FW_WMI_OP_VERSION_10_2:
@@ -966,12 +1021,17 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
                ar->max_num_stations = TARGET_10X_NUM_STATIONS;
                ar->max_num_vdevs = TARGET_10X_NUM_VDEVS;
                ar->htt.max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC;
+               ar->fw_stats_req_mask = WMI_STAT_PEER;
                break;
        case ATH10K_FW_WMI_OP_VERSION_TLV:
                ar->max_num_peers = TARGET_TLV_NUM_PEERS;
                ar->max_num_stations = TARGET_TLV_NUM_STATIONS;
                ar->max_num_vdevs = TARGET_TLV_NUM_VDEVS;
+               ar->max_num_tdls_vdevs = TARGET_TLV_NUM_TDLS_VDEVS;
                ar->htt.max_num_pending_tx = TARGET_TLV_NUM_MSDU_DESC;
+               ar->wow.max_num_patterns = TARGET_TLV_NUM_WOW_PATTERNS;
+               ar->fw_stats_req_mask = WMI_STAT_PDEV | WMI_STAT_VDEV |
+                       WMI_STAT_PEER;
                break;
        case ATH10K_FW_WMI_OP_VERSION_UNSET:
        case ATH10K_FW_WMI_OP_VERSION_MAX:
@@ -979,6 +1039,29 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
                return -EINVAL;
        }
 
+       /* Backwards compatibility for firmwares without
+        * ATH10K_FW_IE_HTT_OP_VERSION.
+        */
+       if (ar->htt.op_version == ATH10K_FW_HTT_OP_VERSION_UNSET) {
+               switch (ar->wmi.op_version) {
+               case ATH10K_FW_WMI_OP_VERSION_MAIN:
+                       ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_MAIN;
+                       break;
+               case ATH10K_FW_WMI_OP_VERSION_10_1:
+               case ATH10K_FW_WMI_OP_VERSION_10_2:
+               case ATH10K_FW_WMI_OP_VERSION_10_2_4:
+                       ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_10_1;
+                       break;
+               case ATH10K_FW_WMI_OP_VERSION_TLV:
+                       ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_TLV;
+                       break;
+               case ATH10K_FW_WMI_OP_VERSION_UNSET:
+               case ATH10K_FW_WMI_OP_VERSION_MAX:
+                       WARN_ON(1);
+                       return -EINVAL;
+               }
+       }
+
        return 0;
 }
 
@@ -1080,9 +1163,8 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
 
        if (mode == ATH10K_FIRMWARE_MODE_NORMAL) {
                status = ath10k_wmi_wait_for_service_ready(ar);
-               if (status <= 0) {
+               if (status) {
                        ath10k_warn(ar, "wmi service ready event not received");
-                       status = -ETIMEDOUT;
                        goto err_hif_stop;
                }
        }
@@ -1098,9 +1180,8 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
        }
 
        status = ath10k_wmi_wait_for_unified_ready(ar);
-       if (status <= 0) {
+       if (status) {
                ath10k_err(ar, "wmi unified ready event not received\n");
-               status = -ETIMEDOUT;
                goto err_hif_stop;
        }
 
@@ -1151,6 +1232,7 @@ EXPORT_SYMBOL(ath10k_core_start);
 int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt)
 {
        int ret;
+       unsigned long time_left;
 
        reinit_completion(&ar->target_suspend);
 
@@ -1160,9 +1242,9 @@ int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt)
                return ret;
        }
 
-       ret = wait_for_completion_timeout(&ar->target_suspend, 1 * HZ);
+       time_left = wait_for_completion_timeout(&ar->target_suspend, 1 * HZ);
 
-       if (ret == 0) {
+       if (!time_left) {
                ath10k_warn(ar, "suspend timed out - target pause event never came\n");
                return -ETIMEDOUT;
        }
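This hunk, like the matching htc.c and debug.c changes later in the section, untangles a type confusion: wait_for_completion_timeout() returns the remaining time as an unsigned long (0 on timeout, never negative), so testing it with <= 0 or reusing the int error variable is misleading. The corrected idiom, as a hedged sketch:

    /* Hedged sketch of the idiom these conversions converge on. */
    static int wait_sketch(struct completion *done)
    {
            unsigned long time_left;

            time_left = wait_for_completion_timeout(done, 1 * HZ);
            if (!time_left)
                    return -ETIMEDOUT;

            return 0;
    }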
@@ -1386,6 +1468,7 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
        init_completion(&ar->scan.completed);
        init_completion(&ar->scan.on_channel);
        init_completion(&ar->target_suspend);
+       init_completion(&ar->wow.wakeup_completed);
 
        init_completion(&ar->install_key_done);
        init_completion(&ar->vdev_setup_done);
index f65310c3ba5fe8d660cd4139f93407a22045b8aa..70fcdc9c27587593046f1aeab8fd3b68b2d74364 100644 (file)
@@ -35,6 +35,7 @@
 #include "../dfs_pattern_detector.h"
 #include "spectral.h"
 #include "thermal.h"
+#include "wow.h"
 
 #define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)
 #define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
 #define ATH10K_SCAN_ID 0
 #define WMI_READY_TIMEOUT (5 * HZ)
 #define ATH10K_FLUSH_TIMEOUT_HZ (5*HZ)
-#define ATH10K_NUM_CHANS 38
+#define ATH10K_CONNECTION_LOSS_HZ (3*HZ)
+#define ATH10K_NUM_CHANS 39
 
 /* Antenna noise floor */
 #define ATH10K_DEFAULT_NOISE_FLOOR -95
 
 #define ATH10K_MAX_NUM_MGMT_PENDING 128
 
-/* number of failed packets */
-#define ATH10K_KICKOUT_THRESHOLD 50
+/* number of failed packets (20 packets with 16 sw retries each) */
+#define ATH10K_KICKOUT_THRESHOLD (20 * 16)
 
 /*
  * Use insanely high numbers to make sure that the firmware implementation
@@ -82,6 +84,8 @@ struct ath10k_skb_cb {
        dma_addr_t paddr;
        u8 eid;
        u8 vdev_id;
+       enum ath10k_hw_txrx_mode txmode;
+       bool is_protected;
 
        struct {
                u8 tid;
@@ -280,6 +284,15 @@ struct ath10k_sta {
 #endif
 };
 
+struct ath10k_chanctx {
+       /* Used to store a copy of chanctx_conf to avoid inconsistencies. Ideally
+        * mac80211 should allow some sort of explicit locking to guarantee
+        * that the publicly available chanctx_conf can be accessed safely at
+        * all times.
+        */
+       struct ieee80211_chanctx_conf conf;
+};
+
 #define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5*HZ)
 
 enum ath10k_beacon_state {
@@ -301,6 +314,7 @@ struct ath10k_vif {
        enum ath10k_beacon_state beacon_state;
        void *beacon_buf;
        dma_addr_t beacon_paddr;
+       unsigned long tx_paused; /* arbitrary values defined by target */
 
        struct ath10k *ar;
        struct ieee80211_vif *vif;
@@ -334,13 +348,13 @@ struct ath10k_vif {
                } ap;
        } u;
 
-       u8 fixed_rate;
-       u8 fixed_nss;
-       u8 force_sgi;
        bool use_cts_prot;
        int num_legacy_stations;
        int txpower;
        struct wmi_wmm_params_all_arg wmm_params;
+       struct work_struct ap_csa_work;
+       struct delayed_work connection_loss_work;
+       struct cfg80211_bitrate_mask bitrate_mask;
 };
 
 struct ath10k_vif_iter {
@@ -440,6 +454,20 @@ enum ath10k_fw_features {
         */
        ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT = 5,
 
+       /* Some firmware revisions have an incomplete WoWLAN implementation
+        * despite the WMI service bit being advertised. This feature flag is used
+        * to distinguish whether WoWLAN is really supported or not.
+        */
+       ATH10K_FW_FEATURE_WOWLAN_SUPPORT = 6,
+
+       /* Don't trust error code from otp.bin */
+       ATH10K_FW_FEATURE_IGNORE_OTP_RESULT,
+
+       /* Some firmware revisions pad the 4th hw address to a 4-byte boundary,
+        * making it 8 bytes long in Native Wifi Rx decap.
+        */
+       ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
+
        /* keep last */
        ATH10K_FW_FEATURE_COUNT,
 };
@@ -498,6 +526,11 @@ static inline const char *ath10k_scan_state_str(enum ath10k_scan_state state)
        return "unknown";
 }
 
+enum ath10k_tx_pause_reason {
+       ATH10K_TX_PAUSE_Q_FULL,
+       ATH10K_TX_PAUSE_MAX,
+};
+
 struct ath10k {
        struct ath_common ath_common;
        struct ieee80211_hw *hw;
@@ -511,12 +544,15 @@ struct ath10k {
        u32 fw_version_minor;
        u16 fw_version_release;
        u16 fw_version_build;
+       u32 fw_stats_req_mask;
        u32 phy_capability;
        u32 hw_min_tx_power;
        u32 hw_max_tx_power;
        u32 ht_cap_info;
        u32 vht_cap_info;
        u32 num_rf_chains;
+       /* protected by conf_mutex */
+       bool ani_enabled;
 
        DECLARE_BITMAP(fw_features, ATH10K_FW_FEATURE_COUNT);
 
@@ -565,6 +601,9 @@ struct ath10k {
 
        const struct firmware *cal_file;
 
+       char spec_board_id[100];
+       bool spec_board_loaded;
+
        int fw_api;
        enum ath10k_cal_mode cal_mode;
 
@@ -593,6 +632,7 @@ struct ath10k {
        struct cfg80211_chan_def chandef;
 
        unsigned long long free_vdev_map;
+       struct ath10k_vif *monitor_arvif;
        bool monitor;
        int monitor_vdev_id;
        bool monitor_started;
@@ -633,6 +673,7 @@ struct ath10k {
        int max_num_peers;
        int max_num_stations;
        int max_num_vdevs;
+       int max_num_tdls_vdevs;
 
        struct work_struct offchan_tx_work;
        struct sk_buff_head offchan_tx_queue;
@@ -655,6 +696,8 @@ struct ath10k {
 
        struct dfs_pattern_detector *dfs_detector;
 
+       unsigned long tx_paused; /* see ATH10K_TX_PAUSE_ */
+
 #ifdef CONFIG_ATH10K_DEBUGFS
        struct ath10k_debug debug;
 #endif
@@ -686,6 +729,7 @@ struct ath10k {
        } stats;
 
        struct ath10k_thermal thermal;
+       struct ath10k_wow wow;
 
        /* must be last */
        u8 drv_priv[0] __aligned(sizeof(void *));
index 301081db1ef60a9f7a68a9452d7155965520cc39..8fa606a9c4ddaf3f95b43d80723a0fbcfe59a010 100644 (file)
@@ -124,10 +124,14 @@ EXPORT_SYMBOL(ath10k_info);
 
 void ath10k_print_driver_info(struct ath10k *ar)
 {
-       ath10k_info(ar, "%s (0x%08x, 0x%08x) fw %s api %d htt %d.%d wmi %d cal %s max_sta %d\n",
+       ath10k_info(ar, "%s (0x%08x, 0x%08x%s%s%s) fw %s api %d htt %d.%d wmi %d cal %s max_sta %d\n",
                    ar->hw_params.name,
                    ar->target_version,
                    ar->chip_id,
+                   (strlen(ar->spec_board_id) > 0 ? ", " : ""),
+                   ar->spec_board_id,
+                   (strlen(ar->spec_board_id) > 0 && !ar->spec_board_loaded
+                    ? " fallback" : ""),
                    ar->hw->wiphy->fw_version,
                    ar->fw_api,
                    ar->htt.target_version_major,
@@ -380,12 +384,12 @@ unlock:
 
 static int ath10k_debug_fw_stats_request(struct ath10k *ar)
 {
-       unsigned long timeout;
+       unsigned long timeout, time_left;
        int ret;
 
        lockdep_assert_held(&ar->conf_mutex);
 
-       timeout = jiffies + msecs_to_jiffies(1*HZ);
+       timeout = jiffies + msecs_to_jiffies(1 * HZ);
 
        ath10k_debug_fw_stats_reset(ar);
 
@@ -395,18 +399,16 @@ static int ath10k_debug_fw_stats_request(struct ath10k *ar)
 
                reinit_completion(&ar->debug.fw_stats_complete);
 
-               ret = ath10k_wmi_request_stats(ar,
-                                              WMI_STAT_PDEV |
-                                              WMI_STAT_VDEV |
-                                              WMI_STAT_PEER);
+               ret = ath10k_wmi_request_stats(ar, ar->fw_stats_req_mask);
                if (ret) {
                        ath10k_warn(ar, "could not request stats (%d)\n", ret);
                        return ret;
                }
 
-               ret = wait_for_completion_timeout(&ar->debug.fw_stats_complete,
-                                                 1*HZ);
-               if (ret == 0)
+               time_left =
+                       wait_for_completion_timeout(&ar->debug.fw_stats_complete,
+                                                   1 * HZ);
+               if (!time_left)
                        return -ETIMEDOUT;
 
                spin_lock_bh(&ar->data_lock);
@@ -1708,6 +1710,61 @@ static int ath10k_debug_cal_data_release(struct inode *inode,
        return 0;
 }
 
+static ssize_t ath10k_write_ani_enable(struct file *file,
+                                      const char __user *user_buf,
+                                      size_t count, loff_t *ppos)
+{
+       struct ath10k *ar = file->private_data;
+       int ret;
+       u8 enable;
+
+       if (kstrtou8_from_user(user_buf, count, 0, &enable))
+               return -EINVAL;
+
+       mutex_lock(&ar->conf_mutex);
+
+       if (ar->ani_enabled == enable) {
+               ret = count;
+               goto exit;
+       }
+
+       ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->ani_enable,
+                                       enable);
+       if (ret) {
+               ath10k_warn(ar, "ani_enable failed from debugfs: %d\n", ret);
+               goto exit;
+       }
+       ar->ani_enabled = enable;
+
+       ret = count;
+
+exit:
+       mutex_unlock(&ar->conf_mutex);
+
+       return ret;
+}
+
+static ssize_t ath10k_read_ani_enable(struct file *file, char __user *user_buf,
+                                     size_t count, loff_t *ppos)
+{
+       struct ath10k *ar = file->private_data;
+       int len = 0;
+       char buf[32];
+
+       len = scnprintf(buf, sizeof(buf) - len, "%d\n",
+                       ar->ani_enabled);
+
+       return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_ani_enable = {
+       .read = ath10k_read_ani_enable,
+       .write = ath10k_write_ani_enable,
+       .open = simple_open,
+       .owner = THIS_MODULE,
+       .llseek = default_llseek,
+};
+
 static const struct file_operations fops_cal_data = {
        .open = ath10k_debug_cal_data_open,
        .read = ath10k_debug_cal_data_read,
@@ -1991,6 +2048,50 @@ static const struct file_operations fops_pktlog_filter = {
        .open = simple_open
 };
 
+static ssize_t ath10k_write_quiet_period(struct file *file,
+                                        const char __user *ubuf,
+                                        size_t count, loff_t *ppos)
+{
+       struct ath10k *ar = file->private_data;
+       u32 period;
+
+       if (kstrtouint_from_user(ubuf, count, 0, &period))
+               return -EINVAL;
+
+       if (period < ATH10K_QUIET_PERIOD_MIN) {
+               ath10k_warn(ar, "Quiet period %u cannot be less than 25ms\n",
+                           period);
+               return -EINVAL;
+       }
+       mutex_lock(&ar->conf_mutex);
+       ar->thermal.quiet_period = period;
+       ath10k_thermal_set_throttling(ar);
+       mutex_unlock(&ar->conf_mutex);
+
+       return count;
+}
+
+static ssize_t ath10k_read_quiet_period(struct file *file, char __user *ubuf,
+                                       size_t count, loff_t *ppos)
+{
+       char buf[32];
+       struct ath10k *ar = file->private_data;
+       int len = 0;
+
+       mutex_lock(&ar->conf_mutex);
+       len = scnprintf(buf, sizeof(buf) - len, "%d\n",
+                       ar->thermal.quiet_period);
+       mutex_unlock(&ar->conf_mutex);
+
+       return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_quiet_period = {
+       .read = ath10k_read_quiet_period,
+       .write = ath10k_write_quiet_period,
+       .open = simple_open
+};
+
 int ath10k_debug_create(struct ath10k *ar)
 {
        ar->debug.fw_crash_data = vzalloc(sizeof(*ar->debug.fw_crash_data));
@@ -2068,6 +2169,9 @@ int ath10k_debug_register(struct ath10k *ar)
        debugfs_create_file("cal_data", S_IRUSR, ar->debug.debugfs_phy,
                            ar, &fops_cal_data);
 
+       debugfs_create_file("ani_enable", S_IRUSR | S_IWUSR,
+                           ar->debug.debugfs_phy, ar, &fops_ani_enable);
+
        debugfs_create_file("nf_cal_period", S_IRUSR | S_IWUSR,
                            ar->debug.debugfs_phy, ar, &fops_nf_cal_period);
 
@@ -2088,6 +2192,9 @@ int ath10k_debug_register(struct ath10k *ar)
        debugfs_create_file("pktlog_filter", S_IRUGO | S_IWUSR,
                            ar->debug.debugfs_phy, ar, &fops_pktlog_filter);
 
+       debugfs_create_file("quiet_period", S_IRUGO | S_IWUSR,
+                           ar->debug.debugfs_phy, ar, &fops_quiet_period);
+
        return 0;
 }
 
index a12b8323f9f1000ac9e139ee23d52e03f71ac13a..53bd6a19eab6215b6108077c7407de6f2bac1d30 100644 (file)
@@ -36,6 +36,7 @@ enum ath10k_debug_mask {
        ATH10K_DBG_REGULATORY   = 0x00000800,
        ATH10K_DBG_TESTMODE     = 0x00001000,
        ATH10K_DBG_WMI_PRINT    = 0x00002000,
+       ATH10K_DBG_PCI_PS       = 0x00004000,
        ATH10K_DBG_ANY          = 0xffffffff,
 };
 
index 2fd9e180272b3d48e642cb63e5fc7b09871bc998..85bfa2acb801b8945a7d825bfbc91f126d82c655 100644 (file)
@@ -86,21 +86,6 @@ static void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
        ep->ep_ops.ep_tx_complete(ep->htc->ar, skb);
 }
 
-/* assumes tx_lock is held */
-static bool ath10k_htc_ep_need_credit_update(struct ath10k_htc_ep *ep)
-{
-       struct ath10k *ar = ep->htc->ar;
-
-       if (!ep->tx_credit_flow_enabled)
-               return false;
-       if (ep->tx_credits >= ep->tx_credits_per_max_message)
-               return false;
-
-       ath10k_dbg(ar, ATH10K_DBG_HTC, "HTC: endpoint %d needs credit update\n",
-                  ep->eid);
-       return true;
-}
-
 static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
                                      struct sk_buff *skb)
 {
@@ -111,13 +96,10 @@ static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
        hdr->eid = ep->eid;
        hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr));
        hdr->flags = 0;
+       hdr->flags |= ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE;
 
        spin_lock_bh(&ep->htc->tx_lock);
        hdr->seq_no = ep->seq_no++;
-
-       if (ath10k_htc_ep_need_credit_update(ep))
-               hdr->flags |= ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE;
-
        spin_unlock_bh(&ep->htc->tx_lock);
 }
 
@@ -414,7 +396,8 @@ static int ath10k_htc_rx_completion_handler(struct ath10k *ar,
                struct ath10k_htc_msg *msg = (struct ath10k_htc_msg *)skb->data;
 
                switch (__le16_to_cpu(msg->hdr.message_id)) {
-               default:
+               case ATH10K_HTC_MSG_READY_ID:
+               case ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID:
                        /* handle HTC control message */
                        if (completion_done(&htc->ctl_resp)) {
                                /*
@@ -438,6 +421,10 @@ static int ath10k_htc_rx_completion_handler(struct ath10k *ar,
                        break;
                case ATH10K_HTC_MSG_SEND_SUSPEND_COMPLETE:
                        htc->htc_ops.target_send_suspend_complete(ar);
+                       break;
+               default:
+                       ath10k_warn(ar, "ignoring unsolicited htc ep0 event\n");
+                       break;
                }
                goto out;
        }
@@ -548,6 +535,7 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)
 {
        struct ath10k *ar = htc->ar;
        int i, status = 0;
+       unsigned long time_left;
        struct ath10k_htc_svc_conn_req conn_req;
        struct ath10k_htc_svc_conn_resp conn_resp;
        struct ath10k_htc_msg *msg;
@@ -555,9 +543,9 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)
        u16 credit_count;
        u16 credit_size;
 
-       status = wait_for_completion_timeout(&htc->ctl_resp,
-                                            ATH10K_HTC_WAIT_TIMEOUT_HZ);
-       if (status == 0) {
+       time_left = wait_for_completion_timeout(&htc->ctl_resp,
+                                               ATH10K_HTC_WAIT_TIMEOUT_HZ);
+       if (!time_left) {
                /* Workaround: In some cases the PCI HIF doesn't
                 * receive interrupt for the control response message
                 * even if the buffer was completed. It is suspected
@@ -569,10 +557,11 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)
                for (i = 0; i < CE_COUNT; i++)
                        ath10k_hif_send_complete_check(htc->ar, i, 1);
 
-               status = wait_for_completion_timeout(&htc->ctl_resp,
-                                                    ATH10K_HTC_WAIT_TIMEOUT_HZ);
+               time_left =
+                       wait_for_completion_timeout(&htc->ctl_resp,
+                                                   ATH10K_HTC_WAIT_TIMEOUT_HZ);
 
-               if (status == 0)
+               if (!time_left)
                        status = -ETIMEDOUT;
        }
 
@@ -646,6 +635,7 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc,
        struct sk_buff *skb;
        unsigned int max_msg_size = 0;
        int length, status;
+       unsigned long time_left;
        bool disable_credit_flow_ctrl = false;
        u16 message_id, service_id, flags = 0;
        u8 tx_alloc = 0;
@@ -701,10 +691,10 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc,
        }
 
        /* wait for response */
-       status = wait_for_completion_timeout(&htc->ctl_resp,
-                                            ATH10K_HTC_CONN_SVC_TIMEOUT_HZ);
-       if (status == 0) {
-               ath10k_err(ar, "Service connect timeout: %d\n", status);
+       time_left = wait_for_completion_timeout(&htc->ctl_resp,
+                                               ATH10K_HTC_CONN_SVC_TIMEOUT_HZ);
+       if (!time_left) {
+               ath10k_err(ar, "Service connect timeout\n");
                return -ETIMEDOUT;
        }
 
index 4f59ab923e4849681287b0035a9ea20233e173a9..6da6ef26143af0caeac2ed8ed76b14f28319b471 100644 (file)
 #include "core.h"
 #include "debug.h"
 
+static const enum htt_t2h_msg_type htt_main_t2h_msg_types[] = {
+       [HTT_MAIN_T2H_MSG_TYPE_VERSION_CONF] = HTT_T2H_MSG_TYPE_VERSION_CONF,
+       [HTT_MAIN_T2H_MSG_TYPE_RX_IND] = HTT_T2H_MSG_TYPE_RX_IND,
+       [HTT_MAIN_T2H_MSG_TYPE_RX_FLUSH] = HTT_T2H_MSG_TYPE_RX_FLUSH,
+       [HTT_MAIN_T2H_MSG_TYPE_PEER_MAP] = HTT_T2H_MSG_TYPE_PEER_MAP,
+       [HTT_MAIN_T2H_MSG_TYPE_PEER_UNMAP] = HTT_T2H_MSG_TYPE_PEER_UNMAP,
+       [HTT_MAIN_T2H_MSG_TYPE_RX_ADDBA] = HTT_T2H_MSG_TYPE_RX_ADDBA,
+       [HTT_MAIN_T2H_MSG_TYPE_RX_DELBA] = HTT_T2H_MSG_TYPE_RX_DELBA,
+       [HTT_MAIN_T2H_MSG_TYPE_TX_COMPL_IND] = HTT_T2H_MSG_TYPE_TX_COMPL_IND,
+       [HTT_MAIN_T2H_MSG_TYPE_PKTLOG] = HTT_T2H_MSG_TYPE_PKTLOG,
+       [HTT_MAIN_T2H_MSG_TYPE_STATS_CONF] = HTT_T2H_MSG_TYPE_STATS_CONF,
+       [HTT_MAIN_T2H_MSG_TYPE_RX_FRAG_IND] = HTT_T2H_MSG_TYPE_RX_FRAG_IND,
+       [HTT_MAIN_T2H_MSG_TYPE_SEC_IND] = HTT_T2H_MSG_TYPE_SEC_IND,
+       [HTT_MAIN_T2H_MSG_TYPE_TX_INSPECT_IND] =
+               HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
+       [HTT_MAIN_T2H_MSG_TYPE_MGMT_TX_COMPL_IND] =
+               HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
+       [HTT_MAIN_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND] =
+               HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND,
+       [HTT_MAIN_T2H_MSG_TYPE_RX_PN_IND] = HTT_T2H_MSG_TYPE_RX_PN_IND,
+       [HTT_MAIN_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND] =
+               HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND,
+       [HTT_MAIN_T2H_MSG_TYPE_TEST] = HTT_T2H_MSG_TYPE_TEST,
+};
+
+static const enum htt_t2h_msg_type htt_10x_t2h_msg_types[] = {
+       [HTT_10X_T2H_MSG_TYPE_VERSION_CONF] = HTT_T2H_MSG_TYPE_VERSION_CONF,
+       [HTT_10X_T2H_MSG_TYPE_RX_IND] = HTT_T2H_MSG_TYPE_RX_IND,
+       [HTT_10X_T2H_MSG_TYPE_RX_FLUSH] = HTT_T2H_MSG_TYPE_RX_FLUSH,
+       [HTT_10X_T2H_MSG_TYPE_PEER_MAP] = HTT_T2H_MSG_TYPE_PEER_MAP,
+       [HTT_10X_T2H_MSG_TYPE_PEER_UNMAP] = HTT_T2H_MSG_TYPE_PEER_UNMAP,
+       [HTT_10X_T2H_MSG_TYPE_RX_ADDBA] = HTT_T2H_MSG_TYPE_RX_ADDBA,
+       [HTT_10X_T2H_MSG_TYPE_RX_DELBA] = HTT_T2H_MSG_TYPE_RX_DELBA,
+       [HTT_10X_T2H_MSG_TYPE_TX_COMPL_IND] = HTT_T2H_MSG_TYPE_TX_COMPL_IND,
+       [HTT_10X_T2H_MSG_TYPE_PKTLOG] = HTT_T2H_MSG_TYPE_PKTLOG,
+       [HTT_10X_T2H_MSG_TYPE_STATS_CONF] = HTT_T2H_MSG_TYPE_STATS_CONF,
+       [HTT_10X_T2H_MSG_TYPE_RX_FRAG_IND] = HTT_T2H_MSG_TYPE_RX_FRAG_IND,
+       [HTT_10X_T2H_MSG_TYPE_SEC_IND] = HTT_T2H_MSG_TYPE_SEC_IND,
+       [HTT_10X_T2H_MSG_TYPE_RC_UPDATE_IND] = HTT_T2H_MSG_TYPE_RC_UPDATE_IND,
+       [HTT_10X_T2H_MSG_TYPE_TX_INSPECT_IND] = HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
+       [HTT_10X_T2H_MSG_TYPE_TEST] = HTT_T2H_MSG_TYPE_TEST,
+       [HTT_10X_T2H_MSG_TYPE_CHAN_CHANGE] = HTT_T2H_MSG_TYPE_CHAN_CHANGE,
+       [HTT_10X_T2H_MSG_TYPE_AGGR_CONF] = HTT_T2H_MSG_TYPE_AGGR_CONF,
+       [HTT_10X_T2H_MSG_TYPE_STATS_NOUPLOAD] = HTT_T2H_MSG_TYPE_STATS_NOUPLOAD,
+       [HTT_10X_T2H_MSG_TYPE_MGMT_TX_COMPL_IND] =
+               HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
+};
+
+static const enum htt_t2h_msg_type htt_tlv_t2h_msg_types[] = {
+       [HTT_TLV_T2H_MSG_TYPE_VERSION_CONF] = HTT_T2H_MSG_TYPE_VERSION_CONF,
+       [HTT_TLV_T2H_MSG_TYPE_RX_IND] = HTT_T2H_MSG_TYPE_RX_IND,
+       [HTT_TLV_T2H_MSG_TYPE_RX_FLUSH] = HTT_T2H_MSG_TYPE_RX_FLUSH,
+       [HTT_TLV_T2H_MSG_TYPE_PEER_MAP] = HTT_T2H_MSG_TYPE_PEER_MAP,
+       [HTT_TLV_T2H_MSG_TYPE_PEER_UNMAP] = HTT_T2H_MSG_TYPE_PEER_UNMAP,
+       [HTT_TLV_T2H_MSG_TYPE_RX_ADDBA] = HTT_T2H_MSG_TYPE_RX_ADDBA,
+       [HTT_TLV_T2H_MSG_TYPE_RX_DELBA] = HTT_T2H_MSG_TYPE_RX_DELBA,
+       [HTT_TLV_T2H_MSG_TYPE_TX_COMPL_IND] = HTT_T2H_MSG_TYPE_TX_COMPL_IND,
+       [HTT_TLV_T2H_MSG_TYPE_PKTLOG] = HTT_T2H_MSG_TYPE_PKTLOG,
+       [HTT_TLV_T2H_MSG_TYPE_STATS_CONF] = HTT_T2H_MSG_TYPE_STATS_CONF,
+       [HTT_TLV_T2H_MSG_TYPE_RX_FRAG_IND] = HTT_T2H_MSG_TYPE_RX_FRAG_IND,
+       [HTT_TLV_T2H_MSG_TYPE_SEC_IND] = HTT_T2H_MSG_TYPE_SEC_IND,
+       [HTT_TLV_T2H_MSG_TYPE_RC_UPDATE_IND] = HTT_T2H_MSG_TYPE_RC_UPDATE_IND,
+       [HTT_TLV_T2H_MSG_TYPE_TX_INSPECT_IND] = HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
+       [HTT_TLV_T2H_MSG_TYPE_MGMT_TX_COMPL_IND] =
+               HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
+       [HTT_TLV_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND] =
+               HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND,
+       [HTT_TLV_T2H_MSG_TYPE_RX_PN_IND] = HTT_T2H_MSG_TYPE_RX_PN_IND,
+       [HTT_TLV_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND] =
+               HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND,
+       [HTT_TLV_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND] =
+               HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND,
+       [HTT_TLV_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE] =
+               HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE,
+       [HTT_TLV_T2H_MSG_TYPE_CHAN_CHANGE] = HTT_T2H_MSG_TYPE_CHAN_CHANGE,
+       [HTT_TLV_T2H_MSG_TYPE_RX_OFLD_PKT_ERR] =
+               HTT_T2H_MSG_TYPE_RX_OFLD_PKT_ERR,
+       [HTT_TLV_T2H_MSG_TYPE_TEST] = HTT_T2H_MSG_TYPE_TEST,
+};
+
 int ath10k_htt_connect(struct ath10k_htt *htt)
 {
        struct ath10k_htc_svc_conn_req conn_req;
@@ -66,6 +146,24 @@ int ath10k_htt_init(struct ath10k *ar)
                8 + /* llc snap */
                2; /* ip4 dscp or ip6 priority */
 
+       switch (ar->htt.op_version) {
+       case ATH10K_FW_HTT_OP_VERSION_10_1:
+               ar->htt.t2h_msg_types = htt_10x_t2h_msg_types;
+               ar->htt.t2h_msg_types_max = HTT_10X_T2H_NUM_MSGS;
+               break;
+       case ATH10K_FW_HTT_OP_VERSION_TLV:
+               ar->htt.t2h_msg_types = htt_tlv_t2h_msg_types;
+               ar->htt.t2h_msg_types_max = HTT_TLV_T2H_NUM_MSGS;
+               break;
+       case ATH10K_FW_HTT_OP_VERSION_MAIN:
+               ar->htt.t2h_msg_types = htt_main_t2h_msg_types;
+               ar->htt.t2h_msg_types_max = HTT_MAIN_T2H_NUM_MSGS;
+               break;
+       case ATH10K_FW_HTT_OP_VERSION_MAX:
+       case ATH10K_FW_HTT_OP_VERSION_UNSET:
+               WARN_ON(1);
+               return -EINVAL;
+       }
        return 0;
 }
 
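Each firmware generation numbers its HTT target-to-host events differently, so the tables above remap the raw on-wire byte into one stable in-driver enum, and ath10k_htt_init() picks the table matching the negotiated HTT op version. A hedged sketch of the translation the RX path is then expected to perform:

    /* Hedged sketch: bounds-check the wire value, then remap it through
     * the table chosen in ath10k_htt_init().
     */
    static enum htt_t2h_msg_type
    htt_remap_sketch(struct ath10k_htt *htt, u8 wire_type)
    {
            if (wire_type >= htt->t2h_msg_types_max)
                    return HTT_T2H_NUM_MSGS;        /* unknown message */

            return htt->t2h_msg_types[wire_type];
    }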
index 874bf44ff7a2f910945e492cf6086ccc4a0d945a..7e8a0d83566379b01fec63235d968a9913639b52 100644 (file)
@@ -25,7 +25,8 @@
 #include <net/mac80211.h>
 
 #include "htc.h"
+#include "hw.h"
 #include "rx_desc.h"
 
 enum htt_dbg_stats_type {
        HTT_DBG_STATS_WAL_PDEV_TXRX = 1 << 0,
@@ -271,35 +273,108 @@ enum htt_mgmt_tx_status {
 
 /*=== target -> host messages ===============================================*/
 
-enum htt_t2h_msg_type {
-       HTT_T2H_MSG_TYPE_VERSION_CONF           = 0x0,
-       HTT_T2H_MSG_TYPE_RX_IND                 = 0x1,
-       HTT_T2H_MSG_TYPE_RX_FLUSH               = 0x2,
-       HTT_T2H_MSG_TYPE_PEER_MAP               = 0x3,
-       HTT_T2H_MSG_TYPE_PEER_UNMAP             = 0x4,
-       HTT_T2H_MSG_TYPE_RX_ADDBA               = 0x5,
-       HTT_T2H_MSG_TYPE_RX_DELBA               = 0x6,
-       HTT_T2H_MSG_TYPE_TX_COMPL_IND           = 0x7,
-       HTT_T2H_MSG_TYPE_PKTLOG                 = 0x8,
-       HTT_T2H_MSG_TYPE_STATS_CONF             = 0x9,
-       HTT_T2H_MSG_TYPE_RX_FRAG_IND            = 0xa,
-       HTT_T2H_MSG_TYPE_SEC_IND                = 0xb,
-       HTT_T2H_MSG_TYPE_RC_UPDATE_IND          = 0xc,
-       HTT_T2H_MSG_TYPE_TX_INSPECT_IND         = 0xd,
-       HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION     = 0xe,
-       HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND   = 0xf,
-       HTT_T2H_MSG_TYPE_RX_PN_IND              = 0x10,
-       HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x11,
-       HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND    = 0x12,
+enum htt_main_t2h_msg_type {
+       HTT_MAIN_T2H_MSG_TYPE_VERSION_CONF             = 0x0,
+       HTT_MAIN_T2H_MSG_TYPE_RX_IND                   = 0x1,
+       HTT_MAIN_T2H_MSG_TYPE_RX_FLUSH                 = 0x2,
+       HTT_MAIN_T2H_MSG_TYPE_PEER_MAP                 = 0x3,
+       HTT_MAIN_T2H_MSG_TYPE_PEER_UNMAP               = 0x4,
+       HTT_MAIN_T2H_MSG_TYPE_RX_ADDBA                 = 0x5,
+       HTT_MAIN_T2H_MSG_TYPE_RX_DELBA                 = 0x6,
+       HTT_MAIN_T2H_MSG_TYPE_TX_COMPL_IND             = 0x7,
+       HTT_MAIN_T2H_MSG_TYPE_PKTLOG                   = 0x8,
+       HTT_MAIN_T2H_MSG_TYPE_STATS_CONF               = 0x9,
+       HTT_MAIN_T2H_MSG_TYPE_RX_FRAG_IND              = 0xa,
+       HTT_MAIN_T2H_MSG_TYPE_SEC_IND                  = 0xb,
+       HTT_MAIN_T2H_MSG_TYPE_TX_INSPECT_IND           = 0xd,
+       HTT_MAIN_T2H_MSG_TYPE_MGMT_TX_COMPL_IND        = 0xe,
+       HTT_MAIN_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND     = 0xf,
+       HTT_MAIN_T2H_MSG_TYPE_RX_PN_IND                = 0x10,
+       HTT_MAIN_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND   = 0x11,
+       HTT_MAIN_T2H_MSG_TYPE_TEST,
+       /* keep this last */
+       HTT_MAIN_T2H_NUM_MSGS
+};
+
+enum htt_10x_t2h_msg_type {
+       HTT_10X_T2H_MSG_TYPE_VERSION_CONF              = 0x0,
+       HTT_10X_T2H_MSG_TYPE_RX_IND                    = 0x1,
+       HTT_10X_T2H_MSG_TYPE_RX_FLUSH                  = 0x2,
+       HTT_10X_T2H_MSG_TYPE_PEER_MAP                  = 0x3,
+       HTT_10X_T2H_MSG_TYPE_PEER_UNMAP                = 0x4,
+       HTT_10X_T2H_MSG_TYPE_RX_ADDBA                  = 0x5,
+       HTT_10X_T2H_MSG_TYPE_RX_DELBA                  = 0x6,
+       HTT_10X_T2H_MSG_TYPE_TX_COMPL_IND              = 0x7,
+       HTT_10X_T2H_MSG_TYPE_PKTLOG                    = 0x8,
+       HTT_10X_T2H_MSG_TYPE_STATS_CONF                = 0x9,
+       HTT_10X_T2H_MSG_TYPE_RX_FRAG_IND               = 0xa,
+       HTT_10X_T2H_MSG_TYPE_SEC_IND                   = 0xb,
+       HTT_10X_T2H_MSG_TYPE_RC_UPDATE_IND             = 0xc,
+       HTT_10X_T2H_MSG_TYPE_TX_INSPECT_IND            = 0xd,
+       HTT_10X_T2H_MSG_TYPE_TEST                      = 0xe,
+       HTT_10X_T2H_MSG_TYPE_CHAN_CHANGE               = 0xf,
+       HTT_10X_T2H_MSG_TYPE_AGGR_CONF                 = 0x11,
+       HTT_10X_T2H_MSG_TYPE_STATS_NOUPLOAD            = 0x12,
+       HTT_10X_T2H_MSG_TYPE_MGMT_TX_COMPL_IND         = 0x13,
+       /* keep this last */
+       HTT_10X_T2H_NUM_MSGS
+};
+
+enum htt_tlv_t2h_msg_type {
+       HTT_TLV_T2H_MSG_TYPE_VERSION_CONF              = 0x0,
+       HTT_TLV_T2H_MSG_TYPE_RX_IND                    = 0x1,
+       HTT_TLV_T2H_MSG_TYPE_RX_FLUSH                  = 0x2,
+       HTT_TLV_T2H_MSG_TYPE_PEER_MAP                  = 0x3,
+       HTT_TLV_T2H_MSG_TYPE_PEER_UNMAP                = 0x4,
+       HTT_TLV_T2H_MSG_TYPE_RX_ADDBA                  = 0x5,
+       HTT_TLV_T2H_MSG_TYPE_RX_DELBA                  = 0x6,
+       HTT_TLV_T2H_MSG_TYPE_TX_COMPL_IND              = 0x7,
+       HTT_TLV_T2H_MSG_TYPE_PKTLOG                    = 0x8,
+       HTT_TLV_T2H_MSG_TYPE_STATS_CONF                = 0x9,
+       HTT_TLV_T2H_MSG_TYPE_RX_FRAG_IND               = 0xa,
+       HTT_TLV_T2H_MSG_TYPE_SEC_IND                   = 0xb,
+       HTT_TLV_T2H_MSG_TYPE_RC_UPDATE_IND             = 0xc, /* deprecated */
+       HTT_TLV_T2H_MSG_TYPE_TX_INSPECT_IND            = 0xd,
+       HTT_TLV_T2H_MSG_TYPE_MGMT_TX_COMPL_IND         = 0xe,
+       HTT_TLV_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND      = 0xf,
+       HTT_TLV_T2H_MSG_TYPE_RX_PN_IND                 = 0x10,
+       HTT_TLV_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND    = 0x11,
+       HTT_TLV_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND       = 0x12,
        /* 0x13 reserved */
-       HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE    = 0x14,
+       HTT_TLV_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE       = 0x14,
+       HTT_TLV_T2H_MSG_TYPE_CHAN_CHANGE               = 0x15,
+       HTT_TLV_T2H_MSG_TYPE_RX_OFLD_PKT_ERR           = 0x16,
+       HTT_TLV_T2H_MSG_TYPE_TEST,
+       /* keep this last */
+       HTT_TLV_T2H_NUM_MSGS
+};
 
-       /* FIXME: Do not depend on this event id. Numbering of this event id is
-        * broken across different firmware revisions and HTT version fails to
-        * indicate this.
-        */
+enum htt_t2h_msg_type {
+       HTT_T2H_MSG_TYPE_VERSION_CONF,
+       HTT_T2H_MSG_TYPE_RX_IND,
+       HTT_T2H_MSG_TYPE_RX_FLUSH,
+       HTT_T2H_MSG_TYPE_PEER_MAP,
+       HTT_T2H_MSG_TYPE_PEER_UNMAP,
+       HTT_T2H_MSG_TYPE_RX_ADDBA,
+       HTT_T2H_MSG_TYPE_RX_DELBA,
+       HTT_T2H_MSG_TYPE_TX_COMPL_IND,
+       HTT_T2H_MSG_TYPE_PKTLOG,
+       HTT_T2H_MSG_TYPE_STATS_CONF,
+       HTT_T2H_MSG_TYPE_RX_FRAG_IND,
+       HTT_T2H_MSG_TYPE_SEC_IND,
+       HTT_T2H_MSG_TYPE_RC_UPDATE_IND,
+       HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
+       HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
+       HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND,
+       HTT_T2H_MSG_TYPE_RX_PN_IND,
+       HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND,
+       HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND,
+       HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE,
+       HTT_T2H_MSG_TYPE_CHAN_CHANGE,
+       HTT_T2H_MSG_TYPE_RX_OFLD_PKT_ERR,
+       HTT_T2H_MSG_TYPE_AGGR_CONF,
+       HTT_T2H_MSG_TYPE_STATS_NOUPLOAD,
        HTT_T2H_MSG_TYPE_TEST,
-
        /* keep this last */
        HTT_T2H_NUM_MSGS
 };
@@ -1222,6 +1297,7 @@ struct htt_tx_done {
        u32 msdu_id;
        bool discard;
        bool no_ack;
+       bool success;
 };
 
 struct htt_peer_map_event {
@@ -1248,6 +1324,10 @@ struct ath10k_htt {
        u8 target_version_major;
        u8 target_version_minor;
        struct completion target_version_received;
+       enum ath10k_fw_htt_op_version op_version;
+
+       const enum htt_t2h_msg_type *t2h_msg_types;
+       u32 t2h_msg_types_max;
 
        struct {
                /*
index 01a2b384f358355ded1207323dd2f499b9e2a3c3..89eb16b30fc42479a3b1c11a7b9b3fd88c043490 100644 (file)
@@ -637,58 +637,21 @@ static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
        return 0;
 }
 
-struct rfc1042_hdr {
-       u8 llc_dsap;
-       u8 llc_ssap;
-       u8 llc_ctrl;
-       u8 snap_oui[3];
-       __be16 snap_type;
-} __packed;
-
 struct amsdu_subframe_hdr {
        u8 dst[ETH_ALEN];
        u8 src[ETH_ALEN];
        __be16 len;
 } __packed;
 
-static const u8 rx_legacy_rate_idx[] = {
-       3,      /* 0x00  - 11Mbps  */
-       2,      /* 0x01  - 5.5Mbps */
-       1,      /* 0x02  - 2Mbps   */
-       0,      /* 0x03  - 1Mbps   */
-       3,      /* 0x04  - 11Mbps  */
-       2,      /* 0x05  - 5.5Mbps */
-       1,      /* 0x06  - 2Mbps   */
-       0,      /* 0x07  - 1Mbps   */
-       10,     /* 0x08  - 48Mbps  */
-       8,      /* 0x09  - 24Mbps  */
-       6,      /* 0x0A  - 12Mbps  */
-       4,      /* 0x0B  - 6Mbps   */
-       11,     /* 0x0C  - 54Mbps  */
-       9,      /* 0x0D  - 36Mbps  */
-       7,      /* 0x0E  - 18Mbps  */
-       5,      /* 0x0F  - 9Mbps   */
-};
-
 static void ath10k_htt_rx_h_rates(struct ath10k *ar,
                                  struct ieee80211_rx_status *status,
                                  struct htt_rx_desc *rxd)
 {
-       enum ieee80211_band band;
-       u8 cck, rate, rate_idx, bw, sgi, mcs, nss;
+       struct ieee80211_supported_band *sband;
+       u8 cck, rate, bw, sgi, mcs, nss;
        u8 preamble = 0;
        u32 info1, info2, info3;
 
-       /* Band value can't be set as undefined but freq can be 0 - use that to
-        * determine whether band is provided.
-        *
-        * FIXME: Perhaps this can go away if CCK rate reporting is a little
-        * reworked?
-        */
-       if (!status->freq)
-               return;
-
-       band = status->band;
        info1 = __le32_to_cpu(rxd->ppdu_start.info1);
        info2 = __le32_to_cpu(rxd->ppdu_start.info2);
        info3 = __le32_to_cpu(rxd->ppdu_start.info3);
@@ -697,31 +660,18 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar,
 
        switch (preamble) {
        case HTT_RX_LEGACY:
+               /* To get the legacy rate index the band is required. Since
+                * band can't be undefined, check whether freq is non-zero.
+                */
+               if (!status->freq)
+                       return;
+
                cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
                rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
-               rate_idx = 0;
-
-               if (rate < 0x08 || rate > 0x0F)
-                       break;
-
-               switch (band) {
-               case IEEE80211_BAND_2GHZ:
-                       if (cck)
-                               rate &= ~BIT(3);
-                       rate_idx = rx_legacy_rate_idx[rate];
-                       break;
-               case IEEE80211_BAND_5GHZ:
-                       rate_idx = rx_legacy_rate_idx[rate];
-                       /* We are using same rate table registering
-                          HW - ath10k_rates[]. In case of 5GHz skip
-                          CCK rates, so -4 here */
-                       rate_idx -= 4;
-                       break;
-               default:
-                       break;
-               }
+               rate &= ~RX_PPDU_START_RATE_FLAG;
 
-               status->rate_idx = rate_idx;
+               sband = &ar->mac.sbands[status->band];
+               status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate);
                break;
        case HTT_RX_HT:
        case HTT_RX_HT_WITH_TXBF:
@@ -773,8 +723,87 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar,
        }
 }
 
+static struct ieee80211_channel *
+ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
+{
+       struct ath10k_peer *peer;
+       struct ath10k_vif *arvif;
+       struct cfg80211_chan_def def;
+       u16 peer_id;
+
+       lockdep_assert_held(&ar->data_lock);
+
+       if (!rxd)
+               return NULL;
+
+       if (rxd->attention.flags &
+           __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
+               return NULL;
+
+       if (!(rxd->msdu_end.info0 &
+             __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
+               return NULL;
+
+       peer_id = MS(__le32_to_cpu(rxd->mpdu_start.info0),
+                    RX_MPDU_START_INFO0_PEER_IDX);
+
+       peer = ath10k_peer_find_by_id(ar, peer_id);
+       if (!peer)
+               return NULL;
+
+       arvif = ath10k_get_arvif(ar, peer->vdev_id);
+       if (WARN_ON_ONCE(!arvif))
+               return NULL;
+
+       if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
+               return NULL;
+
+       return def.chan;
+}
+
+static struct ieee80211_channel *
+ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id)
+{
+       struct ath10k_vif *arvif;
+       struct cfg80211_chan_def def;
+
+       lockdep_assert_held(&ar->data_lock);
+
+       list_for_each_entry(arvif, &ar->arvifs, list) {
+               if (arvif->vdev_id == vdev_id &&
+                   ath10k_mac_vif_chan(arvif->vif, &def) == 0)
+                       return def.chan;
+       }
+
+       return NULL;
+}
+
+static void
+ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw,
+                             struct ieee80211_chanctx_conf *conf,
+                             void *data)
+{
+       struct cfg80211_chan_def *def = data;
+
+       *def = conf->def;
+}
+
+static struct ieee80211_channel *
+ath10k_htt_rx_h_any_channel(struct ath10k *ar)
+{
+       struct cfg80211_chan_def def = {};
+
+       ieee80211_iter_chan_contexts_atomic(ar->hw,
+                                           ath10k_htt_rx_h_any_chan_iter,
+                                           &def);
+
+       return def.chan;
+}
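
For reference, the three lookup helpers above feed a strictly ordered fallback in ath10k_htt_rx_h_channel() below; a summary of that order (comment only, mirroring the code that follows):

/* Channel inference order in ath10k_htt_rx_h_channel(), first
 * non-NULL wins:
 *
 *   1. ar->scan_channel            (set while scanning)
 *   2. ar->rx_channel              (the operating channel)
 *   3. peer channel                (peer_id from the rx descriptor)
 *   4. vdev channel                (vdev_id from the indication)
 *   5. any active channel context  (last resort)
 */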
+
 static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
-                                   struct ieee80211_rx_status *status)
+                                   struct ieee80211_rx_status *status,
+                                   struct htt_rx_desc *rxd,
+                                   u32 vdev_id)
 {
        struct ieee80211_channel *ch;
 
@@ -782,6 +811,12 @@ static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
        ch = ar->scan_channel;
        if (!ch)
                ch = ar->rx_channel;
+       if (!ch)
+               ch = ath10k_htt_rx_h_peer_channel(ar, rxd);
+       if (!ch)
+               ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
+       if (!ch)
+               ch = ath10k_htt_rx_h_any_channel(ar);
        spin_unlock_bh(&ar->data_lock);
 
        if (!ch)
@@ -819,7 +854,8 @@ static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
 
 static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
                                 struct sk_buff_head *amsdu,
-                                struct ieee80211_rx_status *status)
+                                struct ieee80211_rx_status *status,
+                                u32 vdev_id)
 {
        struct sk_buff *first;
        struct htt_rx_desc *rxd;
@@ -851,7 +887,7 @@ static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
                status->flag |= RX_FLAG_NO_SIGNAL_VAL;
 
                ath10k_htt_rx_h_signal(ar, status, rxd);
-               ath10k_htt_rx_h_channel(ar, status);
+               ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id);
                ath10k_htt_rx_h_rates(ar, status, rxd);
        }
 
@@ -929,10 +965,16 @@ static void ath10k_process_rx(struct ath10k *ar,
        ieee80211_rx(ar->hw, skb);
 }
 
-static int ath10k_htt_rx_nwifi_hdrlen(struct ieee80211_hdr *hdr)
+static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
+                                     struct ieee80211_hdr *hdr)
 {
-       /* nwifi header is padded to 4 bytes. this fixes 4addr rx */
-       return round_up(ieee80211_hdrlen(hdr->frame_control), 4);
+       int len = ieee80211_hdrlen(hdr->frame_control);
+
+       if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
+                     ar->fw_features))
+               len = round_up(len, 4);
+
+       return len;
 }
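
The padding rule above is easiest to see with concrete header sizes. A minimal sketch, assuming only the arithmetic of round_up(); the helper name example_nwifi_hdrlen is hypothetical:

/* Illustration: a 3-addr QoS data header is 26 bytes (padding firmware
 * expects the host to skip 28); a 4-addr header is 30 bytes (pads to
 * 32). Firmware advertising
 * ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING does not pad, so the
 * raw length must be used.
 */
static int example_nwifi_hdrlen(int hdrlen, bool fw_pads)
{
        return fw_pads ? round_up(hdrlen, 4) : hdrlen;
}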
 
 static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
@@ -1031,7 +1073,7 @@ static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
 
        /* pull decapped header and copy SA & DA */
        hdr = (struct ieee80211_hdr *)msdu->data;
-       hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
+       hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr);
        ether_addr_copy(da, ieee80211_get_DA(hdr));
        ether_addr_copy(sa, ieee80211_get_SA(hdr));
        skb_pull(msdu, hdr_len);
@@ -1522,7 +1564,7 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
                        break;
                }
 
-               ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status);
+               ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
                ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
                ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
                ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
@@ -1569,7 +1611,7 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
                return;
        }
 
-       ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status);
+       ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
        ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
        ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
        ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
@@ -1598,6 +1640,7 @@ static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
                tx_done.no_ack = true;
                break;
        case HTT_DATA_TX_STATUS_OK:
+               tx_done.success = true;
                break;
        case HTT_DATA_TX_STATUS_DISCARD:
        case HTT_DATA_TX_STATUS_POSTPONE:
@@ -1796,7 +1839,7 @@ static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
                status->flag |= RX_FLAG_NO_SIGNAL_VAL;
 
                ath10k_htt_rx_h_rx_offload_prot(status, msdu);
-               ath10k_htt_rx_h_channel(ar, status);
+               ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
                ath10k_process_rx(ar, status, msdu);
        }
 }
@@ -1869,7 +1912,7 @@ static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
                         * better to report something than nothing though. This
                         * should still give an idea about rx rate to the user.
                         */
-                       ath10k_htt_rx_h_ppdu(ar, &amsdu, status);
+                       ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
                        ath10k_htt_rx_h_filter(ar, &amsdu, status);
                        ath10k_htt_rx_h_mpdu(ar, &amsdu, status);
                        ath10k_htt_rx_h_deliver(ar, &amsdu, status);
@@ -1892,6 +1935,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
 {
        struct ath10k_htt *htt = &ar->htt;
        struct htt_resp *resp = (struct htt_resp *)skb->data;
+       enum htt_t2h_msg_type type;
 
        /* confirm alignment */
        if (!IS_ALIGNED((unsigned long)skb->data, 4))
@@ -1899,7 +1943,16 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
 
        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
                   resp->hdr.msg_type);
-       switch (resp->hdr.msg_type) {
+
+       if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
+               ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X max: 0x%0X\n",
+                          resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
+               dev_kfree_skb_any(skb);
+               return;
+       }
+       type = ar->htt.t2h_msg_types[resp->hdr.msg_type];
+
+       switch (type) {
        case HTT_T2H_MSG_TYPE_VERSION_CONF: {
                htt->target_version_major = resp->ver_resp.major;
                htt->target_version_minor = resp->ver_resp.minor;
@@ -1937,6 +1990,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
 
                switch (status) {
                case HTT_MGMT_TX_STATUS_OK:
+                       tx_done.success = true;
                        break;
                case HTT_MGMT_TX_STATUS_RETRY:
                        tx_done.no_ack = true;
@@ -1976,7 +2030,6 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
                break;
        }
        case HTT_T2H_MSG_TYPE_TEST:
-               /* FIX THIS */
                break;
        case HTT_T2H_MSG_TYPE_STATS_CONF:
                trace_ath10k_htt_stats(ar, skb->data, skb->len);
@@ -2018,11 +2071,8 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
                return;
        }
        case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
-               /* FIXME: This WMI-TLV event is overlapping with 10.2
-                * CHAN_CHANGE - both being 0xF. Neither is being used in
-                * practice so no immediate action is necessary. Nevertheless
-                * HTT may need an abstraction layer like WMI has one day.
-                */
+               break;
+       case HTT_T2H_MSG_TYPE_CHAN_CHANGE:
                break;
        default:
                ath10k_warn(ar, "htt event (%d) not handled\n",
index cbd2bc9e62025fbdc861f4913b00f41c4222bbb9..a60ef7d1d5fcb98278b3838858432f4cb3c8930a 100644 (file)
@@ -26,7 +26,7 @@ void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
 {
        htt->num_pending_tx--;
        if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
-               ieee80211_wake_queues(htt->ar->hw);
+               ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
 }
 
 static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
@@ -49,7 +49,7 @@ static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
 
        htt->num_pending_tx++;
        if (htt->num_pending_tx == htt->max_num_pending_tx)
-               ieee80211_stop_queues(htt->ar->hw);
+               ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
 
 exit:
        spin_unlock_bh(&htt->tx_lock);
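
The inc/dec pair above is a classic high-watermark scheme: pause the tx queues when the last descriptor is taken, resume as soon as a single one frees up. A minimal sketch of the same idea with hypothetical names (the locking done under htt->tx_lock above is elided for brevity):

struct example_txq {
        int pending;
        int max_pending;
        bool paused;
};

static int example_txq_inc(struct example_txq *q)
{
        if (q->pending >= q->max_pending)
                return -EBUSY;
        if (++q->pending == q->max_pending)
                q->paused = true;       /* last slot taken: pause */
        return 0;
}

static void example_txq_dec(struct example_txq *q)
{
        if (--q->pending == q->max_pending - 1)
                q->paused = false;      /* one slot freed: resume */
}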
@@ -420,9 +420,8 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
        int res;
        u8 flags0 = 0;
        u16 msdu_id, flags1 = 0;
-       dma_addr_t paddr;
-       u32 frags_paddr;
-       bool use_frags;
+       dma_addr_t paddr = 0;
+       u32 frags_paddr = 0;
 
        res = ath10k_htt_tx_inc_pending(htt);
        if (res)
@@ -440,12 +439,6 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
        prefetch_len = min(htt->prefetch_len, msdu->len);
        prefetch_len = roundup(prefetch_len, 4);
 
-       /* Since HTT 3.0 there is no separate mgmt tx command. However in case
-        * of mgmt tx using TX_FRM there is not tx fragment list. Instead of tx
-        * fragment list host driver specifies directly frame pointer. */
-       use_frags = htt->target_version_major < 3 ||
-                   !ieee80211_is_mgmt(hdr->frame_control);
-
        skb_cb->htt.txbuf = dma_pool_alloc(htt->tx_pool, GFP_ATOMIC,
                                           &paddr);
        if (!skb_cb->htt.txbuf) {
@@ -466,7 +459,12 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
        if (res)
                goto err_free_txbuf;
 
-       if (likely(use_frags)) {
+       switch (skb_cb->txmode) {
+       case ATH10K_HW_TXRX_RAW:
+       case ATH10K_HW_TXRX_NATIVE_WIFI:
+               flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
+               /* fall through */
+       case ATH10K_HW_TXRX_ETHERNET:
                frags = skb_cb->htt.txbuf->frags;
 
                frags[0].paddr = __cpu_to_le32(skb_cb->paddr);
@@ -474,15 +472,17 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
                frags[1].paddr = 0;
                frags[1].len = 0;
 
-               flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
-                            HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+               flags0 |= SM(skb_cb->txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
 
                frags_paddr = skb_cb->htt.txbuf_paddr;
-       } else {
+               break;
+       case ATH10K_HW_TXRX_MGMT:
                flags0 |= SM(ATH10K_HW_TXRX_MGMT,
                             HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+               flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
 
                frags_paddr = skb_cb->paddr;
+               break;
        }
 
        /* Normally all commands go through HTC which manages tx credits for
@@ -508,11 +508,9 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
                        prefetch_len);
        skb_cb->htt.txbuf->htc_hdr.flags = 0;
 
-       if (!ieee80211_has_protected(hdr->frame_control))
+       if (!skb_cb->is_protected)
                flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
 
-       flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
-
        flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
        flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
        if (msdu->ip_summed == CHECKSUM_PARTIAL) {
index 460771fcfe9ea8507f3316910f9815a2281c632d..89e09cbeac19f4cc64d96002aafd31cc0bc1cc2c 100644 (file)
@@ -78,6 +78,9 @@ enum qca6174_chip_id_rev {
 /* added support for ATH10K_FW_IE_WMI_OP_VERSION */
 #define ATH10K_FW_API4_FILE            "firmware-4.bin"
 
+/* HTT id conflict fix for management frames over HTT */
+#define ATH10K_FW_API5_FILE            "firmware-5.bin"
+
 #define ATH10K_FW_UTF_FILE             "utf.bin"
 
 /* includes also the null byte */
@@ -104,6 +107,11 @@ enum ath10k_fw_ie_type {
         * FW API 4 and above.
         */
        ATH10K_FW_IE_WMI_OP_VERSION = 5,
+
+       /* HTT "operations" interface version, 32 bit value. Supported from
+        * FW API 5 and above.
+        */
+       ATH10K_FW_IE_HTT_OP_VERSION = 6,
 };
 
 enum ath10k_fw_wmi_op_version {
@@ -119,6 +127,20 @@ enum ath10k_fw_wmi_op_version {
        ATH10K_FW_WMI_OP_VERSION_MAX,
 };
 
+enum ath10k_fw_htt_op_version {
+       ATH10K_FW_HTT_OP_VERSION_UNSET = 0,
+
+       ATH10K_FW_HTT_OP_VERSION_MAIN = 1,
+
+       /* also used in 10.2 and 10.2.4 branches */
+       ATH10K_FW_HTT_OP_VERSION_10_1 = 2,
+
+       ATH10K_FW_HTT_OP_VERSION_TLV = 3,
+
+       /* keep last */
+       ATH10K_FW_HTT_OP_VERSION_MAX,
+};
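
The HTT op version selects which in-driver message-id translation table is used (see the t2h_msg_types[] lookup added to the htt rx handler earlier in this patch). A minimal sketch of the indirection, all names hypothetical:

/* Sketch: a raw firmware message id indexes a per-ABI table which
 * yields the abstract id the driver's switch statement understands.
 */
enum example_abs_t2h {
        EXAMPLE_T2H_VERSION_CONF,
        EXAMPLE_T2H_RX_IND,
        EXAMPLE_T2H_MAX,
};

static const u8 example_t2h_map_main[] = {
        [0x0] = EXAMPLE_T2H_VERSION_CONF,
        [0x1] = EXAMPLE_T2H_RX_IND,
};

static int example_translate_t2h(u8 raw, const u8 *map, size_t len)
{
        if (raw >= len)
                return -EINVAL; /* id not known to this firmware ABI */
        return map[raw];
}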
+
 enum ath10k_hw_rev {
        ATH10K_HW_QCA988X,
        ATH10K_HW_QCA6174,
@@ -180,6 +202,27 @@ struct ath10k_pktlog_hdr {
        u8 payload[0];
 } __packed;
 
+enum ath10k_hw_rate_ofdm {
+       ATH10K_HW_RATE_OFDM_48M = 0,
+       ATH10K_HW_RATE_OFDM_24M,
+       ATH10K_HW_RATE_OFDM_12M,
+       ATH10K_HW_RATE_OFDM_6M,
+       ATH10K_HW_RATE_OFDM_54M,
+       ATH10K_HW_RATE_OFDM_36M,
+       ATH10K_HW_RATE_OFDM_18M,
+       ATH10K_HW_RATE_OFDM_9M,
+};
+
+enum ath10k_hw_rate_cck {
+       ATH10K_HW_RATE_CCK_LP_11M = 0,
+       ATH10K_HW_RATE_CCK_LP_5_5M,
+       ATH10K_HW_RATE_CCK_LP_2M,
+       ATH10K_HW_RATE_CCK_LP_1M,
+       ATH10K_HW_RATE_CCK_SP_11M,
+       ATH10K_HW_RATE_CCK_SP_5_5M,
+       ATH10K_HW_RATE_CCK_SP_2M,
+};
+
 /* Target specific defines for MAIN firmware */
 #define TARGET_NUM_VDEVS                       8
 #define TARGET_NUM_PEER_AST                    2
@@ -223,7 +266,7 @@ struct ath10k_pktlog_hdr {
 #define TARGET_10X_NUM_WDS_ENTRIES             32
 #define TARGET_10X_DMA_BURST_SIZE              0
 #define TARGET_10X_MAC_AGGR_DELIM              0
-#define TARGET_10X_AST_SKID_LIMIT              16
+#define TARGET_10X_AST_SKID_LIMIT              128
 #define TARGET_10X_NUM_STATIONS                        128
 #define TARGET_10X_NUM_PEERS                   ((TARGET_10X_NUM_STATIONS) + \
                                                 (TARGET_10X_NUM_VDEVS))
@@ -256,13 +299,13 @@ struct ath10k_pktlog_hdr {
 #define TARGET_10_2_DMA_BURST_SIZE             1
 
 /* Target specific defines for WMI-TLV firmware */
-#define TARGET_TLV_NUM_VDEVS                   3
+#define TARGET_TLV_NUM_VDEVS                   4
 #define TARGET_TLV_NUM_STATIONS                        32
-#define TARGET_TLV_NUM_PEERS                   ((TARGET_TLV_NUM_STATIONS) + \
-                                                (TARGET_TLV_NUM_VDEVS) + \
-                                                2)
+#define TARGET_TLV_NUM_PEERS                   35
+#define TARGET_TLV_NUM_TDLS_VDEVS              1
 #define TARGET_TLV_NUM_TIDS                    ((TARGET_TLV_NUM_PEERS) * 2)
 #define TARGET_TLV_NUM_MSDU_DESC               (1024 + 32)
+#define TARGET_TLV_NUM_WOW_PATTERNS            22
 
 /* Number of Copy Engines supported */
 #define CE_COUNT 8
index 973485bd4121e431464b84f2401a81ec48b94b82..0ed422ae46a487e612508de17bfbbfe662749a0f 100644 (file)
 #include "txrx.h"
 #include "testmode.h"
 #include "wmi.h"
+#include "wmi-tlv.h"
 #include "wmi-ops.h"
+#include "wow.h"
+
+/*********/
+/* Rates */
+/*********/
+
+static struct ieee80211_rate ath10k_rates[] = {
+       { .bitrate = 10,
+         .hw_value = ATH10K_HW_RATE_CCK_LP_1M },
+       { .bitrate = 20,
+         .hw_value = ATH10K_HW_RATE_CCK_LP_2M,
+         .hw_value_short = ATH10K_HW_RATE_CCK_SP_2M,
+         .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+       { .bitrate = 55,
+         .hw_value = ATH10K_HW_RATE_CCK_LP_5_5M,
+         .hw_value_short = ATH10K_HW_RATE_CCK_SP_5_5M,
+         .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+       { .bitrate = 110,
+         .hw_value = ATH10K_HW_RATE_CCK_LP_11M,
+         .hw_value_short = ATH10K_HW_RATE_CCK_SP_11M,
+         .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+
+       { .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M },
+       { .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M },
+       { .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M },
+       { .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M },
+       { .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M },
+       { .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M },
+       { .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M },
+       { .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M },
+};
+
+#define ATH10K_MAC_FIRST_OFDM_RATE_IDX 4
+
+#define ath10k_a_rates (ath10k_rates + ATH10K_MAC_FIRST_OFDM_RATE_IDX)
+#define ath10k_a_rates_size (ARRAY_SIZE(ath10k_rates) - \
+                            ATH10K_MAC_FIRST_OFDM_RATE_IDX)
+#define ath10k_g_rates (ath10k_rates + 0)
+#define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates))
+
+static bool ath10k_mac_bitrate_is_cck(int bitrate)
+{
+       switch (bitrate) {
+       case 10:
+       case 20:
+       case 55:
+       case 110:
+               return true;
+       }
+
+       return false;
+}
+
+static u8 ath10k_mac_bitrate_to_rate(int bitrate)
+{
+       return DIV_ROUND_UP(bitrate, 5) |
+              (ath10k_mac_bitrate_is_cck(bitrate) ? BIT(7) : 0);
+}
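
The resulting values are the standard 802.11 rate codes; two worked examples (bitrate is in units of 100 kbps throughout):

/*   ath10k_mac_bitrate_to_rate(55) == DIV_ROUND_UP(55, 5) | BIT(7)
 *                                  == 11 | 0x80 == 0x8b  (CCK 5.5M)
 *   ath10k_mac_bitrate_to_rate(60) == DIV_ROUND_UP(60, 5)
 *                                  == 12 == 0x0c         (OFDM 6M)
 */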
+
+u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
+                            u8 hw_rate)
+{
+       const struct ieee80211_rate *rate;
+       int i;
+
+       for (i = 0; i < sband->n_bitrates; i++) {
+               rate = &sband->bitrates[i];
+
+               if (rate->hw_value == hw_rate)
+                       return i;
+               else if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE &&
+                        rate->hw_value_short == hw_rate)
+                       return i;
+       }
+
+       return 0;
+}
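
A usage example against the ath10k_rates[] table above, assuming sband is the 2 GHz band built from ath10k_g_rates: a short-preamble CCK 11M PPDU matches via hw_value_short on the 110 entry.

/*   idx = ath10k_mac_hw_rate_to_idx(sband, ATH10K_HW_RATE_CCK_SP_11M);
 *   // idx == 3, sband->bitrates[3].bitrate == 110 (11.0 Mbps),
 *   // matched through hw_value_short on the SHORT_PREAMBLE entry.
 */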
+
+u8 ath10k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
+                            u32 bitrate)
+{
+       int i;
+
+       for (i = 0; i < sband->n_bitrates; i++)
+               if (sband->bitrates[i].bitrate == bitrate)
+                       return i;
+
+       return 0;
+}
+
+static int ath10k_mac_get_max_vht_mcs_map(u16 mcs_map, int nss)
+{
+       switch ((mcs_map >> (2 * nss)) & 0x3) {
+       case IEEE80211_VHT_MCS_SUPPORT_0_7: return BIT(8) - 1;
+       case IEEE80211_VHT_MCS_SUPPORT_0_8: return BIT(9) - 1;
+       case IEEE80211_VHT_MCS_SUPPORT_0_9: return BIT(10) - 1;
+       }
+       return 0;
+}
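
A worked decode of the 2-bits-per-NSS map, using 0xfffa as an example value (a typical 2x2 chip advertising MCS 0-9 on two streams):

/*   nss 0: (0xfffa >> 0) & 0x3 == 2 -> SUPPORT_0_9 -> BIT(10) - 1 == 0x3ff
 *   nss 1: (0xfffa >> 2) & 0x3 == 2 -> SUPPORT_0_9 -> 0x3ff
 *   nss 2: (0xfffa >> 4) & 0x3 == 3 -> not supported -> 0
 */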
+
+static u32
+ath10k_mac_max_ht_nss(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
+{
+       int nss;
+
+       for (nss = IEEE80211_HT_MCS_MASK_LEN - 1; nss >= 0; nss--)
+               if (ht_mcs_mask[nss])
+                       return nss + 1;
+
+       return 1;
+}
+
+static u32
+ath10k_mac_max_vht_nss(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
+{
+       int nss;
+
+       for (nss = NL80211_VHT_NSS_MAX - 1; nss >= 0; nss--)
+               if (vht_mcs_mask[nss])
+                       return nss + 1;
+
+       return 1;
+}
 
 /**********/
 /* Crypto */
 static int ath10k_send_key(struct ath10k_vif *arvif,
                           struct ieee80211_key_conf *key,
                           enum set_key_cmd cmd,
-                          const u8 *macaddr, bool def_idx)
+                          const u8 *macaddr, u32 flags)
 {
        struct ath10k *ar = arvif->ar;
        struct wmi_vdev_install_key_arg arg = {
@@ -45,16 +169,12 @@ static int ath10k_send_key(struct ath10k_vif *arvif,
                .key_idx = key->keyidx,
                .key_len = key->keylen,
                .key_data = key->key,
+               .key_flags = flags,
                .macaddr = macaddr,
        };
 
        lockdep_assert_held(&arvif->ar->conf_mutex);
 
-       if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
-               arg.key_flags = WMI_KEY_PAIRWISE;
-       else
-               arg.key_flags = WMI_KEY_GROUP;
-
        switch (key->cipher) {
        case WLAN_CIPHER_SUITE_CCMP:
                arg.key_cipher = WMI_CIPHER_AES_CCM;
@@ -68,17 +188,10 @@ static int ath10k_send_key(struct ath10k_vif *arvif,
        case WLAN_CIPHER_SUITE_WEP40:
        case WLAN_CIPHER_SUITE_WEP104:
                arg.key_cipher = WMI_CIPHER_WEP;
-               /* AP/IBSS mode requires self-key to be groupwise
-                * Otherwise pairwise key must be set */
-               if (memcmp(macaddr, arvif->vif->addr, ETH_ALEN))
-                       arg.key_flags = WMI_KEY_PAIRWISE;
-
-               if (def_idx)
-                       arg.key_flags |= WMI_KEY_TX_USAGE;
                break;
        case WLAN_CIPHER_SUITE_AES_CMAC:
-               /* this one needs to be done in software */
-               return 1;
+               WARN_ON(1);
+               return -EINVAL;
        default:
                ath10k_warn(ar, "cipher %d is not supported\n", key->cipher);
                return -EOPNOTSUPP;
@@ -95,21 +208,22 @@ static int ath10k_send_key(struct ath10k_vif *arvif,
 static int ath10k_install_key(struct ath10k_vif *arvif,
                              struct ieee80211_key_conf *key,
                              enum set_key_cmd cmd,
-                             const u8 *macaddr, bool def_idx)
+                             const u8 *macaddr, u32 flags)
 {
        struct ath10k *ar = arvif->ar;
        int ret;
+       unsigned long time_left;
 
        lockdep_assert_held(&ar->conf_mutex);
 
        reinit_completion(&ar->install_key_done);
 
-       ret = ath10k_send_key(arvif, key, cmd, macaddr, def_idx);
+       ret = ath10k_send_key(arvif, key, cmd, macaddr, flags);
        if (ret)
                return ret;
 
-       ret = wait_for_completion_timeout(&ar->install_key_done, 3*HZ);
-       if (ret == 0)
+       time_left = wait_for_completion_timeout(&ar->install_key_done, 3 * HZ);
+       if (time_left == 0)
                return -ETIMEDOUT;
 
        return 0;
@@ -122,7 +236,7 @@ static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif,
        struct ath10k_peer *peer;
        int ret;
        int i;
-       bool def_idx;
+       u32 flags;
 
        lockdep_assert_held(&ar->conf_mutex);
 
@@ -136,14 +250,20 @@ static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif,
        for (i = 0; i < ARRAY_SIZE(arvif->wep_keys); i++) {
                if (arvif->wep_keys[i] == NULL)
                        continue;
-               /* set TX_USAGE flag for default key id */
-               if (arvif->def_wep_key_idx == i)
-                       def_idx = true;
-               else
-                       def_idx = false;
+
+               flags = 0;
+               flags |= WMI_KEY_PAIRWISE;
+
+               ret = ath10k_install_key(arvif, arvif->wep_keys[i], SET_KEY,
+                                        addr, flags);
+               if (ret)
+                       return ret;
+
+               flags = 0;
+               flags |= WMI_KEY_GROUP;
 
                ret = ath10k_install_key(arvif, arvif->wep_keys[i], SET_KEY,
-                                        addr, def_idx);
+                                        addr, flags);
                if (ret)
                        return ret;
 
@@ -152,6 +272,27 @@ static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif,
                spin_unlock_bh(&ar->data_lock);
        }
 
+       /* In some cases (notably with static WEP IBSS with multiple keys)
+        * multicast Tx becomes broken. Both pairwise and groupwise keys are
+        * installed already. Using WMI_KEY_TX_USAGE in different combinations
+        * didn't seem help. Using def_keyid vdev parameter seems to be
+        * effective so use that.
+        *
+        * FIXME: Revisit. Perhaps this can be done in a less hacky way.
+        */
+       if (arvif->def_wep_key_idx == -1)
+               return 0;
+
+       ret = ath10k_wmi_vdev_set_param(arvif->ar,
+                                       arvif->vdev_id,
+                                       arvif->ar->wmi.vdev_param->def_keyid,
+                                       arvif->def_wep_key_idx);
+       if (ret) {
+               ath10k_warn(ar, "failed to re-set def wpa key idxon vdev %i: %d\n",
+                           arvif->vdev_id, ret);
+               return ret;
+       }
+
        return 0;
 }
 
@@ -163,6 +304,7 @@ static int ath10k_clear_peer_keys(struct ath10k_vif *arvif,
        int first_errno = 0;
        int ret;
        int i;
+       u32 flags = 0;
 
        lockdep_assert_held(&ar->conf_mutex);
 
@@ -179,7 +321,7 @@ static int ath10k_clear_peer_keys(struct ath10k_vif *arvif,
 
                /* key flags are not required to delete the key */
                ret = ath10k_install_key(arvif, peer->keys[i],
-                                        DISABLE_KEY, addr, false);
+                                        DISABLE_KEY, addr, flags);
                if (ret && first_errno == 0)
                        first_errno = ret;
 
@@ -229,6 +371,7 @@ static int ath10k_clear_vdev_key(struct ath10k_vif *arvif,
        int first_errno = 0;
        int ret;
        int i;
+       u32 flags = 0;
 
        lockdep_assert_held(&ar->conf_mutex);
 
@@ -254,7 +397,7 @@ static int ath10k_clear_vdev_key(struct ath10k_vif *arvif,
                if (i == ARRAY_SIZE(peer->keys))
                        break;
                /* key flags are not required to delete the key */
-               ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr, false);
+               ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr, flags);
                if (ret && first_errno == 0)
                        first_errno = ret;
 
@@ -266,6 +409,39 @@ static int ath10k_clear_vdev_key(struct ath10k_vif *arvif,
        return first_errno;
 }
 
+static int ath10k_mac_vif_update_wep_key(struct ath10k_vif *arvif,
+                                        struct ieee80211_key_conf *key)
+{
+       struct ath10k *ar = arvif->ar;
+       struct ath10k_peer *peer;
+       int ret;
+
+       lockdep_assert_held(&ar->conf_mutex);
+
+       list_for_each_entry(peer, &ar->peers, list) {
+               if (!memcmp(peer->addr, arvif->vif->addr, ETH_ALEN))
+                       continue;
+
+               if (!memcmp(peer->addr, arvif->bssid, ETH_ALEN))
+                       continue;
+
+               if (peer->keys[key->keyidx] == key)
+                       continue;
+
+               ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vif vdev %i key %i needs update\n",
+                          arvif->vdev_id, key->keyidx);
+
+               ret = ath10k_install_peer_wep_keys(arvif, peer->addr);
+               if (ret) {
+                       ath10k_warn(ar, "failed to update wep keys on vdev %i for peer %pM: %d\n",
+                                   arvif->vdev_id, peer->addr, ret);
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
 /*********************/
 /* General utilities */
 /*********************/
@@ -364,7 +540,56 @@ static u8 ath10k_parse_mpdudensity(u8 mpdudensity)
        }
 }
 
-static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr)
+int ath10k_mac_vif_chan(struct ieee80211_vif *vif,
+                       struct cfg80211_chan_def *def)
+{
+       struct ieee80211_chanctx_conf *conf;
+
+       rcu_read_lock();
+       conf = rcu_dereference(vif->chanctx_conf);
+       if (!conf) {
+               rcu_read_unlock();
+               return -ENOENT;
+       }
+
+       *def = conf->def;
+       rcu_read_unlock();
+
+       return 0;
+}
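
Callers treat a non-zero return as "no channel context assigned yet" and bail out rather than touch def.chan, e.g. (hypothetical helper, mirroring the peer-assoc call sites later in this patch):

static enum ieee80211_band example_vif_band(struct ieee80211_vif *vif)
{
        struct cfg80211_chan_def def;

        if (ath10k_mac_vif_chan(vif, &def))
                return IEEE80211_NUM_BANDS;     /* no chanctx assigned */

        return def.chan->band;  /* def is a copy; no RCU needed here */
}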
+
+static void ath10k_mac_num_chanctxs_iter(struct ieee80211_hw *hw,
+                                        struct ieee80211_chanctx_conf *conf,
+                                        void *data)
+{
+       int *num = data;
+
+       (*num)++;
+}
+
+static int ath10k_mac_num_chanctxs(struct ath10k *ar)
+{
+       int num = 0;
+
+       ieee80211_iter_chan_contexts_atomic(ar->hw,
+                                           ath10k_mac_num_chanctxs_iter,
+                                           &num);
+
+       return num;
+}
+
+static void
+ath10k_mac_get_any_chandef_iter(struct ieee80211_hw *hw,
+                               struct ieee80211_chanctx_conf *conf,
+                               void *data)
+{
+       struct cfg80211_chan_def **def = data;
+
+       *def = &conf->def;
+}
+
+static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr,
+                             enum wmi_peer_type peer_type)
 {
        int ret;
 
@@ -373,7 +598,7 @@ static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr)
        if (ar->num_peers >= ar->max_num_peers)
                return -ENOBUFS;
 
-       ret = ath10k_wmi_peer_create(ar, vdev_id, addr);
+       ret = ath10k_wmi_peer_create(ar, vdev_id, addr, peer_type);
        if (ret) {
                ath10k_warn(ar, "failed to create wmi peer %pM on vdev %i: %i\n",
                            addr, vdev_id, ret);
@@ -517,6 +742,38 @@ static void ath10k_peer_cleanup_all(struct ath10k *ar)
        ar->num_stations = 0;
 }
 
+static int ath10k_mac_tdls_peer_update(struct ath10k *ar, u32 vdev_id,
+                                      struct ieee80211_sta *sta,
+                                      enum wmi_tdls_peer_state state)
+{
+       int ret;
+       struct wmi_tdls_peer_update_cmd_arg arg = {};
+       struct wmi_tdls_peer_capab_arg cap = {};
+       struct wmi_channel_arg chan_arg = {};
+
+       lockdep_assert_held(&ar->conf_mutex);
+
+       arg.vdev_id = vdev_id;
+       arg.peer_state = state;
+       ether_addr_copy(arg.addr, sta->addr);
+
+       cap.peer_max_sp = sta->max_sp;
+       cap.peer_uapsd_queues = sta->uapsd_queues;
+
+       if (state == WMI_TDLS_PEER_STATE_CONNECTED &&
+           !sta->tdls_initiator)
+               cap.is_peer_responder = 1;
+
+       ret = ath10k_wmi_tdls_peer_update(ar, &arg, &cap, &chan_arg);
+       if (ret) {
+               ath10k_warn(ar, "failed to update tdls peer %pM on vdev %i: %i\n",
+                           arg.addr, vdev_id, ret);
+               return ret;
+       }
+
+       return 0;
+}
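
A typical call site would mark the peer connected once the TDLS link is up, e.g. (hypothetical placement):

        ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta,
                                          WMI_TDLS_PEER_STATE_CONNECTED);
        if (ret)
                return ret;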
+
 /************************/
 /* Interface management */
 /************************/
@@ -561,16 +818,16 @@ static void ath10k_mac_vif_beacon_cleanup(struct ath10k_vif *arvif)
 
 static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
 {
-       int ret;
+       unsigned long time_left;
 
        lockdep_assert_held(&ar->conf_mutex);
 
        if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
                return -ESHUTDOWN;
 
-       ret = wait_for_completion_timeout(&ar->vdev_setup_done,
-                                         ATH10K_VDEV_SETUP_TIMEOUT_HZ);
-       if (ret == 0)
+       time_left = wait_for_completion_timeout(&ar->vdev_setup_done,
+                                               ATH10K_VDEV_SETUP_TIMEOUT_HZ);
+       if (time_left == 0)
                return -ETIMEDOUT;
 
        return 0;
@@ -578,13 +835,21 @@ static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
 
 static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
 {
-       struct cfg80211_chan_def *chandef = &ar->chandef;
-       struct ieee80211_channel *channel = chandef->chan;
+       struct cfg80211_chan_def *chandef = NULL;
+       struct ieee80211_channel *channel = NULL;
        struct wmi_vdev_start_request_arg arg = {};
        int ret = 0;
 
        lockdep_assert_held(&ar->conf_mutex);
 
+       ieee80211_iter_chan_contexts_atomic(ar->hw,
+                                           ath10k_mac_get_any_chandef_iter,
+                                           &chandef);
+       if (WARN_ON_ONCE(!chandef))
+               return -ENOENT;
+
+       channel = chandef->chan;
+
        arg.vdev_id = vdev_id;
        arg.channel.freq = channel->center_freq;
        arg.channel.band_center_freq1 = chandef->center_freq1;
@@ -766,27 +1031,78 @@ static int ath10k_monitor_stop(struct ath10k *ar)
        return 0;
 }
 
+static bool ath10k_mac_monitor_vdev_is_needed(struct ath10k *ar)
+{
+       int num_ctx;
+
+       /* At least one chanctx is required to derive a channel to start
+        * monitor vdev on.
+        */
+       num_ctx = ath10k_mac_num_chanctxs(ar);
+       if (num_ctx == 0)
+               return false;
+
+       /* If there's already an existing special monitor interface then don't
+        * bother creating another monitor vdev.
+        */
+       if (ar->monitor_arvif)
+               return false;
+
+       return ar->monitor ||
+              test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+}
+
+static bool ath10k_mac_monitor_vdev_is_allowed(struct ath10k *ar)
+{
+       int num_ctx;
+
+       num_ctx = ath10k_mac_num_chanctxs(ar);
+
+       /* FIXME: Current interface combinations and cfg80211/mac80211 code
+        * shouldn't allow this but make sure to prevent handling the following
+        * case anyway since multi-channel DFS hasn't been tested at all.
+        */
+       if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags) && num_ctx > 1)
+               return false;
+
+       return true;
+}
+
 static int ath10k_monitor_recalc(struct ath10k *ar)
 {
-       bool should_start;
+       bool needed;
+       bool allowed;
+       int ret;
 
        lockdep_assert_held(&ar->conf_mutex);
 
-       should_start = ar->monitor ||
-                      ar->filter_flags & FIF_PROMISC_IN_BSS ||
-                      test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+       needed = ath10k_mac_monitor_vdev_is_needed(ar);
+       allowed = ath10k_mac_monitor_vdev_is_allowed(ar);
 
        ath10k_dbg(ar, ATH10K_DBG_MAC,
-                  "mac monitor recalc started? %d should? %d\n",
-                  ar->monitor_started, should_start);
+                  "mac monitor recalc started? %d needed? %d allowed? %d\n",
+                  ar->monitor_started, needed, allowed);
 
-       if (should_start == ar->monitor_started)
+       if (WARN_ON(needed && !allowed)) {
+               if (ar->monitor_started) {
+                       ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopping disallowed monitor\n");
+
+                       ret = ath10k_monitor_stop(ar);
+                       if (ret)
+                               ath10k_warn(ar, "failed to stop disallowed monitor: %d\n", ret);
+                               /* not serious */
+               }
+
+               return -EPERM;
+       }
+
+       if (needed == ar->monitor_started)
                return 0;
 
-       if (should_start)
+       if (needed)
                return ath10k_monitor_start(ar);
-
-       return ath10k_monitor_stop(ar);
+       else
+               return ath10k_monitor_stop(ar);
 }
 
 static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif)
@@ -798,12 +1114,14 @@ static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif)
 
        vdev_param = ar->wmi.vdev_param->enable_rtscts;
 
-       if (arvif->use_cts_prot || arvif->num_legacy_stations > 0)
-               rts_cts |= SM(WMI_RTSCTS_ENABLED, WMI_RTSCTS_SET);
+       rts_cts |= SM(WMI_RTSCTS_ENABLED, WMI_RTSCTS_SET);
 
        if (arvif->num_legacy_stations > 0)
                rts_cts |= SM(WMI_RTSCTS_ACROSS_SW_RETRIES,
                              WMI_RTSCTS_PROFILE);
+       else
+               rts_cts |= SM(WMI_RTSCTS_FOR_SECOND_RATESERIES,
+                             WMI_RTSCTS_PROFILE);
 
        return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
                                         rts_cts);
@@ -846,6 +1164,27 @@ static int ath10k_stop_cac(struct ath10k *ar)
        return 0;
 }
 
+static void ath10k_mac_has_radar_iter(struct ieee80211_hw *hw,
+                                     struct ieee80211_chanctx_conf *conf,
+                                     void *data)
+{
+       bool *ret = data;
+
+       if (!*ret && conf->radar_enabled)
+               *ret = true;
+}
+
+static bool ath10k_mac_has_radar_enabled(struct ath10k *ar)
+{
+       bool has_radar = false;
+
+       ieee80211_iter_chan_contexts_atomic(ar->hw,
+                                           ath10k_mac_has_radar_iter,
+                                           &has_radar);
+
+       return has_radar;
+}
+
 static void ath10k_recalc_radar_detection(struct ath10k *ar)
 {
        int ret;
@@ -854,7 +1193,7 @@ static void ath10k_recalc_radar_detection(struct ath10k *ar)
 
        ath10k_stop_cac(ar);
 
-       if (!ar->radar_enabled)
+       if (!ath10k_mac_has_radar_enabled(ar))
                return;
 
        if (ar->num_started_vdevs > 0)
@@ -872,10 +1211,44 @@ static void ath10k_recalc_radar_detection(struct ath10k *ar)
        }
 }
 
-static int ath10k_vdev_start_restart(struct ath10k_vif *arvif, bool restart)
+static int ath10k_vdev_stop(struct ath10k_vif *arvif)
+{
+       struct ath10k *ar = arvif->ar;
+       int ret;
+
+       lockdep_assert_held(&ar->conf_mutex);
+
+       reinit_completion(&ar->vdev_setup_done);
+
+       ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
+       if (ret) {
+               ath10k_warn(ar, "failed to stop WMI vdev %i: %d\n",
+                           arvif->vdev_id, ret);
+               return ret;
+       }
+
+       ret = ath10k_vdev_setup_sync(ar);
+       if (ret) {
+               ath10k_warn(ar, "failed to syncronise setup for vdev %i: %d\n",
+                           arvif->vdev_id, ret);
+               return ret;
+       }
+
+       WARN_ON(ar->num_started_vdevs == 0);
+
+       if (ar->num_started_vdevs != 0) {
+               ar->num_started_vdevs--;
+               ath10k_recalc_radar_detection(ar);
+       }
+
+       return ret;
+}
+
+static int ath10k_vdev_start_restart(struct ath10k_vif *arvif,
+                                    const struct cfg80211_chan_def *chandef,
+                                    bool restart)
 {
        struct ath10k *ar = arvif->ar;
-       struct cfg80211_chan_def *chandef = &ar->chandef;
        struct wmi_vdev_start_request_arg arg = {};
        int ret = 0;
 
@@ -939,47 +1312,16 @@ static int ath10k_vdev_start_restart(struct ath10k_vif *arvif, bool restart)
        return ret;
 }
 
-static int ath10k_vdev_start(struct ath10k_vif *arvif)
+static int ath10k_vdev_start(struct ath10k_vif *arvif,
+                            const struct cfg80211_chan_def *def)
 {
-       return ath10k_vdev_start_restart(arvif, false);
+       return ath10k_vdev_start_restart(arvif, def, false);
 }
 
-static int ath10k_vdev_restart(struct ath10k_vif *arvif)
-{
-       return ath10k_vdev_start_restart(arvif, true);
-}
-
-static int ath10k_vdev_stop(struct ath10k_vif *arvif)
+static int ath10k_vdev_restart(struct ath10k_vif *arvif,
+                              const struct cfg80211_chan_def *def)
 {
-       struct ath10k *ar = arvif->ar;
-       int ret;
-
-       lockdep_assert_held(&ar->conf_mutex);
-
-       reinit_completion(&ar->vdev_setup_done);
-
-       ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
-       if (ret) {
-               ath10k_warn(ar, "failed to stop WMI vdev %i: %d\n",
-                           arvif->vdev_id, ret);
-               return ret;
-       }
-
-       ret = ath10k_vdev_setup_sync(ar);
-       if (ret) {
-               ath10k_warn(ar, "failed to synchronize setup for vdev %i stop: %d\n",
-                           arvif->vdev_id, ret);
-               return ret;
-       }
-
-       WARN_ON(ar->num_started_vdevs == 0);
-
-       if (ar->num_started_vdevs != 0) {
-               ar->num_started_vdevs--;
-               ath10k_recalc_radar_detection(ar);
-       }
-
-       return ret;
+       return ath10k_vdev_start_restart(arvif, def, true);
 }
 
 static int ath10k_mac_setup_bcn_p2p_ie(struct ath10k_vif *arvif,
@@ -1056,6 +1398,10 @@ static int ath10k_mac_setup_bcn_tmpl(struct ath10k_vif *arvif)
        if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
                return 0;
 
+       if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
+           arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
+               return 0;
+
        bcn = ieee80211_beacon_get_template(hw, vif, &offs);
        if (!bcn) {
                ath10k_warn(ar, "failed to get beacon template from mac80211\n");
@@ -1101,6 +1447,9 @@ static int ath10k_mac_setup_prb_tmpl(struct ath10k_vif *arvif)
        if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
                return 0;
 
+       if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
+               return 0;
+
        prb = ieee80211_proberesp_get(hw, vif);
        if (!prb) {
                ath10k_warn(ar, "failed to get probe resp template from mac80211\n");
@@ -1119,6 +1468,80 @@ static int ath10k_mac_setup_prb_tmpl(struct ath10k_vif *arvif)
        return 0;
 }
 
+static int ath10k_mac_vif_fix_hidden_ssid(struct ath10k_vif *arvif)
+{
+       struct ath10k *ar = arvif->ar;
+       struct cfg80211_chan_def def;
+       int ret;
+
+       /* When the vdev is first started during assign_vif_chanctx() some
+        * information is missing, notably the SSID. Firmware revisions with beacon
+        * offloading require the SSID to be provided during vdev (re)start to
+        * handle hidden SSID properly.
+        *
+        * Vdev restart must be done after vdev has been both started and
+        * upped. Otherwise some firmware revisions (at least 10.2) fail to
+        * deliver vdev restart response event causing timeouts during vdev
+        * syncing in ath10k.
+        *
+        * Note: The vdev down/up and template reinstallation could be skipped
+        * since only wmi-tlv firmware are known to have beacon offload and
+        * wmi-tlv doesn't seem to misbehave like 10.2 wrt vdev restart
+        * response delivery. It's probably more robust to keep it as is.
+        */
+       if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
+               return 0;
+
+       if (WARN_ON(!arvif->is_started))
+               return -EINVAL;
+
+       if (WARN_ON(!arvif->is_up))
+               return -EINVAL;
+
+       if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
+               return -EINVAL;
+
+       ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
+       if (ret) {
+               ath10k_warn(ar, "failed to bring down ap vdev %i: %d\n",
+                           arvif->vdev_id, ret);
+               return ret;
+       }
+
+       /* Vdev down resets beacon & presp templates. Reinstall them. Otherwise
+        * firmware will crash upon vdev up.
+        */
+
+       ret = ath10k_mac_setup_bcn_tmpl(arvif);
+       if (ret) {
+               ath10k_warn(ar, "failed to update beacon template: %d\n", ret);
+               return ret;
+       }
+
+       ret = ath10k_mac_setup_prb_tmpl(arvif);
+       if (ret) {
+               ath10k_warn(ar, "failed to update presp template: %d\n", ret);
+               return ret;
+       }
+
+       ret = ath10k_vdev_restart(arvif, &def);
+       if (ret) {
+               ath10k_warn(ar, "failed to restart ap vdev %i: %d\n",
+                           arvif->vdev_id, ret);
+               return ret;
+       }
+
+       ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
+                                arvif->bssid);
+       if (ret) {
+               ath10k_warn(ar, "failed to bring up ap vdev %i: %d\n",
+                           arvif->vdev_id, ret);
+               return ret;
+       }
+
+       return 0;
+}
+
 static void ath10k_control_beaconing(struct ath10k_vif *arvif,
                                     struct ieee80211_bss_conf *info)
 {
@@ -1128,9 +1551,11 @@ static void ath10k_control_beaconing(struct ath10k_vif *arvif,
        lockdep_assert_held(&arvif->ar->conf_mutex);
 
        if (!info->enable_beacon) {
-               ath10k_vdev_stop(arvif);
+               ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
+               if (ret)
+                       ath10k_warn(ar, "failed to down vdev_id %i: %d\n",
+                                   arvif->vdev_id, ret);
 
-               arvif->is_started = false;
                arvif->is_up = false;
 
                spin_lock_bh(&arvif->ar->data_lock);
@@ -1142,10 +1567,6 @@ static void ath10k_control_beaconing(struct ath10k_vif *arvif,
 
        arvif->tx_seq_no = 0x1000;
 
-       ret = ath10k_vdev_start(arvif);
-       if (ret)
-               return;
-
        arvif->aid = 0;
        ether_addr_copy(arvif->bssid, info->bssid);
 
@@ -1154,13 +1575,18 @@ static void ath10k_control_beaconing(struct ath10k_vif *arvif,
        if (ret) {
                ath10k_warn(ar, "failed to bring up vdev %d: %i\n",
                            arvif->vdev_id, ret);
-               ath10k_vdev_stop(arvif);
                return;
        }
 
-       arvif->is_started = true;
        arvif->is_up = true;
 
+       ret = ath10k_mac_vif_fix_hidden_ssid(arvif);
+       if (ret) {
+               ath10k_warn(ar, "failed to fix hidden ssid for vdev %i, expect trouble: %d\n",
+                           arvif->vdev_id, ret);
+               return;
+       }
+
        ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id);
 }
 
@@ -1175,11 +1601,6 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
        lockdep_assert_held(&arvif->ar->conf_mutex);
 
        if (!info->ibss_joined) {
-               ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, self_peer);
-               if (ret)
-                       ath10k_warn(ar, "failed to delete IBSS self peer %pM for vdev %d: %d\n",
-                                   self_peer, arvif->vdev_id, ret);
-
                if (is_zero_ether_addr(arvif->bssid))
                        return;
 
@@ -1188,13 +1609,6 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
                return;
        }
 
-       ret = ath10k_peer_create(arvif->ar, arvif->vdev_id, self_peer);
-       if (ret) {
-               ath10k_warn(ar, "failed to create IBSS self peer %pM for vdev %d: %d\n",
-                           self_peer, arvif->vdev_id, ret);
-               return;
-       }
-
        vdev_param = arvif->ar->wmi.vdev_param->atim_window;
        ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, vdev_param,
                                        ATH10K_DEFAULT_ATIM);
@@ -1294,7 +1708,14 @@ static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
                enable_ps = false;
        }
 
-       if (enable_ps) {
+       if (!arvif->is_started) {
+               /* mac80211 can update vif powersave state while disconnected.
+                * Firmware doesn't behave nicely and consumes more power than
+                * necessary if PS is disabled on a non-started vdev. Hence
+                * force-enable PS for non-running vdevs.
+                */
+               psmode = WMI_STA_PS_MODE_ENABLED;
+       } else if (enable_ps) {
                psmode = WMI_STA_PS_MODE_ENABLED;
                param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
 
@@ -1361,6 +1782,123 @@ static int ath10k_mac_vif_disable_keepalive(struct ath10k_vif *arvif)
        return 0;
 }
 
+static void ath10k_mac_vif_ap_csa_count_down(struct ath10k_vif *arvif)
+{
+       struct ath10k *ar = arvif->ar;
+       struct ieee80211_vif *vif = arvif->vif;
+       int ret;
+
+       lockdep_assert_held(&arvif->ar->conf_mutex);
+
+       if (WARN_ON(!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)))
+               return;
+
+       if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
+               return;
+
+       if (!vif->csa_active)
+               return;
+
+       if (!arvif->is_up)
+               return;
+
+       if (!ieee80211_csa_is_complete(vif)) {
+               ieee80211_csa_update_counter(vif);
+
+               ret = ath10k_mac_setup_bcn_tmpl(arvif);
+               if (ret)
+                       ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n",
+                                   ret);
+
+               ret = ath10k_mac_setup_prb_tmpl(arvif);
+               if (ret)
+                       ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n",
+                                   ret);
+       } else {
+               ieee80211_csa_finish(vif);
+       }
+}
+
+static void ath10k_mac_vif_ap_csa_work(struct work_struct *work)
+{
+       struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
+                                               ap_csa_work);
+       struct ath10k *ar = arvif->ar;
+
+       mutex_lock(&ar->conf_mutex);
+       ath10k_mac_vif_ap_csa_count_down(arvif);
+       mutex_unlock(&ar->conf_mutex);
+}
+
+static void ath10k_mac_handle_beacon_iter(void *data, u8 *mac,
+                                         struct ieee80211_vif *vif)
+{
+       struct sk_buff *skb = data;
+       struct ieee80211_mgmt *mgmt = (void *)skb->data;
+       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+
+       if (vif->type != NL80211_IFTYPE_STATION)
+               return;
+
+       if (!ether_addr_equal(mgmt->bssid, vif->bss_conf.bssid))
+               return;
+
+       cancel_delayed_work(&arvif->connection_loss_work);
+}
+
+void ath10k_mac_handle_beacon(struct ath10k *ar, struct sk_buff *skb)
+{
+       ieee80211_iterate_active_interfaces_atomic(ar->hw,
+                                                  IEEE80211_IFACE_ITER_NORMAL,
+                                                  ath10k_mac_handle_beacon_iter,
+                                                  skb);
+}
+
+static void ath10k_mac_handle_beacon_miss_iter(void *data, u8 *mac,
+                                              struct ieee80211_vif *vif)
+{
+       u32 *vdev_id = data;
+       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct ath10k *ar = arvif->ar;
+       struct ieee80211_hw *hw = ar->hw;
+
+       if (arvif->vdev_id != *vdev_id)
+               return;
+
+       if (!arvif->is_up)
+               return;
+
+       ieee80211_beacon_loss(vif);
+
+       /* Firmware doesn't report beacon loss events repeatedly. If AP probe
+        * (done by mac80211) succeeds but beacons do not resume then it
+        * doesn't make sense to continue operation. Queue connection loss work
+        * which can be cancelled when beacon is received.
+        */
+       ieee80211_queue_delayed_work(hw, &arvif->connection_loss_work,
+                                    ATH10K_CONNECTION_LOSS_HZ);
+}
+
+void ath10k_mac_handle_beacon_miss(struct ath10k *ar, u32 vdev_id)
+{
+       ieee80211_iterate_active_interfaces_atomic(ar->hw,
+                                                  IEEE80211_IFACE_ITER_NORMAL,
+                                                  ath10k_mac_handle_beacon_miss_iter,
+                                                  &vdev_id);
+}
+
+static void ath10k_mac_vif_sta_connection_loss_work(struct work_struct *work)
+{
+       struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
+                                               connection_loss_work.work);
+       struct ieee80211_vif *vif = arvif->vif;
+
+       if (!arvif->is_up)
+               return;
+
+       ieee80211_connection_loss(vif);
+}
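
Taken together, the beacon and beacon-miss iterators above arm and disarm a delayed disconnect; a summary of the sequence:

/*   beacon miss event -> ieee80211_beacon_loss(vif)
 *                     -> queue connection_loss_work (ATH10K_CONNECTION_LOSS_HZ)
 *   beacon received   -> cancel_delayed_work(&arvif->connection_loss_work)
 *   work fires        -> ieee80211_connection_loss(vif), mac80211 disconnects
 */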
+
 /**********************/
 /* Station management */
 /**********************/
@@ -1388,12 +1926,18 @@ static void ath10k_peer_assoc_h_basic(struct ath10k *ar,
                                      struct wmi_peer_assoc_complete_arg *arg)
 {
        struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       u32 aid;
 
        lockdep_assert_held(&ar->conf_mutex);
 
+       if (vif->type == NL80211_IFTYPE_STATION)
+               aid = vif->bss_conf.aid;
+       else
+               aid = sta->aid;
+
        ether_addr_copy(arg->addr, sta->addr);
        arg->vdev_id = arvif->vdev_id;
-       arg->peer_aid = sta->aid;
+       arg->peer_aid = aid;
        arg->peer_flags |= WMI_PEER_AUTH;
        arg->peer_listen_intval = ath10k_peer_assoc_h_listen_intval(ar, vif);
        arg->peer_num_spatial_streams = 1;
@@ -1405,15 +1949,18 @@ static void ath10k_peer_assoc_h_crypto(struct ath10k *ar,
                                       struct wmi_peer_assoc_complete_arg *arg)
 {
        struct ieee80211_bss_conf *info = &vif->bss_conf;
+       struct cfg80211_chan_def def;
        struct cfg80211_bss *bss;
        const u8 *rsnie = NULL;
        const u8 *wpaie = NULL;
 
        lockdep_assert_held(&ar->conf_mutex);
 
-       bss = cfg80211_get_bss(ar->hw->wiphy, ar->hw->conf.chandef.chan,
-                              info->bssid, NULL, 0, IEEE80211_BSS_TYPE_ANY,
-                              IEEE80211_PRIVACY_ANY);
+       if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
+               return;
+
+       bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid, NULL, 0,
+                              IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
        if (bss) {
                const struct cfg80211_bss_ies *ies;
 
@@ -1443,19 +1990,29 @@ static void ath10k_peer_assoc_h_crypto(struct ath10k *ar,
 }
 
 static void ath10k_peer_assoc_h_rates(struct ath10k *ar,
+                                     struct ieee80211_vif *vif,
                                      struct ieee80211_sta *sta,
                                      struct wmi_peer_assoc_complete_arg *arg)
 {
+       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
        struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates;
+       struct cfg80211_chan_def def;
        const struct ieee80211_supported_band *sband;
        const struct ieee80211_rate *rates;
+       enum ieee80211_band band;
        u32 ratemask;
+       u8 rate;
        int i;
 
        lockdep_assert_held(&ar->conf_mutex);
 
-       sband = ar->hw->wiphy->bands[ar->hw->conf.chandef.chan->band];
-       ratemask = sta->supp_rates[ar->hw->conf.chandef.chan->band];
+       if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
+               return;
+
+       band = def.chan->band;
+       sband = ar->hw->wiphy->bands[band];
+       ratemask = sta->supp_rates[band];
+       ratemask &= arvif->bitrate_mask.control[band].legacy;
        rates = sband->bitrates;
 
        rateset->num_rates = 0;
@@ -1464,24 +2021,66 @@ static void ath10k_peer_assoc_h_rates(struct ath10k *ar,
                if (!(ratemask & 1))
                        continue;
 
-               rateset->rates[rateset->num_rates] = rates->hw_value;
+               rate = ath10k_mac_bitrate_to_rate(rates->bitrate);
+               rateset->rates[rateset->num_rates] = rate;
                rateset->num_rates++;
        }
 }
 
+static bool
+ath10k_peer_assoc_h_ht_masked(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
+{
+       int nss;
+
+       for (nss = 0; nss < IEEE80211_HT_MCS_MASK_LEN; nss++)
+               if (ht_mcs_mask[nss])
+                       return false;
+
+       return true;
+}
+
+static bool
+ath10k_peer_assoc_h_vht_masked(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
+{
+       int nss;
+
+       for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++)
+               if (vht_mcs_mask[nss])
+                       return false;
+
+       return true;
+}
+
 static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
+                                  struct ieee80211_vif *vif,
                                   struct ieee80211_sta *sta,
                                   struct wmi_peer_assoc_complete_arg *arg)
 {
        const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
-       int i, n;
+       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct cfg80211_chan_def def;
+       enum ieee80211_band band;
+       const u8 *ht_mcs_mask;
+       const u16 *vht_mcs_mask;
+       int i, n, max_nss;
        u32 stbc;
 
        lockdep_assert_held(&ar->conf_mutex);
 
+       if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
+               return;
+
        if (!ht_cap->ht_supported)
                return;
 
+       band = def.chan->band;
+       ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
+       vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+
+       if (ath10k_peer_assoc_h_ht_masked(ht_mcs_mask) &&
+           ath10k_peer_assoc_h_vht_masked(vht_mcs_mask))
+               return;
+
        arg->peer_flags |= WMI_PEER_HT;
        arg->peer_max_mpdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
                                    ht_cap->ampdu_factor)) - 1;
@@ -1500,11 +2099,13 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
                arg->peer_rate_caps |= WMI_RC_CW40_FLAG;
        }
 
-       if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
-               arg->peer_rate_caps |= WMI_RC_SGI_FLAG;
+       if (arvif->bitrate_mask.control[band].gi != NL80211_TXRATE_FORCE_LGI) {
+               if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
+                       arg->peer_rate_caps |= WMI_RC_SGI_FLAG;
 
-       if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40)
-               arg->peer_rate_caps |= WMI_RC_SGI_FLAG;
+               if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40)
+                       arg->peer_rate_caps |= WMI_RC_SGI_FLAG;
+       }
 
        if (ht_cap->cap & IEEE80211_HT_CAP_TX_STBC) {
                arg->peer_rate_caps |= WMI_RC_TX_STBC_FLAG;
@@ -1524,9 +2125,12 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
        else if (ht_cap->mcs.rx_mask[1])
                arg->peer_rate_caps |= WMI_RC_DS_FLAG;
 
-       for (i = 0, n = 0; i < IEEE80211_HT_MCS_MASK_LEN*8; i++)
-               if (ht_cap->mcs.rx_mask[i/8] & (1 << i%8))
+       for (i = 0, n = 0, max_nss = 0; i < IEEE80211_HT_MCS_MASK_LEN * 8; i++)
+               if ((ht_cap->mcs.rx_mask[i / 8] & BIT(i % 8)) &&
+                   (ht_mcs_mask[i / 8] & BIT(i % 8))) {
+                       max_nss = (i / 8) + 1;
                        arg->peer_ht_rates.rates[n++] = i;
+               }
 
        /*
         * This is a workaround for HT-enabled STAs which break the spec
@@ -1543,7 +2147,7 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
                        arg->peer_ht_rates.rates[i] = i;
        } else {
                arg->peer_ht_rates.num_rates = n;
-               arg->peer_num_spatial_streams = sta->rx_nss;
+               arg->peer_num_spatial_streams = max_nss;
        }
 
        ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
@@ -1619,19 +2223,84 @@ static int ath10k_peer_assoc_qos_ap(struct ath10k *ar,
        return 0;
 }
 
+static u16
+ath10k_peer_assoc_h_vht_limit(u16 tx_mcs_set,
+                             const u16 vht_mcs_limit[NL80211_VHT_NSS_MAX])
+{
+       int idx_limit;
+       int nss;
+       u16 mcs_map;
+       u16 mcs;
+
+       for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) {
+               mcs_map = ath10k_mac_get_max_vht_mcs_map(tx_mcs_set, nss) &
+                         vht_mcs_limit[nss];
+
+               if (mcs_map)
+                       idx_limit = fls(mcs_map) - 1;
+               else
+                       idx_limit = -1;
+
+               switch (idx_limit) {
+               case 0: /* fall through */
+               case 1: /* fall through */
+               case 2: /* fall through */
+               case 3: /* fall through */
+               case 4: /* fall through */
+               case 5: /* fall through */
+               case 6: /* fall through */
+               default:
+                       /* see ath10k_mac_can_set_bitrate_mask() */
+                       WARN_ON(1);
+                       /* fall through */
+               case -1:
+                       mcs = IEEE80211_VHT_MCS_NOT_SUPPORTED;
+                       break;
+               case 7:
+                       mcs = IEEE80211_VHT_MCS_SUPPORT_0_7;
+                       break;
+               case 8:
+                       mcs = IEEE80211_VHT_MCS_SUPPORT_0_8;
+                       break;
+               case 9:
+                       mcs = IEEE80211_VHT_MCS_SUPPORT_0_9;
+                       break;
+               }
+
+               tx_mcs_set &= ~(0x3 << (nss * 2));
+               tx_mcs_set |= mcs << (nss * 2);
+       }
+
+       return tx_mcs_set;
+}
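
The limiter above leans on the VHT rule that a valid MCS set is contiguous from 0, so fls() - 1 on the allowed mask yields the highest permitted index, which then collapses into the 2-bit per-NSS codes of the tx MCS map. A standalone sketch of that mapping (fls16() stands in for the kernel's fls(); the enum values mirror ieee80211.h):

	#include <stdint.h>
	#include <stdio.h>

	/* 2-bit VHT MCS map codes as in ieee80211.h */
	enum { SUP_0_7 = 0, SUP_0_8 = 1, SUP_0_9 = 2, NOT_SUP = 3 };

	/* highest set bit, 1-based; 0 when no bit is set (like the kernel's fls) */
	static int fls16(uint16_t v)
	{
		int n = 0;

		while (v) {
			n++;
			v >>= 1;
		}
		return n;
	}

	static int mcs_code(uint16_t allowed)
	{
		int idx_limit = allowed ? fls16(allowed) - 1 : -1;

		switch (idx_limit) {
		case 7:
			return SUP_0_7;
		case 8:
			return SUP_0_8;
		case 9:
			return SUP_0_9;
		default: /* -1, or a hole below MCS 7 the mask validator rejects */
			return NOT_SUP;
		}
	}

	int main(void)
	{
		printf("%d %d %d\n",
		       mcs_code(0x00ff),  /* MCS 0-7 allowed -> 0 */
		       mcs_code(0x03ff),  /* MCS 0-9 allowed -> 2 */
		       mcs_code(0x0000)); /* nothing allowed  -> 3 */
		return 0;
	}
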
+
 static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
+                                   struct ieee80211_vif *vif,
                                    struct ieee80211_sta *sta,
                                    struct wmi_peer_assoc_complete_arg *arg)
 {
        const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct cfg80211_chan_def def;
+       enum ieee80211_band band;
+       const u16 *vht_mcs_mask;
        u8 ampdu_factor;
 
+       if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
+               return;
+
        if (!vht_cap->vht_supported)
                return;
 
+       band = def.chan->band;
+       vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+
+       if (ath10k_peer_assoc_h_vht_masked(vht_mcs_mask))
+               return;
+
        arg->peer_flags |= WMI_PEER_VHT;
 
-       if (ar->hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ)
+       if (def.chan->band == IEEE80211_BAND_2GHZ)
                arg->peer_flags |= WMI_PEER_VHT_2G;
 
        arg->peer_vht_caps = vht_cap->cap;
@@ -1657,8 +2326,8 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
                __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map);
        arg->peer_vht_rates.tx_max_rate =
                __le16_to_cpu(vht_cap->vht_mcs.tx_highest);
-       arg->peer_vht_rates.tx_mcs_set =
-               __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map);
+       arg->peer_vht_rates.tx_mcs_set = ath10k_peer_assoc_h_vht_limit(
+               __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map), vht_mcs_mask);
 
        ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
                   sta->addr, arg->peer_max_mpdu, arg->peer_flags);
@@ -1697,10 +2366,10 @@ static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
                   sta->addr, !!(arg->peer_flags & WMI_PEER_QOS));
 }
 
-static bool ath10k_mac_sta_has_11g_rates(struct ieee80211_sta *sta)
+static bool ath10k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta)
 {
-       /* First 4 rates in ath10k_rates are CCK (11b) rates. */
-       return sta->supp_rates[IEEE80211_BAND_2GHZ] >> 4;
+       return sta->supp_rates[IEEE80211_BAND_2GHZ] >>
+              ATH10K_MAC_FIRST_OFDM_RATE_IDX;
 }
 
 static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
@@ -1708,21 +2377,35 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
                                        struct ieee80211_sta *sta,
                                        struct wmi_peer_assoc_complete_arg *arg)
 {
+       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct cfg80211_chan_def def;
+       enum ieee80211_band band;
+       const u8 *ht_mcs_mask;
+       const u16 *vht_mcs_mask;
        enum wmi_phy_mode phymode = MODE_UNKNOWN;
 
-       switch (ar->hw->conf.chandef.chan->band) {
+       if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
+               return;
+
+       band = def.chan->band;
+       ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
+       vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+
+       switch (band) {
        case IEEE80211_BAND_2GHZ:
-               if (sta->vht_cap.vht_supported) {
+               if (sta->vht_cap.vht_supported &&
+                   !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
                        if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
                                phymode = MODE_11AC_VHT40;
                        else
                                phymode = MODE_11AC_VHT20;
-               } else if (sta->ht_cap.ht_supported) {
+               } else if (sta->ht_cap.ht_supported &&
+                          !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
                        if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
                                phymode = MODE_11NG_HT40;
                        else
                                phymode = MODE_11NG_HT20;
-               } else if (ath10k_mac_sta_has_11g_rates(sta)) {
+               } else if (ath10k_mac_sta_has_ofdm_only(sta)) {
                        phymode = MODE_11G;
                } else {
                        phymode = MODE_11B;
@@ -1733,15 +2416,17 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
                /*
                 * Check VHT first.
                 */
-               if (sta->vht_cap.vht_supported) {
+               if (sta->vht_cap.vht_supported &&
+                   !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
                        if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
                                phymode = MODE_11AC_VHT80;
                        else if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
                                phymode = MODE_11AC_VHT40;
                        else if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
                                phymode = MODE_11AC_VHT20;
-               } else if (sta->ht_cap.ht_supported) {
-                       if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+               } else if (sta->ht_cap.ht_supported &&
+                          !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
+                       if (sta->bandwidth >= IEEE80211_STA_RX_BW_40)
                                phymode = MODE_11NA_HT40;
                        else
                                phymode = MODE_11NA_HT20;
@@ -1772,9 +2457,9 @@ static int ath10k_peer_assoc_prepare(struct ath10k *ar,
 
        ath10k_peer_assoc_h_basic(ar, vif, sta, arg);
        ath10k_peer_assoc_h_crypto(ar, vif, arg);
-       ath10k_peer_assoc_h_rates(ar, sta, arg);
-       ath10k_peer_assoc_h_ht(ar, sta, arg);
-       ath10k_peer_assoc_h_vht(ar, sta, arg);
+       ath10k_peer_assoc_h_rates(ar, vif, sta, arg);
+       ath10k_peer_assoc_h_ht(ar, vif, sta, arg);
+       ath10k_peer_assoc_h_vht(ar, vif, sta, arg);
        ath10k_peer_assoc_h_qos(ar, vif, sta, arg);
        ath10k_peer_assoc_h_phymode(ar, vif, sta, arg);
 
@@ -1993,6 +2678,8 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
        }
 
        arvif->is_up = false;
+
+       cancel_delayed_work_sync(&arvif->connection_loss_work);
 }
 
 static int ath10k_station_assoc(struct ath10k *ar,
@@ -2013,7 +2700,6 @@ static int ath10k_station_assoc(struct ath10k *ar,
                return ret;
        }
 
-       peer_arg.peer_reassoc = reassoc;
        ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
        if (ret) {
                ath10k_warn(ar, "failed to run peer assoc for STA %pM vdev %i: %d\n",
@@ -2274,71 +2960,286 @@ static void ath10k_reg_notifier(struct wiphy *wiphy,
 /* TX handlers */
 /***************/
 
-static u8 ath10k_tx_h_get_tid(struct ieee80211_hdr *hdr)
+void ath10k_mac_tx_lock(struct ath10k *ar, int reason)
 {
-       if (ieee80211_is_mgmt(hdr->frame_control))
-               return HTT_DATA_TX_EXT_TID_MGMT;
-
-       if (!ieee80211_is_data_qos(hdr->frame_control))
-               return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
-
-       if (!is_unicast_ether_addr(ieee80211_get_DA(hdr)))
-               return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
+       lockdep_assert_held(&ar->htt.tx_lock);
 
-       return ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
+       WARN_ON(reason >= ATH10K_TX_PAUSE_MAX);
+       ar->tx_paused |= BIT(reason);
+       ieee80211_stop_queues(ar->hw);
 }
 
-static u8 ath10k_tx_h_get_vdev_id(struct ath10k *ar, struct ieee80211_vif *vif)
+static void ath10k_mac_tx_unlock_iter(void *data, u8 *mac,
+                                     struct ieee80211_vif *vif)
 {
-       if (vif)
-               return ath10k_vif_to_arvif(vif)->vdev_id;
+       struct ath10k *ar = data;
+       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
 
-       if (ar->monitor_started)
-               return ar->monitor_vdev_id;
+       if (arvif->tx_paused)
+               return;
 
-       ath10k_warn(ar, "failed to resolve vdev id\n");
-       return 0;
+       ieee80211_wake_queue(ar->hw, arvif->vdev_id);
 }
 
-/* HTT Tx uses Native Wifi tx mode which expects 802.11 frames without QoS
- * Control in the header.
- */
-static void ath10k_tx_h_nwifi(struct ieee80211_hw *hw, struct sk_buff *skb)
+void ath10k_mac_tx_unlock(struct ath10k *ar, int reason)
 {
-       struct ieee80211_hdr *hdr = (void *)skb->data;
-       struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
-       u8 *qos_ctl;
+       lockdep_assert_held(&ar->htt.tx_lock);
 
-       if (!ieee80211_is_data_qos(hdr->frame_control))
-               return;
+       WARN_ON(reason >= ATH10K_TX_PAUSE_MAX);
+       ar->tx_paused &= ~BIT(reason);
 
-       qos_ctl = ieee80211_get_qos_ctl(hdr);
-       memmove(skb->data + IEEE80211_QOS_CTL_LEN,
-               skb->data, (void *)qos_ctl - (void *)skb->data);
-       skb_pull(skb, IEEE80211_QOS_CTL_LEN);
+       if (ar->tx_paused)
+               return;
 
-       /* Fw/Hw generates a corrupted QoS Control Field for QoS NullFunc
-        * frames. Powersave is handled by the fw/hw so QoS NullFunc frames are
-        * used only for CQM purposes (e.g. hostapd station keepalive ping) so
-        * it is safe to downgrade to NullFunc.
-        */
-       hdr = (void *)skb->data;
-       if (ieee80211_is_qos_nullfunc(hdr->frame_control)) {
-               hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
-               cb->htt.tid = HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
-       }
+       ieee80211_iterate_active_interfaces_atomic(ar->hw,
+                                                  IEEE80211_IFACE_ITER_RESUME_ALL,
+                                                  ath10k_mac_tx_unlock_iter,
+                                                  ar);
 }
 
-static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar,
-                                      struct ieee80211_vif *vif,
-                                      struct sk_buff *skb)
+void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason)
 {
-       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct ath10k *ar = arvif->ar;
 
-       /* This case applies only to P2P_GO */
-       if (arvif->vdev_type != WMI_VDEV_TYPE_AP ||
-           arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO)
+       lockdep_assert_held(&ar->htt.tx_lock);
+
+       WARN_ON(reason >= BITS_PER_LONG);
+       arvif->tx_paused |= BIT(reason);
+       ieee80211_stop_queue(ar->hw, arvif->vdev_id);
+}
+
+void ath10k_mac_vif_tx_unlock(struct ath10k_vif *arvif, int reason)
+{
+       struct ath10k *ar = arvif->ar;
+
+       lockdep_assert_held(&ar->htt.tx_lock);
+
+       WARN_ON(reason >= BITS_PER_LONG);
+       arvif->tx_paused &= ~BIT(reason);
+
+       if (ar->tx_paused)
+               return;
+
+       if (arvif->tx_paused)
+               return;
+
+       ieee80211_wake_queue(ar->hw, arvif->vdev_id);
+}
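
Pause reasons are tracked as bits in per-device and per-vif bitmaps; a queue wakes only when both bitmaps are empty, so overlapping pause/unpause events from firmware compose correctly. A toy userspace sketch of the bookkeeping (the reason names are hypothetical):

	#include <stdio.h>

	/* hypothetical pause reasons; the driver uses enum values as bit numbers */
	enum { PAUSE_Q_FULL = 0, PAUSE_PS = 1 };

	static unsigned long dev_paused; /* device-wide reasons */
	static unsigned long vif_paused; /* per-vif reasons */

	static void vif_tx_lock(int reason)
	{
		vif_paused |= 1UL << reason; /* stop the vif queue */
	}

	static void vif_tx_unlock(int reason)
	{
		vif_paused &= ~(1UL << reason);

		/* wake only when no reason remains on either level */
		if (!dev_paused && !vif_paused)
			printf("wake queue\n");
	}

	int main(void)
	{
		vif_tx_lock(PAUSE_Q_FULL);
		vif_tx_lock(PAUSE_PS);
		vif_tx_unlock(PAUSE_PS);     /* still paused: Q_FULL bit set */
		vif_tx_unlock(PAUSE_Q_FULL); /* prints "wake queue" */
		return 0;
	}
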
+
+static void ath10k_mac_vif_handle_tx_pause(struct ath10k_vif *arvif,
+                                          enum wmi_tlv_tx_pause_id pause_id,
+                                          enum wmi_tlv_tx_pause_action action)
+{
+       struct ath10k *ar = arvif->ar;
+
+       lockdep_assert_held(&ar->htt.tx_lock);
+
+       switch (pause_id) {
+       case WMI_TLV_TX_PAUSE_ID_MCC:
+       case WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA:
+       case WMI_TLV_TX_PAUSE_ID_P2P_GO_PS:
+       case WMI_TLV_TX_PAUSE_ID_AP_PS:
+       case WMI_TLV_TX_PAUSE_ID_IBSS_PS:
+               switch (action) {
+               case WMI_TLV_TX_PAUSE_ACTION_STOP:
+                       ath10k_mac_vif_tx_lock(arvif, pause_id);
+                       break;
+               case WMI_TLV_TX_PAUSE_ACTION_WAKE:
+                       ath10k_mac_vif_tx_unlock(arvif, pause_id);
+                       break;
+               default:
+                       ath10k_warn(ar, "received unknown tx pause action %d on vdev %i, ignoring\n",
+                                   action, arvif->vdev_id);
+                       break;
+               }
+               break;
+       case WMI_TLV_TX_PAUSE_ID_AP_PEER_PS:
+       case WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD:
+       case WMI_TLV_TX_PAUSE_ID_STA_ADD_BA:
+       case WMI_TLV_TX_PAUSE_ID_HOST:
+       default:
+               /* FIXME: Some pause_ids aren't vdev specific. Instead they
+                * target peer_id and tid. Implementing these could improve
+                * traffic scheduling fairness across multiple connected
+                * stations in AP/IBSS modes.
+                */
+               ath10k_dbg(ar, ATH10K_DBG_MAC,
+                          "mac ignoring unsupported tx pause vdev %i id %d\n",
+                          arvif->vdev_id, pause_id);
+               break;
+       }
+}
+
+struct ath10k_mac_tx_pause {
+       u32 vdev_id;
+       enum wmi_tlv_tx_pause_id pause_id;
+       enum wmi_tlv_tx_pause_action action;
+};
+
+static void ath10k_mac_handle_tx_pause_iter(void *data, u8 *mac,
+                                           struct ieee80211_vif *vif)
+{
+       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct ath10k_mac_tx_pause *arg = data;
+
+       ath10k_mac_vif_handle_tx_pause(arvif, arg->pause_id, arg->action);
+}
+
+void ath10k_mac_handle_tx_pause(struct ath10k *ar, u32 vdev_id,
+                               enum wmi_tlv_tx_pause_id pause_id,
+                               enum wmi_tlv_tx_pause_action action)
+{
+       struct ath10k_mac_tx_pause arg = {
+               .vdev_id = vdev_id,
+               .pause_id = pause_id,
+               .action = action,
+       };
+
+       spin_lock_bh(&ar->htt.tx_lock);
+       ieee80211_iterate_active_interfaces_atomic(ar->hw,
+                                                  IEEE80211_IFACE_ITER_RESUME_ALL,
+                                                  ath10k_mac_handle_tx_pause_iter,
+                                                  &arg);
+       spin_unlock_bh(&ar->htt.tx_lock);
+}
+
+static u8 ath10k_tx_h_get_tid(struct ieee80211_hdr *hdr)
+{
+       if (ieee80211_is_mgmt(hdr->frame_control))
+               return HTT_DATA_TX_EXT_TID_MGMT;
+
+       if (!ieee80211_is_data_qos(hdr->frame_control))
+               return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
+
+       if (!is_unicast_ether_addr(ieee80211_get_DA(hdr)))
+               return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
+
+       return ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
+}
+
+static u8 ath10k_tx_h_get_vdev_id(struct ath10k *ar, struct ieee80211_vif *vif)
+{
+       if (vif)
+               return ath10k_vif_to_arvif(vif)->vdev_id;
+
+       if (ar->monitor_started)
+               return ar->monitor_vdev_id;
+
+       ath10k_warn(ar, "failed to resolve vdev id\n");
+       return 0;
+}
+
+static enum ath10k_hw_txrx_mode
+ath10k_tx_h_get_txmode(struct ath10k *ar, struct ieee80211_vif *vif,
+                      struct ieee80211_sta *sta, struct sk_buff *skb)
+{
+       const struct ieee80211_hdr *hdr = (void *)skb->data;
+       __le16 fc = hdr->frame_control;
+
+       if (!vif || vif->type == NL80211_IFTYPE_MONITOR)
+               return ATH10K_HW_TXRX_RAW;
+
+       if (ieee80211_is_mgmt(fc))
+               return ATH10K_HW_TXRX_MGMT;
+
+       /* Workaround:
+        *
+        * NullFunc frames are mostly used to ping whether a client or AP is
+        * still reachable and responsive. This implies tx status reports must
+        * be accurate - otherwise either mac80211 or userspace (e.g. hostapd)
+        * may conclude that the other end disappeared and tear down the BSS
+        * connection, or never manage to disconnect from the BSS/client
+        * (which is the case).
+        *
+        * Firmware with HTT older than 3.0 delivers incorrect tx status for
+        * NullFunc frames to the driver. However there is an HTT Mgmt Tx
+        * command which seems to deliver correct tx reports for NullFunc
+        * frames. The downside of using it is that it ignores the client
+        * powersave state, so it can end up disconnecting sleeping clients in
+        * AP mode. It should fix STA mode though, because APs don't sleep.
+        */
+       if (ar->htt.target_version_major < 3 &&
+           (ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)) &&
+           !test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX, ar->fw_features))
+               return ATH10K_HW_TXRX_MGMT;
+
+       /* Workaround:
+        *
+        * Some wmi-tlv firmwares for qca6174 have broken Tx key selection for
+        * the NativeWifi txmode - it selects the AP key instead of the peer
+        * key. It seems to work with the Ethernet txmode, so use that
+        * instead.
+        */
+       if (ieee80211_is_data_present(fc) && sta && sta->tdls)
+               return ATH10K_HW_TXRX_ETHERNET;
+
+       return ATH10K_HW_TXRX_NATIVE_WIFI;
+}
+
+/* HTT Tx uses Native Wifi tx mode which expects 802.11 frames without QoS
+ * Control in the header.
+ */
+static void ath10k_tx_h_nwifi(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+       struct ieee80211_hdr *hdr = (void *)skb->data;
+       struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
+       u8 *qos_ctl;
+
+       if (!ieee80211_is_data_qos(hdr->frame_control))
+               return;
+
+       qos_ctl = ieee80211_get_qos_ctl(hdr);
+       memmove(skb->data + IEEE80211_QOS_CTL_LEN,
+               skb->data, (void *)qos_ctl - (void *)skb->data);
+       skb_pull(skb, IEEE80211_QOS_CTL_LEN);
+
+       /* Some firmware revisions don't handle sending QoS NullFunc well.
+        * These frames are mainly used for CQM purposes, so it doesn't really
+        * matter whether a QoS NullFunc or a plain NullFunc is sent.
+        */
+       hdr = (void *)skb->data;
+       if (ieee80211_is_qos_nullfunc(hdr->frame_control))
+               cb->htt.tid = HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
+
+       hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
+}
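
The QoS Control strip works by sliding the front of the 802.11 header forward over the 2-byte field and then pulling the frame start, which avoids copying the (potentially large) payload. A userspace sketch of the same move on a flat buffer (the 26-byte QoS header offset is a simplifying assumption for illustration):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Hypothetical offsets: a 26-byte 802.11 QoS data header whose last
	 * 2 bytes are the QoS Control field.
	 */
	#define QOS_HDR_LEN 26
	#define QOS_CTL_LEN 2

	static uint8_t *strip_qos_ctl(uint8_t *buf, size_t *len)
	{
		/* slide everything before QoS Control forward over it ... */
		memmove(buf + QOS_CTL_LEN, buf, QOS_HDR_LEN - QOS_CTL_LEN);
		/* ... then advance the frame start, like skb_pull() */
		*len -= QOS_CTL_LEN;
		return buf + QOS_CTL_LEN;
	}

	int main(void)
	{
		uint8_t frame[64] = { 0 };
		size_t len = sizeof(frame);
		uint8_t *start = strip_qos_ctl(frame, &len);

		printf("%zu %td\n", len, start - frame); /* "62 2" */
		return 0;
	}
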
+
+static void ath10k_tx_h_8023(struct sk_buff *skb)
+{
+       struct ieee80211_hdr *hdr;
+       struct rfc1042_hdr *rfc1042;
+       struct ethhdr *eth;
+       size_t hdrlen;
+       u8 da[ETH_ALEN];
+       u8 sa[ETH_ALEN];
+       __be16 type;
+
+       hdr = (void *)skb->data;
+       hdrlen = ieee80211_hdrlen(hdr->frame_control);
+       rfc1042 = (void *)skb->data + hdrlen;
+
+       ether_addr_copy(da, ieee80211_get_DA(hdr));
+       ether_addr_copy(sa, ieee80211_get_SA(hdr));
+       type = rfc1042->snap_type;
+
+       skb_pull(skb, hdrlen + sizeof(*rfc1042));
+       skb_push(skb, sizeof(*eth));
+
+       eth = (void *)skb->data;
+       ether_addr_copy(eth->h_dest, da);
+       ether_addr_copy(eth->h_source, sa);
+       eth->h_proto = type;
+}
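
ath10k_tx_h_8023() rewrites an 802.11 data frame carrying an LLC/SNAP header into an Ethernet II frame in place: save DA/SA and the ethertype, drop the 802.11 and SNAP headers, and prepend a 14-byte Ethernet header. A userspace sketch over a flat buffer (the fixed 24-byte header and 8-byte SNAP offsets are assumptions; the real code derives the header length and addresses from the frame itself):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define HDR80211_LEN 24
	#define SNAP_LEN     8
	#define ETH_HDR_LEN  14

	static size_t to_8023(uint8_t *buf, size_t len,
			      const uint8_t da[6], const uint8_t sa[6])
	{
		uint8_t type[2];

		/* save the ethertype from the tail of the SNAP header */
		memcpy(type, buf + HDR80211_LEN + SNAP_LEN - 2, 2);

		/* drop 802.11 + SNAP, leaving room for the Ethernet header */
		memmove(buf + ETH_HDR_LEN, buf + HDR80211_LEN + SNAP_LEN,
			len - HDR80211_LEN - SNAP_LEN);

		memcpy(buf, da, 6);        /* h_dest   */
		memcpy(buf + 6, sa, 6);    /* h_source */
		memcpy(buf + 12, type, 2); /* h_proto  */

		return len - HDR80211_LEN - SNAP_LEN + ETH_HDR_LEN;
	}

	int main(void)
	{
		uint8_t frame[128] = { 0 };
		static const uint8_t da[6] = { 2, 0, 0, 0, 0, 1 };
		static const uint8_t sa[6] = { 2, 0, 0, 0, 0, 2 };

		printf("%zu\n", to_8023(frame, sizeof(frame), da, sa)); /* "110" */
		return 0;
	}
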
+
+static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar,
+                                      struct ieee80211_vif *vif,
+                                      struct sk_buff *skb)
+{
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+
+       /* This case applies only to P2P_GO */
+       if (arvif->vdev_type != WMI_VDEV_TYPE_AP ||
+           arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO)
                return;
 
        if (unlikely(ieee80211_is_probe_resp(hdr->frame_control))) {
@@ -2365,45 +3266,51 @@ static bool ath10k_mac_need_offchan_tx_work(struct ath10k *ar)
                 ar->htt.target_version_minor >= 4);
 }
 
-static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb)
+static int ath10k_mac_tx_wmi_mgmt(struct ath10k *ar, struct sk_buff *skb)
 {
-       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue;
        int ret = 0;
 
-       if (ar->htt.target_version_major >= 3) {
-               /* Since HTT 3.0 there is no separate mgmt tx command */
-               ret = ath10k_htt_tx(&ar->htt, skb);
-               goto exit;
+       spin_lock_bh(&ar->data_lock);
+
+       if (skb_queue_len(q) == ATH10K_MAX_NUM_MGMT_PENDING) {
+               ath10k_warn(ar, "wmi mgmt tx queue is full\n");
+               ret = -ENOSPC;
+               goto unlock;
        }
 
-       if (ieee80211_is_mgmt(hdr->frame_control)) {
-               if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
-                            ar->fw_features)) {
-                       if (skb_queue_len(&ar->wmi_mgmt_tx_queue) >=
-                           ATH10K_MAX_NUM_MGMT_PENDING) {
-                               ath10k_warn(ar, "reached WMI management transmit queue limit\n");
-                               ret = -EBUSY;
-                               goto exit;
-                       }
+       __skb_queue_tail(q, skb);
+       ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);
 
-                       skb_queue_tail(&ar->wmi_mgmt_tx_queue, skb);
-                       ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);
-               } else {
-                       ret = ath10k_htt_mgmt_tx(&ar->htt, skb);
-               }
-       } else if (!test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
-                            ar->fw_features) &&
-                  ieee80211_is_nullfunc(hdr->frame_control)) {
-               /* FW does not report tx status properly for NullFunc frames
-                * unless they are sent through mgmt tx path. mac80211 sends
-                * those frames when it detects link/beacon loss and depends
-                * on the tx status to be correct. */
-               ret = ath10k_htt_mgmt_tx(&ar->htt, skb);
-       } else {
-               ret = ath10k_htt_tx(&ar->htt, skb);
+unlock:
+       spin_unlock_bh(&ar->data_lock);
+
+       return ret;
+}
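
WMI management tx is funneled through a bounded queue drained by a worker, so the hot path only checks the cap and defers the actual send. A toy sketch of that bounded-queue check (MAX_PENDING is a stand-in for ATH10K_MAX_NUM_MGMT_PENDING, whose real value isn't shown here):

	#include <errno.h>
	#include <stdio.h>

	#define MAX_PENDING 4 /* stand-in for ATH10K_MAX_NUM_MGMT_PENDING */

	static int queued;

	static int queue_mgmt_frame(void)
	{
		if (queued == MAX_PENDING)
			return -ENOSPC; /* caller drops the frame */

		queued++; /* a worker would drain the queue and decrement later */
		return 0;
	}

	int main(void)
	{
		int i;

		for (i = 0; i < 6; i++)
			printf("frame %d -> %d\n", i, queue_mgmt_frame());
		return 0;
	}
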
+
+static void ath10k_mac_tx(struct ath10k *ar, struct sk_buff *skb)
+{
+       struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
+       struct ath10k_htt *htt = &ar->htt;
+       int ret = 0;
+
+       switch (cb->txmode) {
+       case ATH10K_HW_TXRX_RAW:
+       case ATH10K_HW_TXRX_NATIVE_WIFI:
+       case ATH10K_HW_TXRX_ETHERNET:
+               ret = ath10k_htt_tx(htt, skb);
+               break;
+       case ATH10K_HW_TXRX_MGMT:
+               if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
+                            ar->fw_features))
+                       ret = ath10k_mac_tx_wmi_mgmt(ar, skb);
+               else if (ar->htt.target_version_major >= 3)
+                       ret = ath10k_htt_tx(htt, skb);
+               else
+                       ret = ath10k_htt_mgmt_tx(htt, skb);
+               break;
        }
 
-exit:
        if (ret) {
                ath10k_warn(ar, "failed to transmit packet, dropping: %d\n",
                            ret);
@@ -2433,6 +3340,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
        const u8 *peer_addr;
        int vdev_id;
        int ret;
+       unsigned long time_left;
 
        /* FW requirement: We must create a peer before FW will send out
         * an offchannel frame. Otherwise the frame will be stuck and
@@ -2465,7 +3373,8 @@ void ath10k_offchan_tx_work(struct work_struct *work)
                                   peer_addr, vdev_id);
 
                if (!peer) {
-                       ret = ath10k_peer_create(ar, vdev_id, peer_addr);
+                       ret = ath10k_peer_create(ar, vdev_id, peer_addr,
+                                                WMI_PEER_TYPE_DEFAULT);
                        if (ret)
                                ath10k_warn(ar, "failed to create peer %pM on vdev %d: %d\n",
                                            peer_addr, vdev_id, ret);
@@ -2476,11 +3385,11 @@ void ath10k_offchan_tx_work(struct work_struct *work)
                ar->offchan_tx_skb = skb;
                spin_unlock_bh(&ar->data_lock);
 
-               ath10k_tx_htt(ar, skb);
+               ath10k_mac_tx(ar, skb);
 
-               ret = wait_for_completion_timeout(&ar->offchan_tx_completed,
-                                                 3 * HZ);
-               if (ret == 0)
+               time_left = wait_for_completion_timeout(&ar->offchan_tx_completed,
+                                                       3 * HZ);
+               if (time_left == 0)
                        ath10k_warn(ar, "timed out waiting for offchannel skb %p\n",
                                    skb);
 
@@ -2700,21 +3609,38 @@ static void ath10k_tx(struct ieee80211_hw *hw,
        struct ath10k *ar = hw->priv;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct ieee80211_vif *vif = info->control.vif;
+       struct ieee80211_sta *sta = control->sta;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       __le16 fc = hdr->frame_control;
 
        /* We should disable CCK RATE due to P2P */
        if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
                ath10k_dbg(ar, ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n");
 
        ATH10K_SKB_CB(skb)->htt.is_offchan = false;
+       ATH10K_SKB_CB(skb)->htt.freq = 0;
        ATH10K_SKB_CB(skb)->htt.tid = ath10k_tx_h_get_tid(hdr);
        ATH10K_SKB_CB(skb)->vdev_id = ath10k_tx_h_get_vdev_id(ar, vif);
+       ATH10K_SKB_CB(skb)->txmode = ath10k_tx_h_get_txmode(ar, vif, sta, skb);
+       ATH10K_SKB_CB(skb)->is_protected = ieee80211_has_protected(fc);
 
-       /* it makes no sense to process injected frames like that */
-       if (vif && vif->type != NL80211_IFTYPE_MONITOR) {
+       switch (ATH10K_SKB_CB(skb)->txmode) {
+       case ATH10K_HW_TXRX_MGMT:
+       case ATH10K_HW_TXRX_NATIVE_WIFI:
                ath10k_tx_h_nwifi(hw, skb);
                ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb);
                ath10k_tx_h_seq_no(vif, skb);
+               break;
+       case ATH10K_HW_TXRX_ETHERNET:
+               ath10k_tx_h_8023(skb);
+               break;
+       case ATH10K_HW_TXRX_RAW:
+               /* FIXME: Packet injection isn't implemented. It should be
+                * doable with firmware 10.2 on qca988x.
+                */
+               WARN_ON_ONCE(1);
+               ieee80211_free_txskb(hw, skb);
+               return;
        }
 
        if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
@@ -2736,7 +3662,7 @@ static void ath10k_tx(struct ieee80211_hw *hw,
                }
        }
 
-       ath10k_tx_htt(ar, skb);
+       ath10k_mac_tx(ar, skb);
 }
 
 /* Must not be called with conf_mutex held as workers can use that also. */
@@ -2761,11 +3687,13 @@ void ath10k_halt(struct ath10k *ar)
        clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
        ar->filter_flags = 0;
        ar->monitor = false;
+       ar->monitor_arvif = NULL;
 
        if (ar->monitor_started)
                ath10k_monitor_stop(ar);
 
        ar->monitor_started = false;
+       ar->tx_paused = 0;
 
        ath10k_scan_finish(ar);
        ath10k_peer_cleanup_all(ar);
@@ -2859,6 +3787,7 @@ static int ath10k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
 static int ath10k_start(struct ieee80211_hw *hw)
 {
        struct ath10k *ar = hw->priv;
+       u32 burst_enable;
        int ret = 0;
 
        /*
@@ -2913,6 +3842,24 @@ static int ath10k_start(struct ieee80211_hw *hw)
                goto err_core_stop;
        }
 
+       if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
+               ret = ath10k_wmi_adaptive_qcs(ar, true);
+               if (ret) {
+                       ath10k_warn(ar, "failed to enable adaptive qcs: %d\n",
+                                   ret);
+                       goto err_core_stop;
+               }
+       }
+
+       if (test_bit(WMI_SERVICE_BURST, ar->wmi.svc_map)) {
+               burst_enable = ar->wmi.pdev_param->burst_enable;
+               ret = ath10k_wmi_pdev_set_param(ar, burst_enable, 0);
+               if (ret) {
+                       ath10k_warn(ar, "failed to disable burst: %d\n", ret);
+                       goto err_core_stop;
+               }
+       }
+
        if (ar->cfg_tx_chainmask)
                __ath10k_set_antenna(ar, ar->cfg_tx_chainmask,
                                     ar->cfg_rx_chainmask);
@@ -2934,10 +3881,21 @@ static int ath10k_start(struct ieee80211_hw *hw)
                goto err_core_stop;
        }
 
+       ret = ath10k_wmi_pdev_set_param(ar,
+                                       ar->wmi.pdev_param->ani_enable, 1);
+       if (ret) {
+               ath10k_warn(ar, "failed to enable ani by default: %d\n",
+                           ret);
+               goto err_core_stop;
+       }
+
+       ar->ani_enabled = true;
+
        ar->num_started_vdevs = 0;
        ath10k_regd_update(ar);
 
        ath10k_spectral_start(ar);
+       ath10k_thermal_set_throttling(ar);
 
        mutex_unlock(&ar->conf_mutex);
        return 0;
@@ -2991,42 +3949,15 @@ static int ath10k_config_ps(struct ath10k *ar)
        return ret;
 }
 
-static const char *chandef_get_width(enum nl80211_chan_width width)
-{
-       switch (width) {
-       case NL80211_CHAN_WIDTH_20_NOHT:
-               return "20 (noht)";
-       case NL80211_CHAN_WIDTH_20:
-               return "20";
-       case NL80211_CHAN_WIDTH_40:
-               return "40";
-       case NL80211_CHAN_WIDTH_80:
-               return "80";
-       case NL80211_CHAN_WIDTH_80P80:
-               return "80+80";
-       case NL80211_CHAN_WIDTH_160:
-               return "160";
-       case NL80211_CHAN_WIDTH_5:
-               return "5";
-       case NL80211_CHAN_WIDTH_10:
-               return "10";
-       }
-       return "?";
-}
-
-static void ath10k_config_chan(struct ath10k *ar)
+static void ath10k_mac_chan_reconfigure(struct ath10k *ar)
 {
        struct ath10k_vif *arvif;
+       struct cfg80211_chan_def def;
        int ret;
 
        lockdep_assert_held(&ar->conf_mutex);
 
-       ath10k_dbg(ar, ATH10K_DBG_MAC,
-                  "mac config channel to %dMHz (cf1 %dMHz cf2 %dMHz width %s)\n",
-                  ar->chandef.chan->center_freq,
-                  ar->chandef.center_freq1,
-                  ar->chandef.center_freq2,
-                  chandef_get_width(ar->chandef.width));
+       ath10k_dbg(ar, ATH10K_DBG_MAC, "mac chan reconfigure\n");
 
        /* First stop the monitor interface. Some FW versions crash if
         * there's a lone monitor interface. */
@@ -3060,7 +3991,20 @@ static void ath10k_config_chan(struct ath10k *ar)
                if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
                        continue;
 
-               ret = ath10k_vdev_restart(arvif);
+               ret = ath10k_mac_setup_bcn_tmpl(arvif);
+               if (ret)
+                       ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n",
+                                   ret);
+
+               ret = ath10k_mac_setup_prb_tmpl(arvif);
+               if (ret)
+                       ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n",
+                                   ret);
+
+               if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
+                       continue;
+
+               ret = ath10k_vdev_restart(arvif, &def);
                if (ret) {
                        ath10k_warn(ar, "failed to restart vdev %d: %d\n",
                                    arvif->vdev_id, ret);
@@ -3147,26 +4091,6 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
 
        mutex_lock(&ar->conf_mutex);
 
-       if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
-               ath10k_dbg(ar, ATH10K_DBG_MAC,
-                          "mac config channel %dMHz flags 0x%x radar %d\n",
-                          conf->chandef.chan->center_freq,
-                          conf->chandef.chan->flags,
-                          conf->radar_enabled);
-
-               spin_lock_bh(&ar->data_lock);
-               ar->rx_channel = conf->chandef.chan;
-               spin_unlock_bh(&ar->data_lock);
-
-               ar->radar_enabled = conf->radar_enabled;
-               ath10k_recalc_radar_detection(ar);
-
-               if (!cfg80211_chandef_identical(&ar->chandef, &conf->chandef)) {
-                       ar->chandef = conf->chandef;
-                       ath10k_config_chan(ar);
-               }
-       }
-
        if (changed & IEEE80211_CONF_CHANGE_PS)
                ath10k_config_ps(ar);
 
@@ -3208,6 +4132,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
        int ret = 0;
        u32 value;
        int bit;
+       int i;
        u32 vdev_param;
 
        vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
@@ -3220,6 +4145,17 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
        arvif->vif = vif;
 
        INIT_LIST_HEAD(&arvif->list);
+       INIT_WORK(&arvif->ap_csa_work, ath10k_mac_vif_ap_csa_work);
+       INIT_DELAYED_WORK(&arvif->connection_loss_work,
+                         ath10k_mac_vif_sta_connection_loss_work);
+
+       for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) {
+               arvif->bitrate_mask.control[i].legacy = 0xffffffff;
+               memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff,
+                      sizeof(arvif->bitrate_mask.control[i].ht_mcs));
+               memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff,
+                      sizeof(arvif->bitrate_mask.control[i].vht_mcs));
+       }
 
        if (ar->free_vdev_map == 0) {
                ath10k_warn(ar, "Free vdev map is empty, no more interfaces allowed.\n");
@@ -3262,6 +4198,15 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
                break;
        }
 
+       /* Using the vdev_id as the queue number makes it very easy to do
+        * per-vif tx queue locking. This shouldn't wrap due to interface
+        * combinations, but do a modulo for correctness' sake anyway and to
+        * prevent the offchannel tx queue from being used for regular vif tx
+        * (see the sketch below).
+        */
+       vif->cab_queue = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1);
+       for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++)
+               vif->hw_queue[i] = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1);
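
A standalone sketch of the resulting vdev-to-queue mapping, assuming IEEE80211_MAX_QUEUES is 16 (its value in mac80211 of this era):

	#include <stdio.h>

	#define MAX_QUEUES 16 /* assumed value of IEEE80211_MAX_QUEUES here */

	int main(void)
	{
		int vdev_id;

		for (vdev_id = 0; vdev_id < 18; vdev_id++)
			printf("vdev %2d -> hw queue %2d\n",
			       vdev_id, vdev_id % (MAX_QUEUES - 1));
		/* vdev 15 wraps to queue 0; queue 15 stays free for offchannel */
		return 0;
	}
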
+
        /* Some firmware revisions don't wait for beacon tx completion before
         * sending another SWBA event. This could lead to hardware using old
         * (freed) beacon data in some cases, e.g. tx credit starvation
@@ -3343,14 +4288,18 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
                }
        }
 
-       if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
-               ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr);
+       if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
+           arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
+               ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr,
+                                        WMI_PEER_TYPE_DEFAULT);
                if (ret) {
-                       ath10k_warn(ar, "failed to create vdev %i peer for AP: %d\n",
+                       ath10k_warn(ar, "failed to create vdev %i peer for AP/IBSS: %d\n",
                                    arvif->vdev_id, ret);
                        goto err_vdev_delete;
                }
+       }
 
+       if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
                ret = ath10k_mac_set_kickout(arvif);
                if (ret) {
                        ath10k_warn(ar, "failed to set vdev %i kickout parameters: %d\n",
@@ -3406,11 +4355,21 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
                goto err_peer_delete;
        }
 
+       if (vif->type == NL80211_IFTYPE_MONITOR) {
+               ar->monitor_arvif = arvif;
+               ret = ath10k_monitor_recalc(ar);
+               if (ret) {
+                       ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
+                       goto err_peer_delete;
+               }
+       }
+
        mutex_unlock(&ar->conf_mutex);
        return 0;
 
 err_peer_delete:
-       if (arvif->vdev_type == WMI_VDEV_TYPE_AP)
+       if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
+           arvif->vdev_type == WMI_VDEV_TYPE_IBSS)
                ath10k_wmi_peer_delete(ar, arvif->vdev_id, vif->addr);
 
 err_vdev_delete:
@@ -3430,6 +4389,14 @@ err:
        return ret;
 }
 
+static void ath10k_mac_vif_tx_unlock_all(struct ath10k_vif *arvif)
+{
+       int i;
+
+       for (i = 0; i < BITS_PER_LONG; i++)
+               ath10k_mac_vif_tx_unlock(arvif, i);
+}
+
 static void ath10k_remove_interface(struct ieee80211_hw *hw,
                                    struct ieee80211_vif *vif)
 {
@@ -3437,6 +4404,9 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
        struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
        int ret;
 
+       cancel_work_sync(&arvif->ap_csa_work);
+       cancel_delayed_work_sync(&arvif->connection_loss_work);
+
        mutex_lock(&ar->conf_mutex);
 
        spin_lock_bh(&ar->data_lock);
@@ -3451,11 +4421,12 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
        ar->free_vdev_map |= 1LL << arvif->vdev_id;
        list_del(&arvif->list);
 
-       if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+       if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
+           arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
                ret = ath10k_wmi_peer_delete(arvif->ar, arvif->vdev_id,
                                             vif->addr);
                if (ret)
-                       ath10k_warn(ar, "failed to submit AP self-peer removal on vdev %i: %d\n",
+                       ath10k_warn(ar, "failed to submit AP/IBSS self-peer removal on vdev %i: %d\n",
                                    arvif->vdev_id, ret);
 
                kfree(arvif->u.ap.noa_data);
@@ -3472,7 +4443,8 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
        /* Some firmware revisions don't notify host about self-peer removal
         * until after associated vdev is deleted.
         */
-       if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+       if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
+           arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
                ret = ath10k_wait_for_peer_deleted(ar, arvif->vdev_id,
                                                   vif->addr);
                if (ret)
@@ -3486,6 +4458,17 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
 
        ath10k_peer_cleanup(ar, arvif->vdev_id);
 
+       if (vif->type == NL80211_IFTYPE_MONITOR) {
+               ar->monitor_arvif = NULL;
+               ret = ath10k_monitor_recalc(ar);
+               if (ret)
+                       ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
+       }
+
+       spin_lock_bh(&ar->htt.tx_lock);
+       ath10k_mac_vif_tx_unlock_all(arvif);
+       spin_unlock_bh(&ar->htt.tx_lock);
+
        mutex_unlock(&ar->conf_mutex);
 }
 
@@ -3493,8 +4476,7 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
  * FIXME: Has to be verified.
  */
 #define SUPPORTED_FILTERS                      \
-       (FIF_PROMISC_IN_BSS |                   \
-       FIF_ALLMULTI |                          \
+       (FIF_ALLMULTI |                         \
        FIF_CONTROL |                           \
        FIF_PSPOLL |                            \
        FIF_OTHER_BSS |                         \
@@ -3615,6 +4597,13 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
                if (ret)
                        ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
                                    arvif->vdev_id, ret);
+
+               vdev_param = ar->wmi.vdev_param->protection_mode;
+               ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+                                               info->use_cts_prot ? 1 : 0);
+               if (ret)
+                       ath10k_warn(ar, "failed to set protection mode %d on vdev %i: %d\n",
+                                   info->use_cts_prot, arvif->vdev_id, ret);
        }
 
        if (changed & BSS_CHANGED_ERP_SLOT) {
@@ -3791,10 +4780,14 @@ static void ath10k_set_key_h_def_keyidx(struct ath10k *ar,
         * frames with multi-vif APs. This is not required for main firmware
         * branch (e.g. 636).
         *
-        * FIXME: This has been tested only in AP. It remains unknown if this
-        * is required for multi-vif STA interfaces on 10.1 */
+        * This is also needed for 636 fw for IBSS-RSN to work more reliably.
+        *
+        * FIXME: It remains unknown if this is required for multi-vif STA
+        * interfaces on 10.1.
+        */
 
-       if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
+       if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
+           arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
                return;
 
        if (key->cipher == WLAN_CIPHER_SUITE_WEP40)
@@ -3826,8 +4819,14 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
        const u8 *peer_addr;
        bool is_wep = key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
                      key->cipher == WLAN_CIPHER_SUITE_WEP104;
-       bool def_idx = false;
        int ret = 0;
+       int ret2;
+       u32 flags = 0;
+       u32 flags2;
+
+       /* this one needs to be done in software */
+       if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
+               return 1;
 
        if (key->keyidx > WMI_MAX_KEY_INDEX)
                return -ENOSPC;
@@ -3843,6 +4842,13 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 
        key->hw_key_idx = key->keyidx;
 
+       if (is_wep) {
+               if (cmd == SET_KEY)
+                       arvif->wep_keys[key->keyidx] = key;
+               else
+                       arvif->wep_keys[key->keyidx] = NULL;
+       }
+
        /* the peer should not disappear midway (unless FW goes awry) since
         * we already hold conf_mutex; we just make sure it's there now. */
        spin_lock_bh(&ar->data_lock);
@@ -3862,31 +4868,62 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
                }
        }
 
-       if (is_wep) {
-               if (cmd == SET_KEY)
-                       arvif->wep_keys[key->keyidx] = key;
-               else
-                       arvif->wep_keys[key->keyidx] = NULL;
+       if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+               flags |= WMI_KEY_PAIRWISE;
+       else
+               flags |= WMI_KEY_GROUP;
 
+       if (is_wep) {
                if (cmd == DISABLE_KEY)
                        ath10k_clear_vdev_key(arvif, key);
-       }
 
-       /* set TX_USAGE flag for all the keys incase of dot1x-WEP. For
-        * static WEP, do not set this flag for the keys whose key id
-        * is  greater than default key id.
-        */
-       if (arvif->def_wep_key_idx == -1)
-               def_idx = true;
+               /* When WEP keys are uploaded it's possible that stations are
+                * already associated (e.g. when merging) without any keys.
+                * Static WEP needs an explicit per-peer key upload.
+                */
+               if (vif->type == NL80211_IFTYPE_ADHOC &&
+                   cmd == SET_KEY)
+                       ath10k_mac_vif_update_wep_key(arvif, key);
+
+               /* 802.1x never sets the def_wep_key_idx, so each set_key()
+                * call changes the default tx key.
+                *
+                * Static WEP sets def_wep_key_idx via .set_default_unicast_key
+                * after the first set_key().
+                */
+               if (cmd == SET_KEY && arvif->def_wep_key_idx == -1)
+                       flags |= WMI_KEY_TX_USAGE;
+       }
 
-       ret = ath10k_install_key(arvif, key, cmd, peer_addr, def_idx);
+       ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags);
        if (ret) {
                ath10k_warn(ar, "failed to install key for vdev %i peer %pM: %d\n",
                            arvif->vdev_id, peer_addr, ret);
                goto exit;
        }
 
-       ath10k_set_key_h_def_keyidx(ar, arvif, cmd, key);
+       /* mac80211 sets static WEP keys as groupwise while firmware requires
+        * them to be installed twice as both pairwise and groupwise.
+        */
+       if (is_wep && !sta && vif->type == NL80211_IFTYPE_STATION) {
+               flags2 = flags;
+               flags2 &= ~WMI_KEY_GROUP;
+               flags2 |= WMI_KEY_PAIRWISE;
+
+               ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags2);
+               if (ret) {
+                       ath10k_warn(ar, "failed to install (ucast) key for vdev %i peer %pM: %d\n",
+                                   arvif->vdev_id, peer_addr, ret);
+                       ret2 = ath10k_install_key(arvif, key, DISABLE_KEY,
+                                                 peer_addr, flags);
+                       if (ret2)
+                               ath10k_warn(ar, "failed to disable (mcast) key for vdev %i peer %pM: %d\n",
+                                           arvif->vdev_id, peer_addr, ret2);
+                       goto exit;
+               }
+       }
+
+       ath10k_set_key_h_def_keyidx(ar, arvif, cmd, key);
 
        spin_lock_bh(&ar->data_lock);
        peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr);
@@ -3933,6 +4970,7 @@ static void ath10k_set_default_unicast_key(struct ieee80211_hw *hw,
        }
 
        arvif->def_wep_key_idx = keyidx;
+
 unlock:
        mutex_unlock(&arvif->ar->conf_mutex);
 }
@@ -3943,6 +4981,10 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
        struct ath10k_vif *arvif;
        struct ath10k_sta *arsta;
        struct ieee80211_sta *sta;
+       struct cfg80211_chan_def def;
+       enum ieee80211_band band;
+       const u8 *ht_mcs_mask;
+       const u16 *vht_mcs_mask;
        u32 changed, bw, nss, smps;
        int err;
 
@@ -3951,6 +4993,13 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
        arvif = arsta->arvif;
        ar = arvif->ar;
 
+       if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
+               return;
+
+       band = def.chan->band;
+       ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
+       vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+
        spin_lock_bh(&ar->data_lock);
 
        changed = arsta->changed;
@@ -3964,6 +5013,10 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
 
        mutex_lock(&ar->conf_mutex);
 
+       nss = max_t(u32, 1, nss);
+       nss = min(nss, max(ath10k_mac_max_ht_nss(ht_mcs_mask),
+                          ath10k_mac_max_vht_nss(vht_mcs_mask)));
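
In effect the station's advertised NSS is clamped from below to 1 and from above to whatever the HT/VHT masks still allow. A one-shot sketch of the arithmetic (the helper return values are assumed inputs):

	#include <stdio.h>

	#define MAX(a, b) ((a) > (b) ? (a) : (b))
	#define MIN(a, b) ((a) < (b) ? (a) : (b))

	int main(void)
	{
		int sta_nss = 3;      /* what the station last reported */
		int ht_mask_nss = 2;  /* streams left enabled by the HT MCS mask */
		int vht_mask_nss = 1; /* streams left enabled by the VHT MCS mask */
		int nss = MAX(1, sta_nss);

		nss = MIN(nss, MAX(ht_mask_nss, vht_mask_nss));
		printf("%d\n", nss); /* "2" */
		return 0;
	}
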
+
        if (changed & IEEE80211_RC_BW_CHANGED) {
                ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d\n",
                           sta->addr, bw);
@@ -4011,14 +5064,14 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
        mutex_unlock(&ar->conf_mutex);
 }
 
-static int ath10k_mac_inc_num_stations(struct ath10k_vif *arvif)
+static int ath10k_mac_inc_num_stations(struct ath10k_vif *arvif,
+                                      struct ieee80211_sta *sta)
 {
        struct ath10k *ar = arvif->ar;
 
        lockdep_assert_held(&ar->conf_mutex);
 
-       if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
-           arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
+       if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
                return 0;
 
        if (ar->num_stations >= ar->max_num_stations)
@@ -4029,19 +5082,72 @@ static int ath10k_mac_inc_num_stations(struct ath10k_vif *arvif)
        return 0;
 }
 
-static void ath10k_mac_dec_num_stations(struct ath10k_vif *arvif)
+static void ath10k_mac_dec_num_stations(struct ath10k_vif *arvif,
+                                       struct ieee80211_sta *sta)
 {
        struct ath10k *ar = arvif->ar;
 
        lockdep_assert_held(&ar->conf_mutex);
 
-       if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
-           arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
+       if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
                return;
 
        ar->num_stations--;
 }
 
+struct ath10k_mac_tdls_iter_data {
+       u32 num_tdls_stations;
+       struct ieee80211_vif *curr_vif;
+};
+
+static void ath10k_mac_tdls_vif_stations_count_iter(void *data,
+                                                   struct ieee80211_sta *sta)
+{
+       struct ath10k_mac_tdls_iter_data *iter_data = data;
+       struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+       struct ieee80211_vif *sta_vif = arsta->arvif->vif;
+
+       if (sta->tdls && sta_vif == iter_data->curr_vif)
+               iter_data->num_tdls_stations++;
+}
+
+static int ath10k_mac_tdls_vif_stations_count(struct ieee80211_hw *hw,
+                                             struct ieee80211_vif *vif)
+{
+       struct ath10k_mac_tdls_iter_data data = {};
+
+       data.curr_vif = vif;
+
+       ieee80211_iterate_stations_atomic(hw,
+                                         ath10k_mac_tdls_vif_stations_count_iter,
+                                         &data);
+       return data.num_tdls_stations;
+}
+
+static void ath10k_mac_tdls_vifs_count_iter(void *data, u8 *mac,
+                                           struct ieee80211_vif *vif)
+{
+       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       int *num_tdls_vifs = data;
+
+       if (vif->type != NL80211_IFTYPE_STATION)
+               return;
+
+       if (ath10k_mac_tdls_vif_stations_count(arvif->ar->hw, vif) > 0)
+               (*num_tdls_vifs)++;
+}
+
+static int ath10k_mac_tdls_vifs_count(struct ieee80211_hw *hw)
+{
+       int num_tdls_vifs = 0;
+
+       ieee80211_iterate_active_interfaces_atomic(hw,
+                                                  IEEE80211_IFACE_ITER_NORMAL,
+                                                  ath10k_mac_tdls_vifs_count_iter,
+                                                  &num_tdls_vifs);
+       return num_tdls_vifs;
+}
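
Together these two iterators answer "how many TDLS peers does this vif have" and "how many vifs have at least one TDLS peer", which gates enabling/disabling the firmware TDLS state below. A toy sketch of the counting pattern, with plain arrays standing in for mac80211's station iteration:

	#include <stdbool.h>
	#include <stdio.h>

	/* hypothetical stand-in for a mac80211 station entry */
	struct sta {
		bool tdls;
		int vif_id;
	};

	static int count_tdls_stations(const struct sta *stas, int n, int vif_id)
	{
		int count = 0;
		int i;

		for (i = 0; i < n; i++)
			if (stas[i].tdls && stas[i].vif_id == vif_id)
				count++;

		return count;
	}

	int main(void)
	{
		struct sta stas[] = {
			{ .tdls = true,  .vif_id = 0 },
			{ .tdls = false, .vif_id = 0 },
			{ .tdls = true,  .vif_id = 1 },
		};

		printf("%d\n", count_tdls_stations(stas, 3, 0)); /* "1" */
		return 0;
	}
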
+
 static int ath10k_sta_state(struct ieee80211_hw *hw,
                            struct ieee80211_vif *vif,
                            struct ieee80211_sta *sta,
@@ -4072,41 +5178,80 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
                /*
                 * New station addition.
                 */
+               enum wmi_peer_type peer_type = WMI_PEER_TYPE_DEFAULT;
+               u32 num_tdls_stations;
+               u32 num_tdls_vifs;
+
                ath10k_dbg(ar, ATH10K_DBG_MAC,
                           "mac vdev %d peer create %pM (new sta) sta %d / %d peer %d / %d\n",
                           arvif->vdev_id, sta->addr,
                           ar->num_stations + 1, ar->max_num_stations,
                           ar->num_peers + 1, ar->max_num_peers);
 
-               ret = ath10k_mac_inc_num_stations(arvif);
+               ret = ath10k_mac_inc_num_stations(arvif, sta);
                if (ret) {
                        ath10k_warn(ar, "refusing to associate station: too many connected already (%d)\n",
                                    ar->max_num_stations);
                        goto exit;
                }
 
-               ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr);
+               if (sta->tdls)
+                       peer_type = WMI_PEER_TYPE_TDLS;
+
+               ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr,
+                                        peer_type);
                if (ret) {
                        ath10k_warn(ar, "failed to add peer %pM for vdev %d when adding a new sta: %i\n",
                                    sta->addr, arvif->vdev_id, ret);
-                       ath10k_mac_dec_num_stations(arvif);
+                       ath10k_mac_dec_num_stations(arvif, sta);
+                       goto exit;
+               }
+
+               if (!sta->tdls)
+                       goto exit;
+
+               num_tdls_stations = ath10k_mac_tdls_vif_stations_count(hw, vif);
+               num_tdls_vifs = ath10k_mac_tdls_vifs_count(hw);
+
+               if (num_tdls_vifs >= ar->max_num_tdls_vdevs &&
+                   num_tdls_stations == 0) {
+                       ath10k_warn(ar, "vdev %i exceeded maximum number of tdls vdevs %i\n",
+                                   arvif->vdev_id, ar->max_num_tdls_vdevs);
+                       ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
+                       ath10k_mac_dec_num_stations(arvif, sta);
+                       ret = -ENOBUFS;
                        goto exit;
                }
 
-               if (vif->type == NL80211_IFTYPE_STATION) {
-                       WARN_ON(arvif->is_started);
+               if (num_tdls_stations == 0) {
+                       /* This is the first TDLS peer on the current vif */
+                       enum wmi_tdls_state state = WMI_TDLS_ENABLE_ACTIVE;
 
-                       ret = ath10k_vdev_start(arvif);
+                       ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
+                                                             state);
                        if (ret) {
-                               ath10k_warn(ar, "failed to start vdev %i: %d\n",
+                               ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n",
                                            arvif->vdev_id, ret);
-                               WARN_ON(ath10k_peer_delete(ar, arvif->vdev_id,
-                                                          sta->addr));
-                               ath10k_mac_dec_num_stations(arvif);
+                               ath10k_peer_delete(ar, arvif->vdev_id,
+                                                  sta->addr);
+                               ath10k_mac_dec_num_stations(arvif, sta);
                                goto exit;
                        }
+               }
+
+               ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta,
+                                                 WMI_TDLS_PEER_STATE_PEERING);
+               if (ret) {
+                       ath10k_warn(ar,
+                                   "failed to update tdls peer %pM for vdev %d when adding a new sta: %i\n",
+                                   sta->addr, arvif->vdev_id, ret);
+                       ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
+                       ath10k_mac_dec_num_stations(arvif, sta);
 
-                       arvif->is_started = true;
+                       if (num_tdls_stations != 0)
+                               goto exit;
+                       ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
+                                                       WMI_TDLS_DISABLE);
                }
        } else if ((old_state == IEEE80211_STA_NONE &&
                    new_state == IEEE80211_STA_NOTEXIST)) {
@@ -4117,23 +5262,26 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
                           "mac vdev %d peer delete %pM (sta gone)\n",
                           arvif->vdev_id, sta->addr);
 
-               if (vif->type == NL80211_IFTYPE_STATION) {
-                       WARN_ON(!arvif->is_started);
-
-                       ret = ath10k_vdev_stop(arvif);
-                       if (ret)
-                               ath10k_warn(ar, "failed to stop vdev %i: %d\n",
-                                           arvif->vdev_id, ret);
-
-                       arvif->is_started = false;
-               }
-
                ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
                if (ret)
                        ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n",
                                    sta->addr, arvif->vdev_id, ret);
 
-               ath10k_mac_dec_num_stations(arvif);
+               ath10k_mac_dec_num_stations(arvif, sta);
+
+               if (!sta->tdls)
+                       goto exit;
+
+               if (ath10k_mac_tdls_vif_stations_count(hw, vif))
+                       goto exit;
+
+               /* This was the last TDLS peer on the current vif */
+               ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
+                                                     WMI_TDLS_DISABLE);
+               if (ret) {
+                       ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n",
+                                   arvif->vdev_id, ret);
+               }
        } else if (old_state == IEEE80211_STA_AUTH &&
                   new_state == IEEE80211_STA_ASSOC &&
                   (vif->type == NL80211_IFTYPE_AP ||
@@ -4149,9 +5297,30 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
                        ath10k_warn(ar, "failed to associate station %pM for vdev %i: %i\n",
                                    sta->addr, arvif->vdev_id, ret);
        } else if (old_state == IEEE80211_STA_ASSOC &&
-                  new_state == IEEE80211_STA_AUTH &&
-                  (vif->type == NL80211_IFTYPE_AP ||
-                   vif->type == NL80211_IFTYPE_ADHOC)) {
+                  new_state == IEEE80211_STA_AUTHORIZED &&
+                  sta->tdls) {
+               /*
+                * TDLS station authorized.
+                */
+               ath10k_dbg(ar, ATH10K_DBG_MAC, "mac tdls sta %pM authorized\n",
+                          sta->addr);
+
+               ret = ath10k_station_assoc(ar, vif, sta, false);
+               if (ret) {
+                       ath10k_warn(ar, "failed to associate tdls station %pM for vdev %i: %i\n",
+                                   sta->addr, arvif->vdev_id, ret);
+                       goto exit;
+               }
+
+               ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta,
+                                                 WMI_TDLS_PEER_STATE_CONNECTED);
+               if (ret)
+                       ath10k_warn(ar, "failed to update tdls peer %pM for vdev %i: %i\n",
+                                   sta->addr, arvif->vdev_id, ret);
+       } else if (old_state == IEEE80211_STA_ASSOC &&
+                   new_state == IEEE80211_STA_AUTH &&
+                   (vif->type == NL80211_IFTYPE_AP ||
+                    vif->type == NL80211_IFTYPE_ADHOC)) {
                /*
                 * Disassociation.
                 */
@@ -4356,6 +5525,7 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
        struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
        struct wmi_start_scan_arg arg;
        int ret = 0;
+       u32 scan_time_msec;
 
        mutex_lock(&ar->conf_mutex);
 
@@ -4382,7 +5552,7 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
        if (ret)
                goto exit;
 
-       duration = max(duration, WMI_SCAN_CHAN_MIN_TIME_MSEC);
+       scan_time_msec = ar->hw->wiphy->max_remain_on_channel_duration * 2;
 
        memset(&arg, 0, sizeof(arg));
        ath10k_wmi_start_scan_init(ar, &arg);
@@ -4390,11 +5560,12 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
        arg.scan_id = ATH10K_SCAN_ID;
        arg.n_channels = 1;
        arg.channels[0] = chan->center_freq;
-       arg.dwell_time_active = duration;
-       arg.dwell_time_passive = duration;
-       arg.max_scan_time = 2 * duration;
+       arg.dwell_time_active = scan_time_msec;
+       arg.dwell_time_passive = scan_time_msec;
+       arg.max_scan_time = scan_time_msec;
        arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
        arg.scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ;
+       arg.burst_duration_ms = duration;
 
        ret = ath10k_start_scan(ar, &arg);
        if (ret) {
@@ -4417,6 +5588,9 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
                goto exit;
        }
 
+       ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
+                                    msecs_to_jiffies(duration));
+
        ret = 0;
 exit:
        mutex_unlock(&ar->conf_mutex);
@@ -4512,70 +5686,6 @@ static int ath10k_tx_last_beacon(struct ieee80211_hw *hw)
        return 1;
 }
 
-#ifdef CONFIG_PM
-static int ath10k_suspend(struct ieee80211_hw *hw,
-                         struct cfg80211_wowlan *wowlan)
-{
-       struct ath10k *ar = hw->priv;
-       int ret;
-
-       mutex_lock(&ar->conf_mutex);
-
-       ret = ath10k_wait_for_suspend(ar, WMI_PDEV_SUSPEND);
-       if (ret) {
-               if (ret == -ETIMEDOUT)
-                       goto resume;
-               ret = 1;
-               goto exit;
-       }
-
-       ret = ath10k_hif_suspend(ar);
-       if (ret) {
-               ath10k_warn(ar, "failed to suspend hif: %d\n", ret);
-               goto resume;
-       }
-
-       ret = 0;
-       goto exit;
-resume:
-       ret = ath10k_wmi_pdev_resume_target(ar);
-       if (ret)
-               ath10k_warn(ar, "failed to resume target: %d\n", ret);
-
-       ret = 1;
-exit:
-       mutex_unlock(&ar->conf_mutex);
-       return ret;
-}
-
-static int ath10k_resume(struct ieee80211_hw *hw)
-{
-       struct ath10k *ar = hw->priv;
-       int ret;
-
-       mutex_lock(&ar->conf_mutex);
-
-       ret = ath10k_hif_resume(ar);
-       if (ret) {
-               ath10k_warn(ar, "failed to resume hif: %d\n", ret);
-               ret = 1;
-               goto exit;
-       }
-
-       ret = ath10k_wmi_pdev_resume_target(ar);
-       if (ret) {
-               ath10k_warn(ar, "failed to resume target: %d\n", ret);
-               ret = 1;
-               goto exit;
-       }
-
-       ret = 0;
-exit:
-       mutex_unlock(&ar->conf_mutex);
-       return ret;
-}
-#endif
-
 static void ath10k_reconfig_complete(struct ieee80211_hw *hw,
                                     enum ieee80211_reconfig_type reconfig_type)
 {
@@ -4635,343 +5745,286 @@ exit:
        return ret;
 }
 
-/* Helper table for legacy fixed_rate/bitrate_mask */
-static const u8 cck_ofdm_rate[] = {
-       /* CCK */
-       3, /* 1Mbps */
-       2, /* 2Mbps */
-       1, /* 5.5Mbps */
-       0, /* 11Mbps */
-       /* OFDM */
-       3, /* 6Mbps */
-       7, /* 9Mbps */
-       2, /* 12Mbps */
-       6, /* 18Mbps */
-       1, /* 24Mbps */
-       5, /* 36Mbps */
-       0, /* 48Mbps */
-       4, /* 54Mbps */
-};
-
-/* Check if only one bit set */
-static int ath10k_check_single_mask(u32 mask)
-{
-       int bit;
-
-       bit = ffs(mask);
-       if (!bit)
-               return 0;
-
-       mask &= ~BIT(bit - 1);
-       if (mask)
-               return 2;
-
-       return 1;
-}
-
 static bool
-ath10k_default_bitrate_mask(struct ath10k *ar,
-                           enum ieee80211_band band,
-                           const struct cfg80211_bitrate_mask *mask)
+ath10k_mac_bitrate_mask_has_single_rate(struct ath10k *ar,
+                                       enum ieee80211_band band,
+                                       const struct cfg80211_bitrate_mask *mask)
 {
-       u32 legacy = 0x00ff;
-       u8 ht = 0xff, i;
-       u16 vht = 0x3ff;
-       u16 nrf = ar->num_rf_chains;
-
-       if (ar->cfg_tx_chainmask)
-               nrf = get_nss_from_chainmask(ar->cfg_tx_chainmask);
-
-       switch (band) {
-       case IEEE80211_BAND_2GHZ:
-               legacy = 0x00fff;
-               vht = 0;
-               break;
-       case IEEE80211_BAND_5GHZ:
-               break;
-       default:
-               return false;
-       }
+       int num_rates = 0;
+       int i;
 
-       if (mask->control[band].legacy != legacy)
-               return false;
+       num_rates += hweight32(mask->control[band].legacy);
 
-       for (i = 0; i < nrf; i++)
-               if (mask->control[band].ht_mcs[i] != ht)
-                       return false;
+       for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++)
+               num_rates += hweight8(mask->control[band].ht_mcs[i]);
 
-       for (i = 0; i < nrf; i++)
-               if (mask->control[band].vht_mcs[i] != vht)
-                       return false;
+       for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++)
+               num_rates += hweight16(mask->control[band].vht_mcs[i]);
 
-       return true;
+       return num_rates == 1;
 }
 
 static bool
-ath10k_bitrate_mask_nss(const struct cfg80211_bitrate_mask *mask,
-                       enum ieee80211_band band,
-                       u8 *fixed_nss)
-{
-       int ht_nss = 0, vht_nss = 0, i;
+ath10k_mac_bitrate_mask_get_single_nss(struct ath10k *ar,
+                                      enum ieee80211_band band,
+                                      const struct cfg80211_bitrate_mask *mask,
+                                      int *nss)
+{
+       struct ieee80211_supported_band *sband = &ar->mac.sbands[band];
+       u16 vht_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
+       u8 ht_nss_mask = 0;
+       u8 vht_nss_mask = 0;
+       int i;
 
-       /* check legacy */
-       if (ath10k_check_single_mask(mask->control[band].legacy))
+       if (mask->control[band].legacy)
                return false;
 
-       /* check HT */
-       for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++) {
-               if (mask->control[band].ht_mcs[i] == 0xff)
+       for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) {
+               if (mask->control[band].ht_mcs[i] == 0)
                        continue;
-               else if (mask->control[band].ht_mcs[i] == 0x00)
-                       break;
-
-               return false;
+               else if (mask->control[band].ht_mcs[i] ==
+                        sband->ht_cap.mcs.rx_mask[i])
+                       ht_nss_mask |= BIT(i);
+               else
+                       return false;
        }
 
-       ht_nss = i;
-
-       /* check VHT */
-       for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
-               if (mask->control[band].vht_mcs[i] == 0x03ff)
+       for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
+               if (mask->control[band].vht_mcs[i] == 0)
                        continue;
-               else if (mask->control[band].vht_mcs[i] == 0x0000)
-                       break;
-
-               return false;
+               else if (mask->control[band].vht_mcs[i] ==
+                        ath10k_mac_get_max_vht_mcs_map(vht_mcs_map, i))
+                       vht_nss_mask |= BIT(i);
+               else
+                       return false;
        }
 
-       vht_nss = i;
-
-       if (ht_nss > 0 && vht_nss > 0)
+       if (ht_nss_mask != vht_nss_mask)
                return false;
 
-       if (ht_nss)
-               *fixed_nss = ht_nss;
-       else if (vht_nss)
-               *fixed_nss = vht_nss;
-       else
+       if (ht_nss_mask == 0)
                return false;
 
-       return true;
-}
-
-static bool
-ath10k_bitrate_mask_correct(const struct cfg80211_bitrate_mask *mask,
-                           enum ieee80211_band band,
-                           enum wmi_rate_preamble *preamble)
-{
-       int legacy = 0, ht = 0, vht = 0, i;
-
-       *preamble = WMI_RATE_PREAMBLE_OFDM;
-
-       /* check legacy */
-       legacy = ath10k_check_single_mask(mask->control[band].legacy);
-       if (legacy > 1)
+       if (BIT(fls(ht_nss_mask)) - 1 != ht_nss_mask)
                return false;
 
-       /* check HT */
-       for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
-               ht += ath10k_check_single_mask(mask->control[band].ht_mcs[i]);
-       if (ht > 1)
-               return false;
-
-       /* check VHT */
-       for (i = 0; i < NL80211_VHT_NSS_MAX; i++)
-               vht += ath10k_check_single_mask(mask->control[band].vht_mcs[i]);
-       if (vht > 1)
-               return false;
-
-       /* Currently we support only one fixed_rate */
-       if ((legacy + ht + vht) != 1)
-               return false;
-
-       if (ht)
-               *preamble = WMI_RATE_PREAMBLE_HT;
-       else if (vht)
-               *preamble = WMI_RATE_PREAMBLE_VHT;
+       *nss = fls(ht_nss_mask);
 
        return true;
 }
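+
+/* For illustration of the contiguity check above: ht_nss_mask = 0x3
+ * (NSS 1-2 enabled) passes since BIT(fls(0x3)) - 1 == 0x3 and yields
+ * *nss = 2, whereas a sparse mask such as 0x5 (NSS 1 and 3) fails the
+ * check and cannot be expressed as a single fixed NSS.
+ */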
 
-static bool
-ath10k_bitrate_mask_rate(struct ath10k *ar,
-                        const struct cfg80211_bitrate_mask *mask,
-                        enum ieee80211_band band,
-                        u8 *fixed_rate,
-                        u8 *fixed_nss)
+static int
+ath10k_mac_bitrate_mask_get_single_rate(struct ath10k *ar,
+                                       enum ieee80211_band band,
+                                       const struct cfg80211_bitrate_mask *mask,
+                                       u8 *rate, u8 *nss)
 {
-       u8 rate = 0, pream = 0, nss = 0, i;
-       enum wmi_rate_preamble preamble;
-
-       /* Check if single rate correct */
-       if (!ath10k_bitrate_mask_correct(mask, band, &preamble))
-               return false;
-
-       pream = preamble;
-
-       switch (preamble) {
-       case WMI_RATE_PREAMBLE_CCK:
-       case WMI_RATE_PREAMBLE_OFDM:
-               i = ffs(mask->control[band].legacy) - 1;
-
-               if (band == IEEE80211_BAND_2GHZ && i < 4)
-                       pream = WMI_RATE_PREAMBLE_CCK;
-
-               if (band == IEEE80211_BAND_5GHZ)
-                       i += 4;
-
-               if (i >= ARRAY_SIZE(cck_ofdm_rate))
-                       return false;
+       struct ieee80211_supported_band *sband = &ar->mac.sbands[band];
+       int rate_idx;
+       int i;
+       u16 bitrate;
+       u8 preamble;
+       u8 hw_rate;
 
-               rate = cck_ofdm_rate[i];
-               break;
-       case WMI_RATE_PREAMBLE_HT:
-               for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
-                       if (mask->control[band].ht_mcs[i])
-                               break;
+       if (hweight32(mask->control[band].legacy) == 1) {
+               rate_idx = ffs(mask->control[band].legacy) - 1;
 
-               if (i == IEEE80211_HT_MCS_MASK_LEN)
-                       return false;
+               hw_rate = sband->bitrates[rate_idx].hw_value;
+               bitrate = sband->bitrates[rate_idx].bitrate;
 
-               rate = ffs(mask->control[band].ht_mcs[i]) - 1;
-               nss = i;
-               break;
-       case WMI_RATE_PREAMBLE_VHT:
-               for (i = 0; i < NL80211_VHT_NSS_MAX; i++)
-                       if (mask->control[band].vht_mcs[i])
-                               break;
+               if (ath10k_mac_bitrate_is_cck(bitrate))
+                       preamble = WMI_RATE_PREAMBLE_CCK;
+               else
+                       preamble = WMI_RATE_PREAMBLE_OFDM;
 
-               if (i == NL80211_VHT_NSS_MAX)
-                       return false;
+               *nss = 1;
+               *rate = preamble << 6 |
+                       (*nss - 1) << 4 |
+                       hw_rate << 0;
 
-               rate = ffs(mask->control[band].vht_mcs[i]) - 1;
-               nss = i;
-               break;
+               return 0;
        }
 
-       *fixed_nss = nss + 1;
-       nss <<= 4;
-       pream <<= 6;
+       for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) {
+               if (hweight8(mask->control[band].ht_mcs[i]) == 1) {
+                       *nss = i + 1;
+                       *rate = WMI_RATE_PREAMBLE_HT << 6 |
+                               (*nss - 1) << 4 |
+                               (ffs(mask->control[band].ht_mcs[i]) - 1);
 
-       ath10k_dbg(ar, ATH10K_DBG_MAC, "mac fixed rate pream 0x%02x nss 0x%02x rate 0x%02x\n",
-                  pream, nss, rate);
-
-       *fixed_rate = pream | nss | rate;
+                       return 0;
+               }
+       }
 
-       return true;
-}
+       for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
+               if (hweight16(mask->control[band].vht_mcs[i]) == 1) {
+                       *nss = i + 1;
+                       *rate = WMI_RATE_PREAMBLE_VHT << 6 |
+                               (*nss - 1) << 4 |
+                               (ffs(mask->control[band].vht_mcs[i]) - 1);
 
-static bool ath10k_get_fixed_rate_nss(struct ath10k *ar,
-                                     const struct cfg80211_bitrate_mask *mask,
-                                     enum ieee80211_band band,
-                                     u8 *fixed_rate,
-                                     u8 *fixed_nss)
-{
-       /* First check full NSS mask, if we can simply limit NSS */
-       if (ath10k_bitrate_mask_nss(mask, band, fixed_nss))
-               return true;
+                       return 0;
+               }
+       }
 
-       /* Next Check single rate is set */
-       return ath10k_bitrate_mask_rate(ar, mask, band, fixed_rate, fixed_nss);
+       return -EINVAL;
 }
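+
+/* The rate codes built above pack preamble, NSS and rate index as
+ * preamble << 6 | (nss - 1) << 4 | rate. Worked example, assuming the
+ * usual wmi_rate_preamble numbering (OFDM=0, CCK=1, HT=2, VHT=3):
+ * HT MCS 5 at 2 spatial streams encodes as (2 << 6) | (1 << 4) | 5 = 0x95.
+ */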
 
-static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif,
-                                      u8 fixed_rate,
-                                      u8 fixed_nss,
-                                      u8 force_sgi)
+static int ath10k_mac_set_fixed_rate_params(struct ath10k_vif *arvif,
+                                           u8 rate, u8 nss, u8 sgi)
 {
        struct ath10k *ar = arvif->ar;
        u32 vdev_param;
-       int ret = 0;
-
-       mutex_lock(&ar->conf_mutex);
-
-       if (arvif->fixed_rate == fixed_rate &&
-           arvif->fixed_nss == fixed_nss &&
-           arvif->force_sgi == force_sgi)
-               goto exit;
+       int ret;
 
-       if (fixed_rate == WMI_FIXED_RATE_NONE)
-               ath10k_dbg(ar, ATH10K_DBG_MAC, "mac disable fixed bitrate mask\n");
+       lockdep_assert_held(&ar->conf_mutex);
 
-       if (force_sgi)
-               ath10k_dbg(ar, ATH10K_DBG_MAC, "mac force sgi\n");
+       ath10k_dbg(ar, ATH10K_DBG_MAC, "mac set fixed rate params vdev %i rate 0x%02hhx nss %hhu sgi %hhu\n",
+                  arvif->vdev_id, rate, nss, sgi);
 
        vdev_param = ar->wmi.vdev_param->fixed_rate;
-       ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
-                                       vdev_param, fixed_rate);
+       ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, rate);
        if (ret) {
                ath10k_warn(ar, "failed to set fixed rate param 0x%02x: %d\n",
-                           fixed_rate, ret);
-               ret = -EINVAL;
-               goto exit;
+                           rate, ret);
+               return ret;
        }
 
-       arvif->fixed_rate = fixed_rate;
-
        vdev_param = ar->wmi.vdev_param->nss;
-       ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
-                                       vdev_param, fixed_nss);
+       ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, nss);
+       if (ret) {
+               ath10k_warn(ar, "failed to set nss param %d: %d\n", nss, ret);
+               return ret;
+       }
 
+       vdev_param = ar->wmi.vdev_param->sgi;
+       ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, sgi);
        if (ret) {
-               ath10k_warn(ar, "failed to set fixed nss param %d: %d\n",
-                           fixed_nss, ret);
-               ret = -EINVAL;
-               goto exit;
+               ath10k_warn(ar, "failed to set sgi param %d: %d\n", sgi, ret);
+               return ret;
        }
 
-       arvif->fixed_nss = fixed_nss;
+       return 0;
+}
 
-       vdev_param = ar->wmi.vdev_param->sgi;
-       ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
-                                       force_sgi);
+static bool
+ath10k_mac_can_set_bitrate_mask(struct ath10k *ar,
+                               enum ieee80211_band band,
+                               const struct cfg80211_bitrate_mask *mask)
+{
+       int i;
+       u16 vht_mcs;
 
-       if (ret) {
-               ath10k_warn(ar, "failed to set sgi param %d: %d\n",
-                           force_sgi, ret);
-               ret = -EINVAL;
-               goto exit;
+       /* Due to a firmware limitation in WMI_PEER_ASSOC_CMDID it is
+        * impossible to express arbitrary VHT MCS rate masks. Effectively
+        * only the following ranges can be used: none, 0-7, 0-8 and 0-9.
+        */
+       for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
+               vht_mcs = mask->control[band].vht_mcs[i];
+
+               switch (vht_mcs) {
+               case 0:
+               case BIT(8) - 1:
+               case BIT(9) - 1:
+               case BIT(10) - 1:
+                       break;
+               default:
+                       ath10k_warn(ar, "refusing bitrate mask with missing 0-7 VHT MCS rates\n");
+                       return false;
+               }
        }
 
-       arvif->force_sgi = force_sgi;
+       return true;
+}
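+
+/* Spelled out, the masks accepted above are 0x0000 (no VHT rates),
+ * BIT(8)-1 = 0x00ff (MCS 0-7), BIT(9)-1 = 0x01ff (MCS 0-8) and
+ * BIT(10)-1 = 0x03ff (MCS 0-9). A sparse mask such as 0x0155 is
+ * rejected because the firmware cannot express it.
+ */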
 
-exit:
-       mutex_unlock(&ar->conf_mutex);
-       return ret;
+static void ath10k_mac_set_bitrate_mask_iter(void *data,
+                                            struct ieee80211_sta *sta)
+{
+       struct ath10k_vif *arvif = data;
+       struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+       struct ath10k *ar = arvif->ar;
+
+       if (arsta->arvif != arvif)
+               return;
+
+       spin_lock_bh(&ar->data_lock);
+       arsta->changed |= IEEE80211_RC_SUPP_RATES_CHANGED;
+       spin_unlock_bh(&ar->data_lock);
+
+       ieee80211_queue_work(ar->hw, &arsta->update_wk);
 }
 
-static int ath10k_set_bitrate_mask(struct ieee80211_hw *hw,
-                                  struct ieee80211_vif *vif,
-                                  const struct cfg80211_bitrate_mask *mask)
+static int ath10k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
+                                         struct ieee80211_vif *vif,
+                                         const struct cfg80211_bitrate_mask *mask)
 {
        struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct cfg80211_chan_def def;
        struct ath10k *ar = arvif->ar;
-       enum ieee80211_band band = ar->hw->conf.chandef.chan->band;
-       u8 fixed_rate = WMI_FIXED_RATE_NONE;
-       u8 fixed_nss = ar->num_rf_chains;
-       u8 force_sgi;
+       enum ieee80211_band band;
+       const u8 *ht_mcs_mask;
+       const u16 *vht_mcs_mask;
+       u8 rate;
+       u8 nss;
+       u8 sgi;
+       int single_nss;
+       int ret;
 
-       if (ar->cfg_tx_chainmask)
-               fixed_nss = get_nss_from_chainmask(ar->cfg_tx_chainmask);
+       if (ath10k_mac_vif_chan(vif, &def))
+               return -EPERM;
 
-       force_sgi = mask->control[band].gi;
-       if (force_sgi == NL80211_TXRATE_FORCE_LGI)
+       band = def.chan->band;
+       ht_mcs_mask = mask->control[band].ht_mcs;
+       vht_mcs_mask = mask->control[band].vht_mcs;
+
+       sgi = mask->control[band].gi;
+       if (sgi == NL80211_TXRATE_FORCE_LGI)
                return -EINVAL;
 
-       if (!ath10k_default_bitrate_mask(ar, band, mask)) {
-               if (!ath10k_get_fixed_rate_nss(ar, mask, band,
-                                              &fixed_rate,
-                                              &fixed_nss))
+       if (ath10k_mac_bitrate_mask_has_single_rate(ar, band, mask)) {
+               ret = ath10k_mac_bitrate_mask_get_single_rate(ar, band, mask,
+                                                             &rate, &nss);
+               if (ret) {
+                       ath10k_warn(ar, "failed to get single rate for vdev %i: %d\n",
+                                   arvif->vdev_id, ret);
+                       return ret;
+               }
+       } else if (ath10k_mac_bitrate_mask_get_single_nss(ar, band, mask,
+                                                         &single_nss)) {
+               rate = WMI_FIXED_RATE_NONE;
+               nss = single_nss;
+       } else {
+               rate = WMI_FIXED_RATE_NONE;
+               nss = min(ar->num_rf_chains,
+                         max(ath10k_mac_max_ht_nss(ht_mcs_mask),
+                             ath10k_mac_max_vht_nss(vht_mcs_mask)));
+
+               if (!ath10k_mac_can_set_bitrate_mask(ar, band, mask))
                        return -EINVAL;
+
+               mutex_lock(&ar->conf_mutex);
+
+               arvif->bitrate_mask = *mask;
+               ieee80211_iterate_stations_atomic(ar->hw,
+                                                 ath10k_mac_set_bitrate_mask_iter,
+                                                 arvif);
+
+               mutex_unlock(&ar->conf_mutex);
        }
 
-       if (fixed_rate == WMI_FIXED_RATE_NONE && force_sgi) {
-               ath10k_warn(ar, "failed to force SGI usage for default rate settings\n");
-               return -EINVAL;
+       mutex_lock(&ar->conf_mutex);
+
+       ret = ath10k_mac_set_fixed_rate_params(arvif, rate, nss, sgi);
+       if (ret) {
+               ath10k_warn(ar, "failed to set fixed rate params on vdev %i: %d\n",
+                           arvif->vdev_id, ret);
+               goto exit;
        }
 
-       return ath10k_set_fixed_rate_param(arvif, fixed_rate,
-                                          fixed_nss, force_sgi);
+exit:
+       mutex_unlock(&ar->conf_mutex);
+
+       return ret;
 }
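+
+/* Summary of the three cases above: a mask selecting exactly one rate
+ * programs a fixed rate code; a mask that only constrains spatial streams
+ * programs a fixed NSS with WMI_FIXED_RATE_NONE; any other mask is stored
+ * in arvif->bitrate_mask and applied per station by kicking each
+ * station's update worker.
+ */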
 
 static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
@@ -5090,6 +6143,286 @@ static int ath10k_ampdu_action(struct ieee80211_hw *hw,
        return -EINVAL;
 }
 
+static void
+ath10k_mac_update_rx_channel(struct ath10k *ar)
+{
+       struct cfg80211_chan_def *def = NULL;
+
+       /* Both locks are required because ar->rx_channel is modified. This
+        * allows readers to hold either lock.
+        */
+       lockdep_assert_held(&ar->conf_mutex);
+       lockdep_assert_held(&ar->data_lock);
+
+       /* FIXME: Sort of an optimization and a workaround. Peers and vifs are
+        * on a linked list now. Doing a lookup peer -> vif -> chanctx for each
+        * ppdu on Rx may reduce performance on low-end systems. It should be
+        * possible to speed the lookup up with tables or hashmaps (be wary of
+        * CPU data cache line sizes though), but to keep the initial
+        * implementation simple and less intrusive, fall back to the slow
+        * lookup only for multi-channel cases. Single-channel cases continue
+        * to use the old channel derivation, so performance should not be
+        * affected much.
+        */
+       rcu_read_lock();
+       if (ath10k_mac_num_chanctxs(ar) == 1) {
+               ieee80211_iter_chan_contexts_atomic(ar->hw,
+                                       ath10k_mac_get_any_chandef_iter,
+                                       &def);
+               ar->rx_channel = def->chan;
+       } else {
+               ar->rx_channel = NULL;
+       }
+       rcu_read_unlock();
+}
+
+static void
+ath10k_mac_chan_ctx_init(struct ath10k *ar,
+                        struct ath10k_chanctx *arctx,
+                        struct ieee80211_chanctx_conf *conf)
+{
+       lockdep_assert_held(&ar->conf_mutex);
+       lockdep_assert_held(&ar->data_lock);
+
+       memset(arctx, 0, sizeof(*arctx));
+
+       arctx->conf = *conf;
+}
+
+static int
+ath10k_mac_op_add_chanctx(struct ieee80211_hw *hw,
+                         struct ieee80211_chanctx_conf *ctx)
+{
+       struct ath10k *ar = hw->priv;
+       struct ath10k_chanctx *arctx = (void *)ctx->drv_priv;
+
+       ath10k_dbg(ar, ATH10K_DBG_MAC,
+                  "mac chanctx add freq %hu width %d ptr %p\n",
+                  ctx->def.chan->center_freq, ctx->def.width, ctx);
+
+       mutex_lock(&ar->conf_mutex);
+
+       spin_lock_bh(&ar->data_lock);
+       ath10k_mac_chan_ctx_init(ar, arctx, ctx);
+       ath10k_mac_update_rx_channel(ar);
+       spin_unlock_bh(&ar->data_lock);
+
+       ath10k_recalc_radar_detection(ar);
+       ath10k_monitor_recalc(ar);
+
+       mutex_unlock(&ar->conf_mutex);
+
+       return 0;
+}
+
+static void
+ath10k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
+                            struct ieee80211_chanctx_conf *ctx)
+{
+       struct ath10k *ar = hw->priv;
+
+       ath10k_dbg(ar, ATH10K_DBG_MAC,
+                  "mac chanctx remove freq %hu width %d ptr %p\n",
+                  ctx->def.chan->center_freq, ctx->def.width, ctx);
+
+       mutex_lock(&ar->conf_mutex);
+
+       spin_lock_bh(&ar->data_lock);
+       ath10k_mac_update_rx_channel(ar);
+       spin_unlock_bh(&ar->data_lock);
+
+       ath10k_recalc_radar_detection(ar);
+       ath10k_monitor_recalc(ar);
+
+       mutex_unlock(&ar->conf_mutex);
+}
+
+static void
+ath10k_mac_op_change_chanctx(struct ieee80211_hw *hw,
+                            struct ieee80211_chanctx_conf *ctx,
+                            u32 changed)
+{
+       struct ath10k *ar = hw->priv;
+       struct ath10k_chanctx *arctx = (void *)ctx->drv_priv;
+
+       mutex_lock(&ar->conf_mutex);
+
+       ath10k_dbg(ar, ATH10K_DBG_MAC,
+                  "mac chanctx change freq %hu->%hu width %d->%d ptr %p changed %x\n",
+                  arctx->conf.def.chan->center_freq,
+                  ctx->def.chan->center_freq,
+                  arctx->conf.def.width, ctx->def.width,
+                  ctx, changed);
+
+       /* This shouldn't really happen because channel switching should use
+        * switch_vif_chanctx().
+        */
+       if (WARN_ON(changed & IEEE80211_CHANCTX_CHANGE_CHANNEL))
+               goto unlock;
+
+       spin_lock_bh(&ar->data_lock);
+       arctx->conf = *ctx;
+       spin_unlock_bh(&ar->data_lock);
+
+       ath10k_recalc_radar_detection(ar);
+
+       /* FIXME: How to configure Rx chains properly? */
+
+       /* No other actions are actually necessary. Firmware maintains channel
+        * definitions per vdev internally and there's no host-side channel
+        * context abstraction to configure, e.g. channel width.
+        */
+
+unlock:
+       mutex_unlock(&ar->conf_mutex);
+}
+
+static int
+ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
+                                struct ieee80211_vif *vif,
+                                struct ieee80211_chanctx_conf *ctx)
+{
+       struct ath10k *ar = hw->priv;
+       struct ath10k_chanctx *arctx = (void *)ctx->drv_priv;
+       struct ath10k_vif *arvif = (void *)vif->drv_priv;
+       int ret;
+
+       mutex_lock(&ar->conf_mutex);
+
+       ath10k_dbg(ar, ATH10K_DBG_MAC,
+                  "mac chanctx assign ptr %p vdev_id %i\n",
+                  ctx, arvif->vdev_id);
+
+       if (WARN_ON(arvif->is_started)) {
+               mutex_unlock(&ar->conf_mutex);
+               return -EBUSY;
+       }
+
+       ret = ath10k_vdev_start(arvif, &arctx->conf.def);
+       if (ret) {
+               ath10k_warn(ar, "failed to start vdev %i addr %pM on freq %d: %d\n",
+                           arvif->vdev_id, vif->addr,
+                           arctx->conf.def.chan->center_freq, ret);
+               goto err;
+       }
+
+       arvif->is_started = true;
+
+       if (vif->type == NL80211_IFTYPE_MONITOR) {
+               ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, 0, vif->addr);
+               if (ret) {
+                       ath10k_warn(ar, "failed to up monitor vdev %i: %d\n",
+                                   arvif->vdev_id, ret);
+                       goto err_stop;
+               }
+
+               arvif->is_up = true;
+       }
+
+       mutex_unlock(&ar->conf_mutex);
+       return 0;
+
+err_stop:
+       ath10k_vdev_stop(arvif);
+       arvif->is_started = false;
+
+err:
+       mutex_unlock(&ar->conf_mutex);
+       return ret;
+}
+
+static void
+ath10k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
+                                  struct ieee80211_vif *vif,
+                                  struct ieee80211_chanctx_conf *ctx)
+{
+       struct ath10k *ar = hw->priv;
+       struct ath10k_vif *arvif = (void *)vif->drv_priv;
+       int ret;
+
+       mutex_lock(&ar->conf_mutex);
+
+       ath10k_dbg(ar, ATH10K_DBG_MAC,
+                  "mac chanctx unassign ptr %p vdev_id %i\n",
+                  ctx, arvif->vdev_id);
+
+       WARN_ON(!arvif->is_started);
+
+       if (vif->type == NL80211_IFTYPE_MONITOR) {
+               WARN_ON(!arvif->is_up);
+
+               ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
+               if (ret)
+                       ath10k_warn(ar, "failed to down monitor vdev %i: %d\n",
+                                   arvif->vdev_id, ret);
+
+               arvif->is_up = false;
+       }
+
+       ret = ath10k_vdev_stop(arvif);
+       if (ret)
+               ath10k_warn(ar, "failed to stop vdev %i: %d\n",
+                           arvif->vdev_id, ret);
+
+       arvif->is_started = false;
+
+       mutex_unlock(&ar->conf_mutex);
+}
+
+static int
+ath10k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw,
+                                struct ieee80211_vif_chanctx_switch *vifs,
+                                int n_vifs,
+                                enum ieee80211_chanctx_switch_mode mode)
+{
+       struct ath10k *ar = hw->priv;
+       struct ath10k_vif *arvif;
+       struct ath10k_chanctx *arctx_new, *arctx_old;
+       int i;
+
+       mutex_lock(&ar->conf_mutex);
+
+       ath10k_dbg(ar, ATH10K_DBG_MAC,
+                  "mac chanctx switch n_vifs %d mode %d\n",
+                  n_vifs, mode);
+
+       spin_lock_bh(&ar->data_lock);
+       for (i = 0; i < n_vifs; i++) {
+               arvif = ath10k_vif_to_arvif(vifs[i].vif);
+               arctx_new = (void *)vifs[i].new_ctx->drv_priv;
+               arctx_old = (void *)vifs[i].old_ctx->drv_priv;
+
+               ath10k_dbg(ar, ATH10K_DBG_MAC,
+                          "mac chanctx switch vdev_id %i freq %hu->%hu width %d->%d ptr %p->%p\n",
+                          arvif->vdev_id,
+                          vifs[i].old_ctx->def.chan->center_freq,
+                          vifs[i].new_ctx->def.chan->center_freq,
+                          vifs[i].old_ctx->def.width,
+                          vifs[i].new_ctx->def.width,
+                          arctx_old, arctx_new);
+
+               if (mode == CHANCTX_SWMODE_SWAP_CONTEXTS) {
+                       ath10k_mac_chan_ctx_init(ar, arctx_new,
+                                                vifs[i].new_ctx);
+               }
+
+               arctx_new->conf = *vifs[i].new_ctx;
+
+               /* FIXME: ath10k_mac_chan_reconfigure() uses the current, i.e.
+                * not yet updated, chanctx_conf pointer.
+                */
+               arctx_old->conf = *vifs[i].new_ctx;
+       }
+       ath10k_mac_update_rx_channel(ar);
+       spin_unlock_bh(&ar->data_lock);
+
+       /* FIXME: Reconfigure only affected vifs */
+       ath10k_mac_chan_reconfigure(ar);
+
+       mutex_unlock(&ar->conf_mutex);
+       return 0;
+}
+
 static const struct ieee80211_ops ath10k_ops = {
        .tx                             = ath10k_tx,
        .start                          = ath10k_start,
@@ -5114,31 +6447,31 @@ static const struct ieee80211_ops ath10k_ops = {
        .get_antenna                    = ath10k_get_antenna,
        .reconfig_complete              = ath10k_reconfig_complete,
        .get_survey                     = ath10k_get_survey,
-       .set_bitrate_mask               = ath10k_set_bitrate_mask,
+       .set_bitrate_mask               = ath10k_mac_op_set_bitrate_mask,
        .sta_rc_update                  = ath10k_sta_rc_update,
        .get_tsf                        = ath10k_get_tsf,
        .ampdu_action                   = ath10k_ampdu_action,
        .get_et_sset_count              = ath10k_debug_get_et_sset_count,
        .get_et_stats                   = ath10k_debug_get_et_stats,
        .get_et_strings                 = ath10k_debug_get_et_strings,
+       .add_chanctx                    = ath10k_mac_op_add_chanctx,
+       .remove_chanctx                 = ath10k_mac_op_remove_chanctx,
+       .change_chanctx                 = ath10k_mac_op_change_chanctx,
+       .assign_vif_chanctx             = ath10k_mac_op_assign_vif_chanctx,
+       .unassign_vif_chanctx           = ath10k_mac_op_unassign_vif_chanctx,
+       .switch_vif_chanctx             = ath10k_mac_op_switch_vif_chanctx,
 
        CFG80211_TESTMODE_CMD(ath10k_tm_cmd)
 
 #ifdef CONFIG_PM
-       .suspend                        = ath10k_suspend,
-       .resume                         = ath10k_resume,
+       .suspend                        = ath10k_wow_op_suspend,
+       .resume                         = ath10k_wow_op_resume,
 #endif
 #ifdef CONFIG_MAC80211_DEBUGFS
        .sta_add_debugfs                = ath10k_sta_add_debugfs,
 #endif
 };
 
-#define RATETAB_ENT(_rate, _rateid, _flags) { \
-       .bitrate                = (_rate), \
-       .flags                  = (_flags), \
-       .hw_value               = (_rateid), \
-}
-
 #define CHAN2G(_channel, _freq, _flags) { \
        .band                   = IEEE80211_BAND_2GHZ, \
        .hw_value               = (_channel), \
@@ -5194,6 +6527,7 @@ static const struct ieee80211_channel ath10k_5ghz_channels[] = {
        CHAN5G(132, 5660, 0),
        CHAN5G(136, 5680, 0),
        CHAN5G(140, 5700, 0),
+       CHAN5G(144, 5720, 0),
        CHAN5G(149, 5745, 0),
        CHAN5G(153, 5765, 0),
        CHAN5G(157, 5785, 0),
@@ -5201,31 +6535,6 @@ static const struct ieee80211_channel ath10k_5ghz_channels[] = {
        CHAN5G(165, 5825, 0),
 };
 
-/* Note: Be careful if you re-order these. There is code which depends on this
- * ordering.
- */
-static struct ieee80211_rate ath10k_rates[] = {
-       /* CCK */
-       RATETAB_ENT(10,  0x82, 0),
-       RATETAB_ENT(20,  0x84, 0),
-       RATETAB_ENT(55,  0x8b, 0),
-       RATETAB_ENT(110, 0x96, 0),
-       /* OFDM */
-       RATETAB_ENT(60,  0x0c, 0),
-       RATETAB_ENT(90,  0x12, 0),
-       RATETAB_ENT(120, 0x18, 0),
-       RATETAB_ENT(180, 0x24, 0),
-       RATETAB_ENT(240, 0x30, 0),
-       RATETAB_ENT(360, 0x48, 0),
-       RATETAB_ENT(480, 0x60, 0),
-       RATETAB_ENT(540, 0x6c, 0),
-};
-
-#define ath10k_a_rates (ath10k_rates + 4)
-#define ath10k_a_rates_size (ARRAY_SIZE(ath10k_rates) - 4)
-#define ath10k_g_rates (ath10k_rates + 0)
-#define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates))
-
 struct ath10k *ath10k_mac_create(size_t priv_size)
 {
        struct ieee80211_hw *hw;
@@ -5299,15 +6608,92 @@ static const struct ieee80211_iface_combination ath10k_10x_if_comb[] = {
        },
 };
 
+static const struct ieee80211_iface_limit ath10k_tlv_if_limit[] = {
+       {
+               .max = 2,
+               .types = BIT(NL80211_IFTYPE_STATION) |
+                        BIT(NL80211_IFTYPE_AP) |
+                        BIT(NL80211_IFTYPE_P2P_CLIENT) |
+                        BIT(NL80211_IFTYPE_P2P_GO),
+       },
+       {
+               .max = 1,
+               .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
+       },
+};
+
+static const struct ieee80211_iface_limit ath10k_tlv_if_limit_ibss[] = {
+       {
+               .max = 1,
+               .types = BIT(NL80211_IFTYPE_STATION),
+       },
+       {
+               .max = 1,
+               .types = BIT(NL80211_IFTYPE_ADHOC),
+       },
+};
+
+/* FIXME: This is not thoroughly tested. These combinations may over- or
+ * underestimate hw/fw capabilities.
+ */
+static struct ieee80211_iface_combination ath10k_tlv_if_comb[] = {
+       {
+               .limits = ath10k_tlv_if_limit,
+               .num_different_channels = 1,
+               .max_interfaces = 3,
+               .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit),
+       },
+       {
+               .limits = ath10k_tlv_if_limit_ibss,
+               .num_different_channels = 1,
+               .max_interfaces = 2,
+               .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss),
+       },
+};
+
+static struct ieee80211_iface_combination ath10k_tlv_qcs_if_comb[] = {
+       {
+               .limits = ath10k_tlv_if_limit,
+               .num_different_channels = 2,
+               .max_interfaces = 3,
+               .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit),
+       },
+       {
+               .limits = ath10k_tlv_if_limit_ibss,
+               .num_different_channels = 1,
+               .max_interfaces = 2,
+               .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss),
+       },
+};
+
 static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
 {
        struct ieee80211_sta_vht_cap vht_cap = {0};
        u16 mcs_map;
+       u32 val;
        int i;
 
        vht_cap.vht_supported = 1;
        vht_cap.cap = ar->vht_cap_info;
 
+       if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+                               IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) {
+               val = ar->num_rf_chains - 1;
+               val <<= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
+               val &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
+
+               vht_cap.cap |= val;
+       }
+
+       if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
+                               IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) {
+               val = ar->num_rf_chains - 1;
+               val <<= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
+               val &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
+
+               vht_cap.cap |= val;
+       }
+
        mcs_map = 0;
        for (i = 0; i < 8; i++) {
                if (i < ar->num_rf_chains)
@@ -5438,6 +6824,10 @@ int ath10k_mac_register(struct ath10k *ar)
        ht_cap = ath10k_get_ht_cap(ar);
        vht_cap = ath10k_create_vht_cap(ar);
 
+       BUILD_BUG_ON((ARRAY_SIZE(ath10k_2ghz_channels) +
+                     ARRAY_SIZE(ath10k_5ghz_channels)) !=
+                    ATH10K_NUM_CHANS);
+
        if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
                channels = kmemdup(ath10k_2ghz_channels,
                                   sizeof(ath10k_2ghz_channels),
@@ -5500,9 +6890,16 @@ int ath10k_mac_register(struct ath10k *ar)
                        IEEE80211_HW_HAS_RATE_CONTROL |
                        IEEE80211_HW_AP_LINK_PS |
                        IEEE80211_HW_SPECTRUM_MGMT |
-                       IEEE80211_HW_SW_CRYPTO_CONTROL;
+                       IEEE80211_HW_SW_CRYPTO_CONTROL |
+                       IEEE80211_HW_SUPPORT_FAST_XMIT |
+                       IEEE80211_HW_CONNECTION_MONITOR |
+                       IEEE80211_HW_SUPPORTS_PER_STA_GTK |
+                       IEEE80211_HW_WANT_MONITOR_VIF |
+                       IEEE80211_HW_CHANCTX_STA_CSA |
+                       IEEE80211_HW_QUEUE_CONTROL;
 
        ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
+       ar->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
 
        if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS)
                ar->hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS;
@@ -5517,6 +6914,7 @@ int ath10k_mac_register(struct ath10k *ar)
 
        ar->hw->vif_data_size = sizeof(struct ath10k_vif);
        ar->hw->sta_data_size = sizeof(struct ath10k_sta);
+       ar->hw->chanctx_data_size = sizeof(struct ath10k_chanctx);
 
        ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL;
 
@@ -5533,6 +6931,9 @@ int ath10k_mac_register(struct ath10k *ar)
                        NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
        }
 
+       if (test_bit(WMI_SERVICE_TDLS, ar->wmi.svc_map))
+               ar->hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
+
        ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
        ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
        ar->hw->wiphy->max_remain_on_channel_duration = 5000;
@@ -5540,20 +6941,46 @@ int ath10k_mac_register(struct ath10k *ar)
        ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
        ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE;
 
+       ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations;
+
+       ret = ath10k_wow_init(ar);
+       if (ret) {
+               ath10k_warn(ar, "failed to init wow: %d\n", ret);
+               goto err_free;
+       }
+
        /*
         * On LL hardware the queues are managed entirely by the FW,
         * so we only advertise to mac80211 that we can handle the queues
         */
-       ar->hw->queues = 4;
+       ar->hw->queues = IEEE80211_MAX_QUEUES;
+
+       /* vdev_ids are used as hw queue numbers. Make sure offchan tx queue is
+        * something that vdev_ids can't reach so that we don't stop the queue
+        * accidentally.
+        */
+       ar->hw->offchannel_tx_hw_queue = IEEE80211_MAX_QUEUES - 1;
 
        switch (ar->wmi.op_version) {
        case ATH10K_FW_WMI_OP_VERSION_MAIN:
-       case ATH10K_FW_WMI_OP_VERSION_TLV:
                ar->hw->wiphy->iface_combinations = ath10k_if_comb;
                ar->hw->wiphy->n_iface_combinations =
                        ARRAY_SIZE(ath10k_if_comb);
                ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
                break;
+       case ATH10K_FW_WMI_OP_VERSION_TLV:
+               if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
+                       ar->hw->wiphy->iface_combinations =
+                               ath10k_tlv_qcs_if_comb;
+                       ar->hw->wiphy->n_iface_combinations =
+                               ARRAY_SIZE(ath10k_tlv_qcs_if_comb);
+               } else {
+                       ar->hw->wiphy->iface_combinations = ath10k_tlv_if_comb;
+                       ar->hw->wiphy->n_iface_combinations =
+                               ARRAY_SIZE(ath10k_tlv_if_comb);
+               }
+               ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
+               break;
        case ATH10K_FW_WMI_OP_VERSION_10_1:
        case ATH10K_FW_WMI_OP_VERSION_10_2:
        case ATH10K_FW_WMI_OP_VERSION_10_2_4:
index 68296117d20333e9b3eb428c50b2e15f82fbd648..b291f063705c3bb816f811aad450e9549aca5dfb 100644 (file)
 
 #define WEP_KEYID_SHIFT 6
 
+enum wmi_tlv_tx_pause_id;
+enum wmi_tlv_tx_pause_action;
+
 struct ath10k_generic_iter {
        struct ath10k *ar;
        int ret;
 };
 
+struct rfc1042_hdr {
+       u8 llc_dsap;
+       u8 llc_ssap;
+       u8 llc_ctrl;
+       u8 snap_oui[3];
+       __be16 snap_type;
+} __packed;
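+
+/* For reference, the classic RFC 1042 encapsulation of an IPv4 frame is
+ * the byte sequence aa aa 03 00 00 00 08 00: llc_dsap = llc_ssap = 0xaa,
+ * llc_ctrl = 0x03, an all-zero snap_oui and snap_type = ETH_P_IP (0x0800).
+ */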
+
 struct ath10k *ath10k_mac_create(size_t priv_size);
 void ath10k_mac_destroy(struct ath10k *ar);
 int ath10k_mac_register(struct ath10k *ar);
@@ -45,6 +56,24 @@ void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif);
 void ath10k_drain_tx(struct ath10k *ar);
 bool ath10k_mac_is_peer_wep_key_set(struct ath10k *ar, const u8 *addr,
                                    u8 keyidx);
+int ath10k_mac_vif_chan(struct ieee80211_vif *vif,
+                       struct cfg80211_chan_def *def);
+
+void ath10k_mac_handle_beacon(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_mac_handle_beacon_miss(struct ath10k *ar, u32 vdev_id);
+void ath10k_mac_handle_tx_pause(struct ath10k *ar, u32 vdev_id,
+                               enum wmi_tlv_tx_pause_id pause_id,
+                               enum wmi_tlv_tx_pause_action action);
+
+u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
+                            u8 hw_rate);
+u8 ath10k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
+                            u32 bitrate);
+
+void ath10k_mac_tx_lock(struct ath10k *ar, int reason);
+void ath10k_mac_tx_unlock(struct ath10k *ar, int reason);
+void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason);
+void ath10k_mac_vif_tx_unlock(struct ath10k_vif *arvif, int reason);
 
 static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif)
 {
diff --git a/drivers/net/wireless/ath/ath10k/p2p.c b/drivers/net/wireless/ath/ath10k/p2p.c
new file mode 100644 (file)
index 0000000..c0b6ffa
--- /dev/null
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+#include "wmi.h"
+#include "mac.h"
+#include "p2p.h"
+
+static void ath10k_p2p_noa_ie_fill(u8 *data, size_t len,
+                                  const struct wmi_p2p_noa_info *noa)
+{
+       struct ieee80211_p2p_noa_attr *noa_attr;
+       u8  ctwindow_oppps = noa->ctwindow_oppps;
+       u8 ctwindow_oppps = noa->ctwindow_oppps;
+       bool oppps = !!(ctwindow_oppps & WMI_P2P_OPPPS_ENABLE_BIT);
+       __le16 *noa_attr_len;
+       u16 attr_len;
+       u8 noa_descriptors = noa->num_descriptors;
+       int i;
+
+       /* P2P IE */
+       data[0] = WLAN_EID_VENDOR_SPECIFIC;
+       data[1] = len - 2;
+       data[2] = (WLAN_OUI_WFA >> 16) & 0xff;
+       data[3] = (WLAN_OUI_WFA >> 8) & 0xff;
+       data[4] = (WLAN_OUI_WFA >> 0) & 0xff;
+       data[5] = WLAN_OUI_TYPE_WFA_P2P;
+
+       /* NOA ATTR */
+       data[6] = IEEE80211_P2P_ATTR_ABSENCE_NOTICE;
+       noa_attr_len = (__le16 *)&data[7]; /* 2 bytes */
+       noa_attr = (struct ieee80211_p2p_noa_attr *)&data[9];
+
+       noa_attr->index = noa->index;
+       noa_attr->oppps_ctwindow = ctwindow;
+       if (oppps)
+               noa_attr->oppps_ctwindow |= IEEE80211_P2P_OPPPS_ENABLE_BIT;
+
+       for (i = 0; i < noa_descriptors; i++) {
+               noa_attr->desc[i].count =
+                       __le32_to_cpu(noa->descriptors[i].type_count);
+               noa_attr->desc[i].duration = noa->descriptors[i].duration;
+               noa_attr->desc[i].interval = noa->descriptors[i].interval;
+               noa_attr->desc[i].start_time = noa->descriptors[i].start_time;
+       }
+
+       attr_len = 2; /* index + oppps_ctwindow */
+       attr_len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
+       *noa_attr_len = __cpu_to_le16(attr_len);
+}
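+
+/* Byte layout of the IE produced above:
+ *
+ *   [0]     WLAN_EID_VENDOR_SPECIFIC (0xdd)
+ *   [1]     IE length (len - 2)
+ *   [2..4]  WFA OUI
+ *   [5]     WLAN_OUI_TYPE_WFA_P2P
+ *   [6]     IEEE80211_P2P_ATTR_ABSENCE_NOTICE
+ *   [7..8]  attribute length, little endian
+ *   [9..]   index, ctwindow/oppps, then the packed NoA descriptors
+ */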
+
+static size_t ath10k_p2p_noa_ie_len_compute(const struct wmi_p2p_noa_info *noa)
+{
+       size_t len = 0;
+
+       if (!noa->num_descriptors &&
+           !(noa->ctwindow_oppps & WMI_P2P_OPPPS_ENABLE_BIT))
+               return 0;
+
+       len += 1 + 1 + 4; /* EID + len + OUI */
+       len += 1 + 2; /* noa attr + attr len */
+       len += 1 + 1; /* index + oppps_ctwindow */
+       len += noa->num_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
+
+       return len;
+}
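+
+/* Worked example: the fixed parts above total 6 + 3 + 2 = 11 bytes, and
+ * sizeof(struct ieee80211_p2p_noa_desc) is 13 bytes packed, so e.g. two
+ * NoA descriptors yield an IE of 11 + 2 * 13 = 37 bytes.
+ */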
+
+static void ath10k_p2p_noa_ie_assign(struct ath10k_vif *arvif, void *ie,
+                                    size_t len)
+{
+       struct ath10k *ar = arvif->ar;
+
+       lockdep_assert_held(&ar->data_lock);
+
+       kfree(arvif->u.ap.noa_data);
+
+       arvif->u.ap.noa_data = ie;
+       arvif->u.ap.noa_len = len;
+}
+
+static void __ath10k_p2p_noa_update(struct ath10k_vif *arvif,
+                                   const struct wmi_p2p_noa_info *noa)
+{
+       struct ath10k *ar = arvif->ar;
+       void *ie;
+       size_t len;
+
+       lockdep_assert_held(&ar->data_lock);
+
+       ath10k_p2p_noa_ie_assign(arvif, NULL, 0);
+
+       len = ath10k_p2p_noa_ie_len_compute(noa);
+       if (!len)
+               return;
+
+       ie = kmalloc(len, GFP_ATOMIC);
+       if (!ie)
+               return;
+
+       ath10k_p2p_noa_ie_fill(ie, len, noa);
+       ath10k_p2p_noa_ie_assign(arvif, ie, len);
+}
+
+void ath10k_p2p_noa_update(struct ath10k_vif *arvif,
+                          const struct wmi_p2p_noa_info *noa)
+{
+       struct ath10k *ar = arvif->ar;
+
+       spin_lock_bh(&ar->data_lock);
+       __ath10k_p2p_noa_update(arvif, noa);
+       spin_unlock_bh(&ar->data_lock);
+}
+
+struct ath10k_p2p_noa_arg {
+       u32 vdev_id;
+       const struct wmi_p2p_noa_info *noa;
+};
+
+static void ath10k_p2p_noa_update_vdev_iter(void *data, u8 *mac,
+                                           struct ieee80211_vif *vif)
+{
+       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct ath10k_p2p_noa_arg *arg = data;
+
+       if (arvif->vdev_id != arg->vdev_id)
+               return;
+
+       ath10k_p2p_noa_update(arvif, arg->noa);
+}
+
+void ath10k_p2p_noa_update_by_vdev_id(struct ath10k *ar, u32 vdev_id,
+                                     const struct wmi_p2p_noa_info *noa)
+{
+       struct ath10k_p2p_noa_arg arg = {
+               .vdev_id = vdev_id,
+               .noa = noa,
+       };
+
+       ieee80211_iterate_active_interfaces_atomic(ar->hw,
+                                                  IEEE80211_IFACE_ITER_NORMAL,
+                                                  ath10k_p2p_noa_update_vdev_iter,
+                                                  &arg);
+}
diff --git a/drivers/net/wireless/ath/ath10k/p2p.h b/drivers/net/wireless/ath/ath10k/p2p.h
new file mode 100644 (file)
index 0000000..7be616e
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _P2P_H
+#define _P2P_H
+
+struct ath10k_vif;
+struct wmi_p2p_noa_info;
+
+void ath10k_p2p_noa_update(struct ath10k_vif *arvif,
+                          const struct wmi_p2p_noa_info *noa);
+void ath10k_p2p_noa_update_by_vdev_id(struct ath10k *ar, u32 vdev_id,
+                                     const struct wmi_p2p_noa_info *noa);
+
+#endif
index 7681237fe298a4d430ec4d8ac95fd5fad2cc9de2..17a060e8efa2250bbe8d9d63c0eb104fa5282953 100644 (file)
@@ -113,7 +113,7 @@ static const struct ce_attr host_ce_config_wlan[] = {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 2048,
-               .dest_nentries = 32,
+               .dest_nentries = 128,
        },
 
        /* CE3: host->target WMI */
@@ -183,7 +183,7 @@ static const struct ce_pipe_config target_ce_config_wlan[] = {
        {
                .pipenum = __cpu_to_le32(2),
                .pipedir = __cpu_to_le32(PIPEDIR_IN),
-               .nentries = __cpu_to_le32(32),
+               .nentries = __cpu_to_le32(64),
                .nbytes_max = __cpu_to_le32(2048),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS),
                .reserved = __cpu_to_le32(0),
@@ -330,6 +330,205 @@ static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
        },
 };
 
+static bool ath10k_pci_is_awake(struct ath10k *ar)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       u32 val = ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
+                          RTC_STATE_ADDRESS);
+
+       return RTC_STATE_V_GET(val) == RTC_STATE_V_ON;
+}
+
+static void __ath10k_pci_wake(struct ath10k *ar)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+       lockdep_assert_held(&ar_pci->ps_lock);
+
+       ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake reg refcount %lu awake %d\n",
+                  ar_pci->ps_wake_refcount, ar_pci->ps_awake);
+
+       iowrite32(PCIE_SOC_WAKE_V_MASK,
+                 ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
+                 PCIE_SOC_WAKE_ADDRESS);
+}
+
+static void __ath10k_pci_sleep(struct ath10k *ar)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+       lockdep_assert_held(&ar_pci->ps_lock);
+
+       ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep reg refcount %lu awake %d\n",
+                  ar_pci->ps_wake_refcount, ar_pci->ps_awake);
+
+       iowrite32(PCIE_SOC_WAKE_RESET,
+                 ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
+                 PCIE_SOC_WAKE_ADDRESS);
+       ar_pci->ps_awake = false;
+}
+
+static int ath10k_pci_wake_wait(struct ath10k *ar)
+{
+       int tot_delay = 0;
+       int curr_delay = 5;
+
+       while (tot_delay < PCIE_WAKE_TIMEOUT) {
+               if (ath10k_pci_is_awake(ar))
+                       return 0;
+
+               udelay(curr_delay);
+               tot_delay += curr_delay;
+
+               if (curr_delay < 50)
+                       curr_delay += 5;
+       }
+
+       return -ETIMEDOUT;
+}
+
+static int ath10k_pci_wake(struct ath10k *ar)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       unsigned long flags;
+       int ret = 0;
+
+       spin_lock_irqsave(&ar_pci->ps_lock, flags);
+
+       ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake refcount %lu awake %d\n",
+                  ar_pci->ps_wake_refcount, ar_pci->ps_awake);
+
+       /* This function can be called very frequently. To avoid excessive
+        * CPU stalls from MMIO reads, use a cached variable to track the
+        * device state.
+        */
+       if (!ar_pci->ps_awake) {
+               __ath10k_pci_wake(ar);
+
+               ret = ath10k_pci_wake_wait(ar);
+               if (ret == 0)
+                       ar_pci->ps_awake = true;
+       }
+
+       if (ret == 0) {
+               ar_pci->ps_wake_refcount++;
+               WARN_ON(ar_pci->ps_wake_refcount == 0);
+       }
+
+       spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
+
+       return ret;
+}
+
+static void ath10k_pci_sleep(struct ath10k *ar)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       unsigned long flags;
+
+       spin_lock_irqsave(&ar_pci->ps_lock, flags);
+
+       ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep refcount %lu awake %d\n",
+                  ar_pci->ps_wake_refcount, ar_pci->ps_awake);
+
+       if (WARN_ON(ar_pci->ps_wake_refcount == 0))
+               goto skip;
+
+       ar_pci->ps_wake_refcount--;
+
+       mod_timer(&ar_pci->ps_timer, jiffies +
+                 msecs_to_jiffies(ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC));
+
+skip:
+       spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
+}
+
+static void ath10k_pci_ps_timer(unsigned long ptr)
+{
+       struct ath10k *ar = (void *)ptr;
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       unsigned long flags;
+
+       spin_lock_irqsave(&ar_pci->ps_lock, flags);
+
+       ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps timer refcount %lu awake %d\n",
+                  ar_pci->ps_wake_refcount, ar_pci->ps_awake);
+
+       if (ar_pci->ps_wake_refcount > 0)
+               goto skip;
+
+       __ath10k_pci_sleep(ar);
+
+skip:
+       spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
+}
+
+static void ath10k_pci_sleep_sync(struct ath10k *ar)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       unsigned long flags;
+
+       del_timer_sync(&ar_pci->ps_timer);
+
+       spin_lock_irqsave(&ar_pci->ps_lock, flags);
+       WARN_ON(ar_pci->ps_wake_refcount > 0);
+       __ath10k_pci_sleep(ar);
+       spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
+}
+
+void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       int ret;
+
+       ret = ath10k_pci_wake(ar);
+       if (ret) {
+               ath10k_warn(ar, "failed to wake target for write32 of 0x%08x at 0x%08x: %d\n",
+                           value, offset, ret);
+               return;
+       }
+
+       iowrite32(value, ar_pci->mem + offset);
+       ath10k_pci_sleep(ar);
+}
+
+u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       u32 val;
+       int ret;
+
+       ret = ath10k_pci_wake(ar);
+       if (ret) {
+               ath10k_warn(ar, "failed to wake target for read32 at 0x%08x: %d\n",
+                           offset, ret);
+               return 0xffffffff;
+       }
+
+       val = ioread32(ar_pci->mem + offset);
+       ath10k_pci_sleep(ar);
+
+       return val;
+}
+
+u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr)
+{
+       return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr);
+}
+
+void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val)
+{
+       ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val);
+}
+
+u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
+{
+       return ath10k_pci_read32(ar, PCIE_LOCAL_BASE_ADDRESS + addr);
+}
+
+void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
+{
+       ath10k_pci_write32(ar, PCIE_LOCAL_BASE_ADDRESS + addr, val);
+}
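/* Illustrative usage sketch, not part of the patch: with the out-of-line
 * accessors above, callers no longer pair wake/sleep by hand. Each accessor
 * wakes the chip, performs the MMIO access and drops its wake reference,
 * which re-arms the sleep grace timer. SOC_CHIP_ID_ADDRESS is an existing
 * ath10k register offset used here only as an example.
 */
static u32 example_read_chip_id(struct ath10k *ar)
{
	/* wake -> ioread32() -> sleep all happen inside the wrapper */
	return ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
}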
+
 static bool ath10k_pci_irq_pending(struct ath10k *ar)
 {
        u32 cause;
@@ -793,45 +992,6 @@ static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value)
        return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val));
 }
 
-static bool ath10k_pci_is_awake(struct ath10k *ar)
-{
-       u32 val = ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS);
-
-       return RTC_STATE_V_GET(val) == RTC_STATE_V_ON;
-}
-
-static int ath10k_pci_wake_wait(struct ath10k *ar)
-{
-       int tot_delay = 0;
-       int curr_delay = 5;
-
-       while (tot_delay < PCIE_WAKE_TIMEOUT) {
-               if (ath10k_pci_is_awake(ar))
-                       return 0;
-
-               udelay(curr_delay);
-               tot_delay += curr_delay;
-
-               if (curr_delay < 50)
-                       curr_delay += 5;
-       }
-
-       return -ETIMEDOUT;
-}
-
-static int ath10k_pci_wake(struct ath10k *ar)
-{
-       ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
-                              PCIE_SOC_WAKE_V_MASK);
-       return ath10k_pci_wake_wait(ar);
-}
-
-static void ath10k_pci_sleep(struct ath10k *ar)
-{
-       ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
-                              PCIE_SOC_WAKE_RESET);
-}
-
 /* Called by lower (CE) layer when a send to Target completes. */
 static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
 {
@@ -1212,11 +1372,15 @@ static void ath10k_pci_irq_enable(struct ath10k *ar)
 
 static int ath10k_pci_hif_start(struct ath10k *ar)
 {
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");
 
        ath10k_pci_irq_enable(ar);
        ath10k_pci_rx_post(ar);
 
+       pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
+                                  ar_pci->link_ctl);
+
        return 0;
 }
 
@@ -1329,6 +1493,9 @@ static void ath10k_pci_flush(struct ath10k *ar)
 
 static void ath10k_pci_hif_stop(struct ath10k *ar)
 {
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       unsigned long flags;
+
        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
 
        /* Most likely the device has HTT Rx ring configured. The only way to
@@ -1347,6 +1514,10 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
        ath10k_pci_irq_disable(ar);
        ath10k_pci_irq_sync(ar);
        ath10k_pci_flush(ar);
+
+       spin_lock_irqsave(&ar_pci->ps_lock, flags);
+       WARN_ON(ar_pci->ps_wake_refcount > 0);
+       spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
 }
 
 static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
@@ -1524,12 +1695,11 @@ static int ath10k_pci_get_num_banks(struct ath10k *ar)
                switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) {
                case QCA6174_HW_1_0_CHIP_ID_REV:
                case QCA6174_HW_1_1_CHIP_ID_REV:
+               case QCA6174_HW_2_1_CHIP_ID_REV:
+               case QCA6174_HW_2_2_CHIP_ID_REV:
                        return 3;
                case QCA6174_HW_1_3_CHIP_ID_REV:
                        return 2;
-               case QCA6174_HW_2_1_CHIP_ID_REV:
-               case QCA6174_HW_2_2_CHIP_ID_REV:
-                       return 6;
                case QCA6174_HW_3_0_CHIP_ID_REV:
                case QCA6174_HW_3_1_CHIP_ID_REV:
                case QCA6174_HW_3_2_CHIP_ID_REV:
@@ -1967,15 +2137,15 @@ static int ath10k_pci_chip_reset(struct ath10k *ar)
 
 static int ath10k_pci_hif_power_up(struct ath10k *ar)
 {
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret;
 
        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n");
 
-       ret = ath10k_pci_wake(ar);
-       if (ret) {
-               ath10k_err(ar, "failed to wake up target: %d\n", ret);
-               return ret;
-       }
+       pcie_capability_read_word(ar_pci->pdev, PCI_EXP_LNKCTL,
+                                 &ar_pci->link_ctl);
+       pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
+                                  ar_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC);
 
        /*
         * Bring the target up cleanly.
@@ -2023,7 +2193,6 @@ err_ce:
        ath10k_pci_ce_deinit(ar);
 
 err_sleep:
-       ath10k_pci_sleep(ar);
        return ret;
 }
 
@@ -2034,28 +2203,18 @@ static void ath10k_pci_hif_power_down(struct ath10k *ar)
        /* Currently hif_power_up performs effectively a reset and hif_stop
         * resets the chip as well so there's no point in resetting here.
         */
-
-       ath10k_pci_sleep(ar);
 }
 
 #ifdef CONFIG_PM
 
-#define ATH10K_PCI_PM_CONTROL 0x44
-
 static int ath10k_pci_hif_suspend(struct ath10k *ar)
 {
-       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-       struct pci_dev *pdev = ar_pci->pdev;
-       u32 val;
-
-       pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
-
-       if ((val & 0x000000ff) != 0x3) {
-               pci_save_state(pdev);
-               pci_disable_device(pdev);
-               pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
-                                      (val & 0xffffff00) | 0x03);
-       }
+       /* The grace timer can still be counting down and ar->ps_awake can
+        * still be true. It is known that the device may be asleep after
+        * resuming, regardless of the SoC powersave state before suspending.
+        * Hence make sure the device is asleep before proceeding.
+        */
+       ath10k_pci_sleep_sync(ar);
 
        return 0;
 }
@@ -2066,22 +2225,14 @@ static int ath10k_pci_hif_resume(struct ath10k *ar)
        struct pci_dev *pdev = ar_pci->pdev;
        u32 val;
 
-       pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
-
-       if ((val & 0x000000ff) != 0) {
-               pci_restore_state(pdev);
-               pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
-                                      val & 0xffffff00);
-               /*
-                * Suspend/Resume resets the PCI configuration space,
-                * so we have to re-disable the RETRY_TIMEOUT register (0x41)
-                * to keep PCI Tx retries from interfering with C3 CPU state
-                */
-               pci_read_config_dword(pdev, 0x40, &val);
-
-               if ((val & 0x0000ff00) != 0)
-                       pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
-       }
+       /* Suspend/Resume resets the PCI configuration space, so we have to
+        * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
+        * from interfering with C3 CPU state. pci_restore_state() won't help
+        * here since it only restores the first 64 bytes of the PCI config
+        * header.
+        */
+       pci_read_config_dword(pdev, 0x40, &val);
+       if ((val & 0x0000ff00) != 0)
+               pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
 
        return 0;
 }
@@ -2497,7 +2648,6 @@ static int ath10k_pci_claim(struct ath10k *ar)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct pci_dev *pdev = ar_pci->pdev;
-       u32 lcr_val;
        int ret;
 
        pci_set_drvdata(pdev, ar);
@@ -2531,10 +2681,6 @@ static int ath10k_pci_claim(struct ath10k *ar)
 
        pci_set_master(pdev);
 
-       /* Workaround: Disable ASPM */
-       pci_read_config_dword(pdev, 0x80, &lcr_val);
-       pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));
-
        /* Arrange for access to Target SoC registers. */
        ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);
        if (!ar_pci->mem) {
@@ -2621,9 +2767,19 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
        ar_pci->dev = &pdev->dev;
        ar_pci->ar = ar;
 
+       if (pdev->subsystem_vendor || pdev->subsystem_device)
+               scnprintf(ar->spec_board_id, sizeof(ar->spec_board_id),
+                         "%04x:%04x:%04x:%04x",
+                         pdev->vendor, pdev->device,
+                         pdev->subsystem_vendor, pdev->subsystem_device);
+
        spin_lock_init(&ar_pci->ce_lock);
+       spin_lock_init(&ar_pci->ps_lock);
+
        setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry,
                    (unsigned long)ar);
+       setup_timer(&ar_pci->ps_timer, ath10k_pci_ps_timer,
+                   (unsigned long)ar);
 
        ret = ath10k_pci_claim(ar);
        if (ret) {
@@ -2631,12 +2787,6 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
                goto err_core_destroy;
        }
 
-       ret = ath10k_pci_wake(ar);
-       if (ret) {
-               ath10k_err(ar, "failed to wake up: %d\n", ret);
-               goto err_release;
-       }
-
        ret = ath10k_pci_alloc_pipes(ar);
        if (ret) {
                ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
@@ -2678,11 +2828,9 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
        if (!ath10k_pci_chip_is_supported(pdev->device, chip_id)) {
                ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
                           pdev->device, chip_id);
-               goto err_sleep;
+               goto err_free_irq;
        }
 
-       ath10k_pci_sleep(ar);
-
        ret = ath10k_core_register(ar, chip_id);
        if (ret) {
                ath10k_err(ar, "failed to register driver core: %d\n", ret);
@@ -2702,9 +2850,6 @@ err_free_pipes:
        ath10k_pci_free_pipes(ar);
 
 err_sleep:
-       ath10k_pci_sleep(ar);
-
-err_release:
        ath10k_pci_release(ar);
 
 err_core_destroy:
@@ -2734,6 +2879,7 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
        ath10k_pci_deinit_irq(ar);
        ath10k_pci_ce_deinit(ar);
        ath10k_pci_free_pipes(ar);
+       ath10k_pci_sleep_sync(ar);
        ath10k_pci_release(ar);
        ath10k_core_destroy(ar);
 }
@@ -2770,7 +2916,19 @@ module_exit(ath10k_pci_exit);
 MODULE_AUTHOR("Qualcomm Atheros");
 MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
 MODULE_LICENSE("Dual BSD/GPL");
+
+/* QCA988x 2.0 firmware files */
 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE);
 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE);
+MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE);
+MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API5_FILE);
 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
+
+/* QCA6174 2.1 firmware files */
+MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE);
+MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" QCA6174_HW_2_1_BOARD_DATA_FILE);
+
+/* QCA6174 3.1 firmware files */
+MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API4_FILE);
+MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" QCA6174_HW_3_0_BOARD_DATA_FILE);
index bddf54320160d91c8f21fd9982c54e123e3d3881..d7696ddc03c42b2b2622913f9c42674f22f84039 100644 (file)
@@ -185,6 +185,41 @@ struct ath10k_pci {
        /* Map CE id to ce_state */
        struct ath10k_ce_pipe ce_states[CE_COUNT_MAX];
        struct timer_list rx_post_retry;
+
+       /* Due to HW quirks it is recommended to disable ASPM during device
+        * bootup. To do that the original PCI-E Link Control register value
+        * is stored before bootup and restored afterwards.
+        */
+       u16 link_ctl;
+
+       /* Protects ps_awake and ps_wake_refcount */
+       spinlock_t ps_lock;
+
+       /* The device has a special powersave-oriented register. When the
+        * device is considered asleep it drains less power and the driver is
+        * forbidden from accessing most MMIO registers. If the host accesses
+        * them without first waking the device up, the device might scribble
+        * over host memory or return 0xdeadbeef readouts.
+        */
+       unsigned long ps_wake_refcount;
+
+       /* Waking up takes some time (up to 2 ms in some cases) so it can be
+        * bad for latency. To mitigate this the device isn't immediately
+        * allowed to sleep after all references are undone - instead there's
+        * a grace period after which the powersave register is updated,
+        * unless some activity to/from the device happened in the meantime.
+        *
+        * Also see the comment on ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC.
+        */
+       struct timer_list ps_timer;
+
+       /* MMIO registers are used to communicate with the device. Under
+        * intensive traffic, touching the powersave register on every access
+        * would be wasteful overhead and would needlessly stall the CPU. It
+        * is far more efficient to rely on a variable in RAM and update it
+        * only when the powersave register state actually changes.
+        */
+       bool ps_awake;
 };
 
 static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
@@ -209,61 +244,25 @@ static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
  * for this device; but that's not guaranteed.
  */
 #define TARG_CPU_SPACE_TO_CE_SPACE(ar, pci_addr, addr)                 \
-       (((ioread32((pci_addr)+(SOC_CORE_BASE_ADDRESS|                  \
+       (((ath10k_pci_read32(ar, (SOC_CORE_BASE_ADDRESS |               \
          CORE_CTRL_ADDRESS)) & 0x7ff) << 21) |                         \
         0x100000 | ((addr) & 0xfffff))
 
 /* Wait up to this many Ms for a Diagnostic Access CE operation to complete */
 #define DIAG_ACCESS_CE_TIMEOUT_MS 10
 
-/* Target exposes its registers for direct access. However before host can
- * access them it needs to make sure the target is awake (ath10k_pci_wake,
- * ath10k_pci_wake_wait, ath10k_pci_is_awake). Once target is awake it won't go
- * to sleep unless host tells it to (ath10k_pci_sleep).
- *
- * If host tries to access target registers without waking it up it can
- * scribble over host memory.
- *
- * If target is asleep waking it up may take up to even 2ms.
- */
-
-static inline void ath10k_pci_write32(struct ath10k *ar, u32 offset,
-                                     u32 value)
-{
-       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-
-       iowrite32(value, ar_pci->mem + offset);
-}
-
-static inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
-{
-       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-
-       return ioread32(ar_pci->mem + offset);
-}
-
-static inline u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr)
-{
-       return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr);
-}
-
-static inline void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val)
-{
-       ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val);
-}
-
-static inline u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
-{
-       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-
-       return ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr);
-}
+void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value);
+void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val);
+void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val);
 
-static inline void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
-{
-       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+u32 ath10k_pci_read32(struct ath10k *ar, u32 offset);
+u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr);
+u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr);
 
-       iowrite32(val, ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr);
-}
+/* QCA6174 is known to have Tx/Rx issues when the SOC_WAKE register is poked
+ * too frequently. To avoid this, put the SoC to sleep only after a very
+ * conservative grace period. Adjust with great care.
+ */
+#define ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC 60
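/* Illustrative timing sketch, not part of the patch:
 *
 *   MMIO access  MMIO access        last ref dropped       ps_timer fires
 *        |            |                    |                     |
 *   -----+---awake----+-------awake--------+---awake (60 ms)-----+--asleep--
 *
 * Every access re-arms ps_timer, so bursts of register traffic never pay
 * the (up to ~2 ms) wake-up cost; the chip sleeps only after 60 ms of quiet.
 */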
 
 #endif /* _PCI_H_ */
index e9cc7787bf5fd61c788c91212d080e78cde9fcc5..492b5a5af434ddb67e01fa9c0d4638eb84f3d66e 100644 (file)
@@ -661,6 +661,28 @@ struct rx_msdu_end {
 #define RX_PPDU_START_INFO5_SERVICE_MASK 0x0000ffff
 #define RX_PPDU_START_INFO5_SERVICE_LSB  0
 
+/* No idea what this flag means. It seems to always be set in the rate field. */
+#define RX_PPDU_START_RATE_FLAG BIT(3)
+
+enum rx_ppdu_start_rate {
+       RX_PPDU_START_RATE_OFDM_48M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_48M,
+       RX_PPDU_START_RATE_OFDM_24M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_24M,
+       RX_PPDU_START_RATE_OFDM_12M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_12M,
+       RX_PPDU_START_RATE_OFDM_6M  = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_6M,
+       RX_PPDU_START_RATE_OFDM_54M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_54M,
+       RX_PPDU_START_RATE_OFDM_36M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_36M,
+       RX_PPDU_START_RATE_OFDM_18M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_18M,
+       RX_PPDU_START_RATE_OFDM_9M  = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_9M,
+
+       RX_PPDU_START_RATE_CCK_LP_11M  = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_11M,
+       RX_PPDU_START_RATE_CCK_LP_5_5M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_5_5M,
+       RX_PPDU_START_RATE_CCK_LP_2M   = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_2M,
+       RX_PPDU_START_RATE_CCK_LP_1M   = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_1M,
+       RX_PPDU_START_RATE_CCK_SP_11M  = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_SP_11M,
+       RX_PPDU_START_RATE_CCK_SP_5_5M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_SP_5_5M,
+       RX_PPDU_START_RATE_CCK_SP_2M   = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_SP_2M,
+};
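/* Illustrative decode sketch, not part of the patch: masking off the flag
 * bit recovers the plain hardware rate code, e.g.
 *
 *   RX_PPDU_START_RATE_OFDM_6M & ~RX_PPDU_START_RATE_FLAG
 *           == ATH10K_HW_RATE_OFDM_6M
 */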
+
 struct rx_ppdu_start {
        struct {
                u8 pri20_mhz;
index d22addf6118b832501647c07c39cb56338542bc9..8dcd424aa5029965ffa5d2a2fb07067e9a54ce61 100644 (file)
@@ -519,9 +519,12 @@ int ath10k_spectral_vif_stop(struct ath10k_vif *arvif)
 
 int ath10k_spectral_create(struct ath10k *ar)
 {
+       /* The buffer size covers whole channels in dual bands up to 128 bins.
+        * Scans with more than 128 bins need to be run on a single band at a
+        * time.
+        */
        ar->spectral.rfs_chan_spec_scan = relay_open("spectral_scan",
                                                     ar->debug.debugfs_phy,
-                                                    1024, 256,
+                                                    1140, 2500,
                                                     &rfs_spec_scan_cb, NULL);
        debugfs_create_file("spectral_scan_ctl",
                            S_IRUSR | S_IWUSR,
index aede750809fefd9055872174cfae4b05231ef6b9..1a899d70dc5db5e6dea73280a42f06218285de77 100644 (file)
 #include "debug.h"
 #include "wmi-ops.h"
 
-static int ath10k_thermal_get_active_vifs(struct ath10k *ar,
-                                         enum wmi_vdev_type type)
+static int
+ath10k_thermal_get_max_throttle_state(struct thermal_cooling_device *cdev,
+                                     unsigned long *state)
 {
-       struct ath10k_vif *arvif;
-       int count = 0;
-
-       lockdep_assert_held(&ar->conf_mutex);
-
-       list_for_each_entry(arvif, &ar->arvifs, list) {
-               if (!arvif->is_started)
-                       continue;
-
-               if (!arvif->is_up)
-                       continue;
-
-               if (arvif->vdev_type != type)
-                       continue;
-
-               count++;
-       }
-       return count;
-}
-
-static int ath10k_thermal_get_max_dutycycle(struct thermal_cooling_device *cdev,
-                                           unsigned long *state)
-{
-       *state = ATH10K_QUIET_DUTY_CYCLE_MAX;
+       *state = ATH10K_THERMAL_THROTTLE_MAX;
 
        return 0;
 }
 
-static int ath10k_thermal_get_cur_dutycycle(struct thermal_cooling_device *cdev,
-                                           unsigned long *state)
+static int
+ath10k_thermal_get_cur_throttle_state(struct thermal_cooling_device *cdev,
+                                     unsigned long *state)
 {
        struct ath10k *ar = cdev->devdata;
 
        mutex_lock(&ar->conf_mutex);
-       *state = ar->thermal.duty_cycle;
+       *state = ar->thermal.throttle_state;
        mutex_unlock(&ar->conf_mutex);
 
        return 0;
 }
 
-static int ath10k_thermal_set_cur_dutycycle(struct thermal_cooling_device *cdev,
-                                           unsigned long duty_cycle)
+static int
+ath10k_thermal_set_cur_throttle_state(struct thermal_cooling_device *cdev,
+                                     unsigned long throttle_state)
 {
        struct ath10k *ar = cdev->devdata;
-       u32 period, duration, enabled;
-       int num_bss, ret = 0;
 
-       mutex_lock(&ar->conf_mutex);
-       if (ar->state != ATH10K_STATE_ON) {
-               ret = -ENETDOWN;
-               goto out;
-       }
-
-       if (duty_cycle > ATH10K_QUIET_DUTY_CYCLE_MAX) {
-               ath10k_warn(ar, "duty cycle %ld is exceeding the limit %d\n",
-                           duty_cycle, ATH10K_QUIET_DUTY_CYCLE_MAX);
-               ret = -EINVAL;
-               goto out;
-       }
-       /* TODO: Right now, thermal mitigation is handled only for single/multi
-        * vif AP mode. Since quiet param is not validated in STA mode, it needs
-        * to be investigated further to handle multi STA and multi-vif (AP+STA)
-        * mode properly.
-        */
-       num_bss = ath10k_thermal_get_active_vifs(ar, WMI_VDEV_TYPE_AP);
-       if (!num_bss) {
-               ath10k_warn(ar, "no active AP interfaces\n");
-               ret = -ENETDOWN;
-               goto out;
-       }
-       period = max(ATH10K_QUIET_PERIOD_MIN,
-                    (ATH10K_QUIET_PERIOD_DEFAULT / num_bss));
-       duration = (period * duty_cycle) / 100;
-       enabled = duration ? 1 : 0;
-
-       ret = ath10k_wmi_pdev_set_quiet_mode(ar, period, duration,
-                                            ATH10K_QUIET_START_OFFSET,
-                                            enabled);
-       if (ret) {
-               ath10k_warn(ar, "failed to set quiet mode period %u duarion %u enabled %u ret %d\n",
-                           period, duration, enabled, ret);
-               goto out;
+       if (throttle_state > ATH10K_THERMAL_THROTTLE_MAX) {
+               ath10k_warn(ar, "throttle state %ld is exceeding the limit %d\n",
+                           throttle_state, ATH10K_THERMAL_THROTTLE_MAX);
+               return -EINVAL;
        }
-       ar->thermal.duty_cycle = duty_cycle;
-out:
+       mutex_lock(&ar->conf_mutex);
+       ar->thermal.throttle_state = throttle_state;
+       ath10k_thermal_set_throttling(ar);
        mutex_unlock(&ar->conf_mutex);
-       return ret;
+       return 0;
 }
 
 static struct thermal_cooling_device_ops ath10k_thermal_ops = {
-       .get_max_state = ath10k_thermal_get_max_dutycycle,
-       .get_cur_state = ath10k_thermal_get_cur_dutycycle,
-       .set_cur_state = ath10k_thermal_set_cur_dutycycle,
+       .get_max_state = ath10k_thermal_get_max_throttle_state,
+       .get_cur_state = ath10k_thermal_get_cur_throttle_state,
+       .set_cur_state = ath10k_thermal_set_cur_throttle_state,
 };
 
 static ssize_t ath10k_thermal_show_temp(struct device *dev,
@@ -127,6 +75,7 @@ static ssize_t ath10k_thermal_show_temp(struct device *dev,
 {
        struct ath10k *ar = dev_get_drvdata(dev);
        int ret, temperature;
+       unsigned long time_left;
 
        mutex_lock(&ar->conf_mutex);
 
@@ -148,9 +97,9 @@ static ssize_t ath10k_thermal_show_temp(struct device *dev,
                goto out;
        }
 
-       ret = wait_for_completion_timeout(&ar->thermal.wmi_sync,
-                                         ATH10K_THERMAL_SYNC_TIMEOUT_HZ);
-       if (ret == 0) {
+       time_left = wait_for_completion_timeout(&ar->thermal.wmi_sync,
+                                               ATH10K_THERMAL_SYNC_TIMEOUT_HZ);
+       if (!time_left) {
                ath10k_warn(ar, "failed to synchronize thermal read\n");
                ret = -ETIMEDOUT;
                goto out;
@@ -184,6 +133,32 @@ static struct attribute *ath10k_hwmon_attrs[] = {
 };
 ATTRIBUTE_GROUPS(ath10k_hwmon);
 
+void ath10k_thermal_set_throttling(struct ath10k *ar)
+{
+       u32 period, duration, enabled;
+       int ret;
+
+       lockdep_assert_held(&ar->conf_mutex);
+
+       if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
+               return;
+
+       if (ar->state != ATH10K_STATE_ON)
+               return;
+
+       period = ar->thermal.quiet_period;
+       duration = (period * ar->thermal.throttle_state) / 100;
+       enabled = duration ? 1 : 0;
+
+       ret = ath10k_wmi_pdev_set_quiet_mode(ar, period, duration,
+                                            ATH10K_QUIET_START_OFFSET,
+                                            enabled);
+       if (ret) {
+               ath10k_warn(ar, "failed to set quiet mode period %u duration %u enabled %u ret %d\n",
+                           period, duration, enabled, ret);
+       }
+}
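/* Illustrative worked example, not part of the patch: with the default
 * quiet_period of 100 and throttle_state = 40,
 *
 *   duration = (100 * 40) / 100 = 40   ->  radio quiet 40 ms per 100 ms
 *   enabled  = 1
 *
 * while throttle_state = 0 yields duration = 0 and enabled = 0, switching
 * quiet mode off entirely.
 */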
+
 int ath10k_thermal_register(struct ath10k *ar)
 {
        struct thermal_cooling_device *cdev;
@@ -202,11 +177,12 @@ int ath10k_thermal_register(struct ath10k *ar)
        ret = sysfs_create_link(&ar->dev->kobj, &cdev->device.kobj,
                                "cooling_device");
        if (ret) {
-               ath10k_err(ar, "failed to create thermal symlink\n");
+               ath10k_err(ar, "failed to create cooling device symlink\n");
                goto err_cooling_destroy;
        }
 
        ar->thermal.cdev = cdev;
+       ar->thermal.quiet_period = ATH10K_QUIET_PERIOD_DEFAULT;
 
        /* Do not register hwmon device when temperature reading is not
         * supported by firmware
@@ -231,7 +207,7 @@ int ath10k_thermal_register(struct ath10k *ar)
        return 0;
 
 err_remove_link:
-       sysfs_remove_link(&ar->dev->kobj, "thermal_sensor");
+       sysfs_remove_link(&ar->dev->kobj, "cooling_device");
 err_cooling_destroy:
        thermal_cooling_device_unregister(cdev);
        return ret;
index bccc17ae0fde12d61a7567596a56a8900cca7749..b610ea5caae88727bc0adef058da82b92f4624a9 100644 (file)
 #define ATH10K_QUIET_PERIOD_DEFAULT     100
 #define ATH10K_QUIET_PERIOD_MIN         25
 #define ATH10K_QUIET_START_OFFSET       10
-#define ATH10K_QUIET_DUTY_CYCLE_MAX     70
 #define ATH10K_HWMON_NAME_LEN           15
 #define ATH10K_THERMAL_SYNC_TIMEOUT_HZ (5*HZ)
+#define ATH10K_THERMAL_THROTTLE_MAX     100
 
 struct ath10k_thermal {
        struct thermal_cooling_device *cdev;
        struct completion wmi_sync;
 
        /* protected by conf_mutex */
-       u32 duty_cycle;
+       u32 throttle_state;
+       u32 quiet_period;
        /* temperature value in degrees Celsius
         * protected by data_lock
         */
@@ -39,6 +40,7 @@ struct ath10k_thermal {
 int ath10k_thermal_register(struct ath10k *ar);
 void ath10k_thermal_unregister(struct ath10k *ar);
 void ath10k_thermal_event_temperature(struct ath10k *ar, int temperature);
+void ath10k_thermal_set_throttling(struct ath10k *ar);
 #else
 static inline int ath10k_thermal_register(struct ath10k *ar)
 {
@@ -54,5 +56,9 @@ static inline void ath10k_thermal_event_temperature(struct ath10k *ar,
 {
 }
 
+static inline void ath10k_thermal_set_throttling(struct ath10k *ar)
+{
+}
+
 #endif
 #endif /* _THERMAL_ */
index 5407887380abe71a91a9570d3e23b063f67e59d3..71bdb368813de3af8ba8c062d49a27000392cf98 100644 (file)
 #include "core.h"
 
 #if !defined(_TRACE_H_)
-static inline u32 ath10k_frm_hdr_len(const void *buf)
+static inline u32 ath10k_frm_hdr_len(const void *buf, size_t len)
 {
        const struct ieee80211_hdr *hdr = buf;
 
-       return ieee80211_hdrlen(hdr->frame_control);
+       /* In some rare cases (e.g. FCS error) the device reports a frame
+        * buffer shorter than what the frame header implies (e.g. len = 0).
+        * The buffer can still be accessed, so do a simple min() to guarantee
+        * the caller doesn't get a value greater than len.
+        */
+       return min_t(u32, len, ieee80211_hdrlen(hdr->frame_control));
 }
 #endif
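/* Illustrative boundary case, not part of the patch: for a frame reported
 * with len = 0 (e.g. on FCS error), ieee80211_hdrlen() could still return
 * 24 or more bytes for a data frame; the min_t() clamp above makes
 * ath10k_frm_hdr_len() return 0 instead, so the trace events below never
 * copy past the end of the reported buffer.
 */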
 
@@ -46,7 +51,7 @@ static inline void trace_ ## name(proto) {}
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM ath10k
 
-#define ATH10K_MSG_MAX 200
+#define ATH10K_MSG_MAX 400
 
 DECLARE_EVENT_CLASS(ath10k_log_event,
        TP_PROTO(struct ath10k *ar, struct va_format *vaf),
@@ -360,13 +365,13 @@ DECLARE_EVENT_CLASS(ath10k_hdr_event,
                __string(device, dev_name(ar->dev))
                __string(driver, dev_driver_string(ar->dev))
                __field(size_t, len)
-               __dynamic_array(u8, data, ath10k_frm_hdr_len(data))
+               __dynamic_array(u8, data, ath10k_frm_hdr_len(data, len))
        ),
 
        TP_fast_assign(
                __assign_str(device, dev_name(ar->dev));
                __assign_str(driver, dev_driver_string(ar->dev));
-               __entry->len = ath10k_frm_hdr_len(data);
+               __entry->len = ath10k_frm_hdr_len(data, len);
                memcpy(__get_dynamic_array(data), data, __entry->len);
        ),
 
@@ -387,15 +392,16 @@ DECLARE_EVENT_CLASS(ath10k_payload_event,
                __string(device, dev_name(ar->dev))
                __string(driver, dev_driver_string(ar->dev))
                __field(size_t, len)
-               __dynamic_array(u8, payload, (len - ath10k_frm_hdr_len(data)))
+               __dynamic_array(u8, payload, (len -
+                                             ath10k_frm_hdr_len(data, len)))
        ),
 
        TP_fast_assign(
                __assign_str(device, dev_name(ar->dev));
                __assign_str(driver, dev_driver_string(ar->dev));
-               __entry->len = len - ath10k_frm_hdr_len(data);
+               __entry->len = len - ath10k_frm_hdr_len(data, len);
                memcpy(__get_dynamic_array(payload),
-                      data + ath10k_frm_hdr_len(data), __entry->len);
+                      data + ath10k_frm_hdr_len(data, len), __entry->len);
        ),
 
        TP_printk(
index 3f00cec8aef52ea0872f7969cfddca7d8ca0769c..826500bb2b1b247233fbf1998733c2d8c16b3fe7 100644 (file)
@@ -55,8 +55,10 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
 
        lockdep_assert_held(&htt->tx_lock);
 
-       ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n",
-                  tx_done->msdu_id, !!tx_done->discard, !!tx_done->no_ack);
+       ath10k_dbg(ar, ATH10K_DBG_HTT,
+                  "htt tx completion msdu_id %u discard %d no_ack %d success %d\n",
+                  tx_done->msdu_id, !!tx_done->discard,
+                  !!tx_done->no_ack, !!tx_done->success);
 
        if (tx_done->msdu_id >= htt->max_num_pending_tx) {
                ath10k_warn(ar, "warning: msdu_id %d too big, ignoring\n",
@@ -97,6 +99,9 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
        if (tx_done->no_ack)
                info->flags &= ~IEEE80211_TX_STAT_ACK;
 
+       if (tx_done->success && (info->flags & IEEE80211_TX_CTL_NO_ACK))
+               info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+
        ieee80211_tx_status(htt->ar->hw, msdu);
        /* we do not own the msdu anymore */
 
index c8b64e7a6089c2ba2f874cb5aad3007c5c04f296..47fe2e756becd4ebacabf199ee204f732413e378 100644 (file)
@@ -45,6 +45,10 @@ struct wmi_ops {
                        struct wmi_rdy_ev_arg *arg);
        int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
                             struct ath10k_fw_stats *stats);
+       int (*pull_roam_ev)(struct ath10k *ar, struct sk_buff *skb,
+                           struct wmi_roam_ev_arg *arg);
+       int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
+                             struct wmi_wow_ev_arg *arg);
 
        struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
        struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
@@ -81,7 +85,8 @@ struct wmi_ops {
        struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
                                             const struct wmi_wmm_params_all_arg *arg);
        struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
-                                          const u8 peer_addr[ETH_ALEN]);
+                                          const u8 peer_addr[ETH_ALEN],
+                                          enum wmi_peer_type peer_type);
        struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
                                           const u8 peer_addr[ETH_ALEN]);
        struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
@@ -148,6 +153,27 @@ struct wmi_ops {
                                              u32 num_ac);
        struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
                                             const struct wmi_sta_keepalive_arg *arg);
+       struct sk_buff *(*gen_wow_enable)(struct ath10k *ar);
+       struct sk_buff *(*gen_wow_add_wakeup_event)(struct ath10k *ar, u32 vdev_id,
+                                                   enum wmi_wow_wakeup_event event,
+                                                   u32 enable);
+       struct sk_buff *(*gen_wow_host_wakeup_ind)(struct ath10k *ar);
+       struct sk_buff *(*gen_wow_add_pattern)(struct ath10k *ar, u32 vdev_id,
+                                              u32 pattern_id,
+                                              const u8 *pattern,
+                                              const u8 *mask,
+                                              int pattern_len,
+                                              int pattern_offset);
+       struct sk_buff *(*gen_wow_del_pattern)(struct ath10k *ar, u32 vdev_id,
+                                              u32 pattern_id);
+       struct sk_buff *(*gen_update_fw_tdls_state)(struct ath10k *ar,
+                                                   u32 vdev_id,
+                                                   enum wmi_tdls_state state);
+       struct sk_buff *(*gen_tdls_peer_update)(struct ath10k *ar,
+                                               const struct wmi_tdls_peer_update_cmd_arg *arg,
+                                               const struct wmi_tdls_peer_capab_arg *cap,
+                                               const struct wmi_channel_arg *chan);
+       struct sk_buff *(*gen_adaptive_qcs)(struct ath10k *ar, bool enable);
 };
 
 int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
@@ -273,6 +299,26 @@ ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
        return ar->wmi.ops->pull_fw_stats(ar, skb, stats);
 }
 
+static inline int
+ath10k_wmi_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
+                       struct wmi_roam_ev_arg *arg)
+{
+       if (!ar->wmi.ops->pull_roam_ev)
+               return -EOPNOTSUPP;
+
+       return ar->wmi.ops->pull_roam_ev(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
+                         struct wmi_wow_ev_arg *arg)
+{
+       if (!ar->wmi.ops->pull_wow_event)
+               return -EOPNOTSUPP;
+
+       return ar->wmi.ops->pull_wow_event(ar, skb, arg);
+}
+
 static inline int
 ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
 {
@@ -624,14 +670,15 @@ ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
 
 static inline int
 ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
-                      const u8 peer_addr[ETH_ALEN])
+                      const u8 peer_addr[ETH_ALEN],
+                      enum wmi_peer_type peer_type)
 {
        struct sk_buff *skb;
 
        if (!ar->wmi.ops->gen_peer_create)
                return -EOPNOTSUPP;
 
-       skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr);
+       skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr, peer_type);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
 
@@ -1060,4 +1107,145 @@ ath10k_wmi_sta_keepalive(struct ath10k *ar,
        return ath10k_wmi_cmd_send(ar, skb, cmd_id);
 }
 
+static inline int
+ath10k_wmi_wow_enable(struct ath10k *ar)
+{
+       struct sk_buff *skb;
+       u32 cmd_id;
+
+       if (!ar->wmi.ops->gen_wow_enable)
+               return -EOPNOTSUPP;
+
+       skb = ar->wmi.ops->gen_wow_enable(ar);
+       if (IS_ERR(skb))
+               return PTR_ERR(skb);
+
+       cmd_id = ar->wmi.cmd->wow_enable_cmdid;
+       return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_wow_add_wakeup_event(struct ath10k *ar, u32 vdev_id,
+                               enum wmi_wow_wakeup_event event,
+                               u32 enable)
+{
+       struct sk_buff *skb;
+       u32 cmd_id;
+
+       if (!ar->wmi.ops->gen_wow_add_wakeup_event)
+               return -EOPNOTSUPP;
+
+       skb = ar->wmi.ops->gen_wow_add_wakeup_event(ar, vdev_id, event, enable);
+       if (IS_ERR(skb))
+               return PTR_ERR(skb);
+
+       cmd_id = ar->wmi.cmd->wow_enable_disable_wake_event_cmdid;
+       return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_wow_host_wakeup_ind(struct ath10k *ar)
+{
+       struct sk_buff *skb;
+       u32 cmd_id;
+
+       if (!ar->wmi.ops->gen_wow_host_wakeup_ind)
+               return -EOPNOTSUPP;
+
+       skb = ar->wmi.ops->gen_wow_host_wakeup_ind(ar);
+       if (IS_ERR(skb))
+               return PTR_ERR(skb);
+
+       cmd_id = ar->wmi.cmd->wow_hostwakeup_from_sleep_cmdid;
+       return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_wow_add_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id,
+                          const u8 *pattern, const u8 *mask,
+                          int pattern_len, int pattern_offset)
+{
+       struct sk_buff *skb;
+       u32 cmd_id;
+
+       if (!ar->wmi.ops->gen_wow_add_pattern)
+               return -EOPNOTSUPP;
+
+       skb = ar->wmi.ops->gen_wow_add_pattern(ar, vdev_id, pattern_id,
+                                              pattern, mask, pattern_len,
+                                              pattern_offset);
+       if (IS_ERR(skb))
+               return PTR_ERR(skb);
+
+       cmd_id = ar->wmi.cmd->wow_add_wake_pattern_cmdid;
+       return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_wow_del_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id)
+{
+       struct sk_buff *skb;
+       u32 cmd_id;
+
+       if (!ar->wmi.ops->gen_wow_del_pattern)
+               return -EOPNOTSUPP;
+
+       skb = ar->wmi.ops->gen_wow_del_pattern(ar, vdev_id, pattern_id);
+       if (IS_ERR(skb))
+               return PTR_ERR(skb);
+
+       cmd_id = ar->wmi.cmd->wow_del_wake_pattern_cmdid;
+       return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
+                               enum wmi_tdls_state state)
+{
+       struct sk_buff *skb;
+
+       if (!ar->wmi.ops->gen_update_fw_tdls_state)
+               return -EOPNOTSUPP;
+
+       skb = ar->wmi.ops->gen_update_fw_tdls_state(ar, vdev_id, state);
+       if (IS_ERR(skb))
+               return PTR_ERR(skb);
+
+       return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->tdls_set_state_cmdid);
+}
+
+static inline int
+ath10k_wmi_tdls_peer_update(struct ath10k *ar,
+                           const struct wmi_tdls_peer_update_cmd_arg *arg,
+                           const struct wmi_tdls_peer_capab_arg *cap,
+                           const struct wmi_channel_arg *chan)
+{
+       struct sk_buff *skb;
+
+       if (!ar->wmi.ops->gen_tdls_peer_update)
+               return -EOPNOTSUPP;
+
+       skb = ar->wmi.ops->gen_tdls_peer_update(ar, arg, cap, chan);
+       if (IS_ERR(skb))
+               return PTR_ERR(skb);
+
+       return ath10k_wmi_cmd_send(ar, skb,
+                                  ar->wmi.cmd->tdls_peer_update_cmdid);
+}
+
+static inline int
+ath10k_wmi_adaptive_qcs(struct ath10k *ar, bool enable)
+{
+       struct sk_buff *skb;
+
+       if (!ar->wmi.ops->gen_adaptive_qcs)
+               return -EOPNOTSUPP;
+
+       skb = ar->wmi.ops->gen_adaptive_qcs(ar, enable);
+       if (IS_ERR(skb))
+               return PTR_ERR(skb);
+
+       return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->adaptive_qcs_cmdid);
+}
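/* Illustrative pattern note, not part of the patch: every ath10k_wmi_*()
 * inline above follows the same dispatch shape - return -EOPNOTSUPP when
 * the per-ABI op table leaves the hook NULL, build the command skb via the
 * op, propagate allocation failures with PTR_ERR(), then transmit with the
 * ABI-specific command id from ar->wmi.cmd. Adding a new command therefore
 * needs only an op-table entry plus one wrapper.
 */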
+
 #endif
index ee0c5f602e297424b3f5eb143cdda542b1231291..563fde73623c19037be244470abc523db00df971 100644 (file)
  */
 #include "core.h"
 #include "debug.h"
+#include "mac.h"
 #include "hw.h"
+#include "mac.h"
 #include "wmi.h"
 #include "wmi-ops.h"
 #include "wmi-tlv.h"
+#include "p2p.h"
 
 /***************/
 /* TLV helpers */
@@ -31,9 +34,9 @@ struct wmi_tlv_policy {
 
 static const struct wmi_tlv_policy wmi_tlv_policies[] = {
        [WMI_TLV_TAG_ARRAY_BYTE]
-               = { .min_len = sizeof(u8) },
+               = { .min_len = 0 },
        [WMI_TLV_TAG_ARRAY_UINT32]
-               = { .min_len = sizeof(u32) },
+               = { .min_len = 0 },
        [WMI_TLV_TAG_STRUCT_SCAN_EVENT]
                = { .min_len = sizeof(struct wmi_scan_event) },
        [WMI_TLV_TAG_STRUCT_MGMT_RX_HDR]
@@ -62,6 +65,14 @@ static const struct wmi_tlv_policy wmi_tlv_policies[] = {
                = { .min_len = sizeof(struct wmi_tlv_bcn_tx_status_ev) },
        [WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT]
                = { .min_len = sizeof(struct wmi_tlv_diag_data_ev) },
+       [WMI_TLV_TAG_STRUCT_P2P_NOA_EVENT]
+               = { .min_len = sizeof(struct wmi_tlv_p2p_noa_ev) },
+       [WMI_TLV_TAG_STRUCT_ROAM_EVENT]
+               = { .min_len = sizeof(struct wmi_tlv_roam_ev) },
+       [WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO]
+               = { .min_len = sizeof(struct wmi_tlv_wow_event_info) },
+       [WMI_TLV_TAG_STRUCT_TX_PAUSE_EVENT]
+               = { .min_len = sizeof(struct wmi_tlv_tx_pause_ev) },
 };
 
 static int
@@ -168,6 +179,7 @@ static int ath10k_wmi_tlv_event_bcn_tx_status(struct ath10k *ar,
 {
        const void **tb;
        const struct wmi_tlv_bcn_tx_status_ev *ev;
+       struct ath10k_vif *arvif;
        u32 vdev_id, tx_status;
        int ret;
 
@@ -201,6 +213,10 @@ static int ath10k_wmi_tlv_event_bcn_tx_status(struct ath10k *ar,
                break;
        }
 
+       arvif = ath10k_get_arvif(ar, vdev_id);
+       if (arvif && arvif->is_up && arvif->vif->csa_active)
+               ieee80211_queue_work(ar->hw, &arvif->ap_csa_work);
+
        kfree(tb);
        return 0;
 }
@@ -296,6 +312,83 @@ static int ath10k_wmi_tlv_event_diag(struct ath10k *ar,
        return 0;
 }
 
+static int ath10k_wmi_tlv_event_p2p_noa(struct ath10k *ar,
+                                       struct sk_buff *skb)
+{
+       const void **tb;
+       const struct wmi_tlv_p2p_noa_ev *ev;
+       const struct wmi_p2p_noa_info *noa;
+       int ret, vdev_id;
+
+       tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+       if (IS_ERR(tb)) {
+               ret = PTR_ERR(tb);
+               ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+               return ret;
+       }
+
+       ev = tb[WMI_TLV_TAG_STRUCT_P2P_NOA_EVENT];
+       noa = tb[WMI_TLV_TAG_STRUCT_P2P_NOA_INFO];
+
+       if (!ev || !noa) {
+               kfree(tb);
+               return -EPROTO;
+       }
+
+       vdev_id = __le32_to_cpu(ev->vdev_id);
+
+       ath10k_dbg(ar, ATH10K_DBG_WMI,
+                  "wmi tlv p2p noa vdev_id %i descriptors %hhu\n",
+                  vdev_id, noa->num_descriptors);
+
+       ath10k_p2p_noa_update_by_vdev_id(ar, vdev_id, noa);
+       kfree(tb);
+       return 0;
+}
+
+static int ath10k_wmi_tlv_event_tx_pause(struct ath10k *ar,
+                                        struct sk_buff *skb)
+{
+       const void **tb;
+       const struct wmi_tlv_tx_pause_ev *ev;
+       int ret, vdev_id;
+       u32 pause_id, action, vdev_map, peer_id, tid_map;
+
+       tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+       if (IS_ERR(tb)) {
+               ret = PTR_ERR(tb);
+               ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+               return ret;
+       }
+
+       ev = tb[WMI_TLV_TAG_STRUCT_TX_PAUSE_EVENT];
+       if (!ev) {
+               kfree(tb);
+               return -EPROTO;
+       }
+
+       pause_id = __le32_to_cpu(ev->pause_id);
+       action = __le32_to_cpu(ev->action);
+       vdev_map = __le32_to_cpu(ev->vdev_map);
+       peer_id = __le32_to_cpu(ev->peer_id);
+       tid_map = __le32_to_cpu(ev->tid_map);
+
+       ath10k_dbg(ar, ATH10K_DBG_WMI,
+                  "wmi tlv tx pause pause_id %u action %u vdev_map 0x%08x peer_id %u tid_map 0x%08x\n",
+                  pause_id, action, vdev_map, peer_id, tid_map);
+
+       for (vdev_id = 0; vdev_map; vdev_id++) {
+               if (!(vdev_map & BIT(vdev_id)))
+                       continue;
+
+               vdev_map &= ~BIT(vdev_id);
+               ath10k_mac_handle_tx_pause(ar, vdev_id, pause_id, action);
+       }
+
+       kfree(tb);
+       return 0;
+}
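/* Illustrative worked example, not part of the patch: a vdev_map of
 * 0x00000005 runs the loop above twice, calling ath10k_mac_handle_tx_pause()
 * for vdev ids 0 and 2. Clearing each bit as it is handled guarantees the
 * loop terminates even for sparse maps.
 */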
+
 /***********/
 /* TLV ops */
 /***********/
@@ -417,6 +510,12 @@ static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb)
        case WMI_TLV_DIAG_EVENTID:
                ath10k_wmi_tlv_event_diag(ar, skb);
                break;
+       case WMI_TLV_P2P_NOA_EVENTID:
+               ath10k_wmi_tlv_event_p2p_noa(ar, skb);
+               break;
+       case WMI_TLV_TX_PAUSE_EVENTID:
+               ath10k_wmi_tlv_event_tx_pause(ar, skb);
+               break;
        default:
                ath10k_warn(ar, "Unknown eventid: %d\n", id);
                break;
@@ -1012,6 +1111,65 @@ static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
        return 0;
 }
 
+static int ath10k_wmi_tlv_op_pull_roam_ev(struct ath10k *ar,
+                                         struct sk_buff *skb,
+                                         struct wmi_roam_ev_arg *arg)
+{
+       const void **tb;
+       const struct wmi_tlv_roam_ev *ev;
+       int ret;
+
+       tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+       if (IS_ERR(tb)) {
+               ret = PTR_ERR(tb);
+               ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+               return ret;
+       }
+
+       ev = tb[WMI_TLV_TAG_STRUCT_ROAM_EVENT];
+       if (!ev) {
+               kfree(tb);
+               return -EPROTO;
+       }
+
+       arg->vdev_id = ev->vdev_id;
+       arg->reason = ev->reason;
+       arg->rssi = ev->rssi;
+
+       kfree(tb);
+       return 0;
+}
+
+static int
+ath10k_wmi_tlv_op_pull_wow_ev(struct ath10k *ar, struct sk_buff *skb,
+                             struct wmi_wow_ev_arg *arg)
+{
+       const void **tb;
+       const struct wmi_tlv_wow_event_info *ev;
+       int ret;
+
+       tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+       if (IS_ERR(tb)) {
+               ret = PTR_ERR(tb);
+               ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+               return ret;
+       }
+
+       ev = tb[WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO];
+       if (!ev) {
+               kfree(tb);
+               return -EPROTO;
+       }
+
+       arg->vdev_id = __le32_to_cpu(ev->vdev_id);
+       arg->flag = __le32_to_cpu(ev->flag);
+       arg->wake_reason = __le32_to_cpu(ev->wake_reason);
+       arg->data_len = __le32_to_cpu(ev->data_len);
+
+       kfree(tb);
+       return 0;
+}
+
 static struct sk_buff *
 ath10k_wmi_tlv_op_gen_pdev_suspend(struct ath10k *ar, u32 opt)
 {
@@ -1160,8 +1318,8 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
        cfg->num_peers = __cpu_to_le32(TARGET_TLV_NUM_PEERS);
 
        if (test_bit(WMI_SERVICE_RX_FULL_REORDER, ar->wmi.svc_map)) {
-               cfg->num_offload_peers = __cpu_to_le32(3);
-               cfg->num_offload_reorder_bufs = __cpu_to_le32(3);
+               cfg->num_offload_peers = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
+               cfg->num_offload_reorder_bufs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
        } else {
                cfg->num_offload_peers = __cpu_to_le32(0);
                cfg->num_offload_reorder_bufs = __cpu_to_le32(0);
@@ -1178,8 +1336,8 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
        cfg->rx_timeout_pri[3] = __cpu_to_le32(0x28);
        cfg->rx_decap_mode = __cpu_to_le32(1);
        cfg->scan_max_pending_reqs = __cpu_to_le32(4);
-       cfg->bmiss_offload_max_vdev = __cpu_to_le32(3);
-       cfg->roam_offload_max_vdev = __cpu_to_le32(3);
+       cfg->bmiss_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
+       cfg->roam_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
        cfg->roam_offload_max_ap_profiles = __cpu_to_le32(8);
        cfg->num_mcast_groups = __cpu_to_le32(0);
        cfg->num_mcast_table_elems = __cpu_to_le32(0);
@@ -1193,11 +1351,11 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
        cfg->gtk_offload_max_vdev = __cpu_to_le32(2);
        cfg->num_msdu_desc = __cpu_to_le32(TARGET_TLV_NUM_MSDU_DESC);
        cfg->max_frag_entries = __cpu_to_le32(2);
-       cfg->num_tdls_vdevs = __cpu_to_le32(1);
+       cfg->num_tdls_vdevs = __cpu_to_le32(TARGET_TLV_NUM_TDLS_VDEVS);
        cfg->num_tdls_conn_table_entries = __cpu_to_le32(0x20);
        cfg->beacon_tx_offload_max_vdev = __cpu_to_le32(2);
        cfg->num_multicast_filter_entries = __cpu_to_le32(5);
-       cfg->num_wow_filters = __cpu_to_le32(0x16);
+       cfg->num_wow_filters = __cpu_to_le32(ar->wow.max_num_patterns);
        cfg->num_keep_alive_pattern = __cpu_to_le32(6);
        cfg->keep_alive_pattern_size = __cpu_to_le32(0);
        cfg->max_tdls_concurrent_sleep_sta = __cpu_to_le32(1);
@@ -1248,7 +1406,7 @@ ath10k_wmi_tlv_op_gen_start_scan(struct ath10k *ar,
        cmd = (void *)tlv->value;
 
        ath10k_wmi_put_start_scan_common(&cmd->common, arg);
-       cmd->burst_duration_ms = __cpu_to_le32(0);
+       cmd->burst_duration_ms = __cpu_to_le32(arg->burst_duration_ms);
        cmd->num_channels = __cpu_to_le32(arg->n_channels);
        cmd->num_ssids = __cpu_to_le32(arg->n_ssids);
        cmd->num_bssids = __cpu_to_le32(arg->n_bssids);
@@ -1408,8 +1566,6 @@ ath10k_wmi_tlv_op_gen_vdev_start(struct ath10k *ar,
        void *ptr;
        u32 flags = 0;
 
-       if (WARN_ON(arg->ssid && arg->ssid_len == 0))
-               return ERR_PTR(-EINVAL);
        if (WARN_ON(arg->hidden_ssid && !arg->ssid))
                return ERR_PTR(-EINVAL);
        if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
@@ -1782,7 +1938,8 @@ ath10k_wmi_tlv_op_gen_sta_keepalive(struct ath10k *ar,
 
 static struct sk_buff *
 ath10k_wmi_tlv_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
-                                 const u8 peer_addr[ETH_ALEN])
+                                 const u8 peer_addr[ETH_ALEN],
+                                 enum wmi_peer_type peer_type)
 {
        struct wmi_tlv_peer_create_cmd *cmd;
        struct wmi_tlv *tlv;
@@ -1797,7 +1954,7 @@ ath10k_wmi_tlv_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
        tlv->len = __cpu_to_le16(sizeof(*cmd));
        cmd = (void *)tlv->value;
        cmd->vdev_id = __cpu_to_le32(vdev_id);
-       cmd->peer_type = __cpu_to_le32(WMI_TLV_PEER_TYPE_DEFAULT); /* FIXME */
+       cmd->peer_type = __cpu_to_le32(peer_type);
        ether_addr_copy(cmd->peer_addr.addr, peer_addr);
 
        ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer create\n");
@@ -2027,7 +2184,7 @@ ath10k_wmi_tlv_op_gen_set_ap_ps(struct ath10k *ar, u32 vdev_id, const u8 *mac,
        if (!mac)
                return ERR_PTR(-EINVAL);
 
-       skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
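+       /* the skb must cover the TLV header as well as the command body */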
+       skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
        if (!skb)
                return ERR_PTR(-ENOMEM);
 
@@ -2485,6 +2642,387 @@ ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id,
        return skb;
 }
 
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
+                                          enum wmi_tdls_state state)
+{
+       struct wmi_tdls_set_state_cmd *cmd;
+       struct wmi_tlv *tlv;
+       struct sk_buff *skb;
+       void *ptr;
+       size_t len;
+       /* Set from the wmi_tlv_tdls_options flags; none of them are
+        * enabled for now.
+        */
+       u32 options = 0;
+
+       len = sizeof(*tlv) + sizeof(*cmd);
+       skb = ath10k_wmi_alloc_skb(ar, len);
+       if (!skb)
+               return ERR_PTR(-ENOMEM);
+
+       ptr = (void *)skb->data;
+       tlv = ptr;
+       tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_SET_STATE_CMD);
+       tlv->len = __cpu_to_le16(sizeof(*cmd));
+
+       cmd = (void *)tlv->value;
+       cmd->vdev_id = __cpu_to_le32(vdev_id);
+       cmd->state = __cpu_to_le32(state);
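+       /* the thresholds and timeouts below are fixed driver defaults */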
+       cmd->notification_interval_ms = __cpu_to_le32(5000);
+       cmd->tx_discovery_threshold = __cpu_to_le32(100);
+       cmd->tx_teardown_threshold = __cpu_to_le32(5);
+       cmd->rssi_teardown_threshold = __cpu_to_le32(-75);
+       cmd->rssi_delta = __cpu_to_le32(-20);
+       cmd->tdls_options = __cpu_to_le32(options);
+       cmd->tdls_peer_traffic_ind_window = __cpu_to_le32(2);
+       cmd->tdls_peer_traffic_response_timeout_ms = __cpu_to_le32(5000);
+       cmd->tdls_puapsd_mask = __cpu_to_le32(0xf);
+       cmd->tdls_puapsd_inactivity_time_ms = __cpu_to_le32(0);
+       cmd->tdls_puapsd_rx_frame_threshold = __cpu_to_le32(10);
+
+       ptr += sizeof(*tlv);
+       ptr += sizeof(*cmd);
+
+       ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv update fw tdls state %d for vdev %i\n",
+                  state, vdev_id);
+       return skb;
+}
+
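+/* Translate mac80211's uAPSD queue flags and max service period (sp) into
+ * the firmware's TDLS peer_qos word; e.g. uapsd_queues 0xf with sp 3 would
+ * set all four AC bits plus SM(3, WMI_TLV_TDLS_PEER_SP).
+ */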
+static u32 ath10k_wmi_tlv_prepare_peer_qos(u8 uapsd_queues, u8 sp)
+{
+       u32 peer_qos = 0;
+
+       if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
+               peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_VO;
+       if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
+               peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_VI;
+       if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
+               peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_BK;
+       if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
+               peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_BE;
+
+       peer_qos |= SM(sp, WMI_TLV_TDLS_PEER_SP);
+
+       return peer_qos;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_tdls_peer_update(struct ath10k *ar,
+                                      const struct wmi_tdls_peer_update_cmd_arg *arg,
+                                      const struct wmi_tdls_peer_capab_arg *cap,
+                                      const struct wmi_channel_arg *chan_arg)
+{
+       struct wmi_tdls_peer_update_cmd *cmd;
+       struct wmi_tdls_peer_capab *peer_cap;
+       struct wmi_channel *chan;
+       struct wmi_tlv *tlv;
+       struct sk_buff *skb;
+       u32 peer_qos;
+       void *ptr;
+       int len;
+       int i;
+
+       len = sizeof(*tlv) + sizeof(*cmd) +
+             sizeof(*tlv) + sizeof(*peer_cap) +
+             sizeof(*tlv) + cap->peer_chan_len * sizeof(*chan);
+
+       skb = ath10k_wmi_alloc_skb(ar, len);
+       if (!skb)
+               return ERR_PTR(-ENOMEM);
+
+       ptr = (void *)skb->data;
+       tlv = ptr;
+       tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_PEER_UPDATE_CMD);
+       tlv->len = __cpu_to_le16(sizeof(*cmd));
+
+       cmd = (void *)tlv->value;
+       cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
+       ether_addr_copy(cmd->peer_macaddr.addr, arg->addr);
+       cmd->peer_state = __cpu_to_le32(arg->peer_state);
+
+       ptr += sizeof(*tlv);
+       ptr += sizeof(*cmd);
+
+       tlv = ptr;
+       tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_PEER_CAPABILITIES);
+       tlv->len = __cpu_to_le16(sizeof(*peer_cap));
+       peer_cap = (void *)tlv->value;
+       peer_qos = ath10k_wmi_tlv_prepare_peer_qos(cap->peer_uapsd_queues,
+                                                  cap->peer_max_sp);
+       peer_cap->peer_qos = __cpu_to_le32(peer_qos);
+       peer_cap->buff_sta_support = __cpu_to_le32(cap->buff_sta_support);
+       peer_cap->off_chan_support = __cpu_to_le32(cap->off_chan_support);
+       peer_cap->peer_curr_operclass = __cpu_to_le32(cap->peer_curr_operclass);
+       peer_cap->self_curr_operclass = __cpu_to_le32(cap->self_curr_operclass);
+       peer_cap->peer_chan_len = __cpu_to_le32(cap->peer_chan_len);
+       peer_cap->peer_operclass_len = __cpu_to_le32(cap->peer_operclass_len);
+
+       for (i = 0; i < WMI_TDLS_MAX_SUPP_OPER_CLASSES; i++)
+               peer_cap->peer_operclass[i] = cap->peer_operclass[i];
+
+       peer_cap->is_peer_responder = __cpu_to_le32(cap->is_peer_responder);
+       peer_cap->pref_offchan_num = __cpu_to_le32(cap->pref_offchan_num);
+       peer_cap->pref_offchan_bw = __cpu_to_le32(cap->pref_offchan_bw);
+
+       ptr += sizeof(*tlv);
+       ptr += sizeof(*peer_cap);
+
+       tlv = ptr;
+       tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+       tlv->len = __cpu_to_le16(cap->peer_chan_len * sizeof(*chan));
+
+       ptr += sizeof(*tlv);
+
+       for (i = 0; i < cap->peer_chan_len; i++) {
+               tlv = ptr;
+               tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
+               tlv->len = __cpu_to_le16(sizeof(*chan));
+               chan = (void *)tlv->value;
+               ath10k_wmi_put_wmi_channel(chan, &chan_arg[i]);
+
+               ptr += sizeof(*tlv);
+               ptr += sizeof(*chan);
+       }
+
+       ath10k_dbg(ar, ATH10K_DBG_WMI,
+                  "wmi tlv tdls peer update vdev %i state %d n_chans %u\n",
+                  arg->vdev_id, arg->peer_state, cap->peer_chan_len);
+       return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_wow_enable(struct ath10k *ar)
+{
+       struct wmi_tlv_wow_enable_cmd *cmd;
+       struct wmi_tlv *tlv;
+       struct sk_buff *skb;
+       size_t len;
+
+       len = sizeof(*tlv) + sizeof(*cmd);
+       skb = ath10k_wmi_alloc_skb(ar, len);
+       if (!skb)
+               return ERR_PTR(-ENOMEM);
+
+       tlv = (struct wmi_tlv *)skb->data;
+       tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ENABLE_CMD);
+       tlv->len = __cpu_to_le16(sizeof(*cmd));
+       cmd = (void *)tlv->value;
+
+       cmd->enable = __cpu_to_le32(1);
+
+       ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow enable\n");
+       return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_wow_add_wakeup_event(struct ath10k *ar,
+                                          u32 vdev_id,
+                                          enum wmi_wow_wakeup_event event,
+                                          u32 enable)
+{
+       struct wmi_tlv_wow_add_del_event_cmd *cmd;
+       struct wmi_tlv *tlv;
+       struct sk_buff *skb;
+       size_t len;
+
+       len = sizeof(*tlv) + sizeof(*cmd);
+       skb = ath10k_wmi_alloc_skb(ar, len);
+       if (!skb)
+               return ERR_PTR(-ENOMEM);
+
+       tlv = (struct wmi_tlv *)skb->data;
+       tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ADD_DEL_EVT_CMD);
+       tlv->len = __cpu_to_le16(sizeof(*cmd));
+       cmd = (void *)tlv->value;
+
+       cmd->vdev_id = __cpu_to_le32(vdev_id);
+       cmd->is_add = __cpu_to_le32(enable);
+       cmd->event_bitmap = __cpu_to_le32(1 << event);
+
+       ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow add wakeup event %s enable %d vdev_id %d\n",
+                  wow_wakeup_event(event), enable, vdev_id);
+       return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_gen_wow_host_wakeup_ind(struct ath10k *ar)
+{
+       struct wmi_tlv_wow_host_wakeup_ind *cmd;
+       struct wmi_tlv *tlv;
+       struct sk_buff *skb;
+       size_t len;
+
+       len = sizeof(*tlv) + sizeof(*cmd);
+       skb = ath10k_wmi_alloc_skb(ar, len);
+       if (!skb)
+               return ERR_PTR(-ENOMEM);
+
+       tlv = (struct wmi_tlv *)skb->data;
+       tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_HOSTWAKEUP_FROM_SLEEP_CMD);
+       tlv->len = __cpu_to_le16(sizeof(*cmd));
+       cmd = (void *)tlv->value;
+
+       ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow host wakeup ind\n");
+       return skb;
+}
+
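+/* Build a WOW_ADD_PATTERN command: a single bitmap-pattern TLV followed by
+ * the empty ipv4/ipv6 sync, magic and timeout arrays the firmware TLV
+ * parser seems to expect (layout inferred from the tags written below).
+ */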
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_wow_add_pattern(struct ath10k *ar, u32 vdev_id,
+                                     u32 pattern_id, const u8 *pattern,
+                                     const u8 *bitmask, int pattern_len,
+                                     int pattern_offset)
+{
+       struct wmi_tlv_wow_add_pattern_cmd *cmd;
+       struct wmi_tlv_wow_bitmap_pattern *bitmap;
+       struct wmi_tlv *tlv;
+       struct sk_buff *skb;
+       void *ptr;
+       size_t len;
+
+       len = sizeof(*tlv) + sizeof(*cmd) +
+             sizeof(*tlv) +                    /* array struct */
+             sizeof(*tlv) + sizeof(*bitmap) +  /* bitmap */
+             sizeof(*tlv) +                    /* empty ipv4 sync */
+             sizeof(*tlv) +                    /* empty ipv6 sync */
+             sizeof(*tlv) +                    /* empty magic */
+             sizeof(*tlv) +                    /* empty info timeout */
+             sizeof(*tlv) + sizeof(u32);       /* ratelimit interval */
+
+       skb = ath10k_wmi_alloc_skb(ar, len);
+       if (!skb)
+               return ERR_PTR(-ENOMEM);
+
+       /* cmd */
+       ptr = (void *)skb->data;
+       tlv = ptr;
+       tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ADD_PATTERN_CMD);
+       tlv->len = __cpu_to_le16(sizeof(*cmd));
+       cmd = (void *)tlv->value;
+
+       cmd->vdev_id = __cpu_to_le32(vdev_id);
+       cmd->pattern_id = __cpu_to_le32(pattern_id);
+       cmd->pattern_type = __cpu_to_le32(WOW_BITMAP_PATTERN);
+
+       ptr += sizeof(*tlv);
+       ptr += sizeof(*cmd);
+
+       /* bitmap */
+       tlv = ptr;
+       tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+       tlv->len = __cpu_to_le16(sizeof(*tlv) + sizeof(*bitmap));
+
+       ptr += sizeof(*tlv);
+
+       tlv = ptr;
+       tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_BITMAP_PATTERN_T);
+       tlv->len = __cpu_to_le16(sizeof(*bitmap));
+       bitmap = (void *)tlv->value;
+
+       memcpy(bitmap->patternbuf, pattern, pattern_len);
+       memcpy(bitmap->bitmaskbuf, bitmask, pattern_len);
+       bitmap->pattern_offset = __cpu_to_le32(pattern_offset);
+       bitmap->pattern_len = __cpu_to_le32(pattern_len);
+       bitmap->bitmask_len = __cpu_to_le32(pattern_len);
+       bitmap->pattern_id = __cpu_to_le32(pattern_id);
+
+       ptr += sizeof(*tlv);
+       ptr += sizeof(*bitmap);
+
+       /* ipv4 sync */
+       tlv = ptr;
+       tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+       tlv->len = __cpu_to_le16(0);
+
+       ptr += sizeof(*tlv);
+
+       /* ipv6 sync */
+       tlv = ptr;
+       tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+       tlv->len = __cpu_to_le16(0);
+
+       ptr += sizeof(*tlv);
+
+       /* magic */
+       tlv = ptr;
+       tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+       tlv->len = __cpu_to_le16(0);
+
+       ptr += sizeof(*tlv);
+
+       /* pattern info timeout */
+       tlv = ptr;
+       tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
+       tlv->len = __cpu_to_le16(0);
+
+       ptr += sizeof(*tlv);
+
+       /* ratelimit interval */
+       tlv = ptr;
+       tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
+       tlv->len = __cpu_to_le16(sizeof(u32));
+
+       ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow add pattern vdev_id %d pattern_id %d, pattern_offset %d\n",
+                  vdev_id, pattern_id, pattern_offset);
+       return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_wow_del_pattern(struct ath10k *ar, u32 vdev_id,
+                                     u32 pattern_id)
+{
+       struct wmi_tlv_wow_del_pattern_cmd *cmd;
+       struct wmi_tlv *tlv;
+       struct sk_buff *skb;
+       size_t len;
+
+       len = sizeof(*tlv) + sizeof(*cmd);
+       skb = ath10k_wmi_alloc_skb(ar, len);
+       if (!skb)
+               return ERR_PTR(-ENOMEM);
+
+       tlv = (struct wmi_tlv *)skb->data;
+       tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_DEL_PATTERN_CMD);
+       tlv->len = __cpu_to_le16(sizeof(*cmd));
+       cmd = (void *)tlv->value;
+
+       cmd->vdev_id = __cpu_to_le32(vdev_id);
+       cmd->pattern_id = __cpu_to_le32(pattern_id);
+       cmd->pattern_type = __cpu_to_le32(WOW_BITMAP_PATTERN);
+
+       ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow del pattern vdev_id %d pattern_id %d\n",
+                  vdev_id, pattern_id);
+       return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_adaptive_qcs(struct ath10k *ar, bool enable)
+{
+       struct wmi_tlv_adaptive_qcs *cmd;
+       struct wmi_tlv *tlv;
+       struct sk_buff *skb;
+       void *ptr;
+       size_t len;
+
+       len = sizeof(*tlv) + sizeof(*cmd);
+       skb = ath10k_wmi_alloc_skb(ar, len);
+       if (!skb)
+               return ERR_PTR(-ENOMEM);
+
+       ptr = (void *)skb->data;
+       tlv = ptr;
+       tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_RESMGR_ADAPTIVE_OCS_CMD);
+       tlv->len = __cpu_to_le16(sizeof(*cmd));
+       cmd = (void *)tlv->value;
+       cmd->enable = __cpu_to_le32(enable ? 1 : 0);
+
+       ptr += sizeof(*tlv);
+       ptr += sizeof(*cmd);
+
+       ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv adaptive qcs %d\n", enable);
+       return skb;
+}
+
 /****************/
 /* TLV mappings */
 /****************/
@@ -2609,6 +3147,9 @@ static struct wmi_cmd_map wmi_tlv_cmd_map = {
        .gpio_output_cmdid = WMI_TLV_GPIO_OUTPUT_CMDID,
        .pdev_get_temperature_cmdid = WMI_TLV_CMD_UNSUPPORTED,
        .vdev_set_wmm_params_cmdid = WMI_TLV_VDEV_SET_WMM_PARAMS_CMDID,
+       .tdls_set_state_cmdid = WMI_TLV_TDLS_SET_STATE_CMDID,
+       .tdls_peer_update_cmdid = WMI_TLV_TDLS_PEER_UPDATE_CMDID,
+       .adaptive_qcs_cmdid = WMI_TLV_RESMGR_ADAPTIVE_OCS_CMDID,
 };
 
 static struct wmi_pdev_param_map wmi_tlv_pdev_param_map = {
@@ -2736,6 +3277,8 @@ static const struct wmi_ops wmi_tlv_ops = {
        .pull_svc_rdy = ath10k_wmi_tlv_op_pull_svc_rdy_ev,
        .pull_rdy = ath10k_wmi_tlv_op_pull_rdy_ev,
        .pull_fw_stats = ath10k_wmi_tlv_op_pull_fw_stats,
+       .pull_roam_ev = ath10k_wmi_tlv_op_pull_roam_ev,
+       .pull_wow_event = ath10k_wmi_tlv_op_pull_wow_ev,
 
        .gen_pdev_suspend = ath10k_wmi_tlv_op_gen_pdev_suspend,
        .gen_pdev_resume = ath10k_wmi_tlv_op_gen_pdev_resume,
@@ -2781,6 +3324,14 @@ static const struct wmi_ops wmi_tlv_ops = {
        .gen_p2p_go_bcn_ie = ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie,
        .gen_vdev_sta_uapsd = ath10k_wmi_tlv_op_gen_vdev_sta_uapsd,
        .gen_sta_keepalive = ath10k_wmi_tlv_op_gen_sta_keepalive,
+       .gen_wow_enable = ath10k_wmi_tlv_op_gen_wow_enable,
+       .gen_wow_add_wakeup_event = ath10k_wmi_tlv_op_gen_wow_add_wakeup_event,
+       .gen_wow_host_wakeup_ind = ath10k_wmi_tlv_gen_wow_host_wakeup_ind,
+       .gen_wow_add_pattern = ath10k_wmi_tlv_op_gen_wow_add_pattern,
+       .gen_wow_del_pattern = ath10k_wmi_tlv_op_gen_wow_del_pattern,
+       .gen_update_fw_tdls_state = ath10k_wmi_tlv_op_gen_update_fw_tdls_state,
+       .gen_tdls_peer_update = ath10k_wmi_tlv_op_gen_tdls_peer_update,
+       .gen_adaptive_qcs = ath10k_wmi_tlv_op_gen_adaptive_qcs,
 };
 
 /************/
index a6c8280cc4b194384c1abee08e8d379bd4754e23..ad655c44afdb604572c36f70ca7a611f53f6c56b 100644 (file)
@@ -1454,6 +1454,174 @@ struct wmi_tlv_stats_ev {
        __le32 num_chan_stats;
 } __packed;
 
+struct wmi_tlv_p2p_noa_ev {
+       __le32 vdev_id;
+} __packed;
+
+struct wmi_tlv_roam_ev {
+       __le32 vdev_id;
+       __le32 reason;
+       __le32 rssi;
+} __packed;
+
+struct wmi_tlv_wow_add_del_event_cmd {
+       __le32 vdev_id;
+       __le32 is_add;
+       __le32 event_bitmap;
+} __packed;
+
+struct wmi_tlv_wow_enable_cmd {
+       __le32 enable;
+} __packed;
+
+struct wmi_tlv_wow_host_wakeup_ind {
+       __le32 reserved;
+} __packed;
+
+struct wmi_tlv_wow_event_info {
+       __le32 vdev_id;
+       __le32 flag;
+       __le32 wake_reason;
+       __le32 data_len;
+} __packed;
+
+enum wmi_tlv_pattern_type {
+       WOW_PATTERN_MIN = 0,
+       WOW_BITMAP_PATTERN = WOW_PATTERN_MIN,
+       WOW_IPV4_SYNC_PATTERN,
+       WOW_IPV6_SYNC_PATTERN,
+       WOW_WILD_CARD_PATTERN,
+       WOW_TIMER_PATTERN,
+       WOW_MAGIC_PATTERN,
+       WOW_IPV6_RA_PATTERN,
+       WOW_IOAC_PKT_PATTERN,
+       WOW_IOAC_TMR_PATTERN,
+       WOW_PATTERN_MAX
+};
+
+#define WOW_DEFAULT_BITMAP_PATTERN_SIZE                148
+#define WOW_DEFAULT_BITMASK_SIZE               148
+
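+/* both buffers below are fixed-size on the wire; the valid prefix lengths
+ * travel separately in pattern_len and bitmask_len
+ */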
+struct wmi_tlv_wow_bitmap_pattern {
+       u8 patternbuf[WOW_DEFAULT_BITMAP_PATTERN_SIZE];
+       u8 bitmaskbuf[WOW_DEFAULT_BITMASK_SIZE];
+       __le32 pattern_offset;
+       __le32 pattern_len;
+       __le32 bitmask_len;
+       __le32 pattern_id;
+} __packed;
+
+struct wmi_tlv_wow_add_pattern_cmd {
+       __le32 vdev_id;
+       __le32 pattern_id;
+       __le32 pattern_type;
+} __packed;
+
+struct wmi_tlv_wow_del_pattern_cmd {
+       __le32 vdev_id;
+       __le32 pattern_id;
+       __le32 pattern_type;
+} __packed;
+
+/* TDLS Options */
+enum wmi_tlv_tdls_options {
+       WMI_TLV_TDLS_OFFCHAN_EN = BIT(0),
+       WMI_TLV_TDLS_BUFFER_STA_EN = BIT(1),
+       WMI_TLV_TDLS_SLEEP_STA_EN = BIT(2),
+};
+
+struct wmi_tdls_set_state_cmd {
+       __le32 vdev_id;
+       __le32 state;
+       __le32 notification_interval_ms;
+       __le32 tx_discovery_threshold;
+       __le32 tx_teardown_threshold;
+       __le32 rssi_teardown_threshold;
+       __le32 rssi_delta;
+       __le32 tdls_options;
+       __le32 tdls_peer_traffic_ind_window;
+       __le32 tdls_peer_traffic_response_timeout_ms;
+       __le32 tdls_puapsd_mask;
+       __le32 tdls_puapsd_inactivity_time_ms;
+       __le32 tdls_puapsd_rx_frame_threshold;
+} __packed;
+
+struct wmi_tdls_peer_update_cmd {
+       __le32 vdev_id;
+       struct wmi_mac_addr peer_macaddr;
+       __le32 peer_state;
+} __packed;
+
+enum {
+       WMI_TLV_TDLS_PEER_QOS_AC_VO = BIT(0),
+       WMI_TLV_TDLS_PEER_QOS_AC_VI = BIT(1),
+       WMI_TLV_TDLS_PEER_QOS_AC_BK = BIT(2),
+       WMI_TLV_TDLS_PEER_QOS_AC_BE = BIT(3),
+};
+
+#define WMI_TLV_TDLS_PEER_SP_MASK      0x60
+#define WMI_TLV_TDLS_PEER_SP_LSB       5
+
+struct wmi_tdls_peer_capab {
+       __le32 peer_qos;
+       __le32 buff_sta_support;
+       __le32 off_chan_support;
+       __le32 peer_curr_operclass;
+       __le32 self_curr_operclass;
+       __le32 peer_chan_len;
+       __le32 peer_operclass_len;
+       u8 peer_operclass[WMI_TDLS_MAX_SUPP_OPER_CLASSES];
+       __le32 is_peer_responder;
+       __le32 pref_offchan_num;
+       __le32 pref_offchan_bw;
+} __packed;
+
+struct wmi_tlv_adaptive_qcs {
+       __le32 enable;
+} __packed;
+
+/**
+ * enum wmi_tlv_tx_pause_id - firmware tx queue pause reason types
+ *
+ * @WMI_TLV_TX_PAUSE_ID_MCC: used by the multi-channel firmware scheduler.
+ *             Only vdev_map is valid.
+ * @WMI_TLV_TX_PAUSE_ID_AP_PEER_PS: peer in AP mode is asleep.
+ *             Only peer_id is valid.
+ * @WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD: Only peer_id and tid_map are valid.
+ * @WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA: Only vdev_map is valid.
+ * @WMI_TLV_TX_PAUSE_ID_P2P_GO_PS: Only vdev_map is valid.
+ * @WMI_TLV_TX_PAUSE_ID_STA_ADD_BA: Only peer_id and tid_map are valid.
+ * @WMI_TLV_TX_PAUSE_ID_AP_PS: When all peers are asleep in AP mode. Only
+ *             vdev_map is valid.
+ * @WMI_TLV_TX_PAUSE_ID_IBSS_PS: When all peers are asleep in IBSS mode. Only
+ *             vdev_map is valid.
+ * @WMI_TLV_TX_PAUSE_ID_HOST: Host itself requested tx pause.
+ */
+enum wmi_tlv_tx_pause_id {
+       WMI_TLV_TX_PAUSE_ID_MCC = 1,
+       WMI_TLV_TX_PAUSE_ID_AP_PEER_PS = 2,
+       WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD = 3,
+       WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA = 4,
+       WMI_TLV_TX_PAUSE_ID_P2P_GO_PS = 5,
+       WMI_TLV_TX_PAUSE_ID_STA_ADD_BA = 6,
+       WMI_TLV_TX_PAUSE_ID_AP_PS = 7,
+       WMI_TLV_TX_PAUSE_ID_IBSS_PS = 8,
+       WMI_TLV_TX_PAUSE_ID_HOST = 21,
+};
+
+enum wmi_tlv_tx_pause_action {
+       WMI_TLV_TX_PAUSE_ACTION_STOP,
+       WMI_TLV_TX_PAUSE_ACTION_WAKE,
+};
+
+struct wmi_tlv_tx_pause_ev {
+       __le32 pause_id;
+       __le32 action;
+       __le32 vdev_map;
+       __le32 peer_id;
+       __le32 tid_map;
+} __packed;
+
 void ath10k_wmi_tlv_attach(struct ath10k *ar);
 
 #endif
index c7ea77edce245ccd389111ad3ac9fd3958d4bc89..0fabe689179c8a8ab3d150bada5f50bc4300a6a2 100644 (file)
@@ -26,6 +26,7 @@
 #include "mac.h"
 #include "testmode.h"
 #include "wmi-ops.h"
+#include "p2p.h"
 
 /* MAIN WMI cmd track */
 static struct wmi_cmd_map wmi_cmd_map = {
@@ -884,20 +885,24 @@ void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch,
 
 int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
 {
-       int ret;
+       unsigned long time_left;
 
-       ret = wait_for_completion_timeout(&ar->wmi.service_ready,
-                                         WMI_SERVICE_READY_TIMEOUT_HZ);
-       return ret;
+       time_left = wait_for_completion_timeout(&ar->wmi.service_ready,
+                                               WMI_SERVICE_READY_TIMEOUT_HZ);
+       if (!time_left)
+               return -ETIMEDOUT;
+       return 0;
 }
 
 int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar)
 {
-       int ret;
+       unsigned long time_left;
 
-       ret = wait_for_completion_timeout(&ar->wmi.unified_ready,
-                                         WMI_UNIFIED_READY_TIMEOUT_HZ);
-       return ret;
+       time_left = wait_for_completion_timeout(&ar->wmi.unified_ready,
+                                               WMI_UNIFIED_READY_TIMEOUT_HZ);
+       if (!time_left)
+               return -ETIMEDOUT;
+       return 0;
 }
 
 struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len)
@@ -1351,63 +1356,6 @@ static inline enum ieee80211_band phy_mode_to_band(u32 phy_mode)
        return band;
 }
 
-static inline u8 get_rate_idx(u32 rate, enum ieee80211_band band)
-{
-       u8 rate_idx = 0;
-
-       /* rate in Kbps */
-       switch (rate) {
-       case 1000:
-               rate_idx = 0;
-               break;
-       case 2000:
-               rate_idx = 1;
-               break;
-       case 5500:
-               rate_idx = 2;
-               break;
-       case 11000:
-               rate_idx = 3;
-               break;
-       case 6000:
-               rate_idx = 4;
-               break;
-       case 9000:
-               rate_idx = 5;
-               break;
-       case 12000:
-               rate_idx = 6;
-               break;
-       case 18000:
-               rate_idx = 7;
-               break;
-       case 24000:
-               rate_idx = 8;
-               break;
-       case 36000:
-               rate_idx = 9;
-               break;
-       case 48000:
-               rate_idx = 10;
-               break;
-       case 54000:
-               rate_idx = 11;
-               break;
-       default:
-               break;
-       }
-
-       if (band == IEEE80211_BAND_5GHZ) {
-               if (rate_idx > 3)
-                       /* Omit CCK rates */
-                       rate_idx -= 4;
-               else
-                       rate_idx = 0;
-       }
-
-       return rate_idx;
-}
-
 /* If keys are configured, HW decrypts all frames
  * with protected bit set. Mark such frames as decrypted.
  */
@@ -1489,6 +1437,7 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
        struct wmi_mgmt_rx_ev_arg arg = {};
        struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
        struct ieee80211_hdr *hdr;
+       struct ieee80211_supported_band *sband;
        u32 rx_status;
        u32 channel;
        u32 phy_mode;
@@ -1559,9 +1508,11 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
        if (phy_mode == MODE_11B && status->band == IEEE80211_BAND_5GHZ)
                ath10k_dbg(ar, ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n");
 
+       sband = &ar->mac.sbands[status->band];
+
        status->freq = ieee80211_channel_to_frequency(channel, status->band);
        status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR;
-       status->rate_idx = get_rate_idx(rate, status->band);
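+       /* the WMI rate is in kbps; mac80211 bitrate tables use 100kbps units */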
+       status->rate_idx = ath10k_mac_bitrate_to_idx(sband, rate / 100);
 
        hdr = (struct ieee80211_hdr *)skb->data;
        fc = le16_to_cpu(hdr->frame_control);
@@ -1585,6 +1536,9 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
                }
        }
 
+       if (ieee80211_is_beacon(hdr->frame_control))
+               ath10k_mac_handle_beacon(ar, skb);
+
        ath10k_dbg(ar, ATH10K_DBG_MGMT,
                   "event mgmt rx skb %p len %d ftype %02x stype %02x\n",
                   skb, skb->len,
@@ -1691,10 +1645,10 @@ void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
 
                survey = &ar->survey[idx];
                survey->time = WMI_CHAN_INFO_MSEC(cycle_count);
-               survey->time_rx = WMI_CHAN_INFO_MSEC(rx_clear_count);
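+               /* rx_clear_count counts medium-busy cycles, so report it
+                * as busy time rather than rx time
+                */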
+               survey->time_busy = WMI_CHAN_INFO_MSEC(rx_clear_count);
                survey->noise = noise_floor;
                survey->filled = SURVEY_INFO_TIME |
-                                SURVEY_INFO_TIME_RX |
+                                SURVEY_INFO_TIME_BUSY |
                                 SURVEY_INFO_NOISE_DBM;
        }
 
@@ -2276,109 +2230,25 @@ static void ath10k_wmi_update_tim(struct ath10k *ar,
                   tim->bitmap_ctrl, pvm_len);
 }
 
-static void ath10k_p2p_fill_noa_ie(u8 *data, u32 len,
-                                  const struct wmi_p2p_noa_info *noa)
-{
-       struct ieee80211_p2p_noa_attr *noa_attr;
-       u8  ctwindow_oppps = noa->ctwindow_oppps;
-       u8 ctwindow = ctwindow_oppps >> WMI_P2P_OPPPS_CTWINDOW_OFFSET;
-       bool oppps = !!(ctwindow_oppps & WMI_P2P_OPPPS_ENABLE_BIT);
-       __le16 *noa_attr_len;
-       u16 attr_len;
-       u8 noa_descriptors = noa->num_descriptors;
-       int i;
-
-       /* P2P IE */
-       data[0] = WLAN_EID_VENDOR_SPECIFIC;
-       data[1] = len - 2;
-       data[2] = (WLAN_OUI_WFA >> 16) & 0xff;
-       data[3] = (WLAN_OUI_WFA >> 8) & 0xff;
-       data[4] = (WLAN_OUI_WFA >> 0) & 0xff;
-       data[5] = WLAN_OUI_TYPE_WFA_P2P;
-
-       /* NOA ATTR */
-       data[6] = IEEE80211_P2P_ATTR_ABSENCE_NOTICE;
-       noa_attr_len = (__le16 *)&data[7]; /* 2 bytes */
-       noa_attr = (struct ieee80211_p2p_noa_attr *)&data[9];
-
-       noa_attr->index = noa->index;
-       noa_attr->oppps_ctwindow = ctwindow;
-       if (oppps)
-               noa_attr->oppps_ctwindow |= IEEE80211_P2P_OPPPS_ENABLE_BIT;
-
-       for (i = 0; i < noa_descriptors; i++) {
-               noa_attr->desc[i].count =
-                       __le32_to_cpu(noa->descriptors[i].type_count);
-               noa_attr->desc[i].duration = noa->descriptors[i].duration;
-               noa_attr->desc[i].interval = noa->descriptors[i].interval;
-               noa_attr->desc[i].start_time = noa->descriptors[i].start_time;
-       }
-
-       attr_len = 2; /* index + oppps_ctwindow */
-       attr_len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
-       *noa_attr_len = __cpu_to_le16(attr_len);
-}
-
-static u32 ath10k_p2p_calc_noa_ie_len(const struct wmi_p2p_noa_info *noa)
-{
-       u32 len = 0;
-       u8 noa_descriptors = noa->num_descriptors;
-       u8 opp_ps_info = noa->ctwindow_oppps;
-       bool opps_enabled = !!(opp_ps_info & WMI_P2P_OPPPS_ENABLE_BIT);
-
-       if (!noa_descriptors && !opps_enabled)
-               return len;
-
-       len += 1 + 1 + 4; /* EID + len + OUI */
-       len += 1 + 2; /* noa attr  + attr len */
-       len += 1 + 1; /* index + oppps_ctwindow */
-       len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
-
-       return len;
-}
-
 static void ath10k_wmi_update_noa(struct ath10k *ar, struct ath10k_vif *arvif,
                                  struct sk_buff *bcn,
                                  const struct wmi_p2p_noa_info *noa)
 {
-       u8 *new_data, *old_data = arvif->u.ap.noa_data;
-       u32 new_len;
-
        if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO)
                return;
 
        ath10k_dbg(ar, ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed);
-       if (noa->changed & WMI_P2P_NOA_CHANGED_BIT) {
-               new_len = ath10k_p2p_calc_noa_ie_len(noa);
-               if (!new_len)
-                       goto cleanup;
 
-               new_data = kmalloc(new_len, GFP_ATOMIC);
-               if (!new_data)
-                       goto cleanup;
-
-               ath10k_p2p_fill_noa_ie(new_data, new_len, noa);
-
-               spin_lock_bh(&ar->data_lock);
-               arvif->u.ap.noa_data = new_data;
-               arvif->u.ap.noa_len = new_len;
-               spin_unlock_bh(&ar->data_lock);
-               kfree(old_data);
-       }
+       if (noa->changed & WMI_P2P_NOA_CHANGED_BIT)
+               ath10k_p2p_noa_update(arvif, noa);
 
        if (arvif->u.ap.noa_data)
                if (!pskb_expand_head(bcn, 0, arvif->u.ap.noa_len, GFP_ATOMIC))
                        memcpy(skb_put(bcn, arvif->u.ap.noa_len),
                               arvif->u.ap.noa_data,
                               arvif->u.ap.noa_len);
-       return;
 
-cleanup:
-       spin_lock_bh(&ar->data_lock);
-       arvif->u.ap.noa_data = NULL;
-       arvif->u.ap.noa_len = 0;
-       spin_unlock_bh(&ar->data_lock);
-       kfree(old_data);
+       return;
 }
 
 static int ath10k_wmi_op_pull_swba_ev(struct ath10k *ar, struct sk_buff *skb,
@@ -2555,6 +2425,7 @@ static void ath10k_dfs_radar_report(struct ath10k *ar,
                                    u64 tsf)
 {
        u32 reg0, reg1, tsf32l;
+       struct ieee80211_channel *ch;
        struct pulse_event pe;
        u64 tsf64;
        u8 rssi, width;
@@ -2583,6 +2454,15 @@ static void ath10k_dfs_radar_report(struct ath10k *ar,
        if (!ar->dfs_detector)
                return;
 
+       spin_lock_bh(&ar->data_lock);
+       ch = ar->rx_channel;
+       spin_unlock_bh(&ar->data_lock);
+
+       if (!ch) {
+               ath10k_warn(ar, "failed to derive channel for radar pulse, treating as radar\n");
+               goto radar_detected;
+       }
+
        /* report event to DFS pattern detector */
        tsf32l = __le32_to_cpu(phyerr->tsf_timestamp);
        tsf64 = tsf & (~0xFFFFFFFFULL);
@@ -2598,10 +2478,10 @@ static void ath10k_dfs_radar_report(struct ath10k *ar,
                rssi = 0;
 
        pe.ts = tsf64;
-       pe.freq = ar->hw->conf.chandef.chan->center_freq;
+       pe.freq = ch->center_freq;
        pe.width = width;
        pe.rssi = rssi;
-
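+       /* pass the chirp flag so the pattern detector can also match
+        * long-pulse (chirping) radar types
+        */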
+       pe.chirp = (MS(reg0, RADAR_REPORT_REG0_PULSE_IS_CHIRP) != 0);
        ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
                   "dfs add pulse freq: %d, width: %d, rssi %d, tsf: %llX\n",
                   pe.freq, pe.width, pe.rssi, pe.ts);
@@ -2614,6 +2494,7 @@ static void ath10k_dfs_radar_report(struct ath10k *ar,
                return;
        }
 
+radar_detected:
        ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs radar detected\n");
        ATH10K_DFS_STAT_INC(ar, radar_detected);
 
@@ -2872,7 +2753,43 @@ void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
 
 void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb)
 {
-       ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_ROAM_EVENTID\n");
+       struct wmi_roam_ev_arg arg = {};
+       int ret;
+       u32 vdev_id;
+       u32 reason;
+       s32 rssi;
+
+       ret = ath10k_wmi_pull_roam_ev(ar, skb, &arg);
+       if (ret) {
+               ath10k_warn(ar, "failed to parse roam event: %d\n", ret);
+               return;
+       }
+
+       vdev_id = __le32_to_cpu(arg.vdev_id);
+       reason = __le32_to_cpu(arg.reason);
+       rssi = __le32_to_cpu(arg.rssi);
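+       /* firmware reports rssi relative to a reference noise floor; add
+        * the reference level to get an (approximate) absolute dBm value
+        */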
+       rssi += WMI_SPECTRAL_NOISE_FLOOR_REF_DEFAULT;
+
+       ath10k_dbg(ar, ATH10K_DBG_WMI,
+                  "wmi roam event vdev %u reason 0x%08x rssi %d\n",
+                  vdev_id, reason, rssi);
+
+       if (reason >= WMI_ROAM_REASON_MAX)
+               ath10k_warn(ar, "ignoring unknown roam event reason %d on vdev %i\n",
+                           reason, vdev_id);
+
+       switch (reason) {
+       case WMI_ROAM_REASON_BEACON_MISS:
+               ath10k_mac_handle_beacon_miss(ar, vdev_id);
+               break;
+       case WMI_ROAM_REASON_BETTER_AP:
+       case WMI_ROAM_REASON_LOW_RSSI:
+       case WMI_ROAM_REASON_SUITABLE_AP_FOUND:
+       case WMI_ROAM_REASON_HO_FAILED:
+               ath10k_warn(ar, "ignoring not implemented roam event reason %d on vdev %i\n",
+                           reason, vdev_id);
+               break;
+       }
 }
 
 void ath10k_wmi_event_profile_match(struct ath10k *ar, struct sk_buff *skb)
@@ -2942,7 +2859,19 @@ void ath10k_wmi_event_rtt_error_report(struct ath10k *ar, struct sk_buff *skb)
 
 void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar, struct sk_buff *skb)
 {
-       ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_WOW_WAKEUP_HOST_EVENTID\n");
+       struct wmi_wow_ev_arg ev = {};
+       int ret;
+
+       complete(&ar->wow.wakeup_completed);
+
+       ret = ath10k_wmi_pull_wow_event(ar, skb, &ev);
+       if (ret) {
+               ath10k_warn(ar, "failed to parse wow wakeup event: %d\n", ret);
+               return;
+       }
+
+       ath10k_dbg(ar, ATH10K_DBG_WMI, "wow wakeup host reason %s\n",
+                  wow_reason(ev.wake_reason));
 }
 
 void ath10k_wmi_event_dcs_interference(struct ath10k *ar, struct sk_buff *skb)
@@ -3231,6 +3160,21 @@ static int ath10k_wmi_op_pull_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
        return 0;
 }
 
+static int ath10k_wmi_op_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
+                                     struct wmi_roam_ev_arg *arg)
+{
+       struct wmi_roam_ev *ev = (void *)skb->data;
+
+       if (skb->len < sizeof(*ev))
+               return -EPROTO;
+
+       skb_pull(skb, sizeof(*ev));
+       arg->vdev_id = ev->vdev_id;
+       arg->reason = ev->reason;
+
+       return 0;
+}
+
 int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb)
 {
        struct wmi_rdy_ev_arg arg = {};
@@ -3989,6 +3933,8 @@ static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar)
        cmd = (struct wmi_init_cmd_10_2 *)buf->data;
 
        features = WMI_10_2_RX_BATCH_MODE;
+       if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map))
+               features |= WMI_10_2_COEX_GPIO;
        cmd->resource_config.feature_mask = __cpu_to_le32(features);
 
        memcpy(&cmd->resource_config.common, &config, sizeof(config));
@@ -4315,8 +4261,6 @@ ath10k_wmi_op_gen_vdev_start(struct ath10k *ar,
        const char *cmdname;
        u32 flags = 0;
 
-       if (WARN_ON(arg->ssid && arg->ssid_len == 0))
-               return ERR_PTR(-EINVAL);
        if (WARN_ON(arg->hidden_ssid && !arg->ssid))
                return ERR_PTR(-EINVAL);
        if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
@@ -4539,7 +4483,8 @@ ath10k_wmi_op_gen_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id,
 
 static struct sk_buff *
 ath10k_wmi_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
-                             const u8 peer_addr[ETH_ALEN])
+                             const u8 peer_addr[ETH_ALEN],
+                             enum wmi_peer_type peer_type)
 {
        struct wmi_peer_create_cmd *cmd;
        struct sk_buff *skb;
@@ -5223,6 +5168,7 @@ static const struct wmi_ops wmi_ops = {
        .pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev,
        .pull_rdy = ath10k_wmi_op_pull_rdy_ev,
        .pull_fw_stats = ath10k_wmi_main_op_pull_fw_stats,
+       .pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
 
        .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
        .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
@@ -5268,6 +5214,7 @@ static const struct wmi_ops wmi_ops = {
        /* .gen_bcn_tmpl not implemented */
        /* .gen_prb_tmpl not implemented */
        /* .gen_p2p_go_bcn_ie not implemented */
+       /* .gen_adaptive_qcs not implemented */
 };
 
 static const struct wmi_ops wmi_10_1_ops = {
@@ -5290,6 +5237,7 @@ static const struct wmi_ops wmi_10_1_ops = {
        .pull_swba = ath10k_wmi_op_pull_swba_ev,
        .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
        .pull_rdy = ath10k_wmi_op_pull_rdy_ev,
+       .pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
 
        .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
        .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
@@ -5330,6 +5278,7 @@ static const struct wmi_ops wmi_10_1_ops = {
        /* .gen_bcn_tmpl not implemented */
        /* .gen_prb_tmpl not implemented */
        /* .gen_p2p_go_bcn_ie not implemented */
+       /* .gen_adaptive_qcs not implemented */
 };
 
 static const struct wmi_ops wmi_10_2_ops = {
@@ -5353,6 +5302,7 @@ static const struct wmi_ops wmi_10_2_ops = {
        .pull_swba = ath10k_wmi_op_pull_swba_ev,
        .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
        .pull_rdy = ath10k_wmi_op_pull_rdy_ev,
+       .pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
 
        .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
        .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
@@ -5413,6 +5363,7 @@ static const struct wmi_ops wmi_10_2_4_ops = {
        .pull_swba = ath10k_wmi_op_pull_swba_ev,
        .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
        .pull_rdy = ath10k_wmi_op_pull_rdy_ev,
+       .pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
 
        .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
        .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
@@ -5452,6 +5403,7 @@ static const struct wmi_ops wmi_10_2_4_ops = {
        /* .gen_bcn_tmpl not implemented */
        /* .gen_prb_tmpl not implemented */
        /* .gen_p2p_go_bcn_ie not implemented */
+       /* .gen_adaptive_qcs not implemented */
 };
 
 int ath10k_wmi_attach(struct ath10k *ar)
index adf935bf0580f488708688c4728aaab1f7325dc5..cad72ae76253362cf0e7f28ba9930a71814d35f4 100644 (file)
@@ -148,6 +148,8 @@ enum wmi_service {
        WMI_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT,
        WMI_SERVICE_MDNS_OFFLOAD,
        WMI_SERVICE_SAP_AUTH_OFFLOAD,
+       WMI_SERVICE_ATF,
+       WMI_SERVICE_COEX_GPIO,
 
        /* keep last */
        WMI_SERVICE_MAX,
@@ -177,6 +179,8 @@ enum wmi_10x_service {
        WMI_10X_SERVICE_SMART_ANTENNA_SW_SUPPORT,
        WMI_10X_SERVICE_FORCE_FW_HANG,
        WMI_10X_SERVICE_SMART_ANTENNA_HW_SUPPORT,
+       WMI_10X_SERVICE_ATF,
+       WMI_10X_SERVICE_COEX_GPIO,
 };
 
 enum wmi_main_service {
@@ -293,6 +297,8 @@ static inline char *wmi_service_name(int service_id)
        SVCSTR(WMI_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT);
        SVCSTR(WMI_SERVICE_MDNS_OFFLOAD);
        SVCSTR(WMI_SERVICE_SAP_AUTH_OFFLOAD);
+       SVCSTR(WMI_SERVICE_ATF);
+       SVCSTR(WMI_SERVICE_COEX_GPIO);
        default:
                return NULL;
        }
@@ -356,6 +362,10 @@ static inline void wmi_10x_svc_map(const __le32 *in, unsigned long *out,
               WMI_SERVICE_FORCE_FW_HANG, len);
        SVCMAP(WMI_10X_SERVICE_SMART_ANTENNA_HW_SUPPORT,
               WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT, len);
+       SVCMAP(WMI_10X_SERVICE_ATF,
+              WMI_SERVICE_ATF, len);
+       SVCMAP(WMI_10X_SERVICE_COEX_GPIO,
+              WMI_SERVICE_COEX_GPIO, len);
 }
 
 static inline void wmi_main_svc_map(const __le32 *in, unsigned long *out,
@@ -552,6 +562,9 @@ struct wmi_cmd_map {
        u32 gpio_output_cmdid;
        u32 pdev_get_temperature_cmdid;
        u32 vdev_set_wmm_params_cmdid;
+       u32 tdls_set_state_cmdid;
+       u32 tdls_peer_update_cmdid;
+       u32 adaptive_qcs_cmdid;
 };
 
 /*
@@ -1952,6 +1965,7 @@ struct wmi_resource_config_10x {
 enum wmi_10_2_feature_mask {
        WMI_10_2_RX_BATCH_MODE = BIT(0),
        WMI_10_2_ATF_CONFIG    = BIT(1),
+       WMI_10_2_COEX_GPIO     = BIT(3),
 };
 
 struct wmi_resource_config_10_2 {
@@ -2166,6 +2180,7 @@ struct wmi_start_scan_arg {
        u32 max_scan_time;
        u32 probe_delay;
        u32 scan_ctrl_flags;
+       u32 burst_duration_ms;
 
        u32 ie_len;
        u32 n_channels;
@@ -4333,6 +4348,12 @@ struct wmi_peer_create_cmd {
        struct wmi_mac_addr peer_macaddr;
 } __packed;
 
+enum wmi_peer_type {
+       WMI_PEER_TYPE_DEFAULT = 0,
+       WMI_PEER_TYPE_BSS = 1,
+       WMI_PEER_TYPE_TDLS = 2,
+};
+
 struct wmi_peer_delete_cmd {
        __le32 vdev_id;
        struct wmi_mac_addr peer_macaddr;
@@ -4644,9 +4665,7 @@ struct wmi_peer_sta_kickout_event {
 } __packed;
 
 #define WMI_CHAN_INFO_FLAG_COMPLETE BIT(0)
-
-/* FIXME: empirically extrapolated */
-#define WMI_CHAN_INFO_MSEC(x) ((x) / 76595)
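+/* the cycle counters appear to tick at 88 MHz, i.e. 88000 per millisecond */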
+#define WMI_CHAN_INFO_MSEC(x) ((x) / 88000)
 
 /* Beacon filter wmi command info */
 #define BCN_FLT_MAX_SUPPORTED_IES      256
@@ -4769,6 +4788,22 @@ struct wmi_dbglog_cfg_cmd {
        __le32 config_valid;
 } __packed;
 
+enum wmi_roam_reason {
+       WMI_ROAM_REASON_BETTER_AP = 1,
+       WMI_ROAM_REASON_BEACON_MISS = 2,
+       WMI_ROAM_REASON_LOW_RSSI = 3,
+       WMI_ROAM_REASON_SUITABLE_AP_FOUND = 4,
+       WMI_ROAM_REASON_HO_FAILED = 5,
+
+       /* keep last */
+       WMI_ROAM_REASON_MAX,
+};
+
+struct wmi_roam_ev {
+       __le32 vdev_id;
+       __le32 reason;
+} __packed;
+
 #define ATH10K_FRAGMT_THRESHOLD_MIN    540
 #define ATH10K_FRAGMT_THRESHOLD_MAX    2346
 
@@ -4857,11 +4892,200 @@ struct wmi_rdy_ev_arg {
        const u8 *mac_addr;
 };
 
+struct wmi_roam_ev_arg {
+       __le32 vdev_id;
+       __le32 reason;
+       __le32 rssi;
+};
+
 struct wmi_pdev_temperature_event {
        /* temperature value in degrees Celsius */
        __le32 temperature;
 } __packed;
 
+/* WOW structures */
+enum wmi_wow_wakeup_event {
+       WOW_BMISS_EVENT = 0,
+       WOW_BETTER_AP_EVENT,
+       WOW_DEAUTH_RECVD_EVENT,
+       WOW_MAGIC_PKT_RECVD_EVENT,
+       WOW_GTK_ERR_EVENT,
+       WOW_FOURWAY_HSHAKE_EVENT,
+       WOW_EAPOL_RECVD_EVENT,
+       WOW_NLO_DETECTED_EVENT,
+       WOW_DISASSOC_RECVD_EVENT,
+       WOW_PATTERN_MATCH_EVENT,
+       WOW_CSA_IE_EVENT,
+       WOW_PROBE_REQ_WPS_IE_EVENT,
+       WOW_AUTH_REQ_EVENT,
+       WOW_ASSOC_REQ_EVENT,
+       WOW_HTT_EVENT,
+       WOW_RA_MATCH_EVENT,
+       WOW_HOST_AUTO_SHUTDOWN_EVENT,
+       WOW_IOAC_MAGIC_EVENT,
+       WOW_IOAC_SHORT_EVENT,
+       WOW_IOAC_EXTEND_EVENT,
+       WOW_IOAC_TIMER_EVENT,
+       WOW_DFS_PHYERR_RADAR_EVENT,
+       WOW_BEACON_EVENT,
+       WOW_CLIENT_KICKOUT_EVENT,
+       WOW_EVENT_MAX,
+};
+
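+/* C2S ("case to string") expands an enum constant into its own name for
+ * the debug string helpers below
+ */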
+#define C2S(x) case x: return #x
+
+static inline const char *wow_wakeup_event(enum wmi_wow_wakeup_event ev)
+{
+       switch (ev) {
+       C2S(WOW_BMISS_EVENT);
+       C2S(WOW_BETTER_AP_EVENT);
+       C2S(WOW_DEAUTH_RECVD_EVENT);
+       C2S(WOW_MAGIC_PKT_RECVD_EVENT);
+       C2S(WOW_GTK_ERR_EVENT);
+       C2S(WOW_FOURWAY_HSHAKE_EVENT);
+       C2S(WOW_EAPOL_RECVD_EVENT);
+       C2S(WOW_NLO_DETECTED_EVENT);
+       C2S(WOW_DISASSOC_RECVD_EVENT);
+       C2S(WOW_PATTERN_MATCH_EVENT);
+       C2S(WOW_CSA_IE_EVENT);
+       C2S(WOW_PROBE_REQ_WPS_IE_EVENT);
+       C2S(WOW_AUTH_REQ_EVENT);
+       C2S(WOW_ASSOC_REQ_EVENT);
+       C2S(WOW_HTT_EVENT);
+       C2S(WOW_RA_MATCH_EVENT);
+       C2S(WOW_HOST_AUTO_SHUTDOWN_EVENT);
+       C2S(WOW_IOAC_MAGIC_EVENT);
+       C2S(WOW_IOAC_SHORT_EVENT);
+       C2S(WOW_IOAC_EXTEND_EVENT);
+       C2S(WOW_IOAC_TIMER_EVENT);
+       C2S(WOW_DFS_PHYERR_RADAR_EVENT);
+       C2S(WOW_BEACON_EVENT);
+       C2S(WOW_CLIENT_KICKOUT_EVENT);
+       C2S(WOW_EVENT_MAX);
+       default:
+               return NULL;
+       }
+}
+
+enum wmi_wow_wake_reason {
+       WOW_REASON_UNSPECIFIED = -1,
+       WOW_REASON_NLOD = 0,
+       WOW_REASON_AP_ASSOC_LOST,
+       WOW_REASON_LOW_RSSI,
+       WOW_REASON_DEAUTH_RECVD,
+       WOW_REASON_DISASSOC_RECVD,
+       WOW_REASON_GTK_HS_ERR,
+       WOW_REASON_EAP_REQ,
+       WOW_REASON_FOURWAY_HS_RECV,
+       WOW_REASON_TIMER_INTR_RECV,
+       WOW_REASON_PATTERN_MATCH_FOUND,
+       WOW_REASON_RECV_MAGIC_PATTERN,
+       WOW_REASON_P2P_DISC,
+       WOW_REASON_WLAN_HB,
+       WOW_REASON_CSA_EVENT,
+       WOW_REASON_PROBE_REQ_WPS_IE_RECV,
+       WOW_REASON_AUTH_REQ_RECV,
+       WOW_REASON_ASSOC_REQ_RECV,
+       WOW_REASON_HTT_EVENT,
+       WOW_REASON_RA_MATCH,
+       WOW_REASON_HOST_AUTO_SHUTDOWN,
+       WOW_REASON_IOAC_MAGIC_EVENT,
+       WOW_REASON_IOAC_SHORT_EVENT,
+       WOW_REASON_IOAC_EXTEND_EVENT,
+       WOW_REASON_IOAC_TIMER_EVENT,
+       WOW_REASON_ROAM_HO,
+       WOW_REASON_DFS_PHYERR_RADADR_EVENT,
+       WOW_REASON_BEACON_RECV,
+       WOW_REASON_CLIENT_KICKOUT_EVENT,
+       WOW_REASON_DEBUG_TEST = 0xFF,
+};
+
+static inline const char *wow_reason(enum wmi_wow_wake_reason reason)
+{
+       switch (reason) {
+       C2S(WOW_REASON_UNSPECIFIED);
+       C2S(WOW_REASON_NLOD);
+       C2S(WOW_REASON_AP_ASSOC_LOST);
+       C2S(WOW_REASON_LOW_RSSI);
+       C2S(WOW_REASON_DEAUTH_RECVD);
+       C2S(WOW_REASON_DISASSOC_RECVD);
+       C2S(WOW_REASON_GTK_HS_ERR);
+       C2S(WOW_REASON_EAP_REQ);
+       C2S(WOW_REASON_FOURWAY_HS_RECV);
+       C2S(WOW_REASON_TIMER_INTR_RECV);
+       C2S(WOW_REASON_PATTERN_MATCH_FOUND);
+       C2S(WOW_REASON_RECV_MAGIC_PATTERN);
+       C2S(WOW_REASON_P2P_DISC);
+       C2S(WOW_REASON_WLAN_HB);
+       C2S(WOW_REASON_CSA_EVENT);
+       C2S(WOW_REASON_PROBE_REQ_WPS_IE_RECV);
+       C2S(WOW_REASON_AUTH_REQ_RECV);
+       C2S(WOW_REASON_ASSOC_REQ_RECV);
+       C2S(WOW_REASON_HTT_EVENT);
+       C2S(WOW_REASON_RA_MATCH);
+       C2S(WOW_REASON_HOST_AUTO_SHUTDOWN);
+       C2S(WOW_REASON_IOAC_MAGIC_EVENT);
+       C2S(WOW_REASON_IOAC_SHORT_EVENT);
+       C2S(WOW_REASON_IOAC_EXTEND_EVENT);
+       C2S(WOW_REASON_IOAC_TIMER_EVENT);
+       C2S(WOW_REASON_ROAM_HO);
+       C2S(WOW_REASON_DFS_PHYERR_RADADR_EVENT);
+       C2S(WOW_REASON_BEACON_RECV);
+       C2S(WOW_REASON_CLIENT_KICKOUT_EVENT);
+       C2S(WOW_REASON_DEBUG_TEST);
+       default:
+               return NULL;
+       }
+}
+
+#undef C2S
+
+struct wmi_wow_ev_arg {
+       u32 vdev_id;
+       u32 flag;
+       enum wmi_wow_wake_reason wake_reason;
+       u32 data_len;
+};
+
+#define WOW_MIN_PATTERN_SIZE   1
+#define WOW_MAX_PATTERN_SIZE   148
+#define WOW_MAX_PKT_OFFSET     128
+
+enum wmi_tdls_state {
+       WMI_TDLS_DISABLE,
+       WMI_TDLS_ENABLE_PASSIVE,
+       WMI_TDLS_ENABLE_ACTIVE,
+};
+
+enum wmi_tdls_peer_state {
+       WMI_TDLS_PEER_STATE_PEERING,
+       WMI_TDLS_PEER_STATE_CONNECTED,
+       WMI_TDLS_PEER_STATE_TEARDOWN,
+};
+
+struct wmi_tdls_peer_update_cmd_arg {
+       u32 vdev_id;
+       enum wmi_tdls_peer_state peer_state;
+       u8 addr[ETH_ALEN];
+};
+
+#define WMI_TDLS_MAX_SUPP_OPER_CLASSES 32
+
+struct wmi_tdls_peer_capab_arg {
+       u8 peer_uapsd_queues;
+       u8 peer_max_sp;
+       u32 buff_sta_support;
+       u32 off_chan_support;
+       u32 peer_curr_operclass;
+       u32 self_curr_operclass;
+       u32 peer_chan_len;
+       u32 peer_operclass_len;
+       u8 peer_operclass[WMI_TDLS_MAX_SUPP_OPER_CLASSES];
+       u32 is_peer_responder;
+       u32 pref_offchan_num;
+       u32 pref_offchan_bw;
+};
+
 struct ath10k;
 struct ath10k_vif;
 struct ath10k_fw_stats_pdev;
diff --git a/drivers/net/wireless/ath/ath10k/wow.c b/drivers/net/wireless/ath/ath10k/wow.c
new file mode 100644 (file)
index 0000000..a68d8fd
--- /dev/null
@@ -0,0 +1,321 @@
+/*
+ * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mac.h"
+
+#include <net/mac80211.h>
+#include "hif.h"
+#include "core.h"
+#include "debug.h"
+#include "wmi.h"
+#include "wmi-ops.h"
+
+static const struct wiphy_wowlan_support ath10k_wowlan_support = {
+       .flags = WIPHY_WOWLAN_DISCONNECT |
+                WIPHY_WOWLAN_MAGIC_PKT,
+       .pattern_min_len = WOW_MIN_PATTERN_SIZE,
+       .pattern_max_len = WOW_MAX_PATTERN_SIZE,
+       .max_pkt_offset = WOW_MAX_PKT_OFFSET,
+};
+
+static int ath10k_wow_vif_cleanup(struct ath10k_vif *arvif)
+{
+       struct ath10k *ar = arvif->ar;
+       int i, ret;
+
+       for (i = 0; i < WOW_EVENT_MAX; i++) {
+               ret = ath10k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 0);
+               if (ret) {
+                       ath10k_warn(ar, "failed to issue wow wakeup for event %s on vdev %i: %d\n",
+                                   wow_wakeup_event(i), arvif->vdev_id, ret);
+                       return ret;
+               }
+       }
+
+       for (i = 0; i < ar->wow.max_num_patterns; i++) {
+               ret = ath10k_wmi_wow_del_pattern(ar, arvif->vdev_id, i);
+               if (ret) {
+                       ath10k_warn(ar, "failed to delete wow pattern %d for vdev %i: %d\n",
+                                   i, arvif->vdev_id, ret);
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
+static int ath10k_wow_cleanup(struct ath10k *ar)
+{
+       struct ath10k_vif *arvif;
+       int ret;
+
+       lockdep_assert_held(&ar->conf_mutex);
+
+       list_for_each_entry(arvif, &ar->arvifs, list) {
+               ret = ath10k_wow_vif_cleanup(arvif);
+               if (ret) {
+                       ath10k_warn(ar, "failed to clean wow wakeups on vdev %i: %d\n",
+                                   arvif->vdev_id, ret);
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
+static int ath10k_vif_wow_set_wakeups(struct ath10k_vif *arvif,
+                                     struct cfg80211_wowlan *wowlan)
+{
+       int ret, i;
+       unsigned long wow_mask = 0;
+       struct ath10k *ar = arvif->ar;
+       const struct cfg80211_pkt_pattern *patterns = wowlan->patterns;
+       int pattern_id = 0;
+
+       /* Setup requested WOW features */
+       switch (arvif->vdev_type) {
+       case WMI_VDEV_TYPE_IBSS:
+               __set_bit(WOW_BEACON_EVENT, &wow_mask);
+               /* fall through */
+       case WMI_VDEV_TYPE_AP:
+               __set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
+               __set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
+               __set_bit(WOW_PROBE_REQ_WPS_IE_EVENT, &wow_mask);
+               __set_bit(WOW_AUTH_REQ_EVENT, &wow_mask);
+               __set_bit(WOW_ASSOC_REQ_EVENT, &wow_mask);
+               __set_bit(WOW_HTT_EVENT, &wow_mask);
+               __set_bit(WOW_RA_MATCH_EVENT, &wow_mask);
+               break;
+       case WMI_VDEV_TYPE_STA:
+               if (wowlan->disconnect) {
+                       __set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
+                       __set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
+                       __set_bit(WOW_BMISS_EVENT, &wow_mask);
+                       __set_bit(WOW_CSA_IE_EVENT, &wow_mask);
+               }
+
+               if (wowlan->magic_pkt)
+                       __set_bit(WOW_MAGIC_PKT_RECVD_EVENT, &wow_mask);
+               break;
+       default:
+               break;
+       }
+
+       for (i = 0; i < wowlan->n_patterns; i++) {
+               u8 bitmask[WOW_MAX_PATTERN_SIZE] = {};
+               int j;
+
+               if (patterns[i].pattern_len > WOW_MAX_PATTERN_SIZE)
+                       continue;
+
+               /* convert bytemask to bitmask */
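+               /* (e.g. a mask byte of 0x05 marks pattern bytes 0 and 2 as
+                * significant, expanding to bitmask[0] = bitmask[2] = 0xff)
+                */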
+               for (j = 0; j < patterns[i].pattern_len; j++)
+                       if (patterns[i].mask[j / 8] & BIT(j % 8))
+                               bitmask[j] = 0xff;
+
+               ret = ath10k_wmi_wow_add_pattern(ar, arvif->vdev_id,
+                                                pattern_id,
+                                                patterns[i].pattern,
+                                                bitmask,
+                                                patterns[i].pattern_len,
+                                                patterns[i].pkt_offset);
+               if (ret) {
+                       ath10k_warn(ar, "failed to add pattern %i to vdev %i: %d\n",
+                                   pattern_id,
+                                   arvif->vdev_id, ret);
+                       return ret;
+               }
+
+               pattern_id++;
+               __set_bit(WOW_PATTERN_MATCH_EVENT, &wow_mask);
+       }
+
+       for (i = 0; i < WOW_EVENT_MAX; i++) {
+               if (!test_bit(i, &wow_mask))
+                       continue;
+               ret = ath10k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 1);
+               if (ret) {
+                       ath10k_warn(ar, "failed to enable wakeup event %s on vdev %i: %d\n",
+                                   wow_wakeup_event(i), arvif->vdev_id, ret);
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
+static int ath10k_wow_set_wakeups(struct ath10k *ar,
+                                 struct cfg80211_wowlan *wowlan)
+{
+       struct ath10k_vif *arvif;
+       int ret;
+
+       lockdep_assert_held(&ar->conf_mutex);
+
+       list_for_each_entry(arvif, &ar->arvifs, list) {
+               ret = ath10k_vif_wow_set_wakeups(arvif, wowlan);
+               if (ret) {
+                       ath10k_warn(ar, "failed to set wow wakeups on vdev %i: %d\n",
+                                   arvif->vdev_id, ret);
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
+static int ath10k_wow_enable(struct ath10k *ar)
+{
+       int ret;
+
+       lockdep_assert_held(&ar->conf_mutex);
+
+       reinit_completion(&ar->target_suspend);
+
+       ret = ath10k_wmi_wow_enable(ar);
+       if (ret) {
+               ath10k_warn(ar, "failed to issue wow enable: %d\n", ret);
+               return ret;
+       }
+
+       ret = wait_for_completion_timeout(&ar->target_suspend, 3 * HZ);
+       if (ret == 0) {
+               ath10k_warn(ar, "timed out while waiting for suspend completion\n");
+               return -ETIMEDOUT;
+       }
+
+       return 0;
+}
+
+static int ath10k_wow_wakeup(struct ath10k *ar)
+{
+       int ret;
+
+       lockdep_assert_held(&ar->conf_mutex);
+
+       reinit_completion(&ar->wow.wakeup_completed);
+
+       ret = ath10k_wmi_wow_host_wakeup_ind(ar);
+       if (ret) {
+               ath10k_warn(ar, "failed to send wow wakeup indication: %d\n",
+                           ret);
+               return ret;
+       }
+
+       ret = wait_for_completion_timeout(&ar->wow.wakeup_completed, 3 * HZ);
+       if (ret == 0) {
+               ath10k_warn(ar, "timed out while waiting for wow wakeup completion\n");
+               return -ETIMEDOUT;
+       }
+
+       return 0;
+}
+
+int ath10k_wow_op_suspend(struct ieee80211_hw *hw,
+                         struct cfg80211_wowlan *wowlan)
+{
+       struct ath10k *ar = hw->priv;
+       int ret;
+
+       mutex_lock(&ar->conf_mutex);
+
+       if (WARN_ON(!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
+                             ar->fw_features))) {
+               ret = 1;
+               goto exit;
+       }
+
+       ret =  ath10k_wow_cleanup(ar);
+       if (ret) {
+               ath10k_warn(ar, "failed to clear wow wakeup events: %d\n",
+                           ret);
+               goto exit;
+       }
+
+       ret = ath10k_wow_set_wakeups(ar, wowlan);
+       if (ret) {
+               ath10k_warn(ar, "failed to set wow wakeup events: %d\n",
+                           ret);
+               goto cleanup;
+       }
+
+       ret = ath10k_wow_enable(ar);
+       if (ret) {
+               ath10k_warn(ar, "failed to start wow: %d\n", ret);
+               goto cleanup;
+       }
+
+       ret = ath10k_hif_suspend(ar);
+       if (ret) {
+               ath10k_warn(ar, "failed to suspend hif: %d\n", ret);
+               goto wakeup;
+       }
+
+       goto exit;
+
+wakeup:
+       ath10k_wow_wakeup(ar);
+
+cleanup:
+       ath10k_wow_cleanup(ar);
+
+exit:
+       mutex_unlock(&ar->conf_mutex);
+       return ret ? 1 : 0;
+}
+
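The suspend path above unwinds in reverse setup order: a failure after ath10k_wow_enable() must first wake the target (the wakeup: label) before clearing the wakeup events (cleanup:). The final `return ret ? 1 : 0` follows mac80211's suspend convention, where returning 1 rather than a negative errno signals that WoWLAN could not be entered and a normal teardown should happen instead. The generic goto-unwind idiom, as a sketch:

    ret = setup_a();
    if (ret)
            goto out;
    ret = setup_b();
    if (ret)
            goto undo_a;    /* labels mirror the setup order, reversed */
    ret = setup_c();
    if (ret)
            goto undo_b;
    goto out;

    undo_b:
            teardown_b();
    undo_a:
            teardown_a();
    out:
            return ret;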
+int ath10k_wow_op_resume(struct ieee80211_hw *hw)
+{
+       struct ath10k *ar = hw->priv;
+       int ret;
+
+       mutex_lock(&ar->conf_mutex);
+
+       if (WARN_ON(!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
+                             ar->fw_features))) {
+               ret = 1;
+               goto exit;
+       }
+
+       ret = ath10k_hif_resume(ar);
+       if (ret) {
+               ath10k_warn(ar, "failed to resume hif: %d\n", ret);
+               goto exit;
+       }
+
+       ret = ath10k_wow_wakeup(ar);
+       if (ret)
+               ath10k_warn(ar, "failed to wakeup from wow: %d\n", ret);
+
+exit:
+       mutex_unlock(&ar->conf_mutex);
+       return ret ? 1 : 0;
+}
+
+int ath10k_wow_init(struct ath10k *ar)
+{
+       if (!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT, ar->fw_features))
+               return 0;
+
+       if (WARN_ON(!test_bit(WMI_SERVICE_WOW, ar->wmi.svc_map)))
+               return -EINVAL;
+
+       ar->wow.wowlan_support = ath10k_wowlan_support;
+       ar->wow.wowlan_support.n_patterns = ar->wow.max_num_patterns;
+       ar->hw->wiphy->wowlan = &ar->wow.wowlan_support;
+
+       return 0;
+}
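ath10k_wow_init() advertises support by copying a static template (ath10k_wowlan_support, defined earlier in the file), patching in the firmware-reported pattern count, and pointing wiphy->wowlan at the per-device copy; copying is what lets n_patterns vary per device while the template stays const. A hedged sketch of what such a template typically contains (the flags and limits here are illustrative assumptions, not ath10k's actual values):

    static const struct wiphy_wowlan_support example_wowlan_support = {
            .flags = WIPHY_WOWLAN_DISCONNECT |
                     WIPHY_WOWLAN_MAGIC_PKT,
            .pattern_min_len = 1,   /* assumed limits */
            .pattern_max_len = 148,
    };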
diff --git a/drivers/net/wireless/ath/ath10k/wow.h b/drivers/net/wireless/ath/ath10k/wow.h
new file mode 100644 (file)
index 0000000..abbb04b
--- /dev/null
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef _WOW_H_
+#define _WOW_H_
+
+struct ath10k_wow {
+       u32 max_num_patterns;
+       struct completion wakeup_completed;
+       struct wiphy_wowlan_support wowlan_support;
+};
+
+#ifdef CONFIG_PM
+
+int ath10k_wow_init(struct ath10k *ar);
+int ath10k_wow_op_suspend(struct ieee80211_hw *hw,
+                         struct cfg80211_wowlan *wowlan);
+int ath10k_wow_op_resume(struct ieee80211_hw *hw);
+
+#else
+
+static inline int ath10k_wow_init(struct ath10k *ar)
+{
+       return 0;
+}
+
+#endif /* CONFIG_PM */
+#endif /* _WOW_H_ */
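Only ath10k_wow_init() gets a !CONFIG_PM stub; the suspend/resume handlers need none, presumably because they are referenced solely from an ops table that is guarded the same way, along the lines of:

    #ifdef CONFIG_PM
            .suspend = ath10k_wow_op_suspend,
            .resume  = ath10k_wow_op_resume,
    #endif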
index 7ca0d6f930fd2e6e11021ac8a5d09ef002e80fc5..e22b0e778927155ed693ad84c177b9a6c8d03715 100644 (file)
@@ -1280,7 +1280,6 @@ struct ath5k_hw {
 
        DECLARE_BITMAP(status, 4);
 #define ATH_STAT_INVALID       0               /* disable hardware accesses */
-#define ATH_STAT_PROMISC       1
 #define ATH_STAT_LEDSOFT       2               /* enable LED gpio status */
 #define ATH_STAT_STARTED       3               /* opened & irqs enabled */
 #define ATH_STAT_RESET         4               /* hw reset */
index ca4b7ccd697fd81b06112b6c5b2e06a83a7f3f27..803030fd17d3b9203dd74535da40fe1de08284d6 100644 (file)
@@ -124,7 +124,7 @@ ath5k_led_brightness_set(struct led_classdev *led_dev,
 
 static int
 ath5k_register_led(struct ath5k_hw *ah, struct ath5k_led *led,
-                  const char *name, char *trigger)
+                  const char *name, const char *trigger)
 {
        int err;
 
index 3b4a6463d87a336c7a6f5bb74c07c13ae30e718c..dc44cfef75176e1710f6622fee85f1f5e9b97e11 100644 (file)
@@ -369,7 +369,7 @@ ath5k_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
                       unsigned int *new_flags, u64 multicast)
 {
 #define SUPPORTED_FIF_FLAGS \
-       (FIF_PROMISC_IN_BSS |  FIF_ALLMULTI | FIF_FCSFAIL | \
+       (FIF_ALLMULTI | FIF_FCSFAIL | \
        FIF_PLCPFAIL | FIF_CONTROL | FIF_OTHER_BSS | \
        FIF_BCN_PRBRESP_PROMISC)
 
@@ -393,16 +393,6 @@ ath5k_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
                (AR5K_RX_FILTER_UCAST | AR5K_RX_FILTER_BCAST |
                AR5K_RX_FILTER_MCAST);
 
-       if (changed_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS)) {
-               if (*new_flags & FIF_PROMISC_IN_BSS)
-                       __set_bit(ATH_STAT_PROMISC, ah->status);
-               else
-                       __clear_bit(ATH_STAT_PROMISC, ah->status);
-       }
-
-       if (test_bit(ATH_STAT_PROMISC, ah->status))
-               rfilt |= AR5K_RX_FILTER_PROM;
-
        /* Note, AR5K_RX_FILTER_MCAST is already enabled */
        if (*new_flags & FIF_ALLMULTI) {
                mfilt[0] =  ~0;
@@ -418,8 +408,7 @@ ath5k_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
        if ((*new_flags & FIF_BCN_PRBRESP_PROMISC) || (ah->nvifs > 1))
                rfilt |= AR5K_RX_FILTER_BEACON;
 
-       /* FIF_CONTROL doc says that if FIF_PROMISC_IN_BSS is not
-        * set we should only pass on control frames for this
+       /* FIF_CONTROL doc says we should only pass on control frames for this
         * station. This needs testing. I believe right now this
         * enables *all* control frames, which is OK, but
         * we should see if we can improve on granularity */
@@ -809,7 +798,6 @@ const struct ieee80211_ops ath5k_hw_ops = {
        .sw_scan_start          = ath5k_sw_scan_start,
        .sw_scan_complete       = ath5k_sw_scan_complete,
        .get_stats              = ath5k_get_stats,
-       /* .get_tkip_seq        = not implemented */
        /* .set_frag_threshold  = not implemented */
        /* .set_rts_threshold   = not implemented */
        /* .sta_add             = not implemented */
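This hunk, together with the SUPPORTED_FILTERS and rx-filter hunks in the ath9k, ath9k_htc and carl9170 files below, drops FIF_PROMISC_IN_BSS throughout: mac80211 no longer offers in-BSS promiscuous mode to drivers, so the flag disappears from the supported-filter masks along with the ATH_STAT_PROMISC plumbing that was keyed off it.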
index cce4625a53ad7eb630bef4b717cc9fe0177d96fc..a511ef3614b9ed098558d1d86a9fec88b2330235 100644 (file)
@@ -889,7 +889,7 @@ void ath6kl_cfg80211_disconnect_event(struct ath6kl_vif *vif, u8 reason,
                                        GFP_KERNEL);
        } else if (vif->sme_state == SME_CONNECTED) {
                cfg80211_disconnected(vif->ndev, proto_reason,
-                                     NULL, 0, GFP_KERNEL);
+                                     NULL, 0, false, GFP_KERNEL);
        }
 
        vif->sme_state = SME_DISCONNECTED;
@@ -3467,7 +3467,7 @@ void ath6kl_cfg80211_stop(struct ath6kl_vif *vif)
                                        GFP_KERNEL);
                break;
        case SME_CONNECTED:
-               cfg80211_disconnected(vif->ndev, 0, NULL, 0, GFP_KERNEL);
+               cfg80211_disconnected(vif->ndev, 0, NULL, 0, true, GFP_KERNEL);
                break;
        }
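Both ath6kl hunks in this file track an API change: cfg80211_disconnected() gained a locally_generated flag, false when the peer or firmware dropped the link (the disconnect-event path earlier) and true when the driver itself tears the connection down (the interface-stop path above). The updated signature, for reference:

    void cfg80211_disconnected(struct net_device *dev, u16 reason,
                               const u8 *ie, size_t ie_len,
                               bool locally_generated, gfp_t gfp);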
 
index 6c23d279525f5dfa8671342b3c30b25163b6e0ba..8f8793004b9f021c7f689e13dabcf32f10e2c3a0 100644 (file)
@@ -254,86 +254,25 @@ static int ar5008_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
        return 0;
 }
 
-/**
- * ar5008_hw_spur_mitigate - convert baseband spur frequency for external radios
- * @ah: atheros hardware structure
- * @chan:
- *
- * For non single-chip solutions. Converts to baseband spur frequency given the
- * input channel frequency and compute register settings below.
- */
-static void ar5008_hw_spur_mitigate(struct ath_hw *ah,
-                                   struct ath9k_channel *chan)
+void ar5008_hw_cmn_spur_mitigate(struct ath_hw *ah,
+                         struct ath9k_channel *chan, int bin)
 {
-       int bb_spur = AR_NO_SPUR;
-       int bin, cur_bin;
-       int spur_freq_sd;
-       int spur_delta_phase;
-       int denominator;
+       int cur_bin;
        int upper, lower, cur_vit_mask;
-       int tmp, new;
        int i;
-       static int pilot_mask_reg[4] = {
+       int8_t mask_m[123];
+       int8_t mask_p[123];
+       int8_t mask_amt;
+       int tmp_mask;
+       static const int pilot_mask_reg[4] = {
                AR_PHY_TIMING7, AR_PHY_TIMING8,
                AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
        };
-       static int chan_mask_reg[4] = {
+       static const int chan_mask_reg[4] = {
                AR_PHY_TIMING9, AR_PHY_TIMING10,
                AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
        };
-       static int inc[4] = { 0, 100, 0, 0 };
-
-       int8_t mask_m[123];
-       int8_t mask_p[123];
-       int8_t mask_amt;
-       int tmp_mask;
-       int cur_bb_spur;
-       bool is2GHz = IS_CHAN_2GHZ(chan);
-
-       memset(&mask_m, 0, sizeof(int8_t) * 123);
-       memset(&mask_p, 0, sizeof(int8_t) * 123);
-
-       for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
-               cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
-               if (AR_NO_SPUR == cur_bb_spur)
-                       break;
-               cur_bb_spur = cur_bb_spur - (chan->channel * 10);
-               if ((cur_bb_spur > -95) && (cur_bb_spur < 95)) {
-                       bb_spur = cur_bb_spur;
-                       break;
-               }
-       }
-
-       if (AR_NO_SPUR == bb_spur)
-               return;
-
-       bin = bb_spur * 32;
-
-       tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0));
-       new = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI |
-                    AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER |
-                    AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK |
-                    AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK);
-
-       REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), new);
-
-       new = (AR_PHY_SPUR_REG_MASK_RATE_CNTL |
-              AR_PHY_SPUR_REG_ENABLE_MASK_PPM |
-              AR_PHY_SPUR_REG_MASK_RATE_SELECT |
-              AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI |
-              SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH));
-       REG_WRITE(ah, AR_PHY_SPUR_REG, new);
-
-       spur_delta_phase = ((bb_spur * 524288) / 100) &
-               AR_PHY_TIMING11_SPUR_DELTA_PHASE;
-
-       denominator = IS_CHAN_2GHZ(chan) ? 440 : 400;
-       spur_freq_sd = ((bb_spur * 2048) / denominator) & 0x3ff;
-
-       new = (AR_PHY_TIMING11_USE_SPUR_IN_AGC |
-              SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) |
-              SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE));
-       REG_WRITE(ah, AR_PHY_TIMING11, new);
+       static const int inc[4] = { 0, 100, 0, 0 };
 
        cur_bin = -6000;
        upper = bin + 100;
@@ -343,6 +282,7 @@ static void ar5008_hw_spur_mitigate(struct ath_hw *ah,
                int pilot_mask = 0;
                int chan_mask = 0;
                int bp = 0;
+
                for (bp = 0; bp < 30; bp++) {
                        if ((cur_bin > lower) && (cur_bin < upper)) {
                                pilot_mask = pilot_mask | 0x1 << bp;
@@ -361,7 +301,6 @@ static void ar5008_hw_spur_mitigate(struct ath_hw *ah,
 
        for (i = 0; i < 123; i++) {
                if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
-
                        /* workaround for gcc bug #37014 */
                        volatile int tmp_v = abs(cur_vit_mask - bin);
 
@@ -466,6 +405,78 @@ static void ar5008_hw_spur_mitigate(struct ath_hw *ah,
        REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
 }
 
+/**
+ * ar5008_hw_spur_mitigate - convert baseband spur frequency for external radios
+ * @ah: atheros hardware structure
+ * @chan: channel to operate on
+ *
+ * For non single-chip solutions. Converts to baseband spur frequency given the
+ * input channel frequency and computes the register settings below.
+ */
+static void ar5008_hw_spur_mitigate(struct ath_hw *ah,
+                                   struct ath9k_channel *chan)
+{
+       int bb_spur = AR_NO_SPUR;
+       int bin;
+       int spur_freq_sd;
+       int spur_delta_phase;
+       int denominator;
+       int tmp, new;
+       int i;
+
+       int8_t mask_m[123];
+       int8_t mask_p[123];
+       int cur_bb_spur;
+       bool is2GHz = IS_CHAN_2GHZ(chan);
+
+       memset(&mask_m, 0, sizeof(int8_t) * 123);
+       memset(&mask_p, 0, sizeof(int8_t) * 123);
+
+       for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
+               cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
+               if (AR_NO_SPUR == cur_bb_spur)
+                       break;
+               cur_bb_spur = cur_bb_spur - (chan->channel * 10);
+               if ((cur_bb_spur > -95) && (cur_bb_spur < 95)) {
+                       bb_spur = cur_bb_spur;
+                       break;
+               }
+       }
+
+       if (AR_NO_SPUR == bb_spur)
+               return;
+
+       bin = bb_spur * 32;
+
+       tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0));
+       new = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI |
+                    AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER |
+                    AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK |
+                    AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK);
+
+       REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), new);
+
+       new = (AR_PHY_SPUR_REG_MASK_RATE_CNTL |
+              AR_PHY_SPUR_REG_ENABLE_MASK_PPM |
+              AR_PHY_SPUR_REG_MASK_RATE_SELECT |
+              AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI |
+              SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH));
+       REG_WRITE(ah, AR_PHY_SPUR_REG, new);
+
+       spur_delta_phase = ((bb_spur * 524288) / 100) &
+               AR_PHY_TIMING11_SPUR_DELTA_PHASE;
+
+       denominator = IS_CHAN_2GHZ(chan) ? 440 : 400;
+       spur_freq_sd = ((bb_spur * 2048) / denominator) & 0x3ff;
+
+       new = (AR_PHY_TIMING11_USE_SPUR_IN_AGC |
+              SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) |
+              SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE));
+       REG_WRITE(ah, AR_PHY_TIMING11, new);
+
+       ar5008_hw_cmn_spur_mitigate(ah, chan, bin);
+}
+
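To make the register arithmetic above concrete, take a spur 2.5 MHz above the channel centre, i.e. bb_spur = +25 in the 100 kHz units used here (the comparison against chan->channel * 10 implies the EEPROM spur channels are in the same units). On a 2 GHz channel, before masking:

    bin              = 25 * 32             = 800
    spur_delta_phase = (25 * 524288) / 100 = 131072    /* 0x20000 */
    spur_freq_sd     = (25 * 2048) / 440   = 116       /* 2 GHz denominator */

bin = 800 is then what ar5008_hw_cmn_spur_mitigate() sweeps its +/-100 (pilot/channel mask) and +/-120 (Viterbi mask) windows around.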
 /**
  * ar5008_hw_rf_alloc_ext_banks - allocates banks for external radio programming
  * @ah: atheros hardware structure
index fc08162b58200143d5324e9825cab8384b6b5118..db6624527d9959d3ffd10a32f3fa76cb0b6dff83 100644 (file)
@@ -169,29 +169,17 @@ static void ar9002_hw_spur_mitigate(struct ath_hw *ah,
 {
        int bb_spur = AR_NO_SPUR;
        int freq;
-       int bin, cur_bin;
+       int bin;
        int bb_spur_off, spur_subchannel_sd;
        int spur_freq_sd;
        int spur_delta_phase;
        int denominator;
-       int upper, lower, cur_vit_mask;
        int tmp, newVal;
        int i;
-       static const int pilot_mask_reg[4] = {
-               AR_PHY_TIMING7, AR_PHY_TIMING8,
-               AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
-       };
-       static const int chan_mask_reg[4] = {
-               AR_PHY_TIMING9, AR_PHY_TIMING10,
-               AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
-       };
-       static const int inc[4] = { 0, 100, 0, 0 };
        struct chan_centers centers;
 
        int8_t mask_m[123];
        int8_t mask_p[123];
-       int8_t mask_amt;
-       int tmp_mask;
        int cur_bb_spur;
        bool is2GHz = IS_CHAN_2GHZ(chan);
 
@@ -288,135 +276,7 @@ static void ar9002_hw_spur_mitigate(struct ath_hw *ah,
        newVal = spur_subchannel_sd << AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S;
        REG_WRITE(ah, AR_PHY_SFCORR_EXT, newVal);
 
-       cur_bin = -6000;
-       upper = bin + 100;
-       lower = bin - 100;
-
-       for (i = 0; i < 4; i++) {
-               int pilot_mask = 0;
-               int chan_mask = 0;
-               int bp = 0;
-               for (bp = 0; bp < 30; bp++) {
-                       if ((cur_bin > lower) && (cur_bin < upper)) {
-                               pilot_mask = pilot_mask | 0x1 << bp;
-                               chan_mask = chan_mask | 0x1 << bp;
-                       }
-                       cur_bin += 100;
-               }
-               cur_bin += inc[i];
-               REG_WRITE(ah, pilot_mask_reg[i], pilot_mask);
-               REG_WRITE(ah, chan_mask_reg[i], chan_mask);
-       }
-
-       cur_vit_mask = 6100;
-       upper = bin + 120;
-       lower = bin - 120;
-
-       for (i = 0; i < 123; i++) {
-               if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
-
-                       /* workaround for gcc bug #37014 */
-                       volatile int tmp_v = abs(cur_vit_mask - bin);
-
-                       if (tmp_v < 75)
-                               mask_amt = 1;
-                       else
-                               mask_amt = 0;
-                       if (cur_vit_mask < 0)
-                               mask_m[abs(cur_vit_mask / 100)] = mask_amt;
-                       else
-                               mask_p[cur_vit_mask / 100] = mask_amt;
-               }
-               cur_vit_mask -= 100;
-       }
-
-       tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28)
-               | (mask_m[48] << 26) | (mask_m[49] << 24)
-               | (mask_m[50] << 22) | (mask_m[51] << 20)
-               | (mask_m[52] << 18) | (mask_m[53] << 16)
-               | (mask_m[54] << 14) | (mask_m[55] << 12)
-               | (mask_m[56] << 10) | (mask_m[57] << 8)
-               | (mask_m[58] << 6) | (mask_m[59] << 4)
-               | (mask_m[60] << 2) | (mask_m[61] << 0);
-       REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask);
-       REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask);
-
-       tmp_mask = (mask_m[31] << 28)
-               | (mask_m[32] << 26) | (mask_m[33] << 24)
-               | (mask_m[34] << 22) | (mask_m[35] << 20)
-               | (mask_m[36] << 18) | (mask_m[37] << 16)
-               | (mask_m[48] << 14) | (mask_m[39] << 12)
-               | (mask_m[40] << 10) | (mask_m[41] << 8)
-               | (mask_m[42] << 6) | (mask_m[43] << 4)
-               | (mask_m[44] << 2) | (mask_m[45] << 0);
-       REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask);
-       REG_WRITE(ah, AR_PHY_MASK2_M_31_45, tmp_mask);
-
-       tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28)
-               | (mask_m[18] << 26) | (mask_m[18] << 24)
-               | (mask_m[20] << 22) | (mask_m[20] << 20)
-               | (mask_m[22] << 18) | (mask_m[22] << 16)
-               | (mask_m[24] << 14) | (mask_m[24] << 12)
-               | (mask_m[25] << 10) | (mask_m[26] << 8)
-               | (mask_m[27] << 6) | (mask_m[28] << 4)
-               | (mask_m[29] << 2) | (mask_m[30] << 0);
-       REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask);
-       REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask);
-
-       tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28)
-               | (mask_m[2] << 26) | (mask_m[3] << 24)
-               | (mask_m[4] << 22) | (mask_m[5] << 20)
-               | (mask_m[6] << 18) | (mask_m[7] << 16)
-               | (mask_m[8] << 14) | (mask_m[9] << 12)
-               | (mask_m[10] << 10) | (mask_m[11] << 8)
-               | (mask_m[12] << 6) | (mask_m[13] << 4)
-               | (mask_m[14] << 2) | (mask_m[15] << 0);
-       REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask);
-       REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask);
-
-       tmp_mask = (mask_p[15] << 28)
-               | (mask_p[14] << 26) | (mask_p[13] << 24)
-               | (mask_p[12] << 22) | (mask_p[11] << 20)
-               | (mask_p[10] << 18) | (mask_p[9] << 16)
-               | (mask_p[8] << 14) | (mask_p[7] << 12)
-               | (mask_p[6] << 10) | (mask_p[5] << 8)
-               | (mask_p[4] << 6) | (mask_p[3] << 4)
-               | (mask_p[2] << 2) | (mask_p[1] << 0);
-       REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask);
-       REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask);
-
-       tmp_mask = (mask_p[30] << 28)
-               | (mask_p[29] << 26) | (mask_p[28] << 24)
-               | (mask_p[27] << 22) | (mask_p[26] << 20)
-               | (mask_p[25] << 18) | (mask_p[24] << 16)
-               | (mask_p[23] << 14) | (mask_p[22] << 12)
-               | (mask_p[21] << 10) | (mask_p[20] << 8)
-               | (mask_p[19] << 6) | (mask_p[18] << 4)
-               | (mask_p[17] << 2) | (mask_p[16] << 0);
-       REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask);
-       REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask);
-
-       tmp_mask = (mask_p[45] << 28)
-               | (mask_p[44] << 26) | (mask_p[43] << 24)
-               | (mask_p[42] << 22) | (mask_p[41] << 20)
-               | (mask_p[40] << 18) | (mask_p[39] << 16)
-               | (mask_p[38] << 14) | (mask_p[37] << 12)
-               | (mask_p[36] << 10) | (mask_p[35] << 8)
-               | (mask_p[34] << 6) | (mask_p[33] << 4)
-               | (mask_p[32] << 2) | (mask_p[31] << 0);
-       REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask);
-       REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask);
-
-       tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28)
-               | (mask_p[59] << 26) | (mask_p[58] << 24)
-               | (mask_p[57] << 22) | (mask_p[56] << 20)
-               | (mask_p[55] << 18) | (mask_p[54] << 16)
-               | (mask_p[53] << 14) | (mask_p[52] << 12)
-               | (mask_p[51] << 10) | (mask_p[50] << 8)
-               | (mask_p[49] << 6) | (mask_p[48] << 4)
-               | (mask_p[47] << 2) | (mask_p[46] << 0);
-       REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask);
-       REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
+       ar5008_hw_cmn_spur_mitigate(ah, chan, bin);
 
        REGWRITE_BUFFER_FLUSH(ah);
 }
index 5cee231cca1f99510e21410e362a1086cae19672..a8762711ad74b40ab7a0afa5b0f64820188f3011 100644 (file)
@@ -15,6 +15,7 @@
  */
 
 #include <linux/relay.h>
+#include <linux/random.h>
 #include "ath9k.h"
 
 static s8 fix_rssi_inv_only(u8 rssi_val)
@@ -36,21 +37,480 @@ static void ath_debug_send_fft_sample(struct ath_spec_scan_priv *spec_priv,
        relay_write(spec_priv->rfs_chan_spec_scan, fft_sample_tlv, length);
 }
 
+typedef int (ath_cmn_fft_idx_validator) (u8 *sample_end, int bytes_read);
+
+static int
+ath_cmn_max_idx_verify_ht20_fft(u8 *sample_end, int bytes_read)
+{
+       struct ath_ht20_mag_info *mag_info;
+       u8 *sample;
+       u16 max_magnitude;
+       u8 max_index;
+       u8 max_exp;
+
+       /* Sanity check so that we don't read outside the read
+        * buffer
+        */
+       if (bytes_read < SPECTRAL_HT20_SAMPLE_LEN - 1)
+               return -1;
+
+       mag_info = (struct ath_ht20_mag_info *) (sample_end -
+                               sizeof(struct ath_ht20_mag_info) + 1);
+
+       sample = sample_end - SPECTRAL_HT20_SAMPLE_LEN + 1;
+
+       max_index = spectral_max_index(mag_info->all_bins,
+                                      SPECTRAL_HT20_NUM_BINS);
+       max_magnitude = spectral_max_magnitude(mag_info->all_bins);
+
+       max_exp = mag_info->max_exp & 0xf;
+
+       /* Don't try to read something outside the read buffer
+        * in case of a missing byte (so bins[0] will be outside
+        * the read buffer)
+        */
+       if (bytes_read < SPECTRAL_HT20_SAMPLE_LEN && max_index < 1)
+               return -1;
+
+       if (sample[max_index] != (max_magnitude >> max_exp))
+               return -1;
+       else
+               return 0;
+}
+
+static int
+ath_cmn_max_idx_verify_ht20_40_fft(u8 *sample_end, int bytes_read)
+{
+       struct ath_ht20_40_mag_info *mag_info;
+       u8 *sample;
+       u16 lower_mag, upper_mag;
+       u8 lower_max_index, upper_max_index;
+       u8 max_exp;
+       int dc_pos = SPECTRAL_HT20_40_NUM_BINS / 2;
+
+       /* Sanity check so that we don't read outside the read
+        * buffer
+        */
+       if (bytes_read < SPECTRAL_HT20_40_SAMPLE_LEN - 1)
+               return -1;
+
+       mag_info = (struct ath_ht20_40_mag_info *) (sample_end -
+                               sizeof(struct ath_ht20_40_mag_info) + 1);
+
+       sample = sample_end - SPECTRAL_HT20_40_SAMPLE_LEN + 1;
+
+       lower_mag = spectral_max_magnitude(mag_info->lower_bins);
+       lower_max_index = spectral_max_index(mag_info->lower_bins,
+                                            SPECTRAL_HT20_40_NUM_BINS);
+
+       upper_mag = spectral_max_magnitude(mag_info->upper_bins);
+       upper_max_index = spectral_max_index(mag_info->upper_bins,
+                                            SPECTRAL_HT20_40_NUM_BINS);
+
+       max_exp = mag_info->max_exp & 0xf;
+
+       /* Don't try to read something outside the read buffer
+        * in case of a missing byte (so bins[0] will be outside
+        * the read buffer)
+        */
+       if (bytes_read < SPECTRAL_HT20_40_SAMPLE_LEN &&
+          ((upper_max_index < 1) || (lower_max_index < 1)))
+               return -1;
+
+       /* Sometimes the hardware messes up the index and adds
+        * the index of the middle point (dc_pos). Try to fix it.
+        */
+       if ((upper_max_index - dc_pos > 0) &&
+          (sample[upper_max_index] == (upper_mag >> max_exp)))
+               upper_max_index -= dc_pos;
+
+       if ((lower_max_index - dc_pos > 0) &&
+          (sample[lower_max_index - dc_pos] == (lower_mag >> max_exp)))
+               lower_max_index -= dc_pos;
+
+       if ((sample[upper_max_index + dc_pos] != (upper_mag >> max_exp)) ||
+          (sample[lower_max_index] != (lower_mag >> max_exp)))
+               return -1;
+       else
+               return 0;
+}
+
+typedef int (ath_cmn_fft_sample_handler) (struct ath_rx_status *rs,
+                       struct ath_spec_scan_priv *spec_priv,
+                       u8 *sample_buf, u64 tsf, u16 freq, int chan_type);
+
+static int
+ath_cmn_process_ht20_fft(struct ath_rx_status *rs,
+                       struct ath_spec_scan_priv *spec_priv,
+                       u8 *sample_buf,
+                       u64 tsf, u16 freq, int chan_type)
+{
+       struct fft_sample_ht20 fft_sample_20;
+       struct ath_common *common = ath9k_hw_common(spec_priv->ah);
+       struct ath_hw *ah = spec_priv->ah;
+       struct ath_ht20_mag_info *mag_info;
+       struct fft_sample_tlv *tlv;
+       int i = 0;
+       int ret = 0;
+       int dc_pos = SPECTRAL_HT20_NUM_BINS / 2;
+       u16 magnitude, tmp_mag, length;
+       u8 max_index, bitmap_w, max_exp;
+
+       length = sizeof(fft_sample_20) - sizeof(struct fft_sample_tlv);
+       fft_sample_20.tlv.type = ATH_FFT_SAMPLE_HT20;
+       fft_sample_20.tlv.length = __cpu_to_be16(length);
+       fft_sample_20.freq = __cpu_to_be16(freq);
+       fft_sample_20.rssi = fix_rssi_inv_only(rs->rs_rssi_ctl[0]);
+       fft_sample_20.noise = ah->noise;
+
+       mag_info = (struct ath_ht20_mag_info *) (sample_buf +
+                                       SPECTRAL_HT20_NUM_BINS);
+
+       magnitude = spectral_max_magnitude(mag_info->all_bins);
+       fft_sample_20.max_magnitude = __cpu_to_be16(magnitude);
+
+       max_index = spectral_max_index(mag_info->all_bins,
+                                       SPECTRAL_HT20_NUM_BINS);
+       fft_sample_20.max_index = max_index;
+
+       bitmap_w = spectral_bitmap_weight(mag_info->all_bins);
+       fft_sample_20.bitmap_weight = bitmap_w;
+
+       max_exp = mag_info->max_exp & 0xf;
+       fft_sample_20.max_exp = max_exp;
+
+       fft_sample_20.tsf = __cpu_to_be64(tsf);
+
+       memcpy(fft_sample_20.data, sample_buf, SPECTRAL_HT20_NUM_BINS);
+
+       ath_dbg(common, SPECTRAL_SCAN, "FFT HT20 frame: max mag 0x%X,"
+                                       "max_mag_idx %i\n",
+                                       magnitude >> max_exp,
+                                       max_index);
+
+       if (fft_sample_20.data[max_index] != (magnitude >> max_exp)) {
+               ath_dbg(common, SPECTRAL_SCAN, "Magnitude mismatch !\n");
+               ret = -1;
+       }
+
+       /* DC value (value in the middle) is the blind spot of the spectral
+        * sample and invalid, interpolate it.
+        */
+       fft_sample_20.data[dc_pos] = (fft_sample_20.data[dc_pos + 1] +
+                                       fft_sample_20.data[dc_pos - 1]) / 2;
+
+       /* Check if the maximum magnitude is indeed maximum,
+        * also if the maximum value was at dc_pos, calculate
+        * a new one (since value at dc_pos is invalid).
+        */
+       if (max_index == dc_pos) {
+               tmp_mag = 0;
+               for (i = 0; i < dc_pos; i++) {
+                       if (fft_sample_20.data[i] > tmp_mag) {
+                               tmp_mag = fft_sample_20.data[i];
+                               fft_sample_20.max_index = i;
+                       }
+               }
+
+               magnitude = tmp_mag << max_exp;
+               fft_sample_20.max_magnitude = __cpu_to_be16(magnitude);
+
+               ath_dbg(common, SPECTRAL_SCAN,
+                       "Calculated new lower max 0x%X at %i\n",
+                       tmp_mag, fft_sample_20.max_index);
+       } else
+       for (i = 0; i < SPECTRAL_HT20_NUM_BINS; i++) {
+               if (fft_sample_20.data[i] == (magnitude >> max_exp))
+                       ath_dbg(common, SPECTRAL_SCAN,
+                               "Got max: 0x%X at index %i\n",
+                               fft_sample_20.data[i], i);
+
+               if (fft_sample_20.data[i] > (magnitude >> max_exp)) {
+                       ath_dbg(common, SPECTRAL_SCAN,
+                               "Got bin %i greater than max: 0x%X\n",
+                               i, fft_sample_20.data[i]);
+                       ret = -1;
+               }
+       }
+
+       if (ret < 0)
+               return ret;
+
+       tlv = (struct fft_sample_tlv *)&fft_sample_20;
+
+       ath_debug_send_fft_sample(spec_priv, tlv);
+
+       return 0;
+}
+
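The DC-bin fixup above is a plain average of the two neighbours: with data[dc_pos - 1] = 40 and data[dc_pos + 1] = 46, the blind-spot bin becomes (46 + 40) / 2 = 43. Only when the reported maximum sat exactly on dc_pos does the loop that follows rescan the remaining bins for a genuine maximum, since the hardware-reported one pointed at the invalid bin.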
+static int
+ath_cmn_process_ht20_40_fft(struct ath_rx_status *rs,
+                       struct ath_spec_scan_priv *spec_priv,
+                       u8 *sample_buf,
+                       u64 tsf, u16 freq, int chan_type)
+{
+       struct fft_sample_ht20_40 fft_sample_40;
+       struct ath_common *common = ath9k_hw_common(spec_priv->ah);
+       struct ath_hw *ah = spec_priv->ah;
+       struct ath9k_hw_cal_data *caldata = ah->caldata;
+       struct ath_ht20_40_mag_info *mag_info;
+       struct fft_sample_tlv *tlv;
+       int dc_pos = SPECTRAL_HT20_40_NUM_BINS / 2;
+       int i = 0;
+       int ret = 0;
+       s16 ext_nf;
+       u16 lower_mag, upper_mag, tmp_mag, length;
+       s8 lower_rssi, upper_rssi;
+       u8 lower_max_index, upper_max_index;
+       u8 lower_bitmap_w, upper_bitmap_w, max_exp;
+
+       if (caldata)
+               ext_nf = ath9k_hw_getchan_noise(ah, ah->curchan,
+                               caldata->nfCalHist[3].privNF);
+       else
+               ext_nf = ATH_DEFAULT_NOISE_FLOOR;
+
+       length = sizeof(fft_sample_40) - sizeof(struct fft_sample_tlv);
+       fft_sample_40.tlv.type = ATH_FFT_SAMPLE_HT20_40;
+       fft_sample_40.tlv.length = __cpu_to_be16(length);
+       fft_sample_40.freq = __cpu_to_be16(freq);
+       fft_sample_40.channel_type = chan_type;
+
+       if (chan_type == NL80211_CHAN_HT40PLUS) {
+               lower_rssi = fix_rssi_inv_only(rs->rs_rssi_ctl[0]);
+               upper_rssi = fix_rssi_inv_only(rs->rs_rssi_ext[0]);
+
+               fft_sample_40.lower_noise = ah->noise;
+               fft_sample_40.upper_noise = ext_nf;
+       } else {
+               lower_rssi = fix_rssi_inv_only(rs->rs_rssi_ext[0]);
+               upper_rssi = fix_rssi_inv_only(rs->rs_rssi_ctl[0]);
+
+               fft_sample_40.lower_noise = ext_nf;
+               fft_sample_40.upper_noise = ah->noise;
+       }
+
+       fft_sample_40.lower_rssi = lower_rssi;
+       fft_sample_40.upper_rssi = upper_rssi;
+
+       mag_info = (struct ath_ht20_40_mag_info *) (sample_buf +
+                                       SPECTRAL_HT20_40_NUM_BINS);
+
+       lower_mag = spectral_max_magnitude(mag_info->lower_bins);
+       fft_sample_40.lower_max_magnitude = __cpu_to_be16(lower_mag);
+
+       upper_mag = spectral_max_magnitude(mag_info->upper_bins);
+       fft_sample_40.upper_max_magnitude = __cpu_to_be16(upper_mag);
+
+       lower_max_index = spectral_max_index(mag_info->lower_bins,
+                                       SPECTRAL_HT20_40_NUM_BINS);
+       fft_sample_40.lower_max_index = lower_max_index;
+
+       upper_max_index = spectral_max_index(mag_info->upper_bins,
+                                       SPECTRAL_HT20_40_NUM_BINS);
+       fft_sample_40.upper_max_index = upper_max_index;
+
+       lower_bitmap_w = spectral_bitmap_weight(mag_info->lower_bins);
+       fft_sample_40.lower_bitmap_weight = lower_bitmap_w;
+
+       upper_bitmap_w = spectral_bitmap_weight(mag_info->upper_bins);
+       fft_sample_40.upper_bitmap_weight = upper_bitmap_w;
+
+       max_exp = mag_info->max_exp & 0xf;
+       fft_sample_40.max_exp = max_exp;
+
+       fft_sample_40.tsf = __cpu_to_be64(tsf);
+
+       memcpy(fft_sample_40.data, sample_buf, SPECTRAL_HT20_40_NUM_BINS);
+
+       ath_dbg(common, SPECTRAL_SCAN, "FFT HT20/40 frame: lower mag 0x%X,"
+                                       "lower_mag_idx %i, upper mag 0x%X,"
+                                       "upper_mag_idx %i\n",
+                                       lower_mag >> max_exp,
+                                       lower_max_index,
+                                       upper_mag >> max_exp,
+                                       upper_max_index);
+
+       /* Sometimes the hardware messes up the index and adds
+        * the index of the middle point (dc_pos). Try to fix it.
+        */
+       if ((upper_max_index - dc_pos > 0) &&
+          (fft_sample_40.data[upper_max_index] == (upper_mag >> max_exp))) {
+               upper_max_index -= dc_pos;
+               fft_sample_40.upper_max_index = upper_max_index;
+       }
+
+       if ((lower_max_index - dc_pos > 0) &&
+          (fft_sample_40.data[lower_max_index - dc_pos] ==
+          (lower_mag >> max_exp))) {
+               lower_max_index -= dc_pos;
+               fft_sample_40.lower_max_index = lower_max_index;
+       }
+
+       /* Check if we got the expected magnitude values at
+        * the expected bins
+        */
+       if ((fft_sample_40.data[upper_max_index + dc_pos]
+           != (upper_mag >> max_exp)) ||
+          (fft_sample_40.data[lower_max_index]
+           != (lower_mag >> max_exp))) {
+               ath_dbg(common, SPECTRAL_SCAN, "Magnitude mismatch !\n");
+               ret = -1;
+       }
+
+       /* DC value (value in the middle) is the blind spot of the spectral
+        * sample and invalid, interpolate it.
+        */
+       fft_sample_40.data[dc_pos] = (fft_sample_40.data[dc_pos + 1] +
+                                       fft_sample_40.data[dc_pos - 1]) / 2;
+
+       /* Check if the maximum magnitudes are indeed maximum,
+        * also if the maximum value was at dc_pos, calculate
+        * a new one (since value at dc_pos is invalid).
+        */
+       if (lower_max_index == dc_pos) {
+               tmp_mag = 0;
+               for (i = 0; i < dc_pos; i++) {
+                       if (fft_sample_40.data[i] > tmp_mag) {
+                               tmp_mag = fft_sample_40.data[i];
+                               fft_sample_40.lower_max_index = i;
+                       }
+               }
+
+               lower_mag = tmp_mag << max_exp;
+               fft_sample_40.lower_max_magnitude = __cpu_to_be16(lower_mag);
+
+               ath_dbg(common, SPECTRAL_SCAN,
+                       "Calculated new lower max 0x%X at %i\n",
+                       tmp_mag, fft_sample_40.lower_max_index);
+       } else
+       for (i = 0; i < dc_pos; i++) {
+               if (fft_sample_40.data[i] == (lower_mag >> max_exp))
+                       ath_dbg(common, SPECTRAL_SCAN,
+                               "Got lower mag: 0x%X at index %i\n",
+                               fft_sample_40.data[i], i);
+
+               if (fft_sample_40.data[i] > (lower_mag >> max_exp)) {
+                       ath_dbg(common, SPECTRAL_SCAN,
+                               "Got lower bin %i higher than max: 0x%X\n",
+                               i, fft_sample_40.data[i]);
+                       ret = -1;
+               }
+       }
+
+       if (upper_max_index == dc_pos) {
+               tmp_mag = 0;
+               for (i = dc_pos; i < SPECTRAL_HT20_40_NUM_BINS; i++) {
+                       if (fft_sample_40.data[i] > tmp_mag) {
+                               tmp_mag = fft_sample_40.data[i];
+                               fft_sample_40.upper_max_index = i;
+                       }
+               }
+               upper_mag = tmp_mag << max_exp;
+               fft_sample_40.upper_max_magnitude = __cpu_to_be16(upper_mag);
+
+               ath_dbg(common, SPECTRAL_SCAN,
+                       "Calculated new upper max 0x%X at %i\n",
+                       tmp_mag, fft_sample_40.upper_max_index);
+       } else
+       for (i = dc_pos; i < SPECTRAL_HT20_40_NUM_BINS; i++) {
+               if (fft_sample_40.data[i] == (upper_mag >> max_exp))
+                       ath_dbg(common, SPECTRAL_SCAN,
+                               "Got upper mag: 0x%X at index %i\n",
+                               fft_sample_40.data[i], i);
+
+               if (fft_sample_40.data[i] > (upper_mag >> max_exp)) {
+                       ath_dbg(common, SPECTRAL_SCAN,
+                               "Got upper bin %i higher than max: 0x%X\n",
+                               i, fft_sample_40.data[i]);
+
+                       ret = -1;
+               }
+       }
+
+       if (ret < 0)
+               return ret;
+
+       tlv = (struct fft_sample_tlv *)&fft_sample_40;
+
+       ath_debug_send_fft_sample(spec_priv, tlv);
+
+       return 0;
+}
+
+static inline void
+ath_cmn_copy_fft_frame(u8 *in, u8 *out, int sample_len, int sample_bytes)
+{
+       switch (sample_bytes - sample_len) {
+       case -1:
+               /* First byte missing */
+               memcpy(&out[1], in,
+                      sample_len - 1);
+               break;
+       case 0:
+               /* Length correct, nothing to do. */
+               memcpy(out, in, sample_len);
+               break;
+       case 1:
+               /* MAC added 2 extra bytes AND first byte
+                * is missing.
+                */
+               memcpy(&out[1], in, 30);
+               out[31] = in[31];
+               memcpy(&out[32], &in[33],
+                      sample_len - 32);
+               break;
+       case 2:
+               /* MAC added 2 extra bytes at bin 30 and 32,
+                * remove them.
+                */
+               memcpy(out, in, 30);
+               out[30] = in[31];
+               memcpy(&out[31], &in[33],
+                      sample_len - 31);
+               break;
+       default:
+               break;
+       }
+}
+
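Summarising the repair matrix above, with d = sample_bytes - sample_len:

    /* d = -1 : first byte lost             -> copy into out[1..]
     * d =  0 : frame intact                -> straight copy
     * d = +1 : 2 pad bytes AND first lost  -> shift by one, drop in[30]/in[32]
     * d = +2 : 2 pad bytes at bins 30/32   -> drop in[30] and in[32]
     */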
+static int
+ath_cmn_is_fft_buf_full(struct ath_spec_scan_priv *spec_priv)
+{
+       int i = 0;
+       int ret = 0;
+       struct rchan *rc = spec_priv->rfs_chan_spec_scan;
+
+       for_each_online_cpu(i)
+               ret += relay_buf_full(rc->buf[i]);
+
+       i = num_online_cpus();
+
+       if (ret == i)
+               return 1;
+       else
+               return 0;
+}
+
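ath_cmn_is_fft_buf_full() treats the relay channel as full only when every per-CPU sub-buffer reports full (relay keeps one buffer per online CPU, hence the for_each_online_cpu() walk); a single congested CPU therefore does not stall FFT processing.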
 /* returns 1 if this was a spectral frame, even if not handled. */
 int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_hdr *hdr,
                    struct ath_rx_status *rs, u64 tsf)
 {
+       u8 sample_buf[SPECTRAL_SAMPLE_MAX_LEN] = {0};
        struct ath_hw *ah = spec_priv->ah;
        struct ath_common *common = ath9k_hw_common(spec_priv->ah);
-       u8 num_bins, *bins, *vdata = (u8 *)hdr;
-       struct fft_sample_ht20 fft_sample_20;
-       struct fft_sample_ht20_40 fft_sample_40;
-       struct fft_sample_tlv *tlv;
+       u8 num_bins, *vdata = (u8 *)hdr;
        struct ath_radar_info *radar_info;
        int len = rs->rs_datalen;
-       int dc_pos;
-       u16 fft_len, length, freq = ah->curchan->chan->center_freq;
+       int i;
+       int got_slen = 0;
+       u8  *sample_start;
+       int sample_bytes = 0;
+       int ret = 0;
+       u16 fft_len, sample_len, freq = ah->curchan->chan->center_freq;
        enum nl80211_channel_type chan_type;
+       ath_cmn_fft_idx_validator *fft_idx_validator;
+       ath_cmn_fft_sample_handler *fft_handler;
 
        /* AR9280 and before report via ATH9K_PHYERR_RADAR, AR93xx and newer
         * via ATH9K_PHYERR_SPECTRAL. Haven't seen ATH9K_PHYERR_FALSE_RADAR_EXT
@@ -68,140 +528,170 @@ int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_h
        if (!(radar_info->pulse_bw_info & SPECTRAL_SCAN_BITMASK))
                return 0;
 
+       /* Output buffers are full, no need to process anything
+        * since there is no space to put the result anyway
+        */
+       ret = ath_cmn_is_fft_buf_full(spec_priv);
+       if (ret == 1) {
+               ath_dbg(common, SPECTRAL_SCAN, "FFT report ignored, no space "
+                                               "left on output buffers\n");
+               return 1;
+       }
+
        chan_type = cfg80211_get_chandef_type(&common->hw->conf.chandef);
        if ((chan_type == NL80211_CHAN_HT40MINUS) ||
            (chan_type == NL80211_CHAN_HT40PLUS)) {
                fft_len = SPECTRAL_HT20_40_TOTAL_DATA_LEN;
+               sample_len = SPECTRAL_HT20_40_SAMPLE_LEN;
                num_bins = SPECTRAL_HT20_40_NUM_BINS;
-               bins = (u8 *)fft_sample_40.data;
+               fft_idx_validator = &ath_cmn_max_idx_verify_ht20_40_fft;
+               fft_handler = &ath_cmn_process_ht20_40_fft;
        } else {
                fft_len = SPECTRAL_HT20_TOTAL_DATA_LEN;
+               sample_len = SPECTRAL_HT20_SAMPLE_LEN;
                num_bins = SPECTRAL_HT20_NUM_BINS;
-               bins = (u8 *)fft_sample_20.data;
-       }
-
-       /* Variation in the data length is possible and will be fixed later */
-       if ((len > fft_len + 2) || (len < fft_len - 1))
-               return 1;
-
-       switch (len - fft_len) {
-       case 0:
-               /* length correct, nothing to do. */
-               memcpy(bins, vdata, num_bins);
-               break;
-       case -1:
-               /* first byte missing, duplicate it. */
-               memcpy(&bins[1], vdata, num_bins - 1);
-               bins[0] = vdata[0];
-               break;
-       case 2:
-               /* MAC added 2 extra bytes at bin 30 and 32, remove them. */
-               memcpy(bins, vdata, 30);
-               bins[30] = vdata[31];
-               memcpy(&bins[31], &vdata[33], num_bins - 31);
-               break;
-       case 1:
-               /* MAC added 2 extra bytes AND first byte is missing. */
-               bins[0] = vdata[0];
-               memcpy(&bins[1], vdata, 30);
-               bins[31] = vdata[31];
-               memcpy(&bins[32], &vdata[33], num_bins - 32);
-               break;
-       default:
-               return 1;
+               fft_idx_validator = ath_cmn_max_idx_verify_ht20_fft;
+               fft_handler = &ath_cmn_process_ht20_fft;
        }
 
-       /* DC value (value in the middle) is the blind spot of the spectral
-        * sample and invalid, interpolate it.
-        */
-       dc_pos = num_bins / 2;
-       bins[dc_pos] = (bins[dc_pos + 1] + bins[dc_pos - 1]) / 2;
-
-       if ((chan_type == NL80211_CHAN_HT40MINUS) ||
-           (chan_type == NL80211_CHAN_HT40PLUS)) {
-               s8 lower_rssi, upper_rssi;
-               s16 ext_nf;
-               u8 lower_max_index, upper_max_index;
-               u8 lower_bitmap_w, upper_bitmap_w;
-               u16 lower_mag, upper_mag;
-               struct ath9k_hw_cal_data *caldata = ah->caldata;
-               struct ath_ht20_40_mag_info *mag_info;
-
-               if (caldata)
-                       ext_nf = ath9k_hw_getchan_noise(ah, ah->curchan,
-                                       caldata->nfCalHist[3].privNF);
-               else
-                       ext_nf = ATH_DEFAULT_NOISE_FLOOR;
-
-               length = sizeof(fft_sample_40) - sizeof(struct fft_sample_tlv);
-               fft_sample_40.tlv.type = ATH_FFT_SAMPLE_HT20_40;
-               fft_sample_40.tlv.length = __cpu_to_be16(length);
-               fft_sample_40.freq = __cpu_to_be16(freq);
-               fft_sample_40.channel_type = chan_type;
-
-               if (chan_type == NL80211_CHAN_HT40PLUS) {
-                       lower_rssi = fix_rssi_inv_only(rs->rs_rssi_ctl[0]);
-                       upper_rssi = fix_rssi_inv_only(rs->rs_rssi_ext[0]);
-
-                       fft_sample_40.lower_noise = ah->noise;
-                       fft_sample_40.upper_noise = ext_nf;
-               } else {
-                       lower_rssi = fix_rssi_inv_only(rs->rs_rssi_ext[0]);
-                       upper_rssi = fix_rssi_inv_only(rs->rs_rssi_ctl[0]);
-
-                       fft_sample_40.lower_noise = ext_nf;
-                       fft_sample_40.upper_noise = ah->noise;
+       ath_dbg(common, SPECTRAL_SCAN, "Got radar dump bw_info: 0x%X,"
+                                       "len: %i fft_len: %i\n",
+                                       radar_info->pulse_bw_info,
+                                       len,
+                                       fft_len);
+       sample_start = vdata;
+       for (i = 0; i < len - 2; i++) {
+               sample_bytes++;
+
+               /* Only a single sample received; no need to look
+                * for the sample's end, do the correction based
+                * on the packet's length instead. Note that hw
+                * will always put the radar_info structure at
+                * the end.
+                */
+               if (len <= fft_len + 2) {
+                       sample_bytes = len - sizeof(struct ath_radar_info);
+                       got_slen = 1;
                }
-               fft_sample_40.lower_rssi = lower_rssi;
-               fft_sample_40.upper_rssi = upper_rssi;
-
-               mag_info = ((struct ath_ht20_40_mag_info *)radar_info) - 1;
-               lower_mag = spectral_max_magnitude(mag_info->lower_bins);
-               upper_mag = spectral_max_magnitude(mag_info->upper_bins);
-               fft_sample_40.lower_max_magnitude = __cpu_to_be16(lower_mag);
-               fft_sample_40.upper_max_magnitude = __cpu_to_be16(upper_mag);
-               lower_max_index = spectral_max_index(mag_info->lower_bins);
-               upper_max_index = spectral_max_index(mag_info->upper_bins);
-               fft_sample_40.lower_max_index = lower_max_index;
-               fft_sample_40.upper_max_index = upper_max_index;
-               lower_bitmap_w = spectral_bitmap_weight(mag_info->lower_bins);
-               upper_bitmap_w = spectral_bitmap_weight(mag_info->upper_bins);
-               fft_sample_40.lower_bitmap_weight = lower_bitmap_w;
-               fft_sample_40.upper_bitmap_weight = upper_bitmap_w;
-               fft_sample_40.max_exp = mag_info->max_exp & 0xf;
 
-               fft_sample_40.tsf = __cpu_to_be64(tsf);
-
-               tlv = (struct fft_sample_tlv *)&fft_sample_40;
-       } else {
-               u8 max_index, bitmap_w;
-               u16 magnitude;
-               struct ath_ht20_mag_info *mag_info;
-
-               length = sizeof(fft_sample_20) - sizeof(struct fft_sample_tlv);
-               fft_sample_20.tlv.type = ATH_FFT_SAMPLE_HT20;
-               fft_sample_20.tlv.length = __cpu_to_be16(length);
-               fft_sample_20.freq = __cpu_to_be16(freq);
-
-               fft_sample_20.rssi = fix_rssi_inv_only(rs->rs_rssi_ctl[0]);
-               fft_sample_20.noise = ah->noise;
-
-               mag_info = ((struct ath_ht20_mag_info *)radar_info) - 1;
-               magnitude = spectral_max_magnitude(mag_info->all_bins);
-               fft_sample_20.max_magnitude = __cpu_to_be16(magnitude);
-               max_index = spectral_max_index(mag_info->all_bins);
-               fft_sample_20.max_index = max_index;
-               bitmap_w = spectral_bitmap_weight(mag_info->all_bins);
-               fft_sample_20.bitmap_weight = bitmap_w;
-               fft_sample_20.max_exp = mag_info->max_exp & 0xf;
-
-               fft_sample_20.tsf = __cpu_to_be64(tsf);
+               /* Search for the end of the FFT frame between
+                * sample_len - 1 and sample_len + 2. max_exp is 3
+                * bits long and is the only value on the last
+                * byte of the frame; since it'll be smaller than
+                * the next byte (the first bin of the next sample)
+                * 90% of the time, we can use it as a separator.
+                */
+               if (vdata[i] <= 0x7 && sample_bytes >= sample_len - 1) {
+
+                       /* Got a frame length within boundaries, there are
+                        * four scenarios here:
+                        *
+                        * a) sample_len -> We got the correct length
+                        * b) sample_len + 2 -> 2 bytes added around bin[31]
+                        * c) sample_len - 1 -> The first byte is missing
+                        * d) sample_len + 1 -> b + c at the same time
+                        *
+                        * When MAC adds 2 extra bytes, bin[31] and bin[32]
+                        * have the same value, so we can use that for further
+                        * verification in cases b and d.
+                        */
+
+                       /* Did we go too far? If so we couldn't determine
+                        * this sample's boundaries; discard any further
+                        * data.
+                        */
+                       if ((sample_bytes > sample_len + 2) ||
+                          ((sample_bytes > sample_len) &&
+                          (sample_start[31] != sample_start[32])))
+                               break;
+
+                       /* See if we got a valid frame by checking the
+                        * consistency of mag_info fields. This is to
+                        * prevent from "fixing" a correct frame.
+                        * Failure is non-fatal, later frames may
+                        * be valid.
+                        */
+                       if (!fft_idx_validator(&vdata[i], i)) {
+                               ath_dbg(common, SPECTRAL_SCAN,
+                                       "Found valid fft frame at %i\n", i);
+                               got_slen = 1;
+                       }
+
+                       /* We expect 1 - 2 more bytes */
+                       else if ((sample_start[31] == sample_start[32]) &&
+                               (sample_bytes >= sample_len) &&
+                               (sample_bytes < sample_len + 2) &&
+                               (vdata[i + 1] <= 0x7))
+                               continue;
+
+                       /* Try to distinguish cases a and c */
+                       else if ((sample_bytes == sample_len - 1) &&
+                               (vdata[i + 1] <= 0x7))
+                               continue;
+
+                       got_slen = 1;
+               }
 
-               tlv = (struct fft_sample_tlv *)&fft_sample_20;
+               if (got_slen) {
+                       ath_dbg(common, SPECTRAL_SCAN, "FFT frame len: %i\n",
+                               sample_bytes);
+
+                       /* Only try to fix a frame if it's the only one
+                        * on the report, else just skip it.
+                        */
+                       if (sample_bytes != sample_len && len <= fft_len + 2) {
+                               ath_cmn_copy_fft_frame(sample_start,
+                                                      sample_buf, sample_len,
+                                                      sample_bytes);
+
+                               fft_handler(rs, spec_priv, sample_buf,
+                                           tsf, freq, chan_type);
+
+                               /* Mix the received bins into the /dev/random
+                                * pool before the scratch buffer is wiped
+                                */
+                               add_device_randomness(sample_buf, num_bins);
+
+                               memset(sample_buf, 0, SPECTRAL_SAMPLE_MAX_LEN);
+                       }
+
+                       /* Process a normal frame */
+                       if (sample_bytes == sample_len) {
+                               ret = fft_handler(rs, spec_priv, sample_start,
+                                                 tsf, freq, chan_type);
+
+                               /* Mix the received bins to the /dev/random
+                                * pool
+                                */
+                               add_device_randomness(sample_start, num_bins);
+                       }
+
+                       /* Short report processed, break out of the
+                        * loop.
+                        */
+                       if (len <= fft_len + 2)
+                               break;
+
+                       sample_start = &vdata[i + 1];
+
+                       /* -1 to grab sample_len - 1, -2 since
+                        * they'll get increased by one. In case
+                        * of failure try to recover by going byte
+                        * by byte instead.
+                        */
+                       if (ret == 0) {
+                               i += num_bins - 2;
+                               sample_bytes = num_bins - 2;
+                       }
+                       got_slen = 0;
+               }
        }
 
-       ath_debug_send_fft_sample(spec_priv, tlv);
-
+       i -= num_bins - 2;
+       if (len - i != sizeof(struct ath_radar_info))
+               ath_dbg(common, SPECTRAL_SCAN, "FFT report truncated"
+                                               "(bytes left: %i)\n",
+                                               len - i);
        return 1;
 }
 EXPORT_SYMBOL(ath_cmn_process_fft);
index 82d9dd29652cd2912239ff51246bdc7f24e6b8a8..998743be9c6724de510d1dc33d62eb67df32577d 100644 (file)
@@ -66,6 +66,8 @@ struct ath_ht20_fft_packet {
 } __packed;
 
 #define SPECTRAL_HT20_TOTAL_DATA_LEN   (sizeof(struct ath_ht20_fft_packet))
+#define        SPECTRAL_HT20_SAMPLE_LEN        (sizeof(struct ath_ht20_mag_info) +\
+                                       SPECTRAL_HT20_NUM_BINS)
 
 /* Dynamic 20/40 mode:
  *
@@ -101,6 +103,10 @@ struct ath_spec_scan_priv {
 };
 
 #define SPECTRAL_HT20_40_TOTAL_DATA_LEN        (sizeof(struct ath_ht20_40_fft_packet))
+#define        SPECTRAL_HT20_40_SAMPLE_LEN     (sizeof(struct ath_ht20_40_mag_info) +\
+                                       SPECTRAL_HT20_40_NUM_BINS)
+
+#define        SPECTRAL_SAMPLE_MAX_LEN         SPECTRAL_HT20_40_SAMPLE_LEN
 
 /* grabs the max magnitude from the all/upper/lower bins */
 static inline u16 spectral_max_magnitude(u8 *bins)
@@ -111,17 +117,32 @@ static inline u16 spectral_max_magnitude(u8 *bins)
 }
 
 /* return the index of the max magnitude from the all/upper/lower bins */
-static inline u8 spectral_max_index(u8 *bins)
+static inline u8 spectral_max_index(u8 *bins, int num_bins)
 {
        s8 m = (bins[2] & 0xfc) >> 2;
-
-       /* TODO: this still doesn't always report the right values ... */
-       if (m > 32)
+       u8 zero_idx = num_bins / 2;
+
+       /* The raw index is a signed value: when the sign bit
+        * (0x20) is set, sign-extend it into the full 8-bit
+        * int so the negative offset is preserved
+        */
+       if (m & 0x20) {
+               m &= ~0x20;
                m |= 0xe0;
-       else
-               m &= ~0xe0;
+       }
+
+       /* Bring the zero point to the beginning
+        * instead of the middle so that we can use
+        * it for array lookup and that we don't deal
+        * with negative values later
+        */
+       m += zero_idx;
+
+       /* Sanity check to make sure index is within bounds */
+       if (m < 0 || m > num_bins - 1)
+               m = 0;
 
-       return m + 29;
+       return m;
 }
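A worked example for the rewritten index decoding, assuming HT20 (num_bins = 56, so zero_idx = 28): a raw 6-bit field of 0x3e has the sign bit (0x20) set, sign-extends to -2 and yields index -2 + 28 = 26; raw 0x05 stays +5 and yields 33; anything landing outside 0..55 clamps to 0. This replaces the old fixed `m + 29` offset and `m > 32` sign test, which the dropped TODO admitted did not always report the right values.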
 
 /* return the bitmap weight from the all/upper/lower bins */
index e82a0d4ce23f99247ea540be2a00dd4c26f9bd90..5dbc617ecf8a824eb61e5cf8d0bd231717f72a3f 100644 (file)
@@ -440,9 +440,9 @@ static inline void ath9k_htc_stop_btcoex(struct ath9k_htc_priv *priv)
 }
 #endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
 
-#define OP_BT_PRIORITY_DETECTED    BIT(3)
-#define OP_BT_SCAN                 BIT(4)
-#define OP_TSF_RESET               BIT(6)
+#define OP_BT_PRIORITY_DETECTED    3
+#define OP_BT_SCAN                 4
+#define OP_TSF_RESET               6
 
 enum htc_op_flags {
        HTC_FWFLAG_NO_RMW,
index d7beefe60683df8bd22b134e6a4418d9f19e5bbd..746856243bff610d9e10773046b9005851214646 100644 (file)
@@ -594,7 +594,7 @@ static void ath9k_init_misc(struct ath9k_htc_priv *priv)
 
        priv->spec_priv.ah = priv->ah;
        priv->spec_priv.spec_config.enabled = 0;
-       priv->spec_priv.spec_config.short_repeat = false;
+       priv->spec_priv.spec_config.short_repeat = true;
        priv->spec_priv.spec_config.count = 8;
        priv->spec_priv.spec_config.endless = false;
        priv->spec_priv.spec_config.period = 0x12;
index 564923c0df87cdad5226da74a0d9e575cc66bfd4..b71f3072fd9a937ca5f67f2471d85e05761b1d32 100644 (file)
@@ -1238,8 +1238,7 @@ out:
 }
 
 #define SUPPORTED_FILTERS                      \
-       (FIF_PROMISC_IN_BSS |                   \
-       FIF_ALLMULTI |                          \
+       (FIF_ALLMULTI |                         \
        FIF_CONTROL |                           \
        FIF_PSPOLL |                            \
        FIF_OTHER_BSS |                         \
index a0f58e2aa553825d65a7f1ecc4cc1e06338ea444..cc9648f844aeffef25c8d36eb33776b7c152d90c 100644 (file)
@@ -872,14 +872,7 @@ u32 ath9k_htc_calcrxfilter(struct ath9k_htc_priv *priv)
        if (priv->rxfilter & FIF_PROBE_REQ)
                rfilt |= ATH9K_RX_FILTER_PROBEREQ;
 
-       /*
-        * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
-        * mode interface or when in monitor mode. AP mode does not need this
-        * since it receives all in-BSS frames anyway.
-        */
-       if (((ah->opmode != NL80211_IFTYPE_AP) &&
-            (priv->rxfilter & FIF_PROMISC_IN_BSS)) ||
-           ah->is_monitoring)
+       if (ah->is_monitoring)
                rfilt |= ATH9K_RX_FILTER_PROM;
 
        if (priv->rxfilter & FIF_CONTROL)
index c1d2d0340febadb445bed891754f0565025f8f09..e8454db17634b95450f643ada6f52db82a57aa84 100644 (file)
@@ -1119,6 +1119,8 @@ bool ar9003_is_paprd_enabled(struct ath_hw *ah);
 void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx);
 void ar9003_hw_init_rate_txpower(struct ath_hw *ah, u8 *rate_array,
                                 struct ath9k_channel *chan);
+void ar5008_hw_cmn_spur_mitigate(struct ath_hw *ah,
+                                struct ath9k_channel *chan, int bin);
 void ar5008_hw_init_rate_txpower(struct ath_hw *ah, int16_t *rate_array,
                                 struct ath9k_channel *chan, int ht40_delta);
 
index b0badef71ce793e5bc85358e0166208edbb9688b..d285e3a89853dc3cb9fa968caec77ca2b07f10b5 100644 (file)
@@ -1442,8 +1442,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
 }
 
 #define SUPPORTED_FILTERS                      \
-       (FIF_PROMISC_IN_BSS |                   \
-       FIF_ALLMULTI |                          \
+       (FIF_ALLMULTI |                         \
        FIF_CONTROL |                           \
        FIF_PSPOLL |                            \
        FIF_OTHER_BSS |                         \
index 6fb40ef86fd6964c584fdb77a29d7808d2357717..6c75fb1ab77d45ba8b6dab67a7fe6bb3b1f3dc98 100644 (file)
@@ -392,11 +392,6 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
        if (sc->cur_chan->rxfilter & FIF_PROBE_REQ)
                rfilt |= ATH9K_RX_FILTER_PROBEREQ;
 
-       /*
-        * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
-        * mode interface or when in monitor mode. AP mode does not need this
-        * since it receives all in-BSS frames anyway.
-        */
        if (sc->sc_ah->is_monitoring)
                rfilt |= ATH9K_RX_FILTER_PROM;
 
index 47d5c2e910ad834d81c8c22fe368e191be3d6e32..020cd46471f528048bc312581cdcebc2d9d1c274 100644 (file)
@@ -310,8 +310,7 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
        if (SUPP(CARL9170FW_RX_FILTER)) {
                ar->fw.rx_filter = true;
                ar->rx_filter_caps = FIF_FCSFAIL | FIF_PLCPFAIL |
-                       FIF_CONTROL | FIF_PSPOLL | FIF_OTHER_BSS |
-                       FIF_PROMISC_IN_BSS;
+                       FIF_CONTROL | FIF_PSPOLL | FIF_OTHER_BSS;
        }
 
        if (SUPP(CARL9170FW_HW_COUNTERS))
index 78dadc7975586b030e948b21cf62f78fae6b1adc..2c74425f5059d54f3b64d13fc2db257d667ee534 100644 (file)
@@ -122,7 +122,7 @@ static void carl9170_led_set_brightness(struct led_classdev *led,
 }
 
 static int carl9170_led_register_led(struct ar9170 *ar, int i, char *name,
-                                    char *trigger)
+                                    const char *trigger)
 {
        int err;
 
index f1455a04cb623a06ab279c0686a1cf0c9576db31..59db6732d4e3908c6299d6ee7462c58b270e995d 100644 (file)
@@ -1011,9 +1011,8 @@ static void carl9170_op_configure_filter(struct ieee80211_hw *hw,
        if (multicast != ar->cur_mc_hash)
                WARN_ON(carl9170_update_multicast(ar, multicast));
 
-       if (changed_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS)) {
-               ar->sniffer_enabled = !!(*new_flags &
-                       (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS));
+       if (changed_flags & FIF_OTHER_BSS) {
+               ar->sniffer_enabled = !!(*new_flags & FIF_OTHER_BSS);
 
                WARN_ON(carl9170_set_operating_mode(ar));
        }
@@ -1033,7 +1032,7 @@ static void carl9170_op_configure_filter(struct ieee80211_hw *hw,
                if (!(*new_flags & FIF_PSPOLL))
                        rx_filter |= CARL9170_RX_FILTER_CTL_PSPOLL;
 
-               if (!(*new_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS))) {
+               if (!(*new_flags & FIF_OTHER_BSS)) {
                        rx_filter |= CARL9170_RX_FILTER_OTHER_RA;
                        rx_filter |= CARL9170_RX_FILTER_DECRY_FAIL;
                }
index c9f93310c0d6c0abaffc780fd4c30b970a420c54..76842e6ca38e64c516c29a44699442b5987864c6 100644 (file)
@@ -651,6 +651,7 @@ int carl9170_exec_cmd(struct ar9170 *ar, const enum carl9170_cmd_oids cmd,
        unsigned int plen, void *payload, unsigned int outlen, void *out)
 {
        int err = -ENOMEM;
+       unsigned long time_left;
 
        if (!IS_ACCEPTING_CMD(ar))
                return -EIO;
@@ -672,8 +673,8 @@ int carl9170_exec_cmd(struct ar9170 *ar, const enum carl9170_cmd_oids cmd,
        err = __carl9170_exec_cmd(ar, &ar->cmd, false);
 
        if (!(cmd & CARL9170_CMD_ASYNC_FLAG)) {
-               err = wait_for_completion_timeout(&ar->cmd_wait, HZ);
-               if (err == 0) {
+               time_left = wait_for_completion_timeout(&ar->cmd_wait, HZ);
+               if (time_left == 0) {
                        err = -ETIMEDOUT;
                        goto err_unbuf;
                }
index c657ca26a71a7c8e2d75dd931ce22d87a372dd45..656ce42b339a50d3b8bf9644fd5386055b707950 100644 (file)
@@ -41,30 +41,31 @@ struct radar_types {
 
 /* minimum percentage of ppb required to trigger detection */
 #define MIN_PPB_THRESH 50
-#define PPB_THRESH(PPB) ((PPB * MIN_PPB_THRESH + 50) / 100)
+#define PPB_THRESH_RATE(PPB, RATE) ((PPB * RATE + 100 - RATE) / 100)
+#define PPB_THRESH(PPB) PPB_THRESH_RATE(PPB, MIN_PPB_THRESH)
 #define PRF2PRI(PRF) ((1000000 + PRF / 2) / PRF)
 /* percentage of pulse width tolerance */
 #define WIDTH_TOLERANCE 5
 #define WIDTH_LOWER(X) ((X*(100-WIDTH_TOLERANCE)+50)/100)
 #define WIDTH_UPPER(X) ((X*(100+WIDTH_TOLERANCE)+50)/100)
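
Editorial aside, not part of the patch: a quick arithmetic check of the new
PPB_THRESH_RATE macro (copied verbatim below). It takes RATE percent of PPB
with a bias of (100 - RATE) before the integer division, and for
RATE == MIN_PPB_THRESH (50) it reduces to the old
(PPB * 50 + 50) / 100 rounding, so the default behaviour is unchanged. The
example values are hand-computed.

/* Sketch verifying PPB_THRESH_RATE against a few hand-computed values. */
#include <assert.h>

#define MIN_PPB_THRESH 50
#define PPB_THRESH_RATE(PPB, RATE) ((PPB * RATE + 100 - RATE) / 100)
#define PPB_THRESH(PPB) PPB_THRESH_RATE(PPB, MIN_PPB_THRESH)

int main(void)
{
	assert(PPB_THRESH(18) == 9);		/* (900 + 50) / 100 */
	assert(PPB_THRESH(23) == 12);		/* (1150 + 50) / 100, rounds up */
	assert(PPB_THRESH_RATE(18, 29) == 5);	/* e.g. RATE 29: (522 + 71) / 100 */
	/* RATE == 50 matches the old (PPB * 50 + 50) / 100 behaviour */
	assert(PPB_THRESH(18) == (18 * 50 + 50) / 100);
	return 0;
}
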
 
-#define ETSI_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB)     \
+#define ETSI_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB, CHIRP)      \
 {                                                              \
        ID, WIDTH_LOWER(WMIN), WIDTH_UPPER(WMAX),               \
        (PRF2PRI(PMAX) - PRI_TOLERANCE),                        \
        (PRF2PRI(PMIN) * PRF + PRI_TOLERANCE), PRF, PPB * PRF,  \
-       PPB_THRESH(PPB), PRI_TOLERANCE,                         \
+       PPB_THRESH(PPB), PRI_TOLERANCE, CHIRP                   \
 }
 
 /* radar types as defined by ETSI EN-301-893 v1.5.1 */
 static const struct radar_detector_specs etsi_radar_ref_types_v15[] = {
-       ETSI_PATTERN(0,  0,  1,  700,  700, 1, 18),
-       ETSI_PATTERN(1,  0,  5,  200, 1000, 1, 10),
-       ETSI_PATTERN(2,  0, 15,  200, 1600, 1, 15),
-       ETSI_PATTERN(3,  0, 15, 2300, 4000, 1, 25),
-       ETSI_PATTERN(4, 20, 30, 2000, 4000, 1, 20),
-       ETSI_PATTERN(5,  0,  2,  300,  400, 3, 10),
-       ETSI_PATTERN(6,  0,  2,  400, 1200, 3, 15),
+       ETSI_PATTERN(0,  0,  1,  700,  700, 1, 18, false),
+       ETSI_PATTERN(1,  0,  5,  200, 1000, 1, 10, false),
+       ETSI_PATTERN(2,  0, 15,  200, 1600, 1, 15, false),
+       ETSI_PATTERN(3,  0, 15, 2300, 4000, 1, 25, false),
+       ETSI_PATTERN(4, 20, 30, 2000, 4000, 1, 20, false),
+       ETSI_PATTERN(5,  0,  2,  300,  400, 3, 10, false),
+       ETSI_PATTERN(6,  0,  2,  400, 1200, 3, 15, false),
 };
 
 static const struct radar_types etsi_radar_types_v15 = {
@@ -73,21 +74,30 @@ static const struct radar_types etsi_radar_types_v15 = {
        .radar_types            = etsi_radar_ref_types_v15,
 };
 
-#define FCC_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB)      \
+#define FCC_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB, CHIRP)       \
 {                                                              \
        ID, WIDTH_LOWER(WMIN), WIDTH_UPPER(WMAX),               \
        PMIN - PRI_TOLERANCE,                                   \
        PMAX * PRF + PRI_TOLERANCE, PRF, PPB * PRF,             \
-       PPB_THRESH(PPB), PRI_TOLERANCE,                         \
+       PPB_THRESH(PPB), PRI_TOLERANCE, CHIRP                   \
 }
 
+/* FCC radar types released on August 14, 2014.
+ * Type 1 PRI values are randomly selected within the range of 518 and 3066.
+ * Dividing them into 3 groups is good enough for both radar detection
+ * and avoiding false detection, based on practical test results
+ * collected for more than a year.
+ */
 static const struct radar_detector_specs fcc_radar_ref_types[] = {
-       FCC_PATTERN(0, 0, 1, 1428, 1428, 1, 18),
-       FCC_PATTERN(1, 0, 5, 150, 230, 1, 23),
-       FCC_PATTERN(2, 6, 10, 200, 500, 1, 16),
-       FCC_PATTERN(3, 11, 20, 200, 500, 1, 12),
-       FCC_PATTERN(4, 50, 100, 1000, 2000, 1, 1),
-       FCC_PATTERN(5, 0, 1, 333, 333, 1, 9),
+       FCC_PATTERN(0, 0, 1, 1428, 1428, 1, 18, false),
+       FCC_PATTERN(101, 0, 1, 518, 938, 1, 57, false),
+       FCC_PATTERN(102, 0, 1, 938, 2000, 1, 27, false),
+       FCC_PATTERN(103, 0, 1, 2000, 3066, 1, 18, false),
+       FCC_PATTERN(2, 0, 5, 150, 230, 1, 23, false),
+       FCC_PATTERN(3, 6, 10, 200, 500, 1, 16, false),
+       FCC_PATTERN(4, 11, 20, 200, 500, 1, 12, false),
+       FCC_PATTERN(5, 50, 100, 1000, 2000, 1, 1, true),
+       FCC_PATTERN(6, 0, 1, 333, 333, 1, 9, false),
 };
 
 static const struct radar_types fcc_radar_types = {
@@ -96,17 +106,23 @@ static const struct radar_types fcc_radar_types = {
        .radar_types            = fcc_radar_ref_types,
 };
 
-#define JP_PATTERN FCC_PATTERN
+#define JP_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB, RATE, CHIRP)  \
+{                                                              \
+       ID, WIDTH_LOWER(WMIN), WIDTH_UPPER(WMAX),               \
+       PMIN - PRI_TOLERANCE,                                   \
+       PMAX * PRF + PRI_TOLERANCE, PRF, PPB * PRF,             \
+       PPB_THRESH_RATE(PPB, RATE), PRI_TOLERANCE, CHIRP        \
+}
 static const struct radar_detector_specs jp_radar_ref_types[] = {
-       JP_PATTERN(0, 0, 1, 1428, 1428, 1, 18),
-       JP_PATTERN(1, 2, 3, 3846, 3846, 1, 18),
-       JP_PATTERN(2, 0, 1, 1388, 1388, 1, 18),
-       JP_PATTERN(3, 1, 2, 4000, 4000, 1, 18),
-       JP_PATTERN(4, 0, 5, 150, 230, 1, 23),
-       JP_PATTERN(5, 6, 10, 200, 500, 1, 16),
-       JP_PATTERN(6, 11, 20, 200, 500, 1, 12),
-       JP_PATTERN(7, 50, 100, 1000, 2000, 1, 20),
-       JP_PATTERN(5, 0, 1, 333, 333, 1, 9),
+       JP_PATTERN(0, 0, 1, 1428, 1428, 1, 18, 29, false),
+       JP_PATTERN(1, 2, 3, 3846, 3846, 1, 18, 29, false),
+       JP_PATTERN(2, 0, 1, 1388, 1388, 1, 18, 50, false),
+       JP_PATTERN(3, 1, 2, 4000, 4000, 1, 18, 50, false),
+       JP_PATTERN(4, 0, 5, 150, 230, 1, 23, 50, false),
+       JP_PATTERN(5, 6, 10, 200, 500, 1, 16, 50, false),
+       JP_PATTERN(6, 11, 20, 200, 500, 1, 12, 50, false),
+       JP_PATTERN(7, 50, 100, 1000, 2000, 1, 20, 50, false),
+       JP_PATTERN(5, 0, 1, 333, 333, 1, 9, 50, false),
 };
 
 static const struct radar_types jp_radar_types = {
index dde2652b787cd533d7d0c2c411aefd99ada56c4b..25a43d632f908dfe72fabd2a2bc6f92f59520a35 100644 (file)
@@ -40,12 +40,14 @@ struct ath_dfs_pool_stats {
  * @freq: channel frequency in MHz
  * @width: pulse duration in us
  * @rssi: rssi of radar event
+ * @chirp: chirp detected in pulse
  */
 struct pulse_event {
        u64 ts;
        u16 freq;
        u8 width;
        u8 rssi;
+       bool chirp;
 };
 
 /**
@@ -59,6 +61,7 @@ struct pulse_event {
  * @ppb: pulses per bursts for this type
  * @ppb_thresh: number of pulses required to trigger detection
  * @max_pri_tolerance: pulse time stamp tolerance on both sides [us]
+ * @chirp: chirp required for the radar pattern
  */
 struct radar_detector_specs {
        u8 type_id;
@@ -70,6 +73,7 @@ struct radar_detector_specs {
        u8 ppb;
        u8 ppb_thresh;
        u8 max_pri_tolerance;
+       bool chirp;
 };
 
 /**
index 43b60817888450555cd579b7803848054811d387..1b5ad1965607cd287ca211d2232da1cdcc8d73fc 100644 (file)
@@ -390,6 +390,10 @@ static struct pri_sequence *pri_detector_add_pulse(struct pri_detector *de,
        if ((ts - de->last_ts) < rs->max_pri_tolerance)
                /* if delta to last pulse is too short, don't use this pulse */
                return NULL;
+       /* the radar detector spec requires a chirp, but none was detected */
+       if (rs->chirp && rs->chirp != event->chirp)
+               return NULL;
+
        de->last_ts = ts;
 
        max_updated_seq = pseq_handler_add_to_existing_seqs(de, ts);
index caa717bf52f3534c289949bd313b9497f21a630c..050506f842e9a63298920af2f5b8700e80aaf87b 100644 (file)
@@ -12,6 +12,7 @@ wil6210-y += debug.o
 wil6210-y += rx_reorder.o
 wil6210-y += ioctl.o
 wil6210-y += fw.o
+wil6210-y += pmc.o
 wil6210-$(CONFIG_WIL6210_TRACING) += trace.o
 wil6210-y += wil_platform.o
 wil6210-y += ethtool.o
index b97172667bc7b3c5c3ea3463c9b6573811a6d064..dbfcdd16628a25ddb4238d51988c9c5759fdf6b9 100644 (file)
@@ -402,11 +402,8 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
        rsn_eid = sme->ie ?
                        cfg80211_find_ie(WLAN_EID_RSN, sme->ie, sme->ie_len) :
                        NULL;
-
-       if (sme->privacy && !rsn_eid) {
-               wil_err(wil, "Missing RSN IE for secure connection\n");
-               return -EINVAL;
-       }
+       if (sme->privacy && !rsn_eid)
+               wil_info(wil, "WSC connection\n");
 
        bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid,
                               sme->ssid, sme->ssid_len,
@@ -425,10 +422,17 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
        wil->privacy = sme->privacy;
 
        if (wil->privacy) {
-               /* For secure assoc, send WMI_DELETE_CIPHER_KEY_CMD */
-               rc = wmi_del_cipher_key(wil, 0, bss->bssid);
+               /* For secure assoc, remove old keys */
+               rc = wmi_del_cipher_key(wil, 0, bss->bssid,
+                                       WMI_KEY_USE_PAIRWISE);
                if (rc) {
-                       wil_err(wil, "WMI_DELETE_CIPHER_KEY_CMD failed\n");
+                       wil_err(wil, "WMI_DELETE_CIPHER_KEY_CMD(PTK) failed\n");
+                       goto out;
+               }
+               rc = wmi_del_cipher_key(wil, 0, bss->bssid,
+                                       WMI_KEY_USE_RX_GROUP);
+               if (rc) {
+                       wil_err(wil, "WMI_DELETE_CIPHER_KEY_CMD(GTK) failed\n");
                        goto out;
                }
        }
@@ -458,11 +462,18 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
                goto out;
        }
        if (wil->privacy) {
-               conn.dot11_auth_mode = WMI_AUTH11_SHARED;
-               conn.auth_mode = WMI_AUTH_WPA2_PSK;
-               conn.pairwise_crypto_type = WMI_CRYPT_AES_GCMP;
-               conn.pairwise_crypto_len = 16;
-       } else {
+               if (rsn_eid) { /* regular secure connection */
+                       conn.dot11_auth_mode = WMI_AUTH11_SHARED;
+                       conn.auth_mode = WMI_AUTH_WPA2_PSK;
+                       conn.pairwise_crypto_type = WMI_CRYPT_AES_GCMP;
+                       conn.pairwise_crypto_len = 16;
+                       conn.group_crypto_type = WMI_CRYPT_AES_GCMP;
+                       conn.group_crypto_len = 16;
+               } else { /* WSC */
+                       conn.dot11_auth_mode = WMI_AUTH11_WSC;
+                       conn.auth_mode = WMI_AUTH_NONE;
+               }
+       } else { /* insecure connection */
                conn.dot11_auth_mode = WMI_AUTH11_OPEN;
                conn.auth_mode = WMI_AUTH_NONE;
        }
@@ -507,6 +518,8 @@ static int wil_cfg80211_disconnect(struct wiphy *wiphy,
        int rc;
        struct wil6210_priv *wil = wiphy_to_wil(wiphy);
 
+       wil_dbg_misc(wil, "%s(reason=%d)\n", __func__, reason_code);
+
        rc = wmi_send(wil, WMI_DISCONNECT_CMDID, NULL, 0);
 
        return rc;
@@ -561,6 +574,39 @@ static int wil_cfg80211_set_channel(struct wiphy *wiphy,
        return 0;
 }
 
+static enum wmi_key_usage wil_detect_key_usage(struct wil6210_priv *wil,
+                                              bool pairwise)
+{
+       struct wireless_dev *wdev = wil->wdev;
+       enum wmi_key_usage rc;
+       static const char * const key_usage_str[] = {
+               [WMI_KEY_USE_PAIRWISE]  = "WMI_KEY_USE_PAIRWISE",
+               [WMI_KEY_USE_RX_GROUP]  = "WMI_KEY_USE_RX_GROUP",
+               [WMI_KEY_USE_TX_GROUP]  = "WMI_KEY_USE_TX_GROUP",
+       };
+
+       if (pairwise) {
+               rc = WMI_KEY_USE_PAIRWISE;
+       } else {
+               switch (wdev->iftype) {
+               case NL80211_IFTYPE_STATION:
+                       rc = WMI_KEY_USE_RX_GROUP;
+                       break;
+               case NL80211_IFTYPE_AP:
+                       rc = WMI_KEY_USE_TX_GROUP;
+                       break;
+               default:
+                       /* TODO: Rx GTK or Tx GTK? */
+                       wil_err(wil, "Can't determine GTK type\n");
+                       rc = WMI_KEY_USE_RX_GROUP;
+                       break;
+               }
+       }
+       wil_dbg_misc(wil, "%s() -> %s\n", __func__, key_usage_str[rc]);
+
+       return rc;
+}
+
 static int wil_cfg80211_add_key(struct wiphy *wiphy,
                                struct net_device *ndev,
                                u8 key_index, bool pairwise,
@@ -568,13 +614,13 @@ static int wil_cfg80211_add_key(struct wiphy *wiphy,
                                struct key_params *params)
 {
        struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+       enum wmi_key_usage key_usage = wil_detect_key_usage(wil, pairwise);
 
-       /* group key is not used */
-       if (!pairwise)
-               return 0;
+       wil_dbg_misc(wil, "%s(%pM[%d] %s)\n", __func__, mac_addr, key_index,
+                    pairwise ? "PTK" : "GTK");
 
-       return wmi_add_cipher_key(wil, key_index, mac_addr,
-                                 params->key_len, params->key);
+       return wmi_add_cipher_key(wil, key_index, mac_addr, params->key_len,
+                                 params->key, key_usage);
 }
 
 static int wil_cfg80211_del_key(struct wiphy *wiphy,
@@ -583,12 +629,12 @@ static int wil_cfg80211_del_key(struct wiphy *wiphy,
                                const u8 *mac_addr)
 {
        struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+       enum wmi_key_usage key_usage = wil_detect_key_usage(wil, pairwise);
 
-       /* group key is not used */
-       if (!pairwise)
-               return 0;
+       wil_dbg_misc(wil, "%s(%pM[%d] %s)\n", __func__, mac_addr, key_index,
+                    pairwise ? "PTK" : "GTK");
 
-       return wmi_del_cipher_key(wil, key_index, mac_addr);
+       return wmi_del_cipher_key(wil, key_index, mac_addr, key_usage);
 }
 
 /* Need to be present or wiphy_new() will WARN */
@@ -661,11 +707,6 @@ static int wil_fix_bcon(struct wil6210_priv *wil,
        if (bcon->probe_resp_len <= hlen)
                return 0;
 
-       if (!bcon->proberesp_ies) {
-               bcon->proberesp_ies = f->u.probe_resp.variable;
-               bcon->proberesp_ies_len = bcon->probe_resp_len - hlen;
-               rc = 1;
-       }
        if (!bcon->assocresp_ies) {
                bcon->assocresp_ies = f->u.probe_resp.variable;
                bcon->assocresp_ies_len = bcon->probe_resp_len - hlen;
@@ -680,9 +721,19 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy,
                                      struct cfg80211_beacon_data *bcon)
 {
        struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+       struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp;
+       size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
+       const u8 *pr_ies = NULL;
+       size_t pr_ies_len = 0;
        int rc;
 
        wil_dbg_misc(wil, "%s()\n", __func__);
+       wil_print_bcon_data(bcon);
+
+       if (bcon->probe_resp_len > hlen) {
+               pr_ies = f->u.probe_resp.variable;
+               pr_ies_len = bcon->probe_resp_len - hlen;
+       }
 
        if (wil_fix_bcon(wil, bcon)) {
                wil_dbg_misc(wil, "Fixed bcon\n");
@@ -695,9 +746,7 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy,
         * wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len,
         * bcon->beacon_ies);
         */
-       rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP,
-                       bcon->proberesp_ies_len,
-                       bcon->proberesp_ies);
+       rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, pr_ies_len, pr_ies);
        if (rc) {
                wil_err(wil, "set_ie(PROBE_RESP) failed\n");
                return rc;
@@ -725,6 +774,10 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
        struct cfg80211_beacon_data *bcon = &info->beacon;
        struct cfg80211_crypto_settings *crypto = &info->crypto;
        u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype);
+       struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp;
+       size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
+       const u8 *pr_ies = NULL;
+       size_t pr_ies_len = 0;
 
        wil_dbg_misc(wil, "%s()\n", __func__);
 
@@ -744,6 +797,11 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
        wil_print_bcon_data(bcon);
        wil_print_crypto(wil, crypto);
 
+       if (bcon->probe_resp_len > hlen) {
+               pr_ies = f->u.probe_resp.variable;
+               pr_ies_len = bcon->probe_resp_len - hlen;
+       }
+
        if (wil_fix_bcon(wil, bcon)) {
                wil_dbg_misc(wil, "Fixed bcon\n");
                wil_print_bcon_data(bcon);
@@ -771,8 +829,7 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
         * wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len,
         * bcon->beacon_ies);
         */
-       wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, bcon->proberesp_ies_len,
-                  bcon->proberesp_ies);
+       wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, pr_ies_len, pr_ies);
        wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, bcon->assocresp_ies_len,
                   bcon->assocresp_ies);
 
@@ -814,13 +871,9 @@ static int wil_cfg80211_stop_ap(struct wiphy *wiphy,
        wmi_pcp_stop(wil);
 
        __wil_down(wil);
-       __wil_up(wil);
 
        mutex_unlock(&wil->mutex);
 
-       /* some functions above might fail (e.g. __wil_up). Nevertheless, we
-        * return success because AP has stopped
-        */
        return 0;
 }
 
@@ -830,6 +883,9 @@ static int wil_cfg80211_del_station(struct wiphy *wiphy,
 {
        struct wil6210_priv *wil = wiphy_to_wil(wiphy);
 
+       wil_dbg_misc(wil, "%s(%pM, reason=%d)\n", __func__, params->mac,
+                    params->reason_code);
+
        mutex_lock(&wil->mutex);
        wil6210_disconnect(wil, params->mac, params->reason_code, false);
        mutex_unlock(&wil->mutex);
index bbc22d88f78f27dadd73ea64134f5484ca9df497..8f9c0722a8018b605242a26307015a69ad3e48ee 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -24,6 +24,7 @@
 #include "wil6210.h"
 #include "wmi.h"
 #include "txrx.h"
+#include "pmc.h"
 
 /* Nasty hack. Better have per device instances */
 static u32 mem_addr;
@@ -123,15 +124,17 @@ static int wil_vring_debugfs_show(struct seq_file *s, void *data)
 
                        if (cid < WIL6210_MAX_CID)
                                seq_printf(s,
-                                          "\n%pM CID %d TID %d BACK([%u] %u TU A%s) [%3d|%3d] idle %s\n",
+                                          "\n%pM CID %d TID %d 1x%s BACK([%u] %u TU A%s) [%3d|%3d] idle %s\n",
                                           wil->sta[cid].addr, cid, tid,
+                                          txdata->dot1x_open ? "+" : "-",
                                           txdata->agg_wsize,
                                           txdata->agg_timeout,
                                           txdata->agg_amsdu ? "+" : "-",
                                           used, avail, sidle);
                        else
                                seq_printf(s,
-                                          "\nBroadcast [%3d|%3d] idle %s\n",
+                                          "\nBroadcast 1x%s [%3d|%3d] idle %s\n",
+                                          txdata->dot1x_open ? "+" : "-",
                                           used, avail, sidle);
 
                        wil_print_vring(s, wil, name, vring, '_', 'H');
@@ -702,6 +705,89 @@ static const struct file_operations fops_back = {
        .open  = simple_open,
 };
 
+/* pmc control, write:
+ * - "alloc <num descriptors> <descriptor_size>" to allocate PMC
+ * - "free" to release memory allocated for PMC
+ */
+static ssize_t wil_write_pmccfg(struct file *file, const char __user *buf,
+                               size_t len, loff_t *ppos)
+{
+       struct wil6210_priv *wil = file->private_data;
+       int rc;
+       char *kbuf = kmalloc(len + 1, GFP_KERNEL);
+       char cmd[9];
+       int num_descs, desc_size;
+
+       if (!kbuf)
+               return -ENOMEM;
+
+       rc = simple_write_to_buffer(kbuf, len, ppos, buf, len);
+       if (rc != len) {
+               kfree(kbuf);
+               return rc >= 0 ? -EIO : rc;
+       }
+
+       kbuf[len] = '\0';
+       rc = sscanf(kbuf, "%8s %d %d", cmd, &num_descs, &desc_size);
+       kfree(kbuf);
+
+       if (rc < 0)
+               return rc;
+
+       if (rc < 1) {
+               wil_err(wil, "pmccfg: no params given\n");
+               return -EINVAL;
+       }
+
+       if (0 == strcmp(cmd, "alloc")) {
+               if (rc != 3) {
+                       wil_err(wil, "pmccfg: alloc requires 2 params\n");
+                       return -EINVAL;
+               }
+               wil_pmc_alloc(wil, num_descs, desc_size);
+       } else if (0 == strcmp(cmd, "free")) {
+               if (rc != 1) {
+                       wil_err(wil, "pmccfg: free does not have any params\n");
+                       return -EINVAL;
+               }
+               wil_pmc_free(wil, true);
+       } else {
+               wil_err(wil, "pmccfg: Unrecognized command \"%s\"\n", cmd);
+               return -EINVAL;
+       }
+
+       return len;
+}
+
+static ssize_t wil_read_pmccfg(struct file *file, char __user *user_buf,
+                              size_t count, loff_t *ppos)
+{
+       struct wil6210_priv *wil = file->private_data;
+       char text[256];
+       char help[] = "pmc control, write:\n"
+       " - \"alloc <num descriptors> <descriptor_size>\" to allocate pmc\n"
+       " - \"free\" to free memory allocated for pmc\n";
+
+       sprintf(text, "Last command status: %d\n\n%s",
+               wil_pmc_last_cmd_status(wil),
+               help);
+
+       return simple_read_from_buffer(user_buf, count, ppos, text,
+                                      strlen(text) + 1);
+}
+
+static const struct file_operations fops_pmccfg = {
+       .read = wil_read_pmccfg,
+       .write = wil_write_pmccfg,
+       .open  = simple_open,
+};
+
+static const struct file_operations fops_pmcdata = {
+       .open           = simple_open,
+       .read           = wil_pmc_read,
+       .llseek         = wil_pmc_llseek,
+};
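
Editorial aside, not part of the patch: a minimal userspace sketch driving
the new pmccfg file. The debugfs path is an assumption (it depends on the
debugfs mount point and the wiphy name); the command strings follow the help
text above, and 256 x 2048 is only an example geometry.

/* Minimal userspace sketch for the pmccfg debugfs interface above. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* assumed path; adjust for the local system */
	const char *path =
		"/sys/kernel/debug/ieee80211/phy0/wil6210/pmccfg";
	const char cmd[] = "alloc 256 2048"; /* 256 descriptors x 2048 B */
	char status[128];
	ssize_t n;
	int fd = open(path, O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, cmd, strlen(cmd)) < 0) /* request PMC allocation */
		perror("write");
	lseek(fd, 0, SEEK_SET);
	n = read(fd, status, sizeof(status) - 1); /* last command status */
	if (n > 0) {
		status[n] = '\0';
		fputs(status, stdout);
	}
	close(fd);
	return 0;
}
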
+
 /*---tx_mgmt---*/
 /* Write mgmt frame to this file to send it */
 static ssize_t wil_write_file_txmgmt(struct file *file, const char __user *buf,
@@ -1111,8 +1197,7 @@ static int wil_link_debugfs_show(struct seq_file *s, void *data)
                        status = "connected";
                        break;
                }
-               seq_printf(s, "[%d] %pM %s%s\n", i, p->addr, status,
-                          (p->data_port_open ? " data_port_open" : ""));
+               seq_printf(s, "[%d] %pM %s\n", i, p->addr, status);
 
                if (p->status == wil_sta_connected) {
                        rc = wil_cid_fill_sinfo(wil, i, &sinfo);
@@ -1292,8 +1377,7 @@ __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock)
                        status = "connected";
                        break;
                }
-               seq_printf(s, "[%d] %pM %s%s\n", i, p->addr, status,
-                          (p->data_port_open ? " data_port_open" : ""));
+               seq_printf(s, "[%d] %pM %s\n", i, p->addr, status);
 
                if (p->status == wil_sta_connected) {
                        spin_lock_bh(&p->tid_rx_lock);
@@ -1363,6 +1447,8 @@ static const struct {
        {"tx_mgmt",               S_IWUSR,      &fops_txmgmt},
        {"wmi_send",              S_IWUSR,      &fops_wmi},
        {"back",        S_IRUGO | S_IWUSR,      &fops_back},
+       {"pmccfg",      S_IRUGO | S_IWUSR,      &fops_pmccfg},
+       {"pmcdata",     S_IRUGO,                &fops_pmcdata},
        {"temp",        S_IRUGO,                &fops_temp},
        {"freq",        S_IRUGO,                &fops_freq},
        {"link",        S_IRUGO,                &fops_link},
@@ -1440,6 +1526,8 @@ int wil6210_debugfs_init(struct wil6210_priv *wil)
        if (IS_ERR_OR_NULL(dbg))
                return -ENODEV;
 
+       wil_pmc_init(wil);
+
        wil6210_debugfs_init_files(wil, dbg);
        wil6210_debugfs_init_isr(wil, dbg);
        wil6210_debugfs_init_blobs(wil, dbg);
@@ -1459,4 +1547,9 @@ void wil6210_debugfs_remove(struct wil6210_priv *wil)
 {
        debugfs_remove_recursive(wil->debug);
        wil->debug = NULL;
+
+       /* free pmc memory without sending command to fw, as it will
+        * be reset on the way down anyway
+        */
+       wil_pmc_free(wil, false);
 }
index c2a238426425462c7ff40f61c8e98fec9dadca6d..6d704aee3afd0f083fdb8861c1aa22a07d45bcd0 100644 (file)
 #define WAIT_FOR_DISCONNECT_TIMEOUT_MS 2000
 #define WAIT_FOR_DISCONNECT_INTERVAL_MS 10
 
+bool debug_fw; /* = false; */
+module_param(debug_fw, bool, S_IRUGO);
+MODULE_PARM_DESC(debug_fw, " do not perform card reset. For FW debug");
+
 bool no_fw_recovery;
 module_param(no_fw_recovery, bool, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(no_fw_recovery, " disable automatic FW error recovery");
@@ -146,7 +150,6 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
        wil_dbg_misc(wil, "%s(CID %d, status %d)\n", __func__, cid,
                     sta->status);
 
-       sta->data_port_open = false;
        if (sta->status != wil_sta_unused) {
                if (!from_event)
                        wmi_disconnect_sta(wil, sta->addr, reason_code);
@@ -224,7 +227,7 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
                if (test_bit(wil_status_fwconnected, wil->status)) {
                        clear_bit(wil_status_fwconnected, wil->status);
                        cfg80211_disconnected(ndev, reason_code,
-                                             NULL, 0, GFP_KERNEL);
+                                             NULL, 0, false, GFP_KERNEL);
                } else if (test_bit(wil_status_fwconnecting, wil->status)) {
                        cfg80211_connect_result(ndev, bssid, NULL, 0, NULL, 0,
                                                WLAN_STATUS_UNSPECIFIED_FAILURE,
@@ -373,9 +376,10 @@ int wil_bcast_init(struct wil6210_priv *wil)
        if (ri < 0)
                return ri;
 
+       wil->bcast_vring = ri;
        rc = wil_vring_init_bcast(wil, ri, 1 << bcast_ring_order);
-       if (rc == 0)
-               wil->bcast_vring = ri;
+       if (rc)
+               wil->bcast_vring = -1;
 
        return rc;
 }
@@ -547,7 +551,7 @@ static inline void wil_release_cpu(struct wil6210_priv *wil)
 static int wil_target_reset(struct wil6210_priv *wil)
 {
        int delay = 0;
-       u32 x;
+       u32 x, x1 = 0;
 
        wil_dbg_misc(wil, "Resetting \"%s\"...\n", wil->hw_name);
 
@@ -602,12 +606,16 @@ static int wil_target_reset(struct wil6210_priv *wil)
        do {
                msleep(RST_DELAY);
                x = R(RGF_USER_BL + offsetof(struct RGF_BL, ready));
+               if (x1 != x) {
+                       wil_dbg_misc(wil, "BL.ready 0x%08x => 0x%08x\n", x1, x);
+                       x1 = x;
+               }
                if (delay++ > RST_COUNT) {
                        wil_err(wil, "Reset not completed, bl.ready 0x%08x\n",
                                x);
                        return -ETIME;
                }
-       } while (!(x & BIT_BL_READY));
+       } while (x != BIT_BL_READY);
 
        C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
 
@@ -686,6 +694,17 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
        WARN_ON(!mutex_is_locked(&wil->mutex));
        WARN_ON(test_bit(wil_status_napi_en, wil->status));
 
+       if (debug_fw) {
+               static const u8 mac[ETH_ALEN] = {
+                       0x00, 0xde, 0xad, 0x12, 0x34, 0x56,
+               };
+               struct net_device *ndev = wil_to_ndev(wil);
+
+               ether_addr_copy(ndev->perm_addr, mac);
+               ether_addr_copy(ndev->dev_addr, ndev->perm_addr);
+               return 0;
+       }
+
        cancel_work_sync(&wil->disconnect_worker);
        wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false);
        wil_bcast_fini(wil);
index f2f7ea29558e058f27d7f934ba36769db36306c2..6042f61b016c010568c3b9f5903cff8ff668f0eb 100644 (file)
@@ -24,6 +24,11 @@ static int wil_open(struct net_device *ndev)
 
        wil_dbg_misc(wil, "%s()\n", __func__);
 
+       if (debug_fw) {
+               wil_err(wil, "%s() while in debug_fw mode\n", __func__);
+               return -EINVAL;
+       }
+
        return wil_up(wil);
 }
 
index 109986114abfa10d6bd6083fa52fb29de2912fea..58c79166a6d11a5d676b857ccee5f55f0b71a415 100644 (file)
@@ -27,10 +27,6 @@ MODULE_PARM_DESC(use_msi,
                 " Use MSI interrupt: "
                 "0 - don't, 1 - (default) - single, or 3");
 
-static bool debug_fw; /* = false; */
-module_param(debug_fw, bool, S_IRUGO);
-MODULE_PARM_DESC(debug_fw, " load driver if FW not ready. For FW debug");
-
 static
 void wil_set_capabilities(struct wil6210_priv *wil)
 {
@@ -133,8 +129,6 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil)
        mutex_lock(&wil->mutex);
        rc = wil_reset(wil, false);
        mutex_unlock(&wil->mutex);
-       if (debug_fw)
-               rc = 0;
        if (rc)
                goto release_irq;
 
diff --git a/drivers/net/wireless/ath/wil6210/pmc.c b/drivers/net/wireless/ath/wil6210/pmc.c
new file mode 100644 (file)
index 0000000..8a8cdc6
--- /dev/null
@@ -0,0 +1,375 @@
+/*
+ * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include "wmi.h"
+#include "wil6210.h"
+#include "txrx.h"
+#include "pmc.h"
+
+struct desc_alloc_info {
+       dma_addr_t pa;
+       void      *va;
+};
+
+static int wil_is_pmc_allocated(struct pmc_ctx *pmc)
+{
+       return !!pmc->pring_va;
+}
+
+void wil_pmc_init(struct wil6210_priv *wil)
+{
+       memset(&wil->pmc, 0, sizeof(struct pmc_ctx));
+       mutex_init(&wil->pmc.lock);
+}
+
+/**
+ * Allocate the physical ring (p-ring) and the required
+ * number of descriptors of the required size.
+ * Initialize the descriptors as required by pmc dma.
+ * Each descriptor buffer's dwords are initialized to hold the
+ * dword's serial number in the lsw and the reserved value
+ * PCM_DATA_INVALID_DW_VAL in the msw.
+ */
+void wil_pmc_alloc(struct wil6210_priv *wil,
+                  int num_descriptors,
+                  int descriptor_size)
+{
+       u32 i;
+       struct pmc_ctx *pmc = &wil->pmc;
+       struct device *dev = wil_to_dev(wil);
+       struct wmi_pmc_cmd pmc_cmd = {0};
+
+       mutex_lock(&pmc->lock);
+
+       if (wil_is_pmc_allocated(pmc)) {
+               /* sanity check */
+               wil_err(wil, "%s: ERROR pmc is already allocated\n", __func__);
+               goto no_release_err;
+       }
+
+       pmc->num_descriptors = num_descriptors;
+       pmc->descriptor_size = descriptor_size;
+
+       wil_dbg_misc(wil, "%s: %d descriptors x %d bytes each\n",
+                    __func__, num_descriptors, descriptor_size);
+
+       /* allocate descriptors info list in pmc context */
+       pmc->descriptors = kcalloc(num_descriptors,
+                                 sizeof(struct desc_alloc_info),
+                                 GFP_KERNEL);
+       if (!pmc->descriptors) {
+               wil_err(wil, "%s: ERROR allocating pmc descriptors list\n",
+                       __func__);
+               goto no_release_err;
+       }
+
+       wil_dbg_misc(wil,
+                    "%s: allocated descriptors info list %p\n",
+                    __func__, pmc->descriptors);
+
+       /* Allocate pring buffer and descriptors.
+        * vring->va should be aligned on its size rounded up to a power of 2.
+        * This is granted by dma_alloc_coherent.
+        */
+       pmc->pring_va = dma_alloc_coherent(dev,
+                       sizeof(struct vring_tx_desc) * num_descriptors,
+                       &pmc->pring_pa,
+                       GFP_KERNEL);
+
+       wil_dbg_misc(wil,
+                    "%s: allocated pring %p => %pad. %zd x %d = total %zd bytes\n",
+                    __func__,
+                    pmc->pring_va, &pmc->pring_pa,
+                    sizeof(struct vring_tx_desc),
+                    num_descriptors,
+                    sizeof(struct vring_tx_desc) * num_descriptors);
+
+       if (!pmc->pring_va) {
+               wil_err(wil, "%s: ERROR allocating pmc pring\n", __func__);
+               goto release_pmc_skb_list;
+       }
+
+       /* initially, all descriptors are SW owned
+        * For Tx, Rx, and PMC, the ownership bit is at the same location,
+        * thus any of these descriptor layouts can be used
+        */
+       for (i = 0; i < num_descriptors; i++) {
+               struct vring_tx_desc *_d = &pmc->pring_va[i];
+               struct vring_tx_desc dd, *d = &dd;
+               int j = 0;
+
+               pmc->descriptors[i].va = dma_alloc_coherent(dev,
+                       descriptor_size,
+                       &pmc->descriptors[i].pa,
+                       GFP_KERNEL);
+
+               if (unlikely(!pmc->descriptors[i].va)) {
+                       wil_err(wil,
+                               "%s: ERROR allocating pmc descriptor %d",
+                               __func__, i);
+                       goto release_pmc_skbs;
+               }
+
+               for (j = 0; j < descriptor_size / sizeof(u32); j++) {
+                       u32 *p = (u32 *)pmc->descriptors[i].va + j;
+                       *p = PCM_DATA_INVALID_DW_VAL | j;
+               }
+
+               /* configure dma descriptor */
+               d->dma.addr.addr_low =
+                       cpu_to_le32(lower_32_bits(pmc->descriptors[i].pa));
+               d->dma.addr.addr_high =
+                       cpu_to_le16((u16)upper_32_bits(pmc->descriptors[i].pa));
+               d->dma.status = 0; /* 0 = HW_OWNED */
+               d->dma.length = cpu_to_le16(descriptor_size);
+               d->dma.d0 = BIT(9) | RX_DMA_D0_CMD_DMA_IT;
+               *_d = *d;
+       }
+
+       wil_dbg_misc(wil, "%s: allocated successfully\n", __func__);
+
+       pmc_cmd.op = WMI_PMC_ALLOCATE;
+       pmc_cmd.ring_size = cpu_to_le16(pmc->num_descriptors);
+       pmc_cmd.mem_base = cpu_to_le64(pmc->pring_pa);
+
+       wil_dbg_misc(wil, "%s: send WMI_PMC_CMD with ALLOCATE op\n", __func__);
+       pmc->last_cmd_status = wmi_send(wil,
+                                       WMI_PMC_CMDID,
+                                       &pmc_cmd,
+                                       sizeof(pmc_cmd));
+       if (pmc->last_cmd_status) {
+               wil_err(wil,
+                       "%s: WMI_PMC_CMD with ALLOCATE op failed with status %d",
+                       __func__, pmc->last_cmd_status);
+               goto release_pmc_skbs;
+       }
+
+       mutex_unlock(&pmc->lock);
+
+       return;
+
+release_pmc_skbs:
+       wil_err(wil, "%s: exit on error: Releasing descriptor buffers...\n",
+               __func__);
+       for (i = 0; i < num_descriptors && pmc->descriptors[i].va; i++) {
+               dma_free_coherent(dev,
+                                 descriptor_size,
+                                 pmc->descriptors[i].va,
+                                 pmc->descriptors[i].pa);
+
+               pmc->descriptors[i].va = NULL;
+       }
+       wil_err(wil, "%s: exit on error: Releasing pring...\n", __func__);
+
+       dma_free_coherent(dev,
+                         sizeof(struct vring_tx_desc) * num_descriptors,
+                         pmc->pring_va,
+                         pmc->pring_pa);
+
+       pmc->pring_va = NULL;
+
+release_pmc_skb_list:
+       wil_err(wil, "%s: exit on error: Releasing descriptors info list...\n",
+               __func__);
+       kfree(pmc->descriptors);
+       pmc->descriptors = NULL;
+
+no_release_err:
+       pmc->last_cmd_status = -ENOMEM;
+       mutex_unlock(&pmc->lock);
+}
+
+/**
+ * Traverse the p-ring and release all buffers.
+ * At the end release the p-ring memory
+ */
+void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd)
+{
+       struct pmc_ctx *pmc = &wil->pmc;
+       struct device *dev = wil_to_dev(wil);
+       struct wmi_pmc_cmd pmc_cmd = {0};
+
+       mutex_lock(&pmc->lock);
+
+       pmc->last_cmd_status = 0;
+
+       if (!wil_is_pmc_allocated(pmc)) {
+               wil_dbg_misc(wil, "%s: Error, can't free - not allocated\n",
+                            __func__);
+               pmc->last_cmd_status = -EPERM;
+               mutex_unlock(&pmc->lock);
+               return;
+       }
+
+       if (send_pmc_cmd) {
+               wil_dbg_misc(wil, "%s: send WMI_PMC_CMD with RELEASE op\n",
+                            __func__);
+               pmc_cmd.op = WMI_PMC_RELEASE;
+               pmc->last_cmd_status =
+                               wmi_send(wil, WMI_PMC_CMDID, &pmc_cmd,
+                                        sizeof(pmc_cmd));
+               if (pmc->last_cmd_status) {
+                       wil_err(wil,
+                               "%s WMI_PMC_CMD with RELEASE op failed, status %d",
+                               __func__, pmc->last_cmd_status);
+                       /* There's nothing we can do with this error.
+                        * Normally, it should never occur.
+                        * Continue to free all memory allocated for pmc.
+                        */
+               }
+       }
+
+       if (pmc->pring_va) {
+               size_t buf_size = sizeof(struct vring_tx_desc) *
+                                 pmc->num_descriptors;
+
+               wil_dbg_misc(wil, "%s: free pring va %p\n",
+                            __func__, pmc->pring_va);
+               dma_free_coherent(dev, buf_size, pmc->pring_va, pmc->pring_pa);
+
+               pmc->pring_va = NULL;
+       } else {
+               pmc->last_cmd_status = -ENOENT;
+       }
+
+       if (pmc->descriptors) {
+               int i;
+
+               for (i = 0;
+                    i < pmc->num_descriptors && pmc->descriptors[i].va; i++) {
+                       dma_free_coherent(dev,
+                                         pmc->descriptor_size,
+                                         pmc->descriptors[i].va,
+                                         pmc->descriptors[i].pa);
+                       pmc->descriptors[i].va = NULL;
+               }
+               wil_dbg_misc(wil, "%s: free descriptor info %d/%d\n",
+                            __func__, i, pmc->num_descriptors);
+               wil_dbg_misc(wil,
+                            "%s: free pmc descriptors info list %p\n",
+                            __func__, pmc->descriptors);
+               kfree(pmc->descriptors);
+               pmc->descriptors = NULL;
+       } else {
+               pmc->last_cmd_status = -ENOENT;
+       }
+
+       mutex_unlock(&pmc->lock);
+}
+
+/**
+ * Status of the last operation requested via debugfs: alloc/free/read.
+ * Returns 0 on success or a negative errno.
+ */
+int wil_pmc_last_cmd_status(struct wil6210_priv *wil)
+{
+       wil_dbg_misc(wil, "%s: status %d\n", __func__,
+                    wil->pmc.last_cmd_status);
+
+       return wil->pmc.last_cmd_status;
+}
+
+/**
+ * Read from the required position up to the end of the current descriptor,
+ * depending on the descriptor size configured during the alloc request.
+ */
+ssize_t wil_pmc_read(struct file *filp, char __user *buf, size_t count,
+                    loff_t *f_pos)
+{
+       struct wil6210_priv *wil = filp->private_data;
+       struct pmc_ctx *pmc = &wil->pmc;
+       size_t retval = 0;
+       unsigned long long idx;
+       loff_t offset;
+       size_t pmc_size = pmc->descriptor_size * pmc->num_descriptors;
+
+       mutex_lock(&pmc->lock);
+
+       if (!wil_is_pmc_allocated(pmc)) {
+               wil_err(wil, "%s: error, pmc is not allocated!\n", __func__);
+               pmc->last_cmd_status = -EPERM;
+               mutex_unlock(&pmc->lock);
+               return -EPERM;
+       }
+
+       wil_dbg_misc(wil,
+                    "%s: size %u, pos %lld\n",
+                    __func__, (unsigned)count, *f_pos);
+
+       pmc->last_cmd_status = 0;
+
+       idx = *f_pos;
+       do_div(idx, pmc->descriptor_size);
+       offset = *f_pos - (idx * pmc->descriptor_size);
+
+       if (*f_pos >= pmc_size) {
+               wil_dbg_misc(wil, "%s: reached end of pmc buf: %lld >= %u\n",
+                            __func__, *f_pos, (unsigned)pmc_size);
+               pmc->last_cmd_status = -ERANGE;
+               goto out;
+       }
+
+       wil_dbg_misc(wil,
+                    "%s: read from pos %lld (descriptor %llu, offset %llu) %zu bytes\n",
+                    __func__, *f_pos, idx, offset, count);
+
+       /* if no errors, return the copied byte count */
+       retval = simple_read_from_buffer(buf,
+                                        count,
+                                        &offset,
+                                        pmc->descriptors[idx].va,
+                                        pmc->descriptor_size);
+       *f_pos += retval;
+out:
+       mutex_unlock(&pmc->lock);
+
+       return retval;
+}
+
+loff_t wil_pmc_llseek(struct file *filp, loff_t off, int whence)
+{
+       loff_t newpos;
+       struct wil6210_priv *wil = filp->private_data;
+       struct pmc_ctx *pmc = &wil->pmc;
+       size_t pmc_size = pmc->descriptor_size * pmc->num_descriptors;
+
+       switch (whence) {
+       case 0: /* SEEK_SET */
+               newpos = off;
+               break;
+
+       case 1: /* SEEK_CUR */
+               newpos = filp->f_pos + off;
+               break;
+
+       case 2: /* SEEK_END */
+               newpos = pmc_size;
+               break;
+
+       default: /* can't happen */
+               return -EINVAL;
+       }
+
+       if (newpos < 0)
+               return -EINVAL;
+       if (newpos > pmc_size)
+               newpos = pmc_size;
+
+       filp->f_pos = newpos;
+
+       return newpos;
+}
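
Editorial aside, not part of the patch: the position bookkeeping in
wil_pmc_read() above splits *f_pos into a descriptor index and a byte offset
inside that descriptor, so a single read never crosses a descriptor
boundary. A sketch with example sizes (2048-byte descriptors are only an
illustration); plain 64-bit division stands in for do_div().

/* Sketch of the *f_pos -> (descriptor index, offset) split in
 * wil_pmc_read().
 */
#include <assert.h>
#include <stdint.h>

static void pmc_pos(uint64_t f_pos, int descriptor_size,
		    uint64_t *idx, uint64_t *offset)
{
	*idx = f_pos / descriptor_size;
	*offset = f_pos - *idx * descriptor_size;
}

int main(void)
{
	uint64_t idx, offset;

	/* reading from position 5000 with 2048-byte descriptors */
	pmc_pos(5000, 2048, &idx, &offset);
	assert(idx == 2 && offset == 904);	/* 5000 = 2 * 2048 + 904 */
	return 0;
}
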
diff --git a/drivers/net/wireless/ath/wil6210/pmc.h b/drivers/net/wireless/ath/wil6210/pmc.h
new file mode 100644 (file)
index 0000000..bebc8d5
--- /dev/null
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/types.h>
+
+#define PCM_DATA_INVALID_DW_VAL (0xB0BA0000)
+
+void wil_pmc_init(struct wil6210_priv *wil);
+void wil_pmc_alloc(struct wil6210_priv *wil,
+                  int num_descriptors, int descriptor_size);
+void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd);
+int wil_pmc_last_cmd_status(struct wil6210_priv *wil);
+ssize_t wil_pmc_read(struct file *filp, char __user *buf, size_t count,
+                    loff_t *f_pos);
+loff_t wil_pmc_llseek(struct file *filp, loff_t off, int whence);
index e8bd512d81a9b7bbd70b7d56e65e0133e39881b0..0113dac3a9a9f33b6190609e26057777a35aa8ff 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -236,7 +236,7 @@ static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
                return -ENOMEM;
        }
 
-       d->dma.d0 = BIT(9) | RX_DMA_D0_CMD_DMA_IT;
+       d->dma.d0 = RX_DMA_D0_CMD_DMA_RT | RX_DMA_D0_CMD_DMA_IT;
        wil_desc_addr_set(&d->dma.addr, pa);
        /* ip_length don't care */
        /* b11 don't care */
@@ -724,6 +724,8 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
 
        cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
 
+       if (!wil->privacy)
+               txdata->dot1x_open = true;
        rc = wmi_call(wil, WMI_VRING_CFG_CMDID, &cmd, sizeof(cmd),
                      WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
        if (rc)
@@ -738,11 +740,13 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
        vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr);
 
        txdata->enabled = 1;
-       if (wil->sta[cid].data_port_open && (agg_wsize >= 0))
+       if (txdata->dot1x_open && (agg_wsize >= 0))
                wil_addba_tx_request(wil, id, agg_wsize);
 
        return 0;
  out_free:
+       txdata->dot1x_open = false;
+       txdata->enabled = 0;
        wil_vring_free(wil, vring, 1);
  out:
 
@@ -792,6 +796,8 @@ int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size)
 
        cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa);
 
+       if (!wil->privacy)
+               txdata->dot1x_open = true;
        rc = wmi_call(wil, WMI_BCAST_VRING_CFG_CMDID, &cmd, sizeof(cmd),
                      WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100);
        if (rc)
@@ -809,6 +815,8 @@ int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size)
 
        return 0;
  out_free:
+       txdata->enabled = 0;
+       txdata->dot1x_open = false;
        wil_vring_free(wil, vring, 1);
  out:
 
@@ -828,6 +836,7 @@ void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
        wil_dbg_misc(wil, "%s() id=%d\n", __func__, id);
 
        spin_lock_bh(&txdata->lock);
+       txdata->dot1x_open = false;
        txdata->enabled = 0; /* no Tx can be in progress or start anew */
        spin_unlock_bh(&txdata->lock);
        /* make sure NAPI won't touch this vring */
@@ -848,12 +857,11 @@ static struct vring *wil_find_tx_ucast(struct wil6210_priv *wil,
        if (cid < 0)
                return NULL;
 
-       if (!wil->sta[cid].data_port_open &&
-           (skb->protocol != cpu_to_be16(ETH_P_PAE)))
-               return NULL;
-
        /* TODO: fix for multiple TID */
        for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++) {
+               if (!wil->vring_tx_data[i].dot1x_open &&
+                   (skb->protocol != cpu_to_be16(ETH_P_PAE)))
+                       continue;
                if (wil->vring2cid_tid[i][0] == cid) {
                        struct vring *v = &wil->vring_tx[i];
 
@@ -883,7 +891,7 @@ static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil,
 
        /* In the STA mode, it is expected to have only 1 VRING
         * for the AP we connected to.
-        * find 1-st vring and see whether it is eligible for data
+        * find the 1st vring eligible for this skb and use it.
         */
        for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
                v = &wil->vring_tx[i];
@@ -894,9 +902,9 @@ static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil,
                if (cid >= WIL6210_MAX_CID) /* skip BCAST */
                        continue;
 
-               if (!wil->sta[cid].data_port_open &&
+               if (!wil->vring_tx_data[i].dot1x_open &&
                    (skb->protocol != cpu_to_be16(ETH_P_PAE)))
-                       break;
+                       continue;
 
                wil_dbg_txrx(wil, "Tx -> ring %d\n", i);
 
@@ -918,7 +926,6 @@ static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil,
  *    in all cases override dest address to unicast peer's address
  * Use old strategy when new is not supported yet:
  *  - for PBSS
- *  - for secure link
  */
 static struct vring *wil_find_tx_bcast_1(struct wil6210_priv *wil,
                                         struct sk_buff *skb)
@@ -931,6 +938,9 @@ static struct vring *wil_find_tx_bcast_1(struct wil6210_priv *wil,
        v = &wil->vring_tx[i];
        if (!v->va)
                return NULL;
+       if (!wil->vring_tx_data[i].dot1x_open &&
+           (skb->protocol != cpu_to_be16(ETH_P_PAE)))
+               return NULL;
 
        return v;
 }
@@ -963,7 +973,8 @@ static struct vring *wil_find_tx_bcast_2(struct wil6210_priv *wil,
                cid = wil->vring2cid_tid[i][0];
                if (cid >= WIL6210_MAX_CID) /* skip BCAST */
                        continue;
-               if (!wil->sta[cid].data_port_open)
+               if (!wil->vring_tx_data[i].dot1x_open &&
+                   (skb->protocol != cpu_to_be16(ETH_P_PAE)))
                        continue;
 
                /* don't Tx back to source when re-routing Rx->Tx at the AP */
@@ -989,7 +1000,8 @@ found:
                cid = wil->vring2cid_tid[i][0];
                if (cid >= WIL6210_MAX_CID) /* skip BCAST */
                        continue;
-               if (!wil->sta[cid].data_port_open)
+               if (!wil->vring_tx_data[i].dot1x_open &&
+                   (skb->protocol != cpu_to_be16(ETH_P_PAE)))
                        continue;
 
                if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN))
@@ -1016,9 +1028,6 @@ static struct vring *wil_find_tx_bcast(struct wil6210_priv *wil,
        if (wdev->iftype != NL80211_IFTYPE_AP)
                return wil_find_tx_bcast_2(wil, skb);
 
-       if (wil->privacy)
-               return wil_find_tx_bcast_2(wil, skb);
-
        return wil_find_tx_bcast_1(wil, skb);
 }
 
@@ -1144,13 +1153,8 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
        wil_tx_desc_map(d, pa, len, vring_index);
        if (unlikely(mcast)) {
                d->mac.d[0] |= BIT(MAC_CFG_DESC_TX_0_MCS_EN_POS); /* MCS 0 */
-               if (unlikely(len > WIL_BCAST_MCS0_LIMIT)) {
-                       /* set MCS 1 */
+               if (unlikely(len > WIL_BCAST_MCS0_LIMIT)) /* set MCS 1 */
                        d->mac.d[0] |= (1 << MAC_CFG_DESC_TX_0_MCS_INDEX_POS);
-                       /* packet mode 2 */
-                       d->mac.d[1] |= BIT(MAC_CFG_DESC_TX_1_PKT_MODE_EN_POS) |
-                                      (2 << MAC_CFG_DESC_TX_1_PKT_MODE_POS);
-               }
        }
        /* Process TCP/UDP checksum offloading */
        if (unlikely(wil_tx_desc_offload_cksum_set(wil, d, skb))) {
index d90c8aa20c157924cadde2ef8f1bdad9bcab16ae..0c4638487c742c3c5cbace887a95562a8e618ea2 100644 (file)
@@ -384,19 +384,27 @@ struct vring_rx_mac {
  *  [word 7] length
  */
 
-#define RX_DMA_D0_CMD_DMA_IT     BIT(10)
-
-/* Error field, offload bits */
-#define RX_DMA_ERROR_L3_ERR   BIT(4)
-#define RX_DMA_ERROR_L4_ERR   BIT(5)
+#define RX_DMA_D0_CMD_DMA_EOP  BIT(8)
+#define RX_DMA_D0_CMD_DMA_RT   BIT(9)  /* always 1 */
+#define RX_DMA_D0_CMD_DMA_IT   BIT(10) /* interrupt */
+
+/* Error field */
+#define RX_DMA_ERROR_FCS       BIT(0)
+#define RX_DMA_ERROR_MIC       BIT(1)
+#define RX_DMA_ERROR_KEY       BIT(2) /* Key missing */
+#define RX_DMA_ERROR_REPLAY    BIT(3)
+#define RX_DMA_ERROR_L3_ERR    BIT(4)
+#define RX_DMA_ERROR_L4_ERR    BIT(5)
 
 /* Status field */
-#define RX_DMA_STATUS_DU         BIT(0)
-#define RX_DMA_STATUS_ERROR      BIT(2)
-
+#define RX_DMA_STATUS_DU       BIT(0)
+#define RX_DMA_STATUS_EOP      BIT(1)
+#define RX_DMA_STATUS_ERROR    BIT(2)
+#define RX_DMA_STATUS_MI       BIT(3) /* MAC Interrupt is asserted */
 #define RX_DMA_STATUS_L3I      BIT(4)
 #define RX_DMA_STATUS_L4I      BIT(5)
 #define RX_DMA_STATUS_PHY_INFO BIT(6)
+#define RX_DMA_STATUS_FFM      BIT(7) /* EtherType Flex Filter Match */
 
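A minimal sketch of consuming the status word defined above; the helper name and the exact acceptance policy are illustrative, not taken from the driver. A descriptor is complete and clean when hardware marked it used, end-of-packet is set, and no error is flagged.

        static bool rx_desc_ok(u32 d0_status)
        {
                return (d0_status & RX_DMA_STATUS_DU) &&
                       (d0_status & RX_DMA_STATUS_EOP) &&
                       !(d0_status & RX_DMA_STATUS_ERROR);
        }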
 struct vring_rx_dma {
        u32 d0;
index 4310972c9e1687b5b12dc4c101c9076932d907a8..f3513a1fa4240d8b44504c987e07aa948c99953e 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/wireless.h>
 #include <net/cfg80211.h>
 #include <linux/timex.h>
+#include <linux/types.h>
 #include "wil_platform.h"
 
 extern bool no_fw_recovery;
@@ -29,10 +30,11 @@ extern unsigned short rx_ring_overflow_thrsh;
 extern int agg_wsize;
 extern u32 vring_idle_trsh;
 extern bool rx_align_2;
+extern bool debug_fw;
 
 #define WIL_NAME "wil6210"
 #define WIL_FW_NAME "wil6210.fw" /* code */
-#define WIL_FW2_NAME "wil6210.board" /* board & radio parameters */
+#define WIL_FW2_NAME "wil6210.brd" /* board & radio parameters */
 
 #define WIL_MAX_BUS_REQUEST_KBPS 800000 /* ~6.1Gbps */
 
@@ -396,6 +398,7 @@ struct vring {
  * Additional data for Tx Vring
  */
 struct vring_tx_data {
+       bool dot1x_open;
        int enabled;
        cycles_t idle, last_idle, begin;
        u8 agg_wsize; /* agreed aggregation window, 0 - no agg */
@@ -484,7 +487,6 @@ struct wil_sta_info {
        u8 addr[ETH_ALEN];
        enum wil_sta_status status;
        struct wil_net_stats stats;
-       bool data_port_open; /* can send any data, not only EAPOL */
        /* Rx BACK */
        struct wil_tid_ampdu_rx *tid_rx[WIL_STA_TID_NUM];
        spinlock_t tid_rx_lock; /* guarding tid_rx array */
@@ -526,6 +528,17 @@ struct wil_probe_client_req {
        u8 cid;
 };
 
+struct pmc_ctx {
+       /* alloc, free, and read operations must own the lock */
+       struct mutex            lock;
+       struct vring_tx_desc    *pring_va;
+       dma_addr_t              pring_pa;
+       struct desc_alloc_info  *descriptors;
+       int                     last_cmd_status;
+       int                     num_descriptors;
+       int                     descriptor_size;
+};
+
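The comment on pmc_ctx states the locking rule; a hedged sketch of a conforming reader follows (function name and error policy are assumptions, only the struct members come from the patch):

        static int wil_pmc_read_sketch(struct wil6210_priv *wil)
        {
                int rc = 0;

                mutex_lock(&wil->pmc.lock);
                if (!wil->pmc.pring_va)         /* PMC not allocated */
                        rc = -EAGAIN;
                /* ... otherwise copy out of wil->pmc.descriptors ... */
                mutex_unlock(&wil->pmc.lock);
                return rc;
        }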
 struct wil6210_priv {
        struct pci_dev *pdev;
        int n_msi;
@@ -610,6 +623,8 @@ struct wil6210_priv {
 
        void *platform_handle;
        struct wil_platform_ops platform_ops;
+
+       struct pmc_ctx pmc;
 };
 
 #define wil_to_wiphy(i) (i->wdev->wiphy)
@@ -701,9 +716,10 @@ int wmi_get_ssid(struct wil6210_priv *wil, u8 *ssid_len, void *ssid);
 int wmi_set_channel(struct wil6210_priv *wil, int channel);
 int wmi_get_channel(struct wil6210_priv *wil, int *channel);
 int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index,
-                      const void *mac_addr);
+                      const void *mac_addr, int key_usage);
 int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index,
-                      const void *mac_addr, int key_len, const void *key);
+                      const void *mac_addr, int key_len, const void *key,
+                      int key_usage);
 int wmi_echo(struct wil6210_priv *wil);
 int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie);
 int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring);
index 9fe2085be2c5b86d77e9346f003c7f38656ff3d3..3dc8daf69bd2309bdcf9d0719e71bac6b707771d 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -543,55 +543,22 @@ static void wmi_evt_eapol_rx(struct wil6210_priv *wil, int id,
        }
 }
 
-static void wil_addba_tx_cid(struct wil6210_priv *wil, u8 cid, u16 wsize)
+static void wmi_evt_vring_en(struct wil6210_priv *wil, int id, void *d, int len)
 {
-       struct vring_tx_data *t;
-       int i;
+       struct wmi_vring_en_event *evt = d;
+       u8 vri = evt->vring_index;
 
-       for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
-               if (cid != wil->vring2cid_tid[i][0])
-                       continue;
-               t = &wil->vring_tx_data[i];
-               if (!t->enabled)
-                       continue;
+       wil_dbg_wmi(wil, "Enable vring %d\n", vri);
 
-               wil_addba_tx_request(wil, i, wsize);
-       }
-}
-
-static void wmi_evt_linkup(struct wil6210_priv *wil, int id, void *d, int len)
-{
-       struct wmi_data_port_open_event *evt = d;
-       u8 cid = evt->cid;
-
-       wil_dbg_wmi(wil, "Link UP for CID %d\n", cid);
-
-       if (cid >= ARRAY_SIZE(wil->sta)) {
-               wil_err(wil, "Link UP for invalid CID %d\n", cid);
+       if (vri >= ARRAY_SIZE(wil->vring_tx)) {
+               wil_err(wil, "Enable for invalid vring %d\n", vri);
                return;
        }
-
-       wil->sta[cid].data_port_open = true;
-       if (agg_wsize >= 0)
-               wil_addba_tx_cid(wil, cid, agg_wsize);
-}
-
-static void wmi_evt_linkdown(struct wil6210_priv *wil, int id, void *d, int len)
-{
-       struct net_device *ndev = wil_to_ndev(wil);
-       struct wmi_wbe_link_down_event *evt = d;
-       u8 cid = evt->cid;
-
-       wil_dbg_wmi(wil, "Link DOWN for CID %d, reason %d\n",
-                   cid, le32_to_cpu(evt->reason));
-
-       if (cid >= ARRAY_SIZE(wil->sta)) {
-               wil_err(wil, "Link DOWN for invalid CID %d\n", cid);
+       wil->vring_tx_data[vri].dot1x_open = true;
+       if (vri == wil->bcast_vring) /* no BA for bcast */
                return;
-       }
-
-       wil->sta[cid].data_port_open = false;
-       netif_carrier_off(ndev);
+       if (agg_wsize >= 0)
+               wil_addba_tx_request(wil, vri, agg_wsize);
 }
 
 static void wmi_evt_ba_status(struct wil6210_priv *wil, int id, void *d,
@@ -695,11 +662,10 @@ static const struct {
        {WMI_CONNECT_EVENTID,           wmi_evt_connect},
        {WMI_DISCONNECT_EVENTID,        wmi_evt_disconnect},
        {WMI_EAPOL_RX_EVENTID,          wmi_evt_eapol_rx},
-       {WMI_DATA_PORT_OPEN_EVENTID,    wmi_evt_linkup},
-       {WMI_WBE_LINKDOWN_EVENTID,      wmi_evt_linkdown},
        {WMI_BA_STATUS_EVENTID,         wmi_evt_ba_status},
        {WMI_RCP_ADDBA_REQ_EVENTID,     wmi_evt_addba_rx_req},
        {WMI_DELBA_EVENTID,             wmi_evt_delba},
+       {WMI_VRING_EN_EVENTID,          wmi_evt_vring_en},
 };
 
 /*
@@ -844,7 +810,7 @@ int wmi_echo(struct wil6210_priv *wil)
        };
 
        return wmi_call(wil, WMI_ECHO_CMDID, &cmd, sizeof(cmd),
-                        WMI_ECHO_RSP_EVENTID, NULL, 0, 20);
+                       WMI_ECHO_RSP_EVENTID, NULL, 0, 50);
 }
 
 int wmi_set_mac_address(struct wil6210_priv *wil, void *addr)
@@ -985,7 +951,7 @@ int wmi_p2p_cfg(struct wil6210_priv *wil, int channel)
 }
 
 int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index,
-                      const void *mac_addr)
+                      const void *mac_addr, int key_usage)
 {
        struct wmi_delete_cipher_key_cmd cmd = {
                .key_index = key_index,
@@ -998,11 +964,12 @@ int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index,
 }
 
 int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index,
-                      const void *mac_addr, int key_len, const void *key)
+                      const void *mac_addr, int key_len, const void *key,
+                      int key_usage)
 {
        struct wmi_add_cipher_key_cmd cmd = {
                .key_index = key_index,
-               .key_usage = WMI_KEY_USE_PAIRWISE,
+               .key_usage = key_usage,
                .key_len = key_len,
        };
 
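With key_usage now supplied by the caller, the natural mapping from cfg80211's add_key parameters onto the wmi_key_usage values redefined further below looks roughly like this. This is a sketch under stated assumptions, not the driver's actual cfg80211 glue, which is outside these hunks:

        static int wmi_key_usage_for(bool pairwise, bool tx_group)
        {
                if (pairwise)
                        return WMI_KEY_USE_PAIRWISE;
                /* group keys split into Rx (peer's) and Tx (own) usage */
                return tx_group ? WMI_KEY_USE_TX_GROUP : WMI_KEY_USE_RX_GROUP;
        }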
@@ -1238,7 +1205,8 @@ int wmi_addba_rx_resp(struct wil6210_priv *wil, u8 cid, u8 tid, u8 token,
                    cid, tid, agg_wsize, timeout, status, amsdu ? "+" : "-");
 
        rc = wmi_call(wil, WMI_RCP_ADDBA_RESP_CMDID, &cmd, sizeof(cmd),
-                     WMI_ADDBA_RESP_SENT_EVENTID, &reply, sizeof(reply), 100);
+                     WMI_RCP_ADDBA_RESP_SENT_EVENTID, &reply, sizeof(reply),
+                     100);
        if (rc)
                return rc;
 
index b2905531535039f6ae91835ee592a2960f140f83..cc04ab73b398c83c74675e0137b02b9818d61f63 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
  * Copyright (c) 2006-2012 Wilocity.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
@@ -253,8 +253,8 @@ struct wmi_set_passphrase_cmd {
  */
 enum wmi_key_usage {
        WMI_KEY_USE_PAIRWISE    = 0,
-       WMI_KEY_USE_GROUP       = 1,
-       WMI_KEY_USE_TX          = 2,  /* default Tx Key - Static WEP only */
+       WMI_KEY_USE_RX_GROUP    = 1,
+       WMI_KEY_USE_TX_GROUP    = 2,
 };
 
 struct wmi_add_cipher_key_cmd {
@@ -835,6 +835,21 @@ struct wmi_temp_sense_cmd {
        __le32 measure_mode;
 } __packed;
 
+/*
+ * WMI_PMC_CMDID
+ */
+enum wmi_pmc_op_e {
+       WMI_PMC_ALLOCATE = 0,
+       WMI_PMC_RELEASE = 1,
+};
+
+struct wmi_pmc_cmd {
+       u8 op;          /* enum wmi_pmc_op_e */
+       u8 reserved;
+       __le16 ring_size;
+       __le64 mem_base;
+} __packed;
+
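Filling this command for an allocate request, using the pmc_ctx fields declared earlier; the initializer is illustrative only, and the cpu_to_le*() conversions mirror the __le16/__le64 wire fields:

        struct wmi_pmc_cmd cmd = {
                .op        = WMI_PMC_ALLOCATE,
                .ring_size = cpu_to_le16(wil->pmc.num_descriptors),
                .mem_base  = cpu_to_le64(wil->pmc.pring_pa),
        };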
 /*
  * WMI Events
  */
@@ -870,7 +885,7 @@ enum wmi_event_id {
        WMI_VRING_CFG_DONE_EVENTID              = 0x1821,
        WMI_BA_STATUS_EVENTID                   = 0x1823,
        WMI_RCP_ADDBA_REQ_EVENTID               = 0x1824,
-       WMI_ADDBA_RESP_SENT_EVENTID             = 0x1825,
+       WMI_RCP_ADDBA_RESP_SENT_EVENTID         = 0x1825,
        WMI_DELBA_EVENTID                       = 0x1826,
        WMI_GET_SSID_EVENTID                    = 0x1828,
        WMI_GET_PCP_CHANNEL_EVENTID             = 0x182a,
@@ -882,7 +897,7 @@ enum wmi_event_id {
        WMI_WRITE_MAC_TXQ_EVENTID               = 0x1833,
        WMI_WRITE_MAC_XQ_FIELD_EVENTID          = 0x1834,
 
-       WMI_BEAFORMING_MGMT_DONE_EVENTID        = 0x1836,
+       WMI_BEAMFORMING_MGMT_DONE_EVENTID       = 0x1836,
        WMI_BF_TXSS_MGMT_DONE_EVENTID           = 0x1837,
        WMI_BF_RXSS_MGMT_DONE_EVENTID           = 0x1839,
        WMI_RS_MGMT_DONE_EVENTID                = 0x1852,
@@ -894,11 +909,12 @@ enum wmi_event_id {
 
        /* Performance monitoring events */
        WMI_DATA_PORT_OPEN_EVENTID              = 0x1860,
-       WMI_WBE_LINKDOWN_EVENTID                = 0x1861,
+       WMI_WBE_LINK_DOWN_EVENTID               = 0x1861,
 
        WMI_BF_CTRL_DONE_EVENTID                = 0x1862,
        WMI_NOTIFY_REQ_DONE_EVENTID             = 0x1863,
        WMI_GET_STATUS_DONE_EVENTID             = 0x1864,
+       WMI_VRING_EN_EVENTID                    = 0x1865,
 
        WMI_UNIT_TEST_EVENTID                   = 0x1900,
        WMI_FLASH_READ_DONE_EVENTID             = 0x1902,
@@ -1147,7 +1163,7 @@ struct wmi_vring_cfg_done_event {
 } __packed;
 
 /*
- * WMI_ADDBA_RESP_SENT_EVENTID
+ * WMI_RCP_ADDBA_RESP_SENT_EVENTID
  */
 struct wmi_rcp_addba_resp_sent_event {
        u8 cidxtid;
@@ -1179,7 +1195,7 @@ struct wmi_cfg_rx_chain_done_event {
 } __packed;
 
 /*
- * WMI_WBE_LINKDOWN_EVENTID
+ * WMI_WBE_LINK_DOWN_EVENTID
  */
 enum wmi_wbe_link_down_event_reason {
        WMI_WBE_REASON_USER_REQUEST     = 0,
@@ -1201,6 +1217,14 @@ struct wmi_data_port_open_event {
        u8 reserved[3];
 } __packed;
 
+/*
+ * WMI_VRING_EN_EVENTID
+ */
+struct wmi_vring_en_event {
+       u8 vring_index;
+       u8 reserved[3];
+} __packed;
+
 /*
  * WMI_GET_PCP_CHANNEL_EVENTID
  */
index b2f9521fe551a3e32bc85db602bd41a94e2b81ca..f40992969b4ab0b96d12e6bf8735e269a3d5f341 100644 (file)
@@ -3131,8 +3131,6 @@ static void b43_adjust_opmode(struct b43_wldev *dev)
                ctl |= B43_MACCTL_KEEP_BAD;
        if (wl->filter_flags & FIF_PLCPFAIL)
                ctl |= B43_MACCTL_KEEP_BADPLCP;
-       if (wl->filter_flags & FIF_PROMISC_IN_BSS)
-               ctl |= B43_MACCTL_PROMISC;
        if (wl->filter_flags & FIF_BCN_PRBRESP_PROMISC)
                ctl |= B43_MACCTL_BEACPROMISC;
 
@@ -4310,16 +4308,14 @@ static void b43_op_configure_filter(struct ieee80211_hw *hw,
                goto out_unlock;
        }
 
-       *fflags &= FIF_PROMISC_IN_BSS |
-                 FIF_ALLMULTI |
+       *fflags &= FIF_ALLMULTI |
                  FIF_FCSFAIL |
                  FIF_PLCPFAIL |
                  FIF_CONTROL |
                  FIF_OTHER_BSS |
                  FIF_BCN_PRBRESP_PROMISC;
 
-       changed &= FIF_PROMISC_IN_BSS |
-                  FIF_ALLMULTI |
+       changed &= FIF_ALLMULTI |
                   FIF_FCSFAIL |
                   FIF_PLCPFAIL |
                   FIF_CONTROL |
index c77b7f59505cc2eb95c13219e04b18862e42ae72..39d49d6cd07f34e0febec844a0fb4c564a20ccf0 100644 (file)
@@ -2055,8 +2055,6 @@ static void b43legacy_adjust_opmode(struct b43legacy_wldev *dev)
                ctl |= B43legacy_MACCTL_KEEP_BAD;
        if (wl->filter_flags & FIF_PLCPFAIL)
                ctl |= B43legacy_MACCTL_KEEP_BADPLCP;
-       if (wl->filter_flags & FIF_PROMISC_IN_BSS)
-               ctl |= B43legacy_MACCTL_PROMISC;
        if (wl->filter_flags & FIF_BCN_PRBRESP_PROMISC)
                ctl |= B43legacy_MACCTL_BEACPROMISC;
 
@@ -2922,16 +2920,14 @@ static void b43legacy_op_configure_filter(struct ieee80211_hw *hw,
        }
 
        spin_lock_irqsave(&wl->irq_lock, flags);
-       *fflags &= FIF_PROMISC_IN_BSS |
-                 FIF_ALLMULTI |
+       *fflags &= FIF_ALLMULTI |
                  FIF_FCSFAIL |
                  FIF_PLCPFAIL |
                  FIF_CONTROL |
                  FIF_OTHER_BSS |
                  FIF_BCN_PRBRESP_PROMISC;
 
-       changed &= FIF_PROMISC_IN_BSS |
-                  FIF_ALLMULTI |
+       changed &= FIF_ALLMULTI |
                   FIF_FCSFAIL |
                   FIF_PLCPFAIL |
                   FIF_CONTROL |
index 9b508bd3b839256e7595ed90c6ad2eb1bb4709f7..71779b9e4bbef3569a3543a5bcc69b4d4bb4cf76 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/suspend.h>
 #include <linux/errno.h>
 #include <linux/module.h>
+#include <linux/acpi.h>
 #include <net/cfg80211.h>
 
 #include <defs.h>
@@ -1011,6 +1012,14 @@ static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
        return 0;
 }
 
+static void brcmf_sdiod_host_fixup(struct mmc_host *host)
+{
+       /* runtime-pm powers off the device */
+       pm_runtime_forbid(host->parent);
+       /* avoid removal detection upon resume */
+       host->caps |= MMC_CAP_NONREMOVABLE;
+}
+
 static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
 {
        struct sdio_func *func;
@@ -1076,7 +1085,7 @@ static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
                ret = -ENODEV;
                goto out;
        }
-       pm_runtime_forbid(host->parent);
+       brcmf_sdiod_host_fixup(host);
 out:
        if (ret)
                brcmf_sdiod_remove(sdiodev);
@@ -1108,12 +1117,25 @@ MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
 static struct brcmfmac_sdio_platform_data *brcmfmac_sdio_pdata;
 
 
+static void brcmf_sdiod_acpi_set_power_manageable(struct device *dev,
+                                                 int val)
+{
+#if IS_ENABLED(CONFIG_ACPI)
+       struct acpi_device *adev;
+
+       adev = ACPI_COMPANION(dev);
+       if (adev)
+               adev->flags.power_manageable = !!val;
+#endif
+}
+
 static int brcmf_ops_sdio_probe(struct sdio_func *func,
                                const struct sdio_device_id *id)
 {
        int err;
        struct brcmf_sdio_dev *sdiodev;
        struct brcmf_bus *bus_if;
+       struct device *dev;
 
        brcmf_dbg(SDIO, "Enter\n");
        brcmf_dbg(SDIO, "Class=%x\n", func->class);
@@ -1121,6 +1143,10 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
        brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
        brcmf_dbg(SDIO, "Function#: %d\n", func->num);
 
+       dev = &func->dev;
+       /* prohibit ACPI power management for this device */
+       brcmf_sdiod_acpi_set_power_manageable(dev, 0);
+
        /* Consume func num 1 but dont do anything with it. */
        if (func->num == 1)
                return 0;
@@ -1246,15 +1272,15 @@ static int brcmf_ops_sdio_suspend(struct device *dev)
        brcmf_sdiod_freezer_on(sdiodev);
        brcmf_sdio_wd_timer(sdiodev->bus, 0);
 
+       sdio_flags = MMC_PM_KEEP_POWER;
        if (sdiodev->wowl_enabled) {
-               sdio_flags = MMC_PM_KEEP_POWER;
                if (sdiodev->pdata->oob_irq_supported)
                        enable_irq_wake(sdiodev->pdata->oob_irq_nr);
                else
-                       sdio_flags = MMC_PM_WAKE_SDIO_IRQ;
-               if (sdio_set_host_pm_flags(sdiodev->func[1], sdio_flags))
-                       brcmf_err("Failed to set pm_flags %x\n", sdio_flags);
+                       sdio_flags |= MMC_PM_WAKE_SDIO_IRQ;
        }
+       if (sdio_set_host_pm_flags(sdiodev->func[1], sdio_flags))
+               brcmf_err("Failed to set pm_flags %x\n", sdio_flags);
        return 0;
 }
 
index 8a15ebbce4a360ff054e08c197118ddf43f75381..e10fa67010c072f4d2377cd2e42ed3c3802157fe 100644 (file)
@@ -129,13 +129,47 @@ static struct ieee80211_rate __wl_rates[] = {
        RATETAB_ENT(BRCM_RATE_54M, 0),
 };
 
-#define wl_a_rates             (__wl_rates + 4)
-#define wl_a_rates_size        8
 #define wl_g_rates             (__wl_rates + 0)
-#define wl_g_rates_size        12
+#define wl_g_rates_size                ARRAY_SIZE(__wl_rates)
+#define wl_a_rates             (__wl_rates + 4)
+#define wl_a_rates_size                (wl_g_rates_size - 4)
+
+#define CHAN2G(_channel, _freq) {                              \
+       .band                   = IEEE80211_BAND_2GHZ,          \
+       .center_freq            = (_freq),                      \
+       .hw_value               = (_channel),                   \
+       .flags                  = IEEE80211_CHAN_DISABLED,      \
+       .max_antenna_gain       = 0,                            \
+       .max_power              = 30,                           \
+}
+
+#define CHAN5G(_channel) {                                     \
+       .band                   = IEEE80211_BAND_5GHZ,          \
+       .center_freq            = 5000 + (5 * (_channel)),      \
+       .hw_value               = (_channel),                   \
+       .flags                  = IEEE80211_CHAN_DISABLED,      \
+       .max_antenna_gain       = 0,                            \
+       .max_power              = 30,                           \
+}
+
+static struct ieee80211_channel __wl_2ghz_channels[] = {
+       CHAN2G(1, 2412), CHAN2G(2, 2417), CHAN2G(3, 2422), CHAN2G(4, 2427),
+       CHAN2G(5, 2432), CHAN2G(6, 2437), CHAN2G(7, 2442), CHAN2G(8, 2447),
+       CHAN2G(9, 2452), CHAN2G(10, 2457), CHAN2G(11, 2462), CHAN2G(12, 2467),
+       CHAN2G(13, 2472), CHAN2G(14, 2484)
+};
+
+static struct ieee80211_channel __wl_5ghz_channels[] = {
+       CHAN5G(34), CHAN5G(36), CHAN5G(38), CHAN5G(40), CHAN5G(42),
+       CHAN5G(44), CHAN5G(46), CHAN5G(48), CHAN5G(52), CHAN5G(56),
+       CHAN5G(60), CHAN5G(64), CHAN5G(100), CHAN5G(104), CHAN5G(108),
+       CHAN5G(112), CHAN5G(116), CHAN5G(120), CHAN5G(124), CHAN5G(128),
+       CHAN5G(132), CHAN5G(136), CHAN5G(140), CHAN5G(144), CHAN5G(149),
+       CHAN5G(153), CHAN5G(157), CHAN5G(161), CHAN5G(165)
+};
 
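For reference, CHAN5G(36) expands to the entry below; the 5 GHz center frequency follows directly from the macro arithmetic (5000 + 5 * 36 = 5180 MHz), and every channel starts out disabled until firmware enables it:

        {
                .band              = IEEE80211_BAND_5GHZ,
                .center_freq       = 5180,
                .hw_value          = 36,
                .flags             = IEEE80211_CHAN_DISABLED,
                .max_antenna_gain  = 0,
                .max_power         = 30,
        }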
 /* Band templates duplicated per wiphy. The channel info
- * is filled in after querying the device.
+ * above is added to the band during setup.
  */
 static const struct ieee80211_supported_band __wl_band_2ghz = {
        .band = IEEE80211_BAND_2GHZ,
@@ -143,7 +177,7 @@ static const struct ieee80211_supported_band __wl_band_2ghz = {
        .n_bitrates = wl_g_rates_size,
 };
 
-static const struct ieee80211_supported_band __wl_band_5ghz_a = {
+static const struct ieee80211_supported_band __wl_band_5ghz = {
        .band = IEEE80211_BAND_5GHZ,
        .bitrates = wl_a_rates,
        .n_bitrates = wl_a_rates_size,
@@ -1262,7 +1296,7 @@ static void brcmf_link_down(struct brcmf_cfg80211_vif *vif, u16 reason)
                }
                clear_bit(BRCMF_VIF_STATUS_CONNECTED, &vif->sme_state);
                cfg80211_disconnected(vif->wdev.netdev, reason, NULL, 0,
-                                     GFP_KERNEL);
+                                     true, GFP_KERNEL);
 
        }
        clear_bit(BRCMF_VIF_STATUS_CONNECTING, &vif->sme_state);
@@ -1928,7 +1962,7 @@ brcmf_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *ndev,
 
        clear_bit(BRCMF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state);
        clear_bit(BRCMF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state);
-       cfg80211_disconnected(ndev, reason_code, NULL, 0, GFP_KERNEL);
+       cfg80211_disconnected(ndev, reason_code, NULL, 0, true, GFP_KERNEL);
 
        memcpy(&scbval.ea, &profile->bssid, ETH_ALEN);
        scbval.val = cpu_to_le32(reason_code);
@@ -5253,40 +5287,6 @@ dongle_scantime_out:
        return err;
 }
 
-/* Filter the list of channels received from firmware counting only
- * the 20MHz channels. The wiphy band data only needs those which get
- * flagged to indicate if they can take part in higher bandwidth.
- */
-static void brcmf_count_20mhz_channels(struct brcmf_cfg80211_info *cfg,
-                                      struct brcmf_chanspec_list *chlist,
-                                      u32 chcnt[])
-{
-       u32 total = le32_to_cpu(chlist->count);
-       struct brcmu_chan ch;
-       int i;
-
-       for (i = 0; i < total; i++) {
-               ch.chspec = (u16)le32_to_cpu(chlist->element[i]);
-               cfg->d11inf.decchspec(&ch);
-
-               /* Firmware gives a ordered list. We skip non-20MHz
-                * channels is 2G. For 5G we can abort upon reaching
-                * a non-20MHz channel in the list.
-                */
-               if (ch.bw != BRCMU_CHAN_BW_20) {
-                       if (ch.band == BRCMU_CHAN_BAND_5G)
-                               break;
-                       else
-                               continue;
-               }
-
-               if (ch.band == BRCMU_CHAN_BAND_2G)
-                       chcnt[0] += 1;
-               else if (ch.band == BRCMU_CHAN_BAND_5G)
-                       chcnt[1] += 1;
-       }
-}
-
 static void brcmf_update_bw40_channel_flag(struct ieee80211_channel *channel,
                                           struct brcmu_chan *ch)
 {
@@ -5322,7 +5322,6 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
        u32 i, j;
        u32 total;
        u32 chaninfo;
-       u32 chcnt[2] = { 0, 0 };
        u32 index;
 
        pbuf = kzalloc(BRCMF_DCMD_MEDLEN, GFP_KERNEL);
@@ -5339,42 +5338,15 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
                goto fail_pbuf;
        }
 
-       brcmf_count_20mhz_channels(cfg, list, chcnt);
        wiphy = cfg_to_wiphy(cfg);
-       if (chcnt[0]) {
-               band = kmemdup(&__wl_band_2ghz, sizeof(__wl_band_2ghz),
-                              GFP_KERNEL);
-               if (band == NULL) {
-                       err = -ENOMEM;
-                       goto fail_pbuf;
-               }
-               band->channels = kcalloc(chcnt[0], sizeof(*channel),
-                                        GFP_KERNEL);
-               if (band->channels == NULL) {
-                       kfree(band);
-                       err = -ENOMEM;
-                       goto fail_pbuf;
-               }
-               band->n_channels = 0;
-               wiphy->bands[IEEE80211_BAND_2GHZ] = band;
-       }
-       if (chcnt[1]) {
-               band = kmemdup(&__wl_band_5ghz_a, sizeof(__wl_band_5ghz_a),
-                              GFP_KERNEL);
-               if (band == NULL) {
-                       err = -ENOMEM;
-                       goto fail_band2g;
-               }
-               band->channels = kcalloc(chcnt[1], sizeof(*channel),
-                                        GFP_KERNEL);
-               if (band->channels == NULL) {
-                       kfree(band);
-                       err = -ENOMEM;
-                       goto fail_band2g;
-               }
-               band->n_channels = 0;
-               wiphy->bands[IEEE80211_BAND_5GHZ] = band;
-       }
+       band = wiphy->bands[IEEE80211_BAND_2GHZ];
+       if (band)
+               for (i = 0; i < band->n_channels; i++)
+                       band->channels[i].flags = IEEE80211_CHAN_DISABLED;
+       band = wiphy->bands[IEEE80211_BAND_5GHZ];
+       if (band)
+               for (i = 0; i < band->n_channels; i++)
+                       band->channels[i].flags = IEEE80211_CHAN_DISABLED;
 
        total = le32_to_cpu(list->count);
        for (i = 0; i < total; i++) {
@@ -5389,6 +5361,8 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
                        brcmf_err("Invalid channel Spec. 0x%x.\n", ch.chspec);
                        continue;
                }
+               if (!band)
+                       continue;
                if (!(bw_cap[band->band] & WLC_BW_40MHZ_BIT) &&
                    ch.bw == BRCMU_CHAN_BW_40)
                        continue;
@@ -5416,9 +5390,9 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
                } else if (ch.bw == BRCMU_CHAN_BW_40) {
                        brcmf_update_bw40_channel_flag(&channel[index], &ch);
                } else {
-                       /* disable other bandwidths for now as mentioned
-                        * order assure they are enabled for subsequent
-                        * chanspecs.
+                       /* enable the channel and disable other bandwidths
+                        * for now; the ordering mentioned above assures they
+                        * are enabled for subsequent chanspecs.
                         */
                        channel[index].flags = IEEE80211_CHAN_NO_HT40 |
                                               IEEE80211_CHAN_NO_80MHZ;
@@ -5437,16 +5411,8 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
                                                IEEE80211_CHAN_NO_IR;
                        }
                }
-               if (index == band->n_channels)
-                       band->n_channels++;
        }
-       kfree(pbuf);
-       return 0;
 
-fail_band2g:
-       kfree(wiphy->bands[IEEE80211_BAND_2GHZ]->channels);
-       kfree(wiphy->bands[IEEE80211_BAND_2GHZ]);
-       wiphy->bands[IEEE80211_BAND_2GHZ] = NULL;
 fail_pbuf:
        kfree(pbuf);
        return err;
@@ -5779,7 +5745,12 @@ static void brcmf_wiphy_wowl_params(struct wiphy *wiphy)
 
 static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
 {
+       struct ieee80211_supported_band *band;
        struct ieee80211_iface_combination ifc_combo;
+       __le32 bandlist[3];
+       u32 n_bands;
+       int err, i;
+
        wiphy->max_scan_ssids = WL_NUM_SCAN_MAX;
        wiphy->max_scan_ie_len = BRCMF_SCAN_IE_LEN_MAX;
        wiphy->max_num_pmkids = WL_NUM_PMKIDS_MAX;
@@ -5812,7 +5783,8 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
                wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM;
        wiphy->mgmt_stypes = brcmf_txrx_stypes;
        wiphy->max_remain_on_channel_duration = 5000;
-       brcmf_wiphy_pno_params(wiphy);
+       if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_PNO))
+               brcmf_wiphy_pno_params(wiphy);
 
        /* vendor commands/events support */
        wiphy->vendor_commands = brcmf_vendor_cmds;
@@ -5821,7 +5793,52 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
        if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_WOWL))
                brcmf_wiphy_wowl_params(wiphy);
 
-       return brcmf_setup_wiphybands(wiphy);
+       err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_BANDLIST, &bandlist,
+                                    sizeof(bandlist));
+       if (err) {
+               brcmf_err("could not obtain band info: err=%d\n", err);
+               return err;
+       }
+       /* first entry in bandlist is number of bands */
+       n_bands = le32_to_cpu(bandlist[0]);
+       for (i = 1; i <= n_bands && i < ARRAY_SIZE(bandlist); i++) {
+               if (bandlist[i] == cpu_to_le32(WLC_BAND_2G)) {
+                       band = kmemdup(&__wl_band_2ghz, sizeof(__wl_band_2ghz),
+                                      GFP_KERNEL);
+                       if (!band)
+                               return -ENOMEM;
+
+                       band->channels = kmemdup(&__wl_2ghz_channels,
+                                                sizeof(__wl_2ghz_channels),
+                                                GFP_KERNEL);
+                       if (!band->channels) {
+                               kfree(band);
+                               return -ENOMEM;
+                       }
+
+                       band->n_channels = ARRAY_SIZE(__wl_2ghz_channels);
+                       wiphy->bands[IEEE80211_BAND_2GHZ] = band;
+               }
+               if (bandlist[i] == cpu_to_le32(WLC_BAND_5G)) {
+                       band = kmemdup(&__wl_band_5ghz, sizeof(__wl_band_5ghz),
+                                      GFP_KERNEL);
+                       if (!band)
+                               return -ENOMEM;
+
+                       band->channels = kmemdup(&__wl_5ghz_channels,
+                                                sizeof(__wl_5ghz_channels),
+                                                GFP_KERNEL);
+                       if (!band->channels) {
+                               kfree(band);
+                               return -ENOMEM;
+                       }
+
+                       band->n_channels = ARRAY_SIZE(__wl_5ghz_channels);
+                       wiphy->bands[IEEE80211_BAND_5GHZ] = band;
+               }
+       }
+       err = brcmf_setup_wiphybands(wiphy);
+       return err;
 }
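The reply parsed by the loop above is a counted array of little-endian words: the first entry is the number of bands, each following entry names one band. For a dual-band device it would carry something like this (shape grounded in the code above, values illustrative):

        __le32 bandlist[3] = {
                cpu_to_le32(2),             /* bandlist[0]: number of bands */
                cpu_to_le32(WLC_BAND_2G),   /* first reported band  */
                cpu_to_le32(WLC_BAND_5G),   /* second reported band */
        };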
 
 static s32 brcmf_config_dongle(struct brcmf_cfg80211_info *cfg)
@@ -6007,11 +6024,18 @@ static void brcmf_cfg80211_reg_notifier(struct wiphy *wiphy,
        memset(&ccreq, 0, sizeof(ccreq));
        ccreq.rev = cpu_to_le32(-1);
        memcpy(ccreq.ccode, req->alpha2, sizeof(req->alpha2));
-       brcmf_fil_iovar_data_set(ifp, "country", &ccreq, sizeof(ccreq));
+       if (brcmf_fil_iovar_data_set(ifp, "country", &ccreq, sizeof(ccreq))) {
+               brcmf_err("firmware rejected country setting\n");
+               return;
+       }
+       brcmf_setup_wiphybands(wiphy);
 }
 
 static void brcmf_free_wiphy(struct wiphy *wiphy)
 {
+       if (!wiphy)
+               return;
+
        kfree(wiphy->iface_combinations);
        if (wiphy->bands[IEEE80211_BAND_2GHZ]) {
                kfree(wiphy->bands[IEEE80211_BAND_2GHZ]->channels);
index ab2fac8b2760a89269ffcb549090087fe9a16f71..288f8314f2086f06c33992be2a4da8eaed7f7df9 100644 (file)
@@ -649,6 +649,7 @@ static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci)
        case BRCM_CC_43567_CHIP_ID:
        case BRCM_CC_43569_CHIP_ID:
        case BRCM_CC_43570_CHIP_ID:
+       case BRCM_CC_4358_CHIP_ID:
        case BRCM_CC_43602_CHIP_ID:
                return 0x180000;
        default:
index 77656c711bedbfc44a8a7ddf97b58cb3efabf24a..26c65872dae3b1b7184d377277a7fe86e125d5eb 100644 (file)
 #include "core.h"
 #include "commonring.h"
 
-
-/* dma flushing needs implementation for mips and arm platforms. Should
- * be put in util. Note, this is not real flushing. It is virtual non
- * cached memory. Only write buffers should have to be drained. Though
- * this may be different depending on platform......
- * SEE ALSO msgbuf.c
- */
-#define brcmf_dma_flush(addr, len)
-#define brcmf_dma_invalidate_cache(addr, len)
-
-
 void brcmf_commonring_register_cb(struct brcmf_commonring *commonring,
                                  int (*cr_ring_bell)(void *ctx),
                                  int (*cr_update_rptr)(void *ctx),
@@ -206,14 +195,9 @@ int brcmf_commonring_write_complete(struct brcmf_commonring *commonring)
        address = commonring->buf_addr;
        address += (commonring->f_ptr * commonring->item_len);
        if (commonring->f_ptr > commonring->w_ptr) {
-               brcmf_dma_flush(address,
-                               (commonring->depth - commonring->f_ptr) *
-                               commonring->item_len);
                address = commonring->buf_addr;
                commonring->f_ptr = 0;
        }
-       brcmf_dma_flush(address, (commonring->w_ptr - commonring->f_ptr) *
-                       commonring->item_len);
 
        commonring->f_ptr = commonring->w_ptr;
 
@@ -258,8 +242,6 @@ void *brcmf_commonring_get_read_ptr(struct brcmf_commonring *commonring,
        if (commonring->r_ptr == commonring->depth)
                commonring->r_ptr = 0;
 
-       brcmf_dma_invalidate_cache(ret_addr, *n_items * commonring->item_len);
-
        return ret_addr;
 }
 
index 7748a1ccf14fdf4a6b2864441e9b041da35558a6..2c5fad3a3aa22523e57dfb97d2d0110d7fbfe820 100644 (file)
@@ -124,6 +124,7 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
        struct brcmf_if *ifp = drvr->iflist[0];
 
        brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_MCHAN, "mchan");
+       brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_PNO, "pfn");
        if (drvr->bus_if->wowl_supported)
                brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_WOWL, "wowl");
        if (drvr->bus_if->chip != BRCM_CC_43362_CHIP_ID)
index f5832e077bb7999344877f5fd3bf792cad2ce298..546962525cd2a1a302bdf43667b894176fbf4a40 100644 (file)
 /*
  * Features:
  *
+ * MBSS: multiple BSSID support (eg. guest network in AP mode).
  * MCHAN: multi-channel for concurrent P2P.
+ * PNO: preferred network offload.
+ * WOWL: Wake-On-WLAN.
  */
 #define BRCMF_FEAT_LIST \
        BRCMF_FEAT_DEF(MBSS) \
        BRCMF_FEAT_DEF(MCHAN) \
+       BRCMF_FEAT_DEF(PNO) \
        BRCMF_FEAT_DEF(WOWL)
 /*
  * Quirks:
index 9cb99152ad1753e2bc2317589ce75df7abebd08b..7ae6461df932b77bfe97e7add765c48de1e53616 100644 (file)
 #include "debug.h"
 #include "firmware.h"
 
+#define BRCMF_FW_MAX_NVRAM_SIZE                        64000
+#define BRCMF_FW_NVRAM_DEVPATH_LEN             19      /* devpath0=pcie/1/4/ */
+#define BRCMF_FW_NVRAM_PCIEDEV_LEN             10      /* pcie/1/4/ + \0 */
+
 char brcmf_firmware_path[BRCMF_FW_PATH_LEN];
 module_param_string(firmware_path, brcmf_firmware_path,
                    BRCMF_FW_PATH_LEN, 0440);
@@ -46,6 +50,8 @@ enum nvram_parser_state {
  * @column: current column in line.
  * @pos: byte offset in input buffer.
  * @entry: start position of key,value entry.
+ * @multi_dev_v1: detect pcie multi device v1 (compressed).
+ * @multi_dev_v2: detect pcie multi device v2.
  */
 struct nvram_parser {
        enum nvram_parser_state state;
@@ -56,8 +62,16 @@ struct nvram_parser {
        u32 column;
        u32 pos;
        u32 entry;
+       bool multi_dev_v1;
+       bool multi_dev_v2;
 };
 
+/**
+ * is_nvram_char() - check if char is a valid one for NVRAM entry
+ *
+ * It accepts all printable ASCII chars except for '#' which opens a comment.
+ * Please note that ' ' (space) while accepted is not a valid key name char.
+ */
 static bool is_nvram_char(char c)
 {
        /* comment marker excluded */
@@ -65,7 +79,7 @@ static bool is_nvram_char(char c)
                return false;
 
        /* key and value may have any other readable character */
-       return (c > 0x20 && c < 0x7f);
+       return (c >= 0x20 && c < 0x7f);
 }
 
 static bool is_whitespace(char c)
@@ -108,7 +122,11 @@ static enum nvram_parser_state brcmf_nvram_handle_key(struct nvram_parser *nvp)
                        st = COMMENT;
                else
                        st = VALUE;
-       } else if (!is_nvram_char(c)) {
+               if (strncmp(&nvp->fwnv->data[nvp->entry], "devpath", 7) == 0)
+                       nvp->multi_dev_v1 = true;
+               if (strncmp(&nvp->fwnv->data[nvp->entry], "pcie/", 5) == 0)
+                       nvp->multi_dev_v2 = true;
+       } else if (!is_nvram_char(c) || c == ' ') {
                brcmf_dbg(INFO, "warning: ln=%d:col=%d: '=' expected, skip invalid key entry\n",
                          nvp->line, nvp->column);
                return COMMENT;
@@ -133,6 +151,8 @@ brcmf_nvram_handle_value(struct nvram_parser *nvp)
                ekv = (u8 *)&nvp->fwnv->data[nvp->pos];
                skv = (u8 *)&nvp->fwnv->data[nvp->entry];
                cplen = ekv - skv;
+               if (nvp->nvram_len + cplen + 1 >= BRCMF_FW_MAX_NVRAM_SIZE)
+                       return END;
                /* copy to output buffer */
                memcpy(&nvp->nvram[nvp->nvram_len], skv, cplen);
                nvp->nvram_len += cplen;
@@ -148,17 +168,20 @@ brcmf_nvram_handle_value(struct nvram_parser *nvp)
 static enum nvram_parser_state
 brcmf_nvram_handle_comment(struct nvram_parser *nvp)
 {
-       char *eol, *sol;
+       char *eoc, *sol;
 
        sol = (char *)&nvp->fwnv->data[nvp->pos];
-       eol = strchr(sol, '\n');
-       if (eol == NULL)
-               return END;
+       eoc = strchr(sol, '\n');
+       if (!eoc) {
+               eoc = strchr(sol, '\0');
+               if (!eoc)
+                       return END;
+       }
 
        /* eat all moving to next line */
        nvp->line++;
        nvp->column = 1;
-       nvp->pos += (eol - sol) + 1;
+       nvp->pos += (eoc - sol) + 1;
        return IDLE;
 }
 
@@ -180,10 +203,18 @@ static enum nvram_parser_state
 static int brcmf_init_nvram_parser(struct nvram_parser *nvp,
                                   const struct firmware *nv)
 {
+       size_t size;
+
        memset(nvp, 0, sizeof(*nvp));
        nvp->fwnv = nv;
+       /* Limit size to MAX_NVRAM_SIZE; some files contain a lot of comments */
+       if (nv->size > BRCMF_FW_MAX_NVRAM_SIZE)
+               size = BRCMF_FW_MAX_NVRAM_SIZE;
+       else
+               size = nv->size;
        /* Alloc for extra 0 byte + roundup by 4 + length field */
-       nvp->nvram = kzalloc(nv->size + 1 + 3 + sizeof(u32), GFP_KERNEL);
+       size += 1 + 3 + sizeof(u32);
+       nvp->nvram = kzalloc(size, GFP_KERNEL);
        if (!nvp->nvram)
                return -ENOMEM;
 
@@ -192,12 +223,141 @@ static int brcmf_init_nvram_parser(struct nvram_parser *nvp,
        return 0;
 }
 
+/* brcmf_fw_strip_multi_v1: Some nvram files contain settings for multiple
+ * devices. Strip them down to one device; domain_nr/bus_nr determine
+ * which data is to be returned. v1 is the version where nvram is stored
+ * compressed and "devpath" maps to an index for valid entries.
+ */
+static void brcmf_fw_strip_multi_v1(struct nvram_parser *nvp, u16 domain_nr,
+                                   u16 bus_nr)
+{
+       /* Device path with a leading '=' key-value separator */
+       char pcie_path[] = "=pcie/?/?";
+       size_t pcie_len;
+
+       u32 i, j;
+       bool found;
+       u8 *nvram;
+       u8 id;
+
+       nvram = kzalloc(nvp->nvram_len + 1 + 3 + sizeof(u32), GFP_KERNEL);
+       if (!nvram)
+               goto fail;
+
+       /* min length: devpath0=pcie/1/4/ + 0:x=y */
+       if (nvp->nvram_len < BRCMF_FW_NVRAM_DEVPATH_LEN + 6)
+               goto fail;
+
+       /* First search for the devpathX entry and see if it is the
+        * configuration for domain_nr/bus_nr. Search the complete nvp.
+        */
+       snprintf(pcie_path, sizeof(pcie_path), "=pcie/%d/%d", domain_nr,
+                bus_nr);
+       pcie_len = strlen(pcie_path);
+       found = false;
+       i = 0;
+       while (i < nvp->nvram_len - BRCMF_FW_NVRAM_DEVPATH_LEN) {
+               /* Format: devpathX=pcie/Y/Z/
+                * Y = domain_nr, Z = bus_nr, X = virtual ID
+                */
+               if ((strncmp(&nvp->nvram[i], "devpath", 7) == 0) &&
+                   (strncmp(&nvp->nvram[i + 8], pcie_path, pcie_len) == 0)) {
+                       id = nvp->nvram[i + 7] - '0';
+                       found = true;
+                       break;
+               }
+               while (nvp->nvram[i] != 0)
+                       i++;
+               i++;
+       }
+       if (!found)
+               goto fail;
+
+       /* Now copy all valid entries, release old nvram and assign new one */
+       i = 0;
+       j = 0;
+       while (i < nvp->nvram_len) {
+               if ((nvp->nvram[i] - '0' == id) && (nvp->nvram[i + 1] == ':')) {
+                       i += 2;
+                       while (nvp->nvram[i] != 0) {
+                               nvram[j] = nvp->nvram[i];
+                               i++;
+                               j++;
+                       }
+                       nvram[j] = 0;
+                       j++;
+               }
+               while (nvp->nvram[i] != 0)
+                       i++;
+               i++;
+       }
+       kfree(nvp->nvram);
+       nvp->nvram = nvram;
+       nvp->nvram_len = j;
+       return;
+
+fail:
+       kfree(nvram);
+       nvp->nvram_len = 0;
+}
+
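A worked example of the v1 ("compressed") layout this function handles, with illustrative keys and values. For domain_nr=1, bus_nr=4 the devpath0 entry matches, so id 0 is selected and only the "0:"-prefixed entries survive, with the prefix stripped:

        devpath0=pcie/1/4/
        devpath1=pcie/1/5/
        0:boardrev=0x1203
        1:boardrev=0x1304

        resulting nvram:  boardrev=0x1203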
+/* brcmf_fw_strip_multi_v2: Some nvram files contain settings for multiple
+ * devices. Strip them down to one device; domain_nr/bus_nr determine
+ * which data is to be returned. v2 is the version where nvram is stored
+ * uncompressed; all relevant valid entries carry the
+ * pcie/domain_nr/bus_nr/ prefix.
+ */
+static void brcmf_fw_strip_multi_v2(struct nvram_parser *nvp, u16 domain_nr,
+                                   u16 bus_nr)
+{
+       char prefix[BRCMF_FW_NVRAM_PCIEDEV_LEN];
+       size_t len;
+       u32 i, j;
+       u8 *nvram;
+
+       nvram = kzalloc(nvp->nvram_len + 1 + 3 + sizeof(u32), GFP_KERNEL);
+       if (!nvram)
+               goto fail;
+
+       /* Copy all valid entries, release old nvram and assign new one.
+        * Valid entries are of type pcie/X/Y/ where X = domain_nr and
+        * Y = bus_nr.
+        */
+       snprintf(prefix, sizeof(prefix), "pcie/%d/%d/", domain_nr, bus_nr);
+       len = strlen(prefix);
+       i = 0;
+       j = 0;
+       while (i < nvp->nvram_len - len) {
+               if (strncmp(&nvp->nvram[i], prefix, len) == 0) {
+                       i += len;
+                       while (nvp->nvram[i] != 0) {
+                               nvram[j] = nvp->nvram[i];
+                               i++;
+                               j++;
+                       }
+                       nvram[j] = 0;
+                       j++;
+               }
+               while (nvp->nvram[i] != 0)
+                       i++;
+               i++;
+       }
+       kfree(nvp->nvram);
+       nvp->nvram = nvram;
+       nvp->nvram_len = j;
+       return;
+fail:
+       kfree(nvram);
+       nvp->nvram_len = 0;
+}
+
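The equivalent v2 ("uncompressed") input spells the full device path on every entry; again for domain_nr=1, bus_nr=4, with keys and values illustrative:

        pcie/1/4/boardrev=0x1203
        pcie/1/5/boardrev=0x1304

        resulting nvram:  boardrev=0x1203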
 /* brcmf_nvram_strip: Takes a buffer of "<var>=<value>\n" lines read from a
  * file and ending in a NUL. Removes carriage returns, empty lines, comment
  * lines, and converts newlines to NULs. Shortens the buffer as needed and
  * pads with NULs. End of buffer is completed with a token identifying the
  * length of the buffer.
  */
-static void *brcmf_fw_nvram_strip(const struct firmware *nv, u32 *new_length)
+static void *brcmf_fw_nvram_strip(const struct firmware *nv, u32 *new_length,
+                                 u16 domain_nr, u16 bus_nr)
 {
        struct nvram_parser nvp;
        u32 pad;
@@ -212,6 +372,16 @@ static void *brcmf_fw_nvram_strip(const struct firmware *nv, u32 *new_length)
                if (nvp.state == END)
                        break;
        }
+       if (nvp.multi_dev_v1)
+               brcmf_fw_strip_multi_v1(&nvp, domain_nr, bus_nr);
+       else if (nvp.multi_dev_v2)
+               brcmf_fw_strip_multi_v2(&nvp, domain_nr, bus_nr);
+
+       if (nvp.nvram_len == 0) {
+               kfree(nvp.nvram);
+               return NULL;
+       }
+
        pad = nvp.nvram_len;
        *new_length = roundup(nvp.nvram_len + 1, 4);
        while (pad != *new_length) {
@@ -239,6 +409,8 @@ struct brcmf_fw {
        u16 flags;
        const struct firmware *code;
        const char *nvram_name;
+       u16 domain_nr;
+       u16 bus_nr;
        void (*done)(struct device *dev, const struct firmware *fw,
                     void *nvram_image, u32 nvram_len);
 };
@@ -254,7 +426,8 @@ static void brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx)
                goto fail;
 
        if (fw) {
-               nvram = brcmf_fw_nvram_strip(fw, &nvram_length);
+               nvram = brcmf_fw_nvram_strip(fw, &nvram_length,
+                                            fwctx->domain_nr, fwctx->bus_nr);
                release_firmware(fw);
                if (!nvram && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL))
                        goto fail;
@@ -309,11 +482,12 @@ fail:
        kfree(fwctx);
 }
 
-int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
-                          const char *code, const char *nvram,
-                          void (*fw_cb)(struct device *dev,
-                                        const struct firmware *fw,
-                                        void *nvram_image, u32 nvram_len))
+int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags,
+                               const char *code, const char *nvram,
+                               void (*fw_cb)(struct device *dev,
+                                             const struct firmware *fw,
+                                             void *nvram_image, u32 nvram_len),
+                               u16 domain_nr, u16 bus_nr)
 {
        struct brcmf_fw *fwctx;
 
@@ -333,8 +507,21 @@ int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
        fwctx->done = fw_cb;
        if (flags & BRCMF_FW_REQUEST_NVRAM)
                fwctx->nvram_name = nvram;
+       fwctx->domain_nr = domain_nr;
+       fwctx->bus_nr = bus_nr;
 
        return request_firmware_nowait(THIS_MODULE, true, code, dev,
                                       GFP_KERNEL, fwctx,
                                       brcmf_fw_request_code_done);
 }
+
+int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
+                          const char *code, const char *nvram,
+                          void (*fw_cb)(struct device *dev,
+                                        const struct firmware *fw,
+                                        void *nvram_image, u32 nvram_len))
+{
+       return brcmf_fw_get_firmwares_pcie(dev, flags, code, nvram, fw_cb, 0,
+                                          0);
+}
+
index 4d3482356b77b0e068e1dc095b8888a7c8477094..604dd48ab4e095062a4b2f59e592c2dcf01d2878 100644 (file)
@@ -32,6 +32,12 @@ void brcmf_fw_nvram_free(void *nvram);
  * fails it will not use the callback, but call device_release_driver()
  * instead which will call the driver .remove() callback.
  */
+int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags,
+                               const char *code, const char *nvram,
+                               void (*fw_cb)(struct device *dev,
+                                             const struct firmware *fw,
+                                             void *nvram_image, u32 nvram_len),
+                               u16 domain_nr, u16 bus_nr);
 int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
                           const char *code, const char *nvram,
                           void (*fw_cb)(struct device *dev,
index eb1325371d3a3aa9eba03e5ba83e65135f218f4e..59440631fec50637fc3f35e162defb230d1f420c 100644 (file)
@@ -249,8 +249,8 @@ void brcmf_flowring_delete(struct brcmf_flowring *flow, u8 flowid)
 }
 
 
-void brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
-                           struct sk_buff *skb)
+u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
+                          struct sk_buff *skb)
 {
        struct brcmf_flowring_ring *ring;
 
@@ -271,6 +271,7 @@ void brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
                if (skb_queue_len(&ring->skblist) < BRCMF_FLOWRING_LOW)
                        brcmf_flowring_block(flow, flowid, false);
        }
+       return skb_queue_len(&ring->skblist);
 }
 
 
index a34cd394c616c0a11f861c06f18030d3c7129133..5551861a44bc427bca80d096f7809db6c2f12b42 100644 (file)
@@ -64,8 +64,8 @@ u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
 void brcmf_flowring_delete(struct brcmf_flowring *flow, u8 flowid);
 void brcmf_flowring_open(struct brcmf_flowring *flow, u8 flowid);
 u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u8 flowid);
-void brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
-                           struct sk_buff *skb);
+u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
+                          struct sk_buff *skb);
 struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u8 flowid);
 void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u8 flowid,
                             struct sk_buff *skb);
index f0dda0ecd23b4395219f31ef88475329e0ab69f2..5017eaa4af45fdd8f9dfd1034919c5042cce7cd8 100644 (file)
@@ -635,7 +635,7 @@ static int brcmf_fws_hanger_pushpkt(struct brcmf_fws_hanger *h,
        return 0;
 }
 
-static int brcmf_fws_hanger_poppkt(struct brcmf_fws_hanger *h,
+static inline int brcmf_fws_hanger_poppkt(struct brcmf_fws_hanger *h,
                                          u32 slot_id, struct sk_buff **pktout,
                                          bool remove_item)
 {
index 65efb146898844510aa489502ee2c9db23906c92..1b47de067d25cbb4b194ca8eae02986a3f196092 100644 (file)
@@ -73,7 +73,7 @@
 #define BRCMF_MSGBUF_TX_FLUSH_CNT1             32
 #define BRCMF_MSGBUF_TX_FLUSH_CNT2             96
 
-#define BRCMF_MSGBUF_DELAY_TXWORKER_THRS       64
+#define BRCMF_MSGBUF_DELAY_TXWORKER_THRS       96
 #define BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS     32
 
 struct msgbuf_common_hdr {
@@ -278,16 +278,6 @@ struct brcmf_msgbuf_pktids {
        struct brcmf_msgbuf_pktid *array;
 };
 
-
-/* dma flushing needs implementation for mips and arm platforms. Should
- * be put in util. Note, this is not real flushing. It is virtual non
- * cached memory. Only write buffers should have to be drained. Though
- * this may be different depending on platform......
- */
-#define brcmf_dma_flush(addr, len)
-#define brcmf_dma_invalidate_cache(addr, len)
-
-
 static void brcmf_msgbuf_rxbuf_ioctlresp_post(struct brcmf_msgbuf *msgbuf);
 
 
@@ -462,7 +452,6 @@ static int brcmf_msgbuf_tx_ioctl(struct brcmf_pub *drvr, int ifidx,
                memcpy(msgbuf->ioctbuf, buf, buf_len);
        else
                memset(msgbuf->ioctbuf, 0, buf_len);
-       brcmf_dma_flush(ioctl_buf, buf_len);
 
        err = brcmf_commonring_write_complete(commonring);
        brcmf_commonring_unlock(commonring);
@@ -795,6 +784,8 @@ static int brcmf_msgbuf_txdata(struct brcmf_pub *drvr, int ifidx,
        struct brcmf_flowring *flow = msgbuf->flow;
        struct ethhdr *eh = (struct ethhdr *)(skb->data);
        u32 flowid;
+       u32 queue_count;
+       bool force;
 
        flowid = brcmf_flowring_lookup(flow, eh->h_dest, skb->priority, ifidx);
        if (flowid == BRCMF_FLOWRING_INVALID_ID) {
@@ -802,8 +793,9 @@ static int brcmf_msgbuf_txdata(struct brcmf_pub *drvr, int ifidx,
                if (flowid == BRCMF_FLOWRING_INVALID_ID)
                        return -ENOMEM;
        }
-       brcmf_flowring_enqueue(flow, flowid, skb);
-       brcmf_msgbuf_schedule_txdata(msgbuf, flowid, false);
+       queue_count = brcmf_flowring_enqueue(flow, flowid, skb);
+       force = ((queue_count % BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS) == 0);
+       brcmf_msgbuf_schedule_txdata(msgbuf, flowid, force);
 
        return 0;
 }
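With BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS at 32, this forces the Tx worker every time the flowring depth crosses a multiple of 32 packets instead of waiting on the lazy schedule. A standalone restatement of the predicate, name illustrative:

        static bool txworker_forced(u32 queue_count)
        {
                /* true at depths 32, 64, 96, ... */
                return (queue_count % BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS) == 0;
        }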
index c824570ddea3208b1f20230a6eecbc193dd68c50..03f35e0c52ca5469f49a7d43a39edac648563c10 100644 (file)
@@ -39,10 +39,16 @@ void brcmf_of_probe(struct brcmf_sdio_dev *sdiodev)
        if (!sdiodev->pdata)
                return;
 
+       if (of_property_read_u32(np, "brcm,drive-strength", &val) == 0)
+               sdiodev->pdata->drive_strength = val;
+
+       /* make sure there are interrupts defined in the node */
+       if (!of_find_property(np, "interrupts", NULL))
+               return;
+
        irq = irq_of_parse_and_map(np, 0);
        if (!irq) {
                brcmf_err("interrupt could not be mapped\n");
-               devm_kfree(dev, sdiodev->pdata);
                return;
        }
        irqf = irqd_get_trigger_type(irq_get_irq_data(irq));
@@ -50,7 +56,4 @@ void brcmf_of_probe(struct brcmf_sdio_dev *sdiodev)
        sdiodev->pdata->oob_irq_supported = true;
        sdiodev->pdata->oob_irq_nr = irq;
        sdiodev->pdata->oob_irq_flags = irqf;
-
-       if (of_property_read_u32(np, "brcm,drive-strength", &val) == 0)
-               sdiodev->pdata->drive_strength = val;
 }
index 1831ecd0813e955dfc88c3c6eb8783f36060465b..37a2624d7bbaed4a79fb3dcf2d6a23f725f3ea97 100644 (file)
@@ -51,6 +51,8 @@ enum brcmf_pcie_state {
 #define BRCMF_PCIE_4356_NVRAM_NAME             "brcm/brcmfmac4356-pcie.txt"
 #define BRCMF_PCIE_43570_FW_NAME               "brcm/brcmfmac43570-pcie.bin"
 #define BRCMF_PCIE_43570_NVRAM_NAME            "brcm/brcmfmac43570-pcie.txt"
+#define BRCMF_PCIE_4358_FW_NAME                        "brcm/brcmfmac4358-pcie.bin"
+#define BRCMF_PCIE_4358_NVRAM_NAME             "brcm/brcmfmac4358-pcie.txt"
 
 #define BRCMF_PCIE_FW_UP_TIMEOUT               2000 /* msec */
 
@@ -110,10 +112,11 @@ enum brcmf_pcie_state {
                                                 BRCMF_PCIE_MB_INT_D2H3_DB0 | \
                                                 BRCMF_PCIE_MB_INT_D2H3_DB1)
 
-#define BRCMF_PCIE_MIN_SHARED_VERSION          4
+#define BRCMF_PCIE_MIN_SHARED_VERSION          5
 #define BRCMF_PCIE_MAX_SHARED_VERSION          5
 #define BRCMF_PCIE_SHARED_VERSION_MASK         0x00FF
-#define BRCMF_PCIE_SHARED_TXPUSH_SUPPORT       0x4000
+#define BRCMF_PCIE_SHARED_DMA_INDEX            0x10000
+#define BRCMF_PCIE_SHARED_DMA_2B_IDX           0x100000
 
 #define BRCMF_PCIE_FLAGS_HTOD_SPLIT            0x4000
 #define BRCMF_PCIE_FLAGS_DTOH_SPLIT            0x8000
@@ -145,6 +148,10 @@ enum brcmf_pcie_state {
 #define BRCMF_SHARED_RING_H2D_R_IDX_PTR_OFFSET 8
 #define BRCMF_SHARED_RING_D2H_W_IDX_PTR_OFFSET 12
 #define BRCMF_SHARED_RING_D2H_R_IDX_PTR_OFFSET 16
+#define BRCMF_SHARED_RING_H2D_WP_HADDR_OFFSET  20
+#define BRCMF_SHARED_RING_H2D_RP_HADDR_OFFSET  28
+#define BRCMF_SHARED_RING_D2H_WP_HADDR_OFFSET  36
+#define BRCMF_SHARED_RING_D2H_RP_HADDR_OFFSET  44
 #define BRCMF_SHARED_RING_TCM_MEMLOC_OFFSET    0
 #define BRCMF_SHARED_RING_MAX_SUB_QUEUES       52
 
@@ -189,6 +196,8 @@ MODULE_FIRMWARE(BRCMF_PCIE_4356_FW_NAME);
 MODULE_FIRMWARE(BRCMF_PCIE_4356_NVRAM_NAME);
 MODULE_FIRMWARE(BRCMF_PCIE_43570_FW_NAME);
 MODULE_FIRMWARE(BRCMF_PCIE_43570_NVRAM_NAME);
+MODULE_FIRMWARE(BRCMF_PCIE_4358_FW_NAME);
+MODULE_FIRMWARE(BRCMF_PCIE_4358_NVRAM_NAME);
 
 
 struct brcmf_pcie_console {
@@ -244,6 +253,13 @@ struct brcmf_pciedev_info {
        bool mbdata_completed;
        bool irq_allocated;
        bool wowl_enabled;
+       u8 dma_idx_sz;
+       void *idxbuf;
+       u32 idxbuf_sz;
+       dma_addr_t idxbuf_dmahandle;
+       u16 (*read_ptr)(struct brcmf_pciedev_info *devinfo, u32 mem_offset);
+       void (*write_ptr)(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
+                         u16 value);
 };
 
 struct brcmf_pcie_ringbuf {
@@ -273,15 +289,6 @@ static const u32 brcmf_ring_itemsize[BRCMF_NROF_COMMON_MSGRINGS] = {
 };
 
 
-/* dma flushing needs implementation for mips and arm platforms. Should
- * be put in util. Note, this is not real flushing. It is virtual non
- * cached memory. Only write buffers should have to be drained. Though
- * this may be different depending on platform......
- */
-#define brcmf_dma_flush(addr, len)
-#define brcmf_dma_invalidate_cache(addr, len)
-
-
 static u32
 brcmf_pcie_read_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset)
 {
@@ -329,6 +336,25 @@ brcmf_pcie_write_tcm16(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
 }
 
 
+static u16
+brcmf_pcie_read_idx(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
+{
+       u16 *address = devinfo->idxbuf + mem_offset;
+
+       return (*(address));
+}
+
+
+static void
+brcmf_pcie_write_idx(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
+                    u16 value)
+{
+       u16 *address = devinfo->idxbuf + mem_offset;
+
+       *(address) = value;
+}
+
+
 static u32
 brcmf_pcie_read_tcm32(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
 {
@@ -874,7 +900,7 @@ static int brcmf_pcie_ring_mb_write_rptr(void *ctx)
        brcmf_dbg(PCIE, "W r_ptr %d (%d), ring %d\n", commonring->r_ptr,
                  commonring->w_ptr, ring->id);
 
-       brcmf_pcie_write_tcm16(devinfo, ring->r_idx_addr, commonring->r_ptr);
+       devinfo->write_ptr(devinfo, ring->r_idx_addr, commonring->r_ptr);
 
        return 0;
 }
@@ -892,7 +918,7 @@ static int brcmf_pcie_ring_mb_write_wptr(void *ctx)
        brcmf_dbg(PCIE, "W w_ptr %d (%d), ring %d\n", commonring->w_ptr,
                  commonring->r_ptr, ring->id);
 
-       brcmf_pcie_write_tcm16(devinfo, ring->w_idx_addr, commonring->w_ptr);
+       devinfo->write_ptr(devinfo, ring->w_idx_addr, commonring->w_ptr);
 
        return 0;
 }
@@ -921,7 +947,7 @@ static int brcmf_pcie_ring_mb_update_rptr(void *ctx)
        if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
                return -EIO;
 
-       commonring->r_ptr = brcmf_pcie_read_tcm16(devinfo, ring->r_idx_addr);
+       commonring->r_ptr = devinfo->read_ptr(devinfo, ring->r_idx_addr);
 
        brcmf_dbg(PCIE, "R r_ptr %d (%d), ring %d\n", commonring->r_ptr,
                  commonring->w_ptr, ring->id);
@@ -939,7 +965,7 @@ static int brcmf_pcie_ring_mb_update_wptr(void *ctx)
        if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
                return -EIO;
 
-       commonring->w_ptr = brcmf_pcie_read_tcm16(devinfo, ring->w_idx_addr);
+       commonring->w_ptr = devinfo->read_ptr(devinfo, ring->w_idx_addr);
 
        brcmf_dbg(PCIE, "R w_ptr %d (%d), ring %d\n", commonring->w_ptr,
                  commonring->r_ptr, ring->id);
@@ -1044,6 +1070,13 @@ static void brcmf_pcie_release_ringbuffers(struct brcmf_pciedev_info *devinfo)
        }
        kfree(devinfo->shared.flowrings);
        devinfo->shared.flowrings = NULL;
+       if (devinfo->idxbuf) {
+               dma_free_coherent(&devinfo->pdev->dev,
+                                 devinfo->idxbuf_sz,
+                                 devinfo->idxbuf,
+                                 devinfo->idxbuf_dmahandle);
+               devinfo->idxbuf = NULL;
+       }
 }
 
 
@@ -1059,19 +1092,72 @@ static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
        u32 addr;
        u32 ring_mem_ptr;
        u32 i;
+       u64 address;
+       u32 bufsz;
        u16 max_sub_queues;
+       u8 idx_offset;
 
        ring_addr = devinfo->shared.ring_info_addr;
        brcmf_dbg(PCIE, "Base ring addr = 0x%08x\n", ring_addr);
+       addr = ring_addr + BRCMF_SHARED_RING_MAX_SUB_QUEUES;
+       max_sub_queues = brcmf_pcie_read_tcm16(devinfo, addr);
+
+       if (devinfo->dma_idx_sz != 0) {
+               bufsz = (BRCMF_NROF_D2H_COMMON_MSGRINGS + max_sub_queues) *
+                       devinfo->dma_idx_sz * 2;
+               devinfo->idxbuf = dma_alloc_coherent(&devinfo->pdev->dev, bufsz,
+                                                    &devinfo->idxbuf_dmahandle,
+                                                    GFP_KERNEL);
+               if (!devinfo->idxbuf)
+                       devinfo->dma_idx_sz = 0;
+       }
 
-       addr = ring_addr + BRCMF_SHARED_RING_D2H_W_IDX_PTR_OFFSET;
-       d2h_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
-       addr = ring_addr + BRCMF_SHARED_RING_D2H_R_IDX_PTR_OFFSET;
-       d2h_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
-       addr = ring_addr + BRCMF_SHARED_RING_H2D_W_IDX_PTR_OFFSET;
-       h2d_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
-       addr = ring_addr + BRCMF_SHARED_RING_H2D_R_IDX_PTR_OFFSET;
-       h2d_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
+       if (devinfo->dma_idx_sz == 0) {
+               addr = ring_addr + BRCMF_SHARED_RING_D2H_W_IDX_PTR_OFFSET;
+               d2h_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
+               addr = ring_addr + BRCMF_SHARED_RING_D2H_R_IDX_PTR_OFFSET;
+               d2h_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
+               addr = ring_addr + BRCMF_SHARED_RING_H2D_W_IDX_PTR_OFFSET;
+               h2d_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
+               addr = ring_addr + BRCMF_SHARED_RING_H2D_R_IDX_PTR_OFFSET;
+               h2d_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
+               idx_offset = sizeof(u32);
+               devinfo->write_ptr = brcmf_pcie_write_tcm16;
+               devinfo->read_ptr = brcmf_pcie_read_tcm16;
+               brcmf_dbg(PCIE, "Using TCM indices\n");
+       } else {
+               memset(devinfo->idxbuf, 0, bufsz);
+               devinfo->idxbuf_sz = bufsz;
+               idx_offset = devinfo->dma_idx_sz;
+               devinfo->write_ptr = brcmf_pcie_write_idx;
+               devinfo->read_ptr = brcmf_pcie_read_idx;
+
+               h2d_w_idx_ptr = 0;
+               addr = ring_addr + BRCMF_SHARED_RING_H2D_WP_HADDR_OFFSET;
+               address = (u64)devinfo->idxbuf_dmahandle;
+               brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
+               brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
+
+               h2d_r_idx_ptr = h2d_w_idx_ptr + max_sub_queues * idx_offset;
+               addr = ring_addr + BRCMF_SHARED_RING_H2D_RP_HADDR_OFFSET;
+               address += max_sub_queues * idx_offset;
+               brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
+               brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
+
+               d2h_w_idx_ptr = h2d_r_idx_ptr + max_sub_queues * idx_offset;
+               addr = ring_addr + BRCMF_SHARED_RING_D2H_WP_HADDR_OFFSET;
+               address += max_sub_queues * idx_offset;
+               brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
+               brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
+
+               d2h_r_idx_ptr = d2h_w_idx_ptr +
+                               BRCMF_NROF_D2H_COMMON_MSGRINGS * idx_offset;
+               addr = ring_addr + BRCMF_SHARED_RING_D2H_RP_HADDR_OFFSET;
+               address += BRCMF_NROF_D2H_COMMON_MSGRINGS * idx_offset;
+               brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
+               brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
+               brcmf_dbg(PCIE, "Using host memory indices\n");
+       }
 
        addr = ring_addr + BRCMF_SHARED_RING_TCM_MEMLOC_OFFSET;
        ring_mem_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
@@ -1085,8 +1171,8 @@ static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
                ring->id = i;
                devinfo->shared.commonrings[i] = ring;
 
-               h2d_w_idx_ptr += sizeof(u32);
-               h2d_r_idx_ptr += sizeof(u32);
+               h2d_w_idx_ptr += idx_offset;
+               h2d_r_idx_ptr += idx_offset;
                ring_mem_ptr += BRCMF_RING_MEM_SZ;
        }
 
@@ -1100,13 +1186,11 @@ static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
                ring->id = i;
                devinfo->shared.commonrings[i] = ring;
 
-               d2h_w_idx_ptr += sizeof(u32);
-               d2h_r_idx_ptr += sizeof(u32);
+               d2h_w_idx_ptr += idx_offset;
+               d2h_r_idx_ptr += idx_offset;
                ring_mem_ptr += BRCMF_RING_MEM_SZ;
        }
 
-       addr = ring_addr + BRCMF_SHARED_RING_MAX_SUB_QUEUES;
-       max_sub_queues = brcmf_pcie_read_tcm16(devinfo, addr);
        devinfo->shared.nrof_flowrings =
                        max_sub_queues - BRCMF_NROF_H2D_COMMON_MSGRINGS;
        rings = kcalloc(devinfo->shared.nrof_flowrings, sizeof(*ring),
@@ -1130,15 +1214,15 @@ static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
                                             ring);
                ring->w_idx_addr = h2d_w_idx_ptr;
                ring->r_idx_addr = h2d_r_idx_ptr;
-               h2d_w_idx_ptr += sizeof(u32);
-               h2d_r_idx_ptr += sizeof(u32);
+               h2d_w_idx_ptr += idx_offset;
+               h2d_r_idx_ptr += idx_offset;
        }
        devinfo->shared.flowrings = rings;
 
        return 0;
 
 fail:
-       brcmf_err("Allocating commonring buffers failed\n");
+       brcmf_err("Allocating ring buffers failed\n");
        brcmf_pcie_release_ringbuffers(devinfo);
        return -ENOMEM;
 }
@@ -1171,7 +1255,6 @@ static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo)
                goto fail;
 
        memset(devinfo->shared.scratch, 0, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);
-       brcmf_dma_flush(devinfo->shared.scratch, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);
 
        addr = devinfo->shared.tcm_base_address +
               BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET;
@@ -1189,7 +1272,6 @@ static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo)
                goto fail;
 
        memset(devinfo->shared.ringupd, 0, BRCMF_DMA_D2H_RINGUPD_BUF_LEN);
-       brcmf_dma_flush(devinfo->shared.ringupd, BRCMF_DMA_D2H_RINGUPD_BUF_LEN);
 
        addr = devinfo->shared.tcm_base_address +
               BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET;
@@ -1276,10 +1358,13 @@ brcmf_pcie_init_share_ram_info(struct brcmf_pciedev_info *devinfo,
                brcmf_err("Unsupported PCIE version %d\n", version);
                return -EINVAL;
        }
-       if (shared->flags & BRCMF_PCIE_SHARED_TXPUSH_SUPPORT) {
-               brcmf_err("Unsupported legacy TX mode 0x%x\n",
-                         shared->flags & BRCMF_PCIE_SHARED_TXPUSH_SUPPORT);
-               return -EINVAL;
+
+       /* check whether the firmware supports DMA indices */
+       if (shared->flags & BRCMF_PCIE_SHARED_DMA_INDEX) {
+               if (shared->flags & BRCMF_PCIE_SHARED_DMA_2B_IDX)
+                       devinfo->dma_idx_sz = sizeof(u16);
+               else
+                       devinfo->dma_idx_sz = sizeof(u32);
        }
 
        addr = sharedram_addr + BRCMF_SHARED_MAX_RXBUFPOST_OFFSET;
@@ -1333,6 +1418,10 @@ static int brcmf_pcie_get_fwnames(struct brcmf_pciedev_info *devinfo)
                fw_name = BRCMF_PCIE_43570_FW_NAME;
                nvram_name = BRCMF_PCIE_43570_NVRAM_NAME;
                break;
+       case BRCM_CC_4358_CHIP_ID:
+               fw_name = BRCMF_PCIE_4358_FW_NAME;
+               nvram_name = BRCMF_PCIE_4358_NVRAM_NAME;
+               break;
        default:
                brcmf_err("Unsupported chip 0x%04x\n", devinfo->ci->chip);
                return -ENODEV;
@@ -1609,7 +1698,7 @@ static void brcmf_pcie_setup(struct device *dev, const struct firmware *fw,
                bus->msgbuf->commonrings[i] =
                                &devinfo->shared.commonrings[i]->commonring;
 
-       flowrings = kcalloc(devinfo->shared.nrof_flowrings, sizeof(flowrings),
+       flowrings = kcalloc(devinfo->shared.nrof_flowrings, sizeof(*flowrings),
                            GFP_KERNEL);
        if (!flowrings)
                goto fail;
@@ -1641,8 +1730,13 @@ brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        struct brcmf_pciedev_info *devinfo;
        struct brcmf_pciedev *pcie_bus_dev;
        struct brcmf_bus *bus;
+       u16 domain_nr;
+       u16 bus_nr;
 
-       brcmf_dbg(PCIE, "Enter %x:%x\n", pdev->vendor, pdev->device);
+       domain_nr = pci_domain_nr(pdev->bus) + 1;
+       bus_nr = pdev->bus->number;
+       brcmf_dbg(PCIE, "Enter %x:%x (%d/%d)\n", pdev->vendor, pdev->device,
+                 domain_nr, bus_nr);
 
        ret = -ENOMEM;
        devinfo = kzalloc(sizeof(*devinfo), GFP_KERNEL);
@@ -1691,10 +1785,10 @@ brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        if (ret)
                goto fail_bus;
 
-       ret = brcmf_fw_get_firmwares(bus->dev, BRCMF_FW_REQUEST_NVRAM |
-                                              BRCMF_FW_REQ_NV_OPTIONAL,
-                                    devinfo->fw_name, devinfo->nvram_name,
-                                    brcmf_pcie_setup);
+       ret = brcmf_fw_get_firmwares_pcie(bus->dev, BRCMF_FW_REQUEST_NVRAM |
+                                                   BRCMF_FW_REQ_NV_OPTIONAL,
+                                         devinfo->fw_name, devinfo->nvram_name,
+                                         brcmf_pcie_setup, domain_nr, bus_nr);
        if (ret == 0)
                return 0;
 fail_bus:
@@ -1850,9 +1944,11 @@ static struct pci_device_id brcmf_pcie_devid_table[] = {
        BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID),
        BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID),
        BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID),
+       BRCMF_PCIE_DEVICE(BRCM_PCIE_4358_DEVICE_ID),
        BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_DEVICE_ID),
        BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_2G_DEVICE_ID),
        BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_5G_DEVICE_ID),
+       BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_RAW_DEVICE_ID),
        { /* end: all zeroes */ }
 };
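Aside: the hunks above route every ring-index access through devinfo->read_ptr/write_ptr, bound once in brcmf_pcie_init_ringbuffers(), so the hot ring-update paths never branch on whether the indices live in device TCM or in a host DMA-coherent buffer (whose 64-bit DMA address is split into two 32-bit TCM writes). A minimal, self-contained C sketch of that dispatch pattern follows; the names and the plain arrays standing in for TCM and the index buffer are illustrative, not the driver's.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct devinfo {
	uint16_t tcm[64];	/* stand-in for the device TCM window */
	uint16_t idxbuf[64];	/* stand-in for the DMA-coherent index buffer */
	uint16_t (*read_ptr)(struct devinfo *di, uint32_t off);
	void (*write_ptr)(struct devinfo *di, uint32_t off, uint16_t val);
};

static uint16_t read_tcm16(struct devinfo *di, uint32_t off)
{
	return di->tcm[off];
}

static void write_tcm16(struct devinfo *di, uint32_t off, uint16_t val)
{
	di->tcm[off] = val;
}

static uint16_t read_idx(struct devinfo *di, uint32_t off)
{
	return di->idxbuf[off];
}

static void write_idx(struct devinfo *di, uint32_t off, uint16_t val)
{
	di->idxbuf[off] = val;
}

int main(void)
{
	struct devinfo di;
	int dma_idx = 1;	/* would come from the firmware shared flags */

	memset(&di, 0, sizeof(di));
	if (dma_idx) {		/* host-memory indices */
		di.read_ptr = read_idx;
		di.write_ptr = write_idx;
	} else {		/* fall back to TCM indices */
		di.read_ptr = read_tcm16;
		di.write_ptr = write_tcm16;
	}

	/* ring code calls the hooks and never re-checks the mode */
	di.write_ptr(&di, 3, 42);
	printf("r_ptr = %u\n", di.read_ptr(&di, 3));
	return 0;
}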
 
index ab0c898330137e494a79c7418c151f9dec860f98..bf7a8b1ad91485afa1c15fb9d1e38c5872ca3362 100644 (file)
@@ -601,6 +601,8 @@ static const struct sdiod_drive_str sdiod_drvstr_tab2_3v3[] = {
 #define BCM43241B0_NVRAM_NAME          "brcm/brcmfmac43241b0-sdio.txt"
 #define BCM43241B4_FIRMWARE_NAME       "brcm/brcmfmac43241b4-sdio.bin"
 #define BCM43241B4_NVRAM_NAME          "brcm/brcmfmac43241b4-sdio.txt"
+#define BCM43241B5_FIRMWARE_NAME       "brcm/brcmfmac43241b5-sdio.bin"
+#define BCM43241B5_NVRAM_NAME          "brcm/brcmfmac43241b5-sdio.txt"
 #define BCM4329_FIRMWARE_NAME          "brcm/brcmfmac4329-sdio.bin"
 #define BCM4329_NVRAM_NAME             "brcm/brcmfmac4329-sdio.txt"
 #define BCM4330_FIRMWARE_NAME          "brcm/brcmfmac4330-sdio.bin"
@@ -628,6 +630,8 @@ MODULE_FIRMWARE(BCM43241B0_FIRMWARE_NAME);
 MODULE_FIRMWARE(BCM43241B0_NVRAM_NAME);
 MODULE_FIRMWARE(BCM43241B4_FIRMWARE_NAME);
 MODULE_FIRMWARE(BCM43241B4_NVRAM_NAME);
+MODULE_FIRMWARE(BCM43241B5_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM43241B5_NVRAM_NAME);
 MODULE_FIRMWARE(BCM4329_FIRMWARE_NAME);
 MODULE_FIRMWARE(BCM4329_NVRAM_NAME);
 MODULE_FIRMWARE(BCM4330_FIRMWARE_NAME);
@@ -667,7 +671,8 @@ enum brcmf_firmware_type {
 static const struct brcmf_firmware_names brcmf_fwname_data[] = {
        { BRCM_CC_43143_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM43143) },
        { BRCM_CC_43241_CHIP_ID, 0x0000001F, BRCMF_FIRMWARE_NVRAM(BCM43241B0) },
-       { BRCM_CC_43241_CHIP_ID, 0xFFFFFFE0, BRCMF_FIRMWARE_NVRAM(BCM43241B4) },
+       { BRCM_CC_43241_CHIP_ID, 0x00000020, BRCMF_FIRMWARE_NVRAM(BCM43241B4) },
+       { BRCM_CC_43241_CHIP_ID, 0xFFFFFFC0, BRCMF_FIRMWARE_NVRAM(BCM43241B5) },
        { BRCM_CC_4329_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4329) },
        { BRCM_CC_4330_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4330) },
        { BRCM_CC_4334_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4334) },
@@ -3550,10 +3555,6 @@ void brcmf_sdio_isr(struct brcmf_sdio *bus)
                return;
        }
 
-       if (bus->sdiodev->state != BRCMF_SDIOD_DATA) {
-               brcmf_err("bus is down. we have nothing to do\n");
-               return;
-       }
        /* Count the interrupt call */
        bus->sdcnt.intrcount++;
        if (in_interrupt())
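Aside: the BCM43241 rows above narrow the B4 revision mask from 0xFFFFFFE0 to 0x00000020, so only chip revision 5 keeps the B4 image and revisions 6 and up pick the new B5 image. Below is a self-contained sketch of the chip-id/revision-mask lookup such a table implies, assuming an entry matches when its mask contains BIT(chiprev); the table contents and function names here are illustrative.

#include <stdio.h>

struct fw_entry {
	unsigned int chipid;
	unsigned int revmsk;	/* bit N set => chip revision N matches */
	const char *fw;
};

static const struct fw_entry fw_table[] = {
	{ 43241, 0x0000001F, "brcmfmac43241b0-sdio.bin" },	/* revs 0-4 */
	{ 43241, 0x00000020, "brcmfmac43241b4-sdio.bin" },	/* rev 5 */
	{ 43241, 0xFFFFFFC0, "brcmfmac43241b5-sdio.bin" },	/* revs 6+ */
};

static const char *pick_fw(unsigned int chipid, unsigned int chiprev)
{
	unsigned int i;

	for (i = 0; i < sizeof(fw_table) / sizeof(fw_table[0]); i++)
		if (fw_table[i].chipid == chipid &&
		    (fw_table[i].revmsk & (1u << chiprev)))
			return fw_table[i].fw;
	return NULL;
}

int main(void)
{
	printf("rev 5 -> %s\n", pick_fw(43241, 5));	/* B4 image */
	printf("rev 6 -> %s\n", pick_fw(43241, 6));	/* B5 image */
	return 0;
}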
index 5df6aa72cc2db32c5ccca6f76c50d79dc05914ed..daba86d881bc1a4930f84ef8ebc5078bde4710e3 100644 (file)
@@ -1270,8 +1270,13 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo)
        bus->chiprev = bus_pub->chiprev;
 
        /* request firmware here */
-       brcmf_fw_get_firmwares(dev, 0, brcmf_usb_get_fwname(devinfo), NULL,
-                              brcmf_usb_probe_phase2);
+       ret = brcmf_fw_get_firmwares(dev, 0, brcmf_usb_get_fwname(devinfo),
+                                    NULL, brcmf_usb_probe_phase2);
+       if (ret) {
+               brcmf_err("firmware request failed: %d\n", ret);
+               goto fail;
+       }
+
        return 0;
 
 fail:
index 48135063347e4388586085a15127429501ec467a..b46cab250615abaccdb6f5b226705f082f0b951e 100644 (file)
@@ -41,8 +41,7 @@
 #define BRCMS_FLUSH_TIMEOUT    500 /* msec */
 
 /* Flags we support */
-#define MAC_FILTERS (FIF_PROMISC_IN_BSS | \
-       FIF_ALLMULTI | \
+#define MAC_FILTERS (FIF_ALLMULTI | \
        FIF_FCSFAIL | \
        FIF_CONTROL | \
        FIF_OTHER_BSS | \
@@ -743,8 +742,6 @@ brcms_ops_configure_filter(struct ieee80211_hw *hw,
        changed_flags &= MAC_FILTERS;
        *total_flags &= MAC_FILTERS;
 
-       if (changed_flags & FIF_PROMISC_IN_BSS)
-               brcms_dbg_info(core, "FIF_PROMISC_IN_BSS\n");
        if (changed_flags & FIF_ALLMULTI)
                brcms_dbg_info(core, "FIF_ALLMULTI\n");
        if (changed_flags & FIF_FCSFAIL)
index 369527e2768956ee30563d8d330341c802f7bc6d..9728be0e704bc703d9e2bc1868e6656fe3729157 100644 (file)
@@ -3571,7 +3571,7 @@ void brcms_c_mac_promisc(struct brcms_c_info *wlc, uint filter_flags)
 
        wlc->filter_flags = filter_flags;
 
-       if (filter_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS))
+       if (filter_flags & FIF_OTHER_BSS)
                promisc_bits |= MCTL_PROMISC;
 
        if (filter_flags & FIF_BCN_PRBRESP_PROMISC)
index 4efdd51af9c8fd72ce99ea00e8bc03f6c86c271a..7a6daa37dc6b086853c033f7da3b2b2276b4fe65 100644 (file)
@@ -45,6 +45,7 @@
 #define BRCM_CC_43567_CHIP_ID          43567
 #define BRCM_CC_43569_CHIP_ID          43569
 #define BRCM_CC_43570_CHIP_ID          43570
+#define BRCM_CC_4358_CHIP_ID           0x4358
 #define BRCM_CC_43602_CHIP_ID          43602
 
 /* USB Device IDs */
 #define BRCM_PCIE_4356_DEVICE_ID       0x43ec
 #define BRCM_PCIE_43567_DEVICE_ID      0x43d3
 #define BRCM_PCIE_43570_DEVICE_ID      0x43d9
+#define BRCM_PCIE_4358_DEVICE_ID       0x43e9
 #define BRCM_PCIE_43602_DEVICE_ID      0x43ba
 #define BRCM_PCIE_43602_2G_DEVICE_ID   0x43bb
 #define BRCM_PCIE_43602_5G_DEVICE_ID   0x43bc
+#define BRCM_PCIE_43602_RAW_DEVICE_ID  43602
 
 /* brcmsmac IDs */
 #define BCM4313_D11N2G_ID      0x4727  /* 4313 802.11n 2.4G device */
index b0f65fa094287868f21802c89f395d99996a4f11..b86500b4418f743ffb9363f6bcf70c8d91e94820 100644 (file)
@@ -578,13 +578,11 @@ void cw1200_configure_filter(struct ieee80211_hw *dev,
 {
        struct cw1200_common *priv = dev->priv;
        bool listening = !!(*total_flags &
-                           (FIF_PROMISC_IN_BSS |
-                            FIF_OTHER_BSS |
+                           (FIF_OTHER_BSS |
                             FIF_BCN_PRBRESP_PROMISC |
                             FIF_PROBE_REQ));
 
-       *total_flags &= FIF_PROMISC_IN_BSS |
-                       FIF_OTHER_BSS |
+       *total_flags &= FIF_OTHER_BSS |
                        FIF_FCSFAIL |
                        FIF_BCN_PRBRESP_PROMISC |
                        FIF_PROBE_REQ;
@@ -592,14 +590,12 @@ void cw1200_configure_filter(struct ieee80211_hw *dev,
        down(&priv->scan.lock);
        mutex_lock(&priv->conf_mutex);
 
-       priv->rx_filter.promiscuous = (*total_flags & FIF_PROMISC_IN_BSS)
-                       ? 1 : 0;
+       priv->rx_filter.promiscuous = 0;
        priv->rx_filter.bssid = (*total_flags & (FIF_OTHER_BSS |
                        FIF_PROBE_REQ)) ? 1 : 0;
        priv->rx_filter.fcs = (*total_flags & FIF_FCSFAIL) ? 1 : 0;
        priv->disable_beacon_filter = !(*total_flags &
                                        (FIF_BCN_PRBRESP_PROMISC |
-                                        FIF_PROMISC_IN_BSS |
                                         FIF_PROBE_REQ));
        if (priv->listening != listening) {
                priv->listening = listening;
index e5665804d9863bd7b78c28bf643aa34ec4a362a6..189cdf58084b6168abf90d0da1daac7c087fe58e 100644 (file)
@@ -3048,7 +3048,7 @@ il3945_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
        D_MAC80211("Enter: changed: 0x%x, total: 0x%x\n", changed_flags,
                   *total_flags);
 
-       CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
+       CHK(FIF_OTHER_BSS, RXON_FILTER_PROMISC_MSK);
        CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
        CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
 
@@ -3074,7 +3074,7 @@ il3945_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
         * filters into the device.
         */
        *total_flags &=
-           FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
+           FIF_OTHER_BSS | FIF_ALLMULTI |
            FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
 }
 
index 976f65fe9c383f8ecd9653dfa0b7c091dbc4f79d..e4b175cbeefd60c38b0e54b0be7f96d9298650d1 100644 (file)
@@ -6166,7 +6166,7 @@ il4965_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
        D_MAC80211("Enter: changed: 0x%x, total: 0x%x\n", changed_flags,
                   *total_flags);
 
-       CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
+       CHK(FIF_OTHER_BSS, RXON_FILTER_PROMISC_MSK);
        /* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
        CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
        CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
@@ -6192,7 +6192,7 @@ il4965_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
         * filters into the device.
         */
        *total_flags &=
-           FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
+           FIF_OTHER_BSS | FIF_ALLMULTI |
            FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
 }
 
index f89f446e5c8ae32b5dccc42cae6234ad75283ff8..aba095761ac669a99cc3553b1b2fb4d13f1a6574 100644 (file)
@@ -21,6 +21,7 @@ config IWLWIFI
                Intel 7260 Wi-Fi Adapter
                Intel 3160 Wi-Fi Adapter
                Intel 7265 Wi-Fi Adapter
+               Intel 8260 Wi-Fi Adapter
                Intel 3165 Wi-Fi Adapter
 
 
@@ -54,16 +55,17 @@ config IWLDVM
        tristate "Intel Wireless WiFi DVM Firmware support"
        default IWLWIFI
        help
-         This is the driver that supports the DVM firmware which is
-         used by most existing devices (with the exception of 7260
-         and 3160).
+         This is the driver that supports the DVM firmware. The list
+         of devices that use this firmware is available here:
+         https://wireless.wiki.kernel.org/en/users/drivers/iwlwifi#firmware
 
 config IWLMVM
        tristate "Intel Wireless WiFi MVM Firmware support"
        select WANT_DEV_COREDUMP
        help
-         This is the driver that supports the MVM firmware which is
-         currently only available for 7260 and 3160 devices.
+         This is the driver that supports the MVM firmware. The list
+         of devices that use this firmware is available here:
+         https://wireless.wiki.kernel.org/en/users/drivers/iwlwifi#firmware
 
 # don't call it _MODULE -- will confuse Kconfig/fixdep/...
 config IWLWIFI_OPMODE_MODULAR
index 3d32f4120174d9fe6d3bb0a337cd5fe97fed6b83..dbfc5b18bcb712fe20dee3e048891ee81bf2876c 100644 (file)
@@ -9,6 +9,7 @@ iwlwifi-objs            += iwl-phy-db.o iwl-nvm-parse.o
 iwlwifi-objs           += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
 iwlwifi-$(CONFIG_IWLDVM) += iwl-1000.o iwl-2000.o iwl-5000.o iwl-6000.o
 iwlwifi-$(CONFIG_IWLMVM) += iwl-7000.o iwl-8000.o
+iwlwifi-objs           += iwl-trans.o
 
 iwlwifi-objs += $(iwlwifi-m)
 
index 5abd62ed8cb47da0c9dfe21de3592dc2482f6926..852461ffe98febdf5757d3139df2ac010d1204bc 100644 (file)
@@ -112,6 +112,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
                    IEEE80211_HW_QUEUE_CONTROL |
                    IEEE80211_HW_SUPPORTS_PS |
                    IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
+                   IEEE80211_HW_SUPPORT_FAST_XMIT |
                    IEEE80211_HW_WANT_MONITOR_VIF;
 
        hw->offchannel_tx_hw_queue = IWL_AUX_QUEUE;
@@ -1061,7 +1062,7 @@ static void iwlagn_configure_filter(struct ieee80211_hw *hw,
        IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
                        changed_flags, *total_flags);
 
-       CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
+       CHK(FIF_OTHER_BSS, RXON_FILTER_PROMISC_MSK);
        /* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
        CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
        CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
@@ -1088,7 +1089,7 @@ static void iwlagn_configure_filter(struct ieee80211_hw *hw,
         * since we currently do not support programming multicast
         * filters into the device.
         */
-       *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
+       *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI |
                        FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
 }
 
@@ -1140,7 +1141,6 @@ static void iwlagn_mac_event_callback(struct ieee80211_hw *hw,
                return;
 
        IWL_DEBUG_MAC80211(priv, "enter\n");
-       mutex_lock(&priv->mutex);
 
        if (priv->lib->bt_params &&
            priv->lib->bt_params->advanced_bt_coexist) {
@@ -1149,13 +1149,12 @@ static void iwlagn_mac_event_callback(struct ieee80211_hw *hw,
                else if (event->u.rssi.data == RSSI_EVENT_HIGH)
                        priv->bt_enable_pspoll = false;
 
-               iwlagn_send_advance_bt_config(priv);
+               queue_work(priv->workqueue, &priv->bt_runtime_config);
        } else {
                IWL_DEBUG_MAC80211(priv, "Advanced BT coex disabled,"
                                "ignoring RSSI callback\n");
        }
 
-       mutex_unlock(&priv->mutex);
        IWL_DEBUG_MAC80211(priv, "leave\n");
 }
 
index 74ad278116be3feb18b3a2a98e4034aa3145a6a1..cc35f796d406156286164410aa7d1adcd087c015 100644 (file)
@@ -69,7 +69,7 @@
 #include "iwl-agn-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL7260_UCODE_API_MAX  13
+#define IWL7260_UCODE_API_MAX  15
 
 /* Oldest version we won't warn about */
 #define IWL7260_UCODE_API_OK   12
@@ -124,6 +124,28 @@ static const struct iwl_base_params iwl7000_base_params = {
        .apmg_wake_up_wa = true,
 };
 
+static const struct iwl_tt_params iwl7000_high_temp_tt_params = {
+       .ct_kill_entry = 118,
+       .ct_kill_exit = 96,
+       .ct_kill_duration = 5,
+       .dynamic_smps_entry = 114,
+       .dynamic_smps_exit = 110,
+       .tx_protection_entry = 114,
+       .tx_protection_exit = 108,
+       .tx_backoff = {
+               {.temperature = 112, .backoff = 300},
+               {.temperature = 113, .backoff = 800},
+               {.temperature = 114, .backoff = 1500},
+               {.temperature = 115, .backoff = 3000},
+               {.temperature = 116, .backoff = 5000},
+               {.temperature = 117, .backoff = 10000},
+       },
+       .support_ct_kill = true,
+       .support_dynamic_smps = true,
+       .support_tx_protection = true,
+       .support_tx_backoff = true,
+};
+
 static const struct iwl_ht_params iwl7000_ht_params = {
        .stbc = true,
        .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
@@ -166,6 +188,7 @@ const struct iwl_cfg iwl7260_2ac_cfg_high_temp = {
        .host_interrupt_operation_mode = true,
        .lp_xtal_workaround = true,
        .dccm_len = IWL7260_DCCM_LEN,
+       .thermal_params = &iwl7000_high_temp_tt_params,
 };
 
 const struct iwl_cfg iwl7260_2n_cfg = {
index ce6321b7d24156269fd553f4409078b1c3fb24df..72040cd0b9794e6790daaa7f887089692ccb1d5e 100644 (file)
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -69,7 +69,7 @@
 #include "iwl-agn-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL8000_UCODE_API_MAX  13
+#define IWL8000_UCODE_API_MAX  15
 
 /* Oldest version we won't warn about */
 #define IWL8000_UCODE_API_OK   12
@@ -122,24 +122,49 @@ static const struct iwl_ht_params iwl8000_ht_params = {
        .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
 };
 
-#define IWL_DEVICE_8000                                                \
-       .ucode_api_max = IWL8000_UCODE_API_MAX,                 \
-       .ucode_api_ok = IWL8000_UCODE_API_OK,                   \
-       .ucode_api_min = IWL8000_UCODE_API_MIN,                 \
-       .device_family = IWL_DEVICE_FAMILY_8000,                \
-       .max_inst_size = IWL60_RTC_INST_SIZE,                   \
-       .max_data_size = IWL60_RTC_DATA_SIZE,                   \
-       .base_params = &iwl8000_base_params,                    \
-       .led_mode = IWL_LED_RF_STATE,                           \
-       .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_8000,   \
-       .d0i3 = true,                                           \
-       .non_shared_ant = ANT_A,                                \
-       .dccm_offset = IWL8260_DCCM_OFFSET,                     \
-       .dccm_len = IWL8260_DCCM_LEN,                           \
-       .dccm2_offset = IWL8260_DCCM2_OFFSET,                   \
-       .dccm2_len = IWL8260_DCCM2_LEN,                         \
-       .smem_offset = IWL8260_SMEM_OFFSET,                     \
-       .smem_len = IWL8260_SMEM_LEN
+static const struct iwl_tt_params iwl8000_tt_params = {
+       .ct_kill_entry = 115,
+       .ct_kill_exit = 93,
+       .ct_kill_duration = 5,
+       .dynamic_smps_entry = 111,
+       .dynamic_smps_exit = 107,
+       .tx_protection_entry = 112,
+       .tx_protection_exit = 105,
+       .tx_backoff = {
+               {.temperature = 110, .backoff = 200},
+               {.temperature = 111, .backoff = 600},
+               {.temperature = 112, .backoff = 1200},
+               {.temperature = 113, .backoff = 2000},
+               {.temperature = 114, .backoff = 4000},
+       },
+       .support_ct_kill = true,
+       .support_dynamic_smps = true,
+       .support_tx_protection = true,
+       .support_tx_backoff = true,
+};
+
+#define IWL_DEVICE_8000                                                        \
+       .ucode_api_max = IWL8000_UCODE_API_MAX,                         \
+       .ucode_api_ok = IWL8000_UCODE_API_OK,                           \
+       .ucode_api_min = IWL8000_UCODE_API_MIN,                         \
+       .device_family = IWL_DEVICE_FAMILY_8000,                        \
+       .max_inst_size = IWL60_RTC_INST_SIZE,                           \
+       .max_data_size = IWL60_RTC_DATA_SIZE,                           \
+       .base_params = &iwl8000_base_params,                            \
+       .led_mode = IWL_LED_RF_STATE,                                   \
+       .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_8000,           \
+       .d0i3 = true,                                                   \
+       .non_shared_ant = ANT_A,                                        \
+       .dccm_offset = IWL8260_DCCM_OFFSET,                             \
+       .dccm_len = IWL8260_DCCM_LEN,                                   \
+       .dccm2_offset = IWL8260_DCCM2_OFFSET,                           \
+       .dccm2_len = IWL8260_DCCM2_LEN,                                 \
+       .smem_offset = IWL8260_SMEM_OFFSET,                             \
+       .smem_len = IWL8260_SMEM_LEN,                                   \
+       .default_nvm_file_B_step = DEFAULT_NVM_FILE_FAMILY_8000B,       \
+       .default_nvm_file_C_step = DEFAULT_NVM_FILE_FAMILY_8000C,       \
+       .thermal_params = &iwl8000_tt_params,                           \
+       .apmg_not_supported = true
 
 const struct iwl_cfg iwl8260_2n_cfg = {
        .name = "Intel(R) Dual Band Wireless N 8260",
@@ -177,8 +202,6 @@ const struct iwl_cfg iwl8260_2ac_sdio_cfg = {
        .ht_params = &iwl8000_ht_params,
        .nvm_ver = IWL8000_NVM_VERSION,
        .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
-       .default_nvm_file_B_step = DEFAULT_NVM_FILE_FAMILY_8000B,
-       .default_nvm_file_C_step = DEFAULT_NVM_FILE_FAMILY_8000C,
        .max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
        .disable_dummy_notification = true,
        .max_ht_ampdu_exponent  = MAX_HT_AMPDU_EXPONENT_8260_SDIO,
@@ -192,8 +215,6 @@ const struct iwl_cfg iwl4165_2ac_sdio_cfg = {
        .ht_params = &iwl8000_ht_params,
        .nvm_ver = IWL8000_NVM_VERSION,
        .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
-       .default_nvm_file_B_step = DEFAULT_NVM_FILE_FAMILY_8000B,
-       .default_nvm_file_C_step = DEFAULT_NVM_FILE_FAMILY_8000C,
        .max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
        .bt_shared_single_ant = true,
        .disable_dummy_notification = true,
index 3f33f753ce2f9ce4a81ef1247d05fb517aeec0ec..08c14afeb1480aca04d61bd083795eb2aec401c2 100644 (file)
@@ -194,6 +194,49 @@ struct iwl_ht_params {
        u8 ht40_bands;
 };
 
+/**
+ * struct iwl_tt_tx_backoff - Tx-backoff threshold
+ * @temperature: The threshold in Celsius
+ * @backoff: The tx-backoff in uSec
+ */
+struct iwl_tt_tx_backoff {
+       s32 temperature;
+       u32 backoff;
+};
+
+#define TT_TX_BACKOFF_SIZE 6
+
+/**
+ * struct iwl_tt_params - thermal throttling parameters
+ * @ct_kill_entry: CT Kill entry threshold
+ * @ct_kill_exit: CT Kill exit threshold
+ * @ct_kill_duration: The time interval (in uSec) at which the driver needs
+ *     to check whether to exit CT Kill.
+ * @dynamic_smps_entry: Dynamic SMPS entry threshold
+ * @dynamic_smps_exit: Dynamic SMPS exit threshold
+ * @tx_protection_entry: TX protection entry threshold
+ * @tx_protection_exit: TX protection exit threshold
+ * @tx_backoff: Array of thresholds for tx-backoff, in ascending order.
+ * @support_ct_kill: Support CT Kill?
+ * @support_dynamic_smps: Support dynamic SMPS?
+ * @support_tx_protection: Support tx protection?
+ * @support_tx_backoff: Support tx-backoff?
+ */
+struct iwl_tt_params {
+       s32 ct_kill_entry;
+       s32 ct_kill_exit;
+       u32 ct_kill_duration;
+       s32 dynamic_smps_entry;
+       s32 dynamic_smps_exit;
+       s32 tx_protection_entry;
+       s32 tx_protection_exit;
+       struct iwl_tt_tx_backoff tx_backoff[TT_TX_BACKOFF_SIZE];
+       bool support_ct_kill;
+       bool support_dynamic_smps;
+       bool support_tx_protection;
+       bool support_tx_backoff;
+};
+
 /*
  * information on how to parse the EEPROM
  */
@@ -316,6 +359,8 @@ struct iwl_cfg {
        const u32 dccm2_len;
        const u32 smem_offset;
        const u32 smem_len;
+       const struct iwl_tt_params *thermal_params;
+       bool apmg_not_supported;
 };
 
 /*
index 223b8752f924839bcff4028f66213032759ed124..948ce0802fa7ceae995d0257a6a3f8dace6ccf14 100644 (file)
@@ -1,6 +1,7 @@
 /******************************************************************************
  *
  * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
@@ -64,19 +65,21 @@ TRACE_EVENT(iwlwifi_dev_hcmd,
 
 TRACE_EVENT(iwlwifi_dev_rx,
        TP_PROTO(const struct device *dev, const struct iwl_trans *trans,
-                void *rxbuf, size_t len),
-       TP_ARGS(dev, trans, rxbuf, len),
+                struct iwl_rx_packet *pkt, size_t len),
+       TP_ARGS(dev, trans, pkt, len),
        TP_STRUCT__entry(
                DEV_ENTRY
-               __dynamic_array(u8, rxbuf, iwl_rx_trace_len(trans, rxbuf, len))
+               __field(u8, cmd)
+               __dynamic_array(u8, rxbuf, iwl_rx_trace_len(trans, pkt, len))
        ),
        TP_fast_assign(
                DEV_ASSIGN;
-               memcpy(__get_dynamic_array(rxbuf), rxbuf,
-                      iwl_rx_trace_len(trans, rxbuf, len));
+               __entry->cmd = pkt->hdr.cmd;
+               memcpy(__get_dynamic_array(rxbuf), pkt,
+                      iwl_rx_trace_len(trans, pkt, len));
        ),
        TP_printk("[%s] RX cmd %#.2x",
-                 __get_str(dev), ((u8 *)__get_dynamic_array(rxbuf))[4])
+                 __get_str(dev), __entry->cmd)
 );
 
 TRACE_EVENT(iwlwifi_dev_tx,
index 7267152e7dc7705aaa514237c17ed908b9e4919d..6685259927f81bd979c20b90d0f9970358c98d46 100644 (file)
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -423,13 +423,19 @@ static int iwl_set_ucode_api_flags(struct iwl_drv *drv, const u8 *data,
 {
        const struct iwl_ucode_api *ucode_api = (void *)data;
        u32 api_index = le32_to_cpu(ucode_api->api_index);
+       u32 api_flags = le32_to_cpu(ucode_api->api_flags);
+       int i;
 
-       if (api_index >= IWL_API_ARRAY_SIZE) {
+       if (api_index >= IWL_API_MAX_BITS / 32) {
                IWL_ERR(drv, "api_index larger than supported by driver\n");
-               return -EINVAL;
+               /* don't return an error so we can load FW that has more bits */
+               return 0;
        }
 
-       capa->api[api_index] = le32_to_cpu(ucode_api->api_flags);
+       for (i = 0; i < 32; i++) {
+               if (api_flags & BIT(i))
+                       __set_bit(i + 32 * api_index, capa->_api);
+       }
 
        return 0;
 }
@@ -439,13 +445,19 @@ static int iwl_set_ucode_capabilities(struct iwl_drv *drv, const u8 *data,
 {
        const struct iwl_ucode_capa *ucode_capa = (void *)data;
        u32 api_index = le32_to_cpu(ucode_capa->api_index);
+       u32 api_flags = le32_to_cpu(ucode_capa->api_capa);
+       int i;
 
-       if (api_index >= IWL_CAPABILITIES_ARRAY_SIZE) {
+       if (api_index >= IWL_CAPABILITIES_MAX_BITS / 32) {
                IWL_ERR(drv, "api_index larger than supported by driver\n");
-               return -EINVAL;
+               /* don't return an error so we can load FW that has more bits */
+               return 0;
        }
 
-       capa->capa[api_index] = le32_to_cpu(ucode_capa->api_capa);
+       for (i = 0; i < 32; i++) {
+               if (api_flags & BIT(i))
+                       __set_bit(i + 32 * api_index, capa->_capa);
+       }
 
        return 0;
 }
@@ -1148,7 +1160,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
        if (err)
                goto try_again;
 
-       if (drv->fw.ucode_capa.api[0] & IWL_UCODE_TLV_API_NEW_VERSION)
+       if (fw_has_api(&drv->fw.ucode_capa, IWL_UCODE_TLV_API_NEW_VERSION))
                api_ver = drv->fw.ucode_ver;
        else
                api_ver = IWL_UCODE_API(drv->fw.ucode_ver);
@@ -1239,6 +1251,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
                sizeof(struct iwl_fw_dbg_trigger_txq_timer);
        trigger_tlv_sz[FW_DBG_TRIGGER_TIME_EVENT] =
                sizeof(struct iwl_fw_dbg_trigger_time_event);
+       trigger_tlv_sz[FW_DBG_TRIGGER_BA] =
+               sizeof(struct iwl_fw_dbg_trigger_ba);
 
        for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++) {
                if (pieces->dbg_trigger_tlv[i]) {
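Aside: the TLV parsing above stops storing raw 32-bit words in capa->api[]/capa->capa[] and instead folds each word into a flat bitmap with __set_bit(i + 32 * api_index, ...), which is also why an out-of-range api_index can now be skipped instead of failing the firmware load. A userspace sketch of that fold, with simple stand-ins for the kernel's __set_bit()/test_bit():

#include <stdint.h>
#include <stdio.h>

#define MAX_BITS	64
#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static void set_bit_ul(unsigned int nr, unsigned long *addr)
{
	addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static int test_bit_ul(unsigned int nr, const unsigned long *addr)
{
	return (addr[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
}

int main(void)
{
	unsigned long api[BITS_TO_LONGS(MAX_BITS)] = { 0 };
	uint32_t api_flags = (1u << 3) | (1u << 20);	/* one TLV word */
	uint32_t api_index = 1;				/* second 32-bit word */
	unsigned int i;

	/* fold the 32-bit word into the flat bitmap, as the driver now does */
	for (i = 0; i < 32; i++)
		if (api_flags & (1u << i))
			set_bit_ul(i + 32 * api_index, api);

	printf("bit 35: %d\n", test_bit_ul(35, api));	/* 1 */
	printf("bit  3: %d\n", test_bit_ul(3, api));	/* 0: word 1, not 0 */
	return 0;
}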
index d45dc021cda2c0715b8d7e740ff90b46589ae141..d56064861a9c353dfb9fcf1720e1abde6c3fcf9d 100644 (file)
@@ -438,12 +438,6 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
 #define RX_QUEUE_MASK                         255
 #define RX_QUEUE_SIZE_LOG                     8
 
-/*
- * RX related structures and functions
- */
-#define RX_FREE_BUFFERS 64
-#define RX_LOW_WATERMARK 8
-
 /**
  * struct iwl_rb_status - reserve buffer status
  *     host memory mapped FH registers
index 251bf8dc4a12133b4ae77757170436c67219afc9..e57dbd0ef2e1f10f8d16bd26537f2ee829b5eade 100644 (file)
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -254,6 +254,7 @@ iwl_fw_error_next_data(struct iwl_fw_error_dump_data *data)
  *     detection.
  * @FW_DBG_TRIGGER_TIME_EVENT: trigger log collection upon time events related
  *     events.
+ * @FW_DBG_TRIGGER_BA: trigger log collection upon BlockAck related events.
  */
 enum iwl_fw_dbg_trigger {
        FW_DBG_TRIGGER_INVALID = 0,
@@ -267,6 +268,7 @@ enum iwl_fw_dbg_trigger {
        FW_DBG_TRIGGER_RSSI,
        FW_DBG_TRIGGER_TXQ_TIMERS,
        FW_DBG_TRIGGER_TIME_EVENT,
+       FW_DBG_TRIGGER_BA,
 
        /* must be last */
        FW_DBG_TRIGGER_MAX,
index 62db2e5e45ebd51793c372e54832b21511057c58..a9b5ae4ebec021277efc454730d03b562add3210 100644 (file)
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -237,6 +237,8 @@ enum iwl_ucode_tlv_flag {
        IWL_UCODE_TLV_FLAGS_GO_UAPSD            = BIT(30),
 };
 
+typedef unsigned int __bitwise__ iwl_ucode_tlv_api_t;
+
 /**
  * enum iwl_ucode_tlv_api - ucode api
  * @IWL_UCODE_TLV_API_BT_COEX_SPLIT: new API for BT Coex
@@ -255,22 +257,27 @@ enum iwl_ucode_tlv_flag {
  * @IWL_UCODE_TLV_API_LQ_SS_PARAMS: Configure STBC/BFER via LQ CMD ss_params
  * @IWL_UCODE_TLV_API_STATS_V10: uCode supports/uses statistics API version 10
  * @IWL_UCODE_TLV_API_NEW_VERSION: new versioning format
+ * @IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY: scan APIs use 8-level priority
+ *     instead of 3.
  */
 enum iwl_ucode_tlv_api {
-       IWL_UCODE_TLV_API_BT_COEX_SPLIT         = BIT(3),
-       IWL_UCODE_TLV_API_FRAGMENTED_SCAN       = BIT(8),
-       IWL_UCODE_TLV_API_WIFI_MCC_UPDATE       = BIT(9),
-       IWL_UCODE_TLV_API_HDC_PHASE_0           = BIT(10),
-       IWL_UCODE_TLV_API_TX_POWER_DEV          = BIT(11),
-       IWL_UCODE_TLV_API_BASIC_DWELL           = BIT(13),
-       IWL_UCODE_TLV_API_SCD_CFG               = BIT(15),
-       IWL_UCODE_TLV_API_SINGLE_SCAN_EBS       = BIT(16),
-       IWL_UCODE_TLV_API_ASYNC_DTM             = BIT(17),
-       IWL_UCODE_TLV_API_LQ_SS_PARAMS          = BIT(18),
-       IWL_UCODE_TLV_API_STATS_V10             = BIT(19),
-       IWL_UCODE_TLV_API_NEW_VERSION           = BIT(20),
+       IWL_UCODE_TLV_API_BT_COEX_SPLIT         = (__force iwl_ucode_tlv_api_t)3,
+       IWL_UCODE_TLV_API_FRAGMENTED_SCAN       = (__force iwl_ucode_tlv_api_t)8,
+       IWL_UCODE_TLV_API_WIFI_MCC_UPDATE       = (__force iwl_ucode_tlv_api_t)9,
+       IWL_UCODE_TLV_API_HDC_PHASE_0           = (__force iwl_ucode_tlv_api_t)10,
+       IWL_UCODE_TLV_API_TX_POWER_DEV          = (__force iwl_ucode_tlv_api_t)11,
+       IWL_UCODE_TLV_API_BASIC_DWELL           = (__force iwl_ucode_tlv_api_t)13,
+       IWL_UCODE_TLV_API_SCD_CFG               = (__force iwl_ucode_tlv_api_t)15,
+       IWL_UCODE_TLV_API_SINGLE_SCAN_EBS       = (__force iwl_ucode_tlv_api_t)16,
+       IWL_UCODE_TLV_API_ASYNC_DTM             = (__force iwl_ucode_tlv_api_t)17,
+       IWL_UCODE_TLV_API_LQ_SS_PARAMS          = (__force iwl_ucode_tlv_api_t)18,
+       IWL_UCODE_TLV_API_STATS_V10             = (__force iwl_ucode_tlv_api_t)19,
+       IWL_UCODE_TLV_API_NEW_VERSION           = (__force iwl_ucode_tlv_api_t)20,
+       IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY     = (__force iwl_ucode_tlv_api_t)24,
 };
 
+typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t;
+
 /**
  * enum iwl_ucode_tlv_capa - ucode capabilities
  * @IWL_UCODE_TLV_CAPA_D0I3_SUPPORT: supports D0i3
@@ -290,6 +297,7 @@ enum iwl_ucode_tlv_api {
  *     which also implies support for the scheduler configuration command
  * @IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH: supports TDLS channel switching
  * @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command
+ * @IWL_UCODE_TLV_CAPA_DC2DC_SUPPORT: supports DC2DC Command
  * @IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS: support radio and beacon statistics
  * @IWL_UCODE_TLV_CAPA_BT_COEX_PLCR: enabled BT Coex packet level co-running
  * @IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC: ucode supports LAR updates with different
@@ -299,22 +307,23 @@ enum iwl_ucode_tlv_api {
  * @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC
  */
 enum iwl_ucode_tlv_capa {
-       IWL_UCODE_TLV_CAPA_D0I3_SUPPORT                 = BIT(0),
-       IWL_UCODE_TLV_CAPA_LAR_SUPPORT                  = BIT(1),
-       IWL_UCODE_TLV_CAPA_UMAC_SCAN                    = BIT(2),
-       IWL_UCODE_TLV_CAPA_BEAMFORMER                   = BIT(3),
-       IWL_UCODE_TLV_CAPA_TDLS_SUPPORT                 = BIT(6),
-       IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT    = BIT(8),
-       IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT      = BIT(9),
-       IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT       = BIT(10),
-       IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT         = BIT(11),
-       IWL_UCODE_TLV_CAPA_DQA_SUPPORT                  = BIT(12),
-       IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH          = BIT(13),
-       IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT              = BIT(18),
-       IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS           = BIT(22),
-       IWL_UCODE_TLV_CAPA_BT_COEX_PLCR                 = BIT(28),
-       IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC                = BIT(29),
-       IWL_UCODE_TLV_CAPA_BT_COEX_RRC                  = BIT(30),
+       IWL_UCODE_TLV_CAPA_D0I3_SUPPORT                 = (__force iwl_ucode_tlv_capa_t)0,
+       IWL_UCODE_TLV_CAPA_LAR_SUPPORT                  = (__force iwl_ucode_tlv_capa_t)1,
+       IWL_UCODE_TLV_CAPA_UMAC_SCAN                    = (__force iwl_ucode_tlv_capa_t)2,
+       IWL_UCODE_TLV_CAPA_BEAMFORMER                   = (__force iwl_ucode_tlv_capa_t)3,
+       IWL_UCODE_TLV_CAPA_TDLS_SUPPORT                 = (__force iwl_ucode_tlv_capa_t)6,
+       IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT    = (__force iwl_ucode_tlv_capa_t)8,
+       IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT      = (__force iwl_ucode_tlv_capa_t)9,
+       IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT       = (__force iwl_ucode_tlv_capa_t)10,
+       IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT         = (__force iwl_ucode_tlv_capa_t)11,
+       IWL_UCODE_TLV_CAPA_DQA_SUPPORT                  = (__force iwl_ucode_tlv_capa_t)12,
+       IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH          = (__force iwl_ucode_tlv_capa_t)13,
+       IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT              = (__force iwl_ucode_tlv_capa_t)18,
+       IWL_UCODE_TLV_CAPA_DC2DC_CONFIG_SUPPORT         = (__force iwl_ucode_tlv_capa_t)19,
+       IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS           = (__force iwl_ucode_tlv_capa_t)22,
+       IWL_UCODE_TLV_CAPA_BT_COEX_PLCR                 = (__force iwl_ucode_tlv_capa_t)28,
+       IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC                = (__force iwl_ucode_tlv_capa_t)29,
+       IWL_UCODE_TLV_CAPA_BT_COEX_RRC                  = (__force iwl_ucode_tlv_capa_t)30,
 };
 
 /* The default calibrate table size if not specified by firmware file */
@@ -325,13 +334,14 @@ enum iwl_ucode_tlv_capa {
 /* The default max probe length if not specified by the firmware file */
 #define IWL_DEFAULT_MAX_PROBE_LENGTH   200
 
+#define IWL_API_MAX_BITS               64
+#define IWL_CAPABILITIES_MAX_BITS      64
+
 /*
  * For 16.0 uCode and above, there is no differentiation between sections,
  * just an offset to the HW address.
  */
 #define IWL_UCODE_SECTION_MAX 12
-#define IWL_API_ARRAY_SIZE     1
-#define IWL_CAPABILITIES_ARRAY_SIZE    1
 #define CPU1_CPU2_SEPARATOR_SECTION    0xFFFFCCCC
 
 /* uCode version contains 4 values: Major/Minor/API/Serial */
@@ -424,11 +434,13 @@ struct iwl_fw_dbg_reg_op {
  * @SMEM_MODE: monitor stores the data in SMEM
  * @EXTERNAL_MODE: monitor stores the data in allocated DRAM
  * @MARBH_MODE: monitor stores the data in MARBH buffer
+ * @MIPI_MODE: monitor outputs the data through the MIPI interface
  */
 enum iwl_fw_dbg_monitor_mode {
        SMEM_MODE = 0,
        EXTERNAL_MODE = 1,
        MARBH_MODE = 2,
+       MIPI_MODE = 3,
 };
 
 /**
@@ -436,6 +448,7 @@ enum iwl_fw_dbg_monitor_mode {
  *
  * @version: version of the TLV - currently 0
  * @monitor_mode: %enum iwl_fw_dbg_monitor_mode
+ * @size_power: buffer size will be 2^(size_power + 11)
  * @base_reg: addr of the base addr register (PRPH)
  * @end_reg:  addr of the end addr register (PRPH)
  * @write_ptr_reg: the addr of the reg of the write pointer
@@ -449,7 +462,8 @@ enum iwl_fw_dbg_monitor_mode {
 struct iwl_fw_dbg_dest_tlv {
        u8 version;
        u8 monitor_mode;
-       u8 reserved[2];
+       u8 size_power;
+       u8 reserved;
        __le32 base_reg;
        __le32 end_reg;
        __le32 write_ptr_reg;
@@ -658,6 +672,33 @@ struct iwl_fw_dbg_trigger_time_event {
        } __packed time_events[16];
 } __packed;
 
+/**
+ * struct iwl_fw_dbg_trigger_ba - configures BlockAck related trigger
+ * @rx_ba_start: tid bitmap to configure on what tid the trigger should occur
+ *     when an Rx BlockAck session is started.
+ * @rx_ba_stop: tid bitmap to configure on what tid the trigger should occur
+ *     when an Rx BlockAck session is stopped.
+ * @tx_ba_start: tid bitmap to configure on what tid the trigger should occur
+ *     when a Tx BlockAck session is started.
+ * @tx_ba_stop: tid bitmap to configure on what tid the trigger should occur
+ *     when a Tx BlockAck session is stopped.
+ * @rx_bar: tid bitmap to configure on what tid the trigger should occur
+ *     when a BAR is received (for a Tx BlockAck session).
+ * @tx_bar: tid bitmap to configure on what tid the trigger should occur
+ *     when a BAR is sent (for an Rx BlockAck session).
+ * @frame_timeout: tid bitmap to configure on what tid the trigger should occur
+ *     when a frame times out in the reordering buffer.
+ */
+struct iwl_fw_dbg_trigger_ba {
+       __le16 rx_ba_start;
+       __le16 rx_ba_stop;
+       __le16 tx_ba_start;
+       __le16 tx_ba_stop;
+       __le16 rx_bar;
+       __le16 tx_bar;
+       __le16 frame_timeout;
+} __packed;
+
 /**
  * struct iwl_fw_dbg_conf_tlv - a TLV that describes a debug configuration.
  * @id: conf id
index cf75bafae51da0f60255d9a7f98f7d2f5e18bcac..3e3c9d8b3c37dd93b7ab59529bf5c7b3cd9d7ae0 100644 (file)
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -105,10 +105,24 @@ struct iwl_ucode_capabilities {
        u32 n_scan_channels;
        u32 standard_phy_calibration_size;
        u32 flags;
-       u32 api[IWL_API_ARRAY_SIZE];
-       u32 capa[IWL_CAPABILITIES_ARRAY_SIZE];
+       unsigned long _api[BITS_TO_LONGS(IWL_API_MAX_BITS)];
+       unsigned long _capa[BITS_TO_LONGS(IWL_CAPABILITIES_MAX_BITS)];
 };
 
+static inline bool
+fw_has_api(const struct iwl_ucode_capabilities *capabilities,
+          iwl_ucode_tlv_api_t api)
+{
+       return test_bit((__force long)api, capabilities->_api);
+}
+
+static inline bool
+fw_has_capa(const struct iwl_ucode_capabilities *capabilities,
+           iwl_ucode_tlv_capa_t capa)
+{
+       return test_bit((__force long)capa, capabilities->_capa);
+}
+
 /* one for each uCode image (inst/data, init/runtime/wowlan) */
 struct fw_desc {
        const void *data;       /* vmalloc'ed data */
@@ -205,6 +219,8 @@ static inline const char *get_fw_dbg_mode_string(int mode)
                return "EXTERNAL_DRAM";
        case MARBH_MODE:
                return "MARBH";
+       case MIPI_MODE:
+               return "MIPI";
        default:
                return "UNKNOWN";
        }
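Aside: paired with the bitmap fold above, these header changes turn the API/capability IDs into bit indices carried in sparse __bitwise__ types, so they can only be consumed through fw_has_api()/fw_has_capa() instead of being OR'd together like the old BIT() masks. A compileable approximation follows, with the sparse-only annotations stubbed out and illustrative names:

#include <stdio.h>

#define __bitwise__	/* sparse-only in the kernel; empty here */
#define __force		/* likewise */

typedef unsigned int __bitwise__ tlv_api_t;

enum tlv_api {
	TLV_API_FRAGMENTED_SCAN	= (__force tlv_api_t)8,
	TLV_API_NEW_VERSION	= (__force tlv_api_t)20,
};

#define API_MAX_BITS	64
#define BITS_PER_LONG	(8 * sizeof(unsigned long))

struct capabilities {
	unsigned long _api[(API_MAX_BITS + BITS_PER_LONG - 1) / BITS_PER_LONG];
};

static int has_api(const struct capabilities *c, tlv_api_t api)
{
	unsigned int nr = (__force unsigned int)api;

	return (c->_api[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
}

int main(void)
{
	struct capabilities capa = { { 0 } };

	capa._api[0] |= 1UL << 20;	/* pretend the firmware set bit 20 */
	printf("new version: %d\n", has_api(&capa, TLV_API_NEW_VERSION));
	printf("frag scan:   %d\n", has_api(&capa, TLV_API_FRAGMENTED_SCAN));
	return 0;
}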
index 8e604a3931ca6db6a1ab0eff59d2787d8562e494..80fefe7d7b8cb3b46581b32f299fed425e878980 100644 (file)
@@ -249,7 +249,7 @@ static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz,
         */
        if ((nvm_flags & NVM_CHANNEL_GO_CONCURRENT) &&
            (flags & IEEE80211_CHAN_NO_IR))
-               flags |= IEEE80211_CHAN_GO_CONCURRENT;
+               flags |= IEEE80211_CHAN_IR_CONCURRENT;
 
        return flags;
 }
index 88a57e6e232ff318dea917d0856da781662bc5eb..5af1c776d2d4381fdd01e0044c4eeb5108236a18 100644 (file)
@@ -348,6 +348,9 @@ enum secure_load_status_reg {
 #define MON_BUFF_WRPTR                 (0xa03c44)
 #define MON_BUFF_CYCLE_CNT             (0xa03c48)
 
+#define MON_DMARB_RD_CTL_ADDR          (0xa03c60)
+#define MON_DMARB_RD_DATA_ADDR         (0xa03c5c)
+
 #define DBGC_IN_SAMPLE                 (0xa03c00)
 
 /* enable the ID buf for read */
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.c b/drivers/net/wireless/iwlwifi/iwl-trans.c
new file mode 100644 (file)
index 0000000..9f8bcef
--- /dev/null
@@ -0,0 +1,113 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Mobile Communications GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Mobile Communications GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <linux/kernel.h>
+#include "iwl-trans.h"
+
+struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
+                                 struct device *dev,
+                                 const struct iwl_cfg *cfg,
+                                 const struct iwl_trans_ops *ops,
+                                 size_t dev_cmd_headroom)
+{
+       struct iwl_trans *trans;
+#ifdef CONFIG_LOCKDEP
+       static struct lock_class_key __key;
+#endif
+
+       trans = kzalloc(sizeof(*trans) + priv_size, GFP_KERNEL);
+       if (!trans)
+               return NULL;
+
+#ifdef CONFIG_LOCKDEP
+       lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map",
+                        &__key, 0);
+#endif
+
+       trans->dev = dev;
+       trans->cfg = cfg;
+       trans->ops = ops;
+       trans->dev_cmd_headroom = dev_cmd_headroom;
+
+       snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
+                "iwl_cmd_pool:%s", dev_name(trans->dev));
+       trans->dev_cmd_pool =
+               kmem_cache_create(trans->dev_cmd_pool_name,
+                                 sizeof(struct iwl_device_cmd)
+                                 + trans->dev_cmd_headroom,
+                                 sizeof(void *),
+                                 SLAB_HWCACHE_ALIGN,
+                                 NULL);
+       if (!trans->dev_cmd_pool)
+               goto free;
+
+       return trans;
+ free:
+       kfree(trans);
+       return NULL;
+}
+
+void iwl_trans_free(struct iwl_trans *trans)
+{
+       kmem_cache_destroy(trans->dev_cmd_pool);
+       kfree(trans);
+}
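A minimal usage sketch of the two helpers added in this new file (the caller is hypothetical; my_priv and my_ops stand in for a real transport's private struct and ops table):

	struct iwl_trans *trans;

	/* priv_size bytes are reserved after the struct for trans_specific */
	trans = iwl_trans_alloc(sizeof(struct my_priv), dev, cfg, &my_ops, 0);
	if (!trans)
		return -ENOMEM;

	/* ... hand the transport to the op mode ... */

	iwl_trans_free(trans);	/* also destroys the dev_cmd kmem cache */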
index 56254a837214ffad421a7b7a14a9eaec3d7dc029..87a230a7f4b605b8b6db80b69b3db7bc95b59e2f 100644 (file)
@@ -641,6 +641,8 @@ struct iwl_trans {
 
        enum iwl_d0i3_mode d0i3_mode;
 
+       bool wowlan_d0i3;
+
        /* pointer to trans specific struct */
        /* Ensure that this pointer will always be aligned to sizeof(void *) */
        char trans_specific[0] __aligned(sizeof(void *));
@@ -1010,20 +1012,20 @@ static inline void iwl_trans_fw_error(struct iwl_trans *trans)
                iwl_op_mode_nic_error(trans->op_mode);
 }
 
+/*****************************************************
+ * transport helper functions
+ *****************************************************/
+struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
+                                 struct device *dev,
+                                 const struct iwl_cfg *cfg,
+                                 const struct iwl_trans_ops *ops,
+                                 size_t dev_cmd_headroom);
+void iwl_trans_free(struct iwl_trans *trans);
+
 /*****************************************************
 * driver (transport) register/unregister functions
 ******************************************************/
 int __must_check iwl_pci_register_driver(void);
 void iwl_pci_unregister_driver(void);
 
-static inline void trans_lockdep_init(struct iwl_trans *trans)
-{
-#ifdef CONFIG_LOCKDEP
-       static struct lock_class_key __key;
-
-       lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map",
-                        &__key, 0);
-#endif
-}
-
 #endif /* __iwl_trans_h__ */
index 13a0a03158deb0d1d884b390dfb2e7e3751b5ee2..b4737e296c927582063409917951a210e8ffa74e 100644 (file)
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -408,23 +408,12 @@ iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif)
 
 int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
 {
-       struct iwl_bt_coex_cmd *bt_cmd;
-       struct iwl_host_cmd cmd = {
-               .id = BT_CONFIG,
-               .len = { sizeof(*bt_cmd), },
-               .dataflags = { IWL_HCMD_DFL_NOCOPY, },
-       };
-       int ret;
+       struct iwl_bt_coex_cmd bt_cmd = {};
        u32 mode;
 
-       if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
+       if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
                return iwl_send_bt_init_conf_old(mvm);
 
-       bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
-       if (!bt_cmd)
-               return -ENOMEM;
-       cmd.data[0] = bt_cmd;
-
        lockdep_assert_held(&mvm->mutex);
 
        if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) {
@@ -440,36 +429,33 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
                        mode = 0;
                }
 
-               bt_cmd->mode = cpu_to_le32(mode);
+               bt_cmd.mode = cpu_to_le32(mode);
                goto send_cmd;
        }
 
        mode = iwlwifi_mod_params.bt_coex_active ? BT_COEX_NW : BT_COEX_DISABLE;
-       bt_cmd->mode = cpu_to_le32(mode);
+       bt_cmd.mode = cpu_to_le32(mode);
 
        if (IWL_MVM_BT_COEX_SYNC2SCO)
-               bt_cmd->enabled_modules |=
+               bt_cmd.enabled_modules |=
                        cpu_to_le32(BT_COEX_SYNC2SCO_ENABLED);
 
        if (iwl_mvm_bt_is_plcr_supported(mvm))
-               bt_cmd->enabled_modules |= cpu_to_le32(BT_COEX_CORUN_ENABLED);
+               bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_CORUN_ENABLED);
 
        if (IWL_MVM_BT_COEX_MPLUT) {
-               bt_cmd->enabled_modules |= cpu_to_le32(BT_COEX_MPLUT_ENABLED);
-               bt_cmd->enabled_modules |=
+               bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_MPLUT_ENABLED);
+               bt_cmd.enabled_modules |=
                        cpu_to_le32(BT_COEX_MPLUT_BOOST_ENABLED);
        }
 
-       bt_cmd->enabled_modules |= cpu_to_le32(BT_COEX_HIGH_BAND_RET);
+       bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_HIGH_BAND_RET);
 
 send_cmd:
        memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
        memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
 
-       ret = iwl_mvm_send_cmd(mvm, &cmd);
-
-       kfree(bt_cmd);
-       return ret;
+       return iwl_mvm_send_cmd_pdu(mvm, BT_CONFIG, 0, sizeof(bt_cmd), &bt_cmd);
 }
 
 static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
@@ -746,7 +732,7 @@ int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data;
 
-       if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
+       if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
                return iwl_mvm_rx_bt_coex_notif_old(mvm, rxb, dev_cmd);
 
        IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n");
@@ -770,52 +756,14 @@ int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
        return 0;
 }
 
-static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac,
-                                  struct ieee80211_vif *vif)
-{
-       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-       struct iwl_bt_iterator_data *data = _data;
-       struct iwl_mvm *mvm = data->mvm;
-
-       struct ieee80211_sta *sta;
-       struct iwl_mvm_sta *mvmsta;
-
-       struct ieee80211_chanctx_conf *chanctx_conf;
-
-       rcu_read_lock();
-       chanctx_conf = rcu_dereference(vif->chanctx_conf);
-       /* If channel context is invalid or not on 2.4GHz - don't count it */
-       if (!chanctx_conf ||
-           chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) {
-               rcu_read_unlock();
-               return;
-       }
-       rcu_read_unlock();
-
-       if (vif->type != NL80211_IFTYPE_STATION ||
-           mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
-               return;
-
-       sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id],
-                                       lockdep_is_held(&mvm->mutex));
-
-       /* This can happen if the station has been removed right now */
-       if (IS_ERR_OR_NULL(sta))
-               return;
-
-       mvmsta = iwl_mvm_sta_from_mac80211(sta);
-}
-
 void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                           enum ieee80211_rssi_event_data rssi_event)
 {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-       struct iwl_bt_iterator_data data = {
-               .mvm = mvm,
-       };
        int ret;
 
-       if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
+       if (!fw_has_api(&mvm->fw->ucode_capa,
+                       IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
                iwl_mvm_bt_rssi_event_old(mvm, vif, rssi_event);
                return;
        }
@@ -853,10 +801,6 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
        if (ret)
                IWL_ERR(mvm, "couldn't send BT_CONFIG HCMD upon RSSI event\n");
-
-       ieee80211_iterate_active_interfaces_atomic(
-               mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
-               iwl_mvm_bt_rssi_iterator, &data);
 }
 
 #define LINK_QUAL_AGG_TIME_LIMIT_DEF   (4000)
@@ -870,7 +814,7 @@ u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm,
        struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt;
        enum iwl_bt_coex_lut_type lut_type;
 
-       if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
+       if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
                return iwl_mvm_coex_agg_time_limit_old(mvm, sta);
 
        if (IWL_COEX_IS_TTC_ON(mvm->last_bt_notif.ttc_rrc_status, phy_ctxt->id))
@@ -897,7 +841,7 @@ bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
        struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt;
        enum iwl_bt_coex_lut_type lut_type;
 
-       if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
+       if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
                return iwl_mvm_bt_coex_is_mimo_allowed_old(mvm, sta);
 
        if (IWL_COEX_IS_TTC_ON(mvm->last_bt_notif.ttc_rrc_status, phy_ctxt->id))
@@ -927,7 +871,7 @@ bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant)
        if (ant & mvm->cfg->non_shared_ant)
                return true;
 
-       if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
+       if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
                return iwl_mvm_bt_coex_is_shared_ant_avail_old(mvm);
 
        return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
@@ -940,10 +884,10 @@ bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm)
        if (mvm->cfg->bt_shared_single_ant)
                return true;
 
-       if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
+       if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
                return iwl_mvm_bt_coex_is_shared_ant_avail_old(mvm);
 
-       return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) == BT_OFF;
+       return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < BT_HIGH_TRAFFIC;
 }
 
 bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
@@ -951,7 +895,7 @@ bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
 {
        u32 bt_activity = le32_to_cpu(mvm->last_bt_notif.bt_activity_grading);
 
-       if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
+       if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
                return iwl_mvm_bt_coex_is_tpc_allowed_old(mvm, band);
 
        if (band != IEEE80211_BAND_2GHZ)
@@ -994,7 +938,8 @@ u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
 
 void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm)
 {
-       if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
+       if (!fw_has_api(&mvm->fw->ucode_capa,
+                       IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
                iwl_mvm_bt_coex_vif_change_old(mvm);
                return;
        }
@@ -1012,7 +957,7 @@ int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
        u8 __maybe_unused lower_bound, upper_bound;
        u8 lut;
 
-       if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT))
+       if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
                return iwl_mvm_rx_ant_coupling_notif_old(mvm, rxb, dev_cmd);
 
        if (!iwl_mvm_bt_is_plcr_supported(mvm))
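The iwl_send_bt_init_conf() rewrite above illustrates a general pattern: iwl_mvm_send_cmd_pdu() copies its payload into the command queue, so a small command can live on the stack instead of being kzalloc'd and sent with IWL_HCMD_DFL_NOCOPY. A minimal sketch of the pattern:

	struct iwl_bt_coex_cmd cmd = {};	/* small, so the stack is fine */

	cmd.mode = cpu_to_le32(BT_COEX_NW);
	/* the payload is copied during the call; no kfree() needed after */
	return iwl_mvm_send_cmd_pdu(mvm, BT_CONFIG, 0, sizeof(cmd), &cmd);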
index 4310cf102d78ecd4f3e7baffa13570d878153cb4..4165d104e4c379dde727f01d585904dec6030deb 100644 (file)
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -761,7 +761,7 @@ void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 
 static int iwl_mvm_switch_to_d3(struct iwl_mvm *mvm)
 {
-       iwl_mvm_cancel_scan(mvm);
+       iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
 
        iwl_trans_stop_device(mvm->trans);
 
@@ -981,7 +981,8 @@ iwl_mvm_netdetect_config(struct iwl_mvm *mvm,
        if (ret)
                return ret;
 
-       ret = iwl_mvm_scan_offload_start(mvm, vif, nd_config, &mvm->nd_ies);
+       ret = iwl_mvm_sched_scan_start(mvm, vif, nd_config, &mvm->nd_ies,
+                                      IWL_MVM_SCAN_NETDETECT);
        if (ret)
                return ret;
 
@@ -1169,7 +1170,8 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
 
        iwl_trans_suspend(mvm->trans);
-       if (wowlan->any) {
+       mvm->trans->wowlan_d0i3 = wowlan->any;
+       if (mvm->trans->wowlan_d0i3) {
                /* 'any' trigger means d0i3 usage */
                if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND) {
                        int ret = iwl_mvm_enter_d0i3_sync(mvm);
@@ -1784,7 +1786,7 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
        for_each_set_bit(i, &matched_profiles, mvm->n_nd_match_sets) {
                struct iwl_scan_offload_profile_match *fw_match;
                struct cfg80211_wowlan_nd_match *match;
-               int n_channels = 0;
+               int idx, n_channels = 0;
 
                fw_match = &query.matches[i];
 
@@ -1799,8 +1801,12 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
 
                net_detect->matches[net_detect->n_matches++] = match;
 
-               match->ssid.ssid_len = mvm->nd_match_sets[i].ssid.ssid_len;
-               memcpy(match->ssid.ssid, mvm->nd_match_sets[i].ssid.ssid,
+               /* We inverted the order of the SSIDs in the scan
+                * request, so invert the index here.
+                */
+               idx = mvm->n_nd_match_sets - i - 1;
+               match->ssid.ssid_len = mvm->nd_match_sets[idx].ssid.ssid_len;
+               memcpy(match->ssid.ssid, mvm->nd_match_sets[idx].ssid.ssid,
                       match->ssid.ssid_len);
 
                if (mvm->n_nd_channels < n_channels)
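A worked example of the index inversion above, assuming mvm->n_nd_match_sets == 3: firmware match 0 refers to driver match set 2, and so on:

	/* idx = mvm->n_nd_match_sets - i - 1 */
	/* i == 0 -> idx == 2,  i == 1 -> idx == 1,  i == 2 -> idx == 0 */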
index 5f37eab5008d7a9ebe1719204f024c9d70e01434..5c8a65de0e775a2327d392f617c4433bdd096f6d 100644 (file)
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -190,6 +190,21 @@ static ssize_t iwl_dbgfs_pm_params_write(struct ieee80211_vif *vif, char *buf,
        return ret ?: count;
 }
 
+static ssize_t iwl_dbgfs_tx_pwr_lmt_read(struct file *file,
+                                        char __user *user_buf,
+                                        size_t count, loff_t *ppos)
+{
+       struct ieee80211_vif *vif = file->private_data;
+       char buf[64];
+       int bufsz = sizeof(buf);
+       int pos;
+
+       pos = scnprintf(buf, bufsz, "bss limit = %d\n",
+                       vif->bss_conf.txpower);
+
+       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
 static ssize_t iwl_dbgfs_pm_params_read(struct file *file,
                                        char __user *user_buf,
                                        size_t count, loff_t *ppos)
@@ -607,6 +622,7 @@ static ssize_t iwl_dbgfs_rx_phyinfo_read(struct file *file,
        } while (0)
 
 MVM_DEBUGFS_READ_FILE_OPS(mac_params);
+MVM_DEBUGFS_READ_FILE_OPS(tx_pwr_lmt);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params, 32);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 10);
@@ -641,6 +657,7 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
                MVM_DEBUGFS_ADD_FILE_VIF(pm_params, mvmvif->dbgfs_dir, S_IWUSR |
                                         S_IRUSR);
 
+       MVM_DEBUGFS_ADD_FILE_VIF(tx_pwr_lmt, mvmvif->dbgfs_dir, S_IRUSR);
        MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir, S_IRUSR);
        MVM_DEBUGFS_ADD_FILE_VIF(low_latency, mvmvif->dbgfs_dir,
                                 S_IRUSR | S_IWUSR);
index 9ac04c1ea7063d985980a5237fe705c1bba7186c..ffb4b5cef27570bbe4af0670683a41891b673330 100644 (file)
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -493,7 +493,8 @@ static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf,
 
        mutex_lock(&mvm->mutex);
 
-       if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
+       if (!fw_has_api(&mvm->fw->ucode_capa,
+                       IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
                struct iwl_bt_coex_profile_notif_old *notif =
                        &mvm->last_bt_notif_old;
 
@@ -550,7 +551,8 @@ static ssize_t iwl_dbgfs_bt_cmd_read(struct file *file, char __user *user_buf,
 
        mutex_lock(&mvm->mutex);
 
-       if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
+       if (!fw_has_api(&mvm->fw->ucode_capa,
+                       IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
                struct iwl_bt_coex_ci_cmd_old *cmd = &mvm->last_bt_ci_cmd_old;
 
                pos += scnprintf(buf+pos, bufsz-pos,
@@ -916,7 +918,8 @@ iwl_dbgfs_scan_ant_rxchain_write(struct iwl_mvm *mvm, char *buf,
 
        if (mvm->scan_rx_ant != scan_rx_ant) {
                mvm->scan_rx_ant = scan_rx_ant;
-               if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
+               if (fw_has_capa(&mvm->fw->ucode_capa,
+                               IWL_UCODE_TLV_CAPA_UMAC_SCAN))
                        iwl_mvm_config_scan(mvm);
        }
 
@@ -1356,6 +1359,7 @@ static ssize_t iwl_dbgfs_d0i3_refs_read(struct file *file,
        PRINT_MVM_REF(IWL_MVM_REF_UCODE_DOWN);
        PRINT_MVM_REF(IWL_MVM_REF_SCAN);
        PRINT_MVM_REF(IWL_MVM_REF_ROC);
+       PRINT_MVM_REF(IWL_MVM_REF_ROC_AUX);
        PRINT_MVM_REF(IWL_MVM_REF_P2P_CLIENT);
        PRINT_MVM_REF(IWL_MVM_REF_AP_IBSS);
        PRINT_MVM_REF(IWL_MVM_REF_USER);
index d6cced47d561b9601a59166296a229facfd9014e..5e4cbdb44c607ec8399bae489f28990dc907eaeb 100644 (file)
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -274,50 +274,18 @@ struct iwl_scan_offload_profile_cfg {
 } __packed;
 
 /**
- * iwl_scan_offload_schedule - schedule of scan offload
+ * iwl_scan_schedule_lmac - schedule of scan offload
  * @delay:             delay between iterations, in seconds.
  * @iterations:                num of scan iterations
  * @full_scan_mul:     number of partial scans before each full scan
  */
-struct iwl_scan_offload_schedule {
+struct iwl_scan_schedule_lmac {
        __le16 delay;
        u8 iterations;
        u8 full_scan_mul;
-} __packed;
-
-/*
- * iwl_scan_offload_flags
- *
- * IWL_SCAN_OFFLOAD_FLAG_PASS_ALL: pass all results - no filtering.
- * IWL_SCAN_OFFLOAD_FLAG_CACHED_CHANNEL: add cached channels to partial scan.
- * IWL_SCAN_OFFLOAD_FLAG_EBS_QUICK_MODE: EBS duration is 100mSec - typical
- *     beacon period. Finding channel activity in this mode is not guaranteed.
- * IWL_SCAN_OFFLOAD_FLAG_EBS_ACCURATE_MODE: EBS duration is 200mSec.
- *     Assuming beacon period is 100ms finding channel activity is guaranteed.
- */
-enum iwl_scan_offload_flags {
-       IWL_SCAN_OFFLOAD_FLAG_PASS_ALL          = BIT(0),
-       IWL_SCAN_OFFLOAD_FLAG_CACHED_CHANNEL    = BIT(2),
-       IWL_SCAN_OFFLOAD_FLAG_EBS_QUICK_MODE    = BIT(5),
-       IWL_SCAN_OFFLOAD_FLAG_EBS_ACCURATE_MODE = BIT(6),
-};
-
-/**
- * iwl_scan_offload_req - scan offload request command
- * @flags:             bitmap - enum iwl_scan_offload_flags.
- * @watchdog:          maximum scan duration in TU.
- * @delay:             delay in seconds before first iteration.
- * @schedule_line:     scan offload schedule, for fast and regular scan.
- */
-struct iwl_scan_offload_req {
-       __le16 flags;
-       __le16 watchdog;
-       __le16 delay;
-       __le16 reserved;
-       struct iwl_scan_offload_schedule schedule_line[2];
-} __packed;
+} __packed; /* SCAN_SCHEDULE_API_S */
 
-enum iwl_scan_offload_compleate_status {
+enum iwl_scan_offload_complete_status {
        IWL_SCAN_OFFLOAD_COMPLETED      = 1,
        IWL_SCAN_OFFLOAD_ABORTED        = 2,
 };
@@ -326,6 +294,7 @@ enum iwl_scan_ebs_status {
        IWL_SCAN_EBS_SUCCESS,
        IWL_SCAN_EBS_FAILED,
        IWL_SCAN_EBS_CHAN_NOT_FOUND,
+       IWL_SCAN_EBS_INACTIVE,
 };
 
 /**
@@ -463,8 +432,19 @@ enum iwl_scan_priority {
        IWL_SCAN_PRIORITY_HIGH,
 };
 
+enum iwl_scan_priority_ext {
+       IWL_SCAN_PRIORITY_EXT_0_LOWEST,
+       IWL_SCAN_PRIORITY_EXT_1,
+       IWL_SCAN_PRIORITY_EXT_2,
+       IWL_SCAN_PRIORITY_EXT_3,
+       IWL_SCAN_PRIORITY_EXT_4,
+       IWL_SCAN_PRIORITY_EXT_5,
+       IWL_SCAN_PRIORITY_EXT_6,
+       IWL_SCAN_PRIORITY_EXT_7_HIGHEST,
+};
+
 /**
- * iwl_scan_req_unified_lmac - SCAN_REQUEST_CMD_API_S_VER_1
+ * iwl_scan_req_lmac - SCAN_REQUEST_CMD_API_S_VER_1
  * @reserved1: for alignment and future use
  * @channel_num: num of channels to scan
  * @active_dwell: dwell time for active channels
@@ -487,7 +467,7 @@ enum iwl_scan_priority {
  * @channel_opt: channel optimization options, for full and partial scan
  * @data: channel configuration and probe request packet.
  */
-struct iwl_scan_req_unified_lmac {
+struct iwl_scan_req_lmac {
        /* SCAN_REQUEST_FIXED_PART_API_S_VER_7 */
        __le32 reserved1;
        u8 n_channels;
@@ -508,7 +488,7 @@ struct iwl_scan_req_unified_lmac {
        /* SCAN_REQ_PERIODIC_PARAMS_API_S */
        __le32 iter_num;
        __le32 delay;
-       struct iwl_scan_offload_schedule schedule[2];
+       struct iwl_scan_schedule_lmac schedule[2];
        struct iwl_scan_channel_opt channel_opt[2];
        u8 data[];
 } __packed;
@@ -582,7 +562,11 @@ struct iwl_mvm_umac_cmd_hdr {
        u8 ver;
 } __packed;
 
-#define IWL_MVM_MAX_SIMULTANEOUS_SCANS 8
+/* Neither of these maximums can exceed 8, because we use an
+ * 8-bit mask (see IWL_MVM_SCAN_MASK in mvm.h).
+ */
+#define IWL_MVM_MAX_UMAC_SCANS 8
+#define IWL_MVM_MAX_LMAC_SCANS 1
 
 enum scan_config_flags {
        SCAN_CONFIG_FLAG_ACTIVATE                       = BIT(0),
@@ -865,4 +849,27 @@ struct iwl_scan_offload_profiles_query {
        struct iwl_scan_offload_profile_match matches[IWL_SCAN_MAX_PROFILES];
 } __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_2 */
 
+/**
+ * struct iwl_umac_scan_iter_complete_notif - notifies end of scanning iteration
+ * @uid: scan id, &enum iwl_umac_scan_uid_offsets
+ * @scanned_channels: number of channels scanned and number of valid elements in
+ *     results array
+ * @status: one of SCAN_COMP_STATUS_*
+ * @bt_status: BT on/off status
+ * @last_channel: last channel that was scanned
+ * @tsf_low: TSF timer (lower half) in usecs
+ * @tsf_high: TSF timer (higher half) in usecs
+ * @results: array of scan results, only "scanned_channels" of them are valid
+ */
+struct iwl_umac_scan_iter_complete_notif {
+       __le32 uid;
+       u8 scanned_channels;
+       u8 status;
+       u8 bt_status;
+       u8 last_channel;
+       __le32 tsf_low;
+       __le32 tsf_high;
+       struct iwl_scan_results_notif results[];
+} __packed; /* SCAN_ITER_COMPLETE_NTF_UMAC_API_S_VER_1 */
+
 #endif
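A sketch of how a handler might walk the new iteration-complete notification (example_iter_complete() is hypothetical; only the first scanned_channels entries of results[] are valid):

static void example_iter_complete(struct iwl_mvm *mvm,
				  struct iwl_rx_packet *pkt)
{
	struct iwl_umac_scan_iter_complete_notif *notif = (void *)pkt->data;
	int i;

	for (i = 0; i < notif->scanned_channels; i++) {
		/* notif->results[i] is one valid per-channel result */
	}
}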
index 01b1da6ad35977b349fc79336c15238706ca9078..16e9ef49397f4d055b788f970f1b74bf2712e5e7 100644 (file)
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -108,6 +108,7 @@ enum {
        ANTENNA_COUPLING_NOTIFICATION = 0xa,
 
        /* UMAC scan commands */
+       SCAN_ITERATION_COMPLETE_UMAC = 0xb5,
        SCAN_CFG_CMD = 0xc,
        SCAN_REQ_UMAC = 0xd,
        SCAN_ABORT_UMAC = 0xe,
@@ -147,13 +148,6 @@ enum {
 
        LQ_CMD = 0x4e,
 
-       /* Calibration */
-       TEMPERATURE_NOTIFICATION = 0x62,
-       CALIBRATION_CFG_CMD = 0x65,
-       CALIBRATION_RES_NOTIFICATION = 0x66,
-       CALIBRATION_COMPLETE_NOTIFICATION = 0x67,
-       RADIO_VERSION_NOTIFICATION = 0x68,
-
        /* Scan offload */
        SCAN_OFFLOAD_REQUEST_CMD = 0x51,
        SCAN_OFFLOAD_ABORT_CMD = 0x52,
@@ -177,12 +171,8 @@ enum {
        /* Thermal Throttling*/
        REPLY_THERMAL_MNG_BACKOFF = 0x7e,
 
-       /* Scanning */
-       SCAN_REQUEST_CMD = 0x80,
-       SCAN_ABORT_CMD = 0x81,
-       SCAN_START_NOTIFICATION = 0x82,
-       SCAN_RESULTS_NOTIFICATION = 0x83,
-       SCAN_COMPLETE_NOTIFICATION = 0x84,
+       /* Set/Get DC2DC frequency tune */
+       DC2DC_CONFIG_CMD = 0x83,
 
        /* NVM */
        NVM_ACCESS_CMD = 0x88,
@@ -1402,6 +1392,49 @@ struct iwl_mvm_marker {
        __le32 metadata[0];
 } __packed; /* MARKER_API_S_VER_1 */
 
+/*
+ * enum iwl_dc2dc_config_id - flag ids
+ *
+ * Ids of dc2dc configuration flags
+ */
+enum iwl_dc2dc_config_id {
+       DCDC_LOW_POWER_MODE_MSK_SET  = 0x1, /* not used */
+       DCDC_FREQ_TUNE_SET = 0x2,
+}; /* MARKER_ID_API_E_VER_1 */
+
+/**
+ * struct iwl_dc2dc_config_cmd - configure dc2dc values
+ *
+ * (DC2DC_CONFIG_CMD = 0x83)
+ *
+ * Set/Get & configure dc2dc values.
+ * The command always returns the current dc2dc values.
+ *
+ * @flags: set/get dc2dc
+ * @enable_low_power_mode: not used.
+ * @dc2dc_freq_tune0: frequency divider - digital domain
+ * @dc2dc_freq_tune1: frequency divider - analog domain
+ */
+struct iwl_dc2dc_config_cmd {
+       __le32 flags;
+       __le32 enable_low_power_mode; /* not used */
+       __le32 dc2dc_freq_tune0;
+       __le32 dc2dc_freq_tune1;
+} __packed; /* DC2DC_CONFIG_CMD_API_S_VER_1 */
+
+/**
+ * struct iwl_dc2dc_config_resp - response for iwl_dc2dc_config_cmd
+ *
+ * Current dc2dc values returned by the FW.
+ *
+ * @dc2dc_freq_tune0: frequency divider - digital domain
+ * @dc2dc_freq_tune1: frequency divider - analog domain
+ */
+struct iwl_dc2dc_config_resp {
+       __le32 dc2dc_freq_tune0;
+       __le32 dc2dc_freq_tune1;
+} __packed; /* DC2DC_CONFIG_RESP_API_S_VER_1 */
+
 /***********************************
  * Smart Fifo API
  ***********************************/
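A sketch of issuing the new command (example_set_dc2dc() is hypothetical; it reuses only the structures and flag defined above, plus the iwl_mvm_send_cmd_pdu() helper seen elsewhere in this series):

static int example_set_dc2dc(struct iwl_mvm *mvm, u32 tune0, u32 tune1)
{
	struct iwl_dc2dc_config_cmd cmd = {
		.flags = cpu_to_le32(DCDC_FREQ_TUNE_SET),
		.dc2dc_freq_tune0 = cpu_to_le32(tune0),
		.dc2dc_freq_tune1 = cpu_to_le32(tune1),
	};

	return iwl_mvm_send_cmd_pdu(mvm, DC2DC_CONFIG_CMD, 0,
				    sizeof(cmd), &cmd);
}

A caller that needs the echoed struct iwl_dc2dc_config_resp would send a CMD_WANT_SKB host command instead, since iwl_mvm_send_cmd_pdu() discards the response.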
index df869633f4dd976c9404e036ecf8a49a855fd0b4..eb10c5ee4a1407c5b02babe009a899e9f747d6c9 100644 (file)
@@ -623,7 +623,7 @@ static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
        if (!mvm->trans->ltr_enabled)
                return 0;
 
-       if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_HDC_PHASE_0))
+       if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_HDC_PHASE_0))
                return iwl_mvm_config_ltr_v1(mvm);
 
        return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
@@ -662,9 +662,9 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
                 * device that are triggered by the INIT firmware (MFUART).
                 */
                _iwl_trans_stop_device(mvm->trans, false);
-               _iwl_trans_start_hw(mvm->trans, false);
+               ret = _iwl_trans_start_hw(mvm->trans, false);
                if (ret)
-                       return ret;
+                       goto error;
        }
 
        if (iwlmvm_mod_params.init_dbg)
@@ -754,7 +754,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
                        goto error;
        }
 
-       if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) {
+       if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
                ret = iwl_mvm_config_scan(mvm);
                if (ret)
                        goto error;
@@ -832,21 +832,6 @@ int iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
        return 0;
 }
 
-int iwl_mvm_rx_radio_ver(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-                        struct iwl_device_cmd *cmd)
-{
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       struct iwl_radio_version_notif *radio_version = (void *)pkt->data;
-
-       /* TODO: what to do with that? */
-       IWL_DEBUG_INFO(mvm,
-                      "Radio version: flavor: 0x%08x, step 0x%08x, dash 0x%08x\n",
-                      le32_to_cpu(radio_version->radio_flavor),
-                      le32_to_cpu(radio_version->radio_step),
-                      le32_to_cpu(radio_version->radio_dash));
-       return 0;
-}
-
 int iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
                            struct iwl_rx_cmd_buffer *rxb,
                            struct iwl_device_cmd *cmd)
index dda9f7b5f3423173e668f507719e47c3540b27d0..08367fbc3bc43d15b89c8dbab7bd31becd038606 100644 (file)
@@ -80,7 +80,6 @@
 #include "sta.h"
 #include "time-event.h"
 #include "iwl-eeprom-parse.h"
-#include "fw-api-scan.h"
 #include "iwl-phy-db.h"
 #include "testmode.h"
 #include "iwl-fw-error-dump.h"
@@ -319,7 +318,7 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
        resp = iwl_mvm_update_mcc(mvm, alpha2, src_id);
        if (IS_ERR_OR_NULL(resp)) {
                IWL_DEBUG_LAR(mvm, "Could not get update from FW %d\n",
-                             PTR_RET(resp));
+                             PTR_ERR_OR_ZERO(resp));
                goto out;
        }
 
@@ -335,7 +334,7 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
        kfree(resp);
        if (IS_ERR_OR_NULL(regd)) {
                IWL_DEBUG_LAR(mvm, "Could not parse update from FW %d\n",
-                             PTR_RET(regd));
+                             PTR_ERR_OR_ZERO(regd));
                goto out;
        }
 
@@ -416,6 +415,12 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
 {
        struct ieee80211_hw *hw = mvm->hw;
        int num_mac, ret, i;
+       static const u32 mvm_ciphers[] = {
+               WLAN_CIPHER_SUITE_WEP40,
+               WLAN_CIPHER_SUITE_WEP104,
+               WLAN_CIPHER_SUITE_TKIP,
+               WLAN_CIPHER_SUITE_CCMP,
+       };
 
        /* Tell mac80211 our characteristics */
        hw->flags = IEEE80211_HW_SIGNAL_DBM |
@@ -429,6 +434,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
                    IEEE80211_HW_TIMING_BEACON_ONLY |
                    IEEE80211_HW_CONNECTION_MONITOR |
                    IEEE80211_HW_CHANCTX_STA_CSA |
+                   IEEE80211_HW_SUPPORT_FAST_XMIT |
                    IEEE80211_HW_SUPPORTS_CLONED_SKBS;
 
        hw->queues = mvm->first_agg_queue;
@@ -441,19 +447,38 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
        hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
        hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
 
+       BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 2);
+       memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers));
+       hw->wiphy->n_cipher_suites = ARRAY_SIZE(mvm_ciphers);
+       hw->wiphy->cipher_suites = mvm->ciphers;
+
        /*
         * Enable 11w if advertised by firmware and software crypto
         * is not enabled (as the firmware will interpret some mgmt
         * packets, so enabling it with software crypto isn't safe)
         */
        if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_MFP &&
-           !iwlwifi_mod_params.sw_crypto)
+           !iwlwifi_mod_params.sw_crypto) {
                hw->flags |= IEEE80211_HW_MFP_CAPABLE;
+               mvm->ciphers[hw->wiphy->n_cipher_suites] =
+                       WLAN_CIPHER_SUITE_AES_CMAC;
+               hw->wiphy->n_cipher_suites++;
+       }
+
+       /* currently FW API supports only one optional cipher scheme */
+       if (mvm->fw->cs[0].cipher) {
+               mvm->hw->n_cipher_schemes = 1;
+               mvm->hw->cipher_schemes = &mvm->fw->cs[0];
+               mvm->ciphers[hw->wiphy->n_cipher_suites] =
+                       mvm->fw->cs[0].cipher;
+               hw->wiphy->n_cipher_suites++;
+       }
 
        hw->flags |= IEEE80211_SINGLE_HW_SCAN_ON_ALL_BANDS;
        hw->wiphy->features |=
                NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
-               NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
+               NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR |
+               NL80211_FEATURE_ND_RANDOM_MAC_ADDR;
 
        hw->sta_data_size = sizeof(struct iwl_mvm_sta);
        hw->vif_data_size = sizeof(struct iwl_mvm_vif);
@@ -506,10 +531,19 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
 
        iwl_mvm_reset_phy_ctxts(mvm);
 
-       hw->wiphy->max_scan_ie_len = iwl_mvm_max_scan_ie_len(mvm, false);
+       hw->wiphy->max_scan_ie_len = iwl_mvm_max_scan_ie_len(mvm);
 
        hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
 
+       BUILD_BUG_ON(IWL_MVM_SCAN_STOPPING_MASK & IWL_MVM_SCAN_MASK);
+       BUILD_BUG_ON(IWL_MVM_MAX_UMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK) ||
+                    IWL_MVM_MAX_LMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK));
+
+       if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
+               mvm->max_scans = IWL_MVM_MAX_UMAC_SCANS;
+       else
+               mvm->max_scans = IWL_MVM_MAX_LMAC_SCANS;
+
        if (mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels)
                hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
                        &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ];
@@ -517,10 +551,10 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
                hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
                        &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
 
-               if ((mvm->fw->ucode_capa.capa[0] &
-                    IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
-                   (mvm->fw->ucode_capa.api[0] &
-                    IWL_UCODE_TLV_API_LQ_SS_PARAMS))
+               if (fw_has_capa(&mvm->fw->ucode_capa,
+                               IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
+                   fw_has_api(&mvm->fw->ucode_capa,
+                              IWL_UCODE_TLV_API_LQ_SS_PARAMS))
                        hw->wiphy->bands[IEEE80211_BAND_5GHZ]->vht_cap.cap |=
                                IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE;
        }
@@ -532,14 +566,12 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
        else
                hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
 
-       if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 10) {
-               hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
-               hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
-               hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
-               /* we create the 802.11 header and zero length SSID IE. */
-               hw->wiphy->max_sched_scan_ie_len =
-                       SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
-       }
+       hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
+       hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
+       hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
+       /* we create the 802.11 header and zero length SSID IE. */
+       hw->wiphy->max_sched_scan_ie_len =
+               SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
 
        hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
                               NL80211_FEATURE_LOW_PRIORITY_SCAN |
@@ -548,30 +580,24 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
                               NL80211_FEATURE_STATIC_SMPS |
                               NL80211_FEATURE_SUPPORTS_WMM_ADMISSION;
 
-       if (mvm->fw->ucode_capa.capa[0] &
-           IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT)
+       if (fw_has_capa(&mvm->fw->ucode_capa,
+                       IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT))
                hw->wiphy->features |= NL80211_FEATURE_TX_POWER_INSERTION;
-       if (mvm->fw->ucode_capa.capa[0] &
-           IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT)
+       if (fw_has_capa(&mvm->fw->ucode_capa,
+                       IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT))
                hw->wiphy->features |= NL80211_FEATURE_QUIET;
 
-       if (mvm->fw->ucode_capa.capa[0] &
-           IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)
+       if (fw_has_capa(&mvm->fw->ucode_capa,
+                       IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
                hw->wiphy->features |=
                        NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES;
 
-       if (mvm->fw->ucode_capa.capa[0] &
-           IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT)
+       if (fw_has_capa(&mvm->fw->ucode_capa,
+                       IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
                hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES;
 
        mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
 
-       /* currently FW API supports only one optional cipher scheme */
-       if (mvm->fw->cs[0].cipher) {
-               mvm->hw->n_cipher_schemes = 1;
-               mvm->hw->cipher_schemes = &mvm->fw->cs[0];
-       }
-
 #ifdef CONFIG_PM_SLEEP
        if (iwl_mvm_is_d0i3_supported(mvm) &&
            device_can_wakeup(mvm->trans->dev)) {
@@ -611,13 +637,14 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
        if (ret)
                return ret;
 
-       if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_TDLS_SUPPORT) {
+       if (fw_has_capa(&mvm->fw->ucode_capa,
+                       IWL_UCODE_TLV_CAPA_TDLS_SUPPORT)) {
                IWL_DEBUG_TDLS(mvm, "TDLS supported\n");
                hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
        }
 
-       if (mvm->fw->ucode_capa.capa[0] &
-           IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH) {
+       if (fw_has_capa(&mvm->fw->ucode_capa,
+                       IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH)) {
                IWL_DEBUG_TDLS(mvm, "TDLS channel switch supported\n");
                hw->wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
        }
@@ -730,6 +757,60 @@ static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
        return true;
 }
 
+#define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...)  \
+       do {                                                    \
+               if (!(le16_to_cpu(_tid_bm) & BIT(_tid)))        \
+                       break;                                  \
+               iwl_mvm_fw_dbg_collect_trig(_mvm, _trig, _fmt); \
+       } while (0)
+
+static void
+iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+                           struct ieee80211_sta *sta, u16 tid, u16 rx_ba_ssn,
+                           enum ieee80211_ampdu_mlme_action action)
+{
+       struct iwl_fw_dbg_trigger_tlv *trig;
+       struct iwl_fw_dbg_trigger_ba *ba_trig;
+
+       if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
+               return;
+
+       trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
+       ba_trig = (void *)trig->data;
+
+       if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
+               return;
+
+       switch (action) {
+       case IEEE80211_AMPDU_TX_OPERATIONAL: {
+               struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+               struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
+
+               CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_start, tid,
+                                "TX AGG START: MAC %pM tid %d ssn %d\n",
+                                sta->addr, tid, tid_data->ssn);
+               break;
+               }
+       case IEEE80211_AMPDU_TX_STOP_CONT:
+               CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_stop, tid,
+                                "TX AGG STOP: MAC %pM tid %d\n",
+                                sta->addr, tid);
+               break;
+       case IEEE80211_AMPDU_RX_START:
+               CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_start, tid,
+                                "RX AGG START: MAC %pM tid %d ssn %d\n",
+                                sta->addr, tid, rx_ba_ssn);
+               break;
+       case IEEE80211_AMPDU_RX_STOP:
+               CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_stop, tid,
+                                "RX AGG STOP: MAC %pM tid %d\n",
+                                sta->addr, tid);
+               break;
+       default:
+               break;
+       }
+}
+
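Each CHECK_BA_TRIGGER() call above expands to a per-TID bitmap test before collecting, roughly (illustrative values):

	/* e.g. tx_ba_start == cpu_to_le16(0x0005): fire for TID 0 and 2 only */
	if (le16_to_cpu(ba_trig->tx_ba_start) & BIT(tid))
		iwl_mvm_fw_dbg_collect_trig(mvm, trig,
					    "TX AGG START: MAC %pM tid %d ssn %d\n",
					    sta->addr, tid, tid_data->ssn);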
 static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
                                    struct ieee80211_vif *vif,
                                    enum ieee80211_ampdu_mlme_action action,
@@ -806,6 +887,16 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
                ret = -EINVAL;
                break;
        }
+
+       if (!ret) {
+               u16 rx_ba_ssn = 0;
+
+               if (action == IEEE80211_AMPDU_RX_START)
+                       rx_ba_ssn = *ssn;
+
+               iwl_mvm_ampdu_check_trigger(mvm, vif, sta, tid,
+                                           rx_ba_ssn, action);
+       }
        mutex_unlock(&mvm->mutex);
 
        /*
@@ -1227,22 +1318,23 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
 
        iwl_trans_stop_device(mvm->trans);
 
-       mvm->scan_status = IWL_MVM_SCAN_NONE;
+       mvm->scan_status = 0;
        mvm->ps_disabled = false;
        mvm->calibrating = false;
 
        /* just in case one was running */
        ieee80211_remain_on_channel_expired(mvm->hw);
 
-       ieee80211_iterate_active_interfaces_atomic(
-               mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
-               iwl_mvm_cleanup_iterator, mvm);
+       /*
+        * cleanup all interfaces, even inactive ones, as some might have
+        * gone down during the HW restart
+        */
+       ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm);
 
        mvm->p2p_device_vif = NULL;
        mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
 
        iwl_mvm_reset_phy_ctxts(mvm);
-       memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
        memset(mvm->sta_drained, 0, sizeof(mvm->sta_drained));
        memset(mvm->tfd_drained, 0, sizeof(mvm->tfd_drained));
        memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
@@ -1404,7 +1496,7 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
         * The work item could be running or queued if the
         * ROC time event stops just as we get here.
         */
-       cancel_work_sync(&mvm->roc_done_wk);
+       flush_work(&mvm->roc_done_wk);
 
        iwl_trans_stop_device(mvm->trans);
 
@@ -1417,20 +1509,24 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
        /*
         * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
         * won't be called in this case).
+        * But make sure to clean up interfaces that went down before or
+        * during the HW restart request.
         */
-       clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+       if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+               ieee80211_iterate_interfaces(mvm->hw, 0,
+                                            iwl_mvm_cleanup_iterator, mvm);
 
        /* We shouldn't have any UIDs still set.  Loop over all the UIDs to
         * make sure there's nothing left there and warn if any is found.
         */
-       if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) {
+       if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
                int i;
 
-               for (i = 0; i < IWL_MVM_MAX_SIMULTANEOUS_SCANS; i++) {
-                       if (WARN_ONCE(mvm->scan_uid[i],
-                                     "UMAC scan UID %d was not cleaned\n",
-                                     mvm->scan_uid[i]))
-                               mvm->scan_uid[i] = 0;
+               for (i = 0; i < mvm->max_scans; i++) {
+                       if (WARN_ONCE(mvm->scan_uid_status[i],
+                                     "UMAC scan UID %d status was not cleaned\n",
+                                     i))
+                               mvm->scan_uid_status[i] = 0;
                }
        }
 
@@ -1495,7 +1591,7 @@ static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                .pwr_restriction = cpu_to_le16(8 * tx_power),
        };
 
-       if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_TX_POWER_DEV))
+       if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TX_POWER_DEV))
                return iwl_mvm_set_tx_power_old(mvm, vif, tx_power);
 
        if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
@@ -2354,7 +2450,7 @@ static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
        mutex_lock(&mvm->mutex);
 
        if (changes & BSS_CHANGED_IDLE && !bss_conf->idle)
-               iwl_mvm_scan_offload_stop(mvm, true);
+               iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
 
        switch (vif->type) {
        case NL80211_IFTYPE_STATION:
@@ -2373,89 +2469,21 @@ static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
        iwl_mvm_unref(mvm, IWL_MVM_REF_BSS_CHANGED);
 }
 
-static int iwl_mvm_cancel_scan_wait_notif(struct iwl_mvm *mvm,
-                                         enum iwl_scan_status scan_type)
-{
-       int ret;
-       bool wait_for_handlers = false;
-
-       mutex_lock(&mvm->mutex);
-
-       if (mvm->scan_status != scan_type) {
-               ret = 0;
-               /* make sure there are no pending notifications */
-               wait_for_handlers = true;
-               goto out;
-       }
-
-       switch (scan_type) {
-       case IWL_MVM_SCAN_SCHED:
-               ret = iwl_mvm_scan_offload_stop(mvm, true);
-               break;
-       case IWL_MVM_SCAN_OS:
-               ret = iwl_mvm_cancel_scan(mvm);
-               break;
-       case IWL_MVM_SCAN_NONE:
-       default:
-               WARN_ON_ONCE(1);
-               ret = -EINVAL;
-               break;
-       }
-       if (ret)
-               goto out;
-
-       wait_for_handlers = true;
-out:
-       mutex_unlock(&mvm->mutex);
-
-       /* make sure we consume the completion notification */
-       if (wait_for_handlers)
-               iwl_mvm_wait_for_async_handlers(mvm);
-
-       return ret;
-}
 static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
                               struct ieee80211_vif *vif,
                               struct ieee80211_scan_request *hw_req)
 {
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
-       struct cfg80211_scan_request *req = &hw_req->req;
        int ret;
 
-       if (req->n_channels == 0 ||
-           req->n_channels > mvm->fw->ucode_capa.n_scan_channels)
+       if (hw_req->req.n_channels == 0 ||
+           hw_req->req.n_channels > mvm->fw->ucode_capa.n_scan_channels)
                return -EINVAL;
 
-       if (!(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
-               ret = iwl_mvm_cancel_scan_wait_notif(mvm, IWL_MVM_SCAN_SCHED);
-               if (ret)
-                       return ret;
-       }
-
        mutex_lock(&mvm->mutex);
-
-       if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) {
-               IWL_ERR(mvm, "scan while LAR regdomain is not set\n");
-               ret = -EBUSY;
-               goto out;
-       }
-
-       if (mvm->scan_status != IWL_MVM_SCAN_NONE) {
-               ret = -EBUSY;
-               goto out;
-       }
-
-       iwl_mvm_ref(mvm, IWL_MVM_REF_SCAN);
-
-       if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
-               ret = iwl_mvm_scan_umac(mvm, vif, hw_req);
-       else
-               ret = iwl_mvm_unified_scan_lmac(mvm, vif, hw_req);
-
-       if (ret)
-               iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
-out:
+       ret = iwl_mvm_reg_scan_start(mvm, vif, &hw_req->req, &hw_req->ies);
        mutex_unlock(&mvm->mutex);
+
        return ret;
 }
 
@@ -2473,12 +2501,8 @@ static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw,
         * cancel the scan before ieee80211_scan_work() could run.
         * To handle that, simply return if the scan is not running.
        */
-       /* FIXME: for now, we ignore this race for UMAC scans, since
-        * they don't set the scan_status.
-        */
-       if ((mvm->scan_status == IWL_MVM_SCAN_OS) ||
-           (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN))
-               iwl_mvm_cancel_scan(mvm);
+       if (mvm->scan_status & IWL_MVM_SCAN_REGULAR)
+               iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
 
        mutex_unlock(&mvm->mutex);
 }
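The scan hunks in this file reflect mvm->scan_status becoming a bitmask rather than a single enum value, so a regular and a scheduled scan can be in flight at the same time. A minimal sketch of the new style of check (example_any_scan_running() is hypothetical):

static bool example_any_scan_running(struct iwl_mvm *mvm)
{
	/* both flags may now be set simultaneously */
	return mvm->scan_status & (IWL_MVM_SCAN_REGULAR | IWL_MVM_SCAN_SCHED);
}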
@@ -2794,35 +2818,17 @@ static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
                                        struct ieee80211_scan_ies *ies)
 {
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
-       int ret;
 
-       if (!(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
-               ret = iwl_mvm_cancel_scan_wait_notif(mvm, IWL_MVM_SCAN_OS);
-               if (ret)
-                       return ret;
-       }
+       int ret;
 
        mutex_lock(&mvm->mutex);
 
-       if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) {
-               IWL_ERR(mvm, "sched-scan while LAR regdomain is not set\n");
-               ret = -EBUSY;
-               goto out;
-       }
-
        if (!vif->bss_conf.idle) {
                ret = -EBUSY;
                goto out;
        }
 
-       if (mvm->scan_status != IWL_MVM_SCAN_NONE) {
-               ret = -EBUSY;
-               goto out;
-       }
-
-       ret = iwl_mvm_scan_offload_start(mvm, vif, req, ies);
-       if (ret)
-               mvm->scan_status = IWL_MVM_SCAN_NONE;
+       ret = iwl_mvm_sched_scan_start(mvm, vif, req, ies, IWL_MVM_SCAN_SCHED);
 
 out:
        mutex_unlock(&mvm->mutex);
@@ -2845,16 +2851,12 @@ static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
         * could run.  To handle this, simply return if the scan is
         * not running.
        */
-       /* FIXME: for now, we ignore this race for UMAC scans, since
-        * they don't set the scan_status.
-        */
-       if (mvm->scan_status != IWL_MVM_SCAN_SCHED &&
-           !(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
+       if (!(mvm->scan_status & IWL_MVM_SCAN_SCHED)) {
                mutex_unlock(&mvm->mutex);
                return 0;
        }
 
-       ret = iwl_mvm_scan_offload_stop(mvm, false);
+       ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, false);
        mutex_unlock(&mvm->mutex);
        iwl_mvm_wait_for_async_handlers(mvm);
 
@@ -2922,8 +2924,21 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
                        break;
                }
 
+               /* During FW restart, in order to restore the state as it was,
+                * don't try to reprogram keys we previously failed for.
+                */
+               if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
+                   key->hw_key_idx == STA_KEY_IDX_INVALID) {
+                       IWL_DEBUG_MAC80211(mvm,
+                                          "skip invalid idx key programming during restart\n");
+                       ret = 0;
+                       break;
+               }
+
                IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n");
-               ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, false);
+               ret = iwl_mvm_set_sta_key(mvm, vif, sta, key,
+                                         test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
+                                                  &mvm->status));
                if (ret) {
                        IWL_WARN(mvm, "set key failed\n");
                        /*
@@ -3001,7 +3016,7 @@ static bool iwl_mvm_rx_aux_roc(struct iwl_notif_wait_data *notif_wait,
        return true;
 }
 
-#define AUX_ROC_MAX_DELAY_ON_CHANNEL 5000
+#define AUX_ROC_MAX_DELAY_ON_CHANNEL 200
 static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
                                    struct ieee80211_channel *channel,
                                    struct ieee80211_vif *vif,
@@ -3106,8 +3121,8 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
 
        switch (vif->type) {
        case NL80211_IFTYPE_STATION:
-               if (mvm->fw->ucode_capa.capa[0] &
-                   IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT) {
+               if (fw_has_capa(&mvm->fw->ucode_capa,
+                               IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT)) {
                        /* Use aux roc framework (HS20) */
                        ret = iwl_mvm_send_aux_roc_cmd(mvm, channel,
                                                       vif, duration);
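
The fw_has_capa()/fw_has_api() conversions in this series replace open-coded capa[0]/api[0] tests, which silently break once a capability is assigned a bit above 31. A minimal standalone sketch of the flat-bitmap idea behind the helpers (illustrative only; the in-tree helpers take the ucode_capa structure, not a bare word array):

#include <stdbool.h>
#include <stdint.h>

/* Treat an array of 32-bit capability words as one continuous bitmap,
 * so e.g. bit 40 transparently lands in word 1 instead of requiring
 * capa[1] to be spelled out at every call site.
 */
static inline bool has_flag(const uint32_t *words, unsigned int bit)
{
        return words[bit / 32] & (1u << (bit % 32));
}
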
@@ -3899,7 +3914,7 @@ static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
        if (idx != 0)
                return -ENOENT;
 
-       if (!(mvm->fw->ucode_capa.capa[0] &
+       if (!fw_has_capa(&mvm->fw->ucode_capa,
                        IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
                return -ENOENT;
 
@@ -3946,8 +3961,8 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 
-       if (!(mvm->fw->ucode_capa.capa[0] &
-                               IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
+       if (!fw_has_capa(&mvm->fw->ucode_capa,
+                        IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
                return;
 
        /* if beacon filtering isn't on mac80211 does it anyway */
@@ -3977,9 +3992,9 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
        mutex_unlock(&mvm->mutex);
 }
 
-static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
-                                      struct ieee80211_vif *vif,
-                                      const struct ieee80211_event *event)
+static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm,
+                                       struct ieee80211_vif *vif,
+                                       const struct ieee80211_event *event)
 {
 #define CHECK_MLME_TRIGGER(_mvm, _trig, _buf, _cnt, _fmt...)   \
        do {                                                    \
@@ -3988,7 +4003,6 @@ static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
                iwl_mvm_fw_dbg_collect_trig(_mvm, _trig, _fmt);\
        } while (0)
 
-       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
        struct iwl_fw_dbg_trigger_tlv *trig;
        struct iwl_fw_dbg_trigger_mlme *trig_mlme;
 
@@ -4032,6 +4046,75 @@ static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
 #undef CHECK_MLME_TRIGGER
 }
 
+static void iwl_mvm_event_bar_rx_callback(struct iwl_mvm *mvm,
+                                         struct ieee80211_vif *vif,
+                                         const struct ieee80211_event *event)
+{
+       struct iwl_fw_dbg_trigger_tlv *trig;
+       struct iwl_fw_dbg_trigger_ba *ba_trig;
+
+       if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
+               return;
+
+       trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
+       ba_trig = (void *)trig->data;
+       if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
+               return;
+
+       if (!(le16_to_cpu(ba_trig->rx_bar) & BIT(event->u.ba.tid)))
+               return;
+
+       iwl_mvm_fw_dbg_collect_trig(mvm, trig,
+                                   "BAR received from %pM, tid %d, ssn %d",
+                                   event->u.ba.sta->addr, event->u.ba.tid,
+                                   event->u.ba.ssn);
+}
+
+static void
+iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
+                                    struct ieee80211_vif *vif,
+                                    const struct ieee80211_event *event)
+{
+       struct iwl_fw_dbg_trigger_tlv *trig;
+       struct iwl_fw_dbg_trigger_ba *ba_trig;
+
+       if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
+               return;
+
+       trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
+       ba_trig = (void *)trig->data;
+       if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
+               return;
+
+       if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(event->u.ba.tid)))
+               return;
+
+       iwl_mvm_fw_dbg_collect_trig(mvm, trig,
+                                   "Frame from %pM timed out, tid %d",
+                                   event->u.ba.sta->addr, event->u.ba.tid);
+}
+
+static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
+                                      struct ieee80211_vif *vif,
+                                      const struct ieee80211_event *event)
+{
+       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+       switch (event->type) {
+       case MLME_EVENT:
+               iwl_mvm_event_mlme_callback(mvm, vif, event);
+               break;
+       case BAR_RX_EVENT:
+               iwl_mvm_event_bar_rx_callback(mvm, vif, event);
+               break;
+       case BA_FRAME_TIMEOUT:
+               iwl_mvm_event_frame_timeout_callback(mvm, vif, event);
+               break;
+       default:
+               break;
+       }
+}
+
 const struct ieee80211_ops iwl_mvm_hw_ops = {
        .tx = iwl_mvm_mac_tx,
        .ampdu_action = iwl_mvm_mac_ampdu_action,
index cf70f681d1acb7e271717091684ca805749e13ab..2d4bad5fe825fb4802fc1439167323dcf4b23e7b 100644 (file)
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -76,6 +76,7 @@
 #include "iwl-notif-wait.h"
 #include "iwl-eeprom-parse.h"
 #include "iwl-fw-file.h"
+#include "iwl-config.h"
 #include "sta.h"
 #include "fw-api.h"
 #include "constants.h"
@@ -275,6 +276,7 @@ enum iwl_mvm_ref_type {
        IWL_MVM_REF_UCODE_DOWN,
        IWL_MVM_REF_SCAN,
        IWL_MVM_REF_ROC,
+       IWL_MVM_REF_ROC_AUX,
        IWL_MVM_REF_P2P_CLIENT,
        IWL_MVM_REF_AP_IBSS,
        IWL_MVM_REF_USER,
@@ -445,10 +447,26 @@ iwl_mvm_vif_from_mac80211(struct ieee80211_vif *vif)
 
 extern const u8 tid_to_mac80211_ac[];
 
+#define IWL_MVM_SCAN_STOPPING_SHIFT    8
+
 enum iwl_scan_status {
-       IWL_MVM_SCAN_NONE,
-       IWL_MVM_SCAN_OS,
-       IWL_MVM_SCAN_SCHED,
+       IWL_MVM_SCAN_REGULAR            = BIT(0),
+       IWL_MVM_SCAN_SCHED              = BIT(1),
+       IWL_MVM_SCAN_NETDETECT          = BIT(2),
+
+       IWL_MVM_SCAN_STOPPING_REGULAR   = BIT(8),
+       IWL_MVM_SCAN_STOPPING_SCHED     = BIT(9),
+       IWL_MVM_SCAN_STOPPING_NETDETECT = BIT(10),
+
+       IWL_MVM_SCAN_REGULAR_MASK       = IWL_MVM_SCAN_REGULAR |
+                                         IWL_MVM_SCAN_STOPPING_REGULAR,
+       IWL_MVM_SCAN_SCHED_MASK         = IWL_MVM_SCAN_SCHED |
+                                         IWL_MVM_SCAN_STOPPING_SCHED,
+       IWL_MVM_SCAN_NETDETECT_MASK     = IWL_MVM_SCAN_NETDETECT |
+                                         IWL_MVM_SCAN_STOPPING_NETDETECT,
+
+       IWL_MVM_SCAN_STOPPING_MASK      = 0xff << IWL_MVM_SCAN_STOPPING_SHIFT,
+       IWL_MVM_SCAN_MASK               = 0xff,
 };
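
Turning iwl_scan_status into a bitmask (instead of mutually exclusive values) is what lets a stop request for one scan type coexist with another type still running, which the LMAC completion handler later in this patch depends on. A compilable toy model of the encoding, mirroring the values above (not driver code):

#include <stdio.h>

enum {
        SCAN_REGULAR          = 1 << 0,
        SCAN_SCHED            = 1 << 1,
        SCAN_STOPPING_SHIFT   = 8,
        SCAN_STOPPING_REGULAR = 1 << 8,
        SCAN_STOPPING_SCHED   = 1 << 9,
        SCAN_MASK             = 0xff,
        SCAN_STOPPING_MASK    = 0xff << SCAN_STOPPING_SHIFT,
};

int main(void)
{
        unsigned int status = SCAN_SCHED;       /* sched scan running */

        status |= SCAN_REGULAR;                 /* regular scan starts too */

        /* stopping the regular scan moves its running bit to the
         * corresponding stopping bit */
        status = (status & ~SCAN_REGULAR) | SCAN_STOPPING_REGULAR;

        printf("running=%#x stopping=%#x\n",
               status & SCAN_MASK, status >> SCAN_STOPPING_SHIFT);
        return 0;       /* prints: running=0x2 stopping=0x1 */
}
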
 
 /**
@@ -463,49 +481,6 @@ struct iwl_nvm_section {
        const u8 *data;
 };
 
-/*
- * Tx-backoff threshold
- * @temperature: The threshold in Celsius
- * @backoff: The tx-backoff in uSec
- */
-struct iwl_tt_tx_backoff {
-       s32 temperature;
-       u32 backoff;
-};
-
-#define TT_TX_BACKOFF_SIZE 6
-
-/**
- * struct iwl_tt_params - thermal throttling parameters
- * @ct_kill_entry: CT Kill entry threshold
- * @ct_kill_exit: CT Kill exit threshold
- * @ct_kill_duration: The time  intervals (in uSec) in which the driver needs
- *     to checks whether to exit CT Kill.
- * @dynamic_smps_entry: Dynamic SMPS entry threshold
- * @dynamic_smps_exit: Dynamic SMPS exit threshold
- * @tx_protection_entry: TX protection entry threshold
- * @tx_protection_exit: TX protection exit threshold
- * @tx_backoff: Array of thresholds for tx-backoff , in ascending order.
- * @support_ct_kill: Support CT Kill?
- * @support_dynamic_smps: Support dynamic SMPS?
- * @support_tx_protection: Support tx protection?
- * @support_tx_backoff: Support tx-backoff?
- */
-struct iwl_tt_params {
-       s32 ct_kill_entry;
-       s32 ct_kill_exit;
-       u32 ct_kill_duration;
-       s32 dynamic_smps_entry;
-       s32 dynamic_smps_exit;
-       s32 tx_protection_entry;
-       s32 tx_protection_exit;
-       struct iwl_tt_tx_backoff tx_backoff[TT_TX_BACKOFF_SIZE];
-       bool support_ct_kill;
-       bool support_dynamic_smps;
-       bool support_tx_protection;
-       bool support_tx_backoff;
-};
-
 /**
  * struct iwl_mvm_tt_mgnt - Thermal Throttling Management structure
  * @ct_kill_exit: worker to exit thermal kill
@@ -520,7 +495,7 @@ struct iwl_mvm_tt_mgmt {
        bool dynamic_smps;
        u32 tx_backoff;
        u32 min_backoff;
-       const struct iwl_tt_params *params;
+       struct iwl_tt_params params;
        bool throttle;
 };
 
@@ -647,13 +622,15 @@ struct iwl_mvm {
        u32 rts_threshold;
 
        /* Scan status, cmd (pre-allocated) and auxiliary station */
-       enum iwl_scan_status scan_status;
+       unsigned int scan_status;
        void *scan_cmd;
        struct iwl_mcast_filter_cmd *mcast_filter_cmd;
 
+       /* max number of simultaneous scans the FW supports */
+       unsigned int max_scans;
+
        /* UMAC scan tracking */
-       u32 scan_uid[IWL_MVM_MAX_SIMULTANEOUS_SCANS];
-       u8 scan_seq_num, sched_scan_seq_num;
+       u32 scan_uid_status[IWL_MVM_MAX_UMAC_SCANS];
 
        /* rx chain antennas set through debugfs for the scan command */
        u8 scan_rx_ant;
@@ -843,6 +820,8 @@ struct iwl_mvm {
        } tdls_cs;
 
        struct iwl_mvm_shared_mem_cfg shared_mem_cfg;
+
+       u32 ciphers[6];
 };
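
The scan_uid_status[] array replaces the old scan_uid[] list plus sequence numbers: each UMAC slot now records the status of the scan owning that firmware unique id, so the driver can locate a scan of a given type by walking the array. A hypothetical lookup in that spirit (the slot count and helper name are assumptions for illustration, not the driver's API):

#include <stddef.h>

#define MAX_UMAC_SCANS 4        /* assumed slot count, for illustration */

static int scan_uid_by_status(const unsigned int *scan_uid_status,
                              unsigned int status)
{
        for (size_t i = 0; i < MAX_UMAC_SCANS; i++)
                if (scan_uid_status[i] == status)   /* slot holds a scan in this state */
                        return (int)i;
        return -1;                                  /* no such scan in flight */
}
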
 
 /* Extract MVM priv from op_mode and _hw */
@@ -912,14 +891,15 @@ static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm)
        return mvm->trans->cfg->d0i3 &&
               mvm->trans->d0i3_mode != IWL_D0I3_MODE_OFF &&
               !iwlwifi_mod_params.d0i3_disable &&
-              (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_D0I3_SUPPORT);
+              fw_has_capa(&mvm->fw->ucode_capa,
+                          IWL_UCODE_TLV_CAPA_D0I3_SUPPORT);
 }
 
 static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm)
 {
        bool nvm_lar = mvm->nvm_data->lar_enabled;
-       bool tlv_lar = mvm->fw->ucode_capa.capa[0] &
-               IWL_UCODE_TLV_CAPA_LAR_SUPPORT;
+       bool tlv_lar = fw_has_capa(&mvm->fw->ucode_capa,
+                                  IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
 
        if (iwlwifi_mod_params.lar_disable)
                return false;
@@ -936,24 +916,28 @@ static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm)
 
 static inline bool iwl_mvm_is_wifi_mcc_supported(struct iwl_mvm *mvm)
 {
-       return mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_WIFI_MCC_UPDATE ||
-              mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC;
+       return fw_has_api(&mvm->fw->ucode_capa,
+                         IWL_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
+              fw_has_capa(&mvm->fw->ucode_capa,
+                          IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC);
 }
 
 static inline bool iwl_mvm_is_scd_cfg_supported(struct iwl_mvm *mvm)
 {
-       return mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_SCD_CFG;
+       return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_SCD_CFG);
 }
 
 static inline bool iwl_mvm_bt_is_plcr_supported(struct iwl_mvm *mvm)
 {
-       return (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_BT_COEX_PLCR) &&
+       return fw_has_capa(&mvm->fw->ucode_capa,
+                          IWL_UCODE_TLV_CAPA_BT_COEX_PLCR) &&
                IWL_MVM_BT_COEX_CORUNNING;
 }
 
 static inline bool iwl_mvm_bt_is_rrc_supported(struct iwl_mvm *mvm)
 {
-       return (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_BT_COEX_RRC) &&
+       return fw_has_capa(&mvm->fw->ucode_capa,
+                          IWL_UCODE_TLV_CAPA_BT_COEX_RRC) &&
                IWL_MVM_BT_COEX_RRC;
 }
 
@@ -1083,8 +1067,6 @@ int iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
                      struct iwl_device_cmd *cmd);
 int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
                        struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_radio_ver(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-                        struct iwl_device_cmd *cmd);
 int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
                                  struct iwl_rx_cmd_buffer *rxb,
                                  struct iwl_device_cmd *cmd);
@@ -1093,8 +1075,6 @@ int iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
 int iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
                                struct iwl_rx_cmd_buffer *rxb,
                                struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_radio_ver(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-                        struct iwl_device_cmd *cmd);
 int iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
                            struct iwl_device_cmd *cmd);
 int iwl_mvm_rx_shared_mem_cfg_notif(struct iwl_mvm *mvm,
@@ -1146,48 +1126,38 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, bool force_upload,
                          struct ieee80211_vif *disabled_vif);
 
 /* Scanning */
+int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+                          struct cfg80211_scan_request *req,
+                          struct ieee80211_scan_ies *ies);
 int iwl_mvm_scan_size(struct iwl_mvm *mvm);
-int iwl_mvm_cancel_scan(struct iwl_mvm *mvm);
-int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm, bool is_sched_scan);
+int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify);
+int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm);
 void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm);
 
 /* Scheduled scan */
-int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
-                                          struct iwl_rx_cmd_buffer *rxb,
-                                          struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_scan_offload_iter_complete_notif(struct iwl_mvm *mvm,
-                                               struct iwl_rx_cmd_buffer *rxb,
-                                               struct iwl_device_cmd *cmd);
-int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
-                                      struct cfg80211_sched_scan_request *req);
-int iwl_mvm_scan_offload_start(struct iwl_mvm *mvm,
-                              struct ieee80211_vif *vif,
-                              struct cfg80211_sched_scan_request *req,
-                              struct ieee80211_scan_ies *ies);
-int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify);
-int iwl_mvm_rx_scan_offload_results(struct iwl_mvm *mvm,
-                                   struct iwl_rx_cmd_buffer *rxb,
-                                   struct iwl_device_cmd *cmd);
-
-/* Unified scan */
-int iwl_mvm_unified_scan_lmac(struct iwl_mvm *mvm,
-                             struct ieee80211_vif *vif,
-                             struct ieee80211_scan_request *req);
-int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
-                                   struct ieee80211_vif *vif,
-                                   struct cfg80211_sched_scan_request *req,
-                                   struct ieee80211_scan_ies *ies);
+int iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
+                                       struct iwl_rx_cmd_buffer *rxb,
+                                       struct iwl_device_cmd *cmd);
+int iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+                                            struct iwl_rx_cmd_buffer *rxb,
+                                            struct iwl_device_cmd *cmd);
+int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
+                            struct ieee80211_vif *vif,
+                            struct cfg80211_sched_scan_request *req,
+                            struct ieee80211_scan_ies *ies,
+                            int type);
+int iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
+                               struct iwl_rx_cmd_buffer *rxb,
+                               struct iwl_device_cmd *cmd);
 
 /* UMAC scan */
 int iwl_mvm_config_scan(struct iwl_mvm *mvm);
-int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
-                     struct ieee80211_scan_request *req);
-int iwl_mvm_sched_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
-                           struct cfg80211_sched_scan_request *req,
-                           struct ieee80211_scan_ies *ies);
 int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
                                        struct iwl_rx_cmd_buffer *rxb,
                                        struct iwl_device_cmd *cmd);
+int iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+                                            struct iwl_rx_cmd_buffer *rxb,
+                                            struct iwl_device_cmd *cmd);
 
 /* MVM debugfs */
 #ifdef CONFIG_IWLWIFI_DEBUGFS
index 87b2a30a2308439c4e7a3f3bd80e419f9583af5a..2a6be350704a9442245f6e8bd2391503f67f0c6d 100644 (file)
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -316,8 +316,8 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
        phy_sku = (const __le16 *)sections[NVM_SECTION_TYPE_PHY_SKU].data;
 
        lar_enabled = !iwlwifi_mod_params.lar_disable &&
-                     (mvm->fw->ucode_capa.capa[0] &
-                      IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
+                     fw_has_capa(&mvm->fw->ucode_capa,
+                                 IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
 
        return iwl_parse_nvm_data(mvm->trans->dev, mvm->cfg, hw, sw, calib,
                                  regulatory, mac_override, phy_sku,
@@ -583,9 +583,9 @@ int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic)
                kfree(nvm_buffer);
        }
 
-       /* load external NVM if configured */
+       /* Load an external NVM only if selected via the nvm_file mod param */
        if (mvm->nvm_file_name) {
-               /* read External NVM file - take the default */
+               /* read the external NVM file given by the mod param */
                ret = iwl_mvm_read_external_nvm(mvm);
                if (ret) {
                        /* choose the nvm_file name according to the
@@ -792,8 +792,8 @@ int iwl_mvm_init_mcc(struct iwl_mvm *mvm)
        char mcc[3];
 
        if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
-               tlv_lar = mvm->fw->ucode_capa.capa[0] &
-                       IWL_UCODE_TLV_CAPA_LAR_SUPPORT;
+               tlv_lar = fw_has_capa(&mvm->fw->ucode_capa,
+                                     IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
                nvm_lar = mvm->nvm_data->lar_enabled;
                if (tlv_lar != nvm_lar)
                        IWL_INFO(mvm,
index 2ea01238754eb8d1c2470156f0293a2e15988fd6..e4fa50075ffdc3b51c66f4b6587595134da9ade3 100644 (file)
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -194,7 +194,7 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
         * (PCIe power is lost before PERST# is asserted), causing ME FW
         * to lose ownership and not being able to obtain it back.
         */
-       if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
+       if (!mvm->trans->cfg->apmg_not_supported)
                iwl_set_bits_mask_prph(mvm->trans, APMG_PS_CTRL_REG,
                                       APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
                                       ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
@@ -238,15 +238,16 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
        RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, false),
 
        RX_HANDLER(SCAN_ITERATION_COMPLETE,
-                  iwl_mvm_rx_scan_offload_iter_complete_notif, false),
+                  iwl_mvm_rx_lmac_scan_iter_complete_notif, false),
        RX_HANDLER(SCAN_OFFLOAD_COMPLETE,
-                  iwl_mvm_rx_scan_offload_complete_notif, true),
-       RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_scan_offload_results,
+                  iwl_mvm_rx_lmac_scan_complete_notif, true),
+       RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_scan_match_found,
                   false),
        RX_HANDLER(SCAN_COMPLETE_UMAC, iwl_mvm_rx_umac_scan_complete_notif,
                   true),
+       RX_HANDLER(SCAN_ITERATION_COMPLETE_UMAC,
+                  iwl_mvm_rx_umac_scan_iter_complete_notif, false),
 
-       RX_HANDLER(RADIO_VERSION_NOTIFICATION, iwl_mvm_rx_radio_ver, false),
        RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif, false),
 
        RX_HANDLER(MISSED_BEACONS_NOTIFICATION, iwl_mvm_rx_missed_beacons_notif,
@@ -280,17 +281,11 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = {
        CMD(BINDING_CONTEXT_CMD),
        CMD(TIME_QUOTA_CMD),
        CMD(NON_QOS_TX_COUNTER_CMD),
-       CMD(RADIO_VERSION_NOTIFICATION),
-       CMD(SCAN_REQUEST_CMD),
-       CMD(SCAN_ABORT_CMD),
-       CMD(SCAN_START_NOTIFICATION),
-       CMD(SCAN_RESULTS_NOTIFICATION),
-       CMD(SCAN_COMPLETE_NOTIFICATION),
+       CMD(DC2DC_CONFIG_CMD),
        CMD(NVM_ACCESS_CMD),
        CMD(PHY_CONFIGURATION_CMD),
        CMD(CALIB_RES_NOTIF_PHY_DB),
        CMD(SET_CALIB_DEFAULT_CMD),
-       CMD(CALIBRATION_COMPLETE_NOTIFICATION),
        CMD(ADD_STA_KEY),
        CMD(ADD_STA),
        CMD(REMOVE_STA),
@@ -359,6 +354,7 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = {
        CMD(TDLS_CHANNEL_SWITCH_NOTIFICATION),
        CMD(TDLS_CONFIG_CMD),
        CMD(MCC_UPDATE_CMD),
+       CMD(SCAN_ITERATION_COMPLETE_UMAC),
 };
 #undef CMD
 
@@ -520,15 +516,12 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 
        min_backoff = calc_min_backoff(trans, cfg);
        iwl_mvm_tt_initialize(mvm, min_backoff);
-       /* set the nvm_file_name according to priority */
-       if (iwlwifi_mod_params.nvm_file) {
+
+       if (iwlwifi_mod_params.nvm_file)
                mvm->nvm_file_name = iwlwifi_mod_params.nvm_file;
-       } else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
-               if (CSR_HW_REV_STEP(trans->hw_rev) == SILICON_B_STEP)
-                       mvm->nvm_file_name = mvm->cfg->default_nvm_file_B_step;
-               else
-                       mvm->nvm_file_name = mvm->cfg->default_nvm_file_C_step;
-       }
+       else
+               IWL_DEBUG_EEPROM(mvm->trans->dev,
+                                "working without external nvm file\n");
 
        if (WARN(cfg->no_power_up_nic_in_init && !mvm->nvm_file_name,
                 "not allowing power-up and not having nvm_file\n"))
index 33cd68ae7bf9362539fa1a99e34686e0cca3de2b..daff1d0a8e4adad6ebf2cdc5e811411776d63913 100644 (file)
@@ -1,7 +1,7 @@
 /******************************************************************************
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
@@ -138,7 +138,7 @@ struct rs_tx_column;
 
 typedef bool (*allow_column_func_t) (struct iwl_mvm *mvm,
                                     struct ieee80211_sta *sta,
-                                    struct iwl_scale_tbl_info *tbl,
+                                    struct rs_rate *rate,
                                     const struct rs_tx_column *next_col);
 
 struct rs_tx_column {
@@ -150,14 +150,14 @@ struct rs_tx_column {
 };
 
 static bool rs_ant_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-                        struct iwl_scale_tbl_info *tbl,
+                        struct rs_rate *rate,
                         const struct rs_tx_column *next_col)
 {
        return iwl_mvm_bt_coex_is_ant_avail(mvm, next_col->ant);
 }
 
 static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-                         struct iwl_scale_tbl_info *tbl,
+                         struct rs_rate *rate,
                          const struct rs_tx_column *next_col)
 {
        struct iwl_mvm_sta *mvmsta;
@@ -187,7 +187,7 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 }
 
 static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-                         struct iwl_scale_tbl_info *tbl,
+                         struct rs_rate *rate,
                          const struct rs_tx_column *next_col)
 {
        if (!sta->ht_cap.ht_supported)
@@ -197,10 +197,9 @@ static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 }
 
 static bool rs_sgi_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-                        struct iwl_scale_tbl_info *tbl,
+                        struct rs_rate *rate,
                         const struct rs_tx_column *next_col)
 {
-       struct rs_rate *rate = &tbl->rate;
        struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
        struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
 
@@ -1128,8 +1127,8 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
        u32 tx_resp_hwrate = (uintptr_t)info->status.status_driver_data[1];
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta;
-       bool allow_ant_mismatch = mvm->fw->ucode_capa.api[0] &
-               IWL_UCODE_TLV_API_LQ_SS_PARAMS;
+       bool allow_ant_mismatch = fw_has_api(&mvm->fw->ucode_capa,
+                                            IWL_UCODE_TLV_API_LQ_SS_PARAMS);
 
        /* Treat uninitialized rate scaling data same as non-existing. */
        if (!lq_sta) {
@@ -1659,7 +1658,8 @@ static enum rs_column rs_get_next_column(struct iwl_mvm *mvm,
 
                for (j = 0; j < MAX_COLUMN_CHECKS; j++) {
                        allow_func = next_col->checks[j];
-                       if (allow_func && !allow_func(mvm, sta, tbl, next_col))
+                       if (allow_func && !allow_func(mvm, sta, &tbl->rate,
+                                                     next_col))
                                break;
                }
 
@@ -2136,7 +2136,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
        }
 
        /* current tx rate */
-       index = lq_sta->last_txrate_idx;
+       index = rate->index;
 
        /* rates available for this association, and for modulation mode */
        rate_mask = rs_get_supported_rates(lq_sta, rate);
@@ -2184,14 +2184,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
                 * or search for a new one? */
                rs_stay_in_table(lq_sta, false);
 
-               goto out;
-       }
-       /* Else we have enough samples; calculate estimate of
-        * actual average throughput */
-       if (window->average_tpt != ((window->success_ratio *
-                       tbl->expected_tpt[index] + 64) / 128)) {
-               window->average_tpt = ((window->success_ratio *
-                                       tbl->expected_tpt[index] + 64) / 128);
+               return;
        }
 
        /* If we are searching for better modulation mode, check success. */
@@ -2403,9 +2396,6 @@ lq_update:
                        rs_set_stay_in_table(mvm, 0, lq_sta);
                }
        }
-
-out:
-       lq_sta->last_txrate_idx = index;
 }
 
 struct rs_init_rate_info {
@@ -2548,7 +2538,6 @@ static void rs_initialize_lq(struct iwl_mvm *mvm,
        rate = &tbl->rate;
 
        rs_get_initial_rate(mvm, lq_sta, band, rate);
-       lq_sta->last_txrate_idx = rate->index;
 
        WARN_ON_ONCE(rate->ant != ANT_A && rate->ant != ANT_B);
        if (rate->ant == ANT_A)
@@ -2725,7 +2714,7 @@ static void rs_vht_init(struct iwl_mvm *mvm,
            (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK))
                lq_sta->stbc_capable = true;
 
-       if ((mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
+       if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
            (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) &&
            (vht_cap->cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE))
                lq_sta->bfer_capable = true;
@@ -3009,7 +2998,7 @@ static void rs_build_rates_table(struct iwl_mvm *mvm,
        valid_tx_ant = iwl_mvm_get_valid_tx_ant(mvm);
 
        /* TODO: remove old API when min FW API hits 14 */
-       if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LQ_SS_PARAMS) &&
+       if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_LQ_SS_PARAMS) &&
            rs_stbc_allow(mvm, sta, lq_sta))
                rate.stbc = true;
 
@@ -3223,12 +3212,9 @@ static void rs_fill_lq_cmd(struct iwl_mvm *mvm,
 
        rs_build_rates_table(mvm, sta, lq_sta, initial_rate);
 
-       if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LQ_SS_PARAMS)
+       if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_LQ_SS_PARAMS))
                rs_set_lq_ss_params(mvm, sta, lq_sta, initial_rate);
 
-       if (num_of_ant(initial_rate->ant) == 1)
-               lq_cmd->single_stream_ant_msk = initial_rate->ant;
-
        mvmsta = iwl_mvm_sta_from_mac80211(sta);
        mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
 
index e4aa9346a23103f4eb660d087d0f7b8542334e42..2a3da314305ab548e3c72c7a5479e6e666be5184 100644 (file)
@@ -322,8 +322,6 @@ struct iwl_lq_sta {
        struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
        u8 tx_agg_tid_en;
 
-       /* used to be in sta_info */
-       int last_txrate_idx;
        /* last tx rate_n_flags */
        u32 last_rate_n_flags;
        /* packets destined for this STA are aggregated */
index d6314ddf57b5d9638fcfd2fcf6ba917bb136779f..8f1d93b7a13aa1059a4844c64f825822ee07605a 100644 (file)
@@ -570,7 +570,7 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
        };
        u32 temperature;
 
-       if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_STATS_V10) {
+       if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STATS_V10)) {
                struct iwl_notif_statistics_v10 *stats = (void *)&pkt->data;
 
                if (iwl_rx_packet_payload_len(pkt) != v10_len)
@@ -610,7 +610,7 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
        /* Only handle rx statistics temperature changes if async temp
         * notifications are not supported
         */
-       if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_ASYNC_DTM))
+       if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_ASYNC_DTM))
                iwl_mvm_tt_temp_changed(mvm, temperature);
 
        ieee80211_iterate_active_interfaces(mvm->hw,
index 1075a213bd6a87156e44ab410ac566cf18bdacc3..5de144968723d4f2a5446f0d6f9fc0607baf2efb 100644 (file)
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
 #include <net/mac80211.h>
 
 #include "mvm.h"
-#include "iwl-eeprom-parse.h"
 #include "fw-api-scan.h"
 
-#define IWL_PLCP_QUIET_THRESH 1
-#define IWL_ACTIVE_QUIET_TIME 10
 #define IWL_DENSE_EBS_SCAN_RATIO 5
 #define IWL_SPARSE_EBS_SCAN_RATIO 1
 
@@ -79,23 +76,31 @@ struct iwl_mvm_scan_params {
        u32 max_out_time;
        u32 suspend_time;
        bool passive_fragmented;
+       u32 n_channels;
+       u16 delay;
+       int n_ssids;
+       struct cfg80211_ssid *ssids;
+       struct ieee80211_channel **channels;
+       u16 interval; /* interval between scans (in secs) */
+       u32 flags;
+       u8 *mac_addr;
+       u8 *mac_addr_mask;
+       bool no_cck;
+       bool pass_all;
+       int n_match_sets;
+       struct iwl_scan_probe_req preq;
+       struct cfg80211_match_set *match_sets;
        struct _dwell {
                u16 passive;
                u16 active;
                u16 fragmented;
        } dwell[IEEE80211_NUM_BANDS];
+       struct {
+               u8 iterations;
+               u8 full_scan_mul; /* not used for UMAC */
+       } schedule[2];
 };
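
Folding every request detail (SSIDs, channels, MAC randomization, schedule) into iwl_mvm_scan_params means a single entry point can feed either firmware scan API. A minimal sketch of that dispatch shape, assuming the UMAC path is taken whenever the firmware advertises it (all names here are illustrative):

#include <stdbool.h>

struct scan_params {
        int n_ssids;
        int n_channels;
        /* ... */
};

typedef int (*scan_builder_t)(const struct scan_params *p);

/* The params are filled once; neither builder needs to know which
 * cfg80211 request type produced them.
 */
static int start_scan(const struct scan_params *p, bool umac_supported,
                      scan_builder_t lmac, scan_builder_t umac)
{
        return umac_supported ? umac(p) : lmac(p);
}
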
 
-enum iwl_umac_scan_uid_type {
-       IWL_UMAC_SCAN_UID_REG_SCAN      = BIT(0),
-       IWL_UMAC_SCAN_UID_SCHED_SCAN    = BIT(1),
-       IWL_UMAC_SCAN_UID_ALL           = IWL_UMAC_SCAN_UID_REG_SCAN |
-                                         IWL_UMAC_SCAN_UID_SCHED_SCAN,
-};
-
-static int iwl_umac_scan_stop(struct iwl_mvm *mvm,
-                             enum iwl_umac_scan_uid_type type, bool notify);
-
 static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm)
 {
        if (mvm->scan_rx_ant != ANT_NONE)
@@ -142,28 +147,6 @@ iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum ieee80211_band band,
                return cpu_to_le32(IWL_RATE_6M_PLCP | tx_ant);
 }
 
-/*
- * We insert the SSIDs in an inverted order, because the FW will
- * invert it back. The most prioritized SSID, which is first in the
- * request list, is not copied here, but inserted directly to the probe
- * request.
- */
-static void iwl_mvm_scan_fill_ssids(struct iwl_ssid_ie *cmd_ssid,
-                                   struct cfg80211_ssid *ssids,
-                                   int n_ssids, int first)
-{
-       int fw_idx, req_idx;
-
-       for (req_idx = n_ssids - 1, fw_idx = 0; req_idx >= first;
-            req_idx--, fw_idx++) {
-               cmd_ssid[fw_idx].id = WLAN_EID_SSID;
-               cmd_ssid[fw_idx].len = ssids[req_idx].ssid_len;
-               memcpy(cmd_ssid[fw_idx].ssid,
-                      ssids[req_idx].ssid,
-                      ssids[req_idx].ssid_len);
-       }
-}
-
 /*
  * If req->n_ssids > 0, it means we should do an active scan.
  * In case of active scan w/o directed scan, we receive a zero-length SSID
@@ -177,7 +160,7 @@ static void iwl_mvm_scan_fill_ssids(struct iwl_ssid_ie *cmd_ssid,
 static u16 iwl_mvm_get_active_dwell(struct iwl_mvm *mvm,
                                    enum ieee80211_band band, int n_ssids)
 {
-       if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BASIC_DWELL)
+       if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BASIC_DWELL))
                return 10;
        if (band == IEEE80211_BAND_2GHZ)
                return 20 + 3 * (n_ssids + 1);
@@ -187,7 +170,7 @@ static u16 iwl_mvm_get_active_dwell(struct iwl_mvm *mvm,
 static u16 iwl_mvm_get_passive_dwell(struct iwl_mvm *mvm,
                                     enum ieee80211_band band)
 {
-       if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BASIC_DWELL)
+       if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BASIC_DWELL))
                return 110;
        return band == IEEE80211_BAND_2GHZ ? 100 + 20 : 100 + 10;
 }
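
For a concrete feel of the dwell values computed above (in TUs; with the BASIC_DWELL firmware API both helpers short-circuit to fixed 10/110), the arithmetic for a two-SSID request works out as follows:

#include <stdio.h>

int main(void)
{
        int n_ssids = 2;

        printf("2.4 GHz active:  %d\n", 20 + 3 * (n_ssids + 1)); /* 29 */
        printf("2.4 GHz passive: %d\n", 100 + 20);               /* 120 */
        printf("5 GHz passive:   %d\n", 100 + 10);               /* 110 */
        return 0;
}
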
@@ -203,10 +186,9 @@ static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac,
                *global_cnt += 1;
 }
 
-static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
-                                    struct ieee80211_vif *vif,
-                                    int n_ssids, u32 flags,
-                                    struct iwl_mvm_scan_params *params)
+static void iwl_mvm_scan_calc_dwell(struct iwl_mvm *mvm,
+                                   struct ieee80211_vif *vif,
+                                   struct iwl_mvm_scan_params *params)
 {
        int global_cnt = 0;
        enum ieee80211_band band;
@@ -216,7 +198,6 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
                                            IEEE80211_IFACE_ITER_NORMAL,
                                            iwl_mvm_scan_condition_iterator,
                                            &global_cnt);
-
        if (!global_cnt)
                goto not_bound;
 
@@ -224,8 +205,9 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
        params->max_out_time = 120;
 
        if (iwl_mvm_low_latency(mvm)) {
-               if (mvm->fw->ucode_capa.api[0] &
-                   IWL_UCODE_TLV_API_FRAGMENTED_SCAN) {
+               if (fw_has_api(&mvm->fw->ucode_capa,
+                              IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) {
                        params->suspend_time = 105;
                        /*
                         * If there is more than one active interface make
@@ -239,8 +221,9 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
                }
        }
 
-       if (frag_passive_dwell && (mvm->fw->ucode_capa.api[0] &
-                                  IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) {
+       if (frag_passive_dwell &&
+           fw_has_api(&mvm->fw->ucode_capa,
+                      IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) {
                /*
                 * P2P device scan should not be fragmented to avoid negative
                 * impact on P2P device discovery. Configure max_out_time to be
@@ -257,7 +240,8 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
                }
        }
 
-       if (flags & NL80211_SCAN_FLAG_LOW_PRIORITY)
+       if ((params->flags & NL80211_SCAN_FLAG_LOW_PRIORITY) &&
+           (params->max_out_time > 200))
                params->max_out_time = 200;
 
 not_bound:
@@ -268,20 +252,34 @@ not_bound:
 
                params->dwell[band].passive = iwl_mvm_get_passive_dwell(mvm,
                                                                        band);
-               params->dwell[band].active = iwl_mvm_get_active_dwell(mvm, band,
-                                                                     n_ssids);
+               params->dwell[band].active =
+                       iwl_mvm_get_active_dwell(mvm, band, params->n_ssids);
        }
+
+       IWL_DEBUG_SCAN(mvm,
+                      "scan parameters: max_out_time %d, suspend_time %d, passive_fragmented %d\n",
+                      params->max_out_time, params->suspend_time,
+                      params->passive_fragmented);
+       IWL_DEBUG_SCAN(mvm,
+                      "dwell[IEEE80211_BAND_2GHZ]: passive %d, active %d, fragmented %d\n",
+                      params->dwell[IEEE80211_BAND_2GHZ].passive,
+                      params->dwell[IEEE80211_BAND_2GHZ].active,
+                      params->dwell[IEEE80211_BAND_2GHZ].fragmented);
+       IWL_DEBUG_SCAN(mvm,
+                      "dwell[IEEE80211_BAND_5GHZ]: passive %d, active %d, fragmented %d\n",
+                      params->dwell[IEEE80211_BAND_5GHZ].passive,
+                      params->dwell[IEEE80211_BAND_5GHZ].active,
+                      params->dwell[IEEE80211_BAND_5GHZ].fragmented);
 }
 
 static inline bool iwl_mvm_rrm_scan_needed(struct iwl_mvm *mvm)
 {
        /* require rrm scan whenever the fw supports it */
-       return mvm->fw->ucode_capa.capa[0] &
-              IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT;
+       return fw_has_capa(&mvm->fw->ucode_capa,
+                          IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT);
 }
 
-static int iwl_mvm_max_scan_ie_fw_cmd_room(struct iwl_mvm *mvm,
-                                          bool is_sched_scan)
+static int iwl_mvm_max_scan_ie_fw_cmd_room(struct iwl_mvm *mvm)
 {
        int max_probe_len;
 
@@ -297,9 +295,9 @@ static int iwl_mvm_max_scan_ie_fw_cmd_room(struct iwl_mvm *mvm,
        return max_probe_len;
 }
 
-int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm, bool is_sched_scan)
+int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm)
 {
-       int max_ie_len = iwl_mvm_max_scan_ie_fw_cmd_room(mvm, is_sched_scan);
+       int max_ie_len = iwl_mvm_max_scan_ie_fw_cmd_room(mvm);
 
        /* TODO: [BUG] This function should return the maximum allowed size of
         * scan IEs, however the LMAC scan api contains both 2GHZ and 5GHZ IEs
@@ -314,22 +312,41 @@ int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm, bool is_sched_scan)
        return max_ie_len;
 }
 
-int iwl_mvm_rx_scan_offload_iter_complete_notif(struct iwl_mvm *mvm,
-                                               struct iwl_rx_cmd_buffer *rxb,
-                                               struct iwl_device_cmd *cmd)
+static u8 *iwl_mvm_dump_channel_list(struct iwl_scan_results_notif *res,
+                                    int num_res, u8 *buf, size_t buf_size)
+{
+       int i;
+       u8 *pos = buf, *end = buf + buf_size;
+
+       for (i = 0; pos < end && i < num_res; i++)
+               pos += snprintf(pos, end - pos, " %u", res[i].channel);
+
+       /* terminate the string in case the buffer was too short */
+       *(buf + buf_size - 1) = '\0';
+
+       return buf;
+}
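
iwl_mvm_dump_channel_list() leans on two snprintf() properties: the return value is the would-be length, so pos can legally step past end on truncation, and the pos < end loop guard plus the explicit terminator keep the result a valid string anyway. A standalone demo of the same pattern:

#include <stdio.h>

int main(void)
{
        char buf[8];
        char *pos = buf, *end = buf + sizeof(buf);
        unsigned int chans[] = { 1, 6, 11, 36, 149 };

        for (int i = 0; pos < end && i < 5; i++)
                pos += snprintf(pos, end - pos, " %u", chans[i]);
        buf[sizeof(buf) - 1] = '\0';    /* terminate in case of truncation */

        printf("%s\n", buf);            /* " 1 6 11" -- rest truncated */
        return 0;
}
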
+
+int iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+                                            struct iwl_rx_cmd_buffer *rxb,
+                                            struct iwl_device_cmd *cmd)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_lmac_scan_complete_notif *notif = (void *)pkt->data;
+       u8 buf[256];
 
        IWL_DEBUG_SCAN(mvm,
-                      "Scan offload iteration complete: status=0x%x scanned channels=%d\n",
-                      notif->status, notif->scanned_channels);
+                      "Scan offload iteration complete: status=0x%x scanned channels=%d channels list: %s\n",
+                      notif->status, notif->scanned_channels,
+                      iwl_mvm_dump_channel_list(notif->results,
+                                                notif->scanned_channels, buf,
+                                                sizeof(buf)));
        return 0;
 }
 
-int iwl_mvm_rx_scan_offload_results(struct iwl_mvm *mvm,
-                                   struct iwl_rx_cmd_buffer *rxb,
-                                   struct iwl_device_cmd *cmd)
+int iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
+                               struct iwl_rx_cmd_buffer *rxb,
+                               struct iwl_device_cmd *cmd)
 {
        IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n");
        ieee80211_sched_scan_results(mvm->hw);
@@ -337,41 +354,78 @@ int iwl_mvm_rx_scan_offload_results(struct iwl_mvm *mvm,
        return 0;
 }
 
-int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
-                                          struct iwl_rx_cmd_buffer *rxb,
-                                          struct iwl_device_cmd *cmd)
+static const char *iwl_mvm_ebs_status_str(enum iwl_scan_ebs_status status)
 {
-       struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       struct iwl_periodic_scan_complete *scan_notif;
+       switch (status) {
+       case IWL_SCAN_EBS_SUCCESS:
+               return "successful";
+       case IWL_SCAN_EBS_INACTIVE:
+               return "inactive";
+       case IWL_SCAN_EBS_FAILED:
+       case IWL_SCAN_EBS_CHAN_NOT_FOUND:
+       default:
+               return "failed";
+       }
+}
 
-       scan_notif = (void *)pkt->data;
+int iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
+                                       struct iwl_rx_cmd_buffer *rxb,
+                                       struct iwl_device_cmd *cmd)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_periodic_scan_complete *scan_notif = (void *)pkt->data;
+       bool aborted = (scan_notif->status == IWL_SCAN_OFFLOAD_ABORTED);
 
        /* scan status must be locked for proper checking */
        lockdep_assert_held(&mvm->mutex);
 
-       IWL_DEBUG_SCAN(mvm,
-                      "%s completed, status %s, EBS status %s\n",
-                      mvm->scan_status == IWL_MVM_SCAN_SCHED ?
-                               "Scheduled scan" : "Scan",
-                      scan_notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
-                               "completed" : "aborted",
-                      scan_notif->ebs_status == IWL_SCAN_EBS_SUCCESS ?
-                               "success" : "failed");
+       /* We first check if we were stopping a scan, in which case we
+        * just clear the stopping flag.  Then we check if it was a
+        * firmware initiated stop, in which case we need to inform
+        * mac80211.
+        * Note that we can have a stopping and a running scan
+        * simultaneously, but we can't have two different types of
+        * scans stopping or running at the same time (since LMAC
+        * doesn't support it).
+        */
+
+       if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_SCHED) {
+               WARN_ON_ONCE(mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR);
+
+               IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s\n",
+                              aborted ? "aborted" : "completed",
+                              iwl_mvm_ebs_status_str(scan_notif->ebs_status));
 
+               mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_SCHED;
+       } else if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR) {
+               IWL_DEBUG_SCAN(mvm, "Regular scan %s, EBS status %s\n",
+                              aborted ? "aborted" : "completed",
+                              iwl_mvm_ebs_status_str(scan_notif->ebs_status));
 
-       /* only call mac80211 completion if the stop was initiated by FW */
-       if (mvm->scan_status == IWL_MVM_SCAN_SCHED) {
-               mvm->scan_status = IWL_MVM_SCAN_NONE;
+               mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_REGULAR;
+       } else if (mvm->scan_status & IWL_MVM_SCAN_SCHED) {
+               WARN_ON_ONCE(mvm->scan_status & IWL_MVM_SCAN_REGULAR);
+
+               IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s (FW)\n",
+                              aborted ? "aborted" : "completed",
+                              iwl_mvm_ebs_status_str(scan_notif->ebs_status));
+
+               mvm->scan_status &= ~IWL_MVM_SCAN_SCHED;
                ieee80211_sched_scan_stopped(mvm->hw);
-       } else if (mvm->scan_status == IWL_MVM_SCAN_OS) {
-               mvm->scan_status = IWL_MVM_SCAN_NONE;
+       } else if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) {
+               IWL_DEBUG_SCAN(mvm, "Regular scan %s, EBS status %s (FW)\n",
+                              aborted ? "aborted" : "completed",
+                              iwl_mvm_ebs_status_str(scan_notif->ebs_status));
+
+               mvm->scan_status &= ~IWL_MVM_SCAN_REGULAR;
                ieee80211_scan_completed(mvm->hw,
                                scan_notif->status == IWL_SCAN_OFFLOAD_ABORTED);
                iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
        }
 
-       if (scan_notif->ebs_status)
-               mvm->last_ebs_successful = false;
+       mvm->last_ebs_successful =
+                       scan_notif->ebs_status == IWL_SCAN_EBS_SUCCESS ||
+                       scan_notif->ebs_status == IWL_SCAN_EBS_INACTIVE;
 
        return 0;
 }
@@ -390,9 +444,12 @@ static int iwl_ssid_exist(u8 *ssid, u8 ssid_len, struct iwl_ssid_ie *ssid_list)
        return -1;
 }
 
-static void iwl_scan_offload_build_ssid(struct cfg80211_sched_scan_request *req,
-                                       struct iwl_ssid_ie *direct_scan,
-                                       u32 *ssid_bitmap, bool basic_ssid)
+/* We insert the SSIDs in an inverted order, because the FW will
+ * invert it back.
+ */
+static void iwl_scan_build_ssids(struct iwl_mvm_scan_params *params,
+                                struct iwl_ssid_ie *ssids,
+                                u32 *ssid_bitmap)
 {
        int i, j;
        int index;
@@ -402,39 +459,41 @@ static void iwl_scan_offload_build_ssid(struct cfg80211_sched_scan_request *req,
         * iwl_mvm_config_sched_scan_profiles() uses the order of these SSIDs to
         * configure the match list.
         */
-       for (i = 0; i < req->n_match_sets && i < PROBE_OPTION_MAX; i++) {
+       for (i = 0, j = params->n_match_sets - 1;
+            j >= 0 && i < PROBE_OPTION_MAX;
+            i++, j--) {
                /* skip empty SSID matchsets */
-               if (!req->match_sets[i].ssid.ssid_len)
+               if (!params->match_sets[j].ssid.ssid_len)
                        continue;
-               direct_scan[i].id = WLAN_EID_SSID;
-               direct_scan[i].len = req->match_sets[i].ssid.ssid_len;
-               memcpy(direct_scan[i].ssid, req->match_sets[i].ssid.ssid,
-                      direct_scan[i].len);
+               ssids[i].id = WLAN_EID_SSID;
+               ssids[i].len = params->match_sets[j].ssid.ssid_len;
+               memcpy(ssids[i].ssid, params->match_sets[j].ssid.ssid,
+                      ssids[i].len);
        }
 
        /* add SSIDs from scan SSID list */
        *ssid_bitmap = 0;
-       for (j = 0; j < req->n_ssids && i < PROBE_OPTION_MAX; j++) {
-               index = iwl_ssid_exist(req->ssids[j].ssid,
-                                      req->ssids[j].ssid_len,
-                                      direct_scan);
+       for (j = params->n_ssids - 1;
+            j >= 0 && i < PROBE_OPTION_MAX;
+            i++, j--) {
+               index = iwl_ssid_exist(params->ssids[j].ssid,
+                                      params->ssids[j].ssid_len,
+                                      ssids);
                if (index < 0) {
-                       if (!req->ssids[j].ssid_len && basic_ssid)
-                               continue;
-                       direct_scan[i].id = WLAN_EID_SSID;
-                       direct_scan[i].len = req->ssids[j].ssid_len;
-                       memcpy(direct_scan[i].ssid, req->ssids[j].ssid,
-                              direct_scan[i].len);
-                       *ssid_bitmap |= BIT(i + 1);
-                       i++;
+                       ssids[i].id = WLAN_EID_SSID;
+                       ssids[i].len = params->ssids[j].ssid_len;
+                       memcpy(ssids[i].ssid, params->ssids[j].ssid,
+                              ssids[i].len);
+                       *ssid_bitmap |= BIT(i);
                } else {
-                       *ssid_bitmap |= BIT(index + 1);
+                       *ssid_bitmap |= BIT(index);
                }
        }
 }
 
-int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
-                                      struct cfg80211_sched_scan_request *req)
+static int
+iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
+                                  struct cfg80211_sched_scan_request *req)
 {
        struct iwl_scan_offload_profile *profile;
        struct iwl_scan_offload_profile_cfg *profile_cfg;
@@ -515,30 +574,7 @@ static bool iwl_mvm_scan_pass_all(struct iwl_mvm *mvm,
        return true;
 }
 
-int iwl_mvm_scan_offload_start(struct iwl_mvm *mvm,
-                              struct ieee80211_vif *vif,
-                              struct cfg80211_sched_scan_request *req,
-                              struct ieee80211_scan_ies *ies)
-{
-       int ret;
-
-       if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) {
-               ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
-               if (ret)
-                       return ret;
-               ret = iwl_mvm_sched_scan_umac(mvm, vif, req, ies);
-       } else {
-               mvm->scan_status = IWL_MVM_SCAN_SCHED;
-               ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
-               if (ret)
-                       return ret;
-               ret = iwl_mvm_unified_sched_scan_lmac(mvm, vif, req, ies);
-       }
-
-       return ret;
-}
-
-static int iwl_mvm_send_scan_offload_abort(struct iwl_mvm *mvm)
+static int iwl_mvm_lmac_scan_abort(struct iwl_mvm *mvm)
 {
        int ret;
        struct iwl_host_cmd cmd = {
@@ -546,12 +582,6 @@ static int iwl_mvm_send_scan_offload_abort(struct iwl_mvm *mvm)
        };
        u32 status;
 
-       /* Exit instantly with error when device is not ready
-        * to receive scan abort command or it does not perform
-        * scheduled scan currently */
-       if (mvm->scan_status == IWL_MVM_SCAN_NONE)
-               return -EIO;
-
        ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status);
        if (ret)
                return ret;
@@ -571,69 +601,9 @@ static int iwl_mvm_send_scan_offload_abort(struct iwl_mvm *mvm)
        return ret;
 }
 
-int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
-{
-       int ret;
-       struct iwl_notification_wait wait_scan_done;
-       static const u8 scan_done_notif[] = { SCAN_OFFLOAD_COMPLETE, };
-       bool sched = mvm->scan_status == IWL_MVM_SCAN_SCHED;
-
-       lockdep_assert_held(&mvm->mutex);
-
-       if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
-               return iwl_umac_scan_stop(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN,
-                                         notify);
-
-       if (mvm->scan_status == IWL_MVM_SCAN_NONE)
-               return 0;
-
-       if (iwl_mvm_is_radio_killed(mvm)) {
-               ret = 0;
-               goto out;
-       }
-
-       iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done,
-                                  scan_done_notif,
-                                  ARRAY_SIZE(scan_done_notif),
-                                  NULL, NULL);
-
-       ret = iwl_mvm_send_scan_offload_abort(mvm);
-       if (ret) {
-               IWL_DEBUG_SCAN(mvm, "Send stop %sscan failed %d\n",
-                              sched ? "offloaded " : "", ret);
-               iwl_remove_notification(&mvm->notif_wait, &wait_scan_done);
-               goto out;
-       }
-
-       IWL_DEBUG_SCAN(mvm, "Successfully sent stop %sscan\n",
-                      sched ? "offloaded " : "");
-
-       ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ);
-out:
-       /*
-        * Clear the scan status so the next scan requests will succeed. This
-        * also ensures the Rx handler doesn't do anything, as the scan was
-        * stopped from above. Since the rx handler won't do anything now,
-        * we have to release the scan reference here.
-        */
-       if (mvm->scan_status == IWL_MVM_SCAN_OS)
-               iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
-
-       mvm->scan_status = IWL_MVM_SCAN_NONE;
-
-       if (notify) {
-               if (sched)
-                       ieee80211_sched_scan_stopped(mvm->hw);
-               else
-                       ieee80211_scan_completed(mvm->hw, true);
-       }
-
-       return ret;
-}
-
-static void iwl_mvm_unified_scan_fill_tx_cmd(struct iwl_mvm *mvm,
-                                            struct iwl_scan_req_tx_cmd *tx_cmd,
-                                            bool no_cck)
+static void iwl_mvm_scan_fill_tx_cmd(struct iwl_mvm *mvm,
+                                    struct iwl_scan_req_tx_cmd *tx_cmd,
+                                    bool no_cck)
 {
        tx_cmd[0].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
                                         TX_CMD_FLG_BT_DIS);
@@ -654,7 +624,7 @@ static void
 iwl_mvm_lmac_scan_cfg_channels(struct iwl_mvm *mvm,
                               struct ieee80211_channel **channels,
                               int n_channels, u32 ssid_bitmap,
-                              struct iwl_scan_req_unified_lmac *cmd)
+                              struct iwl_scan_req_lmac *cmd)
 {
        struct iwl_scan_channel_cfg_lmac *channel_cfg = (void *)&cmd->data;
        int i;
@@ -707,13 +677,14 @@ static u8 *iwl_mvm_copy_and_insert_ds_elem(struct iwl_mvm *mvm, const u8 *ies,
 }
 
 static void
-iwl_mvm_build_unified_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
-                                struct ieee80211_scan_ies *ies,
-                                struct iwl_scan_probe_req *preq,
-                                const u8 *mac_addr, const u8 *mac_addr_mask)
+iwl_mvm_build_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+                        struct ieee80211_scan_ies *ies,
+                        struct iwl_mvm_scan_params *params)
 {
-       struct ieee80211_mgmt *frame = (struct ieee80211_mgmt *)preq->buf;
+       struct ieee80211_mgmt *frame = (void *)params->preq.buf;
        u8 *pos, *newpos;
+       const u8 *mac_addr = params->flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
+               params->mac_addr : NULL;
 
        /*
         * Unfortunately, right now the offload scan doesn't support randomising
@@ -722,7 +693,8 @@ iwl_mvm_build_unified_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
         * random, only when it's restarted, but at least that helps a bit.
         */
        if (mac_addr)
-               get_random_mask_addr(frame->sa, mac_addr, mac_addr_mask);
+               get_random_mask_addr(frame->sa, mac_addr,
+                                    params->mac_addr_mask);
        else
                memcpy(frame->sa, vif->addr, ETH_ALEN);
 
@@ -735,245 +707,167 @@ iwl_mvm_build_unified_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        *pos++ = WLAN_EID_SSID;
        *pos++ = 0;
 
-       preq->mac_header.offset = 0;
-       preq->mac_header.len = cpu_to_le16(24 + 2);
+       params->preq.mac_header.offset = 0;
+       params->preq.mac_header.len = cpu_to_le16(24 + 2);
 
        /* Insert ds parameter set element on 2.4 GHz band */
        newpos = iwl_mvm_copy_and_insert_ds_elem(mvm,
                                                 ies->ies[IEEE80211_BAND_2GHZ],
                                                 ies->len[IEEE80211_BAND_2GHZ],
                                                 pos);
-       preq->band_data[0].offset = cpu_to_le16(pos - preq->buf);
-       preq->band_data[0].len = cpu_to_le16(newpos - pos);
+       params->preq.band_data[0].offset = cpu_to_le16(pos - params->preq.buf);
+       params->preq.band_data[0].len = cpu_to_le16(newpos - pos);
        pos = newpos;
 
        memcpy(pos, ies->ies[IEEE80211_BAND_5GHZ],
               ies->len[IEEE80211_BAND_5GHZ]);
-       preq->band_data[1].offset = cpu_to_le16(pos - preq->buf);
-       preq->band_data[1].len = cpu_to_le16(ies->len[IEEE80211_BAND_5GHZ]);
+       params->preq.band_data[1].offset = cpu_to_le16(pos - params->preq.buf);
+       params->preq.band_data[1].len =
+               cpu_to_le16(ies->len[IEEE80211_BAND_5GHZ]);
        pos += ies->len[IEEE80211_BAND_5GHZ];
 
        memcpy(pos, ies->common_ies, ies->common_ie_len);
-       preq->common_data.offset = cpu_to_le16(pos - preq->buf);
-       preq->common_data.len = cpu_to_le16(ies->common_ie_len);
+       params->preq.common_data.offset = cpu_to_le16(pos - params->preq.buf);
+       params->preq.common_data.len = cpu_to_le16(ies->common_ie_len);
 }
 
-static void
-iwl_mvm_build_generic_unified_scan_cmd(struct iwl_mvm *mvm,
-                                      struct iwl_scan_req_unified_lmac *cmd,
-                                      struct iwl_mvm_scan_params *params)
+static __le32 iwl_mvm_scan_priority(struct iwl_mvm *mvm,
+                                   enum iwl_scan_priority_ext prio)
+{
+       if (fw_has_api(&mvm->fw->ucode_capa,
+                      IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY))
+               return cpu_to_le32(prio);
+
+       if (prio <= IWL_SCAN_PRIORITY_EXT_2)
+               return cpu_to_le32(IWL_SCAN_PRIORITY_LOW);
+
+       if (prio <= IWL_SCAN_PRIORITY_EXT_4)
+               return cpu_to_le32(IWL_SCAN_PRIORITY_MEDIUM);
+
+       return cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
+}
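
The helper above collapses the extended 0-7 priority range onto the three legacy levels when the firmware lacks IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY. A minimal standalone sketch of the same collapsing rule, with hypothetical enum values mirroring the thresholds used above:

#include <stdio.h>

/* Hypothetical stand-ins for the extended (0..7) and legacy enums. */
enum { PRIO_EXT_2 = 2, PRIO_EXT_4 = 4 };
enum { PRIO_LOW, PRIO_MEDIUM, PRIO_HIGH };

/* Same rule as iwl_mvm_scan_priority() on old firmware:
 * 0-2 -> LOW, 3-4 -> MEDIUM, 5-7 -> HIGH.
 */
static int legacy_priority(int ext_prio)
{
        if (ext_prio <= PRIO_EXT_2)
                return PRIO_LOW;
        if (ext_prio <= PRIO_EXT_4)
                return PRIO_MEDIUM;
        return PRIO_HIGH;
}

int main(void)
{
        int p;

        for (p = 0; p <= 7; p++)
                printf("ext %d -> legacy %d\n", p, legacy_priority(p));
        return 0;
}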
+
+static void iwl_mvm_scan_lmac_dwell(struct iwl_mvm *mvm,
+                                   struct iwl_scan_req_lmac *cmd,
+                                   struct iwl_mvm_scan_params *params)
 {
-       memset(cmd, 0, ksize(cmd));
        cmd->active_dwell = params->dwell[IEEE80211_BAND_2GHZ].active;
        cmd->passive_dwell = params->dwell[IEEE80211_BAND_2GHZ].passive;
        if (params->passive_fragmented)
                cmd->fragmented_dwell =
                                params->dwell[IEEE80211_BAND_2GHZ].fragmented;
-       cmd->rx_chain_select = iwl_mvm_scan_rx_chain(mvm);
        cmd->max_out_time = cpu_to_le32(params->max_out_time);
        cmd->suspend_time = cpu_to_le32(params->suspend_time);
-       cmd->scan_prio = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
-       cmd->iter_num = cpu_to_le32(1);
-
-       if (iwl_mvm_rrm_scan_needed(mvm))
-               cmd->scan_flags |=
-                       cpu_to_le32(IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED);
+       cmd->scan_prio = iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
 }
 
-int iwl_mvm_unified_scan_lmac(struct iwl_mvm *mvm,
-                             struct ieee80211_vif *vif,
-                             struct ieee80211_scan_request *req)
+static inline bool iwl_mvm_scan_fits(struct iwl_mvm *mvm, int n_ssids,
+                                    struct ieee80211_scan_ies *ies,
+                                    int n_channels)
 {
-       struct iwl_host_cmd hcmd = {
-               .id = SCAN_OFFLOAD_REQUEST_CMD,
-               .len = { sizeof(struct iwl_scan_req_unified_lmac) +
-                        sizeof(struct iwl_scan_channel_cfg_lmac) *
-                               mvm->fw->ucode_capa.n_scan_channels +
-                        sizeof(struct iwl_scan_probe_req), },
-               .data = { mvm->scan_cmd, },
-               .dataflags = { IWL_HCMD_DFL_NOCOPY, },
-       };
-       struct iwl_scan_req_unified_lmac *cmd = mvm->scan_cmd;
-       struct iwl_scan_probe_req *preq;
-       struct iwl_mvm_scan_params params = {};
-       u32 flags;
-       u32 ssid_bitmap = 0;
-       int ret, i;
-
-       lockdep_assert_held(&mvm->mutex);
-
-       /* we should have failed registration if scan_cmd was NULL */
-       if (WARN_ON(mvm->scan_cmd == NULL))
-               return -ENOMEM;
-
-       if (req->req.n_ssids > PROBE_OPTION_MAX ||
-           req->ies.common_ie_len + req->ies.len[NL80211_BAND_2GHZ] +
-           req->ies.len[NL80211_BAND_5GHZ] >
-               iwl_mvm_max_scan_ie_fw_cmd_room(mvm, false) ||
-           req->req.n_channels > mvm->fw->ucode_capa.n_scan_channels)
-               return -ENOBUFS;
+       return ((n_ssids <= PROBE_OPTION_MAX) &&
+               (n_channels <= mvm->fw->ucode_capa.n_scan_channels) &&
+               (ies->common_ie_len +
+                ies->len[NL80211_BAND_2GHZ] +
+                ies->len[NL80211_BAND_5GHZ] <=
+                iwl_mvm_max_scan_ie_fw_cmd_room(mvm)));
+}
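
iwl_mvm_scan_fits() centralizes the request validation that the regular and sched start paths previously open-coded. A standalone sketch of the same check, with made-up limits standing in for the firmware-reported ones (all three conditions must hold, hence the logical &&):

#include <stdbool.h>
#include <stddef.h>

/* Illustrative limits; the real values come from the firmware TLVs. */
#define PROBE_OPTION_MAX        20
#define FW_SCAN_CHANNELS        50      /* hypothetical n_scan_channels */
#define FW_IE_ROOM              512     /* hypothetical max IE room */

static bool scan_fits(int n_ssids, size_t ie_len, int n_channels)
{
        return n_ssids <= PROBE_OPTION_MAX &&
               n_channels <= FW_SCAN_CHANNELS &&
               ie_len <= FW_IE_ROOM;
}

int main(void)
{
        return scan_fits(4, 128, 11) ? 0 : 1;   /* fits -> exit 0 */
}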
 
-       mvm->scan_status = IWL_MVM_SCAN_OS;
+static inline bool iwl_mvm_scan_use_ebs(struct iwl_mvm *mvm,
+                                       struct ieee80211_vif *vif,
+                                       int n_iterations)
+{
+       const struct iwl_ucode_capabilities *capa = &mvm->fw->ucode_capa;
 
-       iwl_mvm_scan_calc_params(mvm, vif, req->req.n_ssids, req->req.flags,
-                                &params);
+       /* We can only use EBS if:
+        *      1. the feature is supported;
+        *      2. the last EBS was successful;
+        *      3. if it's a single scan, the single scan EBS API is supported;
+        *      4. it's not a p2p find operation.
+        */
+       return ((capa->flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT) &&
+               mvm->last_ebs_successful &&
+               (n_iterations > 1 ||
+                fw_has_api(capa, IWL_UCODE_TLV_API_SINGLE_SCAN_EBS)) &&
+               vif->type != NL80211_IFTYPE_P2P_DEVICE);
+}
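
The EBS (energy-based scan) decision is likewise factored into a single predicate instead of being duplicated with slightly different capability checks. A sketch with hypothetical capability bits (the real ones are the IWL_UCODE_TLV_* flags tested above):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical capability bits for illustration only. */
#define CAPA_EBS_SUPPORT        (1u << 0)
#define API_SINGLE_SCAN_EBS     (1u << 1)

/* Same four conditions as iwl_mvm_scan_use_ebs(): feature supported,
 * last EBS succeeded, multi-iteration or single-scan EBS API, not P2P.
 */
static bool use_ebs(unsigned int capa, bool last_ok, int n_iter, bool p2p)
{
        return (capa & CAPA_EBS_SUPPORT) && last_ok &&
               (n_iter > 1 || (capa & API_SINGLE_SCAN_EBS)) && !p2p;
}

int main(void)
{
        /* A single scan on firmware without the single-scan EBS API. */
        printf("%d\n", use_ebs(CAPA_EBS_SUPPORT, true, 1, false)); /* 0 */
        return 0;
}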
 
-       iwl_mvm_build_generic_unified_scan_cmd(mvm, cmd, &params);
+static int iwl_mvm_scan_total_iterations(struct iwl_mvm_scan_params *params)
+{
+       return params->schedule[0].iterations + params->schedule[1].iterations;
+}
 
-       cmd->n_channels = (u8)req->req.n_channels;
+static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
+                                  struct iwl_mvm_scan_params *params)
+{
+       int flags = 0;
 
-       flags = IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL;
+       if (params->n_ssids == 0)
+               flags |= IWL_MVM_LMAC_SCAN_FLAG_PASSIVE;
 
-       if (req->req.n_ssids == 1 && req->req.ssids[0].ssid_len != 0)
+       if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
                flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;
 
-       if (params.passive_fragmented)
+       if (params->passive_fragmented)
                flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED;
 
-       if (req->req.n_ssids == 0)
-               flags |= IWL_MVM_LMAC_SCAN_FLAG_PASSIVE;
-
-       cmd->scan_flags |= cpu_to_le32(flags);
-
-       cmd->flags = iwl_mvm_scan_rxon_flags(req->req.channels[0]->band);
-       cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
-                                       MAC_FILTER_IN_BEACON);
-       iwl_mvm_unified_scan_fill_tx_cmd(mvm, cmd->tx_cmd, req->req.no_cck);
-       iwl_mvm_scan_fill_ssids(cmd->direct_scan, req->req.ssids,
-                               req->req.n_ssids, 0);
-
-       cmd->schedule[0].delay = 0;
-       cmd->schedule[0].iterations = 1;
-       cmd->schedule[0].full_scan_mul = 0;
-       cmd->schedule[1].delay = 0;
-       cmd->schedule[1].iterations = 0;
-       cmd->schedule[1].full_scan_mul = 0;
-
-       if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_SINGLE_SCAN_EBS &&
-           mvm->last_ebs_successful) {
-               cmd->channel_opt[0].flags =
-                       cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
-                                   IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
-                                   IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
-               cmd->channel_opt[0].non_ebs_ratio =
-                       cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO);
-               cmd->channel_opt[1].flags =
-                       cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
-                                   IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
-                                   IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
-               cmd->channel_opt[1].non_ebs_ratio =
-                       cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO);
-       }
-
-       for (i = 1; i <= req->req.n_ssids; i++)
-               ssid_bitmap |= BIT(i);
-
-       iwl_mvm_lmac_scan_cfg_channels(mvm, req->req.channels,
-                                      req->req.n_channels, ssid_bitmap,
-                                      cmd);
+       if (iwl_mvm_rrm_scan_needed(mvm))
+               flags |= IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED;
 
-       preq = (void *)(cmd->data + sizeof(struct iwl_scan_channel_cfg_lmac) *
-                       mvm->fw->ucode_capa.n_scan_channels);
+       if (params->pass_all)
+               flags |= IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL;
+       else
+               flags |= IWL_MVM_LMAC_SCAN_FLAG_MATCH;
 
-       iwl_mvm_build_unified_scan_probe(mvm, vif, &req->ies, preq,
-               req->req.flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
-                       req->req.mac_addr : NULL,
-               req->req.mac_addr_mask);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       if (mvm->scan_iter_notif_enabled)
+               flags |= IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE;
+#endif
 
-       ret = iwl_mvm_send_cmd(mvm, &hcmd);
-       if (!ret) {
-               IWL_DEBUG_SCAN(mvm, "Scan request was sent successfully\n");
-       } else {
-               /*
-                * If the scan failed, it usually means that the FW was unable
-                * to allocate the time events. Warn on it, but maybe we
-                * should try to send the command again with different params.
-                */
-               IWL_ERR(mvm, "Scan failed! ret %d\n", ret);
-               mvm->scan_status = IWL_MVM_SCAN_NONE;
-               ret = -EIO;
-       }
-       return ret;
+       return flags;
 }
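
With the flags derived from iwl_mvm_scan_params, the regular and scheduled paths get identical flag logic instead of two hand-maintained copies. A standalone sketch of the same derivation, with hypothetical flag values:

#include <stdio.h>

/* Hypothetical flag bits, mirroring IWL_MVM_LMAC_SCAN_FLAG_*. */
#define F_PASSIVE               (1 << 0)
#define F_PRE_CONNECTION        (1 << 1)
#define F_FRAGMENTED            (1 << 2)
#define F_PASS_ALL              (1 << 3)
#define F_MATCH                 (1 << 4)

static int scan_flags(int n_ssids, int ssid0_len, int fragmented,
                      int pass_all)
{
        int flags = 0;

        if (n_ssids == 0)
                flags |= F_PASSIVE;
        if (n_ssids == 1 && ssid0_len != 0)
                flags |= F_PRE_CONNECTION;
        if (fragmented)
                flags |= F_FRAGMENTED;
        flags |= pass_all ? F_PASS_ALL : F_MATCH;
        return flags;
}

int main(void)
{
        /* Passive scan reporting all results: PASSIVE | PASS_ALL. */
        printf("0x%x\n", scan_flags(0, 0, 0, 1));       /* 0x9 */
        return 0;
}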
 
-int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
-                                   struct ieee80211_vif *vif,
-                                   struct cfg80211_sched_scan_request *req,
-                                   struct ieee80211_scan_ies *ies)
+static int iwl_mvm_scan_lmac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+                            struct iwl_mvm_scan_params *params)
 {
-       struct iwl_host_cmd hcmd = {
-               .id = SCAN_OFFLOAD_REQUEST_CMD,
-               .len = { sizeof(struct iwl_scan_req_unified_lmac) +
-                        sizeof(struct iwl_scan_channel_cfg_lmac) *
-                               mvm->fw->ucode_capa.n_scan_channels +
-                        sizeof(struct iwl_scan_probe_req), },
-               .data = { mvm->scan_cmd, },
-               .dataflags = { IWL_HCMD_DFL_NOCOPY, },
-       };
-       struct iwl_scan_req_unified_lmac *cmd = mvm->scan_cmd;
-       struct iwl_scan_probe_req *preq;
-       struct iwl_mvm_scan_params params = {};
-       int ret;
-       u32 flags = 0, ssid_bitmap = 0;
+       struct iwl_scan_req_lmac *cmd = mvm->scan_cmd;
+       struct iwl_scan_probe_req *preq =
+               (void *)(cmd->data + sizeof(struct iwl_scan_channel_cfg_lmac) *
+                        mvm->fw->ucode_capa.n_scan_channels);
+       u32 ssid_bitmap = 0;
+       int n_iterations = iwl_mvm_scan_total_iterations(params);
 
        lockdep_assert_held(&mvm->mutex);
 
-       /* we should have failed registration if scan_cmd was NULL */
-       if (WARN_ON(mvm->scan_cmd == NULL))
-               return -ENOMEM;
-
-       if (req->n_ssids > PROBE_OPTION_MAX ||
-           ies->common_ie_len + ies->len[NL80211_BAND_2GHZ] +
-           ies->len[NL80211_BAND_5GHZ] >
-               iwl_mvm_max_scan_ie_fw_cmd_room(mvm, true) ||
-           req->n_channels > mvm->fw->ucode_capa.n_scan_channels)
-               return -ENOBUFS;
-
-       iwl_mvm_scan_calc_params(mvm, vif, req->n_ssids, 0, &params);
-
-       iwl_mvm_build_generic_unified_scan_cmd(mvm, cmd, &params);
-
-       cmd->n_channels = (u8)req->n_channels;
-
-       cmd->delay = cpu_to_le32(req->delay);
-
-       if (iwl_mvm_scan_pass_all(mvm, req))
-               flags |= IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL;
-       else
-               flags |= IWL_MVM_LMAC_SCAN_FLAG_MATCH;
+       memset(cmd, 0, ksize(cmd));
 
-       if (req->n_ssids == 1 && req->ssids[0].ssid_len != 0)
-               flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;
+       iwl_mvm_scan_lmac_dwell(mvm, cmd, params);
 
-       if (params.passive_fragmented)
-               flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED;
-
-       if (req->n_ssids == 0)
-               flags |= IWL_MVM_LMAC_SCAN_FLAG_PASSIVE;
+       cmd->rx_chain_select = iwl_mvm_scan_rx_chain(mvm);
+       cmd->iter_num = cpu_to_le32(1);
+       cmd->n_channels = (u8)params->n_channels;
 
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-       if (mvm->scan_iter_notif_enabled)
-               flags |= IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE;
-#endif
+       cmd->delay = cpu_to_le32(params->delay);
 
-       cmd->scan_flags |= cpu_to_le32(flags);
+       cmd->scan_flags = cpu_to_le32(iwl_mvm_scan_lmac_flags(mvm, params));
 
-       cmd->flags = iwl_mvm_scan_rxon_flags(req->channels[0]->band);
+       cmd->flags = iwl_mvm_scan_rxon_flags(params->channels[0]->band);
        cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
                                        MAC_FILTER_IN_BEACON);
-       iwl_mvm_unified_scan_fill_tx_cmd(mvm, cmd->tx_cmd, false);
-       iwl_scan_offload_build_ssid(req, cmd->direct_scan, &ssid_bitmap, false);
+       iwl_mvm_scan_fill_tx_cmd(mvm, cmd->tx_cmd, params->no_cck);
+       iwl_scan_build_ssids(params, cmd->direct_scan, &ssid_bitmap);
 
-       cmd->schedule[0].delay = cpu_to_le16(req->interval / MSEC_PER_SEC);
-       cmd->schedule[0].iterations = IWL_FAST_SCHED_SCAN_ITERATIONS;
-       cmd->schedule[0].full_scan_mul = 1;
+       /* this API uses bits 1-20 instead of 0-19 */
+       ssid_bitmap <<= 1;
 
-       cmd->schedule[1].delay = cpu_to_le16(req->interval / MSEC_PER_SEC);
-       cmd->schedule[1].iterations = 0xff;
-       cmd->schedule[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER;
+       cmd->schedule[0].delay = cpu_to_le16(params->interval);
+       cmd->schedule[0].iterations = params->schedule[0].iterations;
+       cmd->schedule[0].full_scan_mul = params->schedule[0].full_scan_mul;
+       cmd->schedule[1].delay = cpu_to_le16(params->interval);
+       cmd->schedule[1].iterations = params->schedule[1].iterations;
+       cmd->schedule[1].full_scan_mul = params->schedule[1].full_scan_mul;
 
-       if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT &&
-           mvm->last_ebs_successful) {
+       if (iwl_mvm_scan_use_ebs(mvm, vif, n_iterations)) {
                cmd->channel_opt[0].flags =
                        cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
                                    IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
@@ -988,61 +882,14 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
                        cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO);
        }
 
-       iwl_mvm_lmac_scan_cfg_channels(mvm, req->channels, req->n_channels,
-                                      ssid_bitmap, cmd);
-
-       preq = (void *)(cmd->data + sizeof(struct iwl_scan_channel_cfg_lmac) *
-                       mvm->fw->ucode_capa.n_scan_channels);
+       iwl_mvm_lmac_scan_cfg_channels(mvm, params->channels,
+                                      params->n_channels, ssid_bitmap, cmd);
 
-       iwl_mvm_build_unified_scan_probe(mvm, vif, ies, preq,
-               req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
-                       req->mac_addr : NULL,
-               req->mac_addr_mask);
+       *preq = params->preq;
 
-       ret = iwl_mvm_send_cmd(mvm, &hcmd);
-       if (!ret) {
-               IWL_DEBUG_SCAN(mvm,
-                              "Sched scan request was sent successfully\n");
-       } else {
-               /*
-                * If the scan failed, it usually means that the FW was unable
-                * to allocate the time events. Warn on it, but maybe we
-                * should try to send the command again with different params.
-                */
-               IWL_ERR(mvm, "Sched scan failed! ret %d\n", ret);
-               mvm->scan_status = IWL_MVM_SCAN_NONE;
-               ret = -EIO;
-       }
-       return ret;
-}
-
-
-int iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
-{
-       if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
-               return iwl_umac_scan_stop(mvm, IWL_UMAC_SCAN_UID_REG_SCAN,
-                                         true);
-
-       if (mvm->scan_status == IWL_MVM_SCAN_NONE)
-               return 0;
-
-       if (iwl_mvm_is_radio_killed(mvm)) {
-               ieee80211_scan_completed(mvm->hw, true);
-               iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
-               mvm->scan_status = IWL_MVM_SCAN_NONE;
-               return 0;
-       }
-
-       return iwl_mvm_scan_offload_stop(mvm, true);
+       return 0;
 }
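
The bitmap shift near the end of iwl_mvm_scan_lmac() is easy to miss: iwl_scan_build_ssids() maps direct-probe SSIDs to bits 0-19, while the LMAC channel configuration expects the same map in bits 1-20. A tiny illustration:

#include <stdio.h>

int main(void)
{
        /* Two direct SSIDs in bits 0 and 1, as built by the helper... */
        unsigned int ssid_bitmap = (1u << 0) | (1u << 1);

        /* ...shifted into bits 1 and 2 for the LMAC channel API. */
        ssid_bitmap <<= 1;

        printf("0x%x\n", ssid_bitmap);  /* prints 0x6 */
        return 0;
}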
 
-/* UMAC scan API */
-
-struct iwl_umac_scan_done {
-       struct iwl_mvm *mvm;
-       enum iwl_umac_scan_uid_type type;
-};
-
 static int rate_to_scan_rate_flag(unsigned int rate)
 {
        static const int rate_to_scan_rate[IWL_RATE_COUNT] = {
@@ -1151,79 +998,21 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
        return ret;
 }
 
-static int iwl_mvm_find_scan_uid(struct iwl_mvm *mvm, u32 uid)
-{
-       int i;
-
-       for (i = 0; i < IWL_MVM_MAX_SIMULTANEOUS_SCANS; i++)
-               if (mvm->scan_uid[i] == uid)
-                       return i;
-
-       return i;
-}
-
-static int iwl_mvm_find_free_scan_uid(struct iwl_mvm *mvm)
-{
-       return iwl_mvm_find_scan_uid(mvm, 0);
-}
-
-static bool iwl_mvm_find_scan_type(struct iwl_mvm *mvm,
-                                  enum iwl_umac_scan_uid_type type)
-{
-       int i;
-
-       for (i = 0; i < IWL_MVM_MAX_SIMULTANEOUS_SCANS; i++)
-               if (mvm->scan_uid[i] & type)
-                       return true;
-
-       return false;
-}
-
-static int iwl_mvm_find_first_scan(struct iwl_mvm *mvm,
-                                  enum iwl_umac_scan_uid_type type)
+static int iwl_mvm_scan_uid_by_status(struct iwl_mvm *mvm, int status)
 {
        int i;
 
-       for (i = 0; i < IWL_MVM_MAX_SIMULTANEOUS_SCANS; i++)
-               if (mvm->scan_uid[i] & type)
+       for (i = 0; i < mvm->max_scans; i++)
+               if (mvm->scan_uid_status[i] == status)
                        return i;
 
-       return i;
+       return -ENOENT;
 }
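
The old generated-UID scheme (type bits plus a wrapping sequence number) is replaced by a much simpler model: the UID is just an index into scan_uid_status[], and the stored value is the scan's state, with 0 meaning the slot is free. A sketch of how a free slot is found, assuming a hypothetical four-entry table:

#include <stdio.h>

#define MAX_SCANS       4       /* hypothetical mvm->max_scans */
#define SCAN_SCHED      (1 << 1)

/* Mirrors iwl_mvm_scan_uid_by_status(): status 0 marks a free slot. */
static int uid_by_status(const int *uid_status, int status)
{
        int i;

        for (i = 0; i < MAX_SCANS; i++)
                if (uid_status[i] == status)
                        return i;
        return -1;              /* stands in for -ENOENT */
}

int main(void)
{
        int uid_status[MAX_SCANS] = { SCAN_SCHED, 0, 0, 0 };

        printf("free uid: %d\n", uid_by_status(uid_status, 0)); /* 1 */
        return 0;
}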
 
-static u32 iwl_generate_scan_uid(struct iwl_mvm *mvm,
-                                enum iwl_umac_scan_uid_type type)
-{
-       u32 uid;
-
-       /* make sure exactly one bit is on in scan type */
-       WARN_ON(hweight8(type) != 1);
-
-       /*
-        * Make sure scan uids are unique. If one scan lasts long time while
-        * others are completing frequently, the seq number will wrap up and
-        * we may have more than one scan with the same uid.
-        */
-       do {
-               uid = type | (mvm->scan_seq_num <<
-                             IWL_UMAC_SCAN_UID_SEQ_OFFSET);
-               mvm->scan_seq_num++;
-       } while (iwl_mvm_find_scan_uid(mvm, uid) <
-                IWL_MVM_MAX_SIMULTANEOUS_SCANS);
-
-       IWL_DEBUG_SCAN(mvm, "Generated scan UID %u\n", uid);
-
-       return uid;
-}
-
-static void
-iwl_mvm_build_generic_umac_scan_cmd(struct iwl_mvm *mvm,
+static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
                                    struct iwl_scan_req_umac *cmd,
                                    struct iwl_mvm_scan_params *params)
 {
-       memset(cmd, 0, ksize(cmd));
-       cmd->hdr.size = cpu_to_le16(iwl_mvm_scan_size(mvm) -
-                                   sizeof(struct iwl_mvm_umac_cmd_hdr));
        cmd->active_dwell = params->dwell[IEEE80211_BAND_2GHZ].active;
        cmd->passive_dwell = params->dwell[IEEE80211_BAND_2GHZ].passive;
        if (params->passive_fragmented)
@@ -1231,7 +1020,15 @@ iwl_mvm_build_generic_umac_scan_cmd(struct iwl_mvm *mvm,
                                params->dwell[IEEE80211_BAND_2GHZ].fragmented;
        cmd->max_out_time = cpu_to_le32(params->max_out_time);
        cmd->suspend_time = cpu_to_le32(params->suspend_time);
-       cmd->scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
+       cmd->scan_priority =
+               iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
+
+       if (iwl_mvm_scan_total_iterations(params) == 0)
+               cmd->ooc_priority =
+                       iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
+       else
+               cmd->ooc_priority =
+                       iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_2);
 }
 
 static void
@@ -1251,230 +1048,326 @@ iwl_mvm_umac_scan_cfg_channels(struct iwl_mvm *mvm,
        }
 }
 
-static u32 iwl_mvm_scan_umac_common_flags(struct iwl_mvm *mvm, int n_ssids,
-                                         struct cfg80211_ssid *ssids,
-                                         int fragmented)
+static u32 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
+                                  struct iwl_mvm_scan_params *params)
 {
        int flags = 0;
 
-       if (n_ssids == 0)
+       if (params->n_ssids == 0)
                flags = IWL_UMAC_SCAN_GEN_FLAGS_PASSIVE;
 
-       if (n_ssids == 1 && ssids[0].ssid_len != 0)
+       if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
                flags |= IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT;
 
-       if (fragmented)
+       if (params->passive_fragmented)
                flags |= IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED;
 
        if (iwl_mvm_rrm_scan_needed(mvm))
                flags |= IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED;
 
+       if (params->pass_all)
+               flags |= IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL;
+       else
+               flags |= IWL_UMAC_SCAN_GEN_FLAGS_MATCH;
+
+       if (iwl_mvm_scan_total_iterations(params) > 1)
+               flags |= IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       if (mvm->scan_iter_notif_enabled)
+               flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE;
+#endif
        return flags;
 }
 
-int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
-                     struct ieee80211_scan_request *req)
+static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+                            struct iwl_mvm_scan_params *params,
+                            int type)
 {
-       struct iwl_host_cmd hcmd = {
-               .id = SCAN_REQ_UMAC,
-               .len = { iwl_mvm_scan_size(mvm), },
-               .data = { mvm->scan_cmd, },
-               .dataflags = { IWL_HCMD_DFL_NOCOPY, },
-       };
        struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
        struct iwl_scan_req_umac_tail *sec_part = (void *)&cmd->data +
                sizeof(struct iwl_scan_channel_cfg_umac) *
                        mvm->fw->ucode_capa.n_scan_channels;
-       struct iwl_mvm_scan_params params = {};
-       u32 uid, flags;
+       int uid;
        u32 ssid_bitmap = 0;
-       int ret, i, uid_idx;
+       int n_iterations = iwl_mvm_scan_total_iterations(params);
 
        lockdep_assert_held(&mvm->mutex);
 
-       uid_idx = iwl_mvm_find_free_scan_uid(mvm);
-       if (uid_idx >= IWL_MVM_MAX_SIMULTANEOUS_SCANS)
-               return -EBUSY;
+       uid = iwl_mvm_scan_uid_by_status(mvm, 0);
+       if (uid < 0)
+               return uid;
 
-       /* we should have failed registration if scan_cmd was NULL */
-       if (WARN_ON(mvm->scan_cmd == NULL))
-               return -ENOMEM;
-
-       if (WARN_ON(req->req.n_ssids > PROBE_OPTION_MAX ||
-                   req->ies.common_ie_len +
-                   req->ies.len[NL80211_BAND_2GHZ] +
-                   req->ies.len[NL80211_BAND_5GHZ] + 24 + 2 >
-                   SCAN_OFFLOAD_PROBE_REQ_SIZE || req->req.n_channels >
-                   mvm->fw->ucode_capa.n_scan_channels))
-               return -ENOBUFS;
+       memset(cmd, 0, ksize(cmd));
+       cmd->hdr.size = cpu_to_le16(iwl_mvm_scan_size(mvm) -
+                                   sizeof(struct iwl_mvm_umac_cmd_hdr));
 
-       iwl_mvm_scan_calc_params(mvm, vif, req->req.n_ssids, req->req.flags,
-                                &params);
+       iwl_mvm_scan_umac_dwell(mvm, cmd, params);
 
-       iwl_mvm_build_generic_umac_scan_cmd(mvm, cmd, &params);
+       mvm->scan_uid_status[uid] = type;
 
-       uid = iwl_generate_scan_uid(mvm, IWL_UMAC_SCAN_UID_REG_SCAN);
-       mvm->scan_uid[uid_idx] = uid;
        cmd->uid = cpu_to_le32(uid);
+       cmd->general_flags = cpu_to_le32(iwl_mvm_scan_umac_flags(mvm, params));
 
-       cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
-
-       flags = iwl_mvm_scan_umac_common_flags(mvm, req->req.n_ssids,
-                                              req->req.ssids,
-                                              params.passive_fragmented);
-
-       flags |= IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL;
-
-       cmd->general_flags = cpu_to_le32(flags);
-
-       if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_SINGLE_SCAN_EBS &&
-           mvm->last_ebs_successful)
+       if (iwl_mvm_scan_use_ebs(mvm, vif, n_iterations))
                cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS |
                                     IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
                                     IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
 
-       cmd->n_channels = req->req.n_channels;
+       cmd->n_channels = params->n_channels;
+
+       iwl_scan_build_ssids(params, sec_part->direct_scan, &ssid_bitmap);
 
-       for (i = 0; i < req->req.n_ssids; i++)
-               ssid_bitmap |= BIT(i);
+       iwl_mvm_umac_scan_cfg_channels(mvm, params->channels,
+                                      params->n_channels, ssid_bitmap, cmd);
 
-       iwl_mvm_umac_scan_cfg_channels(mvm, req->req.channels,
-                                      req->req.n_channels, ssid_bitmap, cmd);
+       /* With UMAC we use only one schedule for now, so use the sum
+        * of the iterations (with a maximum of 255).
+        */
+       sec_part->schedule[0].iter_count =
+               (n_iterations > 255) ? 255 : n_iterations;
+       sec_part->schedule[0].interval = cpu_to_le16(params->interval);
 
-       sec_part->schedule[0].iter_count = 1;
-       sec_part->delay = 0;
+       sec_part->delay = cpu_to_le16(params->delay);
+       sec_part->preq = params->preq;
 
-       iwl_mvm_build_unified_scan_probe(mvm, vif, &req->ies, &sec_part->preq,
-               req->req.flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
-                       req->req.mac_addr : NULL,
-               req->req.mac_addr_mask);
+       return 0;
+}
 
-       iwl_mvm_scan_fill_ssids(sec_part->direct_scan, req->req.ssids,
-                               req->req.n_ssids, 0);
+static int iwl_mvm_num_scans(struct iwl_mvm *mvm)
+{
+       return hweight32(mvm->scan_status & IWL_MVM_SCAN_MASK);
+}
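
scan_status is now a bitmask with one bit per running scan type, so counting running scans is a population count over the masked bits. A userspace equivalent, with hypothetical type bits (the compiler builtin stands in for the kernel's hweight32()):

#include <stdio.h>

/* Hypothetical scan-type bits, mirroring IWL_MVM_SCAN_*. */
#define SCAN_REGULAR    (1u << 0)
#define SCAN_SCHED      (1u << 1)
#define SCAN_NETDETECT  (1u << 2)
#define SCAN_MASK       (SCAN_REGULAR | SCAN_SCHED | SCAN_NETDETECT)

static int num_scans(unsigned int scan_status)
{
        return __builtin_popcount(scan_status & SCAN_MASK);
}

int main(void)
{
        printf("%d\n", num_scans(SCAN_REGULAR | SCAN_SCHED)); /* 2 */
        return 0;
}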
 
-       ret = iwl_mvm_send_cmd(mvm, &hcmd);
-       if (!ret) {
-               IWL_DEBUG_SCAN(mvm,
-                              "Scan request was sent successfully\n");
-       } else {
-               /*
-                * If the scan failed, it usually means that the FW was unable
-                * to allocate the time events. Warn on it, but maybe we
-                * should try to send the command again with different params.
-                */
-               IWL_ERR(mvm, "Scan failed! ret %d\n", ret);
+static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
+{
+       /* This looks a bit arbitrary, but the idea is that if we run
+        * out of possible simultaneous scans and userspace is trying
+        * to run a scan type that is already running, we return
+        * -EBUSY.  But if userspace wants to start a different type
+        * of scan, we stop the opposite type to make space for the
+        * new request.  The reason is backwards compatibility with
+        * old wpa_supplicant, which wouldn't stop a scheduled scan
+        * before starting a normal scan.
+        */
+
+       if (iwl_mvm_num_scans(mvm) < mvm->max_scans)
+               return 0;
+
+       /* Use a switch, even though this is a bitmask, so that more
+        * than one bit set will fall into default and we will warn.
+        */
+       switch (type) {
+       case IWL_MVM_SCAN_REGULAR:
+               if (mvm->scan_status & IWL_MVM_SCAN_REGULAR_MASK)
+                       return -EBUSY;
+               return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
+       case IWL_MVM_SCAN_SCHED:
+               if (mvm->scan_status & IWL_MVM_SCAN_SCHED_MASK)
+                       return -EBUSY;
+               return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
+       case IWL_MVM_SCAN_NETDETECT:
+               /* No need to stop anything for net-detect since the
+                * firmware is restarted anyway.  This way, any sched
+                * scans that were running will be restarted when we
+                * resume.
+                */
+               return 0;
+       default:
+               WARN_ON(1);
+               break;
        }
-       return ret;
+
+       return -EIO;
 }
 
-int iwl_mvm_sched_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
-                           struct cfg80211_sched_scan_request *req,
-                           struct ieee80211_scan_ies *ies)
+int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+                          struct cfg80211_scan_request *req,
+                          struct ieee80211_scan_ies *ies)
 {
-
        struct iwl_host_cmd hcmd = {
-               .id = SCAN_REQ_UMAC,
                .len = { iwl_mvm_scan_size(mvm), },
                .data = { mvm->scan_cmd, },
                .dataflags = { IWL_HCMD_DFL_NOCOPY, },
        };
-       struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
-       struct iwl_scan_req_umac_tail *sec_part = (void *)&cmd->data +
-               sizeof(struct iwl_scan_channel_cfg_umac) *
-                       mvm->fw->ucode_capa.n_scan_channels;
        struct iwl_mvm_scan_params params = {};
-       u32 uid, flags;
-       u32 ssid_bitmap = 0;
-       int ret, uid_idx;
+       int ret;
 
        lockdep_assert_held(&mvm->mutex);
 
-       uid_idx = iwl_mvm_find_free_scan_uid(mvm);
-       if (uid_idx >= IWL_MVM_MAX_SIMULTANEOUS_SCANS)
+       if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) {
+               IWL_ERR(mvm, "scan while LAR regdomain is not set\n");
                return -EBUSY;
+       }
+
+       ret = iwl_mvm_check_running_scans(mvm, IWL_MVM_SCAN_REGULAR);
+       if (ret)
+               return ret;
 
        /* we should have failed registration if scan_cmd was NULL */
-       if (WARN_ON(mvm->scan_cmd == NULL))
+       if (WARN_ON(!mvm->scan_cmd))
                return -ENOMEM;
 
-       if (WARN_ON(req->n_ssids > PROBE_OPTION_MAX ||
-                   ies->common_ie_len + ies->len[NL80211_BAND_2GHZ] +
-                   ies->len[NL80211_BAND_5GHZ] + 24 + 2 >
-                   SCAN_OFFLOAD_PROBE_REQ_SIZE || req->n_channels >
-                   mvm->fw->ucode_capa.n_scan_channels))
+       if (!iwl_mvm_scan_fits(mvm, req->n_ssids, ies, req->n_channels))
                return -ENOBUFS;
 
-       iwl_mvm_scan_calc_params(mvm, vif, req->n_ssids, req->flags,
-                                        &params);
-
-       iwl_mvm_build_generic_umac_scan_cmd(mvm, cmd, &params);
-
-       cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE);
-
-       uid = iwl_generate_scan_uid(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN);
-       mvm->scan_uid[uid_idx] = uid;
-       cmd->uid = cpu_to_le32(uid);
+       params.n_ssids = req->n_ssids;
+       params.flags = req->flags;
+       params.n_channels = req->n_channels;
+       params.delay = 0;
+       params.interval = 0;
+       params.ssids = req->ssids;
+       params.channels = req->channels;
+       params.mac_addr = req->mac_addr;
+       params.mac_addr_mask = req->mac_addr_mask;
+       params.no_cck = req->no_cck;
+       params.pass_all = true;
+       params.n_match_sets = 0;
+       params.match_sets = NULL;
+
+       params.schedule[0].iterations = 1;
+       params.schedule[0].full_scan_mul = 0;
+       params.schedule[1].iterations = 0;
+       params.schedule[1].full_scan_mul = 0;
+
+       iwl_mvm_scan_calc_dwell(mvm, vif, &params);
+
+       iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
+
+       if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
+               hcmd.id = SCAN_REQ_UMAC;
+               ret = iwl_mvm_scan_umac(mvm, vif, &params,
+                                       IWL_MVM_SCAN_REGULAR);
+       } else {
+               hcmd.id = SCAN_OFFLOAD_REQUEST_CMD;
+               ret = iwl_mvm_scan_lmac(mvm, vif, &params);
+       }
 
-       cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_LOW);
+       if (ret)
+               return ret;
 
-       flags = iwl_mvm_scan_umac_common_flags(mvm, req->n_ssids, req->ssids,
-                                              params.passive_fragmented);
+       iwl_mvm_ref(mvm, IWL_MVM_REF_SCAN);
+
+       ret = iwl_mvm_send_cmd(mvm, &hcmd);
+       if (!ret) {
+               IWL_DEBUG_SCAN(mvm, "Scan request was sent successfully\n");
+               mvm->scan_status |= IWL_MVM_SCAN_REGULAR;
+       } else {
+               /* If the scan failed, it usually means that the FW was unable
+                * to allocate the time events. Warn on it, but maybe we
+                * should try to send the command again with different params.
+                */
+               IWL_ERR(mvm, "Scan failed! ret %d\n", ret);
+               iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+       }

-       flags |= IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC;
 
-       if (iwl_mvm_scan_pass_all(mvm, req))
-               flags |= IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL;
-       else
-               flags |= IWL_UMAC_SCAN_GEN_FLAGS_MATCH;
+       return ret;
+}
 
-       cmd->general_flags = cpu_to_le32(flags);
+int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
+                            struct ieee80211_vif *vif,
+                            struct cfg80211_sched_scan_request *req,
+                            struct ieee80211_scan_ies *ies,
+                            int type)
+{
+       struct iwl_host_cmd hcmd = {
+               .len = { iwl_mvm_scan_size(mvm), },
+               .data = { mvm->scan_cmd, },
+               .dataflags = { IWL_HCMD_DFL_NOCOPY, },
+       };
+       struct iwl_mvm_scan_params params = {};
+       int ret;
 
-       if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT &&
-           mvm->last_ebs_successful)
-               cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS |
-                                    IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
-                                    IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
+       lockdep_assert_held(&mvm->mutex);
 
-       cmd->n_channels = req->n_channels;
+       if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) {
+               IWL_ERR(mvm, "sched-scan while LAR regdomain is not set\n");
+               return -EBUSY;
+       }
 
-       iwl_scan_offload_build_ssid(req, sec_part->direct_scan, &ssid_bitmap,
-                                   false);
+       ret = iwl_mvm_check_running_scans(mvm, type);
+       if (ret)
+               return ret;
 
-       /* This API uses bits 0-19 instead of 1-20. */
-       ssid_bitmap = ssid_bitmap >> 1;
+       /* we should have failed registration if scan_cmd was NULL */
+       if (WARN_ON(!mvm->scan_cmd))
+               return -ENOMEM;
 
-       iwl_mvm_umac_scan_cfg_channels(mvm, req->channels, req->n_channels,
-                                      ssid_bitmap, cmd);
+       if (!iwl_mvm_scan_fits(mvm, req->n_ssids, ies, req->n_channels))
+               return -ENOBUFS;
 
-       sec_part->schedule[0].interval =
-                               cpu_to_le16(req->interval / MSEC_PER_SEC);
-       sec_part->schedule[0].iter_count = 0xff;
+       params.n_ssids = req->n_ssids;
+       params.flags = req->flags;
+       params.n_channels = req->n_channels;
+       params.ssids = req->ssids;
+       params.channels = req->channels;
+       params.mac_addr = req->mac_addr;
+       params.mac_addr_mask = req->mac_addr_mask;
+       params.no_cck = false;
+       params.pass_all = iwl_mvm_scan_pass_all(mvm, req);
+       params.n_match_sets = req->n_match_sets;
+       params.match_sets = req->match_sets;
+
+       params.schedule[0].iterations = IWL_FAST_SCHED_SCAN_ITERATIONS;
+       params.schedule[0].full_scan_mul = 1;
+       params.schedule[1].iterations = 0xff;
+       params.schedule[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER;
+
+       if (req->interval / MSEC_PER_SEC > U16_MAX) {
+               IWL_DEBUG_SCAN(mvm,
+                              "interval value is > 16-bits, set to max possible\n");
+               params.interval = U16_MAX;
+       } else {
+               params.interval = req->interval / MSEC_PER_SEC;
+       }
 
+       /* In theory, LMAC scans can handle a 32-bit delay, but since
+        * waiting for over 18 hours to start the scan is a bit silly
+        * and to keep it aligned with UMAC scans (which only support
+        * 16-bit delays), trim it down to 16-bits.
+        */
        if (req->delay > U16_MAX) {
                IWL_DEBUG_SCAN(mvm,
                               "delay value is > 16-bits, set to max possible\n");
-               sec_part->delay = cpu_to_le16(U16_MAX);
+               params.delay = U16_MAX;
+       } else {
+               params.delay = req->delay;
+       }
+
+       iwl_mvm_scan_calc_dwell(mvm, vif, &params);
+
+       ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
+       if (ret)
+               return ret;
+
+       iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
+
+       if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
+               hcmd.id = SCAN_REQ_UMAC;
+               ret = iwl_mvm_scan_umac(mvm, vif, &params, IWL_MVM_SCAN_SCHED);
        } else {
-               sec_part->delay = cpu_to_le16(req->delay);
+               hcmd.id = SCAN_OFFLOAD_REQUEST_CMD;
+               ret = iwl_mvm_scan_lmac(mvm, vif, &params);
        }
 
-       iwl_mvm_build_unified_scan_probe(mvm, vif, ies, &sec_part->preq,
-               req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
-                       req->mac_addr : NULL,
-               req->mac_addr_mask);
+       if (ret)
+               return ret;
 
        ret = iwl_mvm_send_cmd(mvm, &hcmd);
        if (!ret) {
                IWL_DEBUG_SCAN(mvm,
                               "Sched scan request was sent successfully\n");
+               mvm->scan_status |= type;
        } else {
-               /*
-                * If the scan failed, it usually means that the FW was unable
+               /* If the scan failed, it usually means that the FW was unable
                 * to allocate the time events. Warn on it, but maybe we
                 * should try to send the command again with different params.
                 */
                IWL_ERR(mvm, "Sched scan failed! ret %d\n", ret);
        }
+
        return ret;
 }
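
Both clamps above squeeze 32-bit request fields into the 16 bits the firmware schedule entries carry. A standalone sketch of the interval case, which also converts from the milliseconds cfg80211 passes in to the seconds the firmware expects (the delay clamp is the same minus the unit conversion):

#include <stdint.h>
#include <stdio.h>

#define MSEC_PER_SEC    1000U

static uint16_t clamp_interval(uint32_t msec)
{
        uint32_t sec = msec / MSEC_PER_SEC;

        /* Anything past ~18.2 hours (65535 s) is trimmed. */
        return sec > UINT16_MAX ? UINT16_MAX : (uint16_t)sec;
}

int main(void)
{
        printf("%u s\n", clamp_interval(120000));       /* 120 s */
        printf("%u s\n", clamp_interval(4294967295u));  /* 65535 s */
        return 0;
}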
 
@@ -1485,150 +1378,124 @@ int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_umac_scan_complete *notif = (void *)pkt->data;
        u32 uid = __le32_to_cpu(notif->uid);
-       bool sched = !!(uid & IWL_UMAC_SCAN_UID_SCHED_SCAN);
-       int uid_idx = iwl_mvm_find_scan_uid(mvm, uid);
+       bool aborted = (notif->status == IWL_SCAN_OFFLOAD_ABORTED);
 
-       /*
-        * Scan uid may be set to zero in case of scan abort request from above.
-        */
-       if (uid_idx >= IWL_MVM_MAX_SIMULTANEOUS_SCANS)
+       if (WARN_ON(!(mvm->scan_uid_status[uid] & mvm->scan_status)))
                return 0;
 
+       /* if the scan is already stopping, we don't need to notify mac80211 */
+       if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_REGULAR) {
+               ieee80211_scan_completed(mvm->hw, aborted);
+               iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+       } else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) {
+               ieee80211_sched_scan_stopped(mvm->hw);
+       }
+
+       mvm->scan_status &= ~mvm->scan_uid_status[uid];
+
        IWL_DEBUG_SCAN(mvm,
-                      "Scan completed, uid %u type %s, status %s, EBS status %s\n",
-                      uid, sched ? "sched" : "regular",
+                      "Scan completed, uid %u type %u, status %s, EBS status %s\n",
+                      uid, mvm->scan_uid_status[uid],
                       notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
                                "completed" : "aborted",
-                      notif->ebs_status == IWL_SCAN_EBS_SUCCESS ?
-                               "success" : "failed");
+                      iwl_mvm_ebs_status_str(notif->ebs_status));
 
-       if (notif->ebs_status)
+       if (notif->ebs_status != IWL_SCAN_EBS_SUCCESS &&
+           notif->ebs_status != IWL_SCAN_EBS_INACTIVE)
                mvm->last_ebs_successful = false;
 
-       mvm->scan_uid[uid_idx] = 0;
-
-       if (!sched) {
-               ieee80211_scan_completed(mvm->hw,
-                                        notif->status ==
-                                               IWL_SCAN_OFFLOAD_ABORTED);
-               iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
-       } else if (!iwl_mvm_find_scan_type(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN)) {
-               ieee80211_sched_scan_stopped(mvm->hw);
-       } else {
-               IWL_DEBUG_SCAN(mvm, "Another sched scan is running\n");
-       }
+       mvm->scan_uid_status[uid] = 0;
 
        return 0;
 }
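
The completion handler's bookkeeping is the counterpart of the bitmask design: only the bit owned by the reported UID is cleared from scan_status, and the slot is then freed. A toy walk-through, with hypothetical type bits:

#include <stdio.h>

#define SCAN_REGULAR    (1u << 0)
#define SCAN_SCHED      (1u << 1)

int main(void)
{
        unsigned int scan_status = SCAN_REGULAR | SCAN_SCHED;
        unsigned int uid_status[4] = { SCAN_REGULAR, SCAN_SCHED, 0, 0 };
        int uid = 0;    /* UID carried by the (hypothetical) notification */

        /* Clear only this UID's bit, then free the slot. */
        scan_status &= ~uid_status[uid];
        uid_status[uid] = 0;

        printf("scan_status=0x%x\n", scan_status); /* SCAN_SCHED remains */
        return 0;
}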
 
-static bool iwl_scan_umac_done_check(struct iwl_notif_wait_data *notif_wait,
-                                    struct iwl_rx_packet *pkt, void *data)
+int iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+                                            struct iwl_rx_cmd_buffer *rxb,
+                                            struct iwl_device_cmd *cmd)
 {
-       struct iwl_umac_scan_done *scan_done = data;
-       struct iwl_umac_scan_complete *notif = (void *)pkt->data;
-       u32 uid = __le32_to_cpu(notif->uid);
-       int uid_idx = iwl_mvm_find_scan_uid(scan_done->mvm, uid);
-
-       if (WARN_ON(pkt->hdr.cmd != SCAN_COMPLETE_UMAC))
-               return false;
-
-       if (uid_idx >= IWL_MVM_MAX_SIMULTANEOUS_SCANS)
-               return false;
-
-       /*
-        * Clear scan uid of scans that was aborted from above and completed
-        * in FW so the RX handler does nothing. Set last_ebs_successful here if
-        * needed.
-        */
-       scan_done->mvm->scan_uid[uid_idx] = 0;
-
-       if (notif->ebs_status)
-               scan_done->mvm->last_ebs_successful = false;
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_umac_scan_iter_complete_notif *notif = (void *)pkt->data;
+       u8 buf[256];
 
-       return !iwl_mvm_find_scan_type(scan_done->mvm, scan_done->type);
+       IWL_DEBUG_SCAN(mvm,
+                      "UMAC Scan iteration complete: status=0x%x scanned_channels=%d channels list: %s\n",
+                      notif->status, notif->scanned_channels,
+                      iwl_mvm_dump_channel_list(notif->results,
+                                                notif->scanned_channels, buf,
+                                                sizeof(buf)));
+       return 0;
 }
 
-static int iwl_umac_scan_abort_one(struct iwl_mvm *mvm, u32 uid)
+static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type)
 {
        struct iwl_umac_scan_abort cmd = {
                .hdr.size = cpu_to_le16(sizeof(struct iwl_umac_scan_abort) -
                                        sizeof(struct iwl_mvm_umac_cmd_hdr)),
-               .uid = cpu_to_le32(uid),
        };
+       int uid, ret;
 
        lockdep_assert_held(&mvm->mutex);
 
+       /* We should always get a valid index here, because we already
+        * checked that this type of scan was running in the generic
+        * code.
+        */
+       uid = iwl_mvm_scan_uid_by_status(mvm, type);
+       if (WARN_ON_ONCE(uid < 0))
+               return uid;
+
+       cmd.uid = cpu_to_le32(uid);
+
        IWL_DEBUG_SCAN(mvm, "Sending scan abort, uid %u\n", uid);
 
-       return iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_UMAC, 0, sizeof(cmd), &cmd);
+       ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_UMAC, 0, sizeof(cmd), &cmd);
+       if (!ret)
+               mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT;
+
+       return ret;
 }
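
Marking the slot with the type shifted by IWL_MVM_SCAN_STOPPING_SHIFT is what lets the completion handler above tell a driver-initiated abort from a natural completion: the equality checks against the plain type no longer match, so mac80211 is not notified a second time. A small illustration (the shift value is a hypothetical stand-in for the kernel define):

#include <stdio.h>

#define SCAN_REGULAR            (1u << 0)
#define SCAN_STOPPING_SHIFT     8       /* hypothetical shift value */

int main(void)
{
        unsigned int uid_status = SCAN_REGULAR;

        /* After a successful abort the slot moves to "stopping"... */
        uid_status = SCAN_REGULAR << SCAN_STOPPING_SHIFT;

        /* ...so the completion handler's check no longer matches. */
        printf("notify mac80211? %s\n",
               uid_status == SCAN_REGULAR ? "yes" : "no");
        return 0;
}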
 
-static int iwl_umac_scan_stop(struct iwl_mvm *mvm,
-                             enum iwl_umac_scan_uid_type type, bool notify)
+static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type)
 {
        struct iwl_notification_wait wait_scan_done;
-       static const u8 scan_done_notif[] = { SCAN_COMPLETE_UMAC, };
-       struct iwl_umac_scan_done scan_done = {
-               .mvm = mvm,
-               .type = type,
-       };
-       int i, ret = -EIO;
+       static const u8 scan_done_notif[] = { SCAN_COMPLETE_UMAC,
+                                             SCAN_OFFLOAD_COMPLETE, };
+       int ret;
+
+       lockdep_assert_held(&mvm->mutex);
 
        iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done,
                                   scan_done_notif,
                                   ARRAY_SIZE(scan_done_notif),
-                                  iwl_scan_umac_done_check, &scan_done);
+                                  NULL, NULL);
 
        IWL_DEBUG_SCAN(mvm, "Preparing to stop scan, type %x\n", type);
 
-       for (i = 0; i < IWL_MVM_MAX_SIMULTANEOUS_SCANS; i++) {
-               if (mvm->scan_uid[i] & type) {
-                       int err;
-
-                       if (iwl_mvm_is_radio_killed(mvm) &&
-                           (type & IWL_UMAC_SCAN_UID_REG_SCAN)) {
-                               ieee80211_scan_completed(mvm->hw, true);
-                               iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
-                               break;
-                       }
-
-                       err = iwl_umac_scan_abort_one(mvm, mvm->scan_uid[i]);
-                       if (!err)
-                               ret = 0;
-               }
-       }
+       if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
+               ret = iwl_mvm_umac_scan_abort(mvm, type);
+       else
+               ret = iwl_mvm_lmac_scan_abort(mvm);
 
        if (ret) {
-               IWL_DEBUG_SCAN(mvm, "Couldn't stop scan\n");
+               IWL_DEBUG_SCAN(mvm, "couldn't stop scan type %d\n", type);
                iwl_remove_notification(&mvm->notif_wait, &wait_scan_done);
                return ret;
        }
 
        ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ);
-       if (ret)
-               return ret;
-
-       if (notify) {
-               if (type & IWL_UMAC_SCAN_UID_SCHED_SCAN)
-                       ieee80211_sched_scan_stopped(mvm->hw);
-               if (type & IWL_UMAC_SCAN_UID_REG_SCAN) {
-                       ieee80211_scan_completed(mvm->hw, true);
-                       iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
-               }
-       }
 
        return ret;
 }
 
 int iwl_mvm_scan_size(struct iwl_mvm *mvm)
 {
-       if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)
+       if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
                return sizeof(struct iwl_scan_req_umac) +
                        sizeof(struct iwl_scan_channel_cfg_umac) *
                                mvm->fw->ucode_capa.n_scan_channels +
                        sizeof(struct iwl_scan_req_umac_tail);
 
-       return sizeof(struct iwl_scan_req_unified_lmac) +
+       return sizeof(struct iwl_scan_req_lmac) +
                sizeof(struct iwl_scan_channel_cfg_lmac) *
                mvm->fw->ucode_capa.n_scan_channels +
                sizeof(struct iwl_scan_probe_req);
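
In both branches the command buffer is sized as a fixed header, one per-channel entry for every channel the firmware can scan, and the probe-request template at the tail. A sketch of the LMAC arithmetic with illustrative struct sizes (the real ones come from sizeof() on the firmware structs):

#include <stdio.h>

#define REQ_LMAC_SIZE           64      /* hypothetical header size */
#define CHAN_CFG_LMAC_SIZE      8       /* per-channel config entry */
#define PROBE_REQ_SIZE          512     /* probe request template */

static int lmac_scan_size(int n_scan_channels)
{
        return REQ_LMAC_SIZE +
               CHAN_CFG_LMAC_SIZE * n_scan_channels +
               PROBE_REQ_SIZE;
}

int main(void)
{
        printf("%d bytes\n", lmac_scan_size(50));       /* 976 bytes */
        return 0;
}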
@@ -1640,47 +1507,76 @@ int iwl_mvm_scan_size(struct iwl_mvm *mvm)
  */
 void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm)
 {
-       if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) {
-               u32 uid, i;
+       if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
+               int uid, i;
 
-               uid = iwl_mvm_find_first_scan(mvm, IWL_UMAC_SCAN_UID_REG_SCAN);
-               if (uid < IWL_MVM_MAX_SIMULTANEOUS_SCANS) {
+               uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_REGULAR);
+               if (uid >= 0) {
                        ieee80211_scan_completed(mvm->hw, true);
-                       mvm->scan_uid[uid] = 0;
+                       mvm->scan_uid_status[uid] = 0;
                }
-               uid = iwl_mvm_find_first_scan(mvm,
-                                             IWL_UMAC_SCAN_UID_SCHED_SCAN);
-               if (uid < IWL_MVM_MAX_SIMULTANEOUS_SCANS && !mvm->restart_fw) {
+               uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_SCHED);
+               if (uid >= 0 && !mvm->restart_fw) {
                        ieee80211_sched_scan_stopped(mvm->hw);
-                       mvm->scan_uid[uid] = 0;
+                       mvm->scan_uid_status[uid] = 0;
                }
 
                /* We shouldn't have any UIDs still set.  Loop over all the
                 * UIDs to make sure there's nothing left there and warn if
                 * any is found.
                 */
-               for (i = 0; i < IWL_MVM_MAX_SIMULTANEOUS_SCANS; i++) {
-                       if (WARN_ONCE(mvm->scan_uid[i],
-                                     "UMAC scan UID %d was not cleaned\n",
-                                     mvm->scan_uid[i]))
-                               mvm->scan_uid[i] = 0;
+               for (i = 0; i < mvm->max_scans; i++) {
+                       if (WARN_ONCE(mvm->scan_uid_status[i],
+                                     "UMAC scan UID %d status was not cleaned\n",
+                                     i))
+                               mvm->scan_uid_status[i] = 0;
                }
        } else {
-               switch (mvm->scan_status) {
-               case IWL_MVM_SCAN_NONE:
-                       break;
-               case IWL_MVM_SCAN_OS:
+               if (mvm->scan_status & IWL_MVM_SCAN_REGULAR)
                        ieee80211_scan_completed(mvm->hw, true);
-                       break;
-               case IWL_MVM_SCAN_SCHED:
-                       /*
-                        * Sched scan will be restarted by mac80211 in
-                        * restart_hw, so do not report if FW is about to be
-                        * restarted.
-                        */
-                       if (!mvm->restart_fw)
-                               ieee80211_sched_scan_stopped(mvm->hw);
-                       break;
-               }
+
+               /* Sched scan will be restarted by mac80211 in
+                * restart_hw, so do not report if FW is about to be
+                * restarted.
+                */
+               if ((mvm->scan_status & IWL_MVM_SCAN_SCHED) && !mvm->restart_fw)
+                       ieee80211_sched_scan_stopped(mvm->hw);
+       }
+}
+
+int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify)
+{
+       int ret;
+
+       if (!(mvm->scan_status & type))
+               return 0;
+
+       if (iwl_mvm_is_radio_killed(mvm)) {
+               ret = 0;
+               goto out;
+       }
+
+       ret = iwl_mvm_scan_stop_wait(mvm, type);
+       if (!ret)
+               mvm->scan_status |= type << IWL_MVM_SCAN_STOPPING_SHIFT;
+out:
+       /* Clear the scan status so the next scan requests will
+        * succeed and mark the scan as stopping, so that the Rx
+        * handler doesn't do anything, as the scan was stopped from
+        * above.
+        */
+       mvm->scan_status &= ~type;
+
+       if (type == IWL_MVM_SCAN_REGULAR) {
+               /* Since the rx handler won't do anything now, we have
+                * to release the scan reference here.
+                */
+               iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+               if (notify)
+                       ieee80211_scan_completed(mvm->hw, true);
+       } else if (notify) {
+               ieee80211_sched_scan_stopped(mvm->hw);
        }
+
+       return ret;
 }
index 1845b79487c81b446e0432bc385e9fc12884ff68..d68dc697a4a06ef2b9c786de0fc98f3e5fe35544 100644 (file)
@@ -5,8 +5,8 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -31,8 +31,8 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -1000,13 +1000,13 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
        fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
 
+       iwl_mvm_enable_agg_txq(mvm, queue, fifo, mvmsta->sta_id, tid,
+                              buf_size, ssn, wdg_timeout);
+
        ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
        if (ret)
                return -EIO;
 
-       iwl_mvm_enable_agg_txq(mvm, queue, fifo, mvmsta->sta_id, tid,
-                              buf_size, ssn, wdg_timeout);
-
        /*
         * Even though in theory the peer could have different
         * aggregation reorder buffer sizes for different sessions,
index fd7b0d36f9a620b8e99dcc7b643806b76f09af4d..d24b6a83e68cfcd4281301c907cb62a24661da96 100644 (file)
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -108,12 +108,14 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
         * in the case that the time event actually completed in the firmware
         * (which is handled in iwl_mvm_te_handle_notif).
         */
-       if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status))
+       if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status)) {
                queues |= BIT(IWL_MVM_OFFCHANNEL_QUEUE);
-       if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
+               iwl_mvm_unref(mvm, IWL_MVM_REF_ROC);
+       }
+       if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) {
                queues |= BIT(mvm->aux_queue);
-
-       iwl_mvm_unref(mvm, IWL_MVM_REF_ROC);
+               iwl_mvm_unref(mvm, IWL_MVM_REF_ROC_AUX);
+       }
 
        synchronize_net();
 
@@ -393,6 +395,7 @@ static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm,
        } else if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_START) {
                set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status);
                te_data->running = true;
+               iwl_mvm_ref(mvm, IWL_MVM_REF_ROC_AUX);
                ieee80211_ready_on_channel(mvm->hw); /* Start TE */
        } else {
                IWL_DEBUG_TE(mvm,
@@ -794,13 +797,12 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
 void iwl_mvm_stop_roc(struct iwl_mvm *mvm)
 {
-       struct iwl_mvm_vif *mvmvif;
+       struct iwl_mvm_vif *mvmvif = NULL;
        struct iwl_mvm_time_event_data *te_data;
        bool is_p2p = false;
 
        lockdep_assert_held(&mvm->mutex);
 
-       mvmvif = NULL;
        spin_lock_bh(&mvm->time_event_lock);
 
        /*
@@ -818,17 +820,14 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm)
                }
        }
 
-       /*
-        * Iterate over the list of aux roc time events and find the time
-        * event that is associated with a BSS interface.
-        * This assumes that a BSS interface can have only a single time
-        * event at any given time and this time event corresponds to a ROC
-        * request
+       /* There can be at most one AUX ROC time event; we just use the
+        * list to simplify/unify code. Remove it if it exists.
         */
-       list_for_each_entry(te_data, &mvm->aux_roc_te_list, list) {
+       te_data = list_first_entry_or_null(&mvm->aux_roc_te_list,
+                                          struct iwl_mvm_time_event_data,
+                                          list);
+       if (te_data)
                mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
-               goto remove_te;
-       }
 
 remove_te:
        spin_unlock_bh(&mvm->time_event_lock);
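
Since at most one AUX ROC time event can exist, the single-iteration list_for_each_entry() plus goto above collapses into list_first_entry_or_null(). The generic shape of that pattern, as a standalone sketch:

	#include <linux/list.h>

	struct item {
		struct list_head list;
	};

	/* returns the only element, or NULL if the list is empty -
	 * no loop or goto needed for a zero-or-one element list */
	static struct item *first_item(struct list_head *head)
	{
		return list_first_entry_or_null(head, struct item, list);
	}
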
index ba615ad2176cd2d663636d1499d8dd73030c3ad2..80d07db6e7e8c80f472ee84fe6c31fc0f53844b1 100644 (file)
@@ -70,7 +70,7 @@
 static void iwl_mvm_enter_ctkill(struct iwl_mvm *mvm)
 {
        struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle;
-       u32 duration = mvm->thermal_throttle.params->ct_kill_duration;
+       u32 duration = tt->params.ct_kill_duration;
 
        if (test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status))
                return;
@@ -223,7 +223,7 @@ static void check_exit_ctkill(struct work_struct *work)
        tt = container_of(work, struct iwl_mvm_tt_mgmt, ct_kill_exit.work);
        mvm = container_of(tt, struct iwl_mvm, thermal_throttle);
 
-       duration = tt->params->ct_kill_duration;
+       duration = tt->params.ct_kill_duration;
 
        mutex_lock(&mvm->mutex);
 
@@ -247,7 +247,7 @@ static void check_exit_ctkill(struct work_struct *work)
 
        IWL_DEBUG_TEMP(mvm, "NIC temperature: %d\n", temp);
 
-       if (temp <= tt->params->ct_kill_exit) {
+       if (temp <= tt->params.ct_kill_exit) {
                mutex_unlock(&mvm->mutex);
                iwl_mvm_exit_ctkill(mvm);
                return;
@@ -325,7 +325,7 @@ void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff)
 
 void iwl_mvm_tt_handler(struct iwl_mvm *mvm)
 {
-       const struct iwl_tt_params *params = mvm->thermal_throttle.params;
+       struct iwl_tt_params *params = &mvm->thermal_throttle.params;
        struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle;
        s32 temperature = mvm->temperature;
        bool throttle_enable = false;
@@ -340,7 +340,7 @@ void iwl_mvm_tt_handler(struct iwl_mvm *mvm)
        }
 
        if (params->support_ct_kill &&
-           temperature <= tt->params->ct_kill_exit) {
+           temperature <= params->ct_kill_exit) {
                iwl_mvm_exit_ctkill(mvm);
                return;
        }
@@ -400,7 +400,7 @@ void iwl_mvm_tt_handler(struct iwl_mvm *mvm)
        }
 }
 
-static const struct iwl_tt_params iwl7000_tt_params = {
+static const struct iwl_tt_params iwl_mvm_default_tt_params = {
        .ct_kill_entry = 118,
        .ct_kill_exit = 96,
        .ct_kill_duration = 5,
@@ -422,38 +422,16 @@ static const struct iwl_tt_params iwl7000_tt_params = {
        .support_tx_backoff = true,
 };
 
-static const struct iwl_tt_params iwl7000_high_temp_tt_params = {
-       .ct_kill_entry = 118,
-       .ct_kill_exit = 96,
-       .ct_kill_duration = 5,
-       .dynamic_smps_entry = 114,
-       .dynamic_smps_exit = 110,
-       .tx_protection_entry = 114,
-       .tx_protection_exit = 108,
-       .tx_backoff = {
-               {.temperature = 112, .backoff = 300},
-               {.temperature = 113, .backoff = 800},
-               {.temperature = 114, .backoff = 1500},
-               {.temperature = 115, .backoff = 3000},
-               {.temperature = 116, .backoff = 5000},
-               {.temperature = 117, .backoff = 10000},
-       },
-       .support_ct_kill = true,
-       .support_dynamic_smps = true,
-       .support_tx_protection = true,
-       .support_tx_backoff = true,
-};
-
 void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff)
 {
        struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle;
 
        IWL_DEBUG_TEMP(mvm, "Initialize Thermal Throttling\n");
 
-       if (mvm->cfg->high_temp)
-               tt->params = &iwl7000_high_temp_tt_params;
+       if (mvm->cfg->thermal_params)
+               tt->params = *mvm->cfg->thermal_params;
        else
-               tt->params = &iwl7000_tt_params;
+               tt->params = iwl_mvm_default_tt_params;
 
        tt->throttle = false;
        tt->dynamic_smps = false;
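
The throttling parameters move from a pointer to a struct embedded by value, so tt->params is always valid: a per-config table is copied in when the config provides one, and the defaults are copied otherwise. A condensed sketch of the resulting shape (the field placement is assumed, not quoted from mvm.h):

	struct iwl_mvm_tt_mgmt {
		struct iwl_tt_params params; /* was: const struct iwl_tt_params * */
		/* ... */
	};

	/* init: a struct copy instead of aliasing a static table,
	 * which removes the NULL/dangling-pointer cases entirely */
	tt->params = mvm->cfg->thermal_params ? *mvm->cfg->thermal_params
					      : iwl_mvm_default_tt_params;
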
index ef32e177f662b3ba03772e02b1c9b9e512bcd64f..7ba7a118ff5ca28615f5ceeb745f577999ea52dc 100644 (file)
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
 #include "mvm.h"
 #include "sta.h"
 
+static void
+iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr,
+                         u16 tid, u16 ssn)
+{
+       struct iwl_fw_dbg_trigger_tlv *trig;
+       struct iwl_fw_dbg_trigger_ba *ba_trig;
+
+       if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
+               return;
+
+       trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
+       ba_trig = (void *)trig->data;
+
+       if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig))
+               return;
+
+       if (!(le16_to_cpu(ba_trig->tx_bar) & BIT(tid)))
+               return;
+
+       iwl_mvm_fw_dbg_collect_trig(mvm, trig,
+                                   "BAR sent to %pM, tid %d, ssn %d",
+                                   addr, tid, ssn);
+}
+
 /*
  * Sets most of the Tx cmd's fields
  */
@@ -101,12 +125,15 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
        } else if (ieee80211_is_back_req(fc)) {
                struct ieee80211_bar *bar = (void *)skb->data;
                u16 control = le16_to_cpu(bar->control);
+               u16 ssn = le16_to_cpu(bar->start_seq_num);
 
                tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
                tx_cmd->tid_tspec = (control &
                                     IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
                        IEEE80211_BAR_CTRL_TID_INFO_SHIFT;
                WARN_ON_ONCE(tx_cmd->tid_tspec >= IWL_MAX_TID_COUNT);
+               iwl_mvm_bar_check_trigger(mvm, bar->ra, tx_cmd->tid_tspec,
+                                         ssn);
        } else {
                tx_cmd->tid_tspec = IWL_TID_NON_QOS;
                if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
@@ -144,8 +171,8 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
            !is_multicast_ether_addr(ieee80211_get_DA(hdr)))
                tx_flags |= TX_CMD_FLG_PROT_REQUIRE;
 
-       if ((mvm->fw->ucode_capa.capa[0] &
-            IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT) &&
+       if (fw_has_capa(&mvm->fw->ucode_capa,
+                       IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT) &&
            ieee80211_action_contains_tpc(skb))
                tx_flags |= TX_CMD_FLG_WRITE_TX_POWER;
 
index bc55a8b82db6d88ad42ef1151e0c5c53c2cbf64c..03f8e06dded72fc74a302c7e52fced302631dc01 100644 (file)
@@ -584,7 +584,7 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
        struct iwl_error_event_table table;
        u32 base;
 
-       if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_NEW_VERSION)) {
+       if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_NEW_VERSION)) {
                iwl_mvm_dump_nic_error_log_old(mvm);
                return;
        }
index b1856973492237dcbee2ebf640ebeb0bf593fecb..2ed1e4d2774da83f1cd609983c8383c48d37a128 100644 (file)
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -629,7 +629,18 @@ static int iwl_pci_resume(struct device *device)
        if (!trans->op_mode)
                return 0;
 
-       iwl_enable_rfkill_int(trans);
+       /*
+        * On suspend, ict is disabled, and the interrupt mask
+        * gets cleared. Reconfigure them both when resuming from a
+        * d0i3 image. Otherwise, only enable the rfkill interrupt
+        * (in order to keep track of the rfkill status).
+        */
+       if (trans->wowlan_d0i3) {
+               iwl_pcie_reset_ict(trans);
+               iwl_enable_interrupts(trans);
+       } else {
+               iwl_enable_rfkill_int(trans);
+       }
 
        hw_rfkill = iwl_is_rfkill_set(trans);
        iwl_trans_pcie_rf_kill(trans, hw_rfkill);
index 376b84e54ad7e8bbb48d039d354c03748665451c..31f72a61cc3fe06b6d9189f718cc3315677625ad 100644 (file)
 #include "iwl-io.h"
 #include "iwl-op-mode.h"
 
+/*
+ * RX related structures and functions
+ */
+#define RX_NUM_QUEUES 1
+#define RX_POST_REQ_ALLOC 2
+#define RX_CLAIM_REQ_ALLOC 8
+#define RX_POOL_SIZE ((RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) * RX_NUM_QUEUES)
+#define RX_LOW_WATERMARK 8
+
 struct iwl_host_cmd;
 
 /*This file includes the declaration that are internal to the
@@ -77,29 +86,29 @@ struct isr_statistics {
  * struct iwl_rxq - Rx queue
  * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
  * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
- * @pool:
- * @queue:
  * @read: Shared index to newest available Rx buffer
  * @write: Shared index to oldest written Rx packet
  * @free_count: Number of pre-allocated buffers in rx_free
+ * @used_count: Number of RBDs handed to the allocator for allocation
  * @write_actual:
- * @rx_free: list of free SKBs for use
- * @rx_used: List of Rx buffers with no SKB
+ * @rx_free: list of RBDs with allocated RB ready for use
+ * @rx_used: list of RBDs with no RB attached
  * @need_update: flag to indicate we need to update read/write index
  * @rb_stts: driver's pointer to receive buffer status
  * @rb_stts_dma: bus address of receive buffer status
  * @lock:
+ * @pool: initial pool of iwl_rx_mem_buffer for the queue
+ * @queue: actual rx queue
  *
  * NOTE:  rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
  */
 struct iwl_rxq {
        __le32 *bd;
        dma_addr_t bd_dma;
-       struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
-       struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
        u32 read;
        u32 write;
        u32 free_count;
+       u32 used_count;
        u32 write_actual;
        struct list_head rx_free;
        struct list_head rx_used;
@@ -107,6 +116,32 @@ struct iwl_rxq {
        struct iwl_rb_status *rb_stts;
        dma_addr_t rb_stts_dma;
        spinlock_t lock;
+       struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE];
+       struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
+};
+
+/**
+ * struct iwl_rb_allocator - Rx allocator
+ * @pool: initial pool of the allocator
+ * @req_pending: number of requests the allocator has not processed yet
+ * @req_ready: number of requests honored and ready for claiming
+ * @rbd_allocated: RBDs with pages allocated and ready to be handed to
+ *     the queue. This is a list of &struct iwl_rx_mem_buffer
+ * @rbd_empty: RBDs with no page attached for allocator use. This is a list
+ *     of &struct iwl_rx_mem_buffer
+ * @lock: protects the rbd_allocated and rbd_empty lists
+ * @alloc_wq: work queue for background calls
+ * @rx_alloc: work struct for background calls
+ */
+struct iwl_rb_allocator {
+       struct iwl_rx_mem_buffer pool[RX_POOL_SIZE];
+       atomic_t req_pending;
+       atomic_t req_ready;
+       struct list_head rbd_allocated;
+       struct list_head rbd_empty;
+       spinlock_t lock;
+       struct workqueue_struct *alloc_wq;
+       struct work_struct rx_alloc;
 };
 
 struct iwl_dma_ptr {
@@ -250,7 +285,7 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
 /**
  * struct iwl_trans_pcie - PCIe transport specific data
  * @rxq: all the RX queue data
- * @rx_replenish: work that will be called when buffers need to be allocated
+ * @rba: allocator for RX replenishing
  * @drv - pointer to iwl_drv
  * @trans: pointer to the generic transport area
  * @scd_base_addr: scheduler sram base address in SRAM
@@ -273,7 +308,7 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
  */
 struct iwl_trans_pcie {
        struct iwl_rxq rxq;
-       struct work_struct rx_replenish;
+       struct iwl_rb_allocator rba;
        struct iwl_trans *trans;
        struct iwl_drv *drv;
 
index 7ff69c642103f1febeea67d9c082a035039b4121..a3fbaa0ef5e04de7d1032c79ca35e82364dac7ea 100644 (file)
@@ -1,7 +1,7 @@
 /******************************************************************************
  *
  * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * Portions of this file are derived from the ipw3945 project, as well
  * as portions of the ieee80211 subsystem header files.
  * resets the Rx queue buffers with new memory.
  *
  * The management in the driver is as follows:
- * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free.  When
- *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
- *   to replenish the iwl->rxq->rx_free.
- * + In iwl_pcie_rx_replenish (scheduled) if 'processed' != 'read' then the
- *   iwl->rxq is replenished and the READ INDEX is updated (updating the
- *   'processed' and 'read' driver indexes as well)
+ * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
+ *   When the interrupt handler is called, received packets are processed.
+ *   The page is either stolen - transferred to the upper layer
+ *   or reused - added immediately to the iwl->rxq->rx_free list.
+ * + When the page is stolen - the driver updates the matching queue's used
+ *   count, detaches the RBD and transfers it to the queue used list.
+ *   When there are two used RBDs - they are transferred to the allocator empty
+ *   list. Work is then scheduled for the allocator to start allocating
+ *   eight buffers.
+ *   When another 6 RBDs have been used - they are transferred to the
+ *   allocator empty list and the driver tries to claim the pre-allocated
+ *   buffers and add them to iwl->rxq->rx_free. If the claim fails, the
+ *   driver keeps trying on subsequent passes until the buffers are ready.
+ *   When there are 8+ buffers in the free list - either from allocation or from
+ *   8 reused unstolen pages - restock is called to update the FW and indexes.
+ * + In order to make sure the allocator always has RBDs to use for allocation
+ *   the allocator has an initial pool of num_queues*(8-2) RBDs - the
+ *   maximum number of RBDs missing per allocation request (a request is
+ *   posted with 2 empty RBDs; there is no guarantee when the other 6 are
+ *   supplied). The queues supply the rest of the RBDs through recycling.
  * + A received packet is processed and handed to the kernel network stack,
  *   detached from the iwl->rxq.  The driver 'processed' index is updated.
- * + The Host/Firmware iwl->rxq is replenished at irq thread time from the
- *   rx_free list. If there are no allocated buffers in iwl->rxq->rx_free,
+ * + If there are no allocated buffers in iwl->rxq->rx_free,
  *   the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
  *   If there were enough free buffers and RX_STALLED is set it is cleared.
  *
  *
  * iwl_rxq_alloc()            Allocates rx_free
  * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
- *                            iwl_pcie_rxq_restock
+ *                            iwl_pcie_rxq_restock.
+ *                            Used only during initialization.
  * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
  *                            queue, updates firmware pointers, and updates
- *                            the WRITE index.  If insufficient rx_free buffers
- *                            are available, schedules iwl_pcie_rx_replenish
+ *                            the WRITE index.
+ * iwl_pcie_rx_allocator()     Background work for allocating pages.
  *
  * -- enable interrupts --
  * ISR - iwl_rx()             Detach iwl_rx_mem_buffers from pool up to the
  *                            READ INDEX, detaching the SKB from the pool.
  *                            Moves the packet buffer from queue to rx_used.
+ *                            Posts and claims requests to the allocator.
  *                            Calls iwl_pcie_rxq_restock to refill any empty
  *                            slots.
+ *
+ * RBD life-cycle:
+ *
+ * Init:
+ * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
+ *
+ * Regular Receive interrupt:
+ * Page Stolen:
+ * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
+ * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
+ * Page not Stolen:
+ * rxq.queue -> rxq.rx_free -> rxq.queue
  * ...
  *
  */
@@ -240,10 +267,6 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
                rxq->free_count--;
        }
        spin_unlock(&rxq->lock);
-       /* If the pre-allocated buffer pool is dropping low, schedule to
-        * refill it */
-       if (rxq->free_count <= RX_LOW_WATERMARK)
-               schedule_work(&trans_pcie->rx_replenish);
 
        /* If we've added more space for the firmware to place data, tell it.
         * Increment device's write pointer in multiples of 8. */
@@ -254,6 +277,44 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
        }
 }
 
+/*
+ * iwl_pcie_rx_alloc_page - allocates and returns a page.
+ */
+static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       struct iwl_rxq *rxq = &trans_pcie->rxq;
+       struct page *page;
+       gfp_t gfp_mask = GFP_KERNEL;
+
+       if (rxq->free_count > RX_LOW_WATERMARK)
+               gfp_mask |= __GFP_NOWARN;
+
+       if (trans_pcie->rx_page_order > 0)
+               gfp_mask |= __GFP_COMP;
+
+       /* Alloc a new receive buffer */
+       page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
+       if (!page) {
+               if (net_ratelimit())
+                       IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
+                                      trans_pcie->rx_page_order);
+               /* Issue an error if the hardware has consumed more than half
+                * of its free buffer list and we don't have enough
+                * pre-allocated buffers.
+`               */
+               if (rxq->free_count <= RX_LOW_WATERMARK &&
+                   iwl_rxq_space(rxq) > (RX_QUEUE_SIZE / 2) &&
+                   net_ratelimit())
+                       IWL_CRIT(trans,
+                                "Failed to alloc_pages with GFP_KERNEL. Only %u free buffers remaining.\n",
+                                rxq->free_count);
+               return NULL;
+       }
+       return page;
+}
+
 /*
  * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
  *
@@ -263,13 +324,12 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
  * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
  * allocated buffers.
  */
-static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
+static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
        struct iwl_rx_mem_buffer *rxb;
        struct page *page;
-       gfp_t gfp_mask = priority;
 
        while (1) {
                spin_lock(&rxq->lock);
@@ -279,32 +339,10 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
                }
                spin_unlock(&rxq->lock);
 
-               if (rxq->free_count > RX_LOW_WATERMARK)
-                       gfp_mask |= __GFP_NOWARN;
-
-               if (trans_pcie->rx_page_order > 0)
-                       gfp_mask |= __GFP_COMP;
-
                /* Alloc a new receive buffer */
-               page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
-               if (!page) {
-                       if (net_ratelimit())
-                               IWL_DEBUG_INFO(trans, "alloc_pages failed, "
-                                          "order: %d\n",
-                                          trans_pcie->rx_page_order);
-
-                       if ((rxq->free_count <= RX_LOW_WATERMARK) &&
-                           net_ratelimit())
-                               IWL_CRIT(trans, "Failed to alloc_pages with %s."
-                                        "Only %u free buffers remaining.\n",
-                                        priority == GFP_ATOMIC ?
-                                        "GFP_ATOMIC" : "GFP_KERNEL",
-                                        rxq->free_count);
-                       /* We don't reschedule replenish work here -- we will
-                        * call the restock method and if it still needs
-                        * more buffers it will schedule replenish */
+               page = iwl_pcie_rx_alloc_page(trans);
+               if (!page)
                        return;
-               }
 
                spin_lock(&rxq->lock);
 
@@ -355,7 +393,7 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
 
        lockdep_assert_held(&rxq->lock);
 
-       for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
+       for (i = 0; i < RX_QUEUE_SIZE; i++) {
                if (!rxq->pool[i].page)
                        continue;
                dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
@@ -372,32 +410,144 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
  * When moving to rx_free a page is allocated for the slot.
  *
  * Also restock the Rx queue via iwl_pcie_rxq_restock.
- * This is called as a scheduled work item (except for during initialization)
+ * This is called only during initialization
  */
-static void iwl_pcie_rx_replenish(struct iwl_trans *trans, gfp_t gfp)
+static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
 {
-       iwl_pcie_rxq_alloc_rbs(trans, gfp);
+       iwl_pcie_rxq_alloc_rbs(trans);
 
        iwl_pcie_rxq_restock(trans);
 }
 
-static void iwl_pcie_rx_replenish_work(struct work_struct *data)
+/*
+ * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
+ *
+ * Allocates 8 pages for each received request.
+ * Called as a scheduled work item.
+ */
+static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       struct iwl_rb_allocator *rba = &trans_pcie->rba;
+
+       while (atomic_read(&rba->req_pending)) {
+               int i;
+               struct list_head local_empty;
+               struct list_head local_allocated;
+
+               INIT_LIST_HEAD(&local_allocated);
+               spin_lock(&rba->lock);
+               /* swap out the entire rba->rbd_empty to a local list */
+               list_replace_init(&rba->rbd_empty, &local_empty);
+               spin_unlock(&rba->lock);
+
+               for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
+                       struct iwl_rx_mem_buffer *rxb;
+                       struct page *page;
+
+                       /* List should never be empty - each reused RBD is
+                        * returned to the list, and the initial pool covers
+                        * any possible gap between the time the page is
+                        * allocated and the time the RBD is added.
+                        */
+                       BUG_ON(list_empty(&local_empty));
+                       /* Get the first rxb from the rbd list */
+                       rxb = list_first_entry(&local_empty,
+                                              struct iwl_rx_mem_buffer, list);
+                       BUG_ON(rxb->page);
+
+                       /* Alloc a new receive buffer */
+                       page = iwl_pcie_rx_alloc_page(trans);
+                       if (!page)
+                               continue;
+                       rxb->page = page;
+
+                       /* Get physical address of the RB */
+                       rxb->page_dma = dma_map_page(trans->dev, page, 0,
+                                       PAGE_SIZE << trans_pcie->rx_page_order,
+                                       DMA_FROM_DEVICE);
+                       if (dma_mapping_error(trans->dev, rxb->page_dma)) {
+                               rxb->page = NULL;
+                               __free_pages(page, trans_pcie->rx_page_order);
+                               continue;
+                       }
+                       /* dma address must be no more than 36 bits */
+                       BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
+                       /* and also 256 byte aligned! */
+                       BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
+
+                       /* move the allocated entry to the out list */
+                       list_move(&rxb->list, &local_allocated);
+                       i++;
+               }
+
+               spin_lock(&rba->lock);
+               /* add the allocated rbds to the allocator allocated list */
+               list_splice_tail(&local_allocated, &rba->rbd_allocated);
+               /* add the unused rbds back to the allocator empty list */
+               list_splice_tail(&local_empty, &rba->rbd_empty);
+               spin_unlock(&rba->lock);
+
+               atomic_dec(&rba->req_pending);
+               atomic_inc(&rba->req_ready);
+       }
+}
+
+/*
+ * iwl_pcie_rx_allocator_get - Returns the pre-allocated pages
+ *
+ * Called by the queue when it has posted an allocation request and
+ * has freed 8 RBDs in order to restock itself.
+ */
+static int iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
+                                    struct iwl_rx_mem_buffer
+                                    *out[RX_CLAIM_REQ_ALLOC])
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       struct iwl_rb_allocator *rba = &trans_pcie->rba;
+       int i;
+
+       if (atomic_dec_return(&rba->req_ready) < 0) {
+               atomic_inc(&rba->req_ready);
+               IWL_DEBUG_RX(trans,
+                            "Allocation request not ready, pending requests = %d\n",
+                            atomic_read(&rba->req_pending));
+               return -ENOMEM;
+       }
+
+       spin_lock(&rba->lock);
+       for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
+               /* Get next free Rx buffer, remove it from free list */
+               out[i] = list_first_entry(&rba->rbd_allocated,
+                              struct iwl_rx_mem_buffer, list);
+               list_del(&out[i]->list);
+       }
+       spin_unlock(&rba->lock);
+
+       return 0;
+}
+
+static void iwl_pcie_rx_allocator_work(struct work_struct *data)
 {
+       struct iwl_rb_allocator *rba_p =
+               container_of(data, struct iwl_rb_allocator, rx_alloc);
        struct iwl_trans_pcie *trans_pcie =
-           container_of(data, struct iwl_trans_pcie, rx_replenish);
+               container_of(rba_p, struct iwl_trans_pcie, rba);
 
-       iwl_pcie_rx_replenish(trans_pcie->trans, GFP_KERNEL);
+       iwl_pcie_rx_allocator(trans_pcie->trans);
 }
 
 static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
+       struct iwl_rb_allocator *rba = &trans_pcie->rba;
        struct device *dev = trans->dev;
 
        memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));
 
        spin_lock_init(&rxq->lock);
+       spin_lock_init(&rba->lock);
 
        if (WARN_ON(rxq->bd || rxq->rb_stts))
                return -EINVAL;
@@ -487,15 +637,49 @@ static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
        INIT_LIST_HEAD(&rxq->rx_free);
        INIT_LIST_HEAD(&rxq->rx_used);
        rxq->free_count = 0;
+       rxq->used_count = 0;
 
-       for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
+       for (i = 0; i < RX_QUEUE_SIZE; i++)
                list_add(&rxq->pool[i].list, &rxq->rx_used);
 }
 
+static void iwl_pcie_rx_init_rba(struct iwl_rb_allocator *rba)
+{
+       int i;
+
+       lockdep_assert_held(&rba->lock);
+
+       INIT_LIST_HEAD(&rba->rbd_allocated);
+       INIT_LIST_HEAD(&rba->rbd_empty);
+
+       for (i = 0; i < RX_POOL_SIZE; i++)
+               list_add(&rba->pool[i].list, &rba->rbd_empty);
+}
+
+static void iwl_pcie_rx_free_rba(struct iwl_trans *trans)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       struct iwl_rb_allocator *rba = &trans_pcie->rba;
+       int i;
+
+       lockdep_assert_held(&rba->lock);
+
+       for (i = 0; i < RX_POOL_SIZE; i++) {
+               if (!rba->pool[i].page)
+                       continue;
+               dma_unmap_page(trans->dev, rba->pool[i].page_dma,
+                              PAGE_SIZE << trans_pcie->rx_page_order,
+                              DMA_FROM_DEVICE);
+               __free_pages(rba->pool[i].page, trans_pcie->rx_page_order);
+               rba->pool[i].page = NULL;
+       }
+}
+
 int iwl_pcie_rx_init(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
+       struct iwl_rb_allocator *rba = &trans_pcie->rba;
        int i, err;
 
        if (!rxq->bd) {
@@ -503,11 +687,21 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
                if (err)
                        return err;
        }
+       if (!rba->alloc_wq)
+               rba->alloc_wq = alloc_workqueue("rb_allocator",
+                                               WQ_HIGHPRI | WQ_UNBOUND, 1);
+       INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work);
+
+       spin_lock(&rba->lock);
+       atomic_set(&rba->req_pending, 0);
+       atomic_set(&rba->req_ready, 0);
+       /* free all first - we might be reconfigured for a different size */
+       iwl_pcie_rx_free_rba(trans);
+       iwl_pcie_rx_init_rba(rba);
+       spin_unlock(&rba->lock);
 
        spin_lock(&rxq->lock);
 
-       INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work);
-
        /* free all first - we might be reconfigured for a different size */
        iwl_pcie_rxq_free_rbs(trans);
        iwl_pcie_rx_init_rxb_lists(rxq);
@@ -522,7 +716,7 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
        memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
        spin_unlock(&rxq->lock);
 
-       iwl_pcie_rx_replenish(trans, GFP_KERNEL);
+       iwl_pcie_rx_replenish(trans);
 
        iwl_pcie_rx_hw_init(trans, rxq);
 
@@ -537,6 +731,7 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
+       struct iwl_rb_allocator *rba = &trans_pcie->rba;
 
        /*if rxq->bd is NULL, it means that nothing has been allocated,
         * exit now */
@@ -545,7 +740,15 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
                return;
        }
 
-       cancel_work_sync(&trans_pcie->rx_replenish);
+       cancel_work_sync(&rba->rx_alloc);
+       if (rba->alloc_wq) {
+               destroy_workqueue(rba->alloc_wq);
+               rba->alloc_wq = NULL;
+       }
+
+       spin_lock(&rba->lock);
+       iwl_pcie_rx_free_rba(trans);
+       spin_unlock(&rba->lock);
 
        spin_lock(&rxq->lock);
        iwl_pcie_rxq_free_rbs(trans);
@@ -566,6 +769,43 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
        rxq->rb_stts = NULL;
 }
 
+/*
+ * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
+ *
+ * Called when an RBD can be reused. The RBD is transferred to the allocator.
+ * When there are 2 empty RBDs - a request for allocation is posted
+ */
+static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
+                                 struct iwl_rx_mem_buffer *rxb,
+                                 struct iwl_rxq *rxq)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       struct iwl_rb_allocator *rba = &trans_pcie->rba;
+
+       /* Count the used RBDs */
+       rxq->used_count++;
+
+       /* Move the RBD to the used list; it will be moved to the allocator
+        * in batches before claiming or posting a request */
+       list_add_tail(&rxb->list, &rxq->rx_used);
+
+       /* If we have RX_POST_REQ_ALLOC newly released rx buffers -
+        * issue a request for the allocator. Modulo RX_CLAIM_REQ_ALLOC
+        * covers the case where we failed to claim RX_CLAIM_REQ_ALLOC
+        * buffers but still need to post another request.
+        */
+       if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
+               /* Move the 2 RBDs to the allocator's ownership. The
+                * allocator has another 6 from the pool for the
+                * request completion */
+               spin_lock(&rba->lock);
+               list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
+               spin_unlock(&rba->lock);
+
+               atomic_inc(&rba->req_pending);
+               queue_work(rba->alloc_wq, &rba->rx_alloc);
+       }
+}
+
 static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
                                struct iwl_rx_mem_buffer *rxb)
 {
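
The modulo test in iwl_pcie_rx_reuse_rbd() means a request is posted each time used_count reaches 2, 10, 18, ... - i.e. RX_POST_REQ_ALLOC past every multiple of RX_CLAIM_REQ_ALLOC - so posting stays correct even when a claim of 8 fails and used_count keeps growing. As a tiny standalone sketch:

	/* with RX_CLAIM_REQ_ALLOC == 8 and RX_POST_REQ_ALLOC == 2 */
	static bool should_post_request(u32 used_count)
	{
		/* true at used_count == 2, 10, 18, ... */
		return (used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC;
	}
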
@@ -688,13 +928,13 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
                         */
                        __free_pages(rxb->page, trans_pcie->rx_page_order);
                        rxb->page = NULL;
-                       list_add_tail(&rxb->list, &rxq->rx_used);
+                       iwl_pcie_rx_reuse_rbd(trans, rxb, rxq);
                } else {
                        list_add_tail(&rxb->list, &rxq->rx_free);
                        rxq->free_count++;
                }
        } else
-               list_add_tail(&rxb->list, &rxq->rx_used);
+               iwl_pcie_rx_reuse_rbd(trans, rxb, rxq);
 }
 
 /*
@@ -704,10 +944,7 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
-       u32 r, i;
-       u8 fill_rx = 0;
-       u32 count = 8;
-       int total_empty;
+       u32 r, i, j;
 
 restart:
        spin_lock(&rxq->lock);
@@ -720,14 +957,6 @@ restart:
        if (i == r)
                IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);
 
-       /* calculate total frames need to be restock after handling RX */
-       total_empty = r - rxq->write_actual;
-       if (total_empty < 0)
-               total_empty += RX_QUEUE_SIZE;
-
-       if (total_empty > (RX_QUEUE_SIZE / 2))
-               fill_rx = 1;
-
        while (i != r) {
                struct iwl_rx_mem_buffer *rxb;
 
@@ -739,29 +968,48 @@ restart:
                iwl_pcie_rx_handle_rb(trans, rxb);
 
                i = (i + 1) & RX_QUEUE_MASK;
-               /* If there are a lot of unused frames,
-                * restock the Rx queue so ucode wont assert. */
-               if (fill_rx) {
-                       count++;
-                       if (count >= 8) {
-                               rxq->read = i;
-                               spin_unlock(&rxq->lock);
-                               iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
-                               count = 0;
-                               goto restart;
+
+               /* If we have RX_CLAIM_REQ_ALLOC released rx buffers -
+                * try to claim the pre-allocated buffers from the allocator */
+               if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) {
+                       struct iwl_rb_allocator *rba = &trans_pcie->rba;
+                       struct iwl_rx_mem_buffer *out[RX_CLAIM_REQ_ALLOC];
+
+                       /* Add the remaining 6 empty RBDs for allocator use */
+                       spin_lock(&rba->lock);
+                       list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
+                       spin_unlock(&rba->lock);
+
+                       /* If not ready - continue, we will try to reclaim
+                        * later. No need to reschedule work - the allocator
+                        * exits only on success */
+                       if (!iwl_pcie_rx_allocator_get(trans, out)) {
+                               /* If success - then RX_CLAIM_REQ_ALLOC
+                                * buffers were retrieved and should be added
+                                * to free list */
+                               rxq->used_count -= RX_CLAIM_REQ_ALLOC;
+                               for (j = 0; j < RX_CLAIM_REQ_ALLOC; j++) {
+                                       list_add_tail(&out[j]->list,
+                                                     &rxq->rx_free);
+                                       rxq->free_count++;
+                               }
                        }
                }
+               /* Handle restock for two cases:
+                * - we just pulled buffers from the allocator
+                * - we have 8+ unstolen pages accumulated */
+               if (rxq->free_count >= RX_CLAIM_REQ_ALLOC) {
+                       rxq->read = i;
+                       spin_unlock(&rxq->lock);
+                       iwl_pcie_rxq_restock(trans);
+                       goto restart;
+               }
        }
 
        /* Backtrack one entry */
        rxq->read = i;
        spin_unlock(&rxq->lock);
 
-       if (fill_rx)
-               iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
-       else
-               iwl_pcie_rxq_restock(trans);
-
        if (trans_pcie->napi.poll)
                napi_gro_flush(&trans_pcie->napi, false);
 }
@@ -775,6 +1023,7 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
 
        /* W/A for WiFi/WiMAX coex and WiMAX own the RF */
        if (trans->cfg->internal_wimax_coex &&
+           !trans->cfg->apmg_not_supported &&
            (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
                             APMS_CLK_VAL_MRB_FUNC_MODE) ||
             (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
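
The queue and the allocator synchronize through two counters: req_pending counts posted-but-unfulfilled requests and req_ready counts fulfilled ones, with the claim path decrementing req_ready speculatively and rolling back when nothing is ready. Condensed from the functions above into one place:

	/* post (queue side, once 2 RBDs have been handed over) */
	atomic_inc(&rba->req_pending);
	queue_work(rba->alloc_wq, &rba->rx_alloc);

	/* fulfil (allocator work item, once 8 pages are mapped) */
	atomic_dec(&rba->req_pending);
	atomic_inc(&rba->req_ready);

	/* claim (queue side): speculative decrement with rollback */
	if (atomic_dec_return(&rba->req_ready) < 0) {
		atomic_inc(&rba->req_ready);	/* nothing ready yet */
		return -ENOMEM;
	}
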
index dc179094e6a0d440b2aa29909c05adbc07f3f6b5..43ae658af6ec56506f9f0e8ed00022b890c3b9fb 100644 (file)
@@ -101,14 +101,26 @@ static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
        trans_pcie->fw_mon_size = 0;
 }
 
-static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans)
+static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct page *page = NULL;
        dma_addr_t phys;
-       u32 size;
+       u32 size = 0;
        u8 power;
 
+       if (!max_power) {
+               /* default max_power is maximum */
+               max_power = 26;
+       } else {
+               max_power += 11;
+       }
+
+       if (WARN(max_power > 26,
+                "External buffer size for monitor is too big %d, check the FW TLV\n",
+                max_power))
+               return;
+
        if (trans_pcie->fw_mon_page) {
                dma_sync_single_for_device(trans->dev, trans_pcie->fw_mon_phys,
                                           trans_pcie->fw_mon_size,
@@ -117,7 +129,7 @@ static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans)
        }
 
        phys = 0;
-       for (power = 26; power >= 11; power--) {
+       for (power = max_power; power >= 11; power--) {
                int order;
 
                size = BIT(power);
@@ -143,6 +155,12 @@ static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans)
        if (WARN_ON_ONCE(!page))
                return;
 
+       if (power != max_power)
+               IWL_ERR(trans,
+                       "Sorry - debug buffer is only %luK while you requested %luK\n",
+                       (unsigned long)BIT(power - 10),
+                       (unsigned long)BIT(max_power - 10));
+
        trans_pcie->fw_mon_page = page;
        trans_pcie->fw_mon_phys = phys;
        trans_pcie->fw_mon_size = size;
@@ -164,6 +182,9 @@ static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
 
 static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
 {
+       if (!trans->cfg->apmg_not_supported)
+               return;
+
        if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
                iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
                                       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
@@ -297,7 +318,7 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
         * bits do not disable clocks.  This preserves any hardware
         * bits already set by default in "CLK_CTRL_REG" after reset.
         */
-       if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
+       if (!trans->cfg->apmg_not_supported) {
                iwl_write_prph(trans, APMG_CLK_EN_REG,
                               APMG_CLK_VAL_DMA_CLK_RQT);
                udelay(20);
@@ -497,8 +518,7 @@ static int iwl_pcie_nic_init(struct iwl_trans *trans)
 
        spin_unlock(&trans_pcie->irq_lock);
 
-       if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
-               iwl_pcie_set_pwr(trans, false);
+       iwl_pcie_set_pwr(trans, false);
 
        iwl_op_mode_nic_config(trans->op_mode);
 
@@ -834,7 +854,7 @@ static void iwl_pcie_apply_destination(struct iwl_trans *trans)
                 get_fw_dbg_mode_string(dest->monitor_mode));
 
        if (dest->monitor_mode == EXTERNAL_MODE)
-               iwl_pcie_alloc_fw_monitor(trans);
+               iwl_pcie_alloc_fw_monitor(trans, dest->size_power);
        else
                IWL_WARN(trans, "PCI should have external buffer debug\n");
 
@@ -908,7 +928,7 @@ static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
        /* supported for 7000 only for the moment */
        if (iwlwifi_mod_params.fw_monitor &&
            trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
-               iwl_pcie_alloc_fw_monitor(trans);
+               iwl_pcie_alloc_fw_monitor(trans, 0);
 
                if (trans_pcie->fw_mon_size) {
                        iwl_write_prph(trans, MON_BUFF_BASE_ADDR,
@@ -955,12 +975,8 @@ static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
                return ret;
 
        /* load to FW the binary sections of CPU2 */
-       ret = iwl_pcie_load_cpu_sections_8000(trans, image, 2,
-                                             &first_ucode_section);
-       if (ret)
-               return ret;
-
-       return 0;
+       return iwl_pcie_load_cpu_sections_8000(trans, image, 2,
+                                              &first_ucode_section);
 }
 
 static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
@@ -1049,7 +1065,7 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
                iwl_pcie_rx_stop(trans);
 
                /* Power-down device's busmaster DMA clocks */
-               if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
+               if (!trans->cfg->apmg_not_supported) {
                        iwl_write_prph(trans, APMG_CLK_DIS_REG,
                                       APMG_CLK_VAL_DMA_CLK_RQT);
                        udelay(5);
@@ -1346,14 +1362,13 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
        iounmap(trans_pcie->hw_base);
        pci_release_regions(trans_pcie->pci_dev);
        pci_disable_device(trans_pcie->pci_dev);
-       kmem_cache_destroy(trans->dev_cmd_pool);
 
        if (trans_pcie->napi.poll)
                netif_napi_del(&trans_pcie->napi);
 
        iwl_pcie_free_fw_monitor(trans);
 
-       kfree(trans);
+       iwl_trans_free(trans);
 }
 
 static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
@@ -2200,6 +2215,29 @@ static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans,
        return sizeof(**data) + fh_regs_len;
 }
 
+static u32
+iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans,
+                                struct iwl_fw_error_dump_fw_mon *fw_mon_data,
+                                u32 monitor_len)
+{
+       u32 buf_size_in_dwords = (monitor_len >> 2);
+       u32 *buffer = (u32 *)fw_mon_data->data;
+       unsigned long flags;
+       u32 i;
+
+       if (!iwl_trans_grab_nic_access(trans, false, &flags))
+               return 0;
+
+       __iwl_write_prph(trans, MON_DMARB_RD_CTL_ADDR, 0x1);
+       for (i = 0; i < buf_size_in_dwords; i++)
+               buffer[i] = __iwl_read_prph(trans, MON_DMARB_RD_DATA_ADDR);
+       __iwl_write_prph(trans, MON_DMARB_RD_CTL_ADDR, 0x0);
+
+       iwl_trans_release_nic_access(trans, &flags);
+
+       return monitor_len;
+}
+
 static
 struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
 {
@@ -2252,7 +2290,8 @@ struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
                      trans->dbg_dest_tlv->end_shift;
 
                /* Make "end" point to the actual end */
-               if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+               if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000 ||
+                   trans->dbg_dest_tlv->monitor_mode == MARBH_MODE)
                        end += (1 << trans->dbg_dest_tlv->end_shift);
                monitor_len = end - base;
                len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
@@ -2328,9 +2367,6 @@ struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
 
                len += sizeof(*data) + sizeof(*fw_mon_data);
                if (trans_pcie->fw_mon_page) {
-                       data->len = cpu_to_le32(trans_pcie->fw_mon_size +
-                                               sizeof(*fw_mon_data));
-
                        /*
                         * The firmware is now asserted, it won't write anything
                         * to the buffer. CPU can take ownership to fetch the
@@ -2345,10 +2381,8 @@ struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
                               page_address(trans_pcie->fw_mon_page),
                               trans_pcie->fw_mon_size);
 
-                       len += trans_pcie->fw_mon_size;
-               } else {
-                       /* If we are here then the buffer is internal */
-
+                       monitor_len = trans_pcie->fw_mon_size;
+               } else if (trans->dbg_dest_tlv->monitor_mode == SMEM_MODE) {
                        /*
                         * Update pointers to reflect actual values after
                         * shifting
@@ -2357,10 +2391,18 @@ struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
                               trans->dbg_dest_tlv->base_shift;
                        iwl_trans_read_mem(trans, base, fw_mon_data->data,
                                           monitor_len / sizeof(u32));
-                       data->len = cpu_to_le32(sizeof(*fw_mon_data) +
-                                               monitor_len);
-                       len += monitor_len;
+               } else if (trans->dbg_dest_tlv->monitor_mode == MARBH_MODE) {
+                       monitor_len =
+                               iwl_trans_pci_dump_marbh_monitor(trans,
+                                                                fw_mon_data,
+                                                                monitor_len);
+               } else {
+                       /* Didn't match anything - output no monitor data */
+                       monitor_len = 0;
                }
+
+               len += monitor_len;
+               data->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data));
        }
 
        dump_data->len = len;
@@ -2419,18 +2461,13 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
        u16 pci_cmd;
        int err;
 
-       trans = kzalloc(sizeof(struct iwl_trans) +
-                       sizeof(struct iwl_trans_pcie), GFP_KERNEL);
-       if (!trans) {
-               err = -ENOMEM;
-               goto out;
-       }
+       trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
+                               &pdev->dev, cfg, &trans_ops_pcie, 0);
+       if (!trans)
+               return ERR_PTR(-ENOMEM);
 
        trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
-       trans->ops = &trans_ops_pcie;
-       trans->cfg = cfg;
-       trans_lockdep_init(trans);
        trans_pcie->trans = trans;
        spin_lock_init(&trans_pcie->irq_lock);
        spin_lock_init(&trans_pcie->reg_lock);
@@ -2554,25 +2591,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
        /* Initialize the wait queue for commands */
        init_waitqueue_head(&trans_pcie->wait_command_queue);
 
-       snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
-                "iwl_cmd_pool:%s", dev_name(trans->dev));
-
-       trans->dev_cmd_headroom = 0;
-       trans->dev_cmd_pool =
-               kmem_cache_create(trans->dev_cmd_pool_name,
-                                 sizeof(struct iwl_device_cmd)
-                                 + trans->dev_cmd_headroom,
-                                 sizeof(void *),
-                                 SLAB_HWCACHE_ALIGN,
-                                 NULL);
-
-       if (!trans->dev_cmd_pool) {
-               err = -ENOMEM;
-               goto out_pci_disable_msi;
-       }
-
        if (iwl_pcie_alloc_ict(trans))
-               goto out_free_cmd_pool;
+               goto out_pci_disable_msi;
 
        err = request_threaded_irq(pdev->irq, iwl_pcie_isr,
                                   iwl_pcie_irq_handler,
@@ -2589,8 +2609,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 
 out_free_ict:
        iwl_pcie_free_ict(trans);
-out_free_cmd_pool:
-       kmem_cache_destroy(trans->dev_cmd_pool);
 out_pci_disable_msi:
        pci_disable_msi(pdev);
 out_pci_release_regions:
@@ -2598,7 +2616,6 @@ out_pci_release_regions:
 out_pci_disable_device:
        pci_disable_device(pdev);
 out_no_pci:
-       kfree(trans);
-out:
+       iwl_trans_free(trans);
        return ERR_PTR(err);
 }
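
size_power from the debug-destination TLV is an exponent offset: 0 keeps the default maximum of 2^26 bytes, anything else requests 2^(11 + size_power) bytes, and the allocator walks the power down until alloc_pages() succeeds. Worked out:

	/* max_power = size_power ? size_power + 11 : 26;
	 * requested size = BIT(max_power) bytes, so e.g.
	 *   size_power == 0  -> 2^26 bytes (64 MB, the ceiling)
	 *   size_power == 4  -> 2^15 bytes (32 KB)
	 * On allocation failure the loop retries power - 1 down to 11
	 * (2 KB); the "Sorry" message reports BIT(power - 10) KB
	 * obtained vs BIT(max_power - 10) KB requested. */
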
index 5ef8044c2ea3ed7317870902168c71be936cd8df..2b86c2135de36f627b397add88628bc47aa37271 100644 (file)
@@ -1049,8 +1049,6 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
            !trans_pcie->cmd_hold_nic_awake) {
                __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
                                         CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-               if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
-                       udelay(2);
 
                ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
                                   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
index 1a4d558022d8c1dff78b1181c6f9bc79e7573a12..8317afd065b498fd8001a0e83ec82c9b9a5284aa 100644 (file)
@@ -835,14 +835,13 @@ static int lbs_cfg_scan(struct wiphy *wiphy,
  * Events
  */
 
-void lbs_send_disconnect_notification(struct lbs_private *priv)
+void lbs_send_disconnect_notification(struct lbs_private *priv,
+                                     bool locally_generated)
 {
        lbs_deb_enter(LBS_DEB_CFG80211);
 
-       cfg80211_disconnected(priv->dev,
-               0,
-               NULL, 0,
-               GFP_KERNEL);
+       cfg80211_disconnected(priv->dev, 0, NULL, 0, locally_generated,
+                             GFP_KERNEL);
 
        lbs_deb_leave(LBS_DEB_CFG80211);
 }
@@ -1458,7 +1457,7 @@ int lbs_disconnect(struct lbs_private *priv, u16 reason)
 
        cfg80211_disconnected(priv->dev,
                        reason,
-                       NULL, 0,
+                       NULL, 0, true,
                        GFP_KERNEL);
        priv->connect_status = LBS_DISCONNECTED;
 
@@ -2031,7 +2030,7 @@ static int lbs_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
        ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_STOP, &cmd);
 
        /* TODO: consider doing this at MACREG_INT_CODE_ADHOC_BCN_LOST time */
-       lbs_mac_event_disconnected(priv);
+       lbs_mac_event_disconnected(priv, true);
 
        lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
        return ret;
index 10995f59fe34a1796db45e914196fc7a03248c07..acccc29224016456f40e152dbe2cae340b9f73e3 100644 (file)
@@ -10,7 +10,8 @@ struct wireless_dev *lbs_cfg_alloc(struct device *dev);
 int lbs_cfg_register(struct lbs_private *priv);
 void lbs_cfg_free(struct lbs_private *priv);
 
-void lbs_send_disconnect_notification(struct lbs_private *priv);
+void lbs_send_disconnect_notification(struct lbs_private *priv,
+                                     bool locally_generated);
 void lbs_send_mic_failureevent(struct lbs_private *priv, u32 event);
 
 void lbs_scan_done(struct lbs_private *priv);
index 4279e8ab95f2aa4545cef71daa5649b1314fdf3b..0c5444b02c64110d6f814983dd1bc8984ed442da 100644 (file)
@@ -68,7 +68,8 @@ int lbs_process_command_response(struct lbs_private *priv, u8 *data, u32 len);
 
 /* From cmdresp.c */
 
-void lbs_mac_event_disconnected(struct lbs_private *priv);
+void lbs_mac_event_disconnected(struct lbs_private *priv,
+                               bool locally_generated);
 
 
 
index 65f18f1e869c86559b1fb8f608a4819b4c3c965e..e5442e8956f7ac3b1182058864816dc700548fe5 100644 (file)
  * reset link state etc.
  *
  * @priv:      A pointer to struct lbs_private structure
+ * @locally_generated: indicates disconnect was requested locally
+ *             (usually by userspace)
  *
  * returns:    n/a
  */
-void lbs_mac_event_disconnected(struct lbs_private *priv)
+void lbs_mac_event_disconnected(struct lbs_private *priv,
+                               bool locally_generated)
 {
        if (priv->connect_status != LBS_CONNECTED)
                return;
@@ -36,7 +39,7 @@ void lbs_mac_event_disconnected(struct lbs_private *priv)
        msleep_interruptible(1000);
 
        if (priv->wdev->iftype == NL80211_IFTYPE_STATION)
-               lbs_send_disconnect_notification(priv);
+               lbs_send_disconnect_notification(priv, locally_generated);
 
        /* report disconnect to upper layer */
        netif_stop_queue(priv->dev);
@@ -229,17 +232,17 @@ int lbs_process_event(struct lbs_private *priv, u32 event)
 
        case MACREG_INT_CODE_DEAUTHENTICATED:
                lbs_deb_cmd("EVENT: deauthenticated\n");
-               lbs_mac_event_disconnected(priv);
+               lbs_mac_event_disconnected(priv, false);
                break;
 
        case MACREG_INT_CODE_DISASSOCIATED:
                lbs_deb_cmd("EVENT: disassociated\n");
-               lbs_mac_event_disconnected(priv);
+               lbs_mac_event_disconnected(priv, false);
                break;
 
        case MACREG_INT_CODE_LINK_LOST_NO_SCAN:
                lbs_deb_cmd("EVENT: link lost\n");
-               lbs_mac_event_disconnected(priv);
+               lbs_mac_event_disconnected(priv, true);
                break;
 
        case MACREG_INT_CODE_PS_SLEEP:
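
Across these libertas hunks the new flag follows one rule: events initiated by the AP (deauthenticated, disassociated) pass false, while disconnects decided on the local side (user-requested teardown, firmware-detected link loss) pass true. The widened cfg80211 call then takes the shape below ('local' is a hypothetical stand-in for that flag):

    /* reason code 0, no IEs; 'local' reports who initiated the split */
    cfg80211_disconnected(priv->dev, 0, NULL, 0, local, GFP_KERNEL);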
index ed02e4bf2c26f5cc333d88b89347b89e577779bb..1bdf18674fb8be42f3759478fbe5a380beb95dee 100644 (file)
@@ -439,7 +439,7 @@ static u64 lbtf_op_prepare_multicast(struct ieee80211_hw *hw,
        return mc_count;
 }
 
-#define SUPPORTED_FIF_FLAGS  (FIF_PROMISC_IN_BSS | FIF_ALLMULTI)
+#define SUPPORTED_FIF_FLAGS  FIF_ALLMULTI
 static void lbtf_op_configure_filter(struct ieee80211_hw *hw,
                        unsigned int changed_flags,
                        unsigned int *new_flags,
@@ -458,10 +458,7 @@ static void lbtf_op_configure_filter(struct ieee80211_hw *hw,
                return;
        }
 
-       if (*new_flags & (FIF_PROMISC_IN_BSS))
-               priv->mac_control |= CMD_ACT_MAC_PROMISCUOUS_ENABLE;
-       else
-               priv->mac_control &= ~CMD_ACT_MAC_PROMISCUOUS_ENABLE;
+       priv->mac_control &= ~CMD_ACT_MAC_PROMISCUOUS_ENABLE;
        if (*new_flags & (FIF_ALLMULTI) ||
            multicast > MRVDRV_MAX_MULTICAST_LIST_SIZE) {
                priv->mac_control |= CMD_ACT_MAC_ALL_MULTICAST_ENABLE;
index d5c0a1af08b997367289c341149fb527e3e1edd6..8d2f6bbf9598b79353a4a8dcf4f4a2317d867bd3 100644 (file)
@@ -1554,8 +1554,6 @@ static void mac80211_hwsim_configure_filter(struct ieee80211_hw *hw,
        wiphy_debug(hw->wiphy, "%s\n", __func__);
 
        data->rx_filter = 0;
-       if (*total_flags & FIF_PROMISC_IN_BSS)
-               data->rx_filter |= FIF_PROMISC_IN_BSS;
        if (*total_flags & FIF_ALLMULTI)
                data->rx_filter |= FIF_ALLMULTI;
 
@@ -2399,7 +2397,8 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
                    IEEE80211_HW_WANT_MONITOR_VIF |
                    IEEE80211_HW_QUEUE_CONTROL |
                    IEEE80211_HW_SUPPORTS_HT_CCK_RATES |
-                   IEEE80211_HW_CHANCTX_STA_CSA;
+                   IEEE80211_HW_CHANCTX_STA_CSA |
+                   IEEE80211_HW_SUPPORT_FAST_XMIT;
        if (rctbl)
                hw->flags |= IEEE80211_HW_SUPPORTS_RC_TABLE;
 
@@ -2438,6 +2437,31 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
                        sband->n_channels = ARRAY_SIZE(hwsim_channels_5ghz);
                        sband->bitrates = data->rates + 4;
                        sband->n_bitrates = ARRAY_SIZE(hwsim_rates) - 4;
+
+                       sband->vht_cap.vht_supported = true;
+                       sband->vht_cap.cap =
+                               IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
+                               IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ |
+                               IEEE80211_VHT_CAP_RXLDPC |
+                               IEEE80211_VHT_CAP_SHORT_GI_80 |
+                               IEEE80211_VHT_CAP_SHORT_GI_160 |
+                               IEEE80211_VHT_CAP_TXSTBC |
+                               IEEE80211_VHT_CAP_RXSTBC_1 |
+                               IEEE80211_VHT_CAP_RXSTBC_2 |
+                               IEEE80211_VHT_CAP_RXSTBC_3 |
+                               IEEE80211_VHT_CAP_RXSTBC_4 |
+                               IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
+                       sband->vht_cap.vht_mcs.rx_mcs_map =
+                               cpu_to_le16(IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 |
+                                           IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 |
+                                           IEEE80211_VHT_MCS_SUPPORT_0_9 << 4 |
+                                           IEEE80211_VHT_MCS_SUPPORT_0_9 << 6 |
+                                           IEEE80211_VHT_MCS_SUPPORT_0_9 << 8 |
+                                           IEEE80211_VHT_MCS_SUPPORT_0_9 << 10 |
+                                           IEEE80211_VHT_MCS_SUPPORT_0_9 << 12 |
+                                           IEEE80211_VHT_MCS_SUPPORT_0_9 << 14);
+                       sband->vht_cap.vht_mcs.tx_mcs_map =
+                               sband->vht_cap.vht_mcs.rx_mcs_map;
                        break;
                default:
                        continue;
@@ -2458,31 +2482,6 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
                sband->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
 
                hw->wiphy->bands[band] = sband;
-
-               sband->vht_cap.vht_supported = true;
-               sband->vht_cap.cap =
-                       IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
-                       IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ |
-                       IEEE80211_VHT_CAP_RXLDPC |
-                       IEEE80211_VHT_CAP_SHORT_GI_80 |
-                       IEEE80211_VHT_CAP_SHORT_GI_160 |
-                       IEEE80211_VHT_CAP_TXSTBC |
-                       IEEE80211_VHT_CAP_RXSTBC_1 |
-                       IEEE80211_VHT_CAP_RXSTBC_2 |
-                       IEEE80211_VHT_CAP_RXSTBC_3 |
-                       IEEE80211_VHT_CAP_RXSTBC_4 |
-                       IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
-               sband->vht_cap.vht_mcs.rx_mcs_map =
-                       cpu_to_le16(IEEE80211_VHT_MCS_SUPPORT_0_8 << 0 |
-                                   IEEE80211_VHT_MCS_SUPPORT_0_8 << 2 |
-                                   IEEE80211_VHT_MCS_SUPPORT_0_9 << 4 |
-                                   IEEE80211_VHT_MCS_SUPPORT_0_8 << 6 |
-                                   IEEE80211_VHT_MCS_SUPPORT_0_8 << 8 |
-                                   IEEE80211_VHT_MCS_SUPPORT_0_9 << 10 |
-                                   IEEE80211_VHT_MCS_SUPPORT_0_9 << 12 |
-                                   IEEE80211_VHT_MCS_SUPPORT_0_8 << 14);
-               sband->vht_cap.vht_mcs.tx_mcs_map =
-                       sband->vht_cap.vht_mcs.rx_mcs_map;
        }
 
        /* By default all radios belong to the first group */
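
The VHT MCS maps built above pack one 2-bit support value per spatial stream into a 16-bit word, so storing IEEE80211_VHT_MCS_SUPPORT_0_9 in all eight fields advertises MCS 0-9 on every stream. A hedged helper for reading one field back (a sketch, not a mac80211 API):

    #include <linux/ieee80211.h>

    /* Returns 0 (MCS 0-7), 1 (MCS 0-8), 2 (MCS 0-9) or
     * 3 (IEEE80211_VHT_MCS_NOT_SUPPORTED) for streams 1..8.
     */
    static u8 sketch_vht_mcs_support(__le16 mcs_map, int nss)
    {
            return (le16_to_cpu(mcs_map) >> (2 * (nss - 1))) & 0x3;
    }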
diff --git a/drivers/net/wireless/mediatek/Kconfig b/drivers/net/wireless/mediatek/Kconfig
new file mode 100644 (file)
index 0000000..cba300c
--- /dev/null
@@ -0,0 +1,10 @@
+menuconfig WL_MEDIATEK
+       bool "Mediatek Wireless LAN support"
+       ---help---
+         Enable community drivers for MediaTek WiFi devices.
+         Those drivers make use of the Linux mac80211 stack.
+
+
+if WL_MEDIATEK
+source "drivers/net/wireless/mediatek/mt7601u/Kconfig"
+endif # WL_MEDIATEK
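
With these entries in place the driver is enabled like any other mac80211 module; a hedged .config fragment, assuming CONFIG_MAC80211 and CONFIG_USB are already set:

    CONFIG_WL_MEDIATEK=y
    CONFIG_MT7601U=m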
diff --git a/drivers/net/wireless/mediatek/Makefile b/drivers/net/wireless/mediatek/Makefile
new file mode 100644 (file)
index 0000000..9d5f182
--- /dev/null
@@ -0,0 +1 @@
+obj-$(CONFIG_MT7601U)  += mt7601u/
diff --git a/drivers/net/wireless/mediatek/mt7601u/Kconfig b/drivers/net/wireless/mediatek/mt7601u/Kconfig
new file mode 100644 (file)
index 0000000..f46bed9
--- /dev/null
@@ -0,0 +1,6 @@
+config MT7601U
+       tristate "MediaTek MT7601U (USB) support"
+       depends on MAC80211
+       depends on USB
+       ---help---
+         This adds support for MT7601U-based wireless USB dongles.
diff --git a/drivers/net/wireless/mediatek/mt7601u/Makefile b/drivers/net/wireless/mediatek/mt7601u/Makefile
new file mode 100644 (file)
index 0000000..ea9ed8a
--- /dev/null
@@ -0,0 +1,9 @@
+ccflags-y += -D__CHECK_ENDIAN__
+
+obj-$(CONFIG_MT7601U)  += mt7601u.o
+
+mt7601u-objs   = \
+       usb.o init.o main.o mcu.o trace.o dma.o core.o eeprom.o phy.o \
+       mac.o util.o debugfs.o tx.o
+
+CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/mediatek/mt7601u/core.c b/drivers/net/wireless/mediatek/mt7601u/core.c
new file mode 100644 (file)
index 0000000..0aabd79
--- /dev/null
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt7601u.h"
+
+int mt7601u_wait_asic_ready(struct mt7601u_dev *dev)
+{
+       int i = 100;
+       u32 val;
+
+       do {
+               if (test_bit(MT7601U_STATE_REMOVED, &dev->state))
+                       return -EIO;
+
+               val = mt7601u_rr(dev, MT_MAC_CSR0);
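+               /* neither all-zeros nor all-ones: the ASIC is up */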
+               if (val && ~val)
+                       return 0;
+
+               udelay(10);
+       } while (i--);
+
+       return -EIO;
+}
+
+bool mt76_poll(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val,
+              int timeout)
+{
+       u32 cur;
+
+       timeout /= 10;
+       do {
+               if (test_bit(MT7601U_STATE_REMOVED, &dev->state))
+                       return false;
+
+               cur = mt7601u_rr(dev, offset) & mask;
+               if (cur == val)
+                       return true;
+
+               udelay(10);
+       } while (timeout-- > 0);
+
+       dev_err(dev->dev, "Error: Time out with reg %08x\n", offset);
+
+       return false;
+}
+
+bool mt76_poll_msec(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val,
+                   int timeout)
+{
+       u32 cur;
+
+       timeout /= 10;
+       do {
+               if (test_bit(MT7601U_STATE_REMOVED, &dev->state))
+                       return false;
+
+               cur = mt7601u_rr(dev, offset) & mask;
+               if (cur == val)
+                       return true;
+
+               msleep(10);
+       } while (timeout-- > 0);
+
+       dev_err(dev->dev, "Error: Time out with reg %08x\n", offset);
+
+       return false;
+}
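
Note the units implied by the loop bodies: mt76_poll() busy-waits in 10 us steps, so its timeout is in microseconds, while mt76_poll_msec() sleeps in 10 ms steps and takes milliseconds. A typical call, mirroring the EFUSE access later in this series:

    /* wait up to 1000 us for the hardware to clear the kick bit */
    if (!mt76_poll(dev, MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK, 0, 1000))
            return -ETIMEDOUT;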
diff --git a/drivers/net/wireless/mediatek/mt7601u/debugfs.c b/drivers/net/wireless/mediatek/mt7601u/debugfs.c
new file mode 100644 (file)
index 0000000..fc00847
--- /dev/null
@@ -0,0 +1,172 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+
+#include "mt7601u.h"
+#include "eeprom.h"
+
+static int
+mt76_reg_set(void *data, u64 val)
+{
+       struct mt7601u_dev *dev = data;
+
+       mt76_wr(dev, dev->debugfs_reg, val);
+       return 0;
+}
+
+static int
+mt76_reg_get(void *data, u64 *val)
+{
+       struct mt7601u_dev *dev = data;
+
+       *val = mt76_rr(dev, dev->debugfs_reg);
+       return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_regval, mt76_reg_get, mt76_reg_set, "0x%08llx\n");
+
+static int
+mt7601u_ampdu_stat_read(struct seq_file *file, void *data)
+{
+       struct mt7601u_dev *dev = file->private;
+       int i, j;
+
+#define stat_printf(grp, off, name)                                    \
+       seq_printf(file, #name ":\t%llu\n", dev->stats.grp[off])
+
+       stat_printf(rx_stat, 0, rx_crc_err);
+       stat_printf(rx_stat, 1, rx_phy_err);
+       stat_printf(rx_stat, 2, rx_false_cca);
+       stat_printf(rx_stat, 3, rx_plcp_err);
+       stat_printf(rx_stat, 4, rx_fifo_overflow);
+       stat_printf(rx_stat, 5, rx_duplicate);
+
+       stat_printf(tx_stat, 0, tx_fail_cnt);
+       stat_printf(tx_stat, 1, tx_bcn_cnt);
+       stat_printf(tx_stat, 2, tx_success);
+       stat_printf(tx_stat, 3, tx_retransmit);
+       stat_printf(tx_stat, 4, tx_zero_len);
+       stat_printf(tx_stat, 5, tx_underflow);
+
+       stat_printf(aggr_stat, 0, non_aggr_tx);
+       stat_printf(aggr_stat, 1, aggr_tx);
+
+       stat_printf(zero_len_del, 0, tx_zero_len_del);
+       stat_printf(zero_len_del, 1, rx_zero_len_del);
+#undef stat_printf
+
+       seq_puts(file, "Aggregations stats:\n");
+       for (i = 0; i < 4; i++) {
+               for (j = 0; j < 8; j++)
+                       seq_printf(file, "%08llx ",
+                                  dev->stats.aggr_n[i * 8 + j]);
+               seq_putc(file, '\n');
+       }
+
+       seq_printf(file, "recent average AMPDU len: %d\n",
+                  atomic_read(&dev->avg_ampdu_len));
+
+       return 0;
+}
+
+static int
+mt7601u_ampdu_stat_open(struct inode *inode, struct file *f)
+{
+       return single_open(f, mt7601u_ampdu_stat_read, inode->i_private);
+}
+
+static const struct file_operations fops_ampdu_stat = {
+       .open = mt7601u_ampdu_stat_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+};
+
+static int
+mt7601u_eeprom_param_read(struct seq_file *file, void *data)
+{
+       struct mt7601u_dev *dev = file->private;
+       struct mt7601u_rate_power *rp = &dev->ee->power_rate_table;
+       struct tssi_data *td = &dev->ee->tssi_data;
+       int i;
+
+       seq_printf(file, "RF freq offset: %hhx\n", dev->ee->rf_freq_off);
+       seq_printf(file, "RSSI offset: %hhx %hhx\n",
+                  dev->ee->rssi_offset[0], dev->ee->rssi_offset[1]);
+       seq_printf(file, "Reference temp: %hhx\n", dev->ee->ref_temp);
+       seq_printf(file, "LNA gain: %hhx\n", dev->ee->lna_gain);
+       seq_printf(file, "Reg channels: %hhu-%hhu\n", dev->ee->reg.start,
+                  dev->ee->reg.start + dev->ee->reg.num - 1);
+
+       seq_puts(file, "Per rate power:\n");
+       for (i = 0; i < 2; i++)
+               seq_printf(file, "\t raw:%02hhx bw20:%02hhx bw40:%02hhx\n",
+                          rp->cck[i].raw, rp->cck[i].bw20, rp->cck[i].bw40);
+       for (i = 0; i < 4; i++)
+               seq_printf(file, "\t raw:%02hhx bw20:%02hhx bw40:%02hhx\n",
+                          rp->ofdm[i].raw, rp->ofdm[i].bw20, rp->ofdm[i].bw40);
+       for (i = 0; i < 4; i++)
+               seq_printf(file, "\t raw:%02hhx bw20:%02hhx bw40:%02hhx\n",
+                          rp->ht[i].raw, rp->ht[i].bw20, rp->ht[i].bw40);
+
+       seq_puts(file, "Per channel power:\n");
+       for (i = 0; i < 7; i++)
+               seq_printf(file, "\t tx_power  ch%u:%02hhx ch%u:%02hhx\n",
+                          i * 2 + 1, dev->ee->chan_pwr[i * 2],
+                          i * 2 + 2, dev->ee->chan_pwr[i * 2 + 1]);
+
+       if (!dev->ee->tssi_enabled)
+               return 0;
+
+       seq_puts(file, "TSSI:\n");
+       seq_printf(file, "\t slope:%02hhx\n", td->slope);
+       seq_printf(file, "\t offset=%02hhx %02hhx %02hhx\n",
+                  td->offset[0], td->offset[1], td->offset[2]);
+       seq_printf(file, "\t delta_off:%08x\n", td->tx0_delta_offset);
+
+       return 0;
+}
+
+static int
+mt7601u_eeprom_param_open(struct inode *inode, struct file *f)
+{
+       return single_open(f, mt7601u_eeprom_param_read, inode->i_private);
+}
+
+static const struct file_operations fops_eeprom_param = {
+       .open = mt7601u_eeprom_param_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+};
+
+void mt7601u_init_debugfs(struct mt7601u_dev *dev)
+{
+       struct dentry *dir;
+
+       dir = debugfs_create_dir("mt7601u", dev->hw->wiphy->debugfsdir);
+       if (!dir)
+               return;
+
+       debugfs_create_u8("temperature", S_IRUSR, dir, &dev->raw_temp);
+       debugfs_create_u32("temp_mode", S_IRUSR, dir, &dev->temp_mode);
+
+       debugfs_create_u32("regidx", S_IRUSR | S_IWUSR, dir, &dev->debugfs_reg);
+       debugfs_create_file("regval", S_IRUSR | S_IWUSR, dir, dev,
+                           &fops_regval);
+       debugfs_create_file("ampdu_stat", S_IRUSR, dir, dev, &fops_ampdu_stat);
+       debugfs_create_file("eeprom_param", S_IRUSR, dir, dev,
+                           &fops_eeprom_param);
+}
diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.c b/drivers/net/wireless/mediatek/mt7601u/dma.c
new file mode 100644 (file)
index 0000000..9c9e128
--- /dev/null
@@ -0,0 +1,533 @@
+/*
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt7601u.h"
+#include "dma.h"
+#include "usb.h"
+#include "trace.h"
+
+static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev,
+                                struct mt7601u_dma_buf_rx *e, gfp_t gfp);
+
+static unsigned int ieee80211_get_hdrlen_from_buf(const u8 *data, unsigned len)
+{
+       const struct ieee80211_hdr *hdr = (const struct ieee80211_hdr *)data;
+       unsigned int hdrlen;
+
+       if (unlikely(len < 10))
+               return 0;
+       hdrlen = ieee80211_hdrlen(hdr->frame_control);
+       if (unlikely(hdrlen > len))
+               return 0;
+       return hdrlen;
+}
+
+static struct sk_buff *
+mt7601u_rx_skb_from_seg(struct mt7601u_dev *dev, struct mt7601u_rxwi *rxwi,
+                       u8 *data, u32 seg_len)
+{
+       struct sk_buff *skb;
+       u32 true_len;
+
+       if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD))
+               seg_len -= 2;
+
+       skb = alloc_skb(seg_len, GFP_ATOMIC);
+       if (!skb)
+               return NULL;
+
+       if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD)) {
+               int hdr_len = ieee80211_get_hdrlen_from_buf(data, seg_len);
+
+               memcpy(skb_put(skb, hdr_len), data, hdr_len);
+               data += hdr_len + 2;
+               seg_len -= hdr_len;
+       }
+
+       memcpy(skb_put(skb, seg_len), data, seg_len);
+
+       true_len = mt76_mac_process_rx(dev, skb, skb->data, rxwi);
+       skb_trim(skb, true_len);
+
+       return skb;
+}
+
+static struct sk_buff *
+mt7601u_rx_skb_from_seg_paged(struct mt7601u_dev *dev,
+                             struct mt7601u_rxwi *rxwi, void *data,
+                             u32 seg_len, u32 truesize, struct page *p)
+{
+       unsigned int hdr_len = ieee80211_get_hdrlen_from_buf(data, seg_len);
+       unsigned int true_len, copy, frag;
+       struct sk_buff *skb;
+
+       skb = alloc_skb(128, GFP_ATOMIC);
+       if (!skb)
+               return NULL;
+
+       true_len = mt76_mac_process_rx(dev, skb, data, rxwi);
+
+       if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD)) {
+               memcpy(skb_put(skb, hdr_len), data, hdr_len);
+               data += hdr_len + 2;
+               true_len -= hdr_len;
+               hdr_len = 0;
+       }
+
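+       /* Copy the whole frame when it fits in the skb head; otherwise
+        * copy just the header area (hdr_len + 8 bytes) and attach the
+        * remainder as a page fragment.
+        */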
+       copy = (true_len <= skb_tailroom(skb)) ? true_len : hdr_len + 8;
+       frag = true_len - copy;
+
+       memcpy(skb_put(skb, copy), data, copy);
+       data += copy;
+
+       if (frag) {
+               skb_add_rx_frag(skb, 0, p, data - page_address(p),
+                               frag, truesize);
+               get_page(p);
+       }
+
+       return skb;
+}
+
+static void mt7601u_rx_process_seg(struct mt7601u_dev *dev, u8 *data,
+                                  u32 seg_len, struct page *p, bool paged)
+{
+       struct sk_buff *skb;
+       struct mt7601u_rxwi *rxwi;
+       u32 fce_info, truesize = seg_len;
+
+       /* The DMA_INFO field at the beginning of the segment contains only
+        * part of the information; the rest sits in the FCE descriptor at
+        * the end of the segment.
+        */
+       fce_info = get_unaligned_le32(data + seg_len - MT_FCE_INFO_LEN);
+       seg_len -= MT_FCE_INFO_LEN;
+
+       data += MT_DMA_HDR_LEN;
+       seg_len -= MT_DMA_HDR_LEN;
+
+       rxwi = (struct mt7601u_rxwi *) data;
+       data += sizeof(struct mt7601u_rxwi);
+       seg_len -= sizeof(struct mt7601u_rxwi);
+
+       if (unlikely(rxwi->zero[0] || rxwi->zero[1] || rxwi->zero[2]))
+               dev_err_once(dev->dev, "Error: RXWI zero fields are set\n");
+       if (unlikely(MT76_GET(MT_RXD_INFO_TYPE, fce_info)))
+               dev_err_once(dev->dev, "Error: RX path seen a non-pkt urb\n");
+
+       trace_mt_rx(dev, rxwi, fce_info);
+
+       if (paged)
+               skb = mt7601u_rx_skb_from_seg_paged(dev, rxwi, data, seg_len,
+                                                   truesize, p);
+       else
+               skb = mt7601u_rx_skb_from_seg(dev, rxwi, data, seg_len);
+       if (!skb)
+               return;
+
+       ieee80211_rx_ni(dev->hw, skb);
+}
+
+static u16 mt7601u_rx_next_seg_len(u8 *data, u32 data_len)
+{
+       u32 min_seg_len = MT_DMA_HDR_LEN + MT_RX_INFO_LEN +
+               sizeof(struct mt7601u_rxwi) + MT_FCE_INFO_LEN;
+       u16 dma_len = get_unaligned_le16(data);
+
+       if (data_len < min_seg_len ||
+           WARN_ON(!dma_len) ||
+           WARN_ON(dma_len + MT_DMA_HDRS > data_len) ||
+           WARN_ON(dma_len & 0x3))
+               return 0;
+
+       return MT_DMA_HDRS + dma_len;
+}
+
+static void
+mt7601u_rx_process_entry(struct mt7601u_dev *dev, struct mt7601u_dma_buf_rx *e)
+{
+       u32 seg_len, data_len = e->urb->actual_length;
+       u8 *data = page_address(e->p);
+       struct page *new_p = NULL;
+       bool paged = true;
+       int cnt = 0;
+
+       if (!test_bit(MT7601U_STATE_INITIALIZED, &dev->state))
+               return;
+
+       /* Copy if there is very little data in the buffer. */
+       if (data_len < 512) {
+               paged = false;
+       } else {
+               new_p = dev_alloc_pages(MT_RX_ORDER);
+               if (!new_p)
+                       paged = false;
+       }
+
+       while ((seg_len = mt7601u_rx_next_seg_len(data, data_len))) {
+               mt7601u_rx_process_seg(dev, data, seg_len, e->p, paged);
+
+               data_len -= seg_len;
+               data += seg_len;
+               cnt++;
+       }
+
+       if (cnt > 1)
+               trace_mt_rx_dma_aggr(dev, cnt, paged);
+
+       if (paged) {
+               /* we have one extra ref from the allocator */
+               __free_pages(e->p, MT_RX_ORDER);
+
+               e->p = new_p;
+       }
+}
+
+static struct mt7601u_dma_buf_rx *
+mt7601u_rx_get_pending_entry(struct mt7601u_dev *dev)
+{
+       struct mt7601u_rx_queue *q = &dev->rx_q;
+       struct mt7601u_dma_buf_rx *buf = NULL;
+       unsigned long flags;
+
+       spin_lock_irqsave(&dev->rx_lock, flags);
+
+       if (!q->pending)
+               goto out;
+
+       buf = &q->e[q->start];
+       q->pending--;
+       q->start = (q->start + 1) % q->entries;
+out:
+       spin_unlock_irqrestore(&dev->rx_lock, flags);
+
+       return buf;
+}
+
+static void mt7601u_complete_rx(struct urb *urb)
+{
+       struct mt7601u_dev *dev = urb->context;
+       struct mt7601u_rx_queue *q = &dev->rx_q;
+       unsigned long flags;
+
+       spin_lock_irqsave(&dev->rx_lock, flags);
+
+       if (mt7601u_urb_has_error(urb))
+               dev_err(dev->dev, "Error: RX urb failed:%d\n", urb->status);
+       if (WARN_ONCE(q->e[q->end].urb != urb, "RX urb mismatch"))
+               goto out;
+
+       q->end = (q->end + 1) % q->entries;
+       q->pending++;
+       tasklet_schedule(&dev->rx_tasklet);
+out:
+       spin_unlock_irqrestore(&dev->rx_lock, flags);
+}
+
+static void mt7601u_rx_tasklet(unsigned long data)
+{
+       struct mt7601u_dev *dev = (struct mt7601u_dev *) data;
+       struct mt7601u_dma_buf_rx *e;
+
+       while ((e = mt7601u_rx_get_pending_entry(dev))) {
+               if (e->urb->status)
+                       continue;
+
+               mt7601u_rx_process_entry(dev, e);
+               mt7601u_submit_rx_buf(dev, e, GFP_ATOMIC);
+       }
+}
+
+static void mt7601u_complete_tx(struct urb *urb)
+{
+       struct mt7601u_tx_queue *q = urb->context;
+       struct mt7601u_dev *dev = q->dev;
+       struct sk_buff *skb;
+       unsigned long flags;
+
+       spin_lock_irqsave(&dev->tx_lock, flags);
+
+       if (mt7601u_urb_has_error(urb))
+               dev_err(dev->dev, "Error: TX urb failed:%d\n", urb->status);
+       if (WARN_ONCE(q->e[q->start].urb != urb, "TX urb mismatch"))
+               goto out;
+
+       skb = q->e[q->start].skb;
+       trace_mt_tx_dma_done(dev, skb);
+
+       mt7601u_tx_status(dev, skb);
+
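+       /* wake the queue once an eighth of the ring has drained */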
+       if (q->used == q->entries - q->entries / 8)
+               ieee80211_wake_queue(dev->hw, skb_get_queue_mapping(skb));
+
+       q->start = (q->start + 1) % q->entries;
+       q->used--;
+
+       if (urb->status)
+               goto out;
+
+       set_bit(MT7601U_STATE_MORE_STATS, &dev->state);
+       if (!test_and_set_bit(MT7601U_STATE_READING_STATS, &dev->state))
+               queue_delayed_work(dev->stat_wq, &dev->stat_work,
+                                  msecs_to_jiffies(10));
+out:
+       spin_unlock_irqrestore(&dev->tx_lock, flags);
+}
+
+static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev,
+                                struct sk_buff *skb, u8 ep)
+{
+       struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
+       unsigned snd_pipe = usb_sndbulkpipe(usb_dev, dev->out_eps[ep]);
+       struct mt7601u_dma_buf_tx *e;
+       struct mt7601u_tx_queue *q = &dev->tx_q[ep];
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&dev->tx_lock, flags);
+
+       if (WARN_ON(q->entries <= q->used)) {
+               ret = -ENOSPC;
+               goto out;
+       }
+
+       e = &q->e[q->end];
+       e->skb = skb;
+       usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len,
+                         mt7601u_complete_tx, q);
+       ret = usb_submit_urb(e->urb, GFP_ATOMIC);
+       if (ret) {
+               /* Special-handle ENODEV from TX urb submission because it will
+                * often be the first ENODEV we see after device is removed.
+                */
+               if (ret == -ENODEV)
+                       set_bit(MT7601U_STATE_REMOVED, &dev->state);
+               else
+                       dev_err(dev->dev, "Error: TX urb submit failed:%d\n",
+                               ret);
+               goto out;
+       }
+
+       q->end = (q->end + 1) % q->entries;
+       q->used++;
+
+       if (q->used >= q->entries)
+               ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
+out:
+       spin_unlock_irqrestore(&dev->tx_lock, flags);
+
+       return ret;
+}
+
+/* Map hardware Q to USB endpoint number */
+static u8 q2ep(u8 qid)
+{
+       /* TODO: take management packets to queue 5 */
+       return qid + 1;
+}
+
+/* Map USB endpoint number to Q id in the DMA engine */
+static enum mt76_qsel ep2dmaq(u8 ep)
+{
+       if (ep == 5)
+               return MT_QSEL_MGMT;
+       return MT_QSEL_EDCA;
+}
+
+int mt7601u_dma_enqueue_tx(struct mt7601u_dev *dev, struct sk_buff *skb,
+                          struct mt76_wcid *wcid, int hw_q)
+{
+       u8 ep = q2ep(hw_q);
+       u32 dma_flags;
+       int ret;
+
+       dma_flags = MT_TXD_PKT_INFO_80211;
+       if (wcid->hw_key_idx == 0xff)
+               dma_flags |= MT_TXD_PKT_INFO_WIV;
+
+       ret = mt7601u_dma_skb_wrap_pkt(skb, ep2dmaq(ep), dma_flags);
+       if (ret)
+               return ret;
+
+       ret = mt7601u_dma_submit_tx(dev, skb, ep);
+       if (ret) {
+               ieee80211_free_txskb(dev->hw, skb);
+               return ret;
+       }
+
+       return 0;
+}
+
+static void mt7601u_kill_rx(struct mt7601u_dev *dev)
+{
+       int i;
+       unsigned long flags;
+
+       spin_lock_irqsave(&dev->rx_lock, flags);
+
+       for (i = 0; i < dev->rx_q.entries; i++) {
+               int next = dev->rx_q.end;
+
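+               /* usb_poison_urb() waits for completion and may sleep,
+                * so drop the lock around the call
+                */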
+               spin_unlock_irqrestore(&dev->rx_lock, flags);
+               usb_poison_urb(dev->rx_q.e[next].urb);
+               spin_lock_irqsave(&dev->rx_lock, flags);
+       }
+
+       spin_unlock_irqrestore(&dev->rx_lock, flags);
+}
+
+static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev,
+                                struct mt7601u_dma_buf_rx *e, gfp_t gfp)
+{
+       struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
+       u8 *buf = page_address(e->p);
+       unsigned pipe;
+       int ret;
+
+       pipe = usb_rcvbulkpipe(usb_dev, dev->in_eps[MT_EP_IN_PKT_RX]);
+
+       usb_fill_bulk_urb(e->urb, usb_dev, pipe, buf, MT_RX_URB_SIZE,
+                         mt7601u_complete_rx, dev);
+
+       trace_mt_submit_urb(dev, e->urb);
+       ret = usb_submit_urb(e->urb, gfp);
+       if (ret)
+               dev_err(dev->dev, "Error: submit RX URB failed:%d\n", ret);
+
+       return ret;
+}
+
+static int mt7601u_submit_rx(struct mt7601u_dev *dev)
+{
+       int i, ret;
+
+       for (i = 0; i < dev->rx_q.entries; i++) {
+               ret = mt7601u_submit_rx_buf(dev, &dev->rx_q.e[i], GFP_KERNEL);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+static void mt7601u_free_rx(struct mt7601u_dev *dev)
+{
+       int i;
+
+       for (i = 0; i < dev->rx_q.entries; i++) {
+               __free_pages(dev->rx_q.e[i].p, MT_RX_ORDER);
+               usb_free_urb(dev->rx_q.e[i].urb);
+       }
+}
+
+static int mt7601u_alloc_rx(struct mt7601u_dev *dev)
+{
+       int i;
+
+       memset(&dev->rx_q, 0, sizeof(dev->rx_q));
+       dev->rx_q.dev = dev;
+       dev->rx_q.entries = N_RX_ENTRIES;
+
+       for (i = 0; i < N_RX_ENTRIES; i++) {
+               dev->rx_q.e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
+               dev->rx_q.e[i].p = dev_alloc_pages(MT_RX_ORDER);
+
+               if (!dev->rx_q.e[i].urb || !dev->rx_q.e[i].p)
+                       return -ENOMEM;
+       }
+
+       return 0;
+}
+
+static void mt7601u_free_tx_queue(struct mt7601u_tx_queue *q)
+{
+       int i;
+
+       WARN_ON(q->used);
+
+       for (i = 0; i < q->entries; i++)  {
+               usb_poison_urb(q->e[i].urb);
+               usb_free_urb(q->e[i].urb);
+       }
+}
+
+static void mt7601u_free_tx(struct mt7601u_dev *dev)
+{
+       int i;
+
+       for (i = 0; i < __MT_EP_OUT_MAX; i++)
+               mt7601u_free_tx_queue(&dev->tx_q[i]);
+}
+
+static int mt7601u_alloc_tx_queue(struct mt7601u_dev *dev,
+                                 struct mt7601u_tx_queue *q)
+{
+       int i;
+
+       q->dev = dev;
+       q->entries = N_TX_ENTRIES;
+
+       for (i = 0; i < N_TX_ENTRIES; i++) {
+               q->e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
+               if (!q->e[i].urb)
+                       return -ENOMEM;
+       }
+
+       return 0;
+}
+
+static int mt7601u_alloc_tx(struct mt7601u_dev *dev)
+{
+       int i;
+
+       dev->tx_q = devm_kcalloc(dev->dev, __MT_EP_OUT_MAX,
+                                sizeof(*dev->tx_q), GFP_KERNEL);
+
+       for (i = 0; i < __MT_EP_OUT_MAX; i++)
+               if (mt7601u_alloc_tx_queue(dev, &dev->tx_q[i]))
+                       return -ENOMEM;
+
+       return 0;
+}
+
+int mt7601u_dma_init(struct mt7601u_dev *dev)
+{
+       int ret = -ENOMEM;
+
+       tasklet_init(&dev->rx_tasklet, mt7601u_rx_tasklet, (unsigned long) dev);
+
+       ret = mt7601u_alloc_tx(dev);
+       if (ret)
+               goto err;
+       ret = mt7601u_alloc_rx(dev);
+       if (ret)
+               goto err;
+
+       ret = mt7601u_submit_rx(dev);
+       if (ret)
+               goto err;
+
+       return 0;
+err:
+       mt7601u_dma_cleanup(dev);
+       return ret;
+}
+
+void mt7601u_dma_cleanup(struct mt7601u_dev *dev)
+{
+       mt7601u_kill_rx(dev);
+
+       tasklet_kill(&dev->rx_tasklet);
+
+       mt7601u_free_rx(dev);
+       mt7601u_free_tx(dev);
+}
diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.h b/drivers/net/wireless/mediatek/mt7601u/dma.h
new file mode 100644 (file)
index 0000000..978e8a9
--- /dev/null
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT7601U_DMA_H
+#define __MT7601U_DMA_H
+
+#include <asm/unaligned.h>
+#include <linux/skbuff.h>
+
+#include "util.h"
+
+#define MT_DMA_HDR_LEN                 4
+#define MT_RX_INFO_LEN                 4
+#define MT_FCE_INFO_LEN                        4
+#define MT_DMA_HDRS                    (MT_DMA_HDR_LEN + MT_RX_INFO_LEN)
+
+/* Common Tx DMA descriptor fields */
+#define MT_TXD_INFO_LEN                        GENMASK(15, 0)
+#define MT_TXD_INFO_D_PORT             GENMASK(29, 27)
+#define MT_TXD_INFO_TYPE               GENMASK(31, 30)
+
+enum mt76_msg_port {
+       WLAN_PORT,
+       CPU_RX_PORT,
+       CPU_TX_PORT,
+       HOST_PORT,
+       VIRTUAL_CPU_RX_PORT,
+       VIRTUAL_CPU_TX_PORT,
+       DISCARD,
+};
+
+enum mt76_info_type {
+       DMA_PACKET,
+       DMA_COMMAND,
+};
+
+/* Tx DMA packet specific flags */
+#define MT_TXD_PKT_INFO_NEXT_VLD       BIT(16)
+#define MT_TXD_PKT_INFO_TX_BURST       BIT(17)
+#define MT_TXD_PKT_INFO_80211          BIT(19)
+#define MT_TXD_PKT_INFO_TSO            BIT(20)
+#define MT_TXD_PKT_INFO_CSO            BIT(21)
+#define MT_TXD_PKT_INFO_WIV            BIT(24)
+#define MT_TXD_PKT_INFO_QSEL           GENMASK(26, 25)
+
+enum mt76_qsel {
+       MT_QSEL_MGMT,
+       MT_QSEL_HCCA,
+       MT_QSEL_EDCA,
+       MT_QSEL_EDCA_2,
+};
+
+/* Tx DMA MCU command specific flags */
+#define MT_TXD_CMD_INFO_SEQ            GENMASK(19, 16)
+#define MT_TXD_CMD_INFO_TYPE           GENMASK(26, 20)
+
+static inline int mt7601u_dma_skb_wrap(struct sk_buff *skb,
+                                      enum mt76_msg_port d_port,
+                                      enum mt76_info_type type, u32 flags)
+{
+       u32 info;
+
+       /* Buffer layout:
+        *      |   4B   | xfer len |      pad       |  4B  |
+        *      | TXINFO | pkt/cmd  | zero pad to 4B | zero |
+        *
+        * length field of TXINFO should be set to 'xfer len'.
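+        * e.g. a 57-byte payload: the length field is 60 and the final
+        * buffer is 4 (TXINFO) + 57 (data) + 3 (pad) + 4 (zero) = 68 bytes.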
+        */
+
+       info = flags |
+               MT76_SET(MT_TXD_INFO_LEN, round_up(skb->len, 4)) |
+               MT76_SET(MT_TXD_INFO_D_PORT, d_port) |
+               MT76_SET(MT_TXD_INFO_TYPE, type);
+
+       put_unaligned_le32(info, skb_push(skb, sizeof(info)));
+       return skb_put_padto(skb, round_up(skb->len, 4) + 4);
+}
+
+static inline int
+mt7601u_dma_skb_wrap_pkt(struct sk_buff *skb, enum mt76_qsel qsel, u32 flags)
+{
+       flags |= MT76_SET(MT_TXD_PKT_INFO_QSEL, qsel);
+       return mt7601u_dma_skb_wrap(skb, WLAN_PORT, DMA_PACKET, flags);
+}
+
+/* Common Rx DMA descriptor fields */
+#define MT_RXD_INFO_LEN                        GENMASK(13, 0)
+#define MT_RXD_INFO_PCIE_INTR          BIT(24)
+#define MT_RXD_INFO_QSEL               GENMASK(26, 25)
+#define MT_RXD_INFO_PORT               GENMASK(29, 27)
+#define MT_RXD_INFO_TYPE               GENMASK(31, 30)
+
+/* Rx DMA packet specific flags */
+#define MT_RXD_PKT_INFO_UDP_ERR                BIT(16)
+#define MT_RXD_PKT_INFO_TCP_ERR                BIT(17)
+#define MT_RXD_PKT_INFO_IP_ERR         BIT(18)
+#define MT_RXD_PKT_INFO_PKT_80211      BIT(19)
+#define MT_RXD_PKT_INFO_L3L4_DONE      BIT(20)
+#define MT_RXD_PKT_INFO_MAC_LEN                GENMASK(23, 21)
+
+/* Rx DMA MCU command specific flags */
+#define MT_RXD_CMD_INFO_SELF_GEN       BIT(15)
+#define MT_RXD_CMD_INFO_CMD_SEQ                GENMASK(19, 16)
+#define MT_RXD_CMD_INFO_EVT_TYPE       GENMASK(23, 20)
+
+enum mt76_evt_type {
+       CMD_DONE,
+       CMD_ERROR,
+       CMD_RETRY,
+       EVENT_PWR_RSP,
+       EVENT_WOW_RSP,
+       EVENT_CARRIER_DETECT_RSP,
+       EVENT_DFS_DETECT_RSP,
+};
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt7601u/eeprom.c b/drivers/net/wireless/mediatek/mt7601u/eeprom.c
new file mode 100644 (file)
index 0000000..ce3837f
--- /dev/null
@@ -0,0 +1,414 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/etherdevice.h>
+#include <asm/unaligned.h>
+#include "mt7601u.h"
+#include "eeprom.h"
+
+static bool
+field_valid(u8 val)
+{
+       return val != 0xff;
+}
+
+static s8
+field_validate(u8 val)
+{
+       if (!field_valid(val))
+               return 0;
+
+       return val;
+}
+
+static int
+mt7601u_efuse_read(struct mt7601u_dev *dev, u16 addr, u8 *data,
+                  enum mt7601u_eeprom_access_modes mode)
+{
+       u32 val;
+       int i;
+
+       val = mt76_rr(dev, MT_EFUSE_CTRL);
+       val &= ~(MT_EFUSE_CTRL_AIN |
+                MT_EFUSE_CTRL_MODE);
+       val |= MT76_SET(MT_EFUSE_CTRL_AIN, addr & ~0xf) |
+              MT76_SET(MT_EFUSE_CTRL_MODE, mode) |
+              MT_EFUSE_CTRL_KICK;
+       mt76_wr(dev, MT_EFUSE_CTRL, val);
+
+       if (!mt76_poll(dev, MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK, 0, 1000))
+               return -ETIMEDOUT;
+
+       val = mt76_rr(dev, MT_EFUSE_CTRL);
+       if ((val & MT_EFUSE_CTRL_AOUT) == MT_EFUSE_CTRL_AOUT) {
+               /* Parts of the EEPROM outside the usage map (0x80-0xc0,
+                * 0xf0) do not return valid data; that is expected, so
+                * hand back all-ones.
+                */
+               memset(data, 0xff, 16);
+               return 0;
+       }
+
+       for (i = 0; i < 4; i++) {
+               val = mt76_rr(dev, MT_EFUSE_DATA(i));
+               put_unaligned_le32(val, data + 4 * i);
+       }
+
+       return 0;
+}
+
+static int
+mt7601u_efuse_physical_size_check(struct mt7601u_dev *dev)
+{
+       const int map_reads = DIV_ROUND_UP(MT_EFUSE_USAGE_MAP_SIZE, 16);
+       u8 data[map_reads * 16];
+       int ret, i;
+       u32 start = 0, end = 0, cnt_free;
+
+       for (i = 0; i < map_reads; i++) {
+               ret = mt7601u_efuse_read(dev, MT_EE_USAGE_MAP_START + i * 16,
+                                        data + i * 16, MT_EE_PHYSICAL_READ);
+               if (ret)
+                       return ret;
+       }
+
+       for (i = 0; i < MT_EFUSE_USAGE_MAP_SIZE; i++)
+               if (!data[i]) {
+                       if (!start)
+                               start = MT_EE_USAGE_MAP_START + i;
+                       end = MT_EE_USAGE_MAP_START + i;
+               }
+       cnt_free = end - start + 1;
+
+       if (MT_EFUSE_USAGE_MAP_SIZE - cnt_free < 5) {
+               dev_err(dev->dev, "Error: your device needs default EEPROM file and this driver doesn't support it!\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static bool
+mt7601u_has_tssi(struct mt7601u_dev *dev, u8 *eeprom)
+{
+       u16 nic_conf1 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_1);
+
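+       /* a conf word of all-ones means the EEPROM was never programmed */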
+       return nic_conf1 != 0xffff &&
+              (nic_conf1 & MT_EE_NIC_CONF_1_TX_ALC_EN);
+}
+
+static void
+mt7601u_set_chip_cap(struct mt7601u_dev *dev, u8 *eeprom)
+{
+       u16 nic_conf0 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_0);
+       u16 nic_conf1 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_1);
+
+       if (!field_valid(nic_conf1 & 0xff))
+               nic_conf1 &= 0xff00;
+
+       dev->ee->tssi_enabled = mt7601u_has_tssi(dev, eeprom) &&
+                               !(nic_conf1 & MT_EE_NIC_CONF_1_TEMP_TX_ALC);
+
+       if (nic_conf1 & MT_EE_NIC_CONF_1_HW_RF_CTRL)
+               dev_err(dev->dev,
+                       "Error: this driver does not support HW RF ctrl\n");
+
+       if (!field_valid(nic_conf0 >> 8))
+               return;
+
+       if (MT76_GET(MT_EE_NIC_CONF_0_RX_PATH, nic_conf0) > 1 ||
+           MT76_GET(MT_EE_NIC_CONF_0_TX_PATH, nic_conf0) > 1)
+               dev_err(dev->dev,
+                       "Error: device has more than 1 RX/TX stream!\n");
+}
+
+static int
+mt7601u_set_macaddr(struct mt7601u_dev *dev, const u8 *eeprom)
+{
+       const void *src = eeprom + MT_EE_MAC_ADDR;
+
+       ether_addr_copy(dev->macaddr, src);
+
+       if (!is_valid_ether_addr(dev->macaddr)) {
+               eth_random_addr(dev->macaddr);
+               dev_info(dev->dev,
+                        "Invalid MAC address, using random address %pM\n",
+                        dev->macaddr);
+       }
+
+       mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->macaddr));
+       mt76_wr(dev, MT_MAC_ADDR_DW1, get_unaligned_le16(dev->macaddr + 4) |
+               MT76_SET(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff));
+
+       return 0;
+}
+
+static void mt7601u_set_channel_target_power(struct mt7601u_dev *dev,
+                                            u8 *eeprom, u8 max_pwr)
+{
+       u8 trgt_pwr = eeprom[MT_EE_TX_TSSI_TARGET_POWER];
+
+       if (trgt_pwr > max_pwr || !trgt_pwr) {
+               dev_warn(dev->dev, "Error: EEPROM trgt power invalid %hhx!\n",
+                        trgt_pwr);
+               trgt_pwr = 0x20;
+       }
+
+       memset(dev->ee->chan_pwr, trgt_pwr, sizeof(dev->ee->chan_pwr));
+}
+
+static void
+mt7601u_set_channel_power(struct mt7601u_dev *dev, u8 *eeprom)
+{
+       u32 i, val;
+       u8 max_pwr;
+
+       val = mt7601u_rr(dev, MT_TX_ALC_CFG_0);
+       max_pwr = MT76_GET(MT_TX_ALC_CFG_0_LIMIT_0, val);
+
+       if (mt7601u_has_tssi(dev, eeprom)) {
+               mt7601u_set_channel_target_power(dev, eeprom, max_pwr);
+               return;
+       }
+
+       for (i = 0; i < 14; i++) {
+               s8 power = field_validate(eeprom[MT_EE_TX_POWER_OFFSET + i]);
+
+               if (power > max_pwr || power < 0)
+                       power = MT7601U_DEFAULT_TX_POWER;
+
+               dev->ee->chan_pwr[i] = power;
+       }
+}
+
+static void
+mt7601u_set_country_reg(struct mt7601u_dev *dev, u8 *eeprom)
+{
+       /* Note: - region 31 is not valid for mt7601u (see rtmp_init.c)
+        *       - comments in rtmp_def.h are incorrect (see rt_channel.c)
+        */
+       static const struct reg_channel_bounds chan_bounds[] = {
+               /* EEPROM country regions 0 - 7 */
+               {  1, 11 },     {  1, 13 },     { 10,  2 },     { 10,  4 },
+               { 14,  1 },     {  1, 14 },     {  3,  7 },     {  5,  9 },
+               /* EEPROM country regions 32 - 33 */
+               {  1, 11 },     {  1, 14 }
+       };
+       u8 val = eeprom[MT_EE_COUNTRY_REGION];
+       int idx = -1;
+
+       if (val < 8)
+               idx = val;
+       if (val > 31 && val < 33)
+               idx = val - 32 + 8;
+
+       if (idx != -1)
+               dev_info(dev->dev,
+                        "EEPROM country region %02hhx (channels %hhd-%hhd)\n",
+                        val, chan_bounds[idx].start,
+                        chan_bounds[idx].start + chan_bounds[idx].num - 1);
+       else
+               idx = 5; /* channels 1 - 14 */
+
+       dev->ee->reg = chan_bounds[idx];
+
+       /* TODO: country region 33 is special - phy should be set to B-mode
+        *       before entering channel 14 (see sta/connect.c)
+        */
+}
+
+static void
+mt7601u_set_rf_freq_off(struct mt7601u_dev *dev, u8 *eeprom)
+{
+       u8 comp;
+
+       dev->ee->rf_freq_off = field_validate(eeprom[MT_EE_FREQ_OFFSET]);
+       comp = field_validate(eeprom[MT_EE_FREQ_OFFSET_COMPENSATION]);
+
+       if (comp & BIT(7))
+               dev->ee->rf_freq_off -= comp & 0x7f;
+       else
+               dev->ee->rf_freq_off += comp;
+}
+
+static void
+mt7601u_set_rssi_offset(struct mt7601u_dev *dev, u8 *eeprom)
+{
+       int i;
+       s8 *rssi_offset = dev->ee->rssi_offset;
+
+       for (i = 0; i < 2; i++) {
+               rssi_offset[i] = eeprom[MT_EE_RSSI_OFFSET + i];
+
+               if (rssi_offset[i] < -10 || rssi_offset[i] > 10) {
+                       dev_warn(dev->dev,
+                                "Warning: EEPROM RSSI is invalid %02hhx\n",
+                                rssi_offset[i]);
+                       rssi_offset[i] = 0;
+               }
+       }
+}
+
+static void
+mt7601u_extra_power_over_mac(struct mt7601u_dev *dev)
+{
+       u32 val;
+
+       val = ((mt7601u_rr(dev, MT_TX_PWR_CFG_1) & 0x0000ff00) >> 8);
+       val |= ((mt7601u_rr(dev, MT_TX_PWR_CFG_2) & 0x0000ff00) << 8);
+       mt7601u_wr(dev, MT_TX_PWR_CFG_7, val);
+
+       val = ((mt7601u_rr(dev, MT_TX_PWR_CFG_4) & 0x0000ff00) >> 8);
+       mt7601u_wr(dev, MT_TX_PWR_CFG_9, val);
+}
+
+static void
+mt7601u_set_power_rate(struct power_per_rate *rate, s8 delta, u8 value)
+{
+       rate->raw = s6_validate(value);
+       rate->bw20 = s6_to_int(value);
+       /* Note: vendor driver does cap the value to s6 right away */
+       rate->bw40 = rate->bw20 + delta;
+}
+
+static void
+mt7601u_save_power_rate(struct mt7601u_dev *dev, s8 delta, u32 val, int i)
+{
+       struct mt7601u_rate_power *t = &dev->ee->power_rate_table;
+
+       switch (i) {
+       case 0:
+               mt7601u_set_power_rate(&t->cck[0], delta, (val >> 0) & 0xff);
+               mt7601u_set_power_rate(&t->cck[1], delta, (val >> 8) & 0xff);
+               /* Save cck bw20 for fixups of channel 14 */
+               dev->ee->real_cck_bw20[0] = t->cck[0].bw20;
+               dev->ee->real_cck_bw20[1] = t->cck[1].bw20;
+
+               mt7601u_set_power_rate(&t->ofdm[0], delta, (val >> 16) & 0xff);
+               mt7601u_set_power_rate(&t->ofdm[1], delta, (val >> 24) & 0xff);
+               break;
+       case 1:
+               mt7601u_set_power_rate(&t->ofdm[2], delta, (val >> 0) & 0xff);
+               mt7601u_set_power_rate(&t->ofdm[3], delta, (val >> 8) & 0xff);
+               mt7601u_set_power_rate(&t->ht[0], delta, (val >> 16) & 0xff);
+               mt7601u_set_power_rate(&t->ht[1], delta, (val >> 24) & 0xff);
+               break;
+       case 2:
+               mt7601u_set_power_rate(&t->ht[2], delta, (val >> 0) & 0xff);
+               mt7601u_set_power_rate(&t->ht[3], delta, (val >> 8) & 0xff);
+               break;
+       }
+}
+
+static s8
+get_delta(u8 val)
+{
+       s8 ret;
+
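+       /* BIT(7) = delta valid, BIT(6) = sign, low five bits = magnitude
+        * (capped at 8 below)
+        */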
+       if (!field_valid(val) || !(val & BIT(7)))
+               return 0;
+
+       ret = val & 0x1f;
+       if (ret > 8)
+               ret = 8;
+       if (val & BIT(6))
+               ret = -ret;
+
+       return ret;
+}
+
+static void
+mt7601u_config_tx_power_per_rate(struct mt7601u_dev *dev, u8 *eeprom)
+{
+       u32 val;
+       s8 bw40_delta;
+       int i;
+
+       bw40_delta = get_delta(eeprom[MT_EE_TX_POWER_DELTA_BW40]);
+
+       for (i = 0; i < 5; i++) {
+               val = get_unaligned_le32(eeprom + MT_EE_TX_POWER_BYRATE(i));
+
+               mt7601u_save_power_rate(dev, bw40_delta, val, i);
+
+               if (~val)
+                       mt7601u_wr(dev, MT_TX_PWR_CFG_0 + i * 4, val);
+       }
+
+       mt7601u_extra_power_over_mac(dev);
+}
+
+static void
+mt7601u_init_tssi_params(struct mt7601u_dev *dev, u8 *eeprom)
+{
+       struct tssi_data *d = &dev->ee->tssi_data;
+
+       if (!dev->ee->tssi_enabled)
+               return;
+
+       d->slope = eeprom[MT_EE_TX_TSSI_SLOPE];
+       d->tx0_delta_offset = eeprom[MT_EE_TX_TSSI_OFFSET] * 1024;
+       d->offset[0] = eeprom[MT_EE_TX_TSSI_OFFSET_GROUP];
+       d->offset[1] = eeprom[MT_EE_TX_TSSI_OFFSET_GROUP + 1];
+       d->offset[2] = eeprom[MT_EE_TX_TSSI_OFFSET_GROUP + 2];
+}
+
+int
+mt7601u_eeprom_init(struct mt7601u_dev *dev)
+{
+       u8 *eeprom;
+       int i, ret;
+
+       ret = mt7601u_efuse_physical_size_check(dev);
+       if (ret)
+               return ret;
+
+       dev->ee = devm_kzalloc(dev->dev, sizeof(*dev->ee), GFP_KERNEL);
+       if (!dev->ee)
+               return -ENOMEM;
+
+       eeprom = kmalloc(MT7601U_EEPROM_SIZE, GFP_KERNEL);
+       if (!eeprom)
+               return -ENOMEM;
+
+       for (i = 0; i + 16 <= MT7601U_EEPROM_SIZE; i += 16) {
+               ret = mt7601u_efuse_read(dev, i, eeprom + i, MT_EE_READ);
+               if (ret)
+                       goto out;
+       }
+
+       if (eeprom[MT_EE_VERSION_EE] > MT7601U_EE_MAX_VER)
+               dev_warn(dev->dev,
+                        "Warning: unsupported EEPROM version %02hhx\n",
+                        eeprom[MT_EE_VERSION_EE]);
+       dev_info(dev->dev, "EEPROM ver:%02hhx fae:%02hhx\n",
+                eeprom[MT_EE_VERSION_EE], eeprom[MT_EE_VERSION_FAE]);
+
+       mt7601u_set_macaddr(dev, eeprom);
+       mt7601u_set_chip_cap(dev, eeprom);
+       mt7601u_set_channel_power(dev, eeprom);
+       mt7601u_set_country_reg(dev, eeprom);
+       mt7601u_set_rf_freq_off(dev, eeprom);
+       mt7601u_set_rssi_offset(dev, eeprom);
+       dev->ee->ref_temp = eeprom[MT_EE_REF_TEMP];
+       dev->ee->lna_gain = eeprom[MT_EE_LNA_GAIN];
+
+       mt7601u_config_tx_power_per_rate(dev, eeprom);
+
+       mt7601u_init_tssi_params(dev, eeprom);
+out:
+       kfree(eeprom);
+       return ret;
+}
diff --git a/drivers/net/wireless/mediatek/mt7601u/eeprom.h b/drivers/net/wireless/mediatek/mt7601u/eeprom.h
new file mode 100644 (file)
index 0000000..662d127
--- /dev/null
@@ -0,0 +1,151 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT7601U_EEPROM_H
+#define __MT7601U_EEPROM_H
+
+struct mt7601u_dev;
+
+#define MT7601U_EE_MAX_VER                     0x0c
+#define MT7601U_EEPROM_SIZE                    256
+
+#define MT7601U_DEFAULT_TX_POWER               6
+
+enum mt76_eeprom_field {
+       MT_EE_CHIP_ID =                         0x00,
+       MT_EE_VERSION_FAE =                     0x02,
+       MT_EE_VERSION_EE =                      0x03,
+       MT_EE_MAC_ADDR =                        0x04,
+       MT_EE_NIC_CONF_0 =                      0x34,
+       MT_EE_NIC_CONF_1 =                      0x36,
+       MT_EE_COUNTRY_REGION =                  0x39,
+       MT_EE_FREQ_OFFSET =                     0x3a,
+       MT_EE_NIC_CONF_2 =                      0x42,
+
+       MT_EE_LNA_GAIN =                        0x44,
+       MT_EE_RSSI_OFFSET =                     0x46,
+
+       MT_EE_TX_POWER_DELTA_BW40 =             0x50,
+       MT_EE_TX_POWER_OFFSET =                 0x52,
+
+       MT_EE_TX_TSSI_SLOPE =                   0x6e,
+       MT_EE_TX_TSSI_OFFSET_GROUP =            0x6f,
+       MT_EE_TX_TSSI_OFFSET =                  0x76,
+
+       MT_EE_TX_TSSI_TARGET_POWER =            0xd0,
+       MT_EE_REF_TEMP =                        0xd1,
+       MT_EE_FREQ_OFFSET_COMPENSATION =        0xdb,
+       MT_EE_TX_POWER_BYRATE_BASE =            0xde,
+
+       MT_EE_USAGE_MAP_START =                 0x1e0,
+       MT_EE_USAGE_MAP_END =                   0x1fc,
+};
+
+#define MT_EE_NIC_CONF_0_RX_PATH               GENMASK(3, 0)
+#define MT_EE_NIC_CONF_0_TX_PATH               GENMASK(7, 4)
+#define MT_EE_NIC_CONF_0_BOARD_TYPE            GENMASK(13, 12)
+
+#define MT_EE_NIC_CONF_1_HW_RF_CTRL            BIT(0)
+#define MT_EE_NIC_CONF_1_TEMP_TX_ALC           BIT(1)
+#define MT_EE_NIC_CONF_1_LNA_EXT_2G            BIT(2)
+#define MT_EE_NIC_CONF_1_LNA_EXT_5G            BIT(3)
+#define MT_EE_NIC_CONF_1_TX_ALC_EN             BIT(13)
+
+#define MT_EE_NIC_CONF_2_RX_STREAM             GENMASK(3, 0)
+#define MT_EE_NIC_CONF_2_TX_STREAM             GENMASK(7, 4)
+#define MT_EE_NIC_CONF_2_HW_ANTDIV             BIT(8)
+#define MT_EE_NIC_CONF_2_XTAL_OPTION           GENMASK(10, 9)
+#define MT_EE_NIC_CONF_2_TEMP_DISABLE          BIT(11)
+#define MT_EE_NIC_CONF_2_COEX_METHOD           GENMASK(15, 13)
+
+#define MT_EE_TX_POWER_BYRATE(i)               (MT_EE_TX_POWER_BYRATE_BASE + \
+                                                (i) * 4)
+
+#define MT_EFUSE_USAGE_MAP_SIZE                        (MT_EE_USAGE_MAP_END -  \
+                                                MT_EE_USAGE_MAP_START + 1)
+
+enum mt7601u_eeprom_access_modes {
+       MT_EE_READ = 0,
+       MT_EE_PHYSICAL_READ = 1,
+};
+
+struct power_per_rate  {
+       u8 raw;  /* validated s6 value */
+       s8 bw20; /* sign-extended int */
+       s8 bw40; /* sign-extended int */
+};
+
+/* Power per rate - one value per two rates */
+struct mt7601u_rate_power {
+       struct power_per_rate cck[2];
+       struct power_per_rate ofdm[4];
+       struct power_per_rate ht[4];
+};
+
+struct reg_channel_bounds {
+       u8 start;
+       u8 num;
+};
+
+struct mt7601u_eeprom_params {
+       bool tssi_enabled;
+       u8 rf_freq_off;
+       s8 rssi_offset[2];
+       s8 ref_temp;
+       s8 lna_gain;
+
+       u8 chan_pwr[14];
+       struct mt7601u_rate_power power_rate_table;
+       s8 real_cck_bw20[2];
+
+       /* TSSI stuff - only with internal TX ALC */
+       struct tssi_data {
+               int tx0_delta_offset;
+               u8 slope;
+               u8 offset[3];
+       } tssi_data;
+
+       struct reg_channel_bounds reg;
+};
+
+int mt7601u_eeprom_init(struct mt7601u_dev *dev);
+
+static inline u32 s6_validate(u32 reg)
+{
+       WARN_ON(reg & ~GENMASK(5, 0));
+       return reg & GENMASK(5, 0);
+}
+
+static inline int s6_to_int(u32 reg)
+{
+       int s6;
+
+       s6 = s6_validate(reg);
+       if (s6 & BIT(5))
+               s6 -= BIT(6);
+
+       return s6;
+}
+
+static inline u32 int_to_s6(int val)
+{
+       if (val < -0x20)
+               return 0x20;
+       if (val > 0x1f)
+               return 0x1f;
+
+       return val & 0x3f;
+}
+
+#endif
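
The s6 helpers above convert the EEPROM's 6-bit two's-complement fields (bit 5 is the sign bit) to and from native ints, with int_to_s6() saturating to the representable [-32, 31] range; MT_EFUSE_USAGE_MAP_SIZE likewise works out to 0x1fc - 0x1e0 + 1 = 29 bytes. A minimal sketch exercising the helpers follows; the values are hypothetical and the snippet is an illustration, not part of the patch:

    /* Sketch, not part of the patch: 6-bit two's-complement round trip. */
    int rssi = s6_to_int(0x3a);     /* bit 5 set: 0x3a - 0x40 == -6 */
    u32 raw = int_to_s6(rssi);      /* -6 & 0x3f == 0x3a, round trip */
    u32 sat = int_to_s6(100);       /* above 31, saturates to 0x1f */

    WARN_ON(rssi != -6 || raw != 0x3a || sat != 0x1f);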
diff --git a/drivers/net/wireless/mediatek/mt7601u/init.c b/drivers/net/wireless/mediatek/mt7601u/init.c
new file mode 100644 (file)
index 0000000..1fc86e8
--- /dev/null
@@ -0,0 +1,625 @@
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt7601u.h"
+#include "eeprom.h"
+#include "trace.h"
+#include "mcu.h"
+
+#include "initvals.h"
+
+static void
+mt7601u_set_wlan_state(struct mt7601u_dev *dev, u32 val, bool enable)
+{
+       int i;
+
+       /* Note: we don't turn off WLAN_CLK because that makes the device
+        *       not respond properly on the probe path.
+        *       In case anyone (PSM?) wants to use this function we can
+        *       bring the clock stuff back and fix up the probe path.
+        */
+
+       if (enable)
+               val |= (MT_WLAN_FUN_CTRL_WLAN_EN |
+                       MT_WLAN_FUN_CTRL_WLAN_CLK_EN);
+       else
+               val &= ~(MT_WLAN_FUN_CTRL_WLAN_EN);
+
+       mt7601u_wr(dev, MT_WLAN_FUN_CTRL, val);
+       udelay(20);
+
+       if (enable) {
+               set_bit(MT7601U_STATE_WLAN_RUNNING, &dev->state);
+       } else {
+               clear_bit(MT7601U_STATE_WLAN_RUNNING, &dev->state);
+               return;
+       }
+
+       for (i = 200; i; i--) {
+               val = mt7601u_rr(dev, MT_CMB_CTRL);
+
+               if (val & MT_CMB_CTRL_XTAL_RDY && val & MT_CMB_CTRL_PLL_LD)
+                       break;
+
+               udelay(20);
+       }
+
+       /* Note: the vendor driver tries to disable/enable wlan here and retry,
+        *       but the code which does it is so buggy it must never have
+        *       triggered, so don't bother.
+        */
+       if (!i)
+               dev_err(dev->dev, "Error: PLL and XTAL check failed!\n");
+}
+
+static void mt7601u_chip_onoff(struct mt7601u_dev *dev, bool enable, bool reset)
+{
+       u32 val;
+
+       mutex_lock(&dev->hw_atomic_mutex);
+
+       val = mt7601u_rr(dev, MT_WLAN_FUN_CTRL);
+
+       if (reset) {
+               val |= MT_WLAN_FUN_CTRL_GPIO_OUT_EN;
+               val &= ~MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL;
+
+               if (val & MT_WLAN_FUN_CTRL_WLAN_EN) {
+                       val |= (MT_WLAN_FUN_CTRL_WLAN_RESET |
+                               MT_WLAN_FUN_CTRL_WLAN_RESET_RF);
+                       mt7601u_wr(dev, MT_WLAN_FUN_CTRL, val);
+                       udelay(20);
+
+                       val &= ~(MT_WLAN_FUN_CTRL_WLAN_RESET |
+                                MT_WLAN_FUN_CTRL_WLAN_RESET_RF);
+               }
+       }
+
+       mt7601u_wr(dev, MT_WLAN_FUN_CTRL, val);
+       udelay(20);
+
+       mt7601u_set_wlan_state(dev, val, enable);
+
+       mutex_unlock(&dev->hw_atomic_mutex);
+}
+
+static void mt7601u_reset_csr_bbp(struct mt7601u_dev *dev)
+{
+       mt7601u_wr(dev, MT_MAC_SYS_CTRL, (MT_MAC_SYS_CTRL_RESET_CSR |
+                                         MT_MAC_SYS_CTRL_RESET_BBP));
+       mt7601u_wr(dev, MT_USB_DMA_CFG, 0);
+       msleep(1);
+       mt7601u_wr(dev, MT_MAC_SYS_CTRL, 0);
+}
+
+static void mt7601u_init_usb_dma(struct mt7601u_dev *dev)
+{
+       u32 val;
+
+       val = MT76_SET(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, MT_USB_AGGR_TIMEOUT) |
+             MT76_SET(MT_USB_DMA_CFG_RX_BULK_AGG_LMT, MT_USB_AGGR_SIZE_LIMIT) |
+             MT_USB_DMA_CFG_RX_BULK_EN |
+             MT_USB_DMA_CFG_TX_BULK_EN;
+       if (dev->in_max_packet == 512)
+               val |= MT_USB_DMA_CFG_RX_BULK_AGG_EN;
+       mt7601u_wr(dev, MT_USB_DMA_CFG, val);
+
+       val |= MT_USB_DMA_CFG_UDMA_RX_WL_DROP;
+       mt7601u_wr(dev, MT_USB_DMA_CFG, val);
+       val &= ~MT_USB_DMA_CFG_UDMA_RX_WL_DROP;
+       mt7601u_wr(dev, MT_USB_DMA_CFG, val);
+}
+
+static int mt7601u_init_bbp(struct mt7601u_dev *dev)
+{
+       int ret;
+
+       ret = mt7601u_wait_bbp_ready(dev);
+       if (ret)
+               return ret;
+
+       ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP, bbp_common_vals,
+                                     ARRAY_SIZE(bbp_common_vals));
+       if (ret)
+               return ret;
+
+       return mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP, bbp_chip_vals,
+                                      ARRAY_SIZE(bbp_chip_vals));
+}
+
+static void
+mt76_init_beacon_offsets(struct mt7601u_dev *dev)
+{
+       u16 base = MT_BEACON_BASE;
+       u32 regs[4] = {};
+       int i;
+
+       for (i = 0; i < 16; i++) {
+               u16 addr = dev->beacon_offsets[i];
+
+               regs[i / 4] |= ((addr - base) / 64) << (8 * (i % 4));
+       }
+
+       for (i = 0; i < 4; i++)
+               mt7601u_wr(dev, MT_BCN_OFFSET(i), regs[i]);
+}
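
mt76_init_beacon_offsets() packs sixteen 512-byte beacon slots into four registers, one byte per slot, in 64-byte units relative to MT_BEACON_BASE. A worked example for slot 5, using the beacon_offsets table from mt7601u_init_hardware() further down; the arithmetic is an illustration, not driver output:

    /* Slot 5 lives at 0xca00 with MT_BEACON_BASE at 0xc000, so:
     *   (0xca00 - 0xc000) / 64 == 0x28
     * and i % 4 == 1 places it in byte 1 of MT_BCN_OFFSET(1), matching
     * the 0x38302820 value that mac_chip_vals writes for that register.
     */
    u32 slot = (0xca00 - 0xc000) / 64;          /* 0x28 */
    u32 contrib = slot << (8 * (5 % 4));        /* 0x2800 */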
+
+static int mt7601u_write_mac_initvals(struct mt7601u_dev *dev)
+{
+       int ret;
+
+       ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_WLAN, mac_common_vals,
+                                     ARRAY_SIZE(mac_common_vals));
+       if (ret)
+               return ret;
+       ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_WLAN,
+                                     mac_chip_vals, ARRAY_SIZE(mac_chip_vals));
+       if (ret)
+               return ret;
+
+       mt76_init_beacon_offsets(dev);
+
+       mt7601u_wr(dev, MT_AUX_CLK_CFG, 0);
+
+       return 0;
+}
+
+static int mt7601u_init_wcid_mem(struct mt7601u_dev *dev)
+{
+       u32 *vals;
+       int i, ret;
+
+       vals = kmalloc(sizeof(*vals) * N_WCIDS * 2, GFP_KERNEL);
+       if (!vals)
+               return -ENOMEM;
+
+       for (i = 0; i < N_WCIDS; i++)  {
+               vals[i * 2] = 0xffffffff;
+               vals[i * 2 + 1] = 0x00ffffff;
+       }
+
+       ret = mt7601u_burst_write_regs(dev, MT_WCID_ADDR_BASE,
+                                      vals, N_WCIDS * 2);
+       kfree(vals);
+
+       return ret;
+}
+
+static int mt7601u_init_key_mem(struct mt7601u_dev *dev)
+{
+       u32 vals[4] = {};
+
+       return mt7601u_burst_write_regs(dev, MT_SKEY_MODE_BASE_0,
+                                       vals, ARRAY_SIZE(vals));
+}
+
+static int mt7601u_init_wcid_attr_mem(struct mt7601u_dev *dev)
+{
+       u32 *vals;
+       int i, ret;
+
+       vals = kmalloc(sizeof(*vals) * N_WCIDS * 2, GFP_KERNEL);
+       if (!vals)
+               return -ENOMEM;
+
+       for (i = 0; i < N_WCIDS * 2; i++)
+               vals[i] = 1;
+
+       ret = mt7601u_burst_write_regs(dev, MT_WCID_ATTR_BASE,
+                                      vals, N_WCIDS * 2);
+       kfree(vals);
+
+       return ret;
+}
+
+static void mt7601u_reset_counters(struct mt7601u_dev *dev)
+{
+       mt7601u_rr(dev, MT_RX_STA_CNT0);
+       mt7601u_rr(dev, MT_RX_STA_CNT1);
+       mt7601u_rr(dev, MT_RX_STA_CNT2);
+       mt7601u_rr(dev, MT_TX_STA_CNT0);
+       mt7601u_rr(dev, MT_TX_STA_CNT1);
+       mt7601u_rr(dev, MT_TX_STA_CNT2);
+}
+
+int mt7601u_mac_start(struct mt7601u_dev *dev)
+{
+       mt7601u_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
+
+       if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+                      MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 200000))
+               return -ETIMEDOUT;
+
+       dev->rxfilter = MT_RX_FILTR_CFG_CRC_ERR |
+               MT_RX_FILTR_CFG_PHY_ERR | MT_RX_FILTR_CFG_PROMISC |
+               MT_RX_FILTR_CFG_VER_ERR | MT_RX_FILTR_CFG_DUP |
+               MT_RX_FILTR_CFG_CFACK | MT_RX_FILTR_CFG_CFEND |
+               MT_RX_FILTR_CFG_ACK | MT_RX_FILTR_CFG_CTS |
+               MT_RX_FILTR_CFG_RTS | MT_RX_FILTR_CFG_PSPOLL |
+               MT_RX_FILTR_CFG_BA | MT_RX_FILTR_CFG_CTRL_RSV;
+       mt7601u_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+
+       mt7601u_wr(dev, MT_MAC_SYS_CTRL,
+                  MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX);
+
+       if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+                      MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 50))
+               return -ETIMEDOUT;
+
+       return 0;
+}
+
+static void mt7601u_mac_stop_hw(struct mt7601u_dev *dev)
+{
+       int i, ok;
+
+       if (test_bit(MT7601U_STATE_REMOVED, &dev->state))
+               return;
+
+       mt76_clear(dev, MT_BEACON_TIME_CFG, MT_BEACON_TIME_CFG_TIMER_EN |
+                  MT_BEACON_TIME_CFG_SYNC_MODE | MT_BEACON_TIME_CFG_TBTT_EN |
+                  MT_BEACON_TIME_CFG_BEACON_TX);
+
+       if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_TX_BUSY, 0, 1000))
+               dev_warn(dev->dev, "Warning: TX DMA did not stop!\n");
+
+       /* Page count on TxQ */
+       i = 200;
+       while (i-- && ((mt76_rr(dev, 0x0438) & 0xffffffff) ||
+                      (mt76_rr(dev, 0x0a30) & 0x000000ff) ||
+                      (mt76_rr(dev, 0x0a34) & 0x00ff00ff)))
+               msleep(10);
+
+       if (!mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_TX, 0, 1000))
+               dev_warn(dev->dev, "Warning: MAC TX did not stop!\n");
+
+       mt76_clear(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_RX |
+                                        MT_MAC_SYS_CTRL_ENABLE_TX);
+
+       /* Page count on RxQ */
+       ok = 0;
+       i = 200;
+       while (i--) {
+               if ((mt76_rr(dev, 0x0430) & 0x00ff0000) ||
+                   (mt76_rr(dev, 0x0a30) & 0xffffffff) ||
+                   (mt76_rr(dev, 0x0a34) & 0xffffffff))
+                       ok++;
+               if (ok > 6)
+                       break;
+
+               msleep(1);
+       }
+
+       if (!mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_RX, 0, 1000))
+               dev_warn(dev->dev, "Warning: MAC RX did not stop!\n");
+
+       if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_RX_BUSY, 0, 1000))
+               dev_warn(dev->dev, "Warning: RX DMA did not stop!\n");
+}
+
+void mt7601u_mac_stop(struct mt7601u_dev *dev)
+{
+       mt7601u_mac_stop_hw(dev);
+       flush_delayed_work(&dev->stat_work);
+       cancel_delayed_work_sync(&dev->stat_work);
+}
+
+static void mt7601u_stop_hardware(struct mt7601u_dev *dev)
+{
+       mt7601u_chip_onoff(dev, false, false);
+}
+
+int mt7601u_init_hardware(struct mt7601u_dev *dev)
+{
+       static const u16 beacon_offsets[16] = {
+               /* 512 bytes per beacon */
+               0xc000, 0xc200, 0xc400, 0xc600,
+               0xc800, 0xca00, 0xcc00, 0xce00,
+               0xd000, 0xd200, 0xd400, 0xd600,
+               0xd800, 0xda00, 0xdc00, 0xde00
+       };
+       int ret;
+
+       dev->beacon_offsets = beacon_offsets;
+
+       mt7601u_chip_onoff(dev, true, false);
+
+       ret = mt7601u_wait_asic_ready(dev);
+       if (ret)
+               goto err;
+       ret = mt7601u_mcu_init(dev);
+       if (ret)
+               goto err;
+
+       if (!mt76_poll_msec(dev, MT_WPDMA_GLO_CFG,
+                           MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+                           MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 100)) {
+               ret = -EIO;
+               goto err;
+       }
+
+       /* Wait for ASIC ready after FW load. */
+       ret = mt7601u_wait_asic_ready(dev);
+       if (ret)
+               goto err;
+
+       mt7601u_reset_csr_bbp(dev);
+       mt7601u_init_usb_dma(dev);
+
+       ret = mt7601u_mcu_cmd_init(dev);
+       if (ret)
+               goto err;
+       ret = mt7601u_dma_init(dev);
+       if (ret)
+               goto err_mcu;
+       ret = mt7601u_write_mac_initvals(dev);
+       if (ret)
+               goto err_rx;
+
+       if (!mt76_poll_msec(dev, MT_MAC_STATUS,
+                           MT_MAC_STATUS_TX | MT_MAC_STATUS_RX, 0, 100)) {
+               ret = -EIO;
+               goto err_rx;
+       }
+
+       ret = mt7601u_init_bbp(dev);
+       if (ret)
+               goto err_rx;
+       ret = mt7601u_init_wcid_mem(dev);
+       if (ret)
+               goto err_rx;
+       ret = mt7601u_init_key_mem(dev);
+       if (ret)
+               goto err_rx;
+       ret = mt7601u_init_wcid_attr_mem(dev);
+       if (ret)
+               goto err_rx;
+
+       mt76_clear(dev, MT_BEACON_TIME_CFG, (MT_BEACON_TIME_CFG_TIMER_EN |
+                                            MT_BEACON_TIME_CFG_SYNC_MODE |
+                                            MT_BEACON_TIME_CFG_TBTT_EN |
+                                            MT_BEACON_TIME_CFG_BEACON_TX));
+
+       mt7601u_reset_counters(dev);
+
+       mt7601u_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e);
+
+       mt7601u_wr(dev, MT_TXOP_CTRL_CFG, MT76_SET(MT_TXOP_TRUN_EN, 0x3f) |
+                                         MT76_SET(MT_TXOP_EXT_CCA_DLY, 0x58));
+
+       ret = mt7601u_eeprom_init(dev);
+       if (ret)
+               goto err_rx;
+
+       ret = mt7601u_phy_init(dev);
+       if (ret)
+               goto err_rx;
+
+       mt7601u_set_rx_path(dev, 0);
+       mt7601u_set_tx_dac(dev, 0);
+
+       mt7601u_mac_set_ctrlch(dev, false);
+       mt7601u_bbp_set_ctrlch(dev, false);
+       mt7601u_bbp_set_bw(dev, MT_BW_20);
+
+       return 0;
+
+err_rx:
+       mt7601u_dma_cleanup(dev);
+err_mcu:
+       mt7601u_mcu_cmd_deinit(dev);
+err:
+       mt7601u_chip_onoff(dev, false, false);
+       return ret;
+}
+
+void mt7601u_cleanup(struct mt7601u_dev *dev)
+{
+       mt7601u_stop_hardware(dev);
+       mt7601u_dma_cleanup(dev);
+       mt7601u_mcu_cmd_deinit(dev);
+}
+
+struct mt7601u_dev *mt7601u_alloc_device(struct device *pdev)
+{
+       struct ieee80211_hw *hw;
+       struct mt7601u_dev *dev;
+
+       hw = ieee80211_alloc_hw(sizeof(*dev), &mt7601u_ops);
+       if (!hw)
+               return NULL;
+
+       dev = hw->priv;
+       dev->dev = pdev;
+       dev->hw = hw;
+       mutex_init(&dev->vendor_req_mutex);
+       mutex_init(&dev->reg_atomic_mutex);
+       mutex_init(&dev->hw_atomic_mutex);
+       mutex_init(&dev->mutex);
+       spin_lock_init(&dev->tx_lock);
+       spin_lock_init(&dev->rx_lock);
+       spin_lock_init(&dev->lock);
+       spin_lock_init(&dev->con_mon_lock);
+       atomic_set(&dev->avg_ampdu_len, 1);
+
+       dev->stat_wq = alloc_workqueue("mt7601u", WQ_UNBOUND, 0);
+       if (!dev->stat_wq) {
+               ieee80211_free_hw(hw);
+               return NULL;
+       }
+
+       return dev;
+}
+
+#define CHAN2G(_idx, _freq) {                  \
+       .band = IEEE80211_BAND_2GHZ,            \
+       .center_freq = (_freq),                 \
+       .hw_value = (_idx),                     \
+       .max_power = 30,                        \
+}
+
+static const struct ieee80211_channel mt76_channels_2ghz[] = {
+       CHAN2G(1, 2412),
+       CHAN2G(2, 2417),
+       CHAN2G(3, 2422),
+       CHAN2G(4, 2427),
+       CHAN2G(5, 2432),
+       CHAN2G(6, 2437),
+       CHAN2G(7, 2442),
+       CHAN2G(8, 2447),
+       CHAN2G(9, 2452),
+       CHAN2G(10, 2457),
+       CHAN2G(11, 2462),
+       CHAN2G(12, 2467),
+       CHAN2G(13, 2472),
+       CHAN2G(14, 2484),
+};
+
+#define CCK_RATE(_idx, _rate) {                                        \
+       .bitrate = _rate,                                       \
+       .flags = IEEE80211_RATE_SHORT_PREAMBLE,                 \
+       .hw_value = (MT_PHY_TYPE_CCK << 8) | _idx,              \
+       .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (8 + _idx),  \
+}
+
+#define OFDM_RATE(_idx, _rate) {                               \
+       .bitrate = _rate,                                       \
+       .hw_value = (MT_PHY_TYPE_OFDM << 8) | _idx,             \
+       .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | _idx,       \
+}
+
+static struct ieee80211_rate mt76_rates[] = {
+       CCK_RATE(0, 10),
+       CCK_RATE(1, 20),
+       CCK_RATE(2, 55),
+       CCK_RATE(3, 110),
+       OFDM_RATE(0, 60),
+       OFDM_RATE(1, 90),
+       OFDM_RATE(2, 120),
+       OFDM_RATE(3, 180),
+       OFDM_RATE(4, 240),
+       OFDM_RATE(5, 360),
+       OFDM_RATE(6, 480),
+       OFDM_RATE(7, 540),
+};
+
+static int
+mt76_init_sband(struct mt7601u_dev *dev, struct ieee80211_supported_band *sband,
+               const struct ieee80211_channel *chan, int n_chan,
+               struct ieee80211_rate *rates, int n_rates)
+{
+       struct ieee80211_sta_ht_cap *ht_cap;
+       void *chanlist;
+       int size;
+
+       size = n_chan * sizeof(*chan);
+       chanlist = devm_kmemdup(dev->dev, chan, size, GFP_KERNEL);
+       if (!chanlist)
+               return -ENOMEM;
+
+       sband->channels = chanlist;
+       sband->n_channels = n_chan;
+       sband->bitrates = rates;
+       sband->n_bitrates = n_rates;
+
+       ht_cap = &sband->ht_cap;
+       ht_cap->ht_supported = true;
+       ht_cap->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+                     IEEE80211_HT_CAP_GRN_FLD |
+                     IEEE80211_HT_CAP_SGI_20 |
+                     IEEE80211_HT_CAP_SGI_40 |
+                     (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
+
+       ht_cap->mcs.rx_mask[0] = 0xff;
+       ht_cap->mcs.rx_mask[4] = 0x1;
+       ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+       ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+       ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_2;
+
+       dev->chandef.chan = &sband->channels[0];
+
+       return 0;
+}
+
+static int
+mt76_init_sband_2g(struct mt7601u_dev *dev)
+{
+       dev->sband_2g = devm_kzalloc(dev->dev, sizeof(*dev->sband_2g),
+                                    GFP_KERNEL);
+       if (!dev->sband_2g)
+               return -ENOMEM;
+       dev->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = dev->sband_2g;
+
+       WARN_ON(dev->ee->reg.start - 1 + dev->ee->reg.num >
+               ARRAY_SIZE(mt76_channels_2ghz));
+
+       return mt76_init_sband(dev, dev->sband_2g,
+                              &mt76_channels_2ghz[dev->ee->reg.start - 1],
+                              dev->ee->reg.num,
+                              mt76_rates, ARRAY_SIZE(mt76_rates));
+}
+
+int mt7601u_register_device(struct mt7601u_dev *dev)
+{
+       struct ieee80211_hw *hw = dev->hw;
+       struct wiphy *wiphy = hw->wiphy;
+       int ret;
+
+       /* Reserve WCID 0 for mcast - thanks to this the AP's WCID will go to
+        * entry no. 1 like it does in the vendor driver.
+        */
+       dev->wcid_mask[0] |= 1;
+
+       /* init fake wcid for monitor interfaces */
+       dev->mon_wcid = devm_kmalloc(dev->dev, sizeof(*dev->mon_wcid),
+                                    GFP_KERNEL);
+       if (!dev->mon_wcid)
+               return -ENOMEM;
+       dev->mon_wcid->idx = 0xff;
+       dev->mon_wcid->hw_key_idx = -1;
+
+       SET_IEEE80211_DEV(hw, dev->dev);
+
+       hw->queues = 4;
+       hw->flags = IEEE80211_HW_SIGNAL_DBM |
+                   IEEE80211_HW_PS_NULLFUNC_STACK |
+                   IEEE80211_HW_SUPPORTS_HT_CCK_RATES |
+                   IEEE80211_HW_AMPDU_AGGREGATION |
+                   IEEE80211_HW_SUPPORTS_RC_TABLE;
+       hw->max_rates = 1;
+       hw->max_report_rates = 7;
+       hw->max_rate_tries = 1;
+
+       hw->sta_data_size = sizeof(struct mt76_sta);
+       hw->vif_data_size = sizeof(struct mt76_vif);
+
+       SET_IEEE80211_PERM_ADDR(hw, dev->macaddr);
+
+       wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
+       wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+
+       ret = mt76_init_sband_2g(dev);
+       if (ret)
+               return ret;
+
+       INIT_DELAYED_WORK(&dev->mac_work, mt7601u_mac_work);
+       INIT_DELAYED_WORK(&dev->stat_work, mt7601u_tx_stat);
+
+       ret = ieee80211_register_hw(hw);
+       if (ret)
+               return ret;
+
+       mt7601u_init_debugfs(dev);
+
+       return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt7601u/initvals.h b/drivers/net/wireless/mediatek/mt7601u/initvals.h
new file mode 100644 (file)
index 0000000..ec11ff6
--- /dev/null
@@ -0,0 +1,164 @@
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT7601U_INITVALS_H
+#define __MT7601U_INITVALS_H
+
+static const struct mt76_reg_pair bbp_common_vals[] = {
+       {  65,  0x2c },
+       {  66,  0x38 },
+       {  68,  0x0b },
+       {  69,  0x12 },
+       {  70,  0x0a },
+       {  73,  0x10 },
+       {  81,  0x37 },
+       {  82,  0x62 },
+       {  83,  0x6a },
+       {  84,  0x99 },
+       {  86,  0x00 },
+       {  91,  0x04 },
+       {  92,  0x00 },
+       { 103,  0x00 },
+       { 105,  0x05 },
+       { 106,  0x35 },
+};
+
+static const struct mt76_reg_pair bbp_chip_vals[] = {
+       {   1, 0x04 },  {   4, 0x40 },  {  20, 0x06 },  {  31, 0x08 },
+       /* CCK Tx Control */
+       { 178, 0xff },
+       /* AGC/Sync controls */
+       {  66, 0x14 },  {  68, 0x8b },  {  69, 0x12 },  {  70, 0x09 },
+       {  73, 0x11 },  {  75, 0x60 },  {  76, 0x44 },  {  84, 0x9a },
+       {  86, 0x38 },  {  91, 0x07 },  {  92, 0x02 },
+       /* Rx Path Controls */
+       {  99, 0x50 },  { 101, 0x00 },  { 103, 0xc0 },  { 104, 0x92 },
+       { 105, 0x3c },  { 106, 0x03 },  { 128, 0x12 },
+       /* Change RXWI content: Gain Report */
+       { 142, 0x04 },  { 143, 0x37 },
+       /* Change RXWI content: Antenna Report */
+       { 142, 0x03 },  { 143, 0x99 },
+       /* Calibration Index Register */
+       /* CCK Receiver Control */
+       { 160, 0xeb },  { 161, 0xc4 },  { 162, 0x77 },  { 163, 0xf9 },
+       { 164, 0x88 },  { 165, 0x80 },  { 166, 0xff },  { 167, 0xe4 },
+       /* Added AGC controls - these AGC/GLRT registers are accessed
+        * through R195 and R196.
+        */
+       { 195, 0x00 },  { 196, 0x00 },
+       { 195, 0x01 },  { 196, 0x04 },
+       { 195, 0x02 },  { 196, 0x20 },
+       { 195, 0x03 },  { 196, 0x0a },
+       { 195, 0x06 },  { 196, 0x16 },
+       { 195, 0x07 },  { 196, 0x05 },
+       { 195, 0x08 },  { 196, 0x37 },
+       { 195, 0x0a },  { 196, 0x15 },
+       { 195, 0x0b },  { 196, 0x17 },
+       { 195, 0x0c },  { 196, 0x06 },
+       { 195, 0x0d },  { 196, 0x09 },
+       { 195, 0x0e },  { 196, 0x05 },
+       { 195, 0x0f },  { 196, 0x09 },
+       { 195, 0x10 },  { 196, 0x20 },
+       { 195, 0x20 },  { 196, 0x17 },
+       { 195, 0x21 },  { 196, 0x06 },
+       { 195, 0x22 },  { 196, 0x09 },
+       { 195, 0x23 },  { 196, 0x17 },
+       { 195, 0x24 },  { 196, 0x06 },
+       { 195, 0x25 },  { 196, 0x09 },
+       { 195, 0x26 },  { 196, 0x17 },
+       { 195, 0x27 },  { 196, 0x06 },
+       { 195, 0x28 },  { 196, 0x09 },
+       { 195, 0x29 },  { 196, 0x05 },
+       { 195, 0x2a },  { 196, 0x09 },
+       { 195, 0x80 },  { 196, 0x8b },
+       { 195, 0x81 },  { 196, 0x12 },
+       { 195, 0x82 },  { 196, 0x09 },
+       { 195, 0x83 },  { 196, 0x17 },
+       { 195, 0x84 },  { 196, 0x11 },
+       { 195, 0x85 },  { 196, 0x00 },
+       { 195, 0x86 },  { 196, 0x00 },
+       { 195, 0x87 },  { 196, 0x18 },
+       { 195, 0x88 },  { 196, 0x60 },
+       { 195, 0x89 },  { 196, 0x44 },
+       { 195, 0x8a },  { 196, 0x8b },
+       { 195, 0x8b },  { 196, 0x8b },
+       { 195, 0x8c },  { 196, 0x8b },
+       { 195, 0x8d },  { 196, 0x8b },
+       { 195, 0x8e },  { 196, 0x09 },
+       { 195, 0x8f },  { 196, 0x09 },
+       { 195, 0x90 },  { 196, 0x09 },
+       { 195, 0x91 },  { 196, 0x09 },
+       { 195, 0x92 },  { 196, 0x11 },
+       { 195, 0x93 },  { 196, 0x11 },
+       { 195, 0x94 },  { 196, 0x11 },
+       { 195, 0x95 },  { 196, 0x11 },
+       /* PPAD */
+       {  47, 0x80 },  {  60, 0x80 },  { 150, 0xd2 },  { 151, 0x32 },
+       { 152, 0x23 },  { 153, 0x41 },  { 154, 0x00 },  { 155, 0x4f },
+       { 253, 0x7e },  { 195, 0x30 },  { 196, 0x32 },  { 195, 0x31 },
+       { 196, 0x23 },  { 195, 0x32 },  { 196, 0x45 },  { 195, 0x35 },
+       { 196, 0x4a },  { 195, 0x36 },  { 196, 0x5a },  { 195, 0x37 },
+       { 196, 0x5a },
+};
+
+static const struct mt76_reg_pair mac_common_vals[] = {
+       { MT_LEGACY_BASIC_RATE,         0x0000013f },
+       { MT_HT_BASIC_RATE,             0x00008003 },
+       { MT_MAC_SYS_CTRL,              0x00000000 },
+       { MT_RX_FILTR_CFG,              0x00017f97 },
+       { MT_BKOFF_SLOT_CFG,            0x00000209 },
+       { MT_TX_SW_CFG0,                0x00000000 },
+       { MT_TX_SW_CFG1,                0x00080606 },
+       { MT_TX_LINK_CFG,               0x00001020 },
+       { MT_TX_TIMEOUT_CFG,            0x000a2090 },
+       { MT_MAX_LEN_CFG,               0x00003fff },
+       { MT_PBF_TX_MAX_PCNT,           0x1fbf1f1f },
+       { MT_PBF_RX_MAX_PCNT,           0x0000009f },
+       { MT_TX_RETRY_CFG,              0x47d01f0f },
+       { MT_AUTO_RSP_CFG,              0x00000013 },
+       { MT_CCK_PROT_CFG,              0x05740003 },
+       { MT_OFDM_PROT_CFG,             0x05740003 },
+       { MT_MM40_PROT_CFG,             0x03f44084 },
+       { MT_GF20_PROT_CFG,             0x01744004 },
+       { MT_GF40_PROT_CFG,             0x03f44084 },
+       { MT_MM20_PROT_CFG,             0x01744004 },
+       { MT_TXOP_CTRL_CFG,             0x0000583f },
+       { MT_TX_RTS_CFG,                0x01092b20 },
+       { MT_EXP_ACK_TIME,              0x002400ca },
+       { MT_TXOP_HLDR_ET,              0x00000002 },
+       { MT_XIFS_TIME_CFG,             0x33a41010 },
+       { MT_PWR_PIN_CFG,               0x00000000 },
+};
+
+static const struct mt76_reg_pair mac_chip_vals[] = {
+       { MT_TSO_CTRL,                  0x00006050 },
+       { MT_BCN_OFFSET(0),             0x18100800 },
+       { MT_BCN_OFFSET(1),             0x38302820 },
+       { MT_PBF_SYS_CTRL,              0x00080c00 },
+       { MT_PBF_CFG,                   0x7f723c1f },
+       { MT_FCE_PSE_CTRL,              0x00000001 },
+       { MT_PAUSE_ENABLE_CONTROL1,     0x00000000 },
+       { MT_TX0_RF_GAIN_CORR,          0x003b0005 },
+       { MT_TX0_RF_GAIN_ATTEN,         0x00006900 },
+       { MT_TX0_BB_GAIN_ATTEN,         0x00000400 },
+       { MT_TX_ALC_VGA3,               0x00060006 },
+       { MT_TX_SW_CFG0,                0x00000402 },
+       { MT_TX_SW_CFG1,                0x00000000 },
+       { MT_TX_SW_CFG2,                0x00000000 },
+       { MT_HEADER_TRANS_CTRL_REG,     0x00000000 },
+       { MT_FCE_CSO,                   0x0000030f },
+       { MT_FCE_PARAMETERS,            0x00256f0f },
+};
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt7601u/initvals_phy.h b/drivers/net/wireless/mediatek/mt7601u/initvals_phy.h
new file mode 100644 (file)
index 0000000..a2bdc3e
--- /dev/null
@@ -0,0 +1,291 @@
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT7601U_PHY_INITVALS_H
+#define __MT7601U_PHY_INITVALS_H
+
+#define RF_REG_PAIR(bank, reg, value)                          \
+       { MT_MCU_MEMMAP_RF | (bank) << 16 | (reg), value }
+
+static const struct mt76_reg_pair rf_central[] = {
+       /* Bank 0 - for central blocks: BG, PLL, XTAL, LO, ADC/DAC */
+       RF_REG_PAIR(0,   0, 0x02),
+       RF_REG_PAIR(0,   1, 0x01),
+       RF_REG_PAIR(0,   2, 0x11),
+       RF_REG_PAIR(0,   3, 0xff),
+       RF_REG_PAIR(0,   4, 0x0a),
+       RF_REG_PAIR(0,   5, 0x20),
+       RF_REG_PAIR(0,   6, 0x00),
+       /* B/G */
+       RF_REG_PAIR(0,   7, 0x00),
+       RF_REG_PAIR(0,   8, 0x00),
+       RF_REG_PAIR(0,   9, 0x00),
+       RF_REG_PAIR(0,  10, 0x00),
+       RF_REG_PAIR(0,  11, 0x21),
+       /* XO */
+       RF_REG_PAIR(0,  13, 0x00),              /* 40 MHz xtal */
+       /* RF_REG_PAIR(0,       13, 0x13), */   /* 20 MHz xtal */
+       RF_REG_PAIR(0,  14, 0x7c),
+       RF_REG_PAIR(0,  15, 0x22),
+       RF_REG_PAIR(0,  16, 0x80),
+       /* PLL */
+       RF_REG_PAIR(0,  17, 0x99),
+       RF_REG_PAIR(0,  18, 0x99),
+       RF_REG_PAIR(0,  19, 0x09),
+       RF_REG_PAIR(0,  20, 0x50),
+       RF_REG_PAIR(0,  21, 0xb0),
+       RF_REG_PAIR(0,  22, 0x00),
+       RF_REG_PAIR(0,  23, 0xc5),
+       RF_REG_PAIR(0,  24, 0xfc),
+       RF_REG_PAIR(0,  25, 0x40),
+       RF_REG_PAIR(0,  26, 0x4d),
+       RF_REG_PAIR(0,  27, 0x02),
+       RF_REG_PAIR(0,  28, 0x72),
+       RF_REG_PAIR(0,  29, 0x01),
+       RF_REG_PAIR(0,  30, 0x00),
+       RF_REG_PAIR(0,  31, 0x00),
+       /* test ports */
+       RF_REG_PAIR(0,  32, 0x00),
+       RF_REG_PAIR(0,  33, 0x00),
+       RF_REG_PAIR(0,  34, 0x23),
+       RF_REG_PAIR(0,  35, 0x01), /* change setting to reduce spurs */
+       RF_REG_PAIR(0,  36, 0x00),
+       RF_REG_PAIR(0,  37, 0x00),
+       /* ADC/DAC */
+       RF_REG_PAIR(0,  38, 0x00),
+       RF_REG_PAIR(0,  39, 0x20),
+       RF_REG_PAIR(0,  40, 0x00),
+       RF_REG_PAIR(0,  41, 0xd0),
+       RF_REG_PAIR(0,  42, 0x1b),
+       RF_REG_PAIR(0,  43, 0x02),
+       RF_REG_PAIR(0,  44, 0x00),
+};
+
+static const struct mt76_reg_pair rf_channel[] = {
+       RF_REG_PAIR(4,   0, 0x01),
+       RF_REG_PAIR(4,   1, 0x00),
+       RF_REG_PAIR(4,   2, 0x00),
+       RF_REG_PAIR(4,   3, 0x00),
+       /* LDO */
+       RF_REG_PAIR(4,   4, 0x00),
+       RF_REG_PAIR(4,   5, 0x08),
+       RF_REG_PAIR(4,   6, 0x00),
+       /* RX */
+       RF_REG_PAIR(4,   7, 0x5b),
+       RF_REG_PAIR(4,   8, 0x52),
+       RF_REG_PAIR(4,   9, 0xb6),
+       RF_REG_PAIR(4,  10, 0x57),
+       RF_REG_PAIR(4,  11, 0x33),
+       RF_REG_PAIR(4,  12, 0x22),
+       RF_REG_PAIR(4,  13, 0x3d),
+       RF_REG_PAIR(4,  14, 0x3e),
+       RF_REG_PAIR(4,  15, 0x13),
+       RF_REG_PAIR(4,  16, 0x22),
+       RF_REG_PAIR(4,  17, 0x23),
+       RF_REG_PAIR(4,  18, 0x02),
+       RF_REG_PAIR(4,  19, 0xa4),
+       RF_REG_PAIR(4,  20, 0x01),
+       RF_REG_PAIR(4,  21, 0x12),
+       RF_REG_PAIR(4,  22, 0x80),
+       RF_REG_PAIR(4,  23, 0xb3),
+       RF_REG_PAIR(4,  24, 0x00), /* reserved */
+       RF_REG_PAIR(4,  25, 0x00), /* reserved */
+       RF_REG_PAIR(4,  26, 0x00), /* reserved */
+       RF_REG_PAIR(4,  27, 0x00), /* reserved */
+       /* LOGEN */
+       RF_REG_PAIR(4,  28, 0x18),
+       RF_REG_PAIR(4,  29, 0xee),
+       RF_REG_PAIR(4,  30, 0x6b),
+       RF_REG_PAIR(4,  31, 0x31),
+       RF_REG_PAIR(4,  32, 0x5d),
+       RF_REG_PAIR(4,  33, 0x00), /* reserved */
+       /* TX */
+       RF_REG_PAIR(4,  34, 0x96),
+       RF_REG_PAIR(4,  35, 0x55),
+       RF_REG_PAIR(4,  36, 0x08),
+       RF_REG_PAIR(4,  37, 0xbb),
+       RF_REG_PAIR(4,  38, 0xb3),
+       RF_REG_PAIR(4,  39, 0xb3),
+       RF_REG_PAIR(4,  40, 0x03),
+       RF_REG_PAIR(4,  41, 0x00), /* reserved */
+       RF_REG_PAIR(4,  42, 0x00), /* reserved */
+       RF_REG_PAIR(4,  43, 0xc5),
+       RF_REG_PAIR(4,  44, 0xc5),
+       RF_REG_PAIR(4,  45, 0xc5),
+       RF_REG_PAIR(4,  46, 0x07),
+       RF_REG_PAIR(4,  47, 0xa8),
+       RF_REG_PAIR(4,  48, 0xef),
+       RF_REG_PAIR(4,  49, 0x1a),
+       /* PA */
+       RF_REG_PAIR(4,  54, 0x07),
+       RF_REG_PAIR(4,  55, 0xa7),
+       RF_REG_PAIR(4,  56, 0xcc),
+       RF_REG_PAIR(4,  57, 0x14),
+       RF_REG_PAIR(4,  58, 0x07),
+       RF_REG_PAIR(4,  59, 0xa8),
+       RF_REG_PAIR(4,  60, 0xd7),
+       RF_REG_PAIR(4,  61, 0x10),
+       RF_REG_PAIR(4,  62, 0x1c),
+       RF_REG_PAIR(4,  63, 0x00), /* reserved */
+};
+
+static const struct mt76_reg_pair rf_vga[] = {
+       RF_REG_PAIR(5,   0, 0x47),
+       RF_REG_PAIR(5,   1, 0x00),
+       RF_REG_PAIR(5,   2, 0x00),
+       RF_REG_PAIR(5,   3, 0x08),
+       RF_REG_PAIR(5,   4, 0x04),
+       RF_REG_PAIR(5,   5, 0x20),
+       RF_REG_PAIR(5,   6, 0x3a),
+       RF_REG_PAIR(5,   7, 0x3a),
+       RF_REG_PAIR(5,   8, 0x00),
+       RF_REG_PAIR(5,   9, 0x00),
+       RF_REG_PAIR(5,  10, 0x10),
+       RF_REG_PAIR(5,  11, 0x10),
+       RF_REG_PAIR(5,  12, 0x10),
+       RF_REG_PAIR(5,  13, 0x10),
+       RF_REG_PAIR(5,  14, 0x10),
+       RF_REG_PAIR(5,  15, 0x20),
+       RF_REG_PAIR(5,  16, 0x22),
+       RF_REG_PAIR(5,  17, 0x7c),
+       RF_REG_PAIR(5,  18, 0x00),
+       RF_REG_PAIR(5,  19, 0x00),
+       RF_REG_PAIR(5,  20, 0x00),
+       RF_REG_PAIR(5,  21, 0xf1),
+       RF_REG_PAIR(5,  22, 0x11),
+       RF_REG_PAIR(5,  23, 0x02),
+       RF_REG_PAIR(5,  24, 0x41),
+       RF_REG_PAIR(5,  25, 0x20),
+       RF_REG_PAIR(5,  26, 0x00),
+       RF_REG_PAIR(5,  27, 0xd7),
+       RF_REG_PAIR(5,  28, 0xa2),
+       RF_REG_PAIR(5,  29, 0x20),
+       RF_REG_PAIR(5,  30, 0x49),
+       RF_REG_PAIR(5,  31, 0x20),
+       RF_REG_PAIR(5,  32, 0x04),
+       RF_REG_PAIR(5,  33, 0xf1),
+       RF_REG_PAIR(5,  34, 0xa1),
+       RF_REG_PAIR(5,  35, 0x01),
+       RF_REG_PAIR(5,  41, 0x00),
+       RF_REG_PAIR(5,  42, 0x00),
+       RF_REG_PAIR(5,  43, 0x00),
+       RF_REG_PAIR(5,  44, 0x00),
+       RF_REG_PAIR(5,  45, 0x00),
+       RF_REG_PAIR(5,  46, 0x00),
+       RF_REG_PAIR(5,  47, 0x00),
+       RF_REG_PAIR(5,  48, 0x00),
+       RF_REG_PAIR(5,  49, 0x00),
+       RF_REG_PAIR(5,  50, 0x00),
+       RF_REG_PAIR(5,  51, 0x00),
+       RF_REG_PAIR(5,  52, 0x00),
+       RF_REG_PAIR(5,  53, 0x00),
+       RF_REG_PAIR(5,  54, 0x00),
+       RF_REG_PAIR(5,  55, 0x00),
+       RF_REG_PAIR(5,  56, 0x00),
+       RF_REG_PAIR(5,  57, 0x00),
+       RF_REG_PAIR(5,  58, 0x31),
+       RF_REG_PAIR(5,  59, 0x31),
+       RF_REG_PAIR(5,  60, 0x0a),
+       RF_REG_PAIR(5,  61, 0x02),
+       RF_REG_PAIR(5,  62, 0x00),
+       RF_REG_PAIR(5,  63, 0x00),
+};
+
+/* TODO: BBP178 is set to 0xff for "CCK CH14 OBW" which overrides the settings
+ *      from channel switching. Seems stupid at best.
+ */
+static const struct mt76_reg_pair bbp_high_temp[] = {
+       {  75, 0x60 },
+       {  92, 0x02 },
+       { 178, 0xff }, /* For CCK CH14 OBW */
+       { 195, 0x88 }, { 196, 0x60 },
+}, bbp_high_temp_bw20[] = {
+       {  69, 0x12 },
+       {  91, 0x07 },
+       { 195, 0x23 }, { 196, 0x17 },
+       { 195, 0x24 }, { 196, 0x06 },
+       { 195, 0x81 }, { 196, 0x12 },
+       { 195, 0x83 }, { 196, 0x17 },
+}, bbp_high_temp_bw40[] = {
+       {  69, 0x15 },
+       {  91, 0x04 },
+       { 195, 0x23 }, { 196, 0x12 },
+       { 195, 0x24 }, { 196, 0x08 },
+       { 195, 0x81 }, { 196, 0x15 },
+       { 195, 0x83 }, { 196, 0x16 },
+}, bbp_low_temp[] = {
+       { 178, 0xff }, /* For CCK CH14 OBW */
+}, bbp_low_temp_bw20[] = {
+       {  69, 0x12 },
+       {  75, 0x5e },
+       {  91, 0x07 },
+       {  92, 0x02 },
+       { 195, 0x23 }, { 196, 0x17 },
+       { 195, 0x24 }, { 196, 0x06 },
+       { 195, 0x81 }, { 196, 0x12 },
+       { 195, 0x83 }, { 196, 0x17 },
+       { 195, 0x88 }, { 196, 0x5e },
+}, bbp_low_temp_bw40[] = {
+       {  69, 0x15 },
+       {  75, 0x5c },
+       {  91, 0x04 },
+       {  92, 0x03 },
+       { 195, 0x23 }, { 196, 0x10 },
+       { 195, 0x24 }, { 196, 0x08 },
+       { 195, 0x81 }, { 196, 0x15 },
+       { 195, 0x83 }, { 196, 0x16 },
+       { 195, 0x88 }, { 196, 0x5b },
+}, bbp_normal_temp[] = {
+       {  75, 0x60 },
+       {  92, 0x02 },
+       { 178, 0xff }, /* For CCK CH14 OBW */
+       { 195, 0x88 }, { 196, 0x60 },
+}, bbp_normal_temp_bw20[] = {
+       {  69, 0x12 },
+       {  91, 0x07 },
+       { 195, 0x23 }, { 196, 0x17 },
+       { 195, 0x24 }, { 196, 0x06 },
+       { 195, 0x81 }, { 196, 0x12 },
+       { 195, 0x83 }, { 196, 0x17 },
+}, bbp_normal_temp_bw40[] = {
+       {  69, 0x15 },
+       {  91, 0x04 },
+       { 195, 0x23 }, { 196, 0x12 },
+       { 195, 0x24 }, { 196, 0x08 },
+       { 195, 0x81 }, { 196, 0x15 },
+       { 195, 0x83 }, { 196, 0x16 },
+};
+
+#define BBP_TABLE(arr) { arr, ARRAY_SIZE(arr), }
+
+static const struct reg_table {
+       const struct mt76_reg_pair *regs;
+       size_t n;
+} bbp_mode_table[3][3] = {
+       {
+               BBP_TABLE(bbp_normal_temp_bw20),
+               BBP_TABLE(bbp_normal_temp_bw40),
+               BBP_TABLE(bbp_normal_temp),
+       }, {
+               BBP_TABLE(bbp_high_temp_bw20),
+               BBP_TABLE(bbp_high_temp_bw40),
+               BBP_TABLE(bbp_high_temp),
+       }, {
+               BBP_TABLE(bbp_low_temp_bw20),
+               BBP_TABLE(bbp_low_temp_bw40),
+               BBP_TABLE(bbp_low_temp),
+       }
+};
+
+#endif
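
bbp_mode_table is indexed first by temperature mode (row 0 normal, 1 high, 2 low) and then by bandwidth (column 0 for BW20, 1 for BW40, 2 for the bandwidth-independent set). A lookup sketch follows; temp_mode and the exact call site are assumptions, only the table itself and mt7601u_write_reg_pairs() come from the patch:

    /* Sketch, assuming temp_mode in {0, 1, 2} per the row order above. */
    const struct reg_table *t = &bbp_mode_table[temp_mode][bw == MT_PHY_BW_40];
    int ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP, t->regs, t->n);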
diff --git a/drivers/net/wireless/mediatek/mt7601u/mac.c b/drivers/net/wireless/mediatek/mt7601u/mac.c
new file mode 100644 (file)
index 0000000..c161bcc
--- /dev/null
@@ -0,0 +1,569 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt7601u.h"
+#include "trace.h"
+#include <linux/etherdevice.h>
+
+static void
+mt76_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate)
+{
+       u8 idx = MT76_GET(MT_TXWI_RATE_MCS, rate);
+
+       txrate->idx = 0;
+       txrate->flags = 0;
+       txrate->count = 1;
+
+       switch (MT76_GET(MT_TXWI_RATE_PHY_MODE, rate)) {
+       case MT_PHY_TYPE_OFDM:
+               txrate->idx = idx + 4;
+               return;
+       case MT_PHY_TYPE_CCK:
+               if (idx >= 8)
+                       idx -= 8;
+
+               txrate->idx = idx;
+               return;
+       case MT_PHY_TYPE_HT_GF:
+               txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD;
+               /* fall through */
+       case MT_PHY_TYPE_HT:
+               txrate->flags |= IEEE80211_TX_RC_MCS;
+               txrate->idx = idx;
+               break;
+       default:
+               WARN_ON(1);
+               return;
+       }
+
+       if (MT76_GET(MT_TXWI_RATE_BW, rate) == MT_PHY_BW_40)
+               txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+
+       if (rate & MT_TXWI_RATE_SGI)
+               txrate->flags |= IEEE80211_TX_RC_SHORT_GI;
+}
+
+static void
+mt76_mac_fill_tx_status(struct mt7601u_dev *dev, struct ieee80211_tx_info *info,
+                       struct mt76_tx_status *st)
+{
+       struct ieee80211_tx_rate *rate = info->status.rates;
+       int cur_idx, last_rate;
+       int i;
+
+       last_rate = min_t(int, st->retry, IEEE80211_TX_MAX_RATES - 1);
+       mt76_mac_process_tx_rate(&rate[last_rate], st->rate);
+       if (last_rate < IEEE80211_TX_MAX_RATES - 1)
+               rate[last_rate + 1].idx = -1;
+
+       cur_idx = rate[last_rate].idx + st->retry;
+       for (i = 0; i <= last_rate; i++) {
+               rate[i].flags = rate[last_rate].flags;
+               rate[i].idx = max_t(int, 0, cur_idx - i);
+               rate[i].count = 1;
+       }
+
+       if (last_rate > 0)
+               rate[last_rate - 1].count = st->retry + 1 - last_rate;
+
+       info->status.ampdu_len = 1;
+       info->status.ampdu_ack_len = st->success;
+
+       if (st->is_probe)
+               info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
+
+       if (st->aggr)
+               info->flags |= IEEE80211_TX_CTL_AMPDU |
+                              IEEE80211_TX_STAT_AMPDU;
+
+       if (!st->ack_req)
+               info->flags |= IEEE80211_TX_CTL_NO_ACK;
+       else if (st->success)
+               info->flags |= IEEE80211_TX_STAT_ACK;
+}
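
mt76_mac_fill_tx_status() rebuilds the per-attempt rate chain from a single status word: the hardware reports only the final rate and a retry count, and the code assumes every retry stepped the rate index down by one. A worked illustration with a hypothetical status word, not driver output:

    /* st->retry == 2, st->rate decodes to HT MCS 5:
     *   last_rate = 2, cur_idx = 5 + 2 = 7
     *   rates[0] = { .idx = 7, .count = 1 }
     *   rates[1] = { .idx = 6, .count = 1 }   (retry + 1 - last_rate == 1)
     *   rates[2] = { .idx = 5, .count = 1 }   the rate that finally succeeded
     *   rates[3].idx = -1                     chain terminator
     */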
+
+u16 mt76_mac_tx_rate_val(struct mt7601u_dev *dev,
+                        const struct ieee80211_tx_rate *rate, u8 *nss_val)
+{
+       u16 rateval;
+       u8 phy, rate_idx;
+       u8 nss = 1;
+       u8 bw = 0;
+
+       if (rate->flags & IEEE80211_TX_RC_MCS) {
+               rate_idx = rate->idx;
+               nss = 1 + (rate->idx >> 3);
+               phy = MT_PHY_TYPE_HT;
+               if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
+                       phy = MT_PHY_TYPE_HT_GF;
+               if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+                       bw = 1;
+       } else {
+               const struct ieee80211_rate *r;
+               int band = dev->chandef.chan->band;
+               u16 val;
+
+               r = &dev->hw->wiphy->bands[band]->bitrates[rate->idx];
+               if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+                       val = r->hw_value_short;
+               else
+                       val = r->hw_value;
+
+               phy = val >> 8;
+               rate_idx = val & 0xff;
+               bw = 0;
+       }
+
+       rateval = MT76_SET(MT_RXWI_RATE_MCS, rate_idx);
+       rateval |= MT76_SET(MT_RXWI_RATE_PHY, phy);
+       rateval |= MT76_SET(MT_RXWI_RATE_BW, bw);
+       if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+               rateval |= MT_RXWI_RATE_SGI;
+
+       *nss_val = nss;
+       return rateval;
+}
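
The returned rate word is packed from the MT_RXWI_RATE_* fields defined in mac.h below. As a worked illustration, HT MCS 7 at 40 MHz with a short GI packs to 0x8187:

    /* MCS (bits 6:0) = 7, BW (bit 7) = 1, SGI (bit 8) = 1,
     * PHY (bits 15:14) = MT_PHY_TYPE_HT (2):
     *   0x0007 | 0x0080 | 0x0100 | 0x8000 == 0x8187
     */
    u16 rateval = MT76_SET(MT_RXWI_RATE_MCS, 7) |
                  MT76_SET(MT_RXWI_RATE_BW, 1) |
                  MT_RXWI_RATE_SGI |
                  MT76_SET(MT_RXWI_RATE_PHY, MT_PHY_TYPE_HT);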
+
+void mt76_mac_wcid_set_rate(struct mt7601u_dev *dev, struct mt76_wcid *wcid,
+                           const struct ieee80211_tx_rate *rate)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&dev->lock, flags);
+       wcid->tx_rate = mt76_mac_tx_rate_val(dev, rate, &wcid->tx_rate_nss);
+       wcid->tx_rate_set = true;
+       spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+struct mt76_tx_status mt7601u_mac_fetch_tx_status(struct mt7601u_dev *dev)
+{
+       struct mt76_tx_status stat = {};
+       u32 val;
+
+       val = mt7601u_rr(dev, MT_TX_STAT_FIFO);
+       stat.valid = !!(val & MT_TX_STAT_FIFO_VALID);
+       stat.success = !!(val & MT_TX_STAT_FIFO_SUCCESS);
+       stat.aggr = !!(val & MT_TX_STAT_FIFO_AGGR);
+       stat.ack_req = !!(val & MT_TX_STAT_FIFO_ACKREQ);
+       stat.pktid = MT76_GET(MT_TX_STAT_FIFO_PID_TYPE, val);
+       stat.wcid = MT76_GET(MT_TX_STAT_FIFO_WCID, val);
+       stat.rate = MT76_GET(MT_TX_STAT_FIFO_RATE, val);
+
+       return stat;
+}
+
+void mt76_send_tx_status(struct mt7601u_dev *dev, struct mt76_tx_status *stat)
+{
+       struct ieee80211_tx_info info = {};
+       struct ieee80211_sta *sta = NULL;
+       struct mt76_wcid *wcid = NULL;
+       void *msta;
+
+       rcu_read_lock();
+       if (stat->wcid < ARRAY_SIZE(dev->wcid))
+               wcid = rcu_dereference(dev->wcid[stat->wcid]);
+
+       if (wcid) {
+               msta = container_of(wcid, struct mt76_sta, wcid);
+               sta = container_of(msta, struct ieee80211_sta,
+                                  drv_priv);
+       }
+
+       mt76_mac_fill_tx_status(dev, &info, stat);
+       ieee80211_tx_status_noskb(dev->hw, sta, &info);
+       rcu_read_unlock();
+}
+
+void mt7601u_mac_set_protection(struct mt7601u_dev *dev, bool legacy_prot,
+                               int ht_mode)
+{
+       int mode = ht_mode & IEEE80211_HT_OP_MODE_PROTECTION;
+       bool non_gf = !!(ht_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
+       u32 prot[6];
+       bool ht_rts[4] = {};
+       int i;
+
+       prot[0] = MT_PROT_NAV_SHORT |
+                 MT_PROT_TXOP_ALLOW_ALL |
+                 MT_PROT_RTS_THR_EN;
+       prot[1] = prot[0];
+       if (legacy_prot)
+               prot[1] |= MT_PROT_CTRL_CTS2SELF;
+
+       prot[2] = prot[4] = MT_PROT_NAV_SHORT | MT_PROT_TXOP_ALLOW_BW20;
+       prot[3] = prot[5] = MT_PROT_NAV_SHORT | MT_PROT_TXOP_ALLOW_ALL;
+
+       if (legacy_prot) {
+               prot[2] |= MT_PROT_RATE_CCK_11;
+               prot[3] |= MT_PROT_RATE_CCK_11;
+               prot[4] |= MT_PROT_RATE_CCK_11;
+               prot[5] |= MT_PROT_RATE_CCK_11;
+       } else {
+               prot[2] |= MT_PROT_RATE_OFDM_24;
+               prot[3] |= MT_PROT_RATE_DUP_OFDM_24;
+               prot[4] |= MT_PROT_RATE_OFDM_24;
+               prot[5] |= MT_PROT_RATE_DUP_OFDM_24;
+       }
+
+       switch (mode) {
+       case IEEE80211_HT_OP_MODE_PROTECTION_NONE:
+               break;
+
+       case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER:
+               ht_rts[0] = ht_rts[1] = ht_rts[2] = ht_rts[3] = true;
+               break;
+
+       case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
+               ht_rts[1] = ht_rts[3] = true;
+               break;
+
+       case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
+               ht_rts[0] = ht_rts[1] = ht_rts[2] = ht_rts[3] = true;
+               break;
+       }
+
+       if (non_gf)
+               ht_rts[2] = ht_rts[3] = true;
+
+       for (i = 0; i < 4; i++)
+               if (ht_rts[i])
+                       prot[i + 2] |= MT_PROT_CTRL_RTS_CTS;
+
+       for (i = 0; i < 6; i++)
+               mt7601u_wr(dev, MT_CCK_PROT_CFG + i * 4, prot[i]);
+}
+
+void mt7601u_mac_set_short_preamble(struct mt7601u_dev *dev, bool short_preamb)
+{
+       if (short_preamb)
+               mt76_set(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
+       else
+               mt76_clear(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
+}
+
+void mt7601u_mac_config_tsf(struct mt7601u_dev *dev, bool enable, int interval)
+{
+       u32 val = mt7601u_rr(dev, MT_BEACON_TIME_CFG);
+
+       val &= ~(MT_BEACON_TIME_CFG_TIMER_EN |
+                MT_BEACON_TIME_CFG_SYNC_MODE |
+                MT_BEACON_TIME_CFG_TBTT_EN);
+
+       if (!enable) {
+               mt7601u_wr(dev, MT_BEACON_TIME_CFG, val);
+               return;
+       }
+
+       val &= ~MT_BEACON_TIME_CFG_INTVAL;
+       val |= MT76_SET(MT_BEACON_TIME_CFG_INTVAL, interval << 4) |
+               MT_BEACON_TIME_CFG_TIMER_EN |
+               MT_BEACON_TIME_CFG_SYNC_MODE |
+               MT_BEACON_TIME_CFG_TBTT_EN;
+
+       mt7601u_wr(dev, MT_BEACON_TIME_CFG, val);
+}
+
+static void mt7601u_check_mac_err(struct mt7601u_dev *dev)
+{
+       u32 val = mt7601u_rr(dev, 0x10f4);
+
+       if (!(val & BIT(29)) || !(val & (BIT(7) | BIT(5))))
+               return;
+
+       dev_err(dev->dev, "Error: MAC specific condition occurred\n");
+
+       mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR);
+       udelay(10);
+       mt76_clear(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR);
+}
+
+void mt7601u_mac_work(struct work_struct *work)
+{
+       struct mt7601u_dev *dev = container_of(work, struct mt7601u_dev,
+                                              mac_work.work);
+       struct {
+               u32 addr_base;
+               u32 span;
+               u64 *stat_base;
+       } spans[] = {
+               { MT_RX_STA_CNT0,       3,      dev->stats.rx_stat },
+               { MT_TX_STA_CNT0,       3,      dev->stats.tx_stat },
+               { MT_TX_AGG_STAT,       1,      dev->stats.aggr_stat },
+               { MT_MPDU_DENSITY_CNT,  1,      dev->stats.zero_len_del },
+               { MT_TX_AGG_CNT_BASE0,  8,      &dev->stats.aggr_n[0] },
+               { MT_TX_AGG_CNT_BASE1,  8,      &dev->stats.aggr_n[16] },
+       };
+       u32 sum, n;
+       int i, j, k;
+
+       /* Note: using MCU_RANDOM_READ is actually slower than reading all the
+        *       registers by hand.  MCU takes ca. 20ms to complete read of 24
+        *       registers while reading them one by one takes roughly
+        *       24*200us =~ 5ms.
+        */
+
+       k = 0;
+       n = 0;
+       sum = 0;
+       for (i = 0; i < ARRAY_SIZE(spans); i++)
+               for (j = 0; j < spans[i].span; j++) {
+                       u32 val = mt7601u_rr(dev, spans[i].addr_base + j * 4);
+
+                       spans[i].stat_base[j * 2] += val & 0xffff;
+                       spans[i].stat_base[j * 2 + 1] += val >> 16;
+
+                       /* Calculate average AMPDU length */
+                       if (spans[i].addr_base != MT_TX_AGG_CNT_BASE0 &&
+                           spans[i].addr_base != MT_TX_AGG_CNT_BASE1)
+                               continue;
+
+                       n += (val >> 16) + (val & 0xffff);
+                       sum += (val & 0xffff) * (1 + k * 2) +
+                               (val >> 16) * (2 + k * 2);
+                       k++;
+               }
+
+       atomic_set(&dev->avg_ampdu_len, n ? DIV_ROUND_CLOSEST(sum, n) : 1);
+
+       mt7601u_check_mac_err(dev);
+
+       ieee80211_queue_delayed_work(dev->hw, &dev->mac_work, 10 * HZ);
+}
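
The aggregation counters read above pack two 16-bit histogram buckets per register: register k of the MT_TX_AGG_CNT banks counts AMPDUs of length 2k+1 in its low half and 2k+2 in its high half, and the loop reduces that histogram to a weighted mean. A worked example with hypothetical register values:

    /* MT_TX_AGG_CNT_BASE0 + 0 == 0x00030005: 5 AMPDUs of len 1, 3 of len 2
     * MT_TX_AGG_CNT_BASE0 + 4 == 0x00000002: 2 AMPDUs of len 3
     *
     *   n   = 5 + 3 + 2                 = 10
     *   sum = 5*1 + 3*2 + 2*3           = 17
     *   avg = DIV_ROUND_CLOSEST(17, 10) = 2
     */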
+
+void
+mt7601u_mac_wcid_setup(struct mt7601u_dev *dev, u8 idx, u8 vif_idx, u8 *mac)
+{
+       u8 zmac[ETH_ALEN] = {};
+       u32 attr;
+
+       attr = MT76_SET(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
+              MT76_SET(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));
+
+       mt76_wr(dev, MT_WCID_ATTR(idx), attr);
+
+       if (mac)
+               memcpy(zmac, mac, sizeof(zmac));
+
+       mt7601u_addr_wr(dev, MT_WCID_ADDR(idx), zmac);
+}
+
+void mt7601u_mac_set_ampdu_factor(struct mt7601u_dev *dev)
+{
+       struct ieee80211_sta *sta;
+       struct mt76_wcid *wcid;
+       void *msta;
+       u8 min_factor = 3;
+       int i;
+
+       rcu_read_lock();
+       for (i = 0; i < ARRAY_SIZE(dev->wcid); i++) {
+               wcid = rcu_dereference(dev->wcid[i]);
+               if (!wcid)
+                       continue;
+
+               msta = container_of(wcid, struct mt76_sta, wcid);
+               sta = container_of(msta, struct ieee80211_sta, drv_priv);
+
+               min_factor = min(min_factor, sta->ht_cap.ampdu_factor);
+       }
+       rcu_read_unlock();
+
+       mt7601u_wr(dev, MT_MAX_LEN_CFG, 0xa0fff |
+                  MT76_SET(MT_MAX_LEN_CFG_AMPDU, min_factor));
+}
+
+static void
+mt76_mac_process_rate(struct ieee80211_rx_status *status, u16 rate)
+{
+       u8 idx = MT76_GET(MT_RXWI_RATE_MCS, rate);
+
+       switch (MT76_GET(MT_RXWI_RATE_PHY, rate)) {
+       case MT_PHY_TYPE_OFDM:
+               if (WARN_ON(idx >= 8))
+                       idx = 0;
+               idx += 4;
+
+               status->rate_idx = idx;
+               return;
+       case MT_PHY_TYPE_CCK:
+               if (idx >= 8) {
+                       idx -= 8;
+                       status->flag |= RX_FLAG_SHORTPRE;
+               }
+
+               if (WARN_ON(idx >= 4))
+                       idx = 0;
+
+               status->rate_idx = idx;
+               return;
+       case MT_PHY_TYPE_HT_GF:
+               status->flag |= RX_FLAG_HT_GF;
+               /* fall through */
+       case MT_PHY_TYPE_HT:
+               status->flag |= RX_FLAG_HT;
+               status->rate_idx = idx;
+               break;
+       default:
+               WARN_ON(1);
+               return;
+       }
+
+       if (rate & MT_RXWI_RATE_SGI)
+               status->flag |= RX_FLAG_SHORT_GI;
+
+       if (rate & MT_RXWI_RATE_STBC)
+               status->flag |= 1 << RX_FLAG_STBC_SHIFT;
+
+       if (rate & MT_RXWI_RATE_BW)
+               status->flag |= RX_FLAG_40MHZ;
+}
+
+static void
+mt7601u_rx_monitor_beacon(struct mt7601u_dev *dev, struct mt7601u_rxwi *rxwi,
+                         u16 rate, int rssi)
+{
+       dev->bcn_freq_off = rxwi->freq_off;
+       dev->bcn_phy_mode = MT76_GET(MT_RXWI_RATE_PHY, rate);
+       dev->avg_rssi = (dev->avg_rssi * 15) / 16 + (rssi << 8);
+}
+
+static int
+mt7601u_rx_is_our_beacon(struct mt7601u_dev *dev, u8 *data)
+{
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)data;
+
+       return ieee80211_is_beacon(hdr->frame_control) &&
+               ether_addr_equal(hdr->addr2, dev->ap_bssid);
+}
+
+u32 mt76_mac_process_rx(struct mt7601u_dev *dev, struct sk_buff *skb,
+                       u8 *data, void *rxi)
+{
+       struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+       struct mt7601u_rxwi *rxwi = rxi;
+       u32 ctl = le32_to_cpu(rxwi->ctl);
+       u16 rate = le16_to_cpu(rxwi->rate);
+       int rssi;
+
+       if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_DECRYPT)) {
+               status->flag |= RX_FLAG_DECRYPTED;
+               status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED;
+       }
+
+       status->chains = BIT(0);
+       rssi = mt7601u_phy_get_rssi(dev, rxwi, rate);
+       status->chain_signal[0] = status->signal = rssi;
+       status->freq = dev->chandef.chan->center_freq;
+       status->band = dev->chandef.chan->band;
+
+       mt76_mac_process_rate(status, rate);
+
+       spin_lock_bh(&dev->con_mon_lock);
+       if (mt7601u_rx_is_our_beacon(dev, data))
+               mt7601u_rx_monitor_beacon(dev, rxwi, rate, rssi);
+       else if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_U2M))
+               dev->avg_rssi = (dev->avg_rssi * 15) / 16 + (rssi << 8);
+       spin_unlock_bh(&dev->con_mon_lock);
+
+       return MT76_GET(MT_RXWI_CTL_MPDU_LEN, ctl);
+}
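
The avg_rssi updates above and in mt7601u_rx_monitor_beacon() are a fixed-point EWMA: the accumulator decays by 15/16 and each sample is added scaled by 256. Solving the steady state shows the accumulator carries the mean with 12 fractional bits; this is an observation from the arithmetic, not a documented contract:

    /*   avg' = avg * 15/16 + (rssi << 8)
     * at steady state (constant rssi):
     *   avg == avg * 15/16 + (rssi << 8)  =>  avg / 16 == rssi << 8
     *   avg == rssi << 12
     * i.e. the running mean is avg_rssi >> 12.
     */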
+
+static enum mt76_cipher_type
+mt76_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
+{
+       memset(key_data, 0, 32);
+       if (!key)
+               return MT_CIPHER_NONE;
+
+       if (key->keylen > 32)
+               return MT_CIPHER_NONE;
+
+       memcpy(key_data, key->key, key->keylen);
+
+       switch (key->cipher) {
+       case WLAN_CIPHER_SUITE_WEP40:
+               return MT_CIPHER_WEP40;
+       case WLAN_CIPHER_SUITE_WEP104:
+               return MT_CIPHER_WEP104;
+       case WLAN_CIPHER_SUITE_TKIP:
+               return MT_CIPHER_TKIP;
+       case WLAN_CIPHER_SUITE_CCMP:
+               return MT_CIPHER_AES_CCMP;
+       default:
+               return MT_CIPHER_NONE;
+       }
+}
+
+int mt76_mac_wcid_set_key(struct mt7601u_dev *dev, u8 idx,
+                         struct ieee80211_key_conf *key)
+{
+       enum mt76_cipher_type cipher;
+       u8 key_data[32];
+       u8 iv_data[8];
+       u32 val;
+
+       cipher = mt76_mac_get_key_info(key, key_data);
+       if (cipher == MT_CIPHER_NONE && key)
+               return -EINVAL;
+
+       trace_set_key(dev, idx);
+
+       mt7601u_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));
+
+       memset(iv_data, 0, sizeof(iv_data));
+       if (key) {
+               iv_data[3] = key->keyidx << 6;
+               if (cipher >= MT_CIPHER_TKIP) {
+                       /* Note: start with 1 to comply with the spec
+                        *       (see comment on common/cmm_wpa.c:4291).
+                        */
+                       iv_data[0] |= 1;
+                       iv_data[3] |= 0x20;
+               }
+       }
+       mt7601u_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));
+
+       val = mt7601u_rr(dev, MT_WCID_ATTR(idx));
+       val &= ~MT_WCID_ATTR_PKEY_MODE & ~MT_WCID_ATTR_PKEY_MODE_EXT;
+       val |= MT76_SET(MT_WCID_ATTR_PKEY_MODE, cipher & 7) |
+              MT76_SET(MT_WCID_ATTR_PKEY_MODE_EXT, cipher >> 3);
+       val &= ~MT_WCID_ATTR_PAIRWISE;
+       val |= MT_WCID_ATTR_PAIRWISE *
+               !!(key && key->flags & IEEE80211_KEY_FLAG_PAIRWISE);
+       mt7601u_wr(dev, MT_WCID_ATTR(idx), val);
+
+       return 0;
+}
+
+int mt76_mac_shared_key_setup(struct mt7601u_dev *dev, u8 vif_idx, u8 key_idx,
+                             struct ieee80211_key_conf *key)
+{
+       enum mt76_cipher_type cipher;
+       u8 key_data[32];
+       u32 val;
+
+       cipher = mt76_mac_get_key_info(key, key_data);
+       if (cipher == MT_CIPHER_NONE && key)
+               return -EINVAL;
+
+       trace_set_shared_key(dev, vif_idx, key_idx);
+
+       mt7601u_wr_copy(dev, MT_SKEY(vif_idx, key_idx),
+                       key_data, sizeof(key_data));
+
+       val = mt76_rr(dev, MT_SKEY_MODE(vif_idx));
+       val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx));
+       val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx);
+       mt76_wr(dev, MT_SKEY_MODE(vif_idx), val);
+
+       return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt7601u/mac.h b/drivers/net/wireless/mediatek/mt7601u/mac.h
new file mode 100644 (file)
index 0000000..2c22d63
--- /dev/null
@@ -0,0 +1,178 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT76_MAC_H
+#define __MT76_MAC_H
+
+struct mt76_tx_status {
+       u8 valid:1;
+       u8 success:1;
+       u8 aggr:1;
+       u8 ack_req:1;
+       u8 is_probe:1;
+       u8 wcid;
+       u8 pktid;
+       u8 retry;
+       u16 rate;
+} __packed __aligned(2);
+
+/* Note: values in the original "RSSI" and "SNR" fields are not actually what
+ *      their names suggest on MT7601U; the names used by this driver are
+ *      educated guesses (see vendor mac/ral_omac.c).
+ */
+struct mt7601u_rxwi {
+       __le32 rxinfo;
+
+       __le32 ctl;
+
+       __le16 frag_sn;
+       __le16 rate;
+
+       u8 unknown;
+       u8 zero[3];
+
+       u8 snr;
+       u8 ant;
+       u8 gain;
+       u8 freq_off;
+
+       __le32 resv2;
+       __le32 expert_ant;
+} __packed __aligned(4);
+
+#define MT_RXINFO_BA                   BIT(0)
+#define MT_RXINFO_DATA                 BIT(1)
+#define MT_RXINFO_NULL                 BIT(2)
+#define MT_RXINFO_FRAG                 BIT(3)
+#define MT_RXINFO_U2M                  BIT(4)
+#define MT_RXINFO_MULTICAST            BIT(5)
+#define MT_RXINFO_BROADCAST            BIT(6)
+#define MT_RXINFO_MYBSS                        BIT(7)
+#define MT_RXINFO_CRCERR               BIT(8)
+#define MT_RXINFO_ICVERR               BIT(9)
+#define MT_RXINFO_MICERR               BIT(10)
+#define MT_RXINFO_AMSDU                        BIT(11)
+#define MT_RXINFO_HTC                  BIT(12)
+#define MT_RXINFO_RSSI                 BIT(13)
+#define MT_RXINFO_L2PAD                        BIT(14)
+#define MT_RXINFO_AMPDU                        BIT(15)
+#define MT_RXINFO_DECRYPT              BIT(16)
+#define MT_RXINFO_BSSIDX3              BIT(17)
+#define MT_RXINFO_WAPI_KEY             BIT(18)
+#define MT_RXINFO_PN_LEN               GENMASK(21, 19)
+#define MT_RXINFO_SW_PKT_80211         BIT(22)
+#define MT_RXINFO_TCP_SUM_BYPASS       BIT(28)
+#define MT_RXINFO_IP_SUM_BYPASS                BIT(29)
+#define MT_RXINFO_TCP_SUM_ERR          BIT(30)
+#define MT_RXINFO_IP_SUM_ERR           BIT(31)
+
+#define MT_RXWI_CTL_WCID               GENMASK(7, 0)
+#define MT_RXWI_CTL_KEY_IDX            GENMASK(9, 8)
+#define MT_RXWI_CTL_BSS_IDX            GENMASK(12, 10)
+#define MT_RXWI_CTL_UDF                        GENMASK(15, 13)
+#define MT_RXWI_CTL_MPDU_LEN           GENMASK(27, 16)
+#define MT_RXWI_CTL_TID                        GENMASK(31, 28)
+
+#define MT_RXWI_FRAG                   GENMASK(3, 0)
+#define MT_RXWI_SN                     GENMASK(15, 4)
+
+#define MT_RXWI_RATE_MCS               GENMASK(6, 0)
+#define MT_RXWI_RATE_BW                        BIT(7)
+#define MT_RXWI_RATE_SGI               BIT(8)
+#define MT_RXWI_RATE_STBC              GENMASK(10, 9)
+#define MT_RXWI_RATE_ETXBF             BIT(11)
+#define MT_RXWI_RATE_SND               BIT(12)
+#define MT_RXWI_RATE_ITXBF             BIT(13)
+#define MT_RXWI_RATE_PHY               GENMASK(15, 14)
+
+#define MT_RXWI_GAIN_RSSI_VAL          GENMASK(5, 0)
+#define MT_RXWI_GAIN_RSSI_LNA_ID       GENMASK(7, 6)
+#define MT_RXWI_ANT_AUX_LNA            BIT(7)
+
+#define MT_RXWI_EANT_ENC_ANT_ID                GENMASK(7, 0)
+
+enum mt76_phy_type {
+       MT_PHY_TYPE_CCK,
+       MT_PHY_TYPE_OFDM,
+       MT_PHY_TYPE_HT,
+       MT_PHY_TYPE_HT_GF,
+};
+
+enum mt76_phy_bandwidth {
+       MT_PHY_BW_20,
+       MT_PHY_BW_40,
+};
+
+struct mt76_txwi {
+       __le16 flags;
+       __le16 rate_ctl;
+
+       u8 ack_ctl;
+       u8 wcid;
+       __le16 len_ctl;
+
+       __le32 iv;
+
+       __le32 eiv;
+
+       u8 aid;
+       u8 txstream;
+       __le16 ctl;
+} __packed __aligned(4);
+
+#define MT_TXWI_FLAGS_FRAG             BIT(0)
+#define MT_TXWI_FLAGS_MMPS             BIT(1)
+#define MT_TXWI_FLAGS_CFACK            BIT(2)
+#define MT_TXWI_FLAGS_TS               BIT(3)
+#define MT_TXWI_FLAGS_AMPDU            BIT(4)
+#define MT_TXWI_FLAGS_MPDU_DENSITY     GENMASK(7, 5)
+#define MT_TXWI_FLAGS_TXOP             GENMASK(9, 8)
+#define MT_TXWI_FLAGS_CWMIN            GENMASK(12, 10)
+#define MT_TXWI_FLAGS_NO_RATE_FALLBACK BIT(13)
+#define MT_TXWI_FLAGS_TX_RPT           BIT(14)
+#define MT_TXWI_FLAGS_TX_RATE_LUT      BIT(15)
+
+#define MT_TXWI_RATE_MCS               GENMASK(6, 0)
+#define MT_TXWI_RATE_BW                        BIT(7)
+#define MT_TXWI_RATE_SGI               BIT(8)
+#define MT_TXWI_RATE_STBC              GENMASK(10, 9)
+#define MT_TXWI_RATE_PHY_MODE          GENMASK(15, 14)
+
+#define MT_TXWI_ACK_CTL_REQ            BIT(0)
+#define MT_TXWI_ACK_CTL_NSEQ           BIT(1)
+#define MT_TXWI_ACK_CTL_BA_WINDOW      GENMASK(7, 2)
+
+#define MT_TXWI_LEN_BYTE_CNT           GENMASK(11, 0)
+#define MT_TXWI_LEN_PKTID              GENMASK(15, 12)
+
+#define MT_TXWI_CTL_TX_POWER_ADJ       GENMASK(3, 0)
+#define MT_TXWI_CTL_CHAN_CHECK_PKT     BIT(4)
+#define MT_TXWI_CTL_PIFS_REV           BIT(6)
+
+u32 mt76_mac_process_rx(struct mt7601u_dev *dev, struct sk_buff *skb,
+                       u8 *data, void *rxi);
+int mt76_mac_wcid_set_key(struct mt7601u_dev *dev, u8 idx,
+                         struct ieee80211_key_conf *key);
+void mt76_mac_wcid_set_rate(struct mt7601u_dev *dev, struct mt76_wcid *wcid,
+                           const struct ieee80211_tx_rate *rate);
+
+int mt76_mac_shared_key_setup(struct mt7601u_dev *dev, u8 vif_idx, u8 key_idx,
+                             struct ieee80211_key_conf *key);
+u16 mt76_mac_tx_rate_val(struct mt7601u_dev *dev,
+                        const struct ieee80211_tx_rate *rate, u8 *nss_val);
+struct mt76_tx_status
+mt7601u_mac_fetch_tx_status(struct mt7601u_dev *dev);
+void mt76_send_tx_status(struct mt7601u_dev *dev, struct mt76_tx_status *stat);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt7601u/main.c b/drivers/net/wireless/mediatek/mt7601u/main.c
new file mode 100644 (file)
index 0000000..ced82ab
--- /dev/null
@@ -0,0 +1,412 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt7601u.h"
+#include "mac.h"
+#include <linux/etherdevice.h>
+
+static int mt7601u_start(struct ieee80211_hw *hw)
+{
+       struct mt7601u_dev *dev = hw->priv;
+       int ret;
+
+       mutex_lock(&dev->mutex);
+
+       ret = mt7601u_mac_start(dev);
+       if (ret)
+               goto out;
+
+       ieee80211_queue_delayed_work(dev->hw, &dev->mac_work,
+                                    MT_CALIBRATE_INTERVAL);
+       ieee80211_queue_delayed_work(dev->hw, &dev->cal_work,
+                                    MT_CALIBRATE_INTERVAL);
+out:
+       mutex_unlock(&dev->mutex);
+       return ret;
+}
+
+static void mt7601u_stop(struct ieee80211_hw *hw)
+{
+       struct mt7601u_dev *dev = hw->priv;
+
+       mutex_lock(&dev->mutex);
+
+       cancel_delayed_work_sync(&dev->cal_work);
+       cancel_delayed_work_sync(&dev->mac_work);
+       mt7601u_mac_stop(dev);
+
+       mutex_unlock(&dev->mutex);
+}
+
+static int mt7601u_add_interface(struct ieee80211_hw *hw,
+                                struct ieee80211_vif *vif)
+{
+       struct mt7601u_dev *dev = hw->priv;
+       struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
+       unsigned int idx = 0;
+       unsigned int wcid = GROUP_WCID(idx);
+
+       /* Note: for AP mode do the AP-STA setup mt76 does:
+        *      - program the beacon offsets
+        *      - apply the mac address tricks
+        *      - shift the vif idx
+        */
+       mvif->idx = idx;
+
+       if (dev->wcid_mask[wcid / BITS_PER_LONG] & BIT(wcid % BITS_PER_LONG))
+               return -ENOSPC;
+       dev->wcid_mask[wcid / BITS_PER_LONG] |= BIT(wcid % BITS_PER_LONG);
+       mvif->group_wcid.idx = wcid;
+       mvif->group_wcid.hw_key_idx = -1;
+
+       return 0;
+}
+
+static void mt7601u_remove_interface(struct ieee80211_hw *hw,
+                                    struct ieee80211_vif *vif)
+{
+       struct mt7601u_dev *dev = hw->priv;
+       struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
+       unsigned int wcid = mvif->group_wcid.idx;
+
+       dev->wcid_mask[wcid / BITS_PER_LONG] &= ~BIT(wcid % BITS_PER_LONG);
+}
+
+static int mt7601u_config(struct ieee80211_hw *hw, u32 changed)
+{
+       struct mt7601u_dev *dev = hw->priv;
+       int ret = 0;
+
+       mutex_lock(&dev->mutex);
+
+       if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+               ieee80211_stop_queues(hw);
+               ret = mt7601u_phy_set_channel(dev, &hw->conf.chandef);
+               ieee80211_wake_queues(hw);
+       }
+
+       mutex_unlock(&dev->mutex);
+
+       return ret;
+}
+
+static void
+mt76_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
+                     unsigned int *total_flags, u64 multicast)
+{
+       struct mt7601u_dev *dev = hw->priv;
+       u32 flags = 0;
+
+#define MT76_FILTER(_flag, _hw) do { \
+               flags |= *total_flags & FIF_##_flag;                    \
+               dev->rxfilter &= ~(_hw);                                \
+               dev->rxfilter |= !(flags & FIF_##_flag) * (_hw);        \
+       } while (0)
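+       /* MT76_FILTER() records that we honour the given FIF_* flag and
+        * programs the matching MT_RX_FILTR_CFG bit only when the flag
+        * is not requested - the hardware bits appear to select frames
+        * to drop, while the FIF_* flags select frames to pass.
+        */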
+
+       mutex_lock(&dev->mutex);
+
+       dev->rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS;
+
+       MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR);
+       MT76_FILTER(PLCPFAIL, MT_RX_FILTR_CFG_PHY_ERR);
+       MT76_FILTER(CONTROL, MT_RX_FILTR_CFG_ACK |
+                            MT_RX_FILTR_CFG_CTS |
+                            MT_RX_FILTR_CFG_CFEND |
+                            MT_RX_FILTR_CFG_CFACK |
+                            MT_RX_FILTR_CFG_BA |
+                            MT_RX_FILTR_CFG_CTRL_RSV);
+       MT76_FILTER(PSPOLL, MT_RX_FILTR_CFG_PSPOLL);
+
+       *total_flags = flags;
+       mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
+
+       mutex_unlock(&dev->mutex);
+}
+
+static void
+mt7601u_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                        struct ieee80211_bss_conf *info, u32 changed)
+{
+       struct mt7601u_dev *dev = hw->priv;
+
+       mutex_lock(&dev->mutex);
+
+       if (changed & BSS_CHANGED_ASSOC)
+               mt7601u_phy_con_cal_onoff(dev, info);
+
+       if (changed & BSS_CHANGED_BSSID) {
+               mt7601u_addr_wr(dev, MT_MAC_BSSID_DW0, info->bssid);
+
+               /* Note: this is a hack because beacon_int is not reset
+                *       when leaving a BSS and no more suitable event is
+                *       generated. rt2x00 doesn't seem to be bothered by
+                *       this either.
+                */
+               if (is_zero_ether_addr(info->bssid))
+                       mt7601u_mac_config_tsf(dev, false, 0);
+       }
+
+       if (changed & BSS_CHANGED_BASIC_RATES) {
+               mt7601u_wr(dev, MT_LEGACY_BASIC_RATE, info->basic_rates);
+               mt7601u_wr(dev, MT_HT_FBK_CFG0, 0x65432100);
+               mt7601u_wr(dev, MT_HT_FBK_CFG1, 0xedcba980);
+               mt7601u_wr(dev, MT_LG_FBK_CFG0, 0xedcba988);
+               mt7601u_wr(dev, MT_LG_FBK_CFG1, 0x00002100);
+       }
+
+       if (changed & BSS_CHANGED_BEACON_INT)
+               mt7601u_mac_config_tsf(dev, true, info->beacon_int);
+
+       if (changed & BSS_CHANGED_HT || changed & BSS_CHANGED_ERP_CTS_PROT)
+               mt7601u_mac_set_protection(dev, info->use_cts_prot,
+                                          info->ht_operation_mode);
+
+       if (changed & BSS_CHANGED_ERP_PREAMBLE)
+               mt7601u_mac_set_short_preamble(dev, info->use_short_preamble);
+
+       if (changed & BSS_CHANGED_ERP_SLOT) {
+               int slottime = info->use_short_slot ? 9 : 20;
+
+               mt76_rmw_field(dev, MT_BKOFF_SLOT_CFG,
+                              MT_BKOFF_SLOT_CFG_SLOTTIME, slottime);
+       }
+
+       if (changed & BSS_CHANGED_ASSOC)
+               mt7601u_phy_recalibrate_after_assoc(dev);
+
+       mutex_unlock(&dev->mutex);
+}
+
+static int
+mt76_wcid_alloc(struct mt7601u_dev *dev)
+{
+       int i, idx = 0;
+
+       for (i = 0; i < ARRAY_SIZE(dev->wcid_mask); i++) {
+               idx = ffs(~dev->wcid_mask[i]);
+               if (!idx)
+                       continue;
+
+               idx--;
+               dev->wcid_mask[i] |= BIT(idx);
+               break;
+       }
+
+       idx = i * BITS_PER_LONG + idx;
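+       /* The top of the table is taken by the per-vif group wcids and
+        * the reserved 0x7f entry (see GROUP_WCID()), hence the 119 cap.
+        */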
+       if (idx > 119)
+               return -1;
+
+       return idx;
+}
+
+static int
+mt7601u_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+               struct ieee80211_sta *sta)
+{
+       struct mt7601u_dev *dev = hw->priv;
+       struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
+       struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
+       int ret = 0;
+       int idx = 0;
+
+       mutex_lock(&dev->mutex);
+
+       idx = mt76_wcid_alloc(dev);
+       if (idx < 0) {
+               ret = -ENOSPC;
+               goto out;
+       }
+
+       msta->wcid.idx = idx;
+       msta->wcid.hw_key_idx = -1;
+       mt7601u_mac_wcid_setup(dev, idx, mvif->idx, sta->addr);
+       mt76_clear(dev, MT_WCID_DROP(idx), MT_WCID_DROP_MASK(idx));
+       rcu_assign_pointer(dev->wcid[idx], &msta->wcid);
+       mt7601u_mac_set_ampdu_factor(dev);
+
+out:
+       mutex_unlock(&dev->mutex);
+
+       return ret;
+}
+
+static int
+mt7601u_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                  struct ieee80211_sta *sta)
+{
+       struct mt7601u_dev *dev = hw->priv;
+       struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
+       int idx = msta->wcid.idx;
+
+       mutex_lock(&dev->mutex);
+       rcu_assign_pointer(dev->wcid[idx], NULL);
+       mt76_set(dev, MT_WCID_DROP(idx), MT_WCID_DROP_MASK(idx));
+       dev->wcid_mask[idx / BITS_PER_LONG] &= ~BIT(idx % BITS_PER_LONG);
+       mt7601u_mac_wcid_setup(dev, idx, 0, NULL);
+       mt7601u_mac_set_ampdu_factor(dev);
+       mutex_unlock(&dev->mutex);
+
+       return 0;
+}
+
+static void
+mt7601u_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                  enum sta_notify_cmd cmd, struct ieee80211_sta *sta)
+{
+}
+
+static void
+mt7601u_sw_scan(struct ieee80211_hw *hw,
+               struct ieee80211_vif *vif,
+               const u8 *mac_addr)
+{
+       struct mt7601u_dev *dev = hw->priv;
+
+       mt7601u_agc_save(dev);
+       set_bit(MT7601U_STATE_SCANNING, &dev->state);
+}
+
+static void
+mt7601u_sw_scan_complete(struct ieee80211_hw *hw,
+                        struct ieee80211_vif *vif)
+{
+       struct mt7601u_dev *dev = hw->priv;
+
+       mt7601u_agc_restore(dev);
+       clear_bit(MT7601U_STATE_SCANNING, &dev->state);
+}
+
+static int
+mt7601u_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+               struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+               struct ieee80211_key_conf *key)
+{
+       struct mt7601u_dev *dev = hw->priv;
+       struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
+       struct mt76_sta *msta = sta ? (struct mt76_sta *) sta->drv_priv : NULL;
+       struct mt76_wcid *wcid = msta ? &msta->wcid : &mvif->group_wcid;
+       int idx = key->keyidx;
+       int ret;
+
+       if (cmd == SET_KEY) {
+               key->hw_key_idx = wcid->idx;
+               wcid->hw_key_idx = idx;
+       } else {
+               if (idx == wcid->hw_key_idx)
+                       wcid->hw_key_idx = -1;
+
+               key = NULL;
+       }
+
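+       /* No station means this is a group key: install it in the vif's
+        * group wcid (when setting it, or when removing the currently
+        * installed index) and in the per-vif shared key slot.
+        */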
+       if (!msta) {
+               if (key || wcid->hw_key_idx == idx) {
+                       ret = mt76_mac_wcid_set_key(dev, wcid->idx, key);
+                       if (ret)
+                               return ret;
+               }
+
+               return mt76_mac_shared_key_setup(dev, mvif->idx, idx, key);
+       }
+
+       return mt76_mac_wcid_set_key(dev, msta->wcid.idx, key);
+}
+
+static int mt7601u_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+       struct mt7601u_dev *dev = hw->priv;
+
+       mt76_rmw_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH, value);
+
+       return 0;
+}
+
+static int
+mt76_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                 enum ieee80211_ampdu_mlme_action action,
+                 struct ieee80211_sta *sta, u16 tid, u16 *ssn, u8 buf_size)
+{
+       struct mt7601u_dev *dev = hw->priv;
+       struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
+
+       WARN_ON(msta->wcid.idx > GROUP_WCID(0));
+
+       switch (action) {
+       case IEEE80211_AMPDU_RX_START:
+               mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid));
+               break;
+       case IEEE80211_AMPDU_RX_STOP:
+               mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4,
+                          BIT(16 + tid));
+               break;
+       case IEEE80211_AMPDU_TX_OPERATIONAL:
+               ieee80211_send_bar(vif, sta->addr, tid, msta->agg_ssn[tid]);
+               break;
+       case IEEE80211_AMPDU_TX_STOP_FLUSH:
+       case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+               break;
+       case IEEE80211_AMPDU_TX_START:
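+               /* Keep the SSN pre-shifted into its seq_ctrl position
+                * (the low 4 bits hold the fragment number), ready for
+                * ieee80211_send_bar() above.
+                */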
+               msta->agg_ssn[tid] = *ssn << 4;
+               ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+               break;
+       case IEEE80211_AMPDU_TX_STOP_CONT:
+               ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+               break;
+       }
+
+       return 0;
+}
+
+static void
+mt76_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                        struct ieee80211_sta *sta)
+{
+       struct mt7601u_dev *dev = hw->priv;
+       struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
+       struct ieee80211_sta_rates *rates;
+       struct ieee80211_tx_rate rate = {};
+
+       rcu_read_lock();
+       rates = rcu_dereference(sta->rates);
+
+       if (!rates)
+               goto out;
+
+       rate.idx = rates->rate[0].idx;
+       rate.flags = rates->rate[0].flags;
+       mt76_mac_wcid_set_rate(dev, &msta->wcid, &rate);
+
+out:
+       rcu_read_unlock();
+}
+
+const struct ieee80211_ops mt7601u_ops = {
+       .tx = mt7601u_tx,
+       .start = mt7601u_start,
+       .stop = mt7601u_stop,
+       .add_interface = mt7601u_add_interface,
+       .remove_interface = mt7601u_remove_interface,
+       .config = mt7601u_config,
+       .configure_filter = mt76_configure_filter,
+       .bss_info_changed = mt7601u_bss_info_changed,
+       .sta_add = mt7601u_sta_add,
+       .sta_remove = mt7601u_sta_remove,
+       .sta_notify = mt7601u_sta_notify,
+       .set_key = mt7601u_set_key,
+       .conf_tx = mt7601u_conf_tx,
+       .sw_scan_start = mt7601u_sw_scan,
+       .sw_scan_complete = mt7601u_sw_scan_complete,
+       .ampdu_action = mt76_ampdu_action,
+       .sta_rate_tbl_update = mt76_sta_rate_tbl_update,
+       .set_rts_threshold = mt7601u_set_rts_threshold,
+};
diff --git a/drivers/net/wireless/mediatek/mt7601u/mcu.c b/drivers/net/wireless/mediatek/mt7601u/mcu.c
new file mode 100644 (file)
index 0000000..fbb1986
--- /dev/null
@@ -0,0 +1,534 @@
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+#include <linux/usb.h>
+#include <linux/skbuff.h>
+
+#include "mt7601u.h"
+#include "dma.h"
+#include "mcu.h"
+#include "usb.h"
+#include "trace.h"
+
+#define MCU_FW_URB_MAX_PAYLOAD         0x3800
+#define MCU_FW_URB_SIZE                        (MCU_FW_URB_MAX_PAYLOAD + 12)
+#define MCU_RESP_URB_SIZE              1024
+
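+/* The MCU appears to write 1 to MT_MCU_COM_REG0 once its firmware is
+ * up; this is also what gets polled after the firmware upload below.
+ */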
+static inline int firmware_running(struct mt7601u_dev *dev)
+{
+       return mt7601u_rr(dev, MT_MCU_COM_REG0) == 1;
+}
+
+static inline void skb_put_le32(struct sk_buff *skb, u32 val)
+{
+       put_unaligned_le32(val, skb_put(skb, 4));
+}
+
+static inline void mt7601u_dma_skb_wrap_cmd(struct sk_buff *skb,
+                                           u8 seq, enum mcu_cmd cmd)
+{
+       WARN_ON(mt7601u_dma_skb_wrap(skb, CPU_TX_PORT, DMA_COMMAND,
+                                    MT76_SET(MT_TXD_CMD_INFO_SEQ, seq) |
+                                    MT76_SET(MT_TXD_CMD_INFO_TYPE, cmd)));
+}
+
+static inline void trace_mt_mcu_msg_send_cs(struct mt7601u_dev *dev,
+                                           struct sk_buff *skb, bool need_resp)
+{
+       u32 i, csum = 0;
+
+       for (i = 0; i < skb->len / 4; i++)
+               csum ^= get_unaligned_le32(skb->data + i * 4);
+
+       trace_mt_mcu_msg_send(dev, skb, csum, need_resp);
+}
+
+static struct sk_buff *
+mt7601u_mcu_msg_alloc(struct mt7601u_dev *dev, const void *data, int len)
+{
+       struct sk_buff *skb;
+
+       WARN_ON(len % 4); /* if length is not divisible by 4 we need to pad */
+
+       skb = alloc_skb(len + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
+       if (!skb)
+               return NULL;
+       skb_reserve(skb, MT_DMA_HDR_LEN);
+       memcpy(skb_put(skb, len), data, len);
+
+       return skb;
+}
+
+static int mt7601u_mcu_wait_resp(struct mt7601u_dev *dev, u8 seq)
+{
+       struct urb *urb = dev->mcu.resp.urb;
+       u32 rxfce;
+       int urb_status, ret, i = 5;
+
+       while (i--) {
+               if (!wait_for_completion_timeout(&dev->mcu.resp_cmpl,
+                                                msecs_to_jiffies(300))) {
+                       dev_warn(dev->dev, "Warning: %s retrying\n", __func__);
+                       continue;
+               }
+
+               /* Make copies of important data before reusing the urb */
+               rxfce = get_unaligned_le32(dev->mcu.resp.buf);
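+               /* urb->status is only valid if the urb actually failed;
+                * multiplying by the 0/1 result of mt7601u_urb_has_error()
+                * zeroes it out otherwise.
+                */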
+               urb_status = urb->status * mt7601u_urb_has_error(urb);
+
+               ret = mt7601u_usb_submit_buf(dev, USB_DIR_IN, MT_EP_IN_CMD_RESP,
+                                            &dev->mcu.resp, GFP_KERNEL,
+                                            mt7601u_complete_urb,
+                                            &dev->mcu.resp_cmpl);
+               if (ret)
+                       return ret;
+
+               if (urb_status)
+                       dev_err(dev->dev, "Error: MCU resp urb failed:%d\n",
+                               urb_status);
+
+               if (MT76_GET(MT_RXD_CMD_INFO_CMD_SEQ, rxfce) == seq &&
+                   MT76_GET(MT_RXD_CMD_INFO_EVT_TYPE, rxfce) == CMD_DONE)
+                       return 0;
+
+               dev_err(dev->dev, "Error: MCU resp evt:%hhx seq:%hhx-%hhx!\n",
+                       MT76_GET(MT_RXD_CMD_INFO_EVT_TYPE, rxfce),
+                       seq, MT76_GET(MT_RXD_CMD_INFO_CMD_SEQ, rxfce));
+       }
+
+       dev_err(dev->dev, "Error: %s timed out\n", __func__);
+       return -ETIMEDOUT;
+}
+
+static int
+mt7601u_mcu_msg_send(struct mt7601u_dev *dev, struct sk_buff *skb,
+                    enum mcu_cmd cmd, bool wait_resp)
+{
+       struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
+       unsigned int cmd_pipe =
+               usb_sndbulkpipe(usb_dev, dev->out_eps[MT_EP_OUT_INBAND_CMD]);
+       int sent, ret;
+       u8 seq = 0;
+
+       if (test_bit(MT7601U_STATE_REMOVED, &dev->state))
+               return 0;
+
+       mutex_lock(&dev->mcu.mutex);
+
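+       /* Sequence number 0 means "no response expected", so commands
+        * which wait for a response wrap the 4-bit counter past it.
+        */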
+       if (wait_resp)
+               while (!seq)
+                       seq = ++dev->mcu.msg_seq & 0xf;
+
+       mt7601u_dma_skb_wrap_cmd(skb, seq, cmd);
+
+       if (dev->mcu.resp_cmpl.done)
+               dev_err(dev->dev, "Error: MCU response pre-completed!\n");
+
+       trace_mt_mcu_msg_send_cs(dev, skb, wait_resp);
+       trace_mt_submit_urb_sync(dev, cmd_pipe, skb->len);
+       ret = usb_bulk_msg(usb_dev, cmd_pipe, skb->data, skb->len, &sent, 500);
+       if (ret) {
+               dev_err(dev->dev, "Error: send MCU cmd failed:%d\n", ret);
+               goto out;
+       }
+       if (sent != skb->len)
+               dev_err(dev->dev, "Error: %s sent != skb->len\n", __func__);
+
+       if (wait_resp)
+               ret = mt7601u_mcu_wait_resp(dev, seq);
+out:
+       mutex_unlock(&dev->mcu.mutex);
+
+       consume_skb(skb);
+
+       return ret;
+}
+
+static int mt7601u_mcu_function_select(struct mt7601u_dev *dev,
+                                      enum mcu_function func, u32 val)
+{
+       struct sk_buff *skb;
+       struct {
+               __le32 id;
+               __le32 value;
+       } __packed __aligned(4) msg = {
+               .id = cpu_to_le32(func),
+               .value = cpu_to_le32(val),
+       };
+
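+       /* Apparently only ATOMIC_TSSI_SETTING generates a response
+        * event, so wait for one just in that case.
+        */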
+       skb = mt7601u_mcu_msg_alloc(dev, &msg, sizeof(msg));
+       if (!skb)
+               return -ENOMEM;
+       return mt7601u_mcu_msg_send(dev, skb, CMD_FUN_SET_OP,
+                                   func == ATOMIC_TSSI_SETTING);
+}
+
+int mt7601u_mcu_tssi_read_kick(struct mt7601u_dev *dev, int use_hvga)
+{
+       int ret;
+
+       if (!test_bit(MT7601U_STATE_MCU_RUNNING, &dev->state))
+               return 0;
+
+       ret = mt7601u_mcu_function_select(dev, ATOMIC_TSSI_SETTING,
+                                         use_hvga);
+       if (ret) {
+               dev_warn(dev->dev, "Warning: MCU TSSI read kick failed\n");
+               return ret;
+       }
+
+       dev->tssi_read_trig = true;
+
+       return 0;
+}
+
+int
+mt7601u_mcu_calibrate(struct mt7601u_dev *dev, enum mcu_calibrate cal, u32 val)
+{
+       struct sk_buff *skb;
+       struct {
+               __le32 id;
+               __le32 value;
+       } __packed __aligned(4) msg = {
+               .id = cpu_to_le32(cal),
+               .value = cpu_to_le32(val),
+       };
+
+       skb = mt7601u_mcu_msg_alloc(dev, &msg, sizeof(msg));
+       if (!skb)
+               return -ENOMEM;
+       return mt7601u_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP, true);
+}
+
+int mt7601u_write_reg_pairs(struct mt7601u_dev *dev, u32 base,
+                           const struct mt76_reg_pair *data, int n)
+{
+       const int max_vals_per_cmd = INBAND_PACKET_MAX_LEN / 8;
+       struct sk_buff *skb;
+       int cnt, i, ret;
+
+       if (!n)
+               return 0;
+
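+       /* Inband commands are limited to INBAND_PACKET_MAX_LEN bytes,
+        * so larger batches are written as a chain of tail-recursive
+        * calls, waiting for a response only on the final chunk.
+        */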
+       cnt = min(max_vals_per_cmd, n);
+
+       skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
+       if (!skb)
+               return -ENOMEM;
+       skb_reserve(skb, MT_DMA_HDR_LEN);
+
+       for (i = 0; i < cnt; i++) {
+               skb_put_le32(skb, base + data[i].reg);
+               skb_put_le32(skb, data[i].value);
+       }
+
+       ret = mt7601u_mcu_msg_send(dev, skb, CMD_RANDOM_WRITE, cnt == n);
+       if (ret)
+               return ret;
+
+       return mt7601u_write_reg_pairs(dev, base, data + cnt, n - cnt);
+}
+
+int mt7601u_burst_write_regs(struct mt7601u_dev *dev, u32 offset,
+                            const u32 *data, int n)
+{
+       const int max_regs_per_cmd = INBAND_PACKET_MAX_LEN / 4 - 1;
+       struct sk_buff *skb;
+       int cnt, i, ret;
+
+       if (!n)
+               return 0;
+
+       cnt = min(max_regs_per_cmd, n);
+
+       skb = alloc_skb(cnt * 4 + MT_DMA_HDR_LEN + 4, GFP_KERNEL);
+       if (!skb)
+               return -ENOMEM;
+       skb_reserve(skb, MT_DMA_HDR_LEN);
+
+       skb_put_le32(skb, MT_MCU_MEMMAP_WLAN + offset);
+       for (i = 0; i < cnt; i++)
+               skb_put_le32(skb, data[i]);
+
+       ret = mt7601u_mcu_msg_send(dev, skb, CMD_BURST_WRITE, cnt == n);
+       if (ret)
+               return ret;
+
+       return mt7601u_burst_write_regs(dev, offset + cnt * 4,
+                                       data + cnt, n - cnt);
+}
+
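+/* Layout of the firmware image as consumed below: the header, a 64-byte
+ * interrupt vector block (IVB), then the ILM and DLM sections back to
+ * back; note that ilm_len in the header includes the IVB.
+ */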
+struct mt76_fw_header {
+       __le32 ilm_len;
+       __le32 dlm_len;
+       __le16 build_ver;
+       __le16 fw_ver;
+       u8 pad[4];
+       char build_time[16];
+};
+
+struct mt76_fw {
+       struct mt76_fw_header hdr;
+       u8 ivb[MT_MCU_IVB_SIZE];
+       u8 ilm[];
+};
+
+static int __mt7601u_dma_fw(struct mt7601u_dev *dev,
+                           const struct mt7601u_dma_buf *dma_buf,
+                           const void *data, u32 len, u32 dst_addr)
+{
+       DECLARE_COMPLETION_ONSTACK(cmpl);
+       struct mt7601u_dma_buf buf = *dma_buf; /* we need to fake length */
+       __le32 reg;
+       u32 val;
+       int ret;
+
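+       /* Prefix each chunk with a DMA TXD header carrying the payload
+        * length; zeroing 8 trailing bytes covers both the round-up to
+        * a multiple of 4 and the 4-byte pad counted into buf.len.
+        */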
+       reg = cpu_to_le32(MT76_SET(MT_TXD_INFO_TYPE, DMA_PACKET) |
+                         MT76_SET(MT_TXD_INFO_D_PORT, CPU_TX_PORT) |
+                         MT76_SET(MT_TXD_INFO_LEN, len));
+       memcpy(buf.buf, &reg, sizeof(reg));
+       memcpy(buf.buf + sizeof(reg), data, len);
+       memset(buf.buf + sizeof(reg) + len, 0, 8);
+
+       ret = mt7601u_vendor_single_wr(dev, MT_VEND_WRITE_FCE,
+                                      MT_FCE_DMA_ADDR, dst_addr);
+       if (ret)
+               return ret;
+       len = roundup(len, 4);
+       ret = mt7601u_vendor_single_wr(dev, MT_VEND_WRITE_FCE,
+                                      MT_FCE_DMA_LEN, len << 16);
+       if (ret)
+               return ret;
+
+       buf.len = MT_DMA_HDR_LEN + len + 4;
+       ret = mt7601u_usb_submit_buf(dev, USB_DIR_OUT, MT_EP_OUT_INBAND_CMD,
+                                    &buf, GFP_KERNEL,
+                                    mt7601u_complete_urb, &cmpl);
+       if (ret)
+               return ret;
+
+       if (!wait_for_completion_timeout(&cmpl, msecs_to_jiffies(1000))) {
+               dev_err(dev->dev, "Error: firmware upload timed out\n");
+               usb_kill_urb(buf.urb);
+               return -ETIMEDOUT;
+       }
+       if (mt7601u_urb_has_error(buf.urb)) {
+               dev_err(dev->dev, "Error: firmware upload urb failed:%d\n",
+                       buf.urb->status);
+               return buf.urb->status;
+       }
+
+       val = mt7601u_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX);
+       val++;
+       mt7601u_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val);
+
+       return 0;
+}
+
+static int
+mt7601u_dma_fw(struct mt7601u_dev *dev, struct mt7601u_dma_buf *dma_buf,
+              const void *data, int len, u32 dst_addr)
+{
+       int n, ret;
+
+       if (len == 0)
+               return 0;
+
+       n = min(MCU_FW_URB_MAX_PAYLOAD, len);
+       ret = __mt7601u_dma_fw(dev, dma_buf, data, n, dst_addr);
+       if (ret)
+               return ret;
+
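+       /* The MCU appears to ack each chunk by setting bit 31 of
+        * MT_MCU_COM_REG1 - wait for it before sending the next piece.
+        */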
+       if (!mt76_poll_msec(dev, MT_MCU_COM_REG1, BIT(31), BIT(31), 500))
+               return -ETIMEDOUT;
+
+       return mt7601u_dma_fw(dev, dma_buf, data + n, len - n, dst_addr + n);
+}
+
+static int
+mt7601u_upload_firmware(struct mt7601u_dev *dev, const struct mt76_fw *fw)
+{
+       struct mt7601u_dma_buf dma_buf;
+       void *ivb;
+       u32 ilm_len, dlm_len;
+       int i, ret;
+
+       ivb = kmemdup(fw->ivb, sizeof(fw->ivb), GFP_KERNEL);
+       if (!ivb || mt7601u_usb_alloc_buf(dev, MCU_FW_URB_SIZE, &dma_buf)) {
+               ret = -ENOMEM;
+               goto error;
+       }
+
+       ilm_len = le32_to_cpu(fw->hdr.ilm_len) - sizeof(fw->ivb);
+       dev_dbg(dev->dev, "loading FW - ILM %u + IVB %zu\n",
+               ilm_len, sizeof(fw->ivb));
+       ret = mt7601u_dma_fw(dev, &dma_buf, fw->ilm, ilm_len, sizeof(fw->ivb));
+       if (ret)
+               goto error;
+
+       dlm_len = le32_to_cpu(fw->hdr.dlm_len);
+       dev_dbg(dev->dev, "loading FW - DLM %u\n", dlm_len);
+       ret = mt7601u_dma_fw(dev, &dma_buf, fw->ilm + ilm_len,
+                            dlm_len, MT_MCU_DLM_OFFSET);
+       if (ret)
+               goto error;
+
+       ret = mt7601u_vendor_request(dev, MT_VEND_DEV_MODE, USB_DIR_OUT,
+                                    0x12, 0, ivb, sizeof(fw->ivb));
+       if (ret < 0)
+               goto error;
+       ret = 0;
+
+       for (i = 100; i && !firmware_running(dev); i--)
+               msleep(10);
+       if (!i) {
+               ret = -ETIMEDOUT;
+               goto error;
+       }
+
+       dev_dbg(dev->dev, "Firmware running!\n");
+error:
+       kfree(ivb);
+       mt7601u_usb_free_buf(dev, &dma_buf);
+
+       return ret;
+}
+
+static int mt7601u_load_firmware(struct mt7601u_dev *dev)
+{
+       const struct firmware *fw;
+       const struct mt76_fw_header *hdr;
+       int len, ret;
+       u32 val;
+
+       mt7601u_wr(dev, MT_USB_DMA_CFG, (MT_USB_DMA_CFG_RX_BULK_EN |
+                                        MT_USB_DMA_CFG_TX_BULK_EN));
+
+       if (firmware_running(dev))
+               return 0;
+
+       ret = request_firmware(&fw, MT7601U_FIRMWARE, dev->dev);
+       if (ret)
+               return ret;
+
+       if (!fw || !fw->data || fw->size < sizeof(*hdr))
+               goto err_inv_fw;
+
+       hdr = (const struct mt76_fw_header *) fw->data;
+
+       if (le32_to_cpu(hdr->ilm_len) <= MT_MCU_IVB_SIZE)
+               goto err_inv_fw;
+
+       len = sizeof(*hdr);
+       len += le32_to_cpu(hdr->ilm_len);
+       len += le32_to_cpu(hdr->dlm_len);
+
+       if (fw->size != len)
+               goto err_inv_fw;
+
+       val = le16_to_cpu(hdr->fw_ver);
+       dev_info(dev->dev,
+                "Firmware Version: %d.%d.%02d Build: %x Build time: %.16s\n",
+                (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf,
+                le16_to_cpu(hdr->build_ver), hdr->build_time);
+
+
+       mt7601u_wr(dev, 0x94c, 0);
+       mt7601u_wr(dev, MT_FCE_PSE_CTRL, 0);
+
+       mt7601u_vendor_reset(dev);
+       msleep(5);
+
+       mt7601u_wr(dev, 0xa44, 0);
+       mt7601u_wr(dev, 0x230, 0x84210);
+       mt7601u_wr(dev, 0x400, 0x80c00);
+       mt7601u_wr(dev, 0x800, 1);
+
+       mt7601u_rmw(dev, MT_PBF_CFG, 0, (MT_PBF_CFG_TX0Q_EN |
+                                        MT_PBF_CFG_TX1Q_EN |
+                                        MT_PBF_CFG_TX2Q_EN |
+                                        MT_PBF_CFG_TX3Q_EN));
+
+       mt7601u_wr(dev, MT_FCE_PSE_CTRL, 1);
+
+       mt7601u_wr(dev, MT_USB_DMA_CFG, (MT_USB_DMA_CFG_RX_BULK_EN |
+                                        MT_USB_DMA_CFG_TX_BULK_EN));
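+       /* Pulse TX_CLR: setting it presumably flushes the TX FIFO, and
+        * it has to be cleared again before TX DMA can be used.
+        */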
+       val = mt76_set(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_TX_CLR);
+       val &= ~MT_USB_DMA_CFG_TX_CLR;
+       mt7601u_wr(dev, MT_USB_DMA_CFG, val);
+
+       /* FCE tx_fs_base_ptr */
+       mt7601u_wr(dev, MT_TX_CPU_FROM_FCE_BASE_PTR, 0x400230);
+       /* FCE tx_fs_max_cnt */
+       mt7601u_wr(dev, MT_TX_CPU_FROM_FCE_MAX_COUNT, 1);
+       /* FCE pdma enable */
+       mt7601u_wr(dev, MT_FCE_PDMA_GLOBAL_CONF, 0x44);
+       /* FCE skip_fs_en */
+       mt7601u_wr(dev, MT_FCE_SKIP_FS, 3);
+
+       ret = mt7601u_upload_firmware(dev, (const struct mt76_fw *)fw->data);
+
+       release_firmware(fw);
+
+       return ret;
+
+err_inv_fw:
+       dev_err(dev->dev, "Invalid firmware image\n");
+       release_firmware(fw);
+       return -ENOENT;
+}
+
+int mt7601u_mcu_init(struct mt7601u_dev *dev)
+{
+       int ret;
+
+       mutex_init(&dev->mcu.mutex);
+
+       ret = mt7601u_load_firmware(dev);
+       if (ret)
+               return ret;
+
+       set_bit(MT7601U_STATE_MCU_RUNNING, &dev->state);
+
+       return 0;
+}
+
+int mt7601u_mcu_cmd_init(struct mt7601u_dev *dev)
+{
+       int ret;
+
+       ret = mt7601u_mcu_function_select(dev, Q_SELECT, 1);
+       if (ret)
+               return ret;
+
+       init_completion(&dev->mcu.resp_cmpl);
+       if (mt7601u_usb_alloc_buf(dev, MCU_RESP_URB_SIZE, &dev->mcu.resp)) {
+               mt7601u_usb_free_buf(dev, &dev->mcu.resp);
+               return -ENOMEM;
+       }
+
+       ret = mt7601u_usb_submit_buf(dev, USB_DIR_IN, MT_EP_IN_CMD_RESP,
+                                    &dev->mcu.resp, GFP_KERNEL,
+                                    mt7601u_complete_urb, &dev->mcu.resp_cmpl);
+       if (ret) {
+               mt7601u_usb_free_buf(dev, &dev->mcu.resp);
+               return ret;
+       }
+
+       return 0;
+}
+
+void mt7601u_mcu_cmd_deinit(struct mt7601u_dev *dev)
+{
+       usb_kill_urb(dev->mcu.resp.urb);
+       mt7601u_usb_free_buf(dev, &dev->mcu.resp);
+}
diff --git a/drivers/net/wireless/mediatek/mt7601u/mcu.h b/drivers/net/wireless/mediatek/mt7601u/mcu.h
new file mode 100644 (file)
index 0000000..4a66d10
--- /dev/null
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT7601U_MCU_H
+#define __MT7601U_MCU_H
+
+struct mt7601u_dev;
+
+/* Register definitions */
+#define MT_MCU_RESET_CTL               0x070C
+#define MT_MCU_INT_LEVEL               0x0718
+#define MT_MCU_COM_REG0                        0x0730
+#define MT_MCU_COM_REG1                        0x0734
+#define MT_MCU_COM_REG2                        0x0738
+#define MT_MCU_COM_REG3                        0x073C
+
+#define MT_MCU_IVB_SIZE                        0x40
+#define MT_MCU_DLM_OFFSET              0x80000
+
+#define MT_MCU_MEMMAP_WLAN             0x00410000
+#define MT_MCU_MEMMAP_BBP              0x40000000
+#define MT_MCU_MEMMAP_RF               0x80000000
+
+#define INBAND_PACKET_MAX_LEN          192
+
+enum mcu_cmd {
+       CMD_FUN_SET_OP = 1,
+       CMD_LOAD_CR = 2,
+       CMD_INIT_GAIN_OP = 3,
+       CMD_DYNC_VGA_OP = 6,
+       CMD_TDLS_CH_SW = 7,
+       CMD_BURST_WRITE = 8,
+       CMD_READ_MODIFY_WRITE = 9,
+       CMD_RANDOM_READ = 10,
+       CMD_BURST_READ = 11,
+       CMD_RANDOM_WRITE = 12,
+       CMD_LED_MODE_OP = 16,
+       CMD_POWER_SAVING_OP = 20,
+       CMD_WOW_CONFIG = 21,
+       CMD_WOW_QUERY = 22,
+       CMD_WOW_FEATURE = 24,
+       CMD_CARRIER_DETECT_OP = 28,
+       CMD_RADOR_DETECT_OP = 29,
+       CMD_SWITCH_CHANNEL_OP = 30,
+       CMD_CALIBRATION_OP = 31,
+       CMD_BEACON_OP = 32,
+       CMD_ANTENNA_OP = 33,
+};
+
+enum mcu_function {
+       Q_SELECT = 1,
+       ATOMIC_TSSI_SETTING = 5,
+};
+
+enum mcu_power_mode {
+       RADIO_OFF = 0x30,
+       RADIO_ON = 0x31,
+       RADIO_OFF_AUTO_WAKEUP = 0x32,
+       RADIO_OFF_ADVANCE = 0x33,
+       RADIO_ON_ADVANCE = 0x34,
+};
+
+enum mcu_calibrate {
+       MCU_CAL_R = 1,
+       MCU_CAL_DCOC,
+       MCU_CAL_LC,
+       MCU_CAL_LOFT,
+       MCU_CAL_TXIQ,
+       MCU_CAL_BW,
+       MCU_CAL_DPD,
+       MCU_CAL_RXIQ,
+       MCU_CAL_TXDCOC,
+};
+
+int mt7601u_mcu_init(struct mt7601u_dev *dev);
+int mt7601u_mcu_cmd_init(struct mt7601u_dev *dev);
+void mt7601u_mcu_cmd_deinit(struct mt7601u_dev *dev);
+
+int
+mt7601u_mcu_calibrate(struct mt7601u_dev *dev, enum mcu_calibrate cal, u32 val);
+int mt7601u_mcu_tssi_read_kick(struct mt7601u_dev *dev, int use_hvga);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt7601u/mt7601u.h b/drivers/net/wireless/mediatek/mt7601u/mt7601u.h
new file mode 100644 (file)
index 0000000..9102be6
--- /dev/null
@@ -0,0 +1,390 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MT7601U_H
+#define MT7601U_H
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/usb.h>
+#include <linux/completion.h>
+#include <net/mac80211.h>
+#include <linux/debugfs.h>
+
+#include "regs.h"
+#include "util.h"
+
+#define MT_CALIBRATE_INTERVAL          (4 * HZ)
+
+#define MT_FREQ_CAL_INIT_DELAY         (30 * HZ)
+#define MT_FREQ_CAL_CHECK_INTERVAL     (10 * HZ)
+#define MT_FREQ_CAL_ADJ_INTERVAL       (HZ / 2)
+
+#define MT_BBP_REG_VERSION             0x00
+
+#define MT_USB_AGGR_SIZE_LIMIT         28 /* in units of 1024B */
+#define MT_USB_AGGR_TIMEOUT            0x80 /* in units of 33ns */
+#define MT_RX_ORDER                    3
+#define MT_RX_URB_SIZE                 (PAGE_SIZE << MT_RX_ORDER)
+
+struct mt7601u_dma_buf {
+       struct urb *urb;
+       void *buf;
+       dma_addr_t dma;
+       size_t len;
+};
+
+struct mt7601u_mcu {
+       struct mutex mutex;
+
+       u8 msg_seq;
+
+       struct mt7601u_dma_buf resp;
+       struct completion resp_cmpl;
+};
+
+struct mt7601u_freq_cal {
+       struct delayed_work work;
+       u8 freq;
+       bool enabled;
+       bool adjusting;
+};
+
+struct mac_stats {
+       u64 rx_stat[6];
+       u64 tx_stat[6];
+       u64 aggr_stat[2];
+       u64 aggr_n[32];
+       u64 zero_len_del[2];
+};
+
+#define N_RX_ENTRIES   16
+struct mt7601u_rx_queue {
+       struct mt7601u_dev *dev;
+
+       struct mt7601u_dma_buf_rx {
+               struct urb *urb;
+               struct page *p;
+       } e[N_RX_ENTRIES];
+
+       unsigned int start;
+       unsigned int end;
+       unsigned int entries;
+       unsigned int pending;
+};
+
+#define N_TX_ENTRIES   64
+
+struct mt7601u_tx_queue {
+       struct mt7601u_dev *dev;
+
+       struct mt7601u_dma_buf_tx {
+               struct urb *urb;
+               struct sk_buff *skb;
+       } e[N_TX_ENTRIES];
+
+       unsigned int start;
+       unsigned int end;
+       unsigned int entries;
+       unsigned int used;
+       unsigned int fifo_seq;
+};
+
+/* WCID allocation:
+ *     0: mcast wcid
+ *     1: bssid wcid
+ *  1...: STAs
+ * ...7e: group wcids
+ *    7f: reserved
+ */
+#define N_WCIDS                128
+#define GROUP_WCID(idx)        (N_WCIDS - 2 - (idx))
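+/* e.g. with N_WCIDS == 128, GROUP_WCID(0) == 126 and further group
+ * wcids are allocated downwards, just below the reserved 0x7f entry.
+ */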
+
+struct mt7601u_eeprom_params;
+
+#define MT_EE_TEMPERATURE_SLOPE                39
+#define MT_FREQ_OFFSET_INVALID         -128
+
+enum mt_temp_mode {
+       MT_TEMP_MODE_NORMAL,
+       MT_TEMP_MODE_HIGH,
+       MT_TEMP_MODE_LOW,
+};
+
+enum mt_bw {
+       MT_BW_20,
+       MT_BW_40,
+};
+
+enum {
+       MT7601U_STATE_INITIALIZED,
+       MT7601U_STATE_REMOVED,
+       MT7601U_STATE_WLAN_RUNNING,
+       MT7601U_STATE_MCU_RUNNING,
+       MT7601U_STATE_SCANNING,
+       MT7601U_STATE_READING_STATS,
+       MT7601U_STATE_MORE_STATS,
+};
+
+/**
+ * struct mt7601u_dev - adapter structure
+ * @lock:              protects @wcid->tx_rate.
+ * @tx_lock:           protects @tx_q and changes of MT7601U_STATE_*_STATS
+ *                     flags in @state.
+ * @rx_lock:           protects @rx_q.
+ * @con_mon_lock:      protects @ap_bssid, @bcn_*, @avg_rssi.
+ * @mutex:             ensures exclusive access from mac80211 callbacks.
+ * @vendor_req_mutex:  ensures atomicity of vendor requests.
+ * @reg_atomic_mutex:  ensures atomicity of indirect register accesses
+ *                     (accesses to RF and BBP).
+ * @hw_atomic_mutex:   ensures exclusive access to HW during critical
+ *                     operations (power management, channel switch).
+ */
+struct mt7601u_dev {
+       struct ieee80211_hw *hw;
+       struct device *dev;
+
+       unsigned long state;
+
+       struct mutex mutex;
+
+       unsigned long wcid_mask[N_WCIDS / BITS_PER_LONG];
+
+       struct cfg80211_chan_def chandef;
+       struct ieee80211_supported_band *sband_2g;
+
+       struct mt7601u_mcu mcu;
+
+       struct delayed_work cal_work;
+       struct delayed_work mac_work;
+
+       struct workqueue_struct *stat_wq;
+       struct delayed_work stat_work;
+
+       struct mt76_wcid *mon_wcid;
+       struct mt76_wcid __rcu *wcid[N_WCIDS];
+
+       spinlock_t lock;
+
+       const u16 *beacon_offsets;
+
+       u8 macaddr[ETH_ALEN];
+       struct mt7601u_eeprom_params *ee;
+
+       struct mutex vendor_req_mutex;
+       struct mutex reg_atomic_mutex;
+       struct mutex hw_atomic_mutex;
+
+       u32 rxfilter;
+       u32 debugfs_reg;
+
+       u8 out_eps[8];
+       u8 in_eps[8];
+       u16 out_max_packet;
+       u16 in_max_packet;
+
+       /* TX */
+       spinlock_t tx_lock;
+       struct mt7601u_tx_queue *tx_q;
+
+       atomic_t avg_ampdu_len;
+
+       /* RX */
+       spinlock_t rx_lock;
+       struct tasklet_struct rx_tasklet;
+       struct mt7601u_rx_queue rx_q;
+
+       /* Connection monitoring things */
+       spinlock_t con_mon_lock;
+       u8 ap_bssid[ETH_ALEN];
+
+       s8 bcn_freq_off;
+       u8 bcn_phy_mode;
+
+       int avg_rssi; /* starts at 0 and converges */
+
+       u8 agc_save;
+
+       struct mt7601u_freq_cal freq_cal;
+
+       bool tssi_read_trig;
+
+       s8 tssi_init;
+       s8 tssi_init_hvga;
+       s16 tssi_init_hvga_offset_db;
+
+       int prev_pwr_diff;
+
+       enum mt_temp_mode temp_mode;
+       int curr_temp;
+       int dpd_temp;
+       s8 raw_temp;
+       bool pll_lock_protect;
+
+       u8 bw;
+       bool chan_ext_below;
+
+       /* PA mode */
+       u32 rf_pa_mode[2];
+
+       struct mac_stats stats;
+};
+
+struct mt7601u_tssi_params {
+       char tssi0;
+       int trgt_power;
+};
+
+struct mt76_wcid {
+       u8 idx;
+       u8 hw_key_idx;
+
+       u16 tx_rate;
+       bool tx_rate_set;
+       u8 tx_rate_nss;
+};
+
+struct mt76_vif {
+       u8 idx;
+
+       struct mt76_wcid group_wcid;
+};
+
+struct mt76_sta {
+       struct mt76_wcid wcid;
+       u16 agg_ssn[IEEE80211_NUM_TIDS];
+};
+
+struct mt76_reg_pair {
+       u32 reg;
+       u32 value;
+};
+
+struct mt7601u_rxwi;
+
+extern const struct ieee80211_ops mt7601u_ops;
+
+void mt7601u_init_debugfs(struct mt7601u_dev *dev);
+
+u32 mt7601u_rr(struct mt7601u_dev *dev, u32 offset);
+void mt7601u_wr(struct mt7601u_dev *dev, u32 offset, u32 val);
+u32 mt7601u_rmw(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val);
+u32 mt7601u_rmc(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val);
+void mt7601u_wr_copy(struct mt7601u_dev *dev, u32 offset,
+                    const void *data, int len);
+
+int mt7601u_wait_asic_ready(struct mt7601u_dev *dev);
+bool mt76_poll(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val,
+              int timeout);
+bool mt76_poll_msec(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val,
+                   int timeout);
+
+/* Compatibility with mt76 */
+#define mt76_rmw_field(_dev, _reg, _field, _val)       \
+       mt76_rmw(_dev, _reg, _field, MT76_SET(_field, _val))
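+/* e.g. mt76_rmw_field(dev, reg, GENMASK(7, 4), 3) rewrites just bits
+ * 7:4 of reg with the value 3 (MT76_SET() shifts the value under the
+ * field mask).
+ */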
+
+static inline u32 mt76_rr(struct mt7601u_dev *dev, u32 offset)
+{
+       return mt7601u_rr(dev, offset);
+}
+
+static inline void mt76_wr(struct mt7601u_dev *dev, u32 offset, u32 val)
+{
+       return mt7601u_wr(dev, offset, val);
+}
+
+static inline u32
+mt76_rmw(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val)
+{
+       return mt7601u_rmw(dev, offset, mask, val);
+}
+
+static inline u32 mt76_set(struct mt7601u_dev *dev, u32 offset, u32 val)
+{
+       return mt76_rmw(dev, offset, 0, val);
+}
+
+static inline u32 mt76_clear(struct mt7601u_dev *dev, u32 offset, u32 val)
+{
+       return mt76_rmw(dev, offset, val, 0);
+}
+
+int mt7601u_write_reg_pairs(struct mt7601u_dev *dev, u32 base,
+                           const struct mt76_reg_pair *data, int len);
+int mt7601u_burst_write_regs(struct mt7601u_dev *dev, u32 offset,
+                            const u32 *data, int n);
+void mt7601u_addr_wr(struct mt7601u_dev *dev, const u32 offset, const u8 *addr);
+
+/* Init */
+struct mt7601u_dev *mt7601u_alloc_device(struct device *dev);
+int mt7601u_init_hardware(struct mt7601u_dev *dev);
+int mt7601u_register_device(struct mt7601u_dev *dev);
+void mt7601u_cleanup(struct mt7601u_dev *dev);
+
+int mt7601u_mac_start(struct mt7601u_dev *dev);
+void mt7601u_mac_stop(struct mt7601u_dev *dev);
+
+/* PHY */
+int mt7601u_phy_init(struct mt7601u_dev *dev);
+int mt7601u_wait_bbp_ready(struct mt7601u_dev *dev);
+void mt7601u_set_rx_path(struct mt7601u_dev *dev, u8 path);
+void mt7601u_set_tx_dac(struct mt7601u_dev *dev, u8 path);
+int mt7601u_bbp_set_bw(struct mt7601u_dev *dev, int bw);
+void mt7601u_agc_save(struct mt7601u_dev *dev);
+void mt7601u_agc_restore(struct mt7601u_dev *dev);
+int mt7601u_phy_set_channel(struct mt7601u_dev *dev,
+                           struct cfg80211_chan_def *chandef);
+void mt7601u_phy_recalibrate_after_assoc(struct mt7601u_dev *dev);
+int mt7601u_phy_get_rssi(struct mt7601u_dev *dev,
+                        struct mt7601u_rxwi *rxwi, u16 rate);
+void mt7601u_phy_con_cal_onoff(struct mt7601u_dev *dev,
+                              struct ieee80211_bss_conf *info);
+
+/* MAC */
+void mt7601u_mac_work(struct work_struct *work);
+void mt7601u_mac_set_protection(struct mt7601u_dev *dev, bool legacy_prot,
+                               int ht_mode);
+void mt7601u_mac_set_short_preamble(struct mt7601u_dev *dev, bool short_preamb);
+void mt7601u_mac_config_tsf(struct mt7601u_dev *dev, bool enable, int interval);
+void
+mt7601u_mac_wcid_setup(struct mt7601u_dev *dev, u8 idx, u8 vif_idx, u8 *mac);
+void mt7601u_mac_set_ampdu_factor(struct mt7601u_dev *dev);
+
+/* TX */
+void mt7601u_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+               struct sk_buff *skb);
+int mt7601u_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                   u16 queue, const struct ieee80211_tx_queue_params *params);
+void mt7601u_tx_status(struct mt7601u_dev *dev, struct sk_buff *skb);
+void mt7601u_tx_stat(struct work_struct *work);
+
+/* util */
+void mt76_remove_hdr_pad(struct sk_buff *skb);
+int mt76_insert_hdr_pad(struct sk_buff *skb);
+
+u32 mt7601u_bbp_set_ctrlch(struct mt7601u_dev *dev, bool below);
+
+static inline u32 mt7601u_mac_set_ctrlch(struct mt7601u_dev *dev, bool below)
+{
+       return mt7601u_rmc(dev, MT_TX_BAND_CFG, 1, below);
+}
+
+int mt7601u_dma_init(struct mt7601u_dev *dev);
+void mt7601u_dma_cleanup(struct mt7601u_dev *dev);
+
+int mt7601u_dma_enqueue_tx(struct mt7601u_dev *dev, struct sk_buff *skb,
+                          struct mt76_wcid *wcid, int hw_q);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt7601u/phy.c b/drivers/net/wireless/mediatek/mt7601u/phy.c
new file mode 100644 (file)
index 0000000..1908af6
--- /dev/null
@@ -0,0 +1,1251 @@
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt7601u.h"
+#include "mcu.h"
+#include "eeprom.h"
+#include "trace.h"
+#include "initvals_phy.h"
+
+#include <linux/etherdevice.h>
+
+static void mt7601u_agc_reset(struct mt7601u_dev *dev);
+
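+/* RF registers are accessed indirectly through the RF_CSR window:
+ * wait for KICK to clear, write bank/offset/data together with the
+ * KICK bit, and for reads poll once more before latching out DATA.
+ */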
+static int
+mt7601u_rf_wr(struct mt7601u_dev *dev, u8 bank, u8 offset, u8 value)
+{
+       int ret = 0;
+
+       if (WARN_ON(!test_bit(MT7601U_STATE_WLAN_RUNNING, &dev->state)) ||
+           WARN_ON(offset > 63))
+               return -EINVAL;
+       if (test_bit(MT7601U_STATE_REMOVED, &dev->state))
+               return 0;
+
+       mutex_lock(&dev->reg_atomic_mutex);
+
+       if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100)) {
+               ret = -ETIMEDOUT;
+               goto out;
+       }
+
+       mt7601u_wr(dev, MT_RF_CSR_CFG, MT76_SET(MT_RF_CSR_CFG_DATA, value) |
+                                      MT76_SET(MT_RF_CSR_CFG_REG_BANK, bank) |
+                                      MT76_SET(MT_RF_CSR_CFG_REG_ID, offset) |
+                                      MT_RF_CSR_CFG_WR |
+                                      MT_RF_CSR_CFG_KICK);
+       trace_rf_write(dev, bank, offset, value);
+out:
+       mutex_unlock(&dev->reg_atomic_mutex);
+
+       if (ret < 0)
+               dev_err(dev->dev, "Error: RF write %02hhx:%02hhx failed:%d!!\n",
+                       bank, offset, ret);
+
+       return ret;
+}
+
+static int
+mt7601u_rf_rr(struct mt7601u_dev *dev, u8 bank, u8 offset)
+{
+       int ret = -ETIMEDOUT;
+       u32 val;
+
+       if (WARN_ON(!test_bit(MT7601U_STATE_WLAN_RUNNING, &dev->state)) ||
+           WARN_ON(offset > 63))
+               return -EINVAL;
+       if (test_bit(MT7601U_STATE_REMOVED, &dev->state))
+               return 0xff;
+
+       mutex_lock(&dev->reg_atomic_mutex);
+
+       if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100))
+               goto out;
+
+       mt7601u_wr(dev, MT_RF_CSR_CFG, MT76_SET(MT_RF_CSR_CFG_REG_BANK, bank) |
+                                      MT76_SET(MT_RF_CSR_CFG_REG_ID, offset) |
+                                      MT_RF_CSR_CFG_KICK);
+
+       if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100))
+               goto out;
+
+       val = mt7601u_rr(dev, MT_RF_CSR_CFG);
+       if (MT76_GET(MT_RF_CSR_CFG_REG_ID, val) == offset &&
+           MT76_GET(MT_RF_CSR_CFG_REG_BANK, val) == bank) {
+               ret = MT76_GET(MT_RF_CSR_CFG_DATA, val);
+               trace_rf_read(dev, bank, offset, ret);
+       }
+out:
+       mutex_unlock(&dev->reg_atomic_mutex);
+
+       if (ret < 0)
+               dev_err(dev->dev, "Error: RF read %02hhx:%02hhx failed:%d!!\n",
+                       bank, offset, ret);
+
+       return ret;
+}
+
+static int
+mt7601u_rf_rmw(struct mt7601u_dev *dev, u8 bank, u8 offset, u8 mask, u8 val)
+{
+       int ret;
+
+       ret = mt7601u_rf_rr(dev, bank, offset);
+       if (ret < 0)
+               return ret;
+       val |= ret & ~mask;
+       ret = mt7601u_rf_wr(dev, bank, offset, val);
+       if (ret)
+               return ret;
+
+       return val;
+}
+
+static int
+mt7601u_rf_set(struct mt7601u_dev *dev, u8 bank, u8 offset, u8 val)
+{
+       return mt7601u_rf_rmw(dev, bank, offset, 0, val);
+}
+
+static int
+mt7601u_rf_clear(struct mt7601u_dev *dev, u8 bank, u8 offset, u8 mask)
+{
+       return mt7601u_rf_rmw(dev, bank, offset, mask, 0);
+}
+
+static void mt7601u_bbp_wr(struct mt7601u_dev *dev, u8 offset, u8 val)
+{
+       if (WARN_ON(!test_bit(MT7601U_STATE_WLAN_RUNNING, &dev->state)) ||
+           test_bit(MT7601U_STATE_REMOVED, &dev->state))
+               return;
+
+       mutex_lock(&dev->reg_atomic_mutex);
+
+       if (!mt76_poll(dev, MT_BBP_CSR_CFG, MT_BBP_CSR_CFG_BUSY, 0, 1000)) {
+               dev_err(dev->dev, "Error: BBP write %02hhx failed!!\n", offset);
+               goto out;
+       }
+
+       mt7601u_wr(dev, MT_BBP_CSR_CFG,
+                  MT76_SET(MT_BBP_CSR_CFG_VAL, val) |
+                  MT76_SET(MT_BBP_CSR_CFG_REG_NUM, offset) |
+                  MT_BBP_CSR_CFG_RW_MODE | MT_BBP_CSR_CFG_BUSY);
+       trace_bbp_write(dev, offset, val);
+out:
+       mutex_unlock(&dev->reg_atomic_mutex);
+}
+
+static int mt7601u_bbp_rr(struct mt7601u_dev *dev, u8 offset)
+{
+       u32 val;
+       int ret = -ETIMEDOUT;
+
+       if (WARN_ON(!test_bit(MT7601U_STATE_WLAN_RUNNING, &dev->state)))
+               return -EINVAL;
+       if (test_bit(MT7601U_STATE_REMOVED, &dev->state))
+               return 0xff;
+
+       mutex_lock(&dev->reg_atomic_mutex);
+
+       if (!mt76_poll(dev, MT_BBP_CSR_CFG, MT_BBP_CSR_CFG_BUSY, 0, 1000))
+               goto out;
+
+       mt7601u_wr(dev, MT_BBP_CSR_CFG,
+                  MT76_SET(MT_BBP_CSR_CFG_REG_NUM, offset) |
+                  MT_BBP_CSR_CFG_RW_MODE | MT_BBP_CSR_CFG_BUSY |
+                  MT_BBP_CSR_CFG_READ);
+
+       if (!mt76_poll(dev, MT_BBP_CSR_CFG, MT_BBP_CSR_CFG_BUSY, 0, 1000))
+               goto out;
+
+       val = mt7601u_rr(dev, MT_BBP_CSR_CFG);
+       if (MT76_GET(MT_BBP_CSR_CFG_REG_NUM, val) == offset) {
+               ret = MT76_GET(MT_BBP_CSR_CFG_VAL, val);
+               trace_bbp_read(dev, offset, ret);
+       }
+out:
+       mutex_unlock(&dev->reg_atomic_mutex);
+
+       if (ret < 0)
+               dev_err(dev->dev, "Error: BBP read %02hhx failed:%d!!\n",
+                       offset, ret);
+
+       return ret;
+}
+
+static int mt7601u_bbp_rmw(struct mt7601u_dev *dev, u8 offset, u8 mask, u8 val)
+{
+       int ret;
+
+       ret = mt7601u_bbp_rr(dev, offset);
+       if (ret < 0)
+               return ret;
+       val |= ret & ~mask;
+       mt7601u_bbp_wr(dev, offset, val);
+
+       return val;
+}
+
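+/* Like mt7601u_bbp_rmw() but skips the bus write if the value is unchanged. */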
+static u8 mt7601u_bbp_rmc(struct mt7601u_dev *dev, u8 offset, u8 mask, u8 val)
+{
+       int ret;
+
+       ret = mt7601u_bbp_rr(dev, offset);
+       if (ret < 0)
+               return ret;
+       val |= ret & ~mask;
+       if (ret != val)
+               mt7601u_bbp_wr(dev, offset, val);
+
+       return val;
+}
+
+int mt7601u_wait_bbp_ready(struct mt7601u_dev *dev)
+{
+       int i = 20;
+       u8 val;
+
+       do {
+               val = mt7601u_bbp_rr(dev, MT_BBP_REG_VERSION);
+               if (val && val != 0xff)
+                       break;
+       } while (--i);
+
+       if (!i) {
+               dev_err(dev->dev, "Error: BBP is not ready\n");
+               return -EIO;
+       }
+
+       return 0;
+}
+
+u32 mt7601u_bbp_set_ctrlch(struct mt7601u_dev *dev, bool below)
+{
+       return mt7601u_bbp_rmc(dev, 3, 0x20, below ? 0x20 : 0);
+}
+
+int mt7601u_phy_get_rssi(struct mt7601u_dev *dev,
+                        struct mt7601u_rxwi *rxwi, u16 rate)
+{
+       static const s8 lna[2][2][3] = {
+               /* main LNA */ {
+                       /* bw20 */ { -2, 15, 33 },
+                       /* bw40 */ {  0, 16, 34 }
+               },
+               /*  aux LNA */ {
+                       /* bw20 */ { -2, 15, 33 },
+                       /* bw40 */ { -2, 16, 34 }
+               }
+       };
+       int bw = MT76_GET(MT_RXWI_RATE_BW, rate);
+       int aux_lna = MT76_GET(MT_RXWI_ANT_AUX_LNA, rxwi->ant);
+       int lna_id = MT76_GET(MT_RXWI_GAIN_RSSI_LNA_ID, rxwi->gain);
+       int val;
+
+       if (lna_id) /* LNA id can be 0, 2, 3. */
+               lna_id--;
+
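+       /* Back the RF gains and EEPROM offsets out of the reported value. */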
+       val = 8;
+       val -= lna[aux_lna][bw][lna_id];
+       val -= MT76_GET(MT_RXWI_GAIN_RSSI_VAL, rxwi->gain);
+       val -= dev->ee->lna_gain;
+       val -= dev->ee->rssi_offset[0];
+
+       return val;
+}
+
+static void mt7601u_vco_cal(struct mt7601u_dev *dev)
+{
+       mt7601u_rf_wr(dev, 0, 4, 0x0a);
+       mt7601u_rf_wr(dev, 0, 5, 0x20);
+       mt7601u_rf_set(dev, 0, 4, BIT(7));
+       msleep(2);
+}
+
+static int mt7601u_set_bw_filter(struct mt7601u_dev *dev, bool cal)
+{
+       u32 filter = 0;
+       int ret;
+
+       if (!cal)
+               filter |= 0x10000;
+       if (dev->bw != MT_BW_20)
+               filter |= 0x00100;
+
+       /* TX */
+       ret = mt7601u_mcu_calibrate(dev, MCU_CAL_BW, filter | 1);
+       if (ret)
+               return ret;
+       /* RX */
+       return mt7601u_mcu_calibrate(dev, MCU_CAL_BW, filter);
+}
+
+static int mt7601u_load_bbp_temp_table_bw(struct mt7601u_dev *dev)
+{
+       const struct reg_table *t;
+
+       if (WARN_ON(dev->temp_mode > MT_TEMP_MODE_LOW))
+               return -EINVAL;
+
+       t = &bbp_mode_table[dev->temp_mode][dev->bw];
+
+       return mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP, t->regs, t->n);
+}
+
+static int mt7601u_bbp_temp(struct mt7601u_dev *dev, int mode, const char *name)
+{
+       const struct reg_table *t;
+       int ret;
+
+       if (dev->temp_mode == mode)
+               return 0;
+
+       dev->temp_mode = mode;
+       trace_temp_mode(dev, mode);
+
+       t = bbp_mode_table[dev->temp_mode];
+       ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP,
+                                     t[2].regs, t[2].n);
+       if (ret)
+               return ret;
+
+       return mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP,
+                                      t[dev->bw].regs, t[dev->bw].n);
+}
+
+static void mt7601u_apply_ch14_fixup(struct mt7601u_dev *dev, int hw_chan)
+{
+       struct mt7601u_rate_power *t = &dev->ee->power_rate_table;
+
+       if (hw_chan != 14 || dev->bw != MT_BW_20) {
+               mt7601u_bbp_rmw(dev, 4, 0x20, 0);
+               mt7601u_bbp_wr(dev, 178, 0xff);
+
+               t->cck[0].bw20 = dev->ee->real_cck_bw20[0];
+               t->cck[1].bw20 = dev->ee->real_cck_bw20[1];
+       } else { /* Apply CH14 OBW fixup */
+               mt7601u_bbp_wr(dev, 4, 0x60);
+               mt7601u_bbp_wr(dev, 178, 0);
+
+               /* Note: vendor code is buggy here for negative values */
+               t->cck[0].bw20 = dev->ee->real_cck_bw20[0] - 2;
+               t->cck[1].bw20 = dev->ee->real_cck_bw20[1] - 2;
+       }
+}
+
+static int __mt7601u_phy_set_channel(struct mt7601u_dev *dev,
+                                    struct cfg80211_chan_def *chandef)
+{
+#define FREQ_PLAN_REGS 4
+       static const u8 freq_plan[14][FREQ_PLAN_REGS] = {
+               { 0x99, 0x99,   0x09,   0x50 },
+               { 0x46, 0x44,   0x0a,   0x50 },
+               { 0xec, 0xee,   0x0a,   0x50 },
+               { 0x99, 0x99,   0x0b,   0x50 },
+               { 0x46, 0x44,   0x08,   0x51 },
+               { 0xec, 0xee,   0x08,   0x51 },
+               { 0x99, 0x99,   0x09,   0x51 },
+               { 0x46, 0x44,   0x0a,   0x51 },
+               { 0xec, 0xee,   0x0a,   0x51 },
+               { 0x99, 0x99,   0x0b,   0x51 },
+               { 0x46, 0x44,   0x08,   0x52 },
+               { 0xec, 0xee,   0x08,   0x52 },
+               { 0x99, 0x99,   0x09,   0x52 },
+               { 0x33, 0x33,   0x0b,   0x52 },
+       };
+       struct mt76_reg_pair channel_freq_plan[FREQ_PLAN_REGS] = {
+               { 17, 0 }, { 18, 0 }, { 19, 0 }, { 20, 0 },
+       };
+       struct mt76_reg_pair bbp_settings[3] = {
+               { 62, 0x37 - dev->ee->lna_gain },
+               { 63, 0x37 - dev->ee->lna_gain },
+               { 64, 0x37 - dev->ee->lna_gain },
+       };
+
+       struct ieee80211_channel *chan = chandef->chan;
+       enum nl80211_channel_type chan_type =
+               cfg80211_get_chandef_type(chandef);
+       struct mt7601u_rate_power *t = &dev->ee->power_rate_table;
+       int chan_idx;
+       bool chan_ext_below;
+       u8 bw;
+       int i, ret;
+
+       bw = MT_BW_20;
+       chan_ext_below = (chan_type == NL80211_CHAN_HT40MINUS);
+       chan_idx = chan->hw_value - 1;
+
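+       /* For 40MHz the frequency plan is indexed by the centre channel,
+        * which sits two channels away from the control channel.
+        */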
+       if (chandef->width == NL80211_CHAN_WIDTH_40) {
+               bw = MT_BW_40;
+
+               if (chan_idx > 1 && chan_type == NL80211_CHAN_HT40MINUS)
+                       chan_idx -= 2;
+               else if (chan_idx < 12 && chan_type == NL80211_CHAN_HT40PLUS)
+                       chan_idx += 2;
+               else
+                       dev_err(dev->dev, "Error: invalid 40MHz channel!!\n");
+       }
+
+       if (bw != dev->bw || chan_ext_below != dev->chan_ext_below) {
+               dev_dbg(dev->dev, "Info: switching HT mode bw:%d below:%d\n",
+                       bw, chan_ext_below);
+
+               mt7601u_bbp_set_bw(dev, bw);
+
+               mt7601u_bbp_set_ctrlch(dev, chan_ext_below);
+               mt7601u_mac_set_ctrlch(dev, chan_ext_below);
+               dev->chan_ext_below = chan_ext_below;
+       }
+
+       for (i = 0; i < FREQ_PLAN_REGS; i++)
+               channel_freq_plan[i].value = freq_plan[chan_idx][i];
+
+       ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_RF,
+                                     channel_freq_plan, FREQ_PLAN_REGS);
+       if (ret)
+               return ret;
+
+       mt7601u_rmw(dev, MT_TX_ALC_CFG_0, 0x3f3f,
+                   dev->ee->chan_pwr[chan_idx] & 0x3f);
+
+       ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP,
+                                     bbp_settings, ARRAY_SIZE(bbp_settings));
+       if (ret)
+               return ret;
+
+       mt7601u_vco_cal(dev);
+       mt7601u_bbp_set_bw(dev, bw);
+       ret = mt7601u_set_bw_filter(dev, false);
+       if (ret)
+               return ret;
+
+       mt7601u_apply_ch14_fixup(dev, chan->hw_value);
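+       /* CCK/OFDM power is programmed as packed signed 6-bit (s6) fields. */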
+       mt7601u_wr(dev, MT_TX_PWR_CFG_0, int_to_s6(t->ofdm[1].bw20) << 24 |
+                                        int_to_s6(t->ofdm[0].bw20) << 16 |
+                                        int_to_s6(t->cck[1].bw20) << 8 |
+                                        int_to_s6(t->cck[0].bw20));
+
+       if (test_bit(MT7601U_STATE_SCANNING, &dev->state))
+               mt7601u_agc_reset(dev);
+
+       dev->chandef = *chandef;
+
+       return 0;
+}
+
+int mt7601u_phy_set_channel(struct mt7601u_dev *dev,
+                           struct cfg80211_chan_def *chandef)
+{
+       int ret;
+
+       cancel_delayed_work_sync(&dev->cal_work);
+       cancel_delayed_work_sync(&dev->freq_cal.work);
+
+       mutex_lock(&dev->hw_atomic_mutex);
+       ret = __mt7601u_phy_set_channel(dev, chandef);
+       mutex_unlock(&dev->hw_atomic_mutex);
+       if (ret)
+               return ret;
+
+       if (test_bit(MT7601U_STATE_SCANNING, &dev->state))
+               return 0;
+
+       ieee80211_queue_delayed_work(dev->hw, &dev->cal_work,
+                                    MT_CALIBRATE_INTERVAL);
+       if (dev->freq_cal.enabled)
+               ieee80211_queue_delayed_work(dev->hw, &dev->freq_cal.work,
+                                            MT_FREQ_CAL_INIT_DELAY);
+       return 0;
+}
+
+#define BBP_R47_FLAG           GENMASK(2, 0)
+#define BBP_R47_F_TSSI         0
+#define BBP_R47_F_PKT_T                1
+#define BBP_R47_F_TX_RATE      2
+#define BBP_R47_F_TEMP         4
+/**
+ * mt7601u_bbp_r47_get - read value through BBP R47/R49 pair
+ * @dev:       pointer to adapter structure
+ * @reg:       value of BBP R47 before the operation
+ * @flag:      one of the BBP_R47_F_* flags
+ *
+ * Convenience helper for reading values through BBP R47/R49 pair.
+ * Takes old value of BBP R47 as @reg, because callers usually have it
+ * cached already.
+ *
+ * Return: value of BBP R49.
+ */
+static u8 mt7601u_bbp_r47_get(struct mt7601u_dev *dev, u8 reg, u8 flag)
+{
+       flag |= reg & ~BBP_R47_FLAG;
+       mt7601u_bbp_wr(dev, 47, flag);
+       usleep_range(500, 700);
+       return mt7601u_bbp_rr(dev, 49);
+}
+
+static s8 mt7601u_read_bootup_temp(struct mt7601u_dev *dev)
+{
+       u8 bbp_val, temp;
+       u32 rf_bp, rf_set;
+       int i;
+
+       rf_set = mt7601u_rr(dev, MT_RF_SETTING_0);
+       rf_bp = mt7601u_rr(dev, MT_RF_BYPASS_0);
+
+       mt7601u_wr(dev, MT_RF_BYPASS_0, 0);
+       mt7601u_wr(dev, MT_RF_SETTING_0, 0x00000010);
+       mt7601u_wr(dev, MT_RF_BYPASS_0, 0x00000010);
+
+       bbp_val = mt7601u_bbp_rmw(dev, 47, 0, 0x10);
+
+       mt7601u_bbp_wr(dev, 22, 0x40);
+
+       for (i = 100; i && (bbp_val & 0x10); i--)
+               bbp_val = mt7601u_bbp_rr(dev, 47);
+
+       temp = mt7601u_bbp_r47_get(dev, bbp_val, BBP_R47_F_TEMP);
+
+       mt7601u_bbp_wr(dev, 22, 0);
+
+       bbp_val = mt7601u_bbp_rr(dev, 21);
+       bbp_val |= 0x02;
+       mt7601u_bbp_wr(dev, 21, bbp_val);
+       bbp_val &= ~0x02;
+       mt7601u_bbp_wr(dev, 21, bbp_val);
+
+       mt7601u_wr(dev, MT_RF_BYPASS_0, 0);
+       mt7601u_wr(dev, MT_RF_SETTING_0, rf_set);
+       mt7601u_wr(dev, MT_RF_BYPASS_0, rf_bp);
+
+       trace_read_temp(dev, temp);
+       return temp;
+}
+
+static s8 mt7601u_read_temp(struct mt7601u_dev *dev)
+{
+       int i;
+       u8 val;
+       s8 temp;
+
+       val = mt7601u_bbp_rmw(dev, 47, 0x7f, 0x10);
+
+       /* Note: this rarely succeeds, temp can change even if it fails. */
+       for (i = 100; i && (val & 0x10); i--)
+               val = mt7601u_bbp_rr(dev, 47);
+
+       temp = mt7601u_bbp_r47_get(dev, val, BBP_R47_F_TEMP);
+
+       trace_read_temp(dev, temp);
+       return temp;
+}
+
+static void mt7601u_rxdc_cal(struct mt7601u_dev *dev)
+{
+       static const struct mt76_reg_pair intro[] = {
+               { 158, 0x8d }, { 159, 0xfc },
+               { 158, 0x8c }, { 159, 0x4c },
+       }, outro[] = {
+               { 158, 0x8d }, { 159, 0xe0 },
+       };
+       u32 mac_ctrl;
+       int i, ret;
+
+       mac_ctrl = mt7601u_rr(dev, MT_MAC_SYS_CTRL);
+       mt7601u_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_RX);
+
+       ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP,
+                                     intro, ARRAY_SIZE(intro));
+       if (ret)
+               dev_err(dev->dev, "%s intro failed:%d\n", __func__, ret);
+
+       for (i = 20; i; i--) {
+               usleep_range(300, 500);
+
+               mt7601u_bbp_wr(dev, 158, 0x8c);
+               if (mt7601u_bbp_rr(dev, 159) == 0x0c)
+                       break;
+       }
+       if (!i)
+               dev_err(dev->dev, "%s timed out\n", __func__);
+
+       mt7601u_wr(dev, MT_MAC_SYS_CTRL, 0);
+
+       ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP,
+                                     outro, ARRAY_SIZE(outro));
+       if (ret)
+               dev_err(dev->dev, "%s outro failed:%d\n", __func__, ret);
+
+       mt7601u_wr(dev, MT_MAC_SYS_CTRL, mac_ctrl);
+}
+
+void mt7601u_phy_recalibrate_after_assoc(struct mt7601u_dev *dev)
+{
+       mt7601u_mcu_calibrate(dev, MCU_CAL_DPD, dev->curr_temp);
+
+       mt7601u_rxdc_cal(dev);
+}
+
+/* Note: function copied from vendor driver */
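+/* Fixed-point linear-to-dB conversion: fls() gives the integer part of
+ * log2() in Q15, "app" approximates the fractional part, and the final
+ * multiply by ~6.02 (i.e. 20 / log2(10)) with a >>10 appears to leave
+ * the result in 1/32 dB units.
+ */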
+static s16 lin2dBd(u16 linear)
+{
+       short exp = 0;
+       unsigned int mantissa;
+       int app, dBd;
+
+       if (WARN_ON(!linear))
+               return -10000;
+
+       mantissa = linear;
+
+       exp = fls(mantissa) - 16;
+       if (exp > 0)
+               mantissa >>= exp;
+       else
+               mantissa <<= abs(exp);
+
+       if (mantissa <= 0xb800)
+               app = (mantissa + (mantissa >> 3) + (mantissa >> 4) - 0x9600);
+       else
+               app = (mantissa - (mantissa >> 3) - (mantissa >> 6) - 0x5a00);
+       if (app < 0)
+               app = 0;
+
+       dBd = ((15 + exp) << 15) + app;
+       dBd = (dBd << 2) + (dBd << 1) + (dBd >> 6) + (dBd >> 7);
+       dBd = (dBd >> 10);
+
+       return dBd;
+}
+
+static void
+mt7601u_set_initial_tssi(struct mt7601u_dev *dev, s16 tssi_db, s16 tssi_hvga_db)
+{
+       struct tssi_data *d = &dev->ee->tssi_data;
+       int init_offset;
+
+       init_offset = -((tssi_db * d->slope + d->offset[1]) / 4096) + 10;
+
+       mt76_rmw(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP,
+                int_to_s6(init_offset) & MT_TX_ALC_CFG_1_TEMP_COMP);
+}
+
+static void mt7601u_tssi_dc_gain_cal(struct mt7601u_dev *dev)
+{
+       u8 rf_vga, rf_mixer, bbp_r47;
+       int i, j;
+       s8 res[4];
+       s16 tssi_init_db, tssi_init_hvga_db;
+
+       mt7601u_wr(dev, MT_RF_SETTING_0, 0x00000030);
+       mt7601u_wr(dev, MT_RF_BYPASS_0, 0x000c0030);
+       mt7601u_wr(dev, MT_MAC_SYS_CTRL, 0);
+
+       mt7601u_bbp_wr(dev, 58, 0);
+       mt7601u_bbp_wr(dev, 241, 0x2);
+       mt7601u_bbp_wr(dev, 23, 0x8);
+       bbp_r47 = mt7601u_bbp_rr(dev, 47);
+
+       /* Set VGA gain */
+       rf_vga = mt7601u_rf_rr(dev, 5, 3);
+       mt7601u_rf_wr(dev, 5, 3, 8);
+
+       /* Mixer disable */
+       rf_mixer = mt7601u_rf_rr(dev, 4, 39);
+       mt7601u_rf_wr(dev, 4, 39, 0);
+
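+       /* Sample TSSI with the mixer off (even i) and on (odd i), first
+        * at normal VGA gain (i < 2), then at high VGA gain (i >= 2).
+        */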
+       for (i = 0; i < 4; i++) {
+               mt7601u_rf_wr(dev, 4, 39, (i & 1) ? rf_mixer : 0);
+
+               mt7601u_bbp_wr(dev, 23, (i < 2) ? 0x08 : 0x02);
+               mt7601u_rf_wr(dev, 5, 3, (i < 2) ? 0x08 : 0x11);
+
+               /* BBP TSSI initial and soft reset */
+               mt7601u_bbp_wr(dev, 22, 0);
+               mt7601u_bbp_wr(dev, 244, 0);
+
+               mt7601u_bbp_wr(dev, 21, 1);
+               udelay(1);
+               mt7601u_bbp_wr(dev, 21, 0);
+
+               /* TSSI measurement */
+               mt7601u_bbp_wr(dev, 47, 0x50);
+               mt7601u_bbp_wr(dev, (i & 1) ? 244 : 22, (i & 1) ? 0x31 : 0x40);
+
+               for (j = 20; j; j--)
+                       if (!(mt7601u_bbp_rr(dev, 47) & 0x10))
+                               break;
+               if (!j)
+                       dev_err(dev->dev, "%s timed out\n", __func__);
+
+               /* TSSI read */
+               mt7601u_bbp_wr(dev, 47, 0x40);
+               res[i] = mt7601u_bbp_rr(dev, 49);
+       }
+
+       tssi_init_db = lin2dBd((short)res[1] - res[0]);
+       tssi_init_hvga_db = lin2dBd(((short)res[3] - res[2]) * 4);
+       dev->tssi_init = res[0];
+       dev->tssi_init_hvga = res[2];
+       dev->tssi_init_hvga_offset_db = tssi_init_hvga_db - tssi_init_db;
+
+       dev_dbg(dev->dev,
+               "TSSI_init:%hhx db:%hx hvga:%hhx hvga_db:%hx off_db:%hx\n",
+               dev->tssi_init, tssi_init_db, dev->tssi_init_hvga,
+               tssi_init_hvga_db, dev->tssi_init_hvga_offset_db);
+
+       mt7601u_bbp_wr(dev, 22, 0);
+       mt7601u_bbp_wr(dev, 244, 0);
+
+       mt7601u_bbp_wr(dev, 21, 1);
+       udelay(1);
+       mt7601u_bbp_wr(dev, 21, 0);
+
+       mt7601u_wr(dev, MT_RF_BYPASS_0, 0);
+       mt7601u_wr(dev, MT_RF_SETTING_0, 0);
+
+       mt7601u_rf_wr(dev, 5, 3, rf_vga);
+       mt7601u_rf_wr(dev, 4, 39, rf_mixer);
+       mt7601u_bbp_wr(dev, 47, bbp_r47);
+
+       mt7601u_set_initial_tssi(dev, tssi_init_db, tssi_init_hvga_db);
+}
+
+static int mt7601u_temp_comp(struct mt7601u_dev *dev, bool on)
+{
+       int ret, temp, hi_temp = 400, lo_temp = -200;
+
+       temp = (dev->raw_temp - dev->ee->ref_temp) * MT_EE_TEMPERATURE_SLOPE;
+       dev->curr_temp = temp;
+
+       /* DPD Calibration */
+       if (temp - dev->dpd_temp > 450 || temp - dev->dpd_temp < -450) {
+               dev->dpd_temp = temp;
+
+               ret = mt7601u_mcu_calibrate(dev, MCU_CAL_DPD, dev->dpd_temp);
+               if (ret)
+                       return ret;
+
+               mt7601u_vco_cal(dev);
+
+               dev_dbg(dev->dev, "Recalibrate DPD\n");
+       }
+
+       /* PLL Lock Protect */
+       if (temp < -50 && !dev->pll_lock_protect) { /* < 20C */
+               dev->pll_lock_protect = true;
+
+               mt7601u_rf_wr(dev, 4, 4, 6);
+               mt7601u_rf_clear(dev, 4, 10, 0x30);
+
+               dev_dbg(dev->dev, "PLL lock protect on - too cold\n");
+       } else if (temp > 50 && dev->pll_lock_protect) { /* > 30C */
+               dev->pll_lock_protect = false;
+
+               mt7601u_rf_wr(dev, 4, 4, 0);
+               mt7601u_rf_rmw(dev, 4, 10, 0x30, 0x10);
+
+               dev_dbg(dev->dev, "PLL lock protect off\n");
+       }
+
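+       /* With @on set both thresholds shift down by 50 - presumably to
+        * add some hysteresis around the mode switch points.
+        */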
+       if (on) {
+               hi_temp -= 50;
+               lo_temp -= 50;
+       }
+
+       /* BBP CR for H, L, N temperature */
+       if (temp > hi_temp)
+               return mt7601u_bbp_temp(dev, MT_TEMP_MODE_HIGH, "high");
+       else if (temp > lo_temp)
+               return mt7601u_bbp_temp(dev, MT_TEMP_MODE_NORMAL, "normal");
+       else
+               return mt7601u_bbp_temp(dev, MT_TEMP_MODE_LOW, "low");
+}
+
+/* Note: this is used only with TSSI; we can just use trgt_pwr from EEPROM. */
+static int mt7601u_current_tx_power(struct mt7601u_dev *dev)
+{
+       return dev->ee->chan_pwr[dev->chandef.chan->hw_value - 1];
+}
+
+static bool mt7601u_use_hvga(struct mt7601u_dev *dev)
+{
+       return mt7601u_current_tx_power(dev) <= 20;
+}
+
+static s16
+mt7601u_phy_rf_pa_mode_val(struct mt7601u_dev *dev, int phy_mode, int tx_rate)
+{
+       static const s16 decode_tb[] = { 0, 8847, -5734, -5734 };
+       u32 reg;
+
+       switch (phy_mode) {
+       case MT_PHY_TYPE_OFDM:
+               tx_rate += 4;
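+               /* fall through */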
+       case MT_PHY_TYPE_CCK:
+               reg = dev->rf_pa_mode[0];
+               break;
+       default:
+               reg = dev->rf_pa_mode[1];
+               break;
+       }
+
+       return decode_tb[(reg >> (tx_rate * 2)) & 0x3];
+}
+
+static struct mt7601u_tssi_params
+mt7601u_tssi_params_get(struct mt7601u_dev *dev)
+{
+       static const u8 ofdm_pkt2rate[8] = { 6, 4, 2, 0, 7, 5, 3, 1 };
+       static const int static_power[4] = { 0, -49152, -98304, 49152 };
+       struct mt7601u_tssi_params p;
+       u8 bbp_r47, pkt_type, tx_rate;
+       struct power_per_rate *rate_table;
+
+       bbp_r47 = mt7601u_bbp_rr(dev, 47);
+
+       p.tssi0 = mt7601u_bbp_r47_get(dev, bbp_r47, BBP_R47_F_TSSI);
+       dev->raw_temp = mt7601u_bbp_r47_get(dev, bbp_r47, BBP_R47_F_TEMP);
+       pkt_type = mt7601u_bbp_r47_get(dev, bbp_r47, BBP_R47_F_PKT_T);
+
+       p.trgt_power = mt7601u_current_tx_power(dev);
+
+       switch (pkt_type & 0x03) {
+       case MT_PHY_TYPE_CCK:
+               tx_rate = (pkt_type >> 4) & 0x03;
+               rate_table = dev->ee->power_rate_table.cck;
+               break;
+
+       case MT_PHY_TYPE_OFDM:
+               tx_rate = ofdm_pkt2rate[(pkt_type >> 4) & 0x07];
+               rate_table = dev->ee->power_rate_table.ofdm;
+               break;
+
+       default:
+               tx_rate = mt7601u_bbp_r47_get(dev, bbp_r47, BBP_R47_F_TX_RATE);
+               tx_rate &= 0x7f;
+               rate_table = dev->ee->power_rate_table.ht;
+               break;
+       }
+
+       if (dev->bw == MT_BW_20)
+               p.trgt_power += rate_table[tx_rate / 2].bw20;
+       else
+               p.trgt_power += rate_table[tx_rate / 2].bw40;
+
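+       /* Target power is tracked in 1/4096 dB fixed point from here on. */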
+       p.trgt_power <<= 12;
+
+       dev_dbg(dev->dev, "tx_rate:%02hhx pwr:%08x\n", tx_rate, p.trgt_power);
+
+       p.trgt_power += mt7601u_phy_rf_pa_mode_val(dev, pkt_type & 0x03,
+                                                  tx_rate);
+
+       /* Channel 14, cck, bw20 */
+       if ((pkt_type & 0x03) == MT_PHY_TYPE_CCK) {
+               if (mt7601u_bbp_rr(dev, 4) & 0x20)
+                       p.trgt_power += mt7601u_bbp_rr(dev, 178) ? 18022 : 9830;
+               else
+                       p.trgt_power += mt7601u_bbp_rr(dev, 178) ? 819 : 24576;
+       }
+
+       p.trgt_power += static_power[mt7601u_bbp_rr(dev, 1) & 0x03];
+
+       p.trgt_power += dev->ee->tssi_data.tx0_delta_offset;
+
+       dev_dbg(dev->dev,
+               "tssi:%02hhx t_power:%08x temp:%02hhx pkt_type:%02hhx\n",
+               p.tssi0, p.trgt_power, dev->raw_temp, pkt_type);
+
+       return p;
+}
+
+static bool mt7601u_tssi_read_ready(struct mt7601u_dev *dev)
+{
+       return !(mt7601u_bbp_rr(dev, 47) & 0x10);
+}
+
+static int mt7601u_tssi_cal(struct mt7601u_dev *dev)
+{
+       struct mt7601u_tssi_params params;
+       int curr_pwr, diff_pwr;
+       s8 tssi_offset; /* EEPROM offsets are signed; plain char may be unsigned */
+       s8 tssi_init;
+       s16 tssi_m_dc, tssi_db;
+       bool hvga;
+       u32 val;
+
+       if (!dev->ee->tssi_enabled)
+               return 0;
+
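+       /* TSSI readback is asynchronous - kick the MCU now and consume
+        * the result on a later pass through this function.
+        */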
+       hvga = mt7601u_use_hvga(dev);
+       if (!dev->tssi_read_trig)
+               return mt7601u_mcu_tssi_read_kick(dev, hvga);
+
+       if (!mt7601u_tssi_read_ready(dev))
+               return 0;
+
+       params = mt7601u_tssi_params_get(dev);
+
+       tssi_init = (hvga ? dev->tssi_init_hvga : dev->tssi_init);
+       tssi_m_dc = params.tssi0 - tssi_init;
+       tssi_db = lin2dBd(tssi_m_dc);
+       dev_dbg(dev->dev, "tssi dc:%04hx db:%04hx hvga:%d\n",
+               tssi_m_dc, tssi_db, hvga);
+
+       if (dev->chandef.chan->hw_value < 5)
+               tssi_offset = dev->ee->tssi_data.offset[0];
+       else if (dev->chandef.chan->hw_value < 9)
+               tssi_offset = dev->ee->tssi_data.offset[1];
+       else
+               tssi_offset = dev->ee->tssi_data.offset[2];
+
+       if (hvga)
+               tssi_db -= dev->tssi_init_hvga_offset_db;
+
+       curr_pwr = tssi_db * dev->ee->tssi_data.slope + (tssi_offset << 9);
+       diff_pwr = params.trgt_power - curr_pwr;
+       dev_dbg(dev->dev, "Power curr:%08x diff:%08x\n", curr_pwr, diff_pwr);
+
+       if (params.tssi0 > 126 && diff_pwr > 0) {
+               dev_err(dev->dev, "Error: TSSI upper saturation\n");
+               diff_pwr = 0;
+       }
+       if (params.tssi0 - tssi_init < 1 && diff_pwr < 0) {
+               dev_err(dev->dev, "Error: TSSI lower saturation\n");
+               diff_pwr = 0;
+       }
+
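+       /* Don't chase a small error that flipped sign without shrinking -
+        * that would only oscillate around the target.
+        */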
+       if ((dev->prev_pwr_diff ^ diff_pwr) < 0 && abs(diff_pwr) < 4096 &&
+           (abs(diff_pwr) > abs(dev->prev_pwr_diff) ||
+            (diff_pwr > 0 && diff_pwr == -dev->prev_pwr_diff)))
+               diff_pwr = 0;
+       else
+               dev->prev_pwr_diff = diff_pwr;
+
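+       /* Round to the nearest step; power is in 1/4096 dB units. */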
+       diff_pwr += (diff_pwr > 0) ? 2048 : -2048;
+       diff_pwr /= 4096;
+
+       dev_dbg(dev->dev, "final diff: %08x\n", diff_pwr);
+
+       val = mt7601u_rr(dev, MT_TX_ALC_CFG_1);
+       curr_pwr = s6_to_int(MT76_GET(MT_TX_ALC_CFG_1_TEMP_COMP, val));
+       diff_pwr += curr_pwr;
+       val = (val & ~MT_TX_ALC_CFG_1_TEMP_COMP) | int_to_s6(diff_pwr);
+       mt7601u_wr(dev, MT_TX_ALC_CFG_1, val);
+
+       return mt7601u_mcu_tssi_read_kick(dev, hvga);
+}
+
+static u8 mt7601u_agc_default(struct mt7601u_dev *dev)
+{
+       return (dev->ee->lna_gain - 8) * 2 + 0x34;
+}
+
+static void mt7601u_agc_reset(struct mt7601u_dev *dev)
+{
+       u8 agc = mt7601u_agc_default(dev);
+
+       mt7601u_bbp_wr(dev, 66, agc);
+}
+
+void mt7601u_agc_save(struct mt7601u_dev *dev)
+{
+       dev->agc_save = mt7601u_bbp_rr(dev, 66);
+}
+
+void mt7601u_agc_restore(struct mt7601u_dev *dev)
+{
+       mt7601u_bbp_wr(dev, 66, dev->agc_save);
+}
+
+static void mt7601u_agc_tune(struct mt7601u_dev *dev)
+{
+       u8 val = mt7601u_agc_default(dev);
+
+       if (test_bit(MT7601U_STATE_SCANNING, &dev->state))
+               return;
+
+       /* Note: only in STA mode and not dozing; perhaps do this only if
+        *       there were enough RSSI updates since the last run?
+        *       RSSI updates only come from beacons and U2M frames, so this
+        *       should work...
+        */
+       spin_lock_bh(&dev->con_mon_lock);
+       if (dev->avg_rssi <= -70)
+               val -= 0x20;
+       else if (dev->avg_rssi <= -60)
+               val -= 0x10;
+       spin_unlock_bh(&dev->con_mon_lock);
+
+       if (val != mt7601u_bbp_rr(dev, 66))
+               mt7601u_bbp_wr(dev, 66, val);
+
+       /* TODO: also if lost a lot of beacons try resetting
+        *       (see RTMPSetAGCInitValue() call in mlme.c).
+        */
+}
+
+static void mt7601u_phy_calibrate(struct work_struct *work)
+{
+       struct mt7601u_dev *dev = container_of(work, struct mt7601u_dev,
+                                           cal_work.work);
+
+       mt7601u_agc_tune(dev);
+       mt7601u_tssi_cal(dev);
+       /* If TSSI calibration was run it already updated temperature. */
+       if (!dev->ee->tssi_enabled)
+               dev->raw_temp = mt7601u_read_temp(dev);
+       mt7601u_temp_comp(dev, true); /* TODO: find right value for @on */
+
+       ieee80211_queue_delayed_work(dev->hw, &dev->cal_work,
+                                    MT_CALIBRATE_INTERVAL);
+}
+
+static unsigned long
+__mt7601u_phy_freq_cal(struct mt7601u_dev *dev, s8 last_offset, u8 phy_mode)
+{
+       u8 activate_threshold, deactivate_threshold;
+
+       trace_freq_cal_offset(dev, phy_mode, last_offset);
+
+       /* No beacons received - reschedule soon */
+       if (last_offset == MT_FREQ_OFFSET_INVALID)
+               return MT_FREQ_CAL_ADJ_INTERVAL;
+
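+       /* Thresholds differ per PHY mode - the scale of the reported
+        * offset appears to depend on the beacon's modulation.
+        */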
+       switch (phy_mode) {
+       case MT_PHY_TYPE_CCK:
+               activate_threshold = 19;
+               deactivate_threshold = 5;
+               break;
+       case MT_PHY_TYPE_OFDM:
+               activate_threshold = 102;
+               deactivate_threshold = 32;
+               break;
+       case MT_PHY_TYPE_HT:
+       case MT_PHY_TYPE_HT_GF:
+               activate_threshold = 82;
+               deactivate_threshold = 20;
+               break;
+       default:
+               WARN_ON(1);
+               return MT_FREQ_CAL_CHECK_INTERVAL;
+       }
+
+       if (abs(last_offset) >= activate_threshold)
+               dev->freq_cal.adjusting = true;
+       else if (abs(last_offset) <= deactivate_threshold)
+               dev->freq_cal.adjusting = false;
+
+       if (!dev->freq_cal.adjusting)
+               return MT_FREQ_CAL_CHECK_INTERVAL;
+
+       if (last_offset > deactivate_threshold) {
+               if (dev->freq_cal.freq > 0)
+                       dev->freq_cal.freq--;
+               else
+                       dev->freq_cal.adjusting = false;
+       } else if (last_offset < -deactivate_threshold) {
+               if (dev->freq_cal.freq < 0xbf)
+                       dev->freq_cal.freq++;
+               else
+                       dev->freq_cal.adjusting = false;
+       }
+
+       trace_freq_cal_adjust(dev, dev->freq_cal.freq);
+       mt7601u_rf_wr(dev, 0, 12, dev->freq_cal.freq);
+       mt7601u_vco_cal(dev);
+
+       return dev->freq_cal.adjusting ? MT_FREQ_CAL_ADJ_INTERVAL :
+                                        MT_FREQ_CAL_CHECK_INTERVAL;
+}
+
+static void mt7601u_phy_freq_cal(struct work_struct *work)
+{
+       struct mt7601u_dev *dev = container_of(work, struct mt7601u_dev,
+                                              freq_cal.work.work);
+       s8 last_offset;
+       u8 phy_mode;
+       unsigned long delay;
+
+       spin_lock_bh(&dev->con_mon_lock);
+       last_offset = dev->bcn_freq_off;
+       phy_mode = dev->bcn_phy_mode;
+       spin_unlock_bh(&dev->con_mon_lock);
+
+       delay = __mt7601u_phy_freq_cal(dev, last_offset, phy_mode);
+       ieee80211_queue_delayed_work(dev->hw, &dev->freq_cal.work, delay);
+
+       spin_lock_bh(&dev->con_mon_lock);
+       dev->bcn_freq_off = MT_FREQ_OFFSET_INVALID;
+       spin_unlock_bh(&dev->con_mon_lock);
+}
+
+void mt7601u_phy_con_cal_onoff(struct mt7601u_dev *dev,
+                              struct ieee80211_bss_conf *info)
+{
+       if (!info->assoc)
+               cancel_delayed_work_sync(&dev->freq_cal.work);
+
+       /* Start/stop collecting beacon data */
+       spin_lock_bh(&dev->con_mon_lock);
+       ether_addr_copy(dev->ap_bssid, info->bssid);
+       dev->avg_rssi = 0;
+       dev->bcn_freq_off = MT_FREQ_OFFSET_INVALID;
+       spin_unlock_bh(&dev->con_mon_lock);
+
+       dev->freq_cal.freq = dev->ee->rf_freq_off;
+       dev->freq_cal.enabled = info->assoc;
+       dev->freq_cal.adjusting = false;
+
+       if (info->assoc)
+               ieee80211_queue_delayed_work(dev->hw, &dev->freq_cal.work,
+                                            MT_FREQ_CAL_INIT_DELAY);
+}
+
+static int mt7601u_init_cal(struct mt7601u_dev *dev)
+{
+       u32 mac_ctrl;
+       int ret;
+
+       dev->raw_temp = mt7601u_read_bootup_temp(dev);
+       dev->curr_temp = (dev->raw_temp - dev->ee->ref_temp) *
+               MT_EE_TEMPERATURE_SLOPE;
+       dev->dpd_temp = dev->curr_temp;
+
+       mac_ctrl = mt7601u_rr(dev, MT_MAC_SYS_CTRL);
+
+       ret = mt7601u_mcu_calibrate(dev, MCU_CAL_R, 0);
+       if (ret)
+               return ret;
+
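+       /* Kick VCO calibration (bank 0, reg 4, bit 7), as in mt7601u_vco_cal(). */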
+       ret = mt7601u_rf_rr(dev, 0, 4);
+       if (ret < 0)
+               return ret;
+       ret |= 0x80;
+       ret = mt7601u_rf_wr(dev, 0, 4, ret);
+       if (ret)
+               return ret;
+       msleep(2);
+
+       ret = mt7601u_mcu_calibrate(dev, MCU_CAL_TXDCOC, 0);
+       if (ret)
+               return ret;
+
+       mt7601u_rxdc_cal(dev);
+
+       ret = mt7601u_set_bw_filter(dev, true);
+       if (ret)
+               return ret;
+       ret = mt7601u_mcu_calibrate(dev, MCU_CAL_LOFT, 0);
+       if (ret)
+               return ret;
+       ret = mt7601u_mcu_calibrate(dev, MCU_CAL_TXIQ, 0);
+       if (ret)
+               return ret;
+       ret = mt7601u_mcu_calibrate(dev, MCU_CAL_RXIQ, 0);
+       if (ret)
+               return ret;
+       ret = mt7601u_mcu_calibrate(dev, MCU_CAL_DPD, dev->dpd_temp);
+       if (ret)
+               return ret;
+
+       mt7601u_rxdc_cal(dev);
+
+       mt7601u_tssi_dc_gain_cal(dev);
+
+       mt7601u_wr(dev, MT_MAC_SYS_CTRL, mac_ctrl);
+
+       mt7601u_temp_comp(dev, true);
+
+       return 0;
+}
+
+int mt7601u_bbp_set_bw(struct mt7601u_dev *dev, int bw)
+{
+       u32 val, old;
+
+       if (bw == dev->bw) {
+               /* Vendor driver does the rmc even when no change is needed. */
+               mt7601u_bbp_rmc(dev, 4, 0x18, bw == MT_BW_20 ? 0 : 0x10);
+
+               return 0;
+       }
+       dev->bw = bw;
+
+       /* Stop MAC for the time of bw change */
+       old = mt7601u_rr(dev, MT_MAC_SYS_CTRL);
+       val = old & ~(MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX);
+       mt7601u_wr(dev, MT_MAC_SYS_CTRL, val);
+       mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_TX | MT_MAC_STATUS_RX,
+                 0, 500000);
+
+       mt7601u_bbp_rmc(dev, 4, 0x18, bw == MT_BW_20 ? 0 : 0x10);
+
+       mt7601u_wr(dev, MT_MAC_SYS_CTRL, old);
+
+       return mt7601u_load_bbp_temp_table_bw(dev);
+}
+
+/**
+ * mt7601u_set_rx_path - set rx path in BBP
+ * @dev:       pointer to adapter structure
+ * @path:      rx path to set, values are 0-based
+ */
+void mt7601u_set_rx_path(struct mt7601u_dev *dev, u8 path)
+{
+       mt7601u_bbp_rmw(dev, 3, 0x18, path << 3);
+}
+
+/**
+ * mt7601u_set_tx_dac - set which tx DAC to use
+ * @dev:       pointer to adapter structure
+ * @dac:       DAC index, values are 0-based
+ */
+void mt7601u_set_tx_dac(struct mt7601u_dev *dev, u8 dac)
+{
+       mt7601u_bbp_rmc(dev, 1, 0x18, dac << 3);
+}
+
+int mt7601u_phy_init(struct mt7601u_dev *dev)
+{
+       int ret;
+
+       dev->rf_pa_mode[0] = mt7601u_rr(dev, MT_RF_PA_MODE_CFG0);
+       dev->rf_pa_mode[1] = mt7601u_rr(dev, MT_RF_PA_MODE_CFG1);
+
+       ret = mt7601u_rf_wr(dev, 0, 12, dev->ee->rf_freq_off);
+       if (ret)
+               return ret;
+       ret = mt7601u_write_reg_pairs(dev, 0, rf_central,
+                                     ARRAY_SIZE(rf_central));
+       if (ret)
+               return ret;
+       ret = mt7601u_write_reg_pairs(dev, 0, rf_channel,
+                                     ARRAY_SIZE(rf_channel));
+       if (ret)
+               return ret;
+       ret = mt7601u_write_reg_pairs(dev, 0, rf_vga, ARRAY_SIZE(rf_vga));
+       if (ret)
+               return ret;
+
+       ret = mt7601u_init_cal(dev);
+       if (ret)
+               return ret;
+
+       dev->prev_pwr_diff = 100;
+
+       INIT_DELAYED_WORK(&dev->cal_work, mt7601u_phy_calibrate);
+       INIT_DELAYED_WORK(&dev->freq_cal.work, mt7601u_phy_freq_cal);
+
+       return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt7601u/regs.h b/drivers/net/wireless/mediatek/mt7601u/regs.h
new file mode 100644 (file)
index 0000000..afd8978
--- /dev/null
@@ -0,0 +1,636 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT76_REGS_H
+#define __MT76_REGS_H
+
+#include <linux/bitops.h>
+
+#ifndef GENMASK
+#define GENMASK(h, l)       (((U32_C(1) << ((h) - (l) + 1)) - 1) << (l))
+#endif
+
+#define MT_ASIC_VERSION                        0x0000
+
+#define MT76XX_REV_E3          0x22
+#define MT76XX_REV_E4          0x33
+
+#define MT_CMB_CTRL                    0x0020
+#define MT_CMB_CTRL_XTAL_RDY           BIT(22)
+#define MT_CMB_CTRL_PLL_LD             BIT(23)
+
+#define MT_EFUSE_CTRL                  0x0024
+#define MT_EFUSE_CTRL_AOUT             GENMASK(5, 0)
+#define MT_EFUSE_CTRL_MODE             GENMASK(7, 6)
+#define MT_EFUSE_CTRL_LDO_OFF_TIME     GENMASK(13, 8)
+#define MT_EFUSE_CTRL_LDO_ON_TIME      GENMASK(15, 14)
+#define MT_EFUSE_CTRL_AIN              GENMASK(25, 16)
+#define MT_EFUSE_CTRL_KICK             BIT(30)
+#define MT_EFUSE_CTRL_SEL              BIT(31)
+
+#define MT_EFUSE_DATA_BASE             0x0028
+#define MT_EFUSE_DATA(_n)              (MT_EFUSE_DATA_BASE + ((_n) << 2))
+
+#define MT_COEXCFG0                    0x0040
+#define MT_COEXCFG0_COEX_EN            BIT(0)
+
+#define MT_WLAN_FUN_CTRL               0x0080
+#define MT_WLAN_FUN_CTRL_WLAN_EN       BIT(0)
+#define MT_WLAN_FUN_CTRL_WLAN_CLK_EN   BIT(1)
+#define MT_WLAN_FUN_CTRL_WLAN_RESET_RF BIT(2)
+
+#define MT_WLAN_FUN_CTRL_WLAN_RESET    BIT(3) /* MT76x0 */
+#define MT_WLAN_FUN_CTRL_CSR_F20M_CKEN BIT(3) /* MT76x2 */
+
+#define MT_WLAN_FUN_CTRL_PCIE_CLK_REQ  BIT(4)
+#define MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL        BIT(5)
+#define MT_WLAN_FUN_CTRL_INV_ANT_SEL   BIT(6)
+#define MT_WLAN_FUN_CTRL_WAKE_HOST     BIT(7)
+
+#define MT_WLAN_FUN_CTRL_THERM_RST     BIT(8) /* MT76x2 */
+#define MT_WLAN_FUN_CTRL_THERM_CKEN    BIT(9) /* MT76x2 */
+
+#define MT_WLAN_FUN_CTRL_GPIO_IN       GENMASK(15, 8) /* MT76x0 */
+#define MT_WLAN_FUN_CTRL_GPIO_OUT      GENMASK(23, 16) /* MT76x0 */
+#define MT_WLAN_FUN_CTRL_GPIO_OUT_EN   GENMASK(31, 24) /* MT76x0 */
+
+#define MT_XO_CTRL0                    0x0100
+#define MT_XO_CTRL1                    0x0104
+#define MT_XO_CTRL2                    0x0108
+#define MT_XO_CTRL3                    0x010c
+#define MT_XO_CTRL4                    0x0110
+
+#define MT_XO_CTRL5                    0x0114
+#define MT_XO_CTRL5_C2_VAL             GENMASK(14, 8)
+
+#define MT_XO_CTRL6                    0x0118
+#define MT_XO_CTRL6_C2_CTRL            GENMASK(14, 8)
+
+#define MT_XO_CTRL7                    0x011c
+
+#define MT_WLAN_MTC_CTRL               0x10148
+#define MT_WLAN_MTC_CTRL_MTCMOS_PWR_UP BIT(0)
+#define MT_WLAN_MTC_CTRL_PWR_ACK       BIT(12)
+#define MT_WLAN_MTC_CTRL_PWR_ACK_S     BIT(13)
+#define MT_WLAN_MTC_CTRL_BBP_MEM_PD    GENMASK(19, 16)
+#define MT_WLAN_MTC_CTRL_PBF_MEM_PD    BIT(20)
+#define MT_WLAN_MTC_CTRL_FCE_MEM_PD    BIT(21)
+#define MT_WLAN_MTC_CTRL_TSO_MEM_PD    BIT(22)
+#define MT_WLAN_MTC_CTRL_BBP_MEM_RB    BIT(24)
+#define MT_WLAN_MTC_CTRL_PBF_MEM_RB    BIT(25)
+#define MT_WLAN_MTC_CTRL_FCE_MEM_RB    BIT(26)
+#define MT_WLAN_MTC_CTRL_TSO_MEM_RB    BIT(27)
+#define MT_WLAN_MTC_CTRL_STATE_UP      BIT(28)
+
+#define MT_INT_SOURCE_CSR              0x0200
+#define MT_INT_MASK_CSR                        0x0204
+
+#define MT_INT_RX_DONE(_n)             BIT(_n)
+#define MT_INT_RX_DONE_ALL             GENMASK(1, 0)
+#define MT_INT_TX_DONE_ALL             GENMASK(13, 4)
+#define MT_INT_TX_DONE(_n)             BIT((_n) + 4)
+#define MT_INT_RX_COHERENT             BIT(16)
+#define MT_INT_TX_COHERENT             BIT(17)
+#define MT_INT_ANY_COHERENT            BIT(18)
+#define MT_INT_MCU_CMD                 BIT(19)
+#define MT_INT_TBTT                    BIT(20)
+#define MT_INT_PRE_TBTT                        BIT(21)
+#define MT_INT_TX_STAT                 BIT(22)
+#define MT_INT_AUTO_WAKEUP             BIT(23)
+#define MT_INT_GPTIMER                 BIT(24)
+#define MT_INT_RXDELAYINT              BIT(26)
+#define MT_INT_TXDELAYINT              BIT(27)
+
+#define MT_WPDMA_GLO_CFG               0x0208
+#define MT_WPDMA_GLO_CFG_TX_DMA_EN     BIT(0)
+#define MT_WPDMA_GLO_CFG_TX_DMA_BUSY   BIT(1)
+#define MT_WPDMA_GLO_CFG_RX_DMA_EN     BIT(2)
+#define MT_WPDMA_GLO_CFG_RX_DMA_BUSY   BIT(3)
+#define MT_WPDMA_GLO_CFG_DMA_BURST_SIZE        GENMASK(5, 4)
+#define MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE     BIT(6)
+#define MT_WPDMA_GLO_CFG_BIG_ENDIAN    BIT(7)
+#define MT_WPDMA_GLO_CFG_HDR_SEG_LEN   GENMASK(15, 8)
+#define MT_WPDMA_GLO_CFG_CLK_GATE_DIS  BIT(30)
+#define MT_WPDMA_GLO_CFG_RX_2B_OFFSET  BIT(31)
+
+#define MT_WPDMA_RST_IDX               0x020c
+
+#define MT_WPDMA_DELAY_INT_CFG         0x0210
+
+#define MT_WMM_AIFSN           0x0214
+#define MT_WMM_AIFSN_MASK              GENMASK(3, 0)
+#define MT_WMM_AIFSN_SHIFT(_n)         ((_n) * 4)
+
+#define MT_WMM_CWMIN           0x0218
+#define MT_WMM_CWMIN_MASK              GENMASK(3, 0)
+#define MT_WMM_CWMIN_SHIFT(_n)         ((_n) * 4)
+
+#define MT_WMM_CWMAX           0x021c
+#define MT_WMM_CWMAX_MASK              GENMASK(3, 0)
+#define MT_WMM_CWMAX_SHIFT(_n)         ((_n) * 4)
+
+#define MT_WMM_TXOP_BASE               0x0220
+#define MT_WMM_TXOP(_n)                        (MT_WMM_TXOP_BASE + (((_n) / 2) << 2))
+#define MT_WMM_TXOP_SHIFT(_n)          (((_n) & 1) * 16)
+#define MT_WMM_TXOP_MASK               GENMASK(15, 0)
+
+#define MT_FCE_DMA_ADDR                        0x0230
+#define MT_FCE_DMA_LEN                 0x0234
+
+#define MT_USB_DMA_CFG                 0x238
+#define MT_USB_DMA_CFG_RX_BULK_AGG_TOUT        GENMASK(7, 0)
+#define MT_USB_DMA_CFG_RX_BULK_AGG_LMT GENMASK(15, 8)
+#define MT_USB_DMA_CFG_PHY_CLR         BIT(16)
+#define MT_USB_DMA_CFG_TX_CLR          BIT(19)
+#define MT_USB_DMA_CFG_TXOP_HALT       BIT(20)
+#define MT_USB_DMA_CFG_RX_BULK_AGG_EN  BIT(21)
+#define MT_USB_DMA_CFG_RX_BULK_EN      BIT(22)
+#define MT_USB_DMA_CFG_TX_BULK_EN      BIT(23)
+#define MT_USB_DMA_CFG_UDMA_RX_WL_DROP BIT(25)
+#define MT_USB_DMA_CFG_EP_OUT_VALID    GENMASK(29, 27)
+#define MT_USB_DMA_CFG_RX_BUSY         BIT(30)
+#define MT_USB_DMA_CFG_TX_BUSY         BIT(31)
+
+#define MT_TSO_CTRL                    0x0250
+#define MT_HEADER_TRANS_CTRL_REG       0x0260
+
+#define MT_US_CYC_CFG                  0x02a4
+#define MT_US_CYC_CNT                  GENMASK(7, 0)
+
+#define MT_TX_RING_BASE                        0x0300
+#define MT_RX_RING_BASE                        0x03c0
+#define MT_RING_SIZE                   0x10
+
+#define MT_TX_HW_QUEUE_MCU             8
+#define MT_TX_HW_QUEUE_MGMT            9
+
+#define MT_PBF_SYS_CTRL                        0x0400
+#define MT_PBF_SYS_CTRL_MCU_RESET      BIT(0)
+#define MT_PBF_SYS_CTRL_DMA_RESET      BIT(1)
+#define MT_PBF_SYS_CTRL_MAC_RESET      BIT(2)
+#define MT_PBF_SYS_CTRL_PBF_RESET      BIT(3)
+#define MT_PBF_SYS_CTRL_ASY_RESET      BIT(4)
+
+#define MT_PBF_CFG                     0x0404
+#define MT_PBF_CFG_TX0Q_EN             BIT(0)
+#define MT_PBF_CFG_TX1Q_EN             BIT(1)
+#define MT_PBF_CFG_TX2Q_EN             BIT(2)
+#define MT_PBF_CFG_TX3Q_EN             BIT(3)
+#define MT_PBF_CFG_RX0Q_EN             BIT(4)
+#define MT_PBF_CFG_RX_DROP_EN          BIT(8)
+
+#define MT_PBF_TX_MAX_PCNT             0x0408
+#define MT_PBF_RX_MAX_PCNT             0x040c
+
+#define MT_BCN_OFFSET_BASE             0x041c
+#define MT_BCN_OFFSET(_n)              (MT_BCN_OFFSET_BASE + ((_n) << 2))
+
+#define MT_RF_CSR_CFG                  0x0500
+#define MT_RF_CSR_CFG_DATA             GENMASK(7, 0)
+#define MT_RF_CSR_CFG_REG_ID           GENMASK(13, 8)
+#define MT_RF_CSR_CFG_REG_BANK         GENMASK(17, 14)
+#define MT_RF_CSR_CFG_WR               BIT(30)
+#define MT_RF_CSR_CFG_KICK             BIT(31)
+
+#define MT_RF_BYPASS_0                 0x0504
+#define MT_RF_BYPASS_1                 0x0508
+#define MT_RF_SETTING_0                        0x050c
+
+#define MT_RF_DATA_WRITE               0x0524
+
+#define MT_RF_CTRL                     0x0528
+#define MT_RF_CTRL_ADDR                        GENMASK(11, 0)
+#define MT_RF_CTRL_WRITE               BIT(12)
+#define MT_RF_CTRL_BUSY                        BIT(13)
+#define MT_RF_CTRL_IDX                 BIT(16)
+
+#define MT_RF_DATA_READ                        0x052c
+
+#define MT_FCE_PSE_CTRL                        0x0800
+#define MT_FCE_PARAMETERS              0x0804
+#define MT_FCE_CSO                     0x0808
+
+#define MT_FCE_L2_STUFF                        0x080c
+#define MT_FCE_L2_STUFF_HT_L2_EN       BIT(0)
+#define MT_FCE_L2_STUFF_QOS_L2_EN      BIT(1)
+#define MT_FCE_L2_STUFF_RX_STUFF_EN    BIT(2)
+#define MT_FCE_L2_STUFF_TX_STUFF_EN    BIT(3)
+#define MT_FCE_L2_STUFF_WR_MPDU_LEN_EN BIT(4)
+#define MT_FCE_L2_STUFF_MVINV_BSWAP    BIT(5)
+#define MT_FCE_L2_STUFF_TS_CMD_QSEL_EN GENMASK(15, 8)
+#define MT_FCE_L2_STUFF_TS_LEN_EN      GENMASK(23, 16)
+#define MT_FCE_L2_STUFF_OTHER_PORT     GENMASK(25, 24)
+
+#define MT_FCE_WLAN_FLOW_CONTROL1      0x0824
+
+#define MT_TX_CPU_FROM_FCE_BASE_PTR    0x09a0
+#define MT_TX_CPU_FROM_FCE_MAX_COUNT   0x09a4
+#define MT_TX_CPU_FROM_FCE_CPU_DESC_IDX        0x09a8
+
+#define MT_FCE_PDMA_GLOBAL_CONF                0x09c4
+
+#define MT_PAUSE_ENABLE_CONTROL1       0x0a38
+
+#define MT_FCE_SKIP_FS                 0x0a6c
+
+#define MT_MAC_CSR0                    0x1000
+
+#define MT_MAC_SYS_CTRL                        0x1004
+#define MT_MAC_SYS_CTRL_RESET_CSR      BIT(0)
+#define MT_MAC_SYS_CTRL_RESET_BBP      BIT(1)
+#define MT_MAC_SYS_CTRL_ENABLE_TX      BIT(2)
+#define MT_MAC_SYS_CTRL_ENABLE_RX      BIT(3)
+
+#define MT_MAC_ADDR_DW0                        0x1008
+#define MT_MAC_ADDR_DW1                        0x100c
+#define MT_MAC_ADDR_DW1_U2ME_MASK      GENMASK(23, 16)
+
+#define MT_MAC_BSSID_DW0               0x1010
+#define MT_MAC_BSSID_DW1               0x1014
+#define MT_MAC_BSSID_DW1_ADDR          GENMASK(15, 0)
+#define MT_MAC_BSSID_DW1_MBSS_MODE     GENMASK(17, 16)
+#define MT_MAC_BSSID_DW1_MBEACON_N     GENMASK(20, 18)
+#define MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT        BIT(21)
+#define MT_MAC_BSSID_DW1_MBSS_MODE_B2  BIT(22)
+#define MT_MAC_BSSID_DW1_MBEACON_N_B3  BIT(23)
+#define MT_MAC_BSSID_DW1_MBSS_IDX_BYTE GENMASK(26, 24)
+
+#define MT_MAX_LEN_CFG                 0x1018
+#define MT_MAX_LEN_CFG_AMPDU           GENMASK(13, 12)
+
+#define MT_BBP_CSR_CFG                 0x101c
+#define MT_BBP_CSR_CFG_VAL             GENMASK(7, 0)
+#define MT_BBP_CSR_CFG_REG_NUM         GENMASK(15, 8)
+#define MT_BBP_CSR_CFG_READ            BIT(16)
+#define MT_BBP_CSR_CFG_BUSY            BIT(17)
+#define MT_BBP_CSR_CFG_PAR_DUR         BIT(18)
+#define MT_BBP_CSR_CFG_RW_MODE         BIT(19)
+
+#define MT_AMPDU_MAX_LEN_20M1S         0x1030
+#define MT_AMPDU_MAX_LEN_20M2S         0x1034
+#define MT_AMPDU_MAX_LEN_40M1S         0x1038
+#define MT_AMPDU_MAX_LEN_40M2S         0x103c
+#define MT_AMPDU_MAX_LEN               0x1040
+
+#define MT_WCID_DROP_BASE              0x106c
+#define MT_WCID_DROP(_n)               (MT_WCID_DROP_BASE + ((_n) >> 5) * 4)
+#define MT_WCID_DROP_MASK(_n)          BIT((_n) % 32)
+
+#define MT_BCN_BYPASS_MASK             0x108c
+
+#define MT_MAC_APC_BSSID_BASE          0x1090
+#define MT_MAC_APC_BSSID_L(_n)         (MT_MAC_APC_BSSID_BASE + ((_n) * 8))
+#define MT_MAC_APC_BSSID_H(_n)         (MT_MAC_APC_BSSID_BASE + ((_n) * 8 + 4))
+#define MT_MAC_APC_BSSID_H_ADDR                GENMASK(15, 0)
+#define MT_MAC_APC_BSSID0_H_EN         BIT(16)
+
+#define MT_XIFS_TIME_CFG               0x1100
+#define MT_XIFS_TIME_CFG_CCK_SIFS      GENMASK(7, 0)
+#define MT_XIFS_TIME_CFG_OFDM_SIFS     GENMASK(15, 8)
+#define MT_XIFS_TIME_CFG_OFDM_XIFS     GENMASK(19, 16)
+#define MT_XIFS_TIME_CFG_EIFS          GENMASK(28, 20)
+#define MT_XIFS_TIME_CFG_BB_RXEND_EN   BIT(29)
+
+#define MT_BKOFF_SLOT_CFG              0x1104
+#define MT_BKOFF_SLOT_CFG_SLOTTIME     GENMASK(7, 0)
+#define MT_BKOFF_SLOT_CFG_CC_DELAY     GENMASK(11, 8)
+
+#define MT_BEACON_TIME_CFG             0x1114
+#define MT_BEACON_TIME_CFG_INTVAL      GENMASK(15, 0)
+#define MT_BEACON_TIME_CFG_TIMER_EN    BIT(16)
+#define MT_BEACON_TIME_CFG_SYNC_MODE   GENMASK(18, 17)
+#define MT_BEACON_TIME_CFG_TBTT_EN     BIT(19)
+#define MT_BEACON_TIME_CFG_BEACON_TX   BIT(20)
+#define MT_BEACON_TIME_CFG_TSF_COMP    GENMASK(31, 24)
+
+#define MT_TBTT_SYNC_CFG               0x1118
+#define MT_TBTT_TIMER_CFG              0x1124
+
+#define MT_INT_TIMER_CFG               0x1128
+#define MT_INT_TIMER_CFG_PRE_TBTT      GENMASK(15, 0)
+#define MT_INT_TIMER_CFG_GP_TIMER      GENMASK(31, 16)
+
+#define MT_INT_TIMER_EN                        0x112c
+#define MT_INT_TIMER_EN_PRE_TBTT_EN    BIT(0)
+#define MT_INT_TIMER_EN_GP_TIMER_EN    BIT(1)
+
+#define MT_MAC_STATUS                  0x1200
+#define MT_MAC_STATUS_TX               BIT(0)
+#define MT_MAC_STATUS_RX               BIT(1)
+
+#define MT_PWR_PIN_CFG                 0x1204
+#define MT_AUX_CLK_CFG                 0x120c
+
+#define MT_BB_PA_MODE_CFG0             0x1214
+#define MT_BB_PA_MODE_CFG1             0x1218
+#define MT_RF_PA_MODE_CFG0             0x121c
+#define MT_RF_PA_MODE_CFG1             0x1220
+
+#define MT_RF_PA_MODE_ADJ0             0x1228
+#define MT_RF_PA_MODE_ADJ1             0x122c
+
+#define MT_DACCLK_EN_DLY_CFG           0x1264
+
+#define MT_EDCA_CFG_BASE               0x1300
+#define MT_EDCA_CFG_AC(_n)             (MT_EDCA_CFG_BASE + ((_n) << 2))
+#define MT_EDCA_CFG_TXOP               GENMASK(7, 0)
+#define MT_EDCA_CFG_AIFSN              GENMASK(11, 8)
+#define MT_EDCA_CFG_CWMIN              GENMASK(15, 12)
+#define MT_EDCA_CFG_CWMAX              GENMASK(19, 16)
+
+#define MT_TX_PWR_CFG_0                        0x1314
+#define MT_TX_PWR_CFG_1                        0x1318
+#define MT_TX_PWR_CFG_2                        0x131c
+#define MT_TX_PWR_CFG_3                        0x1320
+#define MT_TX_PWR_CFG_4                        0x1324
+
+#define MT_TX_BAND_CFG                 0x132c
+#define MT_TX_BAND_CFG_UPPER_40M       BIT(0)
+#define MT_TX_BAND_CFG_5G              BIT(1)
+#define MT_TX_BAND_CFG_2G              BIT(2)
+
+#define MT_HT_FBK_TO_LEGACY            0x1384
+#define MT_TX_MPDU_ADJ_INT             0x1388
+
+#define MT_TX_PWR_CFG_7                        0x13d4
+#define MT_TX_PWR_CFG_8                        0x13d8
+#define MT_TX_PWR_CFG_9                        0x13dc
+
+#define MT_TX_SW_CFG0                  0x1330
+#define MT_TX_SW_CFG1                  0x1334
+#define MT_TX_SW_CFG2                  0x1338
+
+#define MT_TXOP_CTRL_CFG               0x1340
+#define MT_TXOP_TRUN_EN                        GENMASK(5, 0)
+#define MT_TXOP_EXT_CCA_DLY            GENMASK(15, 8)
+#define MT_TXOP_CTRL
+
+#define MT_TX_RTS_CFG                  0x1344
+#define MT_TX_RTS_CFG_RETRY_LIMIT      GENMASK(7, 0)
+#define MT_TX_RTS_CFG_THRESH           GENMASK(23, 8)
+#define MT_TX_RTS_FALLBACK             BIT(24)
+
+#define MT_TX_TIMEOUT_CFG              0x1348
+#define MT_TX_RETRY_CFG                        0x134c
+#define MT_TX_LINK_CFG                 0x1350
+#define MT_HT_FBK_CFG0                 0x1354
+#define MT_HT_FBK_CFG1                 0x1358
+#define MT_LG_FBK_CFG0                 0x135c
+#define MT_LG_FBK_CFG1                 0x1360
+
+#define MT_CCK_PROT_CFG                        0x1364
+#define MT_OFDM_PROT_CFG               0x1368
+#define MT_MM20_PROT_CFG               0x136c
+#define MT_MM40_PROT_CFG               0x1370
+#define MT_GF20_PROT_CFG               0x1374
+#define MT_GF40_PROT_CFG               0x1378
+
+#define MT_PROT_RATE                   GENMASK(15, 0)
+#define MT_PROT_CTRL_RTS_CTS           BIT(16)
+#define MT_PROT_CTRL_CTS2SELF          BIT(17)
+#define MT_PROT_NAV_SHORT              BIT(18)
+#define MT_PROT_NAV_LONG               BIT(19)
+#define MT_PROT_TXOP_ALLOW_CCK         BIT(20)
+#define MT_PROT_TXOP_ALLOW_OFDM                BIT(21)
+#define MT_PROT_TXOP_ALLOW_MM20                BIT(22)
+#define MT_PROT_TXOP_ALLOW_MM40                BIT(23)
+#define MT_PROT_TXOP_ALLOW_GF20                BIT(24)
+#define MT_PROT_TXOP_ALLOW_GF40                BIT(25)
+#define MT_PROT_RTS_THR_EN             BIT(26)
+#define MT_PROT_RATE_CCK_11            0x0003
+#define MT_PROT_RATE_OFDM_6            0x4000
+#define MT_PROT_RATE_OFDM_24           0x4004
+#define MT_PROT_RATE_DUP_OFDM_24       0x4084
+#define MT_PROT_TXOP_ALLOW_ALL         GENMASK(25, 20)
+#define MT_PROT_TXOP_ALLOW_BW20                (MT_PROT_TXOP_ALLOW_ALL &       \
+                                        ~MT_PROT_TXOP_ALLOW_MM40 &     \
+                                        ~MT_PROT_TXOP_ALLOW_GF40)
+
+#define MT_EXP_ACK_TIME                        0x1380
+
+#define MT_TX_PWR_CFG_0_EXT            0x1390
+#define MT_TX_PWR_CFG_1_EXT            0x1394
+
+#define MT_TX_FBK_LIMIT                        0x1398
+#define MT_TX_FBK_LIMIT_MPDU_FBK       GENMASK(7, 0)
+#define MT_TX_FBK_LIMIT_AMPDU_FBK      GENMASK(15, 8)
+#define MT_TX_FBK_LIMIT_MPDU_UP_CLEAR  BIT(16)
+#define MT_TX_FBK_LIMIT_AMPDU_UP_CLEAR BIT(17)
+#define MT_TX_FBK_LIMIT_RATE_LUT       BIT(18)
+
+#define MT_TX0_RF_GAIN_CORR            0x13a0
+#define MT_TX1_RF_GAIN_CORR            0x13a4
+#define MT_TX0_RF_GAIN_ATTEN           0x13a8
+
+#define MT_TX_ALC_CFG_0                        0x13b0
+#define MT_TX_ALC_CFG_0_CH_INIT_0      GENMASK(5, 0)
+#define MT_TX_ALC_CFG_0_CH_INIT_1      GENMASK(13, 8)
+#define MT_TX_ALC_CFG_0_LIMIT_0                GENMASK(21, 16)
+#define MT_TX_ALC_CFG_0_LIMIT_1                GENMASK(29, 24)
+
+#define MT_TX_ALC_CFG_1                        0x13b4
+#define MT_TX_ALC_CFG_1_TEMP_COMP      GENMASK(5, 0)
+
+#define MT_TX_ALC_CFG_2                        0x13a8
+#define MT_TX_ALC_CFG_2_TEMP_COMP      GENMASK(5, 0)
+
+#define MT_TX0_BB_GAIN_ATTEN           0x13c0
+
+#define MT_TX_ALC_VGA3                 0x13c8
+
+#define MT_TX_PROT_CFG6                        0x13e0
+#define MT_TX_PROT_CFG7                        0x13e4
+#define MT_TX_PROT_CFG8                        0x13e8
+
+#define MT_PIFS_TX_CFG                 0x13ec
+
+#define MT_RX_FILTR_CFG                        0x1400
+
+#define MT_RX_FILTR_CFG_CRC_ERR                BIT(0)
+#define MT_RX_FILTR_CFG_PHY_ERR                BIT(1)
+#define MT_RX_FILTR_CFG_PROMISC                BIT(2)
+#define MT_RX_FILTR_CFG_OTHER_BSS      BIT(3)
+#define MT_RX_FILTR_CFG_VER_ERR                BIT(4)
+#define MT_RX_FILTR_CFG_MCAST          BIT(5)
+#define MT_RX_FILTR_CFG_BCAST          BIT(6)
+#define MT_RX_FILTR_CFG_DUP            BIT(7)
+#define MT_RX_FILTR_CFG_CFACK          BIT(8)
+#define MT_RX_FILTR_CFG_CFEND          BIT(9)
+#define MT_RX_FILTR_CFG_ACK            BIT(10)
+#define MT_RX_FILTR_CFG_CTS            BIT(11)
+#define MT_RX_FILTR_CFG_RTS            BIT(12)
+#define MT_RX_FILTR_CFG_PSPOLL         BIT(13)
+#define MT_RX_FILTR_CFG_BA             BIT(14)
+#define MT_RX_FILTR_CFG_BAR            BIT(15)
+#define MT_RX_FILTR_CFG_CTRL_RSV       BIT(16)
+
+#define MT_AUTO_RSP_CFG                        0x1404
+
+#define MT_AUTO_RSP_PREAMB_SHORT       BIT(4)
+
+#define MT_LEGACY_BASIC_RATE           0x1408
+#define MT_HT_BASIC_RATE               0x140c
+
+#define MT_RX_PARSER_CFG               0x1418
+#define MT_RX_PARSER_RX_SET_NAV_ALL    BIT(0)
+
+#define MT_EXT_CCA_CFG                 0x141c
+#define MT_EXT_CCA_CFG_CCA0            GENMASK(1, 0)
+#define MT_EXT_CCA_CFG_CCA1            GENMASK(3, 2)
+#define MT_EXT_CCA_CFG_CCA2            GENMASK(5, 4)
+#define MT_EXT_CCA_CFG_CCA3            GENMASK(7, 6)
+#define MT_EXT_CCA_CFG_CCA_MASK                GENMASK(11, 8)
+#define MT_EXT_CCA_CFG_ED_CCA_MASK     GENMASK(15, 12)
+
+#define MT_TX_SW_CFG3                  0x1478
+
+#define MT_PN_PAD_MODE                 0x150c
+
+#define MT_TXOP_HLDR_ET                        0x1608
+
+#define MT_PROT_AUTO_TX_CFG            0x1648
+
+#define MT_RX_STA_CNT0                 0x1700
+#define MT_RX_STA_CNT1                 0x1704
+#define MT_RX_STA_CNT2                 0x1708
+#define MT_TX_STA_CNT0                 0x170c
+#define MT_TX_STA_CNT1                 0x1710
+#define MT_TX_STA_CNT2                 0x1714
+
+/* Vendor driver defines content of the second word of STAT_FIFO as follows:
+ *     MT_TX_STAT_FIFO_RATE            GENMASK(26, 16)
+ *     MT_TX_STAT_FIFO_ETXBF           BIT(27)
+ *     MT_TX_STAT_FIFO_SND             BIT(28)
+ *     MT_TX_STAT_FIFO_ITXBF           BIT(29)
+ * However, tests show that bits 16-31 have the same layout as TXWI rate_ctl,
+ * with the rate field set to the rate at which the frame was ACKed.
+ */
+#define MT_TX_STAT_FIFO                        0x1718
+#define MT_TX_STAT_FIFO_VALID          BIT(0)
+#define MT_TX_STAT_FIFO_PID_TYPE       GENMASK(4, 1)
+#define MT_TX_STAT_FIFO_SUCCESS                BIT(5)
+#define MT_TX_STAT_FIFO_AGGR           BIT(6)
+#define MT_TX_STAT_FIFO_ACKREQ         BIT(7)
+#define MT_TX_STAT_FIFO_WCID           GENMASK(15, 8)
+#define MT_TX_STAT_FIFO_RATE           GENMASK(31, 16)
+
+#define MT_TX_AGG_STAT                 0x171c
+
+#define MT_TX_AGG_CNT_BASE0            0x1720
+
+#define MT_MPDU_DENSITY_CNT            0x1740
+
+#define MT_TX_AGG_CNT_BASE1            0x174c
+
+#define MT_TX_AGG_CNT(_id)             ((_id) < 8 ?                    \
+                                        MT_TX_AGG_CNT_BASE0 + ((_id) << 2) : \
+                                        MT_TX_AGG_CNT_BASE1 + (((_id) - 8) << 2))
+
+#define MT_TX_STAT_FIFO_EXT            0x1798
+#define MT_TX_STAT_FIFO_EXT_RETRY      GENMASK(7, 0)
+
+#define MT_BBP_CORE_BASE               0x2000
+#define MT_BBP_IBI_BASE                        0x2100
+#define MT_BBP_AGC_BASE                        0x2300
+#define MT_BBP_TXC_BASE                        0x2400
+#define MT_BBP_RXC_BASE                        0x2500
+#define MT_BBP_TXO_BASE                        0x2600
+#define MT_BBP_TXBE_BASE               0x2700
+#define MT_BBP_RXFE_BASE               0x2800
+#define MT_BBP_RXO_BASE                        0x2900
+#define MT_BBP_DFS_BASE                        0x2a00
+#define MT_BBP_TR_BASE                 0x2b00
+#define MT_BBP_CAL_BASE                        0x2c00
+#define MT_BBP_DSC_BASE                        0x2e00
+#define MT_BBP_PFMU_BASE               0x2f00
+
+#define MT_BBP(_type, _n)              (MT_BBP_##_type##_BASE + ((_n) << 2))
+
+#define MT_BBP_CORE_R1_BW              GENMASK(4, 3)
+
+#define MT_BBP_AGC_R0_CTRL_CHAN                GENMASK(9, 8)
+#define MT_BBP_AGC_R0_BW               GENMASK(14, 12)
+
+/* AGC, R4/R5 */
+#define MT_BBP_AGC_LNA_GAIN            GENMASK(21, 16)
+
+/* AGC, R8/R9 */
+#define MT_BBP_AGC_GAIN                        GENMASK(14, 8)
+
+#define MT_BBP_AGC20_RSSI0             GENMASK(7, 0)
+#define MT_BBP_AGC20_RSSI1             GENMASK(15, 8)
+
+#define MT_BBP_TXBE_R0_CTRL_CHAN       GENMASK(1, 0)
+
+#define MT_WCID_ADDR_BASE              0x1800
+#define MT_WCID_ADDR(_n)               (MT_WCID_ADDR_BASE + (_n) * 8)
+
+#define MT_SRAM_BASE                   0x4000
+
+#define MT_WCID_KEY_BASE               0x8000
+#define MT_WCID_KEY(_n)                        (MT_WCID_KEY_BASE + (_n) * 32)
+
+#define MT_WCID_IV_BASE                        0xa000
+#define MT_WCID_IV(_n)                 (MT_WCID_IV_BASE + (_n) * 8)
+
+#define MT_WCID_ATTR_BASE              0xa800
+#define MT_WCID_ATTR(_n)               (MT_WCID_ATTR_BASE + (_n) * 4)
+
+#define MT_WCID_ATTR_PAIRWISE          BIT(0)
+#define MT_WCID_ATTR_PKEY_MODE         GENMASK(3, 1)
+#define MT_WCID_ATTR_BSS_IDX           GENMASK(6, 4)
+#define MT_WCID_ATTR_RXWI_UDF          GENMASK(9, 7)
+#define MT_WCID_ATTR_PKEY_MODE_EXT     BIT(10)
+#define MT_WCID_ATTR_BSS_IDX_EXT       BIT(11)
+#define MT_WCID_ATTR_WAPI_MCBC         BIT(15)
+#define MT_WCID_ATTR_WAPI_KEYID                GENMASK(31, 24)
+
+#define MT_SKEY_BASE_0                 0xac00
+#define MT_SKEY_BASE_1                 0xb400
+#define MT_SKEY_0(_bss, _idx)          \
+       (MT_SKEY_BASE_0 + (4 * (_bss) + _idx) * 32)
+#define MT_SKEY_1(_bss, _idx)          \
+       (MT_SKEY_BASE_1 + (4 * ((_bss) & 7) + _idx) * 32)
+#define MT_SKEY(_bss, _idx)            \
+       ((_bss & 8) ? MT_SKEY_1(_bss, _idx) : MT_SKEY_0(_bss, _idx))
+
+#define MT_SKEY_MODE_BASE_0            0xb000
+#define MT_SKEY_MODE_BASE_1            0xb3f0
+#define MT_SKEY_MODE_0(_bss)           \
+       (MT_SKEY_MODE_BASE_0 + ((_bss / 2) << 2))
+#define MT_SKEY_MODE_1(_bss)           \
+       (MT_SKEY_MODE_BASE_1 + ((((_bss) & 7) / 2) << 2))
+#define MT_SKEY_MODE(_bss)             \
+       ((_bss & 8) ? MT_SKEY_MODE_1(_bss) : MT_SKEY_MODE_0(_bss))
+#define MT_SKEY_MODE_MASK              GENMASK(3, 0)
+#define MT_SKEY_MODE_SHIFT(_bss, _idx) (4 * ((_idx) + 4 * (_bss & 1)))
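
A worked example of the bank split (editor's illustration): BSS index 9 has
bit 3 set, so MT_SKEY(9, 2) resolves to MT_SKEY_1(9, 2) = 0xb400 +
(4 * 1 + 2) * 32 = 0xb4c0, while MT_SKEY(3, 2) stays in bank 0 at
0xac00 + (4 * 3 + 2) * 32 = 0xadc0.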
+
+#define MT_BEACON_BASE                 0xc000
+
+#define MT_TEMP_SENSOR                 0x1d000
+#define MT_TEMP_SENSOR_VAL             GENMASK(6, 0)
+
+enum mt76_cipher_type {
+       MT_CIPHER_NONE,
+       MT_CIPHER_WEP40,
+       MT_CIPHER_WEP104,
+       MT_CIPHER_TKIP,
+       MT_CIPHER_AES_CCMP,
+       MT_CIPHER_CKIP40,
+       MT_CIPHER_CKIP104,
+       MT_CIPHER_CKIP128,
+       MT_CIPHER_WAPI,
+};
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt7601u/trace.c b/drivers/net/wireless/mediatek/mt7601u/trace.c
new file mode 100644 (file)
index 0000000..8abdd3c
--- /dev/null
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt7601u/trace.h b/drivers/net/wireless/mediatek/mt7601u/trace.h
new file mode 100644 (file)
index 0000000..2898973
--- /dev/null
@@ -0,0 +1,400 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#if !defined(__MT7601U_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __MT7601U_TRACE_H
+
+#include <linux/tracepoint.h>
+#include "mt7601u.h"
+#include "mac.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mt7601u
+
+#define MAXNAME                32
+#define DEV_ENTRY      __array(char, wiphy_name, MAXNAME)
+#define DEV_ASSIGN     strlcpy(__entry->wiphy_name,                    \
+                               wiphy_name(dev->hw->wiphy), MAXNAME)
+#define DEV_PR_FMT     "%s "
+#define DEV_PR_ARG     __entry->wiphy_name
+
+#define REG_ENTRY      __field(u32, reg) __field(u32, val)
+#define REG_ASSIGN     __entry->reg = reg; __entry->val = val
+#define REG_PR_FMT     "%04x=%08x"
+#define REG_PR_ARG     __entry->reg, __entry->val
+
+DECLARE_EVENT_CLASS(dev_reg_evt,
+       TP_PROTO(struct mt7601u_dev *dev, u32 reg, u32 val),
+       TP_ARGS(dev, reg, val),
+       TP_STRUCT__entry(
+               DEV_ENTRY
+               REG_ENTRY
+       ),
+       TP_fast_assign(
+               DEV_ASSIGN;
+               REG_ASSIGN;
+       ),
+       TP_printk(
+               DEV_PR_FMT REG_PR_FMT,
+               DEV_PR_ARG, REG_PR_ARG
+       )
+);
+
+DEFINE_EVENT(dev_reg_evt, reg_read,
+       TP_PROTO(struct mt7601u_dev *dev, u32 reg, u32 val),
+       TP_ARGS(dev, reg, val)
+);
+
+DEFINE_EVENT(dev_reg_evt, reg_write,
+       TP_PROTO(struct mt7601u_dev *dev, u32 reg, u32 val),
+       TP_ARGS(dev, reg, val)
+);
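
With DEV_ENTRY and REG_ENTRY combined, a register event renders along these
lines in the trace buffer (hypothetical output; the wiphy name and values
depend on the system):

	# cat /sys/kernel/debug/tracing/trace
	... reg_write: phy0 1004=0000000c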
+
+TRACE_EVENT(mt_submit_urb,
+       TP_PROTO(struct mt7601u_dev *dev, struct urb *u),
+       TP_ARGS(dev, u),
+       TP_STRUCT__entry(
+               DEV_ENTRY __field(unsigned, pipe) __field(u32, len)
+       ),
+       TP_fast_assign(
+               DEV_ASSIGN;
+               __entry->pipe = u->pipe;
+               __entry->len = u->transfer_buffer_length;
+       ),
+       TP_printk(DEV_PR_FMT "p:%08x len:%u",
+                 DEV_PR_ARG, __entry->pipe, __entry->len)
+);
+
+#define trace_mt_submit_urb_sync(__dev, __pipe, __len) ({      \
+       struct urb u;                                   \
+       u.pipe = __pipe;                                \
+       u.transfer_buffer_length = __len;               \
+       trace_mt_submit_urb(__dev, &u);                 \
+})
+
+TRACE_EVENT(mt_mcu_msg_send,
+       TP_PROTO(struct mt7601u_dev *dev,
+                struct sk_buff *skb, u32 csum, bool resp),
+       TP_ARGS(dev, skb, csum, resp),
+       TP_STRUCT__entry(
+               DEV_ENTRY
+               __field(u32, info)
+               __field(u32, csum)
+               __field(bool, resp)
+       ),
+       TP_fast_assign(
+               DEV_ASSIGN;
+               __entry->info = *(u32 *)skb->data;
+               __entry->csum = csum;
+               __entry->resp = resp;
+       ),
+       TP_printk(DEV_PR_FMT "i:%08x c:%08x r:%d",
+                 DEV_PR_ARG, __entry->info, __entry->csum, __entry->resp)
+);
+
+TRACE_EVENT(mt_vend_req,
+       TP_PROTO(struct mt7601u_dev *dev, unsigned pipe, u8 req, u8 req_type,
+                u16 val, u16 offset, void *buf, size_t buflen, int ret),
+       TP_ARGS(dev, pipe, req, req_type, val, offset, buf, buflen, ret),
+       TP_STRUCT__entry(
+               DEV_ENTRY
+               __field(unsigned, pipe) __field(u8, req) __field(u8, req_type)
+               __field(u16, val) __field(u16, offset) __field(void*, buf)
+               __field(int, buflen) __field(int, ret)
+       ),
+       TP_fast_assign(
+               DEV_ASSIGN;
+               __entry->pipe = pipe;
+               __entry->req = req;
+               __entry->req_type = req_type;
+               __entry->val = val;
+               __entry->offset = offset;
+               __entry->buf = buf;
+               __entry->buflen = buflen;
+               __entry->ret = ret;
+       ),
+       TP_printk(DEV_PR_FMT
+                 "%d p:%08x req:%02hhx %02hhx val:%04hx %04hx buf:%d %d",
+                 DEV_PR_ARG, __entry->ret, __entry->pipe, __entry->req,
+                 __entry->req_type, __entry->val, __entry->offset,
+                 !!__entry->buf, __entry->buflen)
+);
+
+TRACE_EVENT(ee_read,
+       TP_PROTO(struct mt7601u_dev *dev, int offset, u16 val),
+       TP_ARGS(dev, offset, val),
+       TP_STRUCT__entry(
+               DEV_ENTRY
+               __field(int, o) __field(u16, v)
+       ),
+       TP_fast_assign(
+               DEV_ASSIGN;
+               __entry->o = offset;
+               __entry->v = val;
+       ),
+       TP_printk(DEV_PR_FMT "%04x=%04x", DEV_PR_ARG, __entry->o, __entry->v)
+);
+
+DECLARE_EVENT_CLASS(dev_rf_reg_evt,
+       TP_PROTO(struct mt7601u_dev *dev, u8 bank, u8 reg, u8 val),
+       TP_ARGS(dev, bank, reg, val),
+       TP_STRUCT__entry(
+               DEV_ENTRY
+               __field(u8, bank)
+               __field(u8, reg)
+               __field(u8, val)
+       ),
+       TP_fast_assign(
+               DEV_ASSIGN;
+               REG_ASSIGN;
+               __entry->bank = bank;
+       ),
+       TP_printk(
+               DEV_PR_FMT "%02hhx:%02hhx=%02hhx",
+               DEV_PR_ARG, __entry->bank, __entry->reg, __entry->val
+       )
+);
+
+DEFINE_EVENT(dev_rf_reg_evt, rf_read,
+       TP_PROTO(struct mt7601u_dev *dev, u8 bank, u8 reg, u8 val),
+       TP_ARGS(dev, bank, reg, val)
+);
+
+DEFINE_EVENT(dev_rf_reg_evt, rf_write,
+       TP_PROTO(struct mt7601u_dev *dev, u8 bank, u8 reg, u8 val),
+       TP_ARGS(dev, bank, reg, val)
+);
+
+DECLARE_EVENT_CLASS(dev_bbp_reg_evt,
+       TP_PROTO(struct mt7601u_dev *dev, u8 reg, u8 val),
+       TP_ARGS(dev, reg, val),
+       TP_STRUCT__entry(
+               DEV_ENTRY
+               __field(u8, reg)
+               __field(u8, val)
+       ),
+       TP_fast_assign(
+               DEV_ASSIGN;
+               REG_ASSIGN;
+       ),
+       TP_printk(
+               DEV_PR_FMT "%02hhx=%02hhx",
+               DEV_PR_ARG, __entry->reg, __entry->val
+       )
+);
+
+DEFINE_EVENT(dev_bbp_reg_evt, bbp_read,
+       TP_PROTO(struct mt7601u_dev *dev, u8 reg, u8 val),
+       TP_ARGS(dev, reg, val)
+);
+
+DEFINE_EVENT(dev_bbp_reg_evt, bbp_write,
+       TP_PROTO(struct mt7601u_dev *dev, u8 reg, u8 val),
+       TP_ARGS(dev, reg, val)
+);
+
+DECLARE_EVENT_CLASS(dev_simple_evt,
+       TP_PROTO(struct mt7601u_dev *dev, u8 val),
+       TP_ARGS(dev, val),
+       TP_STRUCT__entry(
+               DEV_ENTRY
+               __field(u8, val)
+       ),
+       TP_fast_assign(
+               DEV_ASSIGN;
+               __entry->val = val;
+       ),
+       TP_printk(
+               DEV_PR_FMT "%02hhx", DEV_PR_ARG, __entry->val
+       )
+);
+
+DEFINE_EVENT(dev_simple_evt, temp_mode,
+       TP_PROTO(struct mt7601u_dev *dev, u8 val),
+       TP_ARGS(dev, val)
+);
+
+DEFINE_EVENT(dev_simple_evt, read_temp,
+       TP_PROTO(struct mt7601u_dev *dev, u8 val),
+       TP_ARGS(dev, val)
+);
+
+DEFINE_EVENT(dev_simple_evt, freq_cal_adjust,
+       TP_PROTO(struct mt7601u_dev *dev, u8 val),
+       TP_ARGS(dev, val)
+);
+
+TRACE_EVENT(freq_cal_offset,
+       TP_PROTO(struct mt7601u_dev *dev, u8 phy_mode, s8 freq_off),
+       TP_ARGS(dev, phy_mode, freq_off),
+       TP_STRUCT__entry(
+               DEV_ENTRY
+               __field(u8, phy_mode)
+               __field(s8, freq_off)
+       ),
+       TP_fast_assign(
+               DEV_ASSIGN;
+               __entry->phy_mode = phy_mode;
+               __entry->freq_off = freq_off;
+       ),
+       TP_printk(DEV_PR_FMT "phy:%02hhx off:%02hhx",
+                 DEV_PR_ARG, __entry->phy_mode, __entry->freq_off)
+);
+
+TRACE_EVENT(mt_rx,
+       TP_PROTO(struct mt7601u_dev *dev, struct mt7601u_rxwi *rxwi, u32 f),
+       TP_ARGS(dev, rxwi, f),
+       TP_STRUCT__entry(
+               DEV_ENTRY
+               __field_struct(struct mt7601u_rxwi, rxwi)
+               __field(u32, fce_info)
+       ),
+       TP_fast_assign(
+               DEV_ASSIGN;
+               __entry->rxwi = *rxwi;
+               __entry->fce_info = f;
+       ),
+       TP_printk(DEV_PR_FMT "rxi:%08x ctl:%08x frag_sn:%04hx rate:%04hx "
+                 "unkn:%02hhx z:%02hhx%02hhx%02hhx snr:%02hhx "
+                 "ant:%02hhx gain:%02hhx freq_o:%02hhx "
+                 "r:%08x ea:%08x fce:%08x", DEV_PR_ARG,
+                 le32_to_cpu(__entry->rxwi.rxinfo),
+                 le32_to_cpu(__entry->rxwi.ctl),
+                 le16_to_cpu(__entry->rxwi.frag_sn),
+                 le16_to_cpu(__entry->rxwi.rate),
+                 __entry->rxwi.unknown,
+                 __entry->rxwi.zero[0], __entry->rxwi.zero[1],
+                 __entry->rxwi.zero[2],
+                 __entry->rxwi.snr, __entry->rxwi.ant,
+                 __entry->rxwi.gain, __entry->rxwi.freq_off,
+                 __entry->rxwi.resv2, __entry->rxwi.expert_ant,
+                 __entry->fce_info)
+);
+
+TRACE_EVENT(mt_tx,
+       TP_PROTO(struct mt7601u_dev *dev, struct sk_buff *skb,
+                struct mt76_sta *sta, struct mt76_txwi *h),
+       TP_ARGS(dev, skb, sta, h),
+       TP_STRUCT__entry(
+               DEV_ENTRY
+               __field_struct(struct mt76_txwi, h)
+               __field(struct sk_buff *, skb)
+               __field(struct mt76_sta *, sta)
+       ),
+       TP_fast_assign(
+               DEV_ASSIGN;
+               __entry->h = *h;
+               __entry->skb = skb;
+               __entry->sta = sta;
+       ),
+       TP_printk(DEV_PR_FMT "skb:%p sta:%p  flg:%04hx rate_ctl:%04hx "
+                 "ack:%02hhx wcid:%02hhx len_ctl:%05hx", DEV_PR_ARG,
+                 __entry->skb, __entry->sta,
+                 le16_to_cpu(__entry->h.flags),
+                 le16_to_cpu(__entry->h.rate_ctl),
+                 __entry->h.ack_ctl, __entry->h.wcid,
+                 le16_to_cpu(__entry->h.len_ctl))
+);
+
+TRACE_EVENT(mt_tx_dma_done,
+       TP_PROTO(struct mt7601u_dev *dev, struct sk_buff *skb),
+       TP_ARGS(dev, skb),
+       TP_STRUCT__entry(
+               DEV_ENTRY
+               __field(struct sk_buff *, skb)
+       ),
+       TP_fast_assign(
+               DEV_ASSIGN;
+               __entry->skb = skb;
+       ),
+       TP_printk(DEV_PR_FMT "%p", DEV_PR_ARG, __entry->skb)
+);
+
+TRACE_EVENT(mt_tx_status_cleaned,
+       TP_PROTO(struct mt7601u_dev *dev, int cleaned),
+       TP_ARGS(dev, cleaned),
+       TP_STRUCT__entry(
+               DEV_ENTRY
+               __field(int, cleaned)
+       ),
+       TP_fast_assign(
+               DEV_ASSIGN;
+               __entry->cleaned = cleaned;
+       ),
+       TP_printk(DEV_PR_FMT "%d", DEV_PR_ARG, __entry->cleaned)
+);
+
+TRACE_EVENT(mt_tx_status,
+       TP_PROTO(struct mt7601u_dev *dev, u32 stat1, u32 stat2),
+       TP_ARGS(dev, stat1, stat2),
+       TP_STRUCT__entry(
+               DEV_ENTRY
+               __field(u32, stat1)     __field(u32, stat2)
+       ),
+       TP_fast_assign(
+               DEV_ASSIGN;
+               __entry->stat1 = stat1;
+               __entry->stat2 = stat2;
+       ),
+       TP_printk(DEV_PR_FMT "%08x %08x",
+                 DEV_PR_ARG, __entry->stat1, __entry->stat2)
+);
+
+TRACE_EVENT(mt_rx_dma_aggr,
+       TP_PROTO(struct mt7601u_dev *dev, int cnt, bool paged),
+       TP_ARGS(dev, cnt, paged),
+       TP_STRUCT__entry(
+               DEV_ENTRY
+               __field(u8, cnt)
+               __field(bool, paged)
+       ),
+       TP_fast_assign(
+               DEV_ASSIGN;
+               __entry->cnt = cnt;
+               __entry->paged = paged;
+       ),
+       TP_printk(DEV_PR_FMT "cnt:%d paged:%d",
+                 DEV_PR_ARG, __entry->cnt, __entry->paged)
+);
+
+DEFINE_EVENT(dev_simple_evt, set_key,
+       TP_PROTO(struct mt7601u_dev *dev, u8 val),
+       TP_ARGS(dev, val)
+);
+
+TRACE_EVENT(set_shared_key,
+       TP_PROTO(struct mt7601u_dev *dev, u8 vid, u8 key),
+       TP_ARGS(dev, vid, key),
+       TP_STRUCT__entry(
+               DEV_ENTRY
+               __field(u8, vid)
+               __field(u8, key)
+       ),
+       TP_fast_assign(
+               DEV_ASSIGN;
+               __entry->vid = vid;
+               __entry->key = key;
+       ),
+       TP_printk(DEV_PR_FMT "vid:%02hhx key:%02hhx",
+                 DEV_PR_ARG, __entry->vid, __entry->key)
+);
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/mediatek/mt7601u/tx.c b/drivers/net/wireless/mediatek/mt7601u/tx.c
new file mode 100644 (file)
index 0000000..0be2080
--- /dev/null
@@ -0,0 +1,319 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt7601u.h"
+#include "trace.h"
+
+enum mt76_txq_id {
+       MT_TXQ_VO = IEEE80211_AC_VO,
+       MT_TXQ_VI = IEEE80211_AC_VI,
+       MT_TXQ_BE = IEEE80211_AC_BE,
+       MT_TXQ_BK = IEEE80211_AC_BK,
+       MT_TXQ_PSD,
+       MT_TXQ_MCU,
+       __MT_TXQ_MAX
+};
+
+/* Hardware uses mirrored order of queues with Q0 having the highest priority */
+static u8 q2hwq(u8 q)
+{
+       return q ^ 0x3;
+}
+
+/* Take mac80211 Q id from the skb and translate it to hardware Q id */
+static u8 skb2q(struct sk_buff *skb)
+{
+       int qid = skb_get_queue_mapping(skb);
+
+       if (WARN_ON(qid >= MT_TXQ_PSD)) {
+               qid = MT_TXQ_BE;
+               skb_set_queue_mapping(skb, qid);
+       }
+
+       return q2hwq(qid);
+}
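
Since XOR with 0x3 mirrors the two-bit queue index, q2hwq() maps 0 to 3 and
1 to 2 (and vice versa), and applying it twice returns the original queue.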
+
+/* Note: TX retry reporting is a bit broken.
+ *      Retries are reported only once per AMPDU and often come a frame
+ *      early, i.e. they are reported in the last status preceding the
+ *      AMPDU. Apart from the fact that it's hard to know the length of
+ *      the AMPDU (which one needs in order to know how many consecutive
+ *      frames the retries apply to), if a status comes early on a full
+ *      FIFO it gets lost and the retries of the whole AMPDU become
+ *      invisible.
+ *      As a workaround, encode the desired rate in the PKT_ID of the TX
+ *      descriptor and guess the retries from it (every rate is tried
+ *      once).
+ *      The only downside is that for MCS0 we have to rely solely on
+ *      transmission failures, since no retries can ever be reported.
+ *      Not having to read EXT_FIFO has the nice side effect of doubling
+ *      the number of reports which can be fetched.
+ *      Also, the vendor driver never uses the EXT_FIFO register, so it
+ *      may be undertested.
+ */
+static u8 mt7601u_tx_pktid_enc(struct mt7601u_dev *dev, u8 rate, bool is_probe)
+{
+       u8 encoded = (rate + 1) + is_probe * 8;
+
+       /* Because PKT_ID 0 disables status reporting, only 15 values are
+        * available while 16 are needed (8 MCS * 2 for encoding is_probe),
+        * so we need to cram two rates together: MCS0 and MCS7 with
+        * is_probe share PKT_ID 9.
+        */
+       if (is_probe && rate == 7)
+               return encoded - 7;
+
+       return encoded;
+}
+
+static void
+mt7601u_tx_pktid_dec(struct mt7601u_dev *dev, struct mt76_tx_status *stat)
+{
+       u8 req_rate = stat->pktid;
+       u8 eff_rate = stat->rate & 0x7;
+
+       req_rate -= 1;
+
+       if (req_rate > 7) {
+               stat->is_probe = true;
+               req_rate -= 8;
+
+               /* Decide between MCS0 and MCS7 which share pktid 9 */
+               if (!req_rate && eff_rate)
+                       req_rate = 7;
+       }
+
+       stat->retry = req_rate - eff_rate;
+}
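
A quick round trip of the PKT_ID scheme (an editor's sketch under the
encoding above; the initializer only sets the fields the decoder reads):

	/* Encode an MCS3 probe frame: (3 + 1) + 8 == 12. */
	u8 pktid = mt7601u_tx_pktid_enc(dev, 3, true);

	/* The hardware reports the frame was finally acked at MCS1. */
	struct mt76_tx_status stat = { .pktid = pktid, .rate = 1 };

	mt7601u_tx_pktid_dec(dev, &stat);
	/* Now stat.is_probe == true and stat.retry == 3 - 1 == 2. */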
+
+static void mt7601u_tx_skb_remove_dma_overhead(struct sk_buff *skb,
+                                              struct ieee80211_tx_info *info)
+{
+       int pkt_len = (unsigned long)info->status.status_driver_data[0];
+
+       skb_pull(skb, sizeof(struct mt76_txwi) + 4);
+       if (ieee80211_get_hdrlen_from_skb(skb) % 4)
+               mt76_remove_hdr_pad(skb);
+
+       skb_trim(skb, pkt_len);
+}
+
+void mt7601u_tx_status(struct mt7601u_dev *dev, struct sk_buff *skb)
+{
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+       mt7601u_tx_skb_remove_dma_overhead(skb, info);
+
+       ieee80211_tx_info_clear_status(info);
+       info->status.rates[0].idx = -1;
+       info->flags |= IEEE80211_TX_STAT_ACK;
+       ieee80211_tx_status(dev->hw, skb);
+}
+
+static int mt7601u_skb_rooms(struct mt7601u_dev *dev, struct sk_buff *skb)
+{
+       int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
+       u32 need_head;
+
+       need_head = sizeof(struct mt76_txwi) + 4;
+       if (hdr_len % 4)
+               need_head += 2;
+
+       return skb_cow(skb, need_head);
+}
+
+static struct mt76_txwi *
+mt7601u_push_txwi(struct mt7601u_dev *dev, struct sk_buff *skb,
+                 struct ieee80211_sta *sta, struct mt76_wcid *wcid,
+                 int pkt_len)
+{
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct ieee80211_tx_rate *rate = &info->control.rates[0];
+       struct mt76_txwi *txwi;
+       unsigned long flags;
+       bool is_probe;
+       u32 pkt_id;
+       u16 rate_ctl;
+       u8 nss;
+
+       txwi = (struct mt76_txwi *)skb_push(skb, sizeof(struct mt76_txwi));
+       memset(txwi, 0, sizeof(*txwi));
+
+       if (!wcid->tx_rate_set)
+               ieee80211_get_tx_rates(info->control.vif, sta, skb,
+                                      info->control.rates, 1);
+
+       spin_lock_irqsave(&dev->lock, flags);
+       if (rate->idx < 0 || !rate->count)
+               rate_ctl = wcid->tx_rate;
+       else
+               rate_ctl = mt76_mac_tx_rate_val(dev, rate, &nss);
+       spin_unlock_irqrestore(&dev->lock, flags);
+       txwi->rate_ctl = cpu_to_le16(rate_ctl);
+
+       if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
+               txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
+       if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
+               txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
+
+       if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
+               u8 ba_size = IEEE80211_MIN_AMPDU_BUF;
+
+               ba_size <<= sta->ht_cap.ampdu_factor;
+               ba_size = min_t(int, 63, ba_size);
+               if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
+                       ba_size = 0;
+               txwi->ack_ctl |= MT76_SET(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);
+
+               txwi->flags = cpu_to_le16(MT_TXWI_FLAGS_AMPDU |
+                                         MT76_SET(MT_TXWI_FLAGS_MPDU_DENSITY,
+                                                  sta->ht_cap.ampdu_density));
+               if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
+                       txwi->flags = 0;
+       }
+
+       txwi->wcid = wcid->idx;
+
+       is_probe = !!(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
+       pkt_id = mt7601u_tx_pktid_enc(dev, rate_ctl & 0x7, is_probe);
+       pkt_len |= MT76_SET(MT_TXWI_LEN_PKTID, pkt_id);
+       txwi->len_ctl = cpu_to_le16(pkt_len);
+
+       return txwi;
+}
+
+void mt7601u_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
+               struct sk_buff *skb)
+{
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct mt7601u_dev *dev = hw->priv;
+       struct ieee80211_vif *vif = info->control.vif;
+       struct ieee80211_sta *sta = control->sta;
+       struct mt76_sta *msta = NULL;
+       struct mt76_wcid *wcid = dev->mon_wcid;
+       struct mt76_txwi *txwi;
+       int pkt_len = skb->len;
+       int hw_q = skb2q(skb);
+
+       BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1);
+       info->status.status_driver_data[0] = (void *)(unsigned long)pkt_len;
+
+       if (mt7601u_skb_rooms(dev, skb) || mt76_insert_hdr_pad(skb)) {
+               ieee80211_free_txskb(dev->hw, skb);
+               return;
+       }
+
+       if (sta) {
+               msta = (struct mt76_sta *) sta->drv_priv;
+               wcid = &msta->wcid;
+       } else if (vif) {
+               struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+
+               wcid = &mvif->group_wcid;
+       }
+
+       txwi = mt7601u_push_txwi(dev, skb, sta, wcid, pkt_len);
+
+       if (mt7601u_dma_enqueue_tx(dev, skb, wcid, hw_q))
+               return;
+
+       trace_mt_tx(dev, skb, msta, txwi);
+}
+
+void mt7601u_tx_stat(struct work_struct *work)
+{
+       struct mt7601u_dev *dev = container_of(work, struct mt7601u_dev,
+                                              stat_work.work);
+       struct mt76_tx_status stat;
+       unsigned long flags;
+       int cleaned = 0;
+
+       while (!test_bit(MT7601U_STATE_REMOVED, &dev->state)) {
+               stat = mt7601u_mac_fetch_tx_status(dev);
+               if (!stat.valid)
+                       break;
+
+               mt7601u_tx_pktid_dec(dev, &stat);
+               mt76_send_tx_status(dev, &stat);
+
+               cleaned++;
+       }
+       trace_mt_tx_status_cleaned(dev, cleaned);
+
+       spin_lock_irqsave(&dev->tx_lock, flags);
+       if (cleaned)
+               queue_delayed_work(dev->stat_wq, &dev->stat_work,
+                                  msecs_to_jiffies(10));
+       else if (test_and_clear_bit(MT7601U_STATE_MORE_STATS, &dev->state))
+               queue_delayed_work(dev->stat_wq, &dev->stat_work,
+                                  msecs_to_jiffies(20));
+       else
+               clear_bit(MT7601U_STATE_READING_STATS, &dev->state);
+       spin_unlock_irqrestore(&dev->tx_lock, flags);
+}
+
+int mt7601u_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                   u16 queue, const struct ieee80211_tx_queue_params *params)
+{
+       struct mt7601u_dev *dev = hw->priv;
+       u8 cw_min = 5, cw_max = 10, hw_q = q2hwq(queue);
+       u32 val;
+
+       /* TODO: should we do funny things with the parameters?
+        *       See what mt7601u_set_default_edca() used to do in init.c.
+        */
+
+       if (params->cw_min)
+               cw_min = fls(params->cw_min);
+       if (params->cw_max)
+               cw_max = fls(params->cw_max);
+
+       WARN_ON(params->txop > 0xff);
+       WARN_ON(params->aifs > 0xf);
+       WARN_ON(cw_min > 0xf);
+       WARN_ON(cw_max > 0xf);
+
+       val = MT76_SET(MT_EDCA_CFG_AIFSN, params->aifs) |
+             MT76_SET(MT_EDCA_CFG_CWMIN, cw_min) |
+             MT76_SET(MT_EDCA_CFG_CWMAX, cw_max);
+       /* TODO: based on the user-controlled EnableTxBurst variable the
+        *       vendor driver sets a really long TXOP on AC0 (see
+        *       connect.c:2009), but only when connected? It should be 0
+        *       when not connected.
+        */
+       if (!hw_q)
+               val |= 0x60;
+       else
+               val |= MT76_SET(MT_EDCA_CFG_TXOP, params->txop);
+       mt76_wr(dev, MT_EDCA_CFG_AC(hw_q), val);
+
+       val = mt76_rr(dev, MT_WMM_TXOP(hw_q));
+       val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(hw_q));
+       val |= params->txop << MT_WMM_TXOP_SHIFT(hw_q);
+       mt76_wr(dev, MT_WMM_TXOP(hw_q), val);
+
+       val = mt76_rr(dev, MT_WMM_AIFSN);
+       val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(hw_q));
+       val |= params->aifs << MT_WMM_AIFSN_SHIFT(hw_q);
+       mt76_wr(dev, MT_WMM_AIFSN, val);
+
+       val = mt76_rr(dev, MT_WMM_CWMIN);
+       val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(hw_q));
+       val |= cw_min << MT_WMM_CWMIN_SHIFT(hw_q);
+       mt76_wr(dev, MT_WMM_CWMIN, val);
+
+       val = mt76_rr(dev, MT_WMM_CWMAX);
+       val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(hw_q));
+       val |= cw_max << MT_WMM_CWMAX_SHIFT(hw_q);
+       mt76_wr(dev, MT_WMM_CWMAX, val);
+
+       return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt7601u/usb.c b/drivers/net/wireless/mediatek/mt7601u/usb.c
new file mode 100644 (file)
index 0000000..99e2b39
--- /dev/null
@@ -0,0 +1,360 @@
+/*
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+
+#include "mt7601u.h"
+#include "usb.h"
+#include "trace.h"
+
+static const struct usb_device_id mt7601u_device_table[] = {
+       { USB_DEVICE(0x0b05, 0x17d3) },
+       { USB_DEVICE(0x0e8d, 0x760a) },
+       { USB_DEVICE(0x0e8d, 0x760b) },
+       { USB_DEVICE(0x13d3, 0x3431) },
+       { USB_DEVICE(0x13d3, 0x3434) },
+       { USB_DEVICE(0x148f, 0x7601) },
+       { USB_DEVICE(0x148f, 0x760a) },
+       { USB_DEVICE(0x148f, 0x760b) },
+       { USB_DEVICE(0x148f, 0x760c) },
+       { USB_DEVICE(0x148f, 0x760d) },
+       { USB_DEVICE(0x2001, 0x3d04) },
+       { USB_DEVICE(0x2717, 0x4106) },
+       { USB_DEVICE(0x2955, 0x0001) },
+       { USB_DEVICE(0x2955, 0x1001) },
+       { USB_DEVICE(0x2a5f, 0x1000) },
+       { USB_DEVICE(0x7392, 0x7710) },
+       { 0, }
+};
+
+bool mt7601u_usb_alloc_buf(struct mt7601u_dev *dev, size_t len,
+                          struct mt7601u_dma_buf *buf)
+{
+       struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
+
+       buf->len = len;
+       buf->urb = usb_alloc_urb(0, GFP_KERNEL);
+       buf->buf = usb_alloc_coherent(usb_dev, buf->len, GFP_KERNEL, &buf->dma);
+
+       return !buf->urb || !buf->buf;
+}
+
+void mt7601u_usb_free_buf(struct mt7601u_dev *dev, struct mt7601u_dma_buf *buf)
+{
+       struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
+
+       usb_free_coherent(usb_dev, buf->len, buf->buf, buf->dma);
+       usb_free_urb(buf->urb);
+}
+
+int mt7601u_usb_submit_buf(struct mt7601u_dev *dev, int dir, int ep_idx,
+                          struct mt7601u_dma_buf *buf, gfp_t gfp,
+                          usb_complete_t complete_fn, void *context)
+{
+       struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
+       unsigned pipe;
+       int ret;
+
+       if (dir == USB_DIR_IN)
+               pipe = usb_rcvbulkpipe(usb_dev, dev->in_eps[ep_idx]);
+       else
+               pipe = usb_sndbulkpipe(usb_dev, dev->out_eps[ep_idx]);
+
+       usb_fill_bulk_urb(buf->urb, usb_dev, pipe, buf->buf, buf->len,
+                         complete_fn, context);
+       buf->urb->transfer_dma = buf->dma;
+       buf->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+       trace_mt_submit_urb(dev, buf->urb);
+       ret = usb_submit_urb(buf->urb, gfp);
+       if (ret)
+               dev_err(dev->dev, "Error: submit URB dir:%d ep:%d failed:%d\n",
+                       dir, ep_idx, ret);
+       return ret;
+}
+
+void mt7601u_complete_urb(struct urb *urb)
+{
+       struct completion *cmpl = urb->context;
+
+       complete(cmpl);
+}
+
+static int
+__mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req,
+                        const u8 direction, const u16 val, const u16 offset,
+                        void *buf, const size_t buflen)
+{
+       int i, ret;
+       struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
+       const u8 req_type = direction | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
+       const unsigned int pipe = (direction == USB_DIR_IN) ?
+               usb_rcvctrlpipe(usb_dev, 0) : usb_sndctrlpipe(usb_dev, 0);
+
+       for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
+               ret = usb_control_msg(usb_dev, pipe, req, req_type,
+                                     val, offset, buf, buflen,
+                                     MT_VEND_REQ_TOUT_MS);
+               trace_mt_vend_req(dev, pipe, req, req_type, val, offset,
+                                 buf, buflen, ret);
+
+               if (ret >= 0 || ret == -ENODEV)
+                       return ret;
+
+               msleep(5);
+       }
+
+       dev_err(dev->dev, "Vendor request req:%02x off:%04x failed:%d\n",
+               req, offset, ret);
+
+       return ret;
+}
+
+int
+mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req,
+                      const u8 direction, const u16 val, const u16 offset,
+                      void *buf, const size_t buflen)
+{
+       int ret;
+
+       mutex_lock(&dev->vendor_req_mutex);
+
+       ret = __mt7601u_vendor_request(dev, req, direction, val, offset,
+                                      buf, buflen);
+       if (ret == -ENODEV)
+               set_bit(MT7601U_STATE_REMOVED, &dev->state);
+
+       mutex_unlock(&dev->vendor_req_mutex);
+
+       return ret;
+}
+
+void mt7601u_vendor_reset(struct mt7601u_dev *dev)
+{
+       mt7601u_vendor_request(dev, MT_VEND_DEV_MODE, USB_DIR_OUT,
+                              MT_VEND_DEV_MODE_RESET, 0, NULL, 0);
+}
+
+u32 mt7601u_rr(struct mt7601u_dev *dev, u32 offset)
+{
+       int ret;
+       __le32 reg;
+       u32 val;
+
+       WARN_ONCE(offset > USHRT_MAX, "read high off:%08x", offset);
+
+       ret = mt7601u_vendor_request(dev, MT_VEND_MULTI_READ, USB_DIR_IN,
+                                    0, offset, &reg, sizeof(reg));
+       val = le32_to_cpu(reg);
+       if (ret > 0 && ret != sizeof(reg)) {
+               dev_err(dev->dev, "Error: wrong size read:%d off:%08x\n",
+                       ret, offset);
+               val = ~0;
+       }
+
+       trace_reg_read(dev, offset, val);
+       return val;
+}
+
+int mt7601u_vendor_single_wr(struct mt7601u_dev *dev, const u8 req,
+                            const u16 offset, const u32 val)
+{
+       int ret;
+
+       ret = mt7601u_vendor_request(dev, req, USB_DIR_OUT,
+                                    val & 0xffff, offset, NULL, 0);
+       if (ret)
+               return ret;
+       return mt7601u_vendor_request(dev, req, USB_DIR_OUT,
+                                     val >> 16, offset + 2, NULL, 0);
+}
+
+void mt7601u_wr(struct mt7601u_dev *dev, u32 offset, u32 val)
+{
+       WARN_ONCE(offset > USHRT_MAX, "write high off:%08x", offset);
+
+       mt7601u_vendor_single_wr(dev, MT_VEND_WRITE, offset, val);
+       trace_reg_write(dev, offset, val);
+}
+
+u32 mt7601u_rmw(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val)
+{
+       val |= mt7601u_rr(dev, offset) & ~mask;
+       mt7601u_wr(dev, offset, val);
+       return val;
+}
+
+u32 mt7601u_rmc(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val)
+{
+       u32 reg = mt7601u_rr(dev, offset);
+
+       val |= reg & ~mask;
+       if (reg != val)
+               mt7601u_wr(dev, offset, val);
+       return val;
+}
+
+void mt7601u_wr_copy(struct mt7601u_dev *dev, u32 offset,
+                    const void *data, int len)
+{
+       WARN_ONCE(offset & 3, "unaligned write copy off:%08x", offset);
+       WARN_ONCE(len & 3, "short write copy off:%08x", offset);
+
+       mt7601u_burst_write_regs(dev, offset, data, len / 4);
+}
+
+void mt7601u_addr_wr(struct mt7601u_dev *dev, const u32 offset, const u8 *addr)
+{
+       mt7601u_wr(dev, offset, get_unaligned_le32(addr));
+       mt7601u_wr(dev, offset + 4, addr[4] | addr[5] << 8);
+}
+
+static int mt7601u_assign_pipes(struct usb_interface *usb_intf,
+                               struct mt7601u_dev *dev)
+{
+       struct usb_endpoint_descriptor *ep_desc;
+       struct usb_host_interface *intf_desc = usb_intf->cur_altsetting;
+       unsigned i, ep_i = 0, ep_o = 0;
+
+       BUILD_BUG_ON(sizeof(dev->in_eps) < __MT_EP_IN_MAX);
+       BUILD_BUG_ON(sizeof(dev->out_eps) < __MT_EP_OUT_MAX);
+
+       for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
+               ep_desc = &intf_desc->endpoint[i].desc;
+
+               if (usb_endpoint_is_bulk_in(ep_desc) &&
+                   ep_i++ < __MT_EP_IN_MAX) {
+                       dev->in_eps[ep_i - 1] = usb_endpoint_num(ep_desc);
+                       dev->in_max_packet = usb_endpoint_maxp(ep_desc);
+                       /* Note: this is ignored by the USB subsystem, but
+                        *       the vendor code does it. We can drop it at
+                        *       some point.
+                        */
+                       dev->in_eps[ep_i - 1] |= USB_DIR_IN;
+               } else if (usb_endpoint_is_bulk_out(ep_desc) &&
+                          ep_o++ < __MT_EP_OUT_MAX) {
+                       dev->out_eps[ep_o - 1] = usb_endpoint_num(ep_desc);
+                       dev->out_max_packet = usb_endpoint_maxp(ep_desc);
+               }
+       }
+
+       if (ep_i != __MT_EP_IN_MAX || ep_o != __MT_EP_OUT_MAX) {
+               dev_err(dev->dev, "Error: wrong pipe number in:%d out:%d\n",
+                       ep_i, ep_o);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int mt7601u_probe(struct usb_interface *usb_intf,
+                        const struct usb_device_id *id)
+{
+       struct usb_device *usb_dev = interface_to_usbdev(usb_intf);
+       struct mt7601u_dev *dev;
+       u32 asic_rev, mac_rev;
+       int ret;
+
+       dev = mt7601u_alloc_device(&usb_intf->dev);
+       if (!dev)
+               return -ENOMEM;
+
+       usb_dev = usb_get_dev(usb_dev);
+       usb_reset_device(usb_dev);
+
+       usb_set_intfdata(usb_intf, dev);
+
+       ret = mt7601u_assign_pipes(usb_intf, dev);
+       if (ret)
+               goto err;
+       ret = mt7601u_wait_asic_ready(dev);
+       if (ret)
+               goto err;
+
+       asic_rev = mt7601u_rr(dev, MT_ASIC_VERSION);
+       mac_rev = mt7601u_rr(dev, MT_MAC_CSR0);
+       dev_info(dev->dev, "ASIC revision: %08x MAC revision: %08x\n",
+                asic_rev, mac_rev);
+
+       /* Note: vendor driver skips this check for MT7601U */
+       if (!(mt7601u_rr(dev, MT_EFUSE_CTRL) & MT_EFUSE_CTRL_SEL))
+               dev_warn(dev->dev, "Warning: eFUSE not present\n");
+
+       ret = mt7601u_init_hardware(dev);
+       if (ret)
+               goto err;
+       ret = mt7601u_register_device(dev);
+       if (ret)
+               goto err_hw;
+
+       set_bit(MT7601U_STATE_INITIALIZED, &dev->state);
+
+       return 0;
+err_hw:
+       mt7601u_cleanup(dev);
+err:
+       usb_set_intfdata(usb_intf, NULL);
+       usb_put_dev(interface_to_usbdev(usb_intf));
+
+       destroy_workqueue(dev->stat_wq);
+       ieee80211_free_hw(dev->hw);
+       return ret;
+}
+
+static void mt7601u_disconnect(struct usb_interface *usb_intf)
+{
+       struct mt7601u_dev *dev = usb_get_intfdata(usb_intf);
+
+       ieee80211_unregister_hw(dev->hw);
+       mt7601u_cleanup(dev);
+
+       usb_set_intfdata(usb_intf, NULL);
+       usb_put_dev(interface_to_usbdev(usb_intf));
+
+       destroy_workqueue(dev->stat_wq);
+       ieee80211_free_hw(dev->hw);
+}
+
+static int mt7601u_suspend(struct usb_interface *usb_intf, pm_message_t state)
+{
+       struct mt7601u_dev *dev = usb_get_intfdata(usb_intf);
+
+       mt7601u_cleanup(dev);
+
+       return 0;
+}
+
+static int mt7601u_resume(struct usb_interface *usb_intf)
+{
+       struct mt7601u_dev *dev = usb_get_intfdata(usb_intf);
+
+       return mt7601u_init_hardware(dev);
+}
+
+MODULE_DEVICE_TABLE(usb, mt7601u_device_table);
+MODULE_FIRMWARE(MT7601U_FIRMWARE);
+MODULE_LICENSE("GPL");
+
+static struct usb_driver mt7601u_driver = {
+       .name           = KBUILD_MODNAME,
+       .id_table       = mt7601u_device_table,
+       .probe          = mt7601u_probe,
+       .disconnect     = mt7601u_disconnect,
+       .suspend        = mt7601u_suspend,
+       .resume         = mt7601u_resume,
+       .reset_resume   = mt7601u_resume,
+       .soft_unbind    = 1,
+       .disable_hub_initiated_lpm = 1,
+};
+module_usb_driver(mt7601u_driver);
diff --git a/drivers/net/wireless/mediatek/mt7601u/usb.h b/drivers/net/wireless/mediatek/mt7601u/usb.h
new file mode 100644 (file)
index 0000000..49e188f
--- /dev/null
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT7601U_USB_H
+#define __MT7601U_USB_H
+
+#include "mt7601u.h"
+
+#define MT7601U_FIRMWARE       "mt7601u.bin"
+
+#define MT_VEND_REQ_MAX_RETRY  10
+#define MT_VEND_REQ_TOUT_MS    300
+
+#define MT_VEND_DEV_MODE_RESET 1
+
+enum mt_vendor_req {
+       MT_VEND_DEV_MODE = 1,
+       MT_VEND_WRITE = 2,
+       MT_VEND_MULTI_READ = 7,
+       MT_VEND_WRITE_FCE = 0x42,
+};
+
+enum mt_usb_ep_in {
+       MT_EP_IN_PKT_RX,
+       MT_EP_IN_CMD_RESP,
+       __MT_EP_IN_MAX,
+};
+
+enum mt_usb_ep_out {
+       MT_EP_OUT_INBAND_CMD,
+       MT_EP_OUT_AC_BK,
+       MT_EP_OUT_AC_BE,
+       MT_EP_OUT_AC_VI,
+       MT_EP_OUT_AC_VO,
+       MT_EP_OUT_HCCA,
+       __MT_EP_OUT_MAX,
+};
+
+static inline struct usb_device *mt7601u_to_usb_dev(struct mt7601u_dev *mt7601u)
+{
+       return interface_to_usbdev(to_usb_interface(mt7601u->dev));
+}
+
+static inline bool mt7601u_urb_has_error(struct urb *urb)
+{
+       return urb->status &&
+               urb->status != -ENOENT &&
+               urb->status != -ECONNRESET &&
+               urb->status != -ESHUTDOWN;
+}
+
+bool mt7601u_usb_alloc_buf(struct mt7601u_dev *dev, size_t len,
+                          struct mt7601u_dma_buf *buf);
+void mt7601u_usb_free_buf(struct mt7601u_dev *dev, struct mt7601u_dma_buf *buf);
+int mt7601u_usb_submit_buf(struct mt7601u_dev *dev, int dir, int ep_idx,
+                          struct mt7601u_dma_buf *buf, gfp_t gfp,
+                          usb_complete_t complete_fn, void *context);
+void mt7601u_complete_urb(struct urb *urb);
+
+int mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req,
+                          const u8 direction, const u16 val, const u16 offset,
+                          void *buf, const size_t buflen);
+void mt7601u_vendor_reset(struct mt7601u_dev *dev);
+int mt7601u_vendor_single_wr(struct mt7601u_dev *dev, const u8 req,
+                            const u16 offset, const u32 val);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt7601u/util.c b/drivers/net/wireless/mediatek/mt7601u/util.c
new file mode 100644 (file)
index 0000000..7c1787c
--- /dev/null
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "mt7601u.h"
+
+void mt76_remove_hdr_pad(struct sk_buff *skb)
+{
+       int len = ieee80211_get_hdrlen_from_skb(skb);
+
+       memmove(skb->data + 2, skb->data, len);
+       skb_pull(skb, 2);
+}
+
+int mt76_insert_hdr_pad(struct sk_buff *skb)
+{
+       int len = ieee80211_get_hdrlen_from_skb(skb);
+       int ret;
+
+       if (len % 4 == 0)
+               return 0;
+
+       ret = skb_cow(skb, 2);
+       if (ret)
+               return ret;
+
+       skb_push(skb, 2);
+       memmove(skb->data, skb->data + 2, len);
+
+       skb->data[len] = 0;
+       skb->data[len + 1] = 0;
+       return 0;
+}
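
For example, a QoS data header is 26 bytes; since 26 % 4 == 2,
mt76_insert_hdr_pad() pushes two bytes of headroom, moves the header into
it, and zeroes the two bytes left between header and payload, so the payload
starts 4-byte aligned (editor's illustration):

	/* before: |hdr(26)|payload...|
	 * after:  |hdr(26)|00 00|payload...|
	 */
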
diff --git a/drivers/net/wireless/mediatek/mt7601u/util.h b/drivers/net/wireless/mediatek/mt7601u/util.h
new file mode 100644 (file)
index 0000000..b89140b
--- /dev/null
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT76_UTIL_H
+#define __MT76_UTIL_H
+
+/*
+ * Power-of-two check: this verifies that the given mask contains a
+ * contiguous set of bits. Note that we cannot use the is_power_of_2()
+ * function since this check must be done at compile time.
+ */
+#define is_power_of_two(x)     ( !((x) & ((x)-1)) )
+#define low_bit_mask(x)                ( ((x)-1) & ~(x) )
+#define is_valid_mask(x)       is_power_of_two(1LU + (x) + low_bit_mask(x))
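
For instance (editor's illustration of how is_valid_mask() evaluates):

	is_valid_mask(0x00f0)  /* 1 + 0xf0 + 0x0f == 0x100, a power of two: valid */
	is_valid_mask(0x0505)  /* 1 + 0x505 + 0 == 0x506, not a power of two: invalid */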
+
+/*
+ * Macros to find the first set bit in a variable.
+ * These macros behave the same as the __ffs() functions, with the
+ * important difference that the computation happens at compile time
+ * rather than at run time.
+ */
+#define compile_ffs2(__x) \
+       __builtin_choose_expr(((__x) & 0x1), 0, 1)
+
+#define compile_ffs4(__x) \
+       __builtin_choose_expr(((__x) & 0x3), \
+                             (compile_ffs2((__x))), \
+                             (compile_ffs2((__x) >> 2) + 2))
+
+#define compile_ffs8(__x) \
+       __builtin_choose_expr(((__x) & 0xf), \
+                             (compile_ffs4((__x))), \
+                             (compile_ffs4((__x) >> 4) + 4))
+
+#define compile_ffs16(__x) \
+       __builtin_choose_expr(((__x) & 0xff), \
+                             (compile_ffs8((__x))), \
+                             (compile_ffs8((__x) >> 8) + 8))
+
+#define compile_ffs32(__x) \
+       __builtin_choose_expr(((__x) & 0xffff), \
+                             (compile_ffs16((__x))), \
+                             (compile_ffs16((__x) >> 16) + 16))
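
E.g. compile_ffs32(0xff00) reduces at compile time to 8, the same value
__ffs(0xff00) would return at run time (editor's illustration):

	compile_ffs32(0xff00)   /* low 16 bits non-zero -> compile_ffs16(0xff00) */
	                        /* low 8 bits zero -> compile_ffs8(0xff) + 8 == 8 */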
+
+/*
+ * This macro checks the requirements for the MT76_SET/MT76_GET macros
+ * below: the mask should be a constant, non-zero, contiguous set of
+ * bits which does not exceed the limit of the given type.
+ */
+#define FIELD_CHECK(__mask) \
+       BUILD_BUG_ON(!(__mask) || !is_valid_mask(__mask))
+
+#define MT76_SET(_mask, _val)                                          \
+       ({                                                              \
+               FIELD_CHECK(_mask);                                     \
+               (((u32) (_val)) << compile_ffs32(_mask)) & _mask;       \
+       })
+
+#define MT76_GET(_mask, _val)                                          \
+       ({                                                              \
+               FIELD_CHECK(_mask);                                     \
+               (u32) (((_val) & _mask) >> compile_ffs32(_mask));       \
+       })
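
Putting the two together with a field from the register definitions earlier
in this patch (a minimal sketch; the values are arbitrary and it assumes the
register header is included alongside util.h):

	/* MT_EXT_CCA_CFG_CCA1 is GENMASK(3, 2), so compile_ffs32() yields 2. */
	u32 packed = MT76_SET(MT_EXT_CCA_CFG_CCA1, 2);      /* (2 << 2) & 0xc == 0x8 */
	u32 field  = MT76_GET(MT_EXT_CCA_CFG_CCA1, packed); /* (0x8 & 0xc) >> 2 == 2 */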
+
+#endif
index 3ab87a8551225625a7a0d92283deb77a2e0a7d5b..65cd461c88db5505a99d113a9650d26fd84ccb96 100644 (file)
@@ -134,8 +134,8 @@ void mwifiex_dfs_cac_work_queue(struct work_struct *work)
 
        chandef = priv->dfs_chandef;
        if (priv->wdev.cac_started) {
-               dev_dbg(priv->adapter->dev,
-                       "CAC timer finished; No radar detected\n");
+               mwifiex_dbg(priv->adapter, MSG,
+                           "CAC timer finished; No radar detected\n");
                cfg80211_cac_event(priv->netdev, &chandef,
                                   NL80211_RADAR_CAC_FINISHED,
                                   GFP_KERNEL);
@@ -161,9 +161,9 @@ int mwifiex_cmd_issue_chan_report_request(struct mwifiex_private *priv,
        cr_req->chan_desc.chan_width = radar_params->chandef->width;
        cr_req->msec_dwell_time = cpu_to_le32(radar_params->cac_time_ms);
 
-       dev_dbg(priv->adapter->dev,
-               "11h: issuing DFS Radar check for channel=%d\n",
-               radar_params->chandef->chan->hw_value);
+       mwifiex_dbg(priv->adapter, MSG,
+                   "11h: issuing DFS Radar check for channel=%d\n",
+                   radar_params->chandef->chan->hw_value);
 
        return 0;
 }
@@ -174,8 +174,8 @@ int mwifiex_cmd_issue_chan_report_request(struct mwifiex_private *priv,
 void mwifiex_abort_cac(struct mwifiex_private *priv)
 {
        if (priv->wdev.cac_started) {
-               dev_dbg(priv->adapter->dev,
-                       "Aborting delayed work for CAC.\n");
+               mwifiex_dbg(priv->adapter, MSG,
+                           "Aborting delayed work for CAC.\n");
                cancel_delayed_work_sync(&priv->dfs_cac_work);
                cfg80211_cac_event(priv->netdev, &priv->dfs_chandef,
                                   NL80211_RADAR_CAC_ABORTED, GFP_KERNEL);
@@ -199,7 +199,8 @@ int mwifiex_11h_handle_chanrpt_ready(struct mwifiex_private *priv,
                                sizeof(u32));
 
        if (le32_to_cpu(rpt_event->result) != HostCmd_RESULT_OK) {
-               dev_err(priv->adapter->dev, "Error in channel report event\n");
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "Error in channel report event\n");
                return -1;
        }
 
@@ -212,8 +213,8 @@ int mwifiex_11h_handle_chanrpt_ready(struct mwifiex_private *priv,
                switch (le16_to_cpu(rpt->header.type)) {
                case TLV_TYPE_CHANRPT_11H_BASIC:
                        if (rpt->map.radar) {
-                               dev_notice(priv->adapter->dev,
-                                          "RADAR Detected on channel %d!\n",
+                               mwifiex_dbg(priv->adapter, MSG,
+                                           "RADAR Detected on channel %d!\n",
                                            priv->dfs_chandef.chan->hw_value);
                                cancel_delayed_work_sync(&priv->dfs_cac_work);
                                cfg80211_cac_event(priv->netdev,
@@ -242,16 +243,17 @@ int mwifiex_11h_handle_radar_detected(struct mwifiex_private *priv,
        rdr_event = (void *)(skb->data + sizeof(u32));
 
        if (le32_to_cpu(rdr_event->passed)) {
-               dev_notice(priv->adapter->dev,
-                          "radar detected; indicating kernel\n");
+               mwifiex_dbg(priv->adapter, MSG,
+                           "radar detected; indicating kernel\n");
                cfg80211_radar_event(priv->adapter->wiphy, &priv->dfs_chandef,
                                     GFP_KERNEL);
-               dev_dbg(priv->adapter->dev, "regdomain: %d\n",
-                       rdr_event->reg_domain);
-               dev_dbg(priv->adapter->dev, "radar detection type: %d\n",
-                       rdr_event->det_type);
+               mwifiex_dbg(priv->adapter, MSG, "regdomain: %d\n",
+                           rdr_event->reg_domain);
+               mwifiex_dbg(priv->adapter, MSG, "radar detection type: %d\n",
+                           rdr_event->det_type);
        } else {
-               dev_dbg(priv->adapter->dev, "false radar detection event!\n");
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "false radar detection event!\n");
        }
 
        return 0;
@@ -276,20 +278,20 @@ void mwifiex_dfs_chan_sw_work_queue(struct work_struct *work)
 
        bss_cfg = &priv->bss_cfg;
        if (!bss_cfg->beacon_period) {
-               dev_err(priv->adapter->dev,
-                       "channel switch: AP already stopped\n");
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "channel switch: AP already stopped\n");
                return;
        }
 
        mwifiex_uap_set_channel(bss_cfg, priv->dfs_chandef);
 
        if (mwifiex_config_start_uap(priv, bss_cfg)) {
-               dev_dbg(priv->adapter->dev,
-                       "Failed to start AP after channel switch\n");
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "Failed to start AP after channel switch\n");
                return;
        }
 
-       dev_notice(priv->adapter->dev,
-                  "indicating channel switch completion to kernel\n");
+       mwifiex_dbg(priv->adapter, MSG,
+                   "indicating channel switch completion to kernel\n");
        cfg80211_ch_switch_notify(priv->netdev, &priv->dfs_chandef);
 }
index 433bd6837c79042b3a5e7daa2bb20f594fd7dd28..8422986cd7a9137c171f706b123120a6e279d524 100644 (file)
@@ -42,7 +42,7 @@ int mwifiex_fill_cap_info(struct mwifiex_private *priv, u8 radio_type,
                                        priv->wdev.wiphy->bands[radio_type];
 
        if (WARN_ON_ONCE(!sband)) {
-               dev_err(priv->adapter->dev, "Invalid radio type!\n");
+               mwifiex_dbg(priv->adapter, ERROR, "Invalid radio type!\n");
                return -EINVAL;
        }
 
@@ -184,7 +184,7 @@ int mwifiex_ret_11n_addba_req(struct mwifiex_private *priv,
 
        tx_ba_tbl = mwifiex_get_ba_tbl(priv, tid, add_ba_rsp->peer_mac_addr);
        if (tx_ba_tbl) {
-               dev_dbg(priv->adapter->dev, "info: BA stream complete\n");
+               mwifiex_dbg(priv->adapter, EVENT, "info: BA stream complete\n");
                tx_ba_tbl->ba_status = BA_SETUP_COMPLETE;
                if ((block_ack_param_set & BLOCKACKPARAM_AMSDU_SUPP_MASK) &&
                    priv->add_ba_param.tx_amsdu &&
@@ -197,7 +197,7 @@ int mwifiex_ret_11n_addba_req(struct mwifiex_private *priv,
                        ra_list->ba_status = BA_SETUP_COMPLETE;
                }
        } else {
-               dev_err(priv->adapter->dev, "BA stream not created\n");
+               mwifiex_dbg(priv->adapter, ERROR, "BA stream not created\n");
        }
 
        return 0;
@@ -224,7 +224,8 @@ int mwifiex_cmd_recfg_tx_buf(struct mwifiex_private *priv,
        tx_buf->action = cpu_to_le16(action);
        switch (action) {
        case HostCmd_ACT_GEN_SET:
-               dev_dbg(priv->adapter->dev, "cmd: set tx_buf=%d\n", *buf_size);
+               mwifiex_dbg(priv->adapter, CMD,
+                           "cmd: set tx_buf=%d\n", *buf_size);
                tx_buf->buff_size = cpu_to_le16(*buf_size);
                break;
        case HostCmd_ACT_GEN_GET:
@@ -466,7 +467,8 @@ void mwifiex_11n_delete_tx_ba_stream_tbl_entry(struct mwifiex_private *priv,
            mwifiex_is_tx_ba_stream_ptr_valid(priv, tx_ba_tsr_tbl))
                return;
 
-       dev_dbg(priv->adapter->dev, "info: tx_ba_tsr_tbl %p\n", tx_ba_tsr_tbl);
+       mwifiex_dbg(priv->adapter, INFO,
+                   "info: tx_ba_tsr_tbl %p\n", tx_ba_tsr_tbl);
 
        list_del(&tx_ba_tsr_tbl->list);
 
@@ -563,7 +565,7 @@ int mwifiex_send_addba(struct mwifiex_private *priv, int tid, u8 *peer_mac)
        unsigned long flags;
        u16 block_ack_param_set;
 
-       dev_dbg(priv->adapter->dev, "cmd: %s: tid %d\n", __func__, tid);
+       mwifiex_dbg(priv->adapter, CMD, "cmd: %s: tid %d\n", __func__, tid);
 
        if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
            ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
@@ -575,9 +577,9 @@ int mwifiex_send_addba(struct mwifiex_private *priv, int tid, u8 *peer_mac)
                sta_ptr = mwifiex_get_sta_entry(priv, peer_mac);
                if (!sta_ptr) {
                        spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
-                       dev_warn(priv->adapter->dev,
-                                "BA setup with unknown TDLS peer %pM!\n",
-                               peer_mac);
+                       mwifiex_dbg(priv->adapter, ERROR,
+                                   "BA setup with unknown TDLS peer %pM!\n",
+                                   peer_mac);
                        return -1;
                }
                if (sta_ptr->is_11ac_enabled)
@@ -706,8 +708,8 @@ int mwifiex_get_tx_ba_stream_tbl(struct mwifiex_private *priv,
        spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
        list_for_each_entry(tx_ba_tsr_tbl, &priv->tx_ba_stream_tbl_ptr, list) {
                rx_reo_tbl->tid = (u16) tx_ba_tsr_tbl->tid;
-               dev_dbg(priv->adapter->dev, "data: %s tid=%d\n",
-                       __func__, rx_reo_tbl->tid);
+               mwifiex_dbg(priv->adapter, DATA, "data: %s tid=%d\n",
+                           __func__, rx_reo_tbl->tid);
                memcpy(rx_reo_tbl->ra, tx_ba_tsr_tbl->ra, ETH_ALEN);
                rx_reo_tbl->amsdu = tx_ba_tsr_tbl->amsdu;
                rx_reo_tbl++;
index 6183e255e62ac380a614cb593cb226d27da5909d..f7c717253a664aaa7170b3216890ebd1b22ad295 100644 (file)
@@ -187,7 +187,6 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
        skb_aggr = mwifiex_alloc_dma_align_buf(adapter->tx_buf_size,
                                               GFP_ATOMIC | GFP_DMA);
        if (!skb_aggr) {
-               dev_err(adapter->dev, "%s: alloc skb_aggr\n", __func__);
                spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
                                       ra_list_flags);
                return -1;
@@ -297,13 +296,13 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
                tx_info_aggr->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
                spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
                                       ra_list_flags);
-               dev_dbg(adapter->dev, "data: -EBUSY is returned\n");
+               mwifiex_dbg(adapter, ERROR, "data: -EBUSY is returned\n");
                break;
        case -1:
                if (adapter->iface_type != MWIFIEX_PCIE)
                        adapter->data_sent = false;
-               dev_err(adapter->dev, "%s: host_to_card failed: %#x\n",
-                       __func__, ret);
+               mwifiex_dbg(adapter, ERROR, "%s: host_to_card failed: %#x\n",
+                           __func__, ret);
                adapter->dbg.num_tx_host_to_card_failure++;
                mwifiex_write_data_complete(adapter, skb_aggr, 1, ret);
                return 0;
index f75f8acfaca0332cef494e8146d4e331db683385..39d7a957674c2b6cd83d3593e7a10a08745ad56d 100644 (file)
@@ -51,8 +51,8 @@ static int mwifiex_11n_dispatch_amsdu_pkt(struct mwifiex_private *priv,
                        rx_skb = __skb_dequeue(&list);
                        ret = mwifiex_recv_packet(priv, rx_skb);
                        if (ret == -1)
-                               dev_err(priv->adapter->dev,
-                                       "Rx of A-MSDU failed");
+                               mwifiex_dbg(priv->adapter, ERROR,
+                                           "Rx of A-MSDU failed");
                }
                return 0;
        }
@@ -304,7 +304,7 @@ mwifiex_flush_data(unsigned long context)
        if (seq_num < 0)
                return;
 
-       dev_dbg(ctx->priv->adapter->dev, "info: flush data %d\n", seq_num);
+       mwifiex_dbg(ctx->priv->adapter, INFO, "info: flush data %d\n", seq_num);
        start_win = (ctx->ptr->start_win + seq_num + 1) & (MAX_TID_VALUE - 1);
        mwifiex_11n_dispatch_pkt_until_start_win(ctx->priv, ctx->ptr,
                                                 start_win);
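
mwifiex_flush_data() above advances the Rx reorder window just past the
last buffered sequence slot, wrapping modulo the sequence space. A minimal
standalone sketch of that arithmetic, assuming MAX_TID_VALUE is the
power-of-two modulus of the 12-bit 802.11 sequence-number space (4096):

/* Assumed modulus of the 802.11 sequence-number space (12 bits). */
#define MAX_TID_VALUE	4096

/* Advance the window start (seq_num + 1) slots, with wraparound. */
static unsigned int next_start_win(unsigned int start_win, int seq_num)
{
	return (start_win + seq_num + 1) & (MAX_TID_VALUE - 1);
}

/* Example: start_win = 4090, seq_num = 10 -> 4101 & 4095 = 5. */
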
@@ -367,8 +367,9 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
        }
        spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
 
-       dev_dbg(priv->adapter->dev, "info: last_seq=%d start_win=%d\n",
-               last_seq, new_node->start_win);
+       mwifiex_dbg(priv->adapter, INFO,
+                   "info: last_seq=%d start_win=%d\n",
+                   last_seq, new_node->start_win);
 
        if (last_seq != MWIFIEX_DEF_11N_RX_SEQ_NUM &&
            last_seq >= new_node->start_win) {
@@ -382,8 +383,8 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
                                        GFP_KERNEL);
        if (!new_node->rx_reorder_ptr) {
                kfree((u8 *) new_node);
-               dev_err(priv->adapter->dev,
-                       "%s: failed to alloc reorder_ptr\n", __func__);
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "%s: failed to alloc reorder_ptr\n", __func__);
                return;
        }
 
@@ -467,9 +468,9 @@ int mwifiex_cmd_11n_addba_rsp_gen(struct mwifiex_private *priv,
                                                cmd_addba_req->peer_mac_addr);
                if (!sta_ptr) {
                        spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
-                       dev_warn(priv->adapter->dev,
-                                "BA setup with unknown TDLS peer %pM!\n",
-                                cmd_addba_req->peer_mac_addr);
+                       mwifiex_dbg(priv->adapter, ERROR,
+                                   "BA setup with unknown TDLS peer %pM!\n",
+                                   cmd_addba_req->peer_mac_addr);
                        return -1;
                }
                if (sta_ptr->is_11ac_enabled)
@@ -573,14 +574,14 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
        }
 
        if (tbl->flags & RXREOR_FORCE_NO_DROP) {
-               dev_dbg(priv->adapter->dev,
-                       "RXREOR_FORCE_NO_DROP when HS is activated\n");
+               mwifiex_dbg(priv->adapter, INFO,
+                           "RXREOR_FORCE_NO_DROP when HS is activated\n");
                tbl->flags &= ~RXREOR_FORCE_NO_DROP;
        } else if (init_window_shift && seq_num < start_win &&
                   seq_num >= tbl->init_win) {
-               dev_dbg(priv->adapter->dev,
-                       "Sender TID sequence number reset %d->%d for SSN %d\n",
-                       start_win, seq_num, tbl->init_win);
+               mwifiex_dbg(priv->adapter, INFO,
+                           "Sender TID sequence number reset %d->%d for SSN %d\n",
+                           start_win, seq_num, tbl->init_win);
                tbl->start_win = start_win = seq_num;
                end_win = ((start_win + win_size) - 1) & (MAX_TID_VALUE - 1);
        } else {
@@ -668,23 +669,23 @@ mwifiex_del_ba_tbl(struct mwifiex_private *priv, int tid, u8 *peer_mac,
        else
                cleanup_rx_reorder_tbl = (initiator) ? false : true;
 
-       dev_dbg(priv->adapter->dev, "event: DELBA: %pM tid=%d initiator=%d\n",
-               peer_mac, tid, initiator);
+       mwifiex_dbg(priv->adapter, EVENT, "event: DELBA: %pM tid=%d initiator=%d\n",
+                   peer_mac, tid, initiator);
 
        if (cleanup_rx_reorder_tbl) {
                tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
                                                                 peer_mac);
                if (!tbl) {
-                       dev_dbg(priv->adapter->dev,
-                               "event: TID, TA not found in table\n");
+                       mwifiex_dbg(priv->adapter, EVENT,
+                                   "event: TID, TA not found in table\n");
                        return;
                }
                mwifiex_del_rx_reorder_entry(priv, tbl);
        } else {
                ptx_tbl = mwifiex_get_ba_tbl(priv, tid, peer_mac);
                if (!ptx_tbl) {
-                       dev_dbg(priv->adapter->dev,
-                               "event: TID, RA not found in table\n");
+                       mwifiex_dbg(priv->adapter, EVENT,
+                                   "event: TID, RA not found in table\n");
                        return;
                }
                ra_list = mwifiex_wmm_get_ralist_node(priv, tid, peer_mac);
@@ -721,8 +722,8 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
         * the stream
         */
        if (le16_to_cpu(add_ba_rsp->status_code) != BA_RESULT_SUCCESS) {
-               dev_err(priv->adapter->dev, "ADDBA RSP: failed %pM tid=%d)\n",
-                       add_ba_rsp->peer_mac_addr, tid);
+               mwifiex_dbg(priv->adapter, ERROR, "ADDBA RSP: failed %pM tid=%d)\n",
+                           add_ba_rsp->peer_mac_addr, tid);
 
                tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
                                                     add_ba_rsp->peer_mac_addr);
@@ -746,8 +747,8 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
                        tbl->amsdu = false;
        }
 
-       dev_dbg(priv->adapter->dev,
-               "cmd: ADDBA RSP: %pM tid=%d ssn=%d win_size=%d\n",
+       mwifiex_dbg(priv->adapter, CMD,
+                   "cmd: ADDBA RSP: %pM tid=%d ssn=%d win_size=%d\n",
                add_ba_rsp->peer_mac_addr, tid, add_ba_rsp->ssn, win_size);
 
        return 0;
index aa01c9bc77f916db68de7c51f8f848d45dc8fa26..48edf387683ebbd79a98f5257689f816378be3cf 100644 (file)
@@ -12,6 +12,7 @@ config MWIFIEX_SDIO
        tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797/SD8887/SD8897"
        depends on MWIFIEX && MMC
        select FW_LOADER
+       select WANT_DEV_COREDUMP
        ---help---
          This adds support for wireless adapters based on Marvell
          8786/8787/8797/8887/8897 chipsets with SDIO interface.
@@ -23,6 +24,7 @@ config MWIFIEX_PCIE
        tristate "Marvell WiFi-Ex Driver for PCIE 8766/8897"
        depends on MWIFIEX && PCI
        select FW_LOADER
+       select WANT_DEV_COREDUMP
        ---help---
          This adds support for wireless adapters based on Marvell
          8766/8897 chipsets with PCIe interface.
index 31928caeeed225edbae57f38bd7488edee7b726d..2f0f9b5609d0139a301f7e57866f212cc1daefe1 100644 (file)
@@ -230,9 +230,9 @@ getlog
 
        cat getlog
 
-fw_dump
-       This command is used to dump firmware memory into files.
-       Separate file will be created for each memory segment.
+device_dump
+       This command is used to dump driver information and firmware memory
+       segments.
        Usage:
 
-       cat fw_dump
+       cat device_dump
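
The MWIFIEX_SDIO and MWIFIEX_PCIE hunks above select WANT_DEV_COREDUMP,
enabling the kernel's device coredump facility that the new device_dump
command uses to hand firmware memory segments to user space.
dev_coredumpv() from <linux/devcoredump.h> is the real entry point; the
surrounding buffer handling below is only an illustrative sketch of how a
driver typically submits a captured dump:

#include <linux/device.h>
#include <linux/devcoredump.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

/* Illustrative only: hand a captured firmware segment to devcoredump.
 * dev_coredumpv() takes ownership of the vmalloc'ed buffer and exposes
 * it under /sys/class/devcoredump/ until user space reads or frees it.
 */
static void example_submit_fw_dump(struct device *dev,
				   const void *seg, size_t len)
{
	void *buf = vmalloc(len);

	if (!buf)
		return;
	memcpy(buf, seg, len);
	dev_coredumpv(dev, buf, len, GFP_KERNEL);
}
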
index bf9020ff2d33cf1cf9dc2c044f06a403b655e593..4eecedadefbff64c7a2656444809c21470ab73e8 100644 (file)
@@ -104,11 +104,11 @@ mwifiex_cfg80211_del_key(struct wiphy *wiphy, struct net_device *netdev,
        const u8 *peer_mac = pairwise ? mac_addr : bc_mac;
 
        if (mwifiex_set_encode(priv, NULL, NULL, 0, key_index, peer_mac, 1)) {
-               wiphy_err(wiphy, "deleting the crypto keys\n");
+               mwifiex_dbg(priv->adapter, ERROR, "deleting the crypto keys\n");
                return -EFAULT;
        }
 
-       wiphy_dbg(wiphy, "info: crypto keys deleted\n");
+       mwifiex_dbg(priv->adapter, INFO, "info: crypto keys deleted\n");
        return 0;
 }
 
@@ -163,7 +163,7 @@ mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
        struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev);
 
        if (!buf || !len) {
-               wiphy_err(wiphy, "invalid buffer and length\n");
+               mwifiex_dbg(priv->adapter, ERROR, "invalid buffer and length\n");
                return -EFAULT;
        }
 
@@ -172,8 +172,8 @@ mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
            ieee80211_is_probe_resp(mgmt->frame_control)) {
                /* Since we support offload probe resp, we need to skip probe
                 * resp in AP or GO mode */
-               wiphy_dbg(wiphy,
-                         "info: skip to send probe resp in AP or GO mode\n");
+               mwifiex_dbg(priv->adapter, INFO,
+                           "info: skip to send probe resp in AP or GO mode\n");
                return 0;
        }
 
@@ -183,7 +183,8 @@ mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
                            pkt_len + sizeof(pkt_len));
 
        if (!skb) {
-               wiphy_err(wiphy, "allocate skb failed for management frame\n");
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "allocate skb failed for management frame\n");
                return -ENOMEM;
        }
 
@@ -206,7 +207,7 @@ mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
 
        mwifiex_queue_tx_pkt(priv, skb);
 
-       wiphy_dbg(wiphy, "info: management frame transmitted\n");
+       mwifiex_dbg(priv->adapter, INFO, "info: management frame transmitted\n");
        return 0;
 }
 
@@ -231,7 +232,7 @@ mwifiex_cfg80211_mgmt_frame_register(struct wiphy *wiphy,
                mwifiex_send_cmd(priv, HostCmd_CMD_MGMT_FRAME_REG,
                                 HostCmd_ACT_GEN_SET, 0,
                                 &priv->mgmt_frame_mask, false);
-               wiphy_dbg(wiphy, "info: mgmt frame registered\n");
+               mwifiex_dbg(priv->adapter, INFO, "info: mgmt frame registered\n");
        }
 }
 
@@ -248,13 +249,14 @@ mwifiex_cfg80211_remain_on_channel(struct wiphy *wiphy,
        int ret;
 
        if (!chan || !cookie) {
-               wiphy_err(wiphy, "Invalid parameter for ROC\n");
+               mwifiex_dbg(priv->adapter, ERROR, "Invalid parameter for ROC\n");
                return -EINVAL;
        }
 
        if (priv->roc_cfg.cookie) {
-               wiphy_dbg(wiphy, "info: ongoing ROC, cookie = 0x%llx\n",
-                         priv->roc_cfg.cookie);
+               mwifiex_dbg(priv->adapter, INFO,
+                           "info: ongoing ROC, cookie = 0x%llx\n",
+                           priv->roc_cfg.cookie);
                return -EBUSY;
        }
 
@@ -269,7 +271,8 @@ mwifiex_cfg80211_remain_on_channel(struct wiphy *wiphy,
                cfg80211_ready_on_channel(wdev, *cookie, chan,
                                          duration, GFP_ATOMIC);
 
-               wiphy_dbg(wiphy, "info: ROC, cookie = 0x%llx\n", *cookie);
+               mwifiex_dbg(priv->adapter, INFO,
+                           "info: ROC, cookie = 0x%llx\n", *cookie);
        }
 
        return ret;
@@ -298,7 +301,8 @@ mwifiex_cfg80211_cancel_remain_on_channel(struct wiphy *wiphy,
 
                memset(&priv->roc_cfg, 0, sizeof(struct mwifiex_roc_cfg));
 
-               wiphy_dbg(wiphy, "info: cancel ROC, cookie = 0x%llx\n", cookie);
+               mwifiex_dbg(priv->adapter, INFO,
+                           "info: cancel ROC, cookie = 0x%llx\n", cookie);
        }
 
        return ret;
@@ -344,8 +348,8 @@ mwifiex_cfg80211_set_power_mgmt(struct wiphy *wiphy,
        u32 ps_mode;
 
        if (timeout)
-               wiphy_dbg(wiphy,
-                         "info: ignore timeout value for IEEE Power Save\n");
+               mwifiex_dbg(priv->adapter, INFO,
+                           "info: ignore timeout value for IEEE Power Save\n");
 
        ps_mode = enabled;
 
@@ -370,7 +374,7 @@ mwifiex_cfg80211_set_default_key(struct wiphy *wiphy, struct net_device *netdev,
                priv->wep_key_curr_index = key_index;
        } else if (mwifiex_set_encode(priv, NULL, NULL, 0, key_index,
                                      NULL, 0)) {
-               wiphy_err(wiphy, "set default Tx key index\n");
+               mwifiex_dbg(priv->adapter, ERROR, "set default Tx key index\n");
                return -EFAULT;
        }
 
@@ -407,7 +411,7 @@ mwifiex_cfg80211_add_key(struct wiphy *wiphy, struct net_device *netdev,
 
        if (mwifiex_set_encode(priv, params, params->key, params->key_len,
                               key_index, peer_mac, 0)) {
-               wiphy_err(wiphy, "crypto keys added\n");
+               mwifiex_dbg(priv->adapter, ERROR, "crypto keys added\n");
                return -EFAULT;
        }
 
@@ -442,7 +446,8 @@ static int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy)
 
        band = mwifiex_band_to_radio_type(adapter->config_bands);
        if (!wiphy->bands[band]) {
-               wiphy_err(wiphy, "11D: setting domain info in FW\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "11D: setting domain info in FW\n");
                return -1;
        }
 
@@ -493,7 +498,8 @@ static int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy)
 
        if (mwifiex_send_cmd(priv, HostCmd_CMD_802_11D_DOMAIN_INFO,
                             HostCmd_ACT_GEN_SET, 0, NULL, false)) {
-               wiphy_err(wiphy, "11D: setting domain info in FW\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "11D: setting domain info in FW\n");
                return -1;
        }
 
@@ -516,9 +522,9 @@ static void mwifiex_reg_notifier(struct wiphy *wiphy,
        struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
        struct mwifiex_private *priv = mwifiex_get_priv(adapter,
                                                        MWIFIEX_BSS_ROLE_ANY);
-
-       wiphy_dbg(wiphy, "info: cfg80211 regulatory domain callback for %c%c\n",
-                 request->alpha2[0], request->alpha2[1]);
+       mwifiex_dbg(adapter, INFO,
+                   "info: cfg80211 regulatory domain callback for %c%c\n",
+                   request->alpha2[0], request->alpha2[1]);
 
        switch (request->initiator) {
        case NL80211_REGDOM_SET_BY_DRIVER:
@@ -527,8 +533,9 @@ static void mwifiex_reg_notifier(struct wiphy *wiphy,
        case NL80211_REGDOM_SET_BY_COUNTRY_IE:
                break;
        default:
-               wiphy_err(wiphy, "unknown regdom initiator: %d\n",
-                         request->initiator);
+               mwifiex_dbg(adapter, ERROR,
+                           "unknown regdom initiator: %d\n",
+                           request->initiator);
                return;
        }
 
@@ -597,8 +604,8 @@ mwifiex_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
        switch (priv->bss_role) {
        case MWIFIEX_BSS_ROLE_UAP:
                if (priv->bss_started) {
-                       dev_err(adapter->dev,
-                               "cannot change wiphy params when bss started");
+                       mwifiex_dbg(adapter, ERROR,
+                                   "cannot change wiphy params when bss started");
                        return -EINVAL;
                }
 
@@ -622,15 +629,16 @@ mwifiex_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
 
                kfree(bss_cfg);
                if (ret) {
-                       wiphy_err(wiphy, "Failed to set wiphy phy params\n");
+                       mwifiex_dbg(adapter, ERROR,
+                                   "Failed to set wiphy phy params\n");
                        return ret;
                }
                break;
 
                case MWIFIEX_BSS_ROLE_STA:
                if (priv->media_connected) {
-                       dev_err(adapter->dev,
-                               "cannot change wiphy params when connected");
+                       mwifiex_dbg(adapter, ERROR,
+                                   "cannot change wiphy params when connected");
                        return -EINVAL;
                }
                if (changed & WIPHY_PARAM_RTS_THRESHOLD) {
@@ -724,8 +732,8 @@ static int mwifiex_deinit_priv_params(struct mwifiex_private *priv)
        if (mwifiex_send_cmd(priv, HostCmd_CMD_MGMT_FRAME_REG,
                             HostCmd_ACT_GEN_SET, 0,
                             &priv->mgmt_frame_mask, false)) {
-               dev_warn(priv->adapter->dev,
-                        "could not unregister mgmt frame rx\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "could not unregister mgmt frame rx\n");
                return -1;
        }
 
@@ -789,9 +797,9 @@ mwifiex_init_new_priv_params(struct mwifiex_private *priv,
                priv->bss_role = MWIFIEX_BSS_ROLE_UAP;
                break;
        default:
-               dev_err(priv->adapter->dev,
-                       "%s: changing to %d not supported\n",
-                       dev->name, type);
+               mwifiex_dbg(adapter, ERROR,
+                           "%s: changing to %d not supported\n",
+                           dev->name, type);
                return -EOPNOTSUPP;
        }
 
@@ -824,12 +832,13 @@ mwifiex_change_vif_to_p2p(struct net_device *dev,
 
        if (adapter->curr_iface_comb.p2p_intf ==
            adapter->iface_limit.p2p_intf) {
-               dev_err(adapter->dev,
-                       "cannot create multiple P2P ifaces\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "cannot create multiple P2P ifaces\n");
                return -1;
        }
 
-       dev_dbg(priv->adapter->dev, "%s: changing role to p2p\n", dev->name);
+       mwifiex_dbg(adapter, INFO,
+                   "%s: changing role to p2p\n", dev->name);
 
        if (mwifiex_deinit_priv_params(priv))
                return -1;
@@ -846,9 +855,9 @@ mwifiex_change_vif_to_p2p(struct net_device *dev,
                        return -EFAULT;
                break;
        default:
-               dev_err(priv->adapter->dev,
-                       "%s: changing to %d not supported\n",
-                       dev->name, type);
+               mwifiex_dbg(adapter, ERROR,
+                           "%s: changing to %d not supported\n",
+                           dev->name, type);
                return -EOPNOTSUPP;
        }
 
@@ -897,17 +906,17 @@ mwifiex_change_vif_to_sta_adhoc(struct net_device *dev,
             curr_iftype != NL80211_IFTYPE_P2P_GO) &&
            (adapter->curr_iface_comb.sta_intf ==
             adapter->iface_limit.sta_intf)) {
-               dev_err(adapter->dev,
-                       "cannot create multiple station/adhoc ifaces\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "cannot create multiple station/adhoc ifaces\n");
                return -1;
        }
 
        if (type == NL80211_IFTYPE_STATION)
-               dev_notice(adapter->dev,
-                          "%s: changing role to station\n", dev->name);
+               mwifiex_dbg(adapter, INFO,
+                           "%s: changing role to station\n", dev->name);
        else
-               dev_notice(adapter->dev,
-                          "%s: changing role to adhoc\n", dev->name);
+               mwifiex_dbg(adapter, INFO,
+                           "%s: changing role to adhoc\n", dev->name);
 
        if (mwifiex_deinit_priv_params(priv))
                return -1;
@@ -954,12 +963,13 @@ mwifiex_change_vif_to_ap(struct net_device *dev,
 
        if (adapter->curr_iface_comb.uap_intf ==
            adapter->iface_limit.uap_intf) {
-               dev_err(adapter->dev,
-                       "cannot create multiple AP ifaces\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "cannot create multiple AP ifaces\n");
                return -1;
        }
 
-       dev_notice(adapter->dev, "%s: changing role to AP\n", dev->name);
+       mwifiex_dbg(adapter, INFO,
+                   "%s: changing role to AP\n", dev->name);
 
        if (mwifiex_deinit_priv_params(priv))
                return -1;
@@ -1020,12 +1030,14 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
                        return mwifiex_change_vif_to_ap(dev, curr_iftype, type,
                                                        flags, params);
                case NL80211_IFTYPE_UNSPECIFIED:
-                       wiphy_warn(wiphy, "%s: kept type as IBSS\n", dev->name);
+                       mwifiex_dbg(priv->adapter, INFO,
+                                   "%s: kept type as IBSS\n", dev->name);
                case NL80211_IFTYPE_ADHOC:      /* This shouldn't happen */
                        return 0;
                default:
-                       wiphy_err(wiphy, "%s: changing to %d not supported\n",
-                                 dev->name, type);
+                       mwifiex_dbg(priv->adapter, ERROR,
+                                   "%s: changing to %d not supported\n",
+                                   dev->name, type);
                        return -EOPNOTSUPP;
                }
                break;
@@ -1048,12 +1060,14 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
                        return mwifiex_change_vif_to_ap(dev, curr_iftype, type,
                                                        flags, params);
                case NL80211_IFTYPE_UNSPECIFIED:
-                       wiphy_warn(wiphy, "%s: kept type as STA\n", dev->name);
+                       mwifiex_dbg(priv->adapter, INFO,
+                                   "%s: kept type as STA\n", dev->name);
                case NL80211_IFTYPE_STATION:    /* This shouldn't happen */
                        return 0;
                default:
-                       wiphy_err(wiphy, "%s: changing to %d not supported\n",
-                                 dev->name, type);
+                       mwifiex_dbg(priv->adapter, ERROR,
+                                   "%s: changing to %d not supported\n",
+                                   dev->name, type);
                        return -EOPNOTSUPP;
                }
                break;
@@ -1070,12 +1084,14 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
                        return mwifiex_change_vif_to_p2p(dev, curr_iftype,
                                                         type, flags, params);
                case NL80211_IFTYPE_UNSPECIFIED:
-                       wiphy_warn(wiphy, "%s: kept type as AP\n", dev->name);
+                       mwifiex_dbg(priv->adapter, INFO,
+                                   "%s: kept type as AP\n", dev->name);
                case NL80211_IFTYPE_AP:         /* This shouldn't happen */
                        return 0;
                default:
-                       wiphy_err(wiphy, "%s: changing to %d not supported\n",
-                                 dev->name, type);
+                       mwifiex_dbg(priv->adapter, ERROR,
+                                   "%s: changing to %d not supported\n",
+                                   dev->name, type);
                        return -EOPNOTSUPP;
                }
                break;
@@ -1100,19 +1116,22 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
                        return mwifiex_change_vif_to_ap(dev, curr_iftype, type,
                                                        flags, params);
                case NL80211_IFTYPE_UNSPECIFIED:
-                       wiphy_warn(wiphy, "%s: kept type as P2P\n", dev->name);
+                       mwifiex_dbg(priv->adapter, INFO,
+                                   "%s: kept type as P2P\n", dev->name);
                case NL80211_IFTYPE_P2P_CLIENT:
                case NL80211_IFTYPE_P2P_GO:
                        return 0;
                default:
-                       wiphy_err(wiphy, "%s: changing to %d not supported\n",
-                                 dev->name, type);
+                       mwifiex_dbg(priv->adapter, ERROR,
+                                   "%s: changing to %d not supported\n",
+                                   dev->name, type);
                        return -EOPNOTSUPP;
                }
                break;
        default:
-               wiphy_err(wiphy, "%s: unknown iftype: %d\n",
-                         dev->name, dev->ieee80211_ptr->iftype);
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "%s: unknown iftype: %d\n",
+                           dev->name, dev->ieee80211_ptr->iftype);
                return -EOPNOTSUPP;
        }
 
@@ -1206,12 +1225,14 @@ mwifiex_dump_station_info(struct mwifiex_private *priv,
        /* Get signal information from the firmware */
        if (mwifiex_send_cmd(priv, HostCmd_CMD_RSSI_INFO,
                             HostCmd_ACT_GEN_GET, 0, NULL, true)) {
-               dev_err(priv->adapter->dev, "failed to get signal information\n");
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "failed to get signal information\n");
                return -EFAULT;
        }
 
        if (mwifiex_drv_get_data_rate(priv, &rate)) {
-               dev_err(priv->adapter->dev, "getting data rate\n");
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "getting data rate error\n");
                return -EFAULT;
        }
 
@@ -1295,7 +1316,7 @@ mwifiex_cfg80211_dump_survey(struct wiphy *wiphy, struct net_device *dev,
        struct mwifiex_chan_stats *pchan_stats = priv->adapter->chan_stats;
        enum ieee80211_band band;
 
-       dev_dbg(priv->adapter->dev, "dump_survey idx=%d\n", idx);
+       mwifiex_dbg(priv->adapter, DUMP, "dump_survey idx=%d\n", idx);
 
        memset(survey, 0, sizeof(struct survey_info));
 
@@ -1472,8 +1493,8 @@ static int mwifiex_cfg80211_set_bitrate_mask(struct wiphy *wiphy,
        struct mwifiex_adapter *adapter = priv->adapter;
 
        if (!priv->media_connected) {
-               dev_err(adapter->dev,
-                       "Can not set Tx data rate in disconnected state\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "Can not set Tx data rate in disconnected state\n");
                return -EINVAL;
        }
 
@@ -1556,17 +1577,20 @@ static int mwifiex_cfg80211_change_beacon(struct wiphy *wiphy,
        struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
 
        if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_UAP) {
-               wiphy_err(wiphy, "%s: bss_type mismatched\n", __func__);
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "%s: bss_type mismatched\n", __func__);
                return -EINVAL;
        }
 
        if (!priv->bss_started) {
-               wiphy_err(wiphy, "%s: bss not started\n", __func__);
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "%s: bss not started\n", __func__);
                return -EINVAL;
        }
 
        if (mwifiex_set_mgmt_ies(priv, data)) {
-               wiphy_err(wiphy, "%s: setting mgmt ies failed\n", __func__);
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "%s: setting mgmt ies failed\n", __func__);
                return -EFAULT;
        }
 
@@ -1594,7 +1618,8 @@ mwifiex_cfg80211_del_station(struct wiphy *wiphy, struct net_device *dev,
        if (!params->mac || is_broadcast_ether_addr(params->mac))
                return 0;
 
-       wiphy_dbg(wiphy, "%s: mac address %pM\n", __func__, params->mac);
+       mwifiex_dbg(priv->adapter, INFO, "%s: mac address %pM\n",
+                   __func__, params->mac);
 
        eth_zero_addr(deauth_mac);
 
@@ -1687,14 +1712,16 @@ static int mwifiex_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
        mwifiex_abort_cac(priv);
 
        if (mwifiex_del_mgmt_ies(priv))
-               wiphy_err(wiphy, "Failed to delete mgmt IEs!\n");
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "Failed to delete mgmt IEs!\n");
 
        priv->ap_11n_enabled = 0;
        memset(&priv->bss_cfg, 0, sizeof(priv->bss_cfg));
 
        if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_STOP,
                             HostCmd_ACT_GEN_SET, 0, NULL, true)) {
-               wiphy_err(wiphy, "Failed to stop the BSS\n");
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "Failed to stop the BSS\n");
                return -1;
        }
 
@@ -1756,7 +1783,8 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
 
        if (mwifiex_set_secure_params(priv, bss_cfg, params)) {
                kfree(bss_cfg);
-               wiphy_err(wiphy, "Failed to parse secuirty parameters!\n");
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "Failed to parse secuirty parameters!\n");
                return -1;
        }
 
@@ -1778,17 +1806,19 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
        if (mwifiex_is_11h_active(priv) &&
            !cfg80211_chandef_dfs_required(wiphy, &params->chandef,
                                           priv->bss_mode)) {
-               dev_dbg(priv->adapter->dev, "Disable 11h extensions in FW\n");
+               mwifiex_dbg(priv->adapter, INFO,
+                           "Disable 11h extensions in FW\n");
                if (mwifiex_11h_activate(priv, false)) {
-                       dev_err(priv->adapter->dev,
-                               "Failed to disable 11h extensions!!");
+                       mwifiex_dbg(priv->adapter, ERROR,
+                                   "Failed to disable 11h extensions!!");
                        return -1;
                }
                priv->state_11h.is_11h_active = true;
        }
 
        if (mwifiex_config_start_uap(priv, bss_cfg)) {
-               wiphy_err(wiphy, "Failed to start AP\n");
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "Failed to start AP\n");
                kfree(bss_cfg);
                return -1;
        }
@@ -1816,8 +1846,9 @@ mwifiex_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
        if (mwifiex_deauthenticate(priv, NULL))
                return -EFAULT;
 
-       wiphy_dbg(wiphy, "info: successfully disconnected from %pM:"
-               " reason code %d\n", priv->cfg_bssid, reason_code);
+       mwifiex_dbg(priv->adapter, MSG,
+                   "info: successfully disconnected from %pM:\t"
+                   "reason code %d\n", priv->cfg_bssid, reason_code);
 
        eth_zero_addr(priv->cfg_bssid);
        priv->hs2_enabled = false;
@@ -1899,13 +1930,13 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len,
 
        req_ssid.ssid_len = ssid_len;
        if (ssid_len > IEEE80211_MAX_SSID_LEN) {
-               dev_err(priv->adapter->dev, "invalid SSID - aborting\n");
+               mwifiex_dbg(priv->adapter, ERROR, "invalid SSID - aborting\n");
                return -EINVAL;
        }
 
        memcpy(req_ssid.ssid, ssid, ssid_len);
        if (!req_ssid.ssid_len || req_ssid.ssid[0] < 0x20) {
-               dev_err(priv->adapter->dev, "invalid SSID - aborting\n");
+               mwifiex_dbg(priv->adapter, ERROR, "invalid SSID - aborting\n");
                return -EINVAL;
        }
 
@@ -1959,9 +1990,9 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len,
 
        if (sme->key) {
                if (mwifiex_is_alg_wep(priv->sec_info.encryption_mode)) {
-                       dev_dbg(priv->adapter->dev,
-                               "info: setting wep encryption"
-                               " with key len %d\n", sme->key_len);
+                       mwifiex_dbg(priv->adapter, INFO,
+                                   "info: setting wep encryption\t"
+                                   "with key len %d\n", sme->key_len);
                        priv->wep_key_curr_index = sme->key_idx;
                        ret = mwifiex_set_encode(priv, NULL, sme->key,
                                                 sme->key_len, sme->key_idx,
@@ -1978,7 +2009,7 @@ done:
                if (is_scanning_required) {
                        /* Do specific SSID scanning */
                        if (mwifiex_request_scan(priv, &req_ssid)) {
-                               dev_err(priv->adapter->dev, "scan error\n");
+                               mwifiex_dbg(priv->adapter, ERROR, "scan error\n");
                                return -EFAULT;
                        }
                }
@@ -1997,15 +2028,15 @@ done:
 
                if (!bss) {
                        if (is_scanning_required) {
-                               dev_warn(priv->adapter->dev,
-                                        "assoc: requested bss not found in scan results\n");
+                               mwifiex_dbg(priv->adapter, WARN,
+                                           "assoc: requested bss not found in scan results\n");
                                break;
                        }
                        is_scanning_required = 1;
                } else {
-                       dev_dbg(priv->adapter->dev,
-                               "info: trying to associate to '%s' bssid %pM\n",
-                               (char *) req_ssid.ssid, bss->bssid);
+                       mwifiex_dbg(priv->adapter, MSG,
+                                   "info: trying to associate to '%s' bssid %pM\n",
+                                   (char *)req_ssid.ssid, bss->bssid);
                        memcpy(&priv->cfg_bssid, bss->bssid, ETH_ALEN);
                        break;
                }
@@ -2041,26 +2072,29 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
        int ret;
 
        if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_STA) {
-               wiphy_err(wiphy,
-                         "%s: reject infra assoc request in non-STA role\n",
-                         dev->name);
+               mwifiex_dbg(adapter, ERROR,
+                           "%s: reject infra assoc request in non-STA role\n",
+                           dev->name);
                return -EINVAL;
        }
 
        if (priv->wdev.current_bss) {
-               wiphy_warn(wiphy, "%s: already connected\n", dev->name);
+               mwifiex_dbg(adapter, ERROR,
+                           "%s: already connected\n", dev->name);
                return -EALREADY;
        }
 
        if (adapter->surprise_removed || adapter->is_cmd_timedout) {
-               wiphy_err(wiphy,
-                         "%s: Ignore connection. Card removed or FW in bad state\n",
-                         dev->name);
+               mwifiex_dbg(adapter, ERROR,
+                           "%s: Ignore connection.\t"
+                           "Card removed or FW in bad state\n",
+                           dev->name);
                return -EFAULT;
        }
 
-       wiphy_dbg(wiphy, "info: Trying to associate to %s and bssid %pM\n",
-                 (char *) sme->ssid, sme->bssid);
+       mwifiex_dbg(adapter, INFO,
+                   "info: Trying to associate to %s and bssid %pM\n",
+                   (char *)sme->ssid, sme->bssid);
 
        ret = mwifiex_cfg80211_assoc(priv, sme->ssid_len, sme->ssid, sme->bssid,
                                     priv->bss_mode, sme->channel, sme, 0);
@@ -2068,17 +2102,17 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
                cfg80211_connect_result(priv->netdev, priv->cfg_bssid, NULL, 0,
                                        NULL, 0, WLAN_STATUS_SUCCESS,
                                        GFP_KERNEL);
-               dev_dbg(priv->adapter->dev,
-                       "info: associated to bssid %pM successfully\n",
-                       priv->cfg_bssid);
+               mwifiex_dbg(priv->adapter, MSG,
+                           "info: associated to bssid %pM successfully\n",
+                           priv->cfg_bssid);
                if (ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
                    priv->adapter->auto_tdls &&
                    priv->bss_type == MWIFIEX_BSS_TYPE_STA)
                        mwifiex_setup_auto_tdls_timer(priv);
        } else {
-               dev_dbg(priv->adapter->dev,
-                       "info: association to bssid %pM failed\n",
-                       priv->cfg_bssid);
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "info: association to bssid %pM failed\n",
+                           priv->cfg_bssid);
                eth_zero_addr(priv->cfg_bssid);
 
                if (ret > 0)
@@ -2105,7 +2139,6 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
 static int mwifiex_set_ibss_params(struct mwifiex_private *priv,
                                   struct cfg80211_ibss_params *params)
 {
-       struct wiphy *wiphy = priv->wdev.wiphy;
        struct mwifiex_adapter *adapter = priv->adapter;
        int index = 0, i;
        u8 config_bands = 0;
@@ -2162,8 +2195,10 @@ static int mwifiex_set_ibss_params(struct mwifiex_private *priv,
        priv->adhoc_channel = ieee80211_frequency_to_channel(
                                params->chandef.chan->center_freq);
 
-       wiphy_dbg(wiphy, "info: set ibss band %d, chan %d, chan offset %d\n",
-                 config_bands, priv->adhoc_channel, adapter->sec_chan_offset);
+       mwifiex_dbg(adapter, INFO,
+                   "info: set ibss band %d, chan %d, chan offset %d\n",
+                   config_bands, priv->adhoc_channel,
+                   adapter->sec_chan_offset);
 
        return 0;
 }
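
mwifiex_set_ibss_params() above derives the adhoc channel from the
chandef's center frequency via ieee80211_frequency_to_channel(), a real
cfg80211 helper. A worked value for reference (standard 802.11
channelization: 2412 MHz is channel 1, 5180 MHz is channel 36):

#include <net/cfg80211.h>

/* ieee80211_frequency_to_channel() maps a center frequency in MHz to
 * the 802.11 channel number.
 */
static int example_adhoc_channel(void)
{
	return ieee80211_frequency_to_channel(2412);	/* channel 1 */
}
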
@@ -2182,13 +2217,15 @@ mwifiex_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
        int ret = 0;
 
        if (priv->bss_mode != NL80211_IFTYPE_ADHOC) {
-               wiphy_err(wiphy, "request to join ibss received "
-                               "when station is not in ibss mode\n");
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "request to join ibss received\t"
+                           "when station is not in ibss mode\n");
                goto done;
        }
 
-       wiphy_dbg(wiphy, "info: trying to join to %s and bssid %pM\n",
-                 (char *) params->ssid, params->bssid);
+       mwifiex_dbg(priv->adapter, MSG,
+                   "info: trying to join to %s and bssid %pM\n",
+                   (char *)params->ssid, params->bssid);
 
        mwifiex_set_ibss_params(priv, params);
 
@@ -2200,12 +2237,12 @@ done:
        if (!ret) {
                cfg80211_ibss_joined(priv->netdev, priv->cfg_bssid,
                                     params->chandef.chan, GFP_KERNEL);
-               dev_dbg(priv->adapter->dev,
-                       "info: joined/created adhoc network with bssid"
-                       " %pM successfully\n", priv->cfg_bssid);
+               mwifiex_dbg(priv->adapter, MSG,
+                           "info: joined/created adhoc network with bssid\t"
+                           "%pM successfully\n", priv->cfg_bssid);
        } else {
-               dev_dbg(priv->adapter->dev,
-                       "info: failed creating/joining adhoc network\n");
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "info: failed creating/joining adhoc network\n");
        }
 
        return ret;
@@ -2222,8 +2259,8 @@ mwifiex_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
 {
        struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
 
-       wiphy_dbg(wiphy, "info: disconnecting from essid %pM\n",
-                 priv->cfg_bssid);
+       mwifiex_dbg(priv->adapter, MSG, "info: disconnecting from essid %pM\n",
+                   priv->cfg_bssid);
        if (mwifiex_deauthenticate(priv, NULL))
                return -EFAULT;
 
@@ -2250,13 +2287,15 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
        struct ieee_types_header *ie;
        struct mwifiex_user_scan_cfg *user_scan_cfg;
 
-       wiphy_dbg(wiphy, "info: received scan request on %s\n", dev->name);
+       mwifiex_dbg(priv->adapter, CMD,
+                   "info: received scan request on %s\n", dev->name);
 
        /* Block scan request if scan operation or scan cleanup when interface
         * is disabled is in process
         */
        if (priv->scan_request || priv->scan_aborting) {
-               dev_err(priv->adapter->dev, "cmd: Scan already in process..\n");
+               mwifiex_dbg(priv->adapter, WARN,
+                           "cmd: Scan already in process..\n");
                return -EBUSY;
        }
 
@@ -2308,7 +2347,8 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
        ret = mwifiex_scan_networks(priv, user_scan_cfg);
        kfree(user_scan_cfg);
        if (ret) {
-               dev_err(priv->adapter->dev, "scan failed: %d\n", ret);
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "scan failed: %d\n", ret);
                priv->scan_aborting = false;
                priv->scan_request = NULL;
                return ret;
@@ -2454,15 +2494,15 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
        case NL80211_IFTYPE_ADHOC:
                if (adapter->curr_iface_comb.sta_intf ==
                    adapter->iface_limit.sta_intf) {
-                       wiphy_err(wiphy,
-                                 "cannot create multiple sta/adhoc ifaces\n");
+                       mwifiex_dbg(adapter, ERROR,
+                                   "cannot create multiple sta/adhoc ifaces\n");
                        return ERR_PTR(-EINVAL);
                }
 
                priv = mwifiex_get_unused_priv(adapter);
                if (!priv) {
-                       wiphy_err(wiphy,
-                                 "could not get free private struct\n");
+                       mwifiex_dbg(adapter, ERROR,
+                                   "could not get free private struct\n");
                        return ERR_PTR(-EFAULT);
                }
 
@@ -2484,15 +2524,15 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
        case NL80211_IFTYPE_AP:
                if (adapter->curr_iface_comb.uap_intf ==
                    adapter->iface_limit.uap_intf) {
-                       wiphy_err(wiphy,
-                                 "cannot create multiple AP ifaces\n");
+                       mwifiex_dbg(adapter, ERROR,
+                                   "cannot create multiple AP ifaces\n");
                        return ERR_PTR(-EINVAL);
                }
 
                priv = mwifiex_get_unused_priv(adapter);
                if (!priv) {
-                       wiphy_err(wiphy,
-                                 "could not get free private struct\n");
+                       mwifiex_dbg(adapter, ERROR,
+                                   "could not get free private struct\n");
                        return ERR_PTR(-EFAULT);
                }
 
@@ -2511,15 +2551,15 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
        case NL80211_IFTYPE_P2P_CLIENT:
                if (adapter->curr_iface_comb.p2p_intf ==
                    adapter->iface_limit.p2p_intf) {
-                       wiphy_err(wiphy,
-                                 "cannot create multiple P2P ifaces\n");
+                       mwifiex_dbg(adapter, ERROR,
+                                   "cannot create multiple P2P ifaces\n");
                        return ERR_PTR(-EINVAL);
                }
 
                priv = mwifiex_get_unused_priv(adapter);
                if (!priv) {
-                       wiphy_err(wiphy,
-                                 "could not get free private struct\n");
+                       mwifiex_dbg(adapter, ERROR,
+                                   "could not get free private struct\n");
                        return ERR_PTR(-EFAULT);
                }
 
@@ -2550,7 +2590,7 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
 
                break;
        default:
-               wiphy_err(wiphy, "type not supported\n");
+               mwifiex_dbg(adapter, ERROR, "type not supported\n");
                return ERR_PTR(-EINVAL);
        }
 
@@ -2558,7 +2598,8 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
                               name_assign_type, ether_setup,
                               IEEE80211_NUM_ACS, 1);
        if (!dev) {
-               wiphy_err(wiphy, "no memory available for netdevice\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "no memory available for netdevice\n");
                memset(&priv->wdev, 0, sizeof(priv->wdev));
                priv->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED;
                priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
@@ -2599,7 +2640,8 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
 
        /* Register network device */
        if (register_netdevice(dev)) {
-               wiphy_err(wiphy, "cannot register virtual network device\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "cannot register virtual network device\n");
                free_netdev(dev);
                priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
                priv->netdev = NULL;
@@ -2613,7 +2655,8 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
                                                  WQ_MEM_RECLAIM |
                                                  WQ_UNBOUND, 1, name);
        if (!priv->dfs_cac_workqueue) {
-               wiphy_err(wiphy, "cannot register virtual network device\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "cannot register virtual network device\n");
                free_netdev(dev);
                priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
                priv->netdev = NULL;
@@ -2628,7 +2671,8 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
                                                      WQ_HIGHPRI | WQ_UNBOUND |
                                                      WQ_MEM_RECLAIM, 1, name);
        if (!priv->dfs_chan_sw_workqueue) {
-               wiphy_err(wiphy, "cannot register virtual network device\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "cannot register virtual network device\n");
                free_netdev(dev);
                priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
                priv->netdev = NULL;
@@ -2642,7 +2686,8 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
 
        sema_init(&priv->async_sem, 1);
 
-       dev_dbg(adapter->dev, "info: %s: Marvell 802.11 Adapter\n", dev->name);
+       mwifiex_dbg(adapter, INFO,
+                   "info: %s: Marvell 802.11 Adapter\n", dev->name);
 
 #ifdef CONFIG_DEBUG_FS
        mwifiex_dev_debugfs_init(priv);
@@ -2661,7 +2706,7 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
                adapter->curr_iface_comb.p2p_intf++;
                break;
        default:
-               wiphy_err(wiphy, "type not supported\n");
+               mwifiex_dbg(adapter, ERROR, "type not supported\n");
                return ERR_PTR(-EINVAL);
        }
 
@@ -2721,7 +2766,8 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
                adapter->curr_iface_comb.p2p_intf--;
                break;
        default:
-               dev_err(adapter->dev, "del_virtual_intf: type not supported\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "del_virtual_intf: type not supported\n");
                break;
        }
 
@@ -2839,7 +2885,8 @@ static int mwifiex_set_wowlan_mef_entry(struct mwifiex_private *priv,
                if (!mwifiex_is_pattern_supported(&wowlan->patterns[i],
                                        byte_seq,
                                        MWIFIEX_MEF_MAX_BYTESEQ)) {
-                       dev_err(priv->adapter->dev, "Pattern not supported\n");
+                       mwifiex_dbg(priv->adapter, ERROR,
+                                   "Pattern not supported\n");
                        kfree(mef_entry);
                        return -EOPNOTSUPP;
                }
@@ -2954,21 +3001,22 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
        mwifiex_cancel_all_pending_cmd(adapter);
 
        if (!wowlan) {
-               dev_warn(adapter->dev, "None of the WOWLAN triggers enabled\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "None of the WOWLAN triggers enabled\n");
                return 0;
        }
 
        priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA);
 
        if (!priv->media_connected) {
-               dev_warn(adapter->dev,
-                        "Can not configure WOWLAN in disconnected state\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "Can not configure WOWLAN in disconnected state\n");
                return 0;
        }
 
        ret = mwifiex_set_mef_filter(priv, wowlan);
        if (ret) {
-               dev_err(adapter->dev, "Failed to set MEF filter\n");
+               mwifiex_dbg(adapter, ERROR, "Failed to set MEF filter\n");
                return ret;
        }
 
@@ -2981,7 +3029,8 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
                ret = mwifiex_set_hs_params(priv, HostCmd_ACT_GEN_SET,
                                            MWIFIEX_SYNC_CMD, &hs_cfg);
                if (ret) {
-                       dev_err(adapter->dev, "Failed to set HS params\n");
+                       mwifiex_dbg(adapter, ERROR,
+                                   "Failed to set HS params\n");
                        return ret;
                }
        }
@@ -3041,7 +3090,8 @@ mwifiex_fill_coalesce_rule_info(struct mwifiex_private *priv,
                if (!mwifiex_is_pattern_supported(&crule->patterns[i],
                                                  byte_seq,
                                                MWIFIEX_COALESCE_MAX_BYTESEQ)) {
-                       dev_err(priv->adapter->dev, "Pattern not supported\n");
+                       mwifiex_dbg(priv->adapter, ERROR,
+                                   "Pattern not supported\n");
                        return -EOPNOTSUPP;
                }
 
@@ -3050,8 +3100,8 @@ mwifiex_fill_coalesce_rule_info(struct mwifiex_private *priv,
 
                        pkt_type = mwifiex_get_coalesce_pkt_type(byte_seq);
                        if (pkt_type && mrule->pkt_type) {
-                               dev_err(priv->adapter->dev,
-                                       "Multiple packet types not allowed\n");
+                               mwifiex_dbg(priv->adapter, ERROR,
+                                           "Multiple packet types not allowed\n");
                                return -EOPNOTSUPP;
                        } else if (pkt_type) {
                                mrule->pkt_type = pkt_type;
@@ -3074,8 +3124,8 @@ mwifiex_fill_coalesce_rule_info(struct mwifiex_private *priv,
        }
 
        if (!mrule->pkt_type) {
-               dev_err(priv->adapter->dev,
-                       "Packet type can not be determined\n");
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "Packet type can not be determined\n");
                return -EOPNOTSUPP;
        }
 
@@ -3093,8 +3143,8 @@ static int mwifiex_cfg80211_set_coalesce(struct wiphy *wiphy,
 
        memset(&coalesce_cfg, 0, sizeof(coalesce_cfg));
        if (!coalesce) {
-               dev_dbg(adapter->dev,
-                       "Disable coalesce and reset all previous rules\n");
+               mwifiex_dbg(adapter, WARN,
+                           "Disable coalesce and reset all previous rules\n");
                return mwifiex_send_cmd(priv, HostCmd_CMD_COALESCE_CFG,
                                        HostCmd_ACT_GEN_SET, 0,
                                        &coalesce_cfg, true);
@@ -3105,8 +3155,8 @@ static int mwifiex_cfg80211_set_coalesce(struct wiphy *wiphy,
                ret = mwifiex_fill_coalesce_rule_info(priv, &coalesce->rules[i],
                                                      &coalesce_cfg.rule[i]);
                if (ret) {
-                       dev_err(priv->adapter->dev,
-                               "Recheck the patterns provided for rule %d\n",
+                       mwifiex_dbg(adapter, ERROR,
+                                   "Recheck the patterns provided for rule %d\n",
                                i + 1);
                        return ret;
                }
@@ -3138,9 +3188,9 @@ mwifiex_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
 
        switch (action_code) {
        case WLAN_TDLS_SETUP_REQUEST:
-               dev_dbg(priv->adapter->dev,
-                       "Send TDLS Setup Request to %pM status_code=%d\n", peer,
-                        status_code);
+               mwifiex_dbg(priv->adapter, MSG,
+                           "Send TDLS Setup Request to %pM status_code=%d\n",
+                           peer, status_code);
                mwifiex_add_auto_tdls_peer(priv, peer);
                ret = mwifiex_send_tdls_data_frame(priv, peer, action_code,
                                                   dialog_token, status_code,
@@ -3148,45 +3198,45 @@ mwifiex_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
                break;
        case WLAN_TDLS_SETUP_RESPONSE:
                mwifiex_add_auto_tdls_peer(priv, peer);
-               dev_dbg(priv->adapter->dev,
-                       "Send TDLS Setup Response to %pM status_code=%d\n",
-                       peer, status_code);
+               mwifiex_dbg(priv->adapter, MSG,
+                           "Send TDLS Setup Response to %pM status_code=%d\n",
+                           peer, status_code);
                ret = mwifiex_send_tdls_data_frame(priv, peer, action_code,
                                                   dialog_token, status_code,
                                                   extra_ies, extra_ies_len);
                break;
        case WLAN_TDLS_SETUP_CONFIRM:
-               dev_dbg(priv->adapter->dev,
-                       "Send TDLS Confirm to %pM status_code=%d\n", peer,
-                       status_code);
+               mwifiex_dbg(priv->adapter, MSG,
+                           "Send TDLS Confirm to %pM status_code=%d\n", peer,
+                           status_code);
                ret = mwifiex_send_tdls_data_frame(priv, peer, action_code,
                                                   dialog_token, status_code,
                                                   extra_ies, extra_ies_len);
                break;
        case WLAN_TDLS_TEARDOWN:
-               dev_dbg(priv->adapter->dev, "Send TDLS Tear down to %pM\n",
-                       peer);
+               mwifiex_dbg(priv->adapter, MSG,
+                           "Send TDLS Tear down to %pM\n", peer);
                ret = mwifiex_send_tdls_data_frame(priv, peer, action_code,
                                                   dialog_token, status_code,
                                                   extra_ies, extra_ies_len);
                break;
        case WLAN_TDLS_DISCOVERY_REQUEST:
-               dev_dbg(priv->adapter->dev,
-                       "Send TDLS Discovery Request to %pM\n", peer);
+               mwifiex_dbg(priv->adapter, MSG,
+                           "Send TDLS Discovery Request to %pM\n", peer);
                ret = mwifiex_send_tdls_data_frame(priv, peer, action_code,
                                                   dialog_token, status_code,
                                                   extra_ies, extra_ies_len);
                break;
        case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
-               dev_dbg(priv->adapter->dev,
-                       "Send TDLS Discovery Response to %pM\n", peer);
+               mwifiex_dbg(priv->adapter, MSG,
+                           "Send TDLS Discovery Response to %pM\n", peer);
                ret = mwifiex_send_tdls_action_frame(priv, peer, action_code,
                                                   dialog_token, status_code,
                                                   extra_ies, extra_ies_len);
                break;
        default:
-               dev_warn(priv->adapter->dev,
-                        "Unknown TDLS mgmt/action frame %pM\n", peer);
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "Unknown TDLS mgmt/action frame %pM\n", peer);
                ret = -EINVAL;
                break;
        }
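
Each TDLS branch above prints the peer with the kernel's %pM printk
extension, which formats a 6-byte buffer as a colon-separated MAC address,
so no hand-rolled "%02x:%02x:..." formatting is needed. For example:

	u8 peer[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	pr_info("Send TDLS Setup Request to %pM\n", peer);
	/* -> "Send TDLS Setup Request to 00:11:22:33:44:55" */
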
@@ -3208,8 +3258,8 @@ mwifiex_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
        if (!(priv->bss_type == MWIFIEX_BSS_TYPE_STA && priv->media_connected))
                return -ENOTSUPP;
 
-       dev_dbg(priv->adapter->dev,
-               "TDLS peer=%pM, oper=%d\n", peer, action);
+       mwifiex_dbg(priv->adapter, MSG,
+                   "TDLS peer=%pM, oper=%d\n", peer, action);
 
        switch (action) {
        case NL80211_TDLS_ENABLE_LINK:
@@ -3220,22 +3270,22 @@ mwifiex_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
                break;
        case NL80211_TDLS_TEARDOWN:
                /* shouldn't happen! */
-               dev_warn(priv->adapter->dev,
-                        "tdls_oper: teardown from driver not supported\n");
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "tdls_oper: teardown from driver not supported\n");
                return -EINVAL;
        case NL80211_TDLS_SETUP:
                /* shouldn't happen! */
-               dev_warn(priv->adapter->dev,
-                        "tdls_oper: setup from driver not supported\n");
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "tdls_oper: setup from driver not supported\n");
                return -EINVAL;
        case NL80211_TDLS_DISCOVERY_REQ:
                /* shouldn't happen! */
-               dev_warn(priv->adapter->dev,
-                        "tdls_oper: discovery from driver not supported\n");
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "tdls_oper: discovery from driver not supported\n");
                return -EINVAL;
        default:
-               dev_err(priv->adapter->dev,
-                       "tdls_oper: operation not supported\n");
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "tdls_oper: operation not supported\n");
                return -ENOTSUPP;
        }
 
@@ -3268,8 +3318,8 @@ mwifiex_cfg80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
        struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
 
        if (priv->adapter->scan_processing) {
-               dev_err(priv->adapter->dev,
-                       "radar detection: scan in process...\n");
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "radar detection: scan in process...\n");
                return -EBUSY;
        }
 
@@ -3284,8 +3334,8 @@ mwifiex_cfg80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
                                           params->beacon_csa.tail,
                                           params->beacon_csa.tail_len);
        if (!chsw_ie) {
-               dev_err(priv->adapter->dev,
-                       "Could not parse channel switch announcement IE\n");
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "Could not parse channel switch announcement IE\n");
                return -EINVAL;
        }
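
For context, the chsw_ie pointer validated above is presumably obtained by
scanning the CSA beacon tail for the Channel Switch Announcement element
with cfg80211's IE lookup helper, along these lines (a sketch, not the
exact mwifiex code):

	const struct ieee80211_channel_sw_ie *chsw_ie;

	chsw_ie = (void *)cfg80211_find_ie(WLAN_EID_CHANNEL_SWITCH,
					   params->beacon_csa.tail,
					   params->beacon_csa.tail_len);
	if (!chsw_ie)
		return -EINVAL;	/* no CSA element in the beacon tail */
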
 
@@ -3297,10 +3347,12 @@ mwifiex_cfg80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
        }
 
        if (mwifiex_del_mgmt_ies(priv))
-               wiphy_err(wiphy, "Failed to delete mgmt IEs!\n");
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "Failed to delete mgmt IEs!\n");
 
        if (mwifiex_set_mgmt_ies(priv, &params->beacon_csa)) {
-               wiphy_err(wiphy, "%s: setting mgmt ies failed\n", __func__);
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "%s: setting mgmt ies failed\n", __func__);
                return -EFAULT;
        }
 
@@ -3324,16 +3376,17 @@ mwifiex_cfg80211_start_radar_detection(struct wiphy *wiphy,
        struct mwifiex_radar_params radar_params;
 
        if (priv->adapter->scan_processing) {
-               dev_err(priv->adapter->dev,
-                       "radar detection: scan already in process...\n");
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "radar detection: scan already in process...\n");
                return -EBUSY;
        }
 
        if (!mwifiex_is_11h_active(priv)) {
-               dev_dbg(priv->adapter->dev, "Enable 11h extensions in FW\n");
+               mwifiex_dbg(priv->adapter, INFO,
+                           "Enable 11h extensions in FW\n");
                if (mwifiex_11h_activate(priv, true)) {
-                       dev_err(priv->adapter->dev,
-                               "Failed to activate 11h extensions!!");
+                       mwifiex_dbg(priv->adapter, ERROR,
+                                   "Failed to activate 11h extensions!!");
                        return -1;
                }
                priv->state_11h.is_11h_active = true;
@@ -3492,7 +3545,8 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
        wiphy = wiphy_new(&mwifiex_cfg80211_ops,
                          sizeof(struct mwifiex_adapter *));
        if (!wiphy) {
-               dev_err(adapter->dev, "%s: creating new wiphy\n", __func__);
+               mwifiex_dbg(adapter, ERROR,
+                           "%s: creating new wiphy\n", __func__);
                return -ENOMEM;
        }
        wiphy->max_scan_ssids = MWIFIEX_MAX_SSID_LIST_LENGTH;
@@ -3563,20 +3617,22 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
 
        ret = wiphy_register(wiphy);
        if (ret < 0) {
-               dev_err(adapter->dev,
-                       "%s: wiphy_register failed: %d\n", __func__, ret);
+               mwifiex_dbg(adapter, ERROR,
+                           "%s: wiphy_register failed: %d\n", __func__, ret);
                wiphy_free(wiphy);
                return ret;
        }
 
        if (reg_alpha2 && mwifiex_is_valid_alpha2(reg_alpha2)) {
-               wiphy_info(wiphy, "driver hint alpha2: %2.2s\n", reg_alpha2);
+               mwifiex_dbg(adapter, INFO,
+                           "driver hint alpha2: %2.2s\n", reg_alpha2);
                regulatory_hint(wiphy, reg_alpha2);
        } else {
                country_code = mwifiex_11d_code_2_region(adapter->region_code);
                if (country_code)
-                       wiphy_info(wiphy, "ignoring F/W country code %2.2s\n",
-                                  country_code);
+                       mwifiex_dbg(adapter, WARN,
+                                   "ignoring F/W country code %2.2s\n",
+                                   country_code);
        }
 
        mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SNMP_MIB,
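
Note the "%2.2s" in the regulatory messages: ISO 3166 alpha2 country codes
are stored as two bytes with no terminating NUL, so the printf precision is
what keeps the read bounded. For example:

	char alpha2[2] = { 'U', 'S' };		/* no trailing '\0' */

	pr_info("driver hint alpha2: %2.2s\n", alpha2);	/* prints "US" */
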
index e9df8826f12412b9d70d432146688bd80bf6fedc..3ddb8ec676ed3df00a7dbdb7880cd23e9844bc97 100644 (file)
@@ -327,8 +327,9 @@ mwifiex_get_cfp(struct mwifiex_private *priv, u8 band, u16 channel, u32 freq)
                sband = priv->wdev.wiphy->bands[IEEE80211_BAND_5GHZ];
 
        if (!sband) {
-               dev_err(priv->adapter->dev, "%s: cannot find cfp by band %d\n",
-                       __func__, band);
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "%s: cannot find cfp by band %d\n",
+                           __func__, band);
                return cfp;
        }
 
@@ -349,9 +350,10 @@ mwifiex_get_cfp(struct mwifiex_private *priv, u8 band, u16 channel, u32 freq)
                }
        }
        if (i == sband->n_channels) {
-               dev_err(priv->adapter->dev, "%s: cannot find cfp by band %d"
-                       " & channel=%d freq=%d\n", __func__, band, channel,
-                       freq);
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "%s: cannot find cfp by band %d\t"
+                           "& channel=%d freq=%d\n",
+                           __func__, band, channel, freq);
        } else {
                if (!ch)
                        return cfp;
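
Stripped of the mwifiex specifics, the lookup this hunk instruments is a
linear scan of one band's channel table on the wiphy; roughly (a sketch,
assuming lookup by hardware channel number):

	static struct ieee80211_channel *
	find_channel(struct ieee80211_supported_band *sband, u16 hw_value)
	{
		int i;

		for (i = 0; i < sband->n_channels; i++)
			if (sband->channels[i].hw_value == hw_value)
				return &sband->channels[i];

		return NULL;	/* caller logs "cannot find cfp ..." */
	}
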
@@ -431,15 +433,17 @@ u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates)
            priv->bss_mode == NL80211_IFTYPE_P2P_CLIENT) {
                switch (adapter->config_bands) {
                case BAND_B:
-                       dev_dbg(adapter->dev, "info: infra band=%d "
-                               "supported_rates_b\n", adapter->config_bands);
+                       mwifiex_dbg(adapter, INFO, "info: infra band=%d\t"
+                                   "supported_rates_b\n",
+                                   adapter->config_bands);
                        k = mwifiex_copy_rates(rates, k, supported_rates_b,
                                               sizeof(supported_rates_b));
                        break;
                case BAND_G:
                case BAND_G | BAND_GN:
-                       dev_dbg(adapter->dev, "info: infra band=%d "
-                               "supported_rates_g\n", adapter->config_bands);
+                       mwifiex_dbg(adapter, INFO, "info: infra band=%d\t"
+                                   "supported_rates_g\n",
+                                   adapter->config_bands);
                        k = mwifiex_copy_rates(rates, k, supported_rates_g,
                                               sizeof(supported_rates_g));
                        break;
@@ -449,15 +453,17 @@ u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates)
                case BAND_A | BAND_B | BAND_G | BAND_GN | BAND_AN:
                case BAND_A | BAND_B | BAND_G | BAND_GN | BAND_AN | BAND_AAC:
                case BAND_B | BAND_G | BAND_GN:
-                       dev_dbg(adapter->dev, "info: infra band=%d "
-                               "supported_rates_bg\n", adapter->config_bands);
+                       mwifiex_dbg(adapter, INFO, "info: infra band=%d\t"
+                                   "supported_rates_bg\n",
+                                   adapter->config_bands);
                        k = mwifiex_copy_rates(rates, k, supported_rates_bg,
                                               sizeof(supported_rates_bg));
                        break;
                case BAND_A:
                case BAND_A | BAND_G:
-                       dev_dbg(adapter->dev, "info: infra band=%d "
-                               "supported_rates_a\n", adapter->config_bands);
+                       mwifiex_dbg(adapter, INFO, "info: infra band=%d\t"
+                                   "supported_rates_a\n",
+                                   adapter->config_bands);
                        k = mwifiex_copy_rates(rates, k, supported_rates_a,
                                               sizeof(supported_rates_a));
                        break;
@@ -466,14 +472,16 @@ u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates)
                case BAND_A | BAND_AN | BAND_AAC:
                case BAND_A | BAND_G | BAND_AN | BAND_GN:
                case BAND_A | BAND_G | BAND_AN | BAND_GN | BAND_AAC:
-                       dev_dbg(adapter->dev, "info: infra band=%d "
-                               "supported_rates_a\n", adapter->config_bands);
+                       mwifiex_dbg(adapter, INFO, "info: infra band=%d\t"
+                                   "supported_rates_a\n",
+                                   adapter->config_bands);
                        k = mwifiex_copy_rates(rates, k, supported_rates_a,
                                               sizeof(supported_rates_a));
                        break;
                case BAND_GN:
-                       dev_dbg(adapter->dev, "info: infra band=%d "
-                               "supported_rates_n\n", adapter->config_bands);
+                       mwifiex_dbg(adapter, INFO, "info: infra band=%d\t"
+                                   "supported_rates_n\n",
+                                   adapter->config_bands);
                        k = mwifiex_copy_rates(rates, k, supported_rates_n,
                                               sizeof(supported_rates_n));
                        break;
@@ -482,25 +490,25 @@ u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates)
                /* Ad-hoc mode */
                switch (adapter->adhoc_start_band) {
                case BAND_B:
-                       dev_dbg(adapter->dev, "info: adhoc B\n");
+                       mwifiex_dbg(adapter, INFO, "info: adhoc B\n");
                        k = mwifiex_copy_rates(rates, k, adhoc_rates_b,
                                               sizeof(adhoc_rates_b));
                        break;
                case BAND_G:
                case BAND_G | BAND_GN:
-                       dev_dbg(adapter->dev, "info: adhoc G only\n");
+                       mwifiex_dbg(adapter, INFO, "info: adhoc G only\n");
                        k = mwifiex_copy_rates(rates, k, adhoc_rates_g,
                                               sizeof(adhoc_rates_g));
                        break;
                case BAND_B | BAND_G:
                case BAND_B | BAND_G | BAND_GN:
-                       dev_dbg(adapter->dev, "info: adhoc BG\n");
+                       mwifiex_dbg(adapter, INFO, "info: adhoc BG\n");
                        k = mwifiex_copy_rates(rates, k, adhoc_rates_bg,
                                               sizeof(adhoc_rates_bg));
                        break;
                case BAND_A:
                case BAND_A | BAND_AN:
-                       dev_dbg(adapter->dev, "info: adhoc A\n");
+                       mwifiex_dbg(adapter, INFO, "info: adhoc A\n");
                        k = mwifiex_copy_rates(rates, k, adhoc_rates_a,
                                               sizeof(adhoc_rates_a));
                        break;
index c5a14ff7eb82d63dc5a4e1560ff33acfe2cebaac..a1de83fd1dbe4a0c2f535844ef6978fd256d5cea 100644 (file)
@@ -62,7 +62,8 @@ mwifiex_get_cmd_node(struct mwifiex_adapter *adapter)
 
        spin_lock_irqsave(&adapter->cmd_free_q_lock, flags);
        if (list_empty(&adapter->cmd_free_q)) {
-               dev_err(adapter->dev, "GET_CMD_NODE: cmd node not available\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "GET_CMD_NODE: cmd node not available\n");
                spin_unlock_irqrestore(&adapter->cmd_free_q_lock, flags);
                return NULL;
        }
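
The node handed out here comes from a pre-allocated free pool guarded by
cmd_free_q_lock; the surrounding (unchanged) code follows the usual
list_first_entry()/list_del() pattern under spin_lock_irqsave(), roughly
(struct and member names as used in this file):

	spin_lock_irqsave(&adapter->cmd_free_q_lock, flags);
	if (list_empty(&adapter->cmd_free_q)) {
		spin_unlock_irqrestore(&adapter->cmd_free_q_lock, flags);
		return NULL;
	}
	cmd_node = list_first_entry(&adapter->cmd_free_q,
				    struct cmd_ctrl_node, list);
	list_del(&cmd_node->list);
	spin_unlock_irqrestore(&adapter->cmd_free_q_lock, flags);
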
@@ -116,7 +117,8 @@ static int mwifiex_cmd_host_cmd(struct mwifiex_private *priv,
 {
        /* Copy the HOST command to command buffer */
        memcpy(cmd, pcmd_ptr->cmd, pcmd_ptr->len);
-       dev_dbg(priv->adapter->dev, "cmd: host cmd size = %d\n", pcmd_ptr->len);
+       mwifiex_dbg(priv->adapter, CMD,
+                   "cmd: host cmd size = %d\n", pcmd_ptr->len);
        return 0;
 }
 
@@ -147,8 +149,9 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
 
        /* Sanity test */
        if (host_cmd == NULL || host_cmd->size == 0) {
-               dev_err(adapter->dev, "DNLD_CMD: host_cmd is null"
-                       " or cmd size is 0, not sending\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "DNLD_CMD: host_cmd is null\t"
+                           "or cmd size is 0, not sending\n");
                if (cmd_node->wait_q_enabled)
                        adapter->cmd_wait_q.status = -1;
                mwifiex_recycle_cmd_node(adapter, cmd_node);
@@ -161,8 +164,8 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
        if (adapter->hw_status == MWIFIEX_HW_STATUS_RESET &&
            cmd_code != HostCmd_CMD_FUNC_SHUTDOWN &&
            cmd_code != HostCmd_CMD_FUNC_INIT) {
-               dev_err(adapter->dev,
-                       "DNLD_CMD: FW in reset state, ignore cmd %#x\n",
+               mwifiex_dbg(adapter, ERROR,
+                           "DNLD_CMD: FW in reset state, ignore cmd %#x\n",
                        cmd_code);
                if (cmd_node->wait_q_enabled)
                        mwifiex_complete_cmd(adapter, cmd_node);
@@ -197,10 +200,12 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
                 */
                skb_put(cmd_node->cmd_skb, cmd_size - cmd_node->cmd_skb->len);
 
-       dev_dbg(adapter->dev,
-               "cmd: DNLD_CMD: %#x, act %#x, len %d, seqno %#x\n", cmd_code,
-               le16_to_cpu(*(__le16 *) ((u8 *) host_cmd + S_DS_GEN)), cmd_size,
-               le16_to_cpu(host_cmd->seq_num));
+       mwifiex_dbg(adapter, CMD,
+                   "cmd: DNLD_CMD: %#x, act %#x, len %d, seqno %#x\n",
+                   cmd_code,
+                   le16_to_cpu(*(__le16 *)((u8 *)host_cmd + S_DS_GEN)),
+                   cmd_size, le16_to_cpu(host_cmd->seq_num));
+       mwifiex_dbg_dump(adapter, CMD_D, "cmd buffer:", host_cmd, cmd_size);
 
        if (adapter->iface_type == MWIFIEX_USB) {
                tmp = cpu_to_le32(MWIFIEX_USB_TYPE_CMD);
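
The new mwifiex_dbg_dump() companion hex-dumps the whole command buffer
when the corresponding dump bit (CMD_D here) is enabled. A sketch of how
such a macro can sit on top of print_hex_dump(), reusing the illustrative
mask naming from the earlier sketch:

	#define mwifiex_dbg_dump(adapter, mask, str, buf, len)		\
	do {								\
		if ((adapter)->debug_mask & MWIFIEX_DBG_##mask)		\
			print_hex_dump(KERN_DEBUG, str,			\
				       DUMP_PREFIX_OFFSET, 16, 1,	\
				       buf, len, false);		\
	} while (0)
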
@@ -221,7 +226,8 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
        }
 
        if (ret == -1) {
-               dev_err(adapter->dev, "DNLD_CMD: host to card failed\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "DNLD_CMD: host to card failed\n");
                if (adapter->iface_type == MWIFIEX_USB)
                        adapter->cmd_sent = false;
                if (cmd_node->wait_q_enabled)
@@ -280,12 +286,14 @@ static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter)
                                        (adapter->seq_num, priv->bss_num,
                                         priv->bss_type)));
 
-       dev_dbg(adapter->dev,
-               "cmd: DNLD_CMD: %#x, act %#x, len %d, seqno %#x\n",
+       mwifiex_dbg(adapter, CMD,
+                   "cmd: DNLD_CMD: %#x, act %#x, len %d, seqno %#x\n",
                le16_to_cpu(sleep_cfm_buf->command),
                le16_to_cpu(sleep_cfm_buf->action),
                le16_to_cpu(sleep_cfm_buf->size),
                le16_to_cpu(sleep_cfm_buf->seq_num));
+       mwifiex_dbg_dump(adapter, CMD_D, "SLEEP_CFM buffer: ", sleep_cfm_buf,
+                        le16_to_cpu(sleep_cfm_buf->size));
 
        if (adapter->iface_type == MWIFIEX_USB) {
                sleep_cfm_tmp =
@@ -311,7 +319,7 @@ static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter)
        }
 
        if (ret == -1) {
-               dev_err(adapter->dev, "SLEEP_CFM: failed\n");
+               mwifiex_dbg(adapter, ERROR, "SLEEP_CFM: failed\n");
                adapter->dbg.num_cmd_sleep_cfm_host_to_card_failure++;
                return -1;
        }
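
Every header field logged above passes through le16_to_cpu() first: the
firmware interface is little-endian, so command fields are carried as
__le16 and must be byte-swapped on big-endian hosts before they are
printed or compared. For example:

	__le16 wire_size = cpu_to_le16(0x0120);	/* as sent to firmware */
	u16 size = le16_to_cpu(wire_size);	/* CPU byte order */
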
@@ -362,8 +370,9 @@ int mwifiex_alloc_cmd_buffer(struct mwifiex_adapter *adapter)
        for (i = 0; i < MWIFIEX_NUM_OF_CMD_BUFFER; i++) {
                cmd_array[i].skb = dev_alloc_skb(MWIFIEX_SIZE_OF_CMD_BUFFER);
                if (!cmd_array[i].skb) {
-                       dev_err(adapter->dev, "ALLOC_CMD_BUF: out of memory\n");
-                       return -1;
+                       mwifiex_dbg(adapter, ERROR,
+                                   "unable to allocate command buffer\n");
+                       return -ENOMEM;
                }
        }
 
@@ -386,7 +395,8 @@ int mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter)
 
        /* Need to check if cmd pool is allocated or not */
        if (!adapter->cmd_pool) {
-               dev_dbg(adapter->dev, "info: FREE_CMD_BUF: cmd_pool is null\n");
+               mwifiex_dbg(adapter, FATAL,
+                           "info: FREE_CMD_BUF: cmd_pool is null\n");
                return 0;
        }
 
@@ -395,7 +405,8 @@ int mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter)
        /* Release shared memory buffers */
        for (i = 0; i < MWIFIEX_NUM_OF_CMD_BUFFER; i++) {
                if (cmd_array[i].skb) {
-                       dev_dbg(adapter->dev, "cmd: free cmd buffer %d\n", i);
+                       mwifiex_dbg(adapter, CMD,
+                                   "cmd: free cmd buffer %d\n", i);
                        dev_kfree_skb_any(cmd_array[i].skb);
                }
                if (!cmd_array[i].resp_skb)
@@ -409,7 +420,8 @@ int mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter)
        }
        /* Release struct cmd_ctrl_node */
        if (adapter->cmd_pool) {
-               dev_dbg(adapter->dev, "cmd: free cmd pool\n");
+               mwifiex_dbg(adapter, CMD,
+                           "cmd: free cmd pool\n");
                kfree(adapter->cmd_pool);
                adapter->cmd_pool = NULL;
        }
@@ -459,7 +471,8 @@ int mwifiex_process_event(struct mwifiex_adapter *adapter)
                rx_info->bss_type = priv->bss_type;
        }
 
-       dev_dbg(adapter->dev, "EVENT: cause: %#x\n", eventcause);
+       mwifiex_dbg(adapter, EVENT, "EVENT: cause: %#x\n", eventcause);
+       mwifiex_dbg_dump(adapter, EVT_D, "Event Buf:", skb->data, skb->len);
 
        if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
                ret = mwifiex_process_uap_event(priv);
@@ -498,28 +511,33 @@ int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no,
        }
 
        if (adapter->is_suspended) {
-               dev_err(adapter->dev, "PREP_CMD: device in suspended state\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "PREP_CMD: device in suspended state\n");
                return -1;
        }
 
        if (adapter->hs_enabling && cmd_no != HostCmd_CMD_802_11_HS_CFG_ENH) {
-               dev_err(adapter->dev, "PREP_CMD: host entering sleep state\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "PREP_CMD: host entering sleep state\n");
                return -1;
        }
 
        if (adapter->surprise_removed) {
-               dev_err(adapter->dev, "PREP_CMD: card is removed\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "PREP_CMD: card is removed\n");
                return -1;
        }
 
        if (adapter->is_cmd_timedout) {
-               dev_err(adapter->dev, "PREP_CMD: FW is in bad state\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "PREP_CMD: FW is in bad state\n");
                return -1;
        }
 
        if (adapter->hw_status == MWIFIEX_HW_STATUS_RESET) {
                if (cmd_no != HostCmd_CMD_FUNC_INIT) {
-                       dev_err(adapter->dev, "PREP_CMD: FW in reset state\n");
+                       mwifiex_dbg(adapter, ERROR,
+                                   "PREP_CMD: FW in reset state\n");
                        return -1;
                }
        }
@@ -528,7 +546,8 @@ int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no,
        cmd_node = mwifiex_get_cmd_node(adapter);
 
        if (!cmd_node) {
-               dev_err(adapter->dev, "PREP_CMD: no free cmd node\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "PREP_CMD: no free cmd node\n");
                return -1;
        }
 
@@ -536,7 +555,8 @@ int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no,
        mwifiex_init_cmd_node(priv, cmd_node, cmd_oid, data_buf, sync);
 
        if (!cmd_node->cmd_skb) {
-               dev_err(adapter->dev, "PREP_CMD: no free cmd buf\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "PREP_CMD: no free cmd buf\n");
                return -1;
        }
 
@@ -571,7 +591,8 @@ int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no,
 
        /* Return error, since the command preparation failed */
        if (ret) {
-               dev_err(adapter->dev, "PREP_CMD: cmd %#x preparation failed\n",
+               mwifiex_dbg(adapter, ERROR,
+                           "PREP_CMD: cmd %#x preparation failed\n",
                        cmd_no);
                mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
                return -1;
@@ -626,7 +647,8 @@ void mwifiex_recycle_cmd_node(struct mwifiex_adapter *adapter,
        mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
 
        atomic_dec(&adapter->cmd_pending);
-       dev_dbg(adapter->dev, "cmd: FREE_CMD: cmd=%#x, cmd_pending=%d\n",
+       mwifiex_dbg(adapter, CMD,
+                   "cmd: FREE_CMD: cmd=%#x, cmd_pending=%d\n",
                le16_to_cpu(host_cmd->command),
                atomic_read(&adapter->cmd_pending));
 }
@@ -648,7 +670,7 @@ mwifiex_insert_cmd_to_pending_q(struct mwifiex_adapter *adapter,
 
        host_cmd = (struct host_cmd_ds_command *) (cmd_node->cmd_skb->data);
        if (!host_cmd) {
-               dev_err(adapter->dev, "QUEUE_CMD: host_cmd is NULL\n");
+               mwifiex_dbg(adapter, ERROR, "QUEUE_CMD: host_cmd is NULL\n");
                return;
        }
 
@@ -673,7 +695,8 @@ mwifiex_insert_cmd_to_pending_q(struct mwifiex_adapter *adapter,
        spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags);
 
        atomic_inc(&adapter->cmd_pending);
-       dev_dbg(adapter->dev, "cmd: QUEUE_CMD: cmd=%#x, cmd_pending=%d\n",
+       mwifiex_dbg(adapter, CMD,
+                   "cmd: QUEUE_CMD: cmd=%#x, cmd_pending=%d\n",
                command, atomic_read(&adapter->cmd_pending));
 }
 
@@ -699,7 +722,8 @@ int mwifiex_exec_next_cmd(struct mwifiex_adapter *adapter)
 
        /* Check if already in processing */
        if (adapter->curr_cmd) {
-               dev_err(adapter->dev, "EXEC_NEXT_CMD: cmd in processing\n");
+               mwifiex_dbg(adapter, FATAL,
+                           "EXEC_NEXT_CMD: cmd in processing\n");
                return -1;
        }
 
@@ -721,8 +745,9 @@ int mwifiex_exec_next_cmd(struct mwifiex_adapter *adapter)
        priv = cmd_node->priv;
 
        if (adapter->ps_state != PS_STATE_AWAKE) {
-               dev_err(adapter->dev, "%s: cannot send cmd in sleep state,"
-                               " this should not happen\n", __func__);
+               mwifiex_dbg(adapter, ERROR,
+                           "%s: cannot send cmd in sleep state,\t"
+                           "this should not happen\n", __func__);
                spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
                return ret;
        }
@@ -772,8 +797,9 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter)
 
        if (!adapter->curr_cmd || !adapter->curr_cmd->resp_skb) {
                resp = (struct host_cmd_ds_command *) adapter->upld_buf;
-               dev_err(adapter->dev, "CMD_RESP: NULL curr_cmd, %#x\n",
-                       le16_to_cpu(resp->command));
+               mwifiex_dbg(adapter, ERROR,
+                           "CMD_RESP: NULL curr_cmd, %#x\n",
+                           le16_to_cpu(resp->command));
                return -1;
        }
 
@@ -781,8 +807,9 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter)
 
        resp = (struct host_cmd_ds_command *) adapter->curr_cmd->resp_skb->data;
        if (adapter->curr_cmd->cmd_flag & CMD_F_CANCELED) {
-               dev_err(adapter->dev, "CMD_RESP: %#x been canceled\n",
-                       le16_to_cpu(resp->command));
+               mwifiex_dbg(adapter, ERROR,
+                           "CMD_RESP: %#x been canceled\n",
+                           le16_to_cpu(resp->command));
                mwifiex_recycle_cmd_node(adapter, adapter->curr_cmd);
                spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
                adapter->curr_cmd = NULL;
@@ -794,7 +821,8 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter)
                /* Copy original response back to response buffer */
                struct mwifiex_ds_misc_cmd *hostcmd;
                uint16_t size = le16_to_cpu(resp->size);
-               dev_dbg(adapter->dev, "info: host cmd resp size = %d\n", size);
+               mwifiex_dbg(adapter, INFO,
+                           "info: host cmd resp size = %d\n", size);
                size = min_t(u16, size, MWIFIEX_SIZE_OF_CMD_BUFFER);
                if (adapter->curr_cmd->data_buf) {
                        hostcmd = adapter->curr_cmd->data_buf;
@@ -822,13 +850,15 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter)
        adapter->dbg.last_cmd_resp_id[adapter->dbg.last_cmd_resp_index] =
                                                                orig_cmdresp_no;
 
-       dev_dbg(adapter->dev,
-               "cmd: CMD_RESP: 0x%x, result %d, len %d, seqno 0x%x\n",
-               orig_cmdresp_no, cmdresp_result,
-               le16_to_cpu(resp->size), le16_to_cpu(resp->seq_num));
+       mwifiex_dbg(adapter, CMD,
+                   "cmd: CMD_RESP: 0x%x, result %d, len %d, seqno 0x%x\n",
+                   orig_cmdresp_no, cmdresp_result,
+                   le16_to_cpu(resp->size), le16_to_cpu(resp->seq_num));
+       mwifiex_dbg_dump(adapter, CMD_D, "CMD_RESP buffer:", resp,
+                        le16_to_cpu(resp->size));
 
        if (!(orig_cmdresp_no & HostCmd_RET_BIT)) {
-               dev_err(adapter->dev, "CMD_RESP: invalid cmd resp\n");
+               mwifiex_dbg(adapter, ERROR, "CMD_RESP: invalid cmd resp\n");
                if (adapter->curr_cmd->wait_q_enabled)
                        adapter->cmd_wait_q.status = -1;
 
@@ -852,8 +882,9 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter)
        /* Check init command response */
        if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING) {
                if (ret) {
-                       dev_err(adapter->dev, "%s: cmd %#x failed during "
-                               "initialization\n", __func__, cmdresp_no);
+                       mwifiex_dbg(adapter, ERROR,
+                                   "%s: cmd %#x failed during\t"
+                                   "initialization\n", __func__, cmdresp_no);
                        mwifiex_init_fw_complete(adapter);
                        return -1;
                } else if (adapter->last_init_cmd == cmdresp_no)
@@ -888,7 +919,8 @@ mwifiex_cmd_timeout_func(unsigned long function_context)
 
        adapter->is_cmd_timedout = 1;
        if (!adapter->curr_cmd) {
-               dev_dbg(adapter->dev, "cmd: empty curr_cmd\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "cmd: empty curr_cmd\n");
                return;
        }
        cmd_node = adapter->curr_cmd;
@@ -897,47 +929,60 @@ mwifiex_cmd_timeout_func(unsigned long function_context)
                        adapter->dbg.last_cmd_id[adapter->dbg.last_cmd_index];
                adapter->dbg.timeout_cmd_act =
                        adapter->dbg.last_cmd_act[adapter->dbg.last_cmd_index];
-               dev_err(adapter->dev,
-                       "%s: Timeout cmd id = %#x, act = %#x\n", __func__,
-                       adapter->dbg.timeout_cmd_id,
-                       adapter->dbg.timeout_cmd_act);
-
-               dev_err(adapter->dev, "num_data_h2c_failure = %d\n",
-                       adapter->dbg.num_tx_host_to_card_failure);
-               dev_err(adapter->dev, "num_cmd_h2c_failure = %d\n",
-                       adapter->dbg.num_cmd_host_to_card_failure);
-
-               dev_err(adapter->dev, "is_cmd_timedout = %d\n",
-                       adapter->is_cmd_timedout);
-               dev_err(adapter->dev, "num_tx_timeout = %d\n",
-                       adapter->dbg.num_tx_timeout);
-
-               dev_err(adapter->dev, "last_cmd_index = %d\n",
-                       adapter->dbg.last_cmd_index);
-               dev_err(adapter->dev, "last_cmd_id: %*ph\n",
-                       (int)sizeof(adapter->dbg.last_cmd_id),
-                       adapter->dbg.last_cmd_id);
-               dev_err(adapter->dev, "last_cmd_act: %*ph\n",
-                       (int)sizeof(adapter->dbg.last_cmd_act),
-                       adapter->dbg.last_cmd_act);
-
-               dev_err(adapter->dev, "last_cmd_resp_index = %d\n",
-                       adapter->dbg.last_cmd_resp_index);
-               dev_err(adapter->dev, "last_cmd_resp_id: %*ph\n",
-                       (int)sizeof(adapter->dbg.last_cmd_resp_id),
-                       adapter->dbg.last_cmd_resp_id);
-
-               dev_err(adapter->dev, "last_event_index = %d\n",
-                       adapter->dbg.last_event_index);
-               dev_err(adapter->dev, "last_event: %*ph\n",
-                       (int)sizeof(adapter->dbg.last_event),
-                       adapter->dbg.last_event);
-
-               dev_err(adapter->dev, "data_sent=%d cmd_sent=%d\n",
-                       adapter->data_sent, adapter->cmd_sent);
-
-               dev_err(adapter->dev, "ps_mode=%d ps_state=%d\n",
-                       adapter->ps_mode, adapter->ps_state);
+               mwifiex_dbg(adapter, MSG,
+                           "%s: Timeout cmd id = %#x, act = %#x\n", __func__,
+                           adapter->dbg.timeout_cmd_id,
+                           adapter->dbg.timeout_cmd_act);
+
+               mwifiex_dbg(adapter, MSG,
+                           "num_data_h2c_failure = %d\n",
+                           adapter->dbg.num_tx_host_to_card_failure);
+               mwifiex_dbg(adapter, MSG,
+                           "num_cmd_h2c_failure = %d\n",
+                           adapter->dbg.num_cmd_host_to_card_failure);
+
+               mwifiex_dbg(adapter, MSG,
+                           "is_cmd_timedout = %d\n",
+                           adapter->is_cmd_timedout);
+               mwifiex_dbg(adapter, MSG,
+                           "num_tx_timeout = %d\n",
+                           adapter->dbg.num_tx_timeout);
+
+               mwifiex_dbg(adapter, MSG,
+                           "last_cmd_index = %d\n",
+                           adapter->dbg.last_cmd_index);
+               mwifiex_dbg(adapter, MSG,
+                           "last_cmd_id: %*ph\n",
+                           (int)sizeof(adapter->dbg.last_cmd_id),
+                           adapter->dbg.last_cmd_id);
+               mwifiex_dbg(adapter, MSG,
+                           "last_cmd_act: %*ph\n",
+                           (int)sizeof(adapter->dbg.last_cmd_act),
+                           adapter->dbg.last_cmd_act);
+
+               mwifiex_dbg(adapter, MSG,
+                           "last_cmd_resp_index = %d\n",
+                           adapter->dbg.last_cmd_resp_index);
+               mwifiex_dbg(adapter, MSG,
+                           "last_cmd_resp_id: %*ph\n",
+                           (int)sizeof(adapter->dbg.last_cmd_resp_id),
+                           adapter->dbg.last_cmd_resp_id);
+
+               mwifiex_dbg(adapter, MSG,
+                           "last_event_index = %d\n",
+                           adapter->dbg.last_event_index);
+               mwifiex_dbg(adapter, MSG,
+                           "last_event: %*ph\n",
+                           (int)sizeof(adapter->dbg.last_event),
+                           adapter->dbg.last_event);
+
+               mwifiex_dbg(adapter, MSG,
+                           "data_sent=%d cmd_sent=%d\n",
+                           adapter->data_sent, adapter->cmd_sent);
+
+               mwifiex_dbg(adapter, MSG,
+                           "ps_mode=%d ps_state=%d\n",
+                           adapter->ps_mode, adapter->ps_state);
 
                if (cmd_node->wait_q_enabled) {
                        adapter->cmd_wait_q.status = -ETIMEDOUT;
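
The diagnostic block above relies on the %*ph printk extension, which
hex-dumps a small buffer (up to 64 bytes) with the byte count passed as
the field width, avoiding a hand-rolled loop. For example:

	u8 last_ids[4] = { 0x16, 0xa4, 0x07, 0xcd };

	pr_err("last_cmd_id: %*ph\n", (int)sizeof(last_ids), last_ids);
	/* -> "last_cmd_id: 16 a4 07 cd" */
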
@@ -948,8 +993,8 @@ mwifiex_cmd_timeout_func(unsigned long function_context)
        if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING)
                mwifiex_init_fw_complete(adapter);
 
-       if (adapter->if_ops.fw_dump)
-               adapter->if_ops.fw_dump(adapter);
+       if (adapter->if_ops.device_dump)
+               adapter->if_ops.device_dump(adapter);
 
        if (adapter->if_ops.card_reset)
                adapter->if_ops.card_reset(adapter);
@@ -1015,7 +1060,7 @@ mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter)
                        if (!priv)
                                continue;
                        if (priv->scan_request) {
-                               dev_dbg(adapter->dev, "info: aborting scan\n");
+                               mwifiex_dbg(adapter, WARN, "info: aborting scan\n");
                                cfg80211_scan_done(priv->scan_request, 1);
                                priv->scan_request = NULL;
                        }
@@ -1075,7 +1120,7 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
                        if (!priv)
                                continue;
                        if (priv->scan_request) {
-                               dev_dbg(adapter->dev, "info: aborting scan\n");
+                               mwifiex_dbg(adapter, WARN, "info: aborting scan\n");
                                cfg80211_scan_done(priv->scan_request, 1);
                                priv->scan_request = NULL;
                        }
@@ -1100,11 +1145,11 @@ mwifiex_check_ps_cond(struct mwifiex_adapter *adapter)
            !adapter->curr_cmd && !IS_CARD_RX_RCVD(adapter))
                mwifiex_dnld_sleep_confirm_cmd(adapter);
        else
-               dev_dbg(adapter->dev,
-                       "cmd: Delay Sleep Confirm (%s%s%s)\n",
-                       (adapter->cmd_sent) ? "D" : "",
-                       (adapter->curr_cmd) ? "C" : "",
-                       (IS_CARD_RX_RCVD(adapter)) ? "R" : "");
+               mwifiex_dbg(adapter, CMD,
+                           "cmd: Delay Sleep Confirm (%s%s%s)\n",
+                           (adapter->cmd_sent) ? "D" : "",
+                           (adapter->curr_cmd) ? "C" : "",
+                           (IS_CARD_RX_RCVD(adapter)) ? "R" : "");
 }
 
 /*
@@ -1120,15 +1165,18 @@ mwifiex_hs_activated_event(struct mwifiex_private *priv, u8 activated)
                        priv->adapter->hs_activated = true;
                        mwifiex_update_rxreor_flags(priv->adapter,
                                                    RXREOR_FORCE_NO_DROP);
-                       dev_dbg(priv->adapter->dev, "event: hs_activated\n");
+                       mwifiex_dbg(priv->adapter, EVENT,
+                                   "event: hs_activated\n");
                        priv->adapter->hs_activate_wait_q_woken = true;
                        wake_up_interruptible(
                                &priv->adapter->hs_activate_wait_q);
                } else {
-                       dev_dbg(priv->adapter->dev, "event: HS not configured\n");
+                       mwifiex_dbg(priv->adapter, EVENT,
+                                   "event: HS not configured\n");
                }
        } else {
-               dev_dbg(priv->adapter->dev, "event: hs_deactivated\n");
+               mwifiex_dbg(priv->adapter, EVENT,
+                           "event: hs_deactivated\n");
                priv->adapter->hs_activated = false;
        }
 }
@@ -1156,11 +1204,12 @@ int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv,
                mwifiex_hs_activated_event(priv, true);
                return 0;
        } else {
-               dev_dbg(adapter->dev, "cmd: CMD_RESP: HS_CFG cmd reply"
-                       " result=%#x, conditions=0x%x gpio=0x%x gap=0x%x\n",
-                       resp->result, conditions,
-                       phs_cfg->params.hs_config.gpio,
-                       phs_cfg->params.hs_config.gap);
+               mwifiex_dbg(adapter, CMD,
+                           "cmd: CMD_RESP: HS_CFG cmd reply\t"
+                           " result=%#x, conditions=0x%x gpio=0x%x gap=0x%x\n",
+                           resp->result, conditions,
+                           phs_cfg->params.hs_config.gpio,
+                           phs_cfg->params.hs_config.gap);
        }
        if (conditions != HS_CFG_CANCEL) {
                adapter->is_hs_configured = true;
@@ -1182,8 +1231,10 @@ int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv,
 void
 mwifiex_process_hs_config(struct mwifiex_adapter *adapter)
 {
-       dev_dbg(adapter->dev, "info: %s: auto cancelling host sleep"
-               " since there is interrupt from the firmware\n", __func__);
+       mwifiex_dbg(adapter, INFO,
+                   "info: %s: auto cancelling host sleep\t"
+                   "since there is interrupt from the firmware\n",
+                   __func__);
 
        adapter->if_ops.wakeup(adapter);
        adapter->hs_activated = false;
@@ -1212,13 +1263,14 @@ mwifiex_process_sleep_confirm_resp(struct mwifiex_adapter *adapter,
        uint16_t seq_num = le16_to_cpu(cmd->seq_num);
 
        if (!upld_len) {
-               dev_err(adapter->dev, "%s: cmd size is 0\n", __func__);
+               mwifiex_dbg(adapter, ERROR,
+                           "%s: cmd size is 0\n", __func__);
                return;
        }
 
-       dev_dbg(adapter->dev,
-               "cmd: CMD_RESP: 0x%x, result %d, len %d, seqno 0x%x\n",
-               command, result, le16_to_cpu(cmd->size), seq_num);
+       mwifiex_dbg(adapter, CMD,
+                   "cmd: CMD_RESP: 0x%x, result %d, len %d, seqno 0x%x\n",
+                   command, result, le16_to_cpu(cmd->size), seq_num);
 
        /* Get BSS number and corresponding priv */
        priv = mwifiex_get_priv_by_id(adapter, HostCmd_GET_BSS_NO(seq_num),
@@ -1232,15 +1284,16 @@ mwifiex_process_sleep_confirm_resp(struct mwifiex_adapter *adapter,
        command &= HostCmd_CMD_ID_MASK;
 
        if (command != HostCmd_CMD_802_11_PS_MODE_ENH) {
-               dev_err(adapter->dev,
-                       "%s: rcvd unexpected resp for cmd %#x, result = %x\n",
-                       __func__, command, result);
+               mwifiex_dbg(adapter, ERROR,
+                           "%s: rcvd unexpected resp for cmd %#x, result = %x\n",
+                           __func__, command, result);
                return;
        }
 
        if (result) {
-               dev_err(adapter->dev, "%s: sleep confirm cmd failed\n",
-                       __func__);
+               mwifiex_dbg(adapter, ERROR,
+                           "%s: sleep confirm cmd failed\n",
+                           __func__);
                adapter->pm_wakeup_card_req = false;
                adapter->ps_state = PS_STATE_AWAKE;
                return;
@@ -1305,7 +1358,8 @@ int mwifiex_cmd_enh_power_mode(struct mwifiex_private *priv,
                                        sizeof(struct mwifiex_ie_types_header));
                        cmd_size += sizeof(*ps_tlv);
                        tlv += sizeof(*ps_tlv);
-                       dev_dbg(adapter->dev, "cmd: PS Command: Enter PS\n");
+                       mwifiex_dbg(priv->adapter, CMD,
+                                   "cmd: PS Command: Enter PS\n");
                        ps_mode->null_pkt_interval =
                                        cpu_to_le16(adapter->null_pkt_interval);
                        ps_mode->multiple_dtims =
@@ -1335,8 +1389,8 @@ int mwifiex_cmd_enh_power_mode(struct mwifiex_private *priv,
                        tlv += sizeof(*auto_ds_tlv);
                        if (auto_ds)
                                idletime = auto_ds->idle_time;
-                       dev_dbg(priv->adapter->dev,
-                               "cmd: PS Command: Enter Auto Deep Sleep\n");
+                       mwifiex_dbg(priv->adapter, CMD,
+                                   "cmd: PS Command: Enter Auto Deep Sleep\n");
                        auto_ds_tlv->deep_sleep_timeout = cpu_to_le16(idletime);
                }
                cmd->size = cpu_to_le16(cmd_size);
@@ -1363,27 +1417,31 @@ int mwifiex_ret_enh_power_mode(struct mwifiex_private *priv,
        uint16_t auto_ps_bitmap =
                le16_to_cpu(ps_mode->params.ps_bitmap);
 
-       dev_dbg(adapter->dev,
-               "info: %s: PS_MODE cmd reply result=%#x action=%#X\n",
-               __func__, resp->result, action);
+       mwifiex_dbg(adapter, INFO,
+                   "info: %s: PS_MODE cmd reply result=%#x action=%#X\n",
+                   __func__, resp->result, action);
        if (action == EN_AUTO_PS) {
                if (auto_ps_bitmap & BITMAP_AUTO_DS) {
-                       dev_dbg(adapter->dev, "cmd: Enabled auto deep sleep\n");
+                       mwifiex_dbg(adapter, CMD,
+                                   "cmd: Enabled auto deep sleep\n");
                        priv->adapter->is_deep_sleep = true;
                }
                if (auto_ps_bitmap & BITMAP_STA_PS) {
-                       dev_dbg(adapter->dev, "cmd: Enabled STA power save\n");
+                       mwifiex_dbg(adapter, CMD,
+                                   "cmd: Enabled STA power save\n");
                        if (adapter->sleep_period.period)
-                               dev_dbg(adapter->dev,
-                                       "cmd: set to uapsd/pps mode\n");
+                               mwifiex_dbg(adapter, CMD,
+                                           "cmd: set to uapsd/pps mode\n");
                }
        } else if (action == DIS_AUTO_PS) {
                if (ps_bitmap & BITMAP_AUTO_DS) {
                        priv->adapter->is_deep_sleep = false;
-                       dev_dbg(adapter->dev, "cmd: Disabled auto deep sleep\n");
+                       mwifiex_dbg(adapter, CMD,
+                                   "cmd: Disabled auto deep sleep\n");
                }
                if (ps_bitmap & BITMAP_STA_PS) {
-                       dev_dbg(adapter->dev, "cmd: Disabled STA power save\n");
+                       mwifiex_dbg(adapter, CMD,
+                                   "cmd: Disabled STA power save\n");
                        if (adapter->sleep_period.period) {
                                adapter->delay_null_pkt = false;
                                adapter->tx_lock_flag = false;
@@ -1396,7 +1454,8 @@ int mwifiex_ret_enh_power_mode(struct mwifiex_private *priv,
                else
                        adapter->ps_mode = MWIFIEX_802_11_POWER_MODE_CAM;
 
-               dev_dbg(adapter->dev, "cmd: ps_bitmap=%#x\n", ps_bitmap);
+               mwifiex_dbg(adapter, CMD,
+                           "cmd: ps_bitmap=%#x\n", ps_bitmap);
 
                if (pm_cfg) {
                        /* This section is for get power save mode */
@@ -1533,29 +1592,29 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv,
                                                        api_rev->major_ver;
                                        adapter->key_api_minor_ver =
                                                        api_rev->minor_ver;
-                                       dev_dbg(adapter->dev,
-                                               "key_api v%d.%d\n",
-                                               adapter->key_api_major_ver,
-                                               adapter->key_api_minor_ver);
+                                       mwifiex_dbg(adapter, INFO,
+                                                   "key_api v%d.%d\n",
+                                                   adapter->key_api_major_ver,
+                                                   adapter->key_api_minor_ver);
                                        break;
                                case FW_API_VER_ID:
                                        adapter->fw_api_ver =
                                                        api_rev->major_ver;
-                                       dev_dbg(adapter->dev,
-                                               "Firmware api version %d\n",
-                                               adapter->fw_api_ver);
+                                       mwifiex_dbg(adapter, INFO,
+                                                   "Firmware api version %d\n",
+                                                   adapter->fw_api_ver);
                                        break;
                                default:
-                                       dev_warn(adapter->dev,
-                                                "Unknown api_id: %d\n",
-                                                api_id);
+                                       mwifiex_dbg(adapter, FATAL,
+                                                   "Unknown api_id: %d\n",
+                                                   api_id);
                                        break;
                                }
                                break;
                        default:
-                               dev_warn(adapter->dev,
-                                        "Unknown GET_HW_SPEC TLV type: %#x\n",
-                                        le16_to_cpu(tlv->type));
+                               mwifiex_dbg(adapter, FATAL,
+                                           "Unknown GET_HW_SPEC TLV type: %#x\n",
+                                           le16_to_cpu(tlv->type));
                                break;
                        }
                        parsed_len += le16_to_cpu(tlv->len) +
@@ -1565,14 +1624,16 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv,
                }
        }
 
-       dev_dbg(adapter->dev, "info: GET_HW_SPEC: fw_release_number- %#x\n",
-               adapter->fw_release_number);
-       dev_dbg(adapter->dev, "info: GET_HW_SPEC: permanent addr: %pM\n",
-               hw_spec->permanent_addr);
-       dev_dbg(adapter->dev,
-               "info: GET_HW_SPEC: hw_if_version=%#x version=%#x\n",
-               le16_to_cpu(hw_spec->hw_if_version),
-               le16_to_cpu(hw_spec->version));
+       mwifiex_dbg(adapter, INFO,
+                   "info: GET_HW_SPEC: fw_release_number- %#x\n",
+                   adapter->fw_release_number);
+       mwifiex_dbg(adapter, INFO,
+                   "info: GET_HW_SPEC: permanent addr: %pM\n",
+                   hw_spec->permanent_addr);
+       mwifiex_dbg(adapter, INFO,
+                   "info: GET_HW_SPEC: hw_if_version=%#x version=%#x\n",
+                   le16_to_cpu(hw_spec->hw_if_version),
+                   le16_to_cpu(hw_spec->version));
 
        ether_addr_copy(priv->adapter->perm_addr, hw_spec->permanent_addr);
        adapter->region_code = le16_to_cpu(hw_spec->region_code);
@@ -1585,8 +1646,8 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv,
        /* If it's unidentified region code, use the default (USA) */
        if (i >= MWIFIEX_MAX_REGION_CODE) {
                adapter->region_code = 0x10;
-               dev_dbg(adapter->dev,
-                       "cmd: unknown region code, use default (USA)\n");
+               mwifiex_dbg(adapter, WARN,
+                           "cmd: unknown region code, use default (USA)\n");
        }
 
        adapter->hw_dot_11n_dev_cap = le32_to_cpu(hw_spec->dot_11n_dev_cap);
index 1fb329dc6744580c6834f03a52d2f71424db8392..5a0636d43a1b9722b4be1e95b7528aa0ae6368f9 100644 (file)
@@ -152,24 +152,24 @@ free_and_exit:
 }
 
 /*
- * Proc firmware dump read handler.
+ * Proc device dump read handler.
  *
- * This function is called when the 'fw_dump' file is opened for
+ * This function is called when the 'device_dump' file is opened for
  * reading.
- * This function dumps firmware memory in different files
- * (ex. DTCM, ITCM, SQRAM etc.) based on the the segments for
+ * This function dumps driver information and firmware memory segments
+ * (e.g. DTCM, ITCM, SQRAM) for
  * debugging.
  */
 static ssize_t
-mwifiex_fw_dump_read(struct file *file, char __user *ubuf,
-                    size_t count, loff_t *ppos)
+mwifiex_device_dump_read(struct file *file, char __user *ubuf,
+                        size_t count, loff_t *ppos)
 {
        struct mwifiex_private *priv = file->private_data;
 
-       if (!priv->adapter->if_ops.fw_dump)
+       if (!priv->adapter->if_ops.device_dump)
                return -EIO;
 
-       priv->adapter->if_ops.fw_dump(priv->adapter);
+       priv->adapter->if_ops.device_dump(priv->adapter);
 
        return 0;
 }
@@ -535,6 +535,144 @@ done:
        return ret;
 }
 
+/* Proc debug_mask file read handler.
+ * This function is called when the 'debug_mask' file is opened for reading.
+ * This function can be used to read the driver debugging mask value.
+ */
+static ssize_t
+mwifiex_debug_mask_read(struct file *file, char __user *ubuf,
+                       size_t count, loff_t *ppos)
+{
+       struct mwifiex_private *priv =
+               (struct mwifiex_private *)file->private_data;
+       unsigned long page = get_zeroed_page(GFP_KERNEL);
+       char *buf = (char *)page;
+       size_t ret = 0;
+       int pos = 0;
+
+       if (!buf)
+               return -ENOMEM;
+
+       pos += snprintf(buf, PAGE_SIZE, "debug mask=0x%08x\n",
+                       priv->adapter->debug_mask);
+       ret = simple_read_from_buffer(ubuf, count, ppos, buf, pos);
+
+       free_page(page);
+       return ret;
+}
+
+/* Proc debug_mask file write handler.
+ * This function is called when the 'debug_mask' file is opened for writing.
+ * This function can be used to set the driver debugging mask value.
+ */
+static ssize_t
+mwifiex_debug_mask_write(struct file *file, const char __user *ubuf,
+                        size_t count, loff_t *ppos)
+{
+       int ret;
+       unsigned long debug_mask;
+       struct mwifiex_private *priv = (void *)file->private_data;
+       unsigned long addr = get_zeroed_page(GFP_KERNEL);
+       char *buf = (void *)addr;
+       size_t buf_size = min(count, (size_t)(PAGE_SIZE - 1));
+
+       if (!buf)
+               return -ENOMEM;
+
+       if (copy_from_user(buf, ubuf, buf_size)) {
+               ret = -EFAULT;
+               goto done;
+       }
+
+       if (kstrtoul(buf, 0, &debug_mask)) {
+               ret = -EINVAL;
+               goto done;
+       }
+
+       priv->adapter->debug_mask = debug_mask;
+       ret = count;
+done:
+       free_page(addr);
+       return ret;
+}
+
+/* Proc memrw file write handler.
+ * This function is called when the 'memrw' file is opened for writing.
+ * This function can be used to write to a memory location.
+ */
+static ssize_t
+mwifiex_memrw_write(struct file *file, const char __user *ubuf, size_t count,
+                   loff_t *ppos)
+{
+       int ret;
+       char cmd;
+       struct mwifiex_ds_mem_rw mem_rw;
+       u16 cmd_action;
+       struct mwifiex_private *priv = (void *)file->private_data;
+       unsigned long addr = get_zeroed_page(GFP_KERNEL);
+       char *buf = (void *)addr;
+       size_t buf_size = min(count, (size_t)(PAGE_SIZE - 1));
+
+       if (!buf)
+               return -ENOMEM;
+
+       if (copy_from_user(buf, ubuf, buf_size)) {
+               ret = -EFAULT;
+               goto done;
+       }
+
+       ret = sscanf(buf, "%c %x %x", &cmd, &mem_rw.addr, &mem_rw.value);
+       if (ret != 3) {
+               ret = -EINVAL;
+               goto done;
+       }
+
+       if ((cmd == 'r') || (cmd == 'R')) {
+               cmd_action = HostCmd_ACT_GEN_GET;
+               mem_rw.value = 0;
+       } else if ((cmd == 'w') || (cmd == 'W')) {
+               cmd_action = HostCmd_ACT_GEN_SET;
+       } else {
+               ret = -EINVAL;
+               goto done;
+       }
+
+       memcpy(&priv->mem_rw, &mem_rw, sizeof(mem_rw));
+       if (mwifiex_send_cmd(priv, HostCmd_CMD_MEM_ACCESS, cmd_action, 0,
+                            &mem_rw, true))
+               ret = -1;
+       else
+               ret = count;
+
+done:
+       free_page(addr);
+       return ret;
+}
+
+/* Proc memrw file read handler.
+ * This function is called when the 'memrw' file is opened for reading.
+ * It returns the address/value pair of the last memory access.
+ */
+static ssize_t
+mwifiex_memrw_read(struct file *file, char __user *ubuf,
+                  size_t count, loff_t *ppos)
+{
+       struct mwifiex_private *priv = (void *)file->private_data;
+       unsigned long addr = get_zeroed_page(GFP_KERNEL);
+       char *buf = (char *)addr;
+       int ret, pos = 0;
+
+       if (!buf)
+               return -ENOMEM;
+
+       pos += snprintf(buf, PAGE_SIZE, "0x%x 0x%x\n", priv->mem_rw.addr,
+                       priv->mem_rw.value);
+       ret = simple_read_from_buffer(ubuf, count, ppos, buf, pos);
+
+       free_page(addr);
+       return ret;
+}
+
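The memrw pair works the same way: writing "<r|w> <addr> <value>" issues a HostCmd_CMD_MEM_ACCESS to the firmware, and a subsequent read returns the cached address/value pair (the value for a read is filled in by the command response handler, which is not part of this hunk). A hedged sketch, with the path and the 0x80002000 address purely illustrative:

#include <stdio.h>

int main(void)
{
	/* Path and address are illustrative only. */
	const char *path = "/sys/kernel/debug/mwifiex/mlan0/memrw";
	char line[64];
	FILE *f;

	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return 1;
	}
	/* 'r' selects HostCmd_ACT_GEN_GET; the value field is ignored */
	fputs("r 0x80002000 0", f);
	fclose(f);

	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	if (fgets(line, sizeof(line), f))
		printf("%s", line);	/* "0x80002000 0x<firmware value>" */
	fclose(f);
	return 0;
}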
 static u32 saved_offset = -1, saved_bytes = -1;
 
 /*
@@ -654,7 +792,8 @@ mwifiex_hscfg_write(struct file *file, const char __user *ubuf,
        memset(&hscfg, 0, sizeof(struct mwifiex_ds_hs_cfg));
 
        if (arg_num > 3) {
-               dev_err(priv->adapter->dev, "Too many arguments\n");
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "Too many arguments\n");
                ret = -EINVAL;
                goto done;
        }
@@ -746,11 +885,13 @@ static const struct file_operations mwifiex_dfs_##name##_fops = {       \
 MWIFIEX_DFS_FILE_READ_OPS(info);
 MWIFIEX_DFS_FILE_READ_OPS(debug);
 MWIFIEX_DFS_FILE_READ_OPS(getlog);
-MWIFIEX_DFS_FILE_READ_OPS(fw_dump);
+MWIFIEX_DFS_FILE_READ_OPS(device_dump);
 MWIFIEX_DFS_FILE_OPS(regrdwr);
 MWIFIEX_DFS_FILE_OPS(rdeeprom);
+MWIFIEX_DFS_FILE_OPS(memrw);
 MWIFIEX_DFS_FILE_OPS(hscfg);
 MWIFIEX_DFS_FILE_OPS(histogram);
+MWIFIEX_DFS_FILE_OPS(debug_mask);
 
 /*
  * This function creates the debug FS directory structure and the files.
@@ -772,9 +913,11 @@ mwifiex_dev_debugfs_init(struct mwifiex_private *priv)
        MWIFIEX_DFS_ADD_FILE(getlog);
        MWIFIEX_DFS_ADD_FILE(regrdwr);
        MWIFIEX_DFS_ADD_FILE(rdeeprom);
-       MWIFIEX_DFS_ADD_FILE(fw_dump);
+       MWIFIEX_DFS_ADD_FILE(device_dump);
+       MWIFIEX_DFS_ADD_FILE(memrw);
        MWIFIEX_DFS_ADD_FILE(hscfg);
        MWIFIEX_DFS_ADD_FILE(histogram);
+       MWIFIEX_DFS_ADD_FILE(debug_mask);
 }
 
 /*
index 65d8d6d4b6ba3db2cfa3236e679cfcef6a307905..58400c69ab26adfb7da1fc82f7acf84acfcead2e 100644 (file)
@@ -64,104 +64,7 @@ static int mwifiex_ethtool_set_wol(struct net_device *dev,
        return 0;
 }
 
-static int
-mwifiex_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump)
-{
-       struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
-       struct mwifiex_adapter *adapter = priv->adapter;
-       struct memory_type_mapping *entry;
-
-       if (!adapter->if_ops.fw_dump)
-               return -ENOTSUPP;
-
-       dump->flag = adapter->curr_mem_idx;
-       dump->version = 1;
-       if (adapter->curr_mem_idx == MWIFIEX_DRV_INFO_IDX) {
-               dump->len = adapter->drv_info_size;
-       } else if (adapter->curr_mem_idx != MWIFIEX_FW_DUMP_IDX) {
-               entry = &adapter->mem_type_mapping_tbl[adapter->curr_mem_idx];
-               dump->len = entry->mem_size;
-       } else {
-               dump->len = 0;
-       }
-
-       return 0;
-}
-
-static int
-mwifiex_get_dump_data(struct net_device *dev, struct ethtool_dump *dump,
-                     void *buffer)
-{
-       u8 *p = buffer;
-       struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
-       struct mwifiex_adapter *adapter = priv->adapter;
-       struct memory_type_mapping *entry;
-
-       if (!adapter->if_ops.fw_dump)
-               return -ENOTSUPP;
-
-       if (adapter->curr_mem_idx == MWIFIEX_DRV_INFO_IDX) {
-               if (!adapter->drv_info_dump)
-                       return -EFAULT;
-               memcpy(p, adapter->drv_info_dump, adapter->drv_info_size);
-               return 0;
-       }
-
-       if (adapter->curr_mem_idx == MWIFIEX_FW_DUMP_IDX) {
-               dev_err(adapter->dev, "firmware dump in progress!!\n");
-               return -EBUSY;
-       }
-
-       entry = &adapter->mem_type_mapping_tbl[adapter->curr_mem_idx];
-
-       if (!entry->mem_ptr)
-               return -EFAULT;
-
-       memcpy(p, entry->mem_ptr, entry->mem_size);
-
-       entry->mem_size = 0;
-       vfree(entry->mem_ptr);
-       entry->mem_ptr = NULL;
-
-       return 0;
-}
-
-static int mwifiex_set_dump(struct net_device *dev, struct ethtool_dump *val)
-{
-       struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
-       struct mwifiex_adapter *adapter = priv->adapter;
-
-       if (!adapter->if_ops.fw_dump)
-               return -ENOTSUPP;
-
-       if (val->flag == MWIFIEX_DRV_INFO_IDX) {
-               adapter->curr_mem_idx = MWIFIEX_DRV_INFO_IDX;
-               return 0;
-       }
-
-       if (adapter->curr_mem_idx == MWIFIEX_FW_DUMP_IDX) {
-               dev_err(adapter->dev, "firmware dump in progress!!\n");
-               return -EBUSY;
-       }
-
-       if (val->flag == MWIFIEX_FW_DUMP_IDX) {
-               adapter->curr_mem_idx = val->flag;
-               adapter->if_ops.fw_dump(adapter);
-               return 0;
-       }
-
-       if (val->flag < 0 || val->flag >= adapter->num_mem_types)
-               return -EINVAL;
-
-       adapter->curr_mem_idx = val->flag;
-
-       return 0;
-}
-
 const struct ethtool_ops mwifiex_ethtool_ops = {
        .get_wol = mwifiex_ethtool_get_wol,
        .set_wol = mwifiex_ethtool_set_wol,
-       .get_dump_flag = mwifiex_get_dump_flag,
-       .get_dump_data = mwifiex_get_dump_data,
-       .set_dump = mwifiex_set_dump,
 };
index 59d8964dd0dcaaadc39d0c09f872fe46c5488c4d..c404390cb0fa42b8d7778fcf23c121600ee48811 100644 (file)
@@ -323,6 +323,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 #define HostCmd_CMD_802_11_SUBSCRIBE_EVENT            0x0075
 #define HostCmd_CMD_802_11_TX_RATE_QUERY              0x007f
 #define HostCmd_CMD_802_11_IBSS_COALESCING_STATUS     0x0083
+#define HostCmd_CMD_MEM_ACCESS                        0x0086
 #define HostCmd_CMD_CFG_DATA                          0x008f
 #define HostCmd_CMD_VERSION_EXT                       0x0097
 #define HostCmd_CMD_MEF_CFG                           0x009a
@@ -1576,6 +1577,13 @@ struct mwifiex_ie_types_extcap {
        u8 ext_capab[0];
 } __packed;
 
+struct host_cmd_ds_mem_access {
+       __le16 action;
+       __le16 reserved;
+       __le32 addr;
+       __le32 value;
+};
+
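The command handler that serializes this structure is not part of this hunk. As a sketch only, following the pattern of the neighbouring register-access commands, the body would be filled roughly like this (helper name hypothetical):

/* Sketch only, not part of this patch: fill the MEM_ACCESS command
 * body from a mwifiex_ds_mem_rw request, converting to the little
 * endian wire format like the other register-access handlers.
 */
static int mwifiex_cmd_mem_access(struct host_cmd_ds_command *cmd,
				  u16 cmd_action, void *data_buf)
{
	struct mwifiex_ds_mem_rw *mem_rw = data_buf;
	struct host_cmd_ds_mem_access *mem_access = &cmd->params.mem;

	cmd->command = cpu_to_le16(HostCmd_CMD_MEM_ACCESS);
	cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_mem_access) +
				S_DS_GEN);

	mem_access->action = cpu_to_le16(cmd_action);
	mem_access->addr = cpu_to_le32(mem_rw->addr);
	mem_access->value = cpu_to_le32(mem_rw->value);

	return 0;
}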
 struct mwifiex_ie_types_qos_info {
        struct mwifiex_ie_types_header header;
        u8 qos_info;
@@ -1958,6 +1966,7 @@ struct host_cmd_ds_command {
                struct host_cmd_ds_p2p_mode_cfg mode_cfg;
                struct host_cmd_ds_802_11_ibss_status ibss_coalescing;
                struct host_cmd_ds_mef_cfg mef_cfg;
+               struct host_cmd_ds_mem_access mem;
                struct host_cmd_ds_mac_reg_access mac_reg;
                struct host_cmd_ds_bbp_reg_access bbp_reg;
                struct host_cmd_ds_rf_reg_access rf_reg;
index e12192f5cfad306b8cd9d4e5fce7ec2bd2e67957..df7fdc09d38c7f1e326b59ad8fd0a9ea53485f74 100644 (file)
@@ -56,7 +56,7 @@ static void wakeup_timer_fn(unsigned long data)
 {
        struct mwifiex_adapter *adapter = (struct mwifiex_adapter *)data;
 
-       dev_err(adapter->dev, "Firmware wakeup failed\n");
+       mwifiex_dbg(adapter, ERROR, "Firmware wakeup failed\n");
        adapter->hw_status = MWIFIEX_HW_STATUS_RESET;
        mwifiex_cancel_all_pending_cmd(adapter);
 
@@ -172,8 +172,9 @@ static int mwifiex_allocate_adapter(struct mwifiex_adapter *adapter)
        /* Allocate command buffer */
        ret = mwifiex_alloc_cmd_buffer(adapter);
        if (ret) {
-               dev_err(adapter->dev, "%s: failed to alloc cmd buffer\n",
-                       __func__);
+               mwifiex_dbg(adapter, ERROR,
+                           "%s: failed to alloc cmd buffer\n",
+                           __func__);
                return -1;
        }
 
@@ -182,8 +183,9 @@ static int mwifiex_allocate_adapter(struct mwifiex_adapter *adapter)
                              + INTF_HEADER_LEN);
 
        if (!adapter->sleep_cfm) {
-               dev_err(adapter->dev, "%s: failed to alloc sleep cfm"
-                       " cmd buffer\n", __func__);
+               mwifiex_dbg(adapter, ERROR,
+                           "%s: failed to alloc sleep cfm\t"
+                           " cmd buffer\n", __func__);
                return -1;
        }
        skb_reserve(adapter->sleep_cfm, INTF_HEADER_LEN);
@@ -417,7 +419,7 @@ mwifiex_adapter_cleanup(struct mwifiex_adapter *adapter)
        mwifiex_free_lock_list(adapter);
 
        /* Free command buffer */
-       dev_dbg(adapter->dev, "info: free cmd buffer\n");
+       mwifiex_dbg(adapter, INFO, "info: free cmd buffer\n");
        mwifiex_free_cmd_buffer(adapter);
 
        for (idx = 0; idx < adapter->num_mem_types; idx++) {
@@ -433,6 +435,7 @@ mwifiex_adapter_cleanup(struct mwifiex_adapter *adapter)
 
        if (adapter->drv_info_dump) {
                vfree(adapter->drv_info_dump);
+               adapter->drv_info_dump = NULL;
                adapter->drv_info_size = 0;
        }
 
@@ -595,10 +598,11 @@ static void mwifiex_delete_bss_prio_tbl(struct mwifiex_private *priv)
        for (i = 0; i < adapter->priv_num; ++i) {
                head = &adapter->bss_prio_tbl[i].bss_prio_head;
                lock = &adapter->bss_prio_tbl[i].bss_prio_lock;
-               dev_dbg(adapter->dev, "info: delete BSS priority table,"
-                               " bss_type = %d, bss_num = %d, i = %d,"
-                               " head = %p\n",
-                             priv->bss_type, priv->bss_num, i, head);
+               mwifiex_dbg(adapter, INFO,
+                           "info: delete BSS priority table,\t"
+                           "bss_type = %d, bss_num = %d, i = %d,\t"
+                           "head = %p\n",
+                           priv->bss_type, priv->bss_num, i, head);
 
                {
                        spin_lock_irqsave(lock, flags);
@@ -609,9 +613,10 @@ static void mwifiex_delete_bss_prio_tbl(struct mwifiex_private *priv)
                        list_for_each_entry_safe(bssprio_node, tmp_node, head,
                                                 list) {
                                if (bssprio_node->priv == priv) {
-                                       dev_dbg(adapter->dev, "info: Delete "
-                                               "node %p, next = %p\n",
-                                               bssprio_node, tmp_node);
+                                       mwifiex_dbg(adapter, INFO,
+                                                   "info: Delete\t"
+                                                   "node %p, next = %p\n",
+                                                   bssprio_node, tmp_node);
                                        list_del(&bssprio_node->list);
                                        kfree(bssprio_node);
                                }
@@ -659,20 +664,23 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter)
        adapter->hw_status = MWIFIEX_HW_STATUS_CLOSING;
        /* wait for mwifiex_process to complete */
        if (adapter->mwifiex_processing) {
-               dev_warn(adapter->dev, "main process is still running\n");
+               mwifiex_dbg(adapter, WARN,
+                           "main process is still running\n");
                return ret;
        }
 
        /* cancel current command */
        if (adapter->curr_cmd) {
-               dev_warn(adapter->dev, "curr_cmd is still in processing\n");
+               mwifiex_dbg(adapter, WARN,
+                           "curr_cmd is still in processing\n");
                del_timer_sync(&adapter->cmd_timer);
                mwifiex_recycle_cmd_node(adapter, adapter->curr_cmd);
                adapter->curr_cmd = NULL;
        }
 
        /* shut down mwifiex */
-       dev_dbg(adapter->dev, "info: shutdown mwifiex...\n");
+       mwifiex_dbg(adapter, MSG,
+                   "info: shutdown mwifiex...\n");
 
        /* Clean up Tx/Rx queues and delete BSS priority table */
        for (i = 0; i < adapter->priv_num; i++) {
@@ -741,8 +749,8 @@ int mwifiex_dnld_fw(struct mwifiex_adapter *adapter,
                /* check if firmware is already running */
                ret = adapter->if_ops.check_fw_status(adapter, poll_num);
                if (!ret) {
-                       dev_notice(adapter->dev,
-                                  "WLAN FW already running! Skip FW dnld\n");
+                       mwifiex_dbg(adapter, MSG,
+                                   "WLAN FW already running! Skip FW dnld\n");
                        return 0;
                }
 
@@ -750,8 +758,8 @@ int mwifiex_dnld_fw(struct mwifiex_adapter *adapter,
 
                /* check if we are the winner for downloading FW */
                if (!adapter->winner) {
-                       dev_notice(adapter->dev,
-                                  "FW already running! Skip FW dnld\n");
+                       mwifiex_dbg(adapter, MSG,
+                                   "FW already running! Skip FW dnld\n");
                        goto poll_fw;
                }
        }
@@ -760,7 +768,8 @@ int mwifiex_dnld_fw(struct mwifiex_adapter *adapter,
                /* Download firmware with helper */
                ret = adapter->if_ops.prog_fw(adapter, pmfw);
                if (ret) {
-                       dev_err(adapter->dev, "prog_fw failed ret=%#x\n", ret);
+                       mwifiex_dbg(adapter, ERROR,
+                                   "prog_fw failed ret=%#x\n", ret);
                        return ret;
                }
        }
@@ -769,7 +778,8 @@ poll_fw:
        /* Check if the firmware is downloaded successfully or not */
        ret = adapter->if_ops.check_fw_status(adapter, poll_num);
        if (ret)
-               dev_err(adapter->dev, "FW failed to be active in time\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "FW failed to be active in time\n");
 
        return ret;
 }
index d2b05c3a96da8d81060f4c698020890faab5454a..6f11a25a6b49d86498303f7a4647053eb8c22123 100644 (file)
@@ -189,6 +189,7 @@ struct tdls_peer_info {
 };
 
 struct mwifiex_debug_info {
+       unsigned int debug_mask;
        u32 int_counter;
        u32 packets_out[MAX_NUM_TID];
        u32 tx_buf_size;
@@ -342,6 +343,11 @@ struct mwifiex_ds_read_eeprom {
        u8 value[MAX_EEPROM_DATA];
 };
 
+struct mwifiex_ds_mem_rw {
+       u32 addr;
+       u32 value;
+};
+
 #define IEEE_MAX_IE_SIZE               256
 
 #define MWIFIEX_IE_HDR_SIZE    (sizeof(struct mwifiex_ie) - IEEE_MAX_IE_SIZE)
index 411a6c2f4aca5da922409bf33c1aa7eae5a92f00..cce8e39aa45e456e66dad6d4f762f07c96b9c9d2 100644 (file)
@@ -53,9 +53,9 @@ mwifiex_cmd_append_generic_ie(struct mwifiex_private *priv, u8 **buffer)
         *   parameter buffer pointer.
         */
        if (priv->gen_ie_buf_len) {
-               dev_dbg(priv->adapter->dev,
-                       "info: %s: append generic ie len %d to %p\n",
-                       __func__, priv->gen_ie_buf_len, *buffer);
+               mwifiex_dbg(priv->adapter, INFO,
+                           "info: %s: append generic ie len %d to %p\n",
+                           __func__, priv->gen_ie_buf_len, *buffer);
 
                /* Wrap the generic IE buffer with a pass through TLV type */
                ie_header.type = cpu_to_le16(TLV_TYPE_PASSTHROUGH);
@@ -125,9 +125,9 @@ mwifiex_cmd_append_tsf_tlv(struct mwifiex_private *priv, u8 **buffer,
 
        tsf_val = cpu_to_le64(bss_desc->timestamp);
 
-       dev_dbg(priv->adapter->dev,
-               "info: %s: TSF offset calc: %016llx - %016llx\n",
-               __func__, bss_desc->timestamp, bss_desc->fw_tsf);
+       mwifiex_dbg(priv->adapter, INFO,
+                   "info: %s: TSF offset calc: %016llx - %016llx\n",
+                   __func__, bss_desc->timestamp, bss_desc->fw_tsf);
 
        memcpy(*buffer, &tsf_val, sizeof(tsf_val));
        *buffer += sizeof(tsf_val);
@@ -152,7 +152,7 @@ static int mwifiex_get_common_rates(struct mwifiex_private *priv, u8 *rate1,
 
        tmp = kmemdup(rate1, rate1_size, GFP_KERNEL);
        if (!tmp) {
-               dev_err(priv->adapter->dev, "failed to alloc tmp buf\n");
+               mwifiex_dbg(priv->adapter, ERROR, "failed to alloc tmp buf\n");
                return -ENOMEM;
        }
 
@@ -169,8 +169,8 @@ static int mwifiex_get_common_rates(struct mwifiex_private *priv, u8 *rate1,
                }
        }
 
-       dev_dbg(priv->adapter->dev, "info: Tx data rate set to %#x\n",
-               priv->data_rate);
+       mwifiex_dbg(priv->adapter, INFO, "info: Tx data rate set to %#x\n",
+                   priv->data_rate);
 
        if (!priv->is_data_rate_auto) {
                while (*ptr) {
@@ -180,9 +180,10 @@ static int mwifiex_get_common_rates(struct mwifiex_private *priv, u8 *rate1,
                        }
                        ptr++;
                }
-               dev_err(priv->adapter->dev, "previously set fixed data rate %#x"
-                       " is not compatible with the network\n",
-                       priv->data_rate);
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "previously set fixed data rate %#x\t"
+                           "is not compatible with the network\n",
+                           priv->data_rate);
 
                ret = -1;
                goto done;
@@ -214,8 +215,9 @@ mwifiex_setup_rates_from_bssdesc(struct mwifiex_private *priv,
        if (mwifiex_get_common_rates(priv, out_rates, MWIFIEX_SUPPORTED_RATES,
                                     card_rates, card_rates_size)) {
                *out_rates_size = 0;
-               dev_err(priv->adapter->dev, "%s: cannot get common rates\n",
-                       __func__);
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "%s: cannot get common rates\n",
+                           __func__);
                return -1;
        }
 
@@ -246,8 +248,9 @@ mwifiex_cmd_append_wps_ie(struct mwifiex_private *priv, u8 **buffer)
         * parameter buffer pointer.
         */
        if (priv->wps_ie_len) {
-               dev_dbg(priv->adapter->dev, "cmd: append wps ie %d to %p\n",
-                       priv->wps_ie_len, *buffer);
+               mwifiex_dbg(priv->adapter, CMD,
+                           "cmd: append wps ie %d to %p\n",
+                           priv->wps_ie_len, *buffer);
 
                /* Wrap the generic IE buffer with a pass through TLV type */
                ie_header.type = cpu_to_le16(TLV_TYPE_MGMT_IE);
@@ -292,8 +295,9 @@ mwifiex_cmd_append_wapi_ie(struct mwifiex_private *priv, u8 **buffer)
         *   parameter buffer pointer.
         */
        if (priv->wapi_ie_len) {
-               dev_dbg(priv->adapter->dev, "cmd: append wapi ie %d to %p\n",
-                       priv->wapi_ie_len, *buffer);
+               mwifiex_dbg(priv->adapter, CMD,
+                           "cmd: append wapi ie %d to %p\n",
+                           priv->wapi_ie_len, *buffer);
 
                /* Wrap the generic IE buffer with a pass through TLV type */
                ie_header.type = cpu_to_le16(TLV_TYPE_WAPI_IE);
@@ -453,8 +457,8 @@ int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv,
        rates_tlv->header.len = cpu_to_le16((u16) rates_size);
        memcpy(rates_tlv->rates, rates, rates_size);
        pos += sizeof(rates_tlv->header) + rates_size;
-       dev_dbg(priv->adapter->dev, "info: ASSOC_CMD: rates size = %d\n",
-               rates_size);
+       mwifiex_dbg(priv->adapter, INFO, "info: ASSOC_CMD: rates size = %d\n",
+                   rates_size);
 
        /* Add the Authentication type to be used for Auth frames */
        auth_tlv = (struct mwifiex_ie_types_auth_type *) pos;
@@ -487,14 +491,14 @@ int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv,
                       sizeof(struct mwifiex_chan_scan_param_set));
                chan_tlv->chan_scan_param[0].chan_number =
                        (bss_desc->phy_param_set.ds_param_set.current_chan);
-               dev_dbg(priv->adapter->dev, "info: Assoc: TLV Chan = %d\n",
-                       chan_tlv->chan_scan_param[0].chan_number);
+               mwifiex_dbg(priv->adapter, INFO, "info: Assoc: TLV Chan = %d\n",
+                           chan_tlv->chan_scan_param[0].chan_number);
 
                chan_tlv->chan_scan_param[0].radio_type =
                        mwifiex_band_to_radio_type((u8) bss_desc->bss_band);
 
-               dev_dbg(priv->adapter->dev, "info: Assoc: TLV Band = %d\n",
-                       chan_tlv->chan_scan_param[0].radio_type);
+               mwifiex_dbg(priv->adapter, INFO, "info: Assoc: TLV Band = %d\n",
+                           chan_tlv->chan_scan_param[0].radio_type);
                pos += sizeof(chan_tlv->header) +
                        sizeof(struct mwifiex_chan_scan_param_set);
        }
@@ -544,8 +548,9 @@ int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv,
                tmp_cap &= ~WLAN_CAPABILITY_SHORT_SLOT_TIME;
 
        tmp_cap &= CAPINFO_MASK;
-       dev_dbg(priv->adapter->dev, "info: ASSOC_CMD: tmp_cap=%4X CAPINFO_MASK=%4lX\n",
-               tmp_cap, CAPINFO_MASK);
+       mwifiex_dbg(priv->adapter, INFO,
+                   "info: ASSOC_CMD: tmp_cap=%4X CAPINFO_MASK=%4lX\n",
+                   tmp_cap, CAPINFO_MASK);
        assoc->cap_info_bitmap = cpu_to_le16(tmp_cap);
 
        return 0;
@@ -621,23 +626,35 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
        struct ieee_types_assoc_rsp *assoc_rsp;
        struct mwifiex_bssdescriptor *bss_desc;
        bool enable_data = true;
-       u16 cap_info, status_code;
+       u16 cap_info, status_code, aid;
 
        assoc_rsp = (struct ieee_types_assoc_rsp *) &resp->params;
 
        cap_info = le16_to_cpu(assoc_rsp->cap_info_bitmap);
        status_code = le16_to_cpu(assoc_rsp->status_code);
+       aid = le16_to_cpu(assoc_rsp->a_id);
+
+       if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14)))
+               dev_err(priv->adapter->dev,
+                       "invalid AID value 0x%x; bits 15:14 not set\n",
+                       aid);
+
+       aid &= ~(BIT(15) | BIT(14));
 
        priv->assoc_rsp_size = min(le16_to_cpu(resp->size) - S_DS_GEN,
                                   sizeof(priv->assoc_rsp_buf));
 
        memcpy(priv->assoc_rsp_buf, &resp->params, priv->assoc_rsp_size);
 
+       assoc_rsp->a_id = cpu_to_le16(aid);
+
        if (status_code) {
                priv->adapter->dbg.num_cmd_assoc_failure++;
-               dev_err(priv->adapter->dev,
-                       "ASSOC_RESP: failed, status code=%d err=%#x a_id=%#x\n",
-                       status_code, cap_info, le16_to_cpu(assoc_rsp->a_id));
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "ASSOC_RESP: failed,\t"
+                           "status code=%d err=%#x a_id=%#x\n",
+                           status_code, cap_info,
+                           le16_to_cpu(assoc_rsp->a_id));
 
                if (cap_info == MWIFIEX_TIMEOUT_FOR_AP_RESP) {
                        if (status_code == MWIFIEX_STATUS_CODE_AUTH_TIMEOUT)
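A worked check of the AID arithmetic in the hunk above: the AP sets bits 15:14 in the association response AID field per IEEE 802.11, and the driver now warns if they are missing and strips them before caching the frame, so a received a_id of 0xc005 denotes association ID 5:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* Mirrors the BIT(15) | BIT(14) masking shown above. */
	uint16_t aid = 0xc005;

	assert((aid & 0xc000) == 0xc000);	/* valid 802.11 encoding */
	aid &= ~0xc000;
	assert(aid == 5);			/* association ID 5 */
	return 0;
}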
@@ -661,8 +678,8 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
        /* Set the attempted BSSID Index to current */
        bss_desc = priv->attempted_bss_desc;
 
-       dev_dbg(priv->adapter->dev, "info: ASSOC_RESP: %s\n",
-               bss_desc->ssid.ssid);
+       mwifiex_dbg(priv->adapter, INFO, "info: ASSOC_RESP: %s\n",
+                   bss_desc->ssid.ssid);
 
        /* Make a copy of current BSSID descriptor */
        memcpy(&priv->curr_bss_params.bss_descriptor,
@@ -692,8 +709,9 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
                        = ((bss_desc->wmm_ie.qos_info_bitmap &
                                IEEE80211_WMM_IE_AP_QOSINFO_UAPSD) ? 1 : 0);
 
-       dev_dbg(priv->adapter->dev, "info: ASSOC_RESP: curr_pkt_filter is %#x\n",
-               priv->curr_pkt_filter);
+       mwifiex_dbg(priv->adapter, INFO,
+                   "info: ASSOC_RESP: curr_pkt_filter is %#x\n",
+                   priv->curr_pkt_filter);
        if (priv->sec_info.wpa_enabled || priv->sec_info.wpa2_enabled)
                priv->wpa_is_gtk_set = false;
 
@@ -709,8 +727,8 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
        }
 
        if (enable_data)
-               dev_dbg(priv->adapter->dev,
-                       "info: post association, re-enabling data flow\n");
+               mwifiex_dbg(priv->adapter, INFO,
+                           "info: post association, re-enabling data flow\n");
 
        /* Reset SNR/NF/RSSI values */
        priv->data_rssi_last = 0;
@@ -728,7 +746,7 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
 
        priv->adapter->dbg.num_cmd_assoc_success++;
 
-       dev_dbg(priv->adapter->dev, "info: ASSOC_RESP: associated\n");
+       mwifiex_dbg(priv->adapter, INFO, "info: ASSOC_RESP: associated\n");
 
        /* Add the ra_list here for infra mode as there will be only 1 ra
           always */
@@ -815,8 +833,8 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
 
        memcpy(adhoc_start->ssid, req_ssid->ssid, req_ssid->ssid_len);
 
-       dev_dbg(adapter->dev, "info: ADHOC_S_CMD: SSID = %s\n",
-               adhoc_start->ssid);
+       mwifiex_dbg(adapter, INFO, "info: ADHOC_S_CMD: SSID = %s\n",
+                   adhoc_start->ssid);
 
        memset(bss_desc->ssid.ssid, 0, IEEE80211_MAX_SSID_LEN);
        memcpy(bss_desc->ssid.ssid, req_ssid->ssid, req_ssid->ssid_len);
@@ -848,12 +866,14 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
        }
 
        if (!priv->adhoc_channel) {
-               dev_err(adapter->dev, "ADHOC_S_CMD: adhoc_channel cannot be 0\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "ADHOC_S_CMD: adhoc_channel cannot be 0\n");
                return -1;
        }
 
-       dev_dbg(adapter->dev, "info: ADHOC_S_CMD: creating ADHOC on channel %d\n",
-               priv->adhoc_channel);
+       mwifiex_dbg(adapter, INFO,
+                   "info: ADHOC_S_CMD: creating ADHOC on channel %d\n",
+                   priv->adhoc_channel);
 
        priv->curr_bss_params.bss_descriptor.channel = priv->adhoc_channel;
        priv->curr_bss_params.band = adapter->adhoc_start_band;
@@ -885,13 +905,14 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
        /* Set up privacy in bss_desc */
        if (priv->sec_info.encryption_mode) {
                /* Ad-Hoc capability privacy on */
-               dev_dbg(adapter->dev,
-                       "info: ADHOC_S_CMD: wep_status set privacy to WEP\n");
+               mwifiex_dbg(adapter, INFO,
+                           "info: ADHOC_S_CMD: wep_status set privacy to WEP\n");
                bss_desc->privacy = MWIFIEX_802_11_PRIV_FILTER_8021X_WEP;
                tmp_cap |= WLAN_CAPABILITY_PRIVACY;
        } else {
-               dev_dbg(adapter->dev, "info: ADHOC_S_CMD: wep_status NOT set,"
-                               " setting privacy to ACCEPT ALL\n");
+               mwifiex_dbg(adapter, INFO,
+                           "info: ADHOC_S_CMD: wep_status NOT set,\t"
+                           "setting privacy to ACCEPT ALL\n");
                bss_desc->privacy = MWIFIEX_802_11_PRIV_FILTER_ACCEPT_ALL;
        }
 
@@ -902,8 +923,8 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
                if (mwifiex_send_cmd(priv, HostCmd_CMD_MAC_CONTROL,
                                     HostCmd_ACT_GEN_SET, 0,
                                     &priv->curr_pkt_filter, false)) {
-                       dev_err(adapter->dev,
-                               "ADHOC_S_CMD: G Protection config failed\n");
+                       mwifiex_dbg(adapter, ERROR,
+                                   "ADHOC_S_CMD: G Protection config failed\n");
                        return -1;
                }
        }
@@ -918,10 +939,10 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
        memcpy(&priv->curr_bss_params.data_rates,
               &adhoc_start->data_rate, priv->curr_bss_params.num_of_rates);
 
-       dev_dbg(adapter->dev, "info: ADHOC_S_CMD: rates=%4ph\n",
-               adhoc_start->data_rate);
+       mwifiex_dbg(adapter, INFO, "info: ADHOC_S_CMD: rates=%4ph\n",
+                   adhoc_start->data_rate);
 
-       dev_dbg(adapter->dev, "info: ADHOC_S_CMD: AD-HOC Start command is ready\n");
+       mwifiex_dbg(adapter, INFO, "info: ADHOC_S_CMD: AD-HOC Start command is ready\n");
 
        if (IS_SUPPORT_MULTI_BANDS(adapter)) {
                /* Append a channel TLV */
@@ -935,8 +956,8 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
                chan_tlv->chan_scan_param[0].chan_number =
                        (u8) priv->curr_bss_params.bss_descriptor.channel;
 
-               dev_dbg(adapter->dev, "info: ADHOC_S_CMD: TLV Chan = %d\n",
-                       chan_tlv->chan_scan_param[0].chan_number);
+               mwifiex_dbg(adapter, INFO, "info: ADHOC_S_CMD: TLV Chan = %d\n",
+                           chan_tlv->chan_scan_param[0].chan_number);
 
                chan_tlv->chan_scan_param[0].radio_type
                       = mwifiex_band_to_radio_type(priv->curr_bss_params.band);
@@ -951,8 +972,8 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
                                chan_tlv->chan_scan_param[0].radio_type |=
                                        (IEEE80211_HT_PARAM_CHA_SEC_BELOW << 4);
                }
-               dev_dbg(adapter->dev, "info: ADHOC_S_CMD: TLV Band = %d\n",
-                       chan_tlv->chan_scan_param[0].radio_type);
+               mwifiex_dbg(adapter, INFO, "info: ADHOC_S_CMD: TLV Band = %d\n",
+                           chan_tlv->chan_scan_param[0].radio_type);
                pos += sizeof(chan_tlv->header) +
                        sizeof(struct mwifiex_chan_scan_param_set);
                cmd_append_size +=
@@ -1074,8 +1095,8 @@ mwifiex_cmd_802_11_ad_hoc_join(struct mwifiex_private *priv,
                if (mwifiex_send_cmd(priv, HostCmd_CMD_MAC_CONTROL,
                                     HostCmd_ACT_GEN_SET, 0,
                                     &curr_pkt_filter, false)) {
-                       dev_err(priv->adapter->dev,
-                               "ADHOC_J_CMD: G Protection config failed\n");
+                       mwifiex_dbg(priv->adapter, ERROR,
+                                   "ADHOC_J_CMD: G Protection config failed\n");
                        return -1;
                }
        }
@@ -1106,14 +1127,15 @@ mwifiex_cmd_802_11_ad_hoc_join(struct mwifiex_private *priv,
 
        tmp_cap &= CAPINFO_MASK;
 
-       dev_dbg(priv->adapter->dev,
-               "info: ADHOC_J_CMD: tmp_cap=%4X CAPINFO_MASK=%4lX\n",
-               tmp_cap, CAPINFO_MASK);
+       mwifiex_dbg(priv->adapter, INFO,
+                   "info: ADHOC_J_CMD: tmp_cap=%4X CAPINFO_MASK=%4lX\n",
+                   tmp_cap, CAPINFO_MASK);
 
        /* Information on BSSID descriptor passed to FW */
-       dev_dbg(priv->adapter->dev, "info: ADHOC_J_CMD: BSSID=%pM, SSID='%s'\n",
-               adhoc_join->bss_descriptor.bssid,
-               adhoc_join->bss_descriptor.ssid);
+       mwifiex_dbg(priv->adapter, INFO,
+                   "info: ADHOC_J_CMD: BSSID=%pM, SSID='%s'\n",
+                   adhoc_join->bss_descriptor.bssid,
+                   adhoc_join->bss_descriptor.ssid);
 
        for (i = 0; i < MWIFIEX_SUPPORTED_RATES &&
                    bss_desc->supported_rates[i]; i++)
@@ -1149,14 +1171,14 @@ mwifiex_cmd_802_11_ad_hoc_join(struct mwifiex_private *priv,
                       sizeof(struct mwifiex_chan_scan_param_set));
                chan_tlv->chan_scan_param[0].chan_number =
                        (bss_desc->phy_param_set.ds_param_set.current_chan);
-               dev_dbg(priv->adapter->dev, "info: ADHOC_J_CMD: TLV Chan=%d\n",
-                       chan_tlv->chan_scan_param[0].chan_number);
+               mwifiex_dbg(priv->adapter, INFO, "info: ADHOC_J_CMD: TLV Chan=%d\n",
+                           chan_tlv->chan_scan_param[0].chan_number);
 
                chan_tlv->chan_scan_param[0].radio_type =
                        mwifiex_band_to_radio_type((u8) bss_desc->bss_band);
 
-               dev_dbg(priv->adapter->dev, "info: ADHOC_J_CMD: TLV Band=%d\n",
-                       chan_tlv->chan_scan_param[0].radio_type);
+               mwifiex_dbg(priv->adapter, INFO, "info: ADHOC_J_CMD: TLV Band=%d\n",
+                           chan_tlv->chan_scan_param[0].radio_type);
                pos += sizeof(chan_tlv->header) +
                                sizeof(struct mwifiex_chan_scan_param_set);
                cmd_append_size += sizeof(chan_tlv->header) +
@@ -1210,7 +1232,7 @@ int mwifiex_ret_802_11_ad_hoc(struct mwifiex_private *priv,
        /* Join result code 0 --> SUCCESS */
        reason_code = le16_to_cpu(resp->result);
        if (reason_code) {
-               dev_err(priv->adapter->dev, "ADHOC_RESP: failed\n");
+               mwifiex_dbg(priv->adapter, ERROR, "ADHOC_RESP: failed\n");
                if (priv->media_connected)
                        mwifiex_reset_connect_state(priv, reason_code);
 
@@ -1225,8 +1247,8 @@ int mwifiex_ret_802_11_ad_hoc(struct mwifiex_private *priv,
        priv->media_connected = true;
 
        if (le16_to_cpu(resp->command) == HostCmd_CMD_802_11_AD_HOC_START) {
-               dev_dbg(priv->adapter->dev, "info: ADHOC_S_RESP %s\n",
-                       bss_desc->ssid.ssid);
+               mwifiex_dbg(priv->adapter, INFO, "info: ADHOC_S_RESP %s\n",
+                           bss_desc->ssid.ssid);
 
                /* Update the created network descriptor with the new BSSID */
                memcpy(bss_desc->mac_address,
@@ -1238,8 +1260,9 @@ int mwifiex_ret_802_11_ad_hoc(struct mwifiex_private *priv,
                 * Now the join cmd should be successful.
                 * If BSSID has changed use SSID to compare instead of BSSID
                 */
-               dev_dbg(priv->adapter->dev, "info: ADHOC_J_RESP %s\n",
-                       bss_desc->ssid.ssid);
+               mwifiex_dbg(priv->adapter, INFO,
+                           "info: ADHOC_J_RESP %s\n",
+                           bss_desc->ssid.ssid);
 
                /*
                 * Make a copy of current BSSID descriptor, only needed for
@@ -1252,10 +1275,10 @@ int mwifiex_ret_802_11_ad_hoc(struct mwifiex_private *priv,
                priv->adhoc_state = ADHOC_JOINED;
        }
 
-       dev_dbg(priv->adapter->dev, "info: ADHOC_RESP: channel = %d\n",
-               priv->adhoc_channel);
-       dev_dbg(priv->adapter->dev, "info: ADHOC_RESP: BSSID = %pM\n",
-               priv->curr_bss_params.bss_descriptor.mac_address);
+       mwifiex_dbg(priv->adapter, INFO, "info: ADHOC_RESP: channel = %d\n",
+                   priv->adhoc_channel);
+       mwifiex_dbg(priv->adapter, INFO, "info: ADHOC_RESP: BSSID = %pM\n",
+                   priv->curr_bss_params.bss_descriptor.mac_address);
 
        if (!netif_carrier_ok(priv->netdev))
                netif_carrier_on(priv->netdev);
@@ -1317,12 +1340,12 @@ int
 mwifiex_adhoc_start(struct mwifiex_private *priv,
                    struct cfg80211_ssid *adhoc_ssid)
 {
-       dev_dbg(priv->adapter->dev, "info: Adhoc Channel = %d\n",
-               priv->adhoc_channel);
-       dev_dbg(priv->adapter->dev, "info: curr_bss_params.channel = %d\n",
-               priv->curr_bss_params.bss_descriptor.channel);
-       dev_dbg(priv->adapter->dev, "info: curr_bss_params.band = %d\n",
-               priv->curr_bss_params.band);
+       mwifiex_dbg(priv->adapter, INFO, "info: Adhoc Channel = %d\n",
+                   priv->adhoc_channel);
+       mwifiex_dbg(priv->adapter, INFO, "info: curr_bss_params.channel = %d\n",
+                   priv->curr_bss_params.bss_descriptor.channel);
+       mwifiex_dbg(priv->adapter, INFO, "info: curr_bss_params.band = %d\n",
+                   priv->curr_bss_params.band);
 
        if (ISSUPP_11ACENABLED(priv->adapter->fw_cap_info) &&
            priv->adapter->config_bands & BAND_AAC)
@@ -1343,14 +1366,16 @@ mwifiex_adhoc_start(struct mwifiex_private *priv,
 int mwifiex_adhoc_join(struct mwifiex_private *priv,
                       struct mwifiex_bssdescriptor *bss_desc)
 {
-       dev_dbg(priv->adapter->dev, "info: adhoc join: curr_bss ssid =%s\n",
-               priv->curr_bss_params.bss_descriptor.ssid.ssid);
-       dev_dbg(priv->adapter->dev, "info: adhoc join: curr_bss ssid_len =%u\n",
-               priv->curr_bss_params.bss_descriptor.ssid.ssid_len);
-       dev_dbg(priv->adapter->dev, "info: adhoc join: ssid =%s\n",
-               bss_desc->ssid.ssid);
-       dev_dbg(priv->adapter->dev, "info: adhoc join: ssid_len =%u\n",
-               bss_desc->ssid.ssid_len);
+       mwifiex_dbg(priv->adapter, INFO,
+                   "info: adhoc join: curr_bss ssid =%s\n",
+                   priv->curr_bss_params.bss_descriptor.ssid.ssid);
+       mwifiex_dbg(priv->adapter, INFO,
+                   "info: adhoc join: curr_bss ssid_len =%u\n",
+                   priv->curr_bss_params.bss_descriptor.ssid.ssid_len);
+       mwifiex_dbg(priv->adapter, INFO, "info: adhoc join: ssid =%s\n",
+                   bss_desc->ssid.ssid);
+       mwifiex_dbg(priv->adapter, INFO, "info: adhoc join: ssid_len =%u\n",
+                   bss_desc->ssid.ssid_len);
 
        /* Check if the requested SSID is already joined */
        if (priv->curr_bss_params.bss_descriptor.ssid.ssid_len &&
@@ -1358,8 +1383,9 @@ int mwifiex_adhoc_join(struct mwifiex_private *priv,
                              &priv->curr_bss_params.bss_descriptor.ssid) &&
            (priv->curr_bss_params.bss_descriptor.bss_mode ==
                                                        NL80211_IFTYPE_ADHOC)) {
-               dev_dbg(priv->adapter->dev, "info: ADHOC_J_CMD: new ad-hoc SSID"
-                       " is the same as current; not attempting to re-join\n");
+               mwifiex_dbg(priv->adapter, INFO,
+                           "info: ADHOC_J_CMD: new ad-hoc SSID\t"
+                           "is the same as current; not attempting to re-join\n");
                return -1;
        }
 
@@ -1370,10 +1396,12 @@ int mwifiex_adhoc_join(struct mwifiex_private *priv,
        else
                mwifiex_set_ba_params(priv);
 
-       dev_dbg(priv->adapter->dev, "info: curr_bss_params.channel = %d\n",
-               priv->curr_bss_params.bss_descriptor.channel);
-       dev_dbg(priv->adapter->dev, "info: curr_bss_params.band = %c\n",
-               priv->curr_bss_params.band);
+       mwifiex_dbg(priv->adapter, INFO,
+                   "info: curr_bss_params.channel = %d\n",
+                   priv->curr_bss_params.bss_descriptor.channel);
+       mwifiex_dbg(priv->adapter, INFO,
+                   "info: curr_bss_params.band = %c\n",
+                   priv->curr_bss_params.band);
 
        return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_AD_HOC_JOIN,
                                HostCmd_ACT_GEN_SET, 0, bss_desc, true);
@@ -1421,7 +1449,7 @@ int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac)
                ret = mwifiex_deauthenticate_infra(priv, mac);
                if (ret)
                        cfg80211_disconnected(priv->netdev, 0, NULL, 0,
-                                             GFP_KERNEL);
+                                             true, GFP_KERNEL);
                break;
        case NL80211_IFTYPE_ADHOC:
                return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_AD_HOC_STOP,
index 03a95c7d34bf9ef1e12524d836c3adb3eab8e8fd..3ba4e0e04223bcde4fd0da162db6bb6db6160b62 100644 (file)
 
 #define VERSION        "1.0"
 
+static unsigned int debug_mask = MWIFIEX_DEFAULT_DEBUG_MASK;
+module_param(debug_mask, uint, 0);
+MODULE_PARM_DESC(debug_mask, "bitmap for debug flags");
+
 const char driver_version[] = "mwifiex " VERSION " (%s) ";
 static char *cal_data_cfg;
 module_param(cal_data_cfg, charp, 0);
@@ -63,6 +67,7 @@ static int mwifiex_register(void *card, struct mwifiex_if_ops *if_ops,
 
        /* Save interface specific operations in adapter */
        memmove(&adapter->if_ops, if_ops, sizeof(struct mwifiex_if_ops));
+       adapter->debug_mask = debug_mask;
 
        /* card specific initialization has been deferred until now .. */
        if (adapter->if_ops.init_if)
@@ -89,7 +94,8 @@ static int mwifiex_register(void *card, struct mwifiex_if_ops *if_ops,
        return 0;
 
 error:
-       dev_dbg(adapter->dev, "info: leave mwifiex_register with error\n");
+       mwifiex_dbg(adapter, ERROR,
+                   "info: leave mwifiex_register with error\n");
 
        for (i = 0; i < adapter->priv_num; i++)
                kfree(adapter->priv[i]);
@@ -231,11 +237,10 @@ int mwifiex_main_process(struct mwifiex_adapter *adapter)
                goto exit_main_proc;
        } else {
                adapter->mwifiex_processing = true;
+               spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
        }
 process_start:
        do {
-               adapter->more_task_flag = false;
-               spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
                if ((adapter->hw_status == MWIFIEX_HW_STATUS_CLOSING) ||
                    (adapter->hw_status == MWIFIEX_HW_STATUS_NOT_READY))
                        break;
@@ -275,7 +280,6 @@ process_start:
                        adapter->pm_wakeup_fw_try = true;
                        mod_timer(&adapter->wakeup_timer, jiffies + (HZ*3));
                        adapter->if_ops.wakeup(adapter);
-                       spin_lock_irqsave(&adapter->main_proc_lock, flags);
                        continue;
                }
 
@@ -335,7 +339,6 @@ process_start:
                    (adapter->ps_state == PS_STATE_PRE_SLEEP) ||
                    (adapter->ps_state == PS_STATE_SLEEP_CFM) ||
                    adapter->tx_lock_flag){
-                       spin_lock_irqsave(&adapter->main_proc_lock, flags);
                        continue;
                }
 
@@ -386,12 +389,14 @@ process_start:
                        }
                        break;
                }
-               spin_lock_irqsave(&adapter->main_proc_lock, flags);
        } while (true);
 
        spin_lock_irqsave(&adapter->main_proc_lock, flags);
-       if (adapter->more_task_flag)
+       if (adapter->more_task_flag) {
+               adapter->more_task_flag = false;
+               spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
                goto process_start;
+       }
        adapter->mwifiex_processing = false;
        spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
 
@@ -455,8 +460,8 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
        struct wireless_dev *wdev;
 
        if (!firmware) {
-               dev_err(adapter->dev,
-                       "Failed to get firmware %s\n", adapter->fw_name);
+               mwifiex_dbg(adapter, ERROR,
+                           "Failed to get firmware %s\n", adapter->fw_name);
                goto err_dnld_fw;
        }
 
@@ -472,13 +477,13 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
        if (ret == -1)
                goto err_dnld_fw;
 
-       dev_notice(adapter->dev, "WLAN FW is active\n");
+       mwifiex_dbg(adapter, MSG, "WLAN FW is active\n");
 
        if (cal_data_cfg) {
                if ((request_firmware(&adapter->cal_data, cal_data_cfg,
                                      adapter->dev)) < 0)
-                       dev_err(adapter->dev,
-                               "Cal data request_firmware() failed\n");
+                       mwifiex_dbg(adapter, ERROR,
+                                   "Cal data request_firmware() failed\n");
        }
 
        /* enable host interrupt after fw dnld is successful */
@@ -503,12 +508,14 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
 
        priv = adapter->priv[MWIFIEX_BSS_ROLE_STA];
        if (mwifiex_register_cfg80211(adapter)) {
-               dev_err(adapter->dev, "cannot register with cfg80211\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "cannot register with cfg80211\n");
                goto err_init_fw;
        }
 
        if (mwifiex_init_channel_scan_gap(adapter)) {
-               dev_err(adapter->dev, "could not init channel stats table\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "could not init channel stats table\n");
                goto err_init_fw;
        }
 
@@ -522,7 +529,8 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
        wdev = mwifiex_add_virtual_intf(adapter->wiphy, "mlan%d", NET_NAME_ENUM,
                                        NL80211_IFTYPE_STATION, NULL, NULL);
        if (IS_ERR(wdev)) {
-               dev_err(adapter->dev, "cannot create default STA interface\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "cannot create default STA interface\n");
                rtnl_unlock();
                goto err_add_intf;
        }
@@ -531,7 +539,8 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
                wdev = mwifiex_add_virtual_intf(adapter->wiphy, "uap%d", NET_NAME_ENUM,
                                                NL80211_IFTYPE_AP, NULL, NULL);
                if (IS_ERR(wdev)) {
-                       dev_err(adapter->dev, "cannot create AP interface\n");
+                       mwifiex_dbg(adapter, ERROR,
+                                   "cannot create AP interface\n");
                        rtnl_unlock();
                        goto err_add_intf;
                }
@@ -542,8 +551,8 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
                                                NL80211_IFTYPE_P2P_CLIENT, NULL,
                                                NULL);
                if (IS_ERR(wdev)) {
-                       dev_err(adapter->dev,
-                               "cannot create p2p client interface\n");
+                       mwifiex_dbg(adapter, ERROR,
+                                   "cannot create p2p client interface\n");
                        rtnl_unlock();
                        goto err_add_intf;
                }
@@ -551,7 +560,7 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
        rtnl_unlock();
 
        mwifiex_drv_get_driver_version(adapter, fmt, sizeof(fmt) - 1);
-       dev_notice(adapter->dev, "driver_version = %s\n", fmt);
+       mwifiex_dbg(adapter, MSG, "driver_version = %s\n", fmt);
        goto done;
 
 err_add_intf:
@@ -561,7 +570,8 @@ err_init_fw:
        if (adapter->if_ops.disable_int)
                adapter->if_ops.disable_int(adapter);
 err_dnld_fw:
-       pr_debug("info: %s: unregister device\n", __func__);
+       mwifiex_dbg(adapter, ERROR,
+                   "info: %s: unregister device\n", __func__);
        if (adapter->if_ops.unregister_dev)
                adapter->if_ops.unregister_dev(adapter);
 
@@ -602,8 +612,8 @@ static int mwifiex_init_hw_fw(struct mwifiex_adapter *adapter)
                                      adapter->dev, GFP_KERNEL, adapter,
                                      mwifiex_fw_dpc);
        if (ret < 0)
-               dev_err(adapter->dev,
-                       "request_firmware_nowait() returned error %d\n", ret);
+               mwifiex_dbg(adapter, ERROR,
+                           "request_firmware_nowait error %d\n", ret);
        return ret;
 }
 
@@ -629,7 +639,8 @@ mwifiex_close(struct net_device *dev)
        struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
 
        if (priv->scan_request) {
-               dev_dbg(priv->adapter->dev, "aborting scan on ndo_stop\n");
+               mwifiex_dbg(priv->adapter, INFO,
+                           "aborting scan on ndo_stop\n");
                cfg80211_scan_done(priv->scan_request, 1);
                priv->scan_request = NULL;
                priv->scan_aborting = true;
@@ -650,7 +661,8 @@ int mwifiex_queue_tx_pkt(struct mwifiex_private *priv, struct sk_buff *skb)
                txq = netdev_get_tx_queue(priv->netdev, index);
                if (!netif_tx_queue_stopped(txq)) {
                        netif_tx_stop_queue(txq);
-                       dev_dbg(priv->adapter->dev, "stop queue: %d\n", index);
+                       mwifiex_dbg(priv->adapter, DATA,
+                                   "stop queue: %d\n", index);
                }
        }
 
@@ -715,8 +727,9 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
        struct mwifiex_txinfo *tx_info;
        bool multicast;
 
-       dev_dbg(priv->adapter->dev, "data: %lu BSS(%d-%d): Data <= kernel\n",
-               jiffies, priv->bss_type, priv->bss_num);
+       mwifiex_dbg(priv->adapter, DATA,
+                   "data: %lu BSS(%d-%d): Data <= kernel\n",
+                   jiffies, priv->bss_type, priv->bss_num);
 
        if (priv->adapter->surprise_removed) {
                kfree_skb(skb);
@@ -724,28 +737,31 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
                return 0;
        }
        if (!skb->len || (skb->len > ETH_FRAME_LEN)) {
-               dev_err(priv->adapter->dev, "Tx: bad skb len %d\n", skb->len);
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "Tx: bad skb len %d\n", skb->len);
                kfree_skb(skb);
                priv->stats.tx_dropped++;
                return 0;
        }
        if (skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN) {
-               dev_dbg(priv->adapter->dev,
-                       "data: Tx: insufficient skb headroom %d\n",
-                       skb_headroom(skb));
+               mwifiex_dbg(priv->adapter, DATA,
+                           "data: Tx: insufficient skb headroom %d\n",
+                           skb_headroom(skb));
                /* Insufficient skb headroom - allocate a new skb */
                new_skb =
                        skb_realloc_headroom(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
                if (unlikely(!new_skb)) {
-                       dev_err(priv->adapter->dev, "Tx: cannot alloca new_skb\n");
+                       mwifiex_dbg(priv->adapter, ERROR,
+                                   "Tx: cannot allocate new_skb\n");
                        kfree_skb(skb);
                        priv->stats.tx_dropped++;
                        return 0;
                }
                kfree_skb(skb);
                skb = new_skb;
-               dev_dbg(priv->adapter->dev, "info: new skb headroomd %d\n",
-                       skb_headroom(skb));
+               mwifiex_dbg(priv->adapter, INFO,
+                           "info: new skb headroom %d\n",
+                           skb_headroom(skb));
        }
 
        tx_info = MWIFIEX_SKB_TXCB(skb);
@@ -803,8 +819,8 @@ mwifiex_set_mac_address(struct net_device *dev, void *addr)
        if (!ret)
                memcpy(priv->netdev->dev_addr, priv->curr_addr, ETH_ALEN);
        else
-               dev_err(priv->adapter->dev,
-                       "set mac address failed: ret=%d\n", ret);
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "set mac address failed: ret=%d\n", ret);
 
        memcpy(dev->dev_addr, priv->curr_addr, ETH_ALEN);
 
@@ -842,20 +858,22 @@ mwifiex_tx_timeout(struct net_device *dev)
 
        priv->num_tx_timeout++;
        priv->tx_timeout_cnt++;
-       dev_err(priv->adapter->dev,
-               "%lu : Tx timeout(#%d), bss_type-num = %d-%d\n",
-               jiffies, priv->tx_timeout_cnt, priv->bss_type, priv->bss_num);
+       mwifiex_dbg(priv->adapter, ERROR,
+                   "%lu : Tx timeout(#%d), bss_type-num = %d-%d\n",
+                   jiffies, priv->tx_timeout_cnt, priv->bss_type,
+                   priv->bss_num);
        mwifiex_set_trans_start(dev);
 
        if (priv->tx_timeout_cnt > TX_TIMEOUT_THRESHOLD &&
            priv->adapter->if_ops.card_reset) {
-               dev_err(priv->adapter->dev,
-                       "tx_timeout_cnt exceeds threshold. Triggering card reset!\n");
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "tx_timeout_cnt exceeds threshold.\t"
+                           "Triggering card reset!\n");
                priv->adapter->if_ops.card_reset(priv->adapter);
        }
 }
 
-void mwifiex_dump_drv_info(struct mwifiex_adapter *adapter)
+void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter)
 {
        void *p;
        char drv_version[64];
@@ -868,10 +886,11 @@ void mwifiex_dump_drv_info(struct mwifiex_adapter *adapter)
 
        if (adapter->drv_info_dump) {
                vfree(adapter->drv_info_dump);
+               adapter->drv_info_dump = NULL;
                adapter->drv_info_size = 0;
        }
 
-       dev_info(adapter->dev, "=== DRIVER INFO DUMP START===\n");
+       mwifiex_dbg(adapter, MSG, "===mwifiex driverinfo dump start===\n");
 
        adapter->drv_info_dump = vzalloc(MWIFIEX_DRV_INFO_SIZE_MAX);
 
@@ -939,12 +958,12 @@ void mwifiex_dump_drv_info(struct mwifiex_adapter *adapter)
        }
 
        if (adapter->iface_type == MWIFIEX_SDIO) {
-               p += sprintf(p, "\n=== SDIO register DUMP===\n");
+               p += sprintf(p, "\n=== SDIO register dump===\n");
                if (adapter->if_ops.reg_dump)
                        p += adapter->if_ops.reg_dump(adapter, p);
        }
 
-       p += sprintf(p, "\n=== MORE DEBUG INFORMATION\n");
+       p += sprintf(p, "\n=== more debug information\n");
        debug_info = kzalloc(sizeof(*debug_info), GFP_KERNEL);
        if (debug_info) {
                for (i = 0; i < adapter->priv_num; i++) {
@@ -959,9 +978,99 @@ void mwifiex_dump_drv_info(struct mwifiex_adapter *adapter)
        }
 
        adapter->drv_info_size = p - adapter->drv_info_dump;
-       dev_info(adapter->dev, "=== DRIVER INFO DUMP END===\n");
+       mwifiex_dbg(adapter, MSG, "===mwifiex driverinfo dump end===\n");
+}
+EXPORT_SYMBOL_GPL(mwifiex_drv_info_dump);
+
+void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter)
+{
+       u8 idx, *dump_data, *fw_dump_ptr;
+       u32 dump_len;
+
+       dump_len = (strlen("========Start dump driverinfo========\n") +
+                      adapter->drv_info_size +
+                      strlen("\n========End dump========\n"));
+
+       for (idx = 0; idx < adapter->num_mem_types; idx++) {
+               struct memory_type_mapping *entry =
+                               &adapter->mem_type_mapping_tbl[idx];
+
+               if (entry->mem_ptr) {
+                       dump_len += (strlen("========Start dump ") +
+                                       strlen(entry->mem_name) +
+                                       strlen("========\n") +
+                                       (entry->mem_size + 1) +
+                                       strlen("\n========End dump========\n"));
+               }
+       }
+
+       dump_data = vzalloc(dump_len + 1);
+       if (!dump_data)
+               goto done;
+
+       fw_dump_ptr = dump_data;
+
+       /* Dump all the memory data into a single file; a userspace script
+        * splits it into multiple files (see the sketch after this function)
+        */
+       mwifiex_dbg(adapter, MSG,
+                   "== mwifiex dump information to /sys/class/devcoredump start");
+
+       strcpy(fw_dump_ptr, "========Start dump driverinfo========\n");
+       fw_dump_ptr += strlen("========Start dump driverinfo========\n");
+       memcpy(fw_dump_ptr, adapter->drv_info_dump, adapter->drv_info_size);
+       fw_dump_ptr += adapter->drv_info_size;
+       strcpy(fw_dump_ptr, "\n========End dump========\n");
+       fw_dump_ptr += strlen("\n========End dump========\n");
+
+       for (idx = 0; idx < adapter->num_mem_types; idx++) {
+               struct memory_type_mapping *entry =
+                                       &adapter->mem_type_mapping_tbl[idx];
+
+               if (entry->mem_ptr) {
+                       strcpy(fw_dump_ptr, "========Start dump ");
+                       fw_dump_ptr += strlen("========Start dump ");
+
+                       strcpy(fw_dump_ptr, entry->mem_name);
+                       fw_dump_ptr += strlen(entry->mem_name);
+
+                       strcpy(fw_dump_ptr, "========\n");
+                       fw_dump_ptr += strlen("========\n");
+
+                       memcpy(fw_dump_ptr, entry->mem_ptr, entry->mem_size);
+                       fw_dump_ptr += entry->mem_size;
+
+                       strcpy(fw_dump_ptr, "\n========End dump========\n");
+                       fw_dump_ptr += strlen("\n========End dump========\n");
+               }
+       }
+
+       /* The device dump data will be freed by the devcoredump release
+        * function after 5 min.
+        */
+       dev_coredumpv(adapter->dev, dump_data, dump_len, GFP_KERNEL);
+       mwifiex_dbg(adapter, MSG,
+                   "== mwifiex dump information to /sys/class/devcoredump end");
+
+done:
+       for (idx = 0; idx < adapter->num_mem_types; idx++) {
+               struct memory_type_mapping *entry =
+                       &adapter->mem_type_mapping_tbl[idx];
+
+               if (entry->mem_ptr) {
+                       vfree(entry->mem_ptr);
+                       entry->mem_ptr = NULL;
+               }
+               entry->mem_size = 0;
+       }
+
+       if (adapter->drv_info_dump) {
+               vfree(adapter->drv_info_dump);
+               adapter->drv_info_dump = NULL;
+               adapter->drv_info_size = 0;
+       }
 }
-EXPORT_SYMBOL_GPL(mwifiex_dump_drv_info);
+EXPORT_SYMBOL_GPL(mwifiex_upload_device_dump);
 
 /*
  * CFG802.11 network device handler for statistics retrieval.
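The blob handed to dev_coredumpv() above is one self-describing stream: every region sits between a "========Start dump <name>========\n" marker and a "\n========End dump========\n" marker, and dump_len has to account for every marker byte before anything is written. A minimal userspace sketch of the same append pattern (the helper and the "ITCM" region name are illustrative, not driver code):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative append helper mirroring the framing used by
 * mwifiex_upload_device_dump(); not driver code. */
static char *append_section(char *p, const char *name,
                            const void *data, size_t len)
{
        p += sprintf(p, "========Start dump %s========\n", name);
        memcpy(p, data, len);
        p += len;
        p += sprintf(p, "\n========End dump========\n");
        return p;
}

int main(void)
{
        const char region[] = "fw region bytes";      /* stand-in payload */
        /* Reserve exactly what will be written: markers plus payload. */
        size_t need = strlen("========Start dump ") + strlen("ITCM") +
                      strlen("========\n") + strlen(region) +
                      strlen("\n========End dump========\n");
        char *buf = calloc(1, need + 1);
        char *end;

        if (!buf)
                return 1;
        end = append_section(buf, "ITCM", region, strlen(region));
        printf("wrote %zu of %zu reserved bytes\n", (size_t)(end - buf), need);
        free(buf);
        return 0;
}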
@@ -1230,21 +1339,24 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem)
                }
        }
 
-       dev_dbg(adapter->dev, "cmd: calling mwifiex_shutdown_drv...\n");
+       mwifiex_dbg(adapter, CMD,
+                   "cmd: calling mwifiex_shutdown_drv...\n");
        adapter->init_wait_q_woken = false;
 
        if (mwifiex_shutdown_drv(adapter) == -EINPROGRESS)
                wait_event_interruptible(adapter->init_wait_q,
                                         adapter->init_wait_q_woken);
-       dev_dbg(adapter->dev, "cmd: mwifiex_shutdown_drv done\n");
+       mwifiex_dbg(adapter, CMD,
+                   "cmd: mwifiex_shutdown_drv done\n");
        if (atomic_read(&adapter->rx_pending) ||
            atomic_read(&adapter->tx_pending) ||
            atomic_read(&adapter->cmd_pending)) {
-               dev_err(adapter->dev, "rx_pending=%d, tx_pending=%d, "
-                      "cmd_pending=%d\n",
-                      atomic_read(&adapter->rx_pending),
-                      atomic_read(&adapter->tx_pending),
-                      atomic_read(&adapter->cmd_pending));
+               mwifiex_dbg(adapter, ERROR,
+                           "rx_pending=%d, tx_pending=%d,\t"
+                           "cmd_pending=%d\n",
+                           atomic_read(&adapter->rx_pending),
+                           atomic_read(&adapter->tx_pending),
+                           atomic_read(&adapter->cmd_pending));
        }
 
        for (i = 0; i < adapter->priv_num; i++) {
@@ -1264,11 +1376,13 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem)
        wiphy_free(adapter->wiphy);
 
        /* Unregister device */
-       dev_dbg(adapter->dev, "info: unregister device\n");
+       mwifiex_dbg(adapter, INFO,
+                   "info: unregister device\n");
        if (adapter->if_ops.unregister_dev)
                adapter->if_ops.unregister_dev(adapter);
        /* Free adapter structure */
-       dev_dbg(adapter->dev, "info: free adapter\n");
+       mwifiex_dbg(adapter, INFO,
+                   "info: free adapter\n");
        mwifiex_free_adapter(adapter);
 
 exit_remove:
index fe1256044a6c9bca9b9e8475cf8a0487ca7cd0b8..5a6c1c76b33bc173c0ab55465ec1ad2885675dde 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/of.h>
 #include <linux/idr.h>
 #include <linux/inetdevice.h>
+#include <linux/devcoredump.h>
 
 #include "decl.h"
 #include "ioctl.h"
@@ -147,6 +148,54 @@ enum {
 /* Address alignment */
 #define MWIFIEX_ALIGN_ADDR(p, a) (((long)(p) + (a) - 1) & ~((a) - 1))
 
+/**
+ * enum MWIFIEX_DEBUG_LEVEL - Marvell WiFi debug levels
+ */
+enum MWIFIEX_DEBUG_LEVEL {
+       MWIFIEX_DBG_MSG         = 0x00000001,
+       MWIFIEX_DBG_FATAL       = 0x00000002,
+       MWIFIEX_DBG_ERROR       = 0x00000004,
+       MWIFIEX_DBG_DATA        = 0x00000008,
+       MWIFIEX_DBG_CMD         = 0x00000010,
+       MWIFIEX_DBG_EVENT       = 0x00000020,
+       MWIFIEX_DBG_INTR        = 0x00000040,
+       MWIFIEX_DBG_IOCTL       = 0x00000080,
+
+       MWIFIEX_DBG_MPA_D       = 0x00008000,
+       MWIFIEX_DBG_DAT_D       = 0x00010000,
+       MWIFIEX_DBG_CMD_D       = 0x00020000,
+       MWIFIEX_DBG_EVT_D       = 0x00040000,
+       MWIFIEX_DBG_FW_D        = 0x00080000,
+       MWIFIEX_DBG_IF_D        = 0x00100000,
+
+       MWIFIEX_DBG_ENTRY       = 0x10000000,
+       MWIFIEX_DBG_WARN        = 0x20000000,
+       MWIFIEX_DBG_INFO        = 0x40000000,
+       MWIFIEX_DBG_DUMP        = 0x80000000,
+
+       MWIFIEX_DBG_ANY         = 0xffffffff
+};
+
+#define MWIFIEX_DEFAULT_DEBUG_MASK     (MWIFIEX_DBG_MSG | \
+                                       MWIFIEX_DBG_FATAL | \
+                                       MWIFIEX_DBG_ERROR)
+
+#define mwifiex_dbg(adapter, dbg_mask, fmt, args...)           \
+do {                                                           \
+       if ((adapter)->debug_mask & MWIFIEX_DBG_##dbg_mask)     \
+               if ((adapter)->dev)                             \
+                       dev_info((adapter)->dev, fmt, ## args); \
+} while (0)
+
+#define DEBUG_DUMP_DATA_MAX_LEN                128
+#define mwifiex_dbg_dump(adapter, dbg_mask, str, buf, len)     \
+do {                                                           \
+       if ((adapter)->debug_mask & MWIFIEX_DBG_##dbg_mask)     \
+               print_hex_dump(KERN_DEBUG, str,                 \
+                              DUMP_PREFIX_OFFSET, 16, 1,       \
+                              buf, len, false);                \
+} while (0)
+
 struct mwifiex_dbg {
        u32 num_cmd_host_to_card_failure;
        u32 num_cmd_sleep_cfm_host_to_card_failure;
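For context, mwifiex_dbg() above reduces to a dev_info() gated on adapter->debug_mask, so a message is silently dropped when its level bit is clear or adapter->dev is not yet set. A hedged sketch of typical call sites (the function and the values are hypothetical):

/* Hypothetical call site; adapter->debug_mask defaults to
 * MWIFIEX_DEFAULT_DEBUG_MASK (MSG | FATAL | ERROR). */
static void example_dbg_usage(struct mwifiex_adapter *adapter,
                              const u8 *buf, int len)
{
        /* Printed with the default mask: the ERROR bit is set. */
        mwifiex_dbg(adapter, ERROR, "cmd timeout, seq=%d\n", 42);

        /* Dropped unless MWIFIEX_DBG_INFO has been set in debug_mask. */
        mwifiex_dbg(adapter, INFO, "info: verbose state change\n");

        /* Hex dump, gated on the DAT_D bit; callers cap the length. */
        mwifiex_dbg_dump(adapter, DAT_D, "rx pkt:", buf,
                         min(len, DEBUG_DUMP_DATA_MAX_LEN));
}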
@@ -451,7 +500,7 @@ enum rdwr_status {
 };
 
 enum mwifiex_iface_work_flags {
-       MWIFIEX_IFACE_WORK_FW_DUMP,
+       MWIFIEX_IFACE_WORK_DEVICE_DUMP,
        MWIFIEX_IFACE_WORK_CARD_RESET,
 };
 
@@ -611,6 +660,7 @@ struct mwifiex_private {
        struct delayed_work dfs_chan_sw_work;
        struct cfg80211_beacon_data beacon_after;
        struct mwifiex_11h_intf_state state_11h;
+       struct mwifiex_ds_mem_rw mem_rw;
 };
 
 
@@ -740,8 +790,8 @@ struct mwifiex_if_ops {
        int (*init_fw_port) (struct mwifiex_adapter *);
        int (*dnld_fw) (struct mwifiex_adapter *, struct mwifiex_fw_image *);
        void (*card_reset) (struct mwifiex_adapter *);
-       void (*fw_dump)(struct mwifiex_adapter *);
        int (*reg_dump)(struct mwifiex_adapter *, char *);
+       void (*device_dump)(struct mwifiex_adapter *);
        int (*clean_pcie_ring) (struct mwifiex_adapter *adapter);
        void (*iface_work)(struct work_struct *work);
        void (*submit_rem_rx_urbs)(struct mwifiex_adapter *adapter);
@@ -750,6 +800,7 @@ struct mwifiex_if_ops {
 
 struct mwifiex_adapter {
        u8 iface_type;
+       unsigned int debug_mask;
        struct mwifiex_iface_comb iface_limit;
        struct mwifiex_iface_comb curr_iface_comb;
        struct mwifiex_private *priv[MWIFIEX_MAX_BSS_NUM];
@@ -900,7 +951,6 @@ struct mwifiex_adapter {
        u8 key_api_major_ver, key_api_minor_ver;
        struct memory_type_mapping *mem_type_mapping_tbl;
        u8 num_mem_types;
-       u8 curr_mem_idx;
        void *drv_info_dump;
        u32 drv_info_size;
        bool scan_chan_gap_enabled;
@@ -1434,7 +1484,8 @@ void mwifiex_hist_data_add(struct mwifiex_private *priv,
 u8 mwifiex_adjust_data_rate(struct mwifiex_private *priv,
                            u8 rx_rate, u8 ht_info);
 
-void mwifiex_dump_drv_info(struct mwifiex_adapter *adapter);
+void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter);
+void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter);
 void *mwifiex_alloc_dma_align_buf(int rx_len, gfp_t flags);
 void mwifiex_queue_main_work(struct mwifiex_adapter *adapter);
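The fw_dump -> device_dump rename above splits dumping into stages. A plausible orchestration is sketched below; it assumes the interface driver's device_dump hook fills mem_type_mapping_tbl before upload, so the wrapper and call order are illustrative, not the driver's actual call chain:

/* Illustrative wrapper only; the real trigger lives in the interface
 * drivers' work handlers, and the exact call order is an assumption. */
static void example_device_dump(struct mwifiex_adapter *adapter)
{
        /* 1. Snapshot driver/debug state into adapter->drv_info_dump. */
        mwifiex_drv_info_dump(adapter);

        /* 2. Bus-specific hook reads firmware memory regions into
         *    adapter->mem_type_mapping_tbl[i].mem_ptr. */
        if (adapter->if_ops.device_dump)
                adapter->if_ops.device_dump(adapter);

        /* 3. Frame both into one blob for devcoredump and release the
         *    intermediate buffers. */
        mwifiex_upload_device_dump(adapter);
}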
 
index bcc7751d883c3773b558ef8c8b8bb8c3a8a3c474..77b9055a2d147411515b5875f67b90210938ac9b 100644 (file)
@@ -57,7 +57,7 @@ mwifiex_map_pci_memory(struct mwifiex_adapter *adapter, struct sk_buff *skb,
 
        mapping.addr = pci_map_single(card->dev, skb->data, size, flags);
        if (pci_dma_mapping_error(card->dev, mapping.addr)) {
-               dev_err(adapter->dev, "failed to map pci memory!\n");
+               mwifiex_dbg(adapter, ERROR, "failed to map pci memory!\n");
                return -1;
        }
        mapping.len = size;
@@ -89,8 +89,9 @@ static bool mwifiex_pcie_ok_to_access_hw(struct mwifiex_adapter *adapter)
 
        if (card->sleep_cookie_vbase) {
                cookie_addr = (u32 *)card->sleep_cookie_vbase;
-               dev_dbg(adapter->dev, "info: ACCESS_HW: sleep cookie=0x%x\n",
-                       *cookie_addr);
+               mwifiex_dbg(adapter, INFO,
+                           "info: ACCESS_HW: sleep cookie=0x%x\n",
+                           *cookie_addr);
                if (*cookie_addr == FW_AWAKE_COOKIE)
                        return true;
        }
@@ -164,7 +165,8 @@ static int mwifiex_pcie_resume(struct device *dev)
        adapter = card->adapter;
 
        if (!adapter->is_suspended) {
-               dev_warn(adapter->dev, "Device already resumed\n");
+               mwifiex_dbg(adapter, WARN,
+                           "Device already resumed\n");
                return 0;
        }
 
@@ -361,16 +363,16 @@ static void mwifiex_delay_for_sleep_cookie(struct mwifiex_adapter *adapter,
                sleep_cookie = *(u32 *)buffer;
 
                if (sleep_cookie == MWIFIEX_DEF_SLEEP_COOKIE) {
-                       dev_dbg(adapter->dev,
-                               "sleep cookie found at count %d\n", count);
+                       mwifiex_dbg(adapter, INFO,
+                                   "sleep cookie found at count %d\n", count);
                        break;
                }
                usleep_range(20, 30);
        }
 
        if (count >= max_delay_loop_cnt)
-               dev_dbg(adapter->dev,
-                       "max count reached while accessing sleep cookie\n");
+               mwifiex_dbg(adapter, INFO,
+                           "max count reached while accessing sleep cookie\n");
 }
 
 /* This function wakes up the card by reading fw_status register. */
@@ -380,20 +382,23 @@ static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
        struct pcie_service_card *card = adapter->card;
        const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
 
-       dev_dbg(adapter->dev, "event: Wakeup device...\n");
+       mwifiex_dbg(adapter, EVENT,
+                   "event: Wakeup device...\n");
 
        if (reg->sleep_cookie)
                mwifiex_pcie_dev_wakeup_delay(adapter);
 
        /* Reading fw_status register will wakeup device */
        if (mwifiex_read_reg(adapter, reg->fw_status, &fw_status)) {
-               dev_warn(adapter->dev, "Reading fw_status register failed\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "Reading fw_status register failed\n");
                return -1;
        }
 
        if (reg->sleep_cookie) {
                mwifiex_pcie_dev_wakeup_delay(adapter);
-               dev_dbg(adapter->dev, "PCIE wakeup: Setting PS_STATE_AWAKE\n");
+               mwifiex_dbg(adapter, INFO,
+                           "PCIE wakeup: Setting PS_STATE_AWAKE\n");
                adapter->ps_state = PS_STATE_AWAKE;
        }
 
@@ -407,7 +412,8 @@ static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
  */
 static int mwifiex_pm_wakeup_card_complete(struct mwifiex_adapter *adapter)
 {
-       dev_dbg(adapter->dev, "cmd: Wakeup device completed\n");
+       mwifiex_dbg(adapter, CMD,
+                   "cmd: Wakeup device completed\n");
 
        return 0;
 }
@@ -423,7 +429,8 @@ static int mwifiex_pcie_disable_host_int(struct mwifiex_adapter *adapter)
        if (mwifiex_pcie_ok_to_access_hw(adapter)) {
                if (mwifiex_write_reg(adapter, PCIE_HOST_INT_MASK,
                                      0x00000000)) {
-                       dev_warn(adapter->dev, "Disable host interrupt failed\n");
+                       mwifiex_dbg(adapter, ERROR,
+                                   "Disable host interrupt failed\n");
                        return -1;
                }
        }
@@ -443,7 +450,8 @@ static int mwifiex_pcie_enable_host_int(struct mwifiex_adapter *adapter)
                /* Simply write the mask to the register */
                if (mwifiex_write_reg(adapter, PCIE_HOST_INT_MASK,
                                      HOST_INTR_MASK)) {
-                       dev_warn(adapter->dev, "Enable host interrupt failed\n");
+                       mwifiex_dbg(adapter, ERROR,
+                                   "Enable host interrupt failed\n");
                        return -1;
                }
        }
@@ -499,8 +507,8 @@ static int mwifiex_init_rxq_ring(struct mwifiex_adapter *adapter)
                skb = mwifiex_alloc_dma_align_buf(MWIFIEX_RX_DATA_BUF_SIZE,
                                                  GFP_KERNEL | GFP_DMA);
                if (!skb) {
-                       dev_err(adapter->dev,
-                               "Unable to allocate skb for RX ring.\n");
+                       mwifiex_dbg(adapter, ERROR,
+                                   "Unable to allocate skb for RX ring.\n");
                        kfree(card->rxbd_ring_vbase);
                        return -ENOMEM;
                }
@@ -512,10 +520,10 @@ static int mwifiex_init_rxq_ring(struct mwifiex_adapter *adapter)
 
                buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
 
-               dev_dbg(adapter->dev,
-                       "info: RX ring: skb=%p len=%d data=%p buf_pa=%#x:%x\n",
-                       skb, skb->len, skb->data, (u32)buf_pa,
-                       (u32)((u64)buf_pa >> 32));
+               mwifiex_dbg(adapter, INFO,
+                           "info: RX ring: skb=%p len=%d data=%p buf_pa=%#x:%x\n",
+                           skb, skb->len, skb->data, (u32)buf_pa,
+                           (u32)((u64)buf_pa >> 32));
 
                card->rx_buf_list[i] = skb;
                if (reg->pfu_enabled) {
@@ -556,8 +564,8 @@ static int mwifiex_pcie_init_evt_ring(struct mwifiex_adapter *adapter)
                /* Allocate skb here so that firmware can DMA data from it */
                skb = dev_alloc_skb(MAX_EVENT_SIZE);
                if (!skb) {
-                       dev_err(adapter->dev,
-                               "Unable to allocate skb for EVENT buf.\n");
+                       mwifiex_dbg(adapter, ERROR,
+                                   "Unable to allocate skb for EVENT buf.\n");
                        kfree(card->evtbd_ring_vbase);
                        return -ENOMEM;
                }
@@ -569,10 +577,10 @@ static int mwifiex_pcie_init_evt_ring(struct mwifiex_adapter *adapter)
 
                buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
 
-               dev_dbg(adapter->dev,
-                       "info: EVT ring: skb=%p len=%d data=%p buf_pa=%#x:%x\n",
-                       skb, skb->len, skb->data, (u32)buf_pa,
-                       (u32)((u64)buf_pa >> 32));
+               mwifiex_dbg(adapter, EVENT,
+                           "info: EVT ring: skb=%p len=%d data=%p buf_pa=%#x:%x\n",
+                           skb, skb->len, skb->data, (u32)buf_pa,
+                           (u32)((u64)buf_pa >> 32));
 
                card->evt_buf_list[i] = skb;
                card->evtbd_ring[i] = (void *)(card->evtbd_ring_vbase +
@@ -715,21 +723,23 @@ static int mwifiex_pcie_create_txbd_ring(struct mwifiex_adapter *adapter)
                card->txbd_ring_size = sizeof(struct mwifiex_pcie_buf_desc) *
                                       MWIFIEX_MAX_TXRX_BD;
 
-       dev_dbg(adapter->dev, "info: txbd_ring: Allocating %d bytes\n",
-               card->txbd_ring_size);
+       mwifiex_dbg(adapter, INFO,
+                   "info: txbd_ring: Allocating %d bytes\n",
+                   card->txbd_ring_size);
        card->txbd_ring_vbase = pci_alloc_consistent(card->dev,
                                                     card->txbd_ring_size,
                                                     &card->txbd_ring_pbase);
        if (!card->txbd_ring_vbase) {
-               dev_err(adapter->dev,
-                       "allocate consistent memory (%d bytes) failed!\n",
-                       card->txbd_ring_size);
+               mwifiex_dbg(adapter, ERROR,
+                           "allocate consistent memory (%d bytes) failed!\n",
+                           card->txbd_ring_size);
                return -ENOMEM;
        }
-       dev_dbg(adapter->dev,
-               "info: txbd_ring - base: %p, pbase: %#x:%x, len: %x\n",
-               card->txbd_ring_vbase, (unsigned int)card->txbd_ring_pbase,
-               (u32)((u64)card->txbd_ring_pbase >> 32), card->txbd_ring_size);
+       mwifiex_dbg(adapter, DATA,
+                   "info: txbd_ring - base: %p, pbase: %#x:%x, len: %x\n",
+                   card->txbd_ring_vbase, (unsigned int)card->txbd_ring_pbase,
+                   (u32)((u64)card->txbd_ring_pbase >> 32),
+                   card->txbd_ring_size);
 
        return mwifiex_init_txq_ring(adapter);
 }
@@ -777,23 +787,24 @@ static int mwifiex_pcie_create_rxbd_ring(struct mwifiex_adapter *adapter)
                card->rxbd_ring_size = sizeof(struct mwifiex_pcie_buf_desc) *
                                       MWIFIEX_MAX_TXRX_BD;
 
-       dev_dbg(adapter->dev, "info: rxbd_ring: Allocating %d bytes\n",
-               card->rxbd_ring_size);
+       mwifiex_dbg(adapter, INFO,
+                   "info: rxbd_ring: Allocating %d bytes\n",
+                   card->rxbd_ring_size);
        card->rxbd_ring_vbase = pci_alloc_consistent(card->dev,
                                                     card->rxbd_ring_size,
                                                     &card->rxbd_ring_pbase);
        if (!card->rxbd_ring_vbase) {
-               dev_err(adapter->dev,
-                       "allocate consistent memory (%d bytes) failed!\n",
-                       card->rxbd_ring_size);
+               mwifiex_dbg(adapter, ERROR,
+                           "allocate consistent memory (%d bytes) failed!\n",
+                           card->rxbd_ring_size);
                return -ENOMEM;
        }
 
-       dev_dbg(adapter->dev,
-               "info: rxbd_ring - base: %p, pbase: %#x:%x, len: %#x\n",
-               card->rxbd_ring_vbase, (u32)card->rxbd_ring_pbase,
-               (u32)((u64)card->rxbd_ring_pbase >> 32),
-               card->rxbd_ring_size);
+       mwifiex_dbg(adapter, DATA,
+                   "info: rxbd_ring - base: %p, pbase: %#x:%x, len: %#x\n",
+                   card->rxbd_ring_vbase, (u32)card->rxbd_ring_pbase,
+                   (u32)((u64)card->rxbd_ring_pbase >> 32),
+                   card->rxbd_ring_size);
 
        return mwifiex_init_rxq_ring(adapter);
 }
@@ -840,23 +851,24 @@ static int mwifiex_pcie_create_evtbd_ring(struct mwifiex_adapter *adapter)
        card->evtbd_ring_size = sizeof(struct mwifiex_evt_buf_desc) *
                                MWIFIEX_MAX_EVT_BD;
 
-       dev_dbg(adapter->dev, "info: evtbd_ring: Allocating %d bytes\n",
+       mwifiex_dbg(adapter, INFO,
+                   "info: evtbd_ring: Allocating %d bytes\n",
                card->evtbd_ring_size);
        card->evtbd_ring_vbase = pci_alloc_consistent(card->dev,
                                                      card->evtbd_ring_size,
                                                      &card->evtbd_ring_pbase);
        if (!card->evtbd_ring_vbase) {
-               dev_err(adapter->dev,
-                       "allocate consistent memory (%d bytes) failed!\n",
-                       card->evtbd_ring_size);
+               mwifiex_dbg(adapter, ERROR,
+                           "allocate consistent memory (%d bytes) failed!\n",
+                           card->evtbd_ring_size);
                return -ENOMEM;
        }
 
-       dev_dbg(adapter->dev,
-               "info: CMDRSP/EVT bd_ring - base: %p pbase: %#x:%x len: %#x\n",
-               card->evtbd_ring_vbase, (u32)card->evtbd_ring_pbase,
-               (u32)((u64)card->evtbd_ring_pbase >> 32),
-               card->evtbd_ring_size);
+       mwifiex_dbg(adapter, EVENT,
+                   "info: CMDRSP/EVT bd_ring - base: %p pbase: %#x:%x len: %#x\n",
+                   card->evtbd_ring_vbase, (u32)card->evtbd_ring_pbase,
+                   (u32)((u64)card->evtbd_ring_pbase >> 32),
+                   card->evtbd_ring_size);
 
        return mwifiex_pcie_init_evt_ring(adapter);
 }
@@ -895,8 +907,8 @@ static int mwifiex_pcie_alloc_cmdrsp_buf(struct mwifiex_adapter *adapter)
        /* Allocate memory for receiving command response data */
        skb = dev_alloc_skb(MWIFIEX_UPLD_SIZE);
        if (!skb) {
-               dev_err(adapter->dev,
-                       "Unable to allocate skb for command response data.\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "Unable to allocate skb for command response data.\n");
                return -ENOMEM;
        }
        skb_put(skb, MWIFIEX_UPLD_SIZE);
@@ -944,14 +956,16 @@ static int mwifiex_pcie_alloc_sleep_cookie_buf(struct mwifiex_adapter *adapter)
        card->sleep_cookie_vbase = pci_alloc_consistent(card->dev, sizeof(u32),
                                                     &card->sleep_cookie_pbase);
        if (!card->sleep_cookie_vbase) {
-               dev_err(adapter->dev, "pci_alloc_consistent failed!\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "pci_alloc_consistent failed!\n");
                return -ENOMEM;
        }
        /* Init val of Sleep Cookie */
        *(u32 *)card->sleep_cookie_vbase = FW_AWAKE_COOKIE;
 
-       dev_dbg(adapter->dev, "alloc_scook: sleep cookie=0x%x\n",
-               *((u32 *)card->sleep_cookie_vbase));
+       mwifiex_dbg(adapter, INFO,
+                   "alloc_scook: sleep cookie=0x%x\n",
+                   *((u32 *)card->sleep_cookie_vbase));
 
        return 0;
 }
@@ -993,8 +1007,8 @@ static int mwifiex_clean_pcie_ring_buf(struct mwifiex_adapter *adapter)
                 */
                if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT,
                                      CPU_INTR_DNLD_RDY)) {
-                       dev_err(adapter->dev,
-                               "failed to assert dnld-rdy interrupt.\n");
+                       mwifiex_dbg(adapter, ERROR,
+                                   "failed to assert dnld-rdy interrupt.\n");
                        return -1;
                }
        }
@@ -1018,13 +1032,14 @@ static int mwifiex_pcie_send_data_complete(struct mwifiex_adapter *adapter)
 
        /* Read the TX ring read pointer set by firmware */
        if (mwifiex_read_reg(adapter, reg->tx_rdptr, &rdptr)) {
-               dev_err(adapter->dev,
-                       "SEND COMP: failed to read reg->tx_rdptr\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "SEND COMP: failed to read reg->tx_rdptr\n");
                return -1;
        }
 
-       dev_dbg(adapter->dev, "SEND COMP: rdptr_prev=0x%x, rdptr=0x%x\n",
-               card->txbd_rdptr, rdptr);
+       mwifiex_dbg(adapter, DATA,
+                   "SEND COMP: rdptr_prev=0x%x, rdptr=0x%x\n",
+                   card->txbd_rdptr, rdptr);
 
        num_tx_buffs = MWIFIEX_MAX_TXRX_BD << reg->tx_start_ptr;
        /* free from previous txbd_rdptr to current txbd_rdptr */
@@ -1038,9 +1053,9 @@ static int mwifiex_pcie_send_data_complete(struct mwifiex_adapter *adapter)
                skb = card->tx_buf_list[wrdoneidx];
 
                if (skb) {
-                       dev_dbg(adapter->dev,
-                               "SEND COMP: Detach skb %p at txbd_rdidx=%d\n",
-                               skb, wrdoneidx);
+                       mwifiex_dbg(adapter, DATA,
+                                   "SEND COMP: Detach skb %p at txbd_rdidx=%d\n",
+                                   skb, wrdoneidx);
                        mwifiex_unmap_pci_memory(adapter, skb,
                                                 PCI_DMA_TODEVICE);
 
@@ -1112,8 +1127,9 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb,
        __le16 *tmp;
 
        if (!(skb->data && skb->len)) {
-               dev_err(adapter->dev, "%s(): invalid parameter <%p, %#x>\n",
-                       __func__, skb->data, skb->len);
+               mwifiex_dbg(adapter, ERROR,
+                           "%s(): invalid parameter <%p, %#x>\n",
+                           __func__, skb->data, skb->len);
                return -1;
        }
 
@@ -1121,7 +1137,8 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb,
                mwifiex_pm_wakeup_card(adapter);
 
        num_tx_buffs = MWIFIEX_MAX_TXRX_BD << reg->tx_start_ptr;
-       dev_dbg(adapter->dev, "info: SEND DATA: <Rd: %#x, Wr: %#x>\n",
+       mwifiex_dbg(adapter, DATA,
+                   "info: SEND DATA: <Rd: %#x, Wr: %#x>\n",
                card->txbd_rdptr, card->txbd_wrptr);
        if (mwifiex_pcie_txbd_not_full(card)) {
                u8 *payload;
@@ -1175,39 +1192,40 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb,
                /* Write the TX ring write pointer in to reg->tx_wrptr */
                if (mwifiex_write_reg(adapter, reg->tx_wrptr,
                                      card->txbd_wrptr | rx_val)) {
-                       dev_err(adapter->dev,
-                               "SEND DATA: failed to write reg->tx_wrptr\n");
+                       mwifiex_dbg(adapter, ERROR,
+                                   "SEND DATA: failed to write reg->tx_wrptr\n");
                        ret = -1;
                        goto done_unmap;
                }
                if ((mwifiex_pcie_txbd_not_full(card)) &&
                    tx_param->next_pkt_len) {
                        /* have more packets and TxBD still can hold more */
-                       dev_dbg(adapter->dev,
-                               "SEND DATA: delay dnld-rdy interrupt.\n");
+                       mwifiex_dbg(adapter, DATA,
+                                   "SEND DATA: delay dnld-rdy interrupt.\n");
                        adapter->data_sent = false;
                } else {
                        /* Send the TX ready interrupt */
                        if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT,
                                              CPU_INTR_DNLD_RDY)) {
-                               dev_err(adapter->dev,
-                                       "SEND DATA: failed to assert dnld-rdy interrupt.\n");
+                               mwifiex_dbg(adapter, ERROR,
+                                           "SEND DATA: failed to assert dnld-rdy interrupt.\n");
                                ret = -1;
                                goto done_unmap;
                        }
                }
-               dev_dbg(adapter->dev, "info: SEND DATA: Updated <Rd: %#x, Wr: "
-                       "%#x> and sent packet to firmware successfully\n",
-                       card->txbd_rdptr, card->txbd_wrptr);
+               mwifiex_dbg(adapter, DATA,
+                           "info: SEND DATA: Updated <Rd: %#x, Wr:\t"
+                           "%#x> and sent packet to firmware successfully\n",
+                           card->txbd_rdptr, card->txbd_wrptr);
        } else {
-               dev_dbg(adapter->dev,
-                       "info: TX Ring full, can't send packets to fw\n");
+               mwifiex_dbg(adapter, DATA,
+                           "info: TX Ring full, can't send packets to fw\n");
                adapter->data_sent = true;
                /* Send the TX ready interrupt */
                if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT,
                                      CPU_INTR_DNLD_RDY))
-                       dev_err(adapter->dev,
-                               "SEND DATA: failed to assert door-bell intr\n");
+                       mwifiex_dbg(adapter, ERROR,
+                                   "SEND DATA: failed to assert door-bell intr\n");
                return -EBUSY;
        }
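The branch above defers the dnld-rdy doorbell whenever the TxBD ring still has room and tx_param reports another packet queued behind this one, batching MMIO writes across packets. A standalone model of just that decision:

#include <stdbool.h>
#include <stdio.h>

/* Model of the doorbell-batching decision: ring the device only when
 * the TxBD ring is full or no further packet is queued behind this one. */
static bool should_ring_doorbell(bool ring_has_room, unsigned int next_pkt_len)
{
        return !(ring_has_room && next_pkt_len);
}

int main(void)
{
        printf("%d %d %d\n",
               should_ring_doorbell(true, 1500),        /* 0: defer, batch */
               should_ring_doorbell(true, 0),           /* 1: last packet */
               should_ring_doorbell(false, 1500));      /* 1: ring full */
        return 0;
}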
 
@@ -1243,8 +1261,8 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
 
        /* Read the RX ring Write pointer set by firmware */
        if (mwifiex_read_reg(adapter, reg->rx_wrptr, &wrptr)) {
-               dev_err(adapter->dev,
-                       "RECV DATA: failed to read reg->rx_wrptr\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "RECV DATA: failed to read reg->rx_wrptr\n");
                ret = -1;
                goto done;
        }
@@ -1277,15 +1295,15 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
                rx_len = le16_to_cpu(pkt_len);
                if (WARN_ON(rx_len <= INTF_HEADER_LEN ||
                            rx_len > MWIFIEX_RX_DATA_BUF_SIZE)) {
-                       dev_err(adapter->dev,
-                               "Invalid RX len %d, Rd=%#x, Wr=%#x\n",
-                               rx_len, card->rxbd_rdptr, wrptr);
+                       mwifiex_dbg(adapter, ERROR,
+                                   "Invalid RX len %d, Rd=%#x, Wr=%#x\n",
+                                   rx_len, card->rxbd_rdptr, wrptr);
                        dev_kfree_skb_any(skb_data);
                } else {
                        skb_put(skb_data, rx_len);
-                       dev_dbg(adapter->dev,
-                               "info: RECV DATA: Rd=%#x, Wr=%#x, Len=%d\n",
-                               card->rxbd_rdptr, wrptr, rx_len);
+                       mwifiex_dbg(adapter, DATA,
+                                   "info: RECV DATA: Rd=%#x, Wr=%#x, Len=%d\n",
+                                   card->rxbd_rdptr, wrptr, rx_len);
                        skb_pull(skb_data, INTF_HEADER_LEN);
                        if (adapter->rx_work_enabled) {
                                skb_queue_tail(&adapter->rx_data_q, skb_data);
@@ -1299,8 +1317,8 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
                skb_tmp = mwifiex_alloc_dma_align_buf(MWIFIEX_RX_DATA_BUF_SIZE,
                                                      GFP_KERNEL | GFP_DMA);
                if (!skb_tmp) {
-                       dev_err(adapter->dev,
-                               "Unable to allocate skb.\n");
+                       mwifiex_dbg(adapter, ERROR,
+                                   "Unable to allocate skb.\n");
                        return -ENOMEM;
                }
 
@@ -1311,9 +1329,9 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
 
                buf_pa = MWIFIEX_SKB_DMA_ADDR(skb_tmp);
 
-               dev_dbg(adapter->dev,
-                       "RECV DATA: Attach new sk_buff %p at rxbd_rdidx=%d\n",
-                       skb_tmp, rd_index);
+               mwifiex_dbg(adapter, INFO,
+                           "RECV DATA: Attach new sk_buff %p at rxbd_rdidx=%d\n",
+                           skb_tmp, rd_index);
                card->rx_buf_list[rd_index] = skb_tmp;
 
                if (reg->pfu_enabled) {
@@ -1336,28 +1354,29 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
                                             reg->rx_rollover_ind) ^
                                             reg->rx_rollover_ind);
                }
-               dev_dbg(adapter->dev, "info: RECV DATA: <Rd: %#x, Wr: %#x>\n",
-                       card->rxbd_rdptr, wrptr);
+               mwifiex_dbg(adapter, DATA,
+                           "info: RECV DATA: <Rd: %#x, Wr: %#x>\n",
+                           card->rxbd_rdptr, wrptr);
 
                tx_val = card->txbd_wrptr & reg->tx_wrap_mask;
                /* Write the RX ring read pointer in to reg->rx_rdptr */
                if (mwifiex_write_reg(adapter, reg->rx_rdptr,
                                      card->rxbd_rdptr | tx_val)) {
-                       dev_err(adapter->dev,
-                               "RECV DATA: failed to write reg->rx_rdptr\n");
+                       mwifiex_dbg(adapter, ERROR,
+                                   "RECV DATA: failed to write reg->rx_rdptr\n");
                        ret = -1;
                        goto done;
                }
 
                /* Read the RX ring Write pointer set by firmware */
                if (mwifiex_read_reg(adapter, reg->rx_wrptr, &wrptr)) {
-                       dev_err(adapter->dev,
-                               "RECV DATA: failed to read reg->rx_wrptr\n");
+                       mwifiex_dbg(adapter, ERROR,
+                                   "RECV DATA: failed to read reg->rx_wrptr\n");
                        ret = -1;
                        goto done;
                }
-               dev_dbg(adapter->dev,
-                       "info: RECV DATA: Rcvd packet from fw successfully\n");
+               mwifiex_dbg(adapter, DATA,
+                           "info: RECV DATA: Rcvd packet from fw successfully\n");
                card->rxbd_wrptr = wrptr;
        }
 
@@ -1376,9 +1395,9 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
        const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
 
        if (!(skb->data && skb->len)) {
-               dev_err(adapter->dev,
-                       "Invalid parameter in %s <%p. len %d>\n",
-                       __func__, skb->data, skb->len);
+               mwifiex_dbg(adapter, ERROR,
+                           "Invalid parameter in %s <%p. len %d>\n",
+                           __func__, skb->data, skb->len);
                return -1;
        }
 
@@ -1391,9 +1410,9 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
         * address scratch register
         */
        if (mwifiex_write_reg(adapter, reg->cmd_addr_lo, (u32)buf_pa)) {
-               dev_err(adapter->dev,
-                       "%s: failed to write download command to boot code.\n",
-                       __func__);
+               mwifiex_dbg(adapter, ERROR,
+                           "%s: failed to write download command to boot code.\n",
+                           __func__);
                mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
                return -1;
        }
@@ -1403,18 +1422,18 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
         */
        if (mwifiex_write_reg(adapter, reg->cmd_addr_hi,
                              (u32)((u64)buf_pa >> 32))) {
-               dev_err(adapter->dev,
-                       "%s: failed to write download command to boot code.\n",
-                       __func__);
+               mwifiex_dbg(adapter, ERROR,
+                           "%s: failed to write download command to boot code.\n",
+                           __func__);
                mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
                return -1;
        }
 
        /* Write the command length to cmd_size scratch register */
        if (mwifiex_write_reg(adapter, reg->cmd_size, skb->len)) {
-               dev_err(adapter->dev,
-                       "%s: failed to write command len to cmd_size scratch reg\n",
-                       __func__);
+               mwifiex_dbg(adapter, ERROR,
+                           "%s: failed to write command len to cmd_size scratch reg\n",
+                           __func__);
                mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
                return -1;
        }
@@ -1422,8 +1441,8 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
        /* Ring the door bell */
        if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT,
                              CPU_INTR_DOOR_BELL)) {
-               dev_err(adapter->dev,
-                       "%s: failed to assert door-bell intr\n", __func__);
+               mwifiex_dbg(adapter, ERROR,
+                           "%s: failed to assert door-bell intr\n", __func__);
                mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
                return -1;
        }
@@ -1443,8 +1462,8 @@ static int mwifiex_pcie_init_fw_port(struct mwifiex_adapter *adapter)
        /* Write the RX ring read pointer in to reg->rx_rdptr */
        if (mwifiex_write_reg(adapter, reg->rx_rdptr, card->rxbd_rdptr |
                              tx_wrap)) {
-               dev_err(adapter->dev,
-                       "RECV DATA: failed to write reg->rx_rdptr\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "RECV DATA: failed to write reg->rx_rdptr\n");
                return -1;
        }
        return 0;
@@ -1462,15 +1481,16 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
        u8 *payload = (u8 *)skb->data;
 
        if (!(skb->data && skb->len)) {
-               dev_err(adapter->dev, "Invalid parameter in %s <%p, %#x>\n",
-                       __func__, skb->data, skb->len);
+               mwifiex_dbg(adapter, ERROR,
+                           "Invalid parameter in %s <%p, %#x>\n",
+                           __func__, skb->data, skb->len);
                return -1;
        }
 
        /* Make sure a command response buffer is available */
        if (!card->cmdrsp_buf) {
-               dev_err(adapter->dev,
-                       "No response buffer available, send command failed\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "No response buffer available, send command failed\n");
                return -EBUSY;
        }
 
@@ -1503,8 +1523,8 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
                   address */
                if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_lo,
                                      (u32)cmdrsp_buf_pa)) {
-                       dev_err(adapter->dev,
-                               "Failed to write download cmd to boot code.\n");
+                       mwifiex_dbg(adapter, ERROR,
+                                   "Failed to write download cmd to boot code.\n");
                        ret = -1;
                        goto done;
                }
@@ -1512,8 +1532,8 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
                   address */
                if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_hi,
                                      (u32)((u64)cmdrsp_buf_pa >> 32))) {
-                       dev_err(adapter->dev,
-                               "Failed to write download cmd to boot code.\n");
+                       mwifiex_dbg(adapter, ERROR,
+                                   "Failed to write download cmd to boot code.\n");
                        ret = -1;
                        goto done;
                }
@@ -1523,16 +1543,16 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
        /* Write the lower 32bits of the physical address to reg->cmd_addr_lo */
        if (mwifiex_write_reg(adapter, reg->cmd_addr_lo,
                              (u32)cmd_buf_pa)) {
-               dev_err(adapter->dev,
-                       "Failed to write download cmd to boot code.\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "Failed to write download cmd to boot code.\n");
                ret = -1;
                goto done;
        }
        /* Write the upper 32bits of the physical address to reg->cmd_addr_hi */
        if (mwifiex_write_reg(adapter, reg->cmd_addr_hi,
                              (u32)((u64)cmd_buf_pa >> 32))) {
-               dev_err(adapter->dev,
-                       "Failed to write download cmd to boot code.\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "Failed to write download cmd to boot code.\n");
                ret = -1;
                goto done;
        }
@@ -1540,8 +1560,8 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
        /* Write the command length to reg->cmd_size */
        if (mwifiex_write_reg(adapter, reg->cmd_size,
                              card->cmd_buf->len)) {
-               dev_err(adapter->dev,
-                       "Failed to write cmd len to reg->cmd_size\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "Failed to write cmd len to reg->cmd_size\n");
                ret = -1;
                goto done;
        }
@@ -1549,8 +1569,8 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
        /* Ring the door bell */
        if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT,
                              CPU_INTR_DOOR_BELL)) {
-               dev_err(adapter->dev,
-                       "Failed to assert door-bell intr\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "Failed to assert door-bell intr\n");
                ret = -1;
                goto done;
        }
@@ -1574,7 +1594,8 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
        u16 rx_len;
        __le16 pkt_len;
 
-       dev_dbg(adapter->dev, "info: Rx CMD Response\n");
+       mwifiex_dbg(adapter, CMD,
+                   "info: Rx CMD Response\n");
 
        mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_FROMDEVICE);
 
@@ -1598,8 +1619,8 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
                        if (mwifiex_write_reg(adapter,
                                              PCIE_CPU_INT_EVENT,
                                              CPU_INTR_SLEEP_CFM_DONE)) {
-                               dev_warn(adapter->dev,
-                                        "Write register failed\n");
+                               mwifiex_dbg(adapter, ERROR,
+                                           "Write register failed\n");
                                return -1;
                        }
                        mwifiex_delay_for_sleep_cookie(adapter,
@@ -1608,8 +1629,8 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
                               mwifiex_pcie_ok_to_access_hw(adapter))
                                usleep_range(50, 60);
                } else {
-                       dev_err(adapter->dev,
-                               "There is no command but got cmdrsp\n");
+                       mwifiex_dbg(adapter, ERROR,
+                                   "There is no command but got cmdrsp\n");
                }
                memcpy(adapter->upld_buf, skb->data,
                       min_t(u32, MWIFIEX_SIZE_OF_CMD_BUFFER, skb->len));
@@ -1628,15 +1649,15 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
                   will prevent firmware from writing to the same response
                   buffer again. */
                if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_lo, 0)) {
-                       dev_err(adapter->dev,
-                               "cmd_done: failed to clear cmd_rsp_addr_lo\n");
+                       mwifiex_dbg(adapter, ERROR,
+                                   "cmd_done: failed to clear cmd_rsp_addr_lo\n");
                        return -1;
                }
                /* Write the upper 32bits of the cmdrsp buffer physical
                   address */
                if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_hi, 0)) {
-                       dev_err(adapter->dev,
-                               "cmd_done: failed to clear cmd_rsp_addr_hi\n");
+                       mwifiex_dbg(adapter, ERROR,
+                                   "cmd_done: failed to clear cmd_rsp_addr_hi\n");
                        return -1;
                }
        }
@@ -1678,25 +1699,28 @@ static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter)
                mwifiex_pm_wakeup_card(adapter);
 
        if (adapter->event_received) {
-               dev_dbg(adapter->dev, "info: Event being processed, "
-                       "do not process this interrupt just yet\n");
+               mwifiex_dbg(adapter, EVENT,
+                           "info: Event being processed,\t"
+                           "do not process this interrupt just yet\n");
                return 0;
        }
 
        if (rdptr >= MWIFIEX_MAX_EVT_BD) {
-               dev_dbg(adapter->dev, "info: Invalid read pointer...\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "info: Invalid read pointer...\n");
                return -1;
        }
 
        /* Read the event ring write pointer set by firmware */
        if (mwifiex_read_reg(adapter, reg->evt_wrptr, &wrptr)) {
-               dev_err(adapter->dev,
-                       "EventReady: failed to read reg->evt_wrptr\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "EventReady: failed to read reg->evt_wrptr\n");
                return -1;
        }
 
-       dev_dbg(adapter->dev, "info: EventReady: Initial <Rd: 0x%x, Wr: 0x%x>",
-               card->evtbd_rdptr, wrptr);
+       mwifiex_dbg(adapter, EVENT,
+                   "info: EventReady: Initial <Rd: 0x%x, Wr: 0x%x>",
+                   card->evtbd_rdptr, wrptr);
        if (((wrptr & MWIFIEX_EVTBD_MASK) != (card->evtbd_rdptr
                                              & MWIFIEX_EVTBD_MASK)) ||
            ((wrptr & reg->evt_rollover_ind) ==
@@ -1705,7 +1729,8 @@ static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter)
                __le16 data_len = 0;
                u16 evt_len;
 
-               dev_dbg(adapter->dev, "info: Read Index: %d\n", rdptr);
+               mwifiex_dbg(adapter, INFO,
+                           "info: Read Index: %d\n", rdptr);
                skb_cmd = card->evt_buf_list[rdptr];
                mwifiex_unmap_pci_memory(adapter, skb_cmd, PCI_DMA_FROMDEVICE);
 
@@ -1721,9 +1746,10 @@ static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter)
                   len is 2 bytes followed by type which is 2 bytes */
                memcpy(&data_len, skb_cmd->data, sizeof(__le16));
                evt_len = le16_to_cpu(data_len);
-
+               skb_trim(skb_cmd, evt_len);
                skb_pull(skb_cmd, INTF_HEADER_LEN);
-               dev_dbg(adapter->dev, "info: Event length: %d\n", evt_len);
+               mwifiex_dbg(adapter, EVENT,
+                           "info: Event length: %d\n", evt_len);
 
                if ((evt_len > 0) && (evt_len  < MAX_EVENT_SIZE))
                        memcpy(adapter->event_body, skb_cmd->data +
@@ -1740,8 +1766,8 @@ static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter)
        } else {
                if (mwifiex_write_reg(adapter, PCIE_CPU_INT_EVENT,
                                      CPU_INTR_EVENT_DONE)) {
-                       dev_warn(adapter->dev,
-                                "Write register failed\n");
+                       mwifiex_dbg(adapter, ERROR,
+                                   "Write register failed\n");
                        return -1;
                }
        }
@@ -1766,15 +1792,16 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
                return 0;
 
        if (rdptr >= MWIFIEX_MAX_EVT_BD) {
-               dev_err(adapter->dev, "event_complete: Invalid rdptr 0x%x\n",
-                       rdptr);
+               mwifiex_dbg(adapter, ERROR,
+                           "event_complete: Invalid rdptr 0x%x\n",
+                           rdptr);
                return -EINVAL;
        }
 
        /* Read the event ring write pointer set by firmware */
        if (mwifiex_read_reg(adapter, reg->evt_wrptr, &wrptr)) {
-               dev_err(adapter->dev,
-                       "event_complete: failed to read reg->evt_wrptr\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "event_complete: failed to read reg->evt_wrptr\n");
                return -1;
        }
 
@@ -1791,9 +1818,9 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
                desc->flags = 0;
                skb = NULL;
        } else {
-               dev_dbg(adapter->dev,
-                       "info: ERROR: buf still valid at index %d, <%p, %p>\n",
-                       rdptr, card->evt_buf_list[rdptr], skb);
+               mwifiex_dbg(adapter, ERROR,
+                           "info: ERROR: buf still valid at index %d, <%p, %p>\n",
+                           rdptr, card->evt_buf_list[rdptr], skb);
        }
 
        if ((++card->evtbd_rdptr & MWIFIEX_EVTBD_MASK) == MWIFIEX_MAX_EVT_BD) {
@@ -1802,18 +1829,20 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
                                        reg->evt_rollover_ind);
        }
 
-       dev_dbg(adapter->dev, "info: Updated <Rd: 0x%x, Wr: 0x%x>",
-               card->evtbd_rdptr, wrptr);
+       mwifiex_dbg(adapter, EVENT,
+                   "info: Updated <Rd: 0x%x, Wr: 0x%x>",
+                   card->evtbd_rdptr, wrptr);
 
        /* Write the event ring read pointer in to reg->evt_rdptr */
        if (mwifiex_write_reg(adapter, reg->evt_rdptr,
                              card->evtbd_rdptr)) {
-               dev_err(adapter->dev,
-                       "event_complete: failed to read reg->evt_rdptr\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "event_complete: failed to read reg->evt_rdptr\n");
                return -1;
        }
 
-       dev_dbg(adapter->dev, "info: Check Events Again\n");
+       mwifiex_dbg(adapter, EVENT,
+                   "info: Check Events Again\n");
        ret = mwifiex_pcie_process_event_ready(adapter);
 
        return ret;
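The evtbd pointers compared above carry a rollover indicator bit on top of the ring index, which is what lets a completely full ring be told apart from an empty one. A self-contained model of the general lap-bit technique (mask and bit values are illustrative, and the driver's initialization convention for evt_rollover_ind differs in detail):

#include <stdbool.h>
#include <stdio.h>

#define IDX_MASK        0x0f    /* illustrative: 16-entry ring */
#define LAP_BIT         0x80    /* rollover indicator */

/* A pointer is (lap bit | index). Equal pointers mean empty; equal
 * indices with differing lap bits mean the writer is one lap ahead,
 * i.e. the ring is full. */
static bool ring_empty(unsigned int rd, unsigned int wr)
{
        return rd == wr;
}

static bool ring_full(unsigned int rd, unsigned int wr)
{
        return !((rd ^ wr) & IDX_MASK) && ((rd ^ wr) & LAP_BIT);
}

static unsigned int advance(unsigned int p)
{
        unsigned int idx = (p + 1) & IDX_MASK;

        /* On wrap, toggle the lap bit, exactly like the driver's
         * "evtbd_rdptr ^ reg->evt_rollover_ind" update. */
        return idx ? (p & LAP_BIT) | idx : (p & LAP_BIT) ^ LAP_BIT;
}

int main(void)
{
        unsigned int rd = 0, wr = 0;
        int i;

        for (i = 0; i < 16; i++)        /* producer fills all slots */
                wr = advance(wr);
        printf("full=%d empty=%d\n", ring_full(rd, wr), ring_empty(rd, wr));
        return 0;
}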
@@ -1840,17 +1869,18 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
        const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
 
        if (!firmware || !firmware_len) {
-               dev_err(adapter->dev,
-                       "No firmware image found! Terminating download\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "No firmware image found! Terminating download\n");
                return -1;
        }
 
-       dev_dbg(adapter->dev, "info: Downloading FW image (%d bytes)\n",
-               firmware_len);
+       mwifiex_dbg(adapter, INFO,
+                   "info: Downloading FW image (%d bytes)\n",
+                   firmware_len);
 
        if (mwifiex_pcie_disable_host_int(adapter)) {
-               dev_err(adapter->dev,
-                       "%s: Disabling interrupts failed.\n", __func__);
+               mwifiex_dbg(adapter, ERROR,
+                           "%s: Disabling interrupts failed.\n", __func__);
                return -1;
        }
 
@@ -1872,8 +1902,8 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
                        ret = mwifiex_read_reg(adapter, reg->cmd_size,
                                               &len);
                        if (ret) {
-                               dev_warn(adapter->dev,
-                                        "Failed reading len from boot code\n");
+                               mwifiex_dbg(adapter, FATAL,
+                                           "Failed reading len from boot code\n");
                                goto done;
                        }
                        if (len)
@@ -1884,8 +1914,9 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
                if (!len) {
                        break;
                } else if (len > MWIFIEX_UPLD_SIZE) {
-                       pr_err("FW download failure @ %d, invalid length %d\n",
-                              offset, len);
+                       mwifiex_dbg(adapter, ERROR,
+                                   "FW download failure @ %d, invalid length %d\n",
+                                   offset, len);
                        ret = -1;
                        goto done;
                }
@@ -1895,14 +1926,16 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
                if (len & BIT(0)) {
                        block_retry_cnt++;
                        if (block_retry_cnt > MAX_WRITE_IOMEM_RETRY) {
-                               pr_err("FW download failure @ %d, over max "
-                                      "retry count\n", offset);
+                               mwifiex_dbg(adapter, ERROR,
+                                           "FW download failure @ %d, over max\t"
+                                           "retry count\n", offset);
                                ret = -1;
                                goto done;
                        }
-                       dev_err(adapter->dev, "FW CRC error indicated by the "
-                               "helper: len = 0x%04X, txlen = %d\n",
-                               len, txlen);
+                       mwifiex_dbg(adapter, ERROR,
+                                   "FW CRC error indicated by the\t"
+                                   "helper: len = 0x%04X, txlen = %d\n",
+                                   len, txlen);
                        len &= ~BIT(0);
                        /* Setting this to 0 to resend from same offset */
                        txlen = 0;
@@ -1913,7 +1946,7 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
                        if (firmware_len - offset < txlen)
                                txlen = firmware_len - offset;
 
-                       dev_dbg(adapter->dev, ".");
+                       mwifiex_dbg(adapter, INFO, ".");
 
                        tx_blocks = (txlen + card->pcie.blksz_fw_dl - 1) /
                                    card->pcie.blksz_fw_dl;
@@ -1927,8 +1960,8 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
 
                /* Send the boot command to device */
                if (mwifiex_pcie_send_boot_cmd(adapter, skb)) {
-                       dev_err(adapter->dev,
-                               "Failed to send firmware download command\n");
+                       mwifiex_dbg(adapter, ERROR,
+                                   "Failed to send firmware download command\n");
                        ret = -1;
                        goto done;
                }
@@ -1937,9 +1970,10 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
                do {
                        if (mwifiex_read_reg(adapter, PCIE_CPU_INT_STATUS,
                                             &ireg_intr)) {
-                               dev_err(adapter->dev, "%s: Failed to read "
-                                       "interrupt status during fw dnld.\n",
-                                       __func__);
+                               mwifiex_dbg(adapter, ERROR,
+                                           "%s: Failed to read\t"
+                                           "interrupt status during fw dnld.\n",
+                                           __func__);
                                mwifiex_unmap_pci_memory(adapter, skb,
                                                         PCI_DMA_TODEVICE);
                                ret = -1;
@@ -1953,8 +1987,8 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
                offset += txlen;
        } while (true);
 
-       dev_notice(adapter->dev,
-                  "info: FW download over, size %d bytes\n", offset);
+       mwifiex_dbg(adapter, MSG,
+                   "info: FW download over, size %d bytes\n", offset);
 
        ret = 0;
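The download loop above is a simple handshake with the boot helper: each read of reg->cmd_size reports the next block length, bit 0 flags a CRC failure asking for a resend at the same offset, zero means the download is complete, and retries are bounded by MAX_WRITE_IOMEM_RETRY. A standalone model of the retry logic (register reads are simulated):

#include <stdio.h>

#define MAX_WRITE_IOMEM_RETRY   2       /* driver's retry cap */

int main(void)
{
        /* Simulated reads of reg->cmd_size: bit 0 set (257) = CRC error. */
        unsigned int len_reg[] = { 256, 257, 256, 0 };
        unsigned int offset = 0, txlen = 0;
        int retries = 0, i;

        for (i = 0; i < 4; i++) {
                unsigned int len = len_reg[i];

                if (!len)
                        break;                  /* helper signals completion */
                if (len & 1) {
                        if (++retries > MAX_WRITE_IOMEM_RETRY)
                                return 1;       /* give up on the download */
                        txlen = 0;              /* keep offset, resend block */
                        printf("CRC error, resending previous block\n");
                } else {
                        retries = 0;
                        txlen = len;
                        printf("send %u bytes @ offset %u\n", txlen, offset);
                }
                offset += txlen;
        }
        printf("info: FW download over, size %u bytes\n", offset);
        return 0;
}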
 
@@ -1980,15 +2014,17 @@ mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num)
        /* Mask spurious interrupts */
        if (mwifiex_write_reg(adapter, PCIE_HOST_INT_STATUS_MASK,
                              HOST_INTR_MASK)) {
-               dev_warn(adapter->dev, "Write register failed\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "Write register failed\n");
                return -1;
        }
 
-       dev_dbg(adapter->dev, "Setting driver ready signature\n");
+       mwifiex_dbg(adapter, INFO,
+                   "Setting driver ready signature\n");
        if (mwifiex_write_reg(adapter, reg->drv_rdy,
                              FIRMWARE_READY_PCIE)) {
-               dev_err(adapter->dev,
-                       "Failed to write driver ready signature\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "Failed to write driver ready signature\n");
                return -1;
        }
 
@@ -2015,12 +2051,13 @@ mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num)
                                     &winner_status))
                        ret = -1;
                else if (!winner_status) {
-                       dev_err(adapter->dev, "PCI-E is the winner\n");
+                       mwifiex_dbg(adapter, INFO,
+                                   "PCI-E is the winner\n");
                        adapter->winner = 1;
                } else {
-                       dev_err(adapter->dev,
-                               "PCI-E is not the winner <%#x,%d>, exit dnld\n",
-                               ret, adapter->winner);
+                       mwifiex_dbg(adapter, ERROR,
+                                   "PCI-E is not the winner <%#x,%d>, exit dnld\n",
+                                   ret, adapter->winner);
                }
        }
 
@@ -2039,7 +2076,7 @@ static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter)
                return;
 
        if (mwifiex_read_reg(adapter, PCIE_HOST_INT_STATUS, &pcie_ireg)) {
-               dev_warn(adapter->dev, "Read register failed\n");
+               mwifiex_dbg(adapter, ERROR, "Read register failed\n");
                return;
        }
 
@@ -2050,7 +2087,8 @@ static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter)
                /* Clear the pending interrupts */
                if (mwifiex_write_reg(adapter, PCIE_HOST_INT_STATUS,
                                      ~pcie_ireg)) {
-                       dev_warn(adapter->dev, "Write register failed\n");
+                       mwifiex_dbg(adapter, ERROR,
+                                   "Write register failed\n");
                        return;
                }
                spin_lock_irqsave(&adapter->int_lock, flags);
@@ -2133,21 +2171,24 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
        while (pcie_ireg & HOST_INTR_MASK) {
                if (pcie_ireg & HOST_INTR_DNLD_DONE) {
                        pcie_ireg &= ~HOST_INTR_DNLD_DONE;
-                       dev_dbg(adapter->dev, "info: TX DNLD Done\n");
+                       mwifiex_dbg(adapter, INTR,
+                                   "info: TX DNLD Done\n");
                        ret = mwifiex_pcie_send_data_complete(adapter);
                        if (ret)
                                return ret;
                }
                if (pcie_ireg & HOST_INTR_UPLD_RDY) {
                        pcie_ireg &= ~HOST_INTR_UPLD_RDY;
-                       dev_dbg(adapter->dev, "info: Rx DATA\n");
+                       mwifiex_dbg(adapter, INTR,
+                                   "info: Rx DATA\n");
                        ret = mwifiex_pcie_process_recv_data(adapter);
                        if (ret)
                                return ret;
                }
                if (pcie_ireg & HOST_INTR_EVENT_RDY) {
                        pcie_ireg &= ~HOST_INTR_EVENT_RDY;
-                       dev_dbg(adapter->dev, "info: Rx EVENT\n");
+                       mwifiex_dbg(adapter, INTR,
+                                   "info: Rx EVENT\n");
                        ret = mwifiex_pcie_process_event_ready(adapter);
                        if (ret)
                                return ret;
@@ -2156,8 +2197,8 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
                if (pcie_ireg & HOST_INTR_CMD_DONE) {
                        pcie_ireg &= ~HOST_INTR_CMD_DONE;
                        if (adapter->cmd_sent) {
-                               dev_dbg(adapter->dev,
-                                       "info: CMD sent Interrupt\n");
+                               mwifiex_dbg(adapter, INTR,
+                                           "info: CMD sent Interrupt\n");
                                adapter->cmd_sent = false;
                        }
                        /* Handle command response */
@@ -2169,8 +2210,8 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
                if (mwifiex_pcie_ok_to_access_hw(adapter)) {
                        if (mwifiex_read_reg(adapter, PCIE_HOST_INT_STATUS,
                                             &pcie_ireg)) {
-                               dev_warn(adapter->dev,
-                                        "Read register failed\n");
+                               mwifiex_dbg(adapter, ERROR,
+                                           "Read register failed\n");
                                return -1;
                        }
 
@@ -2178,16 +2219,17 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
                                if (mwifiex_write_reg(adapter,
                                                      PCIE_HOST_INT_STATUS,
                                                      ~pcie_ireg)) {
-                                       dev_warn(adapter->dev,
-                                                "Write register failed\n");
+                                       mwifiex_dbg(adapter, ERROR,
+                                                   "Write register failed\n");
                                        return -1;
                                }
                        }
 
                }
        }
-       dev_dbg(adapter->dev, "info: cmd_sent=%d data_sent=%d\n",
-               adapter->cmd_sent, adapter->data_sent);
+       mwifiex_dbg(adapter, INTR,
+                   "info: cmd_sent=%d data_sent=%d\n",
+                   adapter->cmd_sent, adapter->data_sent);
        if (adapter->ps_state != PS_STATE_SLEEP)
                mwifiex_pcie_enable_host_int(adapter);
 
@@ -2209,7 +2251,8 @@ static int mwifiex_pcie_host_to_card(struct mwifiex_adapter *adapter, u8 type,
                                     struct mwifiex_tx_param *tx_param)
 {
        if (!skb) {
-               dev_err(adapter->dev, "Passed NULL skb to %s\n", __func__);
+               mwifiex_dbg(adapter, ERROR,
+                           "Passed NULL skb to %s\n", __func__);
                return -1;
        }
 
@@ -2232,7 +2275,8 @@ mwifiex_pcie_rdwr_firmware(struct mwifiex_adapter *adapter, u8 doneflag)
 
        ret = mwifiex_write_reg(adapter, reg->fw_dump_ctrl, FW_DUMP_HOST_READY);
        if (ret) {
-               dev_err(adapter->dev, "PCIE write err\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "PCIE write err\n");
                return RDWR_STATUS_FAILURE;
        }
 
@@ -2243,24 +2287,25 @@ mwifiex_pcie_rdwr_firmware(struct mwifiex_adapter *adapter, u8 doneflag)
                if (doneflag && ctrl_data == doneflag)
                        return RDWR_STATUS_DONE;
                if (ctrl_data != FW_DUMP_HOST_READY) {
-                       dev_info(adapter->dev,
-                                "The ctrl reg was changed, re-try again!\n");
+                       mwifiex_dbg(adapter, WARN,
+                                   "The ctrl reg was changed, retrying!\n");
                        ret = mwifiex_write_reg(adapter, reg->fw_dump_ctrl,
                                                FW_DUMP_HOST_READY);
                        if (ret) {
-                               dev_err(adapter->dev, "PCIE write err\n");
+                               mwifiex_dbg(adapter, ERROR,
+                                           "PCIE write err\n");
                                return RDWR_STATUS_FAILURE;
                        }
                }
                usleep_range(100, 200);
        }
 
-       dev_err(adapter->dev, "Fail to pull ctrl_data\n");
+       mwifiex_dbg(adapter, ERROR, "Failed to pull ctrl_data\n");
        return RDWR_STATUS_FAILURE;
 }
 
 /* This function dumps firmware memory to file */
-static void mwifiex_pcie_fw_dump_work(struct mwifiex_adapter *adapter)
+static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter)
 {
        struct pcie_service_card *card = adapter->card;
        const struct mwifiex_pcie_card_reg *creg = card->pcie.reg;
@@ -2269,7 +2314,6 @@ static void mwifiex_pcie_fw_dump_work(struct mwifiex_adapter *adapter)
        enum rdwr_status stat;
        u32 memory_size;
        int ret;
-       static char *env[] = { "DRIVER=mwifiex_pcie", "EVENT=fw_dump", NULL };
 
        if (!card->pcie.can_dump_fw)
                return;
@@ -2284,12 +2328,12 @@ static void mwifiex_pcie_fw_dump_work(struct mwifiex_adapter *adapter)
                entry->mem_size = 0;
        }
 
-       dev_info(adapter->dev, "== mwifiex firmware dump start ==\n");
+       mwifiex_dbg(adapter, DUMP, "== mwifiex firmware dump start ==\n");
 
        /* Read the number of the memories which will dump */
        stat = mwifiex_pcie_rdwr_firmware(adapter, doneflag);
        if (stat == RDWR_STATUS_FAILURE)
-               goto done;
+               return;
 
        reg = creg->fw_dump_start;
        mwifiex_read_reg_byte(adapter, reg, &dump_num);
@@ -2300,7 +2344,7 @@ static void mwifiex_pcie_fw_dump_work(struct mwifiex_adapter *adapter)
 
                stat = mwifiex_pcie_rdwr_firmware(adapter, doneflag);
                if (stat == RDWR_STATUS_FAILURE)
-                       goto done;
+                       return;
 
                memory_size = 0;
                reg = creg->fw_dump_start;
@@ -2311,36 +2355,36 @@ static void mwifiex_pcie_fw_dump_work(struct mwifiex_adapter *adapter)
                }
 
                if (memory_size == 0) {
-                       dev_info(adapter->dev, "Firmware dump Finished!\n");
+                       mwifiex_dbg(adapter, MSG, "Firmware dump Finished!\n");
                        ret = mwifiex_write_reg(adapter, creg->fw_dump_ctrl,
                                                FW_DUMP_READ_DONE);
                        if (ret) {
-                               dev_err(adapter->dev, "PCIE write err\n");
-                               goto done;
+                               mwifiex_dbg(adapter, ERROR, "PCIE write err\n");
+                               return;
                        }
                        break;
                }
 
-               dev_info(adapter->dev,
-                        "%s_SIZE=0x%x\n", entry->mem_name, memory_size);
+               mwifiex_dbg(adapter, DUMP,
+                           "%s_SIZE=0x%x\n", entry->mem_name, memory_size);
                entry->mem_ptr = vmalloc(memory_size + 1);
                entry->mem_size = memory_size;
                if (!entry->mem_ptr) {
-                       dev_err(adapter->dev,
-                               "Vmalloc %s failed\n", entry->mem_name);
-                       goto done;
+                       mwifiex_dbg(adapter, ERROR,
+                                   "Vmalloc %s failed\n", entry->mem_name);
+                       return;
                }
                dbg_ptr = entry->mem_ptr;
                end_ptr = dbg_ptr + memory_size;
 
                doneflag = entry->done_flag;
-               dev_info(adapter->dev, "Start %s output, please wait...\n",
-                        entry->mem_name);
+               mwifiex_dbg(adapter, DUMP, "Start %s output, please wait...\n",
+                           entry->mem_name);
 
                do {
                        stat = mwifiex_pcie_rdwr_firmware(adapter, doneflag);
                        if (RDWR_STATUS_FAILURE == stat)
-                               goto done;
+                               return;
 
                        reg_start = creg->fw_dump_start;
                        reg_end = creg->fw_dump_end;
@@ -2349,46 +2393,49 @@ static void mwifiex_pcie_fw_dump_work(struct mwifiex_adapter *adapter)
                                if (dbg_ptr < end_ptr) {
                                        dbg_ptr++;
                                } else {
-                                       dev_err(adapter->dev,
-                                               "Allocated buf not enough\n");
-                                       goto done;
+                                       mwifiex_dbg(adapter, ERROR,
+                                                   "Allocated buffer too small\n");
+                                       return;
                                }
                        }
 
                        if (stat != RDWR_STATUS_DONE)
                                continue;
 
-                       dev_info(adapter->dev, "%s done: size=0x%tx\n",
-                                entry->mem_name, dbg_ptr - entry->mem_ptr);
+                       mwifiex_dbg(adapter, DUMP,
+                                   "%s done: size=0x%tx\n",
+                                   entry->mem_name, dbg_ptr - entry->mem_ptr);
                        break;
                } while (true);
        }
-       dev_info(adapter->dev, "== mwifiex firmware dump end ==\n");
-
-       kobject_uevent_env(&adapter->wiphy->dev.kobj, KOBJ_CHANGE, env);
+       mwifiex_dbg(adapter, DUMP, "== mwifiex firmware dump end ==\n");
+}
 
-done:
-       adapter->curr_mem_idx = 0;
+static void mwifiex_pcie_device_dump_work(struct mwifiex_adapter *adapter)
+{
+       mwifiex_drv_info_dump(adapter);
+       mwifiex_pcie_fw_dump(adapter);
+       mwifiex_upload_device_dump(adapter);
 }
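
The refactoring above splits the old dump worker into three stages: mwifiex_drv_info_dump() gathers driver state, mwifiex_pcie_fw_dump() (the renamed mwifiex_pcie_fw_dump_work()) pulls the firmware memory segments into the vmalloc'd buffers, and mwifiex_upload_device_dump() hands the result to user space. The upload helper lives in common code outside this diff; presumably it replaces the kobject_uevent_env() notification removed above with the devcoredump framework, roughly along these lines (the drv_info_dump/drv_info_size fields and the exact call are assumptions here):

    /* Assumed sketch of the upload stage, not the code in this series.
     * dev_coredumpv() takes ownership of a vmalloc'd buffer and exposes
     * it under /sys/class/devcoredump/ until user space reads it.
     */
    #include <linux/devcoredump.h>

    static void example_upload_device_dump(struct mwifiex_adapter *adapter)
    {
            dev_coredumpv(adapter->dev, adapter->drv_info_dump,
                          adapter->drv_info_size, GFP_KERNEL);
    }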
 
 static unsigned long iface_work_flags;
 static struct mwifiex_adapter *save_adapter;
 static void mwifiex_pcie_work(struct work_struct *work)
 {
-       if (test_and_clear_bit(MWIFIEX_IFACE_WORK_FW_DUMP,
+       if (test_and_clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP,
                               &iface_work_flags))
-               mwifiex_pcie_fw_dump_work(save_adapter);
+               mwifiex_pcie_device_dump_work(save_adapter);
 }
 
 static DECLARE_WORK(pcie_work, mwifiex_pcie_work);
 /* This function dumps FW information */
-static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter)
+static void mwifiex_pcie_device_dump(struct mwifiex_adapter *adapter)
 {
        save_adapter = adapter;
-       if (test_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &iface_work_flags))
+       if (test_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &iface_work_flags))
                return;
 
-       set_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &iface_work_flags);
+       set_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &iface_work_flags);
 
        schedule_work(&pcie_work);
 }
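
mwifiex_pcie_device_dump() only marks a flag and schedules pcie_work: the dump itself must run in process context, since mwifiex_pcie_rdwr_firmware() sleeps in usleep_range() between register polls, and the test_bit()/set_bit() pair makes repeated requests idempotent while a dump is already in flight. A hypothetical call site on the common-code side, matching the .device_dump hook installed below, would look roughly like:

    /* Hypothetical caller: fire the interface hook if the bus provides one. */
    if (adapter->if_ops.device_dump)
            adapter->if_ops.device_dump(adapter);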
@@ -2418,45 +2465,50 @@ static int mwifiex_pcie_init(struct mwifiex_adapter *adapter)
 
        pci_set_master(pdev);
 
-       dev_dbg(adapter->dev, "try set_consistent_dma_mask(32)\n");
+       mwifiex_dbg(adapter, INFO,
+                   "try set_dma_mask(32)\n");
        ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
        if (ret) {
-               dev_err(adapter->dev, "set_dma_mask(32) failed\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "set_dma_mask(32) failed\n");
                goto err_set_dma_mask;
        }
 
        ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
        if (ret) {
-               dev_err(adapter->dev, "set_consistent_dma_mask(64) failed\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "set_consistent_dma_mask(32) failed\n");
                goto err_set_dma_mask;
        }
 
        ret = pci_request_region(pdev, 0, DRV_NAME);
        if (ret) {
-               dev_err(adapter->dev, "req_reg(0) error\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "req_reg(0) error\n");
                goto err_req_region0;
        }
        card->pci_mmap = pci_iomap(pdev, 0, 0);
        if (!card->pci_mmap) {
-               dev_err(adapter->dev, "iomap(0) error\n");
+               mwifiex_dbg(adapter, ERROR, "iomap(0) error\n");
                ret = -EIO;
                goto err_iomap0;
        }
        ret = pci_request_region(pdev, 2, DRV_NAME);
        if (ret) {
-               dev_err(adapter->dev, "req_reg(2) error\n");
+               mwifiex_dbg(adapter, ERROR, "req_reg(2) error\n");
                goto err_req_region2;
        }
        card->pci_mmap1 = pci_iomap(pdev, 2, 0);
        if (!card->pci_mmap1) {
-               dev_err(adapter->dev, "iomap(2) error\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "iomap(2) error\n");
                ret = -EIO;
                goto err_iomap2;
        }
 
-       dev_dbg(adapter->dev,
-               "PCI memory map Virt0: %p PCI memory map Virt2: %p\n",
-               card->pci_mmap, card->pci_mmap1);
+       mwifiex_dbg(adapter, INFO,
+                   "PCI memory map Virt0: %p PCI memory map Virt2: %p\n",
+                   card->pci_mmap, card->pci_mmap1);
 
        card->cmdrsp_buf = NULL;
        ret = mwifiex_pcie_create_txbd_ring(adapter);
@@ -2521,10 +2573,11 @@ static void mwifiex_pcie_cleanup(struct mwifiex_adapter *adapter)
        const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
 
        if (user_rmmod) {
-               dev_dbg(adapter->dev, "Clearing driver ready signature\n");
+               mwifiex_dbg(adapter, INFO,
+                           "Clearing driver ready signature\n");
                if (mwifiex_write_reg(adapter, reg->drv_rdy, 0x00000000))
-                       dev_err(adapter->dev,
-                               "Failed to write driver not-ready signature\n");
+                       mwifiex_dbg(adapter, ERROR,
+                                   "Failed to write driver not-ready signature\n");
        }
 
        if (pdev) {
@@ -2555,7 +2608,8 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
        ret = request_irq(pdev->irq, mwifiex_pcie_interrupt, IRQF_SHARED,
                          "MRVL_PCIE", pdev);
        if (ret) {
-               pr_err("request_irq failed: ret=%d\n", ret);
+               mwifiex_dbg(adapter, ERROR,
+                           "request_irq failed: ret=%d\n", ret);
                adapter->card = NULL;
                return -1;
        }
@@ -2582,7 +2636,8 @@ static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
        const struct mwifiex_pcie_card_reg *reg;
 
        if (card) {
-               dev_dbg(adapter->dev, "%s(): calling free_irq()\n", __func__);
+               mwifiex_dbg(adapter, INFO,
+                           "%s(): calling free_irq()\n", __func__);
                free_irq(card->dev->irq, card->dev);
 
                reg = card->pcie.reg;
@@ -2617,7 +2672,7 @@ static struct mwifiex_if_ops pcie_ops = {
        .cleanup_mpa_buf =              NULL,
        .init_fw_port =                 mwifiex_pcie_init_fw_port,
        .clean_pcie_ring =              mwifiex_clean_pcie_ring_buf,
-       .fw_dump =                      mwifiex_pcie_fw_dump,
+       .device_dump =                  mwifiex_pcie_device_dump,
 };
 
 /*
index 0ffdb7c5afd21345cfea5c025d0a2cacb2840076..baf9715ddc1034bc58e6ec4cea367caa670b571c 100644
@@ -241,20 +241,21 @@ mwifiex_is_bss_wpa(struct mwifiex_private *priv,
            * LinkSys WRT54G && bss_desc->privacy
            */
         ) {
-               dev_dbg(priv->adapter->dev, "info: %s: WPA:"
-                       " wpa_ie=%#x wpa2_ie=%#x WEP=%s WPA=%s WPA2=%s "
-                       "EncMode=%#x privacy=%#x\n", __func__,
-                       (bss_desc->bcn_wpa_ie) ?
-                       (*(bss_desc->bcn_wpa_ie)).
-                       vend_hdr.element_id : 0,
-                       (bss_desc->bcn_rsn_ie) ?
-                       (*(bss_desc->bcn_rsn_ie)).
-                       ieee_hdr.element_id : 0,
-                       (priv->sec_info.wep_enabled) ? "e" : "d",
-                       (priv->sec_info.wpa_enabled) ? "e" : "d",
-                       (priv->sec_info.wpa2_enabled) ? "e" : "d",
-                       priv->sec_info.encryption_mode,
-                       bss_desc->privacy);
+               mwifiex_dbg(priv->adapter, INFO,
+                           "info: %s: WPA:\t"
+                           "wpa_ie=%#x wpa2_ie=%#x WEP=%s WPA=%s WPA2=%s\t"
+                           "EncMode=%#x privacy=%#x\n", __func__,
+                           (bss_desc->bcn_wpa_ie) ?
+                           (*bss_desc->bcn_wpa_ie).
+                           vend_hdr.element_id : 0,
+                           (bss_desc->bcn_rsn_ie) ?
+                           (*bss_desc->bcn_rsn_ie).
+                           ieee_hdr.element_id : 0,
+                           (priv->sec_info.wep_enabled) ? "e" : "d",
+                           (priv->sec_info.wpa_enabled) ? "e" : "d",
+                           (priv->sec_info.wpa2_enabled) ? "e" : "d",
+                           priv->sec_info.encryption_mode,
+                           bss_desc->privacy);
                return true;
        }
        return false;
@@ -277,20 +278,21 @@ mwifiex_is_bss_wpa2(struct mwifiex_private *priv,
                 * Privacy bit may NOT be set in some APs like
                 * LinkSys WRT54G && bss_desc->privacy
                 */
-               dev_dbg(priv->adapter->dev, "info: %s: WPA2: "
-                       " wpa_ie=%#x wpa2_ie=%#x WEP=%s WPA=%s WPA2=%s "
-                       "EncMode=%#x privacy=%#x\n", __func__,
-                       (bss_desc->bcn_wpa_ie) ?
-                       (*(bss_desc->bcn_wpa_ie)).
-                       vend_hdr.element_id : 0,
-                       (bss_desc->bcn_rsn_ie) ?
-                       (*(bss_desc->bcn_rsn_ie)).
-                       ieee_hdr.element_id : 0,
-                       (priv->sec_info.wep_enabled) ? "e" : "d",
-                       (priv->sec_info.wpa_enabled) ? "e" : "d",
-                       (priv->sec_info.wpa2_enabled) ? "e" : "d",
-                       priv->sec_info.encryption_mode,
-                       bss_desc->privacy);
+               mwifiex_dbg(priv->adapter, INFO,
+                           "info: %s: WPA2:\t"
+                           "wpa_ie=%#x wpa2_ie=%#x WEP=%s WPA=%s WPA2=%s\t"
+                           "EncMode=%#x privacy=%#x\n", __func__,
+                           (bss_desc->bcn_wpa_ie) ?
+                           (*bss_desc->bcn_wpa_ie).
+                           vend_hdr.element_id : 0,
+                           (bss_desc->bcn_rsn_ie) ?
+                           (*bss_desc->bcn_rsn_ie).
+                           ieee_hdr.element_id : 0,
+                           (priv->sec_info.wep_enabled) ? "e" : "d",
+                           (priv->sec_info.wpa_enabled) ? "e" : "d",
+                           (priv->sec_info.wpa2_enabled) ? "e" : "d",
+                           priv->sec_info.encryption_mode,
+                           bss_desc->privacy);
                return true;
        }
        return false;
@@ -333,18 +335,19 @@ mwifiex_is_bss_dynamic_wep(struct mwifiex_private *priv,
            ((!bss_desc->bcn_rsn_ie) ||
             ((*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id != WLAN_EID_RSN)) &&
            priv->sec_info.encryption_mode && bss_desc->privacy) {
-               dev_dbg(priv->adapter->dev, "info: %s: dynamic "
-                       "WEP: wpa_ie=%#x wpa2_ie=%#x "
-                       "EncMode=%#x privacy=%#x\n",
-                       __func__,
-                       (bss_desc->bcn_wpa_ie) ?
-                       (*(bss_desc->bcn_wpa_ie)).
-                       vend_hdr.element_id : 0,
-                       (bss_desc->bcn_rsn_ie) ?
-                       (*(bss_desc->bcn_rsn_ie)).
-                       ieee_hdr.element_id : 0,
-                       priv->sec_info.encryption_mode,
-                       bss_desc->privacy);
+               mwifiex_dbg(priv->adapter, INFO,
+                           "info: %s: dynamic\t"
+                           "WEP: wpa_ie=%#x wpa2_ie=%#x\t"
+                           "EncMode=%#x privacy=%#x\n",
+                           __func__,
+                           (bss_desc->bcn_wpa_ie) ?
+                           (*bss_desc->bcn_wpa_ie).
+                           vend_hdr.element_id : 0,
+                           (bss_desc->bcn_rsn_ie) ?
+                           (*bss_desc->bcn_rsn_ie).
+                           ieee_hdr.element_id : 0,
+                           priv->sec_info.encryption_mode,
+                           bss_desc->privacy);
                return true;
        }
        return false;
@@ -383,19 +386,20 @@ mwifiex_is_network_compatible(struct mwifiex_private *priv,
                return 0;
 
        if (priv->wps.session_enable) {
-               dev_dbg(adapter->dev,
-                       "info: return success directly in WPS period\n");
+               mwifiex_dbg(adapter, IOCTL,
+                           "info: return success directly in WPS period\n");
                return 0;
        }
 
        if (bss_desc->chan_sw_ie_present) {
-               dev_err(adapter->dev,
-                       "Don't connect to AP with WLAN_EID_CHANNEL_SWITCH\n");
+               mwifiex_dbg(adapter, INFO,
+                           "Don't connect to AP with WLAN_EID_CHANNEL_SWITCH\n");
                return -1;
        }
 
        if (mwifiex_is_bss_wapi(priv, bss_desc)) {
-               dev_dbg(adapter->dev, "info: return success for WAPI AP\n");
+               mwifiex_dbg(adapter, INFO,
+                           "info: return success for WAPI AP\n");
                return 0;
        }
 
@@ -405,7 +409,8 @@ mwifiex_is_network_compatible(struct mwifiex_private *priv,
                        return 0;
                } else if (mwifiex_is_bss_static_wep(priv, bss_desc)) {
                        /* Static WEP enabled */
-                       dev_dbg(adapter->dev, "info: Disable 11n in WEP mode.\n");
+                       mwifiex_dbg(adapter, INFO,
+                                   "info: Disable 11n in WEP mode.\n");
                        bss_desc->disable_11n = true;
                        return 0;
                } else if (mwifiex_is_bss_wpa(priv, bss_desc)) {
@@ -418,9 +423,9 @@ mwifiex_is_network_compatible(struct mwifiex_private *priv,
 
                                if (mwifiex_is_wpa_oui_present
                                                (bss_desc, CIPHER_SUITE_TKIP)) {
-                                       dev_dbg(adapter->dev,
-                                               "info: Disable 11n if AES "
-                                               "is not supported by AP\n");
+                                       mwifiex_dbg(adapter, INFO,
+                                                   "info: Disable 11n if AES\t"
+                                                   "is not supported by AP\n");
                                        bss_desc->disable_11n = true;
                                } else {
                                        return -1;
@@ -437,9 +442,9 @@ mwifiex_is_network_compatible(struct mwifiex_private *priv,
 
                                if (mwifiex_is_rsn_oui_present
                                                (bss_desc, CIPHER_SUITE_TKIP)) {
-                                       dev_dbg(adapter->dev,
-                                               "info: Disable 11n if AES "
-                                               "is not supported by AP\n");
+                                       mwifiex_dbg(adapter, INFO,
+                                                   "info: Disable 11n if AES\t"
+                                                   "is not supported by AP\n");
                                        bss_desc->disable_11n = true;
                                } else {
                                        return -1;
@@ -455,17 +460,18 @@ mwifiex_is_network_compatible(struct mwifiex_private *priv,
                }
 
                /* Security doesn't match */
-               dev_dbg(adapter->dev,
-                       "info: %s: failed: wpa_ie=%#x wpa2_ie=%#x WEP=%s "
-                       "WPA=%s WPA2=%s EncMode=%#x privacy=%#x\n", __func__,
-                       (bss_desc->bcn_wpa_ie) ?
-                       (*(bss_desc->bcn_wpa_ie)).vend_hdr.element_id : 0,
-                       (bss_desc->bcn_rsn_ie) ?
-                       (*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id : 0,
-                       (priv->sec_info.wep_enabled) ? "e" : "d",
-                       (priv->sec_info.wpa_enabled) ? "e" : "d",
-                       (priv->sec_info.wpa2_enabled) ? "e" : "d",
-                       priv->sec_info.encryption_mode, bss_desc->privacy);
+               mwifiex_dbg(adapter, ERROR,
+                           "info: %s: failed: wpa_ie=%#x wpa2_ie=%#x WEP=%s\t"
+                           "WPA=%s WPA2=%s EncMode=%#x privacy=%#x\n",
+                           __func__,
+                           (bss_desc->bcn_wpa_ie) ?
+                           (*bss_desc->bcn_wpa_ie).vend_hdr.element_id : 0,
+                           (bss_desc->bcn_rsn_ie) ?
+                           (*bss_desc->bcn_rsn_ie).ieee_hdr.element_id : 0,
+                           (priv->sec_info.wep_enabled) ? "e" : "d",
+                           (priv->sec_info.wpa_enabled) ? "e" : "d",
+                           (priv->sec_info.wpa2_enabled) ? "e" : "d",
+                           priv->sec_info.encryption_mode, bss_desc->privacy);
                return -1;
        }
 
@@ -560,7 +566,8 @@ mwifiex_append_rate_tlv(struct mwifiex_private *priv,
        else
                rates_size = mwifiex_get_supported_rates(priv, rates);
 
-       dev_dbg(priv->adapter->dev, "info: SCAN_CMD: Rates size = %d\n",
+       mwifiex_dbg(priv->adapter, CMD,
+                   "info: SCAN_CMD: Rates size = %d\n",
                rates_size);
        rates_tlv = (struct mwifiex_ie_types_rates_param_set *)tlv_pos;
        rates_tlv->header.type = cpu_to_le16(WLAN_EID_SUPP_RATES);
@@ -600,9 +607,9 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
        u8 radio_type;
 
        if (!scan_cfg_out || !chan_tlv_out || !scan_chan_list) {
-               dev_dbg(priv->adapter->dev,
-                       "info: Scan: Null detect: %p, %p, %p\n",
-                      scan_cfg_out, chan_tlv_out, scan_chan_list);
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "info: Scan: Null detect: %p, %p, %p\n",
+                           scan_cfg_out, chan_tlv_out, scan_chan_list);
                return -1;
        }
 
@@ -645,16 +652,16 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
                        }
 
                        radio_type = tmp_chan_list->radio_type;
-                       dev_dbg(priv->adapter->dev,
-                               "info: Scan: Chan(%3d), Radio(%d),"
-                               " Mode(%d, %d), Dur(%d)\n",
-                               tmp_chan_list->chan_number,
-                               tmp_chan_list->radio_type,
-                               tmp_chan_list->chan_scan_mode_bitmap
-                               & MWIFIEX_PASSIVE_SCAN,
-                               (tmp_chan_list->chan_scan_mode_bitmap
-                                & MWIFIEX_DISABLE_CHAN_FILT) >> 1,
-                               le16_to_cpu(tmp_chan_list->max_scan_time));
+                       mwifiex_dbg(priv->adapter, INFO,
+                                   "info: Scan: Chan(%3d), Radio(%d),\t"
+                                   "Mode(%d, %d), Dur(%d)\n",
+                                   tmp_chan_list->chan_number,
+                                   tmp_chan_list->radio_type,
+                                   tmp_chan_list->chan_scan_mode_bitmap
+                                   & MWIFIEX_PASSIVE_SCAN,
+                                   (tmp_chan_list->chan_scan_mode_bitmap
+                                   & MWIFIEX_DISABLE_CHAN_FILT) >> 1,
+                                   le16_to_cpu(tmp_chan_list->max_scan_time));
 
                        /* Copy the current channel TLV to the command being
                           prepared */
@@ -718,9 +725,11 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
                /* The total scan time should be less than scan command timeout
                   value */
                if (total_scan_time > MWIFIEX_MAX_TOTAL_SCAN_TIME) {
-                       dev_err(priv->adapter->dev, "total scan time %dms"
-                               " is over limit (%dms), scan skipped\n",
-                               total_scan_time, MWIFIEX_MAX_TOTAL_SCAN_TIME);
+                       mwifiex_dbg(priv->adapter, ERROR,
+                                   "total scan time %dms\t"
+                                   "is over limit (%dms), scan skipped\n",
+                                   total_scan_time,
+                                   MWIFIEX_MAX_TOTAL_SCAN_TIME);
                        ret = -1;
                        break;
                }
@@ -905,9 +914,10 @@ mwifiex_config_scan(struct mwifiex_private *priv,
                        tlv_pos += (sizeof(wildcard_ssid_tlv->header)
                                + le16_to_cpu(wildcard_ssid_tlv->header.len));
 
-                       dev_dbg(adapter->dev, "info: scan: ssid[%d]: %s, %d\n",
-                               i, wildcard_ssid_tlv->ssid,
-                               wildcard_ssid_tlv->max_ssid_length);
+                       mwifiex_dbg(adapter, INFO,
+                                   "info: scan: ssid[%d]: %s, %d\n",
+                                   i, wildcard_ssid_tlv->ssid,
+                                   wildcard_ssid_tlv->max_ssid_length);
 
                        /* Empty wildcard ssid with a maxlen will match many or
                           potentially all SSIDs (maxlen == 32), therefore do
@@ -928,8 +938,9 @@ mwifiex_config_scan(struct mwifiex_private *priv,
                        *filtered_scan = true;
 
                if (user_scan_in->scan_chan_gap) {
-                       dev_dbg(adapter->dev, "info: scan: channel gap = %d\n",
-                               user_scan_in->scan_chan_gap);
+                       mwifiex_dbg(adapter, INFO,
+                                   "info: scan: channel gap = %d\n",
+                                   user_scan_in->scan_chan_gap);
                        *max_chan_per_scan =
                                        MWIFIEX_MAX_CHANNELS_PER_SPECIFIC_SCAN;
 
@@ -961,8 +972,9 @@ mwifiex_config_scan(struct mwifiex_private *priv,
           add tlv */
        if (num_probes) {
 
-               dev_dbg(adapter->dev, "info: scan: num_probes = %d\n",
-                       num_probes);
+               mwifiex_dbg(adapter, INFO,
+                           "info: scan: num_probes = %d\n",
+                           num_probes);
 
                num_probes_tlv = (struct mwifiex_ie_types_num_probes *) tlv_pos;
                num_probes_tlv->header.type = cpu_to_le16(TLV_TYPE_NUMPROBES);
@@ -1003,7 +1015,8 @@ mwifiex_config_scan(struct mwifiex_private *priv,
 
        if (user_scan_in && user_scan_in->chan_list[0].chan_number) {
 
-               dev_dbg(adapter->dev, "info: Scan: Using supplied channel list\n");
+               mwifiex_dbg(adapter, INFO,
+                           "info: Scan: Using supplied channel list\n");
 
                for (chan_idx = 0;
                     chan_idx < MWIFIEX_USER_SCAN_CHAN_MAX &&
@@ -1056,13 +1069,13 @@ mwifiex_config_scan(struct mwifiex_private *priv,
                    (user_scan_in->chan_list[0].chan_number ==
                     priv->curr_bss_params.bss_descriptor.channel)) {
                        *scan_current_only = true;
-                       dev_dbg(adapter->dev,
-                               "info: Scan: Scanning current channel only\n");
+                       mwifiex_dbg(adapter, INFO,
+                                   "info: Scan: Scanning current channel only\n");
                }
                chan_num = chan_idx;
        } else {
-               dev_dbg(adapter->dev,
-                       "info: Scan: Creating full region channel list\n");
+               mwifiex_dbg(adapter, INFO,
+                           "info: Scan: Creating full region channel list\n");
                chan_num = mwifiex_scan_create_channel_list(priv, user_scan_in,
                                                            scan_chan_list,
                                                            *filtered_scan);
@@ -1094,8 +1107,9 @@ mwifiex_ret_802_11_scan_get_tlv_ptrs(struct mwifiex_adapter *adapter,
        tlv_buf_left = tlv_buf_size;
        *tlv_data = NULL;
 
-       dev_dbg(adapter->dev, "info: SCAN_RESP: tlv_buf_size = %d\n",
-               tlv_buf_size);
+       mwifiex_dbg(adapter, INFO,
+                   "info: SCAN_RESP: tlv_buf_size = %d\n",
+                   tlv_buf_size);
 
        while (tlv_buf_left >= sizeof(struct mwifiex_ie_types_header)) {
 
@@ -1103,26 +1117,31 @@ mwifiex_ret_802_11_scan_get_tlv_ptrs(struct mwifiex_adapter *adapter,
                tlv_len = le16_to_cpu(current_tlv->header.len);
 
                if (sizeof(tlv->header) + tlv_len > tlv_buf_left) {
-                       dev_err(adapter->dev, "SCAN_RESP: TLV buffer corrupt\n");
+                       mwifiex_dbg(adapter, ERROR,
+                                   "SCAN_RESP: TLV buffer corrupt\n");
                        break;
                }
 
                if (req_tlv_type == tlv_type) {
                        switch (tlv_type) {
                        case TLV_TYPE_TSFTIMESTAMP:
-                               dev_dbg(adapter->dev, "info: SCAN_RESP: TSF "
-                                       "timestamp TLV, len = %d\n", tlv_len);
+                               mwifiex_dbg(adapter, INFO,
+                                           "info: SCAN_RESP: TSF\t"
+                                           "timestamp TLV, len = %d\n",
+                                           tlv_len);
                                *tlv_data = current_tlv;
                                break;
                        case TLV_TYPE_CHANNELBANDLIST:
-                               dev_dbg(adapter->dev, "info: SCAN_RESP: channel"
-                                       " band list TLV, len = %d\n", tlv_len);
+                               mwifiex_dbg(adapter, INFO,
+                                           "info: SCAN_RESP: channel\t"
+                                           "band list TLV, len = %d\n",
+                                           tlv_len);
                                *tlv_data = current_tlv;
                                break;
                        default:
-                               dev_err(adapter->dev,
-                                       "SCAN_RESP: unhandled TLV = %d\n",
-                                      tlv_type);
+                               mwifiex_dbg(adapter, ERROR,
+                                           "SCAN_RESP: unhandled TLV = %d\n",
+                                           tlv_type);
                                /* Give up, this seems corrupted */
                                return;
                        }
@@ -1177,8 +1196,9 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
                total_ie_len = element_len + sizeof(struct ieee_types_header);
 
                if (bytes_left < total_ie_len) {
-                       dev_err(adapter->dev, "err: InterpretIE: in processing"
-                               " IE, bytes left < IE length\n");
+                       mwifiex_dbg(adapter, ERROR,
+                                   "err: InterpretIE: in processing\t"
+                                   "IE, bytes left < IE length\n");
                        return -1;
                }
                switch (element_id) {
@@ -1186,9 +1206,9 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
                        bss_entry->ssid.ssid_len = element_len;
                        memcpy(bss_entry->ssid.ssid, (current_ptr + 2),
                               element_len);
-                       dev_dbg(adapter->dev,
-                               "info: InterpretIE: ssid: %-32s\n",
-                               bss_entry->ssid.ssid);
+                       mwifiex_dbg(adapter, INFO,
+                                   "info: InterpretIE: ssid: %-32s\n",
+                                   bss_entry->ssid.ssid);
                        break;
 
                case WLAN_EID_SUPP_RATES:
@@ -1419,19 +1439,20 @@ int mwifiex_scan_networks(struct mwifiex_private *priv,
        unsigned long flags;
 
        if (adapter->scan_processing) {
-               dev_err(adapter->dev, "cmd: Scan already in process...\n");
+               mwifiex_dbg(adapter, WARN,
+                           "cmd: Scan already in progress...\n");
                return -EBUSY;
        }
 
        if (priv->scan_block) {
-               dev_err(adapter->dev,
-                       "cmd: Scan is blocked during association...\n");
+               mwifiex_dbg(adapter, WARN,
+                           "cmd: Scan is blocked during association...\n");
                return -EBUSY;
        }
 
        if (adapter->surprise_removed || adapter->is_cmd_timedout) {
-               dev_err(adapter->dev,
-                       "Ignore scan. Card removed or firmware in bad state\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "Ignore scan. Card removed or firmware in bad state\n");
                return -EFAULT;
        }
 
@@ -1478,7 +1499,8 @@ int mwifiex_scan_networks(struct mwifiex_private *priv,
 
                        /* Perform internal scan synchronously */
                        if (!priv->scan_request) {
-                               dev_dbg(adapter->dev, "wait internal scan\n");
+                               mwifiex_dbg(adapter, INFO,
+                                           "wait internal scan\n");
                                mwifiex_wait_queue_complete(adapter, cmd_node);
                        }
                } else {
@@ -1553,8 +1575,8 @@ int mwifiex_check_network_compatibility(struct mwifiex_private *priv,
                        ret = mwifiex_is_network_compatible(priv, bss_desc,
                                                            priv->bss_mode);
                        if (ret)
-                               dev_err(priv->adapter->dev,
-                                       "Incompatible network settings\n");
+                               mwifiex_dbg(priv->adapter, ERROR,
+                                           "Incompatible network settings\n");
                        break;
                default:
                        ret = 0;
@@ -1656,7 +1678,8 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info,
         */
        if (curr_bcn_bytes < ETH_ALEN + sizeof(u8) +
            sizeof(struct mwifiex_fixed_bcn_param)) {
-               dev_err(adapter->dev, "InterpretIE: not enough bytes left\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "InterpretIE: not enough bytes left\n");
                return -EFAULT;
        }
 
@@ -1669,7 +1692,8 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info,
                rssi = (-rssi) * 100;           /* Convert dBm to mBm */
                current_ptr += sizeof(u8);
                curr_bcn_bytes -= sizeof(u8);
-               dev_dbg(adapter->dev, "info: InterpretIE: RSSI=%d\n", rssi);
+               mwifiex_dbg(adapter, INFO,
+                           "info: InterpretIE: RSSI=%d\n", rssi);
        } else {
                rssi = rssi_val;
        }
@@ -1682,14 +1706,16 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info,
        beacon_period = le16_to_cpu(bcn_param->beacon_period);
 
        cap_info_bitmap = le16_to_cpu(bcn_param->cap_info_bitmap);
-       dev_dbg(adapter->dev, "info: InterpretIE: capabilities=0x%X\n",
-               cap_info_bitmap);
+       mwifiex_dbg(adapter, INFO,
+                   "info: InterpretIE: capabilities=0x%X\n",
+                   cap_info_bitmap);
 
        /* Rest of the current buffer are IE's */
        ie_buf = current_ptr;
        ie_len = curr_bcn_bytes;
-       dev_dbg(adapter->dev, "info: InterpretIE: IELength for this AP = %d\n",
-               curr_bcn_bytes);
+       mwifiex_dbg(adapter, INFO,
+                   "info: InterpretIE: IELength for this AP = %d\n",
+                   curr_bcn_bytes);
 
        while (curr_bcn_bytes >= sizeof(struct ieee_types_header)) {
                u8 element_id, element_len;
@@ -1698,8 +1724,8 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info,
                element_len = *(current_ptr + 1);
                if (curr_bcn_bytes < element_len +
                                sizeof(struct ieee_types_header)) {
-                       dev_err(adapter->dev,
-                               "%s: bytes left < IE length\n", __func__);
+                       mwifiex_dbg(adapter, ERROR,
+                                   "%s: bytes left < IE length\n", __func__);
                        return -EFAULT;
                }
                if (element_id == WLAN_EID_DS_PARAMS) {
@@ -1719,8 +1745,8 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info,
 
                /* Skip entry if on csa closed channel */
                if (channel == priv->csa_chan) {
-                       dev_dbg(adapter->dev,
-                               "Dropping entry on csa closed channel\n");
+                       mwifiex_dbg(adapter, WARN,
+                                   "Dropping entry on csa closed channel\n");
                        return 0;
                }
 
@@ -1751,7 +1777,7 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info,
                        cfg80211_put_bss(priv->wdev.wiphy, bss);
                }
        } else {
-               dev_dbg(adapter->dev, "missing BSS channel IE\n");
+               mwifiex_dbg(adapter, WARN, "missing BSS channel IE\n");
        }
 
        return 0;
@@ -1765,7 +1791,8 @@ static void mwifiex_complete_scan(struct mwifiex_private *priv)
        if (adapter->curr_cmd->wait_q_enabled) {
                adapter->cmd_wait_q.status = 0;
                if (!priv->scan_request) {
-                       dev_dbg(adapter->dev, "complete internal scan\n");
+                       mwifiex_dbg(adapter, INFO,
+                                   "complete internal scan\n");
                        mwifiex_complete_cmd(adapter, adapter->curr_cmd);
                }
        }
@@ -1788,12 +1815,14 @@ static void mwifiex_check_next_scan_command(struct mwifiex_private *priv)
                        mwifiex_complete_scan(priv);
 
                if (priv->scan_request) {
-                       dev_dbg(adapter->dev, "info: notifying scan done\n");
+                       mwifiex_dbg(adapter, INFO,
+                                   "info: notifying scan done\n");
                        cfg80211_scan_done(priv->scan_request, 0);
                        priv->scan_request = NULL;
                } else {
                        priv->scan_aborting = false;
-                       dev_dbg(adapter->dev, "info: scan already aborted\n");
+                       mwifiex_dbg(adapter, INFO,
+                                   "info: scan already aborted\n");
                }
        } else if ((priv->scan_aborting && !priv->scan_request) ||
                   priv->scan_block) {
@@ -1809,12 +1838,14 @@ static void mwifiex_check_next_scan_command(struct mwifiex_private *priv)
                spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
 
                if (priv->scan_request) {
-                       dev_dbg(adapter->dev, "info: aborting scan\n");
+                       mwifiex_dbg(adapter, INFO,
+                                   "info: aborting scan\n");
                        cfg80211_scan_done(priv->scan_request, 1);
                        priv->scan_request = NULL;
                } else {
                        priv->scan_aborting = false;
-                       dev_dbg(adapter->dev, "info: scan already aborted\n");
+                       mwifiex_dbg(adapter, INFO,
+                                   "info: scan already aborted\n");
                }
        } else {
                /* Get scan command from scan_pending_q and put to
@@ -1877,8 +1908,9 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
 
 
        if (scan_rsp->number_of_sets > MWIFIEX_MAX_AP) {
-               dev_err(adapter->dev, "SCAN_RESP: too many AP returned (%d)\n",
-                       scan_rsp->number_of_sets);
+               mwifiex_dbg(adapter, ERROR,
+                           "SCAN_RESP: too many APs returned (%d)\n",
+                           scan_rsp->number_of_sets);
                ret = -1;
                goto check_next_scan;
        }
@@ -1887,14 +1919,15 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
        mwifiex_11h_get_csa_closed_channel(priv);
 
        bytes_left = le16_to_cpu(scan_rsp->bss_descript_size);
-       dev_dbg(adapter->dev, "info: SCAN_RESP: bss_descript_size %d\n",
-               bytes_left);
+       mwifiex_dbg(adapter, INFO,
+                   "info: SCAN_RESP: bss_descript_size %d\n",
+                   bytes_left);
 
        scan_resp_size = le16_to_cpu(resp->size);
 
-       dev_dbg(adapter->dev,
-               "info: SCAN_RESP: returned %d APs before parsing\n",
-               scan_rsp->number_of_sets);
+       mwifiex_dbg(adapter, INFO,
+                   "info: SCAN_RESP: returned %d APs before parsing\n",
+                   scan_rsp->number_of_sets);
 
        bss_info = scan_rsp->bss_desc_and_tlv_buffer;
 
@@ -2007,13 +2040,13 @@ mwifiex_update_chan_statistics(struct mwifiex_private *priv,
                                       le16_to_cpu(fw_chan_stats->cca_scan_dur);
                chan_stats.cca_busy_dur =
                                       le16_to_cpu(fw_chan_stats->cca_busy_dur);
-               dev_dbg(adapter->dev,
-                       "chan=%d, noise=%d, total_network=%d scan_duration=%d, busy_duration=%d\n",
-                       chan_stats.chan_num,
-                       chan_stats.noise,
-                       chan_stats.total_bss,
-                       chan_stats.cca_scan_dur,
-                       chan_stats.cca_busy_dur);
+               mwifiex_dbg(adapter, INFO,
+                           "chan=%d, noise=%d, total_network=%d scan_duration=%d, busy_duration=%d\n",
+                           chan_stats.chan_num,
+                           chan_stats.noise,
+                           chan_stats.total_bss,
+                           chan_stats.cca_scan_dur,
+                           chan_stats.cca_busy_dur);
                memcpy(&adapter->chan_stats[adapter->survey_idx++], &chan_stats,
                       sizeof(struct mwifiex_chan_stats));
                fw_chan_stats++;
@@ -2035,7 +2068,7 @@ int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv,
        unsigned long cmd_flags, scan_flags;
        bool complete_scan = false;
 
-       dev_dbg(priv->adapter->dev, "info: EXT scan returns successfully\n");
+       mwifiex_dbg(adapter, INFO, "info: EXT scan returned successfully\n");
 
        ext_scan_resp = &resp->params.ext_scan;
 
@@ -2048,8 +2081,8 @@ int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv,
                len = le16_to_cpu(tlv->len);
 
                if (buf_left < (sizeof(struct mwifiex_ie_types_header) + len)) {
-                       dev_err(adapter->dev,
-                               "error processing scan response TLVs");
+                       mwifiex_dbg(adapter, ERROR,
+                                   "error processing scan response TLVs");
                        break;
                }
 
@@ -2075,8 +2108,8 @@ int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv,
                        cmd_ptr = (void *)cmd_node->cmd_skb->data;
                        if (le16_to_cpu(cmd_ptr->command) ==
                            HostCmd_CMD_802_11_SCAN_EXT) {
-                               dev_dbg(priv->adapter->dev,
-                                       "Scan pending in command pending list");
+                               mwifiex_dbg(adapter, INFO,
+                                           "Scan pending in command pending list");
                                complete_scan = false;
                                break;
                        }
@@ -2114,17 +2147,20 @@ int mwifiex_handle_event_ext_scan_report(struct mwifiex_private *priv,
        u16 scan_resp_size = le16_to_cpu(event_scan->buf_size);
 
        if (num_of_set > MWIFIEX_MAX_AP) {
-               dev_err(adapter->dev,
-                       "EXT_SCAN: Invalid number of AP returned (%d)!!\n",
-                       num_of_set);
+               mwifiex_dbg(adapter, ERROR,
+                           "EXT_SCAN: Invalid number of APs returned (%d)!!\n",
+                           num_of_set);
                ret = -1;
                goto check_next_scan;
        }
 
        bytes_left = scan_resp_size;
-       dev_dbg(adapter->dev,
-               "EXT_SCAN: size %d, returned %d APs...",
-               scan_resp_size, num_of_set);
+       mwifiex_dbg(adapter, INFO,
+                   "EXT_SCAN: size %d, returned %d APs...",
+                   scan_resp_size, num_of_set);
+       mwifiex_dbg_dump(adapter, CMD_D, "EXT_SCAN buffer:", buf,
+                        scan_resp_size +
+                        sizeof(struct mwifiex_event_scan_result));
 
        tlv = (struct mwifiex_ie_types_data *)scan_resp;
 
@@ -2132,7 +2168,8 @@ int mwifiex_handle_event_ext_scan_report(struct mwifiex_private *priv,
                type = le16_to_cpu(tlv->header.type);
                len = le16_to_cpu(tlv->header.len);
                if (bytes_left < sizeof(struct mwifiex_ie_types_header) + len) {
-                       dev_err(adapter->dev, "EXT_SCAN: Error bytes left < TLV length\n");
+                       mwifiex_dbg(adapter, ERROR,
+                                   "EXT_SCAN: Error bytes left < TLV length\n");
                        break;
                }
                scan_rsp_tlv = NULL;
@@ -2158,8 +2195,9 @@ int mwifiex_handle_event_ext_scan_report(struct mwifiex_private *priv,
                        len = le16_to_cpu(tlv->header.len);
                        if (bytes_left_for_tlv <
                            sizeof(struct mwifiex_ie_types_header) + len) {
-                               dev_err(adapter->dev,
-                                       "EXT_SCAN: Error in processing TLV, bytes left < TLV length\n");
+                               mwifiex_dbg(adapter, ERROR,
+                                           "EXT_SCAN: Error in processing TLV,\t"
+                                           "bytes left < TLV length\n");
                                scan_rsp_tlv = NULL;
                                bytes_left_for_tlv = 0;
                                continue;
@@ -2199,8 +2237,8 @@ int mwifiex_handle_event_ext_scan_report(struct mwifiex_private *priv,
                if (scan_info_tlv) {
                        rssi = (s32)(s16)(le16_to_cpu(scan_info_tlv->rssi));
                        rssi *= 100;           /* Convert dBm to mBm */
-                       dev_dbg(adapter->dev,
-                               "info: InterpretIE: RSSI=%d\n", rssi);
+                       mwifiex_dbg(adapter, INFO,
+                                   "info: InterpretIE: RSSI=%d\n", rssi);
                        fw_tsf = le64_to_cpu(scan_info_tlv->tsf);
                        radio_type = &scan_info_tlv->radio_type;
                } else {
@@ -2271,13 +2309,14 @@ static int mwifiex_scan_specific_ssid(struct mwifiex_private *priv,
        struct mwifiex_user_scan_cfg *scan_cfg;
 
        if (adapter->scan_processing) {
-               dev_err(adapter->dev, "cmd: Scan already in process...\n");
+               mwifiex_dbg(adapter, WARN,
+                           "cmd: Scan already in progress...\n");
                return -EBUSY;
        }
 
        if (priv->scan_block) {
-               dev_err(adapter->dev,
-                       "cmd: Scan is blocked during association...\n");
+               mwifiex_dbg(adapter, WARN,
+                           "cmd: Scan is blocked during association...\n");
                return -EBUSY;
        }
 
@@ -2309,8 +2348,9 @@ int mwifiex_request_scan(struct mwifiex_private *priv,
        int ret;
 
        if (down_interruptible(&priv->async_sem)) {
-               dev_err(priv->adapter->dev, "%s: acquire semaphore\n",
-                       __func__);
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "%s: failed to acquire semaphore\n",
+                           __func__);
                return -1;
        }
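The semaphore above serializes scan requests: mwifiex_request_scan() sleeps on priv->async_sem until the previous scan finishes, and down_interruptible() lets a signal abort the wait with an error instead of blocking forever. A rough userspace analogue with POSIX semaphores (the release, which the driver performs only when the scan actually completes, is simplified to happen inline):

    #include <semaphore.h>
    #include <stdio.h>

    static sem_t scan_sem;  /* counterpart of priv->async_sem */

    /* Serialize scans; bail out if the wait is interrupted by a signal. */
    static int request_scan(void)
    {
            if (sem_wait(&scan_sem))        /* like down_interruptible() < 0 */
                    return -1;
            /* ... build and send the scan command here ... */
            sem_post(&scan_sem);            /* simplified: released inline */
            return 0;
    }

    int main(void)
    {
            sem_init(&scan_sem, 0, 1);
            if (request_scan())
                    fprintf(stderr, "scan: failed to acquire semaphore\n");
            sem_destroy(&scan_sem);
            return 0;
    }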
 
@@ -2400,8 +2440,9 @@ mwifiex_save_curr_bcn(struct mwifiex_private *priv)
 
        memcpy(priv->curr_bcn_buf, curr_bss->beacon_buf,
               curr_bss->beacon_buf_size);
-       dev_dbg(priv->adapter->dev, "info: current beacon saved %d\n",
-               priv->curr_bcn_size);
+       mwifiex_dbg(priv->adapter, INFO,
+                   "info: current beacon saved %d\n",
+                   priv->curr_bcn_size);
 
        curr_bss->beacon_buf = priv->curr_bcn_buf;
 
index d10320f89bc16f0f87604fa3840a4a0b72e2530d..a0b121f3460c871eefca6fd30d79a3217ef401c8 100644 (file)
@@ -166,7 +166,8 @@ static int mwifiex_sdio_resume(struct device *dev)
        adapter = card->adapter;
 
        if (!adapter->is_suspended) {
-               dev_warn(adapter->dev, "device already resumed\n");
+               mwifiex_dbg(adapter, WARN,
+                           "device already resumed\n");
                return 0;
        }
 
@@ -191,8 +192,6 @@ mwifiex_sdio_remove(struct sdio_func *func)
        struct mwifiex_adapter *adapter;
        struct mwifiex_private *priv;
 
-       pr_debug("info: SDIO func num=%d\n", func->num);
-
        card = sdio_get_drvdata(func);
        if (!card)
                return;
@@ -201,6 +200,8 @@ mwifiex_sdio_remove(struct sdio_func *func)
        if (!adapter || !adapter->priv_num)
                return;
 
+       mwifiex_dbg(adapter, INFO, "info: SDIO func num=%d\n", func->num);
+
        if (user_rmmod) {
                if (adapter->is_suspended)
                        mwifiex_sdio_resume(adapter->dev);
@@ -257,12 +258,14 @@ static int mwifiex_sdio_suspend(struct device *dev)
 
        /* Enable the Host Sleep */
        if (!mwifiex_enable_hs(adapter)) {
-               dev_err(adapter->dev, "cmd: failed to suspend\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "cmd: failed to suspend\n");
                adapter->hs_enabling = false;
                return -EFAULT;
        }
 
-       dev_dbg(adapter->dev, "cmd: suspend with MMC_PM_KEEP_POWER\n");
+       mwifiex_dbg(adapter, INFO,
+                   "cmd: suspend with MMC_PM_KEEP_POWER\n");
        ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
 
        /* Indicate device suspended */
@@ -386,8 +389,8 @@ mwifiex_write_data_sync(struct mwifiex_adapter *adapter,
        u32 ioport = (port & MWIFIEX_SDIO_IO_PORT_MASK);
 
        if (adapter->is_suspended) {
-               dev_err(adapter->dev,
-                       "%s: not allowed while suspended\n", __func__);
+               mwifiex_dbg(adapter, ERROR,
+                           "%s: not allowed while suspended\n", __func__);
                return -1;
        }
 
@@ -434,7 +437,8 @@ static int mwifiex_read_data_sync(struct mwifiex_adapter *adapter, u8 *buffer,
  */
 static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
 {
-       dev_dbg(adapter->dev, "event: wakeup device...\n");
+       mwifiex_dbg(adapter, EVENT,
+                   "event: wakeup device...\n");
 
        return mwifiex_write_reg(adapter, CONFIGURATION_REG, HOST_POWER_UP);
 }
@@ -446,7 +450,8 @@ static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
  */
 static int mwifiex_pm_wakeup_card_complete(struct mwifiex_adapter *adapter)
 {
-       dev_dbg(adapter->dev, "cmd: wakeup device completed\n");
+       mwifiex_dbg(adapter, EVENT,
+                   "cmd: wakeup device completed\n");
 
        return mwifiex_write_reg(adapter, CONFIGURATION_REG, 0);
 }
@@ -524,7 +529,8 @@ static int mwifiex_init_sdio_ioport(struct mwifiex_adapter *adapter)
        else
                return -1;
 cont:
-       pr_debug("info: SDIO FUNC1 IO port: %#x\n", adapter->ioport);
+       mwifiex_dbg(adapter, INFO,
+                   "info: SDIO FUNC1 IO port: %#x\n", adapter->ioport);
 
        /* Set Host interrupt reset to read to clear */
        if (!mwifiex_read_reg(adapter, card->reg->host_int_rsr_reg, &reg))
@@ -556,10 +562,12 @@ static int mwifiex_write_data_to_card(struct mwifiex_adapter *adapter,
                ret = mwifiex_write_data_sync(adapter, payload, pkt_len, port);
                if (ret) {
                        i++;
-                       dev_err(adapter->dev, "host_to_card, write iomem"
-                                       " (%d) failed: %d\n", i, ret);
+                       mwifiex_dbg(adapter, ERROR,
+                                   "host_to_card, write iomem\t"
+                                   "(%d) failed: %d\n", i, ret);
                        if (mwifiex_write_reg(adapter, CONFIGURATION_REG, 0x04))
-                               dev_err(adapter->dev, "write CFG reg failed\n");
+                               mwifiex_dbg(adapter, ERROR,
+                                           "write CFG reg failed\n");
 
                        ret = -1;
                        if (i > MAX_WRITE_IOMEM_RETRY)
@@ -584,7 +592,8 @@ static int mwifiex_get_rd_port(struct mwifiex_adapter *adapter, u8 *port)
        const struct mwifiex_sdio_card_reg *reg = card->reg;
        u32 rd_bitmap = card->mp_rd_bitmap;
 
-       dev_dbg(adapter->dev, "data: mp_rd_bitmap=0x%08x\n", rd_bitmap);
+       mwifiex_dbg(adapter, DATA,
+                   "data: mp_rd_bitmap=0x%08x\n", rd_bitmap);
 
        if (card->supports_sdio_new_mode) {
                if (!(rd_bitmap & reg->data_port_mask))
@@ -598,8 +607,9 @@ static int mwifiex_get_rd_port(struct mwifiex_adapter *adapter, u8 *port)
            (card->mp_rd_bitmap & CTRL_PORT_MASK)) {
                card->mp_rd_bitmap &= (u32) (~CTRL_PORT_MASK);
                *port = CTRL_PORT;
-               dev_dbg(adapter->dev, "data: port=%d mp_rd_bitmap=0x%08x\n",
-                       *port, card->mp_rd_bitmap);
+               mwifiex_dbg(adapter, DATA,
+                           "data: port=%d mp_rd_bitmap=0x%08x\n",
+                           *port, card->mp_rd_bitmap);
                return 0;
        }
 
@@ -613,9 +623,9 @@ static int mwifiex_get_rd_port(struct mwifiex_adapter *adapter, u8 *port)
        if (++card->curr_rd_port == card->max_ports)
                card->curr_rd_port = reg->start_rd_port;
 
-       dev_dbg(adapter->dev,
-               "data: port=%d mp_rd_bitmap=0x%08x -> 0x%08x\n",
-               *port, rd_bitmap, card->mp_rd_bitmap);
+       mwifiex_dbg(adapter, DATA,
+                   "data: port=%d mp_rd_bitmap=0x%08x -> 0x%08x\n",
+                   *port, rd_bitmap, card->mp_rd_bitmap);
 
        return 0;
 }
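The bitmap logging above belongs to the read-port scheduler: the card sets one bit per data port that has a packet pending, and the host consumes the bit for its current port, then advances round-robin with wraparound. The rotation, reduced to a self-contained C model (port numbers and limits are illustrative):

    #include <stdio.h>

    #define START_RD_PORT 1
    #define MAX_PORTS     16

    static unsigned int mp_rd_bitmap = (1u << 1) | (1u << 2); /* ports 1,2 */
    static unsigned int curr_rd_port = START_RD_PORT;

    /* Claim the current port if it has data, then rotate to the next. */
    static int get_rd_port(unsigned int *port)
    {
            if (!(mp_rd_bitmap & (1u << curr_rd_port)))
                    return -1;                      /* nothing pending here */

            mp_rd_bitmap &= ~(1u << curr_rd_port);  /* consume this port */
            *port = curr_rd_port;

            if (++curr_rd_port == MAX_PORTS)
                    curr_rd_port = START_RD_PORT;   /* wrap around */
            return 0;
    }

    int main(void)
    {
            unsigned int port;

            while (!get_rd_port(&port))
                    printf("read port %u, rd_bitmap now 0x%08x\n",
                           port, mp_rd_bitmap);
            return 0;
    }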
@@ -633,7 +643,8 @@ static int mwifiex_get_wr_port_data(struct mwifiex_adapter *adapter, u32 *port)
        const struct mwifiex_sdio_card_reg *reg = card->reg;
        u32 wr_bitmap = card->mp_wr_bitmap;
 
-       dev_dbg(adapter->dev, "data: mp_wr_bitmap=0x%08x\n", wr_bitmap);
+       mwifiex_dbg(adapter, DATA,
+                   "data: mp_wr_bitmap=0x%08x\n", wr_bitmap);
 
        if (!(wr_bitmap & card->mp_data_port_mask)) {
                adapter->data_sent = true;
@@ -651,15 +662,16 @@ static int mwifiex_get_wr_port_data(struct mwifiex_adapter *adapter, u32 *port)
        }
 
        if ((card->has_control_mask) && (*port == CTRL_PORT)) {
-               dev_err(adapter->dev,
-                       "invalid data port=%d cur port=%d mp_wr_bitmap=0x%08x -> 0x%08x\n",
-                       *port, card->curr_wr_port, wr_bitmap,
-                       card->mp_wr_bitmap);
+               mwifiex_dbg(adapter, ERROR,
+                           "invalid data port=%d cur port=%d mp_wr_bitmap=0x%08x -> 0x%08x\n",
+                           *port, card->curr_wr_port, wr_bitmap,
+                           card->mp_wr_bitmap);
                return -1;
        }
 
-       dev_dbg(adapter->dev, "data: port=%d mp_wr_bitmap=0x%08x -> 0x%08x\n",
-               *port, wr_bitmap, card->mp_wr_bitmap);
+       mwifiex_dbg(adapter, DATA,
+                   "data: port=%d mp_wr_bitmap=0x%08x -> 0x%08x\n",
+                   *port, wr_bitmap, card->mp_wr_bitmap);
 
        return 0;
 }
@@ -683,7 +695,8 @@ mwifiex_sdio_poll_card_status(struct mwifiex_adapter *adapter, u8 bits)
                usleep_range(10, 20);
        }
 
-       dev_err(adapter->dev, "poll card status failed, tries = %d\n", tries);
+       mwifiex_dbg(adapter, ERROR,
+                   "poll card status failed, tries = %d\n", tries);
 
        return -1;
 }
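mwifiex_sdio_poll_card_status() is a bounded busy-wait: re-read the status register until all requested ready bits are set, sleeping briefly between attempts, and report failure after a fixed number of tries. A sketch with the register read stubbed out:

    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    #define MAX_POLL_TRIES 100

    /* Stub for mwifiex_read_reg(); pretends the card is ready on read 3. */
    static uint8_t read_card_status(void)
    {
            static uint8_t status;

            if (status < 0x03)
                    status++;
            return status;
    }

    /* Poll until all requested bits are set, or give up after the limit. */
    static int poll_card_status(uint8_t bits)
    {
            int tries;

            for (tries = 0; tries < MAX_POLL_TRIES; tries++) {
                    if ((read_card_status() & bits) == bits)
                            return 0;
                    usleep(10);     /* the driver uses usleep_range(10, 20) */
            }
            fprintf(stderr, "poll card status failed, tries = %d\n", tries);
            return -1;
    }

    int main(void)
    {
            return poll_card_status(0x03) ? 1 : 0;
    }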
@@ -738,7 +751,7 @@ static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter)
        if (mwifiex_read_data_sync(adapter, card->mp_regs,
                                   card->reg->max_mp_regs,
                                   REG_PORT | MWIFIEX_SDIO_BYTE_MODE_MASK, 0)) {
-               dev_err(adapter->dev, "read mp_regs failed\n");
+               mwifiex_dbg(adapter, ERROR, "read mp_regs failed\n");
                return;
        }
 
@@ -751,7 +764,8 @@ static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter)
                 *      UP_LD_CMD_PORT_HOST_INT_STATUS
                 * Clear the interrupt status register
                 */
-               dev_dbg(adapter->dev, "int: sdio_ireg = %#x\n", sdio_ireg);
+               mwifiex_dbg(adapter, INTR,
+                           "int: sdio_ireg = %#x\n", sdio_ireg);
                spin_lock_irqsave(&adapter->int_lock, flags);
                adapter->int_status |= sdio_ireg;
                spin_unlock_irqrestore(&adapter->int_lock, flags);
@@ -802,7 +816,8 @@ static int mwifiex_sdio_enable_host_int(struct mwifiex_adapter *adapter)
        /* Request the SDIO IRQ */
        ret = sdio_claim_irq(func, mwifiex_sdio_interrupt);
        if (ret) {
-               dev_err(adapter->dev, "claim irq failed: ret=%d\n", ret);
+               mwifiex_dbg(adapter, ERROR,
+                           "claim irq failed: ret=%d\n", ret);
                goto out;
        }
 
@@ -810,7 +825,8 @@ static int mwifiex_sdio_enable_host_int(struct mwifiex_adapter *adapter)
        ret = mwifiex_write_reg_locked(func, card->reg->host_int_mask_reg,
                                       card->reg->host_int_enable);
        if (ret) {
-               dev_err(adapter->dev, "enable host interrupt failed\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "enable host interrupt failed\n");
                sdio_release_irq(func);
        }
 
@@ -830,22 +846,25 @@ static int mwifiex_sdio_card_to_host(struct mwifiex_adapter *adapter,
        u32 nb;
 
        if (!buffer) {
-               dev_err(adapter->dev, "%s: buffer is NULL\n", __func__);
+               mwifiex_dbg(adapter, ERROR,
+                           "%s: buffer is NULL\n", __func__);
                return -1;
        }
 
        ret = mwifiex_read_data_sync(adapter, buffer, npayload, ioport, 1);
 
        if (ret) {
-               dev_err(adapter->dev, "%s: read iomem failed: %d\n", __func__,
+               mwifiex_dbg(adapter, ERROR,
+                           "%s: read iomem failed: %d\n", __func__,
                        ret);
                return -1;
        }
 
        nb = le16_to_cpu(*(__le16 *) (buffer));
        if (nb > npayload) {
-               dev_err(adapter->dev, "%s: invalid packet, nb=%d npayload=%d\n",
-                       __func__, nb, npayload);
+               mwifiex_dbg(adapter, ERROR,
+                           "%s: invalid packet, nb=%d npayload=%d\n",
+                           __func__, nb, npayload);
                return -1;
        }
 
@@ -877,13 +896,14 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
        u32 i = 0;
 
        if (!firmware_len) {
-               dev_err(adapter->dev,
-                       "firmware image not found! Terminating download\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "firmware image not found! Terminating download\n");
                return -1;
        }
 
-       dev_dbg(adapter->dev, "info: downloading FW image (%d bytes)\n",
-               firmware_len);
+       mwifiex_dbg(adapter, INFO,
+                   "info: downloading FW image (%d bytes)\n",
+                   firmware_len);
 
        /* Assume that the allocated buffer is 8-byte aligned */
        fwbuf = kzalloc(MWIFIEX_UPLD_SIZE, GFP_KERNEL);
@@ -897,8 +917,9 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
                ret = mwifiex_sdio_poll_card_status(adapter, CARD_IO_READY |
                                                    DN_LD_CARD_RDY);
                if (ret) {
-                       dev_err(adapter->dev, "FW download with helper:"
-                               " poll status timeout @ %d\n", offset);
+                       mwifiex_dbg(adapter, ERROR,
+                                   "FW download with helper:\t"
+                                   "poll status timeout @ %d\n", offset);
                        goto done;
                }
 
@@ -910,19 +931,19 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
                        ret = mwifiex_read_reg(adapter, reg->base_0_reg,
                                               &base0);
                        if (ret) {
-                               dev_err(adapter->dev,
-                                       "dev BASE0 register read failed: "
-                                       "base0=%#04X(%d). Terminating dnld\n",
-                                       base0, base0);
+                               mwifiex_dbg(adapter, ERROR,
+                                           "dev BASE0 register read failed:\t"
+                                           "base0=%#04X(%d). Terminating dnld\n",
+                                           base0, base0);
                                goto done;
                        }
                        ret = mwifiex_read_reg(adapter, reg->base_1_reg,
                                               &base1);
                        if (ret) {
-                               dev_err(adapter->dev,
-                                       "dev BASE1 register read failed: "
-                                       "base1=%#04X(%d). Terminating dnld\n",
-                                       base1, base1);
+                               mwifiex_dbg(adapter, ERROR,
+                                           "dev BASE1 register read failed:\t"
+                                           "base1=%#04X(%d). Terminating dnld\n",
+                                           base1, base1);
                                goto done;
                        }
                        len = (u16) (((base1 & 0xff) << 8) | (base0 & 0xff));
@@ -936,9 +957,9 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
                if (!len) {
                        break;
                } else if (len > MWIFIEX_UPLD_SIZE) {
-                       dev_err(adapter->dev,
-                               "FW dnld failed @ %d, invalid length %d\n",
-                               offset, len);
+                       mwifiex_dbg(adapter, ERROR,
+                                   "FW dnld failed @ %d, invalid length %d\n",
+                                   offset, len);
                        ret = -1;
                        goto done;
                }
@@ -948,14 +969,15 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
                if (len & BIT(0)) {
                        i++;
                        if (i > MAX_WRITE_IOMEM_RETRY) {
-                               dev_err(adapter->dev,
-                                       "FW dnld failed @ %d, over max retry\n",
-                                       offset);
+                               mwifiex_dbg(adapter, ERROR,
+                                           "FW dnld failed @ %d, over max retry\n",
+                                           offset);
                                ret = -1;
                                goto done;
                        }
-                       dev_err(adapter->dev, "CRC indicated by the helper:"
-                               " len = 0x%04X, txlen = %d\n", len, txlen);
+                       mwifiex_dbg(adapter, ERROR,
+                                   "CRC indicated by the helper:\t"
+                                   "len = 0x%04X, txlen = %d\n", len, txlen);
                        len &= ~BIT(0);
                        /* Setting this to 0 to resend from same offset */
                        txlen = 0;
@@ -978,11 +1000,12 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
                                              MWIFIEX_SDIO_BLOCK_SIZE,
                                              adapter->ioport);
                if (ret) {
-                       dev_err(adapter->dev,
-                               "FW download, write iomem (%d) failed @ %d\n",
-                               i, offset);
+                       mwifiex_dbg(adapter, ERROR,
+                                   "FW download, write iomem (%d) failed @ %d\n",
+                                   i, offset);
                        if (mwifiex_write_reg(adapter, CONFIGURATION_REG, 0x04))
-                               dev_err(adapter->dev, "write CFG reg failed\n");
+                               mwifiex_dbg(adapter, ERROR,
+                                           "write CFG reg failed\n");
 
                        ret = -1;
                        goto done;
@@ -991,8 +1014,8 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
                offset += txlen;
        } while (true);
 
-       dev_notice(adapter->dev,
-                  "info: FW download over, size %d bytes\n", offset);
+       mwifiex_dbg(adapter, MSG,
+                   "info: FW download over, size %d bytes\n", offset);
 
        ret = 0;
 done:
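The download loop above implements the helper handshake: wait until the card signals CARD_IO_READY | DN_LD_CARD_RDY, read the next requested chunk length from two byte-wide registers (base1 holds the high byte), and treat bit 0 of that length as a CRC complaint from the helper, clearing the bit and resending from the same offset, giving up after MAX_WRITE_IOMEM_RETRY attempts. The length/retry arithmetic in isolation (register values are made up for the demo):

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_WRITE_IOMEM_RETRY 2

    /* Combine the two byte-wide length registers, as the driver does:
     * len = ((base1 & 0xff) << 8) | (base0 & 0xff)
     */
    static uint16_t fw_len_from_regs(uint8_t base0, uint8_t base1)
    {
            return (uint16_t)(((base1 & 0xff) << 8) | (base0 & 0xff));
    }

    int main(void)
    {
            uint16_t len = fw_len_from_regs(0x01, 0x02); /* 0x0201, bit0 set */
            unsigned int retries = 0;
            unsigned int offset = 0, txlen = 256;

            if (len & 1) {                  /* helper signalled a CRC error */
                    if (++retries > MAX_WRITE_IOMEM_RETRY) {
                            fprintf(stderr, "FW dnld failed @ %u\n", offset);
                            return 1;
                    }
                    len &= ~1u;             /* clear the CRC flag ... */
                    txlen = 0;              /* ... and resend from same offset */
            }
            printf("next chunk: len=%u txlen=%u offset=%u\n",
                   (unsigned)len, txlen, offset);
            return 0;
    }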
@@ -1066,18 +1089,20 @@ static void mwifiex_deaggr_sdio_pkt(struct mwifiex_adapter *adapter,
                blk_num = *(data + BLOCK_NUMBER_OFFSET);
                blk_size = adapter->sdio_rx_block_size * blk_num;
                if (blk_size > total_pkt_len) {
-                       dev_err(adapter->dev, "%s: error in pkt,\t"
-                               "blk_num=%d, blk_size=%d, total_pkt_len=%d\n",
-                               __func__, blk_num, blk_size, total_pkt_len);
+                       mwifiex_dbg(adapter, ERROR,
+                                   "%s: error in blk_size,\t"
+                                   "blk_num=%d, blk_size=%d, total_pkt_len=%d\n",
+                                   __func__, blk_num, blk_size, total_pkt_len);
                        break;
                }
                pkt_len = le16_to_cpu(*(__le16 *)(data + SDIO_HEADER_OFFSET));
                pkt_type = le16_to_cpu(*(__le16 *)(data + SDIO_HEADER_OFFSET +
                                         2));
                if ((pkt_len + SDIO_HEADER_OFFSET) > blk_size) {
-                       dev_err(adapter->dev, "%s: error in pkt,\t"
-                               "pkt_len=%d, blk_size=%d\n",
-                               __func__, pkt_len, blk_size);
+                       mwifiex_dbg(adapter, ERROR,
+                                   "%s: error in pkt_len,\t"
+                                   "pkt_len=%d, blk_size=%d\n",
+                                   __func__, pkt_len, blk_size);
                        break;
                }
                skb_deaggr = mwifiex_alloc_dma_align_buf(pkt_len,
@@ -1116,7 +1141,8 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter,
 
        switch (upld_typ) {
        case MWIFIEX_TYPE_AGGR_DATA:
-               dev_dbg(adapter->dev, "info: --- Rx: Aggr Data packet ---\n");
+               mwifiex_dbg(adapter, INFO,
+                           "info: --- Rx: Aggr Data packet ---\n");
                rx_info = MWIFIEX_SKB_RXCB(skb);
                rx_info->buf_type = MWIFIEX_TYPE_AGGR_DATA;
                if (adapter->rx_work_enabled) {
@@ -1130,7 +1156,8 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter,
                break;
 
        case MWIFIEX_TYPE_DATA:
-               dev_dbg(adapter->dev, "info: --- Rx: Data packet ---\n");
+               mwifiex_dbg(adapter, DATA,
+                           "info: --- Rx: Data packet ---\n");
                if (adapter->rx_work_enabled) {
                        skb_queue_tail(&adapter->rx_data_q, skb);
                        adapter->data_received = true;
@@ -1141,7 +1168,8 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter,
                break;
 
        case MWIFIEX_TYPE_CMD:
-               dev_dbg(adapter->dev, "info: --- Rx: Cmd Response ---\n");
+               mwifiex_dbg(adapter, CMD,
+                           "info: --- Rx: Cmd Response ---\n");
                /* take care of curr_cmd = NULL case */
                if (!adapter->curr_cmd) {
                        cmd_buf = adapter->upld_buf;
@@ -1163,7 +1191,8 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter,
                break;
 
        case MWIFIEX_TYPE_EVENT:
-               dev_dbg(adapter->dev, "info: --- Rx: Event ---\n");
+               mwifiex_dbg(adapter, EVENT,
+                           "info: --- Rx: Event ---\n");
                adapter->event_cause = le32_to_cpu(*(__le32 *) skb->data);
 
                if ((skb->len > 0) && (skb->len  < MAX_EVENT_SIZE))
@@ -1178,7 +1207,8 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter,
                break;
 
        default:
-               dev_err(adapter->dev, "unknown upload type %#x\n", upld_typ);
+               mwifiex_dbg(adapter, ERROR,
+                           "unknown upload type %#x\n", upld_typ);
                dev_kfree_skb_any(skb);
                break;
        }
@@ -1210,16 +1240,18 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
 
        if ((card->has_control_mask) && (port == CTRL_PORT)) {
                /* Read the command Resp without aggr */
-               dev_dbg(adapter->dev, "info: %s: no aggregation for cmd "
-                       "response\n", __func__);
+               mwifiex_dbg(adapter, CMD,
+                           "info: %s: no aggregation for cmd\t"
+                           "response\n", __func__);
 
                f_do_rx_cur = 1;
                goto rx_curr_single;
        }
 
        if (!card->mpa_rx.enabled) {
-               dev_dbg(adapter->dev, "info: %s: rx aggregation disabled\n",
-                       __func__);
+               mwifiex_dbg(adapter, WARN,
+                           "info: %s: rx aggregation disabled\n",
+                           __func__);
 
                f_do_rx_cur = 1;
                goto rx_curr_single;
@@ -1230,7 +1262,8 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
            (card->has_control_mask && (card->mp_rd_bitmap &
                                        (~((u32) CTRL_PORT_MASK))))) {
                /* Some more data RX pending */
-               dev_dbg(adapter->dev, "info: %s: not last packet\n", __func__);
+               mwifiex_dbg(adapter, INFO,
+                           "info: %s: not last packet\n", __func__);
 
                if (MP_RX_AGGR_IN_PROGRESS(card)) {
                        if (MP_RX_AGGR_BUF_HAS_ROOM(card, rx_len)) {
@@ -1247,7 +1280,8 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
 
        } else {
                /* No more data RX pending */
-               dev_dbg(adapter->dev, "info: %s: last packet\n", __func__);
+               mwifiex_dbg(adapter, INFO,
+                           "info: %s: last packet\n", __func__);
 
                if (MP_RX_AGGR_IN_PROGRESS(card)) {
                        f_do_rx_aggr = 1;
@@ -1262,14 +1296,16 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
        }
 
        if (f_aggr_cur) {
-               dev_dbg(adapter->dev, "info: current packet aggregation\n");
+               mwifiex_dbg(adapter, INFO,
+                           "info: current packet aggregation\n");
                /* Curr pkt can be aggregated */
                mp_rx_aggr_setup(card, rx_len, port);
 
                if (MP_RX_AGGR_PKT_LIMIT_REACHED(card) ||
                    mp_rx_aggr_port_limit_reached(card)) {
-                       dev_dbg(adapter->dev, "info: %s: aggregated packet "
-                               "limit reached\n", __func__);
+                       mwifiex_dbg(adapter, INFO,
+                                   "info: %s: aggregated packet\t"
+                                   "limit reached\n", __func__);
                        /* No more pkts allowed in Aggr buf, rx it */
                        f_do_rx_aggr = 1;
                }
@@ -1277,8 +1313,9 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
 
        if (f_do_rx_aggr) {
                /* do aggr RX now */
-               dev_dbg(adapter->dev, "info: do_rx_aggr: num of packets: %d\n",
-                       card->mpa_rx.pkt_cnt);
+               mwifiex_dbg(adapter, DATA,
+                           "info: do_rx_aggr: num of packets: %d\n",
+                           card->mpa_rx.pkt_cnt);
 
                if (card->supports_sdio_new_mode) {
                        int i;
@@ -1318,8 +1355,9 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
                                                                 GFP_KERNEL |
                                                                 GFP_DMA);
                        if (!skb_deaggr) {
-                               dev_err(adapter->dev, "skb allocation failure drop pkt len=%d type=%d\n",
-                                       pkt_len, pkt_type);
+                               mwifiex_dbg(adapter, ERROR, "skb allocation failure\t"
+                                           "drop pkt len=%d type=%d\n",
+                                           pkt_len, pkt_type);
                                curr_ptr += len_arr[pind];
                                continue;
                        }
@@ -1339,12 +1377,12 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
                                mwifiex_decode_rx_packet(adapter, skb_deaggr,
                                                         pkt_type);
                        } else {
-                               dev_err(adapter->dev, " drop wrong aggr pkt:\t"
-                                       "sdio_single_port_rx_aggr=%d\t"
-                                       "type=%d len=%d max_len=%d\n",
-                                       adapter->sdio_rx_aggr_enable,
-                                       pkt_type, pkt_len,
-                                       len_arr[pind]);
+                               mwifiex_dbg(adapter, ERROR,
+                                           "drop wrong aggr pkt:\t"
+                                           "sdio_single_port_rx_aggr=%d\t"
+                                           "type=%d len=%d max_len=%d\n",
+                                           adapter->sdio_rx_aggr_enable,
+                                           pkt_type, pkt_len, len_arr[pind]);
                                dev_kfree_skb_any(skb_deaggr);
                        }
                        curr_ptr += len_arr[pind];
@@ -1354,13 +1392,14 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
 
 rx_curr_single:
        if (f_do_rx_cur) {
-               dev_dbg(adapter->dev, "info: RX: port: %d, rx_len: %d\n",
-                       port, rx_len);
+               mwifiex_dbg(adapter, INFO, "info: RX: port: %d, rx_len: %d\n",
+                           port, rx_len);
 
                skb = mwifiex_alloc_dma_align_buf(rx_len, GFP_KERNEL | GFP_DMA);
                if (!skb) {
-                       dev_err(adapter->dev, "single skb allocated fail,\t"
-                               "drop pkt port=%d len=%d\n", port, rx_len);
+                       mwifiex_dbg(adapter, ERROR,
+                                   "single skb allocated fail,\t"
+                                   "drop pkt port=%d len=%d\n", port, rx_len);
                        if (mwifiex_sdio_card_to_host(adapter, &pkt_type,
                                                      card->mpa_rx.buf, rx_len,
                                                      adapter->ioport + port))
@@ -1376,9 +1415,9 @@ rx_curr_single:
                        goto error;
                if (!adapter->sdio_rx_aggr_enable &&
                    pkt_type == MWIFIEX_TYPE_AGGR_DATA) {
-                       dev_err(adapter->dev, "drop wrong pkt type %d\t"
-                               "current SDIO RX Aggr not enabled\n",
-                               pkt_type);
+                       mwifiex_dbg(adapter, ERROR, "drop wrong pkt type %d\t"
+                                   "current SDIO RX Aggr not enabled\n",
+                                   pkt_type);
                        dev_kfree_skb_any(skb);
                        return 0;
                }
@@ -1386,7 +1425,8 @@ rx_curr_single:
                mwifiex_decode_rx_packet(adapter, skb, pkt_type);
        }
        if (f_post_aggr_cur) {
-               dev_dbg(adapter->dev, "info: current packet aggregation\n");
+               mwifiex_dbg(adapter, INFO,
+                           "info: current packet aggregation\n");
                /* Curr pkt can be aggregated */
                mp_rx_aggr_setup(card, rx_len, port);
        }
@@ -1458,7 +1498,7 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
                     MWIFIEX_RX_DATA_BUF_SIZE)
                        return -1;
                rx_len = (u16) (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE);
-               dev_dbg(adapter->dev, "info: rx_len = %d\n", rx_len);
+               mwifiex_dbg(adapter, INFO, "info: rx_len = %d\n", rx_len);
 
                skb = mwifiex_alloc_dma_align_buf(rx_len, GFP_KERNEL | GFP_DMA);
                if (!skb)
@@ -1469,17 +1509,17 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
                if (mwifiex_sdio_card_to_host(adapter, &pkt_type, skb->data,
                                              skb->len, adapter->ioport |
                                                        CMD_PORT_SLCT)) {
-                       dev_err(adapter->dev,
-                               "%s: failed to card_to_host", __func__);
+                       mwifiex_dbg(adapter, ERROR,
+                                   "%s: failed to card_to_host", __func__);
                        dev_kfree_skb_any(skb);
                        goto term_cmd;
                }
 
                if ((pkt_type != MWIFIEX_TYPE_CMD) &&
                    (pkt_type != MWIFIEX_TYPE_EVENT))
-                       dev_err(adapter->dev,
-                               "%s:Received wrong packet on cmd port",
-                               __func__);
+                       mwifiex_dbg(adapter, ERROR,
+                                   "%s:Received wrong packet on cmd port",
+                                   __func__);
 
                mwifiex_decode_rx_packet(adapter, skb, pkt_type);
        }
@@ -1495,12 +1535,13 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
                }
                card->mp_wr_bitmap = bitmap;
 
-               dev_dbg(adapter->dev, "int: DNLD: wr_bitmap=0x%x\n",
-                       card->mp_wr_bitmap);
+               mwifiex_dbg(adapter, INTR,
+                           "int: DNLD: wr_bitmap=0x%x\n",
+                           card->mp_wr_bitmap);
                if (adapter->data_sent &&
                    (card->mp_wr_bitmap & card->mp_data_port_mask)) {
-                       dev_dbg(adapter->dev,
-                               "info:  <--- Tx DONE Interrupt --->\n");
+                       mwifiex_dbg(adapter, INTR,
+                                   "info:  <--- Tx DONE Interrupt --->\n");
                        adapter->data_sent = false;
                }
        }
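The DNLD interrupt path above rebuilds the write-port bitmap from byte-wide shadow registers fetched in one burst by mwifiex_interrupt_status(); newer chips extend it from 16 to 32 bits with two extra registers, as the rd_bitmap hunk below shows. The little-endian assembly on its own (register contents are illustrative):

    #include <stdio.h>

    /* Byte-wide shadow registers, as read from the card in one burst. */
    static const unsigned char mp_regs[] = { 0x34, 0x12, 0x78, 0x56 };

    int main(void)
    {
            unsigned int bitmap;

            /* Low byte first, mirroring the rd/wr bitmap reads in
             * mwifiex_process_int_status().
             */
            bitmap  = (unsigned int)mp_regs[0];
            bitmap |= (unsigned int)mp_regs[1] << 8;
            bitmap |= (unsigned int)mp_regs[2] << 16;  /* new-mode cards only */
            bitmap |= (unsigned int)mp_regs[3] << 24;
            printf("wr_bitmap=0x%08x\n", bitmap);      /* 0x56781234 */
            return 0;
    }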
@@ -1517,8 +1558,8 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
                        adapter->cmd_sent = false;
        }
 
-       dev_dbg(adapter->dev, "info: cmd_sent=%d data_sent=%d\n",
-               adapter->cmd_sent, adapter->data_sent);
+       mwifiex_dbg(adapter, INTR, "info: cmd_sent=%d data_sent=%d\n",
+                   adapter->cmd_sent, adapter->data_sent);
        if (sdio_ireg & UP_LD_HOST_INT_STATUS) {
                bitmap = (u32) card->mp_regs[reg->rd_bitmap_l];
                bitmap |= ((u32) card->mp_regs[reg->rd_bitmap_u]) << 8;
@@ -1529,40 +1570,45 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
                                ((u32) card->mp_regs[reg->rd_bitmap_1u]) << 24;
                }
                card->mp_rd_bitmap = bitmap;
-               dev_dbg(adapter->dev, "int: UPLD: rd_bitmap=0x%x\n",
-                       card->mp_rd_bitmap);
+               mwifiex_dbg(adapter, INTR,
+                           "int: UPLD: rd_bitmap=0x%x\n",
+                           card->mp_rd_bitmap);
 
                while (true) {
                        ret = mwifiex_get_rd_port(adapter, &port);
                        if (ret) {
-                               dev_dbg(adapter->dev,
-                                       "info: no more rd_port available\n");
+                               mwifiex_dbg(adapter, INFO,
+                                           "info: no more rd_port available\n");
                                break;
                        }
                        len_reg_l = reg->rd_len_p0_l + (port << 1);
                        len_reg_u = reg->rd_len_p0_u + (port << 1);
                        rx_len = ((u16) card->mp_regs[len_reg_u]) << 8;
                        rx_len |= (u16) card->mp_regs[len_reg_l];
-                       dev_dbg(adapter->dev, "info: RX: port=%d rx_len=%u\n",
-                               port, rx_len);
+                       mwifiex_dbg(adapter, INFO,
+                                   "info: RX: port=%d rx_len=%u\n",
+                                   port, rx_len);
                        rx_blocks =
                                (rx_len + MWIFIEX_SDIO_BLOCK_SIZE -
                                 1) / MWIFIEX_SDIO_BLOCK_SIZE;
                        if (rx_len <= INTF_HEADER_LEN ||
                            (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE) >
                             card->mpa_rx.buf_size) {
-                               dev_err(adapter->dev, "invalid rx_len=%d\n",
-                                       rx_len);
+                               mwifiex_dbg(adapter, ERROR,
+                                           "invalid rx_len=%d\n",
+                                           rx_len);
                                return -1;
                        }
 
                        rx_len = (u16) (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE);
-                       dev_dbg(adapter->dev, "info: rx_len = %d\n", rx_len);
+                       mwifiex_dbg(adapter, INFO, "info: rx_len = %d\n",
+                                   rx_len);
 
                        if (mwifiex_sdio_card_to_host_mp_aggr(adapter, rx_len,
                                                              port)) {
-                               dev_err(adapter->dev, "card_to_host_mpa failed:"
-                                       " int status=%#x\n", sdio_ireg);
+                               mwifiex_dbg(adapter, ERROR,
+                                           "card_to_host_mpa failed: int status=%#x\n",
+                                           sdio_ireg);
                                goto term_cmd;
                        }
                }
@@ -1573,19 +1619,23 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
 term_cmd:
        /* terminate cmd */
        if (mwifiex_read_reg(adapter, CONFIGURATION_REG, &cr))
-               dev_err(adapter->dev, "read CFG reg failed\n");
+               mwifiex_dbg(adapter, ERROR, "read CFG reg failed\n");
        else
-               dev_dbg(adapter->dev, "info: CFG reg val = %d\n", cr);
+               mwifiex_dbg(adapter, INFO,
+                           "info: CFG reg val = %d\n", cr);
 
        if (mwifiex_write_reg(adapter, CONFIGURATION_REG, (cr | 0x04)))
-               dev_err(adapter->dev, "write CFG reg failed\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "write CFG reg failed\n");
        else
-               dev_dbg(adapter->dev, "info: write success\n");
+               mwifiex_dbg(adapter, INFO, "info: write success\n");
 
        if (mwifiex_read_reg(adapter, CONFIGURATION_REG, &cr))
-               dev_err(adapter->dev, "read CFG reg failed\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "read CFG reg failed\n");
        else
-               dev_dbg(adapter->dev, "info: CFG reg val =%x\n", cr);
+               mwifiex_dbg(adapter, INFO,
+                           "info: CFG reg val =%x\n", cr);
 
        return -1;
 }
@@ -1619,8 +1669,9 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter,
        if (!card->mpa_tx.enabled ||
            (card->has_control_mask && (port == CTRL_PORT)) ||
            (card->supports_sdio_new_mode && (port == CMD_PORT_SLCT))) {
-               dev_dbg(adapter->dev, "info: %s: tx aggregation disabled\n",
-                       __func__);
+               mwifiex_dbg(adapter, WARN,
+                           "info: %s: tx aggregation disabled\n",
+                           __func__);
 
                f_send_cur_buf = 1;
                goto tx_curr_single;
@@ -1628,8 +1679,9 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter,
 
        if (next_pkt_len) {
                /* More pkt in TX queue */
-               dev_dbg(adapter->dev, "info: %s: more packets in queue.\n",
-                       __func__);
+               mwifiex_dbg(adapter, INFO,
+                           "info: %s: more packets in queue.\n",
+                           __func__);
 
                if (MP_TX_AGGR_IN_PROGRESS(card)) {
                        if (MP_TX_AGGR_BUF_HAS_ROOM(card, pkt_len)) {
@@ -1659,8 +1711,9 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter,
                }
        } else {
                /* Last pkt in TX queue */
-               dev_dbg(adapter->dev, "info: %s: Last packet in Tx Queue.\n",
-                       __func__);
+               mwifiex_dbg(adapter, INFO,
+                           "info: %s: Last packet in Tx Queue.\n",
+                           __func__);
 
                if (MP_TX_AGGR_IN_PROGRESS(card)) {
                        /* some packs in Aggr buf already */
@@ -1677,8 +1730,9 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter,
        }
 
        if (f_precopy_cur_buf) {
-               dev_dbg(adapter->dev, "data: %s: precopy current buffer\n",
-                       __func__);
+               mwifiex_dbg(adapter, DATA,
+                           "data: %s: precopy current buffer\n",
+                           __func__);
                MP_TX_AGGR_BUF_PUT(card, payload, pkt_len, port);
 
                if (MP_TX_AGGR_PKT_LIMIT_REACHED(card) ||
@@ -1688,9 +1742,10 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter,
        }
 
        if (f_send_aggr_buf) {
-               dev_dbg(adapter->dev, "data: %s: send aggr buffer: %d %d\n",
-                       __func__,
-                               card->mpa_tx.start_port, card->mpa_tx.ports);
+               mwifiex_dbg(adapter, DATA,
+                           "data: %s: send aggr buffer: %d %d\n",
+                           __func__, card->mpa_tx.start_port,
+                           card->mpa_tx.ports);
                if (card->supports_sdio_new_mode) {
                        u32 port_count;
                        int i;
@@ -1719,15 +1774,17 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter,
 
 tx_curr_single:
        if (f_send_cur_buf) {
-               dev_dbg(adapter->dev, "data: %s: send current buffer %d\n",
-                       __func__, port);
+               mwifiex_dbg(adapter, DATA,
+                           "data: %s: send current buffer %d\n",
+                           __func__, port);
                ret = mwifiex_write_data_to_card(adapter, payload, pkt_len,
                                                 adapter->ioport + port);
        }
 
        if (f_postcopy_cur_buf) {
-               dev_dbg(adapter->dev, "data: %s: postcopy current buffer\n",
-                       __func__);
+               mwifiex_dbg(adapter, DATA,
+                           "data: %s: postcopy current buffer\n",
+                           __func__);
                MP_TX_AGGR_BUF_PUT(card, payload, pkt_len, port);
        }
 
@@ -1771,8 +1828,9 @@ static int mwifiex_sdio_host_to_card(struct mwifiex_adapter *adapter,
        if (type == MWIFIEX_TYPE_DATA) {
                ret = mwifiex_get_wr_port_data(adapter, &port);
                if (ret) {
-                       dev_err(adapter->dev, "%s: no wr_port available\n",
-                               __func__);
+                       mwifiex_dbg(adapter, ERROR,
+                                   "%s: no wr_port available\n",
+                                   __func__);
                        return ret;
                }
        } else {
@@ -1781,8 +1839,9 @@ static int mwifiex_sdio_host_to_card(struct mwifiex_adapter *adapter,
 
                if (pkt_len <= INTF_HEADER_LEN ||
                    pkt_len > MWIFIEX_UPLD_SIZE)
-                       dev_err(adapter->dev, "%s: payload=%p, nb=%d\n",
-                               __func__, payload, pkt_len);
+                       mwifiex_dbg(adapter, ERROR,
+                                   "%s: payload=%p, nb=%d\n",
+                                   __func__, payload, pkt_len);
 
                if (card->supports_sdio_new_mode)
                        port = CMD_PORT_SLCT;
@@ -1896,7 +1955,8 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
        ret = sdio_set_block_size(card->func, MWIFIEX_SDIO_BLOCK_SIZE);
        sdio_release_host(func);
        if (ret) {
-               pr_err("cannot set SDIO block size\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "cannot set SDIO block size\n");
                return ret;
        }
 
@@ -1977,7 +2037,8 @@ static int mwifiex_init_sdio(struct mwifiex_adapter *adapter)
                                             card->mp_tx_agg_buf_size,
                                             card->mp_rx_agg_buf_size);
        if (ret) {
-               dev_err(adapter->dev, "failed to alloc sdio mp-a buffers\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "failed to alloc sdio mp-a buffers\n");
                kfree(card->mp_regs);
                return -1;
        }
@@ -2041,8 +2102,9 @@ mwifiex_update_mp_end_port(struct mwifiex_adapter *adapter, u16 port)
 
        card->curr_wr_port = reg->start_wr_port;
 
-       dev_dbg(adapter->dev, "cmd: mp_end_port %d, data port mask 0x%x\n",
-               port, card->mp_data_port_mask);
+       mwifiex_dbg(adapter, CMD,
+                   "cmd: mp_end_port %d, data port mask 0x%x\n",
+                   port, card->mp_data_port_mask);
 }
 
 static struct mwifiex_adapter *save_adapter;
@@ -2059,7 +2121,7 @@ static void mwifiex_sdio_card_reset_work(struct mwifiex_adapter *adapter)
         * We run it in a totally independent workqueue.
         */
 
-       pr_err("Resetting card...\n");
+       mwifiex_dbg(adapter, WARN, "Resetting card...\n");
        mmc_remove_host(target);
        /* 200ms delay is based on experiment with sdhci controller */
        mdelay(200);
@@ -2079,14 +2141,14 @@ rdwr_status mwifiex_sdio_rdwr_firmware(struct mwifiex_adapter *adapter,
        sdio_writeb(card->func, FW_DUMP_HOST_READY, card->reg->fw_dump_ctrl,
                    &ret);
        if (ret) {
-               dev_err(adapter->dev, "SDIO Write ERR\n");
+               mwifiex_dbg(adapter, ERROR, "SDIO Write ERR\n");
                return RDWR_STATUS_FAILURE;
        }
        for (tries = 0; tries < MAX_POLL_TRIES; tries++) {
                ctrl_data = sdio_readb(card->func, card->reg->fw_dump_ctrl,
                                       &ret);
                if (ret) {
-                       dev_err(adapter->dev, "SDIO read err\n");
+                       mwifiex_dbg(adapter, ERROR, "SDIO read err\n");
                        return RDWR_STATUS_FAILURE;
                }
                if (ctrl_data == FW_DUMP_DONE)
@@ -2094,19 +2156,20 @@ rdwr_status mwifiex_sdio_rdwr_firmware(struct mwifiex_adapter *adapter,
                if (doneflag && ctrl_data == doneflag)
                        return RDWR_STATUS_DONE;
                if (ctrl_data != FW_DUMP_HOST_READY) {
-                       dev_info(adapter->dev,
-                                "The ctrl reg was changed, re-try again!\n");
+                       mwifiex_dbg(adapter, WARN,
+                                   "The ctrl reg was changed, re-try again!\n");
                        sdio_writeb(card->func, FW_DUMP_HOST_READY,
                                    card->reg->fw_dump_ctrl, &ret);
                        if (ret) {
-                               dev_err(adapter->dev, "SDIO write err\n");
+                               mwifiex_dbg(adapter, ERROR, "SDIO write err\n");
                                return RDWR_STATUS_FAILURE;
                        }
                }
                usleep_range(100, 200);
        }
        if (ctrl_data == FW_DUMP_HOST_READY) {
-               dev_err(adapter->dev, "Fail to pull ctrl_data\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "Fail to pull ctrl_data\n");
                return RDWR_STATUS_FAILURE;
        }
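mwifiex_sdio_rdwr_firmware() is a two-party handshake over a single control byte: the host writes FW_DUMP_HOST_READY, then polls until the firmware overwrites it with FW_DUMP_DONE (whole dump finished) or the per-segment doneflag, re-arming the byte if it changes to anything else, and failing if the firmware never picks it up. A compressed model with the firmware side stubbed (register values are made up):

    #include <stdio.h>

    enum rdwr_status { RDWR_FAILURE = -1, RDWR_SUCCESS, RDWR_DONE };

    #define MAX_POLL_TRIES 100
    #define HOST_READY     0xEE     /* illustrative values */
    #define DUMP_DONE      0xFF

    static unsigned char ctrl_reg;

    static void reg_write(unsigned char v) { ctrl_reg = v; }

    /* Stub: pretend the firmware acknowledges on the first read. */
    static unsigned char reg_read(void)
    {
            if (ctrl_reg == HOST_READY)
                    ctrl_reg = DUMP_DONE;
            return ctrl_reg;
    }

    static enum rdwr_status rdwr_firmware(unsigned char doneflag)
    {
            unsigned char ctrl_data = 0;
            int tries;

            reg_write(HOST_READY);
            for (tries = 0; tries < MAX_POLL_TRIES; tries++) {
                    ctrl_data = reg_read();
                    if (ctrl_data == DUMP_DONE)
                            return RDWR_SUCCESS;    /* dump finished */
                    if (doneflag && ctrl_data == doneflag)
                            return RDWR_DONE;       /* segment finished */
                    if (ctrl_data != HOST_READY)
                            reg_write(HOST_READY);  /* changed: re-arm */
            }
            /* still HOST_READY: the firmware never pulled ctrl_data */
            return ctrl_data == HOST_READY ? RDWR_FAILURE : RDWR_SUCCESS;
    }

    int main(void)
    {
            printf("handshake: %d\n", rdwr_firmware(0));
            return 0;
    }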
 
@@ -2114,7 +2177,7 @@ rdwr_status mwifiex_sdio_rdwr_firmware(struct mwifiex_adapter *adapter,
 }
 
 /* This function dump firmware memory to file */
-static void mwifiex_sdio_fw_dump_work(struct mwifiex_adapter *adapter)
+static void mwifiex_sdio_fw_dump(struct mwifiex_adapter *adapter)
 {
        struct sdio_mmc_card *card = adapter->card;
        int ret = 0;
@@ -2122,9 +2185,6 @@ static void mwifiex_sdio_fw_dump_work(struct mwifiex_adapter *adapter)
        u8 *dbg_ptr, *end_ptr, dump_num, idx, i, read_reg, doneflag = 0;
        enum rdwr_status stat;
        u32 memory_size;
-       static char *env[] = { "DRIVER=mwifiex_sdio", "EVENT=fw_dump", NULL };
-
-       mwifiex_dump_drv_info(adapter);
 
        if (!card->can_dump_fw)
                return;
@@ -2142,7 +2202,7 @@ static void mwifiex_sdio_fw_dump_work(struct mwifiex_adapter *adapter)
        mwifiex_pm_wakeup_card(adapter);
        sdio_claim_host(card->func);
 
-       dev_info(adapter->dev, "== mwifiex firmware dump start ==\n");
+       mwifiex_dbg(adapter, MSG, "== mwifiex firmware dump start ==\n");
 
        stat = mwifiex_sdio_rdwr_firmware(adapter, doneflag);
        if (stat == RDWR_STATUS_FAILURE)
@@ -2152,7 +2212,7 @@ static void mwifiex_sdio_fw_dump_work(struct mwifiex_adapter *adapter)
        /* Read the number of the memories which will dump */
        dump_num = sdio_readb(card->func, reg, &ret);
        if (ret) {
-               dev_err(adapter->dev, "SDIO read memory length err\n");
+               mwifiex_dbg(adapter, ERROR, "SDIO read memory length err\n");
                goto done;
        }
 
@@ -2169,7 +2229,7 @@ static void mwifiex_sdio_fw_dump_work(struct mwifiex_adapter *adapter)
                for (i = 0; i < 4; i++) {
                        read_reg = sdio_readb(card->func, reg, &ret);
                        if (ret) {
-                               dev_err(adapter->dev, "SDIO read err\n");
+                               mwifiex_dbg(adapter, ERROR, "SDIO read err\n");
                                goto done;
                        }
                        memory_size |= (read_reg << i*8);
@@ -2177,25 +2237,33 @@ static void mwifiex_sdio_fw_dump_work(struct mwifiex_adapter *adapter)
                }
 
                if (memory_size == 0) {
-                       dev_info(adapter->dev, "Firmware dump Finished!\n");
+                       mwifiex_dbg(adapter, DUMP, "Firmware dump Finished!\n");
+                       ret = mwifiex_write_reg(adapter,
+                                               card->reg->fw_dump_ctrl,
+                                               FW_DUMP_READ_DONE);
+                       if (ret) {
+                               mwifiex_dbg(adapter, ERROR, "SDIO write err\n");
+                               goto done;      /* release the claimed host */
+                       }
                        break;
                }
 
-               dev_info(adapter->dev,
-                        "%s_SIZE=0x%x\n", entry->mem_name, memory_size);
+               mwifiex_dbg(adapter, DUMP,
+                           "%s_SIZE=0x%x\n", entry->mem_name, memory_size);
                entry->mem_ptr = vmalloc(memory_size + 1);
                entry->mem_size = memory_size;
                if (!entry->mem_ptr) {
-                       dev_err(adapter->dev, "Vmalloc %s failed\n",
-                               entry->mem_name);
+                       mwifiex_dbg(adapter, ERROR, "Vmalloc %s failed\n",
+                                   entry->mem_name);
                        goto done;
                }
                dbg_ptr = entry->mem_ptr;
                end_ptr = dbg_ptr + memory_size;
 
                doneflag = entry->done_flag;
-               dev_info(adapter->dev, "Start %s output, please wait...\n",
-                        entry->mem_name);
+               mwifiex_dbg(adapter, DUMP,
+                           "Start %s output, please wait...\n",
+                           entry->mem_name);
 
                do {
                        stat = mwifiex_sdio_rdwr_firmware(adapter, doneflag);
@@ -2207,39 +2275,43 @@ static void mwifiex_sdio_fw_dump_work(struct mwifiex_adapter *adapter)
                        for (reg = reg_start; reg <= reg_end; reg++) {
                                *dbg_ptr = sdio_readb(card->func, reg, &ret);
                                if (ret) {
-                                       dev_err(adapter->dev,
-                                               "SDIO read err\n");
+                                       mwifiex_dbg(adapter, ERROR,
+                                                   "SDIO read err\n");
                                        goto done;
                                }
                                if (dbg_ptr < end_ptr)
                                        dbg_ptr++;
                                else
-                                       dev_err(adapter->dev,
-                                               "Allocated buf not enough\n");
+                                       mwifiex_dbg(adapter, ERROR,
+                                                   "Allocated buf not enough\n");
                        }
 
                        if (stat != RDWR_STATUS_DONE)
                                continue;
 
-                       dev_info(adapter->dev, "%s done: size=0x%tx\n",
-                                entry->mem_name, dbg_ptr - entry->mem_ptr);
+                       mwifiex_dbg(adapter, DUMP, "%s done: size=0x%tx\n",
+                                   entry->mem_name, dbg_ptr - entry->mem_ptr);
                        break;
                } while (1);
        }
-       dev_info(adapter->dev, "== mwifiex firmware dump end ==\n");
-
-       kobject_uevent_env(&adapter->wiphy->dev.kobj, KOBJ_CHANGE, env);
+       mwifiex_dbg(adapter, MSG, "== mwifiex firmware dump end ==\n");
 
 done:
        sdio_release_host(card->func);
-       adapter->curr_mem_idx = 0;
+}
+
+static void mwifiex_sdio_device_dump_work(struct mwifiex_adapter *adapter)
+{
+       mwifiex_drv_info_dump(adapter);
+       mwifiex_sdio_fw_dump(adapter);
+       mwifiex_upload_device_dump(adapter);
 }
 
 static void mwifiex_sdio_work(struct work_struct *work)
 {
-       if (test_and_clear_bit(MWIFIEX_IFACE_WORK_FW_DUMP,
+       if (test_and_clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP,
                               &iface_work_flags))
-               mwifiex_sdio_fw_dump_work(save_adapter);
+               mwifiex_sdio_device_dump_work(save_adapter);
        if (test_and_clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET,
                               &iface_work_flags))
                mwifiex_sdio_card_reset_work(save_adapter);
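The rework above splits dumping into three stages run from one worker: snapshot driver state (mwifiex_drv_info_dump), pull the firmware memory segments over SDIO (mwifiex_sdio_fw_dump), and hand the combined blob to userspace via mwifiex_upload_device_dump (in this series, through the devcoredump facility rather than the old custom uevent). Requests are coalesced through a flag word that the worker consumes with test_and_clear_bit(); the idiom, modeled with C11 atomics:

    #include <stdatomic.h>
    #include <stdio.h>

    #define WORK_DEVICE_DUMP 0

    static atomic_uint iface_work_flags;

    /* Request a dump; a second request while one is pending is a no-op. */
    static void request_device_dump(void)
    {
            unsigned int bit = 1u << WORK_DEVICE_DUMP;

            if (atomic_load(&iface_work_flags) & bit)
                    return;                         /* already queued */
            atomic_fetch_or(&iface_work_flags, bit);
            /* schedule_work(&sdio_work) in the driver */
    }

    /* Worker: consume the flag exactly once (test_and_clear_bit idiom). */
    static void sdio_work(void)
    {
            unsigned int bit = 1u << WORK_DEVICE_DUMP;
            unsigned int old = atomic_fetch_and(&iface_work_flags, ~bit);

            if (old & bit) {
                    puts("drv info dump");  /* mwifiex_drv_info_dump() */
                    puts("fw memory dump"); /* mwifiex_sdio_fw_dump() */
                    puts("upload dump");    /* mwifiex_upload_device_dump() */
            }
    }

    int main(void)
    {
            request_device_dump();
            request_device_dump();  /* coalesced with the first */
            sdio_work();
            return 0;
    }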
@@ -2259,13 +2331,13 @@ static void mwifiex_sdio_card_reset(struct mwifiex_adapter *adapter)
 }
 
 /* This function dumps FW information */
-static void mwifiex_sdio_fw_dump(struct mwifiex_adapter *adapter)
+static void mwifiex_sdio_device_dump(struct mwifiex_adapter *adapter)
 {
        save_adapter = adapter;
-       if (test_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &iface_work_flags))
+       if (test_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &iface_work_flags))
                return;
 
-       set_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &iface_work_flags);
+       set_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &iface_work_flags);
        schedule_work(&sdio_work);
 }
 
@@ -2285,7 +2357,7 @@ mwifiex_sdio_reg_dump(struct mwifiex_adapter *adapter, char *drv_buf)
        if (!p)
                return 0;
 
-       dev_info(adapter->dev, "SDIO register DUMP START\n");
+       mwifiex_dbg(adapter, MSG, "SDIO register dump start\n");
 
        mwifiex_pm_wakeup_card(adapter);
 
@@ -2351,13 +2423,13 @@ mwifiex_sdio_reg_dump(struct mwifiex_adapter *adapter, char *drv_buf)
                                reg++;
                }
 
-               dev_info(adapter->dev, "%s\n", buf);
+               mwifiex_dbg(adapter, MSG, "%s\n", buf);
                p += sprintf(p, "%s\n", buf);
        }
 
        sdio_release_host(cardp->func);
 
-       dev_info(adapter->dev, "SDIO register DUMP END\n");
+       mwifiex_dbg(adapter, MSG, "SDIO register dump end\n");
 
        return p - drv_buf;
 }
@@ -2382,8 +2454,8 @@ static struct mwifiex_if_ops sdio_ops = {
        .cmdrsp_complete = mwifiex_sdio_cmdrsp_complete,
        .event_complete = mwifiex_sdio_event_complete,
        .card_reset = mwifiex_sdio_card_reset,
-       .fw_dump = mwifiex_sdio_fw_dump,
        .reg_dump = mwifiex_sdio_reg_dump,
+       .device_dump = mwifiex_sdio_device_dump,
        .deaggr_pkt = mwifiex_deaggr_sdio_pkt,
 };
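The table change is the visible API shift: the bus driver now exports a single .device_dump hook instead of .fw_dump, so the core can trigger the full three-stage dump without knowing whether the device sits on SDIO, USB or PCIe. The dispatch pattern in miniature (names here echo, but do not reproduce, struct mwifiex_if_ops):

    #include <stdio.h>

    struct adapter;

    /* Bus-agnostic hooks the core calls into. */
    struct if_ops {
            void (*card_reset)(struct adapter *a);
            void (*device_dump)(struct adapter *a); /* replaces .fw_dump */
    };

    struct adapter {
            const struct if_ops *ops;
    };

    static void sdio_device_dump(struct adapter *a)
    {
            (void)a;
            puts("sdio: queueing device dump work");
    }

    static const struct if_ops sdio_ops = {
            .device_dump = sdio_device_dump,
    };

    /* Core-side trigger: dispatch through the table if the bus provides it. */
    static void core_trigger_dump(struct adapter *a)
    {
            if (a->ops->device_dump)
                    a->ops->device_dump(a);
    }

    int main(void)
    {
            struct adapter a = { .ops = &sdio_ops };

            core_trigger_dump(&a);
            return 0;
    }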
 
index 49422f2a53809fe0c241de93afb231c8011871c3..037adcd1f484abeb5f54478893ab7b1ff2b7ab76 100644 (file)
@@ -77,8 +77,8 @@ static int mwifiex_cmd_mac_control(struct mwifiex_private *priv,
        struct host_cmd_ds_mac_control *mac_ctrl = &cmd->params.mac_ctrl;
 
        if (cmd_action != HostCmd_ACT_GEN_SET) {
-               dev_err(priv->adapter->dev,
-                       "mac_control: only support set cmd\n");
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "mac_control: only support set cmd\n");
                return -1;
        }
 
@@ -112,7 +112,8 @@ static int mwifiex_cmd_802_11_snmp_mib(struct mwifiex_private *priv,
 {
        struct host_cmd_ds_802_11_snmp_mib *snmp_mib = &cmd->params.smib;
 
-       dev_dbg(priv->adapter->dev, "cmd: SNMP_CMD: cmd_oid = 0x%x\n", cmd_oid);
+       mwifiex_dbg(priv->adapter, CMD,
+                   "cmd: SNMP_CMD: cmd_oid = 0x%x\n", cmd_oid);
        cmd->command = cpu_to_le16(HostCmd_CMD_802_11_SNMP_MIB);
        cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_802_11_snmp_mib)
                                - 1 + S_DS_GEN);
@@ -129,11 +130,11 @@ static int mwifiex_cmd_802_11_snmp_mib(struct mwifiex_private *priv,
                le16_add_cpu(&cmd->size, sizeof(u16));
        }
 
-       dev_dbg(priv->adapter->dev,
-               "cmd: SNMP_CMD: Action=0x%x, OID=0x%x, OIDSize=0x%x,"
-               " Value=0x%x\n",
-               cmd_action, cmd_oid, le16_to_cpu(snmp_mib->buf_size),
-               le16_to_cpu(*(__le16 *) snmp_mib->value));
+       mwifiex_dbg(priv->adapter, CMD,
+                   "cmd: SNMP_CMD: Action=0x%x, OID=0x%x,\t"
+                   "OIDSize=0x%x, Value=0x%x\n",
+                   cmd_action, cmd_oid, le16_to_cpu(snmp_mib->buf_size),
+                   le16_to_cpu(*(__le16 *)snmp_mib->value));
        return 0;
 }
 
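A recurring detail in these conversions: where a format string is split across source lines, the break now ends in \t rather than a trailing space, apparently keeping a visible separator in the output without leaving trailing whitespace inside a string constant. The adjacent literals still concatenate into a single format at compile time:

    /* the two literals merge; the logged line contains a tab */
    mwifiex_dbg(priv->adapter, CMD,
    	    "cmd: SNMP_CMD: Action=0x%x, OID=0x%x,\t"
    	    "OIDSize=0x%x, Value=0x%x\n",
    	    cmd_action, cmd_oid, buf_size, value);

(buf_size and value stand in for the two le16_to_cpu() conversions above.)
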
@@ -356,9 +357,9 @@ mwifiex_cmd_802_11_hs_cfg(struct mwifiex_private *priv,
            (hscfg_param->conditions != cpu_to_le32(HS_CFG_CANCEL)) &&
            ((adapter->arp_filter_size > 0) &&
             (adapter->arp_filter_size <= ARP_FILTER_MAX_BUF_SIZE))) {
-               dev_dbg(adapter->dev,
-                       "cmd: Attach %d bytes ArpFilter to HSCfg cmd\n",
-                       adapter->arp_filter_size);
+               mwifiex_dbg(adapter, CMD,
+                           "cmd: Attach %d bytes ArpFilter to HSCfg cmd\n",
+                           adapter->arp_filter_size);
                memcpy(((u8 *) hs_cfg) +
                       sizeof(struct host_cmd_ds_802_11_hs_cfg_enh),
                       adapter->arp_filter, adapter->arp_filter_size);
@@ -378,11 +379,11 @@ mwifiex_cmd_802_11_hs_cfg(struct mwifiex_private *priv,
                hs_cfg->params.hs_config.conditions = hscfg_param->conditions;
                hs_cfg->params.hs_config.gpio = hscfg_param->gpio;
                hs_cfg->params.hs_config.gap = hscfg_param->gap;
-               dev_dbg(adapter->dev,
-                       "cmd: HS_CFG_CMD: condition:0x%x gpio:0x%x gap:0x%x\n",
-                      hs_cfg->params.hs_config.conditions,
-                      hs_cfg->params.hs_config.gpio,
-                      hs_cfg->params.hs_config.gap);
+               mwifiex_dbg(adapter, CMD,
+                           "cmd: HS_CFG_CMD: condition:0x%x gpio:0x%x gap:0x%x\n",
+                           hs_cfg->params.hs_config.conditions,
+                           hs_cfg->params.hs_config.gpio,
+                           hs_cfg->params.hs_config.gap);
        }
 
        return 0;
@@ -462,7 +463,7 @@ static int mwifiex_cmd_802_11_deauthenticate(struct mwifiex_private *priv,
        /* Set AP MAC address */
        memcpy(deauth->mac_addr, mac, ETH_ALEN);
 
-       dev_dbg(priv->adapter->dev, "cmd: Deauth: %pM\n", deauth->mac_addr);
+       mwifiex_dbg(priv->adapter, CMD, "cmd: Deauth: %pM\n", deauth->mac_addr);
 
        deauth->reason_code = cpu_to_le16(WLAN_REASON_DEAUTH_LEAVING);
 
@@ -540,9 +541,9 @@ mwifiex_set_keyparamset_wep(struct mwifiex_private *priv,
                } else if (!priv->wep_key[i].key_length) {
                        continue;
                } else {
-                       dev_err(priv->adapter->dev,
-                               "key%d Length = %d is incorrect\n",
-                              (i + 1), priv->wep_key[i].key_length);
+                       mwifiex_dbg(priv->adapter, ERROR,
+                                   "key%d Length = %d is incorrect\n",
+                                   (i + 1), priv->wep_key[i].key_length);
                        return -1;
                }
        }
@@ -562,7 +563,8 @@ static int mwifiex_set_aes_key_v2(struct mwifiex_private *priv,
        u16 size, len = KEY_PARAMS_FIXED_LEN;
 
        if (enc_key->is_igtk_key) {
-               dev_dbg(adapter->dev, "%s: Set CMAC AES Key\n", __func__);
+               mwifiex_dbg(adapter, INFO,
+                           "%s: Set CMAC AES Key\n", __func__);
                if (enc_key->is_rx_seq_valid)
                        memcpy(km->key_param_set.key_params.cmac_aes.ipn,
                               enc_key->pn, enc_key->pn_len);
@@ -575,7 +577,8 @@ static int mwifiex_set_aes_key_v2(struct mwifiex_private *priv,
                       enc_key->key_material, enc_key->key_len);
                len += sizeof(struct mwifiex_cmac_aes_param);
        } else {
-               dev_dbg(adapter->dev, "%s: Set AES Key\n", __func__);
+               mwifiex_dbg(adapter, INFO,
+                           "%s: Set AES Key\n", __func__);
                if (enc_key->is_rx_seq_valid)
                        memcpy(km->key_param_set.key_params.aes.pn,
                               enc_key->pn, enc_key->pn_len);
@@ -619,7 +622,7 @@ mwifiex_cmd_802_11_key_material_v2(struct mwifiex_private *priv,
        km->action = cpu_to_le16(cmd_action);
 
        if (cmd_action == HostCmd_ACT_GEN_GET) {
-               dev_dbg(adapter->dev, "%s: Get key\n", __func__);
+               mwifiex_dbg(adapter, INFO, "%s: Get key\n", __func__);
                km->key_param_set.key_idx =
                                        enc_key->key_index & KEY_INDEX_MASK;
                km->key_param_set.type = cpu_to_le16(TLV_TYPE_KEY_PARAM_V2);
@@ -646,7 +649,7 @@ mwifiex_cmd_802_11_key_material_v2(struct mwifiex_private *priv,
               sizeof(struct mwifiex_ie_type_key_param_set_v2));
 
        if (enc_key->key_disable) {
-               dev_dbg(adapter->dev, "%s: Remove key\n", __func__);
+               mwifiex_dbg(adapter, INFO, "%s: Remove key\n", __func__);
                km->action = cpu_to_le16(HostCmd_ACT_GEN_REMOVE);
                km->key_param_set.type = cpu_to_le16(TLV_TYPE_KEY_PARAM_V2);
                km->key_param_set.len = cpu_to_le16(KEY_PARAMS_FIXED_LEN);
@@ -667,7 +670,7 @@ mwifiex_cmd_802_11_key_material_v2(struct mwifiex_private *priv,
        memcpy(km->key_param_set.mac_addr, mac, ETH_ALEN);
 
        if (enc_key->key_len <= WLAN_KEY_LEN_WEP104) {
-               dev_dbg(adapter->dev, "%s: Set WEP Key\n", __func__);
+               mwifiex_dbg(adapter, INFO, "%s: Set WEP Key\n", __func__);
                len += sizeof(struct mwifiex_wep_param);
                km->key_param_set.len = cpu_to_le16(len);
                km->key_param_set.key_type = KEY_TYPE_ID_WEP;
@@ -710,7 +713,7 @@ mwifiex_cmd_802_11_key_material_v2(struct mwifiex_private *priv,
                key_info |= KEY_UNICAST | KEY_TX_KEY | KEY_RX_KEY;
 
        if (enc_key->is_wapi_key) {
-               dev_dbg(adapter->dev, "%s: Set WAPI Key\n", __func__);
+               mwifiex_dbg(adapter, INFO, "%s: Set WAPI Key\n", __func__);
                km->key_param_set.key_type = KEY_TYPE_ID_WAPI;
                memcpy(km->key_param_set.key_params.wapi.pn, enc_key->pn,
                       PN_LEN);
@@ -750,7 +753,8 @@ mwifiex_cmd_802_11_key_material_v2(struct mwifiex_private *priv,
                return mwifiex_set_aes_key_v2(priv, cmd, enc_key, km);
 
        if (enc_key->key_len == WLAN_KEY_LEN_TKIP) {
-               dev_dbg(adapter->dev, "%s: Set TKIP Key\n", __func__);
+               mwifiex_dbg(adapter, INFO,
+                           "%s: Set TKIP Key\n", __func__);
                if (enc_key->is_rx_seq_valid)
                        memcpy(km->key_param_set.key_params.tkip.pn,
                               enc_key->pn, enc_key->pn_len);
@@ -814,7 +818,7 @@ mwifiex_cmd_802_11_key_material_v1(struct mwifiex_private *priv,
                memset(&key_material->key_param_set, 0,
                       sizeof(struct mwifiex_ie_type_key_param_set));
        if (enc_key->is_wapi_key) {
-               dev_dbg(priv->adapter->dev, "info: Set WAPI Key\n");
+               mwifiex_dbg(priv->adapter, INFO, "info: Set WAPI Key\n");
                key_material->key_param_set.key_type_id =
                                                cpu_to_le16(KEY_TYPE_ID_WAPI);
                if (cmd_oid == KEY_INFO_ENABLED)
@@ -860,7 +864,7 @@ mwifiex_cmd_802_11_key_material_v1(struct mwifiex_private *priv,
        }
        if (enc_key->key_len == WLAN_KEY_LEN_CCMP) {
                if (enc_key->is_igtk_key) {
-                       dev_dbg(priv->adapter->dev, "cmd: CMAC_AES\n");
+                       mwifiex_dbg(priv->adapter, CMD, "cmd: CMAC_AES\n");
                        key_material->key_param_set.key_type_id =
                                        cpu_to_le16(KEY_TYPE_ID_AES_CMAC);
                        if (cmd_oid == KEY_INFO_ENABLED)
@@ -873,7 +877,7 @@ mwifiex_cmd_802_11_key_material_v1(struct mwifiex_private *priv,
                        key_material->key_param_set.key_info |=
                                                        cpu_to_le16(KEY_IGTK);
                } else {
-                       dev_dbg(priv->adapter->dev, "cmd: WPA_AES\n");
+                       mwifiex_dbg(priv->adapter, CMD, "cmd: WPA_AES\n");
                        key_material->key_param_set.key_type_id =
                                                cpu_to_le16(KEY_TYPE_ID_AES);
                        if (cmd_oid == KEY_INFO_ENABLED)
@@ -892,7 +896,7 @@ mwifiex_cmd_802_11_key_material_v1(struct mwifiex_private *priv,
                                                        cpu_to_le16(KEY_MCAST);
                }
        } else if (enc_key->key_len == WLAN_KEY_LEN_TKIP) {
-               dev_dbg(priv->adapter->dev, "cmd: WPA_TKIP\n");
+               mwifiex_dbg(priv->adapter, CMD, "cmd: WPA_TKIP\n");
                key_material->key_param_set.key_type_id =
                                                cpu_to_le16(KEY_TYPE_ID_TKIP);
                key_material->key_param_set.key_info =
@@ -999,7 +1003,8 @@ static int mwifiex_cmd_802_11d_domain_info(struct mwifiex_private *priv,
                &domain_info->domain;
        u8 no_of_triplet = adapter->domain_reg.no_of_triplet;
 
-       dev_dbg(adapter->dev, "info: 11D: no_of_triplet=0x%x\n", no_of_triplet);
+       mwifiex_dbg(adapter, INFO,
+                   "info: 11D: no_of_triplet=0x%x\n", no_of_triplet);
 
        cmd->command = cpu_to_le16(HostCmd_CMD_802_11D_DOMAIN_INFO);
        domain_info->action = cpu_to_le16(cmd_action);
@@ -1071,6 +1076,26 @@ static int mwifiex_cmd_ibss_coalescing_status(struct host_cmd_ds_command *cmd,
        return 0;
 }
 
+/* This function prepares command buffer to get/set memory location value.
+ */
+static int
+mwifiex_cmd_mem_access(struct host_cmd_ds_command *cmd, u16 cmd_action,
+                      void *pdata_buf)
+{
+       struct mwifiex_ds_mem_rw *mem_rw = (void *)pdata_buf;
+       struct host_cmd_ds_mem_access *mem_access = (void *)&cmd->params.mem;
+
+       cmd->command = cpu_to_le16(HostCmd_CMD_MEM_ACCESS);
+       cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_mem_access) +
+                               S_DS_GEN);
+
+       mem_access->action = cpu_to_le16(cmd_action);
+       mem_access->addr = cpu_to_le32(mem_rw->addr);
+       mem_access->value = cpu_to_le32(mem_rw->value);
+
+       return 0;
+}
+
 /*
  * This function prepares command to set/get register value.
  *
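The new mwifiex_cmd_mem_access above follows the driver's standard prepare-path shape: write the 16-bit command id, derive the wire size from the payload struct plus the generic header (S_DS_GEN), and store every multi-byte field little-endian. A compact sketch of the same pattern with hypothetical ids, sizes, and types:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    #define MY_HDR_LEN	8		/* stand-in for S_DS_GEN */
    #define MY_CMD_MEM	0x0086		/* illustrative command id */

    struct my_mem_access {		/* wire format: all little-endian */
    	__le16 action;
    	__le16 reserved;
    	__le32 addr;
    	__le32 value;
    } __packed;

    struct my_cmd {
    	__le16 command;
    	__le16 size;
    	u8 payload[];
    } __packed;

    static int my_prep_mem_access(struct my_cmd *cmd, u16 action,
    			      u32 addr, u32 value)
    {
    	struct my_mem_access *mem = (void *)cmd->payload;

    	cmd->command = cpu_to_le16(MY_CMD_MEM);
    	/* size covers the payload plus the generic command header */
    	cmd->size = cpu_to_le16(sizeof(*mem) + MY_HDR_LEN);

    	mem->action = cpu_to_le16(action);
    	mem->addr = cpu_to_le32(addr);
    	mem->value = cpu_to_le32(value);
    	return 0;
    }
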
@@ -1215,8 +1240,9 @@ mwifiex_cmd_pcie_host_spec(struct mwifiex_private *priv,
                                                (u32)(card->sleep_cookie_pbase);
                host_spec->sleep_cookie_addr_hi =
                                 (u32)(((u64)(card->sleep_cookie_pbase)) >> 32);
-               dev_dbg(priv->adapter->dev, "sleep_cook_lo phy addr: 0x%x\n",
-                       host_spec->sleep_cookie_addr_lo);
+               mwifiex_dbg(priv->adapter, INFO,
+                           "sleep_cook_lo phy addr: 0x%x\n",
+                           host_spec->sleep_cookie_addr_lo);
        }
 
        return 0;
@@ -1243,7 +1269,8 @@ mwifiex_cmd_802_11_subsc_evt(struct mwifiex_private *priv,
                                S_DS_GEN);
 
        subsc_evt->action = cpu_to_le16(subsc_evt_cfg->action);
-       dev_dbg(priv->adapter->dev, "cmd: action: %d\n", subsc_evt_cfg->action);
+       mwifiex_dbg(priv->adapter, CMD,
+                   "cmd: action: %d\n", subsc_evt_cfg->action);
 
        /*For query requests, no configuration TLV structures are to be added.*/
        if (subsc_evt_cfg->action == HostCmd_ACT_GEN_GET)
@@ -1252,14 +1279,15 @@ mwifiex_cmd_802_11_subsc_evt(struct mwifiex_private *priv,
        subsc_evt->events = cpu_to_le16(subsc_evt_cfg->events);
 
        event_bitmap = subsc_evt_cfg->events;
-       dev_dbg(priv->adapter->dev, "cmd: event bitmap : %16x\n",
-               event_bitmap);
+       mwifiex_dbg(priv->adapter, CMD, "cmd: event bitmap : %16x\n",
+                   event_bitmap);
 
        if (((subsc_evt_cfg->action == HostCmd_ACT_BITWISE_CLR) ||
             (subsc_evt_cfg->action == HostCmd_ACT_BITWISE_SET)) &&
            (event_bitmap == 0)) {
-               dev_dbg(priv->adapter->dev, "Error: No event specified "
-                       "for bitwise action type\n");
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "Error: No event specified\t"
+                           "for bitwise action type\n");
                return -EINVAL;
        }
 
@@ -1284,10 +1312,11 @@ mwifiex_cmd_802_11_subsc_evt(struct mwifiex_private *priv,
                rssi_tlv->abs_value = subsc_evt_cfg->bcn_l_rssi_cfg.abs_value;
                rssi_tlv->evt_freq = subsc_evt_cfg->bcn_l_rssi_cfg.evt_freq;
 
-               dev_dbg(priv->adapter->dev, "Cfg Beacon Low Rssi event, "
-                       "RSSI:-%d dBm, Freq:%d\n",
-                       subsc_evt_cfg->bcn_l_rssi_cfg.abs_value,
-                       subsc_evt_cfg->bcn_l_rssi_cfg.evt_freq);
+               mwifiex_dbg(priv->adapter, EVENT,
+                           "Cfg Beacon Low Rssi event,\t"
+                           "RSSI:-%d dBm, Freq:%d\n",
+                           subsc_evt_cfg->bcn_l_rssi_cfg.abs_value,
+                           subsc_evt_cfg->bcn_l_rssi_cfg.evt_freq);
 
                pos += sizeof(struct mwifiex_ie_types_rssi_threshold);
                le16_add_cpu(&cmd->size,
@@ -1304,10 +1333,11 @@ mwifiex_cmd_802_11_subsc_evt(struct mwifiex_private *priv,
                rssi_tlv->abs_value = subsc_evt_cfg->bcn_h_rssi_cfg.abs_value;
                rssi_tlv->evt_freq = subsc_evt_cfg->bcn_h_rssi_cfg.evt_freq;
 
-               dev_dbg(priv->adapter->dev, "Cfg Beacon High Rssi event, "
-                       "RSSI:-%d dBm, Freq:%d\n",
-                       subsc_evt_cfg->bcn_h_rssi_cfg.abs_value,
-                       subsc_evt_cfg->bcn_h_rssi_cfg.evt_freq);
+               mwifiex_dbg(priv->adapter, EVENT,
+                           "Cfg Beacon High Rssi event,\t"
+                           "RSSI:-%d dBm, Freq:%d\n",
+                           subsc_evt_cfg->bcn_h_rssi_cfg.abs_value,
+                           subsc_evt_cfg->bcn_h_rssi_cfg.evt_freq);
 
                pos += sizeof(struct mwifiex_ie_types_rssi_threshold);
                le16_add_cpu(&cmd->size,
@@ -1463,12 +1493,14 @@ static int mwifiex_cmd_cfg_data(struct mwifiex_private *priv,
                                                data, len);
                if (ret)
                        return ret;
-               dev_dbg(adapter->dev,
-                       "download cfg_data from device tree: %s\n", prop->name);
+               mwifiex_dbg(adapter, INFO,
+                           "download cfg_data from device tree: %s\n",
+                           prop->name);
        } else if (adapter->cal_data->data && adapter->cal_data->size > 0) {
                len = mwifiex_parse_cal_cfg((u8 *)adapter->cal_data->data,
                                            adapter->cal_data->size, data);
-               dev_dbg(adapter->dev, "download cfg_data from config file\n");
+               mwifiex_dbg(adapter, INFO,
+                           "download cfg_data from config file\n");
        } else {
                return -1;
        }
@@ -1583,9 +1615,9 @@ mwifiex_cmd_tdls_oper(struct mwifiex_private *priv,
                tdls_oper->tdls_action = cpu_to_le16(ACT_TDLS_CONFIG);
 
                if (!params) {
-                       dev_err(priv->adapter->dev,
-                               "TDLS config params not available for %pM\n",
-                               oper->peer_mac);
+                       mwifiex_dbg(priv->adapter, ERROR,
+                                   "TDLS config params not available for %pM\n",
+                                   oper->peer_mac);
                        return -ENODATA;
                }
 
@@ -1663,7 +1695,7 @@ mwifiex_cmd_tdls_oper(struct mwifiex_private *priv,
 
                break;
        default:
-               dev_err(priv->adapter->dev, "Unknown TDLS operation\n");
+               mwifiex_dbg(priv->adapter, ERROR, "Unknown TDLS operation\n");
                return -ENOTSUPP;
        }
 
@@ -1870,8 +1902,8 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
                ret = mwifiex_cmd_11n_cfg(priv, cmd_ptr, cmd_action, data_buf);
                break;
        case HostCmd_CMD_WMM_GET_STATUS:
-               dev_dbg(priv->adapter->dev,
-                       "cmd: WMM: WMM_GET_STATUS cmd sent\n");
+               mwifiex_dbg(priv->adapter, CMD,
+                           "cmd: WMM: WMM_GET_STATUS cmd sent\n");
                cmd_ptr->command = cpu_to_le16(HostCmd_CMD_WMM_GET_STATUS);
                cmd_ptr->size =
                        cpu_to_le16(sizeof(struct host_cmd_ds_wmm_get_status) +
@@ -1885,6 +1917,9 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
        case HostCmd_CMD_802_11_SCAN_EXT:
                ret = mwifiex_cmd_802_11_scan_ext(priv, cmd_ptr, data_buf);
                break;
+       case HostCmd_CMD_MEM_ACCESS:
+               ret = mwifiex_cmd_mem_access(cmd_ptr, cmd_action, data_buf);
+               break;
        case HostCmd_CMD_MAC_REG_ACCESS:
        case HostCmd_CMD_BBP_REG_ACCESS:
        case HostCmd_CMD_RF_REG_ACCESS:
@@ -1932,8 +1967,8 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
                                                   data_buf);
                break;
        default:
-               dev_err(priv->adapter->dev,
-                       "PREP_CMD: unknown cmd- %#x\n", cmd_no);
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "PREP_CMD: unknown cmd- %#x\n", cmd_no);
                ret = -1;
                break;
        }
@@ -2024,8 +2059,8 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init)
                                               &sdio_sp_rx_aggr_enable,
                                               true);
                        if (ret) {
-                               dev_err(priv->adapter->dev,
-                                       "error while enabling SP aggregation..disable it");
+                               mwifiex_dbg(priv->adapter, ERROR,
+                                           "error while enabling SP aggregation..disable it");
                                adapter->sdio_rx_aggr_enable = false;
                        }
                }
@@ -2108,8 +2143,8 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init)
                                       HostCmd_ACT_GEN_SET, DOT11D_I,
                                       &state_11d, true);
                if (ret)
-                       dev_err(priv->adapter->dev,
-                               "11D: failed to enable 11D\n");
+                       mwifiex_dbg(priv->adapter, ERROR,
+                                   "11D: failed to enable 11D\n");
        }
 
        /* Send cmd to FW to configure 11n specific configuration
index 88dc6b672ef43adb5cc8c1b836b19a1bed0db5d1..aa5b9a310340df972dd5142ede815366d25a0d5b 100644 (file)
@@ -49,8 +49,9 @@ mwifiex_process_cmdresp_error(struct mwifiex_private *priv,
        struct host_cmd_ds_802_11_ps_mode_enh *pm;
        unsigned long flags;
 
-       dev_err(adapter->dev, "CMD_RESP: cmd %#x error, result=%#x\n",
-               resp->command, resp->result);
+       mwifiex_dbg(adapter, ERROR,
+                   "CMD_RESP: cmd %#x error, result=%#x\n",
+                   resp->command, resp->result);
 
        if (adapter->curr_cmd->wait_q_enabled)
                adapter->cmd_wait_q.status = -1;
@@ -58,9 +59,9 @@ mwifiex_process_cmdresp_error(struct mwifiex_private *priv,
        switch (le16_to_cpu(resp->command)) {
        case HostCmd_CMD_802_11_PS_MODE_ENH:
                pm = &resp->params.psmode_enh;
-               dev_err(adapter->dev,
-                       "PS_MODE_ENH cmd failed: result=0x%x action=0x%X\n",
-                       resp->result, le16_to_cpu(pm->action));
+               mwifiex_dbg(adapter, ERROR,
+                           "PS_MODE_ENH cmd failed: result=0x%x action=0x%X\n",
+                           resp->result, le16_to_cpu(pm->action));
                /* We do not re-try enter-ps command in ad-hoc mode. */
                if (le16_to_cpu(pm->action) == EN_AUTO_PS &&
                    (le16_to_cpu(pm->params.ps_bitmap) & BITMAP_STA_PS) &&
@@ -91,7 +92,8 @@ mwifiex_process_cmdresp_error(struct mwifiex_private *priv,
                break;
 
        case HostCmd_CMD_SDIO_SP_RX_AGGR_CFG:
-               dev_err(priv->adapter->dev, "SDIO RX single-port aggregation Not support\n");
+               mwifiex_dbg(adapter, MSG,
+                           "SDIO RX single-port aggregation Not support\n");
                break;
 
        default:
@@ -187,29 +189,34 @@ static int mwifiex_ret_802_11_snmp_mib(struct mwifiex_private *priv,
        u16 query_type = le16_to_cpu(smib->query_type);
        u32 ul_temp;
 
-       dev_dbg(priv->adapter->dev, "info: SNMP_RESP: oid value = %#x,"
-               " query_type = %#x, buf size = %#x\n",
-               oid, query_type, le16_to_cpu(smib->buf_size));
+       mwifiex_dbg(priv->adapter, INFO,
+                   "info: SNMP_RESP: oid value = %#x,\t"
+                   "query_type = %#x, buf size = %#x\n",
+                   oid, query_type, le16_to_cpu(smib->buf_size));
        if (query_type == HostCmd_ACT_GEN_GET) {
                ul_temp = le16_to_cpu(*((__le16 *) (smib->value)));
                if (data_buf)
                        *data_buf = ul_temp;
                switch (oid) {
                case FRAG_THRESH_I:
-                       dev_dbg(priv->adapter->dev,
-                               "info: SNMP_RESP: FragThsd =%u\n", ul_temp);
+                       mwifiex_dbg(priv->adapter, INFO,
+                                   "info: SNMP_RESP: FragThsd =%u\n",
+                                   ul_temp);
                        break;
                case RTS_THRESH_I:
-                       dev_dbg(priv->adapter->dev,
-                               "info: SNMP_RESP: RTSThsd =%u\n", ul_temp);
+                       mwifiex_dbg(priv->adapter, INFO,
+                                   "info: SNMP_RESP: RTSThsd =%u\n",
+                                   ul_temp);
                        break;
                case SHORT_RETRY_LIM_I:
-                       dev_dbg(priv->adapter->dev,
-                               "info: SNMP_RESP: TxRetryCount=%u\n", ul_temp);
+                       mwifiex_dbg(priv->adapter, INFO,
+                                   "info: SNMP_RESP: TxRetryCount=%u\n",
+                                   ul_temp);
                        break;
                case DTIM_PERIOD_I:
-                       dev_dbg(priv->adapter->dev,
-                               "info: SNMP_RESP: DTIM period=%u\n", ul_temp);
+                       mwifiex_dbg(priv->adapter, INFO,
+                                   "info: SNMP_RESP: DTIM period=%u\n",
+                                   ul_temp);
                default:
                        break;
                }
@@ -426,14 +433,15 @@ static int mwifiex_ret_tx_power_cfg(struct mwifiex_private *priv,
                        priv->tx_power_level = (u16) pg->power_min;
                break;
        default:
-               dev_err(adapter->dev, "CMD_RESP: unknown cmd action %d\n",
-                       action);
+               mwifiex_dbg(adapter, ERROR,
+                           "CMD_RESP: unknown cmd action %d\n",
+                           action);
                return 0;
        }
-       dev_dbg(adapter->dev,
-               "info: Current TxPower Level = %d, Max Power=%d, Min Power=%d\n",
-              priv->tx_power_level, priv->max_tx_power_level,
-              priv->min_tx_power_level);
+       mwifiex_dbg(adapter, INFO,
+                   "info: Current TxPower Level = %d, Max Power=%d, Min Power=%d\n",
+                   priv->tx_power_level, priv->max_tx_power_level,
+                   priv->min_tx_power_level);
 
        return 0;
 }
@@ -454,10 +462,10 @@ static int mwifiex_ret_rf_tx_power(struct mwifiex_private *priv,
                priv->min_tx_power_level = txp->min_power;
        }
 
-       dev_dbg(priv->adapter->dev,
-               "Current TxPower Level=%d, Max Power=%d, Min Power=%d\n",
-               priv->tx_power_level, priv->max_tx_power_level,
-               priv->min_tx_power_level);
+       mwifiex_dbg(priv->adapter, INFO,
+                   "Current TxPower Level=%d, Max Power=%d, Min Power=%d\n",
+                   priv->tx_power_level, priv->max_tx_power_level,
+                   priv->min_tx_power_level);
 
        return 0;
 }
@@ -473,18 +481,18 @@ static int mwifiex_ret_rf_antenna(struct mwifiex_private *priv,
        struct mwifiex_adapter *adapter = priv->adapter;
 
        if (adapter->hw_dev_mcs_support == HT_STREAM_2X2)
-               dev_dbg(adapter->dev,
-                       "RF_ANT_RESP: Tx action = 0x%x, Tx Mode = 0x%04x"
-                       " Rx action = 0x%x, Rx Mode = 0x%04x\n",
-                       le16_to_cpu(ant_mimo->action_tx),
-                       le16_to_cpu(ant_mimo->tx_ant_mode),
-                       le16_to_cpu(ant_mimo->action_rx),
-                       le16_to_cpu(ant_mimo->rx_ant_mode));
+               mwifiex_dbg(adapter, INFO,
+                           "RF_ANT_RESP: Tx action = 0x%x, Tx Mode = 0x%04x\t"
+                           "Rx action = 0x%x, Rx Mode = 0x%04x\n",
+                           le16_to_cpu(ant_mimo->action_tx),
+                           le16_to_cpu(ant_mimo->tx_ant_mode),
+                           le16_to_cpu(ant_mimo->action_rx),
+                           le16_to_cpu(ant_mimo->rx_ant_mode));
        else
-               dev_dbg(adapter->dev,
-                       "RF_ANT_RESP: action = 0x%x, Mode = 0x%04x\n",
-                       le16_to_cpu(ant_siso->action),
-                       le16_to_cpu(ant_siso->ant_mode));
+               mwifiex_dbg(adapter, INFO,
+                           "RF_ANT_RESP: action = 0x%x, Mode = 0x%04x\n",
+                           le16_to_cpu(ant_siso->action),
+                           le16_to_cpu(ant_siso->ant_mode));
 
        return 0;
 }
@@ -502,8 +510,8 @@ static int mwifiex_ret_802_11_mac_address(struct mwifiex_private *priv,
 
        memcpy(priv->curr_addr, cmd_mac_addr->mac_addr, ETH_ALEN);
 
-       dev_dbg(priv->adapter->dev,
-               "info: set mac address: %pM\n", priv->curr_addr);
+       mwifiex_dbg(priv->adapter, INFO,
+                   "info: set mac address: %pM\n", priv->curr_addr);
 
        return 0;
 }
@@ -587,7 +595,8 @@ static int mwifiex_ret_802_11_key_material_v1(struct mwifiex_private *priv,
 
        if (le16_to_cpu(key->action) == HostCmd_ACT_GEN_SET) {
                if ((le16_to_cpu(key->key_param_set.key_info) & KEY_MCAST)) {
-                       dev_dbg(priv->adapter->dev, "info: key: GTK is set\n");
+                       mwifiex_dbg(priv->adapter, INFO,
+                                   "info: key: GTK is set\n");
                        priv->wpa_is_gtk_set = true;
                        priv->scan_block = false;
                }
@@ -617,7 +626,7 @@ static int mwifiex_ret_802_11_key_material_v2(struct mwifiex_private *priv,
        key_v2 = &resp->params.key_material_v2;
        if (le16_to_cpu(key_v2->action) == HostCmd_ACT_GEN_SET) {
                if ((le16_to_cpu(key_v2->key_param_set.key_info) & KEY_MCAST)) {
-                       dev_dbg(priv->adapter->dev, "info: key: GTK is set\n");
+                       mwifiex_dbg(priv->adapter, INFO, "info: key: GTK is set\n");
                        priv->wpa_is_gtk_set = true;
                        priv->scan_block = false;
                }
@@ -663,14 +672,14 @@ static int mwifiex_ret_802_11d_domain_info(struct mwifiex_private *priv,
                                - IEEE80211_COUNTRY_STRING_LEN)
                              / sizeof(struct ieee80211_country_ie_triplet));
 
-       dev_dbg(priv->adapter->dev,
-               "info: 11D Domain Info Resp: no_of_triplet=%d\n",
-               no_of_triplet);
+       mwifiex_dbg(priv->adapter, INFO,
+                   "info: 11D Domain Info Resp: no_of_triplet=%d\n",
+                   no_of_triplet);
 
        if (no_of_triplet > MWIFIEX_MAX_TRIPLET_802_11D) {
-               dev_warn(priv->adapter->dev,
-                        "11D: invalid number of triplets %d returned\n",
-                        no_of_triplet);
+               mwifiex_dbg(priv->adapter, FATAL,
+                           "11D: invalid number of triplets %d returned\n",
+                           no_of_triplet);
                return -1;
        }
 
@@ -680,8 +689,8 @@ static int mwifiex_ret_802_11d_domain_info(struct mwifiex_private *priv,
        case HostCmd_ACT_GEN_GET:
                break;
        default:
-               dev_err(priv->adapter->dev,
-                       "11D: invalid action:%d\n", domain_info->action);
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "11D: invalid action:%d\n", domain_info->action);
                return -1;
        }
 
@@ -741,6 +750,19 @@ mwifiex_ret_p2p_mode_cfg(struct mwifiex_private *priv,
        return 0;
 }
 
+/* This function handles the command response of mem_access command
+ */
+static int
+mwifiex_ret_mem_access(struct mwifiex_private *priv,
+                      struct host_cmd_ds_command *resp, void *pioctl_buf)
+{
+       struct host_cmd_ds_mem_access *mem = (void *)&resp->params.mem;
+
+       priv->mem_rw.addr = le32_to_cpu(mem->addr);
+       priv->mem_rw.value = le32_to_cpu(mem->value);
+
+       return 0;
+}
 /*
  * This function handles the command response of register access.
  *
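mwifiex_ret_mem_access is the other half of the round trip: on a GET the firmware echoes the little-endian address and value back, and the handler byte-swaps them into the host-order result the waiting ioctl reads. The decode side of the earlier sketch, same hypothetical types:

    struct my_mem_rw {		/* host-order result for the caller */
    	u32 addr;
    	u32 value;
    };

    static int my_ret_mem_access(const struct my_mem_access *mem,
    			     struct my_mem_rw *out)
    {
    	out->addr = le32_to_cpu(mem->addr);
    	out->value = le32_to_cpu(mem->value);
    	return 0;
    }
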
@@ -830,12 +852,12 @@ static int mwifiex_ret_ibss_coalescing_status(struct mwifiex_private *priv,
        if (le16_to_cpu(ibss_coal_resp->action) == HostCmd_ACT_GEN_SET)
                return 0;
 
-       dev_dbg(priv->adapter->dev,
-               "info: new BSSID %pM\n", ibss_coal_resp->bssid);
+       mwifiex_dbg(priv->adapter, INFO,
+                   "info: new BSSID %pM\n", ibss_coal_resp->bssid);
 
        /* If rsp has NULL BSSID, Just return..... No Action */
        if (is_zero_ether_addr(ibss_coal_resp->bssid)) {
-               dev_warn(priv->adapter->dev, "new BSSID is NULL\n");
+               mwifiex_dbg(priv->adapter, FATAL, "new BSSID is NULL\n");
                return 0;
        }
 
@@ -871,48 +893,48 @@ static int mwifiex_ret_tdls_oper(struct mwifiex_private *priv,
        case ACT_TDLS_DELETE:
                if (reason) {
                        if (!node || reason == TDLS_ERR_LINK_NONEXISTENT)
-                               dev_dbg(priv->adapter->dev,
-                                       "TDLS link delete for %pM failed: reason %d\n",
-                                       cmd_tdls_oper->peer_mac, reason);
+                               mwifiex_dbg(priv->adapter, ERROR,
+                                           "TDLS link delete for %pM failed: reason %d\n",
+                                           cmd_tdls_oper->peer_mac, reason);
                        else
-                               dev_err(priv->adapter->dev,
-                                       "TDLS link delete for %pM failed: reason %d\n",
-                                       cmd_tdls_oper->peer_mac, reason);
+                               mwifiex_dbg(priv->adapter, ERROR,
+                                           "TDLS link delete for %pM failed: reason %d\n",
+                                           cmd_tdls_oper->peer_mac, reason);
                } else {
-                       dev_dbg(priv->adapter->dev,
-                               "TDLS link delete for %pM successful\n",
-                               cmd_tdls_oper->peer_mac);
+                       mwifiex_dbg(priv->adapter, MSG,
+                                   "TDLS link delete for %pM successful\n",
+                                   cmd_tdls_oper->peer_mac);
                }
                break;
        case ACT_TDLS_CREATE:
                if (reason) {
-                       dev_err(priv->adapter->dev,
-                               "TDLS link creation for %pM failed: reason %d",
-                               cmd_tdls_oper->peer_mac, reason);
+                       mwifiex_dbg(priv->adapter, ERROR,
+                                   "TDLS link creation for %pM failed: reason %d",
+                                   cmd_tdls_oper->peer_mac, reason);
                        if (node && reason != TDLS_ERR_LINK_EXISTS)
                                node->tdls_status = TDLS_SETUP_FAILURE;
                } else {
-                       dev_dbg(priv->adapter->dev,
-                               "TDLS link creation for %pM successful",
-                               cmd_tdls_oper->peer_mac);
+                       mwifiex_dbg(priv->adapter, MSG,
+                                   "TDLS link creation for %pM successful",
+                                   cmd_tdls_oper->peer_mac);
                }
                break;
        case ACT_TDLS_CONFIG:
                if (reason) {
-                       dev_err(priv->adapter->dev,
-                               "TDLS link config for %pM failed, reason %d\n",
-                               cmd_tdls_oper->peer_mac, reason);
+                       mwifiex_dbg(priv->adapter, ERROR,
+                                   "TDLS link config for %pM failed, reason %d\n",
+                                   cmd_tdls_oper->peer_mac, reason);
                        if (node)
                                node->tdls_status = TDLS_SETUP_FAILURE;
                } else {
-                       dev_dbg(priv->adapter->dev,
-                               "TDLS link config for %pM successful\n",
-                               cmd_tdls_oper->peer_mac);
+                       mwifiex_dbg(priv->adapter, MSG,
+                                   "TDLS link config for %pM successful\n",
+                                   cmd_tdls_oper->peer_mac);
                }
                break;
        default:
-               dev_err(priv->adapter->dev,
-                       "Unknown TDLS command action response %d", action);
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "Unknown TDLS command action response %d", action);
                return -1;
        }
 
@@ -929,8 +951,9 @@ static int mwifiex_ret_subsc_evt(struct mwifiex_private *priv,
 
        /* For every subscribe event command (Get/Set/Clear), FW reports the
         * current set of subscribed events*/
-       dev_dbg(priv->adapter->dev, "Bitmap of currently subscribed events: %16x\n",
-               le16_to_cpu(cmd_sub_event->events));
+       mwifiex_dbg(priv->adapter, EVENT,
+                   "Bitmap of currently subscribed events: %16x\n",
+                   le16_to_cpu(cmd_sub_event->events));
 
        return 0;
 }
@@ -940,7 +963,7 @@ static int mwifiex_ret_cfg_data(struct mwifiex_private *priv,
                                struct host_cmd_ds_command *resp)
 {
        if (resp->result != HostCmd_RESULT_OK) {
-               dev_err(priv->adapter->dev, "Cal data cmd resp failed\n");
+               mwifiex_dbg(priv->adapter, ERROR, "Cal data cmd resp failed\n");
                return -1;
        }
 
@@ -1008,8 +1031,8 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
                break;
        case HostCmd_CMD_802_11_BG_SCAN_QUERY:
                ret = mwifiex_ret_802_11_scan(priv, resp);
-               dev_dbg(adapter->dev,
-                       "info: CMD_RESP: BG_SCAN result is ready!\n");
+               mwifiex_dbg(adapter, CMD,
+                           "info: CMD_RESP: BG_SCAN result is ready!\n");
                break;
        case HostCmd_CMD_TXPWR_CFG:
                ret = mwifiex_ret_tx_power_cfg(priv, resp);
@@ -1088,8 +1111,8 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
                                        / MWIFIEX_SDIO_BLOCK_SIZE)
                                       * MWIFIEX_SDIO_BLOCK_SIZE;
                adapter->curr_tx_buf_size = adapter->tx_buf_size;
-               dev_dbg(adapter->dev, "cmd: curr_tx_buf_size=%d\n",
-                       adapter->curr_tx_buf_size);
+               mwifiex_dbg(adapter, CMD, "cmd: curr_tx_buf_size=%d\n",
+                           adapter->curr_tx_buf_size);
 
                if (adapter->if_ops.update_mp_end_port)
                        adapter->if_ops.update_mp_end_port(adapter,
@@ -1103,6 +1126,9 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
        case HostCmd_CMD_802_11_IBSS_COALESCING_STATUS:
                ret = mwifiex_ret_ibss_coalescing_status(priv, resp);
                break;
+       case HostCmd_CMD_MEM_ACCESS:
+               ret = mwifiex_ret_mem_access(priv, resp, data_buf);
+               break;
        case HostCmd_CMD_MAC_REG_ACCESS:
        case HostCmd_CMD_BBP_REG_ACCESS:
        case HostCmd_CMD_RF_REG_ACCESS:
@@ -1146,8 +1172,9 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
                ret = mwifiex_ret_sdio_rx_aggr_cfg(priv, resp);
                break;
        default:
-               dev_err(adapter->dev, "CMD_RESP: unknown cmd response %#x\n",
-                       resp->command);
+               mwifiex_dbg(adapter, ERROR,
+                           "CMD_RESP: unknown cmd response %#x\n",
+                           resp->command);
                break;
        }
 
index 0dc7a1d3993d325a15f84fa447afaa884349eabc..95203780010ae5a0887120c85969f9fdac22a1d6 100644 (file)
@@ -48,7 +48,8 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code)
        if (!priv->media_connected)
                return;
 
-       dev_dbg(adapter->dev, "info: handles disconnect event\n");
+       mwifiex_dbg(adapter, INFO,
+                   "info: handles disconnect event\n");
 
        priv->media_connected = false;
 
@@ -104,12 +105,14 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code)
         * it could be used for re-assoc
         */
 
-       dev_dbg(adapter->dev, "info: previous SSID=%s, SSID len=%u\n",
-               priv->prev_ssid.ssid, priv->prev_ssid.ssid_len);
+       mwifiex_dbg(adapter, INFO,
+                   "info: previous SSID=%s, SSID len=%u\n",
+                   priv->prev_ssid.ssid, priv->prev_ssid.ssid_len);
 
-       dev_dbg(adapter->dev, "info: current SSID=%s, SSID len=%u\n",
-               priv->curr_bss_params.bss_descriptor.ssid.ssid,
-               priv->curr_bss_params.bss_descriptor.ssid.ssid_len);
+       mwifiex_dbg(adapter, INFO,
+                   "info: current SSID=%s, SSID len=%u\n",
+                   priv->curr_bss_params.bss_descriptor.ssid.ssid,
+                   priv->curr_bss_params.bss_descriptor.ssid.ssid_len);
 
        memcpy(&priv->prev_ssid,
               &priv->curr_bss_params.bss_descriptor.ssid,
@@ -127,13 +130,13 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code)
        if (adapter->is_cmd_timedout && adapter->curr_cmd)
                return;
        priv->media_connected = false;
-       dev_dbg(adapter->dev,
-               "info: successfully disconnected from %pM: reason code %d\n",
-               priv->cfg_bssid, reason_code);
+       mwifiex_dbg(adapter, MSG,
+                   "info: successfully disconnected from %pM: reason code %d\n",
+                   priv->cfg_bssid, reason_code);
        if (priv->bss_mode == NL80211_IFTYPE_STATION ||
            priv->bss_mode == NL80211_IFTYPE_P2P_CLIENT) {
                cfg80211_disconnected(priv->netdev, reason_code, NULL, 0,
-                                     GFP_KERNEL);
+                                     false, GFP_KERNEL);
        }
        eth_zero_addr(priv->cfg_bssid);
 
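The extra `false` argument tracks a cfg80211 API change from the same cycle: cfg80211_disconnected() gained a locally_generated flag so userspace can tell host-initiated disconnects from ones the AP or firmware triggered. This path is handling a firmware disconnect event, so it reports the disconnect as not locally generated:

    /* reason_code came from the firmware event, i.e. the peer side
     * ended the connection, hence locally_generated = false
     */
    cfg80211_disconnected(priv->netdev, reason_code, NULL, 0,
    			  false, GFP_KERNEL);
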
@@ -154,13 +157,13 @@ static int mwifiex_parse_tdls_event(struct mwifiex_private *priv,
        /* reserved 2 bytes are not mandatory in tdls event */
        if (event_skb->len < (sizeof(struct mwifiex_tdls_generic_event) -
                              sizeof(u16) - sizeof(adapter->event_cause))) {
-               dev_err(adapter->dev, "Invalid event length!\n");
+               mwifiex_dbg(adapter, ERROR, "Invalid event length!\n");
                return -1;
        }
 
        sta_ptr = mwifiex_get_sta_entry(priv, tdls_evt->peer_mac);
        if (!sta_ptr) {
-               dev_err(adapter->dev, "cannot get sta entry!\n");
+               mwifiex_dbg(adapter, ERROR, "cannot get sta entry!\n");
                return -1;
        }
 
@@ -239,21 +242,21 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
 
        switch (eventcause) {
        case EVENT_DUMMY_HOST_WAKEUP_SIGNAL:
-               dev_err(adapter->dev,
-                       "invalid EVENT: DUMMY_HOST_WAKEUP_SIGNAL, ignore it\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "invalid EVENT: DUMMY_HOST_WAKEUP_SIGNAL, ignore it\n");
                break;
        case EVENT_LINK_SENSED:
-               dev_dbg(adapter->dev, "event: LINK_SENSED\n");
+               mwifiex_dbg(adapter, EVENT, "event: LINK_SENSED\n");
                if (!netif_carrier_ok(priv->netdev))
                        netif_carrier_on(priv->netdev);
                mwifiex_wake_up_net_dev_queue(priv->netdev, adapter);
                break;
 
        case EVENT_DEAUTHENTICATED:
-               dev_dbg(adapter->dev, "event: Deauthenticated\n");
+               mwifiex_dbg(adapter, EVENT, "event: Deauthenticated\n");
                if (priv->wps.session_enable) {
-                       dev_dbg(adapter->dev,
-                               "info: receive deauth event in wps session\n");
+                       mwifiex_dbg(adapter, INFO,
+                                   "info: receive deauth event in wps session\n");
                        break;
                }
                adapter->dbg.num_event_deauth++;
@@ -265,10 +268,10 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
                break;
 
        case EVENT_DISASSOCIATED:
-               dev_dbg(adapter->dev, "event: Disassociated\n");
+               mwifiex_dbg(adapter, EVENT, "event: Disassociated\n");
                if (priv->wps.session_enable) {
-                       dev_dbg(adapter->dev,
-                               "info: receive disassoc event in wps session\n");
+                       mwifiex_dbg(adapter, INFO,
+                                   "info: receive disassoc event in wps session\n");
                        break;
                }
                adapter->dbg.num_event_disassoc++;
@@ -280,7 +283,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
                break;
 
        case EVENT_LINK_LOST:
-               dev_dbg(adapter->dev, "event: Link lost\n");
+               mwifiex_dbg(adapter, EVENT, "event: Link lost\n");
                adapter->dbg.num_event_link_lost++;
                if (priv->media_connected) {
                        reason_code =
@@ -290,7 +293,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
                break;
 
        case EVENT_PS_SLEEP:
-               dev_dbg(adapter->dev, "info: EVENT: SLEEP\n");
+               mwifiex_dbg(adapter, EVENT, "info: EVENT: SLEEP\n");
 
                adapter->ps_state = PS_STATE_PRE_SLEEP;
 
@@ -298,12 +301,12 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
                break;
 
        case EVENT_PS_AWAKE:
-               dev_dbg(adapter->dev, "info: EVENT: AWAKE\n");
+               mwifiex_dbg(adapter, EVENT, "info: EVENT: AWAKE\n");
                if (!adapter->pps_uapsd_mode &&
                    priv->media_connected && adapter->sleep_period.period) {
                                adapter->pps_uapsd_mode = true;
-                               dev_dbg(adapter->dev,
-                                       "event: PPS/UAPSD mode activated\n");
+                               mwifiex_dbg(adapter, EVENT,
+                                           "event: PPS/UAPSD mode activated\n");
                }
                adapter->tx_lock_flag = false;
                if (adapter->pps_uapsd_mode && adapter->gen_null_pkt) {
@@ -333,26 +336,26 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
 
        case EVENT_DEEP_SLEEP_AWAKE:
                adapter->if_ops.wakeup_complete(adapter);
-               dev_dbg(adapter->dev, "event: DS_AWAKE\n");
+               mwifiex_dbg(adapter, EVENT, "event: DS_AWAKE\n");
                if (adapter->is_deep_sleep)
                        adapter->is_deep_sleep = false;
                break;
 
        case EVENT_HS_ACT_REQ:
-               dev_dbg(adapter->dev, "event: HS_ACT_REQ\n");
+               mwifiex_dbg(adapter, EVENT, "event: HS_ACT_REQ\n");
                ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_HS_CFG_ENH,
                                       0, 0, NULL, false);
                break;
 
        case EVENT_MIC_ERR_UNICAST:
-               dev_dbg(adapter->dev, "event: UNICAST MIC ERROR\n");
+               mwifiex_dbg(adapter, EVENT, "event: UNICAST MIC ERROR\n");
                cfg80211_michael_mic_failure(priv->netdev, priv->cfg_bssid,
                                             NL80211_KEYTYPE_PAIRWISE,
                                             -1, NULL, GFP_KERNEL);
                break;
 
        case EVENT_MIC_ERR_MULTICAST:
-               dev_dbg(adapter->dev, "event: MULTICAST MIC ERROR\n");
+               mwifiex_dbg(adapter, EVENT, "event: MULTICAST MIC ERROR\n");
                cfg80211_michael_mic_failure(priv->netdev, priv->cfg_bssid,
                                             NL80211_KEYTYPE_GROUP,
                                             -1, NULL, GFP_KERNEL);
@@ -362,7 +365,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
                break;
 
        case EVENT_ADHOC_BCN_LOST:
-               dev_dbg(adapter->dev, "event: ADHOC_BCN_LOST\n");
+               mwifiex_dbg(adapter, EVENT, "event: ADHOC_BCN_LOST\n");
                priv->adhoc_is_link_sensed = false;
                mwifiex_clean_txrx(priv);
                mwifiex_stop_net_dev_queue(priv->netdev, adapter);
@@ -371,17 +374,17 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
                break;
 
        case EVENT_BG_SCAN_REPORT:
-               dev_dbg(adapter->dev, "event: BGS_REPORT\n");
+               mwifiex_dbg(adapter, EVENT, "event: BGS_REPORT\n");
                ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_BG_SCAN_QUERY,
                                       HostCmd_ACT_GEN_GET, 0, NULL, false);
                break;
 
        case EVENT_PORT_RELEASE:
-               dev_dbg(adapter->dev, "event: PORT RELEASE\n");
+               mwifiex_dbg(adapter, EVENT, "event: PORT RELEASE\n");
                break;
 
        case EVENT_EXT_SCAN_REPORT:
-               dev_dbg(adapter->dev, "event: EXT_SCAN Report\n");
+               mwifiex_dbg(adapter, EVENT, "event: EXT_SCAN Report\n");
                if (adapter->ext_scan)
                        ret = mwifiex_handle_event_ext_scan_report(priv,
                                                adapter->event_skb->data);
@@ -389,7 +392,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
                break;
 
        case EVENT_WMM_STATUS_CHANGE:
-               dev_dbg(adapter->dev, "event: WMM status changed\n");
+               mwifiex_dbg(adapter, EVENT, "event: WMM status changed\n");
                ret = mwifiex_send_cmd(priv, HostCmd_CMD_WMM_GET_STATUS,
                                       0, 0, NULL, false);
                break;
@@ -401,13 +404,13 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
                mwifiex_send_cmd(priv, HostCmd_CMD_RSSI_INFO,
                                 HostCmd_ACT_GEN_GET, 0, NULL, false);
                priv->subsc_evt_rssi_state = RSSI_LOW_RECVD;
-               dev_dbg(adapter->dev, "event: Beacon RSSI_LOW\n");
+               mwifiex_dbg(adapter, EVENT, "event: Beacon RSSI_LOW\n");
                break;
        case EVENT_SNR_LOW:
-               dev_dbg(adapter->dev, "event: Beacon SNR_LOW\n");
+               mwifiex_dbg(adapter, EVENT, "event: Beacon SNR_LOW\n");
                break;
        case EVENT_MAX_FAIL:
-               dev_dbg(adapter->dev, "event: MAX_FAIL\n");
+               mwifiex_dbg(adapter, EVENT, "event: MAX_FAIL\n");
                break;
        case EVENT_RSSI_HIGH:
                cfg80211_cqm_rssi_notify(priv->netdev,
@@ -416,47 +419,47 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
                mwifiex_send_cmd(priv, HostCmd_CMD_RSSI_INFO,
                                 HostCmd_ACT_GEN_GET, 0, NULL, false);
                priv->subsc_evt_rssi_state = RSSI_HIGH_RECVD;
-               dev_dbg(adapter->dev, "event: Beacon RSSI_HIGH\n");
+               mwifiex_dbg(adapter, EVENT, "event: Beacon RSSI_HIGH\n");
                break;
        case EVENT_SNR_HIGH:
-               dev_dbg(adapter->dev, "event: Beacon SNR_HIGH\n");
+               mwifiex_dbg(adapter, EVENT, "event: Beacon SNR_HIGH\n");
                break;
        case EVENT_DATA_RSSI_LOW:
-               dev_dbg(adapter->dev, "event: Data RSSI_LOW\n");
+               mwifiex_dbg(adapter, EVENT, "event: Data RSSI_LOW\n");
                break;
        case EVENT_DATA_SNR_LOW:
-               dev_dbg(adapter->dev, "event: Data SNR_LOW\n");
+               mwifiex_dbg(adapter, EVENT, "event: Data SNR_LOW\n");
                break;
        case EVENT_DATA_RSSI_HIGH:
-               dev_dbg(adapter->dev, "event: Data RSSI_HIGH\n");
+               mwifiex_dbg(adapter, EVENT, "event: Data RSSI_HIGH\n");
                break;
        case EVENT_DATA_SNR_HIGH:
-               dev_dbg(adapter->dev, "event: Data SNR_HIGH\n");
+               mwifiex_dbg(adapter, EVENT, "event: Data SNR_HIGH\n");
                break;
        case EVENT_LINK_QUALITY:
-               dev_dbg(adapter->dev, "event: Link Quality\n");
+               mwifiex_dbg(adapter, EVENT, "event: Link Quality\n");
                break;
        case EVENT_PRE_BEACON_LOST:
-               dev_dbg(adapter->dev, "event: Pre-Beacon Lost\n");
+               mwifiex_dbg(adapter, EVENT, "event: Pre-Beacon Lost\n");
                break;
        case EVENT_IBSS_COALESCED:
-               dev_dbg(adapter->dev, "event: IBSS_COALESCED\n");
+               mwifiex_dbg(adapter, EVENT, "event: IBSS_COALESCED\n");
                ret = mwifiex_send_cmd(priv,
                                HostCmd_CMD_802_11_IBSS_COALESCING_STATUS,
                                HostCmd_ACT_GEN_GET, 0, NULL, false);
                break;
        case EVENT_ADDBA:
-               dev_dbg(adapter->dev, "event: ADDBA Request\n");
+               mwifiex_dbg(adapter, EVENT, "event: ADDBA Request\n");
                mwifiex_send_cmd(priv, HostCmd_CMD_11N_ADDBA_RSP,
                                 HostCmd_ACT_GEN_SET, 0,
                                 adapter->event_body, false);
                break;
        case EVENT_DELBA:
-               dev_dbg(adapter->dev, "event: DELBA Request\n");
+               mwifiex_dbg(adapter, EVENT, "event: DELBA Request\n");
                mwifiex_11n_delete_ba_stream(priv, adapter->event_body);
                break;
        case EVENT_BA_STREAM_TIEMOUT:
-               dev_dbg(adapter->dev, "event:  BA Stream timeout\n");
+               mwifiex_dbg(adapter, EVENT, "event:  BA Stream timeout\n");
                mwifiex_11n_ba_stream_timeout(priv,
                                              (struct host_cmd_ds_11n_batimeout
                                               *)
@@ -464,28 +467,31 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
                break;
        case EVENT_AMSDU_AGGR_CTRL:
                ctrl = le16_to_cpu(*(__le16 *)adapter->event_body);
-               dev_dbg(adapter->dev, "event: AMSDU_AGGR_CTRL %d\n", ctrl);
+               mwifiex_dbg(adapter, EVENT,
+                           "event: AMSDU_AGGR_CTRL %d\n", ctrl);
 
                adapter->tx_buf_size =
                                min_t(u16, adapter->curr_tx_buf_size, ctrl);
-               dev_dbg(adapter->dev, "event: tx_buf_size %d\n",
-                       adapter->tx_buf_size);
+               mwifiex_dbg(adapter, EVENT, "event: tx_buf_size %d\n",
+                           adapter->tx_buf_size);
                break;
 
        case EVENT_WEP_ICV_ERR:
-               dev_dbg(adapter->dev, "event: WEP ICV error\n");
+               mwifiex_dbg(adapter, EVENT, "event: WEP ICV error\n");
                break;
 
        case EVENT_BW_CHANGE:
-               dev_dbg(adapter->dev, "event: BW Change\n");
+               mwifiex_dbg(adapter, EVENT, "event: BW Change\n");
                break;
 
        case EVENT_HOSTWAKE_STAIE:
-               dev_dbg(adapter->dev, "event: HOSTWAKE_STAIE %d\n", eventcause);
+               mwifiex_dbg(adapter, EVENT,
+                           "event: HOSTWAKE_STAIE %d\n", eventcause);
                break;
 
        case EVENT_REMAIN_ON_CHAN_EXPIRED:
-               dev_dbg(adapter->dev, "event: Remain on channel expired\n");
+               mwifiex_dbg(adapter, EVENT,
+                           "event: Remain on channel expired\n");
                cfg80211_remain_on_channel_expired(&priv->wdev,
                                                   priv->roc_cfg.cookie,
                                                   &priv->roc_cfg.chan,
@@ -496,7 +502,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
                break;
 
        case EVENT_CHANNEL_SWITCH_ANN:
-               dev_dbg(adapter->dev, "event: Channel Switch Announcement\n");
+               mwifiex_dbg(adapter, EVENT, "event: Channel Switch Announcement\n");
                priv->csa_expire_time =
                                jiffies + msecs_to_jiffies(DFS_CHAN_MOVE_TIME);
                priv->csa_chan = priv->curr_bss_params.bss_descriptor.channel;
@@ -511,23 +517,23 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
                break;
 
        case EVENT_TX_STATUS_REPORT:
-               dev_dbg(adapter->dev, "event: TX_STATUS Report\n");
+               mwifiex_dbg(adapter, EVENT, "event: TX_STATUS Report\n");
                mwifiex_parse_tx_status_event(priv, adapter->event_body);
                break;
 
        case EVENT_CHANNEL_REPORT_RDY:
-               dev_dbg(adapter->dev, "event: Channel Report\n");
+               mwifiex_dbg(adapter, EVENT, "event: Channel Report\n");
                ret = mwifiex_11h_handle_chanrpt_ready(priv,
                                                       adapter->event_skb);
                break;
        case EVENT_RADAR_DETECTED:
-               dev_dbg(adapter->dev, "event: Radar detected\n");
+               mwifiex_dbg(adapter, EVENT, "event: Radar detected\n");
                ret = mwifiex_11h_handle_radar_detected(priv,
                                                        adapter->event_skb);
                break;
        default:
-               dev_dbg(adapter->dev, "event: unknown event id: %#x\n",
-                       eventcause);
+               mwifiex_dbg(adapter, ERROR, "event: unknown event id: %#x\n",
+                           eventcause);
                break;
        }
 
index 0599e41e253c740ca912f18b38fc05e58decef5f..d8b7d9c20450f704988c22e26ce81656d4e1a621 100644 (file)
@@ -64,7 +64,10 @@ int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter,
                                                  *(cmd_queued->condition),
                                                  (12 * HZ));
        if (status <= 0) {
-               dev_err(adapter->dev, "cmd_wait_q terminated: %d\n", status);
+               if (status == 0)
+                       status = -ETIMEDOUT;
+               mwifiex_dbg(adapter, ERROR,
+                           "cmd_wait_q terminated: %d\n", status);
                mwifiex_cancel_all_pending_cmd(adapter);
                return status;
        }
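This hunk fixes a reporting gap rather than a log style: wait_event_interruptible_timeout() returns 0 on timeout and a negative value when interrupted, so the old `status <= 0` path logged and returned 0 for a timeout. Mapping 0 to -ETIMEDOUT hands callers a real error code. The general shape (wq and done_condition are placeholders for whatever the caller waits on):

    #include <linux/errno.h>
    #include <linux/wait.h>
    #include <linux/printk.h>

    long status;

    status = wait_event_interruptible_timeout(wq, done_condition, 12 * HZ);
    if (status <= 0) {
    	if (status == 0)
    		status = -ETIMEDOUT;	/* timed out, not interrupted */
    	pr_err("wait terminated: %ld\n", status);
    	return status;
    }
    /* condition became true before the timeout */
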
@@ -91,7 +94,8 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv,
        old_pkt_filter = priv->curr_pkt_filter;
 
        if (mcast_list->mode == MWIFIEX_PROMISC_MODE) {
-               dev_dbg(priv->adapter->dev, "info: Enable Promiscuous mode\n");
+               mwifiex_dbg(priv->adapter, INFO,
+                           "info: Enable Promiscuous mode\n");
                priv->curr_pkt_filter |= HostCmd_ACT_MAC_PROMISCUOUS_ENABLE;
                priv->curr_pkt_filter &=
                        ~HostCmd_ACT_MAC_ALL_MULTICAST_ENABLE;
@@ -99,16 +103,16 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv,
                /* Multicast */
                priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_PROMISCUOUS_ENABLE;
                if (mcast_list->mode == MWIFIEX_ALL_MULTI_MODE) {
-                       dev_dbg(priv->adapter->dev,
-                               "info: Enabling All Multicast!\n");
+                       mwifiex_dbg(priv->adapter, INFO,
+                                   "info: Enabling All Multicast!\n");
                        priv->curr_pkt_filter |=
                                HostCmd_ACT_MAC_ALL_MULTICAST_ENABLE;
                } else {
                        priv->curr_pkt_filter &=
                                ~HostCmd_ACT_MAC_ALL_MULTICAST_ENABLE;
-                       dev_dbg(priv->adapter->dev,
-                               "info: Set multicast list=%d\n",
-                               mcast_list->num_multicast_addr);
+                       mwifiex_dbg(priv->adapter, INFO,
+                                   "info: Set multicast list=%d\n",
+                                   mcast_list->num_multicast_addr);
                        /* Send multicast addresses to firmware */
                        ret = mwifiex_send_cmd(priv,
                                               HostCmd_CMD_MAC_MULTICAST_ADR,
@@ -116,9 +120,9 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv,
                                               mcast_list, false);
                }
        }
-       dev_dbg(priv->adapter->dev,
-               "info: old_pkt_filter=%#x, curr_pkt_filter=%#x\n",
-              old_pkt_filter, priv->curr_pkt_filter);
+       mwifiex_dbg(priv->adapter, INFO,
+                   "info: old_pkt_filter=%#x, curr_pkt_filter=%#x\n",
+                   old_pkt_filter, priv->curr_pkt_filter);
        if (old_pkt_filter != priv->curr_pkt_filter) {
                ret = mwifiex_send_cmd(priv, HostCmd_CMD_MAC_CONTROL,
                                       HostCmd_ACT_GEN_SET,
@@ -151,7 +155,8 @@ int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
        rcu_read_unlock();
 
        if (!beacon_ie) {
-               dev_err(priv->adapter->dev, " failed to alloc beacon_ie\n");
+               mwifiex_dbg(priv->adapter, ERROR,
+                           " failed to alloc beacon_ie\n");
                return -ENOMEM;
        }
 
@@ -165,7 +170,8 @@ int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
        bss_desc->bss_band = bss_priv->band;
        bss_desc->fw_tsf = bss_priv->fw_tsf;
        if (bss_desc->cap_info_bitmap & WLAN_CAPABILITY_PRIVACY) {
-               dev_dbg(priv->adapter->dev, "info: InterpretIE: AP WEP enabled\n");
+               mwifiex_dbg(priv->adapter, INFO,
+                           "info: InterpretIE: AP WEP enabled\n");
                bss_desc->privacy = MWIFIEX_802_11_PRIV_FILTER_8021X_WEP;
        } else {
                bss_desc->privacy = MWIFIEX_802_11_PRIV_FILTER_ACCEPT_ALL;
@@ -219,8 +225,8 @@ static int mwifiex_process_country_ie(struct mwifiex_private *priv,
 
        if (!strncmp(priv->adapter->country_code, &country_ie[2], 2)) {
                rcu_read_unlock();
-               wiphy_dbg(priv->wdev.wiphy,
-                         "11D: skip setting domain info in FW\n");
+               mwifiex_dbg(priv->adapter, INFO,
+                           "11D: skip setting domain info in FW\n");
                return 0;
        }
        memcpy(priv->adapter->country_code, &country_ie[2], 2);
@@ -241,8 +247,8 @@ static int mwifiex_process_country_ie(struct mwifiex_private *priv,
 
        if (mwifiex_send_cmd(priv, HostCmd_CMD_802_11D_DOMAIN_INFO,
                             HostCmd_ACT_GEN_SET, 0, NULL, false)) {
-               wiphy_err(priv->adapter->wiphy,
-                         "11D: setting domain info in FW\n");
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "11D: setting domain info in FW fail\n");
                return -1;
        }
 
@@ -304,14 +310,15 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
 
                if (mwifiex_11h_get_csa_closed_channel(priv) ==
                                                        (u8)bss_desc->channel) {
-                       dev_err(adapter->dev,
-                               "Attempt to reconnect on csa closed chan(%d)\n",
-                               bss_desc->channel);
+                       mwifiex_dbg(adapter, ERROR,
+                                   "Attempt to reconnect on csa closed chan(%d)\n",
+                                   bss_desc->channel);
                        goto done;
                }
 
-               dev_dbg(adapter->dev, "info: SSID found in scan list ... "
-                                     "associating...\n");
+               mwifiex_dbg(adapter, INFO,
+                           "info: SSID found in scan list ...\t"
+                           "associating...\n");
 
                mwifiex_stop_net_dev_queue(priv->netdev, adapter);
                if (netif_carrier_ok(priv->netdev))
@@ -353,15 +360,17 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
                        netif_carrier_off(priv->netdev);
 
                if (!ret) {
-                       dev_dbg(adapter->dev, "info: network found in scan"
-                                                       " list. Joining...\n");
+                       mwifiex_dbg(adapter, INFO,
+                                   "info: network found in scan\t"
+                                   " list. Joining...\n");
                        ret = mwifiex_adhoc_join(priv, bss_desc);
                        if (bss)
                                cfg80211_put_bss(priv->adapter->wiphy, bss);
                } else {
-                       dev_dbg(adapter->dev, "info: Network not found in "
-                               "the list, creating adhoc with ssid = %s\n",
-                               req_ssid->ssid);
+                       mwifiex_dbg(adapter, INFO,
+                                   "info: Network not found in\t"
+                                   "the list, creating adhoc with ssid = %s\n",
+                                   req_ssid->ssid);
                        ret = mwifiex_adhoc_start(priv, req_ssid);
                }
        }
@@ -396,8 +405,9 @@ int mwifiex_set_hs_params(struct mwifiex_private *priv, u16 action,
        switch (action) {
        case HostCmd_ACT_GEN_SET:
                if (adapter->pps_uapsd_mode) {
-                       dev_dbg(adapter->dev, "info: Host Sleep IOCTL"
-                               " is blocked in UAPSD/PPS mode\n");
+                       mwifiex_dbg(adapter, INFO,
+                                   "info: Host Sleep IOCTL\t"
+                                   "is blocked in UAPSD/PPS mode\n");
                        status = -1;
                        break;
                }
@@ -494,7 +504,8 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter)
        }
 
        if (adapter->hs_activated) {
-               dev_dbg(adapter->dev, "cmd: HS Already activated\n");
+               mwifiex_dbg(adapter, CMD,
+                           "cmd: HS Already activated\n");
                return true;
        }
 
@@ -510,14 +521,16 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter)
                                                   MWIFIEX_BSS_ROLE_STA),
                                  HostCmd_ACT_GEN_SET, MWIFIEX_SYNC_CMD,
                                  &hscfg)) {
-               dev_err(adapter->dev, "IOCTL request HS enable failed\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "IOCTL request HS enable failed\n");
                return false;
        }
 
        if (wait_event_interruptible_timeout(adapter->hs_activate_wait_q,
                                             adapter->hs_activate_wait_q_woken,
                                             (10 * HZ)) <= 0) {
-               dev_err(adapter->dev, "hs_activate_wait_q terminated\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "hs_activate_wait_q terminated\n");
                return false;
        }
 
@@ -637,10 +650,11 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv,
                dbm = (u16) power_cfg->power_level;
                if ((dbm < priv->min_tx_power_level) ||
                    (dbm > priv->max_tx_power_level)) {
-                       dev_err(priv->adapter->dev, "txpower value %d dBm"
-                               " is out of range (%d dBm-%d dBm)\n",
-                               dbm, priv->min_tx_power_level,
-                               priv->max_tx_power_level);
+                       mwifiex_dbg(priv->adapter, ERROR,
+                                   "txpower value %d dBm\t"
+                                   "is out of range (%d dBm-%d dBm)\n",
+                                   dbm, priv->min_tx_power_level,
+                                   priv->max_tx_power_level);
                        return -1;
                }
        }
@@ -739,14 +753,15 @@ static int mwifiex_set_wpa_ie_helper(struct mwifiex_private *priv,
 {
        if (ie_len) {
                if (ie_len > sizeof(priv->wpa_ie)) {
-                       dev_err(priv->adapter->dev,
-                               "failed to copy WPA IE, too big\n");
+                       mwifiex_dbg(priv->adapter, ERROR,
+                                   "failed to copy WPA IE, too big\n");
                        return -1;
                }
                memcpy(priv->wpa_ie, ie_data_ptr, ie_len);
                priv->wpa_ie_len = (u8) ie_len;
-               dev_dbg(priv->adapter->dev, "cmd: Set Wpa_ie_len=%d IE=%#x\n",
-                       priv->wpa_ie_len, priv->wpa_ie[0]);
+               mwifiex_dbg(priv->adapter, CMD,
+                           "cmd: Set Wpa_ie_len=%d IE=%#x\n",
+                           priv->wpa_ie_len, priv->wpa_ie[0]);
 
                if (priv->wpa_ie[0] == WLAN_EID_VENDOR_SPECIFIC) {
                        priv->sec_info.wpa_enabled = true;
@@ -759,8 +774,9 @@ static int mwifiex_set_wpa_ie_helper(struct mwifiex_private *priv,
        } else {
                memset(priv->wpa_ie, 0, sizeof(priv->wpa_ie));
                priv->wpa_ie_len = 0;
-               dev_dbg(priv->adapter->dev, "info: reset wpa_ie_len=%d IE=%#x\n",
-                       priv->wpa_ie_len, priv->wpa_ie[0]);
+               mwifiex_dbg(priv->adapter, INFO,
+                           "info: reset wpa_ie_len=%d IE=%#x\n",
+                           priv->wpa_ie_len, priv->wpa_ie[0]);
                priv->sec_info.wpa_enabled = false;
                priv->sec_info.wpa2_enabled = false;
        }
@@ -780,23 +796,24 @@ static int mwifiex_set_wapi_ie(struct mwifiex_private *priv,
 {
        if (ie_len) {
                if (ie_len > sizeof(priv->wapi_ie)) {
-                       dev_dbg(priv->adapter->dev,
-                               "info: failed to copy WAPI IE, too big\n");
+                       mwifiex_dbg(priv->adapter, ERROR,
+                                   "info: failed to copy WAPI IE, too big\n");
                        return -1;
                }
                memcpy(priv->wapi_ie, ie_data_ptr, ie_len);
                priv->wapi_ie_len = ie_len;
-               dev_dbg(priv->adapter->dev, "cmd: Set wapi_ie_len=%d IE=%#x\n",
-                       priv->wapi_ie_len, priv->wapi_ie[0]);
+               mwifiex_dbg(priv->adapter, CMD,
+                           "cmd: Set wapi_ie_len=%d IE=%#x\n",
+                           priv->wapi_ie_len, priv->wapi_ie[0]);
 
                if (priv->wapi_ie[0] == WLAN_EID_BSS_AC_ACCESS_DELAY)
                        priv->sec_info.wapi_enabled = true;
        } else {
                memset(priv->wapi_ie, 0, sizeof(priv->wapi_ie));
                priv->wapi_ie_len = ie_len;
-               dev_dbg(priv->adapter->dev,
-                       "info: Reset wapi_ie_len=%d IE=%#x\n",
-                      priv->wapi_ie_len, priv->wapi_ie[0]);
+               mwifiex_dbg(priv->adapter, INFO,
+                           "info: Reset wapi_ie_len=%d IE=%#x\n",
+                           priv->wapi_ie_len, priv->wapi_ie[0]);
                priv->sec_info.wapi_enabled = false;
        }
        return 0;
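
The WPA, WAPI and WPS helpers in this file share one shape, and these hunks
align their overflow paths on the ERROR level. In outline, using the WPA
buffer's names from the hunk above (a condensed restatement, not new logic):

    if (ie_len) {
            if (ie_len > sizeof(priv->wpa_ie))      /* bound-check first */
                    return -1;
            memcpy(priv->wpa_ie, ie_data_ptr, ie_len);
            priv->wpa_ie_len = ie_len;              /* record what we hold */
    } else {
            memset(priv->wpa_ie, 0, sizeof(priv->wpa_ie));
            priv->wpa_ie_len = 0;                   /* zero length = reset */
    }
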
@@ -814,8 +831,8 @@ static int mwifiex_set_wps_ie(struct mwifiex_private *priv,
 {
        if (ie_len) {
                if (ie_len > MWIFIEX_MAX_VSIE_LEN) {
-                       dev_dbg(priv->adapter->dev,
-                               "info: failed to copy WPS IE, too big\n");
+                       mwifiex_dbg(priv->adapter, ERROR,
+                                   "info: failed to copy WPS IE, too big\n");
                        return -1;
                }
 
@@ -825,13 +842,14 @@ static int mwifiex_set_wps_ie(struct mwifiex_private *priv,
 
                memcpy(priv->wps_ie, ie_data_ptr, ie_len);
                priv->wps_ie_len = ie_len;
-               dev_dbg(priv->adapter->dev, "cmd: Set wps_ie_len=%d IE=%#x\n",
-                       priv->wps_ie_len, priv->wps_ie[0]);
+               mwifiex_dbg(priv->adapter, CMD,
+                           "cmd: Set wps_ie_len=%d IE=%#x\n",
+                           priv->wps_ie_len, priv->wps_ie[0]);
        } else {
                kfree(priv->wps_ie);
                priv->wps_ie_len = ie_len;
-               dev_dbg(priv->adapter->dev,
-                       "info: Reset wps_ie_len=%d\n", priv->wps_ie_len);
+               mwifiex_dbg(priv->adapter, INFO,
+                           "info: Reset wps_ie_len=%d\n", priv->wps_ie_len);
        }
        return 0;
 }
@@ -875,8 +893,8 @@ static int mwifiex_sec_ioctl_set_wep_key(struct mwifiex_private *priv,
                /* Copy the required key as the current key */
                wep_key = &priv->wep_key[index];
                if (!wep_key->key_length) {
-                       dev_err(adapter->dev,
-                               "key not set, so cannot enable it\n");
+                       mwifiex_dbg(adapter, ERROR,
+                                   "key not set, so cannot enable it\n");
                        return -1;
                }
 
@@ -953,7 +971,8 @@ static int mwifiex_sec_ioctl_set_wpa_key(struct mwifiex_private *priv,
 
        /* Current driver only supports key length of up to 32 bytes */
        if (encrypt_key->key_len > WLAN_MAX_KEY_LEN) {
-               dev_err(priv->adapter->dev, "key length too long\n");
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "key length too long\n");
                return -1;
        }
 
@@ -1040,7 +1059,7 @@ mwifiex_drv_get_driver_version(struct mwifiex_adapter *adapter, char *version,
 
        snprintf(version, max_len, driver_version, fw_ver);
 
-       dev_dbg(adapter->dev, "info: MWIFIEX VERSION: %s\n", version);
+       mwifiex_dbg(adapter, MSG, "info: MWIFIEX VERSION: %s\n", version);
 
        return 0;
 }
@@ -1128,7 +1147,8 @@ mwifiex_remain_on_chan_cfg(struct mwifiex_private *priv, u16 action,
        }
        if (mwifiex_send_cmd(priv, HostCmd_CMD_REMAIN_ON_CHAN,
                             action, 0, &roc_cfg, true)) {
-               dev_err(priv->adapter->dev, "failed to remain on channel\n");
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "failed to remain on channel\n");
                return -1;
        }
 
@@ -1313,8 +1333,8 @@ mwifiex_set_gen_ie_helper(struct mwifiex_private *priv, u8 *ie_data_ptr,
                if ((pvendor_ie->element_id == WLAN_EID_VENDOR_SPECIFIC) &&
                    (!memcmp(pvendor_ie->oui, wps_oui, sizeof(wps_oui)))) {
                        priv->wps.session_enable = true;
-                       dev_dbg(priv->adapter->dev,
-                               "info: WPS Session Enabled.\n");
+                       mwifiex_dbg(priv->adapter, INFO,
+                                   "info: WPS Session Enabled.\n");
                        ret = mwifiex_set_wps_ie(priv, ie_data_ptr, ie_len);
                }
 
@@ -1361,7 +1381,8 @@ static int mwifiex_misc_ioctl_gen_ie(struct mwifiex_private *priv,
                memset(adapter->arp_filter, 0, sizeof(adapter->arp_filter));
                if (gen_ie->len > ARP_FILTER_MAX_BUF_SIZE) {
                        adapter->arp_filter_size = 0;
-                       dev_err(adapter->dev, "invalid ARP filter size\n");
+                       mwifiex_dbg(adapter, ERROR,
+                                   "invalid ARP filter size\n");
                        return -1;
                } else {
                        memcpy(adapter->arp_filter, gen_ie->ie_data,
@@ -1370,7 +1391,7 @@ static int mwifiex_misc_ioctl_gen_ie(struct mwifiex_private *priv,
                }
                break;
        default:
-               dev_err(adapter->dev, "invalid IE type\n");
+               mwifiex_dbg(adapter, ERROR, "invalid IE type\n");
                return -1;
        }
        return 0;
index b8729c9394e92553b12301317c9b281209e248c6..d4d4cb1ce95b868f6e052b69b2f748b98a4b2b7e 100644 (file)
@@ -141,7 +141,7 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
 
        if (priv->hs2_enabled &&
            mwifiex_discard_gratuitous_arp(priv, skb)) {
-               dev_dbg(priv->adapter->dev, "Bypassed Gratuitous ARP\n");
+               mwifiex_dbg(priv->adapter, INFO, "Bypassed Gratuitous ARP\n");
                dev_kfree_skb_any(skb);
                return 0;
        }
@@ -166,7 +166,8 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
 
        ret = mwifiex_recv_packet(priv, skb);
        if (ret == -1)
-               dev_err(priv->adapter->dev, "recv packet failed\n");
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "recv packet failed\n");
 
        return ret;
 }
@@ -203,9 +204,9 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_private *priv,
        rx_pkt_hdr = (void *)local_rx_pd + rx_pkt_offset;
 
        if ((rx_pkt_offset + rx_pkt_length) > (u16) skb->len) {
-               dev_err(adapter->dev,
-                       "wrong rx packet: len=%d, rx_pkt_offset=%d, rx_pkt_length=%d\n",
-                       skb->len, rx_pkt_offset, rx_pkt_length);
+               mwifiex_dbg(adapter, ERROR,
+                           "wrong rx packet: len=%d, rx_pkt_offset=%d, rx_pkt_length=%d\n",
+                           skb->len, rx_pkt_offset, rx_pkt_length);
                priv->stats.rx_dropped++;
                dev_kfree_skb_any(skb);
                return ret;
@@ -214,7 +215,7 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_private *priv,
        if (rx_pkt_type == PKT_TYPE_MGMT) {
                ret = mwifiex_process_mgmt_packet(priv, skb);
                if (ret)
-                       dev_err(adapter->dev, "Rx of mgmt packet failed");
+                       mwifiex_dbg(adapter, ERROR, "Rx of mgmt packet failed");
                dev_kfree_skb_any(skb);
                return ret;
        }
index 5ce2d9a4f9193a34e9c197d63579b8aba9756d81..355ac5904fac1174e0ee200dcda012f06313a963 100644 (file)
@@ -53,7 +53,8 @@ void *mwifiex_process_sta_txpd(struct mwifiex_private *priv,
                       INTF_HEADER_LEN;
 
        if (!skb->len) {
-               dev_err(adapter->dev, "Tx: bad packet length: %d\n", skb->len);
+               mwifiex_dbg(adapter, ERROR,
+                           "Tx: bad packet length: %d\n", skb->len);
                tx_info->status_code = -1;
                return skb->data;
        }
@@ -184,21 +185,24 @@ int mwifiex_send_null_packet(struct mwifiex_private *priv, u8 flags)
        switch (ret) {
        case -EBUSY:
                dev_kfree_skb_any(skb);
-               dev_err(adapter->dev, "%s: host_to_card failed: ret=%d\n",
-                       __func__, ret);
+               mwifiex_dbg(adapter, ERROR,
+                           "%s: host_to_card failed: ret=%d\n",
+                           __func__, ret);
                adapter->dbg.num_tx_host_to_card_failure++;
                break;
        case -1:
                adapter->data_sent = false;
                dev_kfree_skb_any(skb);
-               dev_err(adapter->dev, "%s: host_to_card failed: ret=%d\n",
-                       __func__, ret);
+               mwifiex_dbg(adapter, ERROR,
+                           "%s: host_to_card failed: ret=%d\n",
+                           __func__, ret);
                adapter->dbg.num_tx_host_to_card_failure++;
                break;
        case 0:
                dev_kfree_skb_any(skb);
-               dev_dbg(adapter->dev, "data: %s: host_to_card succeeded\n",
-                       __func__);
+               mwifiex_dbg(adapter, DATA,
+                           "data: %s: host_to_card succeeded\n",
+                           __func__);
                adapter->tx_lock_flag = true;
                break;
        case -EINPROGRESS:
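
For quick reference, the host_to_card() outcomes this switch decodes (a
summary of the cases visible in the hunk; the -EINPROGRESS arm is cut off by
the hunk boundary, so its handling is not shown here):

    /* -EBUSY       drop the skb, log, bump num_tx_host_to_card_failure
     * -1           clear data_sent, drop the skb, log, bump failure count
     *  0           sent: drop the local skb, take tx_lock_flag
     * -EINPROGRESS transfer still in flight (handling elided by the hunk)
     */
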
index 087d84762cd34b580c0a81ccf6d8dd151cb7fd78..2faa1bc42abee2eb838d65c6c75c33a243f53875 100644 (file)
@@ -37,7 +37,7 @@ static void mwifiex_restore_tdls_packets(struct mwifiex_private *priv,
        u32 tid;
        u8 tid_down;
 
-       dev_dbg(priv->adapter->dev, "%s: %pM\n", __func__, mac);
+       mwifiex_dbg(priv->adapter, DATA, "%s: %pM\n", __func__, mac);
        spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
 
        skb_queue_walk_safe(&priv->tdls_txq, skb, tmp) {
@@ -94,7 +94,7 @@ static void mwifiex_hold_tdls_packets(struct mwifiex_private *priv,
        unsigned long flags;
        int i;
 
-       dev_dbg(priv->adapter->dev, "%s: %pM\n", __func__, mac);
+       mwifiex_dbg(priv->adapter, DATA, "%s: %pM\n", __func__, mac);
        spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
 
        for (i = 0; i < MAX_NUM_TID; i++) {
@@ -132,8 +132,8 @@ mwifiex_tdls_append_rates_ie(struct mwifiex_private *priv,
        supp_rates_size = min_t(u16, rates_size, MWIFIEX_TDLS_SUPPORTED_RATES);
 
        if (skb_tailroom(skb) < rates_size + 4) {
-               dev_err(priv->adapter->dev,
-                       "Insuffient space while adding rates\n");
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "Insuffient space while adding rates\n");
                return -ENOMEM;
        }
 
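
The skb_tailroom() test above is the standard guard before appending to an
skb: check the free bytes past skb->tail, then extend with skb_put() and
copy. A minimal sketch ('needed' and 'rates' are placeholder names):

    u8 *pos;

    if (skb_tailroom(skb) < needed)   /* free space after skb->tail */
            return -ENOMEM;
    pos = skb_put(skb, needed);       /* grow the data area by 'needed' */
    memcpy(pos, rates, needed);       /* safe: tailroom was verified */
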
@@ -199,8 +199,8 @@ mwifiex_tdls_add_ht_oper(struct mwifiex_private *priv, const u8 *mac,
 
        sta_ptr = mwifiex_get_sta_entry(priv, mac);
        if (unlikely(!sta_ptr)) {
-               dev_warn(priv->adapter->dev,
-                        "TDLS peer station not found in list\n");
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "TDLS peer station not found in list\n");
                return -1;
        }
 
@@ -247,15 +247,16 @@ static int mwifiex_tdls_add_vht_oper(struct mwifiex_private *priv,
 
        sta_ptr = mwifiex_get_sta_entry(priv, mac);
        if (unlikely(!sta_ptr)) {
-               dev_warn(adapter->dev, "TDLS peer station not found in list\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "TDLS peer station not found in list\n");
                return -1;
        }
 
        if (!mwifiex_is_bss_in_11ac_mode(priv)) {
                if (sta_ptr->tdls_cap.extcap.ext_capab[7] &
                   WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED) {
-                       dev_dbg(adapter->dev,
-                               "TDLS peer doesn't support wider bandwitdh\n");
+                       mwifiex_dbg(adapter, WARN,
+                                   "TDLS peer doesn't support wider bandwidth\n");
                        return 0;
                }
        } else {
@@ -554,7 +555,7 @@ static int mwifiex_prep_tdls_encap_data(struct mwifiex_private *priv,
                tf->u.discover_req.dialog_token = dialog_token;
                break;
        default:
-               dev_err(priv->adapter->dev, "Unknown TDLS frame type.\n");
+               mwifiex_dbg(priv->adapter, ERROR, "Unknown TDLS frame type.\n");
                return -EINVAL;
        }
 
@@ -608,8 +609,8 @@ int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv, const u8 *peer,
 
        skb = dev_alloc_skb(skb_len);
        if (!skb) {
-               dev_err(priv->adapter->dev,
-                       "allocate skb failed for management frame\n");
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "allocate skb failed for management frame\n");
                return -ENOMEM;
        }
        skb_reserve(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
@@ -742,7 +743,7 @@ mwifiex_construct_tdls_action_frame(struct mwifiex_private *priv,
                mwifiex_tdls_add_qos_capab(skb);
                break;
        default:
-               dev_err(priv->adapter->dev, "Unknown TDLS action frame type\n");
+               mwifiex_dbg(priv->adapter, ERROR, "Unknown TDLS action frame type\n");
                return -EINVAL;
        }
 
@@ -781,8 +782,8 @@ int mwifiex_send_tdls_action_frame(struct mwifiex_private *priv, const u8 *peer,
 
        skb = dev_alloc_skb(skb_len);
        if (!skb) {
-               dev_err(priv->adapter->dev,
-                       "allocate skb failed for management frame\n");
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "allocate skb failed for management frame\n");
                return -ENOMEM;
        }
 
@@ -848,8 +849,8 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
 
        peer = buf + ETH_ALEN;
        action = *(buf + sizeof(struct ethhdr) + 2);
-       dev_dbg(priv->adapter->dev,
-               "rx:tdls action: peer=%pM, action=%d\n", peer, action);
+       mwifiex_dbg(priv->adapter, DATA,
+                   "rx:tdls action: peer=%pM, action=%d\n", peer, action);
 
        switch (action) {
        case WLAN_TDLS_SETUP_REQUEST:
@@ -880,7 +881,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
                ie_len = len - sizeof(struct ethhdr) - TDLS_CONFIRM_FIX_LEN;
                break;
        default:
-               dev_dbg(priv->adapter->dev, "Unknown TDLS frame type.\n");
+               mwifiex_dbg(priv->adapter, ERROR, "Unknown TDLS frame type.\n");
                return;
        }
 
@@ -967,8 +968,8 @@ mwifiex_tdls_process_config_link(struct mwifiex_private *priv, const u8 *peer)
        sta_ptr = mwifiex_get_sta_entry(priv, peer);
 
        if (!sta_ptr || sta_ptr->tdls_status == TDLS_SETUP_FAILURE) {
-               dev_err(priv->adapter->dev,
-                       "link absent for peer %pM; cannot config\n", peer);
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "link absent for peer %pM; cannot config\n", peer);
                return -EINVAL;
        }
 
@@ -988,8 +989,8 @@ mwifiex_tdls_process_create_link(struct mwifiex_private *priv, const u8 *peer)
        sta_ptr = mwifiex_get_sta_entry(priv, peer);
 
        if (sta_ptr && sta_ptr->tdls_status == TDLS_SETUP_INPROGRESS) {
-               dev_dbg(priv->adapter->dev,
-                       "Setup already in progress for peer %pM\n", peer);
+               mwifiex_dbg(priv->adapter, WARN,
+                           "Setup already in progress for peer %pM\n", peer);
                return 0;
        }
 
@@ -1046,8 +1047,8 @@ mwifiex_tdls_process_enable_link(struct mwifiex_private *priv, const u8 *peer)
        sta_ptr = mwifiex_get_sta_entry(priv, peer);
 
        if (sta_ptr && (sta_ptr->tdls_status != TDLS_SETUP_FAILURE)) {
-               dev_dbg(priv->adapter->dev,
-                       "tdls: enable link %pM success\n", peer);
+               mwifiex_dbg(priv->adapter, MSG,
+                           "tdls: enable link %pM success\n", peer);
 
                sta_ptr->tdls_status = TDLS_SETUP_COMPLETE;
 
@@ -1076,8 +1077,8 @@ mwifiex_tdls_process_enable_link(struct mwifiex_private *priv, const u8 *peer)
                mwifiex_auto_tdls_update_peer_status(priv, peer,
                                                     TDLS_SETUP_COMPLETE);
        } else {
-               dev_dbg(priv->adapter->dev,
-                       "tdls: enable link %pM failed\n", peer);
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "tdls: enable link %pM failed\n", peer);
                if (sta_ptr) {
                        mwifiex_11n_cleanup_reorder_tbl(priv);
                        spin_lock_irqsave(&priv->wmm.ra_list_spinlock,
@@ -1180,9 +1181,9 @@ void mwifiex_disable_all_tdls_links(struct mwifiex_private *priv)
                tdls_oper.tdls_action = MWIFIEX_TDLS_DISABLE_LINK;
                if (mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_OPER,
                                     HostCmd_ACT_GEN_SET, 0, &tdls_oper, false))
-                       dev_warn(priv->adapter->dev,
-                                "Disable link failed for TDLS peer %pM",
-                                sta_ptr->mac_addr);
+                       mwifiex_dbg(priv->adapter, ERROR,
+                                   "Disable link failed for TDLS peer %pM",
+                                   sta_ptr->mac_addr);
        }
 
        mwifiex_del_all_sta_list(priv);
@@ -1204,9 +1205,9 @@ int mwifiex_tdls_check_tx(struct mwifiex_private *priv, struct sk_buff *skb)
                            (peer->failure_count <
                             MWIFIEX_TDLS_MAX_FAIL_COUNT)) {
                                peer->tdls_status = TDLS_SETUP_INPROGRESS;
-                               dev_dbg(priv->adapter->dev,
-                                       "setup TDLS link, peer=%pM rssi=%d\n",
-                                       peer->mac_addr, peer->rssi);
+                               mwifiex_dbg(priv->adapter, INFO,
+                                           "setup TDLS link, peer=%pM rssi=%d\n",
+                                           peer->mac_addr, peer->rssi);
 
                                cfg80211_tdls_oper_request(priv->netdev,
                                                           peer->mac_addr,
@@ -1272,8 +1273,8 @@ void mwifiex_add_auto_tdls_peer(struct mwifiex_private *priv, const u8 *mac)
                tdls_peer->rssi_jiffies = jiffies;
                INIT_LIST_HEAD(&tdls_peer->list);
                list_add_tail(&tdls_peer->list, &priv->auto_tdls_list);
-               dev_dbg(priv->adapter->dev, "Add auto TDLS peer= %pM to list\n",
-                       mac);
+               mwifiex_dbg(priv->adapter, INFO,
+                           "Add auto TDLS peer= %pM to list\n", mac);
        }
 
        spin_unlock_irqrestore(&priv->auto_tdls_lock, flags);
@@ -1341,8 +1342,8 @@ void mwifiex_check_auto_tdls(unsigned long context)
                return;
 
        if (!priv->auto_tdls_timer_active) {
-               dev_dbg(priv->adapter->dev,
-                       "auto TDLS timer inactive; return");
+               mwifiex_dbg(priv->adapter, INFO,
+                           "auto TDLS timer inactive; return");
                return;
        }
 
@@ -1368,9 +1369,9 @@ void mwifiex_check_auto_tdls(unsigned long context)
                     !tdls_peer->rssi) &&
                    tdls_peer->tdls_status == TDLS_SETUP_COMPLETE) {
                        tdls_peer->tdls_status = TDLS_LINK_TEARDOWN;
-                       dev_dbg(priv->adapter->dev,
-                               "teardown TDLS link,peer=%pM rssi=%d\n",
-                               tdls_peer->mac_addr, -tdls_peer->rssi);
+                       mwifiex_dbg(priv->adapter, MSG,
+                                   "teardown TDLS link,peer=%pM rssi=%d\n",
+                                   tdls_peer->mac_addr, -tdls_peer->rssi);
                        tdls_peer->do_discover = true;
                        priv->check_tdls_tx = true;
                        cfg80211_tdls_oper_request(priv->netdev,
@@ -1384,9 +1385,10 @@ void mwifiex_check_auto_tdls(unsigned long context)
                           MWIFIEX_TDLS_MAX_FAIL_COUNT) {
                                priv->check_tdls_tx = true;
                                tdls_peer->do_setup = true;
-                               dev_dbg(priv->adapter->dev,
-                                       "check TDLS with peer=%pM rssi=%d\n",
-                                       tdls_peer->mac_addr, -tdls_peer->rssi);
+                               mwifiex_dbg(priv->adapter, INFO,
+                                           "check TDLS with peer=%pM\t"
+                                           "rssi=%d\n", tdls_peer->mac_addr,
+                                           tdls_peer->rssi);
                }
        }
        spin_unlock_irqrestore(&priv->auto_tdls_lock, flags);
index a245f444aeec17e23c027b60d027239050fb4497..28dcc84a34d2b2c1878cace213c8893b033c0580 100644 (file)
@@ -50,11 +50,15 @@ int mwifiex_handle_rx_packet(struct mwifiex_adapter *adapter,
                priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
 
        if (!priv) {
-               dev_err(adapter->dev, "data: priv not found. Drop RX packet\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "data: priv not found. Drop RX packet\n");
                dev_kfree_skb_any(skb);
                return -1;
        }
 
+       mwifiex_dbg_dump(adapter, DAT_D, "rx pkt:", skb->data,
+                        min_t(size_t, skb->len, DEBUG_DUMP_DATA_MAX_LEN));
+
        memset(rx_info, 0, sizeof(*rx_info));
        rx_info->bss_num = priv->bss_num;
        rx_info->bss_type = priv->bss_type;
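
mwifiex_dbg_dump() is the hex-dump companion of mwifiex_dbg(), added here to
trace RX/TX payloads capped at DEBUG_DUMP_DATA_MAX_LEN. A plausible sketch of
what it amounts to (assumed, not quoted: both the debug_mask gating and the
print_hex_dump() backend are guesses at the driver's definition):

    #define mwifiex_dbg_dump(adapter, mask, str, buf, len)                \
    do {                                                                  \
            if ((adapter)->debug_mask & MWIFIEX_DBG_##mask)               \
                    print_hex_dump(KERN_DEBUG, str, DUMP_PREFIX_OFFSET,   \
                                   16, 1, buf, len, false);               \
    } while (0)
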
@@ -112,10 +116,12 @@ int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb,
                                                           skb, tx_param);
                }
        }
+       mwifiex_dbg_dump(adapter, DAT_D, "tx pkt:", skb->data,
+                        min_t(size_t, skb->len, DEBUG_DUMP_DATA_MAX_LEN));
 
        switch (ret) {
        case -ENOSR:
-               dev_dbg(adapter->dev, "data: -ENOSR is returned\n");
+               mwifiex_dbg(adapter, ERROR, "data: -ENOSR is returned\n");
                break;
        case -EBUSY:
                if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
@@ -124,13 +130,14 @@ int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb,
                                if (local_tx_pd)
                                        local_tx_pd->flags = 0;
                }
-               dev_dbg(adapter->dev, "data: -EBUSY is returned\n");
+               mwifiex_dbg(adapter, ERROR, "data: -EBUSY is returned\n");
                break;
        case -1:
                if (adapter->iface_type != MWIFIEX_PCIE)
                        adapter->data_sent = false;
-               dev_err(adapter->dev, "mwifiex_write_data_async failed: 0x%X\n",
-                       ret);
+               mwifiex_dbg(adapter, ERROR,
+                           "mwifiex_write_data_async failed: 0x%X\n",
+                           ret);
                adapter->dbg.num_tx_host_to_card_failure++;
                mwifiex_write_data_complete(adapter, skb, 0, ret);
                break;
@@ -162,7 +169,8 @@ static int mwifiex_host_to_card(struct mwifiex_adapter *adapter,
        priv = mwifiex_get_priv_by_id(adapter, tx_info->bss_num,
                                      tx_info->bss_type);
        if (!priv) {
-               dev_err(adapter->dev, "data: priv not found. Drop TX packet\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "data: priv not found. Drop TX packet\n");
                adapter->dbg.num_tx_host_to_card_failure++;
                mwifiex_write_data_complete(adapter, skb, 0, 0);
                return ret;
@@ -187,7 +195,7 @@ static int mwifiex_host_to_card(struct mwifiex_adapter *adapter,
        }
        switch (ret) {
        case -ENOSR:
-               dev_err(adapter->dev, "data: -ENOSR is returned\n");
+               mwifiex_dbg(adapter, ERROR, "data: -ENOSR is returned\n");
                break;
        case -EBUSY:
                if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
@@ -202,13 +210,13 @@ static int mwifiex_host_to_card(struct mwifiex_adapter *adapter,
                        atomic_add(tx_info->aggr_num, &adapter->tx_queued);
                else
                        atomic_inc(&adapter->tx_queued);
-               dev_dbg(adapter->dev, "data: -EBUSY is returned\n");
+               mwifiex_dbg(adapter, ERROR, "data: -EBUSY is returned\n");
                break;
        case -1:
                if (adapter->iface_type != MWIFIEX_PCIE)
                        adapter->data_sent = false;
-               dev_err(adapter->dev, "mwifiex_write_data_async failed: 0x%X\n",
-                       ret);
+               mwifiex_dbg(adapter, ERROR,
+                           "mwifiex_write_data_async failed: 0x%X\n", ret);
                adapter->dbg.num_tx_host_to_card_failure++;
                mwifiex_write_data_complete(adapter, skb, 0, ret);
                break;
@@ -319,7 +327,7 @@ int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
                txq = netdev_get_tx_queue(priv->netdev, index);
                if (netif_tx_queue_stopped(txq)) {
                        netif_tx_wake_queue(txq);
-                       dev_dbg(adapter->dev, "wake queue: %d\n", index);
+                       mwifiex_dbg(adapter, DATA, "wake queue: %d\n", index);
                }
        }
 done:
index f5c2af01ba0a96c707edb76760d945154ddb30f2..a4ae28353b6d8bb2fd80c977aad8a7c06d78d6a3 100644 (file)
@@ -167,7 +167,7 @@ mwifiex_set_ht_params(struct mwifiex_private *priv,
        ht_ie = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, params->beacon.tail,
                                 params->beacon.tail_len);
        if (ht_ie) {
-               memcpy(&bss_cfg->ht_cap, ht_ie,
+               memcpy(&bss_cfg->ht_cap, ht_ie + 2,
                       sizeof(struct ieee80211_ht_cap));
                cap_info = le16_to_cpu(bss_cfg->ht_cap.cap_info);
                memset(&bss_cfg->ht_cap.mcs, 0,
@@ -184,8 +184,8 @@ mwifiex_set_ht_params(struct mwifiex_private *priv,
                        bss_cfg->ht_cap.mcs.rx_mask[1] = 0xff;
                        break;
                default:
-                       dev_warn(priv->adapter->dev,
-                                "Unsupported RX-STBC, default to 2x2\n");
+                       mwifiex_dbg(priv->adapter, WARN,
+                                   "Unsupported RX-STBC, default to 2x2\n");
                        bss_cfg->ht_cap.mcs.rx_mask[0] = 0xff;
                        bss_cfg->ht_cap.mcs.rx_mask[1] = 0xff;
                        break;
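
The memcpy change in this hunk is a genuine fix riding along with the log
conversion: cfg80211_find_ie() returns a pointer to the start of the element,
i.e. to its two-byte header, so copying from ht_ie pulled the element ID and
length bytes into struct ieee80211_ht_cap. Skipping the header:

    /* 802.11 information element layout:
     *   ht_ie[0]   element ID (WLAN_EID_HT_CAPABILITY)
     *   ht_ie[1]   payload length
     *   ht_ie[2..] payload, here a struct ieee80211_ht_cap
     */
    memcpy(&bss_cfg->ht_cap, ht_ie + 2, sizeof(struct ieee80211_ht_cap));
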
@@ -767,8 +767,8 @@ int mwifiex_uap_prepare_cmd(struct mwifiex_private *priv, u16 cmd_no,
                        return -1;
                break;
        default:
-               dev_err(priv->adapter->dev,
-                       "PREP_CMD: unknown cmd %#x\n", cmd_no);
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "PREP_CMD: unknown cmd %#x\n", cmd_no);
                return -1;
        }
 
@@ -806,24 +806,28 @@ int mwifiex_config_start_uap(struct mwifiex_private *priv,
                             struct mwifiex_uap_bss_param *bss_cfg)
 {
        if (mwifiex_del_mgmt_ies(priv))
-               dev_err(priv->adapter->dev, "Failed to delete mgmt IEs!\n");
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "Failed to delete mgmt IEs!\n");
 
        if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_STOP,
                             HostCmd_ACT_GEN_SET, 0, NULL, true)) {
-               dev_err(priv->adapter->dev, "Failed to stop the BSS\n");
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "Failed to stop the BSS\n");
                return -1;
        }
 
        if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_SYS_CONFIG,
                             HostCmd_ACT_GEN_SET,
                             UAP_BSS_PARAMS_I, bss_cfg, false)) {
-               dev_err(priv->adapter->dev, "Failed to set the SSID\n");
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "Failed to set the SSID\n");
                return -1;
        }
 
        if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_START,
                             HostCmd_ACT_GEN_SET, 0, NULL, false)) {
-               dev_err(priv->adapter->dev, "Failed to start the BSS\n");
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "Failed to start the BSS\n");
                return -1;
        }
 
index f4794cdc36d229612931a22c85e6524171afcb3b..06ce3fe660f138d51873838eda532207d9665004 100644 (file)
@@ -80,8 +80,8 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
 
                node = mwifiex_add_sta_entry(priv, event->sta_addr);
                if (!node) {
-                       dev_warn(adapter->dev,
-                                "could not create station entry!\n");
+                       mwifiex_dbg(adapter, ERROR,
+                                   "could not create station entry!\n");
                        return -1;
                }
 
@@ -128,7 +128,8 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
                mwifiex_wake_up_net_dev_queue(priv->netdev, adapter);
                break;
        case EVENT_UAP_BSS_START:
-               dev_dbg(adapter->dev, "AP EVENT: event id: %#x\n", eventcause);
+               mwifiex_dbg(adapter, EVENT,
+                           "AP EVENT: event id: %#x\n", eventcause);
                memcpy(priv->netdev->dev_addr, adapter->event_body + 2,
                       ETH_ALEN);
                if (priv->hist_data)
@@ -136,50 +137,53 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
                break;
        case EVENT_UAP_MIC_COUNTERMEASURES:
                /* For future development */
-               dev_dbg(adapter->dev, "AP EVENT: event id: %#x\n", eventcause);
+               mwifiex_dbg(adapter, EVENT,
+                           "AP EVENT: event id: %#x\n", eventcause);
                break;
        case EVENT_AMSDU_AGGR_CTRL:
                ctrl = le16_to_cpu(*(__le16 *)adapter->event_body);
-               dev_dbg(adapter->dev, "event: AMSDU_AGGR_CTRL %d\n", ctrl);
+               mwifiex_dbg(adapter, EVENT,
+                           "event: AMSDU_AGGR_CTRL %d\n", ctrl);
 
                if (priv->media_connected) {
                        adapter->tx_buf_size =
                                min_t(u16, adapter->curr_tx_buf_size, ctrl);
-                       dev_dbg(adapter->dev, "event: tx_buf_size %d\n",
-                               adapter->tx_buf_size);
+                       mwifiex_dbg(adapter, EVENT,
+                                   "event: tx_buf_size %d\n",
+                                   adapter->tx_buf_size);
                }
                break;
        case EVENT_ADDBA:
-               dev_dbg(adapter->dev, "event: ADDBA Request\n");
+               mwifiex_dbg(adapter, EVENT, "event: ADDBA Request\n");
                if (priv->media_connected)
                        mwifiex_send_cmd(priv, HostCmd_CMD_11N_ADDBA_RSP,
                                         HostCmd_ACT_GEN_SET, 0,
                                         adapter->event_body, false);
                break;
        case EVENT_DELBA:
-               dev_dbg(adapter->dev, "event: DELBA Request\n");
+               mwifiex_dbg(adapter, EVENT, "event: DELBA Request\n");
                if (priv->media_connected)
                        mwifiex_11n_delete_ba_stream(priv, adapter->event_body);
                break;
        case EVENT_BA_STREAM_TIEMOUT:
-               dev_dbg(adapter->dev, "event:  BA Stream timeout\n");
+               mwifiex_dbg(adapter, EVENT, "event:  BA Stream timeout\n");
                if (priv->media_connected) {
                        ba_timeout = (void *)adapter->event_body;
                        mwifiex_11n_ba_stream_timeout(priv, ba_timeout);
                }
                break;
        case EVENT_EXT_SCAN_REPORT:
-               dev_dbg(adapter->dev, "event: EXT_SCAN Report\n");
+               mwifiex_dbg(adapter, EVENT, "event: EXT_SCAN Report\n");
                if (adapter->ext_scan)
                        return mwifiex_handle_event_ext_scan_report(priv,
                                                adapter->event_skb->data);
                break;
        case EVENT_TX_STATUS_REPORT:
-               dev_dbg(adapter->dev, "event: TX_STATUS Report\n");
+               mwifiex_dbg(adapter, EVENT, "event: TX_STATUS Report\n");
                mwifiex_parse_tx_status_event(priv, adapter->event_body);
                break;
        case EVENT_PS_SLEEP:
-               dev_dbg(adapter->dev, "info: EVENT: SLEEP\n");
+               mwifiex_dbg(adapter, EVENT, "info: EVENT: SLEEP\n");
 
                adapter->ps_state = PS_STATE_PRE_SLEEP;
 
@@ -187,12 +191,12 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
                break;
 
        case EVENT_PS_AWAKE:
-               dev_dbg(adapter->dev, "info: EVENT: AWAKE\n");
+               mwifiex_dbg(adapter, EVENT, "info: EVENT: AWAKE\n");
                if (!adapter->pps_uapsd_mode &&
                    priv->media_connected && adapter->sleep_period.period) {
                                adapter->pps_uapsd_mode = true;
-                               dev_dbg(adapter->dev,
-                                       "event: PPS/UAPSD mode activated\n");
+                               mwifiex_dbg(adapter, EVENT,
+                                           "event: PPS/UAPSD mode activated\n");
                }
                adapter->tx_lock_flag = false;
                if (adapter->pps_uapsd_mode && adapter->gen_null_pkt) {
@@ -218,16 +222,16 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
                break;
 
        case EVENT_CHANNEL_REPORT_RDY:
-               dev_dbg(adapter->dev, "event: Channel Report\n");
+               mwifiex_dbg(adapter, EVENT, "event: Channel Report\n");
                mwifiex_11h_handle_chanrpt_ready(priv, adapter->event_skb);
                break;
        case EVENT_RADAR_DETECTED:
-               dev_dbg(adapter->dev, "event: Radar detected\n");
+               mwifiex_dbg(adapter, EVENT, "event: Radar detected\n");
                mwifiex_11h_handle_radar_detected(priv, adapter->event_skb);
                break;
        default:
-               dev_dbg(adapter->dev, "event: unknown event id: %#x\n",
-                       eventcause);
+               mwifiex_dbg(adapter, EVENT,
+                           "event: unknown event id: %#x\n", eventcause);
                break;
        }
 
index 38ac4d74c486c8d080d1c87c78e54a74c63e296c..61c52fdf945d42140c000e2c8a298f1f4d08dc60 100644 (file)
@@ -103,8 +103,8 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
 
        if ((atomic_read(&adapter->pending_bridged_pkts) >=
                                             MWIFIEX_BRIDGED_PKTS_THR_HIGH)) {
-               dev_err(priv->adapter->dev,
-                       "Tx: Bridge packet limit reached. Drop packet!\n");
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "Tx: Bridge packet limit reached. Drop packet!\n");
                kfree_skb(skb);
                mwifiex_uap_cleanup_tx_queues(priv);
                return;
@@ -153,15 +153,15 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
        skb_pull(skb, hdr_chop);
 
        if (skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN) {
-               dev_dbg(priv->adapter->dev,
-                       "data: Tx: insufficient skb headroom %d\n",
-                       skb_headroom(skb));
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "data: Tx: insufficient skb headroom %d\n",
+                           skb_headroom(skb));
                /* Insufficient skb headroom - allocate a new skb */
                new_skb =
                        skb_realloc_headroom(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
                if (unlikely(!new_skb)) {
-                       dev_err(priv->adapter->dev,
-                               "Tx: cannot allocate new_skb\n");
+                       mwifiex_dbg(priv->adapter, ERROR,
+                                   "Tx: cannot allocate new_skb\n");
                        kfree_skb(skb);
                        priv->stats.tx_dropped++;
                        return;
@@ -169,8 +169,9 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
 
                kfree_skb(skb);
                skb = new_skb;
-               dev_dbg(priv->adapter->dev, "info: new skb headroom %d\n",
-                       skb_headroom(skb));
+               mwifiex_dbg(priv->adapter, INFO,
+                           "info: new skb headroom %d\n",
+                           skb_headroom(skb));
        }
 
        tx_info = MWIFIEX_SKB_TXCB(skb);
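
The headroom branch above follows the usual recipe for growing the front of
an skb: skb_realloc_headroom() returns a copy with at least the requested
headroom, and the caller frees the original on both outcomes. Condensed:

    new_skb = skb_realloc_headroom(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
    if (!new_skb) {
            kfree_skb(skb);           /* no memory: drop and count it */
            return;
    }
    kfree_skb(skb);                   /* copy taken; original goes away */
    skb = new_skb;                    /* now has room for the TX header */
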
@@ -225,7 +226,8 @@ int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv,
 
        /* don't do packet forwarding in disconnected state */
        if (!priv->media_connected) {
-               dev_err(adapter->dev, "drop packet in disconnected state.\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "drop packet in disconnected state.\n");
                dev_kfree_skb_any(skb);
                return 0;
        }
@@ -275,10 +277,10 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv,
 
        if ((le16_to_cpu(uap_rx_pd->rx_pkt_offset) +
             le16_to_cpu(uap_rx_pd->rx_pkt_length)) > (u16) skb->len) {
-               dev_err(adapter->dev,
-                       "wrong rx packet: len=%d, offset=%d, length=%d\n",
-                       skb->len, le16_to_cpu(uap_rx_pd->rx_pkt_offset),
-                       le16_to_cpu(uap_rx_pd->rx_pkt_length));
+               mwifiex_dbg(adapter, ERROR,
+                           "wrong rx packet: len=%d, offset=%d, length=%d\n",
+                           skb->len, le16_to_cpu(uap_rx_pd->rx_pkt_offset),
+                           le16_to_cpu(uap_rx_pd->rx_pkt_length));
                priv->stats.rx_dropped++;
                dev_kfree_skb_any(skb);
                return 0;
@@ -287,7 +289,8 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv,
        if (rx_pkt_type == PKT_TYPE_MGMT) {
                ret = mwifiex_process_mgmt_packet(priv, skb);
                if (ret)
-                       dev_err(adapter->dev, "Rx of mgmt packet failed");
+                       mwifiex_dbg(adapter, ERROR,
+                                   "Rx of mgmt packet failed");
                dev_kfree_skb_any(skb);
                return ret;
        }
@@ -354,7 +357,8 @@ void *mwifiex_process_uap_txpd(struct mwifiex_private *priv,
                       INTF_HEADER_LEN;
 
        if (!skb->len) {
-               dev_err(adapter->dev, "Tx: bad packet length: %d\n", skb->len);
+               mwifiex_dbg(adapter, ERROR,
+                           "Tx: bad packet length: %d\n", skb->len);
                tx_info->status_code = -1;
                return skb->data;
        }
index fd8027f200a0ddd61c178ee1f7107e7e0931c1c1..aada93425f806a74b481e937d7f25fab5a68d4d3 100644 (file)
@@ -60,7 +60,6 @@ static int mwifiex_usb_submit_rx_urb(struct urb_context *ctx, int size);
 static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
                            struct sk_buff *skb, u8 ep)
 {
-       struct device *dev = adapter->dev;
        u32 recv_type;
        __le32 tmp;
        int ret;
@@ -69,13 +68,15 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
                mwifiex_process_hs_config(adapter);
 
        if (skb->len < INTF_HEADER_LEN) {
-               dev_err(dev, "%s: invalid skb->len\n", __func__);
+               mwifiex_dbg(adapter, ERROR,
+                           "%s: invalid skb->len\n", __func__);
                return -1;
        }
 
        switch (ep) {
        case MWIFIEX_USB_EP_CMD_EVENT:
-               dev_dbg(dev, "%s: EP_CMD_EVENT\n", __func__);
+               mwifiex_dbg(adapter, EVENT,
+                           "%s: EP_CMD_EVENT\n", __func__);
                skb_copy_from_linear_data(skb, &tmp, INTF_HEADER_LEN);
                recv_type = le32_to_cpu(tmp);
                skb_pull(skb, INTF_HEADER_LEN);
@@ -83,11 +84,12 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
                switch (recv_type) {
                case MWIFIEX_USB_TYPE_CMD:
                        if (skb->len > MWIFIEX_SIZE_OF_CMD_BUFFER) {
-                               dev_err(dev, "CMD: skb->len too large\n");
+                               mwifiex_dbg(adapter, ERROR,
+                                           "CMD: skb->len too large\n");
                                ret = -1;
                                goto exit_restore_skb;
                        } else if (!adapter->curr_cmd) {
-                               dev_dbg(dev, "CMD: no curr_cmd\n");
+                               mwifiex_dbg(adapter, WARN, "CMD: no curr_cmd\n");
                                if (adapter->ps_state == PS_STATE_SLEEP_CFM) {
                                        mwifiex_process_sleep_confirm_resp(
                                                        adapter, skb->data,
@@ -104,16 +106,19 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
                        break;
                case MWIFIEX_USB_TYPE_EVENT:
                        if (skb->len < sizeof(u32)) {
-                               dev_err(dev, "EVENT: skb->len too small\n");
+                               mwifiex_dbg(adapter, ERROR,
+                                           "EVENT: skb->len too small\n");
                                ret = -1;
                                goto exit_restore_skb;
                        }
                        skb_copy_from_linear_data(skb, &tmp, sizeof(u32));
                        adapter->event_cause = le32_to_cpu(tmp);
-                       dev_dbg(dev, "event_cause %#x\n", adapter->event_cause);
+                       mwifiex_dbg(adapter, EVENT,
+                                   "event_cause %#x\n", adapter->event_cause);
 
                        if (skb->len > MAX_EVENT_SIZE) {
-                               dev_err(dev, "EVENT: event body too large\n");
+                               mwifiex_dbg(adapter, ERROR,
+                                           "EVENT: event body too large\n");
                                ret = -1;
                                goto exit_restore_skb;
                        }
@@ -125,14 +130,16 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
                        adapter->event_skb = skb;
                        break;
                default:
-                       dev_err(dev, "unknown recv_type %#x\n", recv_type);
+                       mwifiex_dbg(adapter, ERROR,
+                                   "unknown recv_type %#x\n", recv_type);
                        return -1;
                }
                break;
        case MWIFIEX_USB_EP_DATA:
-               dev_dbg(dev, "%s: EP_DATA\n", __func__);
+               mwifiex_dbg(adapter, DATA, "%s: EP_DATA\n", __func__);
                if (skb->len > MWIFIEX_RX_DATA_BUF_SIZE) {
-                       dev_err(dev, "DATA: skb->len too large\n");
+                       mwifiex_dbg(adapter, ERROR,
+                                   "DATA: skb->len too large\n");
                        return -1;
                }
 
@@ -141,7 +148,8 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
                atomic_inc(&adapter->rx_pending);
                break;
        default:
-               dev_err(dev, "%s: unknown endport %#x\n", __func__, ep);
+               mwifiex_dbg(adapter, ERROR,
+                           "%s: unknown endport %#x\n", __func__, ep);
                return -1;
        }
 
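
On the command/event endpoint, the first INTF_HEADER_LEN bytes of each
transfer carry a little-endian type word that routes the rest of the buffer,
which is what the switch above dispatches on. The parse, in brief (the type
word evidently fits a __le32, since that is what the code copies into):

    __le32 tmp;
    u32 recv_type;

    skb_copy_from_linear_data(skb, &tmp, INTF_HEADER_LEN);
    recv_type = le32_to_cpu(tmp);
    skb_pull(skb, INTF_HEADER_LEN);   /* strip header before dispatch */

    switch (recv_type) {
    case MWIFIEX_USB_TYPE_CMD:        /* command response for curr_cmd */
    case MWIFIEX_USB_TYPE_EVENT:      /* event; first u32 is event_cause */
            break;
    }
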
@@ -176,8 +184,8 @@ static void mwifiex_usb_rx_complete(struct urb *urb)
 
        if (recv_length) {
                if (urb->status || (adapter->surprise_removed)) {
-                       dev_err(adapter->dev,
-                               "URB status is failed: %d\n", urb->status);
+                       mwifiex_dbg(adapter, ERROR,
+                                   "URB status is failed: %d\n", urb->status);
                        /* Do not free skb in case of command ep */
                        if (card->rx_cmd_ep != context->ep)
                                dev_kfree_skb_any(skb);
@@ -190,8 +198,9 @@ static void mwifiex_usb_rx_complete(struct urb *urb)
 
                status = mwifiex_usb_recv(adapter, skb, context->ep);
 
-               dev_dbg(adapter->dev, "info: recv_length=%d, status=%d\n",
-                       recv_length, status);
+               mwifiex_dbg(adapter, INFO,
+                           "info: recv_length=%d, status=%d\n",
+                           recv_length, status);
                if (status == -EINPROGRESS) {
                        mwifiex_queue_main_work(adapter);
 
@@ -203,8 +212,8 @@ static void mwifiex_usb_rx_complete(struct urb *urb)
                                return;
                } else {
                        if (status == -1)
-                               dev_err(adapter->dev,
-                                       "received data processing failed!\n");
+                               mwifiex_dbg(adapter, ERROR,
+                                           "received data processing failed!\n");
 
                        /* Do not free skb in case of command ep */
                        if (card->rx_cmd_ep != context->ep)
@@ -212,8 +221,8 @@ static void mwifiex_usb_rx_complete(struct urb *urb)
                }
        } else if (urb->status) {
                if (!adapter->is_suspended) {
-                       dev_warn(adapter->dev,
-                                "Card is removed: %d\n", urb->status);
+                       mwifiex_dbg(adapter, FATAL,
+                                   "Card is removed: %d\n", urb->status);
                        adapter->surprise_removed = true;
                }
                dev_kfree_skb_any(skb);
@@ -249,14 +258,17 @@ static void mwifiex_usb_tx_complete(struct urb *urb)
        struct mwifiex_adapter *adapter = context->adapter;
        struct usb_card_rec *card = adapter->card;
 
-       dev_dbg(adapter->dev, "%s: status: %d\n", __func__, urb->status);
+       mwifiex_dbg(adapter, INFO,
+                   "%s: status: %d\n", __func__, urb->status);
 
        if (context->ep == card->tx_cmd_ep) {
-               dev_dbg(adapter->dev, "%s: CMD\n", __func__);
+               mwifiex_dbg(adapter, CMD,
+                           "%s: CMD\n", __func__);
                atomic_dec(&card->tx_cmd_urb_pending);
                adapter->cmd_sent = false;
        } else {
-               dev_dbg(adapter->dev, "%s: DATA\n", __func__);
+               mwifiex_dbg(adapter, DATA,
+                           "%s: DATA\n", __func__);
                atomic_dec(&card->tx_data_urb_pending);
                mwifiex_write_data_complete(adapter, context->skb, 0,
                                            urb->status ? -1 : 0);
@@ -275,8 +287,8 @@ static int mwifiex_usb_submit_rx_urb(struct urb_context *ctx, int size)
        if (card->rx_cmd_ep != ctx->ep) {
                ctx->skb = dev_alloc_skb(size);
                if (!ctx->skb) {
-                       dev_err(adapter->dev,
-                               "%s: dev_alloc_skb failed\n", __func__);
+                       mwifiex_dbg(adapter, ERROR,
+                                   "%s: dev_alloc_skb failed\n", __func__);
                        return -ENOMEM;
                }
        }
@@ -291,7 +303,7 @@ static int mwifiex_usb_submit_rx_urb(struct urb_context *ctx, int size)
                atomic_inc(&card->rx_data_urb_pending);
 
        if (usb_submit_urb(ctx->urb, GFP_ATOMIC)) {
-               dev_err(adapter->dev, "usb_submit_urb failed\n");
+               mwifiex_dbg(adapter, ERROR, "usb_submit_urb failed\n");
                dev_kfree_skb_any(ctx->skb);
                ctx->skb = NULL;
 
@@ -468,7 +480,8 @@ static int mwifiex_usb_suspend(struct usb_interface *intf, pm_message_t message)
        adapter = card->adapter;
 
        if (unlikely(adapter->is_suspended))
-               dev_warn(adapter->dev, "Device already suspended\n");
+               mwifiex_dbg(adapter, WARN,
+                           "Device already suspended\n");
 
        mwifiex_enable_hs(adapter);
 
@@ -519,7 +532,8 @@ static int mwifiex_usb_resume(struct usb_interface *intf)
        adapter = card->adapter;
 
        if (unlikely(!adapter->is_suspended)) {
-               dev_warn(adapter->dev, "Device already resumed\n");
+               mwifiex_dbg(adapter, WARN,
+                           "Device already resumed\n");
                return 0;
        }
 
@@ -578,7 +592,8 @@ static void mwifiex_usb_disconnect(struct usb_interface *intf)
 
        mwifiex_usb_free(card);
 
-       dev_dbg(adapter->dev, "%s: removing card\n", __func__);
+       mwifiex_dbg(adapter, FATAL,
+                   "%s: removing card\n", __func__);
        mwifiex_remove_card(adapter, &add_remove_card_sem);
 
        usb_set_intfdata(intf, NULL);
@@ -608,7 +623,8 @@ static int mwifiex_usb_tx_init(struct mwifiex_adapter *adapter)
 
        card->tx_cmd.urb = usb_alloc_urb(0, GFP_KERNEL);
        if (!card->tx_cmd.urb) {
-               dev_err(adapter->dev, "tx_cmd.urb allocation failed\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "tx_cmd.urb allocation failed\n");
                return -ENOMEM;
        }
 
@@ -620,8 +636,8 @@ static int mwifiex_usb_tx_init(struct mwifiex_adapter *adapter)
 
                card->tx_data_list[i].urb = usb_alloc_urb(0, GFP_KERNEL);
                if (!card->tx_data_list[i].urb) {
-                       dev_err(adapter->dev,
-                               "tx_data_list[] urb allocation failed\n");
+                       mwifiex_dbg(adapter, ERROR,
+                                   "tx_data_list[] urb allocation failed\n");
                        return -ENOMEM;
                }
        }
@@ -639,15 +655,13 @@ static int mwifiex_usb_rx_init(struct mwifiex_adapter *adapter)
 
        card->rx_cmd.urb = usb_alloc_urb(0, GFP_KERNEL);
        if (!card->rx_cmd.urb) {
-               dev_err(adapter->dev, "rx_cmd.urb allocation failed\n");
+               mwifiex_dbg(adapter, ERROR, "rx_cmd.urb allocation failed\n");
                return -ENOMEM;
        }
 
        card->rx_cmd.skb = dev_alloc_skb(MWIFIEX_RX_CMD_BUF_SIZE);
-       if (!card->rx_cmd.skb) {
-               dev_err(adapter->dev, "rx_cmd.skb allocation failed\n");
+       if (!card->rx_cmd.skb)
                return -ENOMEM;
-       }
 
        if (mwifiex_usb_submit_rx_urb(&card->rx_cmd, MWIFIEX_RX_CMD_BUF_SIZE))
                return -1;
@@ -658,8 +672,8 @@ static int mwifiex_usb_rx_init(struct mwifiex_adapter *adapter)
 
                card->rx_data_list[i].urb = usb_alloc_urb(0, GFP_KERNEL);
                if (!card->rx_data_list[i].urb) {
-                       dev_err(adapter->dev,
-                               "rx_data_list[] urb allocation failed\n");
+                       mwifiex_dbg(adapter, ERROR,
+                                   "rx_data_list[] urb allocation failed\n");
                        return -1;
                }
                if (mwifiex_usb_submit_rx_urb(&card->rx_data_list[i],
@@ -683,7 +697,8 @@ static int mwifiex_write_data_sync(struct mwifiex_adapter *adapter, u8 *pbuf,
        ret = usb_bulk_msg(card->udev, usb_sndbulkpipe(card->udev, ep), pbuf,
                           *len, &actual_length, timeout);
        if (ret) {
-               dev_err(adapter->dev, "usb_bulk_msg for tx failed: %d\n", ret);
+               mwifiex_dbg(adapter, ERROR,
+                           "usb_bulk_msg for tx failed: %d\n", ret);
                return ret;
        }
 
@@ -702,7 +717,8 @@ static int mwifiex_read_data_sync(struct mwifiex_adapter *adapter, u8 *pbuf,
        ret = usb_bulk_msg(card->udev, usb_rcvbulkpipe(card->udev, ep), pbuf,
                           *len, &actual_length, timeout);
        if (ret) {
-               dev_err(adapter->dev, "usb_bulk_msg for rx failed: %d\n", ret);
+               mwifiex_dbg(adapter, ERROR,
+                           "usb_bulk_msg for rx failed: %d\n", ret);
                return ret;
        }
 
@@ -722,13 +738,13 @@ static int mwifiex_usb_host_to_card(struct mwifiex_adapter *adapter, u8 ep,
        struct urb *tx_urb;
 
        if (adapter->is_suspended) {
-               dev_err(adapter->dev,
-                       "%s: not allowed while suspended\n", __func__);
+               mwifiex_dbg(adapter, ERROR,
+                           "%s: not allowed while suspended\n", __func__);
                return -1;
        }
 
        if (adapter->surprise_removed) {
-               dev_err(adapter->dev, "%s: device removed\n", __func__);
+               mwifiex_dbg(adapter, ERROR, "%s: device removed\n", __func__);
                return -1;
        }
 
@@ -737,7 +753,7 @@ static int mwifiex_usb_host_to_card(struct mwifiex_adapter *adapter, u8 ep,
                return -EBUSY;
        }
 
-       dev_dbg(adapter->dev, "%s: ep=%d\n", __func__, ep);
+       mwifiex_dbg(adapter, INFO, "%s: ep=%d\n", __func__, ep);
 
        if (ep == card->tx_cmd_ep) {
                context = &card->tx_cmd;
@@ -764,7 +780,8 @@ static int mwifiex_usb_host_to_card(struct mwifiex_adapter *adapter, u8 ep,
                atomic_inc(&card->tx_data_urb_pending);
 
        if (usb_submit_urb(tx_urb, GFP_ATOMIC)) {
-               dev_err(adapter->dev, "%s: usb_submit_urb failed\n", __func__);
+               mwifiex_dbg(adapter, ERROR,
+                           "%s: usb_submit_urb failed\n", __func__);
                if (ep == card->tx_cmd_ep) {
                        atomic_dec(&card->tx_cmd_urb_pending);
                } else {
@@ -843,8 +860,8 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
        u8 check_winner = 1;
 
        if (!firmware) {
-               dev_err(adapter->dev,
-                       "No firmware image found! Terminating download\n");
+               mwifiex_dbg(adapter, ERROR,
+                           "No firmware image found! Terminating download\n");
                ret = -1;
                goto fw_exit;
        }
@@ -889,8 +906,9 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
                                                MWIFIEX_USB_EP_CMD_EVENT,
                                                MWIFIEX_USB_TIMEOUT);
                        if (ret) {
-                               dev_err(adapter->dev,
-                                       "write_data_sync: failed: %d\n", ret);
+                               mwifiex_dbg(adapter, ERROR,
+                                           "write_data_sync: failed: %d\n",
+                                           ret);
                                continue;
                        }
 
@@ -902,8 +920,9 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
                                                MWIFIEX_USB_EP_CMD_EVENT,
                                                MWIFIEX_USB_TIMEOUT);
                        if (ret) {
-                               dev_err(adapter->dev,
-                                       "read_data_sync: failed: %d\n", ret);
+                               mwifiex_dbg(adapter, ERROR,
+                                           "read_data_sync: failed: %d\n",
+                                           ret);
                                continue;
                        }
 
@@ -913,17 +932,17 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
                        /* check 1st firmware block resp for highest bit set */
                        if (check_winner) {
                                if (le32_to_cpu(sync_fw.cmd) & 0x80000000) {
-                                       dev_warn(adapter->dev,
-                                                "USB is not the winner %#x\n",
-                                                sync_fw.cmd);
+                                       mwifiex_dbg(adapter, WARN,
+                                                   "USB is not the winner %#x\n",
+                                                   sync_fw.cmd);
 
                                        /* returning success */
                                        ret = 0;
                                        goto cleanup;
                                }
 
-                               dev_dbg(adapter->dev,
-                                       "USB is the winner, start to download FW\n");
+                               mwifiex_dbg(adapter, MSG,
+                                           "start to download FW...\n");
 
                                check_winner = 0;
                                break;
@@ -931,9 +950,9 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
 
                        /* check the firmware block response for CRC errors */
                        if (sync_fw.cmd) {
-                               dev_err(adapter->dev,
-                                       "FW received block with CRC %#x\n",
-                                       sync_fw.cmd);
+                               mwifiex_dbg(adapter, ERROR,
+                                           "FW received block with CRC %#x\n",
+                                           sync_fw.cmd);
                                ret = -1;
                                continue;
                        }
@@ -945,8 +964,8 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
        } while ((dnld_cmd != FW_HAS_LAST_BLOCK) && retries);
 
 cleanup:
-       dev_notice(adapter->dev,
-                  "info: FW download over, size %d bytes\n", tlen);
+       mwifiex_dbg(adapter, MSG,
+                   "info: FW download over, size %d bytes\n", tlen);
 
        kfree(recv_buff);
        kfree(fwdata);
index b8a45872354d7f46c330da734fa5f2aef8b4c7c0..370323a47ecb99d3c246b94fc86e0e3eb0f8ffde 100644 (file)
@@ -26,6 +26,8 @@
 #include "11n.h"
 
 static struct mwifiex_debug_data items[] = {
+       {"debug_mask", item_size(debug_mask),
+        item_addr(debug_mask), 1},
        {"int_counter", item_size(int_counter),
         item_addr(int_counter), 1},
        {"wmm_ac_vo", item_size(packets_out[WMM_AC_VO]),
@@ -158,7 +160,8 @@ int mwifiex_init_shutdown_fw(struct mwifiex_private *priv,
        } else if (func_init_shutdown == MWIFIEX_FUNC_SHUTDOWN) {
                cmd = HostCmd_CMD_FUNC_SHUTDOWN;
        } else {
-               dev_err(priv->adapter->dev, "unsupported parameter\n");
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "unsupported parameter\n");
                return -1;
        }
 
@@ -178,6 +181,7 @@ int mwifiex_get_debug_info(struct mwifiex_private *priv,
        struct mwifiex_adapter *adapter = priv->adapter;
 
        if (info) {
+               info->debug_mask = adapter->debug_mask;
                memcpy(info->packets_out,
                       priv->wmm.packets_out,
                       sizeof(priv->wmm.packets_out));
@@ -336,9 +340,9 @@ mwifiex_parse_mgmt_packet(struct mwifiex_private *priv, u8 *payload, u16 len,
                action_code = *(payload + sizeof(struct ieee80211_hdr) + 1);
                if (category == WLAN_CATEGORY_PUBLIC &&
                    action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES) {
-                       dev_dbg(priv->adapter->dev,
-                               "TDLS discovery response %pM nf=%d, snr=%d\n",
-                               ieee_hdr->addr2, rx_pd->nf, rx_pd->snr);
+                       mwifiex_dbg(priv->adapter, INFO,
+                                   "TDLS discovery response %pM nf=%d, snr=%d\n",
+                                   ieee_hdr->addr2, rx_pd->nf, rx_pd->snr);
                        mwifiex_auto_tdls_update_peer_signal(priv,
                                                             ieee_hdr->addr2,
                                                             rx_pd->snr,
@@ -346,8 +350,8 @@ mwifiex_parse_mgmt_packet(struct mwifiex_private *priv, u8 *payload, u16 len,
                }
                break;
        default:
-               dev_dbg(priv->adapter->dev,
-                       "unknown mgmt frame subytpe %#x\n", stype);
+               mwifiex_dbg(priv->adapter, INFO,
+                           "unknown mgmt frame subtype %#x\n", stype);
        }
 
        return 0;
@@ -369,8 +373,8 @@ mwifiex_process_mgmt_packet(struct mwifiex_private *priv,
 
        if (!priv->mgmt_frame_mask ||
            priv->wdev.iftype == NL80211_IFTYPE_UNSPECIFIED) {
-               dev_dbg(priv->adapter->dev,
-                       "do not receive mgmt frames on uninitialized intf");
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "do not receive mgmt frames on uninitialized intf\n");
                return -1;
        }
 
@@ -464,13 +468,14 @@ int mwifiex_recv_packet(struct mwifiex_private *priv, struct sk_buff *skb)
 int mwifiex_complete_cmd(struct mwifiex_adapter *adapter,
                         struct cmd_ctrl_node *cmd_node)
 {
-       dev_dbg(adapter->dev, "cmd completed: status=%d\n",
-               adapter->cmd_wait_q.status);
+       mwifiex_dbg(adapter, CMD,
+                   "cmd completed: status=%d\n",
+                   adapter->cmd_wait_q.status);
 
        *(cmd_node->condition) = true;
 
        if (adapter->cmd_wait_q.status == -ETIMEDOUT)
-               dev_err(adapter->dev, "cmd timeout\n");
+               mwifiex_dbg(adapter, ERROR, "cmd timeout\n");
        else
                wake_up_interruptible(&adapter->cmd_wait_q.wait);
 
@@ -536,13 +541,16 @@ void
 mwifiex_set_sta_ht_cap(struct mwifiex_private *priv, const u8 *ies,
                       int ies_len, struct mwifiex_sta_node *node)
 {
+       struct ieee_types_header *ht_cap_ie;
        const struct ieee80211_ht_cap *ht_cap;
 
        if (!ies)
                return;
 
-       ht_cap = (void *)cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies, ies_len);
-       if (ht_cap) {
+       ht_cap_ie = (void *)cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies,
+                                            ies_len);
+       if (ht_cap_ie) {
+               ht_cap = (void *)(ht_cap_ie + 1);
                node->is_11n_enabled = 1;
                node->max_amsdu = le16_to_cpu(ht_cap->cap_info) &
                                  IEEE80211_HT_CAP_MAX_AMSDU ?
index b2e99569a0f8b659b756179a7cec340de68952e1..a8ea21c3340c73537c8f597ad6dc2d176b45f588 100644 (file)
@@ -107,7 +107,7 @@ mwifiex_wmm_allocate_ralist_node(struct mwifiex_adapter *adapter, const u8 *ra)
 
        ra_list->total_pkt_count = 0;
 
-       dev_dbg(adapter->dev, "info: allocated ra_list %p\n", ra_list);
+       mwifiex_dbg(adapter, INFO, "info: allocated ra_list %p\n", ra_list);
 
        return ra_list;
 }
@@ -150,7 +150,8 @@ void mwifiex_ralist_add(struct mwifiex_private *priv, const u8 *ra)
 
        for (i = 0; i < MAX_NUM_TID; ++i) {
                ra_list = mwifiex_wmm_allocate_ralist_node(adapter, ra);
-               dev_dbg(adapter->dev, "info: created ra_list %p\n", ra_list);
+               mwifiex_dbg(adapter, INFO,
+                           "info: created ra_list %p\n", ra_list);
 
                if (!ra_list)
                        break;
@@ -178,8 +179,8 @@ void mwifiex_ralist_add(struct mwifiex_private *priv, const u8 *ra)
                        spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
                }
 
-               dev_dbg(adapter->dev, "data: ralist %p: is_11n_enabled=%d\n",
-                       ra_list, ra_list->is_11n_enabled);
+               mwifiex_dbg(adapter, DATA, "data: ralist %p: is_11n_enabled=%d\n",
+                           ra_list, ra_list->is_11n_enabled);
 
                if (ra_list->is_11n_enabled) {
                        ra_list->ba_pkt_count = 0;
@@ -241,11 +242,12 @@ mwifiex_wmm_setup_queue_priorities(struct mwifiex_private *priv,
                return;
        }
 
-       dev_dbg(priv->adapter->dev, "info: WMM Parameter IE: version=%d, "
-               "qos_info Parameter Set Count=%d, Reserved=%#x\n",
-               wmm_ie->vend_hdr.version, wmm_ie->qos_info_bitmap &
-               IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK,
-               wmm_ie->reserved);
+       mwifiex_dbg(priv->adapter, INFO,
+                   "info: WMM Parameter IE: version=%d,\t"
+                   "qos_info Parameter Set Count=%d, Reserved=%#x\n",
+                   wmm_ie->vend_hdr.version, wmm_ie->qos_info_bitmap &
+                   IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK,
+                   wmm_ie->reserved);
 
        for (num_ac = 0; num_ac < ARRAY_SIZE(wmm_ie->ac_params); num_ac++) {
                u8 ecw = wmm_ie->ac_params[num_ac].ecw_bitmap;
@@ -257,10 +259,10 @@ mwifiex_wmm_setup_queue_priorities(struct mwifiex_private *priv,
                priv->wmm.queue_priority[ac_idx] = ac_idx;
                tmp[ac_idx] = avg_back_off;
 
-               dev_dbg(priv->adapter->dev,
-                       "info: WMM: CWmax=%d CWmin=%d Avg Back-off=%d\n",
-                       (1 << ((ecw & MWIFIEX_ECW_MAX) >> 4)) - 1,
-                       cw_min, avg_back_off);
+               mwifiex_dbg(priv->adapter, INFO,
+                           "info: WMM: CWmax=%d CWmin=%d Avg Back-off=%d\n",
+                           (1 << ((ecw & MWIFIEX_ECW_MAX) >> 4)) - 1,
+                           cw_min, avg_back_off);
                mwifiex_wmm_ac_debug_print(&wmm_ie->ac_params[num_ac]);
        }
 
@@ -333,8 +335,8 @@ mwifiex_wmm_setup_ac_downgrade(struct mwifiex_private *priv)
 {
        int ac_val;
 
-       dev_dbg(priv->adapter->dev, "info: WMM: AC Priorities:"
-                       "BK(0), BE(1), VI(2), VO(3)\n");
+       mwifiex_dbg(priv->adapter, INFO, "info: WMM: AC Priorities:\t"
+                   "BK(0), BE(1), VI(2), VO(3)\n");
 
        if (!priv->wmm_enabled) {
                /* WMM is not enabled, default priorities */
@@ -346,9 +348,10 @@ mwifiex_wmm_setup_ac_downgrade(struct mwifiex_private *priv)
                        priv->wmm.ac_down_graded_vals[ac_val]
                                = mwifiex_wmm_eval_downgrade_ac(priv,
                                                (enum mwifiex_wmm_ac_e) ac_val);
-                       dev_dbg(priv->adapter->dev,
-                               "info: WMM: AC PRIO %d maps to %d\n",
-                               ac_val, priv->wmm.ac_down_graded_vals[ac_val]);
+                       mwifiex_dbg(priv->adapter, INFO,
+                                   "info: WMM: AC PRIO %d maps to %d\n",
+                                   ac_val,
+                                   priv->wmm.ac_down_graded_vals[ac_val]);
                }
        }
 }
@@ -428,6 +431,15 @@ mwifiex_wmm_init(struct mwifiex_adapter *adapter)
                                                        priv->tos_to_tid_inv[i];
                }
 
+               priv->aggr_prio_tbl[6].amsdu
+                                       = priv->aggr_prio_tbl[6].ampdu_ap
+                                       = priv->aggr_prio_tbl[6].ampdu_user
+                                       = BA_STREAM_NOT_ALLOWED;
+
+               priv->aggr_prio_tbl[7].amsdu = priv->aggr_prio_tbl[7].ampdu_ap
+                                       = priv->aggr_prio_tbl[7].ampdu_user
+                                       = BA_STREAM_NOT_ALLOWED;
+
                mwifiex_set_ba_params(priv);
                mwifiex_reset_11n_rx_seq_num(priv);
 
@@ -512,8 +524,8 @@ static void mwifiex_wmm_delete_all_ralist(struct mwifiex_private *priv)
        int i;
 
        for (i = 0; i < MAX_NUM_TID; ++i) {
-               dev_dbg(priv->adapter->dev,
-                       "info: ra_list: freeing buf for tid %d\n", i);
+               mwifiex_dbg(priv->adapter, INFO,
+                           "info: ra_list: freeing buf for tid %d\n", i);
                list_for_each_entry_safe(ra_list, tmp_node,
                                         &priv->wmm.tid_tbl_ptr[i].ra_list,
                                         list) {
@@ -685,14 +697,15 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
        if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA &&
            ISSUPP_TDLS_ENABLED(adapter->fw_cap_info)) {
                if (ntohs(eth_hdr->h_proto) == ETH_P_TDLS)
-                       dev_dbg(adapter->dev,
-                               "TDLS setup packet for %pM. Don't block\n", ra);
+                       mwifiex_dbg(adapter, DATA,
+                                   "TDLS setup packet for %pM.\t"
+                                   "Don't block\n", ra);
                else if (memcmp(priv->cfg_bssid, ra, ETH_ALEN))
                        tdls_status = mwifiex_get_tdls_link_status(priv, ra);
        }
 
        if (!priv->media_connected && !mwifiex_is_skb_mgmt_frame(skb)) {
-               dev_dbg(adapter->dev, "data: drop packet in disconnect\n");
+               mwifiex_dbg(adapter, DATA, "data: drop packet in disconnect\n");
                mwifiex_write_data_complete(adapter, skb, 0, -1);
                return;
        }
@@ -773,6 +786,7 @@ int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
 {
        u8 *curr = (u8 *) &resp->params.get_wmm_status;
        uint16_t resp_len = le16_to_cpu(resp->size), tlv_len;
+       int mask = IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK;
        bool valid = true;
 
        struct mwifiex_ie_types_data *tlv_hdr;
@@ -780,8 +794,9 @@ int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
        struct ieee_types_wmm_parameter *wmm_param_ie = NULL;
        struct mwifiex_wmm_ac_status *ac_status;
 
-       dev_dbg(priv->adapter->dev, "info: WMM: WMM_GET_STATUS cmdresp received: %d\n",
-               resp_len);
+       mwifiex_dbg(priv->adapter, INFO,
+                   "info: WMM: WMM_GET_STATUS cmdresp received: %d\n",
+                   resp_len);
 
        while ((resp_len >= sizeof(tlv_hdr->header)) && valid) {
                tlv_hdr = (struct mwifiex_ie_types_data *) curr;
@@ -795,12 +810,12 @@ int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
                        tlv_wmm_qstatus =
                                (struct mwifiex_ie_types_wmm_queue_status *)
                                tlv_hdr;
-                       dev_dbg(priv->adapter->dev,
-                               "info: CMD_RESP: WMM_GET_STATUS:"
-                               " QSTATUS TLV: %d, %d, %d\n",
-                               tlv_wmm_qstatus->queue_index,
-                               tlv_wmm_qstatus->flow_required,
-                               tlv_wmm_qstatus->disabled);
+                       mwifiex_dbg(priv->adapter, CMD,
+                                   "info: CMD_RESP: WMM_GET_STATUS:\t"
+                                   "QSTATUS TLV: %d, %d, %d\n",
+                                   tlv_wmm_qstatus->queue_index,
+                                   tlv_wmm_qstatus->flow_required,
+                                   tlv_wmm_qstatus->disabled);
 
                        ac_status = &priv->wmm.ac_status[tlv_wmm_qstatus->
                                                         queue_index];
@@ -823,11 +838,10 @@ int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
                        wmm_param_ie->vend_hdr.element_id =
                                                WLAN_EID_VENDOR_SPECIFIC;
 
-                       dev_dbg(priv->adapter->dev,
-                               "info: CMD_RESP: WMM_GET_STATUS:"
-                               " WMM Parameter Set Count: %d\n",
-                               wmm_param_ie->qos_info_bitmap &
-                               IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK);
+                       mwifiex_dbg(priv->adapter, CMD,
+                                   "info: CMD_RESP: WMM_GET_STATUS:\t"
+                                   "WMM Parameter Set Count: %d\n",
+                                   wmm_param_ie->qos_info_bitmap & mask);
 
                        memcpy((u8 *) &priv->curr_bss_params.bss_descriptor.
                               wmm_ie, wmm_param_ie,
@@ -875,9 +889,9 @@ mwifiex_wmm_process_association_req(struct mwifiex_private *priv,
        if (!wmm_ie)
                return 0;
 
-       dev_dbg(priv->adapter->dev,
-               "info: WMM: process assoc req: bss->wmm_ie=%#x\n",
-               wmm_ie->vend_hdr.element_id);
+       mwifiex_dbg(priv->adapter, INFO,
+                   "info: WMM: process assoc req: bss->wmm_ie=%#x\n",
+                   wmm_ie->vend_hdr.element_id);
 
        if ((priv->wmm_required ||
             (ht_cap && (priv->adapter->config_bands & BAND_GN ||
@@ -927,8 +941,8 @@ mwifiex_wmm_compute_drv_pkt_delay(struct mwifiex_private *priv,
         */
        ret_val = (u8) (min(queue_delay, priv->wmm.drv_pkt_delay_max) >> 1);
 
-       dev_dbg(priv->adapter->dev, "data: WMM: Pkt Delay: %d ms,"
-                               " %d ms sent to FW\n", queue_delay, ret_val);
+       mwifiex_dbg(priv->adapter, DATA, "data: WMM: Pkt Delay: %d ms,\t"
+                   "%d ms sent to FW\n", queue_delay, ret_val);
 
        return ret_val;
 }
@@ -1082,14 +1096,15 @@ mwifiex_send_single_packet(struct mwifiex_private *priv,
        if (skb_queue_empty(&ptr->skb_head)) {
                spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
                                       ra_list_flags);
-               dev_dbg(adapter->dev, "data: nothing to send\n");
+               mwifiex_dbg(adapter, DATA, "data: nothing to send\n");
                return;
        }
 
        skb = skb_dequeue(&ptr->skb_head);
 
        tx_info = MWIFIEX_SKB_TXCB(skb);
-       dev_dbg(adapter->dev, "data: dequeuing the packet %p %p\n", ptr, skb);
+       mwifiex_dbg(adapter, DATA,
+                   "data: dequeuing the packet %p %p\n", ptr, skb);
 
        ptr->total_pkt_count--;
 
@@ -1205,7 +1220,7 @@ mwifiex_send_processed_packet(struct mwifiex_private *priv,
 
        switch (ret) {
        case -EBUSY:
-               dev_dbg(adapter->dev, "data: -EBUSY is returned\n");
+               mwifiex_dbg(adapter, ERROR, "data: -EBUSY is returned\n");
                spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
 
                if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
@@ -1224,7 +1239,7 @@ mwifiex_send_processed_packet(struct mwifiex_private *priv,
        case -1:
                if (adapter->iface_type != MWIFIEX_PCIE)
                        adapter->data_sent = false;
-               dev_err(adapter->dev, "host_to_card failed: %#x\n", ret);
+               mwifiex_dbg(adapter, ERROR, "host_to_card failed: %#x\n", ret);
                adapter->dbg.num_tx_host_to_card_failure++;
                mwifiex_write_data_complete(adapter, skb, 0, ret);
                break;
@@ -1263,7 +1278,7 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
 
        tid = mwifiex_get_tid(ptr);
 
-       dev_dbg(adapter->dev, "data: tid=%d\n", tid);
+       mwifiex_dbg(adapter, DATA, "data: tid=%d\n", tid);
 
        spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
        if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
index 95921167b53f74a8577710a44afcd1f71b06142a..b71fc74d14ab4c8350ea2f7d68a72149c737fcb8 100644 (file)
@@ -5192,7 +5192,7 @@ mwl8k_configure_filter_sniffer(struct ieee80211_hw *hw,
                priv->sniffer_enabled = true;
        }
 
-       *total_flags &= FIF_PROMISC_IN_BSS | FIF_ALLMULTI |
+       *total_flags &= FIF_ALLMULTI |
                        FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL |
                        FIF_OTHER_BSS;
 
index 275408eaf95e6d54f006b853475229ea105ee9dd..257a9eadd59573401540d21da89b9f1aa214d5a9 100644 (file)
@@ -351,8 +351,7 @@ int p54_setup_mac(struct p54_common *priv)
                 * "TRANSPARENT and PROMISCUOUS are mutually exclusive"
                 * STSW45X0C LMAC API - page 12
                 */
-               if (((priv->filter_flags & FIF_PROMISC_IN_BSS) ||
-                    (priv->filter_flags & FIF_OTHER_BSS)) &&
+               if (priv->filter_flags & FIF_OTHER_BSS &&
                    (mode != P54_FILTER_TYPE_PROMISCUOUS))
                        mode |= P54_FILTER_TYPE_TRANSPARENT;
        } else {
index 1f6fd5ff55313731b034b565c358ac56b21bfaf7..9a8fedd3c0f5b133369627b33c4881d0c766c676 100644 (file)
@@ -83,7 +83,7 @@ static void p54_led_brightness_set(struct led_classdev *led_dev,
 
 static int p54_register_led(struct p54_common *priv,
                            unsigned int led_index,
-                           char *name, char *trigger)
+                           char *name, const char *trigger)
 {
        struct p54_led_dev *led = &priv->leds[led_index];
        int err;
index e79674f73dc5766cda5bfae2c1762a5aeb98f863..2947ad21053ccc207dcb4a04988f517b7b9f0507 100644 (file)
@@ -395,13 +395,11 @@ static void p54_configure_filter(struct ieee80211_hw *dev,
 {
        struct p54_common *priv = dev->priv;
 
-       *total_flags &= FIF_PROMISC_IN_BSS |
-                       FIF_ALLMULTI |
-                       FIF_OTHER_BSS;
+       *total_flags &= FIF_ALLMULTI | FIF_OTHER_BSS;
 
        priv->filter_flags = *total_flags;
 
-       if (changed_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS))
+       if (changed_flags & FIF_OTHER_BSS)
                p54_setup_mac(priv);
 
        if (changed_flags & FIF_ALLMULTI || multicast)
index 477f86354dc5a7ff8a324717a16093cd9fc55f81..0881ba8535f4e11bec61d029dc58f29a7f0c4500 100644 (file)
@@ -143,7 +143,7 @@ static int psm;
 static char *essid;
 
 /* Default to encapsulation unless translation requested */
-static bool translate = 1;
+static bool translate = true;
 
 static int country = USA;
 
index d72ff8e7125d4525d1761c60d3828dba22b63296..71a825c750cfc5523fb28d40286cb22888b1bb26 100644 (file)
@@ -356,9 +356,9 @@ struct ndis_80211_pmkid {
 #define CAP_MODE_80211G                4
 #define CAP_MODE_MASK          7
 
-#define WORK_LINK_UP           (1<<0)
-#define WORK_LINK_DOWN         (1<<1)
-#define WORK_SET_MULTICAST_LIST        (1<<2)
+#define WORK_LINK_UP           0
+#define WORK_LINK_DOWN         1
+#define WORK_SET_MULTICAST_LIST        2
 
 #define RNDIS_WLAN_ALG_NONE    0
 #define RNDIS_WLAN_ALG_WEP     (1<<0)
@@ -2861,7 +2861,7 @@ static void rndis_wlan_do_link_down_work(struct usbnet *usbdev)
 
                deauthenticate(usbdev);
 
-               cfg80211_disconnected(usbdev->net, 0, NULL, 0, GFP_KERNEL);
+               cfg80211_disconnected(usbdev->net, 0, NULL, 0, true, GFP_KERNEL);
        }
 
        netif_carrier_off(usbdev->net);
index bdf5590ba304bc9d10b3d26e69d5052a46101459..7da138892026c21df860d0e7875e1ab8c835a1d8 100644 (file)
@@ -273,10 +273,8 @@ static void rt2400pci_config_filter(struct rt2x00_dev *rt2x00dev,
                           !(filter_flags & FIF_PLCPFAIL));
        rt2x00_set_field32(&reg, RXCSR0_DROP_CONTROL,
                           !(filter_flags & FIF_CONTROL));
-       rt2x00_set_field32(&reg, RXCSR0_DROP_NOT_TO_ME,
-                          !(filter_flags & FIF_PROMISC_IN_BSS));
+       rt2x00_set_field32(&reg, RXCSR0_DROP_NOT_TO_ME, 1);
        rt2x00_set_field32(&reg, RXCSR0_DROP_TODS,
-                          !(filter_flags & FIF_PROMISC_IN_BSS) &&
                           !rt2x00dev->intf_ap_count);
        rt2x00_set_field32(&reg, RXCSR0_DROP_VERSION_ERROR, 1);
        rt2x00mmio_register_write(rt2x00dev, RXCSR0, reg);
index 79f4fe65a1196c5c0eabfc1cc9c3630553be97ef..4ea53aa9ede3f05322e2b582e191eff5089aba85 100644 (file)
@@ -274,10 +274,8 @@ static void rt2500pci_config_filter(struct rt2x00_dev *rt2x00dev,
                           !(filter_flags & FIF_PLCPFAIL));
        rt2x00_set_field32(&reg, RXCSR0_DROP_CONTROL,
                           !(filter_flags & FIF_CONTROL));
-       rt2x00_set_field32(&reg, RXCSR0_DROP_NOT_TO_ME,
-                          !(filter_flags & FIF_PROMISC_IN_BSS));
+       rt2x00_set_field32(&reg, RXCSR0_DROP_NOT_TO_ME, 1);
        rt2x00_set_field32(&reg, RXCSR0_DROP_TODS,
-                          !(filter_flags & FIF_PROMISC_IN_BSS) &&
                           !rt2x00dev->intf_ap_count);
        rt2x00_set_field32(&reg, RXCSR0_DROP_VERSION_ERROR, 1);
        rt2x00_set_field32(&reg, RXCSR0_DROP_MCAST,
index 05c64597838d6610c876ad548b2b6b6f8c536c18..237bbb54c7a89bc8419fc50d630529b495027134 100644 (file)
@@ -434,10 +434,8 @@ static void rt2500usb_config_filter(struct rt2x00_dev *rt2x00dev,
                           !(filter_flags & FIF_PLCPFAIL));
        rt2x00_set_field16(&reg, TXRX_CSR2_DROP_CONTROL,
                           !(filter_flags & FIF_CONTROL));
-       rt2x00_set_field16(&reg, TXRX_CSR2_DROP_NOT_TO_ME,
-                          !(filter_flags & FIF_PROMISC_IN_BSS));
+       rt2x00_set_field16(&reg, TXRX_CSR2_DROP_NOT_TO_ME, 1);
        rt2x00_set_field16(&reg, TXRX_CSR2_DROP_TODS,
-                          !(filter_flags & FIF_PROMISC_IN_BSS) &&
                           !rt2x00dev->intf_ap_count);
        rt2x00_set_field16(&reg, TXRX_CSR2_DROP_VERSION_ERROR, 1);
        rt2x00_set_field16(&reg, TXRX_CSR2_DROP_MULTICAST,
index be2d54f257b1029c2434f8d0e4fe4ac4934e8306..dfeca8355b22f0972c3665906ec5726a9073ec75 100644 (file)
@@ -1513,8 +1513,7 @@ void rt2800_config_filter(struct rt2x00_dev *rt2x00dev,
                           !(filter_flags & FIF_FCSFAIL));
        rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_PHY_ERROR,
                           !(filter_flags & FIF_PLCPFAIL));
-       rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_NOT_TO_ME,
-                          !(filter_flags & FIF_PROMISC_IN_BSS));
+       rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_NOT_TO_ME, 1);
        rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_NOT_MY_BSSD, 0);
        rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_VER_ERROR, 1);
        rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_MULTICAST,
@@ -7818,21 +7817,25 @@ EXPORT_SYMBOL_GPL(rt2800_probe_hw);
 /*
  * IEEE80211 stack callback functions.
  */
-void rt2800_get_tkip_seq(struct ieee80211_hw *hw, u8 hw_key_idx, u32 *iv32,
-                        u16 *iv16)
+void rt2800_get_key_seq(struct ieee80211_hw *hw,
+                       struct ieee80211_key_conf *key,
+                       struct ieee80211_key_seq *seq)
 {
        struct rt2x00_dev *rt2x00dev = hw->priv;
        struct mac_iveiv_entry iveiv_entry;
        u32 offset;
 
-       offset = MAC_IVEIV_ENTRY(hw_key_idx);
+       if (key->cipher != WLAN_CIPHER_SUITE_TKIP)
+               return;
+
+       offset = MAC_IVEIV_ENTRY(key->hw_key_idx);
        rt2800_register_multiread(rt2x00dev, offset,
                                      &iveiv_entry, sizeof(iveiv_entry));
 
-       memcpy(iv16, &iveiv_entry.iv[0], sizeof(*iv16));
-       memcpy(iv32, &iveiv_entry.iv[4], sizeof(*iv32));
+       memcpy(&seq->tkip.iv16, &iveiv_entry.iv[0], 2);
+       memcpy(&seq->tkip.iv32, &iveiv_entry.iv[4], 4);
 }
-EXPORT_SYMBOL_GPL(rt2800_get_tkip_seq);
+EXPORT_SYMBOL_GPL(rt2800_get_key_seq);
 
 int rt2800_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
 {
index 3019db637a4ba3d71e74edf6e07ffc58860f536e..1609b8a7f7ebcbc801ff3eb71cd27acd7074a968 100644 (file)
@@ -209,8 +209,9 @@ int rt2800_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev);
 
 int rt2800_probe_hw(struct rt2x00_dev *rt2x00dev);
 
-void rt2800_get_tkip_seq(struct ieee80211_hw *hw, u8 hw_key_idx, u32 *iv32,
-                        u16 *iv16);
+void rt2800_get_key_seq(struct ieee80211_hw *hw,
+                       struct ieee80211_key_conf *key,
+                       struct ieee80211_key_seq *seq);
 int rt2800_set_rts_threshold(struct ieee80211_hw *hw, u32 value);
 int rt2800_conf_tx(struct ieee80211_hw *hw,
                   struct ieee80211_vif *vif, u16 queue_idx,
index cc1b3cc73c5aeda7dc698b18cb8bf1d2a5cefa2e..0af22573a2eb5daa478ffbe0dadcca7feb2eb104 100644 (file)
@@ -309,7 +309,7 @@ static const struct ieee80211_ops rt2800pci_mac80211_ops = {
        .sw_scan_start          = rt2x00mac_sw_scan_start,
        .sw_scan_complete       = rt2x00mac_sw_scan_complete,
        .get_stats              = rt2x00mac_get_stats,
-       .get_tkip_seq           = rt2800_get_tkip_seq,
+       .get_key_seq            = rt2800_get_key_seq,
        .set_rts_threshold      = rt2800_set_rts_threshold,
        .sta_add                = rt2x00mac_sta_add,
        .sta_remove             = rt2x00mac_sta_remove,
index aaa7aa4cad9dd5a9ca9d73846d15f11a0b12226a..a985a5a7945e77173867667a38f2dbe43d869cca 100644 (file)
@@ -148,7 +148,7 @@ static const struct ieee80211_ops rt2800soc_mac80211_ops = {
        .sw_scan_start          = rt2x00mac_sw_scan_start,
        .sw_scan_complete       = rt2x00mac_sw_scan_complete,
        .get_stats              = rt2x00mac_get_stats,
-       .get_tkip_seq           = rt2800_get_tkip_seq,
+       .get_key_seq            = rt2800_get_key_seq,
        .set_rts_threshold      = rt2800_set_rts_threshold,
        .sta_add                = rt2x00mac_sta_add,
        .sta_remove             = rt2x00mac_sta_remove,
index 6ec2466b52b6ccd686341e2ed53df35f0b0370e0..5932306084fd305a6f45ccf03d8957b771eaaa13 100644 (file)
@@ -835,7 +835,7 @@ static const struct ieee80211_ops rt2800usb_mac80211_ops = {
        .sw_scan_start          = rt2x00mac_sw_scan_start,
        .sw_scan_complete       = rt2x00mac_sw_scan_complete,
        .get_stats              = rt2x00mac_get_stats,
-       .get_tkip_seq           = rt2800_get_tkip_seq,
+       .get_key_seq            = rt2800_get_key_seq,
        .set_rts_threshold      = rt2800_set_rts_threshold,
        .sta_add                = rt2x00mac_sta_add,
        .sta_remove             = rt2x00mac_sta_remove,
index 300876df056f59a84b55ce288a735df9fb149ac7..1b8a459a412ba68af39734afe91c8667b239163c 100644 (file)
@@ -359,8 +359,7 @@ void rt2x00mac_configure_filter(struct ieee80211_hw *hw,
            FIF_PLCPFAIL |
            FIF_CONTROL |
            FIF_PSPOLL |
-           FIF_OTHER_BSS |
-           FIF_PROMISC_IN_BSS;
+           FIF_OTHER_BSS;
 
        /*
         * Apply some rules to the filters:
@@ -369,9 +368,6 @@ void rt2x00mac_configure_filter(struct ieee80211_hw *hw,
         * - Multicast filter seems to kill broadcast traffic so never use it.
         */
        *total_flags |= FIF_ALLMULTI;
-       if (*total_flags & FIF_OTHER_BSS ||
-           *total_flags & FIF_PROMISC_IN_BSS)
-               *total_flags |= FIF_PROMISC_IN_BSS | FIF_OTHER_BSS;
 
        /*
         * If the device has a single filter for all control frames,
index 819455009fe4d042a954a03a721a81b03e4b4dd4..c8a967247a9a300e8ffcd3e4608b466080f53161 100644 (file)
@@ -530,10 +530,8 @@ static void rt61pci_config_filter(struct rt2x00_dev *rt2x00dev,
                           !(filter_flags & FIF_PLCPFAIL));
        rt2x00_set_field32(&reg, TXRX_CSR0_DROP_CONTROL,
                           !(filter_flags & (FIF_CONTROL | FIF_PSPOLL)));
-       rt2x00_set_field32(&reg, TXRX_CSR0_DROP_NOT_TO_ME,
-                          !(filter_flags & FIF_PROMISC_IN_BSS));
+       rt2x00_set_field32(&reg, TXRX_CSR0_DROP_NOT_TO_ME, 1);
        rt2x00_set_field32(&reg, TXRX_CSR0_DROP_TO_DS,
-                          !(filter_flags & FIF_PROMISC_IN_BSS) &&
                           !rt2x00dev->intf_ap_count);
        rt2x00_set_field32(&reg, TXRX_CSR0_DROP_VERSION_ERROR, 1);
        rt2x00_set_field32(&reg, TXRX_CSR0_DROP_MULTICAST,
index a5458cf01fb26be50976c3dd4699e476e540c3c5..65ce3afb888adeb69e51498270914b89f7a4b40f 100644 (file)
@@ -480,10 +480,8 @@ static void rt73usb_config_filter(struct rt2x00_dev *rt2x00dev,
                           !(filter_flags & FIF_PLCPFAIL));
        rt2x00_set_field32(&reg, TXRX_CSR0_DROP_CONTROL,
                           !(filter_flags & (FIF_CONTROL | FIF_PSPOLL)));
-       rt2x00_set_field32(&reg, TXRX_CSR0_DROP_NOT_TO_ME,
-                          !(filter_flags & FIF_PROMISC_IN_BSS));
+       rt2x00_set_field32(&reg, TXRX_CSR0_DROP_NOT_TO_ME, 1);
        rt2x00_set_field32(&reg, TXRX_CSR0_DROP_TO_DS,
-                          !(filter_flags & FIF_PROMISC_IN_BSS) &&
                           !rt2x00dev->intf_ap_count);
        rt2x00_set_field32(&reg, TXRX_CSR0_DROP_VERSION_ERROR, 1);
        rt2x00_set_field32(&reg, TXRX_CSR0_DROP_MULTICAST,
index 5cf509d346e8f61f76ee00254c6cebab1f53ac2b..73067cac289c69c93560cb0801e181012b2d00ee 100644 (file)
@@ -100,7 +100,7 @@ config RTL8821AE
        select RTLWIFI_PCI
        select RTLBTCOEXIST
        ---help---
-       This is the driver for Realtek RTL8i821AE/RTL8812AE 802.11av PCIe
+       This is the driver for Realtek RTL8821AE/RTL8812AE 802.11ac PCIe
        wireless network adapters.
 
        If you choose to build it as a module, it will be called rtl8821ae
index cefe26991421b8e42c22ba6ef6beb4be0adb9d95..f2b9d11adc9eadca0d1cc833614c4c7cc69536c0 100644 (file)
@@ -1286,8 +1286,11 @@ static void btc8723b2ant_ps_tdma(struct btc_coexist *btcoexist, bool force_exec,
                                                    0x12, 0xe1, 0x90);
                        break;
                case 3:
-                       btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c,
-                                                   0x3, 0xf1, 0x90);
+                       /* This call breaks BT when wireless is active -
+                        * comment it out for now until a better fix is found:
+                        * btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x1c,
+                        *                          0x3, 0xf1, 0x90);
+                        */
                        break;
                case 4:
                        btc8723b2ant_set_fw_ps_tdma(btcoexist, 0xe3, 0x10,
index 82733c6b8c46e66d79a6d1a74cf1903214744ef7..782ac2fc4b28f4d768f5f1142794bedfbc85bec0 100644 (file)
@@ -27,8 +27,7 @@
 #define __RTL_CORE_H__
 
 #define RTL_SUPPORTED_FILTERS          \
-       (FIF_PROMISC_IN_BSS | \
-       FIF_ALLMULTI | FIF_CONTROL | \
+       (FIF_ALLMULTI | FIF_CONTROL | \
        FIF_OTHER_BSS | \
        FIF_FCSFAIL | \
        FIF_BCN_PRBRESP_PROMISC)
index 1893d01b9e789c1bca2f25f1e0fa5d067da99cfc..a62bf0a65c321bb73553c403c5233ccd30d1f8c7 100644 (file)
@@ -40,6 +40,7 @@ static struct country_code_to_enum_rd allCountries[] = {
        {COUNTRY_CODE_GLOBAL_DOMAIN, "JP"},
        {COUNTRY_CODE_WORLD_WIDE_13, "EC"},
        {COUNTRY_CODE_TELEC_NETGEAR, "EC"},
+       {COUNTRY_CODE_WORLD_WIDE_13_5G_ALL, "US"},
 };
 
 /*
@@ -124,6 +125,17 @@ static const struct ieee80211_regdomain rtl_regdom_14_60_64 = {
                      }
 };
 
+static const struct ieee80211_regdomain rtl_regdom_12_13_5g_all = {
+       .n_reg_rules = 4,
+       .alpha2 = "99",
+       .reg_rules = {
+                       RTL819x_2GHZ_CH01_11,
+                       RTL819x_2GHZ_CH12_13,
+                       RTL819x_5GHZ_5150_5350,
+                       RTL819x_5GHZ_5470_5850,
+               }
+};
+
 static const struct ieee80211_regdomain rtl_regdom_14 = {
        .n_reg_rules = 3,
        .alpha2 = "99",
@@ -348,6 +360,8 @@ static const struct ieee80211_regdomain *_rtl_regdomain_select(
                return &rtl_regdom_14_60_64;
        case COUNTRY_CODE_GLOBAL_DOMAIN:
                return &rtl_regdom_14;
+       case COUNTRY_CODE_WORLD_WIDE_13_5G_ALL:
+               return &rtl_regdom_12_13_5g_all;
        default:
                return &rtl_regdom_no_midband;
        }
@@ -384,6 +398,25 @@ static struct country_code_to_enum_rd *_rtl_regd_find_country(u16 countrycode)
        return NULL;
 }
 
+static u8 channel_plan_to_country_code(u8 channelplan)
+{
+       switch (channelplan) {
+       case 0x20:
+       case 0x21:
+               return COUNTRY_CODE_WORLD_WIDE_13;
+       case 0x22:
+               return COUNTRY_CODE_IC;
+       case 0x32:
+               return COUNTRY_CODE_TELEC_NETGEAR;
+       case 0x41:
+               return COUNTRY_CODE_GLOBAL_DOMAIN;
+       case 0x7f:
+               return COUNTRY_CODE_WORLD_WIDE_13_5G_ALL;
+       default:
+               return COUNTRY_CODE_MAX; /*Error*/
+       }
+}
+
 int rtl_regd_init(struct ieee80211_hw *hw,
                  void (*reg_notifier)(struct wiphy *wiphy,
                                       struct regulatory_request *request))
@@ -396,11 +429,12 @@ int rtl_regd_init(struct ieee80211_hw *hw,
                return -EINVAL;
 
        /* init country_code from efuse channel plan */
-       rtlpriv->regd.country_code = rtlpriv->efuse.channel_plan;
+       rtlpriv->regd.country_code =
+               channel_plan_to_country_code(rtlpriv->efuse.channel_plan);
 
-       RT_TRACE(rtlpriv, COMP_REGD, DBG_TRACE,
-                "rtl: EEPROM regdomain: 0x%0x\n",
-                 rtlpriv->regd.country_code);
+       RT_TRACE(rtlpriv, COMP_REGD, DBG_DMESG,
+                "rtl: EEPROM regdomain: 0x%0x country code: %d\n",
+                rtlpriv->efuse.channel_plan, rtlpriv->regd.country_code);
 
        if (rtlpriv->regd.country_code >= COUNTRY_CODE_MAX) {
                RT_TRACE(rtlpriv, COMP_REGD, DBG_DMESG,
index 3bbbaaa68530ef8678fbc0b65e5b91f81d50e301..f7f15bce35dd61309098b50d4ff08bee701dc5b3 100644 (file)
@@ -49,6 +49,7 @@ enum country_code_type_t {
        COUNTRY_CODE_GLOBAL_DOMAIN = 10,
        COUNTRY_CODE_WORLD_WIDE_13 = 11,
        COUNTRY_CODE_TELEC_NETGEAR = 12,
+       COUNTRY_CODE_WORLD_WIDE_13_5G_ALL = 13,
 
        /*add new channel plan above this line */
        COUNTRY_CODE_MAX
index 86ce5b1930e6d2824b66f7d5c95210c59fca6cb7..8ee83b093c0df4c2cca7e3368bdf46794980ec96 100644 (file)
@@ -1354,27 +1354,11 @@ void rtl88ee_set_qos(struct ieee80211_hw *hw, int aci)
        }
 }
 
-static void rtl88ee_clear_interrupt(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u32 tmp;
-
-       tmp = rtl_read_dword(rtlpriv, REG_HISR);
-       rtl_write_dword(rtlpriv, REG_HISR, tmp);
-
-       tmp = rtl_read_dword(rtlpriv, REG_HISRE);
-       rtl_write_dword(rtlpriv, REG_HISRE, tmp);
-
-       tmp = rtl_read_dword(rtlpriv, REG_HSISR);
-       rtl_write_dword(rtlpriv, REG_HSISR, tmp);
-}
-
 void rtl88ee_enable_interrupt(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
 
-       rtl88ee_clear_interrupt(hw);/*clear it here first*/
        rtl_write_dword(rtlpriv, REG_HIMR,
                        rtlpci->irq_mask[0] & 0xFFFFFFFF);
        rtl_write_dword(rtlpriv, REG_HIMRE,
@@ -1919,8 +1903,8 @@ static void _rtl88ee_read_adapter_info(struct ieee80211_hw *hw)
                 "dev_addr: %pM\n", rtlefuse->dev_addr);
        /*channel plan */
        rtlefuse->eeprom_channelplan = hwinfo[EEPROM_CHANNELPLAN];
-       /* set channel paln to world wide 13 */
-       rtlefuse->channel_plan = COUNTRY_CODE_WORLD_WIDE_13;
+       /* set channel plan from efuse */
+       rtlefuse->channel_plan = rtlefuse->eeprom_channelplan;
        /*tx power*/
        _rtl88ee_read_txpower_info_from_hwpg(hw,
                                             rtlefuse->autoload_failflag,
index ef28c8ea1e84601b34417a8f6760b2b824a235f8..02013df968a0abe09b89cd1c6ccc4ba4c70692b9 100644 (file)
@@ -23,7 +23,7 @@
  *
  *****************************************************************************/
 
-#include "pwrseqcmd.h"
+#include "../pwrseqcmd.h"
 #include "pwrseq.h"
 
 /* drivers should parse below arrays and do the corresponding actions */
index 79103347d96759c91cd67c1e49171d76381572a5..f2d9c6116e5c7f92f47f9845498bd52683d9975a 100644 (file)
@@ -26,7 +26,7 @@
 #ifndef __RTL8723E_PWRSEQ_H__
 #define __RTL8723E_PWRSEQ_H__
 
-#include "pwrseqcmd.h"
+#include "../pwrseqcmd.h"
 /* Check document WM-20110607-Paul-RTL8188EE_Power_Architecture-R02.vsd
  *     There are 6 HW Power States:
  *     0: POFF--Power Off
index d310d55d800efd584f9dd9a6d8e143539c0c777c..189859617db803e4440cff48568a3165790f4a45 100644 (file)
@@ -889,7 +889,7 @@ static int _rtl92cu_init_mac(struct ieee80211_hw *hw)
        rtl92c_set_min_space(hw, IS_92C_SERIAL(rtlhal->version));
        rtl92c_init_beacon_parameters(hw, rtlhal->version);
        rtl92c_init_ampdu_aggregation(hw);
-       rtl92c_init_beacon_max_error(hw, true);
+       rtl92c_init_beacon_max_error(hw);
        return err;
 }
 
@@ -1323,7 +1323,6 @@ static int _rtl92cu_set_media_status(struct ieee80211_hw *hw,
        enum led_ctl_mode ledaction = LED_CTL_NO_LINK;
 
        bt_msr &= 0xfc;
-       rtl_write_byte(rtlpriv, REG_BCN_MAX_ERR, 0xFF);
        if (type == NL80211_IFTYPE_UNSPECIFIED || type ==
            NL80211_IFTYPE_STATION) {
                _rtl92cu_stop_tx_beacon(hw);
index adb810794eef71e27080cd9590a07911eba5123b..f3db6bc8596a3143436eb7db2710d825d7dee8d6 100644 (file)
@@ -613,7 +613,7 @@ void rtl92c_init_ampdu_aggregation(struct ieee80211_hw *hw)
        rtl_write_word(rtlpriv, 0x4CA, 0x0708);
 }
 
-void rtl92c_init_beacon_max_error(struct ieee80211_hw *hw, bool infra_mode)
+void rtl92c_init_beacon_max_error(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
 
index bf53652e4eddb3428c5e80850c0a3aa59e4346f3..58548e8f2c41b64276ec1c696b6bf03fd79fc4b9 100644 (file)
@@ -66,7 +66,7 @@ void rtl92c_init_edca_param(struct ieee80211_hw *hw,
 
 void rtl92c_init_edca(struct ieee80211_hw *hw);
 void rtl92c_init_ampdu_aggregation(struct ieee80211_hw *hw);
-void rtl92c_init_beacon_max_error(struct ieee80211_hw *hw, bool infra_mode);
+void rtl92c_init_beacon_max_error(struct ieee80211_hw *hw);
 void rtl92c_init_rdg_setting(struct ieee80211_hw *hw);
 void rtl92c_init_retry_function(struct ieee80211_hw *hw);
 
index c5d4b8013cdefe4cdec015c2c1c2819c5b8e8321..232865cc3ffdd01a7672d8e4cf38203f33a8285d 100644 (file)
@@ -875,7 +875,7 @@ static void _rtl92ee_c2h_content_parsing(struct ieee80211_hw *hw, u8 c2h_cmd_id,
                break;
        default:
                RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
-                        "[C2H], Unkown packet!! CmdId(%#X)!\n", c2h_cmd_id);
+                        "[C2H], Unknown packet!! CmdId(%#X)!\n", c2h_cmd_id);
                break;
        }
 }
index da0a6125f314b7c582eefb5ea297fd2efd593ca0..5f14308e8eb35e9914724ce8b610e14497dccef7 100644 (file)
@@ -1584,28 +1584,11 @@ void rtl92ee_set_qos(struct ieee80211_hw *hw, int aci)
        }
 }
 
-static void rtl92ee_clear_interrupt(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u32 tmp;
-
-       tmp = rtl_read_dword(rtlpriv, REG_HISR);
-       rtl_write_dword(rtlpriv, REG_HISR, tmp);
-
-       tmp = rtl_read_dword(rtlpriv, REG_HISRE);
-       rtl_write_dword(rtlpriv, REG_HISRE, tmp);
-
-       tmp = rtl_read_dword(rtlpriv, REG_HSISR);
-       rtl_write_dword(rtlpriv, REG_HSISR, tmp);
-}
-
 void rtl92ee_enable_interrupt(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
 
-       rtl92ee_clear_interrupt(hw);/*clear it here first*/
-
        rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF);
        rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF);
        rtlpci->irq_enabled = true;
@@ -2194,8 +2177,8 @@ static void _rtl92ee_read_adapter_info(struct ieee80211_hw *hw)
                 "dev_addr: %pM\n", rtlefuse->dev_addr);
        /*channel plan */
        rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN];
-       /* set channel paln to world wide 13 */
-       rtlefuse->channel_plan = COUNTRY_CODE_WORLD_WIDE_13;
+       /* set channel plan from efuse */
+       rtlefuse->channel_plan = rtlefuse->eeprom_channelplan;
        /*tx power*/
        _rtl92ee_read_txpower_info_from_hwpg(hw, rtlefuse->autoload_failflag,
                                             hwinfo);
index 67bb47d77b68c5d45bd6bb6dc0f0721c2461fa2c..a4b7eac6856f2cac9937807e61c05d94cf22a689 100644 (file)
@@ -1258,18 +1258,6 @@ void rtl8723e_set_qos(struct ieee80211_hw *hw, int aci)
        }
 }
 
-static void rtl8723e_clear_interrupt(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u32 tmp;
-
-       tmp = rtl_read_dword(rtlpriv, REG_HISR);
-       rtl_write_dword(rtlpriv, REG_HISR, tmp);
-
-       tmp = rtl_read_dword(rtlpriv, REG_HISRE);
-       rtl_write_dword(rtlpriv, REG_HISRE, tmp);
-}
-
 void rtl8723e_enable_interrupt(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -1284,7 +1272,6 @@ void rtl8723e_disable_interrupt(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
-       rtl8723e_clear_interrupt(hw);/*clear it here first*/
        rtl_write_dword(rtlpriv, 0x3a8, IMR8190_DISABLED);
        rtl_write_dword(rtlpriv, 0x3ac, IMR8190_DISABLED);
        rtlpci->irq_enabled = false;
index 69d4f0fc1af1c1ed9ec459cb97e461e0f7adf880..d5da0f3c121728c4524df9775925709da3804fad 100644 (file)
@@ -613,7 +613,7 @@ static void _rtl8723be_c2h_content_parsing(struct ieee80211_hw *hw,
                break;
        default:
                RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
-                        "[C2H], Unkown packet!! CmdId(%#X)!\n", c2h_cmd_id);
+                        "[C2H], Unknown packet!! CmdId(%#X)!\n", c2h_cmd_id);
                break;
        }
 }
index b681af3c7a355d66fb411c71dcdcbba5bb8dfc01..c983d2fe147f2bf1da4923f2ddc8f2bbbfcd90de 100644 (file)
@@ -1634,28 +1634,11 @@ void rtl8723be_set_qos(struct ieee80211_hw *hw, int aci)
        }
 }
 
-static void rtl8723be_clear_interrupt(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u32 tmp;
-
-       tmp = rtl_read_dword(rtlpriv, REG_HISR);
-       rtl_write_dword(rtlpriv, REG_HISR, tmp);
-
-       tmp = rtl_read_dword(rtlpriv, REG_HISRE);
-       rtl_write_dword(rtlpriv, REG_HISRE, tmp);
-
-       tmp = rtl_read_dword(rtlpriv, REG_HSISR);
-       rtl_write_dword(rtlpriv, REG_HSISR, tmp);
-}
-
 void rtl8723be_enable_interrupt(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
 
-       rtl8723be_clear_interrupt(hw);/*clear it here first*/
-
        rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF);
        rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF);
        rtlpci->irq_enabled = true;
@@ -2139,8 +2122,8 @@ static void _rtl8723be_read_adapter_info(struct ieee80211_hw *hw,
        RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
                 "EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid);
 
-       /* set channel plan to world wide 13 */
-       rtlefuse->channel_plan = COUNTRY_CODE_WORLD_WIDE_13;
+       /* set channel plan from efuse */
+       rtlefuse->channel_plan = rtlefuse->eeprom_channelplan;
 
        if (rtlhal->oem_id == RT_CID_DEFAULT) {
                /* Does this one have a Toshiba SMID from group 1? */
index 8704eee9f3a495108e93135d6e306a090c322bc7..3236d44b459df69efd4f2b363f4b651e478a10a4 100644 (file)
@@ -2253,31 +2253,11 @@ void rtl8821ae_set_qos(struct ieee80211_hw *hw, int aci)
        }
 }
 
-static void rtl8821ae_clear_interrupt(struct ieee80211_hw *hw)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u32 tmp;
-       tmp = rtl_read_dword(rtlpriv, REG_HISR);
-       /*printk("clear interrupt first:\n");
-       printk("0x%x = 0x%08x\n",REG_HISR, tmp);*/
-       rtl_write_dword(rtlpriv, REG_HISR, tmp);
-
-       tmp = rtl_read_dword(rtlpriv, REG_HISRE);
-       /*printk("0x%x = 0x%08x\n",REG_HISRE, tmp);*/
-       rtl_write_dword(rtlpriv, REG_HISRE, tmp);
-
-       tmp = rtl_read_dword(rtlpriv, REG_HSISR);
-       /*printk("0x%x = 0x%08x\n",REG_HSISR, tmp);*/
-       rtl_write_dword(rtlpriv, REG_HSISR, tmp);
-}
-
 void rtl8821ae_enable_interrupt(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
 
-       rtl8821ae_clear_interrupt(hw);/*clear it here first*/
-
        rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF);
        rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF);
        rtlpci->irq_enabled = true;
@@ -3232,8 +3212,8 @@ static void _rtl8821ae_read_adapter_info(struct ieee80211_hw *hw, bool b_pseudo_
        if (rtlefuse->eeprom_channelplan == 0xff)
                rtlefuse->eeprom_channelplan = 0x7F;
 
-       /* set channel paln to world wide 13 */
-       /* rtlefuse->channel_plan = (u8)rtlefuse->eeprom_channelplan; */
+       /* set channel plan from efuse */
+       rtlefuse->channel_plan = rtlefuse->eeprom_channelplan;
 
        /*parse xtal*/
        rtlefuse->crystalcap = hwinfo[EEPROM_XTAL_8821AE];
index 5d54d16a59e7151942246ff1d737af28c332fadc..f238ee54226c4771fd26f2f95c293e0a3414c5a7 100644 (file)
@@ -763,8 +763,7 @@ static u64 wl1251_op_prepare_multicast(struct ieee80211_hw *hw,
        return (u64)(unsigned long)fp;
 }
 
-#define WL1251_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
-                                 FIF_ALLMULTI | \
+#define WL1251_SUPPORTED_FILTERS (FIF_ALLMULTI | \
                                  FIF_FCSFAIL | \
                                  FIF_BCN_PRBRESP_PROMISC | \
                                  FIF_CONTROL | \
@@ -795,10 +794,6 @@ static void wl1251_op_configure_filter(struct ieee80211_hw *hw,
        wl->rx_config = WL1251_DEFAULT_RX_CONFIG;
        wl->rx_filter = WL1251_DEFAULT_RX_FILTER;
 
-       if (*total & FIF_PROMISC_IN_BSS) {
-               wl->rx_config |= CFG_BSSID_FILTER_EN;
-               wl->rx_config |= CFG_RX_ALL_GOOD;
-       }
        if (*total & FIF_ALLMULTI)
                /*
                 * CFG_MC_FILTER_EN in rx_config needs to be 0 to receive
@@ -825,7 +820,7 @@ static void wl1251_op_configure_filter(struct ieee80211_hw *hw,
        if (ret < 0)
                goto out;
 
-       if (*total & FIF_ALLMULTI || *total & FIF_PROMISC_IN_BSS)
+       if (*total & FIF_ALLMULTI)
                ret = wl1251_acx_group_address_tbl(wl, false, NULL, 0);
        else if (fp)
                ret = wl1251_acx_group_address_tbl(wl, fp->enabled,
index 717c4f5a02c2a28ce9d7f0cab1fb70bda6121ace..49aca2cf76050200771705ecb6c535ba1eee804e 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/ip.h>
 #include <linux/firmware.h>
 #include <linux/etherdevice.h>
+#include <linux/irq.h>
 
 #include "../wlcore/wlcore.h"
 #include "../wlcore/debug.h"
@@ -578,7 +579,7 @@ static struct wl18xx_priv_conf wl18xx_default_priv_conf = {
 
 static const struct wlcore_partition_set wl18xx_ptable[PART_TABLE_LEN] = {
        [PART_TOP_PRCM_ELP_SOC] = {
-               .mem  = { .start = 0x00A02000, .size  = 0x00010000 },
+               .mem  = { .start = 0x00A00000, .size  = 0x00012000 },
                .reg  = { .start = 0x00807000, .size  = 0x00005000 },
                .mem2 = { .start = 0x00800000, .size  = 0x0000B000 },
                .mem3 = { .start = 0x00000000, .size  = 0x00000000 },
@@ -862,6 +863,7 @@ static int wl18xx_pre_upload(struct wl1271 *wl)
 {
        u32 tmp;
        int ret;
+       u16 irq_invert;
 
        BUILD_BUG_ON(sizeof(struct wl18xx_mac_and_phy_params) >
                WL18XX_PHY_INIT_MEM_SIZE);
@@ -911,6 +913,28 @@ static int wl18xx_pre_upload(struct wl1271 *wl)
        /* re-enable FDSP clock */
        ret = wlcore_write32(wl, WL18XX_PHY_FPGA_SPARE_1,
                             MEM_FDSP_CLK_120_ENABLE);
+       if (ret < 0)
+               goto out;
+
+       ret = irq_get_trigger_type(wl->irq);
+       if ((ret == IRQ_TYPE_LEVEL_LOW) || (ret == IRQ_TYPE_EDGE_FALLING)) {
+               wl1271_info("using inverted interrupt logic: %d", ret);
+               ret = wlcore_set_partition(wl,
+                                          &wl->ptable[PART_TOP_PRCM_ELP_SOC]);
+               if (ret < 0)
+                       goto out;
+
+               ret = wl18xx_top_reg_read(wl, TOP_FN0_CCCR_REG_32, &irq_invert);
+               if (ret < 0)
+                       goto out;
+
+               irq_invert |= BIT(1);
+               ret = wl18xx_top_reg_write(wl, TOP_FN0_CCCR_REG_32, irq_invert);
+               if (ret < 0)
+                       goto out;
+
+               ret = wlcore_set_partition(wl, &wl->ptable[PART_PHY_INIT]);
+       }
 
 out:
        return ret;
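
The hunk above keys the inverted-IRQ setup off the trigger type that was requested for the chip's interrupt line: irq_get_trigger_type() returns the IRQ_TYPE_* flags for an irq number, so the driver can adapt to active-low or falling-edge wiring. A hedged sketch of that check in isolation:

    #include <linux/irq.h>

    /* Sketch: report whether an interrupt line is wired with inverted
     * (active-low / falling-edge) logic, as wl18xx_pre_upload() tests.
     */
    static bool irq_is_inverted(unsigned int irq)
    {
            u32 type = irq_get_trigger_type(irq);

            return type == IRQ_TYPE_LEVEL_LOW || type == IRQ_TYPE_EDGE_FALLING;
    }
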
@@ -1351,9 +1375,10 @@ out:
 }
 
 #define WL18XX_CONF_FILE_NAME "ti-connectivity/wl18xx-conf.bin"
-static int wl18xx_conf_init(struct wl1271 *wl, struct device *dev)
+
+static int wl18xx_load_conf_file(struct device *dev, struct wlcore_conf *conf,
+                                struct wl18xx_priv_conf *priv_conf)
 {
-       struct wl18xx_priv *priv = wl->priv;
        struct wlcore_conf_file *conf_file;
        const struct firmware *fw;
        int ret;
@@ -1362,14 +1387,14 @@ static int wl18xx_conf_init(struct wl1271 *wl, struct device *dev)
        if (ret < 0) {
                wl1271_error("could not get configuration binary %s: %d",
                             WL18XX_CONF_FILE_NAME, ret);
-               goto out_fallback;
+               return ret;
        }
 
        if (fw->size != WL18XX_CONF_SIZE) {
                wl1271_error("configuration binary file size is wrong, expected %zu got %zu",
                             WL18XX_CONF_SIZE, fw->size);
                ret = -EINVAL;
-               goto out;
+               goto out_release;
        }
 
        conf_file = (struct wlcore_conf_file *) fw->data;
@@ -1379,7 +1404,7 @@ static int wl18xx_conf_init(struct wl1271 *wl, struct device *dev)
                             "expected 0x%0x got 0x%0x", WL18XX_CONF_MAGIC,
                             conf_file->header.magic);
                ret = -EINVAL;
-               goto out;
+               goto out_release;
        }
 
        if (conf_file->header.version != cpu_to_le32(WL18XX_CONF_VERSION)) {
@@ -1387,28 +1412,32 @@ static int wl18xx_conf_init(struct wl1271 *wl, struct device *dev)
                             "expected 0x%08x got 0x%08x",
                             WL18XX_CONF_VERSION, conf_file->header.version);
                ret = -EINVAL;
-               goto out;
+               goto out_release;
        }
 
-       memcpy(&wl->conf, &conf_file->core, sizeof(wl18xx_conf));
-       memcpy(&priv->conf, &conf_file->priv, sizeof(priv->conf));
+       memcpy(conf, &conf_file->core, sizeof(*conf));
+       memcpy(priv_conf, &conf_file->priv, sizeof(*priv_conf));
+
+out_release:
+       release_firmware(fw);
+       return ret;
+}
 
-       goto out;
+static int wl18xx_conf_init(struct wl1271 *wl, struct device *dev)
+{
+       struct wl18xx_priv *priv = wl->priv;
 
-out_fallback:
-       wl1271_warning("falling back to default config");
+       if (wl18xx_load_conf_file(dev, &wl->conf, &priv->conf) < 0) {
+               wl1271_warning("falling back to default config");
 
-       /* apply driver default configuration */
-       memcpy(&wl->conf, &wl18xx_conf, sizeof(wl18xx_conf));
-       /* apply default private configuration */
-       memcpy(&priv->conf, &wl18xx_default_priv_conf, sizeof(priv->conf));
+               /* apply driver default configuration */
+               memcpy(&wl->conf, &wl18xx_conf, sizeof(wl->conf));
+               /* apply default private configuration */
+               memcpy(&priv->conf, &wl18xx_default_priv_conf,
+                      sizeof(priv->conf));
+       }
 
-       /* For now we just fallback */
        return 0;
-
-out:
-       release_firmware(fw);
-       return ret;
 }
 
 static int wl18xx_plt_init(struct wl1271 *wl)
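
The refactor above splits the file-parsing path out of wl18xx_conf_init() so the fallback-to-defaults logic no longer shares goto labels with release_firmware(). The general request_firmware()-with-fallback shape, sketched with placeholder names (the blob name and default table are assumptions, not wl18xx specifics):

    #include <linux/firmware.h>
    #include <linux/string.h>

    /* Sketch: load an optional config blob, validate its size, and fall
     * back to built-in defaults on any failure.
     */
    static void load_conf_or_default(struct device *dev, void *conf,
                                     size_t len, const void *defaults)
    {
            const struct firmware *fw;

            if (request_firmware(&fw, "vendor/my-conf.bin", dev) == 0) {
                    if (fw->size == len) {
                            memcpy(conf, fw->data, len);
                            release_firmware(fw);
                            return;
                    }
                    release_firmware(fw);
            }
            memcpy(conf, defaults, len);    /* fall back to driver defaults */
    }
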
index a433a75f3cd7c85d51f67cfb8d03830e35c041be..bac2364c8e72d58bdc0cebe0611f0f15da31fe40 100644 (file)
 
 #define WL18XX_WELP_ARM_COMMAND                (WL18XX_REGISTERS_BASE + 0x7100)
 #define WL18XX_ENABLE                  (WL18XX_REGISTERS_BASE + 0x01543C)
+#define TOP_FN0_CCCR_REG_32            (WL18XX_TOP_OCP_BASE + 0x64)
 
 /* PRCM registers */
 #define PLATFORM_DETECTION             0xA0E3E0
index 0be807951afe3bbcca6db42ea98855ff8536254a..ef3fe0fff588b9789e1ece99cc8253b320cc5682 100644 (file)
@@ -3175,8 +3175,7 @@ static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
        return (u64)(unsigned long)fp;
 }
 
-#define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
-                                 FIF_ALLMULTI | \
+#define WL1271_SUPPORTED_FILTERS (FIF_ALLMULTI | \
                                  FIF_FCSFAIL | \
                                  FIF_BCN_PRBRESP_PROMISC | \
                                  FIF_CONTROL | \
@@ -5966,10 +5965,6 @@ static int wl12xx_get_hw_info(struct wl1271 *wl)
 {
        int ret;
 
-       ret = wl12xx_set_power_on(wl);
-       if (ret < 0)
-               return ret;
-
        ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
        if (ret < 0)
                goto out;
@@ -5985,7 +5980,6 @@ static int wl12xx_get_hw_info(struct wl1271 *wl)
                ret = wl->ops->get_mac(wl);
 
 out:
-       wl1271_power_off(wl);
        return ret;
 }
 
@@ -6077,7 +6071,8 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
                IEEE80211_HW_AMPDU_AGGREGATION |
                IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
                IEEE80211_HW_QUEUE_CONTROL |
-               IEEE80211_HW_CHANCTX_STA_CSA;
+               IEEE80211_HW_CHANCTX_STA_CSA |
+               IEEE80211_HW_SUPPORT_FAST_XMIT;
 
        wl->hw->wiphy->cipher_suites = cipher_suites;
        wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
@@ -6432,10 +6427,22 @@ static void wlcore_nvs_cb(const struct firmware *fw, void *context)
        else
                wl->irq_flags |= IRQF_ONESHOT;
 
+       ret = wl12xx_set_power_on(wl);
+       if (ret < 0)
+               goto out_free_nvs;
+
+       ret = wl12xx_get_hw_info(wl);
+       if (ret < 0) {
+               wl1271_error("couldn't get hw info");
+               wl1271_power_off(wl);
+               goto out_free_nvs;
+       }
+
        ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
                                   wl->irq_flags, pdev->name, wl);
        if (ret < 0) {
-               wl1271_error("request_irq() failed: %d", ret);
+               wl1271_error("interrupt configuration failed");
+               wl1271_power_off(wl);
                goto out_free_nvs;
        }
 
@@ -6449,12 +6456,7 @@ static void wlcore_nvs_cb(const struct firmware *fw, void *context)
        }
 #endif
        disable_irq(wl->irq);
-
-       ret = wl12xx_get_hw_info(wl);
-       if (ret < 0) {
-               wl1271_error("couldn't get hw info");
-               goto out_irq;
-       }
+       wl1271_power_off(wl);
 
        ret = wl->ops->identify_chip(wl);
        if (ret < 0)
index e7af261e91980a571ac537455942c2803fe96ab5..89b6f69f09c8537478e6fabd7b3b834aeee6fd8d 100644 (file)
@@ -1230,7 +1230,7 @@ static u64 zd_op_prepare_multicast(struct ieee80211_hw *hw,
 }
 
 #define SUPPORTED_FIF_FLAGS \
-       (FIF_PROMISC_IN_BSS | FIF_ALLMULTI | FIF_FCSFAIL | FIF_CONTROL | \
+       (FIF_ALLMULTI | FIF_FCSFAIL | FIF_CONTROL | \
        FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC)
 static void zd_op_configure_filter(struct ieee80211_hw *hw,
                        unsigned int changed_flags,
@@ -1256,7 +1256,7 @@ static void zd_op_configure_filter(struct ieee80211_hw *hw,
         * we will have some issue with IPv6 which uses multicast for link
         * layer address resolution.
         */
-       if (*new_flags & (FIF_PROMISC_IN_BSS | FIF_ALLMULTI))
+       if (*new_flags & FIF_ALLMULTI)
                zd_mc_add_all(&hash);
 
        spin_lock_irqsave(&mac->lock, flags);
index 0d2594395ffbc797671711603461148270f1a03f..f1b2c1721917415de201d2f8bd54dc2a231179f3 100644 (file)
@@ -52,7 +52,7 @@
  * event channels are limited resource. Split event channels are
  * enabled by default.
  */
-bool separate_tx_rx_irq = 1;
+bool separate_tx_rx_irq = true;
 module_param(separate_tx_rx_irq, bool, 0644);
 
 /* The time that packets can stay on the guest Rx internal queue
index e031c943286ef3f7765e42640397626d7555607c..c89ca26e254d3b4b4e92cd28418386e39f801fb6 100644 (file)
@@ -1560,9 +1560,8 @@ static int xennet_init_queue(struct netfront_queue *queue)
        spin_lock_init(&queue->tx_lock);
        spin_lock_init(&queue->rx_lock);
 
-       init_timer(&queue->rx_refill_timer);
-       queue->rx_refill_timer.data = (unsigned long)queue;
-       queue->rx_refill_timer.function = rx_refill_timeout;
+       setup_timer(&queue->rx_refill_timer, rx_refill_timeout,
+                   (unsigned long)queue);
 
        snprintf(queue->name, sizeof(queue->name), "%s-q%u",
                 queue->info->netdev->name, queue->id);
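
setup_timer() initializes the timer and fills in .function and .data in one call; on kernels of this vintage the two forms are equivalent, which is exactly the substitution this hunk makes:

    /* open-coded form */
    init_timer(&queue->rx_refill_timer);
    queue->rx_refill_timer.data = (unsigned long)queue;
    queue->rx_refill_timer.function = rx_refill_timeout;

    /* equivalent one-liner */
    setup_timer(&queue->rx_refill_timer, rx_refill_timeout,
                (unsigned long)queue);
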
index 0c064485d1c2c47e3c84b2ab25fd524bf200ea33..fdc60db608291b7e1fd81c6a3d774ba9c9c77467 100644 (file)
@@ -68,6 +68,9 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio, struct device_node *chi
                        phy->irq = mdio->irq[addr];
        }
 
+       if (of_property_read_bool(child, "broken-turn-around"))
+               mdio->phy_ignore_ta_mask |= 1 << addr;
+
        /* Associate the OF node with the device structure so it
         * can be looked up later */
        of_node_get(child);
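
of_property_read_bool() returns true whenever the named property exists on the node, so a bare boolean device-tree property can gate a per-address bitmask as the hunk above does for "broken-turn-around". A minimal sketch with a hypothetical property name:

    #include <linux/of.h>

    /* Sketch: set bit 'addr' in a mask when the child node carries a
     * boolean property; "my-quirk" is a placeholder property name.
     */
    static void apply_quirk_mask(struct device_node *child, int addr, u32 *mask)
    {
            if (of_property_read_bool(child, "my-quirk"))
                    *mask |= 1 << addr;
    }
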
index 00b7d9c9fe485a1c4553cda878b519526371e8b0..2f5b518b0e788c80761553f5b2e77762f06bfb26 100644 (file)
@@ -2150,7 +2150,7 @@ lcs_new_device(struct ccwgroup_device *ccwgdev)
        rc = lcs_detect(card);
        if (rc) {
                LCS_DBF_TEXT(2, setup, "dtctfail");
-               dev_err(&card->dev->dev,
+               dev_err(&ccwgdev->dev,
                        "Detecting a network adapter for LCS devices"
                        " failed with rc=%d (0x%x)\n", rc, rc);
                lcs_stopcard(card);
index 3abac028899f10d97d448122ce7696050f36dfe0..ba974a2e409fdc54363dba280951afb6bd373aa6 100644 (file)
@@ -175,6 +175,8 @@ struct qeth_sbp_info {
        __u32 supported_funcs;
        enum qeth_sbp_roles role;
        __u32 hostnotification:1;
+       __u32 reflect_promisc:1;
+       __u32 reflect_promisc_primary:1;
 };
 
 static inline int qeth_is_ipa_supported(struct qeth_ipa_info *ipa,
index 3466d3cb76474c2092cdd1d679d3a7704e793984..5e20fba37bfff268f23ea2e9ce214f71d7f7ff73 100644 (file)
@@ -645,7 +645,8 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
                                        card->info.hwtrap = 2;
                                qeth_schedule_recovery(card);
                                return NULL;
-                       case IPA_CMD_SETBRIDGEPORT:
+                       case IPA_CMD_SETBRIDGEPORT_IQD:
+                       case IPA_CMD_SETBRIDGEPORT_OSA:
                        case IPA_CMD_ADDRESS_CHANGE_NOTIF:
                                if (card->discipline->control_event_handler
                                                                (card, cmd))
index 7b55768a959201070ada6cbf81590390d1c3a172..beb4bdc26de5189e56d37d585a0bbd4de861c843 100644 (file)
@@ -237,6 +237,7 @@ static struct ipa_cmd_names qeth_ipa_cmd_names[] = {
        {IPA_CMD_DELGMAC,       "delgmac"},
        {IPA_CMD_SETVLAN,       "setvlan"},
        {IPA_CMD_DELVLAN,       "delvlan"},
+       {IPA_CMD_SETBRIDGEPORT_OSA,     "set_bridge_port(osa)"},
        {IPA_CMD_SETCCID,       "setccid"},
        {IPA_CMD_DELCCID,       "delccid"},
        {IPA_CMD_MODCCID,       "modccid"},
@@ -249,7 +250,7 @@ static struct ipa_cmd_names qeth_ipa_cmd_names[] = {
        {IPA_CMD_DELIP,         "delip"},
        {IPA_CMD_SETADAPTERPARMS, "setadapterparms"},
        {IPA_CMD_SET_DIAG_ASS,  "set_diag_ass"},
-       {IPA_CMD_SETBRIDGEPORT, "set_bridge_port"},
+       {IPA_CMD_SETBRIDGEPORT_IQD,     "set_bridge_port(hs)"},
        {IPA_CMD_CREATE_ADDR,   "create_addr"},
        {IPA_CMD_DESTROY_ADDR,  "destroy_addr"},
        {IPA_CMD_REGISTER_LOCAL_ADDR,   "register_local_addr"},
index 1558be1af72d0ac0b101386a36380e055cf57025..6cccc9a49edea429189337273e1a0c0378925362 100644 (file)
@@ -92,6 +92,7 @@ enum qeth_ipa_cmds {
        IPA_CMD_DELGMAC                 = 0x24,
        IPA_CMD_SETVLAN                 = 0x25,
        IPA_CMD_DELVLAN                 = 0x26,
+       IPA_CMD_SETBRIDGEPORT_OSA       = 0x2b,
        IPA_CMD_SETCCID                 = 0x41,
        IPA_CMD_DELCCID                 = 0x42,
        IPA_CMD_MODCCID                 = 0x43,
@@ -104,7 +105,7 @@ enum qeth_ipa_cmds {
        IPA_CMD_DELIP                   = 0xb7,
        IPA_CMD_SETADAPTERPARMS         = 0xb8,
        IPA_CMD_SET_DIAG_ASS            = 0xb9,
-       IPA_CMD_SETBRIDGEPORT           = 0xbe,
+       IPA_CMD_SETBRIDGEPORT_IQD       = 0xbe,
        IPA_CMD_CREATE_ADDR             = 0xc3,
        IPA_CMD_DESTROY_ADDR            = 0xc4,
        IPA_CMD_REGISTER_LOCAL_ADDR     = 0xd1,
index 0ea0869120cf4762db9223af4ddd8e78f6f3a1db..2e65b989a9ea529b4e69a555eee2f2fb494c2752 100644 (file)
@@ -137,7 +137,7 @@ static int qeth_setdel_makerc(struct qeth_card *card, int retcode)
                rc = 0;
                break;
        case IPA_RC_L2_UNSUPPORTED_CMD:
-               rc = -ENOSYS;
+               rc = -EOPNOTSUPP;
                break;
        case IPA_RC_L2_ADDR_TABLE_FULL:
                rc = -ENOSPC;
@@ -683,6 +683,39 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
        return rc ? -EINVAL : 0;
 }
 
+static void qeth_promisc_to_bridge(struct qeth_card *card)
+{
+       struct net_device *dev = card->dev;
+       enum qeth_ipa_promisc_modes promisc_mode;
+       int role;
+       int rc;
+
+       QETH_CARD_TEXT(card, 3, "pmisc2br");
+
+       if (!card->options.sbp.reflect_promisc)
+               return;
+       promisc_mode = (dev->flags & IFF_PROMISC) ? SET_PROMISC_MODE_ON
+                                               : SET_PROMISC_MODE_OFF;
+       if (promisc_mode == card->info.promisc_mode)
+               return;
+
+       if (promisc_mode == SET_PROMISC_MODE_ON) {
+               if (card->options.sbp.reflect_promisc_primary)
+                       role = QETH_SBP_ROLE_PRIMARY;
+               else
+                       role = QETH_SBP_ROLE_SECONDARY;
+       } else
+               role = QETH_SBP_ROLE_NONE;
+
+       rc = qeth_bridgeport_setrole(card, role);
+       QETH_DBF_TEXT_(SETUP, 2, "bpm%c%04x",
+                       (promisc_mode == SET_PROMISC_MODE_ON) ? '+' : '-', rc);
+       if (!rc) {
+               card->options.sbp.role = role;
+               card->info.promisc_mode = promisc_mode;
+       }
+}
+
 static void qeth_l2_set_multicast_list(struct net_device *dev)
 {
        struct qeth_card *card = dev->ml_priv;
@@ -704,9 +737,10 @@ static void qeth_l2_set_multicast_list(struct net_device *dev)
                qeth_l2_add_mc(card, ha->addr, 1);
 
        spin_unlock_bh(&card->mclock);
-       if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
-               return;
-       qeth_setadp_promisc_mode(card);
+       if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
+               qeth_setadp_promisc_mode(card);
+       else
+               qeth_promisc_to_bridge(card);
 }
 
 static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -994,7 +1028,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
        qeth_bridgeport_query_support(card);
        if (card->options.sbp.supported_funcs)
                dev_info(&card->gdev->dev,
-               "The device represents a HiperSockets Bridge Capable Port\n");
+               "The device represents a Bridge Capable Port\n");
        qeth_trace_features(card);
 
        if (!card->dev && qeth_l2_setup_netdev(card)) {
@@ -1247,7 +1281,8 @@ static int qeth_l2_control_event(struct qeth_card *card,
                                        struct qeth_ipa_cmd *cmd)
 {
        switch (cmd->hdr.command) {
-       case IPA_CMD_SETBRIDGEPORT:
+       case IPA_CMD_SETBRIDGEPORT_OSA:
+       case IPA_CMD_SETBRIDGEPORT_IQD:
                if (cmd->data.sbp.hdr.command_code ==
                                IPA_SBP_BRIDGE_PORT_STATE_CHANGE) {
                        qeth_bridge_state_change(card, cmd);
@@ -1533,7 +1568,7 @@ static void qeth_bridge_host_event_worker(struct work_struct *work)
 
        if (data->hostevs.lost_event_mask) {
                dev_info(&data->card->gdev->dev,
-"Address notification from the HiperSockets Bridge Port stopped %s (%s)\n",
+"Address notification from the Bridge Port stopped %s (%s)\n",
                        data->card->dev->name,
                        (data->hostevs.lost_event_mask == 0x01)
                        ? "Overflow"
@@ -1617,70 +1652,80 @@ static int qeth_bridgeport_makerc(struct qeth_card *card,
        struct _qeth_sbp_cbctl *cbctl, enum qeth_ipa_sbp_cmd setcmd)
 {
        int rc;
+       int is_iqd = (card->info.type == QETH_CARD_TYPE_IQD);
 
-       switch (cbctl->ipa_rc) {
-       case IPA_RC_SUCCESS:
+       if ((is_iqd && (cbctl->ipa_rc == IPA_RC_SUCCESS)) ||
+           (!is_iqd && (cbctl->ipa_rc == cbctl->cmd_rc)))
                switch (cbctl->cmd_rc) {
                case 0x0000:
                        rc = 0;
                        break;
+               case 0x2B04:
                case 0x0004:
-                       rc = -ENOSYS;
+                       rc = -EOPNOTSUPP;
                        break;
+               case 0x2B0C:
                case 0x000C: /* Not configured as bridge Port */
                        rc = -ENODEV; /* maybe not the best code here? */
                        dev_err(&card->gdev->dev,
-       "The HiperSockets device is not configured as a Bridge Port\n");
+       "The device is not configured as a Bridge Port\n");
                        break;
+               case 0x2B14:
                case 0x0014: /* Another device is Primary */
                        switch (setcmd) {
                        case IPA_SBP_SET_PRIMARY_BRIDGE_PORT:
                                rc = -EEXIST;
                                dev_err(&card->gdev->dev,
-       "The HiperSockets LAN already has a primary Bridge Port\n");
+       "The LAN already has a primary Bridge Port\n");
                                break;
                        case IPA_SBP_SET_SECONDARY_BRIDGE_PORT:
                                rc = -EBUSY;
                                dev_err(&card->gdev->dev,
-       "The HiperSockets device is already a primary Bridge Port\n");
+       "The device is already a primary Bridge Port\n");
                                break;
                        default:
                                rc = -EIO;
                        }
                        break;
+               case 0x2B18:
                case 0x0018: /* This device is currently Secondary */
                        rc = -EBUSY;
                        dev_err(&card->gdev->dev,
-       "The HiperSockets device is already a secondary Bridge Port\n");
+       "The device is already a secondary Bridge Port\n");
                        break;
+               case 0x2B1C:
                case 0x001C: /* Limit for Secondary devices reached */
                        rc = -EEXIST;
                        dev_err(&card->gdev->dev,
-       "The HiperSockets LAN cannot have more secondary Bridge Ports\n");
+       "The LAN cannot have more secondary Bridge Ports\n");
                        break;
+               case 0x2B24:
                case 0x0024: /* This device is currently Primary */
                        rc = -EBUSY;
                        dev_err(&card->gdev->dev,
-       "The HiperSockets device is already a primary Bridge Port\n");
+       "The device is already a primary Bridge Port\n");
                        break;
+               case 0x2B20:
                case 0x0020: /* Not authorized by zManager */
                        rc = -EACCES;
                        dev_err(&card->gdev->dev,
-       "The HiperSockets device is not authorized to be a Bridge Port\n");
+       "The device is not authorized to be a Bridge Port\n");
                        break;
                default:
                        rc = -EIO;
                }
-               break;
-       case IPA_RC_NOTSUPP:
-               rc = -ENOSYS;
-               break;
-       case IPA_RC_UNSUPPORTED_COMMAND:
-               rc = -ENOSYS;
-               break;
-       default:
-               rc = -EIO;
-       }
+       else
+               switch (cbctl->ipa_rc) {
+               case IPA_RC_NOTSUPP:
+                       rc = -EOPNOTSUPP;
+                       break;
+               case IPA_RC_UNSUPPORTED_COMMAND:
+                       rc = -EOPNOTSUPP;
+                       break;
+               default:
+                       rc = -EIO;
+               }
+
        if (rc) {
                QETH_CARD_TEXT_(card, 2, "SBPi%04x", cbctl->ipa_rc);
                QETH_CARD_TEXT_(card, 2, "SBPc%04x", cbctl->cmd_rc);
@@ -1688,6 +1733,13 @@ static int qeth_bridgeport_makerc(struct qeth_card *card,
        return rc;
 }
 
+static inline int ipa_cmd_sbp(struct qeth_card *card)
+{
+       return (card->info.type == QETH_CARD_TYPE_IQD) ?
+               IPA_CMD_SETBRIDGEPORT_IQD :
+               IPA_CMD_SETBRIDGEPORT_OSA;
+}
+
 static int qeth_bridgeport_query_support_cb(struct qeth_card *card,
        struct qeth_reply *reply, unsigned long data)
 {
@@ -1719,7 +1771,7 @@ static void qeth_bridgeport_query_support(struct qeth_card *card)
        struct _qeth_sbp_cbctl cbctl;
 
        QETH_CARD_TEXT(card, 2, "brqsuppo");
-       iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0);
+       iob = qeth_get_ipacmd_buffer(card, ipa_cmd_sbp(card), 0);
        if (!iob)
                return;
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
@@ -1796,7 +1848,7 @@ int qeth_bridgeport_query_ports(struct qeth_card *card,
        QETH_CARD_TEXT(card, 2, "brqports");
        if (!(card->options.sbp.supported_funcs & IPA_SBP_QUERY_BRIDGE_PORTS))
                return -EOPNOTSUPP;
-       iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0);
+       iob = qeth_get_ipacmd_buffer(card, ipa_cmd_sbp(card), 0);
        if (!iob)
                return -ENOMEM;
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
@@ -1808,10 +1860,9 @@ int qeth_bridgeport_query_ports(struct qeth_card *card,
        cmd->data.sbp.hdr.seq_no = 1;
        rc = qeth_send_ipa_cmd(card, iob, qeth_bridgeport_query_ports_cb,
                                (void *)&cbctl);
-       if (rc)
+       if (rc < 0)
                return rc;
-       rc = qeth_bridgeport_makerc(card, &cbctl, IPA_SBP_QUERY_BRIDGE_PORTS);
-       return rc;
+       return qeth_bridgeport_makerc(card, &cbctl, IPA_SBP_QUERY_BRIDGE_PORTS);
 }
 EXPORT_SYMBOL_GPL(qeth_bridgeport_query_ports);
 
@@ -1864,7 +1915,7 @@ int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role)
        }
        if (!(card->options.sbp.supported_funcs & setcmd))
                return -EOPNOTSUPP;
-       iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0);
+       iob = qeth_get_ipacmd_buffer(card, ipa_cmd_sbp(card), 0);
        if (!iob)
                return -ENOMEM;
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
@@ -1874,10 +1925,9 @@ int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role)
        cmd->data.sbp.hdr.seq_no = 1;
        rc = qeth_send_ipa_cmd(card, iob, qeth_bridgeport_set_cb,
                                (void *)&cbctl);
-       if (rc)
+       if (rc < 0)
                return rc;
-       rc = qeth_bridgeport_makerc(card, &cbctl, setcmd);
-       return rc;
+       return qeth_bridgeport_makerc(card, &cbctl, setcmd);
 }
 
 /**
@@ -1898,7 +1948,7 @@ static int qeth_anset_makerc(struct qeth_card *card, int pnso_rc, u16 response)
                case 0x0004:
                case 0x0100:
                case 0x0106:
-                       rc = -ENOSYS;
+                       rc = -EOPNOTSUPP;
                        dev_err(&card->gdev->dev,
                                "Setting address notification failed\n");
                        break;
index 59e3aa538b4da4594456b0965bc0fe42a49379f3..52673cd1db9952ab8548b1bad01fc1997b8c1638 100644 (file)
@@ -23,8 +23,6 @@ static ssize_t qeth_bridge_port_role_state_show(struct device *dev,
        if (!card)
                return -EINVAL;
 
-       mutex_lock(&card->conf_mutex);
-
        if (qeth_card_hw_is_reachable(card) &&
                                        card->options.sbp.supported_funcs)
                rc = qeth_bridgeport_query_ports(card,
@@ -59,8 +57,6 @@ static ssize_t qeth_bridge_port_role_state_show(struct device *dev,
                        rc = sprintf(buf, "%s\n", word);
        }
 
-       mutex_unlock(&card->conf_mutex);
-
        return rc;
 }
 
@@ -90,7 +86,9 @@ static ssize_t qeth_bridge_port_role_store(struct device *dev,
 
        mutex_lock(&card->conf_mutex);
 
-       if (qeth_card_hw_is_reachable(card)) {
+       if (card->options.sbp.reflect_promisc) /* Forbid direct manipulation */
+               rc = -EPERM;
+       else if (qeth_card_hw_is_reachable(card)) {
                rc = qeth_bridgeport_setrole(card, role);
                if (!rc)
                        card->options.sbp.role = role;
@@ -123,12 +121,8 @@ static ssize_t qeth_bridgeport_hostnotification_show(struct device *dev,
        if (!card)
                return -EINVAL;
 
-       mutex_lock(&card->conf_mutex);
-
        enabled = card->options.sbp.hostnotification;
 
-       mutex_unlock(&card->conf_mutex);
-
        return sprintf(buf, "%d\n", enabled);
 }
 
@@ -167,10 +161,72 @@ static DEVICE_ATTR(bridge_hostnotify, 0644,
                        qeth_bridgeport_hostnotification_show,
                        qeth_bridgeport_hostnotification_store);
 
+static ssize_t qeth_bridgeport_reflect_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct qeth_card *card = dev_get_drvdata(dev);
+       char *state;
+
+       if (!card)
+               return -EINVAL;
+
+       if (card->options.sbp.reflect_promisc) {
+               if (card->options.sbp.reflect_promisc_primary)
+                       state = "primary";
+               else
+                       state = "secondary";
+       } else
+               state = "none";
+
+       return sprintf(buf, "%s\n", state);
+}
+
+static ssize_t qeth_bridgeport_reflect_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t count)
+{
+       struct qeth_card *card = dev_get_drvdata(dev);
+       int enable, primary;
+       int rc = 0;
+
+       if (!card)
+               return -EINVAL;
+
+       if (sysfs_streq(buf, "none")) {
+               enable = 0;
+               primary = 0;
+       } else if (sysfs_streq(buf, "primary")) {
+               enable = 1;
+               primary = 1;
+       } else if (sysfs_streq(buf, "secondary")) {
+               enable = 1;
+               primary = 0;
+       } else
+               return -EINVAL;
+
+       mutex_lock(&card->conf_mutex);
+
+       if (card->options.sbp.role != QETH_SBP_ROLE_NONE)
+               rc = -EPERM;
+       else {
+               card->options.sbp.reflect_promisc = enable;
+               card->options.sbp.reflect_promisc_primary = primary;
+               rc = 0;
+       }
+
+       mutex_unlock(&card->conf_mutex);
+
+       return rc ? rc : count;
+}
+
+static DEVICE_ATTR(bridge_reflect_promisc, 0644,
+                       qeth_bridgeport_reflect_show,
+                       qeth_bridgeport_reflect_store);
+
 static struct attribute *qeth_l2_bridgeport_attrs[] = {
        &dev_attr_bridge_role.attr,
        &dev_attr_bridge_state.attr,
        &dev_attr_bridge_hostnotify.attr,
+       &dev_attr_bridge_reflect_promisc.attr,
        NULL,
 };
 
index 04e42c649134c1989a3485b01ff2a43cc380a82d..70eb2f61bb9269221f4c389838b67bf90fc071ba 100644 (file)
@@ -3198,8 +3198,7 @@ static int qeth_l3_set_features(struct net_device *dev,
        netdev_features_t features)
 {
        struct qeth_card *card = dev->ml_priv;
-       u32 changed = dev->features ^ features;
-       int err;
+       netdev_features_t changed = dev->features ^ features;
 
        if (!(changed & NETIF_F_RXCSUM))
                return 0;
@@ -3208,11 +3207,7 @@ static int qeth_l3_set_features(struct net_device *dev,
            card->state == CARD_STATE_RECOVER)
                return 0;
 
-       err = qeth_l3_set_rx_csum(card, features & NETIF_F_RXCSUM);
-       if (err)
-               dev->features = features ^ NETIF_F_RXCSUM;
-
-       return err;
+       return qeth_l3_set_rx_csum(card, features & NETIF_F_RXCSUM ? 1 : 0);
 }
 
 static const struct ethtool_ops qeth_l3_ethtool_ops = {
index eb58afcfb73b4c7bbff39a33da272ab9d52e1c39..45d30398d7c34707897f981b16f8753657313cde 100644 (file)
@@ -728,7 +728,7 @@ static struct cxgbi_sock *cxgbi_check_route6(struct sockaddr *dst_addr)
        }
        ndev = n->dev;
 
-       if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {
+       if (ipv6_addr_is_multicast(&daddr6->sin6_addr)) {
                pr_info("multi-cast route %pI6 port %u, dev %s.\n",
                        daddr6->sin6_addr.s6_addr,
                        ntohs(daddr6->sin6_port), ndev->name);
index 5c9e680aa375a57c07a8977790271615ffb1499d..e32d24ec7a11516a79a40b23ac850da5120a3301 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/moduleparam.h>
 #include <generated/utsrelease.h>
 #include <linux/utsname.h>
+#include <linux/vmalloc.h>
 #include <linux/init.h>
 #include <linux/list.h>
 #include <linux/slab.h>
index 8f6d0fb2cd807255a66e962c3cb7c4c8633d4d77..a7cfc270bd08a1f01867affc2dee0fc6b7611472 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/mutex.h>
 #include <linux/aer.h>
 #include <linux/bsg-lib.h>
+#include <linux/vmalloc.h>
 
 #include <net/tcp.h>
 #include <scsi/scsi.h>
index bc95ce89af0671f97a7c7f1128dab96cbc442e39..5ab2f6978209d8ac165d717bdeec16f8c1810c4e 100644 (file)
@@ -379,7 +379,7 @@ void rtw_cfg80211_indicate_disconnect(struct rtw_adapter *padapter)
                                                GFP_ATOMIC);
                } else {
                        cfg80211_disconnected(padapter->pnetdev, 0, NULL,
-                                             0, GFP_ATOMIC);
+                                             0, false, GFP_ATOMIC);
                }
        }
 }
index 0343ae386f0351bdff320f2f956371540eb931f3..ecd7c0f8248175b0afe2e17a51b52d5e0f13e66e 100644 (file)
@@ -1524,21 +1524,12 @@ static void vnt_configure(struct ieee80211_hw *hw,
        struct vnt_private *priv = hw->priv;
        u8 rx_mode = 0;
 
-       *total_flags &= FIF_ALLMULTI | FIF_OTHER_BSS | FIF_PROMISC_IN_BSS |
-               FIF_BCN_PRBRESP_PROMISC;
+       *total_flags &= FIF_ALLMULTI | FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC;
 
        VNSvInPortB(priv->PortOffset + MAC_REG_RCR, &rx_mode);
 
        dev_dbg(&priv->pcid->dev, "rx mode in = %x\n", rx_mode);
 
-       if (changed_flags & FIF_PROMISC_IN_BSS) {
-               /* unconditionally log net taps */
-               if (*total_flags & FIF_PROMISC_IN_BSS)
-                       rx_mode |= RCR_UNICAST;
-               else
-                       rx_mode &= ~RCR_UNICAST;
-       }
-
        if (changed_flags & FIF_ALLMULTI) {
                if (*total_flags & FIF_ALLMULTI) {
                        unsigned long flags;
index ab3ab84cb0a717179472971d2817724441cbb350..0d97b6457eadc7f027137697adebb0448a1503af 100644 (file)
@@ -785,8 +785,7 @@ static void vnt_configure(struct ieee80211_hw *hw,
        u8 rx_mode = 0;
        int rc;
 
-       *total_flags &= FIF_ALLMULTI | FIF_OTHER_BSS | FIF_PROMISC_IN_BSS |
-               FIF_BCN_PRBRESP_PROMISC;
+       *total_flags &= FIF_ALLMULTI | FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC;
 
        rc = vnt_control_in(priv, MESSAGE_TYPE_READ, MAC_REG_RCR,
                MESSAGE_REQUEST_MACREG, sizeof(u8), &rx_mode);
@@ -796,14 +795,6 @@ static void vnt_configure(struct ieee80211_hw *hw,
 
        dev_dbg(&priv->usb->dev, "rx mode in = %x\n", rx_mode);
 
-       if (changed_flags & FIF_PROMISC_IN_BSS) {
-               /* unconditionally log net taps */
-               if (*total_flags & FIF_PROMISC_IN_BSS)
-                       rx_mode |= RCR_UNICAST;
-               else
-                       rx_mode &= ~RCR_UNICAST;
-       }
-
        if (changed_flags & FIF_ALLMULTI) {
                if (*total_flags & FIF_ALLMULTI) {
                        if (priv->mc_list_count > 2)
index 7c87aecf474444a908dd02f780a7649f9f93a187..342e2b30c48f92a5a1b98541e4af9cf2d2c40432 100644 (file)
@@ -722,7 +722,7 @@ void prism2_connect_result(wlandevice_t *wlandev, u8 failed)
 void prism2_disconnected(wlandevice_t *wlandev)
 {
        cfg80211_disconnected(wlandev->netdev, 0, NULL,
-               0, GFP_KERNEL);
+               0, false, GFP_KERNEL);
 }
 
 void prism2_roamed(wlandevice_t *wlandev)
index 74e6114ff18f9343e3012cf21c7faadbdf5c6f61..eb66d36db5f7e6940186c032e4bfef7ba9947e9e 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/crypto.h>
 #include <linux/completion.h>
 #include <linux/module.h>
+#include <linux/vmalloc.h>
 #include <linux/idr.h>
 #include <asm/unaligned.h>
 #include <scsi/scsi_device.h>
index 3f27bfd816d87201c5f3cec3ad7857ead488191b..a3a3d85142e51d1b08fe638eeab31c1a70c5b1a5 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/module.h>
+#include <linux/vmalloc.h>
 #include <linux/falloc.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_host.h>
index a15411c79ae99649041c216439e938f52a7c071a..61dac494423e7346900b46a9f1e3be1a8b4554b3 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/list.h>
+#include <linux/vmalloc.h>
 #include <linux/file.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
index 675f2d9d1f14c69142d63179afa38e5b74255243..2b17bddeff0f1478876048118a5b4bdbcc798cd6 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/cdrom.h>
 #include <linux/module.h>
 #include <linux/ratelimit.h>
+#include <linux/vmalloc.h>
 #include <asm/unaligned.h>
 #include <net/sock.h>
 #include <net/tcp.h>
index 07d2996d8c1fe922334ee57dfe4d27fd9d7685f8..edc95555825079135ab876056fb5710c5449850c 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/idr.h>
 #include <linux/timer.h>
 #include <linux/parser.h>
+#include <linux/vmalloc.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_host.h>
 #include <linux/uio_driver.h>
index ea32b386797f5d52b70ee6f4028f5e8df43f3a8f..83bbb26f3183f4b26ce36bbf69fdb52e2c0d95ec 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/compat.h>
 #include <linux/eventfd.h>
 #include <linux/fs.h>
+#include <linux/vmalloc.h>
 #include <linux/miscdevice.h>
 #include <asm/unaligned.h>
 #include <scsi/scsi.h>
index 3a57a1b0fb510b8c8167835ca62eb06d0c4b53ca..b50642870a43b9675a3b1fe97b1cfadcdbb1c54a 100644 (file)
@@ -85,7 +85,7 @@ int afs_open_socket(void)
                return -ENOMEM;
        }
 
-       ret = sock_create_kern(AF_RXRPC, SOCK_DGRAM, PF_INET, &socket);
+       ret = sock_create_kern(&init_net, AF_RXRPC, SOCK_DGRAM, PF_INET, &socket);
        if (ret < 0) {
                destroy_workqueue(afs_async_calls);
                _leave(" = %d [socket]", ret);
index d08e079ea5d3aa37cf685cce89eb00122fe7ba02..754fd6c0b7470bab272b071e6ca6e4969e4e4209 100644 (file)
@@ -921,8 +921,8 @@ static int tcp_accept_from_sock(struct connection *con)
        mutex_unlock(&connections_lock);
 
        memset(&peeraddr, 0, sizeof(peeraddr));
-       result = sock_create_kern(dlm_local_addr[0]->ss_family, SOCK_STREAM,
-                                 IPPROTO_TCP, &newsock);
+       result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
+                                 SOCK_STREAM, IPPROTO_TCP, &newsock);
        if (result < 0)
                return -ENOMEM;
 
@@ -1173,8 +1173,8 @@ static void tcp_connect_to_sock(struct connection *con)
                goto out;
 
        /* Create a socket to communicate with */
-       result = sock_create_kern(dlm_local_addr[0]->ss_family, SOCK_STREAM,
-                                 IPPROTO_TCP, &sock);
+       result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
+                                 SOCK_STREAM, IPPROTO_TCP, &sock);
        if (result < 0)
                goto out_err;
 
@@ -1258,8 +1258,8 @@ static struct socket *tcp_create_listen_sock(struct connection *con,
                addr_len = sizeof(struct sockaddr_in6);
 
        /* Create a socket to communicate with */
-       result = sock_create_kern(dlm_local_addr[0]->ss_family, SOCK_STREAM,
-                                 IPPROTO_TCP, &sock);
+       result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
+                                 SOCK_STREAM, IPPROTO_TCP, &sock);
        if (result < 0) {
                log_print("Can't create listening comms socket");
                goto create_out;
@@ -1365,8 +1365,8 @@ static int sctp_listen_for_all(void)
 
        log_print("Using SCTP for communications");
 
-       result = sock_create_kern(dlm_local_addr[0]->ss_family, SOCK_SEQPACKET,
-                                 IPPROTO_SCTP, &sock);
+       result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
+                                 SOCK_SEQPACKET, IPPROTO_SCTP, &sock);
        if (result < 0) {
                log_print("Can't create comms socket, check SCTP is loaded");
                goto out;
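
The afs and dlm hunks above track the signature change that adds a struct net * as the first argument of sock_create_kern(), pinning kernel sockets to a network namespace; callers that predate namespace awareness simply pass &init_net. A short usage sketch under the new signature:

    #include <linux/net.h>
    #include <net/net_namespace.h>

    /* Sketch: create a kernel TCP socket in the initial namespace. */
    static int make_kernel_tcp_sock(struct socket **sockp)
    {
            return sock_create_kern(&init_net, AF_INET, SOCK_STREAM,
                                    IPPROTO_TCP, sockp);
    }
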
index bfe62ae40f40920e6b95fa8ce16cc3130b8b0972..4f355a1c1a9e87e45678d573ca86a3496d7dd4a5 100644 (file)
@@ -261,6 +261,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
 
        return ret;
 }
+EXPORT_SYMBOL_GPL(splice_to_pipe);
 
 void spd_release_page(struct splice_pipe_desc *spd, unsigned int i)
 {
index ebd63fd05649b544c7bbeb11ef507efd5e26077e..dc4254b8cbbc2075bf1bb2debadd4587d3721b1f 100644 (file)
 #define UBI32_CORE1_CLK                                279
 #define UBI32_CORE2_CLK                                280
 #define EBI2_AON_CLK                           281
+#define NSSTCM_CLK_SRC                         282
+#define NSSTCM_CLK                             283
 
 #endif
diff --git a/include/dt-bindings/net/ti-dp83867.h b/include/dt-bindings/net/ti-dp83867.h
new file mode 100644 (file)
index 0000000..172744a
--- /dev/null
@@ -0,0 +1,45 @@
+/*
+ * Device Tree constants for the Texas Instruments DP83867 PHY
+ *
+ * Author: Dan Murphy <dmurphy@ti.com>
+ *
+ * Copyright:   (C) 2015 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_TI_DP83867_H
+#define _DT_BINDINGS_TI_DP83867_H
+
+/* PHY CTRL bits */
+#define DP83867_PHYCR_FIFO_DEPTH_3_B_NIB       0x00
+#define DP83867_PHYCR_FIFO_DEPTH_4_B_NIB       0x01
+#define DP83867_PHYCR_FIFO_DEPTH_6_B_NIB       0x02
+#define DP83867_PHYCR_FIFO_DEPTH_8_B_NIB       0x03
+
+/* RGMIIDCTL internal delay for rx and tx */
+#define        DP83867_RGMIIDCTL_250_PS        0x0
+#define        DP83867_RGMIIDCTL_500_PS        0x1
+#define        DP83867_RGMIIDCTL_750_PS        0x2
+#define        DP83867_RGMIIDCTL_1_NS          0x3
+#define        DP83867_RGMIIDCTL_1_25_NS       0x4
+#define        DP83867_RGMIIDCTL_1_50_NS       0x5
+#define        DP83867_RGMIIDCTL_1_75_NS       0x6
+#define        DP83867_RGMIIDCTL_2_00_NS       0x7
+#define        DP83867_RGMIIDCTL_2_25_NS       0x8
+#define        DP83867_RGMIIDCTL_2_50_NS       0x9
+#define        DP83867_RGMIIDCTL_2_75_NS       0xa
+#define        DP83867_RGMIIDCTL_3_00_NS       0xb
+#define        DP83867_RGMIIDCTL_3_25_NS       0xc
+#define        DP83867_RGMIIDCTL_3_50_NS       0xd
+#define        DP83867_RGMIIDCTL_3_75_NS       0xe
+#define        DP83867_RGMIIDCTL_4_00_NS       0xf
+
+#endif
index 0ad5ef930b5d64d54db112bafdd80d39a49faa46..de9c8140931a2aab610178ce2ea7d264a5ba45e9 100644 (file)
 #define USB30_1_PHY_RESET                              112
 #define NSSFB0_RESET                                   113
 #define NSSFB1_RESET                                   114
+#define UBI32_CORE1_CLKRST_CLAMP_RESET                 115
+#define UBI32_CORE1_CLAMP_RESET                                116
+#define UBI32_CORE1_AHB_RESET                          117
+#define UBI32_CORE1_AXI_RESET                          118
+#define UBI32_CORE2_CLKRST_CLAMP_RESET                 119
+#define UBI32_CORE2_CLAMP_RESET                                120
+#define UBI32_CORE2_AHB_RESET                          121
+#define UBI32_CORE2_AXI_RESET                          122
+#define GMAC_CORE1_RESET                               123
+#define GMAC_CORE2_RESET                               124
+#define GMAC_CORE3_RESET                               125
+#define GMAC_CORE4_RESET                               126
+#define GMAC_AHB_RESET                                 127
+#define NSS_CH0_RST_RX_CLK_N_RESET                     128
+#define NSS_CH0_RST_TX_CLK_N_RESET                     129
+#define NSS_CH0_RST_RX_125M_N_RESET                    130
+#define NSS_CH0_HW_RST_RX_125M_N_RESET                 131
+#define NSS_CH0_RST_TX_125M_N_RESET                    132
+#define NSS_CH1_RST_RX_CLK_N_RESET                     133
+#define NSS_CH1_RST_TX_CLK_N_RESET                     134
+#define NSS_CH1_RST_RX_125M_N_RESET                    135
+#define NSS_CH1_HW_RST_RX_125M_N_RESET                 136
+#define NSS_CH1_RST_TX_125M_N_RESET                    137
+#define NSS_CH2_RST_RX_CLK_N_RESET                     138
+#define NSS_CH2_RST_TX_CLK_N_RESET                     139
+#define NSS_CH2_RST_RX_125M_N_RESET                    140
+#define NSS_CH2_HW_RST_RX_125M_N_RESET                 141
+#define NSS_CH2_RST_TX_125M_N_RESET                    142
+#define NSS_CH3_RST_RX_CLK_N_RESET                     143
+#define NSS_CH3_RST_TX_CLK_N_RESET                     144
+#define NSS_CH3_RST_RX_125M_N_RESET                    145
+#define NSS_CH3_HW_RST_RX_125M_N_RESET                 146
+#define NSS_CH3_RST_TX_125M_N_RESET                    147
+#define NSS_RST_RX_250M_125M_N_RESET                   148
+#define NSS_RST_TX_250M_125M_N_RESET                   149
+#define NSS_QSGMII_TXPI_RST_N_RESET                    150
+#define NSS_QSGMII_CDR_RST_N_RESET                     151
+#define NSS_SGMII2_CDR_RST_N_RESET                     152
+#define NSS_SGMII3_CDR_RST_N_RESET                     153
+#define NSS_CAL_PRBS_RST_N_RESET                       154
+#define NSS_LCKDT_RST_N_RESET                          155
+#define NSS_SRDS_N_RESET                               156
+
 #endif
index e34f906647d39dce39985d1cfe836f42b689c556..2ff4a9961e1d5166741f98f80651a516d6ef8eb5 100644 (file)
@@ -305,6 +305,15 @@ int __bcma_driver_register(struct bcma_driver *drv, struct module *owner);
 
 extern void bcma_driver_unregister(struct bcma_driver *drv);
 
+/* module_bcma_driver() - Helper macro for drivers that don't do
+ * anything special in module init/exit.  This eliminates a lot of
+ * boilerplate.  Each module may only use this macro once, and
+ * calling it replaces module_init() and module_exit()
+ */
+#define module_bcma_driver(__bcma_driver) \
+       module_driver(__bcma_driver, bcma_driver_register, \
+                       bcma_driver_unregister)
+
 /* Set a fallback SPROM.
  * See kdoc at the function definition for complete documentation. */
 extern int bcma_arch_register_fallback_sprom(
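
As its comment says, module_bcma_driver() expands to module_init()/module_exit() stubs that only register and unregister the driver. A hypothetical user (driver name and callbacks are placeholders):

    static struct bcma_driver mydrv_driver = {
            .name = KBUILD_MODNAME,
            /* .id_table, .probe, .remove filled in by the driver */
    };

    /* replaces the usual module_init()/module_exit() boilerplate */
    module_bcma_driver(mydrv_driver);
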
index d5cda067115aaaa0f2b00a41c5a3d8c68ad114b3..2235aee8096aa66a13b730571e2229b76091a181 100644 (file)
@@ -105,7 +105,8 @@ struct bpf_verifier_ops {
         */
        bool (*is_valid_access)(int off, int size, enum bpf_access_type type);
 
-       u32 (*convert_ctx_access)(int dst_reg, int src_reg, int ctx_off,
+       u32 (*convert_ctx_access)(enum bpf_access_type type, int dst_reg,
+                                 int src_reg, int ctx_off,
                                  struct bpf_insn *insn);
 };
 
@@ -123,15 +124,40 @@ struct bpf_prog_aux {
        const struct bpf_verifier_ops *ops;
        struct bpf_map **used_maps;
        struct bpf_prog *prog;
-       struct work_struct work;
+       union {
+               struct work_struct work;
+               struct rcu_head rcu;
+       };
 };
 
+struct bpf_array {
+       struct bpf_map map;
+       u32 elem_size;
+       /* 'ownership' of prog_array is claimed by the first program that
+        * is going to use this map or by the first program whose FD is stored
+        * in the map to make sure that all callers and callees have the same
+        * prog_type and JITed flag
+        */
+       enum bpf_prog_type owner_prog_type;
+       bool owner_jited;
+       union {
+               char value[0] __aligned(8);
+               struct bpf_prog *prog[0] __aligned(8);
+       };
+};
+#define MAX_TAIL_CALL_CNT 32
+
+u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5);
+void bpf_prog_array_map_clear(struct bpf_map *map);
+bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
+
 #ifdef CONFIG_BPF_SYSCALL
 void bpf_register_prog_type(struct bpf_prog_type_list *tl);
 void bpf_register_map_type(struct bpf_map_type_list *tl);
 
 struct bpf_prog *bpf_prog_get(u32 ufd);
 void bpf_prog_put(struct bpf_prog *prog);
+void bpf_prog_put_rcu(struct bpf_prog *prog);
 
 struct bpf_map *bpf_map_get(struct fd f);
 void bpf_map_put(struct bpf_map *map);
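
MAX_TAIL_CALL_CNT bounds how many times a single invocation may chain through a prog array, and the array's ownership fields ensure every program in the chain shares a prog_type and JIT status. A conceptual sketch of the bounds check only, not the real interpreter code:

    /* Conceptual sketch: tail calls are limited by a per-invocation
     * counter and by the array size; a miss falls through to the next
     * instruction in the calling program.
     */
    static struct bpf_prog *pick_tail_call(struct bpf_array *array, u32 index,
                                           u32 *tail_call_cnt)
    {
            if ((*tail_call_cnt)++ >= MAX_TAIL_CALL_CNT)
                    return NULL;            /* limit hit: fall through */
            if (index >= array->map.max_entries)
                    return NULL;            /* out of range: fall through */
            return array->prog[index];      /* may be NULL if slot is empty */
    }
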
@@ -160,5 +186,7 @@ extern const struct bpf_func_proto bpf_map_delete_elem_proto;
 
 extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
 extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
+extern const struct bpf_func_proto bpf_tail_call_proto;
+extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
 
 #endif /* _LINUX_BPF_H */
index 606563ef8a725e54d27db855c63dad948f3ce815..9012f877520802662fb5f3704c60ef24d09c7136 100644 (file)
@@ -110,7 +110,29 @@ static inline bool is_zero_ether_addr(const u8 *addr)
  */
 static inline bool is_multicast_ether_addr(const u8 *addr)
 {
-       return 0x01 & addr[0];
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+       u32 a = *(const u32 *)addr;
+#else
+       u16 a = *(const u16 *)addr;
+#endif
+#ifdef __BIG_ENDIAN
+       return 0x01 & (a >> ((sizeof(a) * 8) - 8));
+#else
+       return 0x01 & a;
+#endif
+}
+
+static inline bool is_multicast_ether_addr_64bits(const u8 addr[6+2])
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+#ifdef __BIG_ENDIAN
+       return 0x01 & ((*(const u64 *)addr) >> 56);
+#else
+       return 0x01 & (*(const u64 *)addr);
+#endif
+#else
+       return is_multicast_ether_addr(addr);
+#endif
 }
 
 /**
@@ -168,6 +190,24 @@ static inline bool is_valid_ether_addr(const u8 *addr)
        return !is_multicast_ether_addr(addr) && !is_zero_ether_addr(addr);
 }
 
+/**
+ * eth_proto_is_802_3 - Determine if a given Ethertype/length is a protocol
+ * @proto: Ethertype/length value to be tested
+ *
+ * Check that the value from the Ethertype/length field is a valid Ethertype.
+ *
+ * Return true if the value is an 802.3 supported Ethertype.
+ */
+static inline bool eth_proto_is_802_3(__be16 proto)
+{
+#ifndef __BIG_ENDIAN
+       /* if CPU is little endian mask off bits representing LSB */
+       proto &= htons(0xFF00);
+#endif
+       /* cast both to u16 and compare since LSB can be ignored */
+       return (__force u16)proto >= (__force u16)htons(ETH_P_802_3_MIN);
+}
+
 /**
  * eth_random_addr - Generate software assigned random Ethernet address
  * @addr: Pointer to a six-byte array containing the Ethernet address
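
Worked check of eth_proto_is_802_3() on little-endian: ETH_P_802_3_MIN is 0x0600, and htons(0x0600) reads as the u16 0x0006. An Ethertype such as 0x0800 (IPv4) arrives as bytes 08 00, i.e. 0x0008 after the htons(0xFF00) mask, which is >= 0x0006; an 802.3 length field such as 0x05DC becomes 0x0005 after masking, below the threshold. A host-side sanity check, sketched with plain userspace types in place of the kernel's __be16:

    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h>

    /* Host-side sketch of the comparison in eth_proto_is_802_3(). */
    static int proto_is_802_3(uint16_t proto_be)
    {
            proto_be &= htons(0xFF00);          /* LE: drop the low byte */
            return proto_be >= htons(0x0600);   /* ETH_P_802_3_MIN */
    }

    int main(void)
    {
            printf("%d\n", proto_is_802_3(htons(0x0800)));  /* 1: IPv4 */
            printf("%d\n", proto_is_802_3(htons(0x05DC)));  /* 0: 802.3 length */
            return 0;
    }
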
index fa11b3a367be54c4f73427d3c323c9c4936aca3a..17724f6ea983c9c5ac8fecb2069ec1d90b451168 100644 (file)
@@ -207,6 +207,16 @@ struct bpf_prog_aux;
                .off   = OFF,                                   \
                .imm   = 0 })
 
+/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */
+
+#define BPF_STX_XADD(SIZE, DST, SRC, OFF)                      \
+       ((struct bpf_insn) {                                    \
+               .code  = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD,   \
+               .dst_reg = DST,                                 \
+               .src_reg = SRC,                                 \
+               .off   = OFF,                                   \
+               .imm   = 0 })
+
 /* Memory store, *(uint *) (dst_reg + off16) = imm32 */
 
 #define BPF_ST_MEM(SIZE, DST, OFF, IMM)                                \
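
BPF_STX_XADD emits the eBPF atomic-add instruction; for example, an instruction doing *(u32 *)(R1 + 0) += R2 could be built as below (a sketch assuming the usual BPF_REG_* names from the bpf UAPI header):

    struct bpf_insn insns[] = {
            /* *(u32 *)(R1 + 0) += R2, atomically */
            BPF_STX_XADD(BPF_W, BPF_REG_1, BPF_REG_2, 0),
            /* ... rest of the program ... */
    };
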
@@ -267,6 +277,14 @@ struct bpf_prog_aux;
                .off   = 0,                                     \
                .imm   = 0 })
 
+/* Internal classic blocks for direct assignment */
+
+#define __BPF_STMT(CODE, K)                                    \
+       ((struct sock_filter) BPF_STMT(CODE, K))
+
+#define __BPF_JUMP(CODE, K, JT, JF)                            \
+       ((struct sock_filter) BPF_JUMP(CODE, K, JT, JF))
+
 #define bytes_to_bpf_size(bytes)                               \
 ({                                                             \
        int bpf_size = -EINVAL;                                 \
@@ -360,12 +378,9 @@ static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
 
 int sk_filter(struct sock *sk, struct sk_buff *skb);
 
-void bpf_prog_select_runtime(struct bpf_prog *fp);
+int bpf_prog_select_runtime(struct bpf_prog *fp);
 void bpf_prog_free(struct bpf_prog *fp);
 
-int bpf_convert_filter(struct sock_filter *prog, int len,
-                      struct bpf_insn *new_prog, int *new_len);
-
 struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
 struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
                                  gfp_t gfp_extra_flags);
@@ -377,14 +392,17 @@ static inline void bpf_prog_unlock_free(struct bpf_prog *fp)
        __bpf_prog_free(fp);
 }
 
+typedef int (*bpf_aux_classic_check_t)(struct sock_filter *filter,
+                                      unsigned int flen);
+
 int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog);
+int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
+                             bpf_aux_classic_check_t trans);
 void bpf_prog_destroy(struct bpf_prog *fp);
 
 int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
 int sk_attach_bpf(u32 ufd, struct sock *sk);
 int sk_detach_filter(struct sock *sk);
-
-int bpf_check_classic(const struct sock_filter *filter, unsigned int flen);
 int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
                  unsigned int len);
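The new bpf_aux_classic_check_t hook lets a caller run an extra validation pass over the classic filter before it is translated; seccomp-style filter checking is the kind of callback it accommodates. A hypothetical checker matching the typedef (demo only; the policy it enforces is invented for illustration):

/* Reject any classic program that touches the scratch memory store. */
static int deny_scratch_mem(struct sock_filter *filter, unsigned int flen)
{
	unsigned int i;

	for (i = 0; i < flen; i++) {
		u16 code = filter[i].code;

		if (BPF_CLASS(code) == BPF_ST || BPF_CLASS(code) == BPF_STX)
			return -EINVAL;
		if ((BPF_CLASS(code) == BPF_LD || BPF_CLASS(code) == BPF_LDX) &&
		    BPF_MODE(code) == BPF_MEM)
			return -EINVAL;
	}
	return 0;
}

/* err = bpf_prog_create_from_user(&prog, &fprog, deny_scratch_mem); */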
 
index 15928f0647e44187eb00ae7e128f4047519b093b..6ba7cf23748fe90f354e50c7514c230ee7e14bd7 100644 (file)
@@ -368,6 +368,11 @@ extern void free_pages(unsigned long addr, unsigned int order);
 extern void free_hot_cold_page(struct page *page, bool cold);
 extern void free_hot_cold_page_list(struct list_head *list, bool cold);
 
+struct page_frag_cache;
+extern void *__alloc_page_frag(struct page_frag_cache *nc,
+                              unsigned int fragsz, gfp_t gfp_mask);
+extern void __free_page_frag(void *addr);
+
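These exports generalize the page-fragment allocation scheme previously private to the networking core: callers carve variable-sized fragments out of refcounted compound pages. A hypothetical caller sketch (the cache would normally be per-CPU; names here are invented):

static struct page_frag_cache frag_cache;

static void *grab_frag(unsigned int size)
{
	/* carves 'size' bytes out of a refcounted compound page;
	 * returns NULL if a backing page cannot be allocated */
	return __alloc_page_frag(&frag_cache, size, GFP_ATOMIC);
}

static void drop_frag(void *data)
{
	__free_page_frag(data);	/* drops the backing page reference */
}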
 extern void __free_kmem_pages(struct page *page, unsigned int order);
 extern void free_kmem_pages(unsigned long addr, unsigned int order);
 
index 66a7d7600f4343a809252ba809d92bca5bc7e8f9..b49cf923becc2405b3d99ef6f88e5a524423e7a3 100644 (file)
@@ -74,7 +74,7 @@ static inline struct sock *sk_pppox(struct pppox_sock *po)
 struct module;
 
 struct pppox_proto {
-       int             (*create)(struct net *net, struct socket *sock);
+       int             (*create)(struct net *net, struct socket *sock, int kern);
        int             (*ioctl)(struct socket *sock, unsigned int cmd,
                                 unsigned long arg);
        struct module   *owner;
index 920e4457ce6eab1541a9322595fe2894559ce16f..67ce5bd3b56a54af98bbe10d360b9ea65fd4e4cf 100644 (file)
@@ -416,7 +416,7 @@ static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb,
 /**
  * __vlan_get_tag - get the VLAN ID that is part of the payload
  * @skb: skbuff to query
- * @vlan_tci: buffer to store vlaue
+ * @vlan_tci: buffer to store value
  *
  * Returns error if the skb is not of VLAN type
  */
@@ -435,7 +435,7 @@ static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
 /**
  * __vlan_hwaccel_get_tag - get the VLAN ID that is in @skb->cb[]
  * @skb: skbuff to query
- * @vlan_tci: buffer to store vlaue
+ * @vlan_tci: buffer to store value
  *
  * Returns error if @skb->vlan_tci is not set correctly
  */
@@ -456,7 +456,7 @@ static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
 /**
  * vlan_get_tag - get the VLAN ID from the skb
  * @skb: skbuff to query
- * @vlan_tci: buffer to store vlaue
+ * @vlan_tci: buffer to store value
  *
  * Returns error if the skb is not VLAN tagged
  */
@@ -539,7 +539,7 @@ static inline void vlan_set_encap_proto(struct sk_buff *skb,
         */
 
        proto = vhdr->h_vlan_encapsulated_proto;
-       if (ntohs(proto) >= ETH_P_802_3_MIN) {
+       if (eth_proto_is_802_3(proto)) {
                skb->protocol = proto;
                return;
        }
@@ -628,4 +628,24 @@ static inline netdev_features_t vlan_features_check(const struct sk_buff *skb,
        return features;
 }
 
+/**
+ * compare_vlan_header - Compare two vlan headers
+ * @h1: Pointer to vlan header
+ * @h2: Pointer to vlan header
+ *
+ * Compare two vlan headers; returns 0 if equal.
+ *
+ * Please note that alignment of h1 & h2 is only guaranteed to be 16 bits.
+ */
+static inline unsigned long compare_vlan_header(const struct vlan_hdr *h1,
+                                               const struct vlan_hdr *h2)
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+       return *(u32 *)h1 ^ *(u32 *)h2;
+#else
+       return ((__force u32)h1->h_vlan_TCI ^ (__force u32)h2->h_vlan_TCI) |
+              ((__force u32)h1->h_vlan_encapsulated_proto ^
+               (__force u32)h2->h_vlan_encapsulated_proto);
+#endif
+}
 #endif /* !(_LINUX_IF_VLAN_H_) */
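The XOR form gives a branch-free comparison that is safe on the 16-bit-aligned headers found in packet data. A hypothetical fragment (assuming ETH_P_IP from if_ether.h):

struct vlan_hdr a = {
	.h_vlan_TCI = htons(5),
	.h_vlan_encapsulated_proto = htons(ETH_P_IP),
};
struct vlan_hdr b = a;

if (compare_vlan_header(&a, &b) == 0)
	pr_debug("vlan headers match\n");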
index 2c677afeea4782c96b79d0d8ede4846d87783b99..193ad488d3e20f9b244b41a940d9fee7ee8ee6cc 100644 (file)
@@ -130,5 +130,6 @@ extern void ip_mc_unmap(struct in_device *);
 extern void ip_mc_remap(struct in_device *);
 extern void ip_mc_dec_group(struct in_device *in_dev, __be32 addr);
 extern void ip_mc_inc_group(struct in_device *in_dev, __be32 addr);
+int ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed);
 
 #endif
index 66c30a763b108c7fe096009b344320fc1c282325..11f00cdabe3d463b8355c4cd3c328b218976d507 100644 (file)
@@ -23,7 +23,8 @@ struct mdio_gpio_platform_data {
        bool mdio_active_low;
        bool mdo_active_low;
 
-       unsigned int phy_mask;
+       u32 phy_mask;
+       u32 phy_ignore_ta_mask;
        int irqs[PHY_MAX_ADDR];
        /* reset callback */
        int (*reset)(struct mii_bus *bus);
index 83e80ab9450048d121b739bd23b85ccb14a39b36..ad31e476873f8b8f9a5a8fd0f04fb70075a4df7c 100644 (file)
@@ -46,8 +46,9 @@
 
 #define MAX_MSIX_P_PORT                17
 #define MAX_MSIX               64
-#define MSIX_LEGACY_SZ         4
 #define MIN_MSIX_P_PORT                5
+#define MLX4_IS_LEGACY_EQ_MODE(dev_cap) ((dev_cap).num_comp_vectors < \
+                                        (dev_cap).num_ports * MIN_MSIX_P_PORT)
 
 #define MLX4_MAX_100M_UNITS_VAL                255     /*
                                                 * work around: can't set values
@@ -528,7 +529,6 @@ struct mlx4_caps {
        int                     num_eqs;
        int                     reserved_eqs;
        int                     num_comp_vectors;
-       int                     comp_pool;
        int                     num_mpts;
        int                     max_fmr_maps;
        int                     num_mtts;
@@ -1332,10 +1332,13 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
 int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
 int mlx4_SYNC_TPT(struct mlx4_dev *dev);
 int mlx4_test_interrupts(struct mlx4_dev *dev);
-int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
-                  int *vector);
+u32 mlx4_get_eqs_per_port(struct mlx4_dev *dev, u8 port);
+bool mlx4_is_eq_vector_valid(struct mlx4_dev *dev, u8 port, int vector);
+struct cpu_rmap *mlx4_get_cpu_rmap(struct mlx4_dev *dev, int port);
+int mlx4_assign_eq(struct mlx4_dev *dev, u8 port, int *vector);
 void mlx4_release_eq(struct mlx4_dev *dev, int vec);
 
+int mlx4_is_eq_shared(struct mlx4_dev *dev, int vector);
 int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec);
 
 int mlx4_get_phys_port_id(struct mlx4_dev *dev);
index 2695ced222df23b56df42252fb7319c7d7d8b157..abc4767695e4bcd6a53fa1eaf8bd8d7809349d26 100644 (file)
@@ -169,6 +169,9 @@ int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
                       struct mlx5_query_cq_mbox_out *out);
 int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
                        struct mlx5_modify_cq_mbox_in *in, int in_sz);
+int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
+                                  struct mlx5_core_cq *cq, u16 cq_period,
+                                  u16 cq_max_count);
 int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
 void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
 
index abf65c7904214b75f5326a7f576df3b8e2f0a9d8..b2c43508a73711842caa4549f3ae4dc98d04aea3 100644 (file)
@@ -35,6 +35,7 @@
 
 #include <linux/types.h>
 #include <rdma/ib_verbs.h>
+#include <linux/mlx5/mlx5_ifc.h>
 
 #if defined(__LITTLE_ENDIAN)
 #define MLX5_SET_HOST_ENDIANNESS       0
@@ -58,6 +59,8 @@
 #define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)
 #define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
 #define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)
+#define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8)
+#define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32)
 #define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
 #define MLX5_ADDR_OF(typ, p, fld) ((char *)(p) + MLX5_BYTE_OFF(typ, fld))
 
                     << __mlx5_dw_bit_off(typ, fld))); \
 } while (0)
 
+#define MLX5_SET_TO_ONES(typ, p, fld) do { \
+       BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32);             \
+       *((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
+       cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
+                    (~__mlx5_dw_mask(typ, fld))) | ((__mlx5_mask(typ, fld)) \
+                    << __mlx5_dw_bit_off(typ, fld))); \
+} while (0)
+
 #define MLX5_GET(typ, p, fld) ((be32_to_cpu(*((__be32 *)(p) +\
 __mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
 __mlx5_mask(typ, fld))
@@ -88,6 +99,12 @@ __mlx5_mask(typ, fld))
 
 #define MLX5_GET64(typ, p, fld) be64_to_cpu(*((__be64 *)(p) + __mlx5_64_off(typ, fld)))
 
+#define MLX5_GET64_PR(typ, p, fld) ({ \
+       u64 ___t = MLX5_GET64(typ, p, fld); \
+       pr_debug(#fld " = 0x%llx\n", ___t); \
+       ___t; \
+})
+
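These accessors address fields of the mlx5_ifc_* layouts by bit offset, so command mailboxes can be built without hand-packed structs. A hypothetical sketch, assuming the query_hca_cap_in layout and the cap-type/cap-mode enums added later in this patch; the op_mod encoding shown follows the updated mlx5_core_get_caps() signature and is an assumption of this sketch:

u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0};

MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
MLX5_SET(query_hca_cap_in, in, op_mod,
	 (MLX5_CAP_GENERAL << 1) | HCA_CAP_OPMOD_GET_CUR);
/* ...execute the command; afterwards read fields straight out of the
 * returned capability layout: */
/* log_max_qp = MLX5_GET(cmd_hca_cap, caps, log_max_qp); */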
 enum {
        MLX5_MAX_COMMANDS               = 32,
        MLX5_CMD_DATA_BLOCK_SIZE        = 512,
@@ -264,6 +281,7 @@ enum {
        MLX5_OPCODE_RDMA_WRITE_IMM      = 0x09,
        MLX5_OPCODE_SEND                = 0x0a,
        MLX5_OPCODE_SEND_IMM            = 0x0b,
+       MLX5_OPCODE_LSO                 = 0x0e,
        MLX5_OPCODE_RDMA_READ           = 0x10,
        MLX5_OPCODE_ATOMIC_CS           = 0x11,
        MLX5_OPCODE_ATOMIC_FA           = 0x12,
@@ -312,13 +330,6 @@ enum {
        MLX5_CAP_OFF_CMDIF_CSUM         = 46,
 };
 
-enum {
-       HCA_CAP_OPMOD_GET_MAX   = 0,
-       HCA_CAP_OPMOD_GET_CUR   = 1,
-       HCA_CAP_OPMOD_GET_ODP_MAX = 4,
-       HCA_CAP_OPMOD_GET_ODP_CUR = 5
-};
-
 struct mlx5_inbox_hdr {
        __be16          opcode;
        u8              rsvd[4];
@@ -541,6 +552,10 @@ struct mlx5_cmd_prot_block {
        u8              sig;
 };
 
+enum {
+       MLX5_CQE_SYND_FLUSHED_IN_ERROR = 5,
+};
+
 struct mlx5_err_cqe {
        u8      rsvd0[32];
        __be32  srqn;
@@ -554,13 +569,22 @@ struct mlx5_err_cqe {
 };
 
 struct mlx5_cqe64 {
-       u8              rsvd0[17];
+       u8              rsvd0[4];
+       u8              lro_tcppsh_abort_dupack;
+       u8              lro_min_ttl;
+       __be16          lro_tcp_win;
+       __be32          lro_ack_seq_num;
+       __be32          rss_hash_result;
+       u8              rss_hash_type;
        u8              ml_path;
-       u8              rsvd20[4];
+       u8              rsvd20[2];
+       __be16          check_sum;
        __be16          slid;
        __be32          flags_rqpn;
-       u8              rsvd28[4];
-       __be32          srqn;
+       u8              hds_ip_ext;
+       u8              l4_hdr_type_etc;
+       __be16          vlan_info;
+       __be32          srqn; /* [31:24]: lro_num_seg, [23:0]: srqn */
        __be32          imm_inval_pkey;
        u8              rsvd40[4];
        __be32          byte_cnt;
@@ -571,6 +595,40 @@ struct mlx5_cqe64 {
        u8              op_own;
 };
 
+static inline int get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
+{
+       return (cqe->lro_tcppsh_abort_dupack >> 6) & 1;
+}
+
+static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe)
+{
+       return (cqe->l4_hdr_type_etc >> 4) & 0x7;
+}
+
+static inline int cqe_has_vlan(struct mlx5_cqe64 *cqe)
+{
+       return !!(cqe->l4_hdr_type_etc & 0x1);
+}
+
+enum {
+       CQE_L4_HDR_TYPE_NONE                    = 0x0,
+       CQE_L4_HDR_TYPE_TCP_NO_ACK              = 0x1,
+       CQE_L4_HDR_TYPE_UDP                     = 0x2,
+       CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA         = 0x3,
+       CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA        = 0x4,
+};
+
+enum {
+       CQE_RSS_HTYPE_IP        = 0x3 << 6,
+       CQE_RSS_HTYPE_L4        = 0x3 << 2,
+};
+
+enum {
+       CQE_L2_OK       = 1 << 0,
+       CQE_L3_OK       = 1 << 1,
+       CQE_L4_OK       = 1 << 2,
+};
+
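The widened mlx5_cqe64 layout exposes LRO, RSS, checksum and VLAN metadata in bytes that used to be reserved; the accessors above pull individual bitfields out of the packed octets. A hypothetical RX-path fragment:

static void inspect_cqe(struct mlx5_cqe64 *cqe)
{
	if (get_cqe_l4_hdr_type(cqe) == CQE_L4_HDR_TYPE_UDP)
		pr_debug("UDP payload\n");
	if (cqe_has_vlan(cqe))
		pr_debug("vlan id %u\n",
			 be16_to_cpu(cqe->vlan_info) & 0xfff);
	if (get_cqe_lro_tcppsh(cqe))
		pr_debug("LRO session saw TCP PSH\n");
}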
 struct mlx5_sig_err_cqe {
        u8              rsvd0[16];
        __be32          expected_trans_sig;
@@ -996,4 +1054,135 @@ struct mlx5_destroy_psv_out {
        u8                      rsvd[8];
 };
 
+#define MLX5_CMD_OP_MAX 0x920
+
+enum {
+       VPORT_STATE_DOWN                = 0x0,
+       VPORT_STATE_UP                  = 0x1,
+};
+
+enum {
+       MLX5_L3_PROT_TYPE_IPV4          = 0,
+       MLX5_L3_PROT_TYPE_IPV6          = 1,
+};
+
+enum {
+       MLX5_L4_PROT_TYPE_TCP           = 0,
+       MLX5_L4_PROT_TYPE_UDP           = 1,
+};
+
+enum {
+       MLX5_HASH_FIELD_SEL_SRC_IP      = 1 << 0,
+       MLX5_HASH_FIELD_SEL_DST_IP      = 1 << 1,
+       MLX5_HASH_FIELD_SEL_L4_SPORT    = 1 << 2,
+       MLX5_HASH_FIELD_SEL_L4_DPORT    = 1 << 3,
+       MLX5_HASH_FIELD_SEL_IPSEC_SPI   = 1 << 4,
+};
+
+enum {
+       MLX5_MATCH_OUTER_HEADERS        = 1 << 0,
+       MLX5_MATCH_MISC_PARAMETERS      = 1 << 1,
+       MLX5_MATCH_INNER_HEADERS        = 1 << 2,
+
+};
+
+enum {
+       MLX5_FLOW_TABLE_TYPE_NIC_RCV    = 0,
+       MLX5_FLOW_TABLE_TYPE_ESWITCH    = 4,
+};
+
+enum {
+       MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT       = 0,
+       MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE  = 1,
+       MLX5_FLOW_CONTEXT_DEST_TYPE_TIR         = 2,
+};
+
+enum {
+       MLX5_RQC_RQ_TYPE_MEMORY_RQ_INLINE = 0x0,
+       MLX5_RQC_RQ_TYPE_MEMORY_RQ_RPM    = 0x1,
+};
+
+/* MLX5 DEV CAPs */
+
+/* TODO: EAT.ME */
+enum mlx5_cap_mode {
+       HCA_CAP_OPMOD_GET_MAX   = 0,
+       HCA_CAP_OPMOD_GET_CUR   = 1,
+};
+
+enum mlx5_cap_type {
+       MLX5_CAP_GENERAL = 0,
+       MLX5_CAP_ETHERNET_OFFLOADS,
+       MLX5_CAP_ODP,
+       MLX5_CAP_ATOMIC,
+       MLX5_CAP_ROCE,
+       MLX5_CAP_IPOIB_OFFLOADS,
+       MLX5_CAP_EOIB_OFFLOADS,
+       MLX5_CAP_FLOW_TABLE,
+       /* NUM OF CAP Types */
+       MLX5_CAP_NUM
+};
+
+/* GET Dev Caps macros */
+#define MLX5_CAP_GEN(mdev, cap) \
+       MLX5_GET(cmd_hca_cap, mdev->hca_caps_cur[MLX5_CAP_GENERAL], cap)
+
+#define MLX5_CAP_GEN_MAX(mdev, cap) \
+       MLX5_GET(cmd_hca_cap, mdev->hca_caps_max[MLX5_CAP_GENERAL], cap)
+
+#define MLX5_CAP_ETH(mdev, cap) \
+       MLX5_GET(per_protocol_networking_offload_caps,\
+                mdev->hca_caps_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap)
+
+#define MLX5_CAP_ETH_MAX(mdev, cap) \
+       MLX5_GET(per_protocol_networking_offload_caps,\
+                mdev->hca_caps_max[MLX5_CAP_ETHERNET_OFFLOADS], cap)
+
+#define MLX5_CAP_ROCE(mdev, cap) \
+       MLX5_GET(roce_cap, mdev->hca_caps_cur[MLX5_CAP_ROCE], cap)
+
+#define MLX5_CAP_ROCE_MAX(mdev, cap) \
+       MLX5_GET(roce_cap, mdev->hca_caps_max[MLX5_CAP_ROCE], cap)
+
+#define MLX5_CAP_ATOMIC(mdev, cap) \
+       MLX5_GET(atomic_caps, mdev->hca_caps_cur[MLX5_CAP_ATOMIC], cap)
+
+#define MLX5_CAP_ATOMIC_MAX(mdev, cap) \
+       MLX5_GET(atomic_caps, mdev->hca_caps_max[MLX5_CAP_ATOMIC], cap)
+
+#define MLX5_CAP_FLOWTABLE(mdev, cap) \
+       MLX5_GET(flow_table_nic_cap, mdev->hca_caps_cur[MLX5_CAP_FLOW_TABLE], cap)
+
+#define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
+       MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap)
+
+#define MLX5_CAP_ODP(mdev, cap)\
+       MLX5_GET(odp_cap, mdev->hca_caps_cur[MLX5_CAP_ODP], cap)
+
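Each macro indexes the per-type capability pages cached on the device (the hca_caps_cur/hca_caps_max arrays added to struct mlx5_core_dev later in this patch) by layout field name. A hypothetical probe, with mdev a struct mlx5_core_dev *; both fields used here appear in the layouts this patch adds:

if (MLX5_CAP_GEN(mdev, eth_net_offloads) &&
    MLX5_CAP_ETH(mdev, lro_cap))
	pr_debug("device can do LRO\n");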
+enum {
+       MLX5_CMD_STAT_OK                        = 0x0,
+       MLX5_CMD_STAT_INT_ERR                   = 0x1,
+       MLX5_CMD_STAT_BAD_OP_ERR                = 0x2,
+       MLX5_CMD_STAT_BAD_PARAM_ERR             = 0x3,
+       MLX5_CMD_STAT_BAD_SYS_STATE_ERR         = 0x4,
+       MLX5_CMD_STAT_BAD_RES_ERR               = 0x5,
+       MLX5_CMD_STAT_RES_BUSY                  = 0x6,
+       MLX5_CMD_STAT_LIM_ERR                   = 0x8,
+       MLX5_CMD_STAT_BAD_RES_STATE_ERR         = 0x9,
+       MLX5_CMD_STAT_IX_ERR                    = 0xa,
+       MLX5_CMD_STAT_NO_RES_ERR                = 0xf,
+       MLX5_CMD_STAT_BAD_INP_LEN_ERR           = 0x50,
+       MLX5_CMD_STAT_BAD_OUTP_LEN_ERR          = 0x51,
+       MLX5_CMD_STAT_BAD_QP_STATE_ERR          = 0x10,
+       MLX5_CMD_STAT_BAD_PKT_ERR               = 0x30,
+       MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR    = 0x40,
+};
+
+static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
+{
+       if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)
+               return 0;
+       return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz;
+}
+
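Worked example of the mapping, assuming MLX5_MIN_PKEY_TABLE_SIZE is 128 and MLX5_MAX_LOG_PKEY_TABLE is 5 as in the existing header:

/* mlx5_to_sw_pkey_sz(0) -> 128 entries
 * mlx5_to_sw_pkey_sz(3) -> 128 << 3 = 1024 entries
 * mlx5_to_sw_pkey_sz(6) -> 0 (encoding out of range)
 */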
 #endif /* MLX5_DEVICE_H */
index 9a90e7523dc24d2f7f29467023c8845cbf50cff7..6093bde16b94b8d734f2047c7d6454e1beaafe2c 100644 (file)
@@ -44,7 +44,6 @@
 
 #include <linux/mlx5/device.h>
 #include <linux/mlx5/doorbell.h>
-#include <linux/mlx5/mlx5_ifc.h>
 
 enum {
        MLX5_BOARD_ID_LEN = 64,
@@ -85,7 +84,7 @@ enum {
 };
 
 enum {
-       MLX5_MAX_EQ_NAME        = 32
+       MLX5_MAX_IRQ_NAME       = 32
 };
 
 enum {
@@ -108,6 +107,7 @@ enum {
        MLX5_REG_PUDE            = 0x5009,
        MLX5_REG_PMPE            = 0x5010,
        MLX5_REG_PELC            = 0x500e,
+       MLX5_REG_PVLC            = 0x500f,
        MLX5_REG_PMLP            = 0, /* TBD */
        MLX5_REG_NODE_DESC       = 0x6001,
        MLX5_REG_HOST_ENDIANNESS = 0x7004,
@@ -150,6 +150,11 @@ enum mlx5_dev_event {
        MLX5_DEV_EVENT_CLIENT_REREG,
 };
 
+enum mlx5_port_status {
+       MLX5_PORT_UP        = 1 << 1,
+       MLX5_PORT_DOWN      = 1 << 2,
+};
+
 struct mlx5_uuar_info {
        struct mlx5_uar        *uars;
        int                     num_uars;
@@ -269,56 +274,7 @@ struct mlx5_cmd {
 struct mlx5_port_caps {
        int     gid_table_len;
        int     pkey_table_len;
-};
-
-struct mlx5_general_caps {
-       u8      log_max_eq;
-       u8      log_max_cq;
-       u8      log_max_qp;
-       u8      log_max_mkey;
-       u8      log_max_pd;
-       u8      log_max_srq;
-       u8      log_max_strq;
-       u8      log_max_mrw_sz;
-       u8      log_max_bsf_list_size;
-       u8      log_max_klm_list_size;
-       u32     max_cqes;
-       int     max_wqes;
-       u32     max_eqes;
-       u32     max_indirection;
-       int     max_sq_desc_sz;
-       int     max_rq_desc_sz;
-       int     max_dc_sq_desc_sz;
-       u64     flags;
-       u16     stat_rate_support;
-       int     log_max_msg;
-       int     num_ports;
-       u8      log_max_ra_res_qp;
-       u8      log_max_ra_req_qp;
-       int     max_srq_wqes;
-       int     bf_reg_size;
-       int     bf_regs_per_page;
-       struct mlx5_port_caps   port[MLX5_MAX_PORTS];
-       u8                      ext_port_cap[MLX5_MAX_PORTS];
-       int     max_vf;
-       u32     reserved_lkey;
-       u8      local_ca_ack_delay;
-       u8      log_max_mcg;
-       u32     max_qp_mcg;
-       int     min_page_sz;
-       int     pd_cap;
-       u32     max_qp_counters;
-       u32     pkey_table_size;
-       u8      log_max_ra_req_dc;
-       u8      log_max_ra_res_dc;
-       u32     uar_sz;
-       u8      min_log_pg_sz;
-       u8      log_max_xrcd;
-       u16     log_uar_page_sz;
-};
-
-struct mlx5_caps {
-       struct mlx5_general_caps gen;
+       u8      ext_port_cap;
 };
 
 struct mlx5_cmd_mailbox {
@@ -334,8 +290,6 @@ struct mlx5_buf_list {
 
 struct mlx5_buf {
        struct mlx5_buf_list    direct;
-       struct mlx5_buf_list   *page_list;
-       int                     nbufs;
        int                     npages;
        int                     size;
        u8                      page_shift;
@@ -351,7 +305,6 @@ struct mlx5_eq {
        u8                      eqn;
        int                     nent;
        u64                     mask;
-       char                    name[MLX5_MAX_EQ_NAME];
        struct list_head        list;
        int                     index;
        struct mlx5_rsc_debug   *dbg;
@@ -387,6 +340,8 @@ struct mlx5_core_mr {
 
 enum mlx5_res_type {
        MLX5_RES_QP,
+       MLX5_RES_SRQ,
+       MLX5_RES_XSRQ,
 };
 
 struct mlx5_core_rsc_common {
@@ -396,6 +351,7 @@ struct mlx5_core_rsc_common {
 };
 
 struct mlx5_core_srq {
+       struct mlx5_core_rsc_common     common; /* must be first */
        u32             srqn;
        int             max;
        int             max_gs;
@@ -414,7 +370,6 @@ struct mlx5_eq_table {
        struct mlx5_eq          pages_eq;
        struct mlx5_eq          async_eq;
        struct mlx5_eq          cmd_eq;
-       struct msix_entry       *msix_arr;
        int                     num_comp_vectors;
        /* protect EQs list
         */
@@ -467,9 +422,16 @@ struct mlx5_mr_table {
        struct radix_tree_root  tree;
 };
 
+struct mlx5_irq_info {
+       cpumask_var_t mask;
+       char name[MLX5_MAX_IRQ_NAME];
+};
+
 struct mlx5_priv {
        char                    name[MLX5_MAX_NAME_LEN];
        struct mlx5_eq_table    eq_table;
+       struct msix_entry       *msix_arr;
+       struct mlx5_irq_info    *irq_info;
        struct mlx5_uuar_info   uuari;
        MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock);
 
@@ -520,7 +482,9 @@ struct mlx5_core_dev {
        u8                      rev_id;
        char                    board_id[MLX5_BOARD_ID_LEN];
        struct mlx5_cmd         cmd;
-       struct mlx5_caps        caps;
+       struct mlx5_port_caps   port_caps[MLX5_MAX_PORTS];
+       u32 hca_caps_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
+       u32 hca_caps_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
        phys_addr_t             iseg_base;
        struct mlx5_init_seg __iomem *iseg;
        void                    (*event) (struct mlx5_core_dev *dev,
@@ -529,6 +493,7 @@ struct mlx5_core_dev {
        struct mlx5_priv        priv;
        struct mlx5_profile     *profile;
        atomic_t                num_qps;
+       u32                     issi;
 };
 
 struct mlx5_db {
@@ -549,6 +514,11 @@ enum {
        MLX5_COMP_EQ_SIZE = 1024,
 };
 
+enum {
+       MLX5_PTYS_IB = 1 << 0,
+       MLX5_PTYS_EN = 1 << 2,
+};
+
 struct mlx5_db_pgdir {
        struct list_head        list;
        DECLARE_BITMAP(bitmap, MLX5_DB_PER_PAGE);
@@ -584,13 +554,44 @@ struct mlx5_pas {
        u8      log_sz;
 };
 
+enum port_state_policy {
+       MLX5_AAA_000
+};
+
+enum phy_port_state {
+       MLX5_AAA_111
+};
+
+struct mlx5_hca_vport_context {
+       u32                     field_select;
+       bool                    sm_virt_aware;
+       bool                    has_smi;
+       bool                    has_raw;
+       enum port_state_policy  policy;
+       enum phy_port_state     phys_state;
+       enum ib_port_state      vport_state;
+       u8                      port_physical_state;
+       u64                     sys_image_guid;
+       u64                     port_guid;
+       u64                     node_guid;
+       u32                     cap_mask1;
+       u32                     cap_mask1_perm;
+       u32                     cap_mask2;
+       u32                     cap_mask2_perm;
+       u16                     lid;
+       u8                      init_type_reply; /* bitmask: see ib spec 14.2.5.6 InitTypeReply */
+       u8                      lmc;
+       u8                      subnet_timeout;
+       u16                     sm_lid;
+       u8                      sm_sl;
+       u16                     qkey_violation_counter;
+       u16                     pkey_violation_counter;
+       bool                    grh_required;
+};
+
 static inline void *mlx5_buf_offset(struct mlx5_buf *buf, int offset)
 {
-       if (likely(BITS_PER_LONG == 64 || buf->nbufs == 1))
                return buf->direct.buf + offset;
-       else
-               return buf->page_list[offset >> PAGE_SHIFT].buf +
-                       (offset & (PAGE_SIZE - 1));
 }
 
 extern struct workqueue_struct *mlx5_core_wq;
@@ -654,8 +655,8 @@ void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
 void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
 int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr);
 int mlx5_cmd_status_to_err_v2(void *ptr);
-int mlx5_core_get_caps(struct mlx5_core_dev *dev, struct mlx5_caps *caps,
-                      u16 opmod);
+int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type,
+                      enum mlx5_cap_mode cap_mode);
 int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
                  int out_size);
 int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
@@ -665,19 +666,21 @@ int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
 int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
 int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
 int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
+int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
+void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
 void mlx5_health_cleanup(void);
 void  __init mlx5_health_init(void);
 void mlx5_start_health_poll(struct mlx5_core_dev *dev);
 void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
-int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct,
-                  struct mlx5_buf *buf);
+int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
 void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
 struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
                                                      gfp_t flags, int npages);
 void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
                                 struct mlx5_cmd_mailbox *head);
 int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
-                        struct mlx5_create_srq_mbox_in *in, int inlen);
+                        struct mlx5_create_srq_mbox_in *in, int inlen,
+                        int is_xrc);
 int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq);
 int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
                        struct mlx5_query_srq_mbox_out *out);
@@ -734,7 +737,32 @@ void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
 int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
                         int size_in, void *data_out, int size_out,
                         u16 reg_num, int arg, int write);
+
 int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
+int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
+                        int ptys_size, int proto_mask, u8 local_port);
+int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev,
+                             u32 *proto_cap, int proto_mask);
+int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev,
+                               u32 *proto_admin, int proto_mask);
+int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
+                                   u8 *link_width_oper, u8 local_port);
+int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev,
+                              u8 *proto_oper, int proto_mask,
+                              u8 local_port);
+int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
+                       int proto_mask);
+int mlx5_set_port_status(struct mlx5_core_dev *dev,
+                        enum mlx5_port_status status);
+int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status);
+
+int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu);
+int mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu,
+                           u8 local_port);
+int mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
+                            u8 local_port);
+int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
+                             u8 *vl_hw_cap, u8 local_port);
 
 int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
 void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
@@ -799,6 +827,7 @@ struct mlx5_interface {
 void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol);
 int mlx5_register_interface(struct mlx5_interface *intf);
 void mlx5_unregister_interface(struct mlx5_interface *intf);
+int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);
 
 struct mlx5_profile {
        u64     mask;
@@ -809,4 +838,14 @@ struct mlx5_profile {
        } mr_cache[MAX_MR_CACHE_ENTRIES];
 };
 
+static inline int mlx5_get_gid_table_len(u16 param)
+{
+       if (param > 4) {
+               pr_warn("gid table length is zero\n");
+               return 0;
+       }
+
+       return 8 * (1 << param);
+}
+
 #endif /* MLX5_DRIVER_H */
diff --git a/include/linux/mlx5/flow_table.h b/include/linux/mlx5/flow_table.h
new file mode 100644 (file)
index 0000000..5f922c6
--- /dev/null
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef MLX5_FLOW_TABLE_H
+#define MLX5_FLOW_TABLE_H
+
+#include <linux/mlx5/driver.h>
+
+struct mlx5_flow_table_group {
+       u8      log_sz;
+       u8      match_criteria_enable;
+       u32     match_criteria[MLX5_ST_SZ_DW(fte_match_param)];
+};
+
+void *mlx5_create_flow_table(struct mlx5_core_dev *dev, u8 level, u8 table_type,
+                            u16 num_groups,
+                            struct mlx5_flow_table_group *group);
+void mlx5_destroy_flow_table(void *flow_table);
+int mlx5_add_flow_table_entry(void *flow_table, u8 match_criteria_enable,
+                             void *match_criteria, void *flow_context,
+                             u32 *flow_index);
+void mlx5_del_flow_table_entry(void *flow_table, u32 flow_index);
+u32 mlx5_get_flow_table_id(void *flow_table);
+
+#endif /* MLX5_FLOW_TABLE_H */
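The handle returned by mlx5_create_flow_table() is opaque to callers. A hypothetical create/add/delete round trip (demo only; match_criteria and flow_context are MLX5_SET-built blobs assumed to be supplied by the caller, and the function name is invented):

static int demo_flow_table(struct mlx5_core_dev *dev,
			   void *match_criteria, void *flow_context)
{
	struct mlx5_flow_table_group group = {
		.log_sz = 10,				/* 1024 entries */
		.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS,
	};
	u32 flow_index;
	void *ft;
	int err;

	ft = mlx5_create_flow_table(dev, 0 /* level */,
				    MLX5_FLOW_TABLE_TYPE_NIC_RCV, 1, &group);
	if (!ft)
		return -ENOMEM;

	err = mlx5_add_flow_table_entry(ft, MLX5_MATCH_OUTER_HEADERS,
					match_criteria, flow_context,
					&flow_index);
	if (!err)
		mlx5_del_flow_table_entry(ft, flow_index);

	mlx5_destroy_flow_table(ft);
	return err;
}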
index cb3ad17edd1f5959b0499b82899ec95ada191025..6d2f6fee041cd4f663fd7d6898b8ed8418a95b73 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
- */
-
+*/
 #ifndef MLX5_IFC_H
 #define MLX5_IFC_H
 
+enum {
+       MLX5_EVENT_TYPE_CODING_COMPLETION_EVENTS                   = 0x0,
+       MLX5_EVENT_TYPE_CODING_PATH_MIGRATED_SUCCEEDED             = 0x1,
+       MLX5_EVENT_TYPE_CODING_COMMUNICATION_ESTABLISHED           = 0x2,
+       MLX5_EVENT_TYPE_CODING_SEND_QUEUE_DRAINED                  = 0x3,
+       MLX5_EVENT_TYPE_CODING_LAST_WQE_REACHED                    = 0x13,
+       MLX5_EVENT_TYPE_CODING_SRQ_LIMIT                           = 0x14,
+       MLX5_EVENT_TYPE_CODING_DCT_ALL_CONNECTIONS_CLOSED          = 0x1c,
+       MLX5_EVENT_TYPE_CODING_DCT_ACCESS_KEY_VIOLATION            = 0x1d,
+       MLX5_EVENT_TYPE_CODING_CQ_ERROR                            = 0x4,
+       MLX5_EVENT_TYPE_CODING_LOCAL_WQ_CATASTROPHIC_ERROR         = 0x5,
+       MLX5_EVENT_TYPE_CODING_PATH_MIGRATION_FAILED               = 0x7,
+       MLX5_EVENT_TYPE_CODING_PAGE_FAULT_EVENT                    = 0xc,
+       MLX5_EVENT_TYPE_CODING_INVALID_REQUEST_LOCAL_WQ_ERROR      = 0x10,
+       MLX5_EVENT_TYPE_CODING_LOCAL_ACCESS_VIOLATION_WQ_ERROR     = 0x11,
+       MLX5_EVENT_TYPE_CODING_LOCAL_SRQ_CATASTROPHIC_ERROR        = 0x12,
+       MLX5_EVENT_TYPE_CODING_INTERNAL_ERROR                      = 0x8,
+       MLX5_EVENT_TYPE_CODING_PORT_STATE_CHANGE                   = 0x9,
+       MLX5_EVENT_TYPE_CODING_GPIO_EVENT                          = 0x15,
+       MLX5_EVENT_TYPE_CODING_REMOTE_CONFIGURATION_PROTOCOL_EVENT = 0x19,
+       MLX5_EVENT_TYPE_CODING_DOORBELL_BLUEFLAME_CONGESTION_EVENT = 0x1a,
+       MLX5_EVENT_TYPE_CODING_STALL_VL_EVENT                      = 0x1b,
+       MLX5_EVENT_TYPE_CODING_DROPPED_PACKET_LOGGED_EVENT         = 0x1f,
+       MLX5_EVENT_TYPE_CODING_COMMAND_INTERFACE_COMPLETION        = 0xa,
+       MLX5_EVENT_TYPE_CODING_PAGE_REQUEST                        = 0xb
+};
+
+enum {
+       MLX5_MODIFY_TIR_BITMASK_LRO                   = 0x0,
+       MLX5_MODIFY_TIR_BITMASK_INDIRECT_TABLE        = 0x1,
+       MLX5_MODIFY_TIR_BITMASK_HASH                  = 0x2,
+       MLX5_MODIFY_TIR_BITMASK_TUNNELED_OFFLOAD_EN   = 0x3
+};
+
 enum {
        MLX5_CMD_OP_QUERY_HCA_CAP                 = 0x100,
        MLX5_CMD_OP_QUERY_ADAPTER                 = 0x101,
@@ -43,6 +76,8 @@ enum {
        MLX5_CMD_OP_QUERY_PAGES                   = 0x107,
        MLX5_CMD_OP_MANAGE_PAGES                  = 0x108,
        MLX5_CMD_OP_SET_HCA_CAP                   = 0x109,
+       MLX5_CMD_OP_QUERY_ISSI                    = 0x10a,
+       MLX5_CMD_OP_SET_ISSI                      = 0x10b,
        MLX5_CMD_OP_CREATE_MKEY                   = 0x200,
        MLX5_CMD_OP_QUERY_MKEY                    = 0x201,
        MLX5_CMD_OP_DESTROY_MKEY                  = 0x202,
@@ -66,6 +101,7 @@ enum {
        MLX5_CMD_OP_2ERR_QP                       = 0x507,
        MLX5_CMD_OP_2RST_QP                       = 0x50a,
        MLX5_CMD_OP_QUERY_QP                      = 0x50b,
+       MLX5_CMD_OP_SQD_RTS_QP                    = 0x50c,
        MLX5_CMD_OP_INIT2INIT_QP                  = 0x50e,
        MLX5_CMD_OP_CREATE_PSV                    = 0x600,
        MLX5_CMD_OP_DESTROY_PSV                   = 0x601,
@@ -73,7 +109,10 @@ enum {
        MLX5_CMD_OP_DESTROY_SRQ                   = 0x701,
        MLX5_CMD_OP_QUERY_SRQ                     = 0x702,
        MLX5_CMD_OP_ARM_RQ                        = 0x703,
-       MLX5_CMD_OP_RESIZE_SRQ                    = 0x704,
+       MLX5_CMD_OP_CREATE_XRC_SRQ                = 0x705,
+       MLX5_CMD_OP_DESTROY_XRC_SRQ               = 0x706,
+       MLX5_CMD_OP_QUERY_XRC_SRQ                 = 0x707,
+       MLX5_CMD_OP_ARM_XRC_SRQ                   = 0x708,
        MLX5_CMD_OP_CREATE_DCT                    = 0x710,
        MLX5_CMD_OP_DESTROY_DCT                   = 0x711,
        MLX5_CMD_OP_DRAIN_DCT                     = 0x712,
@@ -85,8 +124,12 @@ enum {
        MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT      = 0x753,
        MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT       = 0x754,
        MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT      = 0x755,
-       MLX5_CMD_OP_QUERY_RCOE_ADDRESS            = 0x760,
+       MLX5_CMD_OP_QUERY_ROCE_ADDRESS            = 0x760,
        MLX5_CMD_OP_SET_ROCE_ADDRESS              = 0x761,
+       MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT       = 0x762,
+       MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT      = 0x763,
+       MLX5_CMD_OP_QUERY_HCA_VPORT_GID           = 0x764,
+       MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY          = 0x765,
        MLX5_CMD_OP_QUERY_VPORT_COUNTER           = 0x770,
        MLX5_CMD_OP_ALLOC_Q_COUNTER               = 0x771,
        MLX5_CMD_OP_DEALLOC_Q_COUNTER             = 0x772,
@@ -98,7 +141,7 @@ enum {
        MLX5_CMD_OP_CONFIG_INT_MODERATION         = 0x804,
        MLX5_CMD_OP_ACCESS_REG                    = 0x805,
        MLX5_CMD_OP_ATTACH_TO_MCG                 = 0x806,
-       MLX5_CMD_OP_DETACH_FROM_MCG               = 0x807,
+       MLX5_CMD_OP_DETTACH_FROM_MCG              = 0x807,
        MLX5_CMD_OP_GET_DROPPED_PACKET_LOG        = 0x80a,
        MLX5_CMD_OP_MAD_IFC                       = 0x50d,
        MLX5_CMD_OP_QUERY_MAD_DEMUX               = 0x80b,
@@ -106,23 +149,22 @@ enum {
        MLX5_CMD_OP_NOP                           = 0x80d,
        MLX5_CMD_OP_ALLOC_XRCD                    = 0x80e,
        MLX5_CMD_OP_DEALLOC_XRCD                  = 0x80f,
-       MLX5_CMD_OP_SET_BURST_SIZE                = 0x812,
-       MLX5_CMD_OP_QUERY_BURST_SZIE              = 0x813,
-       MLX5_CMD_OP_ACTIVATE_TRACER               = 0x814,
-       MLX5_CMD_OP_DEACTIVATE_TRACER             = 0x815,
-       MLX5_CMD_OP_CREATE_SNIFFER_RULE           = 0x820,
-       MLX5_CMD_OP_DESTROY_SNIFFER_RULE          = 0x821,
-       MLX5_CMD_OP_QUERY_CONG_PARAMS             = 0x822,
-       MLX5_CMD_OP_MODIFY_CONG_PARAMS            = 0x823,
-       MLX5_CMD_OP_QUERY_CONG_STATISTICS         = 0x824,
+       MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN        = 0x816,
+       MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN      = 0x817,
+       MLX5_CMD_OP_QUERY_CONG_STATUS             = 0x822,
+       MLX5_CMD_OP_MODIFY_CONG_STATUS            = 0x823,
+       MLX5_CMD_OP_QUERY_CONG_PARAMS             = 0x824,
+       MLX5_CMD_OP_MODIFY_CONG_PARAMS            = 0x825,
+       MLX5_CMD_OP_QUERY_CONG_STATISTICS         = 0x826,
+       MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT           = 0x827,
+       MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT        = 0x828,
+       MLX5_CMD_OP_SET_L2_TABLE_ENTRY            = 0x829,
+       MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY          = 0x82a,
+       MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY         = 0x82b,
        MLX5_CMD_OP_CREATE_TIR                    = 0x900,
        MLX5_CMD_OP_MODIFY_TIR                    = 0x901,
        MLX5_CMD_OP_DESTROY_TIR                   = 0x902,
        MLX5_CMD_OP_QUERY_TIR                     = 0x903,
-       MLX5_CMD_OP_CREATE_TIS                    = 0x912,
-       MLX5_CMD_OP_MODIFY_TIS                    = 0x913,
-       MLX5_CMD_OP_DESTROY_TIS                   = 0x914,
-       MLX5_CMD_OP_QUERY_TIS                     = 0x915,
        MLX5_CMD_OP_CREATE_SQ                     = 0x904,
        MLX5_CMD_OP_MODIFY_SQ                     = 0x905,
        MLX5_CMD_OP_DESTROY_SQ                    = 0x906,
@@ -135,9 +177,430 @@ enum {
        MLX5_CMD_OP_MODIFY_RMP                    = 0x90d,
        MLX5_CMD_OP_DESTROY_RMP                   = 0x90e,
        MLX5_CMD_OP_QUERY_RMP                     = 0x90f,
-       MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY          = 0x910,
-       MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY        = 0x911,
-       MLX5_CMD_OP_MAX                           = 0x911
+       MLX5_CMD_OP_CREATE_TIS                    = 0x912,
+       MLX5_CMD_OP_MODIFY_TIS                    = 0x913,
+       MLX5_CMD_OP_DESTROY_TIS                   = 0x914,
+       MLX5_CMD_OP_QUERY_TIS                     = 0x915,
+       MLX5_CMD_OP_CREATE_RQT                    = 0x916,
+       MLX5_CMD_OP_MODIFY_RQT                    = 0x917,
+       MLX5_CMD_OP_DESTROY_RQT                   = 0x918,
+       MLX5_CMD_OP_QUERY_RQT                     = 0x919,
+       MLX5_CMD_OP_CREATE_FLOW_TABLE             = 0x930,
+       MLX5_CMD_OP_DESTROY_FLOW_TABLE            = 0x931,
+       MLX5_CMD_OP_QUERY_FLOW_TABLE              = 0x932,
+       MLX5_CMD_OP_CREATE_FLOW_GROUP             = 0x933,
+       MLX5_CMD_OP_DESTROY_FLOW_GROUP            = 0x934,
+       MLX5_CMD_OP_QUERY_FLOW_GROUP              = 0x935,
+       MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY          = 0x936,
+       MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY        = 0x937,
+       MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY       = 0x938
+};
+
+struct mlx5_ifc_flow_table_fields_supported_bits {
+       u8         outer_dmac[0x1];
+       u8         outer_smac[0x1];
+       u8         outer_ether_type[0x1];
+       u8         reserved_0[0x1];
+       u8         outer_first_prio[0x1];
+       u8         outer_first_cfi[0x1];
+       u8         outer_first_vid[0x1];
+       u8         reserved_1[0x1];
+       u8         outer_second_prio[0x1];
+       u8         outer_second_cfi[0x1];
+       u8         outer_second_vid[0x1];
+       u8         reserved_2[0x1];
+       u8         outer_sip[0x1];
+       u8         outer_dip[0x1];
+       u8         outer_frag[0x1];
+       u8         outer_ip_protocol[0x1];
+       u8         outer_ip_ecn[0x1];
+       u8         outer_ip_dscp[0x1];
+       u8         outer_udp_sport[0x1];
+       u8         outer_udp_dport[0x1];
+       u8         outer_tcp_sport[0x1];
+       u8         outer_tcp_dport[0x1];
+       u8         outer_tcp_flags[0x1];
+       u8         outer_gre_protocol[0x1];
+       u8         outer_gre_key[0x1];
+       u8         outer_vxlan_vni[0x1];
+       u8         reserved_3[0x5];
+       u8         source_eswitch_port[0x1];
+
+       u8         inner_dmac[0x1];
+       u8         inner_smac[0x1];
+       u8         inner_ether_type[0x1];
+       u8         reserved_4[0x1];
+       u8         inner_first_prio[0x1];
+       u8         inner_first_cfi[0x1];
+       u8         inner_first_vid[0x1];
+       u8         reserved_5[0x1];
+       u8         inner_second_prio[0x1];
+       u8         inner_second_cfi[0x1];
+       u8         inner_second_vid[0x1];
+       u8         reserved_6[0x1];
+       u8         inner_sip[0x1];
+       u8         inner_dip[0x1];
+       u8         inner_frag[0x1];
+       u8         inner_ip_protocol[0x1];
+       u8         inner_ip_ecn[0x1];
+       u8         inner_ip_dscp[0x1];
+       u8         inner_udp_sport[0x1];
+       u8         inner_udp_dport[0x1];
+       u8         inner_tcp_sport[0x1];
+       u8         inner_tcp_dport[0x1];
+       u8         inner_tcp_flags[0x1];
+       u8         reserved_7[0x9];
+
+       u8         reserved_8[0x40];
+};
+
+struct mlx5_ifc_flow_table_prop_layout_bits {
+       u8         ft_support[0x1];
+       u8         reserved_0[0x1f];
+
+       u8         reserved_1[0x2];
+       u8         log_max_ft_size[0x6];
+       u8         reserved_2[0x10];
+       u8         max_ft_level[0x8];
+
+       u8         reserved_3[0x20];
+
+       u8         reserved_4[0x18];
+       u8         log_max_ft_num[0x8];
+
+       u8         reserved_5[0x18];
+       u8         log_max_destination[0x8];
+
+       u8         reserved_6[0x18];
+       u8         log_max_flow[0x8];
+
+       u8         reserved_7[0x40];
+
+       struct mlx5_ifc_flow_table_fields_supported_bits ft_field_support;
+
+       struct mlx5_ifc_flow_table_fields_supported_bits ft_field_bitmask_support;
+};
+
+struct mlx5_ifc_odp_per_transport_service_cap_bits {
+       u8         send[0x1];
+       u8         receive[0x1];
+       u8         write[0x1];
+       u8         read[0x1];
+       u8         reserved_0[0x1];
+       u8         srq_receive[0x1];
+       u8         reserved_1[0x1a];
+};
+
+struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
+       u8         smac_47_16[0x20];
+
+       u8         smac_15_0[0x10];
+       u8         ethertype[0x10];
+
+       u8         dmac_47_16[0x20];
+
+       u8         dmac_15_0[0x10];
+       u8         first_prio[0x3];
+       u8         first_cfi[0x1];
+       u8         first_vid[0xc];
+
+       u8         ip_protocol[0x8];
+       u8         ip_dscp[0x6];
+       u8         ip_ecn[0x2];
+       u8         vlan_tag[0x1];
+       u8         reserved_0[0x1];
+       u8         frag[0x1];
+       u8         reserved_1[0x4];
+       u8         tcp_flags[0x9];
+
+       u8         tcp_sport[0x10];
+       u8         tcp_dport[0x10];
+
+       u8         reserved_2[0x20];
+
+       u8         udp_sport[0x10];
+       u8         udp_dport[0x10];
+
+       u8         src_ip[4][0x20];
+
+       u8         dst_ip[4][0x20];
+};
+
+struct mlx5_ifc_fte_match_set_misc_bits {
+       u8         reserved_0[0x20];
+
+       u8         reserved_1[0x10];
+       u8         source_port[0x10];
+
+       u8         outer_second_prio[0x3];
+       u8         outer_second_cfi[0x1];
+       u8         outer_second_vid[0xc];
+       u8         inner_second_prio[0x3];
+       u8         inner_second_cfi[0x1];
+       u8         inner_second_vid[0xc];
+
+       u8         outer_second_vlan_tag[0x1];
+       u8         inner_second_vlan_tag[0x1];
+       u8         reserved_2[0xe];
+       u8         gre_protocol[0x10];
+
+       u8         gre_key_h[0x18];
+       u8         gre_key_l[0x8];
+
+       u8         vxlan_vni[0x18];
+       u8         reserved_3[0x8];
+
+       u8         reserved_4[0x20];
+
+       u8         reserved_5[0xc];
+       u8         outer_ipv6_flow_label[0x14];
+
+       u8         reserved_6[0xc];
+       u8         inner_ipv6_flow_label[0x14];
+
+       u8         reserved_7[0xe0];
+};
+
+struct mlx5_ifc_cmd_pas_bits {
+       u8         pa_h[0x20];
+
+       u8         pa_l[0x14];
+       u8         reserved_0[0xc];
+};
+
+struct mlx5_ifc_uint64_bits {
+       u8         hi[0x20];
+
+       u8         lo[0x20];
+};
+
+enum {
+       MLX5_ADS_STAT_RATE_NO_LIMIT  = 0x0,
+       MLX5_ADS_STAT_RATE_2_5GBPS   = 0x7,
+       MLX5_ADS_STAT_RATE_10GBPS    = 0x8,
+       MLX5_ADS_STAT_RATE_30GBPS    = 0x9,
+       MLX5_ADS_STAT_RATE_5GBPS     = 0xa,
+       MLX5_ADS_STAT_RATE_20GBPS    = 0xb,
+       MLX5_ADS_STAT_RATE_40GBPS    = 0xc,
+       MLX5_ADS_STAT_RATE_60GBPS    = 0xd,
+       MLX5_ADS_STAT_RATE_80GBPS    = 0xe,
+       MLX5_ADS_STAT_RATE_120GBPS   = 0xf,
+};
+
+struct mlx5_ifc_ads_bits {
+       u8         fl[0x1];
+       u8         free_ar[0x1];
+       u8         reserved_0[0xe];
+       u8         pkey_index[0x10];
+
+       u8         reserved_1[0x8];
+       u8         grh[0x1];
+       u8         mlid[0x7];
+       u8         rlid[0x10];
+
+       u8         ack_timeout[0x5];
+       u8         reserved_2[0x3];
+       u8         src_addr_index[0x8];
+       u8         reserved_3[0x4];
+       u8         stat_rate[0x4];
+       u8         hop_limit[0x8];
+
+       u8         reserved_4[0x4];
+       u8         tclass[0x8];
+       u8         flow_label[0x14];
+
+       u8         rgid_rip[16][0x8];
+
+       u8         reserved_5[0x4];
+       u8         f_dscp[0x1];
+       u8         f_ecn[0x1];
+       u8         reserved_6[0x1];
+       u8         f_eth_prio[0x1];
+       u8         ecn[0x2];
+       u8         dscp[0x6];
+       u8         udp_sport[0x10];
+
+       u8         dei_cfi[0x1];
+       u8         eth_prio[0x3];
+       u8         sl[0x4];
+       u8         port[0x8];
+       u8         rmac_47_32[0x10];
+
+       u8         rmac_31_0[0x20];
+};
+
+struct mlx5_ifc_flow_table_nic_cap_bits {
+       u8         reserved_0[0x200];
+
+       struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive;
+
+       u8         reserved_1[0x200];
+
+       struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive_sniffer;
+
+       struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit;
+
+       u8         reserved_2[0x200];
+
+       struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_sniffer;
+
+       u8         reserved_3[0x7200];
+};
+
+struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
+       u8         csum_cap[0x1];
+       u8         vlan_cap[0x1];
+       u8         lro_cap[0x1];
+       u8         lro_psh_flag[0x1];
+       u8         lro_time_stamp[0x1];
+       u8         reserved_0[0x6];
+       u8         max_lso_cap[0x5];
+       u8         reserved_1[0x4];
+       u8         rss_ind_tbl_cap[0x4];
+       u8         reserved_2[0x3];
+       u8         tunnel_lso_const_out_ip_id[0x1];
+       u8         reserved_3[0x2];
+       u8         tunnel_statless_gre[0x1];
+       u8         tunnel_stateless_vxlan[0x1];
+
+       u8         reserved_4[0x20];
+
+       u8         reserved_5[0x10];
+       u8         lro_min_mss_size[0x10];
+
+       u8         reserved_6[0x120];
+
+       u8         lro_timer_supported_periods[4][0x20];
+
+       u8         reserved_7[0x600];
+};
+
+struct mlx5_ifc_roce_cap_bits {
+       u8         roce_apm[0x1];
+       u8         reserved_0[0x1f];
+
+       u8         reserved_1[0x60];
+
+       u8         reserved_2[0xc];
+       u8         l3_type[0x4];
+       u8         reserved_3[0x8];
+       u8         roce_version[0x8];
+
+       u8         reserved_4[0x10];
+       u8         r_roce_dest_udp_port[0x10];
+
+       u8         r_roce_max_src_udp_port[0x10];
+       u8         r_roce_min_src_udp_port[0x10];
+
+       u8         reserved_5[0x10];
+       u8         roce_address_table_size[0x10];
+
+       u8         reserved_6[0x700];
+};
+
+enum {
+       MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_1_BYTE     = 0x0,
+       MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_2_BYTES    = 0x2,
+       MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_4_BYTES    = 0x4,
+       MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_8_BYTES    = 0x8,
+       MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_16_BYTES   = 0x10,
+       MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_32_BYTES   = 0x20,
+       MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_64_BYTES   = 0x40,
+       MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_128_BYTES  = 0x80,
+       MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_256_BYTES  = 0x100,
+};
+
+enum {
+       MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_1_BYTE     = 0x1,
+       MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_2_BYTES    = 0x2,
+       MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_4_BYTES    = 0x4,
+       MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_8_BYTES    = 0x8,
+       MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_16_BYTES   = 0x10,
+       MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_32_BYTES   = 0x20,
+       MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_64_BYTES   = 0x40,
+       MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_128_BYTES  = 0x80,
+       MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_256_BYTES  = 0x100,
+};
+
+struct mlx5_ifc_atomic_caps_bits {
+       u8         reserved_0[0x40];
+
+       u8         atomic_req_endianness[0x1];
+       u8         reserved_1[0x1f];
+
+       u8         reserved_2[0x20];
+
+       u8         reserved_3[0x10];
+       u8         atomic_operations[0x10];
+
+       u8         reserved_4[0x10];
+       u8         atomic_size_qp[0x10];
+
+       u8         reserved_5[0x10];
+       u8         atomic_size_dc[0x10];
+
+       u8         reserved_6[0x720];
+};
+
+struct mlx5_ifc_odp_cap_bits {
+       u8         reserved_0[0x40];
+
+       u8         sig[0x1];
+       u8         reserved_1[0x1f];
+
+       u8         reserved_2[0x20];
+
+       struct mlx5_ifc_odp_per_transport_service_cap_bits rc_odp_caps;
+
+       struct mlx5_ifc_odp_per_transport_service_cap_bits uc_odp_caps;
+
+       struct mlx5_ifc_odp_per_transport_service_cap_bits ud_odp_caps;
+
+       u8         reserved_3[0x720];
+};
+
+enum {
+       MLX5_WQ_TYPE_LINKED_LIST  = 0x0,
+       MLX5_WQ_TYPE_CYCLIC       = 0x1,
+       MLX5_WQ_TYPE_STRQ         = 0x2,
+};
+
+enum {
+       MLX5_WQ_END_PAD_MODE_NONE   = 0x0,
+       MLX5_WQ_END_PAD_MODE_ALIGN  = 0x1,
+};
+
+enum {
+       MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_8_GID_ENTRIES    = 0x0,
+       MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_16_GID_ENTRIES   = 0x1,
+       MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_32_GID_ENTRIES   = 0x2,
+       MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_64_GID_ENTRIES   = 0x3,
+       MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_128_GID_ENTRIES  = 0x4,
+};
+
+enum {
+       MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_128_ENTRIES  = 0x0,
+       MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_256_ENTRIES  = 0x1,
+       MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_512_ENTRIES  = 0x2,
+       MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_1K_ENTRIES   = 0x3,
+       MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_2K_ENTRIES   = 0x4,
+       MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_4K_ENTRIES   = 0x5,
+};
+
+enum {
+       MLX5_CMD_HCA_CAP_PORT_TYPE_IB        = 0x0,
+       MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET  = 0x1,
+};
+
+enum {
+       MLX5_CMD_HCA_CAP_CMDIF_CHECKSUM_DISABLED       = 0x0,
+       MLX5_CMD_HCA_CAP_CMDIF_CHECKSUM_INITIAL_STATE  = 0x1,
+       MLX5_CMD_HCA_CAP_CMDIF_CHECKSUM_ENABLED        = 0x3,
+};
+
+enum {
+       MLX5_CAP_PORT_TYPE_IB  = 0x0,
+       MLX5_CAP_PORT_TYPE_ETH = 0x1,
 };
 
 struct mlx5_ifc_cmd_hca_cap_bits {
@@ -148,9 +611,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
        u8         reserved_1[0xb];
        u8         log_max_qp[0x5];
 
-       u8         log_max_strq_sz[0x8];
-       u8         reserved_2[0x3];
-       u8         log_max_srqs[0x5];
+       u8         reserved_2[0xb];
+       u8         log_max_srq[0x5];
        u8         reserved_3[0x10];
 
        u8         reserved_4[0x8];
@@ -185,165 +647,6137 @@ struct mlx5_ifc_cmd_hca_cap_bits {
        u8         pad_cap[0x1];
        u8         cc_query_allowed[0x1];
        u8         cc_modify_allowed[0x1];
-       u8         reserved_15[0x1d];
+       u8         reserved_15[0xd];
+       u8         gid_table_size[0x10];
 
-       u8         reserved_16[0x6];
+       u8         out_of_seq_cnt[0x1];
+       u8         vport_counters[0x1];
+       u8         reserved_16[0x4];
        u8         max_qp_cnt[0xa];
        u8         pkey_table_size[0x10];
 
-       u8         eswitch_owner[0x1];
-       u8         reserved_17[0xa];
+       u8         vport_group_manager[0x1];
+       u8         vhca_group_manager[0x1];
+       u8         ib_virt[0x1];
+       u8         eth_virt[0x1];
+       u8         reserved_17[0x1];
+       u8         ets[0x1];
+       u8         nic_flow_table[0x1];
+       u8         reserved_18[0x4];
        u8         local_ca_ack_delay[0x5];
-       u8         reserved_18[0x8];
+       u8         reserved_19[0x6];
+       u8         port_type[0x2];
        u8         num_ports[0x8];
 
-       u8         reserved_19[0x3];
+       u8         reserved_20[0x3];
        u8         log_max_msg[0x5];
-       u8         reserved_20[0x18];
+       u8         reserved_21[0x18];
 
        u8         stat_rate_support[0x10];
-       u8         reserved_21[0x10];
+       u8         reserved_22[0xc];
+       u8         cqe_version[0x4];
 
-       u8         reserved_22[0x10];
+       u8         compact_address_vector[0x1];
+       u8         reserved_23[0xe];
+       u8         drain_sigerr[0x1];
        u8         cmdif_checksum[0x2];
        u8         sigerr_cqe[0x1];
-       u8         reserved_23[0x1];
+       u8         reserved_24[0x1];
        u8         wq_signature[0x1];
        u8         sctr_data_cqe[0x1];
-       u8         reserved_24[0x1];
+       u8         reserved_25[0x1];
        u8         sho[0x1];
        u8         tph[0x1];
        u8         rf[0x1];
-       u8         dc[0x1];
-       u8         reserved_25[0x2];
+       u8         dct[0x1];
+       u8         reserved_26[0x1];
+       u8         eth_net_offloads[0x1];
        u8         roce[0x1];
        u8         atomic[0x1];
-       u8         rsz_srq[0x1];
+       u8         reserved_27[0x1];
 
        u8         cq_oi[0x1];
        u8         cq_resize[0x1];
        u8         cq_moderation[0x1];
-       u8         sniffer_rule_flow[0x1];
-       u8         sniffer_rule_vport[0x1];
-       u8         sniffer_rule_phy[0x1];
-       u8         reserved_26[0x1];
+       u8         reserved_28[0x3];
+       u8         cq_eq_remap[0x1];
        u8         pg[0x1];
        u8         block_lb_mc[0x1];
-       u8         reserved_27[0x3];
+       u8         reserved_29[0x1];
+       u8         scqe_break_moderation[0x1];
+       u8         reserved_30[0x1];
        u8         cd[0x1];
-       u8         reserved_28[0x1];
+       u8         reserved_31[0x1];
        u8         apm[0x1];
-       u8         reserved_29[0x7];
+       u8         reserved_32[0x7];
        u8         qkv[0x1];
        u8         pkv[0x1];
-       u8         reserved_30[0x4];
+       u8         reserved_33[0x4];
        u8         xrc[0x1];
        u8         ud[0x1];
        u8         uc[0x1];
        u8         rc[0x1];
 
-       u8         reserved_31[0xa];
+       u8         reserved_34[0xa];
        u8         uar_sz[0x6];
-       u8         reserved_32[0x8];
+       u8         reserved_35[0x8];
        u8         log_pg_sz[0x8];
 
        u8         bf[0x1];
-       u8         reserved_33[0xa];
+       u8         reserved_36[0x1];
+       u8         pad_tx_eth_packet[0x1];
+       u8         reserved_37[0x8];
        u8         log_bf_reg_size[0x5];
-       u8         reserved_34[0x10];
+       u8         reserved_38[0x10];
 
-       u8         reserved_35[0x10];
+       u8         reserved_39[0x10];
        u8         max_wqe_sz_sq[0x10];
 
-       u8         reserved_36[0x10];
+       u8         reserved_40[0x10];
        u8         max_wqe_sz_rq[0x10];
 
-       u8         reserved_37[0x10];
+       u8         reserved_41[0x10];
        u8         max_wqe_sz_sq_dc[0x10];
 
-       u8         reserved_38[0x7];
+       u8         reserved_42[0x7];
        u8         max_qp_mcg[0x19];
 
-       u8         reserved_39[0x18];
+       u8         reserved_43[0x18];
        u8         log_max_mcg[0x8];
 
-       u8         reserved_40[0xb];
+       u8         reserved_44[0x3];
+       u8         log_max_transport_domain[0x5];
+       u8         reserved_45[0x3];
        u8         log_max_pd[0x5];
-       u8         reserved_41[0xb];
+       u8         reserved_46[0xb];
        u8         log_max_xrcd[0x5];
 
-       u8         reserved_42[0x20];
+       u8         reserved_47[0x20];
 
-       u8         reserved_43[0x3];
+       u8         reserved_48[0x3];
        u8         log_max_rq[0x5];
-       u8         reserved_44[0x3];
+       u8         reserved_49[0x3];
        u8         log_max_sq[0x5];
-       u8         reserved_45[0x3];
+       u8         reserved_50[0x3];
        u8         log_max_tir[0x5];
-       u8         reserved_46[0x3];
+       u8         reserved_51[0x3];
        u8         log_max_tis[0x5];
 
-       u8         reserved_47[0x13];
-       u8         log_max_rq_per_tir[0x5];
-       u8         reserved_48[0x3];
+       u8         basic_cyclic_rcv_wqe[0x1];
+       u8         reserved_52[0x2];
+       u8         log_max_rmp[0x5];
+       u8         reserved_53[0x3];
+       u8         log_max_rqt[0x5];
+       u8         reserved_54[0x3];
+       u8         log_max_rqt_size[0x5];
+       u8         reserved_55[0x3];
        u8         log_max_tis_per_sq[0x5];
 
-       u8         reserved_49[0xe0];
+       u8         reserved_56[0x3];
+       u8         log_max_stride_sz_rq[0x5];
+       u8         reserved_57[0x3];
+       u8         log_min_stride_sz_rq[0x5];
+       u8         reserved_58[0x3];
+       u8         log_max_stride_sz_sq[0x5];
+       u8         reserved_59[0x3];
+       u8         log_min_stride_sz_sq[0x5];
+
+       u8         reserved_60[0x1b];
+       u8         log_max_wq_sz[0x5];
+
+       u8         reserved_61[0xa0];
 
-       u8         reserved_50[0x10];
+       u8         reserved_62[0x3];
+       u8         log_max_l2_table[0x5];
+       u8         reserved_63[0x8];
        u8         log_uar_page_sz[0x10];
 
-       u8         reserved_51[0x100];
+       u8         reserved_64[0x100];
 
-       u8         reserved_52[0x1f];
+       u8         reserved_65[0x1f];
        u8         cqe_zip[0x1];
 
        u8         cqe_zip_timeout[0x10];
        u8         cqe_zip_max_num[0x10];
 
-       u8         reserved_53[0x220];
+       u8         reserved_66[0x220];
 };
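+/*
+ * Editor's sketch (not part of the original patch): every "u8 name[0xN]"
+ * above is an N-bit field in the device's big-endian command layout, not a
+ * byte array.  Drivers read and write these through the MLX5_SET()/
+ * MLX5_GET() accessors from <linux/mlx5/device.h>, e.g. to pull the GID
+ * table size out of a queried capability page:
+ *
+ *	void *cap = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
+ *	u16 gids = MLX5_GET(cmd_hca_cap, cap, gid_table_size);
+ *
+ * "out" is assumed to hold a successful QUERY_HCA_CAP response.
+ */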
 
-struct mlx5_ifc_set_hca_cap_in_bits {
-       u8         opcode[0x10];
-       u8         reserved_0[0x10];
+enum {
+       MLX5_DEST_FORMAT_STRUCT_DESTINATION_TYPE_FLOW_TABLE_  = 0x1,
+       MLX5_DEST_FORMAT_STRUCT_DESTINATION_TYPE_TIR          = 0x2,
+};
 
-       u8         reserved_1[0x10];
-       u8         op_mod[0x10];
+struct mlx5_ifc_dest_format_struct_bits {
+       u8         destination_type[0x8];
+       u8         destination_id[0x18];
 
-       u8         reserved_2[0x40];
+       u8         reserved_0[0x20];
+};
+
+struct mlx5_ifc_fte_match_param_bits {
+       struct mlx5_ifc_fte_match_set_lyr_2_4_bits outer_headers;
+
+       struct mlx5_ifc_fte_match_set_misc_bits misc_parameters;
+
+       struct mlx5_ifc_fte_match_set_lyr_2_4_bits inner_headers;
 
-       struct mlx5_ifc_cmd_hca_cap_bits hca_capability_struct;
+       u8         reserved_0[0xa00];
 };
 
-struct mlx5_ifc_query_hca_cap_in_bits {
-       u8         opcode[0x10];
-       u8         reserved_0[0x10];
+enum {
+       MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP     = 0x0,
+       MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP     = 0x1,
+       MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT   = 0x2,
+       MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT   = 0x3,
+       MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_IPSEC_SPI  = 0x4,
+};
 
-       u8         reserved_1[0x10];
-       u8         op_mod[0x10];
+struct mlx5_ifc_rx_hash_field_select_bits {
+       u8         l3_prot_type[0x1];
+       u8         l4_prot_type[0x1];
+       u8         selected_fields[0x1e];
+};
 
-       u8         reserved_2[0x40];
+enum {
+       MLX5_WQ_WQ_TYPE_WQ_LINKED_LIST  = 0x0,
+       MLX5_WQ_WQ_TYPE_WQ_CYCLIC       = 0x1,
 };
 
-struct mlx5_ifc_query_hca_cap_out_bits {
-       u8         status[0x8];
+enum {
+       MLX5_WQ_END_PADDING_MODE_END_PAD_NONE   = 0x0,
+       MLX5_WQ_END_PADDING_MODE_END_PAD_ALIGN  = 0x1,
+};
+
+struct mlx5_ifc_wq_bits {
+       u8         wq_type[0x4];
+       u8         wq_signature[0x1];
+       u8         end_padding_mode[0x2];
+       u8         cd_slave[0x1];
        u8         reserved_0[0x18];
 
-       u8         syndrome[0x20];
+       u8         hds_skip_first_sge[0x1];
+       u8         log2_hds_buf_size[0x3];
+       u8         reserved_1[0x7];
+       u8         page_offset[0x5];
+       u8         lwm[0x10];
 
-       u8         reserved_1[0x40];
+       u8         reserved_2[0x8];
+       u8         pd[0x18];
+
+       u8         reserved_3[0x8];
+       u8         uar_page[0x18];
+
+       u8         dbr_addr[0x40];
+
+       u8         hw_counter[0x20];
+
+       u8         sw_counter[0x20];
+
+       u8         reserved_4[0xc];
+       u8         log_wq_stride[0x4];
+       u8         reserved_5[0x3];
+       u8         log_wq_pg_sz[0x5];
+       u8         reserved_6[0x3];
+       u8         log_wq_sz[0x5];
+
+       u8         reserved_7[0x4e0];
 
-       u8         capability_struct[256][0x8];
+       struct mlx5_ifc_cmd_pas_bits pas[0];
 };
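+/*
+ * Editor's sketch (not part of the original patch): the flexible pas[0]
+ * tail makes any command embedding this WQ context variable-length, so the
+ * mailbox is sized for one 64-bit physical address per WQ page, along the
+ * lines of (the command name and "npages" are illustrative):
+ *
+ *	inlen = MLX5_ST_SZ_BYTES(create_rq_in) + npages * sizeof(u64);
+ *	in = mlx5_vzalloc(inlen);
+ */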
 
-struct mlx5_ifc_set_hca_cap_out_bits {
-       u8         status[0x8];
-       u8         reserved_0[0x18];
+struct mlx5_ifc_rq_num_bits {
+       u8         reserved_0[0x8];
+       u8         rq_num[0x18];
+};
 
-       u8         syndrome[0x20];
+struct mlx5_ifc_mac_address_layout_bits {
+       u8         reserved_0[0x10];
+       u8         mac_addr_47_32[0x10];
 
-       u8         reserved_1[0x40];
+       u8         mac_addr_31_0[0x20];
+};
+
+struct mlx5_ifc_cong_control_r_roce_ecn_np_bits {
+       u8         reserved_0[0xa0];
+
+       u8         min_time_between_cnps[0x20];
+
+       u8         reserved_1[0x12];
+       u8         cnp_dscp[0x6];
+       u8         reserved_2[0x5];
+       u8         cnp_802p_prio[0x3];
+
+       u8         reserved_3[0x720];
+};
+
+struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits {
+       u8         reserved_0[0x60];
+
+       u8         reserved_1[0x4];
+       u8         clamp_tgt_rate[0x1];
+       u8         reserved_2[0x3];
+       u8         clamp_tgt_rate_after_time_inc[0x1];
+       u8         reserved_3[0x17];
+
+       u8         reserved_4[0x20];
+
+       u8         rpg_time_reset[0x20];
+
+       u8         rpg_byte_reset[0x20];
+
+       u8         rpg_threshold[0x20];
+
+       u8         rpg_max_rate[0x20];
+
+       u8         rpg_ai_rate[0x20];
+
+       u8         rpg_hai_rate[0x20];
+
+       u8         rpg_gd[0x20];
+
+       u8         rpg_min_dec_fac[0x20];
+
+       u8         rpg_min_rate[0x20];
+
+       u8         reserved_5[0xe0];
+
+       u8         rate_to_set_on_first_cnp[0x20];
+
+       u8         dce_tcp_g[0x20];
+
+       u8         dce_tcp_rtt[0x20];
+
+       u8         rate_reduce_monitor_period[0x20];
+
+       u8         reserved_6[0x20];
+
+       u8         initial_alpha_value[0x20];
+
+       u8         reserved_7[0x4a0];
+};
+
+struct mlx5_ifc_cong_control_802_1qau_rp_bits {
+       u8         reserved_0[0x80];
+
+       u8         rppp_max_rps[0x20];
+
+       u8         rpg_time_reset[0x20];
+
+       u8         rpg_byte_reset[0x20];
+
+       u8         rpg_threshold[0x20];
+
+       u8         rpg_max_rate[0x20];
+
+       u8         rpg_ai_rate[0x20];
+
+       u8         rpg_hai_rate[0x20];
+
+       u8         rpg_gd[0x20];
+
+       u8         rpg_min_dec_fac[0x20];
+
+       u8         rpg_min_rate[0x20];
+
+       u8         reserved_1[0x640];
+};
+
+enum {
+       MLX5_RESIZE_FIELD_SELECT_RESIZE_FIELD_SELECT_LOG_CQ_SIZE    = 0x1,
+       MLX5_RESIZE_FIELD_SELECT_RESIZE_FIELD_SELECT_PAGE_OFFSET    = 0x2,
+       MLX5_RESIZE_FIELD_SELECT_RESIZE_FIELD_SELECT_LOG_PAGE_SIZE  = 0x4,
+};
+
+struct mlx5_ifc_resize_field_select_bits {
+       u8         resize_field_select[0x20];
+};
+
+enum {
+       MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_CQ_PERIOD     = 0x1,
+       MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_CQ_MAX_COUNT  = 0x2,
+       MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_OI            = 0x4,
+       MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_C_EQN         = 0x8,
+};
+
+struct mlx5_ifc_modify_field_select_bits {
+       u8         modify_field_select[0x20];
+};
+
+struct mlx5_ifc_field_select_r_roce_np_bits {
+       u8         field_select_r_roce_np[0x20];
+};
+
+struct mlx5_ifc_field_select_r_roce_rp_bits {
+       u8         field_select_r_roce_rp[0x20];
+};
+
+enum {
+       MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPPP_MAX_RPS     = 0x4,
+       MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_TIME_RESET   = 0x8,
+       MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_BYTE_RESET   = 0x10,
+       MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_THRESHOLD    = 0x20,
+       MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_MAX_RATE     = 0x40,
+       MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_AI_RATE      = 0x80,
+       MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_HAI_RATE     = 0x100,
+       MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_GD           = 0x200,
+       MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_MIN_DEC_FAC  = 0x400,
+       MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_MIN_RATE     = 0x800,
+};
+
+struct mlx5_ifc_field_select_802_1qau_rp_bits {
+       u8         field_select_8021qaurp[0x20];
+};
+
+struct mlx5_ifc_phys_layer_cntrs_bits {
+       u8         time_since_last_clear_high[0x20];
+
+       u8         time_since_last_clear_low[0x20];
+
+       u8         symbol_errors_high[0x20];
+
+       u8         symbol_errors_low[0x20];
+
+       u8         sync_headers_errors_high[0x20];
+
+       u8         sync_headers_errors_low[0x20];
+
+       u8         edpl_bip_errors_lane0_high[0x20];
+
+       u8         edpl_bip_errors_lane0_low[0x20];
+
+       u8         edpl_bip_errors_lane1_high[0x20];
+
+       u8         edpl_bip_errors_lane1_low[0x20];
+
+       u8         edpl_bip_errors_lane2_high[0x20];
+
+       u8         edpl_bip_errors_lane2_low[0x20];
+
+       u8         edpl_bip_errors_lane3_high[0x20];
+
+       u8         edpl_bip_errors_lane3_low[0x20];
+
+       u8         fc_fec_corrected_blocks_lane0_high[0x20];
+
+       u8         fc_fec_corrected_blocks_lane0_low[0x20];
+
+       u8         fc_fec_corrected_blocks_lane1_high[0x20];
+
+       u8         fc_fec_corrected_blocks_lane1_low[0x20];
+
+       u8         fc_fec_corrected_blocks_lane2_high[0x20];
+
+       u8         fc_fec_corrected_blocks_lane2_low[0x20];
+
+       u8         fc_fec_corrected_blocks_lane3_high[0x20];
+
+       u8         fc_fec_corrected_blocks_lane3_low[0x20];
+
+       u8         fc_fec_uncorrectable_blocks_lane0_high[0x20];
+
+       u8         fc_fec_uncorrectable_blocks_lane0_low[0x20];
+
+       u8         fc_fec_uncorrectable_blocks_lane1_high[0x20];
+
+       u8         fc_fec_uncorrectable_blocks_lane1_low[0x20];
+
+       u8         fc_fec_uncorrectable_blocks_lane2_high[0x20];
+
+       u8         fc_fec_uncorrectable_blocks_lane2_low[0x20];
+
+       u8         fc_fec_uncorrectable_blocks_lane3_high[0x20];
+
+       u8         fc_fec_uncorrectable_blocks_lane3_low[0x20];
+
+       u8         rs_fec_corrected_blocks_high[0x20];
+
+       u8         rs_fec_corrected_blocks_low[0x20];
+
+       u8         rs_fec_uncorrectable_blocks_high[0x20];
+
+       u8         rs_fec_uncorrectable_blocks_low[0x20];
+
+       u8         rs_fec_no_errors_blocks_high[0x20];
+
+       u8         rs_fec_no_errors_blocks_low[0x20];
+
+       u8         rs_fec_single_error_blocks_high[0x20];
+
+       u8         rs_fec_single_error_blocks_low[0x20];
+
+       u8         rs_fec_corrected_symbols_total_high[0x20];
+
+       u8         rs_fec_corrected_symbols_total_low[0x20];
+
+       u8         rs_fec_corrected_symbols_lane0_high[0x20];
+
+       u8         rs_fec_corrected_symbols_lane0_low[0x20];
+
+       u8         rs_fec_corrected_symbols_lane1_high[0x20];
+
+       u8         rs_fec_corrected_symbols_lane1_low[0x20];
+
+       u8         rs_fec_corrected_symbols_lane2_high[0x20];
+
+       u8         rs_fec_corrected_symbols_lane2_low[0x20];
+
+       u8         rs_fec_corrected_symbols_lane3_high[0x20];
+
+       u8         rs_fec_corrected_symbols_lane3_low[0x20];
+
+       u8         link_down_events[0x20];
+
+       u8         successful_recovery_events[0x20];
+
+       u8         reserved_0[0x180];
+};
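+/*
+ * Editor's note (not part of the original patch): each counter above is a
+ * 64-bit value split into _high/_low 32-bit words, so a reader composes,
+ * for example:
+ *
+ *	u64 symbol_errors =
+ *		((u64)MLX5_GET(phys_layer_cntrs, cntrs, symbol_errors_high) << 32) |
+ *		MLX5_GET(phys_layer_cntrs, cntrs, symbol_errors_low);
+ *
+ * where "cntrs" is assumed to point at a queried counter payload.
+ */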
+
+struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits {
+       u8         transmit_queue_high[0x20];
+
+       u8         transmit_queue_low[0x20];
+
+       u8         reserved_0[0x780];
+};
+
+struct mlx5_ifc_eth_per_prio_grp_data_layout_bits {
+       u8         rx_octets_high[0x20];
+
+       u8         rx_octets_low[0x20];
+
+       u8         reserved_0[0xc0];
+
+       u8         rx_frames_high[0x20];
+
+       u8         rx_frames_low[0x20];
+
+       u8         tx_octets_high[0x20];
+
+       u8         tx_octets_low[0x20];
+
+       u8         reserved_1[0xc0];
+
+       u8         tx_frames_high[0x20];
+
+       u8         tx_frames_low[0x20];
+
+       u8         rx_pause_high[0x20];
+
+       u8         rx_pause_low[0x20];
+
+       u8         rx_pause_duration_high[0x20];
+
+       u8         rx_pause_duration_low[0x20];
+
+       u8         tx_pause_high[0x20];
+
+       u8         tx_pause_low[0x20];
+
+       u8         tx_pause_duration_high[0x20];
+
+       u8         tx_pause_duration_low[0x20];
+
+       u8         rx_pause_transition_high[0x20];
+
+       u8         rx_pause_transition_low[0x20];
+
+       u8         reserved_2[0x400];
+};
+
+struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits {
+       u8         port_transmit_wait_high[0x20];
+
+       u8         port_transmit_wait_low[0x20];
+
+       u8         reserved_0[0x780];
+};
+
+struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits {
+       u8         dot3stats_alignment_errors_high[0x20];
+
+       u8         dot3stats_alignment_errors_low[0x20];
+
+       u8         dot3stats_fcs_errors_high[0x20];
+
+       u8         dot3stats_fcs_errors_low[0x20];
+
+       u8         dot3stats_single_collision_frames_high[0x20];
+
+       u8         dot3stats_single_collision_frames_low[0x20];
+
+       u8         dot3stats_multiple_collision_frames_high[0x20];
+
+       u8         dot3stats_multiple_collision_frames_low[0x20];
+
+       u8         dot3stats_sqe_test_errors_high[0x20];
+
+       u8         dot3stats_sqe_test_errors_low[0x20];
+
+       u8         dot3stats_deferred_transmissions_high[0x20];
+
+       u8         dot3stats_deferred_transmissions_low[0x20];
+
+       u8         dot3stats_late_collisions_high[0x20];
+
+       u8         dot3stats_late_collisions_low[0x20];
+
+       u8         dot3stats_excessive_collisions_high[0x20];
+
+       u8         dot3stats_excessive_collisions_low[0x20];
+
+       u8         dot3stats_internal_mac_transmit_errors_high[0x20];
+
+       u8         dot3stats_internal_mac_transmit_errors_low[0x20];
+
+       u8         dot3stats_carrier_sense_errors_high[0x20];
+
+       u8         dot3stats_carrier_sense_errors_low[0x20];
+
+       u8         dot3stats_frame_too_longs_high[0x20];
+
+       u8         dot3stats_frame_too_longs_low[0x20];
+
+       u8         dot3stats_internal_mac_receive_errors_high[0x20];
+
+       u8         dot3stats_internal_mac_receive_errors_low[0x20];
+
+       u8         dot3stats_symbol_errors_high[0x20];
+
+       u8         dot3stats_symbol_errors_low[0x20];
+
+       u8         dot3control_in_unknown_opcodes_high[0x20];
+
+       u8         dot3control_in_unknown_opcodes_low[0x20];
+
+       u8         dot3in_pause_frames_high[0x20];
+
+       u8         dot3in_pause_frames_low[0x20];
+
+       u8         dot3out_pause_frames_high[0x20];
+
+       u8         dot3out_pause_frames_low[0x20];
+
+       u8         reserved_0[0x3c0];
+};
+
+struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits {
+       u8         ether_stats_drop_events_high[0x20];
+
+       u8         ether_stats_drop_events_low[0x20];
+
+       u8         ether_stats_octets_high[0x20];
+
+       u8         ether_stats_octets_low[0x20];
+
+       u8         ether_stats_pkts_high[0x20];
+
+       u8         ether_stats_pkts_low[0x20];
+
+       u8         ether_stats_broadcast_pkts_high[0x20];
+
+       u8         ether_stats_broadcast_pkts_low[0x20];
+
+       u8         ether_stats_multicast_pkts_high[0x20];
+
+       u8         ether_stats_multicast_pkts_low[0x20];
+
+       u8         ether_stats_crc_align_errors_high[0x20];
+
+       u8         ether_stats_crc_align_errors_low[0x20];
+
+       u8         ether_stats_undersize_pkts_high[0x20];
+
+       u8         ether_stats_undersize_pkts_low[0x20];
+
+       u8         ether_stats_oversize_pkts_high[0x20];
+
+       u8         ether_stats_oversize_pkts_low[0x20];
+
+       u8         ether_stats_fragments_high[0x20];
+
+       u8         ether_stats_fragments_low[0x20];
+
+       u8         ether_stats_jabbers_high[0x20];
+
+       u8         ether_stats_jabbers_low[0x20];
+
+       u8         ether_stats_collisions_high[0x20];
+
+       u8         ether_stats_collisions_low[0x20];
+
+       u8         ether_stats_pkts64octets_high[0x20];
+
+       u8         ether_stats_pkts64octets_low[0x20];
+
+       u8         ether_stats_pkts65to127octets_high[0x20];
+
+       u8         ether_stats_pkts65to127octets_low[0x20];
+
+       u8         ether_stats_pkts128to255octets_high[0x20];
+
+       u8         ether_stats_pkts128to255octets_low[0x20];
+
+       u8         ether_stats_pkts256to511octets_high[0x20];
+
+       u8         ether_stats_pkts256to511octets_low[0x20];
+
+       u8         ether_stats_pkts512to1023octets_high[0x20];
+
+       u8         ether_stats_pkts512to1023octets_low[0x20];
+
+       u8         ether_stats_pkts1024to1518octets_high[0x20];
+
+       u8         ether_stats_pkts1024to1518octets_low[0x20];
+
+       u8         ether_stats_pkts1519to2047octets_high[0x20];
+
+       u8         ether_stats_pkts1519to2047octets_low[0x20];
+
+       u8         ether_stats_pkts2048to4095octets_high[0x20];
+
+       u8         ether_stats_pkts2048to4095octets_low[0x20];
+
+       u8         ether_stats_pkts4096to8191octets_high[0x20];
+
+       u8         ether_stats_pkts4096to8191octets_low[0x20];
+
+       u8         ether_stats_pkts8192to10239octets_high[0x20];
+
+       u8         ether_stats_pkts8192to10239octets_low[0x20];
+
+       u8         reserved_0[0x280];
+};
+
+struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits {
+       u8         if_in_octets_high[0x20];
+
+       u8         if_in_octets_low[0x20];
+
+       u8         if_in_ucast_pkts_high[0x20];
+
+       u8         if_in_ucast_pkts_low[0x20];
+
+       u8         if_in_discards_high[0x20];
+
+       u8         if_in_discards_low[0x20];
+
+       u8         if_in_errors_high[0x20];
+
+       u8         if_in_errors_low[0x20];
+
+       u8         if_in_unknown_protos_high[0x20];
+
+       u8         if_in_unknown_protos_low[0x20];
+
+       u8         if_out_octets_high[0x20];
+
+       u8         if_out_octets_low[0x20];
+
+       u8         if_out_ucast_pkts_high[0x20];
+
+       u8         if_out_ucast_pkts_low[0x20];
+
+       u8         if_out_discards_high[0x20];
+
+       u8         if_out_discards_low[0x20];
+
+       u8         if_out_errors_high[0x20];
+
+       u8         if_out_errors_low[0x20];
+
+       u8         if_in_multicast_pkts_high[0x20];
+
+       u8         if_in_multicast_pkts_low[0x20];
+
+       u8         if_in_broadcast_pkts_high[0x20];
+
+       u8         if_in_broadcast_pkts_low[0x20];
+
+       u8         if_out_multicast_pkts_high[0x20];
+
+       u8         if_out_multicast_pkts_low[0x20];
+
+       u8         if_out_broadcast_pkts_high[0x20];
+
+       u8         if_out_broadcast_pkts_low[0x20];
+
+       u8         reserved_0[0x480];
+};
+
+struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits {
+       u8         a_frames_transmitted_ok_high[0x20];
+
+       u8         a_frames_transmitted_ok_low[0x20];
+
+       u8         a_frames_received_ok_high[0x20];
+
+       u8         a_frames_received_ok_low[0x20];
+
+       u8         a_frame_check_sequence_errors_high[0x20];
+
+       u8         a_frame_check_sequence_errors_low[0x20];
+
+       u8         a_alignment_errors_high[0x20];
+
+       u8         a_alignment_errors_low[0x20];
+
+       u8         a_octets_transmitted_ok_high[0x20];
+
+       u8         a_octets_transmitted_ok_low[0x20];
+
+       u8         a_octets_received_ok_high[0x20];
+
+       u8         a_octets_received_ok_low[0x20];
+
+       u8         a_multicast_frames_xmitted_ok_high[0x20];
+
+       u8         a_multicast_frames_xmitted_ok_low[0x20];
+
+       u8         a_broadcast_frames_xmitted_ok_high[0x20];
+
+       u8         a_broadcast_frames_xmitted_ok_low[0x20];
+
+       u8         a_multicast_frames_received_ok_high[0x20];
+
+       u8         a_multicast_frames_received_ok_low[0x20];
+
+       u8         a_broadcast_frames_received_ok_high[0x20];
+
+       u8         a_broadcast_frames_received_ok_low[0x20];
+
+       u8         a_in_range_length_errors_high[0x20];
+
+       u8         a_in_range_length_errors_low[0x20];
+
+       u8         a_out_of_range_length_field_high[0x20];
+
+       u8         a_out_of_range_length_field_low[0x20];
+
+       u8         a_frame_too_long_errors_high[0x20];
+
+       u8         a_frame_too_long_errors_low[0x20];
+
+       u8         a_symbol_error_during_carrier_high[0x20];
+
+       u8         a_symbol_error_during_carrier_low[0x20];
+
+       u8         a_mac_control_frames_transmitted_high[0x20];
+
+       u8         a_mac_control_frames_transmitted_low[0x20];
+
+       u8         a_mac_control_frames_received_high[0x20];
+
+       u8         a_mac_control_frames_received_low[0x20];
+
+       u8         a_unsupported_opcodes_received_high[0x20];
+
+       u8         a_unsupported_opcodes_received_low[0x20];
+
+       u8         a_pause_mac_ctrl_frames_received_high[0x20];
+
+       u8         a_pause_mac_ctrl_frames_received_low[0x20];
+
+       u8         a_pause_mac_ctrl_frames_transmitted_high[0x20];
+
+       u8         a_pause_mac_ctrl_frames_transmitted_low[0x20];
+
+       u8         reserved_0[0x300];
+};
+
+struct mlx5_ifc_cmd_inter_comp_event_bits {
+       u8         command_completion_vector[0x20];
+
+       u8         reserved_0[0xc0];
+};
+
+struct mlx5_ifc_stall_vl_event_bits {
+       u8         reserved_0[0x18];
+       u8         port_num[0x1];
+       u8         reserved_1[0x3];
+       u8         vl[0x4];
+
+       u8         reserved_2[0xa0];
+};
+
+struct mlx5_ifc_db_bf_congestion_event_bits {
+       u8         event_subtype[0x8];
+       u8         reserved_0[0x8];
+       u8         congestion_level[0x8];
+       u8         reserved_1[0x8];
+
+       u8         reserved_2[0xa0];
+};
+
+struct mlx5_ifc_gpio_event_bits {
+       u8         reserved_0[0x60];
+
+       u8         gpio_event_hi[0x20];
+
+       u8         gpio_event_lo[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_port_state_change_event_bits {
+       u8         reserved_0[0x40];
+
+       u8         port_num[0x4];
+       u8         reserved_1[0x1c];
+
+       u8         reserved_2[0x80];
+};
+
+struct mlx5_ifc_dropped_packet_logged_bits {
+       u8         reserved_0[0xe0];
+};
+
+enum {
+       MLX5_CQ_ERROR_SYNDROME_CQ_OVERRUN                 = 0x1,
+       MLX5_CQ_ERROR_SYNDROME_CQ_ACCESS_VIOLATION_ERROR  = 0x2,
+};
+
+struct mlx5_ifc_cq_error_bits {
+       u8         reserved_0[0x8];
+       u8         cqn[0x18];
+
+       u8         reserved_1[0x20];
+
+       u8         reserved_2[0x18];
+       u8         syndrome[0x8];
+
+       u8         reserved_3[0x80];
+};
+
+struct mlx5_ifc_rdma_page_fault_event_bits {
+       u8         bytes_committed[0x20];
+
+       u8         r_key[0x20];
+
+       u8         reserved_0[0x10];
+       u8         packet_len[0x10];
+
+       u8         rdma_op_len[0x20];
+
+       u8         rdma_va[0x40];
+
+       u8         reserved_1[0x5];
+       u8         rdma[0x1];
+       u8         write[0x1];
+       u8         requestor[0x1];
+       u8         qp_number[0x18];
+};
+
+struct mlx5_ifc_wqe_associated_page_fault_event_bits {
+       u8         bytes_committed[0x20];
+
+       u8         reserved_0[0x10];
+       u8         wqe_index[0x10];
+
+       u8         reserved_1[0x10];
+       u8         len[0x10];
+
+       u8         reserved_2[0x60];
+
+       u8         reserved_3[0x5];
+       u8         rdma[0x1];
+       u8         write_read[0x1];
+       u8         requestor[0x1];
+       u8         qpn[0x18];
+};
+
+struct mlx5_ifc_qp_events_bits {
+       u8         reserved_0[0xa0];
+
+       u8         type[0x8];
+       u8         reserved_1[0x18];
+
+       u8         reserved_2[0x8];
+       u8         qpn_rqn_sqn[0x18];
+};
+
+struct mlx5_ifc_dct_events_bits {
+       u8         reserved_0[0xc0];
+
+       u8         reserved_1[0x8];
+       u8         dct_number[0x18];
+};
+
+struct mlx5_ifc_comp_event_bits {
+       u8         reserved_0[0xc0];
+
+       u8         reserved_1[0x8];
+       u8         cq_number[0x18];
+};
+
+enum {
+       MLX5_QPC_STATE_RST        = 0x0,
+       MLX5_QPC_STATE_INIT       = 0x1,
+       MLX5_QPC_STATE_RTR        = 0x2,
+       MLX5_QPC_STATE_RTS        = 0x3,
+       MLX5_QPC_STATE_SQER       = 0x4,
+       MLX5_QPC_STATE_ERR        = 0x6,
+       MLX5_QPC_STATE_SQD        = 0x7,
+       MLX5_QPC_STATE_SUSPENDED  = 0x9,
+};
+
+enum {
+       MLX5_QPC_ST_RC            = 0x0,
+       MLX5_QPC_ST_UC            = 0x1,
+       MLX5_QPC_ST_UD            = 0x2,
+       MLX5_QPC_ST_XRC           = 0x3,
+       MLX5_QPC_ST_DCI           = 0x5,
+       MLX5_QPC_ST_QP0           = 0x7,
+       MLX5_QPC_ST_QP1           = 0x8,
+       MLX5_QPC_ST_RAW_DATAGRAM  = 0x9,
+       MLX5_QPC_ST_REG_UMR       = 0xc,
+};
+
+enum {
+       MLX5_QPC_PM_STATE_ARMED     = 0x0,
+       MLX5_QPC_PM_STATE_REARM     = 0x1,
+       MLX5_QPC_PM_STATE_RESERVED  = 0x2,
+       MLX5_QPC_PM_STATE_MIGRATED  = 0x3,
+};
+
+enum {
+       MLX5_QPC_END_PADDING_MODE_SCATTER_AS_IS                = 0x0,
+       MLX5_QPC_END_PADDING_MODE_PAD_TO_CACHE_LINE_ALIGNMENT  = 0x1,
+};
+
+enum {
+       MLX5_QPC_MTU_256_BYTES        = 0x1,
+       MLX5_QPC_MTU_512_BYTES        = 0x2,
+       MLX5_QPC_MTU_1K_BYTES         = 0x3,
+       MLX5_QPC_MTU_2K_BYTES         = 0x4,
+       MLX5_QPC_MTU_4K_BYTES         = 0x5,
+       MLX5_QPC_MTU_RAW_ETHERNET_QP  = 0x7,
+};
+
+enum {
+       MLX5_QPC_ATOMIC_MODE_IB_SPEC     = 0x1,
+       MLX5_QPC_ATOMIC_MODE_ONLY_8B     = 0x2,
+       MLX5_QPC_ATOMIC_MODE_UP_TO_8B    = 0x3,
+       MLX5_QPC_ATOMIC_MODE_UP_TO_16B   = 0x4,
+       MLX5_QPC_ATOMIC_MODE_UP_TO_32B   = 0x5,
+       MLX5_QPC_ATOMIC_MODE_UP_TO_64B   = 0x6,
+       MLX5_QPC_ATOMIC_MODE_UP_TO_128B  = 0x7,
+       MLX5_QPC_ATOMIC_MODE_UP_TO_256B  = 0x8,
+};
+
+enum {
+       MLX5_QPC_CS_REQ_DISABLE    = 0x0,
+       MLX5_QPC_CS_REQ_UP_TO_32B  = 0x11,
+       MLX5_QPC_CS_REQ_UP_TO_64B  = 0x22,
+};
+
+enum {
+       MLX5_QPC_CS_RES_DISABLE    = 0x0,
+       MLX5_QPC_CS_RES_UP_TO_32B  = 0x1,
+       MLX5_QPC_CS_RES_UP_TO_64B  = 0x2,
+};
+
+struct mlx5_ifc_qpc_bits {
+       u8         state[0x4];
+       u8         reserved_0[0x4];
+       u8         st[0x8];
+       u8         reserved_1[0x3];
+       u8         pm_state[0x2];
+       u8         reserved_2[0x7];
+       u8         end_padding_mode[0x2];
+       u8         reserved_3[0x2];
+
+       u8         wq_signature[0x1];
+       u8         block_lb_mc[0x1];
+       u8         atomic_like_write_en[0x1];
+       u8         latency_sensitive[0x1];
+       u8         reserved_4[0x1];
+       u8         drain_sigerr[0x1];
+       u8         reserved_5[0x2];
+       u8         pd[0x18];
+
+       u8         mtu[0x3];
+       u8         log_msg_max[0x5];
+       u8         reserved_6[0x1];
+       u8         log_rq_size[0x4];
+       u8         log_rq_stride[0x3];
+       u8         no_sq[0x1];
+       u8         log_sq_size[0x4];
+       u8         reserved_7[0x6];
+       u8         rlky[0x1];
+       u8         reserved_8[0x4];
+
+       u8         counter_set_id[0x8];
+       u8         uar_page[0x18];
+
+       u8         reserved_9[0x8];
+       u8         user_index[0x18];
+
+       u8         reserved_10[0x3];
+       u8         log_page_size[0x5];
+       u8         remote_qpn[0x18];
+
+       struct mlx5_ifc_ads_bits primary_address_path;
+
+       struct mlx5_ifc_ads_bits secondary_address_path;
+
+       u8         log_ack_req_freq[0x4];
+       u8         reserved_11[0x4];
+       u8         log_sra_max[0x3];
+       u8         reserved_12[0x2];
+       u8         retry_count[0x3];
+       u8         rnr_retry[0x3];
+       u8         reserved_13[0x1];
+       u8         fre[0x1];
+       u8         cur_rnr_retry[0x3];
+       u8         cur_retry_count[0x3];
+       u8         reserved_14[0x5];
+
+       u8         reserved_15[0x20];
+
+       u8         reserved_16[0x8];
+       u8         next_send_psn[0x18];
+
+       u8         reserved_17[0x8];
+       u8         cqn_snd[0x18];
+
+       u8         reserved_18[0x40];
+
+       u8         reserved_19[0x8];
+       u8         last_acked_psn[0x18];
+
+       u8         reserved_20[0x8];
+       u8         ssn[0x18];
+
+       u8         reserved_21[0x8];
+       u8         log_rra_max[0x3];
+       u8         reserved_22[0x1];
+       u8         atomic_mode[0x4];
+       u8         rre[0x1];
+       u8         rwe[0x1];
+       u8         rae[0x1];
+       u8         reserved_23[0x1];
+       u8         page_offset[0x6];
+       u8         reserved_24[0x3];
+       u8         cd_slave_receive[0x1];
+       u8         cd_slave_send[0x1];
+       u8         cd_master[0x1];
+
+       u8         reserved_25[0x3];
+       u8         min_rnr_nak[0x5];
+       u8         next_rcv_psn[0x18];
+
+       u8         reserved_26[0x8];
+       u8         xrcd[0x18];
+
+       u8         reserved_27[0x8];
+       u8         cqn_rcv[0x18];
+
+       u8         dbr_addr[0x40];
+
+       u8         q_key[0x20];
+
+       u8         reserved_28[0x5];
+       u8         rq_type[0x3];
+       u8         srqn_rmpn[0x18];
+
+       u8         reserved_29[0x8];
+       u8         rmsn[0x18];
+
+       u8         hw_sq_wqebb_counter[0x10];
+       u8         sw_sq_wqebb_counter[0x10];
+
+       u8         hw_rq_counter[0x20];
+
+       u8         sw_rq_counter[0x20];
+
+       u8         reserved_30[0x20];
+
+       u8         reserved_31[0xf];
+       u8         cgs[0x1];
+       u8         cs_req[0x8];
+       u8         cs_res[0x8];
+
+       u8         dc_access_key[0x40];
+
+       u8         reserved_32[0xc0];
+};
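+/*
+ * Editor's note (not part of the original patch): the qpc above travels in
+ * the MODIFY_QP family of commands defined later in this file (rst2init_qp,
+ * rtr2rts_qp, rts2rts_qp, sqd2rts_qp, sqerr2rts_qp, ...), which walk a QP
+ * through the usual RST -> INIT -> RTR -> RTS verbs state machine; each
+ * command's opt_param_mask selects which optional qpc fields are applied.
+ */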
+
+struct mlx5_ifc_roce_addr_layout_bits {
+       u8         source_l3_address[16][0x8];
+
+       u8         reserved_0[0x3];
+       u8         vlan_valid[0x1];
+       u8         vlan_id[0xc];
+       u8         source_mac_47_32[0x10];
+
+       u8         source_mac_31_0[0x20];
+
+       u8         reserved_1[0x14];
+       u8         roce_l3_type[0x4];
+       u8         roce_version[0x8];
+
+       u8         reserved_2[0x20];
+};
+
+union mlx5_ifc_hca_cap_union_bits {
+       struct mlx5_ifc_cmd_hca_cap_bits cmd_hca_cap;
+       struct mlx5_ifc_odp_cap_bits odp_cap;
+       struct mlx5_ifc_atomic_caps_bits atomic_caps;
+       struct mlx5_ifc_roce_cap_bits roce_cap;
+       struct mlx5_ifc_per_protocol_networking_offload_caps_bits per_protocol_networking_offload_caps;
+       struct mlx5_ifc_flow_table_nic_cap_bits flow_table_nic_cap;
+       u8         reserved_0[0x8000];
+};
+
+enum {
+       MLX5_FLOW_CONTEXT_ACTION_ALLOW     = 0x1,
+       MLX5_FLOW_CONTEXT_ACTION_DROP      = 0x2,
+       MLX5_FLOW_CONTEXT_ACTION_FWD_DEST  = 0x4,
+};
+
+struct mlx5_ifc_flow_context_bits {
+       u8         reserved_0[0x20];
+
+       u8         group_id[0x20];
+
+       u8         reserved_1[0x8];
+       u8         flow_tag[0x18];
+
+       u8         reserved_2[0x10];
+       u8         action[0x10];
+
+       u8         reserved_3[0x8];
+       u8         destination_list_size[0x18];
+
+       u8         reserved_4[0x160];
+
+       struct mlx5_ifc_fte_match_param_bits match_value;
+
+       u8         reserved_5[0x600];
+
+       struct mlx5_ifc_dest_format_struct_bits destination[0];
+};
+
+enum {
+       MLX5_XRC_SRQC_STATE_GOOD   = 0x0,
+       MLX5_XRC_SRQC_STATE_ERROR  = 0x1,
+};
+
+struct mlx5_ifc_xrc_srqc_bits {
+       u8         state[0x4];
+       u8         log_xrc_srq_size[0x4];
+       u8         reserved_0[0x18];
+
+       u8         wq_signature[0x1];
+       u8         cont_srq[0x1];
+       u8         reserved_1[0x1];
+       u8         rlky[0x1];
+       u8         basic_cyclic_rcv_wqe[0x1];
+       u8         log_rq_stride[0x3];
+       u8         xrcd[0x18];
+
+       u8         page_offset[0x6];
+       u8         reserved_2[0x2];
+       u8         cqn[0x18];
+
+       u8         reserved_3[0x20];
+
+       u8         user_index_equal_xrc_srqn[0x1];
+       u8         reserved_4[0x1];
+       u8         log_page_size[0x6];
+       u8         user_index[0x18];
+
+       u8         reserved_5[0x20];
+
+       u8         reserved_6[0x8];
+       u8         pd[0x18];
+
+       u8         lwm[0x10];
+       u8         wqe_cnt[0x10];
+
+       u8         reserved_7[0x40];
+
+       u8         db_record_addr_h[0x20];
+
+       u8         db_record_addr_l[0x1e];
+       u8         reserved_8[0x2];
+
+       u8         reserved_9[0x80];
+};
+
+struct mlx5_ifc_traffic_counter_bits {
+       u8         packets[0x40];
+
+       u8         octets[0x40];
+};
+
+struct mlx5_ifc_tisc_bits {
+       u8         reserved_0[0xc];
+       u8         prio[0x4];
+       u8         reserved_1[0x10];
+
+       u8         reserved_2[0x100];
+
+       u8         reserved_3[0x8];
+       u8         transport_domain[0x18];
+
+       u8         reserved_4[0x3c0];
+};
+
+enum {
+       MLX5_TIRC_DISP_TYPE_DIRECT    = 0x0,
+       MLX5_TIRC_DISP_TYPE_INDIRECT  = 0x1,
+};
+
+enum {
+       MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO  = 0x1,
+       MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO  = 0x2,
+};
+
+enum {
+       MLX5_TIRC_RX_HASH_FN_HASH_NONE           = 0x0,
+       MLX5_TIRC_RX_HASH_FN_HASH_INVERTED_XOR8  = 0x1,
+       MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ       = 0x2,
+};
+
+enum {
+       MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST_    = 0x1,
+       MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST_  = 0x2,
+};
+
+struct mlx5_ifc_tirc_bits {
+       u8         reserved_0[0x20];
+
+       u8         disp_type[0x4];
+       u8         reserved_1[0x1c];
+
+       u8         reserved_2[0x40];
+
+       u8         reserved_3[0x4];
+       u8         lro_timeout_period_usecs[0x10];
+       u8         lro_enable_mask[0x4];
+       u8         lro_max_ip_payload_size[0x8];
+
+       u8         reserved_4[0x40];
+
+       u8         reserved_5[0x8];
+       u8         inline_rqn[0x18];
+
+       u8         rx_hash_symmetric[0x1];
+       u8         reserved_6[0x1];
+       u8         tunneled_offload_en[0x1];
+       u8         reserved_7[0x5];
+       u8         indirect_table[0x18];
+
+       u8         rx_hash_fn[0x4];
+       u8         reserved_8[0x2];
+       u8         self_lb_block[0x2];
+       u8         transport_domain[0x18];
+
+       u8         rx_hash_toeplitz_key[10][0x20];
+
+       struct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_outer;
+
+       struct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_inner;
+
+       u8         reserved_9[0x4c0];
+};
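+/*
+ * Editor's sketch (not part of the original patch): a minimal Toeplitz RSS
+ * setup on a TIR context, treating the selected_fields enum values above as
+ * bit positions:
+ *
+ *	MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ);
+ *	MLX5_SET(rx_hash_field_select,
+ *		 MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer),
+ *		 selected_fields,
+ *		 (1 << MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP) |
+ *		 (1 << MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP));
+ *
+ * rx_hash_toeplitz_key holds the 40-byte (10 x 32-bit) hash key itself.
+ */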
+
+enum {
+       MLX5_SRQC_STATE_GOOD   = 0x0,
+       MLX5_SRQC_STATE_ERROR  = 0x1,
+};
+
+struct mlx5_ifc_srqc_bits {
+       u8         state[0x4];
+       u8         log_srq_size[0x4];
+       u8         reserved_0[0x18];
+
+       u8         wq_signature[0x1];
+       u8         cont_srq[0x1];
+       u8         reserved_1[0x1];
+       u8         rlky[0x1];
+       u8         reserved_2[0x1];
+       u8         log_rq_stride[0x3];
+       u8         xrcd[0x18];
+
+       u8         page_offset[0x6];
+       u8         reserved_3[0x2];
+       u8         cqn[0x18];
+
+       u8         reserved_4[0x20];
+
+       u8         reserved_5[0x2];
+       u8         log_page_size[0x6];
+       u8         reserved_6[0x18];
+
+       u8         reserved_7[0x20];
+
+       u8         reserved_8[0x8];
+       u8         pd[0x18];
+
+       u8         lwm[0x10];
+       u8         wqe_cnt[0x10];
+
+       u8         reserved_9[0x40];
+
+       u8         dbr_addr[0x40];
+
+       u8         reserved_10[0x80];
+};
+
+enum {
+       MLX5_SQC_STATE_RST  = 0x0,
+       MLX5_SQC_STATE_RDY  = 0x1,
+       MLX5_SQC_STATE_ERR  = 0x3,
+};
+
+struct mlx5_ifc_sqc_bits {
+       u8         rlky[0x1];
+       u8         cd_master[0x1];
+       u8         fre[0x1];
+       u8         flush_in_error_en[0x1];
+       u8         reserved_0[0x4];
+       u8         state[0x4];
+       u8         reserved_1[0x14];
+
+       u8         reserved_2[0x8];
+       u8         user_index[0x18];
+
+       u8         reserved_3[0x8];
+       u8         cqn[0x18];
+
+       u8         reserved_4[0xa0];
+
+       u8         tis_lst_sz[0x10];
+       u8         reserved_5[0x10];
+
+       u8         reserved_6[0x40];
+
+       u8         reserved_7[0x8];
+       u8         tis_num_0[0x18];
+
+       struct mlx5_ifc_wq_bits wq;
+};
+
+struct mlx5_ifc_rqtc_bits {
+       u8         reserved_0[0xa0];
+
+       u8         reserved_1[0x10];
+       u8         rqt_max_size[0x10];
+
+       u8         reserved_2[0x10];
+       u8         rqt_actual_size[0x10];
+
+       u8         reserved_3[0x6a0];
+
+       struct mlx5_ifc_rq_num_bits rq_num[0];
+};
+
+enum {
+       MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE  = 0x0,
+       MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_RMP     = 0x1,
+};
+
+enum {
+       MLX5_RQC_STATE_RST  = 0x0,
+       MLX5_RQC_STATE_RDY  = 0x1,
+       MLX5_RQC_STATE_ERR  = 0x3,
+};
+
+struct mlx5_ifc_rqc_bits {
+       u8         rlky[0x1];
+       u8         reserved_0[0x2];
+       u8         vsd[0x1];
+       u8         mem_rq_type[0x4];
+       u8         state[0x4];
+       u8         reserved_1[0x1];
+       u8         flush_in_error_en[0x1];
+       u8         reserved_2[0x12];
+
+       u8         reserved_3[0x8];
+       u8         user_index[0x18];
+
+       u8         reserved_4[0x8];
+       u8         cqn[0x18];
+
+       u8         counter_set_id[0x8];
+       u8         reserved_5[0x18];
+
+       u8         reserved_6[0x8];
+       u8         rmpn[0x18];
+
+       u8         reserved_7[0xe0];
+
+       struct mlx5_ifc_wq_bits wq;
+};
+
+enum {
+       MLX5_RMPC_STATE_RDY  = 0x1,
+       MLX5_RMPC_STATE_ERR  = 0x3,
+};
+
+struct mlx5_ifc_rmpc_bits {
+       u8         reserved_0[0x8];
+       u8         state[0x4];
+       u8         reserved_1[0x14];
+
+       u8         basic_cyclic_rcv_wqe[0x1];
+       u8         reserved_2[0x1f];
+
+       u8         reserved_3[0x140];
+
+       struct mlx5_ifc_wq_bits wq;
+};
+
+enum {
+       MLX5_NIC_VPORT_CONTEXT_ALLOWED_LIST_TYPE_CURRENT_UC_MAC_ADDRESS  = 0x0,
+};
+
+struct mlx5_ifc_nic_vport_context_bits {
+       u8         reserved_0[0x1f];
+       u8         roce_en[0x1];
+
+       u8         reserved_1[0x760];
+
+       u8         reserved_2[0x5];
+       u8         allowed_list_type[0x3];
+       u8         reserved_3[0xc];
+       u8         allowed_list_size[0xc];
+
+       struct mlx5_ifc_mac_address_layout_bits permanent_address;
+
+       u8         reserved_4[0x20];
+
+       u8         current_uc_mac_address[0][0x40];
+};
+
+enum {
+       MLX5_MKC_ACCESS_MODE_PA    = 0x0,
+       MLX5_MKC_ACCESS_MODE_MTT   = 0x1,
+       MLX5_MKC_ACCESS_MODE_KLMS  = 0x2,
+};
+
+struct mlx5_ifc_mkc_bits {
+       u8         reserved_0[0x1];
+       u8         free[0x1];
+       u8         reserved_1[0xd];
+       u8         small_fence_on_rdma_read_response[0x1];
+       u8         umr_en[0x1];
+       u8         a[0x1];
+       u8         rw[0x1];
+       u8         rr[0x1];
+       u8         lw[0x1];
+       u8         lr[0x1];
+       u8         access_mode[0x2];
+       u8         reserved_2[0x8];
+
+       u8         qpn[0x18];
+       u8         mkey_7_0[0x8];
+
+       u8         reserved_3[0x20];
+
+       u8         length64[0x1];
+       u8         bsf_en[0x1];
+       u8         sync_umr[0x1];
+       u8         reserved_4[0x2];
+       u8         expected_sigerr_count[0x1];
+       u8         reserved_5[0x1];
+       u8         en_rinval[0x1];
+       u8         pd[0x18];
+
+       u8         start_addr[0x40];
+
+       u8         len[0x40];
+
+       u8         bsf_octword_size[0x20];
+
+       u8         reserved_6[0x80];
+
+       u8         translations_octword_size[0x20];
+
+       u8         reserved_7[0x1b];
+       u8         log_page_size[0x5];
+
+       u8         reserved_8[0x20];
+};
+
+struct mlx5_ifc_pkey_bits {
+       u8         reserved_0[0x10];
+       u8         pkey[0x10];
+};
+
+struct mlx5_ifc_array128_auto_bits {
+       u8         array128_auto[16][0x8];
+};
+
+struct mlx5_ifc_hca_vport_context_bits {
+       u8         field_select[0x20];
+
+       u8         reserved_0[0xe0];
+
+       u8         sm_virt_aware[0x1];
+       u8         has_smi[0x1];
+       u8         has_raw[0x1];
+       u8         grh_required[0x1];
+       u8         reserved_1[0xc];
+       u8         port_physical_state[0x4];
+       u8         vport_state_policy[0x4];
+       u8         port_state[0x4];
+       u8         vport_state[0x4];
+
+       u8         reserved_2[0x20];
+
+       u8         system_image_guid[0x40];
+
+       u8         port_guid[0x40];
+
+       u8         node_guid[0x40];
+
+       u8         cap_mask1[0x20];
+
+       u8         cap_mask1_field_select[0x20];
+
+       u8         cap_mask2[0x20];
+
+       u8         cap_mask2_field_select[0x20];
+
+       u8         reserved_3[0x80];
+
+       u8         lid[0x10];
+       u8         reserved_4[0x4];
+       u8         init_type_reply[0x4];
+       u8         lmc[0x3];
+       u8         subnet_timeout[0x5];
+
+       u8         sm_lid[0x10];
+       u8         sm_sl[0x4];
+       u8         reserved_5[0xc];
+
+       u8         qkey_violation_counter[0x10];
+       u8         pkey_violation_counter[0x10];
+
+       u8         reserved_6[0xca0];
+};
+
+enum {
+       MLX5_EQC_STATUS_OK                = 0x0,
+       MLX5_EQC_STATUS_EQ_WRITE_FAILURE  = 0xa,
+};
+
+enum {
+       MLX5_EQC_ST_ARMED  = 0x9,
+       MLX5_EQC_ST_FIRED  = 0xa,
+};
+
+struct mlx5_ifc_eqc_bits {
+       u8         status[0x4];
+       u8         reserved_0[0x9];
+       u8         ec[0x1];
+       u8         oi[0x1];
+       u8         reserved_1[0x5];
+       u8         st[0x4];
+       u8         reserved_2[0x8];
+
+       u8         reserved_3[0x20];
+
+       u8         reserved_4[0x14];
+       u8         page_offset[0x6];
+       u8         reserved_5[0x6];
+
+       u8         reserved_6[0x3];
+       u8         log_eq_size[0x5];
+       u8         uar_page[0x18];
+
+       u8         reserved_7[0x20];
+
+       u8         reserved_8[0x18];
+       u8         intr[0x8];
+
+       u8         reserved_9[0x3];
+       u8         log_page_size[0x5];
+       u8         reserved_10[0x18];
+
+       u8         reserved_11[0x60];
+
+       u8         reserved_12[0x8];
+       u8         consumer_counter[0x18];
+
+       u8         reserved_13[0x8];
+       u8         producer_counter[0x18];
+
+       u8         reserved_14[0x80];
+};
+
+enum {
+       MLX5_DCTC_STATE_ACTIVE    = 0x0,
+       MLX5_DCTC_STATE_DRAINING  = 0x1,
+       MLX5_DCTC_STATE_DRAINED   = 0x2,
+};
+
+enum {
+       MLX5_DCTC_CS_RES_DISABLE    = 0x0,
+       MLX5_DCTC_CS_RES_NA         = 0x1,
+       MLX5_DCTC_CS_RES_UP_TO_64B  = 0x2,
+};
+
+enum {
+       MLX5_DCTC_MTU_256_BYTES  = 0x1,
+       MLX5_DCTC_MTU_512_BYTES  = 0x2,
+       MLX5_DCTC_MTU_1K_BYTES   = 0x3,
+       MLX5_DCTC_MTU_2K_BYTES   = 0x4,
+       MLX5_DCTC_MTU_4K_BYTES   = 0x5,
+};
+
+struct mlx5_ifc_dctc_bits {
+       u8         reserved_0[0x4];
+       u8         state[0x4];
+       u8         reserved_1[0x18];
+
+       u8         reserved_2[0x8];
+       u8         user_index[0x18];
+
+       u8         reserved_3[0x8];
+       u8         cqn[0x18];
+
+       u8         counter_set_id[0x8];
+       u8         atomic_mode[0x4];
+       u8         rre[0x1];
+       u8         rwe[0x1];
+       u8         rae[0x1];
+       u8         atomic_like_write_en[0x1];
+       u8         latency_sensitive[0x1];
+       u8         rlky[0x1];
+       u8         free_ar[0x1];
+       u8         reserved_4[0xd];
+
+       u8         reserved_5[0x8];
+       u8         cs_res[0x8];
+       u8         reserved_6[0x3];
+       u8         min_rnr_nak[0x5];
+       u8         reserved_7[0x8];
+
+       u8         reserved_8[0x8];
+       u8         srqn[0x18];
+
+       u8         reserved_9[0x8];
+       u8         pd[0x18];
+
+       u8         tclass[0x8];
+       u8         reserved_10[0x4];
+       u8         flow_label[0x14];
+
+       u8         dc_access_key[0x40];
+
+       u8         reserved_11[0x5];
+       u8         mtu[0x3];
+       u8         port[0x8];
+       u8         pkey_index[0x10];
+
+       u8         reserved_12[0x8];
+       u8         my_addr_index[0x8];
+       u8         reserved_13[0x8];
+       u8         hop_limit[0x8];
+
+       u8         dc_access_key_violation_count[0x20];
+
+       u8         reserved_14[0x14];
+       u8         dei_cfi[0x1];
+       u8         eth_prio[0x3];
+       u8         ecn[0x2];
+       u8         dscp[0x6];
+
+       u8         reserved_15[0x40];
+};
+
+enum {
+       MLX5_CQC_STATUS_OK             = 0x0,
+       MLX5_CQC_STATUS_CQ_OVERFLOW    = 0x9,
+       MLX5_CQC_STATUS_CQ_WRITE_FAIL  = 0xa,
+};
+
+enum {
+       MLX5_CQC_CQE_SZ_64_BYTES   = 0x0,
+       MLX5_CQC_CQE_SZ_128_BYTES  = 0x1,
+};
+
+enum {
+       MLX5_CQC_ST_SOLICITED_NOTIFICATION_REQUEST_ARMED  = 0x6,
+       MLX5_CQC_ST_NOTIFICATION_REQUEST_ARMED            = 0x9,
+       MLX5_CQC_ST_FIRED                                 = 0xa,
+};
+
+struct mlx5_ifc_cqc_bits {
+       u8         status[0x4];
+       u8         reserved_0[0x4];
+       u8         cqe_sz[0x3];
+       u8         cc[0x1];
+       u8         reserved_1[0x1];
+       u8         scqe_break_moderation_en[0x1];
+       u8         oi[0x1];
+       u8         reserved_2[0x2];
+       u8         cqe_zip_en[0x1];
+       u8         mini_cqe_res_format[0x2];
+       u8         st[0x4];
+       u8         reserved_3[0x8];
+
+       u8         reserved_4[0x20];
+
+       u8         reserved_5[0x14];
+       u8         page_offset[0x6];
+       u8         reserved_6[0x6];
+
+       u8         reserved_7[0x3];
+       u8         log_cq_size[0x5];
+       u8         uar_page[0x18];
+
+       u8         reserved_8[0x4];
+       u8         cq_period[0xc];
+       u8         cq_max_count[0x10];
+
+       u8         reserved_9[0x18];
+       u8         c_eqn[0x8];
+
+       u8         reserved_10[0x3];
+       u8         log_page_size[0x5];
+       u8         reserved_11[0x18];
+
+       u8         reserved_12[0x20];
+
+       u8         reserved_13[0x8];
+       u8         last_notified_index[0x18];
+
+       u8         reserved_14[0x8];
+       u8         last_solicit_index[0x18];
+
+       u8         reserved_15[0x8];
+       u8         consumer_counter[0x18];
+
+       u8         reserved_16[0x8];
+       u8         producer_counter[0x18];
+
+       u8         reserved_17[0x40];
+
+       u8         dbr_addr[0x40];
+};
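+/*
+ * Editor's sketch (not part of the original patch): cq_period/cq_max_count
+ * implement completion-event moderation; a MODIFY_CQ retuning them would
+ * select the fields via the modify_field_select flags defined above,
+ * roughly:
+ *
+ *	MLX5_SET(cqc, cqc, cq_period, 16);	// microseconds
+ *	MLX5_SET(cqc, cqc, cq_max_count, 64);	// completions
+ *	field_select = MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_CQ_PERIOD |
+ *		       MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_CQ_MAX_COUNT;
+ */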
+
+union mlx5_ifc_cong_control_roce_ecn_auto_bits {
+       struct mlx5_ifc_cong_control_802_1qau_rp_bits cong_control_802_1qau_rp;
+       struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits cong_control_r_roce_ecn_rp;
+       struct mlx5_ifc_cong_control_r_roce_ecn_np_bits cong_control_r_roce_ecn_np;
+       u8         reserved_0[0x800];
+};
+
+struct mlx5_ifc_query_adapter_param_block_bits {
+       u8         reserved_0[0xc0];
+
+       u8         reserved_1[0x8];
+       u8         ieee_vendor_id[0x18];
+
+       u8         reserved_2[0x10];
+       u8         vsd_vendor_id[0x10];
+
+       u8         vsd[208][0x8];
+
+       u8         vsd_contd_psid[16][0x8];
+};
+
+union mlx5_ifc_modify_field_select_resize_field_select_auto_bits {
+       struct mlx5_ifc_modify_field_select_bits modify_field_select;
+       struct mlx5_ifc_resize_field_select_bits resize_field_select;
+       u8         reserved_0[0x20];
+};
+
+union mlx5_ifc_field_select_802_1_r_roce_auto_bits {
+       struct mlx5_ifc_field_select_802_1qau_rp_bits field_select_802_1qau_rp;
+       struct mlx5_ifc_field_select_r_roce_rp_bits field_select_r_roce_rp;
+       struct mlx5_ifc_field_select_r_roce_np_bits field_select_r_roce_np;
+       u8         reserved_0[0x20];
+};
+
+union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits {
+       struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits eth_802_3_cntrs_grp_data_layout;
+       struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits eth_2863_cntrs_grp_data_layout;
+       struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout;
+       struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits eth_3635_cntrs_grp_data_layout;
+       struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits eth_extended_cntrs_grp_data_layout;
+       struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout;
+       struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout;
+       struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs;
+       u8         reserved_0[0x7c0];
+};
+
+union mlx5_ifc_event_auto_bits {
+       struct mlx5_ifc_comp_event_bits comp_event;
+       struct mlx5_ifc_dct_events_bits dct_events;
+       struct mlx5_ifc_qp_events_bits qp_events;
+       struct mlx5_ifc_wqe_associated_page_fault_event_bits wqe_associated_page_fault_event;
+       struct mlx5_ifc_rdma_page_fault_event_bits rdma_page_fault_event;
+       struct mlx5_ifc_cq_error_bits cq_error;
+       struct mlx5_ifc_dropped_packet_logged_bits dropped_packet_logged;
+       struct mlx5_ifc_port_state_change_event_bits port_state_change_event;
+       struct mlx5_ifc_gpio_event_bits gpio_event;
+       struct mlx5_ifc_db_bf_congestion_event_bits db_bf_congestion_event;
+       struct mlx5_ifc_stall_vl_event_bits stall_vl_event;
+       struct mlx5_ifc_cmd_inter_comp_event_bits cmd_inter_comp_event;
+       u8         reserved_0[0xe0];
+};
+
+struct mlx5_ifc_health_buffer_bits {
+       u8         reserved_0[0x100];
+
+       u8         assert_existptr[0x20];
+
+       u8         assert_callra[0x20];
+
+       u8         reserved_1[0x40];
+
+       u8         fw_version[0x20];
+
+       u8         hw_id[0x20];
+
+       u8         reserved_2[0x20];
+
+       u8         irisc_index[0x8];
+       u8         synd[0x8];
+       u8         ext_synd[0x10];
+};
+
+struct mlx5_ifc_register_loopback_control_bits {
+       u8         no_lb[0x1];
+       u8         reserved_0[0x7];
+       u8         port[0x8];
+       u8         reserved_1[0x10];
+
+       u8         reserved_2[0x60];
+};
+
+struct mlx5_ifc_teardown_hca_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+enum {
+       MLX5_TEARDOWN_HCA_IN_PROFILE_GRACEFUL_CLOSE  = 0x0,
+       MLX5_TEARDOWN_HCA_IN_PROFILE_PANIC_CLOSE     = 0x1,
+};
+
+struct mlx5_ifc_teardown_hca_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x10];
+       u8         profile[0x10];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_sqerr2rts_qp_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_sqerr2rts_qp_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         qpn[0x18];
+
+       u8         reserved_3[0x20];
+
+       u8         opt_param_mask[0x20];
+
+       u8         reserved_4[0x20];
+
+       struct mlx5_ifc_qpc_bits qpc;
+
+       u8         reserved_5[0x80];
+};
+
+struct mlx5_ifc_sqd2rts_qp_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_sqd2rts_qp_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         qpn[0x18];
+
+       u8         reserved_3[0x20];
+
+       u8         opt_param_mask[0x20];
+
+       u8         reserved_4[0x20];
+
+       struct mlx5_ifc_qpc_bits qpc;
+
+       u8         reserved_5[0x80];
+};
+
+struct mlx5_ifc_set_roce_address_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_set_roce_address_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         roce_address_index[0x10];
+       u8         reserved_2[0x10];
+
+       u8         reserved_3[0x20];
+
+       struct mlx5_ifc_roce_addr_layout_bits roce_address;
+};
+
+struct mlx5_ifc_set_mad_demux_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+enum {
+       MLX5_SET_MAD_DEMUX_IN_DEMUX_MODE_PASS_ALL   = 0x0,
+       MLX5_SET_MAD_DEMUX_IN_DEMUX_MODE_SELECTIVE  = 0x2,
+};
+
+struct mlx5_ifc_set_mad_demux_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x20];
+
+       u8         reserved_3[0x6];
+       u8         demux_mode[0x2];
+       u8         reserved_4[0x18];
+};
+
+struct mlx5_ifc_set_l2_table_entry_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_set_l2_table_entry_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x60];
+
+       u8         reserved_3[0x8];
+       u8         table_index[0x18];
+
+       u8         reserved_4[0x20];
+
+       u8         reserved_5[0x13];
+       u8         vlan_valid[0x1];
+       u8         vlan[0xc];
+
+       struct mlx5_ifc_mac_address_layout_bits mac_address;
+
+       u8         reserved_6[0xc0];
+};
+
+struct mlx5_ifc_set_issi_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_set_issi_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x10];
+       u8         current_issi[0x10];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_set_hca_cap_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_set_hca_cap_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x40];
+
+       union mlx5_ifc_hca_cap_union_bits capability;
+};
+
+struct mlx5_ifc_set_fte_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_set_fte_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x40];
+
+       u8         table_type[0x8];
+       u8         reserved_3[0x18];
+
+       u8         reserved_4[0x8];
+       u8         table_id[0x18];
+
+       u8         reserved_5[0x40];
+
+       u8         flow_index[0x20];
+
+       u8         reserved_6[0xe0];
+
+       struct mlx5_ifc_flow_context_bits flow_context;
+};
+
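+/* Hedged sketch: SET_FLOW_TABLE_ENTRY writes one flow_context at a given
+ * flow_index. The wrapper is illustrative; the caller is assumed to have
+ * already packed the flow_context (and any destination list) into "in",
+ * which is why inlen is passed explicitly. */
+#if 0	/* illustration only - not compiled */
+static int example_set_fte(struct mlx5_core_dev *dev, u8 table_type,
+			   u32 table_id, u32 flow_index,
+			   void *in, int inlen)
+{
+	u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
+
+	MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
+	MLX5_SET(set_fte_in, in, table_type, table_type);
+	MLX5_SET(set_fte_in, in, table_id, table_id);
+	MLX5_SET(set_fte_in, in, flow_index, flow_index);
+
+	return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+}
+#endif
+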
+struct mlx5_ifc_rts2rts_qp_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_rts2rts_qp_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         qpn[0x18];
+
+       u8         reserved_3[0x20];
+
+       u8         opt_param_mask[0x20];
+
+       u8         reserved_4[0x20];
+
+       struct mlx5_ifc_qpc_bits qpc;
+
+       u8         reserved_5[0x80];
+};
+
+struct mlx5_ifc_rtr2rts_qp_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_rtr2rts_qp_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         qpn[0x18];
+
+       u8         reserved_3[0x20];
+
+       u8         opt_param_mask[0x20];
+
+       u8         reserved_4[0x20];
+
+       struct mlx5_ifc_qpc_bits qpc;
+
+       u8         reserved_5[0x80];
+};
+
+struct mlx5_ifc_rst2init_qp_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_rst2init_qp_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         qpn[0x18];
+
+       u8         reserved_3[0x20];
+
+       u8         opt_param_mask[0x20];
+
+       u8         reserved_4[0x20];
+
+       struct mlx5_ifc_qpc_bits qpc;
+
+       u8         reserved_5[0x80];
+};
+
+struct mlx5_ifc_query_xrc_srq_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+
+       struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry;
+
+       u8         reserved_2[0x600];
+
+       u8         pas[0][0x40];
+};
+
+struct mlx5_ifc_query_xrc_srq_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         xrc_srqn[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+enum {
+       MLX5_QUERY_VPORT_STATE_OUT_STATE_DOWN  = 0x0,
+       MLX5_QUERY_VPORT_STATE_OUT_STATE_UP    = 0x1,
+};
+
+struct mlx5_ifc_query_vport_state_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x20];
+
+       u8         reserved_2[0x18];
+       u8         admin_state[0x4];
+       u8         state[0x4];
+};
+
+enum {
+       MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT  = 0x0,
+};
+
+struct mlx5_ifc_query_vport_state_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         other_vport[0x1];
+       u8         reserved_2[0xf];
+       u8         vport_number[0x10];
+
+       u8         reserved_3[0x20];
+};
+
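+/* Hedged example of querying the vport operational state; it mirrors
+ * mlx5_query_vport_state() in spirit, but the wrapper name here is
+ * illustrative. */
+#if 0	/* illustration only - not compiled */
+static int example_query_vport_state(struct mlx5_core_dev *dev, u8 *state)
+{
+	u32 in[MLX5_ST_SZ_DW(query_vport_state_in)] = {0};
+	u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};
+	int err;
+
+	MLX5_SET(query_vport_state_in, in, opcode,
+		 MLX5_CMD_OP_QUERY_VPORT_STATE);
+	MLX5_SET(query_vport_state_in, in, op_mod,
+		 MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT);
+
+	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+	if (!err)
+		*state = MLX5_GET(query_vport_state_out, out, state);
+	return err;
+}
+#endif
+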
+struct mlx5_ifc_query_vport_counter_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+
+       struct mlx5_ifc_traffic_counter_bits received_errors;
+
+       struct mlx5_ifc_traffic_counter_bits transmit_errors;
+
+       struct mlx5_ifc_traffic_counter_bits received_ib_unicast;
+
+       struct mlx5_ifc_traffic_counter_bits transmitted_ib_unicast;
+
+       struct mlx5_ifc_traffic_counter_bits received_ib_multicast;
+
+       struct mlx5_ifc_traffic_counter_bits transmitted_ib_multicast;
+
+       struct mlx5_ifc_traffic_counter_bits received_eth_broadcast;
+
+       struct mlx5_ifc_traffic_counter_bits transmitted_eth_broadcast;
+
+       struct mlx5_ifc_traffic_counter_bits received_eth_unicast;
+
+       struct mlx5_ifc_traffic_counter_bits transmitted_eth_unicast;
+
+       struct mlx5_ifc_traffic_counter_bits received_eth_multicast;
+
+       struct mlx5_ifc_traffic_counter_bits transmitted_eth_multicast;
+
+       u8         reserved_2[0xa00];
+};
+
+enum {
+       MLX5_QUERY_VPORT_COUNTER_IN_OP_MOD_VPORT_COUNTERS  = 0x0,
+};
+
+struct mlx5_ifc_query_vport_counter_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         other_vport[0x1];
+       u8         reserved_2[0xf];
+       u8         vport_number[0x10];
+
+       u8         reserved_3[0x60];
+
+       u8         clear[0x1];
+       u8         reserved_4[0x1f];
+
+       u8         reserved_5[0x20];
+};
+
+struct mlx5_ifc_query_tis_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+
+       struct mlx5_ifc_tisc_bits tis_context;
+};
+
+struct mlx5_ifc_query_tis_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         tisn[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_tir_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0xc0];
+
+       struct mlx5_ifc_tirc_bits tir_context;
+};
+
+struct mlx5_ifc_query_tir_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         tirn[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_srq_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+
+       struct mlx5_ifc_srqc_bits srq_context_entry;
+
+       u8         reserved_2[0x600];
+
+       u8         pas[0][0x40];
+};
+
+struct mlx5_ifc_query_srq_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         srqn[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_sq_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0xc0];
+
+       struct mlx5_ifc_sqc_bits sq_context;
+};
+
+struct mlx5_ifc_query_sq_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         sqn[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_special_contexts_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x20];
+
+       u8         resd_lkey[0x20];
+};
+
+struct mlx5_ifc_query_special_contexts_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x40];
+};
+
+struct mlx5_ifc_query_rqt_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0xc0];
+
+       struct mlx5_ifc_rqtc_bits rqt_context;
+};
+
+struct mlx5_ifc_query_rqt_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         rqtn[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_rq_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0xc0];
+
+       struct mlx5_ifc_rqc_bits rq_context;
+};
+
+struct mlx5_ifc_query_rq_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         rqn[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_roce_address_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+
+       struct mlx5_ifc_roce_addr_layout_bits roce_address;
+};
+
+struct mlx5_ifc_query_roce_address_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         roce_address_index[0x10];
+       u8         reserved_2[0x10];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_rmp_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0xc0];
+
+       struct mlx5_ifc_rmpc_bits rmp_context;
+};
+
+struct mlx5_ifc_query_rmp_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         rmpn[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_qp_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+
+       u8         opt_param_mask[0x20];
+
+       u8         reserved_2[0x20];
+
+       struct mlx5_ifc_qpc_bits qpc;
+
+       u8         reserved_3[0x80];
+
+       u8         pas[0][0x40];
+};
+
+struct mlx5_ifc_query_qp_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         qpn[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_q_counter_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+
+       u8         rx_write_requests[0x20];
+
+       u8         reserved_2[0x20];
+
+       u8         rx_read_requests[0x20];
+
+       u8         reserved_3[0x20];
+
+       u8         rx_atomic_requests[0x20];
+
+       u8         reserved_4[0x20];
+
+       u8         rx_dct_connect[0x20];
+
+       u8         reserved_5[0x20];
+
+       u8         out_of_buffer[0x20];
+
+       u8         reserved_6[0x20];
+
+       u8         out_of_sequence[0x20];
+
+       u8         reserved_7[0x620];
+};
+
+struct mlx5_ifc_query_q_counter_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x80];
+
+       u8         clear[0x1];
+       u8         reserved_3[0x1f];
+
+       u8         reserved_4[0x18];
+       u8         counter_set_id[0x8];
+};
+
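+/* Hedged example: reading a single counter (out_of_buffer) from a Q
+ * counter set; the "clear" bit, left unset here, would reset the counters
+ * on read. Wrapper name is illustrative. */
+#if 0	/* illustration only - not compiled */
+static int example_read_out_of_buffer(struct mlx5_core_dev *dev,
+				      u8 counter_set_id, u32 *oob)
+{
+	u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {0};
+	u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {0};
+	int err;
+
+	MLX5_SET(query_q_counter_in, in, opcode,
+		 MLX5_CMD_OP_QUERY_Q_COUNTER);
+	MLX5_SET(query_q_counter_in, in, counter_set_id, counter_set_id);
+
+	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+	if (!err)
+		*oob = MLX5_GET(query_q_counter_out, out, out_of_buffer);
+	return err;
+}
+#endif
+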
+struct mlx5_ifc_query_pages_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x10];
+       u8         function_id[0x10];
+
+       u8         num_pages[0x20];
+};
+
+enum {
+       MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES     = 0x1,
+       MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES     = 0x2,
+       MLX5_QUERY_PAGES_IN_OP_MOD_REGULAR_PAGES  = 0x3,
+};
+
+struct mlx5_ifc_query_pages_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x10];
+       u8         function_id[0x10];
+
+       u8         reserved_3[0x20];
+};
+
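+/* Editorial note: for QUERY_PAGES, op_mod selects which page requirement
+ * is being asked about (boot/init/regular, per the enum above), and
+ * mlx5_core reads num_pages back as a *signed* count - a negative value
+ * means the device wants to return that many pages to the host. */
+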
+struct mlx5_ifc_query_nic_vport_context_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+
+       struct mlx5_ifc_nic_vport_context_bits nic_vport_context;
+};
+
+struct mlx5_ifc_query_nic_vport_context_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         other_vport[0x1];
+       u8         reserved_2[0xf];
+       u8         vport_number[0x10];
+
+       u8         reserved_3[0x5];
+       u8         allowed_list_type[0x3];
+       u8         reserved_4[0x18];
+};
+
+struct mlx5_ifc_query_mkey_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+
+       struct mlx5_ifc_mkc_bits memory_key_mkey_entry;
+
+       u8         reserved_2[0x600];
+
+       u8         bsf0_klm0_pas_mtt0_1[16][0x8];
+
+       u8         bsf1_klm1_pas_mtt2_3[16][0x8];
+};
+
+struct mlx5_ifc_query_mkey_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         mkey_index[0x18];
+
+       u8         pg_access[0x1];
+       u8         reserved_3[0x1f];
+};
+
+struct mlx5_ifc_query_mad_demux_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+
+       u8         mad_demux_parameters_block[0x20];
+};
+
+struct mlx5_ifc_query_mad_demux_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x40];
+};
+
+struct mlx5_ifc_query_l2_table_entry_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0xa0];
+
+       u8         reserved_2[0x13];
+       u8         vlan_valid[0x1];
+       u8         vlan[0xc];
+
+       struct mlx5_ifc_mac_address_layout_bits mac_address;
+
+       u8         reserved_3[0xc0];
+};
+
+struct mlx5_ifc_query_l2_table_entry_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x60];
+
+       u8         reserved_3[0x8];
+       u8         table_index[0x18];
+
+       u8         reserved_4[0x140];
+};
+
+struct mlx5_ifc_query_issi_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x10];
+       u8         current_issi[0x10];
+
+       u8         reserved_2[0xa0];
+
+       u8         supported_issi_reserved[76][0x8];
+       u8         supported_issi_dw0[0x20];
+};
+
+struct mlx5_ifc_query_issi_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x40];
+};
+
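+/* Editorial note: the supported-ISSI mask occupies the trailing 80 bytes
+ * of the query_issi out box; only its first dword is named here
+ * (supported_issi_dw0, covering ISSI levels 0-31), which is the part
+ * mlx5_core actually tests during negotiation. */
+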
+struct mlx5_ifc_query_hca_vport_pkey_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+
+       struct mlx5_ifc_pkey_bits pkey[0];
+};
+
+struct mlx5_ifc_query_hca_vport_pkey_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         other_vport[0x1];
+       u8         reserved_2[0xb];
+       u8         port_num[0x4];
+       u8         vport_number[0x10];
+
+       u8         reserved_3[0x10];
+       u8         pkey_index[0x10];
+};
+
+struct mlx5_ifc_query_hca_vport_gid_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x20];
+
+       u8         gids_num[0x10];
+       u8         reserved_2[0x10];
+
+       struct mlx5_ifc_array128_auto_bits gid[0];
+};
+
+struct mlx5_ifc_query_hca_vport_gid_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         other_vport[0x1];
+       u8         reserved_2[0xb];
+       u8         port_num[0x4];
+       u8         vport_number[0x10];
+
+       u8         reserved_3[0x10];
+       u8         gid_index[0x10];
+};
+
+struct mlx5_ifc_query_hca_vport_context_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+
+       struct mlx5_ifc_hca_vport_context_bits hca_vport_context;
+};
+
+struct mlx5_ifc_query_hca_vport_context_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         other_vport[0x1];
+       u8         reserved_2[0xb];
+       u8         port_num[0x4];
+       u8         vport_number[0x10];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_hca_cap_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+
+       union mlx5_ifc_hca_cap_union_bits capability;
+};
+
+struct mlx5_ifc_query_hca_cap_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x40];
+};
+
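+/* Editorial note: for QUERY_HCA_CAP the op_mod field multiplexes the
+ * capability type and maximum-vs-current: mlx5_core builds it as
+ * (cap_type << 1) | (cap_mode & 1) and then reads individual fields out
+ * of the "capability" union with MLX5_GET(). */
+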
+struct mlx5_ifc_query_flow_table_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x80];
+
+       u8         reserved_2[0x8];
+       u8         level[0x8];
+       u8         reserved_3[0x8];
+       u8         log_size[0x8];
+
+       u8         reserved_4[0x120];
+};
+
+struct mlx5_ifc_query_flow_table_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x40];
+
+       u8         table_type[0x8];
+       u8         reserved_3[0x18];
+
+       u8         reserved_4[0x8];
+       u8         table_id[0x18];
+
+       u8         reserved_5[0x140];
+};
+
+struct mlx5_ifc_query_fte_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x1c0];
+
+       struct mlx5_ifc_flow_context_bits flow_context;
+};
+
+struct mlx5_ifc_query_fte_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x40];
+
+       u8         table_type[0x8];
+       u8         reserved_3[0x18];
+
+       u8         reserved_4[0x8];
+       u8         table_id[0x18];
+
+       u8         reserved_5[0x40];
+
+       u8         flow_index[0x20];
+
+       u8         reserved_6[0xe0];
+};
+
+enum {
+       MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_OUTER_HEADERS    = 0x0,
+       MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS  = 0x1,
+       MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_INNER_HEADERS    = 0x2,
+};
+
+struct mlx5_ifc_query_flow_group_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0xa0];
+
+       u8         start_flow_index[0x20];
+
+       u8         reserved_2[0x20];
+
+       u8         end_flow_index[0x20];
+
+       u8         reserved_3[0xa0];
+
+       u8         reserved_4[0x18];
+       u8         match_criteria_enable[0x8];
+
+       struct mlx5_ifc_fte_match_param_bits match_criteria;
+
+       u8         reserved_5[0xe00];
+};
+
+struct mlx5_ifc_query_flow_group_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x40];
+
+       u8         table_type[0x8];
+       u8         reserved_3[0x18];
+
+       u8         reserved_4[0x8];
+       u8         table_id[0x18];
+
+       u8         group_id[0x20];
+
+       u8         reserved_5[0x120];
+};
+
+struct mlx5_ifc_query_eq_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+
+       struct mlx5_ifc_eqc_bits eq_context_entry;
+
+       u8         reserved_2[0x40];
+
+       u8         event_bitmask[0x40];
+
+       u8         reserved_3[0x580];
+
+       u8         pas[0][0x40];
+};
+
+struct mlx5_ifc_query_eq_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x18];
+       u8         eq_number[0x8];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_dct_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+
+       struct mlx5_ifc_dctc_bits dct_context_entry;
+
+       u8         reserved_2[0x180];
+};
+
+struct mlx5_ifc_query_dct_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         dctn[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_cq_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+
+       struct mlx5_ifc_cqc_bits cq_context;
+
+       u8         reserved_2[0x600];
+
+       u8         pas[0][0x40];
+};
+
+struct mlx5_ifc_query_cq_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         cqn[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_cong_status_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x20];
+
+       u8         enable[0x1];
+       u8         tag_enable[0x1];
+       u8         reserved_2[0x1e];
+};
+
+struct mlx5_ifc_query_cong_status_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x18];
+       u8         priority[0x4];
+       u8         cong_protocol[0x4];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_cong_statistics_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+
+       u8         cur_flows[0x20];
+
+       u8         sum_flows[0x20];
+
+       u8         cnp_ignored_high[0x20];
+
+       u8         cnp_ignored_low[0x20];
+
+       u8         cnp_handled_high[0x20];
+
+       u8         cnp_handled_low[0x20];
+
+       u8         reserved_2[0x100];
+
+       u8         time_stamp_high[0x20];
+
+       u8         time_stamp_low[0x20];
+
+       u8         accumulators_period[0x20];
+
+       u8         ecn_marked_roce_packets_high[0x20];
+
+       u8         ecn_marked_roce_packets_low[0x20];
+
+       u8         cnps_sent_high[0x20];
+
+       u8         cnps_sent_low[0x20];
+
+       u8         reserved_3[0x560];
+};
+
+struct mlx5_ifc_query_cong_statistics_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         clear[0x1];
+       u8         reserved_2[0x1f];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_cong_params_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+
+       union mlx5_ifc_cong_control_roce_ecn_auto_bits congestion_parameters;
+};
+
+struct mlx5_ifc_query_cong_params_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x1c];
+       u8         cong_protocol[0x4];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_adapter_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+
+       struct mlx5_ifc_query_adapter_param_block_bits query_adapter_struct;
+};
+
+struct mlx5_ifc_query_adapter_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x40];
+};
+
+struct mlx5_ifc_qp_2rst_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_qp_2rst_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         qpn[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_qp_2err_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_qp_2err_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         qpn[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_page_fault_resume_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_page_fault_resume_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         error[0x1];
+       u8         reserved_2[0x4];
+       u8         rdma[0x1];
+       u8         read_write[0x1];
+       u8         req_res[0x1];
+       u8         qpn[0x18];
+
+       u8         reserved_3[0x20];
+};
+
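+/* Editorial note: PAGE_FAULT_RESUME is the on-demand-paging (ODP)
+ * completion path - after the driver resolves (or fails to resolve) a
+ * reported fault it resumes the QP, with "error" set when the fault
+ * could not be satisfied. */
+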
+struct mlx5_ifc_nop_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_nop_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x40];
+};
+
+struct mlx5_ifc_modify_vport_state_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_vport_state_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         other_vport[0x1];
+       u8         reserved_2[0xf];
+       u8         vport_number[0x10];
+
+       u8         reserved_3[0x18];
+       u8         admin_state[0x4];
+       u8         reserved_4[0x4];
+};
+
+struct mlx5_ifc_modify_tis_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_tis_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         tisn[0x18];
+
+       u8         reserved_3[0x20];
+
+       u8         modify_bitmask[0x40];
+
+       u8         reserved_4[0x40];
+
+       struct mlx5_ifc_tisc_bits ctx;
+};
+
+struct mlx5_ifc_modify_tir_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_tir_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         tirn[0x18];
+
+       u8         reserved_3[0x20];
+
+       u8         modify_bitmask[0x40];
+
+       u8         reserved_4[0x40];
+
+       struct mlx5_ifc_tirc_bits ctx;
+};
+
+struct mlx5_ifc_modify_sq_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_sq_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         sq_state[0x4];
+       u8         reserved_2[0x4];
+       u8         sqn[0x18];
+
+       u8         reserved_3[0x20];
+
+       u8         modify_bitmask[0x40];
+
+       u8         reserved_4[0x40];
+
+       struct mlx5_ifc_sqc_bits ctx;
+};
+
+struct mlx5_ifc_modify_rqt_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_rqt_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         rqtn[0x18];
+
+       u8         reserved_3[0x20];
+
+       u8         modify_bitmask[0x40];
+
+       u8         reserved_4[0x40];
+
+       struct mlx5_ifc_rqtc_bits ctx;
+};
+
+struct mlx5_ifc_modify_rq_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_rq_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         rq_state[0x4];
+       u8         reserved_2[0x4];
+       u8         rqn[0x18];
+
+       u8         reserved_3[0x20];
+
+       u8         modify_bitmask[0x40];
+
+       u8         reserved_4[0x40];
+
+       struct mlx5_ifc_rqc_bits ctx;
+};
+
+struct mlx5_ifc_modify_rmp_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_rmp_bitmask_bits {
+       u8         reserved_0[0x20];
+
+       u8         reserved_1[0x1f];
+       u8         lwm[0x1];
+};
+
+struct mlx5_ifc_modify_rmp_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         rmp_state[0x4];
+       u8         reserved_2[0x4];
+       u8         rmpn[0x18];
+
+       u8         reserved_3[0x20];
+
+       struct mlx5_ifc_rmp_bitmask_bits bitmask;
+
+       u8         reserved_4[0x40];
+
+       struct mlx5_ifc_rmpc_bits ctx;
+};
+
+struct mlx5_ifc_modify_nic_vport_context_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_nic_vport_field_select_bits {
+       u8         reserved_0[0x1c];
+       u8         permanent_address[0x1];
+       u8         addresses_list[0x1];
+       u8         roce_en[0x1];
+       u8         reserved_1[0x1];
+};
+
+struct mlx5_ifc_modify_nic_vport_context_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         other_vport[0x1];
+       u8         reserved_2[0xf];
+       u8         vport_number[0x10];
+
+       struct mlx5_ifc_modify_nic_vport_field_select_bits field_select;
+
+       u8         reserved_3[0x780];
+
+       struct mlx5_ifc_nic_vport_context_bits nic_vport_context;
+};
+
+struct mlx5_ifc_modify_hca_vport_context_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_hca_vport_context_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         other_vport[0x1];
+       u8         reserved_2[0xb];
+       u8         port_num[0x4];
+       u8         vport_number[0x10];
+
+       u8         reserved_3[0x20];
+
+       struct mlx5_ifc_hca_vport_context_bits hca_vport_context;
+};
+
+struct mlx5_ifc_modify_cq_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+enum {
+       MLX5_MODIFY_CQ_IN_OP_MOD_MODIFY_CQ  = 0x0,
+       MLX5_MODIFY_CQ_IN_OP_MOD_RESIZE_CQ  = 0x1,
+};
+
+struct mlx5_ifc_modify_cq_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         cqn[0x18];
+
+       union mlx5_ifc_modify_field_select_resize_field_select_auto_bits modify_field_select_resize_field_select;
+
+       struct mlx5_ifc_cqc_bits cq_context;
+
+       u8         reserved_3[0x600];
+
+       u8         pas[0][0x40];
+};
+
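+/* Editorial note: MODIFY_CQ doubles as resize - op_mod chooses between
+ * MLX5_MODIFY_CQ_IN_OP_MOD_MODIFY_CQ and ..._RESIZE_CQ (enum above), and
+ * the modify/resize field-select union declares which cq_context fields
+ * the firmware should honour. */
+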
+struct mlx5_ifc_modify_cong_status_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_cong_status_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x18];
+       u8         priority[0x4];
+       u8         cong_protocol[0x4];
+
+       u8         enable[0x1];
+       u8         tag_enable[0x1];
+       u8         reserved_3[0x1e];
+};
+
+struct mlx5_ifc_modify_cong_params_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_cong_params_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x1c];
+       u8         cong_protocol[0x4];
+
+       union mlx5_ifc_field_select_802_1_r_roce_auto_bits field_select;
+
+       u8         reserved_3[0x80];
+
+       union mlx5_ifc_cong_control_roce_ecn_auto_bits congestion_parameters;
+};
+
+struct mlx5_ifc_manage_pages_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         output_num_entries[0x20];
+
+       u8         reserved_1[0x20];
+
+       u8         pas[0][0x40];
+};
+
+enum {
+       MLX5_MANAGE_PAGES_IN_OP_MOD_ALLOCATION_FAIL     = 0x0,
+       MLX5_MANAGE_PAGES_IN_OP_MOD_ALLOCATION_SUCCESS  = 0x1,
+       MLX5_MANAGE_PAGES_IN_OP_MOD_HCA_RETURN_PAGES    = 0x2,
+};
+
+struct mlx5_ifc_manage_pages_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x10];
+       u8         function_id[0x10];
+
+       u8         input_num_entries[0x20];
+
+       u8         pas[0][0x40];
+};
+
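+/* Hedged sketch of a command with a flexible "pas[0][0x40]" tail: the
+ * input length grows by one 64-bit physical address per page, so the in
+ * box is allocated dynamically. Wrapper and variable names are
+ * illustrative. */
+#if 0	/* illustration only - not compiled */
+static int example_give_pages(struct mlx5_core_dev *dev, u16 func_id,
+			      u64 *addrs, int npages)
+{
+	int inlen = MLX5_ST_SZ_BYTES(manage_pages_in) +
+		    npages * sizeof(u64);
+	u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
+	void *in = kzalloc(inlen, GFP_KERNEL);
+	__be64 *pas;
+	int i, err;
+
+	if (!in)
+		return -ENOMEM;
+
+	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
+	MLX5_SET(manage_pages_in, in, op_mod,
+		 MLX5_MANAGE_PAGES_IN_OP_MOD_ALLOCATION_SUCCESS);
+	MLX5_SET(manage_pages_in, in, function_id, func_id);
+	MLX5_SET(manage_pages_in, in, input_num_entries, npages);
+
+	pas = MLX5_ADDR_OF(manage_pages_in, in, pas);
+	for (i = 0; i < npages; i++)
+		pas[i] = cpu_to_be64(addrs[i]);
+
+	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+	kfree(in);
+	return err;
+}
+#endif
+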
+struct mlx5_ifc_mad_ifc_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+
+       u8         response_mad_packet[256][0x8];
+};
+
+struct mlx5_ifc_mad_ifc_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         remote_lid[0x10];
+       u8         reserved_2[0x8];
+       u8         port[0x8];
+
+       u8         reserved_3[0x20];
+
+       u8         mad[256][0x8];
+};
+
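+/* Editorial note: opaque byte buffers are expressed as name[bytes][0x8]
+ * (e.g. the raw 256-byte MAD above); drivers address them with
+ * MLX5_ADDR_OF() and memcpy() rather than per-field accessors. */
+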
+struct mlx5_ifc_init_hca_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_init_hca_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x40];
+};
+
+struct mlx5_ifc_init2rtr_qp_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_init2rtr_qp_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         qpn[0x18];
+
+       u8         reserved_3[0x20];
+
+       u8         opt_param_mask[0x20];
+
+       u8         reserved_4[0x20];
+
+       struct mlx5_ifc_qpc_bits qpc;
+
+       u8         reserved_5[0x80];
+};
+
+struct mlx5_ifc_init2init_qp_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_init2init_qp_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         qpn[0x18];
+
+       u8         reserved_3[0x20];
+
+       u8         opt_param_mask[0x20];
+
+       u8         reserved_4[0x20];
+
+       struct mlx5_ifc_qpc_bits qpc;
+
+       u8         reserved_5[0x80];
+};
+
+struct mlx5_ifc_get_dropped_packet_log_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+
+       u8         packet_headers_log[128][0x8];
+
+       u8         packet_syndrome[64][0x8];
+};
+
+struct mlx5_ifc_get_dropped_packet_log_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x40];
+};
+
+struct mlx5_ifc_gen_eqe_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x18];
+       u8         eq_number[0x8];
+
+       u8         reserved_3[0x20];
+
+       u8         eqe[64][0x8];
+};
+
+struct mlx5_ifc_gen_eq_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_enable_hca_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x20];
+};
+
+struct mlx5_ifc_enable_hca_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x10];
+       u8         function_id[0x10];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_drain_dct_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_drain_dct_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         dctn[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_disable_hca_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x20];
+};
+
+struct mlx5_ifc_disable_hca_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x10];
+       u8         function_id[0x10];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_detach_from_mcg_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_detach_from_mcg_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         qpn[0x18];
+
+       u8         reserved_3[0x20];
+
+       u8         multicast_gid[16][0x8];
+};
+
+struct mlx5_ifc_destroy_xrc_srq_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_xrc_srq_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         xrc_srqn[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_tis_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_tis_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         tisn[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_tir_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_tir_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         tirn[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_srq_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_srq_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         srqn[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_sq_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_sq_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         sqn[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_rqt_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_rqt_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         rqtn[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_rq_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_rq_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         rqn[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_rmp_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_rmp_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         rmpn[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_qp_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_qp_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         qpn[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_psv_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_psv_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         psvn[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_mkey_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_mkey_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         mkey_index[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_flow_table_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_flow_table_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x40];
+
+       u8         table_type[0x8];
+       u8         reserved_3[0x18];
+
+       u8         reserved_4[0x8];
+       u8         table_id[0x18];
+
+       u8         reserved_5[0x140];
+};
+
+struct mlx5_ifc_destroy_flow_group_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_flow_group_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x40];
+
+       u8         table_type[0x8];
+       u8         reserved_3[0x18];
+
+       u8         reserved_4[0x8];
+       u8         table_id[0x18];
+
+       u8         group_id[0x20];
+
+       u8         reserved_5[0x120];
+};
+
+struct mlx5_ifc_destroy_eq_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_eq_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x18];
+       u8         eq_number[0x8];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_dct_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_dct_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         dctn[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_cq_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_cq_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         cqn[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_delete_vxlan_udp_dport_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_delete_vxlan_udp_dport_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x20];
+
+       u8         reserved_3[0x10];
+       u8         vxlan_udp_port[0x10];
+};
+
+struct mlx5_ifc_delete_l2_table_entry_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_delete_l2_table_entry_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x60];
+
+       u8         reserved_3[0x8];
+       u8         table_index[0x18];
+
+       u8         reserved_4[0x140];
+};
+
+struct mlx5_ifc_delete_fte_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_delete_fte_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x40];
+
+       u8         table_type[0x8];
+       u8         reserved_3[0x18];
+
+       u8         reserved_4[0x8];
+       u8         table_id[0x18];
+
+       u8         reserved_5[0x40];
+
+       u8         flow_index[0x20];
+
+       u8         reserved_6[0xe0];
+};
+
+struct mlx5_ifc_dealloc_xrcd_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_dealloc_xrcd_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         xrcd[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_dealloc_uar_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_dealloc_uar_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         uar[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_dealloc_transport_domain_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_dealloc_transport_domain_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         transport_domain[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_dealloc_q_counter_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_dealloc_q_counter_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x18];
+       u8         counter_set_id[0x8];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_dealloc_pd_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_dealloc_pd_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         pd[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_create_xrc_srq_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x8];
+       u8         xrc_srqn[0x18];
+
+       u8         reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_xrc_srq_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x40];
+
+       struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry;
+
+       u8         reserved_3[0x600];
+
+       u8         pas[0][0x40];
+};
+
+struct mlx5_ifc_create_tis_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x8];
+       u8         tisn[0x18];
+
+       u8         reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_tis_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0xc0];
+
+       struct mlx5_ifc_tisc_bits ctx;
+};
+
+struct mlx5_ifc_create_tir_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x8];
+       u8         tirn[0x18];
+
+       u8         reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_tir_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0xc0];
+
+       struct mlx5_ifc_tirc_bits ctx;
+};
+
+struct mlx5_ifc_create_srq_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x8];
+       u8         srqn[0x18];
+
+       u8         reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_srq_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x40];
+
+       struct mlx5_ifc_srqc_bits srq_context_entry;
+
+       u8         reserved_3[0x600];
+
+       u8         pas[0][0x40];
+};
+
+struct mlx5_ifc_create_sq_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x8];
+       u8         sqn[0x18];
+
+       u8         reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_sq_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0xc0];
+
+       struct mlx5_ifc_sqc_bits ctx;
+};
+
+struct mlx5_ifc_create_rqt_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x8];
+       u8         rqtn[0x18];
+
+       u8         reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_rqt_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0xc0];
+
+       struct mlx5_ifc_rqtc_bits rqt_context;
+};
+
+struct mlx5_ifc_create_rq_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x8];
+       u8         rqn[0x18];
+
+       u8         reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_rq_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0xc0];
+
+       struct mlx5_ifc_rqc_bits ctx;
+};
+
+struct mlx5_ifc_create_rmp_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x8];
+       u8         rmpn[0x18];
+
+       u8         reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_rmp_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0xc0];
+
+       struct mlx5_ifc_rmpc_bits ctx;
+};
+
+struct mlx5_ifc_create_qp_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x8];
+       u8         qpn[0x18];
+
+       u8         reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_qp_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x40];
+
+       u8         opt_param_mask[0x20];
+
+       u8         reserved_3[0x20];
+
+       struct mlx5_ifc_qpc_bits qpc;
+
+       u8         reserved_4[0x80];
+
+       u8         pas[0][0x40];
+};
+
+struct mlx5_ifc_create_psv_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+
+       u8         reserved_2[0x8];
+       u8         psv0_index[0x18];
+
+       u8         reserved_3[0x8];
+       u8         psv1_index[0x18];
+
+       u8         reserved_4[0x8];
+       u8         psv2_index[0x18];
+
+       u8         reserved_5[0x8];
+       u8         psv3_index[0x18];
+};
+
+struct mlx5_ifc_create_psv_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         num_psv[0x4];
+       u8         reserved_2[0x4];
+       u8         pd[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_create_mkey_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x8];
+       u8         mkey_index[0x18];
+
+       u8         reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_mkey_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x20];
+
+       u8         pg_access[0x1];
+       u8         reserved_3[0x1f];
+
+       struct mlx5_ifc_mkc_bits memory_key_mkey_entry;
+
+       u8         reserved_4[0x80];
+
+       u8         translations_octword_actual_size[0x20];
+
+       u8         reserved_5[0x560];
+
+       u8         klm_pas_mtt[0][0x20];
+};
+
+struct mlx5_ifc_create_flow_table_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x8];
+       u8         table_id[0x18];
+
+       u8         reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_flow_table_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x40];
+
+       u8         table_type[0x8];
+       u8         reserved_3[0x18];
+
+       u8         reserved_4[0x20];
+
+       u8         reserved_5[0x8];
+       u8         level[0x8];
+       u8         reserved_6[0x8];
+       u8         log_size[0x8];
+
+       u8         reserved_7[0x120];
+};
+
+struct mlx5_ifc_create_flow_group_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x8];
+       u8         group_id[0x18];
+
+       u8         reserved_2[0x20];
+};
+
+enum {
+       MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS    = 0x0,
+       MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS  = 0x1,
+       MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS    = 0x2,
+};
+
+struct mlx5_ifc_create_flow_group_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x40];
+
+       u8         table_type[0x8];
+       u8         reserved_3[0x18];
+
+       u8         reserved_4[0x8];
+       u8         table_id[0x18];
+
+       u8         reserved_5[0x20];
+
+       u8         start_flow_index[0x20];
+
+       u8         reserved_6[0x20];
+
+       u8         end_flow_index[0x20];
+
+       u8         reserved_7[0xa0];
+
+       u8         reserved_8[0x18];
+       u8         match_criteria_enable[0x8];
+
+       struct mlx5_ifc_fte_match_param_bits match_criteria;
+
+       u8         reserved_9[0xe00];
+};
+
+struct mlx5_ifc_create_eq_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x18];
+       u8         eq_number[0x8];
+
+       u8         reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_eq_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x40];
+
+       struct mlx5_ifc_eqc_bits eq_context_entry;
+
+       u8         reserved_3[0x40];
+
+       u8         event_bitmask[0x40];
+
+       u8         reserved_4[0x580];
+
+       u8         pas[0][0x40];
+};
+
+struct mlx5_ifc_create_dct_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x8];
+       u8         dctn[0x18];
+
+       u8         reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_dct_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x40];
+
+       struct mlx5_ifc_dctc_bits dct_context_entry;
+
+       u8         reserved_3[0x180];
+};
+
+struct mlx5_ifc_create_cq_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x8];
+       u8         cqn[0x18];
+
+       u8         reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_cq_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x40];
+
+       struct mlx5_ifc_cqc_bits cq_context;
+
+       u8         reserved_3[0x600];
+
+       u8         pas[0][0x40];
+};
+
+struct mlx5_ifc_config_int_moderation_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x4];
+       u8         min_delay[0xc];
+       u8         int_vector[0x10];
+
+       u8         reserved_2[0x20];
+};
+
+enum {
+       MLX5_CONFIG_INT_MODERATION_IN_OP_MOD_WRITE  = 0x0,
+       MLX5_CONFIG_INT_MODERATION_IN_OP_MOD_READ   = 0x1,
+};
+
+struct mlx5_ifc_config_int_moderation_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x4];
+       u8         min_delay[0xc];
+       u8         int_vector[0x10];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_attach_to_mcg_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_attach_to_mcg_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         qpn[0x18];
+
+       u8         reserved_3[0x20];
+
+       u8         multicast_gid[16][0x8];
+};
+
+struct mlx5_ifc_arm_xrc_srq_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+enum {
+       MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ  = 0x1,
+};
+
+struct mlx5_ifc_arm_xrc_srq_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         xrc_srqn[0x18];
+
+       u8         reserved_3[0x10];
+       u8         lwm[0x10];
+};
+
+struct mlx5_ifc_arm_rq_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+enum {
+       MLX5_ARM_RQ_IN_OP_MOD_SRQ_  = 0x1,
+};
+
+struct mlx5_ifc_arm_rq_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         srq_number[0x18];
+
+       u8         reserved_3[0x10];
+       u8         lwm[0x10];
+};
+
+struct mlx5_ifc_arm_dct_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_arm_dct_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x8];
+       u8         dct_number[0x18];
+
+       u8         reserved_3[0x20];
+};
+
+struct mlx5_ifc_alloc_xrcd_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x8];
+       u8         xrcd[0x18];
+
+       u8         reserved_2[0x20];
+};
+
+struct mlx5_ifc_alloc_xrcd_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x40];
+};
+
+struct mlx5_ifc_alloc_uar_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x8];
+       u8         uar[0x18];
+
+       u8         reserved_2[0x20];
+};
+
+struct mlx5_ifc_alloc_uar_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x40];
+};
+
+struct mlx5_ifc_alloc_transport_domain_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x8];
+       u8         transport_domain[0x18];
+
+       u8         reserved_2[0x20];
+};
+
+struct mlx5_ifc_alloc_transport_domain_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x40];
+};
+
+struct mlx5_ifc_alloc_q_counter_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x18];
+       u8         counter_set_id[0x8];
+
+       u8         reserved_2[0x20];
+};
+
+struct mlx5_ifc_alloc_q_counter_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x40];
+};
+
+struct mlx5_ifc_alloc_pd_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x8];
+       u8         pd[0x18];
+
+       u8         reserved_2[0x20];
+};
+
+struct mlx5_ifc_alloc_pd_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x40];
+};
+
+struct mlx5_ifc_add_vxlan_udp_dport_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+};
+
+struct mlx5_ifc_add_vxlan_udp_dport_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x20];
+
+       u8         reserved_3[0x10];
+       u8         vxlan_udp_port[0x10];
+};
+
+struct mlx5_ifc_access_register_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         reserved_1[0x40];
+
+       u8         register_data[0][0x20];
+};
+
+enum {
+       MLX5_ACCESS_REGISTER_IN_OP_MOD_WRITE  = 0x0,
+       MLX5_ACCESS_REGISTER_IN_OP_MOD_READ   = 0x1,
+};
+
+struct mlx5_ifc_access_register_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         reserved_2[0x10];
+       u8         register_id[0x10];
+
+       u8         argument[0x20];
+
+       u8         register_data[0][0x20];
+};
+
+struct mlx5_ifc_sltp_reg_bits {
+       u8         status[0x4];
+       u8         version[0x4];
+       u8         local_port[0x8];
+       u8         pnat[0x2];
+       u8         reserved_0[0x2];
+       u8         lane[0x4];
+       u8         reserved_1[0x8];
+
+       u8         reserved_2[0x20];
+
+       u8         reserved_3[0x7];
+       u8         polarity[0x1];
+       u8         ob_tap0[0x8];
+       u8         ob_tap1[0x8];
+       u8         ob_tap2[0x8];
+
+       u8         reserved_4[0xc];
+       u8         ob_preemp_mode[0x4];
+       u8         ob_reg[0x8];
+       u8         ob_bias[0x8];
+
+       u8         reserved_5[0x20];
+};
+
+struct mlx5_ifc_slrg_reg_bits {
+       u8         status[0x4];
+       u8         version[0x4];
+       u8         local_port[0x8];
+       u8         pnat[0x2];
+       u8         reserved_0[0x2];
+       u8         lane[0x4];
+       u8         reserved_1[0x8];
+
+       u8         time_to_link_up[0x10];
+       u8         reserved_2[0xc];
+       u8         grade_lane_speed[0x4];
+
+       u8         grade_version[0x8];
+       u8         grade[0x18];
+
+       u8         reserved_3[0x4];
+       u8         height_grade_type[0x4];
+       u8         height_grade[0x18];
+
+       u8         height_dz[0x10];
+       u8         height_dv[0x10];
+
+       u8         reserved_4[0x10];
+       u8         height_sigma[0x10];
+
+       u8         reserved_5[0x20];
+
+       u8         reserved_6[0x4];
+       u8         phase_grade_type[0x4];
+       u8         phase_grade[0x18];
+
+       u8         reserved_7[0x8];
+       u8         phase_eo_pos[0x8];
+       u8         reserved_8[0x8];
+       u8         phase_eo_neg[0x8];
+
+       u8         ffe_set_tested[0x10];
+       u8         test_errors_per_lane[0x10];
+};
+
+struct mlx5_ifc_pvlc_reg_bits {
+       u8         reserved_0[0x8];
+       u8         local_port[0x8];
+       u8         reserved_1[0x10];
+
+       u8         reserved_2[0x1c];
+       u8         vl_hw_cap[0x4];
+
+       u8         reserved_3[0x1c];
+       u8         vl_admin[0x4];
+
+       u8         reserved_4[0x1c];
+       u8         vl_operational[0x4];
+};
+
+struct mlx5_ifc_pude_reg_bits {
+       u8         swid[0x8];
+       u8         local_port[0x8];
+       u8         reserved_0[0x4];
+       u8         admin_status[0x4];
+       u8         reserved_1[0x4];
+       u8         oper_status[0x4];
+
+       u8         reserved_2[0x60];
+};
+
+struct mlx5_ifc_ptys_reg_bits {
+       u8         reserved_0[0x8];
+       u8         local_port[0x8];
+       u8         reserved_1[0xd];
+       u8         proto_mask[0x3];
+
+       u8         reserved_2[0x40];
+
+       u8         eth_proto_capability[0x20];
+
+       u8         ib_link_width_capability[0x10];
+       u8         ib_proto_capability[0x10];
+
+       u8         reserved_3[0x20];
+
+       u8         eth_proto_admin[0x20];
+
+       u8         ib_link_width_admin[0x10];
+       u8         ib_proto_admin[0x10];
+
+       u8         reserved_4[0x20];
+
+       u8         eth_proto_oper[0x20];
+
+       u8         ib_link_width_oper[0x10];
+       u8         ib_proto_oper[0x10];
+
+       u8         reserved_5[0x20];
+
+       u8         eth_proto_lp_advertise[0x20];
+
+       u8         reserved_6[0x60];
+};
+
+struct mlx5_ifc_ptas_reg_bits {
+       u8         reserved_0[0x20];
+
+       u8         algorithm_options[0x10];
+       u8         reserved_1[0x4];
+       u8         repetitions_mode[0x4];
+       u8         num_of_repetitions[0x8];
+
+       u8         grade_version[0x8];
+       u8         height_grade_type[0x4];
+       u8         phase_grade_type[0x4];
+       u8         height_grade_weight[0x8];
+       u8         phase_grade_weight[0x8];
+
+       u8         gisim_measure_bits[0x10];
+       u8         adaptive_tap_measure_bits[0x10];
+
+       u8         ber_bath_high_error_threshold[0x10];
+       u8         ber_bath_mid_error_threshold[0x10];
+
+       u8         ber_bath_low_error_threshold[0x10];
+       u8         one_ratio_high_threshold[0x10];
+
+       u8         one_ratio_high_mid_threshold[0x10];
+       u8         one_ratio_low_mid_threshold[0x10];
+
+       u8         one_ratio_low_threshold[0x10];
+       u8         ndeo_error_threshold[0x10];
+
+       u8         mixer_offset_step_size[0x10];
+       u8         reserved_2[0x8];
+       u8         mix90_phase_for_voltage_bath[0x8];
+
+       u8         mixer_offset_start[0x10];
+       u8         mixer_offset_end[0x10];
+
+       u8         reserved_3[0x15];
+       u8         ber_test_time[0xb];
+};
+
+struct mlx5_ifc_pspa_reg_bits {
+       u8         swid[0x8];
+       u8         local_port[0x8];
+       u8         sub_port[0x8];
+       u8         reserved_0[0x8];
+
+       u8         reserved_1[0x20];
+};
+
+struct mlx5_ifc_pqdr_reg_bits {
+       u8         reserved_0[0x8];
+       u8         local_port[0x8];
+       u8         reserved_1[0x5];
+       u8         prio[0x3];
+       u8         reserved_2[0x6];
+       u8         mode[0x2];
+
+       u8         reserved_3[0x20];
+
+       u8         reserved_4[0x10];
+       u8         min_threshold[0x10];
+
+       u8         reserved_5[0x10];
+       u8         max_threshold[0x10];
+
+       u8         reserved_6[0x10];
+       u8         mark_probability_denominator[0x10];
+
+       u8         reserved_7[0x60];
+};
+
+struct mlx5_ifc_ppsc_reg_bits {
+       u8         reserved_0[0x8];
+       u8         local_port[0x8];
+       u8         reserved_1[0x10];
+
+       u8         reserved_2[0x60];
+
+       u8         reserved_3[0x1c];
+       u8         wrps_admin[0x4];
+
+       u8         reserved_4[0x1c];
+       u8         wrps_status[0x4];
+
+       u8         reserved_5[0x8];
+       u8         up_threshold[0x8];
+       u8         reserved_6[0x8];
+       u8         down_threshold[0x8];
+
+       u8         reserved_7[0x20];
+
+       u8         reserved_8[0x1c];
+       u8         srps_admin[0x4];
+
+       u8         reserved_9[0x1c];
+       u8         srps_status[0x4];
+
+       u8         reserved_10[0x40];
+};
+
+struct mlx5_ifc_pplr_reg_bits {
+       u8         reserved_0[0x8];
+       u8         local_port[0x8];
+       u8         reserved_1[0x10];
+
+       u8         reserved_2[0x8];
+       u8         lb_cap[0x8];
+       u8         reserved_3[0x8];
+       u8         lb_en[0x8];
+};
+
+struct mlx5_ifc_pplm_reg_bits {
+       u8         reserved_0[0x8];
+       u8         local_port[0x8];
+       u8         reserved_1[0x10];
+
+       u8         reserved_2[0x20];
+
+       u8         port_profile_mode[0x8];
+       u8         static_port_profile[0x8];
+       u8         active_port_profile[0x8];
+       u8         reserved_3[0x8];
+
+       u8         retransmission_active[0x8];
+       u8         fec_mode_active[0x18];
+
+       u8         reserved_4[0x20];
+};
+
+struct mlx5_ifc_ppcnt_reg_bits {
+       u8         swid[0x8];
+       u8         local_port[0x8];
+       u8         pnat[0x2];
+       u8         reserved_0[0x8];
+       u8         grp[0x6];
+
+       u8         clr[0x1];
+       u8         reserved_1[0x1c];
+       u8         prio_tc[0x3];
+
+       union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits counter_set;
+};
+
+struct mlx5_ifc_ppad_reg_bits {
+       u8         reserved_0[0x3];
+       u8         single_mac[0x1];
+       u8         reserved_1[0x4];
+       u8         local_port[0x8];
+       u8         mac_47_32[0x10];
+
+       u8         mac_31_0[0x20];
+
+       u8         reserved_2[0x40];
+};
+
+struct mlx5_ifc_pmtu_reg_bits {
+       u8         reserved_0[0x8];
+       u8         local_port[0x8];
+       u8         reserved_1[0x10];
+
+       u8         max_mtu[0x10];
+       u8         reserved_2[0x10];
+
+       u8         admin_mtu[0x10];
+       u8         reserved_3[0x10];
+
+       u8         oper_mtu[0x10];
+       u8         reserved_4[0x10];
+};
+
+struct mlx5_ifc_pmpr_reg_bits {
+       u8         reserved_0[0x8];
+       u8         module[0x8];
+       u8         reserved_1[0x10];
+
+       u8         reserved_2[0x18];
+       u8         attenuation_5g[0x8];
+
+       u8         reserved_3[0x18];
+       u8         attenuation_7g[0x8];
+
+       u8         reserved_4[0x18];
+       u8         attenuation_12g[0x8];
+};
+
+struct mlx5_ifc_pmpe_reg_bits {
+       u8         reserved_0[0x8];
+       u8         module[0x8];
+       u8         reserved_1[0xc];
+       u8         module_status[0x4];
+
+       u8         reserved_2[0x60];
+};
+
+struct mlx5_ifc_pmpc_reg_bits {
+       u8         module_state_updated[32][0x8];
+};
+
+struct mlx5_ifc_pmlpn_reg_bits {
+       u8         reserved_0[0x4];
+       u8         mlpn_status[0x4];
+       u8         local_port[0x8];
+       u8         reserved_1[0x10];
+
+       u8         e[0x1];
+       u8         reserved_2[0x1f];
+};
+
+struct mlx5_ifc_pmlp_reg_bits {
+       u8         rxtx[0x1];
+       u8         reserved_0[0x7];
+       u8         local_port[0x8];
+       u8         reserved_1[0x8];
+       u8         width[0x8];
+
+       u8         lane0_module_mapping[0x20];
+
+       u8         lane1_module_mapping[0x20];
+
+       u8         lane2_module_mapping[0x20];
+
+       u8         lane3_module_mapping[0x20];
+
+       u8         reserved_2[0x160];
+};
+
+struct mlx5_ifc_pmaos_reg_bits {
+       u8         reserved_0[0x8];
+       u8         module[0x8];
+       u8         reserved_1[0x4];
+       u8         admin_status[0x4];
+       u8         reserved_2[0x4];
+       u8         oper_status[0x4];
+
+       u8         ase[0x1];
+       u8         ee[0x1];
+       u8         reserved_3[0x1c];
+       u8         e[0x2];
+
+       u8         reserved_4[0x40];
+};
+
+struct mlx5_ifc_plpc_reg_bits {
+       u8         reserved_0[0x4];
+       u8         profile_id[0xc];
+       u8         reserved_1[0x4];
+       u8         proto_mask[0x4];
+       u8         reserved_2[0x8];
+
+       u8         reserved_3[0x10];
+       u8         lane_speed[0x10];
+
+       u8         reserved_4[0x17];
+       u8         lpbf[0x1];
+       u8         fec_mode_policy[0x8];
+
+       u8         retransmission_capability[0x8];
+       u8         fec_mode_capability[0x18];
+
+       u8         retransmission_support_admin[0x8];
+       u8         fec_mode_support_admin[0x18];
+
+       u8         retransmission_request_admin[0x8];
+       u8         fec_mode_request_admin[0x18];
+
+       u8         reserved_5[0x80];
+};
+
+struct mlx5_ifc_plib_reg_bits {
+       u8         reserved_0[0x8];
+       u8         local_port[0x8];
+       u8         reserved_1[0x8];
+       u8         ib_port[0x8];
+
+       u8         reserved_2[0x60];
+};
+
+struct mlx5_ifc_plbf_reg_bits {
+       u8         reserved_0[0x8];
+       u8         local_port[0x8];
+       u8         reserved_1[0xd];
+       u8         lbf_mode[0x3];
+
+       u8         reserved_2[0x20];
+};
+
+struct mlx5_ifc_pipg_reg_bits {
+       u8         reserved_0[0x8];
+       u8         local_port[0x8];
+       u8         reserved_1[0x10];
+
+       u8         dic[0x1];
+       u8         reserved_2[0x19];
+       u8         ipg[0x4];
+       u8         reserved_3[0x2];
+};
+
+struct mlx5_ifc_pifr_reg_bits {
+       u8         reserved_0[0x8];
+       u8         local_port[0x8];
+       u8         reserved_1[0x10];
+
+       u8         reserved_2[0xe0];
+
+       u8         port_filter[8][0x20];
+
+       u8         port_filter_update_en[8][0x20];
+};
+
+struct mlx5_ifc_pfcc_reg_bits {
+       u8         reserved_0[0x8];
+       u8         local_port[0x8];
+       u8         reserved_1[0x10];
+
+       u8         ppan[0x4];
+       u8         reserved_2[0x4];
+       u8         prio_mask_tx[0x8];
+       u8         reserved_3[0x8];
+       u8         prio_mask_rx[0x8];
+
+       u8         pptx[0x1];
+       u8         aptx[0x1];
+       u8         reserved_4[0x6];
+       u8         pfctx[0x8];
+       u8         reserved_5[0x10];
+
+       u8         pprx[0x1];
+       u8         aprx[0x1];
+       u8         reserved_6[0x6];
+       u8         pfcrx[0x8];
+       u8         reserved_7[0x10];
+
+       u8         reserved_8[0x80];
+};
+
+struct mlx5_ifc_pelc_reg_bits {
+       u8         op[0x4];
+       u8         reserved_0[0x4];
+       u8         local_port[0x8];
+       u8         reserved_1[0x10];
+
+       u8         op_admin[0x8];
+       u8         op_capability[0x8];
+       u8         op_request[0x8];
+       u8         op_active[0x8];
+
+       u8         admin[0x40];
+
+       u8         capability[0x40];
+
+       u8         request[0x40];
+
+       u8         active[0x40];
+
+       u8         reserved_2[0x80];
+};
+
+struct mlx5_ifc_peir_reg_bits {
+       u8         reserved_0[0x8];
+       u8         local_port[0x8];
+       u8         reserved_1[0x10];
+
+       u8         reserved_2[0xc];
+       u8         error_count[0x4];
+       u8         reserved_3[0x10];
+
+       u8         reserved_4[0xc];
+       u8         lane[0x4];
+       u8         reserved_5[0x8];
+       u8         error_type[0x8];
+};
+
+struct mlx5_ifc_pcap_reg_bits {
+       u8         reserved_0[0x8];
+       u8         local_port[0x8];
+       u8         reserved_1[0x10];
+
+       u8         port_capability_mask[4][0x20];
+};
+
+struct mlx5_ifc_paos_reg_bits {
+       u8         swid[0x8];
+       u8         local_port[0x8];
+       u8         reserved_0[0x4];
+       u8         admin_status[0x4];
+       u8         reserved_1[0x4];
+       u8         oper_status[0x4];
+
+       u8         ase[0x1];
+       u8         ee[0x1];
+       u8         reserved_2[0x1c];
+       u8         e[0x2];
+
+       u8         reserved_3[0x40];
+};
+
+struct mlx5_ifc_pamp_reg_bits {
+       u8         reserved_0[0x8];
+       u8         opamp_group[0x8];
+       u8         reserved_1[0xc];
+       u8         opamp_group_type[0x4];
+
+       u8         start_index[0x10];
+       u8         reserved_2[0x4];
+       u8         num_of_indices[0xc];
+
+       u8         index_data[18][0x10];
+};
+
+struct mlx5_ifc_lane_2_module_mapping_bits {
+       u8         reserved_0[0x6];
+       u8         rx_lane[0x2];
+       u8         reserved_1[0x6];
+       u8         tx_lane[0x2];
+       u8         reserved_2[0x8];
+       u8         module[0x8];
+};
+
+struct mlx5_ifc_bufferx_reg_bits {
+       u8         reserved_0[0x6];
+       u8         lossy[0x1];
+       u8         epsb[0x1];
+       u8         reserved_1[0xc];
+       u8         size[0xc];
+
+       u8         xoff_threshold[0x10];
+       u8         xon_threshold[0x10];
+};
+
+struct mlx5_ifc_set_node_in_bits {
+       u8         node_description[64][0x8];
+};
+
+struct mlx5_ifc_register_power_settings_bits {
+       u8         reserved_0[0x18];
+       u8         power_settings_level[0x8];
+
+       u8         reserved_1[0x60];
+};
+
+struct mlx5_ifc_register_host_endianness_bits {
+       u8         he[0x1];
+       u8         reserved_0[0x1f];
+
+       u8         reserved_1[0x60];
+};
+
+struct mlx5_ifc_umr_pointer_desc_argument_bits {
+       u8         reserved_0[0x20];
+
+       u8         mkey[0x20];
+
+       u8         addressh_63_32[0x20];
+
+       u8         addressl_31_0[0x20];
+};
+
+struct mlx5_ifc_ud_adrs_vector_bits {
+       u8         dc_key[0x40];
+
+       u8         ext[0x1];
+       u8         reserved_0[0x7];
+       u8         destination_qp_dct[0x18];
+
+       u8         static_rate[0x4];
+       u8         sl_eth_prio[0x4];
+       u8         fl[0x1];
+       u8         mlid[0x7];
+       u8         rlid_udp_sport[0x10];
+
+       u8         reserved_1[0x20];
+
+       u8         rmac_47_16[0x20];
+
+       u8         rmac_15_0[0x10];
+       u8         tclass[0x8];
+       u8         hop_limit[0x8];
+
+       u8         reserved_2[0x1];
+       u8         grh[0x1];
+       u8         reserved_3[0x2];
+       u8         src_addr_index[0x8];
+       u8         flow_label[0x14];
+
+       u8         rgid_rip[16][0x8];
+};
+
+struct mlx5_ifc_pages_req_event_bits {
+       u8         reserved_0[0x10];
+       u8         function_id[0x10];
+
+       u8         num_pages[0x20];
+
+       u8         reserved_1[0xa0];
+};
+
+struct mlx5_ifc_eqe_bits {
+       u8         reserved_0[0x8];
+       u8         event_type[0x8];
+       u8         reserved_1[0x8];
+       u8         event_sub_type[0x8];
+
+       u8         reserved_2[0xe0];
+
+       union mlx5_ifc_event_auto_bits event_data;
+
+       u8         reserved_3[0x10];
+       u8         signature[0x8];
+       u8         reserved_4[0x7];
+       u8         owner[0x1];
+};
+
+enum {
+       MLX5_CMD_QUEUE_ENTRY_TYPE_PCIE_CMD_IF_TRANSPORT  = 0x7,
+};
+
+struct mlx5_ifc_cmd_queue_entry_bits {
+       u8         type[0x8];
+       u8         reserved_0[0x18];
+
+       u8         input_length[0x20];
+
+       u8         input_mailbox_pointer_63_32[0x20];
+
+       u8         input_mailbox_pointer_31_9[0x17];
+       u8         reserved_1[0x9];
+
+       u8         command_input_inline_data[16][0x8];
+
+       u8         command_output_inline_data[16][0x8];
+
+       u8         output_mailbox_pointer_63_32[0x20];
+
+       u8         output_mailbox_pointer_31_9[0x17];
+       u8         reserved_2[0x9];
+
+       u8         output_length[0x20];
+
+       u8         token[0x8];
+       u8         signature[0x8];
+       u8         reserved_3[0x8];
+       u8         status[0x7];
+       u8         ownership[0x1];
+};
+
+struct mlx5_ifc_cmd_out_bits {
+       u8         status[0x8];
+       u8         reserved_0[0x18];
+
+       u8         syndrome[0x20];
+
+       u8         command_output[0x20];
+};
+
+struct mlx5_ifc_cmd_in_bits {
+       u8         opcode[0x10];
+       u8         reserved_0[0x10];
+
+       u8         reserved_1[0x10];
+       u8         op_mod[0x10];
+
+       u8         command[0][0x20];
+};
+
+struct mlx5_ifc_cmd_if_box_bits {
+       u8         mailbox_data[512][0x8];
+
+       u8         reserved_0[0x180];
+
+       u8         next_pointer_63_32[0x20];
+
+       u8         next_pointer_31_10[0x16];
+       u8         reserved_1[0xa];
+
+       u8         block_number[0x20];
+
+       u8         reserved_2[0x8];
+       u8         token[0x8];
+       u8         ctrl_signature[0x8];
+       u8         signature[0x8];
+};
+
+struct mlx5_ifc_mtt_bits {
+       u8         ptag_63_32[0x20];
+
+       u8         ptag_31_8[0x18];
+       u8         reserved_0[0x6];
+       u8         wr_en[0x1];
+       u8         rd_en[0x1];
+};
+
+enum {
+       MLX5_INITIAL_SEG_NIC_INTERFACE_FULL_DRIVER  = 0x0,
+       MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED     = 0x1,
+       MLX5_INITIAL_SEG_NIC_INTERFACE_NO_DRAM_NIC  = 0x2,
+};
+
+enum {
+       MLX5_INITIAL_SEG_NIC_INTERFACE_SUPPORTED_FULL_DRIVER  = 0x0,
+       MLX5_INITIAL_SEG_NIC_INTERFACE_SUPPORTED_DISABLED     = 0x1,
+       MLX5_INITIAL_SEG_NIC_INTERFACE_SUPPORTED_NO_DRAM_NIC  = 0x2,
+};
+
+enum {
+       MLX5_INITIAL_SEG_HEALTH_SYNDROME_FW_INTERNAL_ERR              = 0x1,
+       MLX5_INITIAL_SEG_HEALTH_SYNDROME_DEAD_IRISC                   = 0x7,
+       MLX5_INITIAL_SEG_HEALTH_SYNDROME_HW_FATAL_ERR                 = 0x8,
+       MLX5_INITIAL_SEG_HEALTH_SYNDROME_FW_CRC_ERR                   = 0x9,
+       MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_FETCH_PCI_ERR            = 0xa,
+       MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_PAGE_ERR                 = 0xb,
+       MLX5_INITIAL_SEG_HEALTH_SYNDROME_ASYNCHRONOUS_EQ_BUF_OVERRUN  = 0xc,
+       MLX5_INITIAL_SEG_HEALTH_SYNDROME_EQ_IN_ERR                    = 0xd,
+       MLX5_INITIAL_SEG_HEALTH_SYNDROME_EQ_INV                       = 0xe,
+       MLX5_INITIAL_SEG_HEALTH_SYNDROME_FFSER_ERR                    = 0xf,
+       MLX5_INITIAL_SEG_HEALTH_SYNDROME_HIGH_TEMP_ERR                = 0x10,
+};
+
+struct mlx5_ifc_initial_seg_bits {
+       u8         fw_rev_minor[0x10];
+       u8         fw_rev_major[0x10];
+
+       u8         cmd_interface_rev[0x10];
+       u8         fw_rev_subminor[0x10];
+
+       u8         reserved_0[0x40];
+
+       u8         cmdq_phy_addr_63_32[0x20];
+
+       u8         cmdq_phy_addr_31_12[0x14];
+       u8         reserved_1[0x2];
+       u8         nic_interface[0x2];
+       u8         log_cmdq_size[0x4];
+       u8         log_cmdq_stride[0x4];
+
+       u8         command_doorbell_vector[0x20];
+
+       u8         reserved_2[0xf00];
+
+       u8         initializing[0x1];
+       u8         reserved_3[0x4];
+       u8         nic_interface_supported[0x3];
+       u8         reserved_4[0x18];
+
+       struct mlx5_ifc_health_buffer_bits health_buffer;
+
+       u8         no_dram_nic_offset[0x20];
+
+       u8         reserved_5[0x6e40];
+
+       u8         reserved_6[0x1f];
+       u8         clear_int[0x1];
+
+       u8         health_syndrome[0x8];
+       u8         health_counter[0x18];
+
+       u8         reserved_7[0x17fc0];
+};
+
+union mlx5_ifc_ports_control_registers_document_bits {
+       struct mlx5_ifc_bufferx_reg_bits bufferx_reg;
+       struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout;
+       struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits eth_2863_cntrs_grp_data_layout;
+       struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits eth_3635_cntrs_grp_data_layout;
+       struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits eth_802_3_cntrs_grp_data_layout;
+       struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits eth_extended_cntrs_grp_data_layout;
+       struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout;
+       struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout;
+       struct mlx5_ifc_lane_2_module_mapping_bits lane_2_module_mapping;
+       struct mlx5_ifc_pamp_reg_bits pamp_reg;
+       struct mlx5_ifc_paos_reg_bits paos_reg;
+       struct mlx5_ifc_pcap_reg_bits pcap_reg;
+       struct mlx5_ifc_peir_reg_bits peir_reg;
+       struct mlx5_ifc_pelc_reg_bits pelc_reg;
+       struct mlx5_ifc_pfcc_reg_bits pfcc_reg;
+       struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs;
+       struct mlx5_ifc_pifr_reg_bits pifr_reg;
+       struct mlx5_ifc_pipg_reg_bits pipg_reg;
+       struct mlx5_ifc_plbf_reg_bits plbf_reg;
+       struct mlx5_ifc_plib_reg_bits plib_reg;
+       struct mlx5_ifc_plpc_reg_bits plpc_reg;
+       struct mlx5_ifc_pmaos_reg_bits pmaos_reg;
+       struct mlx5_ifc_pmlp_reg_bits pmlp_reg;
+       struct mlx5_ifc_pmlpn_reg_bits pmlpn_reg;
+       struct mlx5_ifc_pmpc_reg_bits pmpc_reg;
+       struct mlx5_ifc_pmpe_reg_bits pmpe_reg;
+       struct mlx5_ifc_pmpr_reg_bits pmpr_reg;
+       struct mlx5_ifc_pmtu_reg_bits pmtu_reg;
+       struct mlx5_ifc_ppad_reg_bits ppad_reg;
+       struct mlx5_ifc_ppcnt_reg_bits ppcnt_reg;
+       struct mlx5_ifc_pplm_reg_bits pplm_reg;
+       struct mlx5_ifc_pplr_reg_bits pplr_reg;
+       struct mlx5_ifc_ppsc_reg_bits ppsc_reg;
+       struct mlx5_ifc_pqdr_reg_bits pqdr_reg;
+       struct mlx5_ifc_pspa_reg_bits pspa_reg;
+       struct mlx5_ifc_ptas_reg_bits ptas_reg;
+       struct mlx5_ifc_ptys_reg_bits ptys_reg;
+       struct mlx5_ifc_pude_reg_bits pude_reg;
+       struct mlx5_ifc_pvlc_reg_bits pvlc_reg;
+       struct mlx5_ifc_slrg_reg_bits slrg_reg;
+       struct mlx5_ifc_sltp_reg_bits sltp_reg;
+       u8         reserved_0[0x60e0];
+};
+
+union mlx5_ifc_debug_enhancements_document_bits {
+       struct mlx5_ifc_health_buffer_bits health_buffer;
+       u8         reserved_0[0x200];
+};
+
+union mlx5_ifc_uplink_pci_interface_document_bits {
+       struct mlx5_ifc_initial_seg_bits initial_seg;
+       u8         reserved_0[0x20060];
 };
 
 #endif /* MLX5_IFC_H */
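
None of the *_bits structures above are accessed as C bitfields: each "u8 name[0xN]" line declares an N-bit field at a fixed offset in the big-endian firmware command layout, and drivers manipulate them through the MLX5_SET()/MLX5_GET()/MLX5_ST_SZ_DW() macros from include/linux/mlx5/device.h. A minimal sketch of driving one such pair, alloc_pd_in/alloc_pd_out, follows; the wrapper function is invented for illustration and command status/syndrome handling is elided:

	/*
	 * Hedged sketch, not part of the patch: MLX5_SET()/MLX5_GET()/
	 * MLX5_ST_SZ_DW() and mlx5_cmd_exec() are existing driver
	 * interfaces; the wrapper name is made up, and the output
	 * status/syndrome words are not checked here for brevity.
	 */
	#include <linux/mlx5/driver.h>
	#include <linux/mlx5/mlx5_ifc.h>

	static int example_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn)
	{
		u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {0};
		u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {0};
		int err;

		MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);

		err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
		if (err)
			return err;

		*pdn = MLX5_GET(alloc_pd_out, out, pd); /* 24-bit pd field */
		return 0;
	}

The zero-length pas[0][0x40] members at the tail of the create_* commands are variable-length arrays of 64-bit physical addresses appended after the fixed part of the layout.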
index 310b5f7fd6ae52101665c9f3dd6e042b6a4ceb9e..f079fb1a31f7f7953f0bb28f6f3a145279598dc7 100644 (file)
@@ -134,13 +134,21 @@ enum {
 
 enum {
        MLX5_WQE_CTRL_CQ_UPDATE         = 2 << 2,
+       MLX5_WQE_CTRL_CQ_UPDATE_AND_EQE = 3 << 2,
        MLX5_WQE_CTRL_SOLICITED         = 1 << 1,
 };
 
 enum {
+       MLX5_SEND_WQE_DS        = 16,
        MLX5_SEND_WQE_BB        = 64,
 };
 
+#define MLX5_SEND_WQEBB_NUM_DS (MLX5_SEND_WQE_BB / MLX5_SEND_WQE_DS)
+
+enum {
+       MLX5_SEND_WQE_MAX_WQEBBS        = 16,
+};
+
 enum {
        MLX5_WQE_FMR_PERM_LOCAL_READ    = 1 << 27,
        MLX5_WQE_FMR_PERM_LOCAL_WRITE   = 1 << 28,
@@ -200,6 +208,23 @@ struct mlx5_wqe_ctrl_seg {
 #define MLX5_WQE_CTRL_WQE_INDEX_MASK 0x00ffff00
 #define MLX5_WQE_CTRL_WQE_INDEX_SHIFT 8
 
+enum {
+       MLX5_ETH_WQE_L3_INNER_CSUM      = 1 << 4,
+       MLX5_ETH_WQE_L4_INNER_CSUM      = 1 << 5,
+       MLX5_ETH_WQE_L3_CSUM            = 1 << 6,
+       MLX5_ETH_WQE_L4_CSUM            = 1 << 7,
+};
+
+struct mlx5_wqe_eth_seg {
+       u8              rsvd0[4];
+       u8              cs_flags;
+       u8              rsvd1;
+       __be16          mss;
+       __be32          rsvd2;
+       __be16          inline_hdr_sz;
+       u8              inline_hdr_start[2];
+};
+
 struct mlx5_wqe_xrc_seg {
        __be32                  xrc_srqn;
        u8                      rsvd[12];
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
new file mode 100644 (file)
index 0000000..967e0fd
--- /dev/null
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __MLX5_VPORT_H__
+#define __MLX5_VPORT_H__
+
+#include <linux/mlx5/driver.h>
+
+u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod);
+void mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, u8 *addr);
+int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
+                            u8 port_num, u16  vf_num, u16 gid_index,
+                            union ib_gid *gid);
+int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
+                             u8 port_num, u16 vf_num, u16 pkey_index,
+                             u16 *pkey);
+int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
+                                u8 other_vport, u8 port_num,
+                                u16 vf_num,
+                                struct mlx5_hca_vport_context *rep);
+int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev,
+                                          u64 *sys_image_guid);
+int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev,
+                                  u64 *node_guid);
+
+#endif /* __MLX5_VPORT_H__ */
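
A hedged usage sketch of the simplest helper declared above, mlx5_query_nic_vport_mac_address(), which fills a caller-supplied ETH_ALEN buffer; the surrounding function and the netdev it programs are illustrative only:

	/* Hedged sketch: fetch the NIC vport MAC through the new helper.
	 * Everything except the vport.h call itself is invented context.
	 */
	#include <linux/etherdevice.h>
	#include <linux/mlx5/vport.h>

	static void example_read_vport_mac(struct mlx5_core_dev *mdev,
					   struct net_device *netdev)
	{
		u8 mac[ETH_ALEN];

		mlx5_query_nic_vport_mac_address(mdev, mac);
		if (is_valid_ether_addr(mac))
			ether_addr_copy(netdev->dev_addr, mac);
	}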
index 8d37e26a1007c6493990093e7ce8a03f2b201419..0038ac7466fd26e7562a026af68632573e3bb0ea 100644 (file)
@@ -226,6 +226,24 @@ struct page_frag {
 #endif
 };
 
+#define PAGE_FRAG_CACHE_MAX_SIZE       __ALIGN_MASK(32768, ~PAGE_MASK)
+#define PAGE_FRAG_CACHE_MAX_ORDER      get_order(PAGE_FRAG_CACHE_MAX_SIZE)
+
+struct page_frag_cache {
+       void *va;
+#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
+       __u16 offset;
+       __u16 size;
+#else
+       __u32 offset;
+#endif
+       /* We maintain a pagecount bias so that we don't dirty the cache
+        * line containing page->_count every time we allocate a fragment.
+        */
+       unsigned int            pagecnt_bias;
+       bool pfmemalloc;
+};
+
 typedef unsigned long __nocast vm_flags_t;
 
 /*
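
For reference on the sizing above: __ALIGN_MASK(x, mask) expands to ((x + mask) & ~mask), and ~PAGE_MASK is PAGE_SIZE - 1, so PAGE_FRAG_CACHE_MAX_SIZE is 32768 rounded up to a page multiple. On 4 KiB pages that is 32768 (order 3) and the compact __u16 offset/size pair is used; on 64 KiB pages the cache collapses to a single page and the __u32 offset branch applies.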
index 738ea48be889e670275616bae5063259ca3d61d9..04aa06852771e478029bc82367ab8e509ad2dd56 100644 (file)
@@ -38,7 +38,6 @@ struct net;
 #define SOCK_NOSPACE           2
 #define SOCK_PASSCRED          3
 #define SOCK_PASSSEC           4
-#define SOCK_EXTERNALLY_ALLOCATED 5
 
 #ifndef ARCH_HAS_SOCKET_TYPES
 /**
@@ -208,7 +207,7 @@ void sock_unregister(int family);
 int __sock_create(struct net *net, int family, int type, int proto,
                  struct socket **res, int kern);
 int sock_create(int family, int type, int proto, struct socket **res);
-int sock_create_kern(int family, int type, int proto, struct socket **res);
+int sock_create_kern(struct net *net, int family, int type, int proto, struct socket **res);
 int sock_create_lite(int family, int type, int proto, struct socket **res);
 void sock_release(struct socket *sock);
 int sock_sendmsg(struct socket *sock, struct msghdr *msg);
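
The sock_create_kern() change above makes kernel-socket creation netns-aware; a minimal sketch of a caller updated for the new five-argument signature, assuming the initial namespace is acceptable for that caller:

	/* Hedged sketch of the new sock_create_kern(); choosing init_net
	 * is illustrative, real callers pass the namespace they operate in.
	 */
	#include <linux/net.h>
	#include <linux/in.h>
	#include <net/net_namespace.h>

	static int example_make_kernel_socket(struct socket **sockp)
	{
		return sock_create_kern(&init_net, AF_INET, SOCK_DGRAM,
					IPPROTO_UDP, sockp);
	}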
index 7d59dc6ab78922cc8c15b84ba95736752f8139e6..9672781c593d4e2b654625d5769bce4671675647 100644 (file)
@@ -66,7 +66,6 @@ enum {
        NETIF_F_HW_VLAN_STAG_FILTER_BIT,/* Receive filtering on VLAN STAGs */
        NETIF_F_HW_L2FW_DOFFLOAD_BIT,   /* Allow L2 Forwarding in Hardware */
        NETIF_F_BUSY_POLL_BIT,          /* Busy poll */
-       NETIF_F_HW_SWITCH_OFFLOAD_BIT,  /* HW switch offload */
 
        /*
         * Add your fresh new feature above and remember to update
@@ -125,7 +124,6 @@ enum {
 #define NETIF_F_HW_VLAN_STAG_TX        __NETIF_F(HW_VLAN_STAG_TX)
 #define NETIF_F_HW_L2FW_DOFFLOAD       __NETIF_F(HW_L2FW_DOFFLOAD)
 #define NETIF_F_BUSY_POLL      __NETIF_F(BUSY_POLL)
-#define NETIF_F_HW_SWITCH_OFFLOAD      __NETIF_F(HW_SWITCH_OFFLOAD)
 
 /* Features valid for ethtool to change */
 /* = all defined minus driver/device-class-related */
@@ -161,8 +159,7 @@ enum {
  */
 #define NETIF_F_ONE_FOR_ALL    (NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \
                                 NETIF_F_SG | NETIF_F_HIGHDMA |         \
-                                NETIF_F_FRAGLIST | NETIF_F_VLAN_CHALLENGED | \
-                                NETIF_F_HW_SWITCH_OFFLOAD)
+                                NETIF_F_FRAGLIST | NETIF_F_VLAN_CHALLENGED)
 
 /*
  * If one device doesn't support one of these features, then disable it
index 05b9a694e21312ad26beec7dfa0f32f719cc8c87..6f5f71ff51697686e5b549681181bfbdbcd55f90 100644 (file)
@@ -1564,7 +1564,7 @@ struct net_device {
        const struct net_device_ops *netdev_ops;
        const struct ethtool_ops *ethtool_ops;
 #ifdef CONFIG_NET_SWITCHDEV
-       const struct swdev_ops *swdev_ops;
+       const struct switchdev_ops *switchdev_ops;
 #endif
 
        const struct header_ops *header_ops;
@@ -1652,7 +1652,14 @@ struct net_device {
        rx_handler_func_t __rcu *rx_handler;
        void __rcu              *rx_handler_data;
 
+#ifdef CONFIG_NET_CLS_ACT
+       struct tcf_proto __rcu  *ingress_cl_list;
+#endif
        struct netdev_queue __rcu *ingress_queue;
+#ifdef CONFIG_NETFILTER_INGRESS
+       struct list_head        nf_hooks_ingress;
+#endif
+
        unsigned char           broadcast[MAX_ADDR_LEN];
 #ifdef CONFIG_RFS_ACCEL
        struct cpu_rmap         *rx_cpu_rmap;
@@ -1990,6 +1997,7 @@ struct offload_callbacks {
 
 struct packet_offload {
        __be16                   type;  /* This is really htons(ether_type). */
+       u16                      priority;
        struct offload_callbacks callbacks;
        struct list_head         list;
 };
@@ -2552,10 +2560,6 @@ static inline void netif_tx_wake_all_queues(struct net_device *dev)
 
 static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
 {
-       if (WARN_ON(!dev_queue)) {
-               pr_info("netif_stop_queue() cannot be called before register_netdev()\n");
-               return;
-       }
        set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
 }
 
@@ -2571,15 +2575,7 @@ static inline void netif_stop_queue(struct net_device *dev)
        netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
 }
 
-static inline void netif_tx_stop_all_queues(struct net_device *dev)
-{
-       unsigned int i;
-
-       for (i = 0; i < dev->num_tx_queues; i++) {
-               struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
-               netif_tx_stop_queue(txq);
-       }
-}
+void netif_tx_stop_all_queues(struct net_device *dev);
 
 static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
 {
@@ -2840,6 +2836,9 @@ static inline int netif_set_xps_queue(struct net_device *dev,
 }
 #endif
 
+u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
+                 unsigned int num_tx_queues);
+
 /*
  * Returns a Tx hash for the given packet when dev->real_num_tx_queues is used
  * as a distribution range limit for the returned value.
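
As the comment above describes, __skb_tx_hash() distributes a packet over a bounded queue range; a hedged sketch of the usual call, passing dev->real_num_tx_queues as that range, follows (the wrapper name is invented):

	/* Hedged sketch: map an skb onto one of the device's real Tx
	 * queues via the newly out-of-lined __skb_tx_hash().
	 */
	static u16 example_pick_tx_queue(const struct net_device *dev,
					 struct sk_buff *skb)
	{
		return __skb_tx_hash(dev, skb, dev->real_num_tx_queues);
	}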
index 63560d0a8dfe2802ec826d87921a8848fe56e68b..f5ff5d156da8f5a6fe04eec19e9a41a7ac3f6ab0 100644 (file)
@@ -54,10 +54,12 @@ struct nf_hook_state {
        struct net_device *in;
        struct net_device *out;
        struct sock *sk;
+       struct list_head *hook_list;
        int (*okfn)(struct sock *, struct sk_buff *);
 };
 
 static inline void nf_hook_state_init(struct nf_hook_state *p,
+                                     struct list_head *hook_list,
                                      unsigned int hook,
                                      int thresh, u_int8_t pf,
                                      struct net_device *indev,
@@ -71,6 +73,7 @@ static inline void nf_hook_state_init(struct nf_hook_state *p,
        p->in = indev;
        p->out = outdev;
        p->sk = sk;
+       p->hook_list = hook_list;
        p->okfn = okfn;
 }
 
@@ -79,16 +82,17 @@ typedef unsigned int nf_hookfn(const struct nf_hook_ops *ops,
                               const struct nf_hook_state *state);
 
 struct nf_hook_ops {
-       struct list_head list;
+       struct list_head        list;
 
        /* User fills in from here down. */
-       nf_hookfn       *hook;
-       struct module   *owner;
-       void            *priv;
-       u_int8_t        pf;
-       unsigned int    hooknum;
+       nf_hookfn               *hook;
+       struct net_device       *dev;
+       struct module           *owner;
+       void                    *priv;
+       u_int8_t                pf;
+       unsigned int            hooknum;
        /* Hooks are ordered in ascending priority. */
-       int             priority;
+       int                     priority;
 };
 
 struct nf_sockopt_ops {
@@ -131,26 +135,33 @@ extern struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
 #ifdef HAVE_JUMP_LABEL
 extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
 
-static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook)
+static inline bool nf_hook_list_active(struct list_head *nf_hook_list,
+                                      u_int8_t pf, unsigned int hook)
 {
        if (__builtin_constant_p(pf) &&
            __builtin_constant_p(hook))
                return static_key_false(&nf_hooks_needed[pf][hook]);
 
-       return !list_empty(&nf_hooks[pf][hook]);
+       return !list_empty(nf_hook_list);
 }
 #else
-static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook)
+static inline bool nf_hook_list_active(struct list_head *nf_hook_list,
+                                      u_int8_t pf, unsigned int hook)
 {
-       return !list_empty(&nf_hooks[pf][hook]);
+       return !list_empty(nf_hook_list);
 }
 #endif
 
+static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook)
+{
+       return nf_hook_list_active(&nf_hooks[pf][hook], pf, hook);
+}
+
 int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state);
 
 /**
  *     nf_hook_thresh - call a netfilter hook
- *     
+ *
  *     Returns 1 if the hook has allowed the packet to pass.  The function
  *     okfn must be invoked by the caller in this case.  Any other return
  *     value indicates the packet has been consumed by the hook.
@@ -166,8 +177,8 @@ static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
        if (nf_hooks_active(pf, hook)) {
                struct nf_hook_state state;
 
-               nf_hook_state_init(&state, hook, thresh, pf,
-                                  indev, outdev, sk, okfn);
+               nf_hook_state_init(&state, &nf_hooks[pf][hook], hook, thresh,
+                                  pf, indev, outdev, sk, okfn);
                return nf_hook_slow(skb, &state);
        }
        return 1;
index 34b172301558e64ef80a19d9731ea41e3a5291a6..ffdfdc24952afc51894d3bb289acb795cc8f0e7d 100644 (file)
@@ -122,13 +122,13 @@ struct ip_set_skbinfo {
 struct ip_set;
 
 #define ext_timeout(e, s)      \
-(unsigned long *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_TIMEOUT])
+((unsigned long *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_TIMEOUT]))
 #define ext_counter(e, s)      \
-(struct ip_set_counter *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COUNTER])
+((struct ip_set_counter *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COUNTER]))
 #define ext_comment(e, s)      \
-(struct ip_set_comment *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COMMENT])
+((struct ip_set_comment *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COMMENT]))
 #define ext_skbinfo(e, s)      \
-(struct ip_set_skbinfo *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_SKBINFO])
+((struct ip_set_skbinfo *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_SKBINFO]))
 
 typedef int (*ipset_adtfn)(struct ip_set *set, void *value,
                           const struct ip_set_ext *ext,
@@ -533,29 +533,9 @@ bitmap_bytes(u32 a, u32 b)
 #include <linux/netfilter/ipset/ip_set_timeout.h>
 #include <linux/netfilter/ipset/ip_set_comment.h>
 
-static inline int
+int
 ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set,
-                     const void *e, bool active)
-{
-       if (SET_WITH_TIMEOUT(set)) {
-               unsigned long *timeout = ext_timeout(e, set);
-
-               if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
-                       htonl(active ? ip_set_timeout_get(timeout)
-                               : *timeout)))
-                       return -EMSGSIZE;
-       }
-       if (SET_WITH_COUNTER(set) &&
-           ip_set_put_counter(skb, ext_counter(e, set)))
-               return -EMSGSIZE;
-       if (SET_WITH_COMMENT(set) &&
-           ip_set_put_comment(skb, ext_comment(e, set)))
-               return -EMSGSIZE;
-       if (SET_WITH_SKBINFO(set) &&
-           ip_set_put_skbinfo(skb, ext_skbinfo(e, set)))
-               return -EMSGSIZE;
-       return 0;
-}
+                     const void *e, bool active);
 
 #define IP_SET_INIT_KEXT(skb, opt, set)                        \
        { .bytes = (skb)->len, .packets = 1,            \
index a3e215bb0241d47379bce4ff6e81ba2b4995d3c3..09f38206c18ff0948c003ffc452a88dd0a433536 100644 (file)
@@ -62,6 +62,7 @@ struct xt_mtchk_param {
        void *matchinfo;
        unsigned int hook_mask;
        u_int8_t family;
+       bool nft_compat;
 };
 
 /**
@@ -92,6 +93,7 @@ struct xt_tgchk_param {
        void *targinfo;
        unsigned int hook_mask;
        u_int8_t family;
+       bool nft_compat;
 };
 
 /* Target destructor parameters */
diff --git a/include/linux/netfilter_ingress.h b/include/linux/netfilter_ingress.h
new file mode 100644 (file)
index 0000000..cb0727f
--- /dev/null
@@ -0,0 +1,41 @@
+#ifndef _NETFILTER_INGRESS_H_
+#define _NETFILTER_INGRESS_H_
+
+#include <linux/netfilter.h>
+#include <linux/netdevice.h>
+
+#ifdef CONFIG_NETFILTER_INGRESS
+static inline int nf_hook_ingress_active(struct sk_buff *skb)
+{
+       return nf_hook_list_active(&skb->dev->nf_hooks_ingress,
+                                  NFPROTO_NETDEV, NF_NETDEV_INGRESS);
+}
+
+static inline int nf_hook_ingress(struct sk_buff *skb)
+{
+       struct nf_hook_state state;
+
+       nf_hook_state_init(&state, &skb->dev->nf_hooks_ingress,
+                          NF_NETDEV_INGRESS, INT_MIN, NFPROTO_NETDEV, NULL,
+                          skb->dev, NULL, NULL);
+       return nf_hook_slow(skb, &state);
+}
+
+static inline void nf_hook_ingress_init(struct net_device *dev)
+{
+       INIT_LIST_HEAD(&dev->nf_hooks_ingress);
+}
+#else /* CONFIG_NETFILTER_INGRESS */
+static inline int nf_hook_ingress_active(struct sk_buff *skb)
+{
+       return 0;
+}
+
+static inline int nf_hook_ingress(struct sk_buff *skb)
+{
+       return 0;
+}
+
+static inline void nf_hook_ingress_init(struct net_device *dev) {}
+#endif /* CONFIG_NETFILTER_INGRESS */
+#endif /* _NETFILTER_INGRESS_H_ */
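As a reading aid, a minimal sketch of how a receive path is expected to use this pair of helpers — the cheap list-active test guards the slow path (handle_nf_ingress() is an illustrative name, not the actual call site this series adds in net/core/dev.c):

/* Sketch: run the ingress hooks only when some are registered.
 * nf_hook_ingress() returns 1 to continue processing, 0 when a hook
 * consumed the skb, and a negative value when it was dropped.
 */
static inline int handle_nf_ingress(struct sk_buff *skb)
{
	if (!nf_hook_ingress_active(skb))
		return 1;	/* common case: no hooks registered */

	return nf_hook_ingress(skb);
}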
index 6835c1279df77328894ef9bc1396d786e7f29dec..9120edb650a068df60b0a2c390ae431ab78671cb 100644 (file)
@@ -28,6 +28,8 @@ struct netlink_skb_parms {
        __u32                   dst_group;
        __u32                   flags;
        struct sock             *sk;
+       bool                    nsid_is_set;
+       int                     nsid;
 };
 
 #define NETLINK_CB(skb)                (*(struct netlink_skb_parms*)&((skb)->cb))
index 2f7b9a40f627ead2e04d4105ad1eaeab76fdf5bf..2972c7f3aa1ded4bfc626e6630d16828e76cc15b 100644 (file)
 #define PCI_DEVICE_ID_ALTIMA_AC9100    0x03ea
 #define PCI_DEVICE_ID_ALTIMA_AC1003    0x03eb
 
+#define PCI_VENDOR_ID_CAVIUM           0x177d
+
 #define PCI_VENDOR_ID_BELKIN           0x1799
 #define PCI_DEVICE_ID_BELKIN_F5D7010V7 0x701f
 
index 685809835b5c0edc5b80a3398e849c3a53d5b17c..a26c3f84b8ddc6c15e2abbecf47a588419534b11 100644 (file)
@@ -181,6 +181,9 @@ struct mii_bus {
        /* PHY addresses to be ignored when probing */
        u32 phy_mask;
 
+       /* PHY addresses for which to ignore the TA/read failure */
+       u32 phy_ignore_ta_mask;
+
        /*
         * Pointer to an array of interrupts, each PHY's
         * interrupt at the index matching its address
@@ -674,6 +677,17 @@ static inline bool phy_is_internal(struct phy_device *phydev)
        return phydev->is_internal;
 }
 
+/**
+ * phy_interface_is_rgmii - Convenience function for testing if a PHY interface
+ * is RGMII (all variants)
+ * @phydev: the phy_device struct
+ */
+static inline bool phy_interface_is_rgmii(struct phy_device *phydev)
+{
+       return phydev->interface >= PHY_INTERFACE_MODE_RGMII &&
+               phydev->interface <= PHY_INTERFACE_MODE_RGMII_TXID;
+}
+
 /**
  * phy_write_mmd - Convenience function for writing a register
  * on an MMD on a given PHY.
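A hedged sketch of the intended use of phy_interface_is_rgmii(): drivers that open-coded four comparisons against the RGMII variants can collapse them, e.g. when deciding whether PHY-side delays must be programmed (the foo_ name is hypothetical):

/* Sketch: only the RGMII_ID/RXID/TXID variants need PHY-internal
 * delay programming; plain RGMII leaves delays to the MAC/board.
 */
static bool foo_phy_needs_internal_delay(struct phy_device *phydev)
{
	return phy_interface_is_rgmii(phydev) &&
	       phydev->interface != PHY_INTERFACE_MODE_RGMII;
}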
index 7b8e260c4a27df00b8eca70d5cd4f52864ba6d87..a2324fb45cf4242d7165d873c2a0b112b47ebd9c 100644 (file)
@@ -79,17 +79,9 @@ static inline struct netdev_queue *dev_ingress_queue(struct net_device *dev)
 
 struct netdev_queue *dev_ingress_queue_create(struct net_device *dev);
 
-#ifdef CONFIG_NET_CLS_ACT
+#ifdef CONFIG_NET_INGRESS
 void net_inc_ingress_queue(void);
 void net_dec_ingress_queue(void);
-#else
-static inline void net_inc_ingress_queue(void)
-{
-}
-
-static inline void net_dec_ingress_queue(void)
-{
-}
 #endif
 
 extern void rtnetlink_init(void);
index f15154a879c711870ba649f867fc6eeb47212f14..cc612fc0a8943ec853b92e6b3516b0e5582299e2 100644 (file)
@@ -34,7 +34,8 @@
 #include <linux/dma-mapping.h>
 #include <linux/netdev_features.h>
 #include <linux/sched.h>
-#include <net/flow_keys.h>
+#include <net/flow_dissector.h>
+#include <linux/splice.h>
 
 /* A. Checksumming of received packets by device.
  *
@@ -170,12 +171,14 @@ struct nf_bridge_info {
                BRNF_PROTO_UNCHANGED,
                BRNF_PROTO_8021Q,
                BRNF_PROTO_PPPOE
-       } orig_proto;
+       } orig_proto:8;
        bool                    pkt_otherhost;
        unsigned int            mask;
        struct net_device       *physindev;
-       struct net_device       *physoutdev;
-       char                    neigh_header[8];
+       union {
+               struct net_device *physoutdev;
+               char neigh_header[8];
+       };
        __be32                  ipv4_daddr;
 };
 #endif
@@ -859,6 +862,9 @@ int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
                                        int len, int odd, struct sk_buff *skb),
                            void *from, int length);
 
+int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
+                        int offset, size_t size);
+
 struct skb_seq_state {
        __u32           lower_offset;
        __u32           upper_offset;
@@ -919,7 +925,6 @@ skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
        skb->hash = hash;
 }
 
-void __skb_get_hash(struct sk_buff *skb);
 static inline __u32 skb_get_hash(struct sk_buff *skb)
 {
        if (!skb->l4_hash && !skb->sw_hash)
@@ -928,6 +933,8 @@ static inline __u32 skb_get_hash(struct sk_buff *skb)
        return skb->hash;
 }
 
+__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb);
+
 static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
 {
        return skb->hash;
@@ -1935,8 +1942,8 @@ static inline void skb_probe_transport_header(struct sk_buff *skb,
 
        if (skb_transport_header_was_set(skb))
                return;
-       else if (skb_flow_dissect(skb, &keys))
-               skb_set_transport_header(skb, keys.thoff);
+       else if (skb_flow_dissect_flow_keys(skb, &keys))
+               skb_set_transport_header(skb, keys.control.thoff);
        else
                skb_set_transport_header(skb, offset_hint);
 }
@@ -2127,10 +2134,6 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
                kfree_skb(skb);
 }
 
-#define NETDEV_FRAG_PAGE_MAX_ORDER get_order(32768)
-#define NETDEV_FRAG_PAGE_MAX_SIZE  (PAGE_SIZE << NETDEV_FRAG_PAGE_MAX_ORDER)
-#define NETDEV_PAGECNT_MAX_BIAS           NETDEV_FRAG_PAGE_MAX_SIZE
-
 void *netdev_alloc_frag(unsigned int fragsz);
 
 struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
@@ -2185,6 +2188,11 @@ static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
        return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
 }
 
+static inline void skb_free_frag(void *addr)
+{
+       __free_page_frag(addr);
+}
+
 void *napi_alloc_frag(unsigned int fragsz);
 struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
                                 unsigned int length, gfp_t gfp_mask);
@@ -2692,9 +2700,15 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
                              int len, __wsum csum);
-int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
+ssize_t skb_socket_splice(struct sock *sk,
+                         struct pipe_inode_info *pipe,
+                         struct splice_pipe_desc *spd);
+int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
                    struct pipe_inode_info *pipe, unsigned int len,
-                   unsigned int flags);
+                   unsigned int flags,
+                   ssize_t (*splice_cb)(struct sock *,
+                                        struct pipe_inode_info *,
+                                        struct splice_pipe_desc *));
 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
 unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
 int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
@@ -3050,7 +3064,7 @@ static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb,
                }
        } else if (skb->csum_bad) {
                /* ip_summed == CHECKSUM_NONE in this case */
-               return 1;
+               return (__force __sum16)1;
        }
 
        skb->csum = psum;
@@ -3298,9 +3312,6 @@ static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
        return skb->queue_mapping != 0;
 }
 
-u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
-                 unsigned int num_tx_queues);
-
 static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
 {
 #ifdef CONFIG_XFRM
@@ -3355,15 +3366,14 @@ static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
 static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res)
 {
        int plen = SKB_GSO_CB(skb)->csum_start - skb_headroom(skb) -
-           skb_transport_offset(skb);
-       __u16 csum;
+                  skb_transport_offset(skb);
+       __wsum partial;
 
-       csum = csum_fold(csum_partial(skb_transport_header(skb),
-                                     plen, skb->csum));
+       partial = csum_partial(skb_transport_header(skb), plen, skb->csum);
        skb->csum = res;
        SKB_GSO_CB(skb)->csum_start -= plen;
 
-       return csum;
+       return csum_fold(partial);
 }
 
 static inline bool skb_is_gso(const struct sk_buff *skb)
@@ -3418,10 +3428,9 @@ static inline void skb_checksum_none_assert(const struct sk_buff *skb)
 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
 
 int skb_checksum_setup(struct sk_buff *skb, bool recalculate);
-
-u32 skb_get_poff(const struct sk_buff *skb);
-u32 __skb_get_poff(const struct sk_buff *skb, void *data,
-                  const struct flow_keys *keys, int hlen);
+struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
+                                    unsigned int transport_len,
+                                    __sum16(*skb_chkf)(struct sk_buff *skb));
 
 /**
  * skb_head_is_locked - Determine if the skb->head is locked down
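The skb_probe_transport_header() hunk above shows the conversion pattern this series applies at every former skb_flow_dissect() call site; as a sketch (foo_ name illustrative):

/* Sketch: dissect into the new flow_keys layout; the transport
 * offset now lives under keys.control rather than on flow_keys
 * directly.
 */
static int foo_transport_offset(struct sk_buff *skb)
{
	struct flow_keys keys;

	if (!skb_flow_dissect_flow_keys(skb, &keys))
		return -EINVAL;

	return keys.control.thoff;
}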
index 7f484a239f53bd496548ff87290821259e47914d..c735f5c91eead34520726a503538ec48046dde86 100644 (file)
@@ -99,6 +99,7 @@ struct plat_stmmacenet_data {
        int phy_addr;
        int interface;
        struct stmmac_mdio_bus_data *mdio_bus_data;
+       struct device_node *phy_node;
        struct stmmac_dma_cfg *dma_cfg;
        int clk_csr;
        int has_gmac;
index e8bbf403618f47931e1b32d4ade97b06465fc982..48c3696e86457172ff9e9a907db15e44b7e92bb5 100644 (file)
@@ -149,11 +149,16 @@ struct tcp_sock {
                                 * sum(delta(rcv_nxt)), or how many bytes
                                 * were acked.
                                 */
+       u32     segs_in;        /* RFC4898 tcpEStatsPerfSegsIn
+                                * total number of segments in.
+                                */
        u32     rcv_nxt;        /* What we want to receive next         */
        u32     copied_seq;     /* Head of yet unread data              */
        u32     rcv_wup;        /* rcv_nxt on last window update sent   */
        u32     snd_nxt;        /* Next sequence we send                */
-
+       u32     segs_out;       /* RFC4898 tcpEStatsPerfSegsOut
+                                * The total number of segments sent.
+                                */
        u64     bytes_acked;    /* RFC4898 tcpEStatsAppHCThruOctetsAcked
                                 * sum(delta(snd_una)), or how many bytes
                                 * were acked.
@@ -201,6 +206,7 @@ struct tcp_sock {
                syn_fastopen:1, /* SYN includes Fast Open option */
                syn_fastopen_exp:1,/* SYN includes Fast Open exp. option */
                syn_data_acked:1,/* data in SYN is acked by SYN-ACK */
+               save_syn:1,     /* Save headers of SYN packet */
                is_cwnd_limited:1;/* forward progress limited by snd_cwnd? */
        u32     tlp_high_seq;   /* snd_nxt at the time of TLP retransmit. */
 
@@ -328,6 +334,7 @@ struct tcp_sock {
         * socket. Used to retransmit SYNACKs etc.
         */
        struct request_sock *fastopen_rsk;
+       u32     *saved_syn;
 };
 
 enum tsq_flags {
@@ -395,4 +402,10 @@ static inline int fastopen_init_queue(struct sock *sk, int backlog)
        return 0;
 }
 
+static inline void tcp_saved_syn_free(struct tcp_sock *tp)
+{
+       kfree(tp->saved_syn);
+       tp->saved_syn = NULL;
+}
+
 #endif /* _LINUX_TCP_H */
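A sketch of the ownership rule tcp_saved_syn_free() encodes: saved_syn is plain kfree()-able storage, so teardown and read-once paths simply call the helper (foo_consume_saved_syn() is illustrative; the getsockopt exposing the data is added elsewhere in this series):

/* Sketch: release the saved SYN headers once consumed, so they are
 * reported at most once and the memory is reclaimed.
 */
static void foo_consume_saved_syn(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->saved_syn)
		tcp_saved_syn_free(tp);	/* kfree + NULL, as above */
}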
index 80456f72d70aec17f6f64222aec85a61072a562f..def59d3a34d5e24bda47e4526f7050c73a164cd7 100644 (file)
@@ -142,6 +142,7 @@ void ipv6_mc_unmap(struct inet6_dev *idev);
 void ipv6_mc_remap(struct inet6_dev *idev);
 void ipv6_mc_init_dev(struct inet6_dev *idev);
 void ipv6_mc_destroy_dev(struct inet6_dev *idev);
+int ipv6_mc_check_mld(struct sk_buff *skb, struct sk_buff **skb_trimmed);
 void addrconf_dad_failure(struct inet6_ifaddr *ifp);
 
 bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
index 172632dd9930d1d98a68e47076c97fba3bd2c7ae..db639a4c5ab84cab3339193010641f0b0187db48 100644 (file)
@@ -74,7 +74,7 @@ void vsock_pending_work(struct work_struct *work);
 struct sock *__vsock_create(struct net *net,
                            struct socket *sock,
                            struct sock *parent,
-                           gfp_t priority, unsigned short type);
+                           gfp_t priority, unsigned short type, int kern);
 
 /**** TRANSPORT ****/
 
index ea6546d2c946aa1caeabcb071a368e07cf89c3a3..c28aca25320ebaa7c02172fbfee4827a5d207d6b 100644 (file)
@@ -63,6 +63,9 @@ enum {
        BOND_OPT_LP_INTERVAL,
        BOND_OPT_SLAVES,
        BOND_OPT_TLB_DYNAMIC_LB,
+       BOND_OPT_AD_ACTOR_SYS_PRIO,
+       BOND_OPT_AD_ACTOR_SYSTEM,
+       BOND_OPT_AD_USER_PORT_KEY,
        BOND_OPT_LAST
 };
 
index 78ed135e9dea6a9971d15e3b786a64de170b310c..20defc0353d1353c02f1d778e3f53809cae6c0bb 100644 (file)
@@ -136,6 +136,9 @@ struct bond_params {
        int packets_per_slave;
        int tlb_dynamic_lb;
        struct reciprocal_value reciprocal_packets_per_slave;
+       u16 ad_actor_sys_prio;
+       u16 ad_user_port_key;
+       u8 ad_actor_system[ETH_ALEN];
 };
 
 struct bond_parm_tbl {
index f8d6813cd5b2c05eb5f7bea0595a12ad4c02dca8..a741678f24a26d712e08775e213b1b1c50c8ca8a 100644 (file)
@@ -111,7 +111,7 @@ enum ieee80211_band {
  *     This may be due to the driver or due to regulatory bandwidth
  *     restrictions.
  * @IEEE80211_CHAN_INDOOR_ONLY: see %NL80211_FREQUENCY_ATTR_INDOOR_ONLY
- * @IEEE80211_CHAN_GO_CONCURRENT: see %NL80211_FREQUENCY_ATTR_GO_CONCURRENT
+ * @IEEE80211_CHAN_IR_CONCURRENT: see %NL80211_FREQUENCY_ATTR_IR_CONCURRENT
  * @IEEE80211_CHAN_NO_20MHZ: 20 MHz bandwidth is not permitted
  *     on this channel.
  * @IEEE80211_CHAN_NO_10MHZ: 10 MHz bandwidth is not permitted
@@ -129,7 +129,7 @@ enum ieee80211_channel_flags {
        IEEE80211_CHAN_NO_80MHZ         = 1<<7,
        IEEE80211_CHAN_NO_160MHZ        = 1<<8,
        IEEE80211_CHAN_INDOOR_ONLY      = 1<<9,
-       IEEE80211_CHAN_GO_CONCURRENT    = 1<<10,
+       IEEE80211_CHAN_IR_CONCURRENT    = 1<<10,
        IEEE80211_CHAN_NO_20MHZ         = 1<<11,
        IEEE80211_CHAN_NO_10MHZ         = 1<<12,
 };
@@ -4575,13 +4575,15 @@ void cfg80211_roamed_bss(struct net_device *dev, struct cfg80211_bss *bss,
  * @ie: information elements of the deauth/disassoc frame (may be %NULL)
  * @ie_len: length of IEs
  * @reason: reason code for the disconnection, set it to 0 if unknown
+ * @locally_generated: disconnection was requested locally
  * @gfp: allocation flags
  *
  * After it calls this function, the driver should enter an idle state
  * and not try to connect to any AP any more.
  */
 void cfg80211_disconnected(struct net_device *dev, u16 reason,
-                          const u8 *ie, size_t ie_len, gfp_t gfp);
+                          const u8 *ie, size_t ie_len,
+                          bool locally_generated, gfp_t gfp);
 
 /**
  * cfg80211_ready_on_channel - notification of remain_on_channel start
index 6ea16c84293b0cdcb981df77481a4fa5e509fdde..290a9a69af0788794619b0ededc4a6ccfbab5e07 100644 (file)
@@ -44,6 +44,8 @@ struct cfg802154_ops {
        int     (*set_channel)(struct wpan_phy *wpan_phy, u8 page, u8 channel);
        int     (*set_cca_mode)(struct wpan_phy *wpan_phy,
                                const struct wpan_phy_cca *cca);
+       int     (*set_cca_ed_level)(struct wpan_phy *wpan_phy, s32 ed_level);
+       int     (*set_tx_power)(struct wpan_phy *wpan_phy, s32 power);
        int     (*set_pan_id)(struct wpan_phy *wpan_phy,
                              struct wpan_dev *wpan_dev, __le16 pan_id);
        int     (*set_short_addr)(struct wpan_phy *wpan_phy,
@@ -61,14 +63,66 @@ struct cfg802154_ops {
                                struct wpan_dev *wpan_dev, bool mode);
 };
 
+static inline bool
+wpan_phy_supported_bool(bool b, enum nl802154_supported_bool_states st)
+{
+       switch (st) {
+       case NL802154_SUPPORTED_BOOL_TRUE:
+               return b;
+       case NL802154_SUPPORTED_BOOL_FALSE:
+               return !b;
+       case NL802154_SUPPORTED_BOOL_BOTH:
+               return true;
+       default:
+               WARN_ON(1);
+       }
+
+       return false;
+}
+
+struct wpan_phy_supported {
+       u32 channels[IEEE802154_MAX_PAGE + 1],
+           cca_modes, cca_opts, iftypes;
+       enum nl802154_supported_bool_states lbt;
+       u8 min_minbe, max_minbe, min_maxbe, max_maxbe,
+          min_csma_backoffs, max_csma_backoffs;
+       s8 min_frame_retries, max_frame_retries;
+       size_t tx_powers_size, cca_ed_levels_size;
+       const s32 *tx_powers, *cca_ed_levels;
+};
+
 struct wpan_phy_cca {
        enum nl802154_cca_modes mode;
        enum nl802154_cca_opts opt;
 };
 
-struct wpan_phy {
-       struct mutex pib_lock;
+static inline bool
+wpan_phy_cca_cmp(const struct wpan_phy_cca *a, const struct wpan_phy_cca *b)
+{
+       if (a->mode != b->mode)
+               return false;
+
+       if (a->mode == NL802154_CCA_ENERGY_CARRIER)
+               return a->opt == b->opt;
 
+       return true;
+}
+
+/**
+ * @WPAN_PHY_FLAG_TXPOWER: Indicates that the transceiver supports
+ *     transmit power setting.
+ * @WPAN_PHY_FLAG_CCA_ED_LEVEL: Indicates that the transceiver supports
+ *     CCA ED level setting.
+ * @WPAN_PHY_FLAG_CCA_MODE: Indicates that the transceiver supports
+ *     CCA mode setting.
+ */
+enum wpan_phy_flags {
+       WPAN_PHY_FLAG_TXPOWER           = BIT(1),
+       WPAN_PHY_FLAG_CCA_ED_LEVEL      = BIT(2),
+       WPAN_PHY_FLAG_CCA_MODE          = BIT(3),
+};
+
+struct wpan_phy {
        /* If multiple wpan_phys are registered and you're handed e.g.
         * a regular netdev with assigned ieee802154_ptr, you won't
         * know whether it points to a wpan_phy your driver has registered
@@ -77,6 +131,8 @@ struct wpan_phy {
         */
        const void *privid;
 
+       u32 flags;
+
        /*
         * This is a PIB according to 802.15.4-2011.
         * We do not provide timing-related variables, as they
@@ -84,12 +140,14 @@ struct wpan_phy {
         */
        u8 current_channel;
        u8 current_page;
-       u32 channels_supported[IEEE802154_MAX_PAGE + 1];
-       s8 transmit_power;
+       struct wpan_phy_supported supported;
+       /* current transmit_power in mBm */
+       s32 transmit_power;
        struct wpan_phy_cca cca;
 
        __le64 perm_extended_addr;
 
+       /* current cca ed threshold in mBm */
        s32 cca_ed_level;
 
        /* PHY depended MAC PIB values */
@@ -121,9 +179,9 @@ struct wpan_dev {
        __le64 extended_addr;
 
        /* MAC BSN field */
-       u8 bsn;
+       atomic_t bsn;
        /* MAC DSN field */
-       u8 dsn;
+       atomic_t dsn;
 
        u8 min_be;
        u8 max_be;
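A sketch of how the new capability table and flags combine when a request is validated (the helper name is illustrative; the real checks land in net/ieee802154):

/* Sketch: accept a TX power request only if the transceiver both
 * advertises the capability and lists the exact value.
 */
static bool wpan_phy_txpower_supported(struct wpan_phy *phy, s32 power)
{
	size_t i;

	if (!(phy->flags & WPAN_PHY_FLAG_TXPOWER))
		return false;

	for (i = 0; i < phy->supported.tx_powers_size; i++)
		if (phy->supported.tx_powers[i] == power)
			return true;

	return false;
}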
index 0a55ac715077d0aad820916f9daf979ae3dfd0fd..2d1d73cb773e9bd160f5c43bb58981ee8597b87c 100644 (file)
@@ -122,7 +122,9 @@ static inline __wsum csum_partial_ext(const void *buff, int len, __wsum sum)
 
 static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
 {
-       *sum = csum_fold(csum_add(csum_sub(~csum_unfold(*sum), from), to));
+       __wsum tmp = csum_sub(~csum_unfold(*sum), (__force __wsum)from);
+
+       *sum = csum_fold(csum_add(tmp, (__force __wsum)to));
 }
 
 /* Implements RFC 1624 (Incremental Internet Checksum)
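The helper's contract is unchanged by the __force annotations; for reference, the classic incremental-update use, sketched against a hypothetical SNAT spot:

/* Sketch: rewrite the IPv4 source address and patch the header
 * checksum incrementally instead of recomputing it (RFC 1624).
 */
static void foo_snat_saddr(struct iphdr *iph, __be32 new_saddr)
{
	csum_replace4(&iph->check, iph->saddr, new_saddr);
	iph->saddr = new_saddr;
}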
index 1e18005f7f65f061f6084ea1823a8d37368a57e4..267e70210061d82c22b4b2e91a9ab69cbb2b7ce7 100644 (file)
@@ -7,7 +7,7 @@
  *  Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com>
  *  Copyright (C) 2011-2012 Van Jacobson <van@pollere.net>
  *  Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
- *  Copyright (C) 2012 Eric Dumazet <edumazet@google.com>
+ *  Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -119,12 +119,14 @@ static inline u32 codel_time_to_us(codel_time_t val)
 /**
  * struct codel_params - contains codel parameters
  * @target:    target queue size (in time units)
+ * @ce_threshold:  threshold for marking packets with ECN CE
  * @interval:  width of moving time window
  * @mtu:       device mtu, or minimal queue backlog in bytes.
  * @ecn:       is Explicit Congestion Notification enabled
  */
 struct codel_params {
        codel_time_t    target;
+       codel_time_t    ce_threshold;
        codel_time_t    interval;
        u32             mtu;
        bool            ecn;
@@ -161,19 +163,24 @@ struct codel_vars {
  * @maxpacket: largest packet we've seen so far
  * @drop_count:        temp count of dropped packets in dequeue()
  * @ecn_mark:  number of packets we ECN marked instead of dropping
 + * @ce_mark:   number of packets CE marked because sojourn time was above ce_threshold
  */
 struct codel_stats {
        u32             maxpacket;
        u32             drop_count;
        u32             ecn_mark;
+       u32             ce_mark;
 };
 
+#define CODEL_DISABLED_THRESHOLD INT_MAX
+
 static void codel_params_init(struct codel_params *params,
                              const struct Qdisc *sch)
 {
        params->interval = MS2TIME(100);
        params->target = MS2TIME(5);
        params->mtu = psched_mtu(qdisc_dev(sch));
+       params->ce_threshold = CODEL_DISABLED_THRESHOLD;
        params->ecn = false;
 }
 
@@ -354,6 +361,9 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
                                                    vars->rec_inv_sqrt);
        }
 end:
+       if (skb && codel_time_after(vars->ldelay, params->ce_threshold) &&
+           INET_ECN_set_ce(skb))
+               stats->ce_mark++;
        return skb;
 }
 #endif
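Since ce_threshold defaults to CODEL_DISABLED_THRESHOLD, CE marking is opt-in; a sketch of arming it from a qdisc change handler, with the 1 ms value purely illustrative:

/* Sketch: mark packets with ECN CE once their sojourn time exceeds
 * 1 ms, independently of the codel drop law.
 */
static void foo_enable_ce_threshold(struct codel_params *params)
{
	params->ce_threshold = MS2TIME(1);
}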
index 0fb99a26e97372613da1851d9ee4d9bb432aba3f..2bc73f8a00a9c4d20848a578eca44b99cf1b7281 100644 (file)
@@ -109,7 +109,6 @@ u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);
 extern const u32 dst_default_metrics[];
 
 #define DST_METRICS_READ_ONLY          0x1UL
-#define DST_METRICS_FORCE_OVERWRITE    0x2UL
 #define DST_METRICS_FLAGS              0x3UL
 #define __DST_METRICS_PTR(Y)   \
        ((u32 *)((Y) & ~DST_METRICS_FLAGS))
@@ -120,11 +119,6 @@ static inline bool dst_metrics_read_only(const struct dst_entry *dst)
        return dst->_metrics & DST_METRICS_READ_ONLY;
 }
 
-static inline void dst_metrics_set_force_overwrite(struct dst_entry *dst)
-{
-       dst->_metrics |= DST_METRICS_FORCE_OVERWRITE;
-}
-
 void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old);
 
 static inline void dst_destroy_metrics_generic(struct dst_entry *dst)
@@ -355,18 +349,6 @@ static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
        __skb_tunnel_rx(skb, dev, net);
 }
 
-/* Children define the path of the packet through the
- * Linux networking.  Thus, destinations are stackable.
- */
-
-static inline struct dst_entry *skb_dst_pop(struct sk_buff *skb)
-{
-       struct dst_entry *child = dst_clone(skb_dst(skb)->child);
-
-       skb_dst_drop(skb);
-       return child;
-}
-
 int dst_discard_sk(struct sock *sk, struct sk_buff *skb);
 static inline int dst_discard(struct sk_buff *skb)
 {
diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
new file mode 100644 (file)
index 0000000..1a8c224
--- /dev/null
@@ -0,0 +1,220 @@
+#ifndef _NET_FLOW_DISSECTOR_H
+#define _NET_FLOW_DISSECTOR_H
+
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/in6.h>
+#include <uapi/linux/if_ether.h>
+
+/**
+ * struct flow_dissector_key_control:
+ * @thoff: Transport header offset
+ * @addr_type: dissected address type (one of the FLOW_DISSECTOR_KEY_*_ADDRS
+ *     key ids)
+ */
+struct flow_dissector_key_control {
+       u16     thoff;
+       u16     addr_type;
+};
+
+/**
+ * struct flow_dissector_key_basic:
+ * @n_proto: Network header protocol (e.g. IPv4/IPv6)
+ * @ip_proto: Transport header protocol (e.g. TCP/UDP)
+ */
+struct flow_dissector_key_basic {
+       __be16  n_proto;
+       u8      ip_proto;
+       u8      padding;
+};
+
+struct flow_dissector_key_tags {
+       u32     vlan_id:12,
+               flow_label:20;
+};
+
+struct flow_dissector_key_keyid {
+       __be32  keyid;
+};
+
+/**
+ * struct flow_dissector_key_ipv4_addrs:
+ * @src: source ip address
+ * @dst: destination ip address
+ */
+struct flow_dissector_key_ipv4_addrs {
+       /* (src, dst) must be grouped, in the same way as in the IP header */
+       __be32 src;
+       __be32 dst;
+};
+
+/**
+ * struct flow_dissector_key_ipv6_addrs:
+ * @src: source ip address
+ * @dst: destination ip address
+ */
+struct flow_dissector_key_ipv6_addrs {
+       /* (src, dst) must be grouped, in the same way as in the IP header */
+       struct in6_addr src;
+       struct in6_addr dst;
+};
+
+/**
+ * struct flow_dissector_key_tipc_addrs:
+ * @srcnode: source node address
+ */
+struct flow_dissector_key_tipc_addrs {
+       __be32 srcnode;
+};
+
+/**
+ * struct flow_dissector_key_addrs:
+ * @v4addrs: IPv4 addresses
+ * @v6addrs: IPv6 addresses
+ */
+struct flow_dissector_key_addrs {
+       union {
+               struct flow_dissector_key_ipv4_addrs v4addrs;
+               struct flow_dissector_key_ipv6_addrs v6addrs;
+               struct flow_dissector_key_tipc_addrs tipcaddrs;
+       };
+};
+
+/**
+ * struct flow_dissector_key_ports:
+ *     @ports: port numbers of Transport header
+ *             src: source port number
+ *             dst: destination port number
+ */
+struct flow_dissector_key_ports {
+       union {
+               __be32 ports;
+               struct {
+                       __be16 src;
+                       __be16 dst;
+               };
+       };
+};
+
+/**
+ * struct flow_dissector_key_eth_addrs:
+ * @src: source Ethernet address
+ * @dst: destination Ethernet address
+ */
+struct flow_dissector_key_eth_addrs {
+       /* (dst, src) must be grouped, in the same way as in the Ethernet header */
+       unsigned char dst[ETH_ALEN];
+       unsigned char src[ETH_ALEN];
+};
+
+enum flow_dissector_key_id {
+       FLOW_DISSECTOR_KEY_CONTROL, /* struct flow_dissector_key_control */
+       FLOW_DISSECTOR_KEY_BASIC, /* struct flow_dissector_key_basic */
+       FLOW_DISSECTOR_KEY_IPV4_ADDRS, /* struct flow_dissector_key_ipv4_addrs */
+       FLOW_DISSECTOR_KEY_IPV6_ADDRS, /* struct flow_dissector_key_ipv6_addrs */
+       FLOW_DISSECTOR_KEY_PORTS, /* struct flow_dissector_key_ports */
+       FLOW_DISSECTOR_KEY_ETH_ADDRS, /* struct flow_dissector_key_eth_addrs */
+       FLOW_DISSECTOR_KEY_TIPC_ADDRS, /* struct flow_dissector_key_tipc_addrs */
+       FLOW_DISSECTOR_KEY_VLANID, /* struct flow_dissector_key_tags */
+       FLOW_DISSECTOR_KEY_FLOW_LABEL, /* struct flow_dissector_key_tags */
+       FLOW_DISSECTOR_KEY_GRE_KEYID, /* struct flow_dissector_key_keyid */
+       FLOW_DISSECTOR_KEY_MPLS_ENTROPY, /* struct flow_dissector_key_keyid */
+
+       FLOW_DISSECTOR_KEY_MAX,
+};
+
+struct flow_dissector_key {
+       enum flow_dissector_key_id key_id;
+       size_t offset; /* offset of struct flow_dissector_key_*
+                         in the target struct */
+};
+
+struct flow_dissector {
+       unsigned int used_keys; /* each bit represents presence of one key id */
+       unsigned short int offset[FLOW_DISSECTOR_KEY_MAX];
+};
+
+void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
+                            const struct flow_dissector_key *key,
+                            unsigned int key_count);
+
+bool __skb_flow_dissect(const struct sk_buff *skb,
+                       struct flow_dissector *flow_dissector,
+                       void *target_container,
+                       void *data, __be16 proto, int nhoff, int hlen);
+
+static inline bool skb_flow_dissect(const struct sk_buff *skb,
+                                   struct flow_dissector *flow_dissector,
+                                   void *target_container)
+{
+       return __skb_flow_dissect(skb, flow_dissector, target_container,
+                                 NULL, 0, 0, 0);
+}
+
+struct flow_keys {
+       struct flow_dissector_key_control control;
+#define FLOW_KEYS_HASH_START_FIELD basic
+       struct flow_dissector_key_basic basic;
+       struct flow_dissector_key_tags tags;
+       struct flow_dissector_key_keyid keyid;
+       struct flow_dissector_key_ports ports;
+       struct flow_dissector_key_addrs addrs;
+};
+
+#define FLOW_KEYS_HASH_OFFSET          \
+       offsetof(struct flow_keys, FLOW_KEYS_HASH_START_FIELD)
+
+__be32 flow_get_u32_src(const struct flow_keys *flow);
+__be32 flow_get_u32_dst(const struct flow_keys *flow);
+
+extern struct flow_dissector flow_keys_dissector;
+extern struct flow_dissector flow_keys_buf_dissector;
+
+static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
+                                             struct flow_keys *flow)
+{
+       memset(flow, 0, sizeof(*flow));
+       return __skb_flow_dissect(skb, &flow_keys_dissector, flow,
+                                 NULL, 0, 0, 0);
+}
+
+static inline bool skb_flow_dissect_flow_keys_buf(struct flow_keys *flow,
+                                                 void *data, __be16 proto,
+                                                 int nhoff, int hlen)
+{
+       memset(flow, 0, sizeof(*flow));
+       return __skb_flow_dissect(NULL, &flow_keys_buf_dissector, flow,
+                                 data, proto, nhoff, hlen);
+}
+
+__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
+                           void *data, int hlen_proto);
+
+static inline __be32 skb_flow_get_ports(const struct sk_buff *skb,
+                                       int thoff, u8 ip_proto)
+{
+       return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
+}
+
+u32 flow_hash_from_keys(struct flow_keys *keys);
+void __skb_get_hash(struct sk_buff *skb);
+u32 skb_get_poff(const struct sk_buff *skb);
+u32 __skb_get_poff(const struct sk_buff *skb, void *data,
+                  const struct flow_keys *keys, int hlen);
+
+/* struct flow_keys_digest:
+ *
+ * This structure is used to hold a digest of the full flow keys. This is a
+ * larger "hash" of a flow to allow definitively matching specific flows where
+ * the 32 bit skb->hash is not large enough. The size is limited to 16 bytes so
+ * that it can be used in the skb CB (see sch_choke for an example).
+ */
+#define FLOW_KEYS_DIGEST_LEN   16
+struct flow_keys_digest {
+       u8      data[FLOW_KEYS_DIGEST_LEN];
+};
+
+void make_flow_keys_digest(struct flow_keys_digest *digest,
+                          const struct flow_keys *flow);
+
+#endif
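The key/offset tables make dissectors composable; a sketch of declaring a private dissector over a caller-owned container, in the same way flow_keys_dissector itself is built (all my_* names are hypothetical):

/* Sketch: a reduced dissector extracting only control, basic and
 * port keys into a private container.
 */
struct my_flow {
	struct flow_dissector_key_control control;
	struct flow_dissector_key_basic basic;
	struct flow_dissector_key_ports ports;
};

static const struct flow_dissector_key my_keys[] = {
	{ .key_id = FLOW_DISSECTOR_KEY_CONTROL,
	  .offset = offsetof(struct my_flow, control) },
	{ .key_id = FLOW_DISSECTOR_KEY_BASIC,
	  .offset = offsetof(struct my_flow, basic) },
	{ .key_id = FLOW_DISSECTOR_KEY_PORTS,
	  .offset = offsetof(struct my_flow, ports) },
};

static struct flow_dissector my_dissector;

static int __init my_init(void)
{
	skb_flow_dissector_init(&my_dissector, my_keys,
				ARRAY_SIZE(my_keys));
	return 0;
}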
diff --git a/include/net/flow_keys.h b/include/net/flow_keys.h
deleted file mode 100644 (file)
index dc8fd81..0000000
+++ /dev/null
@@ -1,45 +0,0 @@
-#ifndef _NET_FLOW_KEYS_H
-#define _NET_FLOW_KEYS_H
-
-/* struct flow_keys:
- *     @src: source ip address in case of IPv4
- *           For IPv6 it contains 32bit hash of src address
- *     @dst: destination ip address in case of IPv4
- *           For IPv6 it contains 32bit hash of dst address
- *     @ports: port numbers of Transport header
- *             port16[0]: src port number
- *             port16[1]: dst port number
- *     @thoff: Transport header offset
- *     @n_proto: Network header protocol (eg. IPv4/IPv6)
- *     @ip_proto: Transport header protocol (eg. TCP/UDP)
- * All the members, except thoff, are in network byte order.
- */
-struct flow_keys {
-       /* (src,dst) must be grouped, in the same way than in IP header */
-       __be32 src;
-       __be32 dst;
-       union {
-               __be32 ports;
-               __be16 port16[2];
-       };
-       u16     thoff;
-       __be16  n_proto;
-       u8      ip_proto;
-};
-
-bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow,
-                       void *data, __be16 proto, int nhoff, int hlen);
-static inline bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow)
-{
-       return __skb_flow_dissect(skb, flow, NULL, 0, 0, 0);
-}
-__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
-                           void *data, int hlen_proto);
-static inline __be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto)
-{
-       return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
-}
-u32 flow_hash_from_keys(struct flow_keys *keys);
-unsigned int flow_get_hlen(const unsigned char *data, unsigned int max_len,
-                          __be16 protocol);
-#endif
index 14fb8d3390b4807ac2ed890efef079e0ca39ef54..2a0543a1899dd580e324bc7bbacc127b9cc791db 100644 (file)
@@ -62,6 +62,11 @@ struct genevehdr {
        struct geneve_opt options[];
 };
 
+static inline struct genevehdr *geneve_hdr(const struct sk_buff *skb)
+{
+       return (struct genevehdr *)(udp_hdr(skb) + 1);
+}
+
 #ifdef CONFIG_INET
 struct geneve_sock;
 
index 94a297052442600acff26e86404fec2afbb27d5a..0a87975128ec4d180e1184e138ef601c533b704f 100644 (file)
@@ -422,16 +422,6 @@ struct ieee802154_mlme_ops {
                               struct ieee802154_mac_params *params);
 
        struct ieee802154_llsec_ops *llsec;
-
-       /* The fields below are required. */
-
-       /*
-        * FIXME: these should become the part of PIB/MIB interface.
-        * However we still don't have IB interface of any kind
-        */
-       __le16 (*get_pan_id)(const struct net_device *dev);
-       __le16 (*get_short_addr)(const struct net_device *dev);
-       u8 (*get_dsn)(const struct net_device *dev);
 };
 
 static inline struct ieee802154_mlme_ops *
@@ -440,10 +430,4 @@ ieee802154_mlme_ops(const struct net_device *dev)
        return dev->ml_priv;
 }
 
-static inline struct ieee802154_reduced_mlme_ops *
-ieee802154_reduced_mlme_ops(const struct net_device *dev)
-{
-       return dev->ml_priv;
-}
-
 #endif
index 4a92423eefa509b27d8caf75474406fe03868b04..279f83591971bd886e78e1b20e4a5c29051fc563 100644 (file)
@@ -41,7 +41,7 @@ int inet_recv_error(struct sock *sk, struct msghdr *msg, int len,
 
 static inline void inet_ctl_sock_destroy(struct sock *sk)
 {
-       sk_release_kernel(sk);
+       sock_release(sk->sk_socket);
 }
 
 #endif
index 8d1765577acca21f698813ee8359a39d76680b7e..e1300b3dd597b9a68db7b6dc9c03a8ea238b4e4c 100644 (file)
@@ -43,7 +43,7 @@ enum {
  * @len: total length of the original datagram
  * @meat: length of received fragments so far
  * @flags: fragment queue flags
- * @max_size: (ipv4 only) maximum received fragment size with IP_DF set
+ * @max_size: maximum received fragment size
  * @net: namespace that this frag belongs to
  */
 struct inet_frag_queue {
index 73fe0f9525d92068aa43fa995a4d1547cc20eb34..b73c88a19dd408f0de41f87c80242816fac4b19d 100644 (file)
@@ -24,7 +24,6 @@
 #include <linux/spinlock.h>
 #include <linux/types.h>
 #include <linux/wait.h>
-#include <linux/vmalloc.h>
 
 #include <net/inet_connection_sock.h>
 #include <net/inet_sock.h>
@@ -148,8 +147,6 @@ struct inet_hashinfo {
         */
        struct inet_listen_hashbucket   listening_hash[INET_LHTABLE_SIZE]
                                        ____cacheline_aligned_in_smp;
-
-       atomic_t                        bsockets;
 };
 
 static inline struct inet_ehash_bucket *inet_ehash_bucket(
@@ -166,52 +163,12 @@ static inline spinlock_t *inet_ehash_lockp(
        return &hashinfo->ehash_locks[hash & hashinfo->ehash_locks_mask];
 }
 
-static inline int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
-{
-       unsigned int i, size = 256;
-#if defined(CONFIG_PROVE_LOCKING)
-       unsigned int nr_pcpus = 2;
-#else
-       unsigned int nr_pcpus = num_possible_cpus();
-#endif
-       if (nr_pcpus >= 4)
-               size = 512;
-       if (nr_pcpus >= 8)
-               size = 1024;
-       if (nr_pcpus >= 16)
-               size = 2048;
-       if (nr_pcpus >= 32)
-               size = 4096;
-       if (sizeof(spinlock_t) != 0) {
-#ifdef CONFIG_NUMA
-               if (size * sizeof(spinlock_t) > PAGE_SIZE)
-                       hashinfo->ehash_locks = vmalloc(size * sizeof(spinlock_t));
-               else
-#endif
-               hashinfo->ehash_locks = kmalloc(size * sizeof(spinlock_t),
-                                               GFP_KERNEL);
-               if (!hashinfo->ehash_locks)
-                       return ENOMEM;
-               for (i = 0; i < size; i++)
-                       spin_lock_init(&hashinfo->ehash_locks[i]);
-       }
-       hashinfo->ehash_locks_mask = size - 1;
-       return 0;
-}
+int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo);
 
 static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo)
 {
-       if (hashinfo->ehash_locks) {
-#ifdef CONFIG_NUMA
-               unsigned int size = (hashinfo->ehash_locks_mask + 1) *
-                                                       sizeof(spinlock_t);
-               if (size > PAGE_SIZE)
-                       vfree(hashinfo->ehash_locks);
-               else
-#endif
-               kfree(hashinfo->ehash_locks);
-               hashinfo->ehash_locks = NULL;
-       }
+       kvfree(hashinfo->ehash_locks);
+       hashinfo->ehash_locks = NULL;
 }
 
 struct inet_bind_bucket *
index b6c3737da4e94404585a97f59ad7a4e2e1f6e105..47eb67b08abdf28b185514cfc1a99685a8c8b8dd 100644 (file)
@@ -187,6 +187,7 @@ struct inet_sock {
                                transparent:1,
                                mc_all:1,
                                nodefrag:1;
+       __u8                    bind_address_no_port:1;
        __u8                    rcv_tos;
        __u8                    convert_csum;
        int                     uc_index;
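The new bit backs a socket option; a user-space sketch of the intended pattern, assuming the IP_BIND_ADDRESS_NO_PORT sockopt from this same merge window:

/* Sketch (user space): defer source-port selection to connect() so a
 * bound source address does not pin an ephemeral port.
 */
#include <sys/socket.h>
#include <netinet/in.h>

static int bind_no_port(int fd, const struct sockaddr_in *src,
			const struct sockaddr_in *dst)
{
	int one = 1;

	setsockopt(fd, SOL_IP, IP_BIND_ADDRESS_NO_PORT, &one, sizeof(one));
	if (bind(fd, (const struct sockaddr *)src, sizeof(*src)))
		return -1;	/* src->sin_port should be 0 */
	return connect(fd, (const struct sockaddr *)dst, sizeof(*dst));
}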
index d14af7edd197c2c364c1da62addc9781aa79fe84..0750a186ea635678efe15b2619f32e91f86fde99 100644 (file)
@@ -31,7 +31,7 @@
 #include <net/route.h>
 #include <net/snmp.h>
 #include <net/flow.h>
-#include <net/flow_keys.h>
+#include <net/flow_dissector.h>
 
 struct sock;
 
@@ -45,6 +45,7 @@ struct inet_skb_parm {
 #define IPSKB_FRAG_COMPLETE    BIT(3)
 #define IPSKB_REROUTED         BIT(4)
 #define IPSKB_DOREDIRECT       BIT(5)
+#define IPSKB_FRAG_PMTU                BIT(6)
 
        u16                     frag_max_size;
 };
@@ -108,9 +109,8 @@ int ip_local_deliver(struct sk_buff *skb);
 int ip_mr_input(struct sk_buff *skb);
 int ip_output(struct sock *sk, struct sk_buff *skb);
 int ip_mc_output(struct sock *sk, struct sk_buff *skb);
-int ip_fragment(struct sock *sk, struct sk_buff *skb,
-               int (*output)(struct sock *, struct sk_buff *));
-int ip_do_nat(struct sk_buff *skb);
+int ip_do_fragment(struct sock *sk, struct sk_buff *skb,
+                  int (*output)(struct sock *, struct sk_buff *));
 void ip_send_check(struct iphdr *ip);
 int __ip_local_out(struct sk_buff *skb);
 int ip_local_out_sk(struct sock *sk, struct sk_buff *skb);
@@ -355,15 +355,32 @@ static inline __wsum inet_compute_pseudo(struct sk_buff *skb, int proto)
                                  skb->len, proto, 0);
 }
 
+/* copy IPv4 saddr & daddr to flow_keys, possibly using 64bit load/store
+ * Equivalent to :     flow->v4addrs.src = iph->saddr;
+ *                     flow->v4addrs.dst = iph->daddr;
+ */
+static inline void iph_to_flow_copy_v4addrs(struct flow_keys *flow,
+                                           const struct iphdr *iph)
+{
+       BUILD_BUG_ON(offsetof(typeof(flow->addrs), v4addrs.dst) !=
+                    offsetof(typeof(flow->addrs), v4addrs.src) +
+                             sizeof(flow->addrs.v4addrs.src));
+       memcpy(&flow->addrs.v4addrs, &iph->saddr, sizeof(flow->addrs.v4addrs));
+       flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
+}
+
 static inline void inet_set_txhash(struct sock *sk)
 {
        struct inet_sock *inet = inet_sk(sk);
        struct flow_keys keys;
 
-       keys.src = inet->inet_saddr;
-       keys.dst = inet->inet_daddr;
-       keys.port16[0] = inet->inet_sport;
-       keys.port16[1] = inet->inet_dport;
+       memset(&keys, 0, sizeof(keys));
+
+       keys.addrs.v4addrs.src = inet->inet_saddr;
+       keys.addrs.v4addrs.dst = inet->inet_daddr;
+       keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
+       keys.ports.src = inet->inet_sport;
+       keys.ports.dst = inet->inet_dport;
 
        sk->sk_txhash = flow_hash_from_keys(&keys);
 }
@@ -478,6 +495,16 @@ enum ip_defrag_users {
        IP_DEFRAG_MACVLAN,
 };
 
+/* Return true if the value of 'user' is between 'lower_bond'
+ * and 'upper_bond', inclusive.
+ */
+static inline bool ip_defrag_user_in_between(u32 user,
+                                            enum ip_defrag_users lower_bond,
+                                            enum ip_defrag_users upper_bond)
+{
+       return user >= lower_bond && user <= upper_bond;
+}
+
 int ip_defrag(struct sk_buff *skb, u32 user);
 #ifdef CONFIG_INET
 struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user);
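A sketch of the range check this helper replaces open-coded comparisons with; the conntrack ids shown are existing enum ip_defrag_users entries, which is the range the callers in this series test:

/* Sketch: classify a defrag user as one of the per-zone conntrack
 * input ids.
 */
static bool foo_user_is_conntrack_in(u32 user)
{
	return ip_defrag_user_in_between(user, IP_DEFRAG_CONNTRACK_IN,
					 __IP_DEFRAG_CONNTRACK_IN_END);
}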
index 20e80fa7bbdd5a0effd39ed6761ac167912f7da3..3b76849c190fc2ce79b59d07466a05182d2b99fe 100644 (file)
@@ -120,45 +120,19 @@ struct rt6_info {
        struct rt6key                   rt6i_src;
        struct rt6key                   rt6i_prefsrc;
 
+       struct list_head                rt6i_uncached;
+       struct uncached_list            *rt6i_uncached_list;
+
        struct inet6_dev                *rt6i_idev;
-       unsigned long                   _rt6i_peer;
+       struct rt6_info * __percpu      *rt6i_pcpu;
 
        u32                             rt6i_metric;
+       u32                             rt6i_pmtu;
        /* more non-fragment space at head required */
        unsigned short                  rt6i_nfheader_len;
        u8                              rt6i_protocol;
 };
 
-static inline struct inet_peer *rt6_peer_ptr(struct rt6_info *rt)
-{
-       return inetpeer_ptr(rt->_rt6i_peer);
-}
-
-static inline bool rt6_has_peer(struct rt6_info *rt)
-{
-       return inetpeer_ptr_is_peer(rt->_rt6i_peer);
-}
-
-static inline void __rt6_set_peer(struct rt6_info *rt, struct inet_peer *peer)
-{
-       __inetpeer_ptr_set_peer(&rt->_rt6i_peer, peer);
-}
-
-static inline bool rt6_set_peer(struct rt6_info *rt, struct inet_peer *peer)
-{
-       return inetpeer_ptr_set_peer(&rt->_rt6i_peer, peer);
-}
-
-static inline void rt6_init_peer(struct rt6_info *rt, struct inet_peer_base *base)
-{
-       inetpeer_init_ptr(&rt->_rt6i_peer, base);
-}
-
-static inline void rt6_transfer_peer(struct rt6_info *rt, struct rt6_info *ort)
-{
-       inetpeer_transfer_peer(&rt->_rt6i_peer, &ort->_rt6i_peer);
-}
-
 static inline struct inet6_dev *ip6_dst_idev(struct dst_entry *dst)
 {
        return ((struct rt6_info *)dst)->rt6i_idev;
@@ -189,13 +163,12 @@ static inline void rt6_update_expires(struct rt6_info *rt0, int timeout)
        rt0->rt6i_flags |= RTF_EXPIRES;
 }
 
-static inline void rt6_set_from(struct rt6_info *rt, struct rt6_info *from)
+static inline u32 rt6_get_cookie(const struct rt6_info *rt)
 {
-       struct dst_entry *new = (struct dst_entry *) from;
+       if (rt->rt6i_flags & RTF_PCPU || unlikely(rt->dst.flags & DST_NOCACHE))
+               rt = (struct rt6_info *)(rt->dst.from);
 
-       rt->rt6i_flags &= ~RTF_EXPIRES;
-       dst_hold(new);
-       rt->dst.from = new;
+       return rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
 }
 
 static inline void ip6_rt_put(struct rt6_info *rt)
index 5e192068e6cb61a78d9b19b2b58bffd7c68b44bb..297629aadb190d4cfcdec41c241efa2b0db76a71 100644 (file)
@@ -145,7 +145,7 @@ static inline void __ip6_dst_store(struct sock *sk, struct dst_entry *dst,
 #ifdef CONFIG_IPV6_SUBTREES
        np->saddr_cache = saddr;
 #endif
-       np->dst_cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
+       np->dst_cookie = rt6_get_cookie(rt);
 }
 
 static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst,
@@ -163,11 +163,14 @@ static inline bool ipv6_unicast_destination(const struct sk_buff *skb)
        return rt->rt6i_flags & RTF_LOCAL;
 }
 
-static inline bool ipv6_anycast_destination(const struct sk_buff *skb)
+static inline bool ipv6_anycast_destination(const struct dst_entry *dst,
+                                           const struct in6_addr *daddr)
 {
-       struct rt6_info *rt = (struct rt6_info *) skb_dst(skb);
+       struct rt6_info *rt = (struct rt6_info *)dst;
 
-       return rt->rt6i_flags & RTF_ANYCAST;
+       return rt->rt6i_flags & RTF_ANYCAST ||
+               (rt->rt6i_dst.plen != 128 &&
+                ipv6_addr_equal(&rt->rt6i_dst.addr, daddr));
 }
 
 int ip6_fragment(struct sock *sk, struct sk_buff *skb,
@@ -194,9 +197,15 @@ static inline bool ip6_sk_ignore_df(const struct sock *sk)
               inet6_sk(sk)->pmtudisc == IPV6_PMTUDISC_OMIT;
 }
 
-static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt)
+static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt,
+                                          struct in6_addr *daddr)
 {
-       return &rt->rt6i_gateway;
+       if (rt->rt6i_flags & RTF_GATEWAY)
+               return &rt->rt6i_gateway;
+       else if (unlikely(rt->rt6i_flags & RTF_CACHE))
+               return &rt->rt6i_dst.addr;
+       else
+               return daddr;
 }
 
 #endif
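A sketch of the updated call pattern: callers now pass the packet's daddr so host routes and cached clones resolve the correct neighbour, here using the existing __ipv6_neigh_lookup() from <net/ndisc.h> (foo_ name illustrative):

/* Sketch: neighbour resolution for IPv6 output with the new helper;
 * gatewayed routes still yield rt6i_gateway, others fall back as above.
 */
static struct neighbour *foo_ip6_neigh(struct dst_entry *dst,
				       struct in6_addr *daddr)
{
	struct rt6_info *rt = (struct rt6_info *)dst;

	return __ipv6_neigh_lookup(dst->dev, rt6_nexthop(rt, daddr));
}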
index eec8ad3c98432af6250a7a0c659d443da90ce45a..82dbdb092a5d1c43d088fea8055c1bcafee156c5 100644 (file)
@@ -19,7 +19,7 @@
 #include <net/if_inet6.h>
 #include <net/ndisc.h>
 #include <net/flow.h>
-#include <net/flow_keys.h>
+#include <net/flow_dissector.h>
 #include <net/snmp.h>
 
 #define SIN6_LEN_RFC2133       24
@@ -239,8 +239,10 @@ struct ip6_flowlabel {
        struct net              *fl_net;
 };
 
-#define IPV6_FLOWINFO_MASK     cpu_to_be32(0x0FFFFFFF)
-#define IPV6_FLOWLABEL_MASK    cpu_to_be32(0x000FFFFF)
+#define IPV6_FLOWINFO_MASK             cpu_to_be32(0x0FFFFFFF)
+#define IPV6_FLOWLABEL_MASK            cpu_to_be32(0x000FFFFF)
+#define IPV6_FLOWLABEL_STATELESS_FLAG  cpu_to_be32(0x00080000)
+
 #define IPV6_TCLASS_MASK (IPV6_FLOWINFO_MASK & ~IPV6_FLOWLABEL_MASK)
 #define IPV6_TCLASS_SHIFT      20
 
@@ -669,8 +671,9 @@ static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_add
        return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr));
 }
 
-void ipv6_select_ident(struct net *net, struct frag_hdr *fhdr,
-                      struct rt6_info *rt);
+__be32 ipv6_select_ident(struct net *net,
+                        const struct in6_addr *daddr,
+                        const struct in6_addr *saddr);
 void ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb);
 
 int ip6_dst_hoplimit(struct dst_entry *dst);
@@ -689,6 +692,20 @@ static inline int ip6_sk_dst_hoplimit(struct ipv6_pinfo *np, struct flowi6 *fl6,
        return hlimit;
 }
 
+/* copy IPv6 saddr & daddr to flow_keys, possibly using 64bit load/store
+ * Equivalent to :     flow->v6addrs.src = iph->saddr;
+ *                     flow->v6addrs.dst = iph->daddr;
+ */
+static inline void iph_to_flow_copy_v6addrs(struct flow_keys *flow,
+                                           const struct ipv6hdr *iph)
+{
+       BUILD_BUG_ON(offsetof(typeof(flow->addrs), v6addrs.dst) !=
+                    offsetof(typeof(flow->addrs), v6addrs.src) +
+                    sizeof(flow->addrs.v6addrs.src));
+       memcpy(&flow->addrs.v6addrs, &iph->saddr, sizeof(flow->addrs.v6addrs));
+       flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+}
+
 #if IS_ENABLED(CONFIG_IPV6)
 static inline void ip6_set_txhash(struct sock *sk)
 {
@@ -696,10 +713,15 @@ static inline void ip6_set_txhash(struct sock *sk)
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct flow_keys keys;
 
-       keys.src = (__force __be32)ipv6_addr_hash(&np->saddr);
-       keys.dst = (__force __be32)ipv6_addr_hash(&sk->sk_v6_daddr);
-       keys.port16[0] = inet->inet_sport;
-       keys.port16[1] = inet->inet_dport;
+       memset(&keys, 0, sizeof(keys));
+
+       memcpy(&keys.addrs.v6addrs.src, &np->saddr,
+              sizeof(keys.addrs.v6addrs.src));
+       memcpy(&keys.addrs.v6addrs.dst, &sk->sk_v6_daddr,
+              sizeof(keys.addrs.v6addrs.dst));
+       keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+       keys.ports.src = inet->inet_sport;
+       keys.ports.dst = inet->inet_dport;
 
        sk->sk_txhash = flow_hash_from_keys(&keys);
 }
@@ -719,6 +741,9 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
                hash ^= hash >> 12;
 
                flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;
+
+               if (net->ipv6.sysctl.flowlabel_state_ranges)
+                       flowlabel |= IPV6_FLOWLABEL_STATELESS_FLAG;
        }
 
        return flowlabel;
index 0134681acc4cfe354c6546be1a1ffefa5c078cde..fe994d2e52869d06be7d26b48b942e666e60f2da 100644 (file)
@@ -96,7 +96,7 @@ static __inline__ char llc_backlog_type(struct sk_buff *skb)
 }
 
 struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority,
-                         struct proto *prot);
+                         struct proto *prot, int kern);
 void llc_sk_free(struct sock *sk);
 
 void llc_sk_reset(struct sock *sk);
index fc57f6b82fc59e4dc6ae72b802856ab80e0af6e9..887fe95b980547d81b00b8be2648a8415bdc8429 100644 (file)
@@ -337,10 +337,16 @@ enum ieee80211_bss_change {
  * enum ieee80211_event_type - event to be notified to the low level driver
  * @RSSI_EVENT: AP's RSSI crossed a threshold set by the driver.
  * @MLME_EVENT: event related to MLME
+ * @BAR_RX_EVENT: a BAR was received
+ * @BA_FRAME_TIMEOUT: Frames were released from the reordering buffer because
+ *     they timed out. This won't be called for each frame released, but only
+ *     once each time the timeout triggers.
  */
 enum ieee80211_event_type {
        RSSI_EVENT,
        MLME_EVENT,
+       BAR_RX_EVENT,
+       BA_FRAME_TIMEOUT,
 };
 
 /**
@@ -399,18 +405,32 @@ struct ieee80211_mlme_event {
        u16 reason;
 };
 
+/**
+ * struct ieee80211_ba_event - data attached for BlockAck related events
+ * @sta: pointer to the &ieee80211_sta to which this event relates
+ * @tid: the tid
+ * @ssn: the starting sequence number (for %BAR_RX_EVENT)
+ */
+struct ieee80211_ba_event {
+       struct ieee80211_sta *sta;
+       u16 tid;
+       u16 ssn;
+};
+
 /**
  * struct ieee80211_event - event to be sent to the driver
  * @type: The event itself. See &enum ieee80211_event_type.
  * @rssi: relevant if &type is %RSSI_EVENT
  * @mlme: relevant if &type is %AUTH_EVENT
- * @u:    union holding the above two fields
+ * @ba: relevant if &type is %BAR_RX_EVENT or %BA_FRAME_TIMEOUT
+ * @u: union holding the fields above
  */
 struct ieee80211_event {
        enum ieee80211_event_type type;
        union {
                struct ieee80211_rssi_event rssi;
                struct ieee80211_mlme_event mlme;
+               struct ieee80211_ba_event ba;
        } u;
 };
 
@@ -1481,6 +1501,47 @@ struct ieee80211_key_conf {
        u8 key[0];
 };
 
+#define IEEE80211_MAX_PN_LEN   16
+
+/**
+ * struct ieee80211_key_seq - key sequence counter
+ *
+ * @tkip: TKIP data, containing IV32 and IV16 in host byte order
+ * @ccmp: PN data, most significant byte first (big endian,
+ *     reverse order than in packet)
+ * @aes_cmac: PN data, most significant byte first (big endian,
+ *     reverse order than in packet)
+ * @aes_gmac: PN data, most significant byte first (big endian,
+ *     reverse order than in packet)
+ * @gcmp: PN data, most significant byte first (big endian,
+ *     reverse order than in packet)
+ * @hw: data for HW-only (e.g. cipher scheme) keys
+ */
+struct ieee80211_key_seq {
+       union {
+               struct {
+                       u32 iv32;
+                       u16 iv16;
+               } tkip;
+               struct {
+                       u8 pn[6];
+               } ccmp;
+               struct {
+                       u8 pn[6];
+               } aes_cmac;
+               struct {
+                       u8 pn[6];
+               } aes_gmac;
+               struct {
+                       u8 pn[6];
+               } gcmp;
+               struct {
+                       u8 seq[IEEE80211_MAX_PN_LEN];
+                       u8 seq_len;
+               } hw;
+       };
+};
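To show what the new get_key_seq op (see below) is expected to fill in, a sketch of a driver callback reporting a hardware-kept CCMP PN; my_priv and key_pn are hypothetical driver state:

/* Sketch: report the 48-bit CCMP PN for a hardware key. */
struct my_priv {
	u64 key_pn[8];		/* hypothetical per-hw_key_idx PN store */
};

static void my_get_key_seq(struct ieee80211_hw *hw,
			   struct ieee80211_key_conf *key,
			   struct ieee80211_key_seq *seq)
{
	struct my_priv *mp = hw->priv;
	u64 pn = mp->key_pn[key->hw_key_idx];
	int i;

	/* most significant byte first, as documented above */
	for (i = 0; i < 6; i++)
		seq->ccmp.pn[i] = pn >> (8 * (5 - i));
}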
+
 /**
  * struct ieee80211_cipher_scheme - cipher scheme
  *
@@ -1667,8 +1728,7 @@ struct ieee80211_tx_control {
  * @sta: station table entry, %NULL for per-vif queue
  * @tid: the TID for this queue (unused for per-vif queue)
  * @ac: the AC for this queue
- * @drv_priv: data area for driver use, will always be aligned to
- *     sizeof(void *).
+ * @drv_priv: driver private area, sized by hw->txq_data_size
  *
  * The driver can obtain packets from this queue by calling
  * ieee80211_tx_dequeue().
@@ -1798,6 +1858,10 @@ struct ieee80211_txq {
  *     the driver returns 1. This also forces the driver to advertise its
  *     supported cipher suites.
  *
+ * @IEEE80211_HW_SUPPORT_FAST_XMIT: The driver/hardware supports fast-xmit;
+ *     currently this requires only the ability to calculate the duration
+ *     for frames.
+ *
  * @IEEE80211_HW_QUEUE_CONTROL: The driver wants to control per-interface
  *     queue mapping in order to use different queues (not just one per AC)
  *     for different virtual interfaces. See the doc section on HW queue
@@ -1846,7 +1910,7 @@ enum ieee80211_hw_flags {
        IEEE80211_HW_WANT_MONITOR_VIF                   = 1<<14,
        IEEE80211_HW_NO_AUTO_VIF                        = 1<<15,
        IEEE80211_HW_SW_CRYPTO_CONTROL                  = 1<<16,
-       /* free slots */
+       IEEE80211_HW_SUPPORT_FAST_XMIT                  = 1<<17,
        IEEE80211_HW_REPORTS_TX_ACK_STATUS              = 1<<18,
        IEEE80211_HW_CONNECTION_MONITOR                 = 1<<19,
        IEEE80211_HW_QUEUE_CONTROL                      = 1<<20,
@@ -1940,8 +2004,8 @@ enum ieee80211_hw_flags {
  *     Use the %IEEE80211_RADIOTAP_VHT_KNOWN_* values.
  *
  * @netdev_features: netdev features to be set in each netdev created
- *     from this HW. Note only HW checksum features are currently
- *     compatible with mac80211. Other feature bits will be rejected.
+ *     from this HW. Note that not all features are usable with mac80211,
+ *     other features will be rejected during HW registration.
  *
  * @uapsd_queues: This bitmap is included in (re)association frame to indicate
  *     for each access category if it is uAPSD trigger-enabled and delivery-
@@ -2505,10 +2569,6 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
  * stack. It is always safe to pass more frames than requested,
  * but this has negative impact on power consumption.
  *
- * @FIF_PROMISC_IN_BSS: promiscuous mode within your BSS,
- *     think of the BSS as your network segment and then this corresponds
- *     to the regular ethernet device promiscuous mode.
- *
  * @FIF_ALLMULTI: pass all multicast frames, this is used if requested
  *     by the user or if the hardware is not capable of filtering by
  *     multicast address.
@@ -2525,8 +2585,8 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
  *     mac80211 needs to do and the amount of CPU wakeups, so you should
  *     honour this flag if possible.
  *
- * @FIF_CONTROL: pass control frames (except for PS Poll), if PROMISC_IN_BSS
- *     is not set then only those addressed to this station.
+ * @FIF_CONTROL: pass control frames (except for PS Poll) addressed to this
+ *     station
  *
  * @FIF_OTHER_BSS: pass frames destined to other BSSes
  *
@@ -2536,7 +2596,6 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
  * @FIF_PROBE_REQ: pass probe request frames
  */
 enum ieee80211_filter_flags {
-       FIF_PROMISC_IN_BSS      = 1<<0,
        FIF_ALLMULTI            = 1<<1,
        FIF_FCSFAIL             = 1<<2,
        FIF_PLCPFAIL            = 1<<3,
@@ -2819,9 +2878,9 @@ enum ieee80211_reconfig_type {
  *     Returns zero if statistics are available.
  *     The callback can sleep.
  *
- * @get_tkip_seq: If your device implements TKIP encryption in hardware this
- *     callback should be provided to read the TKIP transmit IVs (both IV32
- *     and IV16) for the given key from hardware.
+ * @get_key_seq: If your device implements encryption in hardware and does
+ *     IV/PN assignment then this callback should be provided to read the
+ *     IV/PN for the given key from hardware.
  *     The callback must be atomic.
  *
  * @set_frag_threshold: Configuration of fragmentation threshold. Assign this
@@ -3004,7 +3063,7 @@ enum ieee80211_reconfig_type {
  *     The callback can sleep.
  * @event_callback: Notify driver about any event in mac80211. See
  *     &enum ieee80211_event_type for the different types.
- *     The callback can sleep.
+ *     The callback must be atomic.
  *
  * @release_buffered_frames: Release buffered frames according to the given
  *     parameters. In the case where the driver buffers some frames for
@@ -3220,8 +3279,9 @@ struct ieee80211_ops {
                                 struct ieee80211_vif *vif);
        int (*get_stats)(struct ieee80211_hw *hw,
                         struct ieee80211_low_level_stats *stats);
-       void (*get_tkip_seq)(struct ieee80211_hw *hw, u8 hw_key_idx,
-                            u32 *iv32, u16 *iv16);
+       void (*get_key_seq)(struct ieee80211_hw *hw,
+                           struct ieee80211_key_conf *key,
+                           struct ieee80211_key_seq *seq);
        int (*set_frag_threshold)(struct ieee80211_hw *hw, u32 value);
        int (*set_rts_threshold)(struct ieee80211_hw *hw, u32 value);
        int (*sta_add)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
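
The replacement callback fills a struct ieee80211_key_seq for the given key
instead of returning raw TKIP IVs; a hedged driver-side sketch, assuming
hypothetical foo_read_iv*() accessors (the callback must stay atomic, so the
accessors must not sleep; a real driver would cover CCMP/GCMP PNs too):

static void foo_get_key_seq(struct ieee80211_hw *hw,
                            struct ieee80211_key_conf *key,
                            struct ieee80211_key_seq *seq)
{
        switch (key->cipher) {
        case WLAN_CIPHER_SUITE_TKIP:
                seq->tkip.iv32 = foo_read_iv32(hw->priv, key->hw_key_idx);
                seq->tkip.iv16 = foo_read_iv16(hw->priv, key->hw_key_idx);
                break;
        default:
                break;  /* other ciphers left to the stack */
        }
}
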
@@ -3469,14 +3529,15 @@ enum ieee80211_tpt_led_trigger_flags {
 };
 
 #ifdef CONFIG_MAC80211_LEDS
-char *__ieee80211_get_tx_led_name(struct ieee80211_hw *hw);
-char *__ieee80211_get_rx_led_name(struct ieee80211_hw *hw);
-char *__ieee80211_get_assoc_led_name(struct ieee80211_hw *hw);
-char *__ieee80211_get_radio_led_name(struct ieee80211_hw *hw);
-char *__ieee80211_create_tpt_led_trigger(struct ieee80211_hw *hw,
-                                        unsigned int flags,
-                                        const struct ieee80211_tpt_blink *blink_table,
-                                        unsigned int blink_table_len);
+const char *__ieee80211_get_tx_led_name(struct ieee80211_hw *hw);
+const char *__ieee80211_get_rx_led_name(struct ieee80211_hw *hw);
+const char *__ieee80211_get_assoc_led_name(struct ieee80211_hw *hw);
+const char *__ieee80211_get_radio_led_name(struct ieee80211_hw *hw);
+const char *
+__ieee80211_create_tpt_led_trigger(struct ieee80211_hw *hw,
+                                  unsigned int flags,
+                                  const struct ieee80211_tpt_blink *blink_table,
+                                  unsigned int blink_table_len);
 #endif
 /**
  * ieee80211_get_tx_led_name - get name of TX LED
@@ -3490,7 +3551,7 @@ char *__ieee80211_create_tpt_led_trigger(struct ieee80211_hw *hw,
  *
  * Return: The name of the LED trigger. %NULL if not configured for LEDs.
  */
-static inline char *ieee80211_get_tx_led_name(struct ieee80211_hw *hw)
+static inline const char *ieee80211_get_tx_led_name(struct ieee80211_hw *hw)
 {
 #ifdef CONFIG_MAC80211_LEDS
        return __ieee80211_get_tx_led_name(hw);
@@ -3511,7 +3572,7 @@ static inline char *ieee80211_get_tx_led_name(struct ieee80211_hw *hw)
  *
  * Return: The name of the LED trigger. %NULL if not configured for LEDs.
  */
-static inline char *ieee80211_get_rx_led_name(struct ieee80211_hw *hw)
+static inline const char *ieee80211_get_rx_led_name(struct ieee80211_hw *hw)
 {
 #ifdef CONFIG_MAC80211_LEDS
        return __ieee80211_get_rx_led_name(hw);
@@ -3532,7 +3593,7 @@ static inline char *ieee80211_get_rx_led_name(struct ieee80211_hw *hw)
  *
  * Return: The name of the LED trigger. %NULL if not configured for LEDs.
  */
-static inline char *ieee80211_get_assoc_led_name(struct ieee80211_hw *hw)
+static inline const char *ieee80211_get_assoc_led_name(struct ieee80211_hw *hw)
 {
 #ifdef CONFIG_MAC80211_LEDS
        return __ieee80211_get_assoc_led_name(hw);
@@ -3553,7 +3614,7 @@ static inline char *ieee80211_get_assoc_led_name(struct ieee80211_hw *hw)
  *
  * Return: The name of the LED trigger. %NULL if not configured for LEDs.
  */
-static inline char *ieee80211_get_radio_led_name(struct ieee80211_hw *hw)
+static inline const char *ieee80211_get_radio_led_name(struct ieee80211_hw *hw)
 {
 #ifdef CONFIG_MAC80211_LEDS
        return __ieee80211_get_radio_led_name(hw);
@@ -3574,7 +3635,7 @@ static inline char *ieee80211_get_radio_led_name(struct ieee80211_hw *hw)
  *
  * Note: This function must be called before ieee80211_register_hw().
  */
-static inline char *
+static inline const char *
 ieee80211_create_tpt_led_trigger(struct ieee80211_hw *hw, unsigned int flags,
                                 const struct ieee80211_tpt_blink *blink_table,
                                 unsigned int blink_table_len)
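
With the return type now const-qualified, callers store the trigger name in
a const char * rather than a writable pointer; a minimal usage sketch:

        const char *trig = ieee80211_get_tx_led_name(hw);  /* may be NULL */

        if (trig)
                pr_info("TX LED trigger: %s\n", trig);
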
@@ -4254,40 +4315,6 @@ void ieee80211_get_tkip_p2k(struct ieee80211_key_conf *keyconf,
 void ieee80211_aes_cmac_calculate_k1_k2(struct ieee80211_key_conf *keyconf,
                                        u8 *k1, u8 *k2);
 
-/**
- * struct ieee80211_key_seq - key sequence counter
- *
- * @tkip: TKIP data, containing IV32 and IV16 in host byte order
- * @ccmp: PN data, most significant byte first (big endian,
- *     reverse order than in packet)
- * @aes_cmac: PN data, most significant byte first (big endian,
- *     reverse order than in packet)
- * @aes_gmac: PN data, most significant byte first (big endian,
- *     reverse order than in packet)
- * @gcmp: PN data, most significant byte first (big endian,
- *     reverse order than in packet)
- */
-struct ieee80211_key_seq {
-       union {
-               struct {
-                       u32 iv32;
-                       u16 iv16;
-               } tkip;
-               struct {
-                       u8 pn[6];
-               } ccmp;
-               struct {
-                       u8 pn[6];
-               } aes_cmac;
-               struct {
-                       u8 pn[6];
-               } aes_gmac;
-               struct {
-                       u8 pn[6];
-               } gcmp;
-       };
-};
-
 /**
  * ieee80211_get_key_tx_seq - get key TX sequence counter
  *
index 7df28a4c23f98793626371d1e2334ad91f7ebf87..9605c7f7453fafd76806c4718ab4827e9fb9881a 100644 (file)
@@ -89,41 +89,26 @@ struct ieee802154_hw {
 #define IEEE802154_HW_TX_OMIT_CKSUM    0x00000001
 /* Indicates that receiver will autorespond with ACK frames. */
 #define IEEE802154_HW_AACK             0x00000002
-/* Indicates that transceiver will support transmit power setting. */
-#define IEEE802154_HW_TXPOWER          0x00000004
 /* Indicates that transceiver will support listen before transmit. */
-#define IEEE802154_HW_LBT              0x00000008
-/* Indicates that transceiver will support cca mode setting. */
-#define IEEE802154_HW_CCA_MODE         0x00000010
-/* Indicates that transceiver will support cca ed level setting. */
-#define IEEE802154_HW_CCA_ED_LEVEL     0x00000020
+#define IEEE802154_HW_LBT              0x00000004
 /* Indicates that transceiver will support csma (max_be, min_be, csma retries)
  * settings. */
-#define IEEE802154_HW_CSMA_PARAMS      0x00000040
+#define IEEE802154_HW_CSMA_PARAMS      0x00000008
 /* Indicates that transceiver will support ARET frame retries setting. */
-#define IEEE802154_HW_FRAME_RETRIES    0x00000080
+#define IEEE802154_HW_FRAME_RETRIES    0x00000010
 /* Indicates that transceiver will support hardware address filter setting. */
-#define IEEE802154_HW_AFILT            0x00000100
+#define IEEE802154_HW_AFILT            0x00000020
 /* Indicates that transceiver will support promiscuous mode setting. */
-#define IEEE802154_HW_PROMISCUOUS      0x00000200
+#define IEEE802154_HW_PROMISCUOUS      0x00000040
 /* Indicates that receiver omits FCS. */
-#define IEEE802154_HW_RX_OMIT_CKSUM    0x00000400
+#define IEEE802154_HW_RX_OMIT_CKSUM    0x00000080
 /* Indicates that receiver will not filter frames with bad checksum. */
-#define IEEE802154_HW_RX_DROP_BAD_CKSUM        0x00000800
+#define IEEE802154_HW_RX_DROP_BAD_CKSUM        0x00000100
 
 /* Indicates that receiver omits FCS and xmitter will add FCS on its own. */
 #define IEEE802154_HW_OMIT_CKSUM       (IEEE802154_HW_TX_OMIT_CKSUM | \
                                         IEEE802154_HW_RX_OMIT_CKSUM)
 
-/* This groups the most common CSMA support fields into one. */
-#define IEEE802154_HW_CSMA             (IEEE802154_HW_CCA_MODE | \
-                                        IEEE802154_HW_CCA_ED_LEVEL | \
-                                        IEEE802154_HW_CSMA_PARAMS)
-
-/* This groups the most common ARET support fields into one. */
-#define IEEE802154_HW_ARET             (IEEE802154_HW_CSMA | \
-                                        IEEE802154_HW_FRAME_RETRIES)
-
 /* struct ieee802154_ops - callbacks from mac802154 to the driver
  *
  * This structure contains various callbacks that the driver may
@@ -171,7 +156,7 @@ struct ieee802154_hw {
  *       Returns either zero, or negative errno.
  *
  * set_txpower:
- *       Set radio transmit power in dB. Called with pib_lock held.
+ *       Set radio transmit power in mBm. Called with pib_lock held.
  *       Returns either zero, or negative errno.
  *
  * set_lbt
@@ -184,7 +169,7 @@ struct ieee802154_hw {
  *       Returns either zero, or negative errno.
  *
  * set_cca_ed_level
- *       Sets the CCA energy detection threshold in dBm. Called with pib_lock
+ *       Sets the CCA energy detection threshold in mBm. Called with pib_lock
  *       held.
  *       Returns either zero, or negative errno.
  *
@@ -213,12 +198,11 @@ struct ieee802154_ops {
        int             (*set_hw_addr_filt)(struct ieee802154_hw *hw,
                                            struct ieee802154_hw_addr_filt *filt,
                                            unsigned long changed);
-       int             (*set_txpower)(struct ieee802154_hw *hw, s8 dbm);
+       int             (*set_txpower)(struct ieee802154_hw *hw, s32 mbm);
        int             (*set_lbt)(struct ieee802154_hw *hw, bool on);
        int             (*set_cca_mode)(struct ieee802154_hw *hw,
                                        const struct wpan_phy_cca *cca);
-       int             (*set_cca_ed_level)(struct ieee802154_hw *hw,
-                                           s32 level);
+       int             (*set_cca_ed_level)(struct ieee802154_hw *hw, s32 mbm);
        int             (*set_csma_params)(struct ieee802154_hw *hw,
                                           u8 min_be, u8 max_be, u8 retries);
        int             (*set_frame_retries)(struct ieee802154_hw *hw,
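
Both callbacks now take mBm rather than dBm (1 dBm == 100 mBm); a hedged
sketch of adapting a driver whose hardware register takes whole dBm, with
foo_write_txpower() standing in for the device access:

static int foo_set_txpower(struct ieee802154_hw *hw, s32 mbm)
{
        s8 dbm = mbm / 100;

        if (mbm % 100)
                return -EINVAL; /* this part has no sub-dBm granularity */

        return foo_write_txpower(hw->priv, dbm);
}
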
index f733656404de0ea5cba33c83a641e37dc4e4d5e5..72eb2372329453e2f118a9af1627c7ad27e50721 100644 (file)
@@ -58,6 +58,7 @@ struct net {
        struct list_head        exit_list;      /* Use only net_mutex */
 
        struct user_namespace   *user_ns;       /* Owning user namespace */
+       spinlock_t              nsid_lock;
        struct idr              netns_ids;
 
        struct ns_common        ns;
@@ -271,7 +272,9 @@ static inline struct net *read_pnet(const possible_net_t *pnet)
 #define __net_initconst        __initconst
 #endif
 
+int peernet2id_alloc(struct net *net, struct net *peer);
 int peernet2id(struct net *net, struct net *peer);
+bool peernet_has_id(struct net *net, struct net *peer);
 struct net *get_net_ns_by_id(struct net *net, int id);
 
 struct pernet_operations {
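
Going by the names alone (a hedged reading, not confirmed by this hunk):
peernet2id_alloc() assigns an id on first use, peernet2id() only looks one
up, and peernet_has_id() tests for existence, with the new nsid_lock
presumably serializing the idr:

        int id;

        if (!peernet_has_id(net, peer))
                pr_debug("peer netns has no nsid yet\n");

        id = peernet2id_alloc(net, peer);  /* allocates an nsid if missing */
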
index e6bcf55dcf2008b83a8a40896fcce2e011653e1b..3d6f48ca40a7493a4becce92c5d4abb1636adcc0 100644 (file)
@@ -819,6 +819,7 @@ unsigned int nft_do_chain(struct nft_pktinfo *pkt,
  *     @use: number of chain references to this table
  *     @flags: table flag (see enum nft_table_flags)
  *     @name: name of the table
+ *     @dev: the device this table is bound to (if any)
  */
 struct nft_table {
        struct list_head                list;
@@ -828,6 +829,11 @@ struct nft_table {
        u32                             use;
        u16                             flags;
        char                            name[NFT_TABLE_MAXNAMELEN];
+       struct net_device               *dev;
+};
+
+enum nft_af_flags {
+       NFT_AF_NEEDS_DEV        = (1 << 0),
 };
 
 /**
@@ -838,6 +844,7 @@ struct nft_table {
  *     @nhooks: number of hooks in this family
  *     @owner: module owner
  *     @tables: used internally
+ *     @flags: family flags
  *     @nops: number of hook ops in this family
  *     @hook_ops_init: initialization function for chain hook ops
  *     @hooks: hookfn overrides for packet validation
@@ -848,6 +855,7 @@ struct nft_af_info {
        unsigned int                    nhooks;
        struct module                   *owner;
        struct list_head                tables;
+       u32                             flags;
        unsigned int                    nops;
        void                            (*hook_ops_init)(struct nf_hook_ops *,
                                                         unsigned int);
index 614a49be68a92897525ce9a1b5a7afd458faf126..c68926b4899c36e77c38b6244dfb8d126685b905 100644 (file)
@@ -19,6 +19,7 @@ struct sock;
 struct local_ports {
        seqlock_t       lock;
        int             range[2];
+       bool            warned;
 };
 
 struct ping_group_range {
@@ -77,6 +78,8 @@ struct netns_ipv4 {
        struct local_ports ip_local_ports;
 
        int sysctl_tcp_ecn;
+       int sysctl_tcp_ecn_fallback;
+
        int sysctl_ip_no_pmtu_disc;
        int sysctl_ip_fwd_use_pmtu;
        int sysctl_ip_nonlocal_bind;
index d2527bf81142e28508e4d415353d8a4ecf8c609e..8d93544a2d2b5f21c7ed8b8137394df0758dbef3 100644 (file)
@@ -34,6 +34,7 @@ struct netns_sysctl_ipv6 {
        int fwmark_reflect;
        int idgen_retries;
        int idgen_delay;
+       int flowlabel_state_ranges;
 };
 
 struct netns_ipv6 {
index eee608b12cc95f3267a00eed85467f94f761d298..c807811460191dd20c1ba13eb5be8e6555c97167 100644 (file)
@@ -13,6 +13,7 @@ struct netns_nftables {
        struct nft_af_info      *inet;
        struct nft_af_info      *arp;
        struct nft_af_info      *bridge;
+       struct nft_af_info      *netdev;
        unsigned int            base_seq;
        u8                      gencursor;
 };
index f8b5bc997959f7ba171258bd31852c696873bc21..0badebd1de7fbe179542a440f31e9042821a2e3d 100644 (file)
@@ -100,6 +100,8 @@ enum nl802154_attrs {
 
        NL802154_ATTR_EXTENDED_ADDR,
 
+       NL802154_ATTR_WPAN_PHY_CAPS,
+
        /* add attributes here, update the policy in nl802154.c */
 
        __NL802154_ATTR_AFTER_LAST,
@@ -119,6 +121,61 @@ enum nl802154_iftype {
        NL802154_IFTYPE_MAX = NUM_NL802154_IFTYPES - 1
 };
 
+/**
+ * enum nl802154_wpan_phy_capability_attr - wpan phy capability attributes
+ *
+ * @__NL802154_CAP_ATTR_INVALID: attribute number 0 is reserved
+ * @NL802154_CAP_ATTR_CHANNELS: a nested attribute for nl802154_channel_attr
+ * @NL802154_CAP_ATTR_TX_POWERS: a nested attribute for
+ *     nl802154_wpan_phy_tx_power
+ * @NL802154_CAP_ATTR_CCA_ED_LEVELS: a nested attribute listing the
+ *     supported cca_ed_level values
+ * @NL802154_CAP_ATTR_CCA_MODES: nl802154_cca_modes flags
+ * @NL802154_CAP_ATTR_CCA_OPTS: nl802154_cca_opts flags
+ * @NL802154_CAP_ATTR_MIN_MINBE: minimum of minbe value
+ * @NL802154_CAP_ATTR_MAX_MINBE: maximum of minbe value
+ * @NL802154_CAP_ATTR_MIN_MAXBE: minimum of maxbe value
+ * @NL802154_CAP_ATTR_MAX_MAXBE: maximum of maxbe value
+ * @NL802154_CAP_ATTR_MIN_CSMA_BACKOFFS: minimum of csma backoff value
+ * @NL802154_CAP_ATTR_MAX_CSMA_BACKOFFS: maximum of csma backoffs value
+ * @NL802154_CAP_ATTR_MIN_FRAME_RETRIES: minimum of frame retries value
+ * @NL802154_CAP_ATTR_MAX_FRAME_RETRIES: maximum of frame retries value
+ * @NL802154_CAP_ATTR_IFTYPES: nl802154_iftype flags
+ * @NL802154_CAP_ATTR_LBT: nl802154_supported_bool_states flags
+ * @NL802154_CAP_ATTR_MAX: highest cap attribute currently defined
+ * @__NL802154_CAP_ATTR_AFTER_LAST: internal use
+ */
+enum nl802154_wpan_phy_capability_attr {
+       __NL802154_CAP_ATTR_INVALID,
+
+       NL802154_CAP_ATTR_IFTYPES,
+
+       NL802154_CAP_ATTR_CHANNELS,
+       NL802154_CAP_ATTR_TX_POWERS,
+
+       NL802154_CAP_ATTR_CCA_ED_LEVELS,
+       NL802154_CAP_ATTR_CCA_MODES,
+       NL802154_CAP_ATTR_CCA_OPTS,
+
+       NL802154_CAP_ATTR_MIN_MINBE,
+       NL802154_CAP_ATTR_MAX_MINBE,
+
+       NL802154_CAP_ATTR_MIN_MAXBE,
+       NL802154_CAP_ATTR_MAX_MAXBE,
+
+       NL802154_CAP_ATTR_MIN_CSMA_BACKOFFS,
+       NL802154_CAP_ATTR_MAX_CSMA_BACKOFFS,
+
+       NL802154_CAP_ATTR_MIN_FRAME_RETRIES,
+       NL802154_CAP_ATTR_MAX_FRAME_RETRIES,
+
+       NL802154_CAP_ATTR_LBT,
+
+       /* keep last */
+       __NL802154_CAP_ATTR_AFTER_LAST,
+       NL802154_CAP_ATTR_MAX = __NL802154_CAP_ATTR_AFTER_LAST - 1
+};
+
 /**
  * enum nl802154_cca_modes - cca modes
  *
@@ -162,4 +219,26 @@ enum nl802154_cca_opts {
        NL802154_CCA_OPT_ATTR_MAX = __NL802154_CCA_OPT_ATTR_AFTER_LAST - 1
 };
 
+/**
+ * enum nl802154_supported_bool_states - bool states for bool capability entry
+ *
+ * @NL802154_SUPPORTED_BOOL_FALSE: indicates that only false is supported
+ * @NL802154_SUPPORTED_BOOL_TRUE: indicates that only true is supported
+ * @__NL802154_SUPPORTED_BOOL_INVALD: reserved
+ * @NL802154_SUPPORTED_BOOL_BOTH: indicates that both true and false are
+ *     supported
+ * @__NL802154_SUPPORTED_BOOL_AFTER_LAST: Internal
+ * @NL802154_SUPPORTED_BOOL_MAX: highest value for bool states
+ */
+enum nl802154_supported_bool_states {
+       NL802154_SUPPORTED_BOOL_FALSE,
+       NL802154_SUPPORTED_BOOL_TRUE,
+       /* to handle them in a mask */
+       __NL802154_SUPPORTED_BOOL_INVALD,
+       NL802154_SUPPORTED_BOOL_BOTH,
+
+       /* keep last */
+       __NL802154_SUPPORTED_BOOL_AFTER_LAST,
+       NL802154_SUPPORTED_BOOL_MAX = __NL802154_SUPPORTED_BOOL_AFTER_LAST - 1
+};
+
 #endif /* __NL802154_H */
index 9f4265ce88927b0fe1d7418dbeff5f306855ae15..87935cad2f7b37eadd01d30a3d39a9f73d0145b9 100644 (file)
@@ -64,6 +64,7 @@ struct request_sock {
        struct timer_list               rsk_timer;
        const struct request_sock_ops   *rsk_ops;
        struct sock                     *sk;
+       u32                             *saved_syn;
        u32                             secid;
        u32                             peer_secid;
 };
@@ -77,7 +78,7 @@ reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener)
                req->rsk_ops = ops;
                sock_hold(sk_listener);
                req->rsk_listener = sk_listener;
-
+               req->saved_syn = NULL;
                /* Following is temporary. It is coupled with debugging
                 * helpers in reqsk_put() & reqsk_free()
                 */
@@ -104,6 +105,7 @@ static inline void reqsk_free(struct request_sock *req)
        req->rsk_ops->destructor(req);
        if (req->rsk_listener)
                sock_put(req->rsk_listener);
+       kfree(req->saved_syn);
        kmem_cache_free(req->rsk_ops->slab, req);
 }
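
A hedged reconstruction of how the new saved_syn field might be filled when
the request sock is created: a length-prefixed copy of the SYN's network and
TCP headers, which reqsk_free() above now releases. foo_save_syn() is an
illustrative name, not the actual helper:

static void foo_save_syn(const struct sk_buff *skb, struct request_sock *req)
{
        u32 len = skb_network_header_len(skb) + tcp_hdrlen(skb);

        req->saved_syn = kmalloc(sizeof(u32) + len, GFP_ATOMIC);
        if (req->saved_syn) {
                req->saved_syn[0] = len;        /* length prefix */
                memcpy(&req->saved_syn[1], skb_network_header(skb), len);
        }
}
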
 
index 6d778efcfdfd6c8a3973e03424625667ec350c3e..2738f6f8790836b1b88d5163e5ba297b0f4421c0 100644 (file)
@@ -501,12 +501,6 @@ static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
        return sch->enqueue(skb, sch);
 }
 
-static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
-{
-       qdisc_skb_cb(skb)->pkt_len = skb->len;
-       return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
-}
-
 static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
 {
        return q->flags & TCQ_F_CPUSTATS;
@@ -745,23 +739,6 @@ static inline u32 qdisc_l2t(struct qdisc_rate_table* rtab, unsigned int pktlen)
        return rtab->data[slot];
 }
 
-#ifdef CONFIG_NET_CLS_ACT
-static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask,
-                                           int action)
-{
-       struct sk_buff *n;
-
-       n = skb_clone(skb, gfp_mask);
-
-       if (n) {
-               n->tc_verd = SET_TC_VERD(n->tc_verd, 0);
-               n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd);
-               n->tc_verd = CLR_TC_MUNGED(n->tc_verd);
-       }
-       return n;
-}
-#endif
-
 struct psched_ratecfg {
        u64     rate_bytes_ps; /* bytes per second */
        u32     mult;
index 3a4898ec8c67c5242e9467a0d32f2e68f3a55302..26c1c3171e004eb6bf7afa5f8a0b8f6672cb590f 100644 (file)
@@ -184,6 +184,7 @@ struct sock_common {
        unsigned char           skc_reuse:4;
        unsigned char           skc_reuseport:1;
        unsigned char           skc_ipv6only:1;
+       unsigned char           skc_net_refcnt:1;
        int                     skc_bound_dev_if;
        union {
                struct hlist_node       skc_bind_node;
@@ -323,6 +324,7 @@ struct sock {
 #define sk_reuse               __sk_common.skc_reuse
 #define sk_reuseport           __sk_common.skc_reuseport
 #define sk_ipv6only            __sk_common.skc_ipv6only
+#define sk_net_refcnt          __sk_common.skc_net_refcnt
 #define sk_bound_dev_if                __sk_common.skc_bound_dev_if
 #define sk_bind_node           __sk_common.skc_bind_node
 #define sk_prot                        __sk_common.skc_prot
@@ -1366,7 +1368,7 @@ static inline struct inode *SOCK_INODE(struct socket *socket)
  * Functions for memory accounting
  */
 int __sk_mem_schedule(struct sock *sk, int size, int kind);
-void __sk_mem_reclaim(struct sock *sk);
+void __sk_mem_reclaim(struct sock *sk, int amount);
 
 #define SK_MEM_QUANTUM ((int)PAGE_SIZE)
 #define SK_MEM_QUANTUM_SHIFT ilog2(SK_MEM_QUANTUM)
@@ -1407,7 +1409,7 @@ static inline void sk_mem_reclaim(struct sock *sk)
        if (!sk_has_account(sk))
                return;
        if (sk->sk_forward_alloc >= SK_MEM_QUANTUM)
-               __sk_mem_reclaim(sk);
+               __sk_mem_reclaim(sk, sk->sk_forward_alloc);
 }
 
 static inline void sk_mem_reclaim_partial(struct sock *sk)
@@ -1415,7 +1417,7 @@ static inline void sk_mem_reclaim_partial(struct sock *sk)
        if (!sk_has_account(sk))
                return;
        if (sk->sk_forward_alloc > SK_MEM_QUANTUM)
-               __sk_mem_reclaim(sk);
+               __sk_mem_reclaim(sk, sk->sk_forward_alloc - 1);
 }
 
 static inline void sk_mem_charge(struct sock *sk, int size)
@@ -1514,9 +1516,8 @@ static inline void unlock_sock_fast(struct sock *sk, bool slow)
 
 
 struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
-                     struct proto *prot);
+                     struct proto *prot, int kern);
 void sk_free(struct sock *sk);
-void sk_release_kernel(struct sock *sk);
 struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority);
 
 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
@@ -2024,7 +2025,8 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
        }
 }
 
-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
+struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
+                                   bool force_schedule);
 
 /**
  * sk_page_frag - return an appropriate page_frag
@@ -2192,22 +2194,6 @@ void sock_net_set(struct sock *sk, struct net *net)
        write_pnet(&sk->sk_net, net);
 }
 
-/*
- * Kernel sockets, f.e. rtnl or icmp_socket, are a part of a namespace.
- * They should not hold a reference to a namespace in order to allow
- * to stop it.
- * Sockets after sk_change_net should be released using sk_release_kernel
- */
-static inline void sk_change_net(struct sock *sk, struct net *net)
-{
-       struct net *current_net = sock_net(sk);
-
-       if (!net_eq(current_net, net)) {
-               put_net(current_net);
-               sock_net_set(sk, net);
-       }
-}
-
 static inline struct sock *skb_steal_sock(struct sk_buff *skb)
 {
        if (skb->sk) {
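
In-kernel socket creators now pass kern=1 so the socket does not pin its
namespace (tracked by the new skc_net_refcnt bit), replacing the removed
sk_change_net()/sk_release_kernel() pattern; a minimal sketch with a
placeholder proto:

        struct sock *sk;

        /* my_proto is a placeholder; kern=1 marks an in-kernel socket. */
        sk = sk_alloc(net, PF_INET, GFP_KERNEL, &my_proto, 1);
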
index d2e69ee3019a06308dbf2951283d6db663d0ad78..437f8fe75705baf56df2886748d8e49a2e00ffd5 100644 (file)
 #include <linux/netdevice.h>
 #include <linux/notifier.h>
 
+#define SWITCHDEV_F_NO_RECURSE         BIT(0)
+
+enum switchdev_trans {
+       SWITCHDEV_TRANS_NONE,
+       SWITCHDEV_TRANS_PREPARE,
+       SWITCHDEV_TRANS_ABORT,
+       SWITCHDEV_TRANS_COMMIT,
+};
+
+enum switchdev_attr_id {
+       SWITCHDEV_ATTR_UNDEFINED,
+       SWITCHDEV_ATTR_PORT_PARENT_ID,
+       SWITCHDEV_ATTR_PORT_STP_STATE,
+       SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS,
+};
+
+struct switchdev_attr {
+       enum switchdev_attr_id id;
+       enum switchdev_trans trans;
+       u32 flags;
+       union {
+               struct netdev_phys_item_id ppid;        /* PORT_PARENT_ID */
+               u8 stp_state;                           /* PORT_STP_STATE */
+               unsigned long brport_flags;             /* PORT_BRIDGE_FLAGS */
+       } u;
+};
+
 struct fib_info;
 
+enum switchdev_obj_id {
+       SWITCHDEV_OBJ_UNDEFINED,
+       SWITCHDEV_OBJ_PORT_VLAN,
+       SWITCHDEV_OBJ_IPV4_FIB,
+       SWITCHDEV_OBJ_PORT_FDB,
+};
+
+struct switchdev_obj {
+       enum switchdev_obj_id id;
+       enum switchdev_trans trans;
+       int (*cb)(struct net_device *dev, struct switchdev_obj *obj);
+       union {
+               struct switchdev_obj_vlan {             /* PORT_VLAN */
+                       u16 flags;
+                       u16 vid_start;
+                       u16 vid_end;
+               } vlan;
+               struct switchdev_obj_ipv4_fib {         /* IPV4_FIB */
+                       u32 dst;
+                       int dst_len;
+                       struct fib_info *fi;
+                       u8 tos;
+                       u8 type;
+                       u32 nlflags;
+                       u32 tb_id;
+               } ipv4_fib;
+               struct switchdev_obj_fdb {              /* PORT_FDB */
+                       const unsigned char *addr;
+                       u16 vid;
+               } fdb;
+       } u;
+};
+
 /**
  * struct switchdev_ops - switchdev operations
  *
- * @swdev_parent_id_get: Called to get an ID of the switch chip this port
- *   is part of.  If driver implements this, it indicates that it
- *   represents a port of a switch chip.
+ * @switchdev_port_attr_get: Get a port attribute (see switchdev_attr).
+ *
+ * @switchdev_port_attr_set: Set a port attribute (see switchdev_attr).
  *
- * @swdev_port_stp_update: Called to notify switch device port of bridge
- *   port STP state change.
+ * @switchdev_port_obj_add: Add an object to port (see switchdev_obj).
  *
- * @swdev_fib_ipv4_add: Called to add/modify IPv4 route to switch device.
+ * @switchdev_port_obj_del: Delete an object from port (see switchdev_obj).
  *
- * @swdev_fib_ipv4_del: Called to delete IPv4 route from switch device.
+ * @switchdev_port_obj_dump: Dump port objects (see switchdev_obj).
  */
-struct swdev_ops {
-       int     (*swdev_parent_id_get)(struct net_device *dev,
-                                      struct netdev_phys_item_id *psid);
-       int     (*swdev_port_stp_update)(struct net_device *dev, u8 state);
-       int     (*swdev_fib_ipv4_add)(struct net_device *dev, __be32 dst,
-                                     int dst_len, struct fib_info *fi,
-                                     u8 tos, u8 type, u32 nlflags,
-                                     u32 tb_id);
-       int     (*swdev_fib_ipv4_del)(struct net_device *dev, __be32 dst,
-                                     int dst_len, struct fib_info *fi,
-                                     u8 tos, u8 type, u32 tb_id);
+struct switchdev_ops {
+       int     (*switchdev_port_attr_get)(struct net_device *dev,
+                                          struct switchdev_attr *attr);
+       int     (*switchdev_port_attr_set)(struct net_device *dev,
+                                          struct switchdev_attr *attr);
+       int     (*switchdev_port_obj_add)(struct net_device *dev,
+                                         struct switchdev_obj *obj);
+       int     (*switchdev_port_obj_del)(struct net_device *dev,
+                                         struct switchdev_obj *obj);
+       int     (*switchdev_port_obj_dump)(struct net_device *dev,
+                                         struct switchdev_obj *obj);
 };
 
-enum netdev_switch_notifier_type {
-       NETDEV_SWITCH_FDB_ADD = 1,
-       NETDEV_SWITCH_FDB_DEL,
+enum switchdev_notifier_type {
+       SWITCHDEV_FDB_ADD = 1,
+       SWITCHDEV_FDB_DEL,
 };
 
-struct netdev_switch_notifier_info {
+struct switchdev_notifier_info {
        struct net_device *dev;
 };
 
-struct netdev_switch_notifier_fdb_info {
-       struct netdev_switch_notifier_info info; /* must be first */
+struct switchdev_notifier_fdb_info {
+       struct switchdev_notifier_info info; /* must be first */
        const unsigned char *addr;
        u16 vid;
 };
 
 static inline struct net_device *
-netdev_switch_notifier_info_to_dev(const struct netdev_switch_notifier_info *info)
+switchdev_notifier_info_to_dev(const struct switchdev_notifier_info *info)
 {
        return info->dev;
 }
 
 #ifdef CONFIG_NET_SWITCHDEV
 
-int netdev_switch_parent_id_get(struct net_device *dev,
-                               struct netdev_phys_item_id *psid);
-int netdev_switch_port_stp_update(struct net_device *dev, u8 state);
-int register_netdev_switch_notifier(struct notifier_block *nb);
-int unregister_netdev_switch_notifier(struct notifier_block *nb);
-int call_netdev_switch_notifiers(unsigned long val, struct net_device *dev,
-                                struct netdev_switch_notifier_info *info);
-int netdev_switch_port_bridge_setlink(struct net_device *dev,
-                               struct nlmsghdr *nlh, u16 flags);
-int netdev_switch_port_bridge_dellink(struct net_device *dev,
-                               struct nlmsghdr *nlh, u16 flags);
-int ndo_dflt_netdev_switch_port_bridge_dellink(struct net_device *dev,
-                                              struct nlmsghdr *nlh, u16 flags);
-int ndo_dflt_netdev_switch_port_bridge_setlink(struct net_device *dev,
-                                              struct nlmsghdr *nlh, u16 flags);
-int netdev_switch_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
-                              u8 tos, u8 type, u32 nlflags, u32 tb_id);
-int netdev_switch_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
-                              u8 tos, u8 type, u32 tb_id);
-void netdev_switch_fib_ipv4_abort(struct fib_info *fi);
+int switchdev_port_attr_get(struct net_device *dev,
+                           struct switchdev_attr *attr);
+int switchdev_port_attr_set(struct net_device *dev,
+                           struct switchdev_attr *attr);
+int switchdev_port_obj_add(struct net_device *dev, struct switchdev_obj *obj);
+int switchdev_port_obj_del(struct net_device *dev, struct switchdev_obj *obj);
+int switchdev_port_obj_dump(struct net_device *dev, struct switchdev_obj *obj);
+int register_switchdev_notifier(struct notifier_block *nb);
+int unregister_switchdev_notifier(struct notifier_block *nb);
+int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
+                            struct switchdev_notifier_info *info);
+int switchdev_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+                                 struct net_device *dev, u32 filter_mask,
+                                 int nlflags);
+int switchdev_port_bridge_setlink(struct net_device *dev,
+                                 struct nlmsghdr *nlh, u16 flags);
+int switchdev_port_bridge_dellink(struct net_device *dev,
+                                 struct nlmsghdr *nlh, u16 flags);
+int switchdev_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
+                          u8 tos, u8 type, u32 nlflags, u32 tb_id);
+int switchdev_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
+                          u8 tos, u8 type, u32 tb_id);
+void switchdev_fib_ipv4_abort(struct fib_info *fi);
+int switchdev_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+                          struct net_device *dev, const unsigned char *addr,
+                          u16 vid, u16 nlm_flags);
+int switchdev_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
+                          struct net_device *dev, const unsigned char *addr,
+                          u16 vid);
+int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
+                           struct net_device *dev,
+                           struct net_device *filter_dev, int idx);
 
 #else
 
-static inline int netdev_switch_parent_id_get(struct net_device *dev,
-                                             struct netdev_phys_item_id *psid)
+static inline int switchdev_port_attr_get(struct net_device *dev,
+                                         struct switchdev_attr *attr)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int switchdev_port_attr_set(struct net_device *dev,
+                                         struct switchdev_attr *attr)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int switchdev_port_obj_add(struct net_device *dev,
+                                        struct switchdev_obj *obj)
 {
        return -EOPNOTSUPP;
 }
 
-static inline int netdev_switch_port_stp_update(struct net_device *dev,
-                                               u8 state)
+static inline int switchdev_port_obj_del(struct net_device *dev,
+                                        struct switchdev_obj *obj)
 {
        return -EOPNOTSUPP;
 }
 
-static inline int register_netdev_switch_notifier(struct notifier_block *nb)
+static inline int switchdev_port_obj_dump(struct net_device *dev,
+                                         struct switchdev_obj *obj)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int register_switchdev_notifier(struct notifier_block *nb)
 {
        return 0;
 }
 
-static inline int unregister_netdev_switch_notifier(struct notifier_block *nb)
+static inline int unregister_switchdev_notifier(struct notifier_block *nb)
 {
        return 0;
 }
 
-static inline int call_netdev_switch_notifiers(unsigned long val, struct net_device *dev,
-                                              struct netdev_switch_notifier_info *info)
+static inline int call_switchdev_notifiers(unsigned long val,
+                                          struct net_device *dev,
+                                          struct switchdev_notifier_info *info)
 {
        return NOTIFY_DONE;
 }
 
-static inline int netdev_switch_port_bridge_setlink(struct net_device *dev,
-                                                   struct nlmsghdr *nlh,
-                                                   u16 flags)
+static inline int switchdev_port_bridge_getlink(struct sk_buff *skb, u32 pid,
+                                           u32 seq, struct net_device *dev,
+                                           u32 filter_mask, int nlflags)
 {
        return -EOPNOTSUPP;
 }
 
-static inline int netdev_switch_port_bridge_dellink(struct net_device *dev,
-                                                   struct nlmsghdr *nlh,
-                                                   u16 flags)
+static inline int switchdev_port_bridge_setlink(struct net_device *dev,
+                                               struct nlmsghdr *nlh,
+                                               u16 flags)
 {
        return -EOPNOTSUPP;
 }
 
-static inline int ndo_dflt_netdev_switch_port_bridge_dellink(struct net_device *dev,
-                                                       struct nlmsghdr *nlh,
-                                                       u16 flags)
+static inline int switchdev_port_bridge_dellink(struct net_device *dev,
+                                               struct nlmsghdr *nlh,
+                                               u16 flags)
 {
-       return 0;
+       return -EOPNOTSUPP;
 }
 
-static inline int ndo_dflt_netdev_switch_port_bridge_setlink(struct net_device *dev,
-                                                       struct nlmsghdr *nlh,
-                                                       u16 flags)
+static inline int switchdev_fib_ipv4_add(u32 dst, int dst_len,
+                                        struct fib_info *fi,
+                                        u8 tos, u8 type,
+                                        u32 nlflags, u32 tb_id)
 {
        return 0;
 }
 
-static inline int netdev_switch_fib_ipv4_add(u32 dst, int dst_len,
-                                            struct fib_info *fi,
-                                            u8 tos, u8 type,
-                                            u32 nlflags, u32 tb_id)
+static inline int switchdev_fib_ipv4_del(u32 dst, int dst_len,
+                                        struct fib_info *fi,
+                                        u8 tos, u8 type, u32 tb_id)
 {
        return 0;
 }
 
-static inline int netdev_switch_fib_ipv4_del(u32 dst, int dst_len,
-                                            struct fib_info *fi,
-                                            u8 tos, u8 type, u32 tb_id)
+static inline void switchdev_fib_ipv4_abort(struct fib_info *fi)
 {
-       return 0;
 }
 
-static inline void netdev_switch_fib_ipv4_abort(struct fib_info *fi)
+static inline int switchdev_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+                                        struct net_device *dev,
+                                        const unsigned char *addr,
+                                        u16 vid, u16 nlm_flags)
 {
+       return -EOPNOTSUPP;
+}
+
+static inline int switchdev_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
+                                        struct net_device *dev,
+                                        const unsigned char *addr, u16 vid)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int switchdev_port_fdb_dump(struct sk_buff *skb,
+                                         struct netlink_callback *cb,
+                                         struct net_device *dev,
+                                         struct net_device *filter_dev,
+                                         int idx)
+{
+       return -EOPNOTSUPP;
 }
 
 #endif
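
A hedged sketch of a driver wired up to the renamed ops, handling only the
parent-id attribute; the foo identifiers are hypothetical:

static int foo_port_attr_get(struct net_device *dev,
                             struct switchdev_attr *attr)
{
        switch (attr->id) {
        case SWITCHDEV_ATTR_PORT_PARENT_ID:
                attr->u.ppid.id_len = ETH_ALEN;
                memcpy(attr->u.ppid.id, foo_switch_id(dev), ETH_ALEN);
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}

static const struct switchdev_ops foo_switchdev_ops = {
        .switchdev_port_attr_get = foo_port_attr_get,
};
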
index 6d204f3f9df8cafb82d856db08769a7d24dfd79e..978cebedd3fc5c5e12bcfe46e4835c480e53a1be 100644 (file)
@@ -286,6 +286,14 @@ extern atomic_long_t tcp_memory_allocated;
 extern struct percpu_counter tcp_sockets_allocated;
 extern int tcp_memory_pressure;
 
+/* optimized version of sk_under_memory_pressure() for TCP sockets */
+static inline bool tcp_under_memory_pressure(const struct sock *sk)
+{
+       if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
+               return !!sk->sk_cgrp->memory_pressure;
+
+       return tcp_memory_pressure;
+}
+
 /*
  * The next routines deal with comparing 32 bit unsigned ints
  * and worry about wraparound (automatic with unsigned arithmetic).
@@ -311,6 +319,8 @@ static inline bool tcp_out_of_memory(struct sock *sk)
        return false;
 }
 
+void sk_forced_mem_schedule(struct sock *sk, int size);
+
 static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
 {
        struct percpu_counter *ocp = sk->sk_prot->orphan_count;
@@ -326,18 +336,6 @@ static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
 
 bool tcp_check_oom(struct sock *sk, int shift);
 
-/* syncookies: remember time of last synqueue overflow */
-static inline void tcp_synq_overflow(struct sock *sk)
-{
-       tcp_sk(sk)->rx_opt.ts_recent_stamp = jiffies;
-}
-
-/* syncookies: no recent synqueue overflow on this listening socket? */
-static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
-{
-       unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
-       return time_after(jiffies, last_overflow + TCP_TIMEOUT_FALLBACK);
-}
 
 extern struct proto tcp_prot;
 
@@ -471,6 +469,9 @@ int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
 
 /* From syncookies.c */
+struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
+                                struct request_sock *req,
+                                struct dst_entry *dst);
 int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
                      u32 cookie);
 struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
@@ -483,13 +484,35 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
  * i.e. a sent cookie is valid only at most for 2*60 seconds (or less if
  * the counter advances immediately after a cookie is generated).
  */
-#define MAX_SYNCOOKIE_AGE 2
+#define MAX_SYNCOOKIE_AGE      2
+#define TCP_SYNCOOKIE_PERIOD   (60 * HZ)
+#define TCP_SYNCOOKIE_VALID    (MAX_SYNCOOKIE_AGE * TCP_SYNCOOKIE_PERIOD)
+
+/* syncookies: remember time of last synqueue overflow
+ * But do not dirty this field too often (once per second is enough)
+ */
+static inline void tcp_synq_overflow(struct sock *sk)
+{
+       unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
+       unsigned long now = jiffies;
+
+       if (time_after(now, last_overflow + HZ))
+               tcp_sk(sk)->rx_opt.ts_recent_stamp = now;
+}
+
+/* syncookies: no recent synqueue overflow on this listening socket? */
+static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
+{
+       unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
+
+       return time_after(jiffies, last_overflow + TCP_SYNCOOKIE_VALID);
+}
 
 static inline u32 tcp_cookie_time(void)
 {
        u64 val = get_jiffies_64();
 
-       do_div(val, 60 * HZ);
+       do_div(val, TCP_SYNCOOKIE_PERIOD);
        return val;
 }
 
@@ -527,7 +550,7 @@ int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
 
 void tcp_send_probe0(struct sock *);
 void tcp_send_partial(struct sock *);
-int tcp_write_wakeup(struct sock *);
+int tcp_write_wakeup(struct sock *, int mib);
 void tcp_send_fin(struct sock *sk);
 void tcp_send_active_reset(struct sock *sk, gfp_t priority);
 int tcp_send_synack(struct sock *);
@@ -692,6 +715,8 @@ static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
 #define TCPHDR_ECE 0x40
 #define TCPHDR_CWR 0x80
 
+#define TCPHDR_SYN_ECN (TCPHDR_SYN | TCPHDR_ECE | TCPHDR_CWR)
+
 /* This is what the send packet queuing engine uses to pass
  * TCP per-packet control information to the transmission code.
  * We also store the host-order sequence numbers in here too.
@@ -1043,14 +1068,31 @@ static inline bool tcp_is_cwnd_limited(const struct sock *sk)
        return tp->is_cwnd_limited;
 }
 
-static inline void tcp_check_probe_timer(struct sock *sk)
+/* Something is really bad, we could not queue an additional packet,
+ * because qdisc is full or receiver sent a 0 window.
+ * We do not want to add fuel to the fire, or abort too early,
+ * so make sure the timer we arm now is at least 200ms in the future,
+ * regardless of current icsk_rto value (as it could be ~2ms)
+ */
+static inline unsigned long tcp_probe0_base(const struct sock *sk)
 {
-       const struct tcp_sock *tp = tcp_sk(sk);
-       const struct inet_connection_sock *icsk = inet_csk(sk);
+       return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN);
+}
+
+/* Variant of inet_csk_rto_backoff() used for zero window probes */
+static inline unsigned long tcp_probe0_when(const struct sock *sk,
+                                           unsigned long max_when)
+{
+       u64 when = (u64)tcp_probe0_base(sk) << inet_csk(sk)->icsk_backoff;
 
-       if (!tp->packets_out && !icsk->icsk_pending)
+       return (unsigned long)min_t(u64, when, max_when);
+}
+
+static inline void tcp_check_probe_timer(struct sock *sk)
+{
+       if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
-                                         icsk->icsk_rto, TCP_RTO_MAX);
+                                         tcp_probe0_base(sk), TCP_RTO_MAX);
 }
 
 static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
index a9ebdf5701e8ddc62eae3979d75999f874760313..602f05b7a2757f9c80c76ded04855df826819dba 100644 (file)
@@ -113,6 +113,7 @@ enum bpf_map_type {
        BPF_MAP_TYPE_UNSPEC,
        BPF_MAP_TYPE_HASH,
        BPF_MAP_TYPE_ARRAY,
+       BPF_MAP_TYPE_PROG_ARRAY,
 };
 
 enum bpf_prog_type {
@@ -210,6 +211,25 @@ enum bpf_func_id {
         * Return: 0 on success
         */
        BPF_FUNC_l4_csum_replace,
+
+       /**
+        * bpf_tail_call(ctx, prog_array_map, index) - jump into another BPF program
+        * @ctx: context pointer passed to next program
+        * @prog_array_map: pointer to a map of type BPF_MAP_TYPE_PROG_ARRAY
+        * @index: index inside the array that selects the program to run
+        * Return: 0 on success
+        */
+       BPF_FUNC_tail_call,
+
+       /**
+        * bpf_clone_redirect(skb, ifindex, flags) - redirect to another netdev
+        * @skb: pointer to skb
+        * @ifindex: ifindex of the net device
+        * @flags: bit 0 - if set, redirect to ingress instead of egress
+        *         other bits - reserved
+        * Return: 0 on success
+        */
+       BPF_FUNC_clone_redirect,
        __BPF_FUNC_MAX_ID,
 };
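
A hedged sketch of the new helper from a restricted-C eBPF program,
following the samples/bpf conventions of the time; jmp_table, do_parse and
the chosen slot are illustrative:

#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"        /* samples/bpf helper declarations */

struct bpf_map_def SEC("maps") jmp_table = {
        .type = BPF_MAP_TYPE_PROG_ARRAY,
        .key_size = sizeof(__u32),
        .value_size = sizeof(__u32),
        .max_entries = 8,
};

SEC("socket")
int do_parse(struct __sk_buff *skb)
{
        bpf_tail_call(skb, &jmp_table, 2);  /* jump to program in slot 2 */

        return 0;   /* reached only if the tail call failed */
}
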
 
@@ -226,6 +246,10 @@ struct __sk_buff {
        __u32 vlan_tci;
        __u32 vlan_proto;
        __u32 priority;
+       __u32 ingress_ifindex;
+       __u32 ifindex;
+       __u32 tc_index;
+       __u32 cb[5];
 };
 
 #endif /* _UAPI__LINUX_BPF_H__ */
index 41892f720057df2cc23f96c7d44d6ab2808fdfdd..9692cda5f8fc2fe7789de59f93281cf4c0bd2a5e 100644 (file)
@@ -95,11 +95,17 @@ typedef __u32 can_err_mask_t;
  * @can_dlc: frame payload length in byte (0 .. 8) aka data length code
  *           N.B. the DLC field from ISO 11898-1 Chapter 8.4.2.3 has a 1:1
  *           mapping of the 'data length code' to the real payload length
+ * @__pad:   padding
+ * @__res0:  reserved / padding
+ * @__res1:  reserved / padding
  * @data:    CAN frame payload (up to 8 byte)
  */
 struct can_frame {
        canid_t can_id;  /* 32 bit CAN_ID + EFF/RTR/ERR flags */
        __u8    can_dlc; /* frame payload length in byte (0 .. CAN_MAX_DLEN) */
+       __u8    __pad;   /* padding */
+       __u8    __res0;  /* reserved / padding */
+       __u8    __res1;  /* reserved / padding */
        __u8    data[CAN_MAX_DLEN] __attribute__((aligned(8)));
 };
 
index 2e49fc880d29a31bc167142748b23356c1b72005..0594933cdf55c715965db2ff46c88e272ca8114f 100644 (file)
@@ -796,6 +796,31 @@ struct ethtool_rx_flow_spec {
        __u32           location;
 };
 
+/* How rings are laid out when accessing virtual functions or
+ * offloaded queues is device specific. To allow users to do flow
+ * steering and specify these queues, the ring cookie is partitioned
+ * into a 32-bit queue index with an 8-bit virtual function id.
+ * This also leaves 3 bytes for further specifiers. It is possible
+ * that future devices may support more than 256 virtual functions
+ * if devices start supporting PCIe w/ARI. However, at the moment I
+ * do not know of any devices that support this, so I do not reserve
+ * space for it at this time. If a future patch consumes the next
+ * byte it should be aware of this possibility.
+ */
+#define ETHTOOL_RX_FLOW_SPEC_RING      0x00000000FFFFFFFFLL
+#define ETHTOOL_RX_FLOW_SPEC_RING_VF   0x000000FF00000000LL
+#define ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF 32
+static inline __u64 ethtool_get_flow_spec_ring(__u64 ring_cookie)
+{
+       return ETHTOOL_RX_FLOW_SPEC_RING & ring_cookie;
+}
+
+static inline __u64 ethtool_get_flow_spec_ring_vf(__u64 ring_cookie)
+{
+       return (ETHTOOL_RX_FLOW_SPEC_RING_VF & ring_cookie) >>
+                               ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
+}
+
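
Composing a cookie is the inverse of the helpers above; a hedged userspace
sketch steering a flow to queue 3 of VF 0, assuming the common driver
convention that the VF field is stored as vf + 1 so that 0 keeps meaning
the PF:

        __u64 ring_cookie = 3ULL |
                ((__u64)(0 + 1) << ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF);
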
 /**
  * struct ethtool_rxnfc - command to get or set RX flow classification rules
  * @cmd: Specific command number - %ETHTOOL_GRXFH, %ETHTOOL_SRXFH,
@@ -1264,15 +1289,19 @@ enum ethtool_sfeatures_retval_bits {
  * it was forced up into this mode or autonegotiated.
  */
 
-/* The forced speed, 10Mb, 100Mb, gigabit, [2.5|10|20|40|56]GbE. */
+/* The forced speed, 10Mb, 100Mb, gigabit, [2.5|5|10|20|25|40|50|56|100]GbE. */
 #define SPEED_10               10
 #define SPEED_100              100
 #define SPEED_1000             1000
 #define SPEED_2500             2500
+#define SPEED_5000             5000
 #define SPEED_10000            10000
 #define SPEED_20000            20000
+#define SPEED_25000            25000
 #define SPEED_40000            40000
+#define SPEED_50000            50000
 #define SPEED_56000            56000
+#define SPEED_100000           100000
 
 #define SPEED_UNKNOWN          -1
 
index d9cd19214b9816c036db8f8d26de81c3d53dffb7..1737b7a8272bbec19a195b27f68f6b363ad200d1 100644 (file)
@@ -390,6 +390,17 @@ struct ifla_vxlan_port_range {
        __be16  high;
 };
 
+/* GENEVE section */
+enum {
+       IFLA_GENEVE_UNSPEC,
+       IFLA_GENEVE_ID,
+       IFLA_GENEVE_REMOTE,
+       IFLA_GENEVE_TTL,
+       IFLA_GENEVE_TOS,
+       __IFLA_GENEVE_MAX
+};
+#define IFLA_GENEVE_MAX        (__IFLA_GENEVE_MAX - 1)
+
 /* Bonding section */
 
 enum {
@@ -417,6 +428,9 @@ enum {
        IFLA_BOND_AD_LACP_RATE,
        IFLA_BOND_AD_SELECT,
        IFLA_BOND_AD_INFO,
+       IFLA_BOND_AD_ACTOR_SYS_PRIO,
+       IFLA_BOND_AD_USER_PORT_KEY,
+       IFLA_BOND_AD_ACTOR_SYSTEM,
        __IFLA_BOND_MAX,
 };
 
index 053bd102fbe00a0affd7227359e25c7246de9d7e..d3d715f8c88f6d57c4318dc5b001e8efad2d074f 100644 (file)
@@ -54,6 +54,7 @@ struct sockaddr_ll {
 #define PACKET_FANOUT                  18
 #define PACKET_TX_HAS_OFF              19
 #define PACKET_QDISC_BYPASS            20
+#define PACKET_ROLLOVER_STATS          21
 
 #define PACKET_FANOUT_HASH             0
 #define PACKET_FANOUT_LB               1
@@ -75,6 +76,12 @@ struct tpacket_stats_v3 {
        unsigned int    tp_freeze_q_cnt;
 };
 
+struct tpacket_rollover_stats {
+       __aligned_u64   tp_all;
+       __aligned_u64   tp_huge;
+       __aligned_u64   tp_failed;
+};
+
 union tpacket_stats_u {
        struct tpacket_stats stats1;
        struct tpacket_stats_v3 stats3;
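
A hedged userspace sketch reading the new rollover counters, assuming the
option is queried via getsockopt() at the SOL_PACKET level like
PACKET_STATISTICS; includes and error handling trimmed:

        struct tpacket_rollover_stats rstats;
        socklen_t len = sizeof(rstats);

        if (getsockopt(pkt_fd, SOL_PACKET, PACKET_ROLLOVER_STATS,
                       &rstats, &len) == 0)
                printf("rollovers: all=%llu huge=%llu failed=%llu\n",
                       (unsigned long long)rstats.tp_all,
                       (unsigned long long)rstats.tp_huge,
                       (unsigned long long)rstats.tp_failed);
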
index 589ced069e8a1a68a9b1c9336517d66a675a75d2..83d6236a2f083d787f4ed887b71aa41c76330d9e 100644 (file)
@@ -69,6 +69,8 @@ enum {
 #define IPPROTO_SCTP           IPPROTO_SCTP
   IPPROTO_UDPLITE = 136,       /* UDP-Lite (RFC 3828)                  */
 #define IPPROTO_UDPLITE                IPPROTO_UDPLITE
+  IPPROTO_MPLS = 137,          /* MPLS in IP (RFC 4023)                */
+#define IPPROTO_MPLS           IPPROTO_MPLS
   IPPROTO_RAW = 255,           /* Raw IP packets                       */
 #define IPPROTO_RAW            IPPROTO_RAW
   IPPROTO_MAX
@@ -110,6 +112,7 @@ struct in_addr {
 #define IP_MINTTL       21
 #define IP_NODEFRAG     22
 #define IP_CHECKSUM    23
+#define IP_BIND_ADDRESS_NO_PORT        24
 
 /* IP_MTU_DISCOVER values */
 #define IP_PMTUDISC_DONT               0       /* Never send DF frames */
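
The new option lets a client bind() to a source address while deferring
port selection until connect(), easing ephemeral-port pressure; a hedged
userspace sketch with address setup omitted:

        int one = 1;

        setsockopt(fd, SOL_IP, IP_BIND_ADDRESS_NO_PORT, &one, sizeof(one));
        bind(fd, (struct sockaddr *)&src, sizeof(src));    /* sin_port == 0 */
        connect(fd, (struct sockaddr *)&dst, sizeof(dst)); /* port chosen here */
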
index 2be7bd174751ae393e3c15f702e07308afbcbe16..f6598d1c886ef6e11b704a088abe31477eea981d 100644 (file)
@@ -34,6 +34,7 @@
 #define RTF_PREF(pref) ((pref) << 27)
 #define RTF_PREF_MASK  0x18000000
 
+#define RTF_PCPU       0x40000000
 #define RTF_LOCAL      0x80000000
 
 
index ef1b1f88ca18476f2166dd1007da3e373164f391..177027cce6b347638d5ebdb887193ff7b825c9a6 100644 (file)
@@ -51,11 +51,17 @@ enum nf_inet_hooks {
        NF_INET_NUMHOOKS
 };
 
+enum nf_dev_hooks {
+       NF_NETDEV_INGRESS,
+       NF_NETDEV_NUMHOOKS
+};
+
 enum {
        NFPROTO_UNSPEC =  0,
        NFPROTO_INET   =  1,
        NFPROTO_IPV4   =  2,
        NFPROTO_ARP    =  3,
+       NFPROTO_NETDEV =  5,
        NFPROTO_BRIDGE =  7,
        NFPROTO_IPV6   = 10,
        NFPROTO_DECNET = 12,
index 5fa1cd04762e47ac1a1a60d95ed889c6b8afe2a8..89a671e0f5e7813a47939ec90c6364f76df7ed51 100644 (file)
@@ -146,12 +146,14 @@ enum nft_table_flags {
  * @NFTA_TABLE_NAME: name of the table (NLA_STRING)
  * @NFTA_TABLE_FLAGS: bitmask of enum nft_table_flags (NLA_U32)
  * @NFTA_TABLE_USE: number of chains in this table (NLA_U32)
+ * @NFTA_TABLE_DEV: net device name (NLA_STRING)
  */
 enum nft_table_attributes {
        NFTA_TABLE_UNSPEC,
        NFTA_TABLE_NAME,
        NFTA_TABLE_FLAGS,
        NFTA_TABLE_USE,
+       NFTA_TABLE_DEV,
        __NFTA_TABLE_MAX
 };
 #define NFTA_TABLE_MAX         (__NFTA_TABLE_MAX - 1)
index 1a85940f8ab733e85f50ade2ee25f8a527316a89..3e34b7d702f8a4884ee15afeca35c651861d40bc 100644 (file)
@@ -108,6 +108,7 @@ struct nlmsgerr {
 #define NETLINK_NO_ENOBUFS     5
 #define NETLINK_RX_RING                6
 #define NETLINK_TX_RING                7
+#define NETLINK_LISTEN_ALL_NSID        8
 
 struct nl_pktinfo {
        __u32   group;
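
A hedged userspace sketch of the new option: the netlink socket opts in to
notifications from all network namespaces it is allowed to see (a
capability check, assumed to be CAP_NET_BROADCAST, applies):

        int on = 1;

        setsockopt(nl_fd, SOL_NETLINK, NETLINK_LISTEN_ALL_NSID,
                   &on, sizeof(on));
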
index 241220c43e861233789420a4012a90f0e2308d28..c0ab6b0a391941157a1dc96560de515774a5abde 100644 (file)
@@ -2620,16 +2620,17 @@ enum nl80211_band_attr {
  *     an indoor surroundings, i.e., it is connected to AC power (and not
  *     through portable DC inverters) or is under the control of a master
  *     that is acting as an AP and is connected to AC power.
- * @NL80211_FREQUENCY_ATTR_GO_CONCURRENT: GO operation is allowed on this
+ * @NL80211_FREQUENCY_ATTR_IR_CONCURRENT: IR operation is allowed on this
  *     channel if it's connected concurrently to a BSS on the same channel on
  *     the 2 GHz band or to a channel in the same UNII band (on the 5 GHz
- *     band), and IEEE80211_CHAN_RADAR is not set. Instantiating a GO on a
- *     channel that has the GO_CONCURRENT attribute set can be done when there
- *     is a clear assessment that the device is operating under the guidance of
- *     an authorized master, i.e., setting up a GO while the device is also
- *     connected to an AP with DFS and radar detection on the UNII band (it is
- *     up to user-space, i.e., wpa_supplicant to perform the required
- *     verifications)
+ *     band), and IEEE80211_CHAN_RADAR is not set. Instantiating a GO or TDLS
+ *     off-channel on a channel that has the IR_CONCURRENT attribute set can be
+ *     done when there is a clear assessment that the device is operating under
+ *     the guidance of an authorized master, i.e., setting up a GO or TDLS
+ *     off-channel while the device is also connected to an AP with DFS and
+ *     radar detection on the UNII band (it is up to user-space, i.e.,
+ *     wpa_supplicant to perform the required verifications). Using this
+ *     attribute for IR is disallowed for master interfaces (IBSS, AP).
  * @NL80211_FREQUENCY_ATTR_NO_20MHZ: 20 MHz operation is not allowed
  *     on this channel in current regulatory domain.
  * @NL80211_FREQUENCY_ATTR_NO_10MHZ: 10 MHz operation is not allowed
@@ -2641,7 +2642,7 @@ enum nl80211_band_attr {
  * See https://apps.fcc.gov/eas/comments/GetPublishedDocument.html?id=327&tn=528122
  * for more information on the FCC description of the relaxations allowed
  * by NL80211_FREQUENCY_ATTR_INDOOR_ONLY and
- * NL80211_FREQUENCY_ATTR_GO_CONCURRENT.
+ * NL80211_FREQUENCY_ATTR_IR_CONCURRENT.
  */
 enum nl80211_frequency_attr {
        __NL80211_FREQUENCY_ATTR_INVALID,
@@ -2659,7 +2660,7 @@ enum nl80211_frequency_attr {
        NL80211_FREQUENCY_ATTR_NO_160MHZ,
        NL80211_FREQUENCY_ATTR_DFS_CAC_TIME,
        NL80211_FREQUENCY_ATTR_INDOOR_ONLY,
-       NL80211_FREQUENCY_ATTR_GO_CONCURRENT,
+       NL80211_FREQUENCY_ATTR_IR_CONCURRENT,
        NL80211_FREQUENCY_ATTR_NO_20MHZ,
        NL80211_FREQUENCY_ATTR_NO_10MHZ,
 
@@ -2672,6 +2673,8 @@ enum nl80211_frequency_attr {
 #define NL80211_FREQUENCY_ATTR_PASSIVE_SCAN    NL80211_FREQUENCY_ATTR_NO_IR
 #define NL80211_FREQUENCY_ATTR_NO_IBSS         NL80211_FREQUENCY_ATTR_NO_IR
 #define NL80211_FREQUENCY_ATTR_NO_IR           NL80211_FREQUENCY_ATTR_NO_IR
+#define NL80211_FREQUENCY_ATTR_GO_CONCURRENT \
+                                       NL80211_FREQUENCY_ATTR_IR_CONCURRENT
 
 /**
  * enum nl80211_bitrate_attr - bitrate attributes
@@ -2830,7 +2833,7 @@ enum nl80211_sched_scan_match_attr {
  * @NL80211_RRF_AUTO_BW: maximum available bandwidth should be calculated
  *     base on contiguous rules and wider channels will be allowed to cross
  *     multiple contiguous/overlapping frequency ranges.
- * @NL80211_RRF_GO_CONCURRENT: See &NL80211_FREQUENCY_ATTR_GO_CONCURRENT
+ * @NL80211_RRF_IR_CONCURRENT: See &NL80211_FREQUENCY_ATTR_IR_CONCURRENT
  * @NL80211_RRF_NO_HT40MINUS: channels can't be used in HT40- operation
  * @NL80211_RRF_NO_HT40PLUS: channels can't be used in HT40+ operation
  * @NL80211_RRF_NO_80MHZ: 80MHz operation not allowed
@@ -2847,7 +2850,7 @@ enum nl80211_reg_rule_flags {
        NL80211_RRF_NO_IR               = 1<<7,
        __NL80211_RRF_NO_IBSS           = 1<<8,
        NL80211_RRF_AUTO_BW             = 1<<11,
-       NL80211_RRF_GO_CONCURRENT       = 1<<12,
+       NL80211_RRF_IR_CONCURRENT       = 1<<12,
        NL80211_RRF_NO_HT40MINUS        = 1<<13,
        NL80211_RRF_NO_HT40PLUS         = 1<<14,
        NL80211_RRF_NO_80MHZ            = 1<<15,
@@ -2859,6 +2862,7 @@ enum nl80211_reg_rule_flags {
 #define NL80211_RRF_NO_IR              NL80211_RRF_NO_IR
 #define NL80211_RRF_NO_HT40            (NL80211_RRF_NO_HT40MINUS |\
                                         NL80211_RRF_NO_HT40PLUS)
+#define NL80211_RRF_GO_CONCURRENT      NL80211_RRF_IR_CONCURRENT
 
 /* For backport compatibility with older userspace */
 #define NL80211_RRF_NO_IR_ALL          (NL80211_RRF_NO_IR | __NL80211_RRF_NO_IBSS)
index bbd49a0c46c7775c4bdeac171a33d721f77b287a..1dab77601c217c19bb7b0432fca4dd5371b0111d 100644 (file)
@@ -153,6 +153,8 @@ enum ovs_packet_cmd {
  * flow key against the kernel's.
  * @OVS_PACKET_ATTR_ACTIONS: Contains actions for the packet.  Used
  * for %OVS_PACKET_CMD_EXECUTE.  It has nested %OVS_ACTION_ATTR_* attributes.
+ * Also used in upcalls when the %OVS_ACTION_ATTR_USERSPACE action has the
+ * optional %OVS_USERSPACE_ATTR_ACTIONS attribute.
  * @OVS_PACKET_ATTR_USERDATA: Present for an %OVS_PACKET_CMD_ACTION
  * notification if the %OVS_ACTION_ATTR_USERSPACE action specified an
  * %OVS_USERSPACE_ATTR_USERDATA attribute, with the same length and content
@@ -528,6 +530,7 @@ enum ovs_sample_attr {
  * copied to the %OVS_PACKET_CMD_ACTION message as %OVS_PACKET_ATTR_USERDATA.
  * @OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: If present, u32 output port to get
  * tunnel info.
+ * @OVS_USERSPACE_ATTR_ACTIONS: If present, send actions with upcall.
  */
 enum ovs_userspace_attr {
        OVS_USERSPACE_ATTR_UNSPEC,
@@ -535,6 +538,7 @@ enum ovs_userspace_attr {
        OVS_USERSPACE_ATTR_USERDATA,  /* Optional user-specified cookie. */
        OVS_USERSPACE_ATTR_EGRESS_TUN_PORT,  /* Optional, u32 output port
                                              * to get tunnel info. */
+       OVS_USERSPACE_ATTR_ACTIONS,   /* Optional flag to get actions. */
        __OVS_USERSPACE_ATTR_MAX
 };
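
As a hedged sketch of the userspace side: requesting the new upcall behavior means nesting the zero-length OVS_USERSPACE_ATTR_ACTIONS flag inside an OVS_ACTION_ATTR_USERSPACE action. libmnl is an assumption here, and OVS_USERSPACE_ATTR_PID is the pre-existing Netlink-PID attribute:

	#include <libmnl/libmnl.h>
	#include <linux/openvswitch.h>

	static void put_userspace_action(struct nlmsghdr *nlh, __u32 upcall_pid)
	{
		struct nlattr *nest;

		nest = mnl_attr_nest_start(nlh, OVS_ACTION_ATTR_USERSPACE);
		mnl_attr_put_u32(nlh, OVS_USERSPACE_ATTR_PID, upcall_pid);
		/* zero-length flag: echo the flow's actions in the upcall */
		mnl_attr_put(nlh, OVS_USERSPACE_ATTR_ACTIONS, 0, NULL);
		mnl_attr_nest_end(nlh, nest);
	}
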
 
index bf08e76bf50525912360edbfe09b1e220f683e94..4f0d1bc3647dc266de5cf017efafe37a2f4f2e2a 100644 (file)
@@ -4,6 +4,7 @@
 #include <linux/types.h>
 #include <linux/pkt_sched.h>
 
+#ifdef __KERNEL__
 /* I think i could have done better macros ; for now this is stolen from
  * some arch/mips code - jhs
 */
@@ -35,20 +36,6 @@ bits 9,10,11: redirect counter -  redirect TTL. Loop avoidance
  *
  * */
 
-#define TC_MUNGED          _TC_MAKEMASK1(0)
-#define SET_TC_MUNGED(v)   ( TC_MUNGED | (v & ~TC_MUNGED))
-#define CLR_TC_MUNGED(v)   ( v & ~TC_MUNGED)
-
-#define TC_OK2MUNGE        _TC_MAKEMASK1(1)
-#define SET_TC_OK2MUNGE(v)   ( TC_OK2MUNGE | (v & ~TC_OK2MUNGE))
-#define CLR_TC_OK2MUNGE(v)   ( v & ~TC_OK2MUNGE)
-
-#define S_TC_VERD          _TC_MAKE32(2)
-#define M_TC_VERD          _TC_MAKEMASK(4,S_TC_VERD)
-#define G_TC_VERD(x)       _TC_GETVALUE(x,S_TC_VERD,M_TC_VERD)
-#define V_TC_VERD(x)       _TC_MAKEVALUE(x,S_TC_VERD)
-#define SET_TC_VERD(v,n)   ((V_TC_VERD(n)) | (v & ~M_TC_VERD))
-
 #define S_TC_FROM          _TC_MAKE32(6)
 #define M_TC_FROM          _TC_MAKEMASK(2,S_TC_FROM)
 #define G_TC_FROM(x)       _TC_GETVALUE(x,S_TC_FROM,M_TC_FROM)
@@ -62,18 +49,16 @@ bits 9,10,11: redirect counter -  redirect TTL. Loop avoidance
 #define SET_TC_NCLS(v)   ( TC_NCLS | (v & ~TC_NCLS))
 #define CLR_TC_NCLS(v)   ( v & ~TC_NCLS)
 
-#define S_TC_RTTL          _TC_MAKE32(9)
-#define M_TC_RTTL          _TC_MAKEMASK(3,S_TC_RTTL)
-#define G_TC_RTTL(x)       _TC_GETVALUE(x,S_TC_RTTL,M_TC_RTTL)
-#define V_TC_RTTL(x)       _TC_MAKEVALUE(x,S_TC_RTTL)
-#define SET_TC_RTTL(v,n)   ((V_TC_RTTL(n)) | (v & ~M_TC_RTTL))
-
 #define S_TC_AT          _TC_MAKE32(12)
 #define M_TC_AT          _TC_MAKEMASK(2,S_TC_AT)
 #define G_TC_AT(x)       _TC_GETVALUE(x,S_TC_AT,M_TC_AT)
 #define V_TC_AT(x)       _TC_MAKEVALUE(x,S_TC_AT)
 #define SET_TC_AT(v,n)   ((V_TC_AT(n)) | (v & ~M_TC_AT))
 
+#define MAX_REC_LOOP 4
+#define MAX_RED_LOOP 4
+#endif
+
 /* Action attributes */
 enum {
        TCA_ACT_UNSPEC,
@@ -93,8 +78,6 @@ enum {
 #define TCA_ACT_NOUNBIND       0
 #define TCA_ACT_REPLACE                1
 #define TCA_ACT_NOREPLACE      0
-#define MAX_REC_LOOP 4
-#define MAX_RED_LOOP 4
 
 #define TC_ACT_UNSPEC  (-1)
 #define TC_ACT_OK              0
@@ -404,6 +387,36 @@ enum {
 
 #define TCA_BPF_MAX (__TCA_BPF_MAX - 1)
 
+/* Flower classifier */
+
+enum {
+       TCA_FLOWER_UNSPEC,
+       TCA_FLOWER_CLASSID,
+       TCA_FLOWER_INDEV,
+       TCA_FLOWER_ACT,
+       TCA_FLOWER_KEY_ETH_DST,         /* ETH_ALEN */
+       TCA_FLOWER_KEY_ETH_DST_MASK,    /* ETH_ALEN */
+       TCA_FLOWER_KEY_ETH_SRC,         /* ETH_ALEN */
+       TCA_FLOWER_KEY_ETH_SRC_MASK,    /* ETH_ALEN */
+       TCA_FLOWER_KEY_ETH_TYPE,        /* be16 */
+       TCA_FLOWER_KEY_IP_PROTO,        /* u8 */
+       TCA_FLOWER_KEY_IPV4_SRC,        /* be32 */
+       TCA_FLOWER_KEY_IPV4_SRC_MASK,   /* be32 */
+       TCA_FLOWER_KEY_IPV4_DST,        /* be32 */
+       TCA_FLOWER_KEY_IPV4_DST_MASK,   /* be32 */
+       TCA_FLOWER_KEY_IPV6_SRC,        /* struct in6_addr */
+       TCA_FLOWER_KEY_IPV6_SRC_MASK,   /* struct in6_addr */
+       TCA_FLOWER_KEY_IPV6_DST,        /* struct in6_addr */
+       TCA_FLOWER_KEY_IPV6_DST_MASK,   /* struct in6_addr */
+       TCA_FLOWER_KEY_TCP_SRC,         /* be16 */
+       TCA_FLOWER_KEY_TCP_DST,         /* be16 */
+       TCA_FLOWER_KEY_UDP_SRC,         /* be16 */
+       TCA_FLOWER_KEY_UDP_DST,         /* be16 */
+       __TCA_FLOWER_MAX,
+};
+
+#define TCA_FLOWER_MAX (__TCA_FLOWER_MAX - 1)
+
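
Each entry's comment gives its payload type; matches are expressed as value/mask attribute pairs. A hedged sketch of filling a masked IPv4 destination match (libmnl assumed, function name illustrative):

	#include <arpa/inet.h>
	#include <libmnl/libmnl.h>
	#include <linux/pkt_cls.h>

	static void put_flower_ipv4_dst(struct nlmsghdr *nlh,
					const char *addr, const char *mask)
	{
		struct in_addr a, m;

		inet_pton(AF_INET, addr, &a);
		inet_pton(AF_INET, mask, &m);

		/* matches packets whose masked IPv4 destination equals the key */
		mnl_attr_put_u32(nlh, TCA_FLOWER_KEY_IPV4_DST, a.s_addr);
		mnl_attr_put_u32(nlh, TCA_FLOWER_KEY_IPV4_DST_MASK, m.s_addr);
	}
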
 /* Extended Matches */
 
 struct tcf_ematch_tree_hdr {
index 534b847107453019d362e9f9f9c0969fc3100c8b..8d2530daca9f084c7c033952a57a8f3eb52062b2 100644 (file)
@@ -268,7 +268,8 @@ enum {
        TCA_GRED_STAB,
        TCA_GRED_DPS,
        TCA_GRED_MAX_P,
-          __TCA_GRED_MAX,
+       TCA_GRED_LIMIT,
+       __TCA_GRED_MAX,
 };
 
 #define TCA_GRED_MAX (__TCA_GRED_MAX - 1)
@@ -679,6 +680,7 @@ enum {
        TCA_CODEL_LIMIT,
        TCA_CODEL_INTERVAL,
        TCA_CODEL_ECN,
+       TCA_CODEL_CE_THRESHOLD,
        __TCA_CODEL_MAX
 };
 
@@ -695,6 +697,7 @@ struct tc_codel_xstats {
        __u32   drop_overlimit; /* number of times max qdisc packet limit was hit */
        __u32   ecn_mark;  /* number of packets we ECN marked instead of dropped */
        __u32   dropping;  /* are we in dropping state ? */
+       __u32   ce_mark;   /* number of CE marked packets because of ce_threshold */
 };
 
 /* FQ_CODEL */
@@ -707,6 +710,7 @@ enum {
        TCA_FQ_CODEL_ECN,
        TCA_FQ_CODEL_FLOWS,
        TCA_FQ_CODEL_QUANTUM,
+       TCA_FQ_CODEL_CE_THRESHOLD,
        __TCA_FQ_CODEL_MAX
 };
 
@@ -730,6 +734,7 @@ struct tc_fq_codel_qd_stats {
                                 */
        __u32   new_flows_len;  /* count of flows in new list */
        __u32   old_flows_len;  /* count of flows in old list */
+       __u32   ce_mark;        /* packets above ce_threshold */
 };
 
 struct tc_fq_codel_cl_stats {
index 91950950aa598060a8e0e370f82654cd9a75e7d6..0f9265cb2a96999f92d415a5cca12af237451433 100644 (file)
@@ -38,6 +38,8 @@
 
 #define RDS_IB_ABI_VERSION             0x301
 
+#define        SOL_RDS         276
+
 /*
  * setsockopt/getsockopt for SOL_RDS
  */
 #define RDS_RECVERR                    5
 #define RDS_CONG_MONITOR               6
 #define RDS_GET_MR_FOR_DEST            7
+#define SO_RDS_TRANSPORT               8
+
+/* supported values for SO_RDS_TRANSPORT */
+#define        RDS_TRANS_IB    0
+#define        RDS_TRANS_IWARP 1
+#define        RDS_TRANS_TCP   2
+#define RDS_TRANS_COUNT        3
+#define        RDS_TRANS_NONE  (~0)
 
 /*
  * Control message types for SOL_RDS.
index 6a6fb747c78db0bfb763212c826832f206107a0d..eee8968407f063b5d9c4776a30ebe45e5b782f2d 100644 (file)
@@ -276,6 +276,8 @@ enum
        LINUX_MIB_TCPACKSKIPPEDFINWAIT2,        /* TCPACKSkippedFinWait2 */
        LINUX_MIB_TCPACKSKIPPEDTIMEWAIT,        /* TCPACKSkippedTimeWait */
        LINUX_MIB_TCPACKSKIPPEDCHALLENGE,       /* TCPACKSkippedChallenge */
+       LINUX_MIB_TCPWINPROBE,                  /* TCPWinProbe */
+       LINUX_MIB_TCPKEEPALIVE,                 /* TCPKeepAlive */
        __LINUX_MIB_MAX
 };
 
index faa72f4fa547bcfe643b9cfd32b83b62caf6b973..65a77b071e22bec39225799e808b44b35bb1910c 100644 (file)
@@ -113,6 +113,8 @@ enum {
 #define TCP_TIMESTAMP          24
 #define TCP_NOTSENT_LOWAT      25      /* limit number of unsent bytes in write queue */
 #define TCP_CC_INFO            26      /* Get Congestion Control (optional) info */
+#define TCP_SAVE_SYN           27      /* Record SYN headers for new connections */
+#define TCP_SAVED_SYN          28      /* Get SYN headers recorded for connection */
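
A hedged sketch of the intended flow: enable TCP_SAVE_SYN on the listening socket, then read the recorded SYN headers back once per accepted connection via TCP_SAVED_SYN. The fallback defines carry the values from this patch for older libc headers:

	#include <netinet/in.h>
	#include <sys/socket.h>

	#ifndef TCP_SAVE_SYN
	#define TCP_SAVE_SYN	27
	#define TCP_SAVED_SYN	28
	#endif

	static int enable_save_syn(int listen_fd)
	{
		int one = 1;

		return setsockopt(listen_fd, IPPROTO_TCP, TCP_SAVE_SYN,
				  &one, sizeof(one));
	}

	static int read_saved_syn(int conn_fd, char *buf, socklen_t len)
	{
		/* on success, buf holds the IP and TCP headers of the SYN */
		return getsockopt(conn_fd, IPPROTO_TCP, TCP_SAVED_SYN, buf, &len);
	}
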
 
 struct tcp_repair_opt {
        __u32   opt_code;
@@ -190,8 +192,10 @@ struct tcp_info {
 
        __u64   tcpi_pacing_rate;
        __u64   tcpi_max_pacing_rate;
-       __u64   tcpi_bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked */
+       __u64   tcpi_bytes_acked;    /* RFC4898 tcpEStatsAppHCThruOctetsAcked */
        __u64   tcpi_bytes_received; /* RFC4898 tcpEStatsAppHCThruOctetsReceived */
+       __u32   tcpi_segs_out;       /* RFC4898 tcpEStatsPerfSegsOut */
+       __u32   tcpi_segs_in;        /* RFC4898 tcpEStatsPerfSegsIn */
 };
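
The new per-connection counters surface through the existing TCP_INFO getsockopt; a minimal sketch, assuming the uapi struct tcp_info with these fields is in the include path:

	#include <stdio.h>
	#include <netinet/in.h>
	#include <sys/socket.h>
	#include <linux/tcp.h>

	static void print_seg_counters(int fd)
	{
		struct tcp_info ti;
		socklen_t len = sizeof(ti);

		if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) == 0)
			printf("segs_out=%u segs_in=%u\n",
			       ti.tcpi_segs_out, ti.tcpi_segs_in);
	}
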
 
 /* for TCP_MD5SIG socket option */
index 8a6616583f38adce1e90ecc19b3bce92e01d2f83..cb31229a6fa4ddd39c1d69038b998bc0c0dee4db 100644 (file)
 #include <linux/vmalloc.h>
 #include <linux/slab.h>
 #include <linux/mm.h>
-
-struct bpf_array {
-       struct bpf_map map;
-       u32 elem_size;
-       char value[0] __aligned(8);
-};
+#include <linux/filter.h>
 
 /* Called from syscall */
 static struct bpf_map *array_map_alloc(union bpf_attr *attr)
@@ -154,3 +149,109 @@ static int __init register_array_map(void)
        return 0;
 }
 late_initcall(register_array_map);
+
+static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr)
+{
+       /* only bpf_prog file descriptors can be stored in prog_array map */
+       if (attr->value_size != sizeof(u32))
+               return ERR_PTR(-EINVAL);
+       return array_map_alloc(attr);
+}
+
+static void prog_array_map_free(struct bpf_map *map)
+{
+       struct bpf_array *array = container_of(map, struct bpf_array, map);
+       int i;
+
+       synchronize_rcu();
+
+       /* make sure it's empty */
+       for (i = 0; i < array->map.max_entries; i++)
+               BUG_ON(array->prog[i] != NULL);
+       kvfree(array);
+}
+
+static void *prog_array_map_lookup_elem(struct bpf_map *map, void *key)
+{
+       return NULL;
+}
+
+/* only called from syscall */
+static int prog_array_map_update_elem(struct bpf_map *map, void *key,
+                                     void *value, u64 map_flags)
+{
+       struct bpf_array *array = container_of(map, struct bpf_array, map);
+       struct bpf_prog *prog, *old_prog;
+       u32 index = *(u32 *)key, ufd;
+
+       if (map_flags != BPF_ANY)
+               return -EINVAL;
+
+       if (index >= array->map.max_entries)
+               return -E2BIG;
+
+       ufd = *(u32 *)value;
+       prog = bpf_prog_get(ufd);
+       if (IS_ERR(prog))
+               return PTR_ERR(prog);
+
+       if (!bpf_prog_array_compatible(array, prog)) {
+               bpf_prog_put(prog);
+               return -EINVAL;
+       }
+
+       old_prog = xchg(array->prog + index, prog);
+       if (old_prog)
+               bpf_prog_put_rcu(old_prog);
+
+       return 0;
+}
+
+static int prog_array_map_delete_elem(struct bpf_map *map, void *key)
+{
+       struct bpf_array *array = container_of(map, struct bpf_array, map);
+       struct bpf_prog *old_prog;
+       u32 index = *(u32 *)key;
+
+       if (index >= array->map.max_entries)
+               return -E2BIG;
+
+       old_prog = xchg(array->prog + index, NULL);
+       if (old_prog) {
+               bpf_prog_put_rcu(old_prog);
+               return 0;
+       } else {
+               return -ENOENT;
+       }
+}
+
+/* decrement refcnt of all bpf_progs that are stored in this map */
+void bpf_prog_array_map_clear(struct bpf_map *map)
+{
+       struct bpf_array *array = container_of(map, struct bpf_array, map);
+       int i;
+
+       for (i = 0; i < array->map.max_entries; i++)
+               prog_array_map_delete_elem(map, &i);
+}
+
+static const struct bpf_map_ops prog_array_ops = {
+       .map_alloc = prog_array_map_alloc,
+       .map_free = prog_array_map_free,
+       .map_get_next_key = array_map_get_next_key,
+       .map_lookup_elem = prog_array_map_lookup_elem,
+       .map_update_elem = prog_array_map_update_elem,
+       .map_delete_elem = prog_array_map_delete_elem,
+};
+
+static struct bpf_map_type_list prog_array_type __read_mostly = {
+       .ops = &prog_array_ops,
+       .type = BPF_MAP_TYPE_PROG_ARRAY,
+};
+
+static int __init register_prog_array_map(void)
+{
+       bpf_register_map_type(&prog_array_type);
+       return 0;
+}
+late_initcall(register_prog_array_map);
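
How a prog_array is driven from userspace, as a hedged sketch against the bpf(2) syscall (the sys_bpf() wrapper below is an assumption in the style of samples/bpf, not a kernel API): the values stored in the map are bpf_prog file descriptors, hence the u32 value size enforced above.

	#include <linux/bpf.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	static int sys_bpf(int cmd, union bpf_attr *attr, unsigned int size)
	{
		return syscall(__NR_bpf, cmd, attr, size);
	}

	static int create_prog_array(unsigned int max_entries)
	{
		union bpf_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.map_type	 = BPF_MAP_TYPE_PROG_ARRAY;
		attr.key_size	 = sizeof(__u32);
		attr.value_size	 = sizeof(__u32);	/* a bpf_prog fd */
		attr.max_entries = max_entries;

		return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
	}

	static int prog_array_set(int map_fd, __u32 index, int prog_fd)
	{
		union bpf_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.map_fd = map_fd;
		attr.key    = (__u64)(unsigned long)&index;
		attr.value  = (__u64)(unsigned long)&prog_fd;
		attr.flags  = BPF_ANY;	/* the only flag accepted here */

		return sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
	}
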
index 54f0e7fcd0e288b4506fab80091dcc39069ae422..1e00aa3316dcb53f2ab832c0aec5074e053c050c 100644 (file)
 #include <linux/vmalloc.h>
 #include <linux/random.h>
 #include <linux/moduleloader.h>
-#include <asm/unaligned.h>
 #include <linux/bpf.h>
 
+#include <asm/unaligned.h>
+
 /* Registers */
 #define BPF_R0 regs[BPF_REG_0]
 #define BPF_R1 regs[BPF_REG_1]
@@ -62,6 +63,7 @@ void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, uns
                ptr = skb_network_header(skb) + k - SKF_NET_OFF;
        else if (k >= SKF_LL_OFF)
                ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
+
        if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
                return ptr;
 
@@ -244,6 +246,7 @@ static unsigned int __bpf_prog_run(void *ctx, const struct bpf_insn *insn)
                [BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
                /* Call instruction */
                [BPF_JMP | BPF_CALL] = &&JMP_CALL,
+               [BPF_JMP | BPF_CALL | BPF_X] = &&JMP_TAIL_CALL,
                /* Jumps */
                [BPF_JMP | BPF_JA] = &&JMP_JA,
                [BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X,
@@ -286,6 +289,7 @@ static unsigned int __bpf_prog_run(void *ctx, const struct bpf_insn *insn)
                [BPF_LD | BPF_IND | BPF_B] = &&LD_IND_B,
                [BPF_LD | BPF_IMM | BPF_DW] = &&LD_IMM_DW,
        };
+       u32 tail_call_cnt = 0;
        void *ptr;
        int off;
 
@@ -431,6 +435,30 @@ select_insn:
                                                       BPF_R4, BPF_R5);
                CONT;
 
+       JMP_TAIL_CALL: {
+               struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
+               struct bpf_array *array = container_of(map, struct bpf_array, map);
+               struct bpf_prog *prog;
+               u64 index = BPF_R3;
+
+               if (unlikely(index >= array->map.max_entries))
+                       goto out;
+
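+               /* Bound chains of tail calls: once the per-invocation
+                * counter passes MAX_TAIL_CALL_CNT, the call degrades to
+                * a no-op and execution continues after it.
+                */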
+               if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
+                       goto out;
+
+               tail_call_cnt++;
+
+               prog = READ_ONCE(array->prog[index]);
+               if (unlikely(!prog))
+                       goto out;
+
+               ARG1 = BPF_R1;
+               insn = prog->insnsi;
+               goto select_insn;
+out:
+               CONT;
+       }
        /* JMP */
        JMP_JA:
                insn += insn->off;
@@ -615,25 +643,63 @@ load_byte:
                return 0;
 }
 
-void __weak bpf_int_jit_compile(struct bpf_prog *prog)
+bool bpf_prog_array_compatible(struct bpf_array *array,
+                              const struct bpf_prog *fp)
 {
+       if (!array->owner_prog_type) {
+               /* There's no owner yet where we could check for
+                * compatibility.
+                */
+               array->owner_prog_type = fp->type;
+               array->owner_jited = fp->jited;
+
+               return true;
+       }
+
+       return array->owner_prog_type == fp->type &&
+              array->owner_jited == fp->jited;
+}
+
+static int bpf_check_tail_call(const struct bpf_prog *fp)
+{
+       struct bpf_prog_aux *aux = fp->aux;
+       int i;
+
+       for (i = 0; i < aux->used_map_cnt; i++) {
+               struct bpf_map *map = aux->used_maps[i];
+               struct bpf_array *array;
+
+               if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
+                       continue;
+
+               array = container_of(map, struct bpf_array, map);
+               if (!bpf_prog_array_compatible(array, fp))
+                       return -EINVAL;
+       }
+
+       return 0;
 }
 
 /**
- *     bpf_prog_select_runtime - select execution runtime for BPF program
+ *     bpf_prog_select_runtime - select exec runtime for BPF program
  *     @fp: bpf_prog populated with internal BPF program
  *
- * try to JIT internal BPF program, if JIT is not available select interpreter
- * BPF program will be executed via BPF_PROG_RUN() macro
+ * Try to JIT the eBPF program; if JIT is not available, use the interpreter.
+ * The BPF program will be executed via BPF_PROG_RUN() macro.
  */
-void bpf_prog_select_runtime(struct bpf_prog *fp)
+int bpf_prog_select_runtime(struct bpf_prog *fp)
 {
        fp->bpf_func = (void *) __bpf_prog_run;
 
-       /* Probe if internal BPF can be JITed */
        bpf_int_jit_compile(fp);
-       /* Lock whole bpf_prog as read-only */
        bpf_prog_lock_ro(fp);
+
+       /* The tail call compatibility check can only be done at
+        * this late stage, as we need to determine whether we deal
+        * with JITed or non-JITed program concatenations, and not
+        * all eBPF JITs might immediately support all features.
+        */
+       return bpf_check_tail_call(fp);
 }
 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
 
@@ -663,6 +729,22 @@ const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
 
 const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
 const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
+const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
+
+/* Always built-in helper functions. */
+const struct bpf_func_proto bpf_tail_call_proto = {
+       .func           = NULL,
+       .gpl_only       = false,
+       .ret_type       = RET_VOID,
+       .arg1_type      = ARG_PTR_TO_CTX,
+       .arg2_type      = ARG_CONST_MAP_PTR,
+       .arg3_type      = ARG_ANYTHING,
+};
+
+/* For classic BPF JITs that don't implement bpf_int_jit_compile(). */
+void __weak bpf_int_jit_compile(struct bpf_prog *prog)
+{
+}
 
 /* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
  * skb_copy_bits(), so provide a weak definition of it for NET-less config.
index bd7f5988ed9cca0f4973388f68741444b0cbfcb8..7ad5d8842d5b28e2fcdfeba7b14061bb65d1c30e 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/rcupdate.h>
 #include <linux/random.h>
 #include <linux/smp.h>
+#include <linux/ktime.h>
 
 /* If kernel subsystem is allowing eBPF programs to call this function,
  * inside its own verifier_ops->get_func_proto() callback it should return
@@ -44,11 +45,11 @@ static u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 }
 
 const struct bpf_func_proto bpf_map_lookup_elem_proto = {
-       .func = bpf_map_lookup_elem,
-       .gpl_only = false,
-       .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
-       .arg1_type = ARG_CONST_MAP_PTR,
-       .arg2_type = ARG_PTR_TO_MAP_KEY,
+       .func           = bpf_map_lookup_elem,
+       .gpl_only       = false,
+       .ret_type       = RET_PTR_TO_MAP_VALUE_OR_NULL,
+       .arg1_type      = ARG_CONST_MAP_PTR,
+       .arg2_type      = ARG_PTR_TO_MAP_KEY,
 };
 
 static u64 bpf_map_update_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
@@ -63,13 +64,13 @@ static u64 bpf_map_update_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 }
 
 const struct bpf_func_proto bpf_map_update_elem_proto = {
-       .func = bpf_map_update_elem,
-       .gpl_only = false,
-       .ret_type = RET_INTEGER,
-       .arg1_type = ARG_CONST_MAP_PTR,
-       .arg2_type = ARG_PTR_TO_MAP_KEY,
-       .arg3_type = ARG_PTR_TO_MAP_VALUE,
-       .arg4_type = ARG_ANYTHING,
+       .func           = bpf_map_update_elem,
+       .gpl_only       = false,
+       .ret_type       = RET_INTEGER,
+       .arg1_type      = ARG_CONST_MAP_PTR,
+       .arg2_type      = ARG_PTR_TO_MAP_KEY,
+       .arg3_type      = ARG_PTR_TO_MAP_VALUE,
+       .arg4_type      = ARG_ANYTHING,
 };
 
 static u64 bpf_map_delete_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
@@ -83,11 +84,11 @@ static u64 bpf_map_delete_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 }
 
 const struct bpf_func_proto bpf_map_delete_elem_proto = {
-       .func = bpf_map_delete_elem,
-       .gpl_only = false,
-       .ret_type = RET_INTEGER,
-       .arg1_type = ARG_CONST_MAP_PTR,
-       .arg2_type = ARG_PTR_TO_MAP_KEY,
+       .func           = bpf_map_delete_elem,
+       .gpl_only       = false,
+       .ret_type       = RET_INTEGER,
+       .arg1_type      = ARG_CONST_MAP_PTR,
+       .arg2_type      = ARG_PTR_TO_MAP_KEY,
 };
 
 static u64 bpf_get_prandom_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
@@ -111,3 +112,15 @@ const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
 };
+
+static u64 bpf_ktime_get_ns(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+{
+       /* NMI safe access to clock monotonic */
+       return ktime_get_mono_fast_ns();
+}
+
+const struct bpf_func_proto bpf_ktime_get_ns_proto = {
+       .func           = bpf_ktime_get_ns,
+       .gpl_only       = true,
+       .ret_type       = RET_INTEGER,
+};
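
A hedged sketch of a consumer: a kprobe-attached eBPF program in restricted C (as compiled by LLVM's BPF backend) calling the helper through a samples/bpf-style stub. The stub, section name, and include path are assumptions; note the helper itself is GPL-only.

	#include <uapi/linux/bpf.h>

	/* samples/bpf-style stub resolved by the loader, not a kernel API */
	static unsigned long long (*bpf_ktime_get_ns)(void) =
		(void *) BPF_FUNC_ktime_get_ns;

	__attribute__((section("kprobe/sys_nanosleep"), used))
	int measure_entry(void *ctx)
	{
		/* NMI-safe CLOCK_MONOTONIC timestamp in nanoseconds; a real
		 * program would stash it in a map keyed by pid/tgid.
		 */
		unsigned long long ts = bpf_ktime_get_ns();

		(void)ts;
		return 0;
	}
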
index 3bae6c59191483d48eb5934ad216cc952abaf9b1..a1b14d197a4fc2da6acf2d812372284e78bc5af8 100644 (file)
@@ -68,6 +68,12 @@ static int bpf_map_release(struct inode *inode, struct file *filp)
 {
        struct bpf_map *map = filp->private_data;
 
+       if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
+               /* prog_array stores refcnt-ed bpf_prog pointers;
+                * release them all when user space closes prog_array_fd
+                */
+               bpf_prog_array_map_clear(map);
+
        bpf_map_put(map);
        return 0;
 }
@@ -392,6 +398,19 @@ static void fixup_bpf_calls(struct bpf_prog *prog)
                         */
                        BUG_ON(!prog->aux->ops->get_func_proto);
 
+                       if (insn->imm == BPF_FUNC_tail_call) {
+                               /* mark bpf_tail_call as different opcode
+                                * to avoid conditional branch in
+                                * interpreter for every normal call
+                                * and to prevent accidental JITing by
+                                * JIT compiler that doesn't support
+                                * bpf_tail_call yet
+                                */
+                               insn->imm = 0;
+                               insn->code |= BPF_X;
+                               continue;
+                       }
+
                        fn = prog->aux->ops->get_func_proto(insn->imm);
                        /* all functions that have prototype and verifier allowed
                         * programs to call them, must be real in-kernel functions
@@ -413,6 +432,23 @@ static void free_used_maps(struct bpf_prog_aux *aux)
        kfree(aux->used_maps);
 }
 
+static void __prog_put_rcu(struct rcu_head *rcu)
+{
+       struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
+
+       free_used_maps(aux);
+       bpf_prog_free(aux->prog);
+}
+
+/* version of bpf_prog_put() that is called after a grace period */
+void bpf_prog_put_rcu(struct bpf_prog *prog)
+{
+       if (atomic_dec_and_test(&prog->aux->refcnt)) {
+               prog->aux->prog = prog;
+               call_rcu(&prog->aux->rcu, __prog_put_rcu);
+       }
+}
+
 void bpf_prog_put(struct bpf_prog *prog)
 {
        if (atomic_dec_and_test(&prog->aux->refcnt)) {
@@ -426,7 +462,7 @@ static int bpf_prog_release(struct inode *inode, struct file *filp)
 {
        struct bpf_prog *prog = filp->private_data;
 
-       bpf_prog_put(prog);
+       bpf_prog_put_rcu(prog);
        return 0;
 }
 
@@ -532,7 +568,9 @@ static int bpf_prog_load(union bpf_attr *attr)
        fixup_bpf_calls(prog);
 
        /* eBPF program is ready to be JITed */
-       bpf_prog_select_runtime(prog);
+       err = bpf_prog_select_runtime(prog);
+       if (err < 0)
+               goto free_used_maps;
 
        err = anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog, O_RDWR | O_CLOEXEC);
        if (err < 0)
index 47dcd3aa6e236e14d85f33e6d74c44e6257d159b..039d866fd36ab0e1d553166acbf2aa8b86bbab06 100644 (file)
@@ -907,6 +907,23 @@ static int check_call(struct verifier_env *env, int func_id)
                        fn->ret_type, func_id);
                return -EINVAL;
        }
+
+       if (map && map->map_type == BPF_MAP_TYPE_PROG_ARRAY &&
+           func_id != BPF_FUNC_tail_call)
+               /* prog_array map type needs extra care:
+                * only allow passing it into bpf_tail_call() for now.
+                * bpf_map_delete_elem() can be allowed in the future,
+                * while bpf_map_update_elem() must only be done via syscall
+                */
+               return -EINVAL;
+
+       if (func_id == BPF_FUNC_tail_call &&
+           map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
+               /* don't allow any other map type to be passed into
+                * bpf_tail_call()
+                */
+               return -EINVAL;
+
        return 0;
 }
 
@@ -1675,6 +1692,8 @@ static int do_check(struct verifier_env *env)
                        }
 
                } else if (class == BPF_STX) {
+                       enum bpf_reg_type dst_reg_type;
+
                        if (BPF_MODE(insn->code) == BPF_XADD) {
                                err = check_xadd(env, insn);
                                if (err)
@@ -1683,11 +1702,6 @@ static int do_check(struct verifier_env *env)
                                continue;
                        }
 
-                       if (BPF_MODE(insn->code) != BPF_MEM ||
-                           insn->imm != 0) {
-                               verbose("BPF_STX uses reserved fields\n");
-                               return -EINVAL;
-                       }
                        /* check src1 operand */
                        err = check_reg_arg(regs, insn->src_reg, SRC_OP);
                        if (err)
@@ -1697,6 +1711,8 @@ static int do_check(struct verifier_env *env)
                        if (err)
                                return err;
 
+                       dst_reg_type = regs[insn->dst_reg].type;
+
                        /* check that memory (dst_reg + off) is writeable */
                        err = check_mem_access(env, insn->dst_reg, insn->off,
                                               BPF_SIZE(insn->code), BPF_WRITE,
@@ -1704,6 +1720,15 @@ static int do_check(struct verifier_env *env)
                        if (err)
                                return err;
 
+                       if (insn->imm == 0) {
+                               insn->imm = dst_reg_type;
+                       } else if (dst_reg_type != insn->imm &&
+                                  (dst_reg_type == PTR_TO_CTX ||
+                                   insn->imm == PTR_TO_CTX)) {
+                               verbose("same insn cannot be used with different pointers\n");
+                               return -EINVAL;
+                       }
+
                } else if (class == BPF_ST) {
                        if (BPF_MODE(insn->code) != BPF_MEM ||
                            insn->src_reg != BPF_REG_0) {
@@ -1822,12 +1847,18 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env)
 
        for (i = 0; i < insn_cnt; i++, insn++) {
                if (BPF_CLASS(insn->code) == BPF_LDX &&
-                   (BPF_MODE(insn->code) != BPF_MEM ||
-                    insn->imm != 0)) {
+                   (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
                        verbose("BPF_LDX uses reserved fields\n");
                        return -EINVAL;
                }
 
+               if (BPF_CLASS(insn->code) == BPF_STX &&
+                   ((BPF_MODE(insn->code) != BPF_MEM &&
+                     BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) {
+                       verbose("BPF_STX uses reserved fields\n");
+                       return -EINVAL;
+               }
+
                if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
                        struct bpf_map *map;
                        struct fd f;
@@ -1950,12 +1981,17 @@ static int convert_ctx_accesses(struct verifier_env *env)
        struct bpf_prog *new_prog;
        u32 cnt;
        int i;
+       enum bpf_access_type type;
 
        if (!env->prog->aux->ops->convert_ctx_access)
                return 0;
 
        for (i = 0; i < insn_cnt; i++, insn++) {
-               if (insn->code != (BPF_LDX | BPF_MEM | BPF_W))
+               if (insn->code == (BPF_LDX | BPF_MEM | BPF_W))
+                       type = BPF_READ;
+               else if (insn->code == (BPF_STX | BPF_MEM | BPF_W))
+                       type = BPF_WRITE;
+               else
                        continue;
 
                if (insn->imm != PTR_TO_CTX) {
@@ -1965,7 +2001,7 @@ static int convert_ctx_accesses(struct verifier_env *env)
                }
 
                cnt = env->prog->aux->ops->
-                       convert_ctx_access(insn->dst_reg, insn->src_reg,
+                       convert_ctx_access(type, insn->dst_reg, insn->src_reg,
                                           insn->off, insn_buf);
                if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
                        verbose("bpf verifier is misconfigured\n");
index 4f44028943e663391fe35827c8c5acb4966b5101..245df6b32b81f8eef778a203c2edb8432a52abd6 100644 (file)
@@ -346,16 +346,13 @@ static inline void seccomp_sync_threads(void)
  */
 static struct seccomp_filter *seccomp_prepare_filter(struct sock_fprog *fprog)
 {
-       struct seccomp_filter *filter;
-       unsigned long fp_size;
-       struct sock_filter *fp;
-       int new_len;
-       long ret;
+       struct seccomp_filter *sfilter;
+       int ret;
 
        if (fprog->len == 0 || fprog->len > BPF_MAXINSNS)
                return ERR_PTR(-EINVAL);
+
        BUG_ON(INT_MAX / fprog->len < sizeof(struct sock_filter));
-       fp_size = fprog->len * sizeof(struct sock_filter);
 
        /*
         * Installing a seccomp filter requires that the task has
@@ -368,60 +365,21 @@ static struct seccomp_filter *seccomp_prepare_filter(struct sock_fprog *fprog)
                                     CAP_SYS_ADMIN) != 0)
                return ERR_PTR(-EACCES);
 
-       fp = kzalloc(fp_size, GFP_KERNEL|__GFP_NOWARN);
-       if (!fp)
-               return ERR_PTR(-ENOMEM);
-
-       /* Copy the instructions from fprog. */
-       ret = -EFAULT;
-       if (copy_from_user(fp, fprog->filter, fp_size))
-               goto free_prog;
-
-       /* Check and rewrite the fprog via the skb checker */
-       ret = bpf_check_classic(fp, fprog->len);
-       if (ret)
-               goto free_prog;
-
-       /* Check and rewrite the fprog for seccomp use */
-       ret = seccomp_check_filter(fp, fprog->len);
-       if (ret)
-               goto free_prog;
-
-       /* Convert 'sock_filter' insns to 'bpf_insn' insns */
-       ret = bpf_convert_filter(fp, fprog->len, NULL, &new_len);
-       if (ret)
-               goto free_prog;
-
        /* Allocate a new seccomp_filter */
-       ret = -ENOMEM;
-       filter = kzalloc(sizeof(struct seccomp_filter),
-                        GFP_KERNEL|__GFP_NOWARN);
-       if (!filter)
-               goto free_prog;
-
-       filter->prog = bpf_prog_alloc(bpf_prog_size(new_len), __GFP_NOWARN);
-       if (!filter->prog)
-               goto free_filter;
-
-       ret = bpf_convert_filter(fp, fprog->len, filter->prog->insnsi, &new_len);
-       if (ret)
-               goto free_filter_prog;
-
-       kfree(fp);
-       atomic_set(&filter->usage, 1);
-       filter->prog->len = new_len;
+       sfilter = kzalloc(sizeof(*sfilter), GFP_KERNEL | __GFP_NOWARN);
+       if (!sfilter)
+               return ERR_PTR(-ENOMEM);
 
-       bpf_prog_select_runtime(filter->prog);
+       ret = bpf_prog_create_from_user(&sfilter->prog, fprog,
+                                       seccomp_check_filter);
+       if (ret < 0) {
+               kfree(sfilter);
+               return ERR_PTR(ret);
+       }
 
-       return filter;
+       atomic_set(&sfilter->usage, 1);
 
-free_filter_prog:
-       __bpf_prog_free(filter->prog);
-free_filter:
-       kfree(filter);
-free_prog:
-       kfree(fp);
-       return ERR_PTR(ret);
+       return sfilter;
 }
 
 /**
index 2d56ce50163264e79fdefc5c2859753c3c0f2bfb..50c4015a8ad3ee4e89f9000934bda2143c8bf40b 100644 (file)
@@ -79,18 +79,6 @@ static const struct bpf_func_proto bpf_probe_read_proto = {
        .arg3_type      = ARG_ANYTHING,
 };
 
-static u64 bpf_ktime_get_ns(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
-{
-       /* NMI safe access to clock monotonic */
-       return ktime_get_mono_fast_ns();
-}
-
-static const struct bpf_func_proto bpf_ktime_get_ns_proto = {
-       .func           = bpf_ktime_get_ns,
-       .gpl_only       = true,
-       .ret_type       = RET_INTEGER,
-};
-
 /*
  * limited trace_printk()
  * only %d %u %x %ld %lu %lx %lld %llu %llx %p conversion specifiers allowed
@@ -172,6 +160,8 @@ static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func
                return &bpf_probe_read_proto;
        case BPF_FUNC_ktime_get_ns:
                return &bpf_ktime_get_ns_proto;
+       case BPF_FUNC_tail_call:
+               return &bpf_tail_call_proto;
 
        case BPF_FUNC_trace_printk:
                /*
index 8609378e6505123a3688e0e95a18cdde013e278a..a60a6d335a91a6aa90f019f77062e7be069939fd 100644 (file)
@@ -585,7 +585,6 @@ void *rhashtable_walk_next(struct rhashtable_iter *iter)
        struct bucket_table *tbl = iter->walker->tbl;
        struct rhashtable *ht = iter->ht;
        struct rhash_head *p = iter->p;
-       void *obj = NULL;
 
        if (p) {
                p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
@@ -605,8 +604,7 @@ next:
                if (!rht_is_a_nulls(p)) {
                        iter->skip++;
                        iter->p = p;
-                       obj = rht_obj(ht, p);
-                       goto out;
+                       return rht_obj(ht, p);
                }
 
                iter->skip = 0;
@@ -624,9 +622,7 @@ next:
 
        iter->p = NULL;
 
-out:
-
-       return obj;
+       return NULL;
 }
 EXPORT_SYMBOL_GPL(rhashtable_walk_next);
 
index 80d78c51f65fc2bf007b2cc9222bb0714d4b3e68..7f58c735d745049025407806e972bb4c7f124888 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/skbuff.h>
 #include <linux/netdevice.h>
 #include <linux/if_vlan.h>
+#include <linux/random.h>
 
 /* General test specific settings */
 #define MAX_SUBTESTS   3
@@ -67,6 +68,10 @@ struct bpf_test {
        union {
                struct sock_filter insns[MAX_INSNS];
                struct bpf_insn insns_int[MAX_INSNS];
+               struct {
+                       void *insns;
+                       unsigned int len;
+               } ptr;
        } u;
        __u8 aux;
        __u8 data[MAX_DATA];
@@ -74,8 +79,282 @@ struct bpf_test {
                int data_size;
                __u32 result;
        } test[MAX_SUBTESTS];
+       int (*fill_helper)(struct bpf_test *self);
 };
 
+/* Large test cases need separate allocation and fill handler. */
+
+static int bpf_fill_maxinsns1(struct bpf_test *self)
+{
+       unsigned int len = BPF_MAXINSNS;
+       struct sock_filter *insn;
+       __u32 k = ~0;
+       int i;
+
+       insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+       if (!insn)
+               return -ENOMEM;
+
+       for (i = 0; i < len; i++, k--)
+               insn[i] = __BPF_STMT(BPF_RET | BPF_K, k);
+
+       self->u.ptr.insns = insn;
+       self->u.ptr.len = len;
+
+       return 0;
+}
+
+static int bpf_fill_maxinsns2(struct bpf_test *self)
+{
+       unsigned int len = BPF_MAXINSNS;
+       struct sock_filter *insn;
+       int i;
+
+       insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+       if (!insn)
+               return -ENOMEM;
+
+       for (i = 0; i < len; i++)
+               insn[i] = __BPF_STMT(BPF_RET | BPF_K, 0xfefefefe);
+
+       self->u.ptr.insns = insn;
+       self->u.ptr.len = len;
+
+       return 0;
+}
+
+static int bpf_fill_maxinsns3(struct bpf_test *self)
+{
+       unsigned int len = BPF_MAXINSNS;
+       struct sock_filter *insn;
+       struct rnd_state rnd;
+       int i;
+
+       insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+       if (!insn)
+               return -ENOMEM;
+
+       prandom_seed_state(&rnd, 3141592653589793238ULL);
+
+       for (i = 0; i < len - 1; i++) {
+               __u32 k = prandom_u32_state(&rnd);
+
+               insn[i] = __BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, k);
+       }
+
+       insn[len - 1] = __BPF_STMT(BPF_RET | BPF_A, 0);
+
+       self->u.ptr.insns = insn;
+       self->u.ptr.len = len;
+
+       return 0;
+}
+
+static int bpf_fill_maxinsns4(struct bpf_test *self)
+{
+       unsigned int len = BPF_MAXINSNS + 1;
+       struct sock_filter *insn;
+       int i;
+
+       insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+       if (!insn)
+               return -ENOMEM;
+
+       for (i = 0; i < len; i++)
+               insn[i] = __BPF_STMT(BPF_RET | BPF_K, 0xfefefefe);
+
+       self->u.ptr.insns = insn;
+       self->u.ptr.len = len;
+
+       return 0;
+}
+
+static int bpf_fill_maxinsns5(struct bpf_test *self)
+{
+       unsigned int len = BPF_MAXINSNS;
+       struct sock_filter *insn;
+       int i;
+
+       insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+       if (!insn)
+               return -ENOMEM;
+
+       insn[0] = __BPF_JUMP(BPF_JMP | BPF_JA, len - 2, 0, 0);
+
+       for (i = 1; i < len - 1; i++)
+               insn[i] = __BPF_STMT(BPF_RET | BPF_K, 0xfefefefe);
+
+       insn[len - 1] = __BPF_STMT(BPF_RET | BPF_K, 0xabababab);
+
+       self->u.ptr.insns = insn;
+       self->u.ptr.len = len;
+
+       return 0;
+}
+
+static int bpf_fill_maxinsns6(struct bpf_test *self)
+{
+       unsigned int len = BPF_MAXINSNS;
+       struct sock_filter *insn;
+       int i;
+
+       insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+       if (!insn)
+               return -ENOMEM;
+
+       for (i = 0; i < len - 1; i++)
+               insn[i] = __BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF +
+                                    SKF_AD_VLAN_TAG_PRESENT);
+
+       insn[len - 1] = __BPF_STMT(BPF_RET | BPF_A, 0);
+
+       self->u.ptr.insns = insn;
+       self->u.ptr.len = len;
+
+       return 0;
+}
+
+static int bpf_fill_maxinsns7(struct bpf_test *self)
+{
+       unsigned int len = BPF_MAXINSNS;
+       struct sock_filter *insn;
+       int i;
+
+       insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+       if (!insn)
+               return -ENOMEM;
+
+       for (i = 0; i < len - 4; i++)
+               insn[i] = __BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF +
+                                    SKF_AD_CPU);
+
+       insn[len - 4] = __BPF_STMT(BPF_MISC | BPF_TAX, 0);
+       insn[len - 3] = __BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF +
+                                  SKF_AD_CPU);
+       insn[len - 2] = __BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0);
+       insn[len - 1] = __BPF_STMT(BPF_RET | BPF_A, 0);
+
+       self->u.ptr.insns = insn;
+       self->u.ptr.len = len;
+
+       return 0;
+}
+
+static int bpf_fill_maxinsns8(struct bpf_test *self)
+{
+       unsigned int len = BPF_MAXINSNS;
+       struct sock_filter *insn;
+       int i, jmp_off = len - 3;
+
+       insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+       if (!insn)
+               return -ENOMEM;
+
+       insn[0] = __BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff);
+
+       for (i = 1; i < len - 1; i++)
+               insn[i] = __BPF_JUMP(BPF_JMP | BPF_JGT, 0xffffffff, jmp_off--, 0);
+
+       insn[len - 1] = __BPF_STMT(BPF_RET | BPF_A, 0);
+
+       self->u.ptr.insns = insn;
+       self->u.ptr.len = len;
+
+       return 0;
+}
+
+static int bpf_fill_maxinsns9(struct bpf_test *self)
+{
+       unsigned int len = BPF_MAXINSNS;
+       struct bpf_insn *insn;
+       int i;
+
+       insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+       if (!insn)
+               return -ENOMEM;
+
+       insn[0] = BPF_JMP_IMM(BPF_JA, 0, 0, len - 2);
+       insn[1] = BPF_ALU32_IMM(BPF_MOV, R0, 0xcbababab);
+       insn[2] = BPF_EXIT_INSN();
+
+       for (i = 3; i < len - 2; i++)
+               insn[i] = BPF_ALU32_IMM(BPF_MOV, R0, 0xfefefefe);
+
+       insn[len - 2] = BPF_EXIT_INSN();
+       insn[len - 1] = BPF_JMP_IMM(BPF_JA, 0, 0, -(len - 1));
+
+       self->u.ptr.insns = insn;
+       self->u.ptr.len = len;
+
+       return 0;
+}
+
+static int bpf_fill_maxinsns10(struct bpf_test *self)
+{
+       unsigned int len = BPF_MAXINSNS, hlen = len - 2;
+       struct bpf_insn *insn;
+       int i;
+
+       insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+       if (!insn)
+               return -ENOMEM;
+
+       for (i = 0; i < hlen / 2; i++)
+               insn[i] = BPF_JMP_IMM(BPF_JA, 0, 0, hlen - 2 - 2 * i);
+       for (i = hlen - 1; i > hlen / 2; i--)
+               insn[i] = BPF_JMP_IMM(BPF_JA, 0, 0, hlen - 1 - 2 * i);
+
+       insn[hlen / 2] = BPF_JMP_IMM(BPF_JA, 0, 0, hlen / 2 - 1);
+       insn[hlen]     = BPF_ALU32_IMM(BPF_MOV, R0, 0xabababac);
+       insn[hlen + 1] = BPF_EXIT_INSN();
+
+       self->u.ptr.insns = insn;
+       self->u.ptr.len = len;
+
+       return 0;
+}
+
+static int __bpf_fill_ja(struct bpf_test *self, unsigned int len,
+                        unsigned int plen)
+{
+       struct sock_filter *insn;
+       unsigned int rlen;
+       int i, j;
+
+       insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+       if (!insn)
+               return -ENOMEM;
+
+       rlen = (len % plen) - 1;
+
+       for (i = 0; i + plen < len; i += plen)
+               for (j = 0; j < plen; j++)
+                       insn[i + j] = __BPF_JUMP(BPF_JMP | BPF_JA,
+                                                plen - 1 - j, 0, 0);
+       for (j = 0; j < rlen; j++)
+               insn[i + j] = __BPF_JUMP(BPF_JMP | BPF_JA, rlen - 1 - j,
+                                        0, 0);
+
+       insn[len - 1] = __BPF_STMT(BPF_RET | BPF_K, 0xababcbac);
+
+       self->u.ptr.insns = insn;
+       self->u.ptr.len = len;
+
+       return 0;
+}
+
+static int bpf_fill_maxinsns11(struct bpf_test *self)
+{
+       /* Hits 70 passes on x86_64, so cannot get JITed there. */
+       return __bpf_fill_ja(self, BPF_MAXINSNS, 68);
+}
+
+static int bpf_fill_ja(struct bpf_test *self)
+{
+       /* Hits exactly 11 passes on x86_64 JIT. */
+       return __bpf_fill_ja(self, 12, 9);
+}
+
 static struct bpf_test tests[] = {
        {
                "TAX",
@@ -1755,7 +2034,8 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                        BPF_JMP_IMM(BPF_JEQ, R3, 0x1234, 1),
                        BPF_EXIT_INSN(),
-                       BPF_ALU64_IMM(BPF_MOV, R0, 1),
+                       BPF_LD_IMM64(R0, 0x1ffffffffLL),
+                       BPF_ALU64_IMM(BPF_RSH, R0, 32), /* R0 = 1 */
                        BPF_EXIT_INSN(),
                },
                INTERNAL,
@@ -1805,168 +2085,2488 @@ static struct bpf_test tests[] = {
                  0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6},
                { { 38, 256 } }
        },
-};
-
-static struct net_device dev;
-
-static struct sk_buff *populate_skb(char *buf, int size)
-{
-       struct sk_buff *skb;
-
-       if (size >= MAX_DATA)
-               return NULL;
-
-       skb = alloc_skb(MAX_DATA, GFP_KERNEL);
-       if (!skb)
-               return NULL;
-
-       memcpy(__skb_put(skb, size), buf, size);
-
-       /* Initialize a fake skb with test pattern. */
-       skb_reset_mac_header(skb);
-       skb->protocol = htons(ETH_P_IP);
-       skb->pkt_type = SKB_TYPE;
-       skb->mark = SKB_MARK;
-       skb->hash = SKB_HASH;
-       skb->queue_mapping = SKB_QUEUE_MAP;
-       skb->vlan_tci = SKB_VLAN_TCI;
-       skb->dev = &dev;
-       skb->dev->ifindex = SKB_DEV_IFINDEX;
-       skb->dev->type = SKB_DEV_TYPE;
-       skb_set_network_header(skb, min(size, ETH_HLEN));
-
-       return skb;
-}
-
-static void *generate_test_data(struct bpf_test *test, int sub)
-{
-       if (test->aux & FLAG_NO_DATA)
-               return NULL;
-
-       /* Test case expects an skb, so populate one. Various
-        * subtests generate skbs of different sizes based on
-        * the same data.
-        */
-       return populate_skb(test->data, test->test[sub].data_size);
-}
-
-static void release_test_data(const struct bpf_test *test, void *data)
-{
-       if (test->aux & FLAG_NO_DATA)
-               return;
-
-       kfree_skb(data);
-}
-
-static int probe_filter_length(struct sock_filter *fp)
-{
-       int len = 0;
-
-       for (len = MAX_INSNS - 1; len > 0; --len)
-               if (fp[len].code != 0 || fp[len].k != 0)
-                       break;
-
-       return len + 1;
-}
-
-static struct bpf_prog *generate_filter(int which, int *err)
-{
-       struct bpf_prog *fp;
-       struct sock_fprog_kern fprog;
-       unsigned int flen = probe_filter_length(tests[which].u.insns);
-       __u8 test_type = tests[which].aux & TEST_TYPE_MASK;
-
-       switch (test_type) {
-       case CLASSIC:
-               fprog.filter = tests[which].u.insns;
-               fprog.len = flen;
-
-               *err = bpf_prog_create(&fp, &fprog);
-               if (tests[which].aux & FLAG_EXPECTED_FAIL) {
-                       if (*err == -EINVAL) {
-                               pr_cont("PASS\n");
-                               /* Verifier rejected filter as expected. */
-                               *err = 0;
-                               return NULL;
-                       } else {
-                               pr_cont("UNEXPECTED_PASS\n");
-                               /* Verifier didn't reject the test that's
-                                * bad enough, just return!
-                                */
-                               *err = -EINVAL;
-                               return NULL;
-                       }
-               }
-               /* We don't expect to fail. */
-               if (*err) {
-                       pr_cont("FAIL to attach err=%d len=%d\n",
-                               *err, fprog.len);
-                       return NULL;
-               }
-               break;
-
-       case INTERNAL:
-               fp = bpf_prog_alloc(bpf_prog_size(flen), 0);
-               if (fp == NULL) {
-                       pr_cont("UNEXPECTED_FAIL no memory left\n");
-                       *err = -ENOMEM;
-                       return NULL;
-               }
-
-               fp->len = flen;
-               memcpy(fp->insnsi, tests[which].u.insns_int,
-                      fp->len * sizeof(struct bpf_insn));
-
-               bpf_prog_select_runtime(fp);
-               break;
-       }
-
-       *err = 0;
-       return fp;
-}
-
-static void release_filter(struct bpf_prog *fp, int which)
-{
-       __u8 test_type = tests[which].aux & TEST_TYPE_MASK;
-
-       switch (test_type) {
-       case CLASSIC:
-               bpf_prog_destroy(fp);
-               break;
-       case INTERNAL:
-               bpf_prog_free(fp);
-               break;
-       }
-}
-
-static int __run_one(const struct bpf_prog *fp, const void *data,
-                    int runs, u64 *duration)
-{
-       u64 start, finish;
-       int ret = 0, i;
-
-       start = ktime_to_us(ktime_get());
-
-       for (i = 0; i < runs; i++)
-               ret = BPF_PROG_RUN(fp, data);
-
-       finish = ktime_to_us(ktime_get());
-
-       *duration = (finish - start) * 1000ULL;
-       do_div(*duration, runs);
-
-       return ret;
-}
-
-static int run_one(const struct bpf_prog *fp, struct bpf_test *test)
-{
-       int err_cnt = 0, i, runs = MAX_TESTRUNS;
-
-       for (i = 0; i < MAX_SUBTESTS; i++) {
-               void *data;
-               u64 duration;
-               u32 ret;
-
+       /* BPF_ALU | BPF_MOV | BPF_X */
+       {
+               "ALU_MOV_X: dst = 2",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R1, 2),
+                       BPF_ALU32_REG(BPF_MOV, R0, R1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 2 } },
+       },
+       {
+               "ALU_MOV_X: dst = 4294967295",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R1, 4294967295U),
+                       BPF_ALU32_REG(BPF_MOV, R0, R1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 4294967295U } },
+       },
+       {
+               "ALU64_MOV_X: dst = 2",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R1, 2),
+                       BPF_ALU64_REG(BPF_MOV, R0, R1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 2 } },
+       },
+       {
+               "ALU64_MOV_X: dst = 4294967295",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R1, 4294967295U),
+                       BPF_ALU64_REG(BPF_MOV, R0, R1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 4294967295U } },
+       },
+       /* BPF_ALU | BPF_MOV | BPF_K */
+       {
+               "ALU_MOV_K: dst = 2",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 2),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 2 } },
+       },
+       {
+               "ALU_MOV_K: dst = 4294967295",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 4294967295U),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 4294967295U } },
+       },
+       {
+               "ALU_MOV_K: 0x0000ffffffff0000 = 0x00000000ffffffff",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
+                       BPF_LD_IMM64(R3, 0x00000000ffffffffLL),
+                       BPF_ALU32_IMM(BPF_MOV, R2, 0xffffffff),
+                       BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+                       BPF_MOV32_IMM(R0, 2),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV32_IMM(R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0x1 } },
+       },
+       {
+               "ALU64_MOV_K: dst = 2",
+               .u.insns_int = {
+                       BPF_ALU64_IMM(BPF_MOV, R0, 2),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 2 } },
+       },
+       {
+               "ALU64_MOV_K: dst = 2147483647",
+               .u.insns_int = {
+                       BPF_ALU64_IMM(BPF_MOV, R0, 2147483647),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 2147483647 } },
+       },
+       {
+               "ALU64_OR_K: dst = 0x0",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
+                       BPF_LD_IMM64(R3, 0x0),
+                       BPF_ALU64_IMM(BPF_MOV, R2, 0x0),
+                       BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+                       BPF_MOV32_IMM(R0, 2),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV32_IMM(R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0x1 } },
+       },
+       {
+               "ALU64_MOV_K: dst = -1",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
+                       BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
+                       BPF_ALU64_IMM(BPF_MOV, R2, 0xffffffff),
+                       BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+                       BPF_MOV32_IMM(R0, 2),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV32_IMM(R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0x1 } },
+       },
+       /* BPF_ALU | BPF_ADD | BPF_X */
+       {
+               "ALU_ADD_X: 1 + 2 = 3",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 1),
+                       BPF_ALU32_IMM(BPF_MOV, R1, 2),
+                       BPF_ALU32_REG(BPF_ADD, R0, R1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 3 } },
+       },
+       {
+               "ALU_ADD_X: 1 + 4294967294 = 4294967295",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 1),
+                       BPF_ALU32_IMM(BPF_MOV, R1, 4294967294U),
+                       BPF_ALU32_REG(BPF_ADD, R0, R1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 4294967295U } },
+       },
+       {
+               "ALU64_ADD_X: 1 + 2 = 3",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 1),
+                       BPF_ALU32_IMM(BPF_MOV, R1, 2),
+                       BPF_ALU64_REG(BPF_ADD, R0, R1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 3 } },
+       },
+       {
+               "ALU64_ADD_X: 1 + 4294967294 = 4294967295",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 1),
+                       BPF_ALU32_IMM(BPF_MOV, R1, 4294967294U),
+                       BPF_ALU64_REG(BPF_ADD, R0, R1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 4294967295U } },
+       },
+       /* BPF_ALU | BPF_ADD | BPF_K */
+       {
+               "ALU_ADD_K: 1 + 2 = 3",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 1),
+                       BPF_ALU32_IMM(BPF_ADD, R0, 2),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 3 } },
+       },
+       {
+               "ALU_ADD_K: 3 + 0 = 3",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 3),
+                       BPF_ALU32_IMM(BPF_ADD, R0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 3 } },
+       },
+       {
+               "ALU_ADD_K: 1 + 4294967294 = 4294967295",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 1),
+                       BPF_ALU32_IMM(BPF_ADD, R0, 4294967294U),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 4294967295U } },
+       },
+       {
+               "ALU_ADD_K: 0 + (-1) = 0x00000000ffffffff",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R2, 0x0),
+                       BPF_LD_IMM64(R3, 0x00000000ffffffff),
+                       BPF_ALU32_IMM(BPF_ADD, R2, 0xffffffff),
+                       BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+                       BPF_MOV32_IMM(R0, 2),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV32_IMM(R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0x1 } },
+       },
+       {
+               "ALU64_ADD_K: 1 + 2 = 3",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 1),
+                       BPF_ALU64_IMM(BPF_ADD, R0, 2),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 3 } },
+       },
+       {
+               "ALU64_ADD_K: 3 + 0 = 3",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 3),
+                       BPF_ALU64_IMM(BPF_ADD, R0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 3 } },
+       },
+       {
+               "ALU64_ADD_K: 1 + 2147483646 = 2147483647",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 1),
+                       BPF_ALU64_IMM(BPF_ADD, R0, 2147483646),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 2147483647 } },
+       },
+       {
+               "ALU64_ADD_K: 2147483646 + -2147483647 = -1",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 2147483646),
+                       BPF_ALU64_IMM(BPF_ADD, R0, -2147483647),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, -1 } },
+       },
+       {
+               "ALU64_ADD_K: 1 + 0 = 1",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R2, 0x1),
+                       BPF_LD_IMM64(R3, 0x1),
+                       BPF_ALU64_IMM(BPF_ADD, R2, 0x0),
+                       BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+                       BPF_MOV32_IMM(R0, 2),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV32_IMM(R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0x1 } },
+       },
+       {
+               "ALU64_ADD_K: 0 + (-1) = 0xffffffffffffffff",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R2, 0x0),
+                       BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
+                       BPF_ALU64_IMM(BPF_ADD, R2, 0xffffffff),
+                       BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+                       BPF_MOV32_IMM(R0, 2),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV32_IMM(R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0x1 } },
+       },
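+       /*
+        * In BPF_ALU64 | BPF_K ops the 32-bit immediate is
+        * sign-extended to 64 bits, so K = 0xffffffff behaves as -1
+        * above and in the AND/OR/XOR immediate tests further down.
+        */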
+       /* BPF_ALU | BPF_SUB | BPF_X */
+       {
+               "ALU_SUB_X: 3 - 1 = 2",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 3),
+                       BPF_ALU32_IMM(BPF_MOV, R1, 1),
+                       BPF_ALU32_REG(BPF_SUB, R0, R1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 2 } },
+       },
+       {
+               "ALU_SUB_X: 4294967295 - 4294967294 = 1",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 4294967295U),
+                       BPF_ALU32_IMM(BPF_MOV, R1, 4294967294U),
+                       BPF_ALU32_REG(BPF_SUB, R0, R1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
+       {
+               "ALU64_SUB_X: 3 - 1 = 2",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 3),
+                       BPF_ALU32_IMM(BPF_MOV, R1, 1),
+                       BPF_ALU64_REG(BPF_SUB, R0, R1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 2 } },
+       },
+       {
+               "ALU64_SUB_X: 4294967295 - 4294967294 = 1",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 4294967295U),
+                       BPF_ALU32_IMM(BPF_MOV, R1, 4294967294U),
+                       BPF_ALU64_REG(BPF_SUB, R0, R1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
+       /* BPF_ALU | BPF_SUB | BPF_K */
+       {
+               "ALU_SUB_K: 3 - 1 = 2",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 3),
+                       BPF_ALU32_IMM(BPF_SUB, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 2 } },
+       },
+       {
+               "ALU_SUB_K: 3 - 0 = 3",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 3),
+                       BPF_ALU32_IMM(BPF_SUB, R0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 3 } },
+       },
+       {
+               "ALU_SUB_K: 4294967295 - 4294967294 = 1",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 4294967295U),
+                       BPF_ALU32_IMM(BPF_SUB, R0, 4294967294U),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
+       {
+               "ALU64_SUB_K: 3 - 1 = 2",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 3),
+                       BPF_ALU64_IMM(BPF_SUB, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 2 } },
+       },
+       {
+               "ALU64_SUB_K: 3 - 0 = 3",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 3),
+                       BPF_ALU64_IMM(BPF_SUB, R0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 3 } },
+       },
+       {
+               "ALU64_SUB_K: 4294967294 - 4294967295 = -1",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 4294967294U),
+                       BPF_ALU64_IMM(BPF_SUB, R0, 4294967295U),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, -1 } },
+       },
+       {
+               "ALU64_ADD_K: 2147483646 - 2147483647 = -1",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 2147483646),
+                       BPF_ALU64_IMM(BPF_SUB, R0, 2147483647),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, -1 } },
+       },
+       /* BPF_ALU | BPF_MUL | BPF_X */
+       {
+               "ALU_MUL_X: 2 * 3 = 6",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 2),
+                       BPF_ALU32_IMM(BPF_MOV, R1, 3),
+                       BPF_ALU32_REG(BPF_MUL, R0, R1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 6 } },
+       },
+       {
+               "ALU_MUL_X: 2 * 0x7FFFFFF8 = 0xFFFFFFF0",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 2),
+                       BPF_ALU32_IMM(BPF_MOV, R1, 0x7FFFFFF8),
+                       BPF_ALU32_REG(BPF_MUL, R0, R1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0xFFFFFFF0 } },
+       },
+       {
+               "ALU_MUL_X: -1 * -1 = 1",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, -1),
+                       BPF_ALU32_IMM(BPF_MOV, R1, -1),
+                       BPF_ALU32_REG(BPF_MUL, R0, R1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
+       {
+               "ALU64_MUL_X: 2 * 3 = 6",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 2),
+                       BPF_ALU32_IMM(BPF_MOV, R1, 3),
+                       BPF_ALU64_REG(BPF_MUL, R0, R1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 6 } },
+       },
+       {
+               "ALU64_MUL_X: 1 * 2147483647 = 2147483647",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 1),
+                       BPF_ALU32_IMM(BPF_MOV, R1, 2147483647),
+                       BPF_ALU64_REG(BPF_MUL, R0, R1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 2147483647 } },
+       },
+       /* BPF_ALU | BPF_MUL | BPF_K */
+       {
+               "ALU_MUL_K: 2 * 3 = 6",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 2),
+                       BPF_ALU32_IMM(BPF_MUL, R0, 3),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 6 } },
+       },
+       {
+               "ALU_MUL_K: 3 * 1 = 3",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 3),
+                       BPF_ALU32_IMM(BPF_MUL, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 3 } },
+       },
+       {
+               "ALU_MUL_K: 2 * 0x7FFFFFF8 = 0xFFFFFFF0",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 2),
+                       BPF_ALU32_IMM(BPF_MUL, R0, 0x7FFFFFF8),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0xFFFFFFF0 } },
+       },
+       {
+               "ALU_MUL_K: 1 * (-1) = 0x00000000ffffffff",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R2, 0x1),
+                       BPF_LD_IMM64(R3, 0x00000000ffffffff),
+                       BPF_ALU32_IMM(BPF_MUL, R2, 0xffffffff),
+                       BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+                       BPF_MOV32_IMM(R0, 2),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV32_IMM(R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0x1 } },
+       },
+       {
+               "ALU64_MUL_K: 2 * 3 = 6",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 2),
+                       BPF_ALU64_IMM(BPF_MUL, R0, 3),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 6 } },
+       },
+       {
+               "ALU64_MUL_K: 3 * 1 = 3",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 3),
+                       BPF_ALU64_IMM(BPF_MUL, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 3 } },
+       },
+       {
+               "ALU64_MUL_K: 1 * 2147483647 = 2147483647",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 1),
+                       BPF_ALU64_IMM(BPF_MUL, R0, 2147483647),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 2147483647 } },
+       },
+       {
+               "ALU64_MUL_K: 1 * -2147483647 = -2147483647",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 1),
+                       BPF_ALU64_IMM(BPF_MUL, R0, -2147483647),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, -2147483647 } },
+       },
+       {
+               "ALU64_MUL_K: 1 * (-1) = 0xffffffffffffffff",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R2, 0x1),
+                       BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
+                       BPF_ALU64_IMM(BPF_MUL, R2, 0xffffffff),
+                       BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+                       BPF_MOV32_IMM(R0, 2),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV32_IMM(R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0x1 } },
+       },
+       /* BPF_ALU | BPF_DIV | BPF_X */
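+       /*
+        * BPF_DIV and BPF_MOD are unsigned, which is why
+        * 0xffffffffffffffff / (-1) below yields 1: the sign-extended
+        * operand is treated as the unsigned value 0xffffffffffffffff.
+        * (In the in-kernel interpreter a division by zero terminates
+        * the program with return value 0; that case is not exercised
+        * here.)
+        */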
+       {
+               "ALU_DIV_X: 6 / 2 = 3",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 6),
+                       BPF_ALU32_IMM(BPF_MOV, R1, 2),
+                       BPF_ALU32_REG(BPF_DIV, R0, R1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 3 } },
+       },
+       {
+               "ALU_DIV_X: 4294967295 / 4294967295 = 1",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 4294967295U),
+                       BPF_ALU32_IMM(BPF_MOV, R1, 4294967295U),
+                       BPF_ALU32_REG(BPF_DIV, R0, R1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
+       {
+               "ALU64_DIV_X: 6 / 2 = 3",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 6),
+                       BPF_ALU32_IMM(BPF_MOV, R1, 2),
+                       BPF_ALU64_REG(BPF_DIV, R0, R1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 3 } },
+       },
+       {
+               "ALU64_DIV_X: 2147483647 / 2147483647 = 1",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 2147483647),
+                       BPF_ALU32_IMM(BPF_MOV, R1, 2147483647),
+                       BPF_ALU64_REG(BPF_DIV, R0, R1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
+       {
+               "ALU64_DIV_X: 0xffffffffffffffff / (-1) = 0x0000000000000001",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R2, 0xffffffffffffffffLL),
+                       BPF_LD_IMM64(R4, 0xffffffffffffffffLL),
+                       BPF_LD_IMM64(R3, 0x0000000000000001LL),
+                       BPF_ALU64_REG(BPF_DIV, R2, R4),
+                       BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+                       BPF_MOV32_IMM(R0, 2),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV32_IMM(R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0x1 } },
+       },
+       /* BPF_ALU | BPF_DIV | BPF_K */
+       {
+               "ALU_DIV_K: 6 / 2 = 3",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 6),
+                       BPF_ALU32_IMM(BPF_DIV, R0, 2),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 3 } },
+       },
+       {
+               "ALU_DIV_K: 3 / 1 = 3",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 3),
+                       BPF_ALU32_IMM(BPF_DIV, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 3 } },
+       },
+       {
+               "ALU_DIV_K: 4294967295 / 4294967295 = 1",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 4294967295U),
+                       BPF_ALU32_IMM(BPF_DIV, R0, 4294967295U),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
+       {
+               "ALU_DIV_K: 0xffffffffffffffff / (-1) = 0x1",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R2, 0xffffffffffffffffLL),
+                       BPF_LD_IMM64(R3, 0x1UL),
+                       BPF_ALU32_IMM(BPF_DIV, R2, 0xffffffff),
+                       BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+                       BPF_MOV32_IMM(R0, 2),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV32_IMM(R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0x1 } },
+       },
+       {
+               "ALU64_DIV_K: 6 / 2 = 3",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 6),
+                       BPF_ALU64_IMM(BPF_DIV, R0, 2),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 3 } },
+       },
+       {
+               "ALU64_DIV_K: 3 / 1 = 3",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 3),
+                       BPF_ALU64_IMM(BPF_DIV, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 3 } },
+       },
+       {
+               "ALU64_DIV_K: 2147483647 / 2147483647 = 1",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 2147483647),
+                       BPF_ALU64_IMM(BPF_DIV, R0, 2147483647),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
+       {
+               "ALU64_DIV_K: 0xffffffffffffffff / (-1) = 0x0000000000000001",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R2, 0xffffffffffffffffLL),
+                       BPF_LD_IMM64(R3, 0x0000000000000001LL),
+                       BPF_ALU64_IMM(BPF_DIV, R2, 0xffffffff),
+                       BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+                       BPF_MOV32_IMM(R0, 2),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV32_IMM(R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0x1 } },
+       },
+       /* BPF_ALU | BPF_MOD | BPF_X */
+       {
+               "ALU_MOD_X: 3 % 2 = 1",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 3),
+                       BPF_ALU32_IMM(BPF_MOV, R1, 2),
+                       BPF_ALU32_REG(BPF_MOD, R0, R1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
+       {
+               "ALU_MOD_X: 4294967295 % 4294967293 = 2",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 4294967295U),
+                       BPF_ALU32_IMM(BPF_MOV, R1, 4294967293U),
+                       BPF_ALU32_REG(BPF_MOD, R0, R1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 2 } },
+       },
+       {
+               "ALU64_MOD_X: 3 % 2 = 1",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 3),
+                       BPF_ALU32_IMM(BPF_MOV, R1, 2),
+                       BPF_ALU64_REG(BPF_MOD, R0, R1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
+       {
+               "ALU64_MOD_X: 2147483647 % 2147483645 = 2",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 2147483647),
+                       BPF_ALU32_IMM(BPF_MOV, R1, 2147483645),
+                       BPF_ALU64_REG(BPF_MOD, R0, R1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 2 } },
+       },
+       /* BPF_ALU | BPF_MOD | BPF_K */
+       {
+               "ALU_MOD_K: 3 % 2 = 1",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 3),
+                       BPF_ALU32_IMM(BPF_MOD, R0, 2),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
+       {
+               "ALU_MOD_K: 3 % 1 = 0",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 3),
+                       BPF_ALU32_IMM(BPF_MOD, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0 } },
+       },
+       {
+               "ALU_MOD_K: 4294967295 % 4294967293 = 2",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 4294967295U),
+                       BPF_ALU32_IMM(BPF_MOD, R0, 4294967293U),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 2 } },
+       },
+       {
+               "ALU64_MOD_K: 3 % 2 = 1",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 3),
+                       BPF_ALU64_IMM(BPF_MOD, R0, 2),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
+       {
+               "ALU64_MOD_K: 3 % 1 = 0",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 3),
+                       BPF_ALU64_IMM(BPF_MOD, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0 } },
+       },
+       {
+               "ALU64_MOD_K: 2147483647 % 2147483645 = 2",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 2147483647),
+                       BPF_ALU64_IMM(BPF_MOD, R0, 2147483645),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 2 } },
+       },
+       /* BPF_ALU | BPF_AND | BPF_X */
+       {
+               "ALU_AND_X: 3 & 2 = 2",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 3),
+                       BPF_ALU32_IMM(BPF_MOV, R1, 2),
+                       BPF_ALU32_REG(BPF_AND, R0, R1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 2 } },
+       },
+       {
+               "ALU_AND_X: 0xffffffff & 0xffffffff = 0xffffffff",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 0xffffffff),
+                       BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
+                       BPF_ALU32_REG(BPF_AND, R0, R1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0xffffffff } },
+       },
+       {
+               "ALU64_AND_X: 3 & 2 = 2",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 3),
+                       BPF_ALU32_IMM(BPF_MOV, R1, 2),
+                       BPF_ALU64_REG(BPF_AND, R0, R1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 2 } },
+       },
+       {
+               "ALU64_AND_X: 0xffffffff & 0xffffffff = 0xffffffff",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 0xffffffff),
+                       BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
+                       BPF_ALU64_REG(BPF_AND, R0, R1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0xffffffff } },
+       },
+       /* BPF_ALU | BPF_AND | BPF_K */
+       {
+               "ALU_AND_K: 3 & 2 = 2",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 3),
+                       BPF_ALU32_IMM(BPF_AND, R0, 2),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 2 } },
+       },
+       {
+               "ALU_AND_K: 0xffffffff & 0xffffffff = 0xffffffff",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 0xffffffff),
+                       BPF_ALU32_IMM(BPF_AND, R0, 0xffffffff),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0xffffffff } },
+       },
+       {
+               "ALU64_AND_K: 3 & 2 = 2",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 3),
+                       BPF_ALU64_IMM(BPF_AND, R0, 2),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 2 } },
+       },
+       {
+               "ALU64_AND_K: 0xffffffff & 0xffffffff = 0xffffffff",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 0xffffffff),
+                       BPF_ALU64_IMM(BPF_AND, R0, 0xffffffff),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0xffffffff } },
+       },
+       {
+               "ALU64_AND_K: 0x0000ffffffff0000 & 0x0 = 0x0000ffff00000000",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
+                       BPF_LD_IMM64(R3, 0x0000000000000000LL),
+                       BPF_ALU64_IMM(BPF_AND, R2, 0x0),
+                       BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+                       BPF_MOV32_IMM(R0, 2),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV32_IMM(R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0x1 } },
+       },
+       {
+               "ALU64_AND_K: 0x0000ffffffff0000 & -1 = 0x0000ffffffffffff",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
+                       BPF_LD_IMM64(R3, 0x0000ffffffff0000LL),
+                       BPF_ALU64_IMM(BPF_AND, R2, 0xffffffff),
+                       BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+                       BPF_MOV32_IMM(R0, 2),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV32_IMM(R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0x1 } },
+       },
+       {
+               "ALU64_AND_K: 0xffffffffffffffff & -1 = 0xffffffffffffffff",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R2, 0xffffffffffffffffLL),
+                       BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
+                       BPF_ALU64_IMM(BPF_AND, R2, 0xffffffff),
+                       BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+                       BPF_MOV32_IMM(R0, 2),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV32_IMM(R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0x1 } },
+       },
+       /* BPF_ALU | BPF_OR | BPF_X */
+       {
+               "ALU_OR_X: 1 | 2 = 3",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 1),
+                       BPF_ALU32_IMM(BPF_MOV, R1, 2),
+                       BPF_ALU32_REG(BPF_OR, R0, R1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 3 } },
+       },
+       {
+               "ALU_OR_X: 0x0 | 0xffffffff = 0xffffffff",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 0),
+                       BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
+                       BPF_ALU32_REG(BPF_OR, R0, R1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0xffffffff } },
+       },
+       {
+               "ALU64_OR_X: 1 | 2 = 3",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 1),
+                       BPF_ALU32_IMM(BPF_MOV, R1, 2),
+                       BPF_ALU64_REG(BPF_OR, R0, R1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 3 } },
+       },
+       {
+               "ALU64_OR_X: 0 | 0xffffffff = 0xffffffff",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 0),
+                       BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
+                       BPF_ALU64_REG(BPF_OR, R0, R1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0xffffffff } },
+       },
+       /* BPF_ALU | BPF_OR | BPF_K */
+       {
+               "ALU_OR_K: 1 | 2 = 3",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 1),
+                       BPF_ALU32_IMM(BPF_OR, R0, 2),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 3 } },
+       },
+       {
+               "ALU_OR_K: 0 & 0xffffffff = 0xffffffff",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 0),
+                       BPF_ALU32_IMM(BPF_OR, R0, 0xffffffff),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0xffffffff } },
+       },
+       {
+               "ALU64_OR_K: 1 | 2 = 3",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 1),
+                       BPF_ALU64_IMM(BPF_OR, R0, 2),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 3 } },
+       },
+       {
+               "ALU64_OR_K: 0 & 0xffffffff = 0xffffffff",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 0),
+                       BPF_ALU64_IMM(BPF_OR, R0, 0xffffffff),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0xffffffff } },
+       },
+       {
+               "ALU64_OR_K: 0x0000ffffffff0000 | 0x0 = 0x0000ffff00000000",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
+                       BPF_LD_IMM64(R3, 0x0000ffffffff0000LL),
+                       BPF_ALU64_IMM(BPF_OR, R2, 0x0),
+                       BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+                       BPF_MOV32_IMM(R0, 2),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV32_IMM(R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0x1 } },
+       },
+       {
+               "ALU64_OR_K: 0x0000ffffffff0000 | -1 = 0xffffffffffffffff",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
+                       BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
+                       BPF_ALU64_IMM(BPF_OR, R2, 0xffffffff),
+                       BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+                       BPF_MOV32_IMM(R0, 2),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV32_IMM(R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0x1 } },
+       },
+       {
+               "ALU64_OR_K: 0x000000000000000 | -1 = 0xffffffffffffffff",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R2, 0x0000000000000000LL),
+                       BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
+                       BPF_ALU64_IMM(BPF_OR, R2, 0xffffffff),
+                       BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+                       BPF_MOV32_IMM(R0, 2),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV32_IMM(R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0x1 } },
+       },
+       /* BPF_ALU | BPF_XOR | BPF_X */
+       {
+               "ALU_XOR_X: 5 ^ 6 = 3",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 5),
+                       BPF_ALU32_IMM(BPF_MOV, R1, 6),
+                       BPF_ALU32_REG(BPF_XOR, R0, R1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 3 } },
+       },
+       {
+               "ALU_XOR_X: 0x1 ^ 0xffffffff = 0xfffffffe",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 1),
+                       BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
+                       BPF_ALU32_REG(BPF_XOR, R0, R1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0xfffffffe } },
+       },
+       {
+               "ALU64_XOR_X: 5 ^ 6 = 3",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 5),
+                       BPF_ALU32_IMM(BPF_MOV, R1, 6),
+                       BPF_ALU64_REG(BPF_XOR, R0, R1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 3 } },
+       },
+       {
+               "ALU64_XOR_X: 1 ^ 0xffffffff = 0xfffffffe",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 1),
+                       BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
+                       BPF_ALU64_REG(BPF_XOR, R0, R1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0xfffffffe } },
+       },
+       /* BPF_ALU | BPF_XOR | BPF_K */
+       {
+               "ALU_XOR_K: 5 ^ 6 = 3",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 5),
+                       BPF_ALU32_IMM(BPF_XOR, R0, 6),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 3 } },
+       },
+       {
+               "ALU_XOR_K: 1 ^ 0xffffffff = 0xfffffffe",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 1),
+                       BPF_ALU32_IMM(BPF_XOR, R0, 0xffffffff),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0xfffffffe } },
+       },
+       {
+               "ALU64_XOR_K: 5 ^ 6 = 3",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 5),
+                       BPF_ALU64_IMM(BPF_XOR, R0, 6),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 3 } },
+       },
+       {
+               "ALU64_XOR_K: 1 & 0xffffffff = 0xfffffffe",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 1),
+                       BPF_ALU64_IMM(BPF_XOR, R0, 0xffffffff),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0xfffffffe } },
+       },
+       {
+               "ALU64_XOR_K: 0x0000ffffffff0000 ^ 0x0 = 0x0000ffffffff0000",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
+                       BPF_LD_IMM64(R3, 0x0000ffffffff0000LL),
+                       BPF_ALU64_IMM(BPF_XOR, R2, 0x0),
+                       BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+                       BPF_MOV32_IMM(R0, 2),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV32_IMM(R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0x1 } },
+       },
+       {
+               "ALU64_XOR_K: 0x0000ffffffff0000 ^ -1 = 0xffff00000000ffff",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
+                       BPF_LD_IMM64(R3, 0xffff00000000ffffLL),
+                       BPF_ALU64_IMM(BPF_XOR, R2, 0xffffffff),
+                       BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+                       BPF_MOV32_IMM(R0, 2),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV32_IMM(R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0x1 } },
+       },
+       {
+               "ALU64_XOR_K: 0x000000000000000 ^ -1 = 0xffffffffffffffff",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R2, 0x0000000000000000LL),
+                       BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
+                       BPF_ALU64_IMM(BPF_XOR, R2, 0xffffffff),
+                       BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+                       BPF_MOV32_IMM(R0, 2),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV32_IMM(R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0x1 } },
+       },
+       /* BPF_ALU | BPF_LSH | BPF_X */
+       {
+               "ALU_LSH_X: 1 << 1 = 2",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 1),
+                       BPF_ALU32_IMM(BPF_MOV, R1, 1),
+                       BPF_ALU32_REG(BPF_LSH, R0, R1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 2 } },
+       },
+       {
+               "ALU_LSH_X: 1 << 31 = 0x80000000",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 1),
+                       BPF_ALU32_IMM(BPF_MOV, R1, 31),
+                       BPF_ALU32_REG(BPF_LSH, R0, R1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0x80000000 } },
+       },
+       {
+               "ALU64_LSH_X: 1 << 1 = 2",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 1),
+                       BPF_ALU32_IMM(BPF_MOV, R1, 1),
+                       BPF_ALU64_REG(BPF_LSH, R0, R1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 2 } },
+       },
+       {
+               "ALU64_LSH_X: 1 << 31 = 0x80000000",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 1),
+                       BPF_ALU32_IMM(BPF_MOV, R1, 31),
+                       BPF_ALU64_REG(BPF_LSH, R0, R1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0x80000000 } },
+       },
+       /* BPF_ALU | BPF_LSH | BPF_K */
+       {
+               "ALU_LSH_K: 1 << 1 = 2",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 1),
+                       BPF_ALU32_IMM(BPF_LSH, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 2 } },
+       },
+       {
+               "ALU_LSH_K: 1 << 31 = 0x80000000",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 1),
+                       BPF_ALU32_IMM(BPF_LSH, R0, 31),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0x80000000 } },
+       },
+       {
+               "ALU64_LSH_K: 1 << 1 = 2",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 1),
+                       BPF_ALU64_IMM(BPF_LSH, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 2 } },
+       },
+       {
+               "ALU64_LSH_K: 1 << 31 = 0x80000000",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 1),
+                       BPF_ALU64_IMM(BPF_LSH, R0, 31),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0x80000000 } },
+       },
+       /* BPF_ALU | BPF_RSH | BPF_X */
+       {
+               "ALU_RSH_X: 2 >> 1 = 1",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 2),
+                       BPF_ALU32_IMM(BPF_MOV, R1, 1),
+                       BPF_ALU32_REG(BPF_RSH, R0, R1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
+       {
+               "ALU_RSH_X: 0x80000000 >> 31 = 1",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 0x80000000),
+                       BPF_ALU32_IMM(BPF_MOV, R1, 31),
+                       BPF_ALU32_REG(BPF_RSH, R0, R1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
+       {
+               "ALU64_RSH_X: 2 >> 1 = 1",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 2),
+                       BPF_ALU32_IMM(BPF_MOV, R1, 1),
+                       BPF_ALU64_REG(BPF_RSH, R0, R1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
+       {
+               "ALU64_RSH_X: 0x80000000 >> 31 = 1",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 0x80000000),
+                       BPF_ALU32_IMM(BPF_MOV, R1, 31),
+                       BPF_ALU64_REG(BPF_RSH, R0, R1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
+       /* BPF_ALU | BPF_RSH | BPF_K */
+       {
+               "ALU_RSH_K: 2 >> 1 = 1",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 2),
+                       BPF_ALU32_IMM(BPF_RSH, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
+       {
+               "ALU_RSH_K: 0x80000000 >> 31 = 1",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 0x80000000),
+                       BPF_ALU32_IMM(BPF_RSH, R0, 31),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
+       {
+               "ALU64_RSH_K: 2 >> 1 = 1",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 2),
+                       BPF_ALU64_IMM(BPF_RSH, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
+       {
+               "ALU64_RSH_K: 0x80000000 >> 31 = 1",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 0x80000000),
+                       BPF_ALU64_IMM(BPF_RSH, R0, 31),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
+       /* BPF_ALU64 | BPF_ARSH | BPF_X */
+       {
+               "ALU64_ARSH_X: 0xff00ff0000000000 >> 40 = 0xffffffffffff00ff",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 0xff00ff0000000000LL),
+                       BPF_ALU32_IMM(BPF_MOV, R1, 40),
+                       BPF_ALU64_REG(BPF_ARSH, R0, R1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0xffff00ff } },
+       },
+       /* BPF_ALU64 | BPF_ARSH | BPF_K */
+       {
+               "ALU64_ARSH_K: 0xff00ff0000000000 >> 40 = 0xffffffffffff00ff",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 0xff00ff0000000000LL),
+                       BPF_ALU64_IMM(BPF_ARSH, R0, 40),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0xffff00ff } },
+       },
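+       /*
+        * BPF_ARSH is an arithmetic (sign-propagating) right shift
+        * and, at this point, is only defined for the 64-bit ALU,
+        * hence the BPF_ALU64_* encodings above.  Test programs return
+        * a u32, so the expected value is the low word of
+        * 0xffffffffffff00ff.
+        */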
+       /* BPF_ALU | BPF_NEG */
+       {
+               "ALU_NEG: -(3) = -3",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 3),
+                       BPF_ALU32_IMM(BPF_NEG, R0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, -3 } },
+       },
+       {
+               "ALU_NEG: -(-3) = 3",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, -3),
+                       BPF_ALU32_IMM(BPF_NEG, R0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 3 } },
+       },
+       {
+               "ALU64_NEG: -(3) = -3",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 3),
+                       BPF_ALU64_IMM(BPF_NEG, R0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, -3 } },
+       },
+       {
+               "ALU64_NEG: -(-3) = 3",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, -3),
+                       BPF_ALU64_IMM(BPF_NEG, R0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 3 } },
+       },
+       /* BPF_ALU | BPF_END | BPF_FROM_BE */
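+       /*
+        * BPF_ENDIAN converts between host byte order and the named
+        * byte order, truncating to the given width, so the expected
+        * values are wrapped in cpu_to_be*()/cpu_to_le*() to keep the
+        * tests correct on both little- and big-endian hosts.
+        */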
+       {
+               "ALU_END_FROM_BE 16: 0x0123456789abcdef -> 0xcdef",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+                       BPF_ENDIAN(BPF_FROM_BE, R0, 16),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0,  cpu_to_be16(0xcdef) } },
+       },
+       {
+               "ALU_END_FROM_BE 32: 0x0123456789abcdef -> 0x89abcdef",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+                       BPF_ENDIAN(BPF_FROM_BE, R0, 32),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, cpu_to_be32(0x89abcdef) } },
+       },
+       {
+               "ALU_END_FROM_BE 64: 0x0123456789abcdef -> 0x89abcdef",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+                       BPF_ENDIAN(BPF_FROM_BE, R0, 64),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, (u32) cpu_to_be64(0x0123456789abcdefLL) } },
+       },
+       /* BPF_ALU | BPF_END | BPF_FROM_LE */
+       {
+               "ALU_END_FROM_LE 16: 0x0123456789abcdef -> 0xefcd",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+                       BPF_ENDIAN(BPF_FROM_LE, R0, 16),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, cpu_to_le16(0xcdef) } },
+       },
+       {
+               "ALU_END_FROM_LE 32: 0x0123456789abcdef -> 0xefcdab89",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+                       BPF_ENDIAN(BPF_FROM_LE, R0, 32),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, cpu_to_le32(0x89abcdef) } },
+       },
+       {
+               "ALU_END_FROM_LE 64: 0x0123456789abcdef -> 0x67452301",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+                       BPF_ENDIAN(BPF_FROM_LE, R0, 64),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, (u32) cpu_to_le64(0x0123456789abcdefLL) } },
+       },
+       /* BPF_ST(X) | BPF_MEM | BPF_B/H/W/DW */
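+       /*
+        * R10 is the read-only frame pointer; the store/load tests
+        * below use negative offsets from it to address the BPF stack.
+        */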
+       {
+               "ST_MEM_B: Store/Load byte: max negative",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),
+                       BPF_ST_MEM(BPF_B, R10, -40, 0xff),
+                       BPF_LDX_MEM(BPF_B, R0, R10, -40),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0xff } },
+       },
+       {
+               "ST_MEM_B: Store/Load byte: max positive",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),
+                       BPF_ST_MEM(BPF_B, R10, -40, 0x7f),
+                       BPF_LDX_MEM(BPF_B, R0, R10, -40),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0x7f } },
+       },
+       {
+               "STX_MEM_B: Store/Load byte: max negative",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 0),
+                       BPF_LD_IMM64(R1, 0xffLL),
+                       BPF_STX_MEM(BPF_B, R10, R1, -40),
+                       BPF_LDX_MEM(BPF_B, R0, R10, -40),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0xff } },
+       },
+       {
+               "ST_MEM_H: Store/Load half word: max negative",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),
+                       BPF_ST_MEM(BPF_H, R10, -40, 0xffff),
+                       BPF_LDX_MEM(BPF_H, R0, R10, -40),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0xffff } },
+       },
+       {
+               "ST_MEM_H: Store/Load half word: max positive",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),
+                       BPF_ST_MEM(BPF_H, R10, -40, 0x7fff),
+                       BPF_LDX_MEM(BPF_H, R0, R10, -40),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0x7fff } },
+       },
+       {
+               "STX_MEM_H: Store/Load half word: max negative",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 0),
+                       BPF_LD_IMM64(R1, 0xffffLL),
+                       BPF_STX_MEM(BPF_H, R10, R1, -40),
+                       BPF_LDX_MEM(BPF_H, R0, R10, -40),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0xffff } },
+       },
+       {
+               "ST_MEM_W: Store/Load word: max negative",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),
+                       BPF_ST_MEM(BPF_W, R10, -40, 0xffffffff),
+                       BPF_LDX_MEM(BPF_W, R0, R10, -40),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0xffffffff } },
+       },
+       {
+               "ST_MEM_W: Store/Load word: max positive",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),
+                       BPF_ST_MEM(BPF_W, R10, -40, 0x7fffffff),
+                       BPF_LDX_MEM(BPF_W, R0, R10, -40),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0x7fffffff } },
+       },
+       {
+               "STX_MEM_W: Store/Load word: max negative",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 0),
+                       BPF_LD_IMM64(R1, 0xffffffffLL),
+                       BPF_STX_MEM(BPF_W, R10, R1, -40),
+                       BPF_LDX_MEM(BPF_W, R0, R10, -40),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0xffffffff } },
+       },
+       {
+               "ST_MEM_DW: Store/Load double word: max negative",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),
+                       BPF_ST_MEM(BPF_DW, R10, -40, 0xffffffff),
+                       BPF_LDX_MEM(BPF_DW, R0, R10, -40),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0xffffffff } },
+       },
+       {
+               "ST_MEM_DW: Store/Load double word: max negative 2",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R2, 0xffff00000000ffffLL),
+                       BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
+                       BPF_ST_MEM(BPF_DW, R10, -40, 0xffffffff),
+                       BPF_LDX_MEM(BPF_DW, R2, R10, -40),
+                       BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+                       BPF_MOV32_IMM(R0, 2),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV32_IMM(R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0x1 } },
+       },
+       {
+               "ST_MEM_DW: Store/Load double word: max positive",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),
+                       BPF_ST_MEM(BPF_DW, R10, -40, 0x7fffffff),
+                       BPF_LDX_MEM(BPF_DW, R0, R10, -40),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0x7fffffff } },
+       },
+       {
+               "STX_MEM_DW: Store/Load double word: max negative",
+               .u.insns_int = {
+                       BPF_LD_IMM64(R0, 0),
+                       BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
+                       BPF_STX_MEM(BPF_DW, R10, R1, -40),
+                       BPF_LDX_MEM(BPF_DW, R0, R10, -40),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0xffffffff } },
+       },
+       /* BPF_STX | BPF_XADD | BPF_W/DW */
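+       /*
+        * BPF_XADD is an atomic add to memory; it writes no result
+        * back to a register, so the tests read the location back
+        * with BPF_LDX_MEM to verify the addition.
+        */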
+       {
+               "STX_XADD_W: Test: 0x12 + 0x10 = 0x22",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
+                       BPF_ST_MEM(BPF_W, R10, -40, 0x10),
+                       BPF_STX_XADD(BPF_W, R10, R0, -40),
+                       BPF_LDX_MEM(BPF_W, R0, R10, -40),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0x22 } },
+       },
+       {
+               "STX_XADD_DW: Test: 0x12 + 0x10 = 0x22",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
+                       BPF_ST_MEM(BPF_DW, R10, -40, 0x10),
+                       BPF_STX_XADD(BPF_DW, R10, R0, -40),
+                       BPF_LDX_MEM(BPF_DW, R0, R10, -40),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0x22 } },
+       },
+       /* BPF_JMP | BPF_EXIT */
+       {
+               "JMP_EXIT",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 0x4711),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_IMM(BPF_MOV, R0, 0x4712),
+               },
+               INTERNAL,
+               { },
+               { { 0, 0x4711 } },
+       },
+       /* BPF_JMP | BPF_JA */
+       {
+               "JMP_JA: Unconditional jump: if (true) return 1",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 0),
+                       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
+       /* BPF_JMP | BPF_JSGT | BPF_K */
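+       /*
+        * The BPF_JSGT/BPF_JSGE tests compare signed values:
+        * 0xffffffffffffffff is -1 when interpreted as s64, and the
+        * 32-bit immediate of the jump is sign-extended as well.
+        */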
+       {
+               "JMP_JSGT_K: Signed jump: if (-1 > -2) return 1",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 0),
+                       BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
+                       BPF_JMP_IMM(BPF_JSGT, R1, -2, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
+       {
+               "JMP_JSGT_K: Signed jump: if (-1 > -1) return 0",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),
+                       BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
+                       BPF_JMP_IMM(BPF_JSGT, R1, -1, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_IMM(BPF_MOV, R0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
+       /* BPF_JMP | BPF_JSGE | BPF_K */
+       {
+               "JMP_JSGE_K: Signed jump: if (-1 >= -2) return 1",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 0),
+                       BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
+                       BPF_JMP_IMM(BPF_JSGE, R1, -2, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
+       {
+               "JMP_JSGE_K: Signed jump: if (-1 >= -1) return 1",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 0),
+                       BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
+                       BPF_JMP_IMM(BPF_JSGE, R1, -1, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
+       /* BPF_JMP | BPF_JGT | BPF_K */
+       {
+               "JMP_JGT_K: if (3 > 2) return 1",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 0),
+                       BPF_LD_IMM64(R1, 3),
+                       BPF_JMP_IMM(BPF_JGT, R1, 2, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
+       /* BPF_JMP | BPF_JGE | BPF_K */
+       {
+               "JMP_JGE_K: if (3 >= 2) return 1",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 0),
+                       BPF_LD_IMM64(R1, 3),
+                       BPF_JMP_IMM(BPF_JGE, R1, 2, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
+       /* BPF_JMP | BPF_JGT | BPF_K jump backwards */
+       {
+               "JMP_JGT_K: if (3 > 2) return 1 (jump backwards)",
+               .u.insns_int = {
+                       BPF_JMP_IMM(BPF_JA, 0, 0, 2), /* goto start */
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1), /* out: */
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_IMM(BPF_MOV, R0, 0), /* start: */
+                       BPF_LD_IMM64(R1, 3), /* note: this takes 2 insns */
+                       BPF_JMP_IMM(BPF_JGT, R1, 2, -6), /* goto out */
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
+       {
+               "JMP_JGE_K: if (3 >= 3) return 1",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 0),
+                       BPF_LD_IMM64(R1, 3),
+                       BPF_JMP_IMM(BPF_JGE, R1, 3, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
+       /* BPF_JMP | BPF_JNE | BPF_K */
+       {
+               "JMP_JNE_K: if (3 != 2) return 1",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 0),
+                       BPF_LD_IMM64(R1, 3),
+                       BPF_JMP_IMM(BPF_JNE, R1, 2, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
+       /* BPF_JMP | BPF_JEQ | BPF_K */
+       {
+               "JMP_JEQ_K: if (3 == 3) return 1",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 0),
+                       BPF_LD_IMM64(R1, 3),
+                       BPF_JMP_IMM(BPF_JEQ, R1, 3, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
+       /* BPF_JMP | BPF_JSET | BPF_K */
+       {
+               "JMP_JSET_K: if (0x3 & 0x2) return 1",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 0),
+                       BPF_LD_IMM64(R1, 3),
+                       BPF_JMP_IMM(BPF_JSET, R1, 2, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
+       {
+               "JMP_JSET_K: if (0x3 & 0xffffffff) return 1",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 0),
+                       BPF_LD_IMM64(R1, 3),
+                       BPF_JMP_IMM(BPF_JSET, R1, 0xffffffff, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
+       /* BPF_JMP | BPF_JSGT | BPF_X */
+       {
+               "JMP_JSGT_X: Signed jump: if (-1 > -2) return 1",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 0),
+                       BPF_LD_IMM64(R1, -1),
+                       BPF_LD_IMM64(R2, -2),
+                       BPF_JMP_REG(BPF_JSGT, R1, R2, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
+       {
+               "JMP_JSGT_X: Signed jump: if (-1 > -1) return 0",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),
+                       BPF_LD_IMM64(R1, -1),
+                       BPF_LD_IMM64(R2, -1),
+                       BPF_JMP_REG(BPF_JSGT, R1, R2, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_IMM(BPF_MOV, R0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
+       /* BPF_JMP | BPF_JSGE | BPF_X */
+       {
+               "JMP_JSGE_X: Signed jump: if (-1 >= -2) return 1",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 0),
+                       BPF_LD_IMM64(R1, -1),
+                       BPF_LD_IMM64(R2, -2),
+                       BPF_JMP_REG(BPF_JSGE, R1, R2, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
+       {
+               "JMP_JSGE_X: Signed jump: if (-1 >= -1) return 1",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 0),
+                       BPF_LD_IMM64(R1, -1),
+                       BPF_LD_IMM64(R2, -1),
+                       BPF_JMP_REG(BPF_JSGE, R1, R2, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
+       /* BPF_JMP | BPF_JGT | BPF_X */
+       {
+               "JMP_JGT_X: if (3 > 2) return 1",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 0),
+                       BPF_LD_IMM64(R1, 3),
+                       BPF_LD_IMM64(R2, 2),
+                       BPF_JMP_REG(BPF_JGT, R1, R2, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
+       /* BPF_JMP | BPF_JGE | BPF_X */
+       {
+               "JMP_JGE_X: if (3 >= 2) return 1",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 0),
+                       BPF_LD_IMM64(R1, 3),
+                       BPF_LD_IMM64(R2, 2),
+                       BPF_JMP_REG(BPF_JGE, R1, R2, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
+       {
+               "JMP_JGE_X: if (3 >= 3) return 1",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 0),
+                       BPF_LD_IMM64(R1, 3),
+                       BPF_LD_IMM64(R2, 3),
+                       BPF_JMP_REG(BPF_JGE, R1, R2, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
+       /* BPF_JMP | BPF_JNE | BPF_X */
+       {
+               "JMP_JNE_X: if (3 != 2) return 1",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 0),
+                       BPF_LD_IMM64(R1, 3),
+                       BPF_LD_IMM64(R2, 2),
+                       BPF_JMP_REG(BPF_JNE, R1, R2, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
+       /* BPF_JMP | BPF_JEQ | BPF_X */
+       {
+               "JMP_JEQ_X: if (3 == 3) return 1",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 0),
+                       BPF_LD_IMM64(R1, 3),
+                       BPF_LD_IMM64(R2, 3),
+                       BPF_JMP_REG(BPF_JEQ, R1, R2, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
+       /* BPF_JMP | BPF_JSET | BPF_X */
+       {
+               "JMP_JSET_X: if (0x3 & 0x2) return 1",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 0),
+                       BPF_LD_IMM64(R1, 3),
+                       BPF_LD_IMM64(R2, 2),
+                       BPF_JMP_REG(BPF_JSET, R1, R2, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
+       {
+               "JMP_JSET_X: if (0x3 & 0xffffffff) return 1",
+               .u.insns_int = {
+                       BPF_ALU32_IMM(BPF_MOV, R0, 0),
+                       BPF_LD_IMM64(R1, 3),
+                       BPF_LD_IMM64(R2, 0xffffffff),
+                       BPF_JMP_REG(BPF_JSET, R1, R2, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_ALU32_IMM(BPF_MOV, R0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               INTERNAL,
+               { },
+               { { 0, 1 } },
+       },
+       {
+               "JMP_JA: Jump, gap, jump, ...",
+               { },
+               CLASSIC | FLAG_NO_DATA,
+               { },
+               { { 0, 0xababcbac } },
+               .fill_helper = bpf_fill_ja,
+       },
+       {       /* Mainly checking JIT here. */
+               "BPF_MAXINSNS: Maximum possible literals",
+               { },
+               CLASSIC | FLAG_NO_DATA,
+               { },
+               { { 0, 0xffffffff } },
+               .fill_helper = bpf_fill_maxinsns1,
+       },
+       {       /* Mainly checking JIT here. */
+               "BPF_MAXINSNS: Single literal",
+               { },
+               CLASSIC | FLAG_NO_DATA,
+               { },
+               { { 0, 0xfefefefe } },
+               .fill_helper = bpf_fill_maxinsns2,
+       },
+       {       /* Mainly checking JIT here. */
+               "BPF_MAXINSNS: Run/add until end",
+               { },
+               CLASSIC | FLAG_NO_DATA,
+               { },
+               { { 0, 0x947bf368 } },
+               .fill_helper = bpf_fill_maxinsns3,
+       },
+       {
+               "BPF_MAXINSNS: Too many instructions",
+               { },
+               CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
+               { },
+               { },
+               .fill_helper = bpf_fill_maxinsns4,
+       },
+       {       /* Mainly checking JIT here. */
+               "BPF_MAXINSNS: Very long jump",
+               { },
+               CLASSIC | FLAG_NO_DATA,
+               { },
+               { { 0, 0xabababab } },
+               .fill_helper = bpf_fill_maxinsns5,
+       },
+       {       /* Mainly checking JIT here. */
+               "BPF_MAXINSNS: Ctx heavy transformations",
+               { },
+               CLASSIC,
+               { },
+               {
+                       {  1, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) },
+                       { 10, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) }
+               },
+               .fill_helper = bpf_fill_maxinsns6,
+       },
+       {       /* Mainly checking JIT here. */
+               "BPF_MAXINSNS: Call heavy transformations",
+               { },
+               CLASSIC | FLAG_NO_DATA,
+               { },
+               { { 1, 0 }, { 10, 0 } },
+               .fill_helper = bpf_fill_maxinsns7,
+       },
+       {       /* Mainly checking JIT here. */
+               "BPF_MAXINSNS: Jump heavy test",
+               { },
+               CLASSIC | FLAG_NO_DATA,
+               { },
+               { { 0, 0xffffffff } },
+               .fill_helper = bpf_fill_maxinsns8,
+       },
+       {       /* Mainly checking JIT here. */
+               "BPF_MAXINSNS: Very long jump backwards",
+               { },
+               INTERNAL | FLAG_NO_DATA,
+               { },
+               { { 0, 0xcbababab } },
+               .fill_helper = bpf_fill_maxinsns9,
+       },
+       {       /* Mainly checking JIT here. */
+               "BPF_MAXINSNS: Edge hopping nuthouse",
+               { },
+               INTERNAL | FLAG_NO_DATA,
+               { },
+               { { 0, 0xabababac } },
+               .fill_helper = bpf_fill_maxinsns10,
+       },
+       {
+               "BPF_MAXINSNS: Jump, gap, jump, ...",
+               { },
+               CLASSIC | FLAG_NO_DATA,
+               { },
+               { { 0, 0xababcbac } },
+               .fill_helper = bpf_fill_maxinsns11,
+       },
+};
+
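
The BPF_JMP_IMM()/BPF_JMP_REG() initializers used throughout this table
come from include/linux/filter.h; roughly (paraphrased from the header,
not part of this patch):

#define BPF_JMP_IMM(OP, DST, IMM, OFF)				\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_OP(OP) | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = OFF,					\
		.imm   = IMM })

#define BPF_JMP_REG(OP, DST, SRC, OFF)				\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_OP(OP) | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

Jump offsets count struct bpf_insn slots, and BPF_LD_IMM64() occupies two
of them, which is why the backwards jump in the "JMP_JGT_K: ... (jump
backwards)" test needs an offset of -6 to land on the "out:" mov.
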
+static struct net_device dev;
+
+static struct sk_buff *populate_skb(char *buf, int size)
+{
+       struct sk_buff *skb;
+
+       if (size >= MAX_DATA)
+               return NULL;
+
+       skb = alloc_skb(MAX_DATA, GFP_KERNEL);
+       if (!skb)
+               return NULL;
+
+       memcpy(__skb_put(skb, size), buf, size);
+
+       /* Initialize a fake skb with test pattern. */
+       skb_reset_mac_header(skb);
+       skb->protocol = htons(ETH_P_IP);
+       skb->pkt_type = SKB_TYPE;
+       skb->mark = SKB_MARK;
+       skb->hash = SKB_HASH;
+       skb->queue_mapping = SKB_QUEUE_MAP;
+       skb->vlan_tci = SKB_VLAN_TCI;
+       skb->dev = &dev;
+       skb->dev->ifindex = SKB_DEV_IFINDEX;
+       skb->dev->type = SKB_DEV_TYPE;
+       skb_set_network_header(skb, min(size, ETH_HLEN));
+
+       return skb;
+}
+
+static void *generate_test_data(struct bpf_test *test, int sub)
+{
+       if (test->aux & FLAG_NO_DATA)
+               return NULL;
+
+       /* Test case expects an skb, so populate one. Various
+        * subtests generate skbs of different sizes based on
+        * the same data.
+        */
+       return populate_skb(test->data, test->test[sub].data_size);
+}
+
+static void release_test_data(const struct bpf_test *test, void *data)
+{
+       if (test->aux & FLAG_NO_DATA)
+               return;
+
+       kfree_skb(data);
+}
+
+static int filter_length(int which)
+{
+       struct sock_filter *fp;
+       int len;
+
+       if (tests[which].fill_helper)
+               return tests[which].u.ptr.len;
+
+       fp = tests[which].u.insns;
+       for (len = MAX_INSNS - 1; len > 0; --len)
+               if (fp[len].code != 0 || fp[len].k != 0)
+                       break;
+
+       return len + 1;
+}
+
+static void *filter_pointer(int which)
+{
+       if (tests[which].fill_helper)
+               return tests[which].u.ptr.insns;
+       else
+               return tests[which].u.insns;
+}
+
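
filter_length() and filter_pointer() lean on the struct bpf_test layout
declared near the top of the file; a rough sketch reconstructed from the
usage here, not a verbatim copy:

struct bpf_test {
	const char *descr;
	union {
		struct sock_filter insns[MAX_INSNS];	/* CLASSIC programs */
		struct bpf_insn insns_int[MAX_INSNS];	/* INTERNAL (eBPF) */
		struct {				/* fill_helper output */
			void *insns;
			unsigned int len;
		} ptr;
	} u;
	__u8 aux;			/* CLASSIC/INTERNAL plus FLAG_* bits */
	__u8 data[MAX_DATA];		/* skb payload template */
	struct {
		int data_size;		/* skb size for this subtest */
		__u32 result;		/* expected return value */
	} test[MAX_SUBTESTS];
	int (*fill_helper)(struct bpf_test *self);
};
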
+static struct bpf_prog *generate_filter(int which, int *err)
+{
+       __u8 test_type = tests[which].aux & TEST_TYPE_MASK;
+       unsigned int flen = filter_length(which);
+       void *fptr = filter_pointer(which);
+       struct sock_fprog_kern fprog;
+       struct bpf_prog *fp;
+
+       switch (test_type) {
+       case CLASSIC:
+               fprog.filter = fptr;
+               fprog.len = flen;
+
+               *err = bpf_prog_create(&fp, &fprog);
+               if (tests[which].aux & FLAG_EXPECTED_FAIL) {
+                       if (*err == -EINVAL) {
+                               pr_cont("PASS\n");
+                               /* Verifier rejected filter as expected. */
+                               *err = 0;
+                               return NULL;
+                       } else {
+                               pr_cont("UNEXPECTED_PASS\n");
+                               /* Verifier didn't reject the filter even
+                                * though it should have; that's bad
+                                * enough, just return.
+                                */
+                               *err = -EINVAL;
+                               return NULL;
+                       }
+               }
+               /* We don't expect to fail. */
+               if (*err) {
+                       pr_cont("FAIL to attach err=%d len=%d\n",
+                               *err, fprog.len);
+                       return NULL;
+               }
+               break;
+
+       case INTERNAL:
+               fp = bpf_prog_alloc(bpf_prog_size(flen), 0);
+               if (fp == NULL) {
+                       pr_cont("UNEXPECTED_FAIL no memory left\n");
+                       *err = -ENOMEM;
+                       return NULL;
+               }
+
+               fp->len = flen;
+               memcpy(fp->insnsi, fptr, fp->len * sizeof(struct bpf_insn));
+
+               bpf_prog_select_runtime(fp);
+               break;
+       }
+
+       *err = 0;
+       return fp;
+}
+
+static void release_filter(struct bpf_prog *fp, int which)
+{
+       __u8 test_type = tests[which].aux & TEST_TYPE_MASK;
+
+       switch (test_type) {
+       case CLASSIC:
+               bpf_prog_destroy(fp);
+               break;
+       case INTERNAL:
+               bpf_prog_free(fp);
+               break;
+       }
+}
+
+static int __run_one(const struct bpf_prog *fp, const void *data,
+                    int runs, u64 *duration)
+{
+       u64 start, finish;
+       int ret = 0, i;
+
+       start = ktime_to_us(ktime_get());
+
+       for (i = 0; i < runs; i++)
+               ret = BPF_PROG_RUN(fp, data);
+
+       finish = ktime_to_us(ktime_get());
+
+       /* Convert the elapsed us to ns, then average over all runs. */
+       *duration = (finish - start) * 1000ULL;
+       do_div(*duration, runs);
+
+       return ret;
+}
+
+static int run_one(const struct bpf_prog *fp, struct bpf_test *test)
+{
+       int err_cnt = 0, i, runs = MAX_TESTRUNS;
+
+       for (i = 0; i < MAX_SUBTESTS; i++) {
+               void *data;
+               u64 duration;
+               u32 ret;
+
                if (test->test[i].data_size == 0 &&
                    test->test[i].result == 0)
                        break;
@@ -1987,9 +4587,33 @@ static int run_one(const struct bpf_prog *fp, struct bpf_test *test)
        return err_cnt;
 }
 
+static __init int prepare_bpf_tests(void)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(tests); i++) {
+               if (tests[i].fill_helper &&
+                   tests[i].fill_helper(&tests[i]) < 0)
+                       return -ENOMEM;
+       }
+
+       return 0;
+}
+
+static __init void destroy_bpf_tests(void)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(tests); i++) {
+               if (tests[i].fill_helper)
+                       kfree(tests[i].u.ptr.insns);
+       }
+}
+
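
prepare_bpf_tests() and destroy_bpf_tests() pin down the fill_helper
contract: on success the helper must point u.ptr.insns at a kmalloc'ed
program and set u.ptr.len, since that is exactly what gets kfree'd on
teardown. A minimal conforming helper might look like this
(bpf_fill_example() is hypothetical):

static int bpf_fill_example(struct bpf_test *self)
{
	unsigned int len = BPF_MAXINSNS;
	struct sock_filter *insn;
	unsigned int i;

	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
	if (!insn)
		return -ENOMEM;

	/* len - 1 immediate loads followed by a RET. */
	for (i = 0; i < len - 1; i++)
		insn[i] = (struct sock_filter) BPF_STMT(BPF_LD | BPF_IMM, i);
	insn[len - 1] = (struct sock_filter) BPF_STMT(BPF_RET | BPF_K, 1);

	self->u.ptr.insns = insn;
	self->u.ptr.len = len;

	return 0;
}
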
 static __init int test_bpf(void)
 {
        int i, err_cnt = 0, pass_cnt = 0;
+       int jit_cnt = 0, run_cnt = 0;
 
        for (i = 0; i < ARRAY_SIZE(tests); i++) {
                struct bpf_prog *fp;
@@ -2006,6 +4630,13 @@ static __init int test_bpf(void)
 
                        return err;
                }
+
+               pr_cont("jited:%u ", fp->jited);
+
+               run_cnt++;
+               if (fp->jited)
+                       jit_cnt++;
+
                err = run_one(fp, &tests[i]);
                release_filter(fp, i);
 
@@ -2018,13 +4649,24 @@ static __init int test_bpf(void)
                }
        }
 
-       pr_info("Summary: %d PASSED, %d FAILED\n", pass_cnt, err_cnt);
+       pr_info("Summary: %d PASSED, %d FAILED, [%d/%d JIT'ed]\n",
+               pass_cnt, err_cnt, jit_cnt, run_cnt);
+
        return err_cnt ? -EINVAL : 0;
 }
 
 static int __init test_bpf_init(void)
 {
-       return test_bpf();
+       int ret;
+
+       ret = prepare_bpf_tests();
+       if (ret < 0)
+               return ret;
+
+       ret = test_bpf();
+
+       destroy_bpf_tests();
+       return ret;
 }
 
 static void __exit test_bpf_exit(void)
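
The per-test "jited:%u" marker and the [%d/%d JIT'ed] summary added above
make JIT coverage visible straight from the module output: loading the
module once with the JIT disabled and once with it enabled (via the
net.core.bpf_jit_enable sysctl) should show the same PASSED/FAILED totals
with different JIT counts.
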
index b2957540d3c722d5c7b3d9f94e5b5a1d6c9d7975..c90777eae1f837f84b1b53fd8704b567b9d90835 100644 (file)
@@ -1,14 +1,9 @@
 /*
  * Resizable, Scalable, Concurrent Hash Table
  *
- * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch>
+ * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
  * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
  *
- * Based on the following paper:
- * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
- *
- * Code partially derived from nft_hash
- *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
 #include <linux/rhashtable.h>
 #include <linux/slab.h>
 
+#define MAX_ENTRIES    1000000
+#define TEST_INSERT_FAIL INT_MAX
+
+static int entries = 50000;
+module_param(entries, int, 0);
+MODULE_PARM_DESC(entries, "Number of entries to add (default: 50000)");
+
+static int runs = 4;
+module_param(runs, int, 0);
+MODULE_PARM_DESC(runs, "Number of test runs per variant (default: 4)");
+
+static int max_size = 65536;
+module_param(max_size, int, 0);
+MODULE_PARM_DESC(runs, "Maximum table size (default: 65536)");
 
-#define TEST_HT_SIZE   8
-#define TEST_ENTRIES   2048
-#define TEST_PTR       ((void *) 0xdeadbeef)
-#define TEST_NEXPANDS  4
+static bool shrinking = false;
+module_param(shrinking, bool, 0);
+MODULE_PARM_DESC(shrinking, "Enable automatic shrinking (default: off)");
+
+static int size = 8;
+module_param(size, int, 0);
+MODULE_PARM_DESC(size, "Initial size hint of table (default: 8)");
 
 struct test_obj {
-       void                    *ptr;
        int                     value;
        struct rhash_head       node;
 };
 
-static const struct rhashtable_params test_rht_params = {
-       .nelem_hint = TEST_HT_SIZE,
+static struct test_obj array[MAX_ENTRIES];
+
+static struct rhashtable_params test_rht_params = {
        .head_offset = offsetof(struct test_obj, node),
        .key_offset = offsetof(struct test_obj, value),
        .key_len = sizeof(int),
@@ -51,11 +63,14 @@ static int __init test_rht_lookup(struct rhashtable *ht)
 {
        unsigned int i;
 
-       for (i = 0; i < TEST_ENTRIES * 2; i++) {
+       for (i = 0; i < entries * 2; i++) {
                struct test_obj *obj;
                bool expected = !(i % 2);
                u32 key = i;
 
+               if (array[i / 2].value == TEST_INSERT_FAIL)
+                       expected = false;
+
                obj = rhashtable_lookup_fast(ht, &key, test_rht_params);
 
                if (expected && !obj) {
@@ -66,9 +81,9 @@ static int __init test_rht_lookup(struct rhashtable *ht)
                                key);
                        return -EEXIST;
                } else if (expected && obj) {
-                       if (obj->ptr != TEST_PTR || obj->value != i) {
-                               pr_warn("Test failed: Lookup value mismatch %p!=%p, %u!=%u\n",
-                                       obj->ptr, TEST_PTR, obj->value, i);
+                       if (obj->value != i) {
+                               pr_warn("Test failed: Lookup value mismatch %u!=%u\n",
+                                       obj->value, i);
                                return -EINVAL;
                        }
                }
@@ -77,129 +92,147 @@ static int __init test_rht_lookup(struct rhashtable *ht)
        return 0;
 }
 
-static void test_bucket_stats(struct rhashtable *ht, bool quiet)
+static void test_bucket_stats(struct rhashtable *ht)
 {
-       unsigned int cnt, rcu_cnt, i, total = 0;
+       unsigned int err, total = 0, chain_len = 0;
+       struct rhashtable_iter hti;
        struct rhash_head *pos;
-       struct test_obj *obj;
-       struct bucket_table *tbl;
 
-       tbl = rht_dereference_rcu(ht->tbl, ht);
-       for (i = 0; i < tbl->size; i++) {
-               rcu_cnt = cnt = 0;
+       err = rhashtable_walk_init(ht, &hti);
+       if (err) {
+               pr_warn("Test failed: allocation error");
+               return;
+       }
 
-               if (!quiet)
-                       pr_info(" [%#4x/%u]", i, tbl->size);
+       err = rhashtable_walk_start(&hti);
+       if (err && err != -EAGAIN) {
+               pr_warn("Test failed: iterator failed: %d\n", err);
+               return;
+       }
 
-               rht_for_each_entry_rcu(obj, pos, tbl, i, node) {
-                       cnt++;
-                       total++;
-                       if (!quiet)
-                               pr_cont(" [%p],", obj);
+       while ((pos = rhashtable_walk_next(&hti))) {
+               if (PTR_ERR(pos) == -EAGAIN) {
+                       pr_info("Info: encountered resize\n");
+                       chain_len++;
+                       continue;
+               } else if (IS_ERR(pos)) {
+                       pr_warn("Test failed: rhashtable_walk_next() error: %ld\n",
+                               PTR_ERR(pos));
+                       break;
                }
 
-               rht_for_each_entry_rcu(obj, pos, tbl, i, node)
-                       rcu_cnt++;
-
-               if (rcu_cnt != cnt)
-                       pr_warn("Test failed: Chain count mismach %d != %d",
-                               cnt, rcu_cnt);
-
-               if (!quiet)
-                       pr_cont("\n  [%#x] first element: %p, chain length: %u\n",
-                               i, tbl->buckets[i], cnt);
+               total++;
        }
 
-       pr_info("  Traversal complete: counted=%u, nelems=%u, entries=%d\n",
-               total, atomic_read(&ht->nelems), TEST_ENTRIES);
+       rhashtable_walk_stop(&hti);
+       rhashtable_walk_exit(&hti);
+
+       pr_info("  Traversal complete: counted=%u, nelems=%u, entries=%d, table-jumps=%u\n",
+               total, atomic_read(&ht->nelems), entries, chain_len);
 
-       if (total != atomic_read(&ht->nelems) || total != TEST_ENTRIES)
+       if (total != atomic_read(&ht->nelems) || total != entries)
                pr_warn("Test failed: Total count mismatch ^^^");
 }
 
-static int __init test_rhashtable(struct rhashtable *ht)
+static s64 __init test_rhashtable(struct rhashtable *ht)
 {
-       struct bucket_table *tbl;
        struct test_obj *obj;
-       struct rhash_head *pos, *next;
        int err;
-       unsigned int i;
+       unsigned int i, insert_fails = 0;
+       s64 start, end;
 
        /*
         * Insertion Test:
-        * Insert TEST_ENTRIES into table with all keys even numbers
+        * Insert entries into table with all keys even numbers
         */
-       pr_info("  Adding %d keys\n", TEST_ENTRIES);
-       for (i = 0; i < TEST_ENTRIES; i++) {
-               struct test_obj *obj;
-
-               obj = kzalloc(sizeof(*obj), GFP_KERNEL);
-               if (!obj) {
-                       err = -ENOMEM;
-                       goto error;
-               }
+       pr_info("  Adding %d keys\n", entries);
+       start = ktime_get_ns();
+       for (i = 0; i < entries; i++) {
+               struct test_obj *obj = &array[i];
 
-               obj->ptr = TEST_PTR;
                obj->value = i * 2;
 
                err = rhashtable_insert_fast(ht, &obj->node, test_rht_params);
-               if (err) {
-                       kfree(obj);
-                       goto error;
+               if (err == -ENOMEM || err == -EBUSY) {
+                       /* Mark failed inserts but continue */
+                       obj->value = TEST_INSERT_FAIL;
+                       insert_fails++;
+               } else if (err) {
+                       return err;
                }
        }
 
+       if (insert_fails)
+               pr_info("  %u insertions failed due to memory pressure\n",
+                       insert_fails);
+
+       test_bucket_stats(ht);
        rcu_read_lock();
-       test_bucket_stats(ht, true);
        test_rht_lookup(ht);
        rcu_read_unlock();
 
-       rcu_read_lock();
-       test_bucket_stats(ht, true);
-       rcu_read_unlock();
+       test_bucket_stats(ht);
 
-       pr_info("  Deleting %d keys\n", TEST_ENTRIES);
-       for (i = 0; i < TEST_ENTRIES; i++) {
+       pr_info("  Deleting %d keys\n", entries);
+       for (i = 0; i < entries; i++) {
                u32 key = i * 2;
 
-               obj = rhashtable_lookup_fast(ht, &key, test_rht_params);
-               BUG_ON(!obj);
+               if (array[i].value != TEST_INSERT_FAIL) {
+                       obj = rhashtable_lookup_fast(ht, &key, test_rht_params);
+                       BUG_ON(!obj);
 
-               rhashtable_remove_fast(ht, &obj->node, test_rht_params);
-               kfree(obj);
+                       rhashtable_remove_fast(ht, &obj->node, test_rht_params);
+               }
        }
 
-       return 0;
-
-error:
-       tbl = rht_dereference_rcu(ht->tbl, ht);
-       for (i = 0; i < tbl->size; i++)
-               rht_for_each_entry_safe(obj, pos, next, tbl, i, node)
-                       kfree(obj);
+       end = ktime_get_ns();
+       pr_info("  Duration of test: %lld ns\n", end - start);
 
-       return err;
+       return end - start;
 }
 
 static struct rhashtable ht;
 
 static int __init test_rht_init(void)
 {
-       int err;
+       int i, err;
+       u64 total_time = 0;
 
-       pr_info("Running resizable hashtable tests...\n");
+       entries = min(entries, MAX_ENTRIES);
 
-       err = rhashtable_init(&ht, &test_rht_params);
-       if (err < 0) {
-               pr_warn("Test failed: Unable to initialize hashtable: %d\n",
-                       err);
-               return err;
-       }
+       test_rht_params.automatic_shrinking = shrinking;
+       test_rht_params.max_size = max_size;
+       test_rht_params.nelem_hint = size;
 
-       err = test_rhashtable(&ht);
+       pr_info("Running rhashtable test nelem=%d, max_size=%d, shrinking=%d\n",
+               size, max_size, shrinking);
 
-       rhashtable_destroy(&ht);
+       for (i = 0; i < runs; i++) {
+               s64 time;
 
-       return err;
+               pr_info("Test %02d:\n", i);
+               memset(&array, 0, sizeof(array));
+               err = rhashtable_init(&ht, &test_rht_params);
+               if (err < 0) {
+                       pr_warn("Test failed: Unable to initialize hashtable: %d\n",
+                               err);
+                       continue;
+               }
+
+               time = test_rhashtable(&ht);
+               rhashtable_destroy(&ht);
+               if (time < 0) {
+                       pr_warn("Test failed: return code %lld\n", time);
+                       return -EINVAL;
+               }
+
+               total_time += time;
+       }
+
+       do_div(total_time, runs);
+       pr_info("Average test time: %llu\n", total_time);
+
+       return 0;
 }
 
 static void __exit test_rht_exit(void)
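
Since the tunables are now module parameters, exercising different load
profiles no longer requires a rebuild; a sketch of an invocation, with
the module name assumed from the source file:

  modprobe test_rhashtable entries=100000 runs=8 max_size=131072 shrinking=1

Each run starts from a zeroed object array and a freshly initialized
table, and the average duration over all runs is printed at the end.
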
index ebffa0e4a9c0451cfbd78bcf4835825ffd72c6de..2fd31aebef30c4abfb27d8890787bc914e880356 100644 (file)
@@ -2966,6 +2966,104 @@ void free_pages(unsigned long addr, unsigned int order)
 
 EXPORT_SYMBOL(free_pages);
 
+/*
+ * Page Fragment:
+ *  An arbitrary-length, arbitrary-offset area of memory which resides
+ *  within a page of order 0 or higher.  Multiple fragments within that
+ *  page are individually refcounted, against the page's reference counter.
+ *
+ * The page_frag functions below provide a simple allocation framework for
+ * page fragments.  This is used by the network stack and network device
+ * drivers to provide a backing region of memory for use either as an
+ * sk_buff->head or in the "frags" portion of skb_shared_info.
+ */
+static struct page *__page_frag_refill(struct page_frag_cache *nc,
+                                      gfp_t gfp_mask)
+{
+       struct page *page = NULL;
+       gfp_t gfp = gfp_mask;
+
+#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
+       gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
+                   __GFP_NOMEMALLOC;
+       page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
+                               PAGE_FRAG_CACHE_MAX_ORDER);
+       nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
+#endif
+       if (unlikely(!page))
+               page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
+
+       nc->va = page ? page_address(page) : NULL;
+
+       return page;
+}
+
+void *__alloc_page_frag(struct page_frag_cache *nc,
+                       unsigned int fragsz, gfp_t gfp_mask)
+{
+       unsigned int size = PAGE_SIZE;
+       struct page *page;
+       int offset;
+
+       if (unlikely(!nc->va)) {
+refill:
+               page = __page_frag_refill(nc, gfp_mask);
+               if (!page)
+                       return NULL;
+
+#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
+               /* if the page size can vary, use the cached size; else PAGE_SIZE */
+               size = nc->size;
+#endif
+               /* Even if we own the page, we do not use atomic_set().
+                * This would break get_page_unless_zero() users.
+                */
+               atomic_add(size - 1, &page->_count);
+
+               /* reset page count bias and offset to start of new frag */
+               nc->pfmemalloc = page->pfmemalloc;
+               nc->pagecnt_bias = size;
+               nc->offset = size;
+       }
+
+       offset = nc->offset - fragsz;
+       if (unlikely(offset < 0)) {
+               page = virt_to_page(nc->va);
+
+               if (!atomic_sub_and_test(nc->pagecnt_bias, &page->_count))
+                       goto refill;
+
+#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
+               /* if the page size can vary, use the cached size; else PAGE_SIZE */
+               size = nc->size;
+#endif
+               /* OK, page count is 0, we can safely set it */
+               atomic_set(&page->_count, size);
+
+               /* reset page count bias and offset to start of new frag */
+               nc->pagecnt_bias = size;
+               offset = size - fragsz;
+       }
+
+       nc->pagecnt_bias--;
+       nc->offset = offset;
+
+       return nc->va + offset;
+}
+EXPORT_SYMBOL(__alloc_page_frag);
+
+/*
+ * Frees a page fragment allocated out of either a compound or order 0 page.
+ */
+void __free_page_frag(void *addr)
+{
+       struct page *page = virt_to_head_page(addr);
+
+       if (unlikely(put_page_testzero(page)))
+               __free_pages_ok(page, compound_order(page));
+}
+EXPORT_SYMBOL(__free_page_frag);
+
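
The two exports form a small, self-contained allocator: a caller owns a
struct page_frag_cache and carves variable-sized fragments out of a cached
(possibly compound) page, while pagecnt_bias batches what would otherwise
be one atomic refcount operation per fragment. A minimal usage sketch,
assuming a caller that serializes access to its own cache (the my_* names
are illustrative, not from this patch):

static struct page_frag_cache my_frag_cache;

static void *my_alloc_buf(unsigned int len)
{
	/* Returns len bytes carved from the cached page, refilling on demand. */
	return __alloc_page_frag(&my_frag_cache, len, GFP_ATOMIC);
}

static void my_free_buf(void *buf)
{
	/* Drops one fragment reference; the page is freed on the last put. */
	__free_page_frag(buf);
}
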
 /*
  * alloc_kmem_pages charges newly allocated pages to the kmem resource counter
  * of the current memory cgroup.
index 59555f0f8fc85b039cab18350906ae3c86a477af..d2cd9de4b7241dcc8c1df5fd7832b3b317799923 100644 (file)
@@ -618,6 +618,92 @@ out:
        return err;
 }
 
+static struct sk_buff **vlan_gro_receive(struct sk_buff **head,
+                                        struct sk_buff *skb)
+{
+       struct sk_buff *p, **pp = NULL;
+       struct vlan_hdr *vhdr;
+       unsigned int hlen, off_vlan;
+       const struct packet_offload *ptype;
+       __be16 type;
+       int flush = 1;
+
+       off_vlan = skb_gro_offset(skb);
+       hlen = off_vlan + sizeof(*vhdr);
+       vhdr = skb_gro_header_fast(skb, off_vlan);
+       if (skb_gro_header_hard(skb, hlen)) {
+               vhdr = skb_gro_header_slow(skb, hlen, off_vlan);
+               if (unlikely(!vhdr))
+                       goto out;
+       }
+
+       type = vhdr->h_vlan_encapsulated_proto;
+
+       rcu_read_lock();
+       ptype = gro_find_receive_by_type(type);
+       if (!ptype)
+               goto out_unlock;
+
+       flush = 0;
+
+       for (p = *head; p; p = p->next) {
+               struct vlan_hdr *vhdr2;
+
+               if (!NAPI_GRO_CB(p)->same_flow)
+                       continue;
+
+               vhdr2 = (struct vlan_hdr *)(p->data + off_vlan);
+               if (compare_vlan_header(vhdr, vhdr2))
+                       NAPI_GRO_CB(p)->same_flow = 0;
+       }
+
+       skb_gro_pull(skb, sizeof(*vhdr));
+       skb_gro_postpull_rcsum(skb, vhdr, sizeof(*vhdr));
+       pp = ptype->callbacks.gro_receive(head, skb);
+
+out_unlock:
+       rcu_read_unlock();
+out:
+       NAPI_GRO_CB(skb)->flush |= flush;
+
+       return pp;
+}
+
+static int vlan_gro_complete(struct sk_buff *skb, int nhoff)
+{
+       struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data + nhoff);
+       __be16 type = vhdr->h_vlan_encapsulated_proto;
+       struct packet_offload *ptype;
+       int err = -ENOENT;
+
+       rcu_read_lock();
+       ptype = gro_find_complete_by_type(type);
+       if (ptype)
+               err = ptype->callbacks.gro_complete(skb, nhoff + sizeof(*vhdr));
+
+       rcu_read_unlock();
+       return err;
+}
+
+static struct packet_offload vlan_packet_offloads[] __read_mostly = {
+       {
+               .type = cpu_to_be16(ETH_P_8021Q),
+               .priority = 10,
+               .callbacks = {
+                       .gro_receive = vlan_gro_receive,
+                       .gro_complete = vlan_gro_complete,
+               },
+       },
+       {
+               .type = cpu_to_be16(ETH_P_8021AD),
+               .priority = 10,
+               .callbacks = {
+                       .gro_receive = vlan_gro_receive,
+                       .gro_complete = vlan_gro_complete,
+               },
+       },
+};
+
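
vlan_gro_receive() follows the standard GRO receive contract: flows
already on the GRO list whose VLAN headers differ are dropped from the
candidate set via same_flow, the VLAN header is pulled, and the
encapsulated protocol's own gro_receive continues the merge.
compare_vlan_header() is introduced elsewhere in this series; its assumed
semantics, sketched here for reference (not the verbatim helper):

/* Nonzero iff the two VLAN headers differ in TCI or encapsulated proto. */
static inline unsigned long compare_vlan_header(const struct vlan_hdr *h1,
						const struct vlan_hdr *h2)
{
	return ((__force u32)h1->h_vlan_TCI ^ (__force u32)h2->h_vlan_TCI) |
	       ((__force u32)h1->h_vlan_encapsulated_proto ^
		(__force u32)h2->h_vlan_encapsulated_proto);
}
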
 static int __net_init vlan_init_net(struct net *net)
 {
        struct vlan_net *vn = net_generic(net, vlan_net_id);
@@ -645,6 +731,7 @@ static struct pernet_operations vlan_net_ops = {
 static int __init vlan_proto_init(void)
 {
        int err;
+       unsigned int i;
 
        pr_info("%s v%s\n", vlan_fullname, vlan_version);
 
@@ -668,6 +755,9 @@ static int __init vlan_proto_init(void)
        if (err < 0)
                goto err5;
 
+       for (i = 0; i < ARRAY_SIZE(vlan_packet_offloads); i++)
+               dev_add_offload(&vlan_packet_offloads[i]);
+
        vlan_ioctl_set(vlan_ioctl_handler);
        return 0;
 
@@ -685,7 +775,13 @@ err0:
 
 static void __exit vlan_cleanup_module(void)
 {
+       unsigned int i;
+
        vlan_ioctl_set(NULL);
+
+       for (i = 0; i < ARRAY_SIZE(vlan_packet_offloads); i++)
+               dev_remove_offload(&vlan_packet_offloads[i]);
+
        vlan_netlink_fini();
 
        unregister_netdevice_notifier(&vlan_notifier_block);
index 44dd5786ee91da16ae920d3c9f62b1f4bac353c8..57a7c5af3175d1826f0708c551227e5e4281577e 100644 (file)
@@ -45,6 +45,9 @@ config COMPAT_NETLINK_MESSAGES
          Newly written code should NEVER need this option but do
          compat-independent messages instead!
 
+config NET_INGRESS
+       bool
+
 menu "Networking options"
 
 source "net/packet/Kconfig"
index 3b7ad43c7dad948d192ace962f3a4d4422c47319..d5871ac493eb10c984b9e74f2beee3049bc79645 100644 (file)
@@ -1030,7 +1030,7 @@ static int atalk_create(struct net *net, struct socket *sock, int protocol,
        if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
                goto out;
        rc = -ENOMEM;
-       sk = sk_alloc(net, PF_APPLETALK, GFP_KERNEL, &ddp_proto);
+       sk = sk_alloc(net, PF_APPLETALK, GFP_KERNEL, &ddp_proto, kern);
        if (!sk)
                goto out;
        rc = 0;
index ed0466637e13326be6796e81cdd182d4096a1509..49a872db7e42581853cc2e8c1a6585285a3172d4 100644 (file)
@@ -141,7 +141,7 @@ static struct proto vcc_proto = {
        .release_cb = vcc_release_cb,
 };
 
-int vcc_create(struct net *net, struct socket *sock, int protocol, int family)
+int vcc_create(struct net *net, struct socket *sock, int protocol, int family, int kern)
 {
        struct sock *sk;
        struct atm_vcc *vcc;
@@ -149,7 +149,7 @@ int vcc_create(struct net *net, struct socket *sock, int protocol, int family)
        sock->sk = NULL;
        if (sock->type == SOCK_STREAM)
                return -EINVAL;
-       sk = sk_alloc(net, family, GFP_KERNEL, &vcc_proto);
+       sk = sk_alloc(net, family, GFP_KERNEL, &vcc_proto, kern);
        if (!sk)
                return -ENOMEM;
        sock_init_data(sock, sk);
index 4d6f5b2068ac704a7bbbc720b0d99174a7d523b6..959436b871822d0aad8769de9ee0f81e78cc1bfe 100644 (file)
@@ -10,7 +10,7 @@
 #include <linux/poll.h> /* for poll_table */
 
 
-int vcc_create(struct net *net, struct socket *sock, int protocol, int family);
+int vcc_create(struct net *net, struct socket *sock, int protocol, int family, int kern);
 int vcc_release(struct socket *sock);
 int vcc_connect(struct socket *sock, int itf, short vpi, int vci);
 int vcc_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
index ae0324021407c2b61aaf7c45a39bebb9d419972c..040207ec399fb45f5ac317c89f116d3020c8705d 100644 (file)
@@ -136,7 +136,7 @@ static int pvc_create(struct net *net, struct socket *sock, int protocol,
                return -EAFNOSUPPORT;
 
        sock->ops = &pvc_proto_ops;
-       return vcc_create(net, sock, protocol, PF_ATMPVC);
+       return vcc_create(net, sock, protocol, PF_ATMPVC, kern);
 }
 
 static const struct net_proto_family pvc_family_ops = {
index 1ba23f5018e76199a7fb19be5079b512822f3dab..3fa0a9ee98d19ba5635554362ce23c3f80f0de69 100644 (file)
@@ -660,7 +660,7 @@ static int svc_create(struct net *net, struct socket *sock, int protocol,
                return -EAFNOSUPPORT;
 
        sock->ops = &svc_proto_ops;
-       error = vcc_create(net, sock, protocol, AF_ATMSVC);
+       error = vcc_create(net, sock, protocol, AF_ATMSVC, kern);
        if (error)
                return error;
        ATM_SD(sock)->local.sas_family = AF_ATMSVC;
index 330c1f4a5a0b6edfca55bece27b38d1e30482d55..4273533d22b1d56f0c3a34d61336f144a227932c 100644 (file)
@@ -855,7 +855,7 @@ static int ax25_create(struct net *net, struct socket *sock, int protocol,
                return -ESOCKTNOSUPPORT;
        }
 
-       sk = sk_alloc(net, PF_AX25, GFP_ATOMIC, &ax25_proto);
+       sk = sk_alloc(net, PF_AX25, GFP_ATOMIC, &ax25_proto, kern);
        if (sk == NULL)
                return -ENOMEM;
 
@@ -881,7 +881,7 @@ struct sock *ax25_make_new(struct sock *osk, struct ax25_dev *ax25_dev)
        struct sock *sk;
        ax25_cb *ax25, *oax25;
 
-       sk = sk_alloc(sock_net(osk), PF_AX25, GFP_ATOMIC,       osk->sk_prot);
+       sk = sk_alloc(sock_net(osk), PF_AX25, GFP_ATOMIC, osk->sk_prot, 0);
        if (sk == NULL)
                return NULL;
 
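
The sk_alloc() conversions in this series all follow the same mechanical
pattern: the kern flag that net_proto_family.create() already receives
(nonzero when the socket is created from inside the kernel rather than by
a user syscall) is forwarded into sk_alloc(), while sockets cloned from an
existing one pass a literal 0, as ax25_make_new() does above. A sketch of
a converted create handler, with the foo_* names as stand-ins:

static int foo_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct sock *sk;

	if (sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	/* Forward 'kern' so sk_alloc() can account kernel-internal
	 * sockets differently from user-created ones.
	 */
	sk = sk_alloc(net, PF_FOO, GFP_KERNEL, &foo_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);
	return 0;
}
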
index eb7d8c0388e4a32a52ba076460463a722d0db612..21434ab79d2ce7ad4a959be69aebcaac7771233e 100644 (file)
@@ -1,5 +1,5 @@
 #
-# Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
+# Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
 #
 # Marek Lindner, Simon Wunderlich
 #
@@ -20,7 +20,7 @@ obj-$(CONFIG_BATMAN_ADV) += batman-adv.o
 batman-adv-y += bat_iv_ogm.o
 batman-adv-y += bitarray.o
 batman-adv-$(CONFIG_BATMAN_ADV_BLA) += bridge_loop_avoidance.o
-batman-adv-y += debugfs.o
+batman-adv-$(CONFIG_DEBUG_FS) += debugfs.o
 batman-adv-$(CONFIG_BATMAN_ADV_DAT) += distributed-arp-table.o
 batman-adv-y += fragmentation.o
 batman-adv-y += gateway_client.o
@@ -29,6 +29,7 @@ batman-adv-y += hard-interface.o
 batman-adv-y += hash.o
 batman-adv-y += icmp_socket.o
 batman-adv-y += main.o
+batman-adv-$(CONFIG_BATMAN_ADV_MCAST) += multicast.o
 batman-adv-$(CONFIG_BATMAN_ADV_NC) += network-coding.o
 batman-adv-y += originator.o
 batman-adv-y += routing.o
@@ -36,4 +37,3 @@ batman-adv-y += send.o
 batman-adv-y += soft-interface.o
 batman-adv-y += sysfs.o
 batman-adv-y += translation-table.o
-batman-adv-$(CONFIG_BATMAN_ADV_MCAST) += multicast.o
index 4e49666f8c653d8751e20692587f028b99001677..4e59cf3eb079ec0c1d1836a0be39e95901146b91 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2011-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2015 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
index 00e00e09b0003ea80585151d854bc8b4559aee50..4e93d2d3958b42f4ecebf79989ab2b9617c0ef13 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -28,7 +28,7 @@
 
 /**
  * enum batadv_dup_status - duplicate status
- * @BATADV_NO_DUP: the packet is a duplicate
+ * @BATADV_NO_DUP: the packet is not a duplicate
  * @BATADV_ORIG_DUP: OGM is a duplicate in the originator (but not for the
  *  neighbor)
  * @BATADV_NEIGH_DUP: OGM is a duplicate for the neighbor
@@ -55,7 +55,7 @@ static void batadv_ring_buffer_set(uint8_t lq_recv[], uint8_t *lq_index,
 }
 
 /**
- * batadv_ring_buffer_set - compute the average of all non-zero values stored
+ * batadv_ring_buffer_avg - compute the average of all non-zero values stored
  * in the given ring buffer
  * @lq_recv: pointer to the ring buffer
  *
@@ -64,7 +64,9 @@ static void batadv_ring_buffer_set(uint8_t lq_recv[], uint8_t *lq_index,
 static uint8_t batadv_ring_buffer_avg(const uint8_t lq_recv[])
 {
        const uint8_t *ptr;
-       uint16_t count = 0, i = 0, sum = 0;
+       uint16_t count = 0;
+       uint16_t i = 0;
+       uint16_t sum = 0;
 
        ptr = lq_recv;
 
@@ -308,7 +310,6 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
        struct batadv_ogm_packet *batadv_ogm_packet;
        unsigned char *ogm_buff;
        uint32_t random_seqno;
-       int res = -ENOMEM;
 
        /* randomize initial seqno to avoid collision */
        get_random_bytes(&random_seqno, sizeof(random_seqno));
@@ -317,7 +318,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
        hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
        ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
        if (!ogm_buff)
-               goto out;
+               return -ENOMEM;
 
        hard_iface->bat_iv.ogm_buff = ogm_buff;
 
@@ -329,10 +330,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
        batadv_ogm_packet->reserved = 0;
        batadv_ogm_packet->tq = BATADV_TQ_MAX_VALUE;
 
-       res = 0;
-
-out:
-       return res;
+       return 0;
 }
 
 static void batadv_iv_ogm_iface_disable(struct batadv_hard_iface *hard_iface)
@@ -396,8 +394,8 @@ static uint8_t batadv_hop_penalty(uint8_t tq,
 }
 
 /* is there another aggregated packet here? */
-static int batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len,
-                                    __be16 tvlv_len)
+static bool batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len,
+                                     __be16 tvlv_len)
 {
        int next_buff_pos = 0;
 
@@ -413,7 +411,7 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
                                     struct batadv_hard_iface *hard_iface)
 {
        struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
-       char *fwd_str;
+       const char *fwd_str;
        uint8_t packet_num;
        int16_t buff_pos;
        struct batadv_ogm_packet *batadv_ogm_packet;
@@ -548,58 +546,62 @@ batadv_iv_ogm_can_aggregate(const struct batadv_ogm_packet *new_bat_ogm_packet,
         * - the send time is within our MAX_AGGREGATION_MS time
         * - the resulting packet won't be bigger than
         *   MAX_AGGREGATION_BYTES
+        * otherwise aggregation is not possible.
         */
-       if (time_before(send_time, forw_packet->send_time) &&
-           time_after_eq(aggregation_end_time, forw_packet->send_time) &&
-           (aggregated_bytes <= BATADV_MAX_AGGREGATION_BYTES)) {
-               /* check aggregation compatibility
-                * -> direct link packets are broadcasted on
-                *    their interface only
-                * -> aggregate packet if the current packet is
-                *    a "global" packet as well as the base
-                *    packet
-                */
-               primary_if = batadv_primary_if_get_selected(bat_priv);
-               if (!primary_if)
-                       goto out;
-
-               /* packet is not leaving on the same interface. */
-               if (forw_packet->if_outgoing != if_outgoing)
-                       goto out;
+       if (!time_before(send_time, forw_packet->send_time) ||
+           !time_after_eq(aggregation_end_time, forw_packet->send_time))
+               return false;
+
+       if (aggregated_bytes > BATADV_MAX_AGGREGATION_BYTES)
+               return false;
+
+       /* packet is not leaving on the same interface. */
+       if (forw_packet->if_outgoing != if_outgoing)
+               return false;
+
+       /* check aggregation compatibility
+        * -> direct link packets are broadcasted on
+        *    their interface only
+        * -> aggregate packet if the current packet is
+        *    a "global" packet as well as the base
+        *    packet
+        */
+       primary_if = batadv_primary_if_get_selected(bat_priv);
+       if (!primary_if)
+               return false;
 
-               /* packets without direct link flag and high TTL
-                * are flooded through the net
-                */
-               if ((!directlink) &&
-                   (!(batadv_ogm_packet->flags & BATADV_DIRECTLINK)) &&
-                   (batadv_ogm_packet->ttl != 1) &&
-
-                   /* own packets originating non-primary
-                    * interfaces leave only that interface
-                    */
-                   ((!forw_packet->own) ||
-                    (forw_packet->if_incoming == primary_if))) {
-                       res = true;
-                       goto out;
-               }
+       /* packets without direct link flag and high TTL
+        * are flooded through the net
+        */
+       if (!directlink &&
+           !(batadv_ogm_packet->flags & BATADV_DIRECTLINK) &&
+           batadv_ogm_packet->ttl != 1 &&
+
+           /* own packets originating non-primary
+            * interfaces leave only that interface
+            */
+           (!forw_packet->own ||
+            forw_packet->if_incoming == primary_if)) {
+               res = true;
+               goto out;
+       }
 
-               /* if the incoming packet is sent via this one
-                * interface only - we still can aggregate
-                */
-               if ((directlink) &&
-                   (new_bat_ogm_packet->ttl == 1) &&
-                   (forw_packet->if_incoming == if_incoming) &&
-
-                   /* packets from direct neighbors or
-                    * own secondary interface packets
-                    * (= secondary interface packets in general)
-                    */
-                   (batadv_ogm_packet->flags & BATADV_DIRECTLINK ||
-                    (forw_packet->own &&
-                     forw_packet->if_incoming != primary_if))) {
-                       res = true;
-                       goto out;
-               }
+       /* if the incoming packet is sent via this one
+        * interface only - we still can aggregate
+        */
+       if (directlink &&
+           new_bat_ogm_packet->ttl == 1 &&
+           forw_packet->if_incoming == if_incoming &&
+
+           /* packets from direct neighbors or
+            * own secondary interface packets
+            * (= secondary interface packets in general)
+            */
+           (batadv_ogm_packet->flags & BATADV_DIRECTLINK ||
+            (forw_packet->own &&
+             forw_packet->if_incoming != primary_if))) {
+               res = true;
+               goto out;
        }
 
 out:
@@ -642,19 +644,16 @@ static void batadv_iv_ogm_aggregate_new(const unsigned char *packet_buff,
                if (!batadv_atomic_dec_not_zero(&bat_priv->batman_queue_left)) {
                        batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                                   "batman packet queue full\n");
-                       goto out;
+                       goto out_free_outgoing;
                }
        }
 
        forw_packet_aggr = kmalloc(sizeof(*forw_packet_aggr), GFP_ATOMIC);
-       if (!forw_packet_aggr) {
-               if (!own_packet)
-                       atomic_inc(&bat_priv->batman_queue_left);
-               goto out;
-       }
+       if (!forw_packet_aggr)
+               goto out_nomem;
 
-       if ((atomic_read(&bat_priv->aggregated_ogms)) &&
-           (packet_len < BATADV_MAX_AGGREGATION_BYTES))
+       if (atomic_read(&bat_priv->aggregated_ogms) &&
+           packet_len < BATADV_MAX_AGGREGATION_BYTES)
                skb_size = BATADV_MAX_AGGREGATION_BYTES;
        else
                skb_size = packet_len;
@@ -662,12 +661,8 @@ static void batadv_iv_ogm_aggregate_new(const unsigned char *packet_buff,
        skb_size += ETH_HLEN;
 
        forw_packet_aggr->skb = netdev_alloc_skb_ip_align(NULL, skb_size);
-       if (!forw_packet_aggr->skb) {
-               if (!own_packet)
-                       atomic_inc(&bat_priv->batman_queue_left);
-               kfree(forw_packet_aggr);
-               goto out;
-       }
+       if (!forw_packet_aggr->skb)
+               goto out_free_forw_packet;
        forw_packet_aggr->skb->priority = TC_PRIO_CONTROL;
        skb_reserve(forw_packet_aggr->skb, ETH_HLEN);
 
@@ -699,7 +694,12 @@ static void batadv_iv_ogm_aggregate_new(const unsigned char *packet_buff,
                           send_time - jiffies);
 
        return;
-out:
+out_free_forw_packet:
+       kfree(forw_packet_aggr);
+out_nomem:
+       if (!own_packet)
+               atomic_inc(&bat_priv->batman_queue_left);
+out_free_outgoing:
        batadv_hardif_free_ref(if_outgoing);
 out_free_incoming:
        batadv_hardif_free_ref(if_incoming);
@@ -752,13 +752,13 @@ static void batadv_iv_ogm_queue_add(struct batadv_priv *bat_priv,
        unsigned long max_aggregation_jiffies;
 
        batadv_ogm_packet = (struct batadv_ogm_packet *)packet_buff;
-       direct_link = batadv_ogm_packet->flags & BATADV_DIRECTLINK ? 1 : 0;
+       direct_link = !!(batadv_ogm_packet->flags & BATADV_DIRECTLINK);
        max_aggregation_jiffies = msecs_to_jiffies(BATADV_MAX_AGGREGATION_MS);
 
        /* find position for the packet in the forward queue */
        spin_lock_bh(&bat_priv->forw_bat_list_lock);
        /* own packets are not to be aggregated */
-       if ((atomic_read(&bat_priv->aggregated_ogms)) && (!own_packet)) {
+       if (atomic_read(&bat_priv->aggregated_ogms) && !own_packet) {
                hlist_for_each_entry(forw_packet_pos,
                                     &bat_priv->forw_bat_list, list) {
                        if (batadv_iv_ogm_can_aggregate(batadv_ogm_packet,
@@ -1034,9 +1034,10 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
                batadv_orig_node_free_ref(orig_tmp);
                if (!neigh_node)
                        goto unlock;
-       } else
+       } else {
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                           "Updating existing last-hop neighbor of originator\n");
+       }
 
        rcu_read_unlock();
        neigh_ifinfo = batadv_neigh_ifinfo_new(neigh_node, if_outgoing);
@@ -1081,7 +1082,7 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
         * won't consider it either
         */
        if (router_ifinfo &&
-           (neigh_ifinfo->bat_iv.tq_avg == router_ifinfo->bat_iv.tq_avg)) {
+           neigh_ifinfo->bat_iv.tq_avg == router_ifinfo->bat_iv.tq_avg) {
                orig_node_tmp = router->orig_node;
                spin_lock_bh(&orig_node_tmp->bat_iv.ogm_cnt_lock);
                if_num = router->if_incoming->if_num;
@@ -1356,8 +1357,7 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
 out:
        spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock);
        batadv_orig_node_free_ref(orig_node);
-       if (orig_ifinfo)
-               batadv_orig_ifinfo_free_ref(orig_ifinfo);
+       batadv_orig_ifinfo_free_ref(orig_ifinfo);
        return ret;
 }
 
index e3da07a64026ed0552799fb954f512f7e7c013b3..40e4a2a18e4563379dad5eab09d25c79c99c561b 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2006-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2006-2015 B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich, Marek Lindner
  *
index 2acaafe60188711ad5f2d704b99c00435b0ee40b..be497be696d1d77104c11291dafd7812babe227f 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2006-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2006-2015 B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich, Marek Lindner
  *
index ac4b96eccadeb2b055de46fcf48a7aa8b6da1350..fa941cd7d8adb978a2d854cf8fe8f4044a693203 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2011-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2015 B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich
  *
index 43c985d92c3ed66b14959f17c95903e27fd84483..1f506d34039e6f38ccfc06886fda61eda0611a86 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2011-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2015 B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich
  *
index a4972874c056d3b9447e5ecea4906483b0839628..46118084221a1c05f2d98ddf10f5bc9bb905305c 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2010-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2010-2015 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
@@ -482,11 +482,7 @@ rem_attr:
        debugfs_remove_recursive(hard_iface->debug_dir);
        hard_iface->debug_dir = NULL;
 out:
-#ifdef CONFIG_DEBUG_FS
        return -ENOMEM;
-#else
-       return 0;
-#endif /* CONFIG_DEBUG_FS */
 }
 
 /**
@@ -541,11 +537,7 @@ rem_attr:
        debugfs_remove_recursive(bat_priv->debug_dir);
        bat_priv->debug_dir = NULL;
 out:
-#ifdef CONFIG_DEBUG_FS
        return -ENOMEM;
-#else
-       return 0;
-#endif /* CONFIG_DEBUG_FS */
 }
 
 void batadv_debugfs_del_meshif(struct net_device *dev)
index 37c4d6ddd04d44e4c19eaee97d6b4a877baa0df9..ed25605ca7324f956458fe820081affcadd9f205 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2010-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2010-2015 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
@@ -20,6 +20,8 @@
 
 #define BATADV_DEBUGFS_SUBDIR "batman_adv"
 
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+
 void batadv_debugfs_init(void);
 void batadv_debugfs_destroy(void);
 int batadv_debugfs_add_meshif(struct net_device *dev);
@@ -27,4 +29,36 @@ void batadv_debugfs_del_meshif(struct net_device *dev);
 int batadv_debugfs_add_hardif(struct batadv_hard_iface *hard_iface);
 void batadv_debugfs_del_hardif(struct batadv_hard_iface *hard_iface);
 
+#else
+
+static inline void batadv_debugfs_init(void)
+{
+}
+
+static inline void batadv_debugfs_destroy(void)
+{
+}
+
+static inline int batadv_debugfs_add_meshif(struct net_device *dev)
+{
+       return 0;
+}
+
+static inline void batadv_debugfs_del_meshif(struct net_device *dev)
+{
+}
+
+static inline
+int batadv_debugfs_add_hardif(struct batadv_hard_iface *hard_iface)
+{
+       return 0;
+}
+
+static inline
+void batadv_debugfs_del_hardif(struct batadv_hard_iface *hard_iface)
+{
+}
+
+#endif
+
 #endif /* _NET_BATMAN_ADV_DEBUGFS_H_ */
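
The header restructuring above follows the common kernel pattern for optional subsystems: real prototypes under IS_ENABLED(CONFIG_...), empty static inline stubs otherwise, so call sites compile unchanged either way and need no #ifdefs of their own. A generic sketch of the shape (CONFIG_FOO and the foo_* names are placeholders):

    /* foo.h -- sketch of the IS_ENABLED stub pattern, not real kernel code */
    #if IS_ENABLED(CONFIG_FOO)

    void foo_init(void);
    int foo_add(struct net_device *dev);

    #else

    static inline void foo_init(void)
    {
    }

    static inline int foo_add(struct net_device *dev)
    {
        /* report success so callers need no special casing */
        return 0;
    }

    #endif
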
index aad022dd15df5408aaffe6f80c7e17baca2d0002..da1742d9059fd46ed57165ab1ebdec10348d6c1f 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2011-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2015 B.A.T.M.A.N. contributors:
  *
  * Antonio Quartulli
  *
index 2fe0764c64be80c2598b12b7f00933c36221838e..ed41b8edba18e50eefdfe8f5609c74ea15569590 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2011-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2015 B.A.T.M.A.N. contributors:
  *
  * Antonio Quartulli
  *
index 3d1dcaa3e8b5af2040ec1cab987d98be876f35f8..6ce3c84a7e55aa3881e8969ca82fb50ebbb69722 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2013-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2013-2015 B.A.T.M.A.N. contributors:
  *
  * Martin Hundebøll <martin@hundeboll.net>
  *
@@ -161,6 +161,7 @@ static bool batadv_frag_insert_packet(struct batadv_orig_node *orig_node,
                hlist_add_head(&frag_entry_new->list, &chain->head);
                chain->size = skb->len - hdr_size;
                chain->timestamp = jiffies;
+               chain->total_size = ntohs(frag_packet->total_size);
                ret = true;
                goto out;
        }
@@ -195,9 +196,11 @@ static bool batadv_frag_insert_packet(struct batadv_orig_node *orig_node,
 
 out:
        if (chain->size > batadv_frag_size_limit() ||
-           ntohs(frag_packet->total_size) > batadv_frag_size_limit()) {
+           chain->total_size != ntohs(frag_packet->total_size) ||
+           chain->total_size > batadv_frag_size_limit()) {
                /* Clear chain if total size of either the list or the packet
-                * exceeds the maximum size of one merged packet.
+                * exceeds the maximum size of one merged packet. Don't allow
+                * fragments of one chain to carry differing total_size values.
                 */
                batadv_frag_clear_chain(&chain->head);
                chain->size = 0;
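
The new total_size field pins the expected merged size when the first fragment of a chain arrives, and the whole chain is discarded if any later fragment disagrees, instead of trusting each packet's own header at merge time. A hedged userspace restatement of the rule (the struct layout and the limit are simplified stand-ins):

    #include <stdbool.h>
    #include <stdint.h>

    struct frag_chain {
        uint16_t size;          /* bytes buffered so far */
        uint16_t total_size;    /* pinned from the first fragment */
    };

    static uint16_t frag_size_limit(void)
    {
        return 1400;    /* placeholder limit */
    }

    /* the chain must be cleared when any of these conditions fails */
    static bool chain_consistent(const struct frag_chain *c, uint16_t pkt_total)
    {
        return c->size <= frag_size_limit() &&
               c->total_size == pkt_total &&
               c->total_size <= frag_size_limit();
    }
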
@@ -228,19 +231,13 @@ err:
  * Returns the merged skb or NULL on error.
  */
 static struct sk_buff *
-batadv_frag_merge_packets(struct hlist_head *chain, struct sk_buff *skb)
+batadv_frag_merge_packets(struct hlist_head *chain)
 {
        struct batadv_frag_packet *packet;
        struct batadv_frag_list_entry *entry;
        struct sk_buff *skb_out = NULL;
        int size, hdr_size = sizeof(struct batadv_frag_packet);
 
-       /* Make sure incoming skb has non-bogus data. */
-       packet = (struct batadv_frag_packet *)skb->data;
-       size = ntohs(packet->total_size);
-       if (size > batadv_frag_size_limit())
-               goto free;
-
        /* Remove first entry, as this is the destination for the rest of the
         * fragments.
         */
@@ -249,6 +246,9 @@ batadv_frag_merge_packets(struct hlist_head *chain, struct sk_buff *skb)
        skb_out = entry->skb;
        kfree(entry);
 
+       packet = (struct batadv_frag_packet *)skb_out->data;
+       size = ntohs(packet->total_size);
+
        /* Make room for the rest of the fragments. */
        if (pskb_expand_head(skb_out, 0, size - skb_out->len, GFP_ATOMIC) < 0) {
                kfree_skb(skb_out);
@@ -304,7 +304,7 @@ bool batadv_frag_skb_buffer(struct sk_buff **skb,
        if (hlist_empty(&head))
                goto out;
 
-       skb_out = batadv_frag_merge_packets(&head, *skb);
+       skb_out = batadv_frag_merge_packets(&head);
        if (!skb_out)
                goto out_err;
 
index d848cf6676a2e4b7853a263f496d1ff0a0260a86..ec1e86f899e8cf0aa549801e59615132e6c79158 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2013-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2013-2015 B.A.T.M.A.N. contributors:
  *
  * Martin Hundebøll <martin@hundeboll.net>
  *
index 090828cf1fa7a5999a0dcec4f98bbf227b54d078..a85eaca344e89a07ceebc05483823850333d1924 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2009-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2009-2015 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
index 7ee53bb7d50f20926083132069b370d4d893dc25..185fb0887654905ed91a72ad17e5f0958bbe1bd5 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2009-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2009-2015 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
index 88a1bc3804d148972761276b347e258873a1b59e..0792e2f101e4d960ac0e24f281b309403093d5a0 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2009-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2009-2015 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
index aa51165619476bfd654fda9cafca026e8fc9dea2..df5434229675e8516c2296825b48951b89f52152 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2009-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2009-2015 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
index baf1f9843f2c42a78c7df31b0c60b3012d7fe22d..bdb020e292729c401dc32e0f51a8140714779b74 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
index 1918cd50b62ed7254a043f1cf02acce1ab207b4c..e8b6ffea703d491bfcecf250cc6deab863182f96 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
index 7c1c63080e20362cd628251b37f44da446cdbdba..3a0e1dcd1f2941ce5c59603c680906250791ac97 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2006-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2006-2015 B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich, Marek Lindner
  *
index 539fc12667938f75319b34f3b0f4b45551d54053..379e32acf2b49ebadf226a347040d6b1e2378d02 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2006-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2006-2015 B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich, Marek Lindner
  *
index 161ef8f17d2ef273615bfaad9e9c728fbe754acc..6c3cfb57d132bc86071e1994c150b8ec33603f2b 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
index 0c33950aa4aac62f59eb23fa4141499760b2de24..4815824e2f613630f7e13017885ae97ec244b12b 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
index 12fc77bef23faf0313a551ec268c836891171813..548e405d13c100a144affa3a47e25675cc4bd119 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -209,10 +209,13 @@ void batadv_mesh_free(struct net_device *soft_iface)
  * interfaces in the current mesh
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: the address to check
+ *
+ * Returns true if the MAC address was found, false otherwise.
  */
-int batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr)
+bool batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr)
 {
        const struct batadv_hard_iface *hard_iface;
+       bool is_my_mac = false;
 
        rcu_read_lock();
        list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
@@ -223,12 +226,12 @@ int batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr)
                        continue;
 
                if (batadv_compare_eth(hard_iface->net_dev->dev_addr, addr)) {
-                       rcu_read_unlock();
-                       return 1;
+                       is_my_mac = true;
+                       break;
                }
        }
        rcu_read_unlock();
-       return 0;
+       return is_my_mac;
 }
 
 /**
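
Converting batadv_is_my_mac() to a break-plus-flag shape leaves exactly one rcu_read_unlock() path, which is easier to audit than returning from inside the read-side critical section. Compressed sketch of the shape (struct item, item_list and match() are placeholders):

    static bool find_match(const uint8_t *addr)
    {
        const struct item *it;
        bool found = false;

        rcu_read_lock();
        list_for_each_entry_rcu(it, &item_list, list) {
            if (match(it, addr)) {
                found = true;
                break;      /* single unlock below */
            }
        }
        rcu_read_unlock();

        return found;
    }
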
@@ -510,14 +513,12 @@ static struct batadv_algo_ops *batadv_algo_get(char *name)
 int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops)
 {
        struct batadv_algo_ops *bat_algo_ops_tmp;
-       int ret;
 
        bat_algo_ops_tmp = batadv_algo_get(bat_algo_ops->name);
        if (bat_algo_ops_tmp) {
                pr_info("Trying to register already registered routing algorithm: %s\n",
                        bat_algo_ops->name);
-               ret = -EEXIST;
-               goto out;
+               return -EEXIST;
        }
 
        /* all algorithms must implement all ops (for now) */
@@ -531,32 +532,26 @@ int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops)
            !bat_algo_ops->bat_neigh_is_equiv_or_better) {
                pr_info("Routing algo '%s' does not implement required ops\n",
                        bat_algo_ops->name);
-               ret = -EINVAL;
-               goto out;
+               return -EINVAL;
        }
 
        INIT_HLIST_NODE(&bat_algo_ops->list);
        hlist_add_head(&bat_algo_ops->list, &batadv_algo_list);
-       ret = 0;
 
-out:
-       return ret;
+       return 0;
 }
 
 int batadv_algo_select(struct batadv_priv *bat_priv, char *name)
 {
        struct batadv_algo_ops *bat_algo_ops;
-       int ret = -EINVAL;
 
        bat_algo_ops = batadv_algo_get(name);
        if (!bat_algo_ops)
-               goto out;
+               return -EINVAL;
 
        bat_priv->bat_algo_ops = bat_algo_ops;
-       ret = 0;
 
-out:
-       return ret;
+       return 0;
 }
 
 int batadv_algo_seq_print_text(struct seq_file *seq, void *offset)
@@ -819,15 +814,15 @@ static bool batadv_tvlv_realloc_packet_buff(unsigned char **packet_buff,
        new_buff = kmalloc(min_packet_len + additional_packet_len, GFP_ATOMIC);
 
        /* keep old buffer if kmalloc should fail */
-       if (new_buff) {
-               memcpy(new_buff, *packet_buff, min_packet_len);
-               kfree(*packet_buff);
-               *packet_buff = new_buff;
-               *packet_buff_len = min_packet_len + additional_packet_len;
-               return true;
-       }
+       if (!new_buff)
+               return false;
+
+       memcpy(new_buff, *packet_buff, min_packet_len);
+       kfree(*packet_buff);
+       *packet_buff = new_buff;
+       *packet_buff_len = min_packet_len + additional_packet_len;
 
-       return false;
+       return true;
 }
 
 /**
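
The inverted test keeps the documented contract, namely that the old buffer survives an allocation failure, while putting the copy-and-swap on the straight-line path. A userspace analogue of that contract (grow_buffer() is illustrative; plain realloc() gives the same guarantee natively):

    #include <stdbool.h>
    #include <stdlib.h>
    #include <string.h>

    static bool grow_buffer(unsigned char **buf, size_t *len, size_t extra)
    {
        unsigned char *nbuf = malloc(*len + extra);

        if (!nbuf)
            return false;   /* caller keeps the old buffer untouched */

        memcpy(nbuf, *buf, *len);
        free(*buf);
        *buf = nbuf;
        *len += extra;
        return true;
    }
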
index 4d2318829a3420582aab6963535a1ba51a3d7087..af0a3361d4b2dd6139d1b3b3daed637cbde1a8b0 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -24,7 +24,7 @@
 #define BATADV_DRIVER_DEVICE "batman-adv"
 
 #ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2015.0"
+#define BATADV_SOURCE_VERSION "2015.1"
 #endif
 
 /* B.A.T.M.A.N. parameters */
@@ -44,7 +44,7 @@
 #define BATADV_TT_CLIENT_TEMP_TIMEOUT 600000 /* in milliseconds */
 #define BATADV_TT_WORK_PERIOD 5000 /* 5 seconds */
 #define BATADV_ORIG_WORK_PERIOD 1000 /* 1 second */
-#define BATADV_DAT_ENTRY_TIMEOUT (5*60000) /* 5 mins in milliseconds */
+#define BATADV_DAT_ENTRY_TIMEOUT (5 * 60000) /* 5 mins in milliseconds */
 /* sliding packet range of received originator messages in sequence numbers
  * (should be a multiple of our word size)
  */
@@ -195,7 +195,7 @@ extern struct workqueue_struct *batadv_event_workqueue;
 
 int batadv_mesh_init(struct net_device *soft_iface);
 void batadv_mesh_free(struct net_device *soft_iface);
-int batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr);
+bool batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr);
 struct batadv_hard_iface *
 batadv_seq_print_text_primary_if_get(struct seq_file *seq);
 int batadv_max_header_len(void);
@@ -279,7 +279,7 @@ static inline void _batadv_dbg(int type __always_unused,
  *
  * note: can't use ether_addr_equal() as it requires aligned memory
  */
-static inline int batadv_compare_eth(const void *data1, const void *data2)
+static inline bool batadv_compare_eth(const void *data1, const void *data2)
 {
        return ether_addr_equal_unaligned(data1, data2);
 }
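
As the surrounding comment notes, ether_addr_equal() demands aligned memory, so this helper keeps using the unaligned variant and now simply reports bool. A userspace stand-in for the unaligned comparison (mac_equal_unaligned() is a made-up name):

    #include <stdbool.h>
    #include <string.h>

    /* byte-wise compare is safe for any pointer alignment; 6 = ETH_ALEN */
    static bool mac_equal_unaligned(const void *a, const void *b)
    {
        return memcmp(a, b, 6) == 0;
    }
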
index b24e4bb64fb5fd51c813df7e5801b98d27caf959..09f2838dedf2cbbd37ebf85013171394ffe5671c 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2014-2015 B.A.T.M.A.N. contributors:
  *
  * Linus Lüssing
  *
index 3a44ebdb43cba601b34e932a4d752baa33ee2f0d..033d80e84fdfb61871d3741c7aced06f6b9e23f7 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2014-2015 B.A.T.M.A.N. contributors:
  *
  * Linus Lüssing
  *
index 127cc4d7380a1e7186aee6cc9064199ef48ff9bb..b984bc49deaf22c5de736665005999154b4a6321 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2012-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2012-2015 B.A.T.M.A.N. contributors:
  *
  * Martin Hundebøll, Jeppe Ledet-Pedersen
  *
@@ -155,7 +155,7 @@ err:
  */
 void batadv_nc_init_bat_priv(struct batadv_priv *bat_priv)
 {
-       atomic_set(&bat_priv->network_coding, 1);
+       atomic_set(&bat_priv->network_coding, 0);
        bat_priv->nc.min_tq = 200;
        bat_priv->nc.max_fwd_delay = 10;
        bat_priv->nc.max_buffer_time = 200;
@@ -275,7 +275,7 @@ static bool batadv_nc_to_purge_nc_path_decoding(struct batadv_priv *bat_priv,
         * max_buffer time
         */
        return batadv_has_timed_out(nc_path->last_valid,
-                                   bat_priv->nc.max_buffer_time*10);
+                                   bat_priv->nc.max_buffer_time * 10);
 }
 
 /**
index 358c0d686ab0aaf76c848aadcb40c5c4b340f7ad..b5ab8ff544eead4e3b349f82b8d9e64dbc883a33 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2012-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2012-2015 B.A.T.M.A.N. contributors:
  *
  * Martin Hundebøll, Jeppe Ledet-Pedersen
  *
index 90e805aba3795dd2ad08d5c29f06b8d925351275..e3900e452616c91c6e8d64d87b601cd82b4c758b 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2009-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2009-2015 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
index aa4a4369629569840e310acc2c6e6619f49a5768..91339143a2f7ee6638a575386c7495e79a5a7e58 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
index b81fbbf21a6393af841ababdb845ba6179d79ca1..9468bc09c7c47bfcee6ab2f203aa7eb5fda2c9f9 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
index da83982bf974c5d84d8046b7b415b4f2f1b07cb9..c5d90095bc3cecd3e4f64630838e9efcb9fa86c2 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
index 557d3d12a9ab0c8edf583d8e28fa22dbfd8ad6fe..6573f12b3ddcefef44cd8a561593e1f2c84ad8d8 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
index 3d64ed20c393528793ca4aedd30e21284c51b7ed..23635bd63fec8df7ba2d0fe21d08d574308080ca 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -255,8 +255,8 @@ int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
                            struct batadv_orig_node *orig_node,
                            unsigned short vid)
 {
-       struct ethhdr *ethhdr;
        struct batadv_unicast_packet *unicast_packet;
+       struct ethhdr *ethhdr;
        int ret = NET_XMIT_DROP;
 
        if (!orig_node)
index 38d0ec1833aed32363a5f1d3a1e471ec972dc3a8..60c233eb35ed6f05fec3b0aeb4ae8b9c40d7d24f 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
index 5ec31d7de24f17704b5f62d3bbabfc6101d720ae..50cf722f4e1bc704b51a52f9b350816112ba3652 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -732,7 +732,7 @@ static int batadv_softif_init_late(struct net_device *dev)
        atomic_set(&bat_priv->aggregated_ogms, 1);
        atomic_set(&bat_priv->bonding, 0);
 #ifdef CONFIG_BATMAN_ADV_BLA
-       atomic_set(&bat_priv->bridge_loop_avoidance, 0);
+       atomic_set(&bat_priv->bridge_loop_avoidance, 1);
 #endif
 #ifdef CONFIG_BATMAN_ADV_DAT
        atomic_set(&bat_priv->distributed_arp_table, 1);
@@ -818,7 +818,7 @@ static int batadv_softif_slave_add(struct net_device *dev,
        int ret = -EINVAL;
 
        hard_iface = batadv_hardif_get_by_netdev(slave_dev);
-       if (!hard_iface || hard_iface->soft_iface != NULL)
+       if (!hard_iface || hard_iface->soft_iface)
                goto out;
 
        ret = batadv_hardif_enable_interface(hard_iface, dev->name);
index dbab22fd89a51aa4e228d0df062a2b2094df9afd..9ce08049ffd0e16210e406f1e2b88fc4e373a432 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
index a75dc12f96f8c75432ad1c709e6813748bf7506b..fa8c347bf0577aa19c54c77c45a0c659f78e4dd9 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2010-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2010-2015 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
index b715b60db7cd6551b77ea77d869e7e68764f9063..b9e79ad806ac3f156217724edbe29c6239facc69 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2010-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2010-2015 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
index 07b263a437d1b2488d0e882696669593df23eb62..b098e53edded8a8ea6d6f563a8142880992e5d30 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich, Antonio Quartulli
  *
index ad84d7b89e399930132ac2537d66222eb4712119..5769037c7e2db453339e38fe7b9b0717744dcff2 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich, Antonio Quartulli
  *
index 9398c3fb417472962ddb58b0344de56006006b85..c1000c0d6b0de06c8abc0983bbfa79e20ff1cc8a 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -132,6 +132,7 @@ struct batadv_orig_ifinfo {
  * @timestamp: time (jiffie) of last received fragment
  * @seqno: sequence number of the fragments in the list
  * @size: accumulated size of packets in list
+ * @total_size: expected size of the assembled packet
  */
 struct batadv_frag_table_entry {
        struct hlist_head head;
@@ -139,6 +140,7 @@ struct batadv_frag_table_entry {
        unsigned long timestamp;
        uint16_t seqno;
        uint16_t size;
+       uint16_t total_size;
 };
 
 /**
@@ -181,9 +183,10 @@ struct batadv_orig_node_vlan {
 
 /**
  * struct batadv_orig_bat_iv - B.A.T.M.A.N. IV private orig_node members
- * @bcast_own: bitfield containing the number of our OGMs this orig_node
- *  rebroadcasted "back" to us (relative to last_real_seqno)
- * @bcast_own_sum: counted result of bcast_own
+ * @bcast_own: set of bitfields (one per hard interface) where each one counts
+ *  the number of our OGMs this orig_node rebroadcast "back" to us (relative
+ *  to last_real_seqno). Every bitfield is BATADV_TQ_LOCAL_WINDOW_SIZE bits long.
+ * @bcast_own_sum: sum of bcast_own
  * @ogm_cnt_lock: lock protecting bcast_own, bcast_own_sum,
  *  neigh_node->bat_iv.real_bits & neigh_node->bat_iv.real_packet_count
  */
index 1742b849fcff7ca6d5bb9fbe8b375c2514992531..f3d6046c8ee743cc05a46750e7027149676c8832 100644 (file)
@@ -192,7 +192,7 @@ static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_dev *dev,
                if (ipv6_addr_any(nexthop))
                        return NULL;
        } else {
-               nexthop = rt6_nexthop(rt);
+               nexthop = rt6_nexthop(rt, daddr);
 
                /* We need to remember the address because it is needed
                 * by bt_xmit() when sending the packet. In bt_xmit(), the
index bde2bdd9e929e854c9e2d001a7bf9e6a364d6a2c..b5116fa9835e1d0875ace8529f1ff00d1a347a96 100644 (file)
@@ -202,7 +202,7 @@ static int bnep_sock_create(struct net *net, struct socket *sock, int protocol,
        if (sock->type != SOCK_RAW)
                return -ESOCKTNOSUPPORT;
 
-       sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &bnep_proto);
+       sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &bnep_proto, kern);
        if (!sk)
                return -ENOMEM;
 
index d82787d417bdc242483de3ab4bac4bc7a91eca62..ce86a7bae844c3b98bbfeff25a8c3fabb43d0737 100644 (file)
@@ -205,7 +205,7 @@ static int cmtp_sock_create(struct net *net, struct socket *sock, int protocol,
        if (sock->type != SOCK_RAW)
                return -ESOCKTNOSUPPORT;
 
-       sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &cmtp_proto);
+       sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &cmtp_proto, kern);
        if (!sk)
                return -ENOMEM;
 
index c4802f3bd4c51086de62c048858fb7f6057f3bbb..f6c99098959f6b4c29a9f0f6f54254f6a5aafa4e 100644 (file)
@@ -94,7 +94,6 @@ static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;
-       int err;
 
        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;
@@ -121,12 +120,8 @@ static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
        if (IS_ERR(skb))
                return PTR_ERR(skb);
 
-       err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);
 
-       if (err < 0)
-               return err;
-
        hci_dev_change_flag(hdev, HCI_DUT_MODE);
 
        return count;
index 56f9edbf3d05dc6a2c6ba4f42174b2314d5e920d..5b14dcafcd084103eee55be0929b180b99e7bb94 100644 (file)
@@ -1377,7 +1377,7 @@ static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
 
        sock->ops = &hci_sock_ops;
 
-       sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
+       sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
        if (!sk)
                return -ENOMEM;
 
index cb3fdde1968a0fabafe038382dbdf9dacd3e331f..008ba439bd62ae2e55a7ff92900ebb88339d67d1 100644 (file)
@@ -235,7 +235,7 @@ static int hidp_sock_create(struct net *net, struct socket *sock, int protocol,
        if (sock->type != SOCK_RAW)
                return -ESOCKTNOSUPPORT;
 
-       sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hidp_proto);
+       sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hidp_proto, kern);
        if (!sk)
                return -ENOMEM;
 
index a7278f05eafbbda65da7c991820474a95e4e1a3c..244287706f910bdaf69afaef98cf2706cd301654 100644 (file)
@@ -43,7 +43,7 @@ static struct bt_sock_list l2cap_sk_list = {
 static const struct proto_ops l2cap_sock_ops;
 static void l2cap_sock_init(struct sock *sk, struct sock *parent);
 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock,
-                                    int proto, gfp_t prio);
+                                    int proto, gfp_t prio, int kern);
 
 bool l2cap_is_socket(struct socket *sock)
 {
@@ -1193,7 +1193,7 @@ static struct l2cap_chan *l2cap_sock_new_connection_cb(struct l2cap_chan *chan)
        }
 
        sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP,
-                             GFP_ATOMIC);
+                             GFP_ATOMIC, 0);
        if (!sk) {
                release_sock(parent);
                return NULL;
@@ -1523,12 +1523,12 @@ static struct proto l2cap_proto = {
 };
 
 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock,
-                                    int proto, gfp_t prio)
+                                    int proto, gfp_t prio, int kern)
 {
        struct sock *sk;
        struct l2cap_chan *chan;
 
-       sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
+       sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto, kern);
        if (!sk)
                return NULL;
 
@@ -1574,7 +1574,7 @@ static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
 
        sock->ops = &l2cap_sock_ops;
 
-       sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
+       sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC, kern);
        if (!sk)
                return -ENOMEM;
 
index 7fd87e7135b52753c0bcefd58cb4a290c57c77ba..a6f21f8c2f984b42607c5f13de5c2eeed245c0ea 100644 (file)
@@ -7577,7 +7577,7 @@ void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
        memset(&ev, 0, sizeof(ev));
 
        /* Devices using resolvable or non-resolvable random addresses
-        * without providing an indentity resolving key don't require
+        * without providing an identity resolving key don't require
         * to store long term keys. Their addresses will change the
         * next time around.
         *
@@ -7617,7 +7617,7 @@ void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
        /* For identity resolving keys from devices that are already
         * using a public address or static random address, do not
         * ask for storing this key. The identity resolving key really
-        * is only mandatory for devices using resovlable random
+        * is only mandatory for devices using resolvable random
         * addresses.
         *
         * Storing all identity resolving keys has the downside that
@@ -7646,7 +7646,7 @@ void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
        memset(&ev, 0, sizeof(ev));
 
        /* Devices using resolvable or non-resolvable random addresses
-        * without providing an indentity resolving key don't require
+        * without providing an identity resolving key don't require
         * to store signature resolving keys. Their addresses will change
         * the next time around.
         *
index 4fea24275b17a5b3ab4fa614c3982d8cdaed8fee..29709fbfd1f509326fd3fdcfdd6bfbd51edaf82f 100644 (file)
@@ -200,7 +200,7 @@ static int rfcomm_l2sock_create(struct socket **sock)
 
        BT_DBG("");
 
-       err = sock_create_kern(PF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP, sock);
+       err = sock_create_kern(&init_net, PF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP, sock);
        if (!err) {
                struct sock *sk = (*sock)->sk;
                sk->sk_data_ready   = rfcomm_l2data_ready;
index 825e8fb5114b16a5276bf7c6ee251883c6f0bfd2..b2338e971b335c3d100337709a42a729a5df188a 100644 (file)
@@ -269,12 +269,12 @@ static struct proto rfcomm_proto = {
        .obj_size       = sizeof(struct rfcomm_pinfo)
 };
 
-static struct sock *rfcomm_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
+static struct sock *rfcomm_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio, int kern)
 {
        struct rfcomm_dlc *d;
        struct sock *sk;
 
-       sk = sk_alloc(net, PF_BLUETOOTH, prio, &rfcomm_proto);
+       sk = sk_alloc(net, PF_BLUETOOTH, prio, &rfcomm_proto, kern);
        if (!sk)
                return NULL;
 
@@ -324,7 +324,7 @@ static int rfcomm_sock_create(struct net *net, struct socket *sock,
 
        sock->ops = &rfcomm_sock_ops;
 
-       sk = rfcomm_sock_alloc(net, sock, protocol, GFP_ATOMIC);
+       sk = rfcomm_sock_alloc(net, sock, protocol, GFP_ATOMIC, kern);
        if (!sk)
                return -ENOMEM;
 
@@ -969,7 +969,7 @@ int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, struct rfcomm_dlc *
                goto done;
        }
 
-       sk = rfcomm_sock_alloc(sock_net(parent), NULL, BTPROTO_RFCOMM, GFP_ATOMIC);
+       sk = rfcomm_sock_alloc(sock_net(parent), NULL, BTPROTO_RFCOMM, GFP_ATOMIC, 0);
        if (!sk)
                goto done;
 
index 4322c833e74891d20a627f21e64cdedd67cd806f..6b6e59dc54cf6000af76da743412c6d66c497138 100644 (file)
@@ -460,11 +460,11 @@ static struct proto sco_proto = {
        .obj_size       = sizeof(struct sco_pinfo)
 };
 
-static struct sock *sco_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
+static struct sock *sco_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio, int kern)
 {
        struct sock *sk;
 
-       sk = sk_alloc(net, PF_BLUETOOTH, prio, &sco_proto);
+       sk = sk_alloc(net, PF_BLUETOOTH, prio, &sco_proto, kern);
        if (!sk)
                return NULL;
 
@@ -501,7 +501,7 @@ static int sco_sock_create(struct net *net, struct socket *sock, int protocol,
 
        sock->ops = &sco_sock_ops;
 
-       sk = sco_sock_alloc(net, sock, protocol, GFP_ATOMIC);
+       sk = sco_sock_alloc(net, sock, protocol, GFP_ATOMIC, kern);
        if (!sk)
                return -ENOMEM;
 
@@ -1026,7 +1026,7 @@ static void sco_conn_ready(struct sco_conn *conn)
                bh_lock_sock(parent);
 
                sk = sco_sock_alloc(sock_net(parent), NULL,
-                                   BTPROTO_SCO, GFP_ATOMIC);
+                                   BTPROTO_SCO, GFP_ATOMIC, 0);
                if (!sk) {
                        bh_unlock_sock(parent);
                        sco_conn_unlock(conn);
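
All of the Bluetooth one-liners in this stretch are the same mechanical change: sk_alloc() grew a kern argument, and each protocol's create() callback now threads its own kern flag through, passing 0 for sockets spawned on behalf of a remote peer; relatedly, sock_create_kern() now takes an explicit struct net, which the rfcomm and ceph hunks satisfy with &init_net. Schematic of the plumbing, with PF_FOO and foo_proto as placeholders:

    /* sketch only: the kern flag flows from create() straight to sk_alloc() */
    static int foo_sock_create(struct net *net, struct socket *sock,
                               int protocol, int kern)
    {
        struct sock *sk;

        sk = sk_alloc(net, PF_FOO, GFP_ATOMIC, &foo_proto, kern);
        if (!sk)
            return -ENOMEM;

        /* ... protocol-specific init ... */
        return 0;
    }
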
index 1ab3dc9c8f99bf425a2a24403cfe6e54ddbbd550..659371af39e44e0346d021ffba1fc6448830715a 100644 (file)
@@ -371,6 +371,8 @@ static int smp_e(struct crypto_blkcipher *tfm, const u8 *k, u8 *r)
        uint8_t tmp[16], data[16];
        int err;
 
+       SMP_DBG("k %16phN r %16phN", k, r);
+
        if (!tfm) {
                BT_ERR("tfm %p", tfm);
                return -EINVAL;
@@ -400,6 +402,8 @@ static int smp_e(struct crypto_blkcipher *tfm, const u8 *k, u8 *r)
        /* Most significant octet of encryptedData corresponds to data[0] */
        swap_buf(data, r, 16);
 
+       SMP_DBG("r %16phN", r);
+
        return err;
 }
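
The new SMP_DBG lines use the kernel's %ph printk extension: the argument is a pointer to a buffer, the leading number gives the byte count, and the N suffix drops separators between bytes. Usage shape (kernel-only format specifier; ordinary printf does not understand it):

    u8 key[16] = { 0 };

    /* prints: k 00000000000000000000000000000000 */
    SMP_DBG("k %16phN", key);
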
 
@@ -410,6 +414,10 @@ static int smp_c1(struct crypto_blkcipher *tfm_aes, const u8 k[16],
        u8 p1[16], p2[16];
        int err;
 
+       SMP_DBG("k %16phN r %16phN", k, r);
+       SMP_DBG("iat %u ia %6phN rat %u ra %6phN", _iat, ia, _rat, ra);
+       SMP_DBG("preq %7phN pres %7phN", preq, pres);
+
        memset(p1, 0, 16);
 
        /* p1 = pres || preq || _rat || _iat */
@@ -418,10 +426,7 @@ static int smp_c1(struct crypto_blkcipher *tfm_aes, const u8 k[16],
        memcpy(p1 + 2, preq, 7);
        memcpy(p1 + 9, pres, 7);
 
-       /* p2 = padding || ia || ra */
-       memcpy(p2, ra, 6);
-       memcpy(p2 + 6, ia, 6);
-       memset(p2 + 12, 0, 4);
+       SMP_DBG("p1 %16phN", p1);
 
        /* res = r XOR p1 */
        u128_xor((u128 *) res, (u128 *) r, (u128 *) p1);
@@ -433,6 +438,13 @@ static int smp_c1(struct crypto_blkcipher *tfm_aes, const u8 k[16],
                return err;
        }
 
+       /* p2 = padding || ia || ra */
+       memcpy(p2, ra, 6);
+       memcpy(p2 + 6, ia, 6);
+       memset(p2 + 12, 0, 4);
+
+       SMP_DBG("p2 %16phN", p2);
+
        /* res = res XOR p2 */
        u128_xor((u128 *) res, (u128 *) res, (u128 *) p2);
 
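For orientation, smp_c1() computes the legacy-pairing confirm value c1 = e(k, e(k, r XOR p1) XOR p2), with p1 = pres || preq || _rat || _iat and p2 = padding || ia || ra as the in-code comments say; this hunk only moves the p2 assembly past the first encryption and adds the debug dumps. Data-flow sketch (xor16() stands in for u128_xor() and e() for one AES-128 encryption with key k; all buffers are 16 bytes):

    static void smp_c1_sketch(u8 res[16], const u8 k[16], const u8 r[16],
                              const u8 p1[16], const u8 p2[16])
    {
        xor16(res, r, p1);      /* res = r XOR p1 */
        e(k, res);              /* res = e(k, res) */
        xor16(res, res, p2);    /* res = res XOR p2 */
        e(k, res);              /* res = c1 */
    }
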
index 02c24cf63c344a3b15bcf87369da7f847150fab3..a1abe4936fe15299b7643b8944023050c5f938c1 100644 (file)
@@ -121,13 +121,13 @@ static struct notifier_block br_device_notifier = {
        .notifier_call = br_device_event
 };
 
-static int br_netdev_switch_event(struct notifier_block *unused,
-                                 unsigned long event, void *ptr)
+static int br_switchdev_event(struct notifier_block *unused,
+                             unsigned long event, void *ptr)
 {
-       struct net_device *dev = netdev_switch_notifier_info_to_dev(ptr);
+       struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
        struct net_bridge_port *p;
        struct net_bridge *br;
-       struct netdev_switch_notifier_fdb_info *fdb_info;
+       struct switchdev_notifier_fdb_info *fdb_info;
        int err = NOTIFY_DONE;
 
        rtnl_lock();
@@ -138,14 +138,14 @@ static int br_netdev_switch_event(struct notifier_block *unused,
        br = p->br;
 
        switch (event) {
-       case NETDEV_SWITCH_FDB_ADD:
+       case SWITCHDEV_FDB_ADD:
                fdb_info = ptr;
                err = br_fdb_external_learn_add(br, p, fdb_info->addr,
                                                fdb_info->vid);
                if (err)
                        err = notifier_from_errno(err);
                break;
-       case NETDEV_SWITCH_FDB_DEL:
+       case SWITCHDEV_FDB_DEL:
                fdb_info = ptr;
                err = br_fdb_external_learn_del(br, p, fdb_info->addr,
                                                fdb_info->vid);
@@ -159,8 +159,8 @@ out:
        return err;
 }
 
-static struct notifier_block br_netdev_switch_notifier = {
-       .notifier_call = br_netdev_switch_event,
+static struct notifier_block br_switchdev_notifier = {
+       .notifier_call = br_switchdev_event,
 };
 
 static void __net_exit br_net_exit(struct net *net)
@@ -214,7 +214,7 @@ static int __init br_init(void)
        if (err)
                goto err_out3;
 
-       err = register_netdev_switch_notifier(&br_netdev_switch_notifier);
+       err = register_switchdev_notifier(&br_switchdev_notifier);
        if (err)
                goto err_out4;
 
@@ -235,7 +235,7 @@ static int __init br_init(void)
        return 0;
 
 err_out5:
-       unregister_netdev_switch_notifier(&br_netdev_switch_notifier);
+       unregister_switchdev_notifier(&br_switchdev_notifier);
 err_out4:
        unregister_netdevice_notifier(&br_device_notifier);
 err_out3:
@@ -253,7 +253,7 @@ static void __exit br_deinit(void)
 {
        stp_proto_unregister(&br_stp_proto);
        br_netlink_fini();
-       unregister_netdev_switch_notifier(&br_netdev_switch_notifier);
+       unregister_switchdev_notifier(&br_switchdev_notifier);
        unregister_netdevice_notifier(&br_device_notifier);
        brioctl_set(NULL);
        unregister_pernet_subsys(&br_net_ops);
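
This file is part of the tree-wide rename from the netdev_switch_* prefix to switchdev_*; the bridge's FDB offload logic is unchanged under the new names. Minimal handler shape, using only identifiers already visible in the hunk:

    static int my_switchdev_event(struct notifier_block *nb,
                                  unsigned long event, void *ptr)
    {
        struct switchdev_notifier_fdb_info *fdb_info;

        switch (event) {
        case SWITCHDEV_FDB_ADD:
            fdb_info = ptr;
            /* learn fdb_info->addr / fdb_info->vid */
            break;
        case SWITCHDEV_FDB_DEL:
            fdb_info = ptr;
            /* forget fdb_info->addr / fdb_info->vid */
            break;
        }
        return NOTIFY_DONE;
    }
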
index 659fb96672e41e2e6525323697ca23a41d271fbb..cecb482ed919e1862a15a0142eeb810d08a8ddfc 100644 (file)
@@ -736,6 +736,12 @@ static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr,
        struct net_bridge_fdb_entry *fdb;
        bool modified = false;
 
+       /* If the port cannot learn, allow only local and static entries */
+       if (!(state & NUD_PERMANENT) && !(state & NUD_NOARP) &&
+           !(source->state == BR_STATE_LEARNING ||
+             source->state == BR_STATE_FORWARDING))
+               return -EPERM;
+
        fdb = fdb_find(head, addr, vid);
        if (fdb == NULL) {
                if (!(flags & NLM_F_CREATE))
index 22fd0419b31455965223566f4676b46efedd8722..0b38ee98024b724c15285b2baadf3e461145ceed 100644 (file)
@@ -975,9 +975,6 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
        int err = 0;
        __be32 group;
 
-       if (!pskb_may_pull(skb, sizeof(*ih)))
-               return -EINVAL;
-
        ih = igmpv3_report_hdr(skb);
        num = ntohs(ih->ngrec);
        len = sizeof(*ih);
@@ -1248,25 +1245,14 @@ static int br_ip4_multicast_query(struct net_bridge *br,
                        max_delay = 10 * HZ;
                        group = 0;
                }
-       } else {
-               if (!pskb_may_pull(skb, sizeof(struct igmpv3_query))) {
-                       err = -EINVAL;
-                       goto out;
-               }
-
+       } else if (skb->len >= sizeof(*ih3)) {
                ih3 = igmpv3_query_hdr(skb);
                if (ih3->nsrcs)
                        goto out;
 
                max_delay = ih3->code ?
                            IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
-       }
-
-       /* RFC2236+RFC3376 (IGMPv2+IGMPv3) require the multicast link layer
-        * all-systems destination addresses (224.0.0.1) for general queries
-        */
-       if (!group && iph->daddr != htonl(INADDR_ALLHOSTS_GROUP)) {
-               err = -EINVAL;
+       } else {
                goto out;
        }
 
@@ -1329,12 +1315,6 @@ static int br_ip6_multicast_query(struct net_bridge *br,
            (port && port->state == BR_STATE_DISABLED))
                goto out;
 
-       /* RFC2710+RFC3810 (MLDv1+MLDv2) require link-local source addresses */
-       if (!(ipv6_addr_type(&ip6h->saddr) & IPV6_ADDR_LINKLOCAL)) {
-               err = -EINVAL;
-               goto out;
-       }
-
        if (skb->len == sizeof(*mld)) {
                if (!pskb_may_pull(skb, sizeof(*mld))) {
                        err = -EINVAL;
@@ -1358,14 +1338,6 @@ static int br_ip6_multicast_query(struct net_bridge *br,
 
        is_general_query = group && ipv6_addr_any(group);
 
-       /* RFC2710+RFC3810 (MLDv1+MLDv2) require the multicast link layer
-        * all-nodes destination address (ff02::1) for general queries
-        */
-       if (is_general_query && !ipv6_addr_is_ll_all_nodes(&ip6h->daddr)) {
-               err = -EINVAL;
-               goto out;
-       }
-
        if (is_general_query) {
                saddr.proto = htons(ETH_P_IPV6);
                saddr.u.ip6 = ip6h->saddr;
@@ -1557,74 +1529,22 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
                                 struct sk_buff *skb,
                                 u16 vid)
 {
-       struct sk_buff *skb2 = skb;
-       const struct iphdr *iph;
+       struct sk_buff *skb_trimmed = NULL;
        struct igmphdr *ih;
-       unsigned int len;
-       unsigned int offset;
        int err;
 
-       /* We treat OOM as packet loss for now. */
-       if (!pskb_may_pull(skb, sizeof(*iph)))
-               return -EINVAL;
-
-       iph = ip_hdr(skb);
-
-       if (iph->ihl < 5 || iph->version != 4)
-               return -EINVAL;
-
-       if (!pskb_may_pull(skb, ip_hdrlen(skb)))
-               return -EINVAL;
-
-       iph = ip_hdr(skb);
+       err = ip_mc_check_igmp(skb, &skb_trimmed);
 
-       if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
-               return -EINVAL;
-
-       if (iph->protocol != IPPROTO_IGMP) {
-               if (!ipv4_is_local_multicast(iph->daddr))
+       if (err == -ENOMSG) {
+               if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr))
                        BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
                return 0;
+       } else if (err < 0) {
+               return err;
        }
 
-       len = ntohs(iph->tot_len);
-       if (skb->len < len || len < ip_hdrlen(skb))
-               return -EINVAL;
-
-       if (skb->len > len) {
-               skb2 = skb_clone(skb, GFP_ATOMIC);
-               if (!skb2)
-                       return -ENOMEM;
-
-               err = pskb_trim_rcsum(skb2, len);
-               if (err)
-                       goto err_out;
-       }
-
-       len -= ip_hdrlen(skb2);
-       offset = skb_network_offset(skb2) + ip_hdrlen(skb2);
-       __skb_pull(skb2, offset);
-       skb_reset_transport_header(skb2);
-
-       err = -EINVAL;
-       if (!pskb_may_pull(skb2, sizeof(*ih)))
-               goto out;
-
-       switch (skb2->ip_summed) {
-       case CHECKSUM_COMPLETE:
-               if (!csum_fold(skb2->csum))
-                       break;
-               /* fall through */
-       case CHECKSUM_NONE:
-               skb2->csum = 0;
-               if (skb_checksum_complete(skb2))
-                       goto out;
-       }
-
-       err = 0;
-
        BR_INPUT_SKB_CB(skb)->igmp = 1;
-       ih = igmp_hdr(skb2);
+       ih = igmp_hdr(skb);
 
        switch (ih->type) {
        case IGMP_HOST_MEMBERSHIP_REPORT:
@@ -1633,21 +1553,19 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
                err = br_ip4_multicast_add_group(br, port, ih->group, vid);
                break;
        case IGMPV3_HOST_MEMBERSHIP_REPORT:
-               err = br_ip4_multicast_igmp3_report(br, port, skb2, vid);
+               err = br_ip4_multicast_igmp3_report(br, port, skb_trimmed, vid);
                break;
        case IGMP_HOST_MEMBERSHIP_QUERY:
-               err = br_ip4_multicast_query(br, port, skb2, vid);
+               err = br_ip4_multicast_query(br, port, skb_trimmed, vid);
                break;
        case IGMP_HOST_LEAVE_MESSAGE:
                br_ip4_multicast_leave_group(br, port, ih->group, vid);
                break;
        }
 
-out:
-       __skb_push(skb2, offset);
-err_out:
-       if (skb2 != skb)
-               kfree_skb(skb2);
+       if (skb_trimmed)
+               kfree_skb(skb_trimmed);
+
        return err;
 }
 
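The rewrite above hands all IGMP validation to ip_mc_check_igmp(), and the caller keys off its return convention: 0 for a valid IGMP packet (optionally with a trimmed clone returned), -ENOMSG for a well-formed multicast packet that simply is not IGMP, and any other negative value for a malformed packet. Caller skeleton, as used in the hunk:

    struct sk_buff *skb_trimmed = NULL;
    int err;

    err = ip_mc_check_igmp(skb, &skb_trimmed);
    if (err == -ENOMSG)
        return 0;       /* not IGMP: ordinary multicast data */
    else if (err < 0)
        return err;     /* malformed: propagate the error */

    /* valid IGMP: dispatch on igmp_hdr(skb)->type, then drop the clone */
    if (skb_trimmed)
        kfree_skb(skb_trimmed);
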
@@ -1657,138 +1575,42 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
                                 struct sk_buff *skb,
                                 u16 vid)
 {
-       struct sk_buff *skb2;
-       const struct ipv6hdr *ip6h;
-       u8 icmp6_type;
-       u8 nexthdr;
-       __be16 frag_off;
-       unsigned int len;
-       int offset;
+       struct sk_buff *skb_trimmed = NULL;
+       struct mld_msg *mld;
        int err;
 
-       if (!pskb_may_pull(skb, sizeof(*ip6h)))
-               return -EINVAL;
-
-       ip6h = ipv6_hdr(skb);
-
-       /*
-        * We're interested in MLD messages only.
-        *  - Version is 6
-        *  - MLD has always Router Alert hop-by-hop option
-        *  - But we do not support jumbrograms.
-        */
-       if (ip6h->version != 6)
-               return 0;
-
-       /* Prevent flooding this packet if there is no listener present */
-       if (!ipv6_addr_is_ll_all_nodes(&ip6h->daddr))
-               BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
-
-       if (ip6h->nexthdr != IPPROTO_HOPOPTS ||
-           ip6h->payload_len == 0)
-               return 0;
-
-       len = ntohs(ip6h->payload_len) + sizeof(*ip6h);
-       if (skb->len < len)
-               return -EINVAL;
-
-       nexthdr = ip6h->nexthdr;
-       offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr, &frag_off);
+       err = ipv6_mc_check_mld(skb, &skb_trimmed);
 
-       if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
+       if (err == -ENOMSG) {
+               if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
+                       BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
                return 0;
-
-       /* Okay, we found ICMPv6 header */
-       skb2 = skb_clone(skb, GFP_ATOMIC);
-       if (!skb2)
-               return -ENOMEM;
-
-       err = -EINVAL;
-       if (!pskb_may_pull(skb2, offset + sizeof(struct icmp6hdr)))
-               goto out;
-
-       len -= offset - skb_network_offset(skb2);
-
-       __skb_pull(skb2, offset);
-       skb_reset_transport_header(skb2);
-       skb_postpull_rcsum(skb2, skb_network_header(skb2),
-                          skb_network_header_len(skb2));
-
-       icmp6_type = icmp6_hdr(skb2)->icmp6_type;
-
-       switch (icmp6_type) {
-       case ICMPV6_MGM_QUERY:
-       case ICMPV6_MGM_REPORT:
-       case ICMPV6_MGM_REDUCTION:
-       case ICMPV6_MLD2_REPORT:
-               break;
-       default:
-               err = 0;
-               goto out;
-       }
-
-       /* Okay, we found MLD message. Check further. */
-       if (skb2->len > len) {
-               err = pskb_trim_rcsum(skb2, len);
-               if (err)
-                       goto out;
-               err = -EINVAL;
-       }
-
-       ip6h = ipv6_hdr(skb2);
-
-       switch (skb2->ip_summed) {
-       case CHECKSUM_COMPLETE:
-               if (!csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, skb2->len,
-                                       IPPROTO_ICMPV6, skb2->csum))
-                       break;
-               /*FALLTHROUGH*/
-       case CHECKSUM_NONE:
-               skb2->csum = ~csum_unfold(csum_ipv6_magic(&ip6h->saddr,
-                                                       &ip6h->daddr,
-                                                       skb2->len,
-                                                       IPPROTO_ICMPV6, 0));
-               if (__skb_checksum_complete(skb2))
-                       goto out;
+       } else if (err < 0) {
+               return err;
        }
 
-       err = 0;
-
        BR_INPUT_SKB_CB(skb)->igmp = 1;
+       mld = (struct mld_msg *)skb_transport_header(skb);
 
-       switch (icmp6_type) {
+       switch (mld->mld_type) {
        case ICMPV6_MGM_REPORT:
-           {
-               struct mld_msg *mld;
-               if (!pskb_may_pull(skb2, sizeof(*mld))) {
-                       err = -EINVAL;
-                       goto out;
-               }
-               mld = (struct mld_msg *)skb_transport_header(skb2);
                BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
                err = br_ip6_multicast_add_group(br, port, &mld->mld_mca, vid);
                break;
-           }
        case ICMPV6_MLD2_REPORT:
-               err = br_ip6_multicast_mld2_report(br, port, skb2, vid);
+               err = br_ip6_multicast_mld2_report(br, port, skb_trimmed, vid);
                break;
        case ICMPV6_MGM_QUERY:
-               err = br_ip6_multicast_query(br, port, skb2, vid);
+               err = br_ip6_multicast_query(br, port, skb_trimmed, vid);
                break;
        case ICMPV6_MGM_REDUCTION:
-           {
-               struct mld_msg *mld;
-               if (!pskb_may_pull(skb2, sizeof(*mld))) {
-                       err = -EINVAL;
-                       goto out;
-               }
-               mld = (struct mld_msg *)skb_transport_header(skb2);
                br_ip6_multicast_leave_group(br, port, &mld->mld_mca, vid);
-           }
+               break;
        }
 
-out:
-       kfree_skb(skb2);
+       if (skb_trimmed)
+               kfree_skb(skb_trimmed);
+
        return err;
 }
 #endif
@@ -1950,11 +1772,9 @@ out:
 
 int br_multicast_set_router(struct net_bridge *br, unsigned long val)
 {
-       int err = -ENOENT;
+       int err = -EINVAL;
 
        spin_lock_bh(&br->multicast_lock);
-       if (!netif_running(br->dev))
-               goto unlock;
 
        switch (val) {
        case 0:
@@ -1965,13 +1785,8 @@ int br_multicast_set_router(struct net_bridge *br, unsigned long val)
                br->multicast_router = val;
                err = 0;
                break;
-
-       default:
-               err = -EINVAL;
-               break;
        }
 
-unlock:
        spin_unlock_bh(&br->multicast_lock);
 
        return err;
@@ -1980,11 +1795,9 @@ unlock:
 int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
 {
        struct net_bridge *br = p->br;
-       int err = -ENOENT;
+       int err = -EINVAL;
 
        spin_lock(&br->multicast_lock);
-       if (!netif_running(br->dev) || p->state == BR_STATE_DISABLED)
-               goto unlock;
 
        switch (val) {
        case 0:
@@ -2006,13 +1819,8 @@ int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
 
                br_multicast_add_router(br, p);
                break;
-
-       default:
-               err = -EINVAL;
-               break;
        }
 
-unlock:
        spin_unlock(&br->multicast_lock);
 
        return err;
@@ -2117,15 +1925,11 @@ unlock:
 
 int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
 {
-       int err = -ENOENT;
+       int err = -EINVAL;
        u32 old;
        struct net_bridge_mdb_htable *mdb;
 
        spin_lock_bh(&br->multicast_lock);
-       if (!netif_running(br->dev))
-               goto unlock;
-
-       err = -EINVAL;
        if (!is_power_of_2(val))
                goto unlock;
 
index 60ddfbeb47f598fed5908dfc492abef48c0acd75..46660a28feef7c21750dd76e8337e6146532ee28 100644 (file)
@@ -125,6 +125,14 @@ static struct nf_bridge_info *nf_bridge_info_get(const struct sk_buff *skb)
        return skb->nf_bridge;
 }
 
+static void nf_bridge_info_free(struct sk_buff *skb)
+{
+       if (skb->nf_bridge) {
+               nf_bridge_put(skb->nf_bridge);
+               skb->nf_bridge = NULL;
+       }
+}
+
 static inline struct rtable *bridge_parent_rtable(const struct net_device *dev)
 {
        struct net_bridge_port *port;
@@ -832,17 +840,39 @@ static int br_nf_push_frag_xmit(struct sock *sk, struct sk_buff *skb)
        skb_copy_to_linear_data_offset(skb, -data->size, data->mac, data->size);
        __skb_push(skb, data->encap_size);
 
+       nf_bridge_info_free(skb);
        return br_dev_queue_push_xmit(sk, skb);
 }
 
+static int br_nf_ip_fragment(struct sock *sk, struct sk_buff *skb,
+                            int (*output)(struct sock *, struct sk_buff *))
+{
+       unsigned int mtu = ip_skb_dst_mtu(skb);
+       struct iphdr *iph = ip_hdr(skb);
+       struct rtable *rt = skb_rtable(skb);
+       struct net_device *dev = rt->dst.dev;
+
+       if (unlikely(((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) ||
+                    (IPCB(skb)->frag_max_size &&
+                     IPCB(skb)->frag_max_size > mtu))) {
+               IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
+               kfree_skb(skb);
+               return -EMSGSIZE;
+       }
+
+       return ip_do_fragment(sk, skb, output);
+}
+
 static int br_nf_dev_queue_xmit(struct sock *sk, struct sk_buff *skb)
 {
        int ret;
        int frag_max_size;
        unsigned int mtu_reserved;
 
-       if (skb_is_gso(skb) || skb->protocol != htons(ETH_P_IP))
+       if (skb_is_gso(skb) || skb->protocol != htons(ETH_P_IP)) {
+               nf_bridge_info_free(skb);
                return br_dev_queue_push_xmit(sk, skb);
+       }
 
        mtu_reserved = nf_bridge_mtu_reduction(skb);
        /* This is wrong! We should preserve the original fragment
@@ -866,8 +896,9 @@ static int br_nf_dev_queue_xmit(struct sock *sk, struct sk_buff *skb)
                skb_copy_from_linear_data_offset(skb, -data->size, data->mac,
                                                 data->size);
 
-               ret = ip_fragment(sk, skb, br_nf_push_frag_xmit);
+               ret = br_nf_ip_fragment(sk, skb, br_nf_push_frag_xmit);
        } else {
+               nf_bridge_info_free(skb);
                ret = br_dev_queue_push_xmit(sk, skb);
        }
 
@@ -876,7 +907,8 @@ static int br_nf_dev_queue_xmit(struct sock *sk, struct sk_buff *skb)
 #else
 static int br_nf_dev_queue_xmit(struct sock *sk, struct sk_buff *skb)
 {
-        return br_dev_queue_push_xmit(sk, skb);
+       nf_bridge_info_free(skb);
+       return br_dev_queue_push_xmit(sk, skb);
 }
 #endif
 
@@ -964,6 +996,8 @@ static void br_nf_pre_routing_finish_bridge_slow(struct sk_buff *skb)
                                       nf_bridge->neigh_header,
                                       ETH_HLEN - ETH_ALEN);
        skb->dev = nf_bridge->physindev;
+
+       nf_bridge->physoutdev = NULL;
        br_handle_frame_finish(NULL, skb);
 }
 
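The new br_nf_ip_fragment() wrapper refuses to fragment before handing off to ip_do_fragment() when either the DF bit is set (and the skb honours it) or the recorded inbound fragment size already exceeds the MTU. Restated as a predicate (must_refuse() is an illustrative name):

    static bool must_refuse(const struct sk_buff *skb, unsigned int mtu)
    {
        const struct iphdr *iph = ip_hdr(skb);

        if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df)
            return true;    /* DF set and honoured: never fragment */

        return IPCB(skb)->frag_max_size && IPCB(skb)->frag_max_size > mtu;
    }
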
index 4b5c236998ff1010831711a17b773e16c7d8ba58..6b67ed3831de504c7f2d52e8cf7436c24365f183 100644 (file)
@@ -586,7 +586,7 @@ int br_setlink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags)
        struct nlattr *afspec;
        struct net_bridge_port *p;
        struct nlattr *tb[IFLA_BRPORT_MAX + 1];
-       int err = 0, ret_offload = 0;
+       int err = 0;
 
        protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_PROTINFO);
        afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
@@ -628,16 +628,6 @@ int br_setlink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags)
                                afspec, RTM_SETLINK);
        }
 
-       if (p && !(flags & BRIDGE_FLAGS_SELF)) {
-               /* set bridge attributes in hardware if supported
-                */
-               ret_offload = netdev_switch_port_bridge_setlink(dev, nlh,
-                                                               flags);
-               if (ret_offload && ret_offload != -EOPNOTSUPP)
-                       br_warn(p->br, "error setting attrs on port %u(%s)\n",
-                               (unsigned int)p->port_no, p->dev->name);
-       }
-
        if (err == 0)
                br_ifinfo_notify(RTM_NEWLINK, p);
 out:
@@ -649,7 +639,7 @@ int br_dellink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags)
 {
        struct nlattr *afspec;
        struct net_bridge_port *p;
-       int err = 0, ret_offload = 0;
+       int err = 0;
 
        afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
        if (!afspec)
@@ -668,16 +658,6 @@ int br_dellink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags)
                 */
                br_ifinfo_notify(RTM_NEWLINK, p);
 
-       if (p && !(flags & BRIDGE_FLAGS_SELF)) {
-               /* del bridge attributes in hardware
-                */
-               ret_offload = netdev_switch_port_bridge_dellink(dev, nlh,
-                                                               flags);
-               if (ret_offload && ret_offload != -EOPNOTSUPP)
-                       br_warn(p->br, "error deleting attrs on port %u (%s)\n",
-                               (unsigned int)p->port_no, p->dev->name);
-       }
-
        return err;
 }
 static int br_validate(struct nlattr *tb[], struct nlattr *data[])
index 3362c29400f182c90db6e67a04fcc5018906fa41..1f36fa70639b7ca0657090c86a7f9d4bb67b1090 100644 (file)
@@ -33,8 +33,8 @@
 
 /* Control of forwarding link local multicast */
 #define BR_GROUPFWD_DEFAULT    0
-/* Don't allow forwarding control protocols like STP and LLDP */
-#define BR_GROUPFWD_RESTRICTED 0x4007u
+/* Don't allow forwarding of control protocols like STP, MAC PAUSE and LACP */
+#define BR_GROUPFWD_RESTRICTED 0x0007u
 /* The Nearest Customer Bridge Group Address, 01-80-C2-00-00-[00,0B,0C,0D,0F] */
 #define BR_GROUPFWD_8021AD     0xB801u
 
index fb3ebe6155134b4532ad4c85b2fea2260f0d6140..45f1ff113af9591bb5d7d3f05c0792b99fc4103d 100644 (file)
@@ -39,10 +39,14 @@ void br_log_state(const struct net_bridge_port *p)
 
 void br_set_state(struct net_bridge_port *p, unsigned int state)
 {
+       struct switchdev_attr attr = {
+               .id = SWITCHDEV_ATTR_PORT_STP_STATE,
+               .u.stp_state = state,
+       };
        int err;
 
        p->state = state;
-       err = netdev_switch_port_stp_update(p->dev, state);
+       err = switchdev_port_attr_set(p->dev, &attr);
        if (err && err != -EOPNOTSUPP)
                br_warn(p->br, "error setting offload STP state on port %u(%s)\n",
                                (unsigned int) p->port_no, p->dev->name);
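/* Annotation, not part of the commit: the replacement switchdev API
 * carries the value inside a struct switchdev_attr rather than a
 * dedicated per-attribute call.  A minimal sketch of a hypothetical
 * caller forcing a port to BR_STATE_DISABLED, following the same
 * pattern as br_set_state() above:
 */
static int example_port_stp_disable(struct net_device *dev)
{
	struct switchdev_attr attr = {
		.id = SWITCHDEV_ATTR_PORT_STP_STATE,
		.u.stp_state = BR_STATE_DISABLED,
	};

	return switchdev_port_attr_set(dev, &attr);
}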
index 071d87214dde44754f2ec98da167fcd8477a541c..0c40570069ba28ce87d2fcd9883224e0ff4588e6 100644 (file)
@@ -164,8 +164,10 @@ static int ebt_stp_mt_check(const struct xt_mtchk_param *par)
            !(info->bitmask & EBT_STP_MASK))
                return -EINVAL;
        /* Make sure the match only receives stp frames */
-       if (!ether_addr_equal(e->destmac, bridge_ula) ||
-           !ether_addr_equal(e->destmsk, msk) || !(e->bitmask & EBT_DESTMAC))
+       if (!par->nft_compat &&
+           (!ether_addr_equal(e->destmac, bridge_ula) ||
+            !ether_addr_equal(e->destmsk, msk) ||
+            !(e->bitmask & EBT_DESTMAC)))
                return -EINVAL;
 
        return 0;
index 91180a7fc94376ea3ca7eecf274c03c3bc919590..5149d9e7111458cff19954b200a9ea3e807638d1 100644 (file)
@@ -139,7 +139,7 @@ ebt_basic_match(const struct ebt_entry *e, const struct sk_buff *skb,
                ethproto = h->h_proto;
 
        if (e->bitmask & EBT_802_3) {
-               if (FWINV2(ntohs(ethproto) >= ETH_P_802_3_MIN, EBT_IPROTO))
+               if (FWINV2(eth_proto_is_802_3(ethproto), EBT_IPROTO))
                        return 1;
        } else if (!(e->bitmask & EBT_NOPROTO) &&
           FWINV2(e->ethproto != ethproto, EBT_IPROTO))
index 112ad784838a5bf6b46eed6c2b90f2d8b0e50d7a..3cc71b9f551756ca63b1299e95d9b6424e5afb72 100644 (file)
@@ -1055,7 +1055,7 @@ static int caif_create(struct net *net, struct socket *sock, int protocol,
         * is really not used at all in the net/core or socket.c but the
         * initialization makes sure that sock->state is not uninitialized.
         */
-       sk = sk_alloc(net, PF_CAIF, GFP_KERNEL, &prot);
+       sk = sk_alloc(net, PF_CAIF, GFP_KERNEL, &prot, kern);
        if (!sk)
                return -ENOMEM;
 
index 32d710eaf1fc991b2ef4638fe8ccc95b84352ace..d4d404bdfc9a5d142ab18fa685aa62829d5e56c5 100644 (file)
@@ -179,7 +179,7 @@ static int can_create(struct net *net, struct socket *sock, int protocol,
 
        sock->ops = cp->ops;
 
-       sk = sk_alloc(net, PF_CAN, GFP_KERNEL, cp->prot);
+       sk = sk_alloc(net, PF_CAN, GFP_KERNEL, cp->prot, kern);
        if (!sk) {
                err = -ENOMEM;
                goto errout;
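/* Annotation, not part of the commit: sk_alloc() grew a fifth "kern"
 * argument in this series so protocols can distinguish kernel-internal
 * sockets from user-created ones; ->create() handlers like can_create()
 * above simply forward the flag they receive.  A minimal sketch, where
 * example_proto and example_create are hypothetical:
 */
static struct proto example_proto;

static int example_create(struct net *net, struct socket *sock,
			  int protocol, int kern)
{
	struct sock *sk = sk_alloc(net, PF_CAN, GFP_KERNEL,
				   &example_proto, kern);

	return sk ? 0 : -ENOMEM;
}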
index 967080a9f0436e8575663909572f063ac60440fe..073262fea6ddab4acb4cd128ee402ab54b870552 100644 (file)
@@ -480,8 +480,8 @@ static int ceph_tcp_connect(struct ceph_connection *con)
        int ret;
 
        BUG_ON(con->sock);
-       ret = sock_create_kern(con->peer_addr.in_addr.ss_family, SOCK_STREAM,
-                              IPPROTO_TCP, &sock);
+       ret = sock_create_kern(&init_net, con->peer_addr.in_addr.ss_family,
+                              SOCK_STREAM, IPPROTO_TCP, &sock);
        if (ret)
                return ret;
        sock->sk->sk_allocation = GFP_NOFS;
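/* Annotation, not part of the commit: sock_create_kern() now takes the
 * network namespace explicitly as its first argument; callers that
 * previously created sockets implicitly in init_net, like
 * ceph_tcp_connect() above, now say so.  A minimal sketch:
 */
static int example_make_tcp_socket(struct socket **sockp)
{
	return sock_create_kern(&init_net, AF_INET, SOCK_STREAM,
				IPPROTO_TCP, sockp);
}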
index aa82f9ab6a36d164769bf7c9633fcdfd5971466f..6778a9999d525307d5bd41a1750a6e96a6e22bf3 100644 (file)
 #include <linux/if_macvlan.h>
 #include <linux/errqueue.h>
 #include <linux/hrtimer.h>
+#include <linux/netfilter_ingress.h>
 
 #include "net-sysfs.h"
 
@@ -468,10 +469,14 @@ EXPORT_SYMBOL(dev_remove_pack);
  */
 void dev_add_offload(struct packet_offload *po)
 {
-       struct list_head *head = &offload_base;
+       struct packet_offload *elem;
 
        spin_lock(&offload_lock);
-       list_add_rcu(&po->list, head);
+       list_for_each_entry(elem, &offload_base, list) {
+               if (po->priority < elem->priority)
+                       break;
+       }
+       list_add_rcu(&po->list, elem->list.prev);
        spin_unlock(&offload_lock);
 }
 EXPORT_SYMBOL(dev_add_offload);
@@ -1630,7 +1635,7 @@ int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
 }
 EXPORT_SYMBOL(call_netdevice_notifiers);
 
-#ifdef CONFIG_NET_CLS_ACT
+#ifdef CONFIG_NET_INGRESS
 static struct static_key ingress_needed __read_mostly;
 
 void net_inc_ingress_queue(void)
@@ -2343,6 +2348,34 @@ void netif_device_attach(struct net_device *dev)
 }
 EXPORT_SYMBOL(netif_device_attach);
 
+/*
+ * Returns a Tx hash based on the given packet descriptor and a Tx queue
+ * count to be used as the distribution range.
+ */
+u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
+                 unsigned int num_tx_queues)
+{
+       u32 hash;
+       u16 qoffset = 0;
+       u16 qcount = num_tx_queues;
+
+       if (skb_rx_queue_recorded(skb)) {
+               hash = skb_get_rx_queue(skb);
+               while (unlikely(hash >= num_tx_queues))
+                       hash -= num_tx_queues;
+               return hash;
+       }
+
+       if (dev->num_tc) {
+               u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
+               qoffset = dev->tc_to_txq[tc].offset;
+               qcount = dev->tc_to_txq[tc].count;
+       }
+
+       return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
+}
+EXPORT_SYMBOL(__skb_tx_hash);
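/* Annotation, not part of the commit: reciprocal_scale() (from
 * <linux/kernel.h>) maps a 32-bit hash uniformly onto [0, n) without a
 * division, so __skb_tx_hash() above spreads flows over the traffic
 * class's queue range [qoffset, qoffset + qcount).  The underlying
 * arithmetic, restated as a sketch:
 */
static inline u32 example_reciprocal_scale(u32 hash, u32 n)
{
	return (u32)(((u64)hash * (u64)n) >> 32);
}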
+
 static void skb_warn_bad_offload(const struct sk_buff *skb)
 {
        static const netdev_features_t null_features = 0;
@@ -2901,6 +2934,84 @@ int dev_loopback_xmit(struct sock *sk, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(dev_loopback_xmit);
 
+static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
+{
+#ifdef CONFIG_XPS
+       struct xps_dev_maps *dev_maps;
+       struct xps_map *map;
+       int queue_index = -1;
+
+       rcu_read_lock();
+       dev_maps = rcu_dereference(dev->xps_maps);
+       if (dev_maps) {
+               map = rcu_dereference(
+                   dev_maps->cpu_map[skb->sender_cpu - 1]);
+               if (map) {
+                       if (map->len == 1)
+                               queue_index = map->queues[0];
+                       else
+                               queue_index = map->queues[reciprocal_scale(skb_get_hash(skb),
+                                                                          map->len)];
+                       if (unlikely(queue_index >= dev->real_num_tx_queues))
+                               queue_index = -1;
+               }
+       }
+       rcu_read_unlock();
+
+       return queue_index;
+#else
+       return -1;
+#endif
+}
+
+static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
+{
+       struct sock *sk = skb->sk;
+       int queue_index = sk_tx_queue_get(sk);
+
+       if (queue_index < 0 || skb->ooo_okay ||
+           queue_index >= dev->real_num_tx_queues) {
+               int new_index = get_xps_queue(dev, skb);
+               if (new_index < 0)
+                       new_index = skb_tx_hash(dev, skb);
+
+               if (queue_index != new_index && sk &&
+                   rcu_access_pointer(sk->sk_dst_cache))
+                       sk_tx_queue_set(sk, new_index);
+
+               queue_index = new_index;
+       }
+
+       return queue_index;
+}
+
+struct netdev_queue *netdev_pick_tx(struct net_device *dev,
+                                   struct sk_buff *skb,
+                                   void *accel_priv)
+{
+       int queue_index = 0;
+
+#ifdef CONFIG_XPS
+       if (skb->sender_cpu == 0)
+               skb->sender_cpu = raw_smp_processor_id() + 1;
+#endif
+
+       if (dev->real_num_tx_queues != 1) {
+               const struct net_device_ops *ops = dev->netdev_ops;
+               if (ops->ndo_select_queue)
+                       queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
+                                                           __netdev_pick_tx);
+               else
+                       queue_index = __netdev_pick_tx(dev, skb);
+
+               if (!accel_priv)
+                       queue_index = netdev_cap_txqueue(dev, queue_index);
+       }
+
+       skb_set_queue_mapping(skb, queue_index);
+       return netdev_get_tx_queue(dev, queue_index);
+}
+
 /**
  *     __dev_queue_xmit - transmit a buffer
  *     @skb: buffer to transmit
@@ -3513,66 +3624,47 @@ int (*br_fdb_test_addr_hook)(struct net_device *dev,
 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
 #endif
 
-#ifdef CONFIG_NET_CLS_ACT
-/* TODO: Maybe we should just force sch_ingress to be compiled in
- * when CONFIG_NET_CLS_ACT is? otherwise some useless instructions
- * a compare and 2 stores extra right now if we dont have it on
- * but have CONFIG_NET_CLS_ACT
- * NOTE: This doesn't stop any functionality; if you dont have
- * the ingress scheduler, you just can't add policies on ingress.
- *
- */
-static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
-{
-       struct net_device *dev = skb->dev;
-       u32 ttl = G_TC_RTTL(skb->tc_verd);
-       int result = TC_ACT_OK;
-       struct Qdisc *q;
-
-       if (unlikely(MAX_RED_LOOP < ttl++)) {
-               net_warn_ratelimited("Redir loop detected Dropping packet (%d->%d)\n",
-                                    skb->skb_iif, dev->ifindex);
-               return TC_ACT_SHOT;
-       }
-
-       skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
-       skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
-
-       q = rcu_dereference(rxq->qdisc);
-       if (q != &noop_qdisc) {
-               spin_lock(qdisc_lock(q));
-               if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
-                       result = qdisc_enqueue_root(skb, q);
-               spin_unlock(qdisc_lock(q));
-       }
-
-       return result;
-}
-
 static inline struct sk_buff *handle_ing(struct sk_buff *skb,
                                         struct packet_type **pt_prev,
                                         int *ret, struct net_device *orig_dev)
 {
-       struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);
+#ifdef CONFIG_NET_CLS_ACT
+       struct tcf_proto *cl = rcu_dereference_bh(skb->dev->ingress_cl_list);
+       struct tcf_result cl_res;
 
-       if (!rxq || rcu_access_pointer(rxq->qdisc) == &noop_qdisc)
+       /* If there's at least one ingress qdisc present somewhere (so
+        * we got here via the enabled static key), devices that are
+        * not configured with an ingress qdisc bail out here.
+        */
+       if (!cl)
                return skb;
-
        if (*pt_prev) {
                *ret = deliver_skb(skb, *pt_prev, orig_dev);
                *pt_prev = NULL;
        }
 
-       switch (ing_filter(skb, rxq)) {
+       qdisc_skb_cb(skb)->pkt_len = skb->len;
+       skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
+       qdisc_bstats_update_cpu(cl->q, skb);
+
+       switch (tc_classify(skb, cl, &cl_res)) {
+       case TC_ACT_OK:
+       case TC_ACT_RECLASSIFY:
+               skb->tc_index = TC_H_MIN(cl_res.classid);
+               break;
        case TC_ACT_SHOT:
+               qdisc_qstats_drop_cpu(cl->q);
        case TC_ACT_STOLEN:
+       case TC_ACT_QUEUED:
                kfree_skb(skb);
                return NULL;
+       default:
+               break;
        }
-
+#endif /* CONFIG_NET_CLS_ACT */
        return skb;
 }
-#endif
 
 /**
  *     netdev_rx_handler_register - register receive handler
@@ -3645,6 +3737,22 @@ static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
        }
 }
 
+static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
+                            int *ret, struct net_device *orig_dev)
+{
+#ifdef CONFIG_NETFILTER_INGRESS
+       if (nf_hook_ingress_active(skb)) {
+               if (*pt_prev) {
+                       *ret = deliver_skb(skb, *pt_prev, orig_dev);
+                       *pt_prev = NULL;
+               }
+
+               return nf_hook_ingress(skb);
+       }
+#endif /* CONFIG_NETFILTER_INGRESS */
+       return 0;
+}
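/* Annotation, not part of the commit: a sketch of how a netfilter
 * ingress hook is attached, under the assumption that registration uses
 * the nf_hook_ops fields added for NFPROTO_NETDEV in this cycle
 * (pf = NFPROTO_NETDEV, hooknum = NF_NETDEV_INGRESS, target device in
 * ->dev); nf_ingress() above then runs the hook per received packet:
 */
static unsigned int example_ingress_hook(const struct nf_hook_ops *ops,
					 struct sk_buff *skb,
					 const struct nf_hook_state *state)
{
	return NF_ACCEPT;
}

static struct nf_hook_ops example_ops = {
	.hook     = example_ingress_hook,
	.pf       = NFPROTO_NETDEV,
	.hooknum  = NF_NETDEV_INGRESS,
	.priority = 0,
	/* .dev = <target net_device>, set before nf_register_hook() */
};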
+
 static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
 {
        struct packet_type *ptype, *pt_prev;
@@ -3704,13 +3812,17 @@ another_round:
        }
 
 skip_taps:
-#ifdef CONFIG_NET_CLS_ACT
+#ifdef CONFIG_NET_INGRESS
        if (static_key_false(&ingress_needed)) {
                skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
                if (!skb)
                        goto unlock;
-       }
 
+               if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
+                       goto unlock;
+       }
+#endif
+#ifdef CONFIG_NET_CLS_ACT
        skb->tc_verd = 0;
 ncls:
 #endif
@@ -6313,6 +6425,17 @@ static int netif_alloc_netdev_queues(struct net_device *dev)
        return 0;
 }
 
+void netif_tx_stop_all_queues(struct net_device *dev)
+{
+       unsigned int i;
+
+       for (i = 0; i < dev->num_tx_queues; i++) {
+               struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+               netif_tx_stop_queue(txq);
+       }
+}
+EXPORT_SYMBOL(netif_tx_stop_all_queues);
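/* Annotation, not part of the commit: drivers can now quiesce every TX
 * queue with a single call.  A minimal sketch from a hypothetical
 * .ndo_stop() implementation:
 */
static int example_ndo_stop(struct net_device *dev)
{
	netif_tx_stop_all_queues(dev);
	return 0;
}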
+
 /**
  *     register_netdevice      - register a network device
  *     @dev: device to register
@@ -6862,6 +6985,9 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
        dev->group = INIT_NETDEV_GROUP;
        if (!dev->ethtool_ops)
                dev->ethtool_ops = &default_ethtool_ops;
+
+       nf_hook_ingress_init(dev);
+
        return dev;
 
 free_all:
index 1d00b89229024b45fef3955cd27221fafe2bfb74..eb0c3ace7458cb45c37db30700e495257a804f16 100644 (file)
@@ -98,7 +98,6 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN]
        [NETIF_F_RXALL_BIT] =            "rx-all",
        [NETIF_F_HW_L2FW_DOFFLOAD_BIT] = "l2-fwd-offload",
        [NETIF_F_BUSY_POLL_BIT] =        "busy-poll",
-       [NETIF_F_HW_SWITCH_OFFLOAD_BIT] = "hw-switch-offload",
 };
 
 static const char
index bf831a85c315905896496c3eb537c0e7c6d15d4b..d271c06bf01f70c8c84ae9aa166e9f3d94011488 100644 (file)
@@ -36,6 +36,7 @@
 #include <net/netlink.h>
 #include <linux/skbuff.h>
 #include <net/sock.h>
+#include <net/flow_dissector.h>
 #include <linux/errno.h>
 #include <linux/timer.h>
 #include <asm/uaccess.h>
@@ -45,6 +46,7 @@
 #include <linux/seccomp.h>
 #include <linux/if_vlan.h>
 #include <linux/bpf.h>
+#include <net/sch_generic.h>
 
 /**
  *     sk_filter - run a packet through a socket filter
@@ -355,8 +357,8 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
  * for socket filters: ctx == 'struct sk_buff *', for seccomp:
  * ctx == 'struct seccomp_data *'.
  */
-int bpf_convert_filter(struct sock_filter *prog, int len,
-                      struct bpf_insn *new_prog, int *new_len)
+static int bpf_convert_filter(struct sock_filter *prog, int len,
+                             struct bpf_insn *new_prog, int *new_len)
 {
        int new_flen = 0, pass = 0, target, i;
        struct bpf_insn *new_insn;
@@ -371,7 +373,8 @@ int bpf_convert_filter(struct sock_filter *prog, int len,
                return -EINVAL;
 
        if (new_prog) {
-               addrs = kcalloc(len, sizeof(*addrs), GFP_KERNEL);
+               addrs = kcalloc(len, sizeof(*addrs),
+                               GFP_KERNEL | __GFP_NOWARN);
                if (!addrs)
                        return -ENOMEM;
        }
@@ -751,7 +754,8 @@ static bool chk_code_allowed(u16 code_to_probe)
  *
  * Returns 0 if the rule set is legal or -EINVAL if not.
  */
-int bpf_check_classic(const struct sock_filter *filter, unsigned int flen)
+static int bpf_check_classic(const struct sock_filter *filter,
+                            unsigned int flen)
 {
        bool anc_found;
        int pc;
@@ -825,7 +829,6 @@ int bpf_check_classic(const struct sock_filter *filter, unsigned int flen)
 
        return -EINVAL;
 }
-EXPORT_SYMBOL(bpf_check_classic);
 
 static int bpf_prog_store_orig_filter(struct bpf_prog *fp,
                                      const struct sock_fprog *fprog)
@@ -839,7 +842,9 @@ static int bpf_prog_store_orig_filter(struct bpf_prog *fp,
 
        fkprog = fp->orig_prog;
        fkprog->len = fprog->len;
-       fkprog->filter = kmemdup(fp->insns, fsize, GFP_KERNEL);
+
+       fkprog->filter = kmemdup(fp->insns, fsize,
+                                GFP_KERNEL | __GFP_NOWARN);
        if (!fkprog->filter) {
                kfree(fp->orig_prog);
                return -ENOMEM;
@@ -941,7 +946,7 @@ static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
         * pass. At this time, the user BPF is stored in fp->insns.
         */
        old_prog = kmemdup(fp->insns, old_len * sizeof(struct sock_filter),
-                          GFP_KERNEL);
+                          GFP_KERNEL | __GFP_NOWARN);
        if (!old_prog) {
                err = -ENOMEM;
                goto out_err;
@@ -988,7 +993,8 @@ out_err:
        return ERR_PTR(err);
 }
 
-static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp)
+static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp,
+                                          bpf_aux_classic_check_t trans)
 {
        int err;
 
@@ -1001,6 +1007,17 @@ static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp)
                return ERR_PTR(err);
        }
 
+       /* There might be additional checks and transformations
+        * needed on classic filters, e.g. in the seccomp case.
+        */
+       if (trans) {
+               err = trans(fp->insns, fp->len);
+               if (err) {
+                       __bpf_prog_release(fp);
+                       return ERR_PTR(err);
+               }
+       }
+
        /* Probe if we can JIT compile the filter and if so, do
         * the compilation of the filter.
         */
@@ -1050,7 +1067,7 @@ int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
        /* bpf_prepare_filter() already takes care of freeing
         * memory in case something goes wrong.
         */
-       fp = bpf_prepare_filter(fp);
+       fp = bpf_prepare_filter(fp, NULL);
        if (IS_ERR(fp))
                return PTR_ERR(fp);
 
@@ -1059,6 +1076,53 @@ int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
 }
 EXPORT_SYMBOL_GPL(bpf_prog_create);
 
+/**
+ *     bpf_prog_create_from_user - create an unattached filter from user buffer
+ *     @pfp: the unattached filter that is created
+ *     @fprog: the filter program
+ *     @trans: post-classic verifier transformation handler
+ *
+ * This function effectively does the same as bpf_prog_create(), only
+ * that it builds up its insns buffer from a user-space-provided buffer.
+ * It also allows for passing a bpf_aux_classic_check_t handler.
+ */
+int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
+                             bpf_aux_classic_check_t trans)
+{
+       unsigned int fsize = bpf_classic_proglen(fprog);
+       struct bpf_prog *fp;
+
+       /* Make sure the new filter is there and of a sane size. */
+       if (fprog->filter == NULL)
+               return -EINVAL;
+
+       fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
+       if (!fp)
+               return -ENOMEM;
+
+       if (copy_from_user(fp->insns, fprog->filter, fsize)) {
+               __bpf_prog_free(fp);
+               return -EFAULT;
+       }
+
+       fp->len = fprog->len;
+       /* Since unattached filters are not copied back to user
+        * space through sk_get_filter(), we do not need to hold
+        * a copy here and can spare ourselves the work.
+        */
+       fp->orig_prog = NULL;
+
+       /* bpf_prepare_filter() already takes care of freeing
+        * memory in case something goes wrong.
+        */
+       fp = bpf_prepare_filter(fp, trans);
+       if (IS_ERR(fp))
+               return PTR_ERR(fp);
+
+       *pfp = fp;
+       return 0;
+}
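/* Annotation, not part of the commit: a caller holding a classic BPF
 * program in user memory (seccomp is the intended user, per the comment
 * above) passes an optional post-classic-verifier handler via @trans;
 * NULL degenerates to bpf_prog_create() behaviour.  A minimal sketch:
 */
static int example_load_filter(struct bpf_prog **pp,
			       struct sock_fprog *user_fprog)
{
	return bpf_prog_create_from_user(pp, user_fprog, NULL);
}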
+
 void bpf_prog_destroy(struct bpf_prog *fp)
 {
        __bpf_prog_release(fp);
@@ -1135,7 +1199,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
        /* bpf_prepare_filter() already takes care of freeing
         * memory in case something goes wrong.
         */
-       prog = bpf_prepare_filter(prog);
+       prog = bpf_prepare_filter(prog, NULL);
        if (IS_ERR(prog))
                return PTR_ERR(prog);
 
@@ -1175,21 +1239,6 @@ int sk_attach_bpf(u32 ufd, struct sock *sk)
        return 0;
 }
 
-/**
- *     bpf_skb_clone_not_writable - is the header of a clone not writable
- *     @skb: buffer to check
- *     @len: length up to which to write, can be negative
- *
- *     Returns true if modifying the header part of the cloned buffer
- *     does require the data to be copied. I.e. this version works with
- *     negative lengths needed for eBPF case!
- */
-static bool bpf_skb_clone_unwritable(const struct sk_buff *skb, int len)
-{
-       return skb_header_cloned(skb) ||
-              (int) skb_headroom(skb) + len > skb->hdr_len;
-}
-
 #define BPF_RECOMPUTE_CSUM(flags)      ((flags) & 1)
 
 static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
@@ -1212,9 +1261,8 @@ static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
        if (unlikely((u32) offset > 0xffff || len > sizeof(buf)))
                return -EFAULT;
 
-       offset -= skb->data - skb_mac_header(skb);
        if (unlikely(skb_cloned(skb) &&
-                    bpf_skb_clone_unwritable(skb, offset + len)))
+                    !skb_clone_writable(skb, offset + len)))
                return -EFAULT;
 
        ptr = skb_header_pointer(skb, offset, len, buf);
@@ -1258,9 +1306,8 @@ static u64 bpf_l3_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
        if (unlikely((u32) offset > 0xffff))
                return -EFAULT;
 
-       offset -= skb->data - skb_mac_header(skb);
        if (unlikely(skb_cloned(skb) &&
-                    bpf_skb_clone_unwritable(skb, offset + sizeof(sum))))
+                    !skb_clone_writable(skb, offset + sizeof(sum))))
                return -EFAULT;
 
        ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
@@ -1306,9 +1353,8 @@ static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
        if (unlikely((u32) offset > 0xffff))
                return -EFAULT;
 
-       offset -= skb->data - skb_mac_header(skb);
        if (unlikely(skb_cloned(skb) &&
-                    bpf_skb_clone_unwritable(skb, offset + sizeof(sum))))
+                    !skb_clone_writable(skb, offset + sizeof(sum))))
                return -EFAULT;
 
        ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
@@ -1344,6 +1390,40 @@ const struct bpf_func_proto bpf_l4_csum_replace_proto = {
        .arg5_type      = ARG_ANYTHING,
 };
 
+#define BPF_IS_REDIRECT_INGRESS(flags) ((flags) & 1)
+
+static u64 bpf_clone_redirect(u64 r1, u64 ifindex, u64 flags, u64 r4, u64 r5)
+{
+       struct sk_buff *skb = (struct sk_buff *) (long) r1, *skb2;
+       struct net_device *dev;
+
+       dev = dev_get_by_index_rcu(dev_net(skb->dev), ifindex);
+       if (unlikely(!dev))
+               return -EINVAL;
+
+       if (unlikely(!(dev->flags & IFF_UP)))
+               return -EINVAL;
+
+       skb2 = skb_clone(skb, GFP_ATOMIC);
+       if (unlikely(!skb2))
+               return -ENOMEM;
+
+       if (BPF_IS_REDIRECT_INGRESS(flags))
+               return dev_forward_skb(dev, skb2);
+
+       skb2->dev = dev;
+       return dev_queue_xmit(skb2);
+}
+
+const struct bpf_func_proto bpf_clone_redirect_proto = {
+       .func           = bpf_clone_redirect,
+       .gpl_only       = false,
+       .ret_type       = RET_INTEGER,
+       .arg1_type      = ARG_PTR_TO_CTX,
+       .arg2_type      = ARG_ANYTHING,
+       .arg3_type      = ARG_ANYTHING,
+};
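/* Annotation, not part of the commit: from an eBPF classifier/action
 * program the corresponding helper call is
 * bpf_clone_redirect(skb, ifindex, flags).  Per the code above, bit 0 of
 * flags selects ingress delivery (dev_forward_skb) on the target device,
 * otherwise the clone is transmitted (dev_queue_xmit):
 *
 *     bpf_clone_redirect(skb, ifindex, 0);   egress clone out of ifindex
 *     bpf_clone_redirect(skb, ifindex, 1);   ingress clone into ifindex
 */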
+
 static const struct bpf_func_proto *
 sk_filter_func_proto(enum bpf_func_id func_id)
 {
@@ -1358,6 +1438,10 @@ sk_filter_func_proto(enum bpf_func_id func_id)
                return &bpf_get_prandom_u32_proto;
        case BPF_FUNC_get_smp_processor_id:
                return &bpf_get_smp_processor_id_proto;
+       case BPF_FUNC_tail_call:
+               return &bpf_tail_call_proto;
+       case BPF_FUNC_ktime_get_ns:
+               return &bpf_ktime_get_ns_proto;
        default:
                return NULL;
        }
@@ -1373,18 +1457,15 @@ tc_cls_act_func_proto(enum bpf_func_id func_id)
                return &bpf_l3_csum_replace_proto;
        case BPF_FUNC_l4_csum_replace:
                return &bpf_l4_csum_replace_proto;
+       case BPF_FUNC_clone_redirect:
+               return &bpf_clone_redirect_proto;
        default:
                return sk_filter_func_proto(func_id);
        }
 }
 
-static bool sk_filter_is_valid_access(int off, int size,
-                                     enum bpf_access_type type)
+static bool __is_valid_access(int off, int size, enum bpf_access_type type)
 {
-       /* only read is allowed */
-       if (type != BPF_READ)
-               return false;
-
        /* check bounds */
        if (off < 0 || off >= sizeof(struct __sk_buff))
                return false;
@@ -1400,8 +1481,42 @@ static bool sk_filter_is_valid_access(int off, int size,
        return true;
 }
 
-static u32 sk_filter_convert_ctx_access(int dst_reg, int src_reg, int ctx_off,
-                                       struct bpf_insn *insn_buf)
+static bool sk_filter_is_valid_access(int off, int size,
+                                     enum bpf_access_type type)
+{
+       if (type == BPF_WRITE) {
+               switch (off) {
+               case offsetof(struct __sk_buff, cb[0]) ...
+                       offsetof(struct __sk_buff, cb[4]):
+                       break;
+               default:
+                       return false;
+               }
+       }
+
+       return __is_valid_access(off, size, type);
+}
+
+static bool tc_cls_act_is_valid_access(int off, int size,
+                                      enum bpf_access_type type)
+{
+       if (type == BPF_WRITE) {
+               switch (off) {
+               case offsetof(struct __sk_buff, mark):
+               case offsetof(struct __sk_buff, tc_index):
+               case offsetof(struct __sk_buff, cb[0]) ...
+                       offsetof(struct __sk_buff, cb[4]):
+                       break;
+               default:
+                       return false;
+               }
+       }
+       return __is_valid_access(off, size, type);
+}
+
+static u32 bpf_net_convert_ctx_access(enum bpf_access_type type, int dst_reg,
+                                     int src_reg, int ctx_off,
+                                     struct bpf_insn *insn_buf)
 {
        struct bpf_insn *insn = insn_buf;
 
@@ -1434,8 +1549,34 @@ static u32 sk_filter_convert_ctx_access(int dst_reg, int src_reg, int ctx_off,
                                      offsetof(struct sk_buff, priority));
                break;
 
+       case offsetof(struct __sk_buff, ingress_ifindex):
+               BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, skb_iif) != 4);
+
+               *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
+                                     offsetof(struct sk_buff, skb_iif));
+               break;
+
+       case offsetof(struct __sk_buff, ifindex):
+               BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
+
+               *insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)),
+                                     dst_reg, src_reg,
+                                     offsetof(struct sk_buff, dev));
+               *insn++ = BPF_JMP_IMM(BPF_JEQ, dst_reg, 0, 1);
+               *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, dst_reg,
+                                     offsetof(struct net_device, ifindex));
+               break;
+
        case offsetof(struct __sk_buff, mark):
-               return convert_skb_access(SKF_AD_MARK, dst_reg, src_reg, insn);
+               BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
+
+               if (type == BPF_WRITE)
+                       *insn++ = BPF_STX_MEM(BPF_W, dst_reg, src_reg,
+                                             offsetof(struct sk_buff, mark));
+               else
+                       *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
+                                             offsetof(struct sk_buff, mark));
+               break;
 
        case offsetof(struct __sk_buff, pkt_type):
                return convert_skb_access(SKF_AD_PKTTYPE, dst_reg, src_reg, insn);
@@ -1450,6 +1591,38 @@ static u32 sk_filter_convert_ctx_access(int dst_reg, int src_reg, int ctx_off,
        case offsetof(struct __sk_buff, vlan_tci):
                return convert_skb_access(SKF_AD_VLAN_TAG,
                                          dst_reg, src_reg, insn);
+
+       case offsetof(struct __sk_buff, cb[0]) ...
+               offsetof(struct __sk_buff, cb[4]):
+               BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, data) < 20);
+
+               ctx_off -= offsetof(struct __sk_buff, cb[0]);
+               ctx_off += offsetof(struct sk_buff, cb);
+               ctx_off += offsetof(struct qdisc_skb_cb, data);
+               if (type == BPF_WRITE)
+                       *insn++ = BPF_STX_MEM(BPF_W, dst_reg, src_reg, ctx_off);
+               else
+                       *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg, ctx_off);
+               break;
+
+       case offsetof(struct __sk_buff, tc_index):
+#ifdef CONFIG_NET_SCHED
+               BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, tc_index) != 2);
+
+               if (type == BPF_WRITE)
+                       *insn++ = BPF_STX_MEM(BPF_H, dst_reg, src_reg,
+                                             offsetof(struct sk_buff, tc_index));
+               else
+                       *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
+                                             offsetof(struct sk_buff, tc_index));
+               break;
+#else
+               if (type == BPF_WRITE)
+                       *insn++ = BPF_MOV64_REG(dst_reg, dst_reg);
+               else
+                       *insn++ = BPF_MOV64_IMM(dst_reg, 0);
+               break;
+#endif
        }
 
        return insn - insn_buf;
@@ -1458,13 +1631,13 @@ static u32 sk_filter_convert_ctx_access(int dst_reg, int src_reg, int ctx_off,
 static const struct bpf_verifier_ops sk_filter_ops = {
        .get_func_proto = sk_filter_func_proto,
        .is_valid_access = sk_filter_is_valid_access,
-       .convert_ctx_access = sk_filter_convert_ctx_access,
+       .convert_ctx_access = bpf_net_convert_ctx_access,
 };
 
 static const struct bpf_verifier_ops tc_cls_act_ops = {
        .get_func_proto = tc_cls_act_func_proto,
-       .is_valid_access = sk_filter_is_valid_access,
-       .convert_ctx_access = sk_filter_convert_ctx_access,
+       .is_valid_access = tc_cls_act_is_valid_access,
+       .convert_ctx_access = bpf_net_convert_ctx_access,
 };
 
 static struct bpf_prog_type_list sk_filter_type __read_mostly = {
index 2c35c02a931e227fa368cd346873596d4b037a3d..77e22e4fc89826507b5f2ce197d62b4c1dff1b0e 100644 (file)
@@ -1,3 +1,4 @@
+#include <linux/kernel.h>
 #include <linux/skbuff.h>
 #include <linux/export.h>
 #include <linux/ip.h>
 #include <linux/if_tunnel.h>
 #include <linux/if_pppox.h>
 #include <linux/ppp_defs.h>
-#include <net/flow_keys.h>
+#include <linux/stddef.h>
+#include <linux/if_ether.h>
+#include <linux/mpls.h>
+#include <net/flow_dissector.h>
 #include <scsi/fc/fc_fcoe.h>
 
-/* copy saddr & daddr, possibly using 64bit load/store
- * Equivalent to :     flow->src = iph->saddr;
- *                     flow->dst = iph->daddr;
- */
-static void iph_to_flow_copy_addrs(struct flow_keys *flow, const struct iphdr *iph)
+static bool skb_flow_dissector_uses_key(struct flow_dissector *flow_dissector,
+                                       enum flow_dissector_key_id key_id)
 {
-       BUILD_BUG_ON(offsetof(typeof(*flow), dst) !=
-                    offsetof(typeof(*flow), src) + sizeof(flow->src));
-       memcpy(&flow->src, &iph->saddr, sizeof(flow->src) + sizeof(flow->dst));
+       return flow_dissector->used_keys & (1 << key_id);
+}
+
+static void skb_flow_dissector_set_key(struct flow_dissector *flow_dissector,
+                                      enum flow_dissector_key_id key_id)
+{
+       flow_dissector->used_keys |= (1 << key_id);
+}
+
+static void *skb_flow_dissector_target(struct flow_dissector *flow_dissector,
+                                      enum flow_dissector_key_id key_id,
+                                      void *target_container)
+{
+       return ((char *) target_container) + flow_dissector->offset[key_id];
+}
+
+void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
+                            const struct flow_dissector_key *key,
+                            unsigned int key_count)
+{
+       unsigned int i;
+
+       memset(flow_dissector, 0, sizeof(*flow_dissector));
+
+       for (i = 0; i < key_count; i++, key++) {
+               /* User should make sure that every key target offset is within
+                * boundaries of unsigned short.
+                */
+               BUG_ON(key->offset > USHRT_MAX);
+               BUG_ON(skb_flow_dissector_uses_key(flow_dissector,
+                                                  key->key_id));
+
+               skb_flow_dissector_set_key(flow_dissector, key->key_id);
+               flow_dissector->offset[key->key_id] = key->offset;
+       }
+
+       /* Ensure that the dissector always includes the control and basic
+        * keys.  That way we can avoid handling their absence in the fast
+        * path.
+        */
+       BUG_ON(!skb_flow_dissector_uses_key(flow_dissector,
+                                           FLOW_DISSECTOR_KEY_CONTROL));
+       BUG_ON(!skb_flow_dissector_uses_key(flow_dissector,
+                                           FLOW_DISSECTOR_KEY_BASIC));
 }
+EXPORT_SYMBOL(skb_flow_dissector_init);
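/* Annotation, not part of the commit: a dissector user declares which
 * keys it wants and where each lands in its container struct, then
 * registers them once.  A minimal sketch mirroring the
 * flow_keys_dissector_keys[] table later in this file; example_* names
 * are hypothetical:
 */
static const struct flow_dissector_key example_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct flow_keys, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct flow_keys, basic),
	},
};

static struct flow_dissector example_dissector;

static void example_init(void)
{
	skb_flow_dissector_init(&example_dissector, example_keys,
				ARRAY_SIZE(example_keys));
}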
 
 /**
  * __skb_flow_get_ports - extract the upper layer ports and return them
@@ -63,17 +105,30 @@ EXPORT_SYMBOL(__skb_flow_get_ports);
 /**
  * __skb_flow_dissect - extract the flow_keys struct and return it
  * @skb: sk_buff to extract the flow from, can be NULL if the rest are specified
+ * @flow_dissector: list of keys to dissect
+ * @target_container: target structure to put dissected values into
  * @data: raw buffer pointer to the packet, if NULL use skb->data
  * @proto: protocol for which to get the flow, if @data is NULL use skb->protocol
  * @nhoff: network header offset, if @data is NULL use skb_network_offset(skb)
  * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
  *
- * The function will try to retrieve the struct flow_keys from either the skbuff
- * or a raw buffer specified by the rest parameters
+ * The function will try to retrieve individual keys into the target
+ * specified by @flow_dissector, from either the skbuff or a raw buffer
+ * described by the remaining parameters.
+ *
+ * Caller must take care of zeroing target container memory.
  */
-bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow,
+bool __skb_flow_dissect(const struct sk_buff *skb,
+                       struct flow_dissector *flow_dissector,
+                       void *target_container,
                        void *data, __be16 proto, int nhoff, int hlen)
 {
+       struct flow_dissector_key_control *key_control;
+       struct flow_dissector_key_basic *key_basic;
+       struct flow_dissector_key_addrs *key_addrs;
+       struct flow_dissector_key_ports *key_ports;
+       struct flow_dissector_key_tags *key_tags;
+       struct flow_dissector_key_keyid *key_keyid;
        u8 ip_proto;
 
        if (!data) {
@@ -83,7 +138,30 @@ bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow,
                hlen = skb_headlen(skb);
        }
 
-       memset(flow, 0, sizeof(*flow));
+       /* It is ensured by skb_flow_dissector_init() that the control key
+        * will always be present.
+        */
+       key_control = skb_flow_dissector_target(flow_dissector,
+                                               FLOW_DISSECTOR_KEY_CONTROL,
+                                               target_container);
+
+       /* It is ensured by skb_flow_dissector_init() that the basic key
+        * will always be present.
+        */
+       key_basic = skb_flow_dissector_target(flow_dissector,
+                                             FLOW_DISSECTOR_KEY_BASIC,
+                                             target_container);
+
+       if (skb_flow_dissector_uses_key(flow_dissector,
+                                       FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+               struct ethhdr *eth = eth_hdr(skb);
+               struct flow_dissector_key_eth_addrs *key_eth_addrs;
+
+               key_eth_addrs = skb_flow_dissector_target(flow_dissector,
+                                                         FLOW_DISSECTOR_KEY_ETH_ADDRS,
+                                                         target_container);
+               memcpy(key_eth_addrs, &eth->h_dest, sizeof(*key_eth_addrs));
+       }
 
 again:
        switch (proto) {
@@ -100,14 +178,15 @@ ip:
                if (ip_is_fragment(iph))
                        ip_proto = 0;
 
-               /* skip the address processing if skb is NULL.  The assumption
-                * here is that if there is no skb we are not looking for flow
-                * info but lengths and protocols.
-                */
-               if (!skb)
+               if (!skb_flow_dissector_uses_key(flow_dissector,
+                                                FLOW_DISSECTOR_KEY_IPV4_ADDRS))
                        break;
 
-               iph_to_flow_copy_addrs(flow, iph);
+               key_addrs = skb_flow_dissector_target(flow_dissector,
+                             FLOW_DISSECTOR_KEY_IPV4_ADDRS, target_container);
+               memcpy(&key_addrs->v4addrs, &iph->saddr,
+                      sizeof(key_addrs->v4addrs));
+               key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
                break;
        }
        case htons(ETH_P_IPV6): {
@@ -123,25 +202,27 @@ ipv6:
                ip_proto = iph->nexthdr;
                nhoff += sizeof(struct ipv6hdr);
 
-               /* see comment above in IPv4 section */
-               if (!skb)
-                       break;
+               if (skb_flow_dissector_uses_key(flow_dissector,
+                                               FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
+                       struct flow_dissector_key_ipv6_addrs *key_ipv6_addrs;
 
-               flow->src = (__force __be32)ipv6_addr_hash(&iph->saddr);
-               flow->dst = (__force __be32)ipv6_addr_hash(&iph->daddr);
+                       key_ipv6_addrs = skb_flow_dissector_target(flow_dissector,
+                                                                  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+                                                                  target_container);
+
+                       memcpy(key_ipv6_addrs, &iph->saddr, sizeof(*key_ipv6_addrs));
+                       key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+               }
 
                flow_label = ip6_flowlabel(iph);
                if (flow_label) {
-                       /* Awesome, IPv6 packet has a flow label so we can
-                        * use that to represent the ports without any
-                        * further dissection.
-                        */
-                       flow->n_proto = proto;
-                       flow->ip_proto = ip_proto;
-                       flow->ports = flow_label;
-                       flow->thoff = (u16)nhoff;
-
-                       return true;
+                       if (skb_flow_dissector_uses_key(flow_dissector,
+                               FLOW_DISSECTOR_KEY_FLOW_LABEL)) {
+                               key_tags = skb_flow_dissector_target(flow_dissector,
+                                                                    FLOW_DISSECTOR_KEY_FLOW_LABEL,
+                                                                    target_container);
+                               key_tags->flow_label = ntohl(flow_label);
+                       }
                }
 
                break;
@@ -155,6 +236,15 @@ ipv6:
                if (!vlan)
                        return false;
 
+               if (skb_flow_dissector_uses_key(flow_dissector,
+                                               FLOW_DISSECTOR_KEY_VLANID)) {
+                       key_tags = skb_flow_dissector_target(flow_dissector,
+                                                            FLOW_DISSECTOR_KEY_VLANID,
+                                                            target_container);
+
+                       key_tags->vlan_id = skb_vlan_tag_get_id(skb);
+               }
+
                proto = vlan->h_vlan_encapsulated_proto;
                nhoff += sizeof(*vlan);
                goto again;
@@ -186,14 +276,52 @@ ipv6:
                hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
                if (!hdr)
                        return false;
-               flow->src = hdr->srcnode;
-               flow->dst = 0;
-               flow->n_proto = proto;
-               flow->thoff = (u16)nhoff;
+               key_basic->n_proto = proto;
+               key_control->thoff = (u16)nhoff;
+
+               if (skb_flow_dissector_uses_key(flow_dissector,
+                                               FLOW_DISSECTOR_KEY_TIPC_ADDRS)) {
+                       key_addrs = skb_flow_dissector_target(flow_dissector,
+                                                             FLOW_DISSECTOR_KEY_TIPC_ADDRS,
+                                                             target_container);
+                       key_addrs->tipcaddrs.srcnode = hdr->srcnode;
+                       key_control->addr_type = FLOW_DISSECTOR_KEY_TIPC_ADDRS;
+               }
                return true;
        }
+
+       case htons(ETH_P_MPLS_UC):
+       case htons(ETH_P_MPLS_MC): {
+               struct mpls_label *hdr, _hdr[2];
+mpls:
+               hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data,
+                                          hlen, &_hdr);
+               if (!hdr)
+                       return false;
+
+               if ((ntohl(hdr[0].entry) & MPLS_LS_LABEL_MASK) ==
+                    MPLS_LABEL_ENTROPY) {
+                       if (skb_flow_dissector_uses_key(flow_dissector,
+                                                       FLOW_DISSECTOR_KEY_MPLS_ENTROPY)) {
+                               key_keyid = skb_flow_dissector_target(flow_dissector,
+                                                                     FLOW_DISSECTOR_KEY_MPLS_ENTROPY,
+                                                                     target_container);
+                               key_keyid->keyid = hdr[1].entry &
+                                       htonl(MPLS_LS_LABEL_MASK);
+                       }
+
+                       key_basic->n_proto = proto;
+                       key_basic->ip_proto = ip_proto;
+                       key_control->thoff = (u16)nhoff;
+
+                       return true;
+               }
+
+               return true;
+       }
+
        case htons(ETH_P_FCOE):
-               flow->thoff = (u16)(nhoff + FCOE_HEADER_LEN);
+               key_control->thoff = (u16)(nhoff + FCOE_HEADER_LEN);
                /* fall through */
        default:
                return false;
@@ -213,30 +341,47 @@ ipv6:
                 * Only look inside GRE if version zero and no
                 * routing
                 */
-               if (!(hdr->flags & (GRE_VERSION|GRE_ROUTING))) {
-                       proto = hdr->proto;
+               if (hdr->flags & (GRE_VERSION | GRE_ROUTING))
+                       break;
+
+               proto = hdr->proto;
+               nhoff += 4;
+               if (hdr->flags & GRE_CSUM)
                        nhoff += 4;
-                       if (hdr->flags & GRE_CSUM)
-                               nhoff += 4;
-                       if (hdr->flags & GRE_KEY)
-                               nhoff += 4;
-                       if (hdr->flags & GRE_SEQ)
-                               nhoff += 4;
-                       if (proto == htons(ETH_P_TEB)) {
-                               const struct ethhdr *eth;
-                               struct ethhdr _eth;
-
-                               eth = __skb_header_pointer(skb, nhoff,
-                                                          sizeof(_eth),
-                                                          data, hlen, &_eth);
-                               if (!eth)
-                                       return false;
-                               proto = eth->h_proto;
-                               nhoff += sizeof(*eth);
+               if (hdr->flags & GRE_KEY) {
+                       const __be32 *keyid;
+                       __be32 _keyid;
+
+                       keyid = __skb_header_pointer(skb, nhoff, sizeof(_keyid),
+                                                    data, hlen, &_keyid);
+
+                       if (!keyid)
+                               return false;
+
+                       if (skb_flow_dissector_uses_key(flow_dissector,
+                                                       FLOW_DISSECTOR_KEY_GRE_KEYID)) {
+                               key_keyid = skb_flow_dissector_target(flow_dissector,
+                                                                     FLOW_DISSECTOR_KEY_GRE_KEYID,
+                                                                     target_container);
+                               key_keyid->keyid = *keyid;
                        }
-                       goto again;
+                       nhoff += 4;
                }
-               break;
+               if (hdr->flags & GRE_SEQ)
+                       nhoff += 4;
+               if (proto == htons(ETH_P_TEB)) {
+                       const struct ethhdr *eth;
+                       struct ethhdr _eth;
+
+                       eth = __skb_header_pointer(skb, nhoff,
+                                                  sizeof(_eth),
+                                                  data, hlen, &_eth);
+                       if (!eth)
+                               return false;
+                       proto = eth->h_proto;
+                       nhoff += sizeof(*eth);
+               }
+               goto again;
        }
        case IPPROTO_IPIP:
                proto = htons(ETH_P_IP);
@@ -244,18 +389,25 @@ ipv6:
        case IPPROTO_IPV6:
                proto = htons(ETH_P_IPV6);
                goto ipv6;
+       case IPPROTO_MPLS:
+               proto = htons(ETH_P_MPLS_UC);
+               goto mpls;
        default:
                break;
        }
 
-       flow->n_proto = proto;
-       flow->ip_proto = ip_proto;
-       flow->thoff = (u16) nhoff;
-
-       /* unless skb is set we don't need to record port info */
-       if (skb)
-               flow->ports = __skb_flow_get_ports(skb, nhoff, ip_proto,
-                                                  data, hlen);
+       key_basic->n_proto = proto;
+       key_basic->ip_proto = ip_proto;
+       key_control->thoff = (u16)nhoff;
+
+       if (skb_flow_dissector_uses_key(flow_dissector,
+                                       FLOW_DISSECTOR_KEY_PORTS)) {
+               key_ports = skb_flow_dissector_target(flow_dissector,
+                                                     FLOW_DISSECTOR_KEY_PORTS,
+                                                     target_container);
+               key_ports->ports = __skb_flow_get_ports(skb, nhoff, ip_proto,
+                                                       data, hlen);
+       }
 
        return true;
 }
@@ -267,27 +419,109 @@ static __always_inline void __flow_hash_secret_init(void)
        net_get_random_once(&hashrnd, sizeof(hashrnd));
 }
 
-static __always_inline u32 __flow_hash_3words(u32 a, u32 b, u32 c)
+static __always_inline u32 __flow_hash_words(u32 *words, u32 length, u32 keyval)
 {
-       __flow_hash_secret_init();
-       return jhash_3words(a, b, c, hashrnd);
+       return jhash2(words, length, keyval);
 }
 
-static inline u32 __flow_hash_from_keys(struct flow_keys *keys)
+static inline void *flow_keys_hash_start(struct flow_keys *flow)
 {
-       u32 hash;
+       BUILD_BUG_ON(FLOW_KEYS_HASH_OFFSET % sizeof(u32));
+       return (void *)flow + FLOW_KEYS_HASH_OFFSET;
+}
 
-       /* get a consistent hash (same value on both flow directions) */
-       if (((__force u32)keys->dst < (__force u32)keys->src) ||
-           (((__force u32)keys->dst == (__force u32)keys->src) &&
-            ((__force u16)keys->port16[1] < (__force u16)keys->port16[0]))) {
-               swap(keys->dst, keys->src);
-               swap(keys->port16[0], keys->port16[1]);
+static inline size_t flow_keys_hash_length(struct flow_keys *flow)
+{
+       size_t diff = FLOW_KEYS_HASH_OFFSET + sizeof(flow->addrs);
+       BUILD_BUG_ON((sizeof(*flow) - FLOW_KEYS_HASH_OFFSET) % sizeof(u32));
+       BUILD_BUG_ON(offsetof(typeof(*flow), addrs) !=
+                    sizeof(*flow) - sizeof(flow->addrs));
+
+       switch (flow->control.addr_type) {
+       case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
+               diff -= sizeof(flow->addrs.v4addrs);
+               break;
+       case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
+               diff -= sizeof(flow->addrs.v6addrs);
+               break;
+       case FLOW_DISSECTOR_KEY_TIPC_ADDRS:
+               diff -= sizeof(flow->addrs.tipcaddrs);
+               break;
        }
+       return (sizeof(*flow) - diff) / sizeof(u32);
+}
 
-       hash = __flow_hash_3words((__force u32)keys->dst,
-                                 (__force u32)keys->src,
-                                 (__force u32)keys->ports);
+__be32 flow_get_u32_src(const struct flow_keys *flow)
+{
+       switch (flow->control.addr_type) {
+       case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
+               return flow->addrs.v4addrs.src;
+       case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
+               return (__force __be32)ipv6_addr_hash(
+                       &flow->addrs.v6addrs.src);
+       case FLOW_DISSECTOR_KEY_TIPC_ADDRS:
+               return flow->addrs.tipcaddrs.srcnode;
+       default:
+               return 0;
+       }
+}
+EXPORT_SYMBOL(flow_get_u32_src);
+
+__be32 flow_get_u32_dst(const struct flow_keys *flow)
+{
+       switch (flow->control.addr_type) {
+       case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
+               return flow->addrs.v4addrs.dst;
+       case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
+               return (__force __be32)ipv6_addr_hash(
+                       &flow->addrs.v6addrs.dst);
+       default:
+               return 0;
+       }
+}
+EXPORT_SYMBOL(flow_get_u32_dst);
+
+static inline void __flow_hash_consistentify(struct flow_keys *keys)
+{
+       int addr_diff, i;
+
+       switch (keys->control.addr_type) {
+       case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
+               addr_diff = (__force u32)keys->addrs.v4addrs.dst -
+                           (__force u32)keys->addrs.v4addrs.src;
+               if ((addr_diff < 0) ||
+                   (addr_diff == 0 &&
+                    ((__force u16)keys->ports.dst <
+                     (__force u16)keys->ports.src))) {
+                       swap(keys->addrs.v4addrs.src, keys->addrs.v4addrs.dst);
+                       swap(keys->ports.src, keys->ports.dst);
+               }
+               break;
+       case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
+               addr_diff = memcmp(&keys->addrs.v6addrs.dst,
+                                  &keys->addrs.v6addrs.src,
+                                  sizeof(keys->addrs.v6addrs.dst));
+               if ((addr_diff < 0) ||
+                   (addr_diff == 0 &&
+                    ((__force u16)keys->ports.dst <
+                     (__force u16)keys->ports.src))) {
+                       for (i = 0; i < 4; i++)
+                               swap(keys->addrs.v6addrs.src.s6_addr32[i],
+                                    keys->addrs.v6addrs.dst.s6_addr32[i]);
+                       swap(keys->ports.src, keys->ports.dst);
+               }
+               break;
+       }
+}
+
+static inline u32 __flow_hash_from_keys(struct flow_keys *keys, u32 keyval)
+{
+       u32 hash;
+
+       __flow_hash_consistentify(keys);
+
+       hash = __flow_hash_words((u32 *)flow_keys_hash_start(keys),
+                                flow_keys_hash_length(keys), keyval);
        if (!hash)
                hash = 1;
 
@@ -296,12 +530,52 @@ static inline u32 __flow_hash_from_keys(struct flow_keys *keys)
 
 u32 flow_hash_from_keys(struct flow_keys *keys)
 {
-       return __flow_hash_from_keys(keys);
+       __flow_hash_secret_init();
+       return __flow_hash_from_keys(keys, hashrnd);
 }
 EXPORT_SYMBOL(flow_hash_from_keys);
 
-/*
- * __skb_get_hash: calculate a flow hash based on src/dst addresses
+static inline u32 ___skb_get_hash(const struct sk_buff *skb,
+                                 struct flow_keys *keys, u32 keyval)
+{
+       if (!skb_flow_dissect_flow_keys(skb, keys))
+               return 0;
+
+       return __flow_hash_from_keys(keys, keyval);
+}
+
+struct _flow_keys_digest_data {
+       __be16  n_proto;
+       u8      ip_proto;
+       u8      padding;
+       __be32  ports;
+       __be32  src;
+       __be32  dst;
+};
+
+void make_flow_keys_digest(struct flow_keys_digest *digest,
+                          const struct flow_keys *flow)
+{
+       struct _flow_keys_digest_data *data =
+           (struct _flow_keys_digest_data *)digest;
+
+       BUILD_BUG_ON(sizeof(*data) > sizeof(*digest));
+
+       memset(digest, 0, sizeof(*digest));
+
+       data->n_proto = flow->basic.n_proto;
+       data->ip_proto = flow->basic.ip_proto;
+       data->ports = flow->ports.ports;
+       data->src = flow->addrs.v4addrs.src;
+       data->dst = flow->addrs.v4addrs.dst;
+}
+EXPORT_SYMBOL(make_flow_keys_digest);
+
+/**
+ * __skb_get_hash: calculate a flow hash
+ * @skb: sk_buff to calculate flow hash from
+ *
+ * This function calculates a flow hash based on src/dst addresses
  * and src/dst port numbers.  Sets hash in skb to non-zero hash value
  * on success, zero indicates no valid hash.  Also, sets l4_hash in skb
  * if hash is a canonical 4-tuple hash over transport ports.
@@ -309,53 +583,34 @@ EXPORT_SYMBOL(flow_hash_from_keys);
 void __skb_get_hash(struct sk_buff *skb)
 {
        struct flow_keys keys;
+       u32 hash;
 
-       if (!skb_flow_dissect(skb, &keys))
-               return;
+       __flow_hash_secret_init();
 
-       if (keys.ports)
+       hash = ___skb_get_hash(skb, &keys, hashrnd);
+       if (!hash)
+               return;
+       if (keys.ports.ports)
                skb->l4_hash = 1;
-
        skb->sw_hash = 1;
-
-       skb->hash = __flow_hash_from_keys(&keys);
+       skb->hash = hash;
 }
 EXPORT_SYMBOL(__skb_get_hash);
 
-/*
- * Returns a Tx hash based on the given packet descriptor a Tx queues' number
- * to be used as a distribution range.
- */
-u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
-                 unsigned int num_tx_queues)
+__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb)
 {
-       u32 hash;
-       u16 qoffset = 0;
-       u16 qcount = num_tx_queues;
-
-       if (skb_rx_queue_recorded(skb)) {
-               hash = skb_get_rx_queue(skb);
-               while (unlikely(hash >= num_tx_queues))
-                       hash -= num_tx_queues;
-               return hash;
-       }
-
-       if (dev->num_tc) {
-               u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
-               qoffset = dev->tc_to_txq[tc].offset;
-               qcount = dev->tc_to_txq[tc].count;
-       }
+       struct flow_keys keys;
 
-       return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
+       return ___skb_get_hash(skb, &keys, perturb);
 }
-EXPORT_SYMBOL(__skb_tx_hash);
+EXPORT_SYMBOL(skb_get_hash_perturb);
 
 u32 __skb_get_poff(const struct sk_buff *skb, void *data,
                   const struct flow_keys *keys, int hlen)
 {
-       u32 poff = keys->thoff;
+       u32 poff = keys->control.thoff;
 
-       switch (keys->ip_proto) {
+       switch (keys->basic.ip_proto) {
        case IPPROTO_TCP: {
                /* access doff as u8 to avoid unaligned access */
                const u8 *doff;
@@ -396,8 +651,12 @@ u32 __skb_get_poff(const struct sk_buff *skb, void *data,
        return poff;
 }
 
-/* skb_get_poff() returns the offset to the payload as far as it could
- * be dissected. The main user is currently BPF, so that we can dynamically
+/**
+ * skb_get_poff - get the offset to the payload
+ * @skb: sk_buff to get the payload offset from
+ *
+ * The function will get the offset to the payload as far as it could
+ * be dissected.  The main user is currently BPF, so that we can dynamically
  * truncate packets without needing to push actual payload to the user
  * space and can analyze headers only, instead.
  */
@@ -405,86 +664,76 @@ u32 skb_get_poff(const struct sk_buff *skb)
 {
        struct flow_keys keys;
 
-       if (!skb_flow_dissect(skb, &keys))
+       if (!skb_flow_dissect_flow_keys(skb, &keys))
                return 0;
 
        return __skb_get_poff(skb, skb->data, &keys, skb_headlen(skb));
 }
 
-static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
+static const struct flow_dissector_key flow_keys_dissector_keys[] = {
+       {
+               .key_id = FLOW_DISSECTOR_KEY_CONTROL,
+               .offset = offsetof(struct flow_keys, control),
+       },
+       {
+               .key_id = FLOW_DISSECTOR_KEY_BASIC,
+               .offset = offsetof(struct flow_keys, basic),
+       },
+       {
+               .key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+               .offset = offsetof(struct flow_keys, addrs.v4addrs),
+       },
+       {
+               .key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+               .offset = offsetof(struct flow_keys, addrs.v6addrs),
+       },
+       {
+               .key_id = FLOW_DISSECTOR_KEY_TIPC_ADDRS,
+               .offset = offsetof(struct flow_keys, addrs.tipcaddrs),
+       },
+       {
+               .key_id = FLOW_DISSECTOR_KEY_PORTS,
+               .offset = offsetof(struct flow_keys, ports),
+       },
+       {
+               .key_id = FLOW_DISSECTOR_KEY_VLANID,
+               .offset = offsetof(struct flow_keys, tags),
+       },
+       {
+               .key_id = FLOW_DISSECTOR_KEY_FLOW_LABEL,
+               .offset = offsetof(struct flow_keys, tags),
+       },
+       {
+               .key_id = FLOW_DISSECTOR_KEY_GRE_KEYID,
+               .offset = offsetof(struct flow_keys, keyid),
+       },
+};
+
+static const struct flow_dissector_key flow_keys_buf_dissector_keys[] = {
+       {
+               .key_id = FLOW_DISSECTOR_KEY_CONTROL,
+               .offset = offsetof(struct flow_keys, control),
+       },
+       {
+               .key_id = FLOW_DISSECTOR_KEY_BASIC,
+               .offset = offsetof(struct flow_keys, basic),
+       },
+};
+
+struct flow_dissector flow_keys_dissector __read_mostly;
+EXPORT_SYMBOL(flow_keys_dissector);
+
+struct flow_dissector flow_keys_buf_dissector __read_mostly;
+
+static int __init init_default_flow_dissectors(void)
 {
-#ifdef CONFIG_XPS
-       struct xps_dev_maps *dev_maps;
-       struct xps_map *map;
-       int queue_index = -1;
-
-       rcu_read_lock();
-       dev_maps = rcu_dereference(dev->xps_maps);
-       if (dev_maps) {
-               map = rcu_dereference(
-                   dev_maps->cpu_map[skb->sender_cpu - 1]);
-               if (map) {
-                       if (map->len == 1)
-                               queue_index = map->queues[0];
-                       else
-                               queue_index = map->queues[reciprocal_scale(skb_get_hash(skb),
-                                                                          map->len)];
-                       if (unlikely(queue_index >= dev->real_num_tx_queues))
-                               queue_index = -1;
-               }
-       }
-       rcu_read_unlock();
-
-       return queue_index;
-#else
-       return -1;
-#endif
-}
-
-static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
-{
-       struct sock *sk = skb->sk;
-       int queue_index = sk_tx_queue_get(sk);
-
-       if (queue_index < 0 || skb->ooo_okay ||
-           queue_index >= dev->real_num_tx_queues) {
-               int new_index = get_xps_queue(dev, skb);
-               if (new_index < 0)
-                       new_index = skb_tx_hash(dev, skb);
-
-               if (queue_index != new_index && sk &&
-                   rcu_access_pointer(sk->sk_dst_cache))
-                       sk_tx_queue_set(sk, new_index);
-
-               queue_index = new_index;
-       }
-
-       return queue_index;
+       skb_flow_dissector_init(&flow_keys_dissector,
+                               flow_keys_dissector_keys,
+                               ARRAY_SIZE(flow_keys_dissector_keys));
+       skb_flow_dissector_init(&flow_keys_buf_dissector,
+                               flow_keys_buf_dissector_keys,
+                               ARRAY_SIZE(flow_keys_buf_dissector_keys));
+       return 0;
 }
 
-struct netdev_queue *netdev_pick_tx(struct net_device *dev,
-                                   struct sk_buff *skb,
-                                   void *accel_priv)
-{
-       int queue_index = 0;
-
-#ifdef CONFIG_XPS
-       if (skb->sender_cpu == 0)
-               skb->sender_cpu = raw_smp_processor_id() + 1;
-#endif
-
-       if (dev->real_num_tx_queues != 1) {
-               const struct net_device_ops *ops = dev->netdev_ops;
-               if (ops->ndo_select_queue)
-                       queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
-                                                           __netdev_pick_tx);
-               else
-                       queue_index = __netdev_pick_tx(dev, skb);
-
-               if (!accel_priv)
-                       queue_index = netdev_cap_txqueue(dev, queue_index);
-       }
-
-       skb_set_queue_mapping(skb, queue_index);
-       return netdev_get_tx_queue(dev, queue_index);
-}
+late_initcall_sync(init_default_flow_dissectors);
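
The table-plus-init pattern above generalizes. As a sketch (all my_* names
invented here), a hypothetical user interested only in basic L4 information
could register its own dissector the same way:

        struct my_keys {
                struct flow_dissector_key_control control;
                struct flow_dissector_key_basic basic;
                struct flow_dissector_key_ports ports;
        };

        static const struct flow_dissector_key my_dissector_keys[] = {
                {
                        .key_id = FLOW_DISSECTOR_KEY_CONTROL,
                        .offset = offsetof(struct my_keys, control),
                },
                {
                        .key_id = FLOW_DISSECTOR_KEY_BASIC,
                        .offset = offsetof(struct my_keys, basic),
                },
                {
                        .key_id = FLOW_DISSECTOR_KEY_PORTS,
                        .offset = offsetof(struct my_keys, ports),
                },
        };

        static struct flow_dissector my_dissector __read_mostly;

        /* e.g. from an __init hook */
        skb_flow_dissector_init(&my_dissector, my_dissector_keys,
                                ARRAY_SIZE(my_dissector_keys));
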
index 3de6542560288b3896ab243879a7b4a9b098ca0d..3a74df750af4044eba0e7d88ae01ca9b4dac0e72 100644 (file)
@@ -913,6 +913,7 @@ static void neigh_timer_handler(unsigned long arg)
                        neigh->nud_state = NUD_PROBE;
                        neigh->updated = jiffies;
                        atomic_set(&neigh->probes, 0);
+                       notify = 1;
                        next = now + NEIGH_VAR(neigh->parms, RETRANS_TIME);
                }
        } else {
@@ -1144,6 +1145,8 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
 
        if (new != old) {
                neigh_del_timer(neigh);
+               if (new & NUD_PROBE)
+                       atomic_set(&neigh->probes, 0);
                if (new & NUD_IN_TIMER)
                        neigh_add_timer(neigh, (jiffies +
                                                ((new & NUD_REACHABLE) ?
index 4238d6da5c60dc7ac7def10fb4e3ddda0a9377e6..18b34d771ed4dc7415a17cfdab83e56ee5683d02 100644 (file)
@@ -458,11 +458,15 @@ static ssize_t phys_switch_id_show(struct device *dev,
                return restart_syscall();
 
        if (dev_isalive(netdev)) {
-               struct netdev_phys_item_id ppid;
+               struct switchdev_attr attr = {
+                       .id = SWITCHDEV_ATTR_PORT_PARENT_ID,
+                       .flags = SWITCHDEV_F_NO_RECURSE,
+               };
 
-               ret = netdev_switch_parent_id_get(netdev, &ppid);
+               ret = switchdev_port_attr_get(netdev, &attr);
                if (!ret)
-                       ret = sprintf(buf, "%*phN\n", ppid.id_len, ppid.id);
+                       ret = sprintf(buf, "%*phN\n", attr.u.ppid.id_len,
+                                     attr.u.ppid.id);
        }
        rtnl_unlock();
 
index 572af0011997a2057f30ba0b5022760e11493d98..2c2eb1b629b11d6911abfa885549e32d927815a6 100644 (file)
@@ -147,24 +147,17 @@ static void ops_free_list(const struct pernet_operations *ops,
        }
 }
 
-static void rtnl_net_notifyid(struct net *net, struct net *peer, int cmd,
-                             int id);
+/* should be called with nsid_lock held */
 static int alloc_netid(struct net *net, struct net *peer, int reqid)
 {
-       int min = 0, max = 0, id;
-
-       ASSERT_RTNL();
+       int min = 0, max = 0;
 
        if (reqid >= 0) {
                min = reqid;
                max = reqid + 1;
        }
 
-       id = idr_alloc(&net->netns_ids, peer, min, max, GFP_KERNEL);
-       if (id >= 0)
-               rtnl_net_notifyid(net, peer, RTM_NEWNSID, id);
-
-       return id;
+       return idr_alloc(&net->netns_ids, peer, min, max, GFP_ATOMIC);
 }
 
 /* This function is used by idr_for_each(). If net is equal to peer, the
@@ -180,11 +173,16 @@ static int net_eq_idr(int id, void *net, void *peer)
        return 0;
 }
 
-static int __peernet2id(struct net *net, struct net *peer, bool alloc)
+/* Should be called with nsid_lock held. If a new id is assigned, the bool
+ * pointed to by alloc is set to true so that the caller knows the new id
+ * must be notified via rtnl.
+ */
+static int __peernet2id_alloc(struct net *net, struct net *peer, bool *alloc)
 {
        int id = idr_for_each(&net->netns_ids, net_eq_idr, peer);
+       bool alloc_it = *alloc;
 
-       ASSERT_RTNL();
+       *alloc = false;
 
        /* Magic value for id 0. */
        if (id == NET_ID_ZERO)
@@ -192,36 +190,77 @@ static int __peernet2id(struct net *net, struct net *peer, bool alloc)
        if (id > 0)
                return id;
 
-       if (alloc)
-               return alloc_netid(net, peer, -1);
+       if (alloc_it) {
+               id = alloc_netid(net, peer, -1);
+               *alloc = true;
+               return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED;
+       }
+
+       return NETNSA_NSID_NOT_ASSIGNED;
+}
+
+/* should be called with nsid_lock held */
+static int __peernet2id(struct net *net, struct net *peer)
+{
+       bool no = false;
 
-       return -ENOENT;
+       return __peernet2id_alloc(net, peer, &no);
 }
 
+static void rtnl_net_notifyid(struct net *net, int cmd, int id);
 /* This function returns the id of a peer netns. If no id is assigned, one will
  * be allocated and returned.
  */
+int peernet2id_alloc(struct net *net, struct net *peer)
+{
+       unsigned long flags;
+       bool alloc;
+       int id;
+
+       spin_lock_irqsave(&net->nsid_lock, flags);
+       alloc = atomic_read(&peer->count) != 0;
+       id = __peernet2id_alloc(net, peer, &alloc);
+       spin_unlock_irqrestore(&net->nsid_lock, flags);
+       if (alloc && id >= 0)
+               rtnl_net_notifyid(net, RTM_NEWNSID, id);
+       return id;
+}
+EXPORT_SYMBOL(peernet2id_alloc);
+
+/* This function returns the id of a peer netns, if one is assigned. */
 int peernet2id(struct net *net, struct net *peer)
 {
-       bool alloc = atomic_read(&peer->count) == 0 ? false : true;
+       unsigned long flags;
        int id;
 
-       id = __peernet2id(net, peer, alloc);
-       return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED;
+       spin_lock_irqsave(&net->nsid_lock, flags);
+       id = __peernet2id(net, peer);
+       spin_unlock_irqrestore(&net->nsid_lock, flags);
+       return id;
+}
+
+/* This function returns true if the peer netns has an id assigned in the
+ * current netns.
+ */
+bool peernet_has_id(struct net *net, struct net *peer)
+{
+       return peernet2id(net, peer) >= 0;
 }
-EXPORT_SYMBOL(peernet2id);
 
 struct net *get_net_ns_by_id(struct net *net, int id)
 {
+       unsigned long flags;
        struct net *peer;
 
        if (id < 0)
                return NULL;
 
        rcu_read_lock();
+       spin_lock_irqsave(&net->nsid_lock, flags);
        peer = idr_find(&net->netns_ids, id);
        if (peer)
                get_net(peer);
+       spin_unlock_irqrestore(&net->nsid_lock, flags);
        rcu_read_unlock();
 
        return peer;
@@ -242,6 +281,7 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
        net->dev_base_seq = 1;
        net->user_ns = user_ns;
        idr_init(&net->netns_ids);
+       spin_lock_init(&net->nsid_lock);
 
        list_for_each_entry(ops, &pernet_list, list) {
                error = ops_init(ops, net);
@@ -362,14 +402,19 @@ static void cleanup_net(struct work_struct *work)
                list_del_rcu(&net->list);
                list_add_tail(&net->exit_list, &net_exit_list);
                for_each_net(tmp) {
-                       int id = __peernet2id(tmp, net, false);
+                       int id;
 
-                       if (id >= 0) {
-                               rtnl_net_notifyid(tmp, net, RTM_DELNSID, id);
+                       spin_lock_irq(&tmp->nsid_lock);
+                       id = __peernet2id(tmp, net);
+                       if (id >= 0)
                                idr_remove(&tmp->netns_ids, id);
-                       }
+                       spin_unlock_irq(&tmp->nsid_lock);
+                       if (id >= 0)
+                               rtnl_net_notifyid(tmp, RTM_DELNSID, id);
                }
+               spin_lock_irq(&net->nsid_lock);
                idr_destroy(&net->netns_ids);
+               spin_unlock_irq(&net->nsid_lock);
 
        }
        rtnl_unlock();
@@ -497,6 +542,7 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
        struct net *net = sock_net(skb->sk);
        struct nlattr *tb[NETNSA_MAX + 1];
+       unsigned long flags;
        struct net *peer;
        int nsid, err;
 
@@ -517,14 +563,19 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh)
        if (IS_ERR(peer))
                return PTR_ERR(peer);
 
-       if (__peernet2id(net, peer, false) >= 0) {
+       spin_lock_irqsave(&net->nsid_lock, flags);
+       if (__peernet2id(net, peer) >= 0) {
+               spin_unlock_irqrestore(&net->nsid_lock, flags);
                err = -EEXIST;
                goto out;
        }
 
        err = alloc_netid(net, peer, nsid);
-       if (err > 0)
+       spin_unlock_irqrestore(&net->nsid_lock, flags);
+       if (err >= 0) {
+               rtnl_net_notifyid(net, RTM_NEWNSID, err);
                err = 0;
+       }
 out:
        put_net(peer);
        return err;
@@ -538,14 +589,10 @@ static int rtnl_net_get_size(void)
 }
 
 static int rtnl_net_fill(struct sk_buff *skb, u32 portid, u32 seq, int flags,
-                        int cmd, struct net *net, struct net *peer,
-                        int nsid)
+                        int cmd, struct net *net, int nsid)
 {
        struct nlmsghdr *nlh;
        struct rtgenmsg *rth;
-       int id;
-
-       ASSERT_RTNL();
 
        nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rth), flags);
        if (!nlh)
@@ -554,14 +601,7 @@ static int rtnl_net_fill(struct sk_buff *skb, u32 portid, u32 seq, int flags,
        rth = nlmsg_data(nlh);
        rth->rtgen_family = AF_UNSPEC;
 
-       if (nsid >= 0) {
-               id = nsid;
-       } else {
-               id = __peernet2id(net, peer, false);
-               if  (id < 0)
-                       id = NETNSA_NSID_NOT_ASSIGNED;
-       }
-       if (nla_put_s32(skb, NETNSA_NSID, id))
+       if (nla_put_s32(skb, NETNSA_NSID, nsid))
                goto nla_put_failure;
 
        nlmsg_end(skb, nlh);
@@ -578,7 +618,7 @@ static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh)
        struct nlattr *tb[NETNSA_MAX + 1];
        struct sk_buff *msg;
        struct net *peer;
-       int err;
+       int err, id;
 
        err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
                          rtnl_net_policy);
@@ -600,8 +640,9 @@ static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh)
                goto out;
        }
 
+       id = peernet2id(net, peer);
        err = rtnl_net_fill(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
-                           RTM_NEWNSID, net, peer, -1);
+                           RTM_NEWNSID, net, id);
        if (err < 0)
                goto err_out;
 
@@ -633,7 +674,7 @@ static int rtnl_net_dumpid_one(int id, void *peer, void *data)
 
        ret = rtnl_net_fill(net_cb->skb, NETLINK_CB(net_cb->cb->skb).portid,
                            net_cb->cb->nlh->nlmsg_seq, NLM_F_MULTI,
-                           RTM_NEWNSID, net_cb->net, peer, id);
+                           RTM_NEWNSID, net_cb->net, id);
        if (ret < 0)
                return ret;
 
@@ -652,17 +693,17 @@ static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
                .idx = 0,
                .s_idx = cb->args[0],
        };
+       unsigned long flags;
 
-       ASSERT_RTNL();
-
+       spin_lock_irqsave(&net->nsid_lock, flags);
        idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb);
+       spin_unlock_irqrestore(&net->nsid_lock, flags);
 
        cb->args[0] = net_cb.idx;
        return skb->len;
 }
 
-static void rtnl_net_notifyid(struct net *net, struct net *peer, int cmd,
-                             int id)
+static void rtnl_net_notifyid(struct net *net, int cmd, int id)
 {
        struct sk_buff *msg;
        int err = -ENOMEM;
@@ -671,7 +712,7 @@ static void rtnl_net_notifyid(struct net *net, struct net *peer, int cmd,
        if (!msg)
                goto out;
 
-       err = rtnl_net_fill(msg, 0, 0, 0, cmd, net, peer, id);
+       err = rtnl_net_fill(msg, 0, 0, 0, cmd, net, id);
        if (err < 0)
                goto err_out;
 
index f17ccd291d3902a94ebd31653959851203402468..8b3bc4fac6136638de97124443fb6452b1d33fc2 100644 (file)
@@ -31,10 +31,7 @@ static ATOMIC_NOTIFIER_HEAD(netevent_notif_chain);
  */
 int register_netevent_notifier(struct notifier_block *nb)
 {
-       int err;
-
-       err = atomic_notifier_chain_register(&netevent_notif_chain, nb);
-       return err;
+       return atomic_notifier_chain_register(&netevent_notif_chain, nb);
 }
 EXPORT_SYMBOL_GPL(register_netevent_notifier);
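
For reference, a hedged consumer sketch (example_* names invented;
NETEVENT_NEIGH_UPDATE is one of the existing netevent types):

        static int example_netevent_cb(struct notifier_block *nb,
                                       unsigned long event, void *ctx)
        {
                if (event == NETEVENT_NEIGH_UPDATE) {
                        /* ctx is the affected struct neighbour */
                }
                return NOTIFY_DONE;
        }

        static struct notifier_block example_netevent_nb = {
                .notifier_call = example_netevent_cb,
        };

        /* typically from module init */
        register_netevent_notifier(&example_netevent_nb);
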
 
index 508155b283ddcc73a967a2bc8068e67cb8cada7d..d93cbc5715f45bef064b398a6f47ae7828610cec 100644 (file)
 #include <asm/dma.h>
 #include <asm/div64.h>         /* do_div */
 
-#define VERSION        "2.74"
+#define VERSION        "2.75"
 #define IP_NAME_SZ 32
 #define MAX_MPLS_LABELS 16 /* This is the max label stack depth */
 #define MPLS_STACK_BOTTOM htonl(0x00000100)
 #define T_REMDEVALL   (1<<2)   /* Remove all devs */
 #define T_REMDEV      (1<<3)   /* Remove one dev */
 
+/* Xmit modes */
+#define M_START_XMIT           0       /* Default normal TX */
+#define M_NETIF_RECEIVE        1       /* Inject packets into stack */
+
 /* If lock -- protects updating of if_list */
 #define   if_lock(t)           spin_lock(&(t->if_lock));
 #define   if_unlock(t)           spin_unlock(&(t->if_lock));
@@ -251,13 +255,14 @@ struct pktgen_dev {
         * we will do a random selection from within the range.
         */
        __u32 flags;
-       int removal_mark;       /* non-zero => the device is marked for
-                                * removal by worker thread */
-
+       int xmit_mode;
        int min_pkt_size;
        int max_pkt_size;
        int pkt_overhead;       /* overhead for MPLS, VLANs, IPSEC etc */
        int nfrags;
+       int removal_mark;       /* non-zero => the device is marked for
+                                * removal by worker thread */
+
        struct page *page;
        u64 delay;              /* nano-seconds */
 
@@ -507,7 +512,7 @@ static ssize_t pgctrl_write(struct file *file, const char __user *buf,
                pktgen_reset_all_threads(pn);
 
        else
-               pr_warn("Unknown command: %s\n", data);
+               return -EINVAL;
 
        return count;
 }
@@ -567,7 +572,7 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
                           "     dst_min: %s  dst_max: %s\n",
                           pkt_dev->dst_min, pkt_dev->dst_max);
                seq_printf(seq,
-                          "        src_min: %s  src_max: %s\n",
+                          "     src_min: %s  src_max: %s\n",
                           pkt_dev->src_min, pkt_dev->src_max);
        }
 
@@ -620,6 +625,9 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
        if (pkt_dev->node >= 0)
                seq_printf(seq, "     node: %d\n", pkt_dev->node);
 
+       if (pkt_dev->xmit_mode == M_NETIF_RECEIVE)
+               seq_puts(seq, "     xmit_mode: netif_receive\n");
+
        seq_puts(seq, "     Flags: ");
 
        if (pkt_dev->flags & F_IPV6)
@@ -1081,7 +1089,8 @@ static ssize_t pktgen_if_write(struct file *file,
                if (len < 0)
                        return len;
                if ((value > 0) &&
-                   (!(pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING)))
+                   ((pkt_dev->xmit_mode == M_NETIF_RECEIVE) ||
+                    !(pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING)))
                        return -ENOTSUPP;
                i += len;
                pkt_dev->clone_skb = value;
@@ -1134,7 +1143,7 @@ static ssize_t pktgen_if_write(struct file *file,
                        return len;
 
                i += len;
-               if ((value > 1) &&
+               if ((value > 1) && (pkt_dev->xmit_mode == M_START_XMIT) &&
                    (!(pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING)))
                        return -ENOTSUPP;
                pkt_dev->burst = value < 1 ? 1 : value;
@@ -1160,6 +1169,45 @@ static ssize_t pktgen_if_write(struct file *file,
                        sprintf(pg_result, "ERROR: node not possible");
                return count;
        }
+       if (!strcmp(name, "xmit_mode")) {
+               char f[32];
+
+               memset(f, 0, 32);
+               len = strn_len(&user_buffer[i], sizeof(f) - 1);
+               if (len < 0)
+                       return len;
+
+               if (copy_from_user(f, &user_buffer[i], len))
+                       return -EFAULT;
+               i += len;
+
+               if (strcmp(f, "start_xmit") == 0) {
+                       pkt_dev->xmit_mode = M_START_XMIT;
+               } else if (strcmp(f, "netif_receive") == 0) {
+                       /* clone_skb set earlier, not supported in this mode */
+                       if (pkt_dev->clone_skb > 0)
+                               return -ENOTSUPP;
+
+                       pkt_dev->xmit_mode = M_NETIF_RECEIVE;
+
+                       /* make sure a new packet is allocated every time
+                        * pktgen_xmit() is called
+                        */
+                       pkt_dev->last_ok = 1;
+
+                       /* override clone_skb if the user passed the default
+                        * value at module load time
+                        */
+                       pkt_dev->clone_skb = 0;
+               } else {
+                       sprintf(pg_result,
+                               "xmit_mode -:%s:- unknown\nAvailable modes: %s",
+                               f, "start_xmit, netif_receive\n");
+                       return count;
+               }
+               sprintf(pg_result, "OK: xmit_mode=%s", f);
+               return count;
+       }
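
From user space the new knob is driven like any other per-device pktgen
setting, e.g. echo "xmit_mode netif_receive" > /proc/net/pktgen/eth0 (device
name assumed; see Documentation/networking/pktgen.txt for the general usage).
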
        if (!strcmp(name, "flag")) {
                char f[32];
                memset(f, 0, 32);
@@ -1267,6 +1315,9 @@ static ssize_t pktgen_if_write(struct file *file,
                else if (strcmp(f, "NO_TIMESTAMP") == 0)
                        pkt_dev->flags |= F_NO_TIMESTAMP;
 
+               else if (strcmp(f, "!NO_TIMESTAMP") == 0)
+                       pkt_dev->flags &= ~F_NO_TIMESTAMP;
+
                else {
                        sprintf(pg_result,
                                "Flag -:%s:- unknown\nAvailable flags, (prepend ! to un-set flag):\n%s",
@@ -2594,9 +2645,9 @@ static int process_ipsec(struct pktgen_dev *pkt_dev,
                struct xfrm_state *x = pkt_dev->flows[pkt_dev->curfl].x;
                int nhead = 0;
                if (x) {
-                       int ret;
-                       __u8 *eth;
+                       struct ethhdr *eth;
                        struct iphdr *iph;
+                       int ret;
 
                        nhead = x->props.header_len - skb_headroom(skb);
                        if (nhead > 0) {
@@ -2616,9 +2667,9 @@ static int process_ipsec(struct pktgen_dev *pkt_dev,
                                goto err;
                        }
                        /* restore ll */
-                       eth = (__u8 *) skb_push(skb, ETH_HLEN);
-                       memcpy(eth, pkt_dev->hh, 12);
-                       *(u16 *) &eth[12] = protocol;
+                       eth = (struct ethhdr *)skb_push(skb, ETH_HLEN);
+                       memcpy(eth, pkt_dev->hh, 2 * ETH_ALEN);
+                       eth->h_proto = protocol;
 
                        /* Update IPv4 header len as well as checksum value */
                        iph = ip_hdr(skb);
@@ -3317,6 +3368,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
        unsigned int burst = ACCESS_ONCE(pkt_dev->burst);
        struct net_device *odev = pkt_dev->odev;
        struct netdev_queue *txq;
+       struct sk_buff *skb;
        int ret;
 
        /* If device is offline, then don't send */
@@ -3354,6 +3406,37 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
        if (pkt_dev->delay && pkt_dev->last_ok)
                spin(pkt_dev, pkt_dev->next_tx);
 
+       if (pkt_dev->xmit_mode == M_NETIF_RECEIVE) {
+               skb = pkt_dev->skb;
+               skb->protocol = eth_type_trans(skb, skb->dev);
+               atomic_add(burst, &skb->users);
+               local_bh_disable();
+               do {
+                       ret = netif_receive_skb(skb);
+                       if (ret == NET_RX_DROP)
+                               pkt_dev->errors++;
+                       pkt_dev->sofar++;
+                       pkt_dev->seq_num++;
+                       if (atomic_read(&skb->users) != burst) {
+                               /* skb was queued by rps/rfs or taps,
+                                * so cannot reuse this skb
+                                */
+                               atomic_sub(burst - 1, &skb->users);
+                       /* get out of the loop and wait
+                        * until the skb is consumed
+                        */
+                               break;
+                       }
+                       /* skb was 'freed' by the stack, so clean a few
+                        * bits and reuse it
+                        */
+#ifdef CONFIG_NET_CLS_ACT
+                       skb->tc_verd = 0; /* reset reclass/redir ttl */
+#endif
+               } while (--burst > 0);
+               goto out; /* Skips xmit_mode M_START_XMIT */
+       }
+
        txq = skb_get_tx_queue(odev, pkt_dev->skb);
 
        local_bh_disable();
@@ -3401,6 +3484,7 @@ xmit_more:
 unlock:
        HARD_TX_UNLOCK(odev, txq);
 
+out:
        local_bh_enable();
 
        /* If pkt_dev->count is zero, then run forever */
index 8de36824018de4da2369fb02234692c4e0260b27..077b6d280371641c44feff8d4c5272ac9dbd69b9 100644 (file)
@@ -1004,16 +1004,20 @@ static int rtnl_phys_port_name_fill(struct sk_buff *skb, struct net_device *dev)
 static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev)
 {
        int err;
-       struct netdev_phys_item_id psid;
+       struct switchdev_attr attr = {
+               .id = SWITCHDEV_ATTR_PORT_PARENT_ID,
+               .flags = SWITCHDEV_F_NO_RECURSE,
+       };
 
-       err = netdev_switch_parent_id_get(dev, &psid);
+       err = switchdev_port_attr_get(dev, &attr);
        if (err) {
                if (err == -EOPNOTSUPP)
                        return 0;
                return err;
        }
 
-       if (nla_put(skb, IFLA_PHYS_SWITCH_ID, psid.id_len, psid.id))
+       if (nla_put(skb, IFLA_PHYS_SWITCH_ID, attr.u.ppid.id_len,
+                   attr.u.ppid.id))
                return -EMSGSIZE;
 
        return 0;
@@ -1204,7 +1208,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
                struct net *link_net = dev->rtnl_link_ops->get_link_net(dev);
 
                if (!net_eq(dev_net(dev), link_net)) {
-                       int id = peernet2id(dev_net(dev), link_net);
+                       int id = peernet2id_alloc(dev_net(dev), link_net);
 
                        if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
                                goto nla_put_failure;
index 51dd3193a33ebb26ea3c008e752d1f2832f791d8..fd3ce461fbe6210ab95fbcbb4b5e6c862a262898 100644 (file)
@@ -154,7 +154,7 @@ u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
        net_secret_init();
        memcpy(hash, saddr, 16);
        for (i = 0; i < 4; i++)
-               secret[i] = net_secret[i] + daddr[i];
+               secret[i] = net_secret[i] + (__force u32)daddr[i];
        secret[4] = net_secret[4] +
                (((__force u16)sport << 16) + (__force u16)dport);
        for (i = 5; i < MD5_MESSAGE_BYTES / 4; i++)
index 3cfff2a3d651fb7d7cd2baaa3698c123eb7fc00f..9bac0e6f8dfa22c71666c0ba5da5bb3d1a014251 100644 (file)
@@ -347,94 +347,18 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
 }
 EXPORT_SYMBOL(build_skb);
 
-struct netdev_alloc_cache {
-       struct page_frag        frag;
-       /* we maintain a pagecount bias, so that we dont dirty cache line
-        * containing page->_count every time we allocate a fragment.
-        */
-       unsigned int            pagecnt_bias;
-};
-static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache);
-static DEFINE_PER_CPU(struct netdev_alloc_cache, napi_alloc_cache);
-
-static struct page *__page_frag_refill(struct netdev_alloc_cache *nc,
-                                      gfp_t gfp_mask)
-{
-       const unsigned int order = NETDEV_FRAG_PAGE_MAX_ORDER;
-       struct page *page = NULL;
-       gfp_t gfp = gfp_mask;
-
-       if (order) {
-               gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
-                           __GFP_NOMEMALLOC;
-               page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
-               nc->frag.size = PAGE_SIZE << (page ? order : 0);
-       }
-
-       if (unlikely(!page))
-               page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
-
-       nc->frag.page = page;
-
-       return page;
-}
-
-static void *__alloc_page_frag(struct netdev_alloc_cache __percpu *cache,
-                              unsigned int fragsz, gfp_t gfp_mask)
-{
-       struct netdev_alloc_cache *nc = this_cpu_ptr(cache);
-       struct page *page = nc->frag.page;
-       unsigned int size;
-       int offset;
-
-       if (unlikely(!page)) {
-refill:
-               page = __page_frag_refill(nc, gfp_mask);
-               if (!page)
-                       return NULL;
-
-               /* if size can vary use frag.size else just use PAGE_SIZE */
-               size = NETDEV_FRAG_PAGE_MAX_ORDER ? nc->frag.size : PAGE_SIZE;
-
-               /* Even if we own the page, we do not use atomic_set().
-                * This would break get_page_unless_zero() users.
-                */
-               atomic_add(size - 1, &page->_count);
-
-               /* reset page count bias and offset to start of new frag */
-               nc->pagecnt_bias = size;
-               nc->frag.offset = size;
-       }
-
-       offset = nc->frag.offset - fragsz;
-       if (unlikely(offset < 0)) {
-               if (!atomic_sub_and_test(nc->pagecnt_bias, &page->_count))
-                       goto refill;
-
-               /* if size can vary use frag.size else just use PAGE_SIZE */
-               size = NETDEV_FRAG_PAGE_MAX_ORDER ? nc->frag.size : PAGE_SIZE;
-
-               /* OK, page count is 0, we can safely set it */
-               atomic_set(&page->_count, size);
-
-               /* reset page count bias and offset to start of new frag */
-               nc->pagecnt_bias = size;
-               offset = size - fragsz;
-       }
-
-       nc->pagecnt_bias--;
-       nc->frag.offset = offset;
-
-       return page_address(page) + offset;
-}
+static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
+static DEFINE_PER_CPU(struct page_frag_cache, napi_alloc_cache);
 
 static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
 {
+       struct page_frag_cache *nc;
        unsigned long flags;
        void *data;
 
        local_irq_save(flags);
-       data = __alloc_page_frag(&netdev_alloc_cache, fragsz, gfp_mask);
+       nc = this_cpu_ptr(&netdev_alloc_cache);
+       data = __alloc_page_frag(nc, fragsz, gfp_mask);
        local_irq_restore(flags);
        return data;
 }
@@ -454,7 +378,9 @@ EXPORT_SYMBOL(netdev_alloc_frag);
 
 static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
 {
-       return __alloc_page_frag(&napi_alloc_cache, fragsz, gfp_mask);
+       struct page_frag_cache *nc = this_cpu_ptr(&napi_alloc_cache);
+
+       return __alloc_page_frag(nc, fragsz, gfp_mask);
 }
 
 void *napi_alloc_frag(unsigned int fragsz)
@@ -464,76 +390,70 @@ void *napi_alloc_frag(unsigned int fragsz)
 EXPORT_SYMBOL(napi_alloc_frag);
 
 /**
- *     __alloc_rx_skb - allocate an skbuff for rx
+ *     __netdev_alloc_skb - allocate an skbuff for rx on a specific device
+ *     @dev: network device to receive on
  *     @length: length to allocate
  *     @gfp_mask: get_free_pages mask, passed to alloc_skb
- *     @flags: If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
- *             allocations in case we have to fallback to __alloc_skb()
- *             If SKB_ALLOC_NAPI is set, page fragment will be allocated
- *             from napi_cache instead of netdev_cache.
  *
  *     Allocate a new &sk_buff and assign it a usage count of one. The
- *     buffer has unspecified headroom built in. Users should allocate
+ *     buffer has NET_SKB_PAD headroom built in. Users should allocate
  *     the headroom they think they need without accounting for the
  *     built in space. The built in space is used for optimisations.
  *
  *     %NULL is returned if there is no free memory.
  */
-static struct sk_buff *__alloc_rx_skb(unsigned int length, gfp_t gfp_mask,
-                                     int flags)
+struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
+                                  gfp_t gfp_mask)
 {
-       struct sk_buff *skb = NULL;
-       unsigned int fragsz = SKB_DATA_ALIGN(length) +
-                             SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+       struct page_frag_cache *nc;
+       unsigned long flags;
+       struct sk_buff *skb;
+       bool pfmemalloc;
+       void *data;
 
-       if (fragsz <= PAGE_SIZE && !(gfp_mask & (__GFP_WAIT | GFP_DMA))) {
-               void *data;
+       len += NET_SKB_PAD;
 
-               if (sk_memalloc_socks())
-                       gfp_mask |= __GFP_MEMALLOC;
+       if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
+           (gfp_mask & (__GFP_WAIT | GFP_DMA))) {
+               skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
+               if (!skb)
+                       goto skb_fail;
+               goto skb_success;
+       }
 
-               data = (flags & SKB_ALLOC_NAPI) ?
-                       __napi_alloc_frag(fragsz, gfp_mask) :
-                       __netdev_alloc_frag(fragsz, gfp_mask);
+       len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+       len = SKB_DATA_ALIGN(len);
 
-               if (likely(data)) {
-                       skb = build_skb(data, fragsz);
-                       if (unlikely(!skb))
-                               put_page(virt_to_head_page(data));
-               }
-       } else {
-               skb = __alloc_skb(length, gfp_mask,
-                                 SKB_ALLOC_RX, NUMA_NO_NODE);
-       }
-       return skb;
-}
+       if (sk_memalloc_socks())
+               gfp_mask |= __GFP_MEMALLOC;
 
-/**
- *     __netdev_alloc_skb - allocate an skbuff for rx on a specific device
- *     @dev: network device to receive on
- *     @length: length to allocate
- *     @gfp_mask: get_free_pages mask, passed to alloc_skb
- *
- *     Allocate a new &sk_buff and assign it a usage count of one. The
- *     buffer has NET_SKB_PAD headroom built in. Users should allocate
- *     the headroom they think they need without accounting for the
- *     built in space. The built in space is used for optimisations.
- *
- *     %NULL is returned if there is no free memory.
- */
-struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
-                                  unsigned int length, gfp_t gfp_mask)
-{
-       struct sk_buff *skb;
+       local_irq_save(flags);
+
+       nc = this_cpu_ptr(&netdev_alloc_cache);
+       data = __alloc_page_frag(nc, len, gfp_mask);
+       pfmemalloc = nc->pfmemalloc;
 
-       length += NET_SKB_PAD;
-       skb = __alloc_rx_skb(length, gfp_mask, 0);
+       local_irq_restore(flags);
 
-       if (likely(skb)) {
-               skb_reserve(skb, NET_SKB_PAD);
-               skb->dev = dev;
+       if (unlikely(!data))
+               return NULL;
+
+       skb = __build_skb(data, len);
+       if (unlikely(!skb)) {
+               skb_free_frag(data);
+               return NULL;
        }
 
+       /* use OR instead of assignment to avoid clearing of bits in mask */
+       if (pfmemalloc)
+               skb->pfmemalloc = 1;
+       skb->head_frag = 1;
+
+skb_success:
+       skb_reserve(skb, NET_SKB_PAD);
+       skb->dev = dev;
+
+skb_fail:
        return skb;
 }
 EXPORT_SYMBOL(__netdev_alloc_skb);
@@ -551,19 +471,49 @@ EXPORT_SYMBOL(__netdev_alloc_skb);
  *
  *     %NULL is returned if there is no free memory.
  */
-struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
-                                unsigned int length, gfp_t gfp_mask)
+struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
+                                gfp_t gfp_mask)
 {
+       struct page_frag_cache *nc = this_cpu_ptr(&napi_alloc_cache);
        struct sk_buff *skb;
+       void *data;
 
-       length += NET_SKB_PAD + NET_IP_ALIGN;
-       skb = __alloc_rx_skb(length, gfp_mask, SKB_ALLOC_NAPI);
+       len += NET_SKB_PAD + NET_IP_ALIGN;
 
-       if (likely(skb)) {
-               skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
-               skb->dev = napi->dev;
+       if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
+           (gfp_mask & (__GFP_WAIT | GFP_DMA))) {
+               skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
+               if (!skb)
+                       goto skb_fail;
+               goto skb_success;
        }
 
+       len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+       len = SKB_DATA_ALIGN(len);
+
+       if (sk_memalloc_socks())
+               gfp_mask |= __GFP_MEMALLOC;
+
+       data = __alloc_page_frag(nc, len, gfp_mask);
+       if (unlikely(!data))
+               return NULL;
+
+       skb = __build_skb(data, len);
+       if (unlikely(!skb)) {
+               skb_free_frag(data);
+               return NULL;
+       }
+
+       /* use OR instead of assignment to avoid clearing of bits in mask */
+       if (nc->pfmemalloc)
+               skb->pfmemalloc = 1;
+       skb->head_frag = 1;
+
+skb_success:
+       skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
+       skb->dev = napi->dev;
+
+skb_fail:
        return skb;
 }
 EXPORT_SYMBOL(__napi_alloc_skb);
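
A hedged driver-side sketch of the NAPI receive path this feeds (names
invented; napi_alloc_skb() is the GFP_ATOMIC wrapper around
__napi_alloc_skb()):

        static struct sk_buff *example_rx_build(struct napi_struct *napi,
                                                const void *data,
                                                unsigned int len)
        {
                struct sk_buff *skb;

                /* NET_SKB_PAD + NET_IP_ALIGN headroom is already reserved */
                skb = napi_alloc_skb(napi, len);
                if (unlikely(!skb))
                        return NULL;

                memcpy(skb_put(skb, len), data, len);
                return skb;
        }
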
@@ -611,10 +561,12 @@ static void skb_clone_fraglist(struct sk_buff *skb)
 
 static void skb_free_head(struct sk_buff *skb)
 {
+       unsigned char *head = skb->head;
+
        if (skb->head_frag)
-               put_page(virt_to_head_page(skb->head));
+               skb_free_frag(head);
        else
-               kfree(skb->head);
+               kfree(head);
 }
 
 static void skb_release_data(struct sk_buff *skb)
@@ -1918,15 +1870,39 @@ static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
        return false;
 }
 
+ssize_t skb_socket_splice(struct sock *sk,
+                         struct pipe_inode_info *pipe,
+                         struct splice_pipe_desc *spd)
+{
+       int ret;
+
+       /* Drop the socket lock, otherwise we have reverse
+        * locking dependencies between sk_lock and i_mutex
+        * here as compared to sendfile(). We enter here
+        * with the socket lock held, and splice_to_pipe() will
+        * grab the pipe inode lock. For sendfile() emulation,
+        * we call into ->sendpage() with the i_mutex lock held
+        * and networking will grab the socket lock.
+        */
+       release_sock(sk);
+       ret = splice_to_pipe(pipe, spd);
+       lock_sock(sk);
+
+       return ret;
+}
+
 /*
 * Map data from the skb to a pipe. Should handle the linear part,
  * the fragments, and the frag list. It does NOT handle frag lists within
  * the frag list, if such a thing exists. We'd probably need to recurse to
  * handle that cleanly.
  */
-int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
+int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
                    struct pipe_inode_info *pipe, unsigned int tlen,
-                   unsigned int flags)
+                   unsigned int flags,
+                   ssize_t (*splice_cb)(struct sock *,
+                                        struct pipe_inode_info *,
+                                        struct splice_pipe_desc *))
 {
        struct partial_page partial[MAX_SKB_FRAGS];
        struct page *pages[MAX_SKB_FRAGS];
@@ -1939,7 +1915,6 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
                .spd_release = sock_spd_release,
        };
        struct sk_buff *frag_iter;
-       struct sock *sk = skb->sk;
        int ret = 0;
 
        /*
@@ -1962,23 +1937,12 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
        }
 
 done:
-       if (spd.nr_pages) {
-               /*
-                * Drop the socket lock, otherwise we have reverse
-                * locking dependencies between sk_lock and i_mutex
-                * here as compared to sendfile(). We enter here
-                * with the socket lock held, and splice_to_pipe() will
-                * grab the pipe inode lock. For sendfile() emulation,
-                * we call into ->sendpage() with the i_mutex lock held
-                * and networking will grab the socket lock.
-                */
-               release_sock(sk);
-               ret = splice_to_pipe(pipe, &spd);
-               lock_sock(sk);
-       }
+       if (spd.nr_pages)
+               ret = splice_cb(sk, pipe, &spd);
 
        return ret;
 }
+EXPORT_SYMBOL_GPL(skb_splice_bits);
 
 /**
  *     skb_store_bits - store bits from kernel buffer to skb
@@ -2963,6 +2927,24 @@ int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
 }
 EXPORT_SYMBOL(skb_append_datato_frags);
 
+int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
+                        int offset, size_t size)
+{
+       int i = skb_shinfo(skb)->nr_frags;
+
+       if (skb_can_coalesce(skb, i, page, offset)) {
+               skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size);
+       } else if (i < MAX_SKB_FRAGS) {
+               get_page(page);
+               skb_fill_page_desc(skb, i, page, offset, size);
+       } else {
+               return -EMSGSIZE;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(skb_append_pagefrags);
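
A hedged caller sketch; the length/truesize bookkeeping shown is the usual
follow-up in callers, not something the helper does itself:

        err = skb_append_pagefrags(skb, page, offset, size);
        if (err < 0)
                return err;     /* -EMSGSIZE: no frag slot left */

        /* the helper only attaches the page; accounting is the caller's job */
        skb->len      += size;
        skb->data_len += size;
        skb->truesize += size;
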
+
 /**
  *     skb_pull_rcsum - pull skb and update receive checksum
  *     @skb: buffer to update
@@ -4030,6 +4012,93 @@ int skb_checksum_setup(struct sk_buff *skb, bool recalculate)
 }
 EXPORT_SYMBOL(skb_checksum_setup);
 
+/**
+ * skb_checksum_maybe_trim - maybe trims the given skb
+ * @skb: the skb to check
+ * @transport_len: the data length beyond the network header
+ *
+ * Checks whether the given skb has data beyond the given transport length.
+ * If so, returns a cloned skb trimmed to this transport length.
+ * Otherwise returns the provided skb. Returns NULL in error cases
+ * (e.g. transport_len exceeds skb length or out-of-memory).
+ *
+ * Caller needs to set the skb transport header and release the returned skb.
+ * Provided skb is consumed.
+ */
+static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
+                                              unsigned int transport_len)
+{
+       struct sk_buff *skb_chk;
+       unsigned int len = skb_transport_offset(skb) + transport_len;
+       int ret;
+
+       if (skb->len < len) {
+               kfree_skb(skb);
+               return NULL;
+       } else if (skb->len == len) {
+               return skb;
+       }
+
+       skb_chk = skb_clone(skb, GFP_ATOMIC);
+       kfree_skb(skb);
+
+       if (!skb_chk)
+               return NULL;
+
+       ret = pskb_trim_rcsum(skb_chk, len);
+       if (ret) {
+               kfree_skb(skb_chk);
+               return NULL;
+       }
+
+       return skb_chk;
+}
+
+/**
+ * skb_checksum_trimmed - validate checksum of an skb
+ * @skb: the skb to check
+ * @transport_len: the data length beyond the network header
+ * @skb_chkf: checksum function to use
+ *
+ * Applies the given checksum function skb_chkf to the provided skb.
+ * Returns a checked and maybe trimmed skb. Returns NULL on error.
+ *
+ * If the skb has data beyond the given transport length, then a
+ * trimmed & cloned skb is checked and returned.
+ *
+ * Caller needs to set the skb transport header and release the returned skb.
+ * Provided skb is consumed.
+ */
+struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
+                                    unsigned int transport_len,
+                                    __sum16(*skb_chkf)(struct sk_buff *skb))
+{
+       struct sk_buff *skb_chk;
+       unsigned int offset = skb_transport_offset(skb);
+       __sum16 ret;
+
+       skb_chk = skb_checksum_maybe_trim(skb, transport_len);
+       if (!skb_chk)
+               return NULL;
+
+       if (!pskb_may_pull(skb_chk, offset)) {
+               kfree_skb(skb_chk);
+               return NULL;
+       }
+
+       __skb_pull(skb_chk, offset);
+       ret = skb_chkf(skb_chk);
+       __skb_push(skb_chk, offset);
+
+       if (ret) {
+               kfree_skb(skb_chk);
+               return NULL;
+       }
+
+       return skb_chk;
+}
+EXPORT_SYMBOL(skb_checksum_trimmed);
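
An assumed caller sketch (example_* names invented; the transport header must
already be set, as the comment above requires):

        static __sum16 example_validate_csum(struct sk_buff *skb)
        {
                /* generic full-packet checksum validation */
                return skb_checksum_simple_validate(skb);
        }

        static int example_check(struct sk_buff *skb,
                                 unsigned int transport_len)
        {
                struct sk_buff *skb_chk;

                /* skb is consumed whether or not validation succeeds */
                skb_chk = skb_checksum_trimmed(skb, transport_len,
                                               example_validate_csum);
                if (!skb_chk)
                        return -EINVAL;

                kfree_skb(skb_chk);
                return 0;
        }
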
+
 void __skb_warn_lro_forwarding(const struct sk_buff *skb)
 {
        net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n",
index 292f42228bfb361b5748998bbcc538b1e16a2f22..e72633c346b197b9dd6c94800fa0c55110aedfb2 100644 (file)
@@ -1396,9 +1396,10 @@ EXPORT_SYMBOL_GPL(sock_update_netprioidx);
  *     @family: protocol family
  *     @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
  *     @prot: struct proto associated with this new sock instance
+ *     @kern: is this to be a kernel socket?
  */
 struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
-                     struct proto *prot)
+                     struct proto *prot, int kern)
 {
        struct sock *sk;
 
@@ -1411,7 +1412,10 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
                 */
                sk->sk_prot = sk->sk_prot_creator = prot;
                sock_lock_init(sk);
-               sock_net_set(sk, get_net(net));
+               sk->sk_net_refcnt = kern ? 0 : 1;
+               if (likely(sk->sk_net_refcnt))
+                       get_net(net);
+               sock_net_set(sk, net);
                atomic_set(&sk->sk_wmem_alloc, 1);
 
                sock_update_classid(sk);
@@ -1445,7 +1449,8 @@ static void __sk_free(struct sock *sk)
        if (sk->sk_peer_cred)
                put_cred(sk->sk_peer_cred);
        put_pid(sk->sk_peer_pid);
-       put_net(sock_net(sk));
+       if (likely(sk->sk_net_refcnt))
+               put_net(sock_net(sk));
        sk_prot_free(sk->sk_prot_creator, sk);
 }
 
@@ -1461,25 +1466,6 @@ void sk_free(struct sock *sk)
 }
 EXPORT_SYMBOL(sk_free);
 
-/*
- * Last sock_put should drop reference to sk->sk_net. It has already
- * been dropped in sk_change_net. Taking reference to stopping namespace
- * is not an option.
- * Take reference to a socket to remove it from hash _alive_ and after that
- * destroy it in the context of init_net.
- */
-void sk_release_kernel(struct sock *sk)
-{
-       if (sk == NULL || sk->sk_socket == NULL)
-               return;
-
-       sock_hold(sk);
-       sock_release(sk->sk_socket);
-       sock_net_set(sk, get_net(&init_net));
-       sock_put(sk);
-}
-EXPORT_SYMBOL(sk_release_kernel);
-
 static void sk_update_clone(const struct sock *sk, struct sock *newsk)
 {
        if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
@@ -1595,6 +1581,8 @@ EXPORT_SYMBOL_GPL(sk_clone_lock);
 
 void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
 {
+       u32 max_segs = 1;
+
        __sk_dst_set(sk, dst);
        sk->sk_route_caps = dst->dev->features;
        if (sk->sk_route_caps & NETIF_F_GSO)
@@ -1606,9 +1594,10 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
                } else {
                        sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
                        sk->sk_gso_max_size = dst->dev->gso_max_size;
-                       sk->sk_gso_max_segs = dst->dev->gso_max_segs;
+                       max_segs = max_t(u32, dst->dev->gso_max_segs, 1);
                }
        }
+       sk->sk_gso_max_segs = max_segs;
 }
 EXPORT_SYMBOL_GPL(sk_setup_caps);
 
@@ -2083,12 +2072,13 @@ EXPORT_SYMBOL(__sk_mem_schedule);
 /**
 *     __sk_mem_reclaim - reclaim memory_allocated
  *     @sk: socket
+ *     @amount: number of bytes (rounded down to a SK_MEM_QUANTUM multiple)
  */
-void __sk_mem_reclaim(struct sock *sk)
+void __sk_mem_reclaim(struct sock *sk, int amount)
 {
-       sk_memory_allocated_sub(sk,
-                               sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT);
-       sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
+       amount >>= SK_MEM_QUANTUM_SHIFT;
+       sk_memory_allocated_sub(sk, amount);
+       sk->sk_forward_alloc -= amount << SK_MEM_QUANTUM_SHIFT;
 
        if (sk_under_memory_pressure(sk) &&
            (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
index 301c05f260600caaed94a9f63ecd3ed0adc5c233..d70f77a0c8898582e0adabd24c6165675d12dce7 100644 (file)
@@ -119,6 +119,7 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
        int err = 0;
        long vm_wait = 0;
        long current_timeo = *timeo_p;
+       bool noblock = !*timeo_p;
        DEFINE_WAIT(wait);
 
        if (sk_stream_memory_free(sk))
@@ -131,8 +132,11 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
 
                if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
                        goto do_error;
-               if (!*timeo_p)
+               if (!*timeo_p) {
+                       if (noblock)
+                               set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
                        goto do_nonblock;
+               }
                if (signal_pending(current))
                        goto do_interrupted;
                clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
index 7b803884c162834ddc5327805e6b0e3a636a82f0..a7732a06804376aa321c9982faaf981ad3c51c87 100644 (file)
@@ -304,13 +304,15 @@ void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
                              __be32 from, __be32 to, int pseudohdr)
 {
        if (skb->ip_summed != CHECKSUM_PARTIAL) {
-               *sum = csum_fold(csum_add(csum_sub(~csum_unfold(*sum), from),
-                                to));
+               csum_replace4(sum, from, to);
                if (skb->ip_summed == CHECKSUM_COMPLETE && pseudohdr)
-                       skb->csum = ~csum_add(csum_sub(~(skb->csum), from), to);
+                       skb->csum = ~csum_add(csum_sub(~(skb->csum),
+                                                      (__force __wsum)from),
+                                             (__force __wsum)to);
        } else if (pseudohdr)
-               *sum = ~csum_fold(csum_add(csum_sub(csum_unfold(*sum), from),
-                                 to));
+               *sum = ~csum_fold(csum_add(csum_sub(csum_unfold(*sum),
+                                                   (__force __wsum)from),
+                                          (__force __wsum)to));
 }
 EXPORT_SYMBOL(inet_proto_csum_replace4);
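
A hedged NAT-style usage sketch (illustrative; old_addr/new_addr are assumed
to be the original and rewritten IPv4 addresses of a TCP packet):

        /* TCP checksums cover a pseudo header, so pass pseudohdr = true */
        inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
                                 old_addr, new_addr, true);
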
 
index 754484b3cd0e8cc2f92f2800f89dd71762b733d8..675cf94e04f862b77644f86628af6e8a46933055 100644 (file)
@@ -468,10 +468,10 @@ static struct proto dn_proto = {
        .obj_size               = sizeof(struct dn_sock),
 };
 
-static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
+static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp, int kern)
 {
        struct dn_scp *scp;
-       struct sock *sk = sk_alloc(net, PF_DECnet, gfp, &dn_proto);
+       struct sock *sk = sk_alloc(net, PF_DECnet, gfp, &dn_proto, kern);
 
        if  (!sk)
                goto out;
@@ -693,7 +693,7 @@ static int dn_create(struct net *net, struct socket *sock, int protocol,
        }
 
 
-       if ((sk = dn_alloc_sock(net, sock, GFP_KERNEL)) == NULL)
+       if ((sk = dn_alloc_sock(net, sock, GFP_KERNEL, kern)) == NULL)
                return -ENOBUFS;
 
        sk->sk_protocol = protocol;
@@ -1096,7 +1096,7 @@ static int dn_accept(struct socket *sock, struct socket *newsock, int flags)
 
        cb = DN_SKB_CB(skb);
        sk->sk_ack_backlog--;
-       newsk = dn_alloc_sock(sock_net(sk), newsock, sk->sk_allocation);
+       newsk = dn_alloc_sock(sock_net(sk), newsock, sk->sk_allocation, 0);
        if (newsk == NULL) {
                release_sock(sk);
                kfree_skb(skb);
index 827cda560a552b7b0dca45d49a06816e6dda513b..04ffad311704852a5d2c35c99eea2f1c4293f5e1 100644 (file)
@@ -345,6 +345,24 @@ static int dsa_slave_stp_update(struct net_device *dev, u8 state)
        return ret;
 }
 
+static int dsa_slave_port_attr_set(struct net_device *dev,
+                                  struct switchdev_attr *attr)
+{
+       int ret = 0;
+
+       switch (attr->id) {
+       case SWITCHDEV_ATTR_PORT_STP_STATE:
+               if (attr->trans == SWITCHDEV_TRANS_COMMIT)
+                       ret = dsa_slave_stp_update(dev, attr->u.stp_state);
+               break;
+       default:
+               ret = -EOPNOTSUPP;
+               break;
+       }
+
+       return ret;
+}
+
 static int dsa_slave_bridge_port_join(struct net_device *dev,
                                      struct net_device *br)
 {
@@ -382,14 +400,20 @@ static int dsa_slave_bridge_port_leave(struct net_device *dev)
        return ret;
 }
 
-static int dsa_slave_parent_id_get(struct net_device *dev,
-                                  struct netdev_phys_item_id *psid)
+static int dsa_slave_port_attr_get(struct net_device *dev,
+                                  struct switchdev_attr *attr)
 {
        struct dsa_slave_priv *p = netdev_priv(dev);
        struct dsa_switch *ds = p->parent;
 
-       psid->id_len = sizeof(ds->index);
-       memcpy(&psid->id, &ds->index, psid->id_len);
+       switch (attr->id) {
+       case SWITCHDEV_ATTR_PORT_PARENT_ID:
+               attr->u.ppid.id_len = sizeof(ds->index);
+               memcpy(&attr->u.ppid.id, &ds->index, attr->u.ppid.id_len);
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
 
        return 0;
 }
@@ -675,9 +699,9 @@ static const struct net_device_ops dsa_slave_netdev_ops = {
        .ndo_get_iflink         = dsa_slave_get_iflink,
 };
 
-static const struct swdev_ops dsa_slave_swdev_ops = {
-       .swdev_parent_id_get = dsa_slave_parent_id_get,
-       .swdev_port_stp_update = dsa_slave_stp_update,
+static const struct switchdev_ops dsa_slave_switchdev_ops = {
+       .switchdev_port_attr_get        = dsa_slave_port_attr_get,
+       .switchdev_port_attr_set        = dsa_slave_port_attr_set,
 };
 
 static void dsa_slave_adjust_link(struct net_device *dev)
@@ -810,12 +834,19 @@ static int dsa_slave_phy_setup(struct dsa_slave_priv *p,
        return 0;
 }
 
+static struct lock_class_key dsa_slave_netdev_xmit_lock_key;
+static void dsa_slave_set_lockdep_class_one(struct net_device *dev,
+                                           struct netdev_queue *txq,
+                                           void *_unused)
+{
+       lockdep_set_class(&txq->_xmit_lock,
+                         &dsa_slave_netdev_xmit_lock_key);
+}
+
 int dsa_slave_suspend(struct net_device *slave_dev)
 {
        struct dsa_slave_priv *p = netdev_priv(slave_dev);
 
-       netif_device_detach(slave_dev);
-
        if (p->phy) {
                phy_stop(p->phy);
                p->old_pause = -1;
@@ -859,7 +890,10 @@ int dsa_slave_create(struct dsa_switch *ds, struct device *parent,
        eth_hw_addr_inherit(slave_dev, master);
        slave_dev->tx_queue_len = 0;
        slave_dev->netdev_ops = &dsa_slave_netdev_ops;
-       slave_dev->swdev_ops = &dsa_slave_swdev_ops;
+       slave_dev->switchdev_ops = &dsa_slave_switchdev_ops;
+
+       netdev_for_each_tx_queue(slave_dev, dsa_slave_set_lockdep_class_one,
+                                NULL);
 
        SET_NETDEV_DEV(slave_dev, parent);
        slave_dev->dev.of_node = ds->pd->port_dn[port];
index f3bad41d725f449f91d0b1b4f7119a9c9660e976..77e0f0e7a88e2cfd32755b1dffbe83aeb89be24b 100644 (file)
@@ -58,6 +58,7 @@
 #include <net/ipv6.h>
 #include <net/ip.h>
 #include <net/dsa.h>
+#include <net/flow_dissector.h>
 #include <linux/uaccess.h>
 
 __setup("ether=", netdev_boot_setup);
@@ -130,9 +131,9 @@ u32 eth_get_headlen(void *data, unsigned int len)
                return len;
 
        /* parse any remaining L2/L3 headers, check for L4 */
-       if (!__skb_flow_dissect(NULL, &keys, data,
-                               eth->h_proto, sizeof(*eth), len))
-               return max_t(u32, keys.thoff, sizeof(*eth));
+       if (!skb_flow_dissect_flow_keys_buf(&keys, data, eth->h_proto,
+                                           sizeof(*eth), len))
+               return max_t(u32, keys.control.thoff, sizeof(*eth));
 
        /* parse for any L4 headers */
        return min_t(u32, __skb_get_poff(NULL, data, &keys, len), len);
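
This hunk moves eth_get_headlen() to the reworked flow dissector: the raw __skb_flow_dissect() call is replaced by the skb_flow_dissect_flow_keys_buf() wrapper, and the transport offset now lives in the keys.control sub-struct. A simplified sketch of the same buffer-based usage (it stops at the transport offset instead of calling __skb_get_poff()):

    /* Sketch: find how much of a raw frame is L2/L3 header. On a
     * partial parse the dissector still records how far it got, so
     * clamp to at least the Ethernet header.
     */
    static u32 headlen_sketch(void *data, __be16 proto, unsigned int len)
    {
            struct flow_keys keys;

            if (!skb_flow_dissect_flow_keys_buf(&keys, data, proto,
                                                sizeof(struct ethhdr), len))
                    return max_t(u32, keys.control.thoff,
                                 sizeof(struct ethhdr));

            return min_t(u32, keys.control.thoff, len);
    }
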
@@ -156,10 +157,11 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
 
        skb->dev = dev;
        skb_reset_mac_header(skb);
+
+       eth = (struct ethhdr *)skb->data;
        skb_pull_inline(skb, ETH_HLEN);
-       eth = eth_hdr(skb);
 
-       if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
+       if (unlikely(is_multicast_ether_addr_64bits(eth->h_dest))) {
                if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
                        skb->pkt_type = PACKET_BROADCAST;
                else
@@ -178,7 +180,7 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
        if (unlikely(netdev_uses_dsa(dev)))
                return htons(ETH_P_XDSA);
 
-       if (likely(ntohs(eth->h_proto) >= ETH_P_802_3_MIN))
+       if (likely(eth_proto_is_802_3(eth->h_proto)))
                return eth->h_proto;
 
        /*
@@ -468,6 +470,7 @@ EXPORT_SYMBOL(eth_gro_complete);
 
 static struct packet_offload eth_packet_offload __read_mostly = {
        .type = cpu_to_be16(ETH_P_TEB),
+       .priority = 10,
        .callbacks = {
                .gro_receive = eth_gro_receive,
                .gro_complete = eth_gro_complete,
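
The new .priority field orders entries on the shared packet-offload list; assuming lower values sort first, priority 10 keeps the ETH_P_TEB handler behind the common priority-0 protocols during GRO lookup. A sketch of registering such an offload through dev_add_offload():

    /* Sketch: register a GRO offload for an encapsulated ethertype.
     * The callbacks reuse the handlers shown above; the priority value
     * only affects the entry's position on the offload list.
     */
    static struct packet_offload teb_offload_sketch __read_mostly = {
            .type     = cpu_to_be16(ETH_P_TEB),
            .priority = 10,
            .callbacks = {
                    .gro_receive  = eth_gro_receive,
                    .gro_complete = eth_gro_complete,
            },
    };

    static int __init teb_offload_init_sketch(void)
    {
            dev_add_offload(&teb_offload_sketch);
            return 0;
    }
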
index 0ae5822ef944fb0e5c74b22de0fe8d426032135c..f20a387a1011021347af182060f3b8f4ceda7183 100644 (file)
 LIST_HEAD(lowpan_devices);
 static int lowpan_open_count;
 
-static __le16 lowpan_get_pan_id(const struct net_device *dev)
-{
-       struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
-
-       return ieee802154_mlme_ops(real_dev)->get_pan_id(real_dev);
-}
-
-static __le16 lowpan_get_short_addr(const struct net_device *dev)
-{
-       struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
-
-       return ieee802154_mlme_ops(real_dev)->get_short_addr(real_dev);
-}
-
-static u8 lowpan_get_dsn(const struct net_device *dev)
-{
-       struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
-
-       return ieee802154_mlme_ops(real_dev)->get_dsn(real_dev);
-}
-
 static struct header_ops lowpan_header_ops = {
        .create = lowpan_header_create,
 };
@@ -103,12 +82,6 @@ static const struct net_device_ops lowpan_netdev_ops = {
        .ndo_start_xmit         = lowpan_xmit,
 };
 
-static struct ieee802154_mlme_ops lowpan_mlme = {
-       .get_pan_id = lowpan_get_pan_id,
-       .get_short_addr = lowpan_get_short_addr,
-       .get_dsn = lowpan_get_dsn,
-};
-
 static void lowpan_setup(struct net_device *dev)
 {
        dev->addr_len           = IEEE802154_ADDR_LEN;
@@ -124,7 +97,6 @@ static void lowpan_setup(struct net_device *dev)
 
        dev->netdev_ops         = &lowpan_netdev_ops;
        dev->header_ops         = &lowpan_header_ops;
-       dev->ml_priv            = &lowpan_mlme;
        dev->destructor         = free_netdev;
        dev->features           |= NETIF_F_NETNS_LOCAL;
 }
index 2349070bd534be9f12a4d8ea342be79fe89a8bcd..98acf7319754970f109a1c6f27d8ff9da161dac2 100644 (file)
@@ -207,7 +207,7 @@ static int lowpan_header(struct sk_buff *skb, struct net_device *dev)
 
        /* prepare wpan address data */
        sa.mode = IEEE802154_ADDR_LONG;
-       sa.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
+       sa.pan_id = lowpan_dev_info(dev)->real_dev->ieee802154_ptr->pan_id;
        sa.extended_addr = ieee802154_devaddr_from_raw(saddr);
 
        /* intra-PAN communications */
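
With the mlme_ops getters removed above, 6LoWPAN reads 802.15.4 addressing state directly from the wpan_dev attached to the real device, as the pan_id line shows. The removed lowpan_get_pan_id() helper reduces to a one-line sketch:

    /* Sketch: the wpan_dev (dev->ieee802154_ptr) now carries pan_id,
     * short_addr and extended_addr, so no indirection through
     * ieee802154_mlme_ops() is needed.
     */
    static __le16 lowpan_pan_id_sketch(const struct net_device *ldev)
    {
            struct net_device *real_dev = lowpan_dev_info(ldev)->real_dev;

            return real_dev->ieee802154_ptr->pan_id;
    }
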
index 2ee00e8a03082aaebce7f017a5ca1e7219c6aade..b0248e934230d166c4e61a66a0ad18c268fa0a71 100644 (file)
@@ -121,8 +121,6 @@ wpan_phy_new(const struct cfg802154_ops *ops, size_t priv_size)
        /* atomic_inc_return makes it start at 1, make it start at 0 */
        rdev->wpan_phy_idx--;
 
-       mutex_init(&rdev->wpan_phy.pib_lock);
-
        INIT_LIST_HEAD(&rdev->wpan_dev_list);
        device_initialize(&rdev->wpan_phy.dev);
        dev_set_name(&rdev->wpan_phy.dev, PHY_NAME "%d", rdev->wpan_phy_idx);
index 2b4955d7aae54b9b4e647e6b7de9b8fa7ef68c42..3503c38954f9f9e09e88706357c838a37b46ee17 100644 (file)
@@ -97,8 +97,10 @@ static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 portid,
        BUG_ON(!phy);
        get_device(&phy->dev);
 
-       short_addr = ops->get_short_addr(dev);
-       pan_id = ops->get_pan_id(dev);
+       rtnl_lock();
+       short_addr = dev->ieee802154_ptr->short_addr;
+       pan_id = dev->ieee802154_ptr->pan_id;
+       rtnl_unlock();
 
        if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
            nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
@@ -117,12 +119,12 @@ static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 portid,
                rtnl_unlock();
 
                if (nla_put_s8(msg, IEEE802154_ATTR_TXPOWER,
-                              params.transmit_power) ||
+                              params.transmit_power / 100) ||
                    nla_put_u8(msg, IEEE802154_ATTR_LBT_ENABLED, params.lbt) ||
                    nla_put_u8(msg, IEEE802154_ATTR_CCA_MODE,
                               params.cca.mode) ||
                    nla_put_s32(msg, IEEE802154_ATTR_CCA_ED_LEVEL,
-                               params.cca_ed_level) ||
+                               params.cca_ed_level / 100) ||
                    nla_put_u8(msg, IEEE802154_ATTR_CSMA_RETRIES,
                               params.csma_retries) ||
                    nla_put_u8(msg, IEEE802154_ATTR_CSMA_MIN_BE,
@@ -166,10 +168,7 @@ static struct net_device *ieee802154_nl_get_dev(struct genl_info *info)
        if (!dev)
                return NULL;
 
-       /* Check on mtu is currently a hacked solution because lowpan
-        * and wpan have the same ARPHRD type.
-        */
-       if (dev->type != ARPHRD_IEEE802154 || dev->mtu != IEEE802154_MTU) {
+       if (dev->type != ARPHRD_IEEE802154) {
                dev_put(dev);
                return NULL;
        }
@@ -244,7 +243,9 @@ int ieee802154_associate_resp(struct sk_buff *skb, struct genl_info *info)
        addr.mode = IEEE802154_ADDR_LONG;
        addr.extended_addr = nla_get_hwaddr(
                        info->attrs[IEEE802154_ATTR_DEST_HW_ADDR]);
-       addr.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
+       rtnl_lock();
+       addr.pan_id = dev->ieee802154_ptr->pan_id;
+       rtnl_unlock();
 
        ret = ieee802154_mlme_ops(dev)->assoc_resp(dev, &addr,
                nla_get_shortaddr(info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]),
@@ -281,7 +282,9 @@ int ieee802154_disassociate_req(struct sk_buff *skb, struct genl_info *info)
                addr.short_addr = nla_get_shortaddr(
                                info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]);
        }
-       addr.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
+       rtnl_lock();
+       addr.pan_id = dev->ieee802154_ptr->pan_id;
+       rtnl_unlock();
 
        ret = ieee802154_mlme_ops(dev)->disassoc_req(dev, &addr,
                        nla_get_u8(info->attrs[IEEE802154_ATTR_REASON]));
@@ -449,11 +452,7 @@ int ieee802154_dump_iface(struct sk_buff *skb, struct netlink_callback *cb)
 
        idx = 0;
        for_each_netdev(net, dev) {
-               /* Check on mtu is currently a hacked solution because lowpan
-                * and wpan have the same ARPHRD type.
-                */
-               if (idx < s_idx || dev->type != ARPHRD_IEEE802154 ||
-                   dev->mtu != IEEE802154_MTU)
+               if (idx < s_idx || dev->type != ARPHRD_IEEE802154)
                        goto cont;
 
                if (ieee802154_nl_fill_iface(skb, NETLINK_CB(cb->skb).portid,
@@ -510,7 +509,7 @@ int ieee802154_set_macparams(struct sk_buff *skb, struct genl_info *info)
        ops->get_mac_params(dev, &params);
 
        if (info->attrs[IEEE802154_ATTR_TXPOWER])
-               params.transmit_power = nla_get_s8(info->attrs[IEEE802154_ATTR_TXPOWER]);
+               params.transmit_power = nla_get_s8(info->attrs[IEEE802154_ATTR_TXPOWER]) * 100;
 
        if (info->attrs[IEEE802154_ATTR_LBT_ENABLED])
                params.lbt = nla_get_u8(info->attrs[IEEE802154_ATTR_LBT_ENABLED]);
@@ -519,7 +518,7 @@ int ieee802154_set_macparams(struct sk_buff *skb, struct genl_info *info)
                params.cca.mode = nla_get_u8(info->attrs[IEEE802154_ATTR_CCA_MODE]);
 
        if (info->attrs[IEEE802154_ATTR_CCA_ED_LEVEL])
-               params.cca_ed_level = nla_get_s32(info->attrs[IEEE802154_ATTR_CCA_ED_LEVEL]);
+               params.cca_ed_level = nla_get_s32(info->attrs[IEEE802154_ATTR_CCA_ED_LEVEL]) * 100;
 
        if (info->attrs[IEEE802154_ATTR_CSMA_RETRIES])
                params.csma_retries = nla_get_u8(info->attrs[IEEE802154_ATTR_CSMA_RETRIES]);
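
The * 100 and / 100 factors in these hunks translate between the legacy netlink interface, which speaks whole dBm, and what appears to be the reworked internal representation in mBm (1/100 dBm), matching the s32 fields used elsewhere in this series. A minimal sketch of the scaling (helper names are illustrative):

    /* Illustrative dBm <-> mBm helpers; integer division truncates
     * toward zero, exactly like the /100 conversions above.
     */
    static inline s32 dbm_to_mbm_sketch(s32 dbm)
    {
            return dbm * 100;
    }

    static inline s32 mbm_to_dbm_sketch(s32 mbm)
    {
            return mbm / 100;
    }
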
@@ -783,11 +782,7 @@ ieee802154_llsec_dump_table(struct sk_buff *skb, struct netlink_callback *cb,
        int rc;
 
        for_each_netdev(net, dev) {
-               /* Check on mtu is currently a hacked solution because lowpan
-                * and wpan have the same ARPHRD type.
-                */
-               if (idx < first_dev || dev->type != ARPHRD_IEEE802154 ||
-                   dev->mtu != IEEE802154_MTU)
+               if (idx < first_dev || dev->type != ARPHRD_IEEE802154)
                        goto skip;
 
                data.ops = ieee802154_mlme_ops(dev);
index 346c6665d25e59bf372bacedc5a2ae6df30d227c..77d73014bde31ed285f3a11c40dd3ebaf82a8f2f 100644 (file)
@@ -50,26 +50,26 @@ static int ieee802154_nl_fill_phy(struct sk_buff *msg, u32 portid,
        if (!hdr)
                goto out;
 
-       mutex_lock(&phy->pib_lock);
+       rtnl_lock();
        if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
            nla_put_u8(msg, IEEE802154_ATTR_PAGE, phy->current_page) ||
            nla_put_u8(msg, IEEE802154_ATTR_CHANNEL, phy->current_channel))
                goto nla_put_failure;
        for (i = 0; i < 32; i++) {
-               if (phy->channels_supported[i])
-                       buf[pages++] = phy->channels_supported[i] | (i << 27);
+               if (phy->supported.channels[i])
+                       buf[pages++] = phy->supported.channels[i] | (i << 27);
        }
        if (pages &&
            nla_put(msg, IEEE802154_ATTR_CHANNEL_PAGE_LIST,
                    pages * sizeof(uint32_t), buf))
                goto nla_put_failure;
-       mutex_unlock(&phy->pib_lock);
+       rtnl_unlock();
        kfree(buf);
        genlmsg_end(msg, hdr);
        return 0;
 
 nla_put_failure:
-       mutex_unlock(&phy->pib_lock);
+       rtnl_unlock();
        genlmsg_cancel(msg, hdr);
 out:
        kfree(buf);
index f3c12f6a4a392ad301e5a79fcabb2bb9f8521431..7dbb1f4ce7df6bebb28d202f8907e00f71bd497c 100644 (file)
@@ -207,10 +207,11 @@ static const struct nla_policy nl802154_policy[NL802154_ATTR_MAX+1] = {
        [NL802154_ATTR_PAGE] = { .type = NLA_U8, },
        [NL802154_ATTR_CHANNEL] = { .type = NLA_U8, },
 
-       [NL802154_ATTR_TX_POWER] = { .type = NLA_S8, },
+       [NL802154_ATTR_TX_POWER] = { .type = NLA_S32, },
 
        [NL802154_ATTR_CCA_MODE] = { .type = NLA_U32, },
        [NL802154_ATTR_CCA_OPT] = { .type = NLA_U32, },
+       [NL802154_ATTR_CCA_ED_LEVEL] = { .type = NLA_S32, },
 
        [NL802154_ATTR_SUPPORTED_CHANNEL] = { .type = NLA_U32, },
 
@@ -225,6 +226,8 @@ static const struct nla_policy nl802154_policy[NL802154_ATTR_MAX+1] = {
        [NL802154_ATTR_MAX_FRAME_RETRIES] = { .type = NLA_S8, },
 
        [NL802154_ATTR_LBT_MODE] = { .type = NLA_U8, },
+
+       [NL802154_ATTR_WPAN_PHY_CAPS] = { .type = NLA_NESTED },
 };
 
 /* message building helper */
@@ -235,6 +238,28 @@ static inline void *nl802154hdr_put(struct sk_buff *skb, u32 portid, u32 seq,
        return genlmsg_put(skb, portid, seq, &nl802154_fam, flags, cmd);
 }
 
+static int
+nl802154_put_flags(struct sk_buff *msg, int attr, u32 mask)
+{
+       struct nlattr *nl_flags = nla_nest_start(msg, attr);
+       int i;
+
+       if (!nl_flags)
+               return -ENOBUFS;
+
+       i = 0;
+       while (mask) {
+               if ((mask & 1) && nla_put_flag(msg, i))
+                       return -ENOBUFS;
+
+               mask >>= 1;
+               i++;
+       }
+
+       nla_nest_end(msg, nl_flags);
+       return 0;
+}
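
nl802154_put_flags() encodes a bitmask as a nested attribute whose member attribute types are the bit positions, each carrying an empty flag payload. The receive side can rebuild the mask by walking the nest; a minimal sketch using the standard nla_for_each_nested()/nla_type() helpers:

    /* Sketch: invert nl802154_put_flags(). Every attribute inside the
     * nest is a payload-less flag whose type is the bit index.
     */
    static u32 nl802154_get_flags_sketch(const struct nlattr *nest)
    {
            struct nlattr *pos;
            u32 mask = 0;
            int rem;

            nla_for_each_nested(pos, nest, rem)
                    mask |= 1U << nla_type(pos);

            return mask;
    }
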
+
 static int
 nl802154_send_wpan_phy_channels(struct cfg802154_registered_device *rdev,
                                struct sk_buff *msg)
@@ -248,7 +273,7 @@ nl802154_send_wpan_phy_channels(struct cfg802154_registered_device *rdev,
 
        for (page = 0; page <= IEEE802154_MAX_PAGE; page++) {
                if (nla_put_u32(msg, NL802154_ATTR_SUPPORTED_CHANNEL,
-                               rdev->wpan_phy.channels_supported[page]))
+                               rdev->wpan_phy.supported.channels[page]))
                        return -ENOBUFS;
        }
        nla_nest_end(msg, nl_page);
@@ -256,6 +281,92 @@ nl802154_send_wpan_phy_channels(struct cfg802154_registered_device *rdev,
        return 0;
 }
 
+static int
+nl802154_put_capabilities(struct sk_buff *msg,
+                         struct cfg802154_registered_device *rdev)
+{
+       const struct wpan_phy_supported *caps = &rdev->wpan_phy.supported;
+       struct nlattr *nl_caps, *nl_channels;
+       int i;
+
+       nl_caps = nla_nest_start(msg, NL802154_ATTR_WPAN_PHY_CAPS);
+       if (!nl_caps)
+               return -ENOBUFS;
+
+       nl_channels = nla_nest_start(msg, NL802154_CAP_ATTR_CHANNELS);
+       if (!nl_channels)
+               return -ENOBUFS;
+
+       for (i = 0; i <= IEEE802154_MAX_PAGE; i++) {
+               if (caps->channels[i]) {
+                       if (nl802154_put_flags(msg, i, caps->channels[i]))
+                               return -ENOBUFS;
+               }
+       }
+
+       nla_nest_end(msg, nl_channels);
+
+       if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_CCA_ED_LEVEL) {
+               struct nlattr *nl_ed_lvls;
+
+               nl_ed_lvls = nla_nest_start(msg,
+                                           NL802154_CAP_ATTR_CCA_ED_LEVELS);
+               if (!nl_ed_lvls)
+                       return -ENOBUFS;
+
+               for (i = 0; i < caps->cca_ed_levels_size; i++) {
+                       if (nla_put_s32(msg, i, caps->cca_ed_levels[i]))
+                               return -ENOBUFS;
+               }
+
+               nla_nest_end(msg, nl_ed_lvls);
+       }
+
+       if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_TXPOWER) {
+               struct nlattr *nl_tx_pwrs;
+
+               nl_tx_pwrs = nla_nest_start(msg, NL802154_CAP_ATTR_TX_POWERS);
+               if (!nl_tx_pwrs)
+                       return -ENOBUFS;
+
+               for (i = 0; i < caps->tx_powers_size; i++) {
+                       if (nla_put_s32(msg, i, caps->tx_powers[i]))
+                               return -ENOBUFS;
+               }
+
+               nla_nest_end(msg, nl_tx_pwrs);
+       }
+
+       if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_CCA_MODE) {
+               if (nl802154_put_flags(msg, NL802154_CAP_ATTR_CCA_MODES,
+                                      caps->cca_modes) ||
+                   nl802154_put_flags(msg, NL802154_CAP_ATTR_CCA_OPTS,
+                                      caps->cca_opts))
+                       return -ENOBUFS;
+       }
+
+       if (nla_put_u8(msg, NL802154_CAP_ATTR_MIN_MINBE, caps->min_minbe) ||
+           nla_put_u8(msg, NL802154_CAP_ATTR_MAX_MINBE, caps->max_minbe) ||
+           nla_put_u8(msg, NL802154_CAP_ATTR_MIN_MAXBE, caps->min_maxbe) ||
+           nla_put_u8(msg, NL802154_CAP_ATTR_MAX_MAXBE, caps->max_maxbe) ||
+           nla_put_u8(msg, NL802154_CAP_ATTR_MIN_CSMA_BACKOFFS,
+                      caps->min_csma_backoffs) ||
+           nla_put_u8(msg, NL802154_CAP_ATTR_MAX_CSMA_BACKOFFS,
+                      caps->max_csma_backoffs) ||
+           nla_put_s8(msg, NL802154_CAP_ATTR_MIN_FRAME_RETRIES,
+                      caps->min_frame_retries) ||
+           nla_put_s8(msg, NL802154_CAP_ATTR_MAX_FRAME_RETRIES,
+                      caps->max_frame_retries) ||
+           nl802154_put_flags(msg, NL802154_CAP_ATTR_IFTYPES,
+                              caps->iftypes) ||
+           nla_put_u32(msg, NL802154_CAP_ATTR_LBT, caps->lbt))
+               return -ENOBUFS;
+
+       nla_nest_end(msg, nl_caps);
+
+       return 0;
+}
+
 static int nl802154_send_wpan_phy(struct cfg802154_registered_device *rdev,
                                  enum nl802154_commands cmd,
                                  struct sk_buff *msg, u32 portid, u32 seq,
@@ -286,23 +397,38 @@ static int nl802154_send_wpan_phy(struct cfg802154_registered_device *rdev,
                       rdev->wpan_phy.current_channel))
                goto nla_put_failure;
 
-       /* supported channels array */
+       /* TODO: remove this behaviour; we keep supporting it for a while
+        * so users can migrate to the new one.
+        */
        if (nl802154_send_wpan_phy_channels(rdev, msg))
                goto nla_put_failure;
 
        /* cca mode */
-       if (nla_put_u32(msg, NL802154_ATTR_CCA_MODE,
-                       rdev->wpan_phy.cca.mode))
-               goto nla_put_failure;
+       if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_CCA_MODE) {
+               if (nla_put_u32(msg, NL802154_ATTR_CCA_MODE,
+                               rdev->wpan_phy.cca.mode))
+                       goto nla_put_failure;
+
+               if (rdev->wpan_phy.cca.mode == NL802154_CCA_ENERGY_CARRIER) {
+                       if (nla_put_u32(msg, NL802154_ATTR_CCA_OPT,
+                                       rdev->wpan_phy.cca.opt))
+                               goto nla_put_failure;
+               }
+       }
 
-       if (rdev->wpan_phy.cca.mode == NL802154_CCA_ENERGY_CARRIER) {
-               if (nla_put_u32(msg, NL802154_ATTR_CCA_OPT,
-                               rdev->wpan_phy.cca.opt))
+       if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_TXPOWER) {
+               if (nla_put_s32(msg, NL802154_ATTR_TX_POWER,
+                               rdev->wpan_phy.transmit_power))
                        goto nla_put_failure;
        }
 
-       if (nla_put_s8(msg, NL802154_ATTR_TX_POWER,
-                      rdev->wpan_phy.transmit_power))
+       if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_CCA_ED_LEVEL) {
+               if (nla_put_s32(msg, NL802154_ATTR_CCA_ED_LEVEL,
+                               rdev->wpan_phy.cca_ed_level))
+                       goto nla_put_failure;
+       }
+
+       if (nl802154_put_capabilities(msg, rdev))
                goto nla_put_failure;
 
 finish:
@@ -575,7 +701,8 @@ static int nl802154_new_interface(struct sk_buff *skb, struct genl_info *info)
 
        if (info->attrs[NL802154_ATTR_IFTYPE]) {
                type = nla_get_u32(info->attrs[NL802154_ATTR_IFTYPE]);
-               if (type > NL802154_IFTYPE_MAX)
+               if (type > NL802154_IFTYPE_MAX ||
+                   !(rdev->wpan_phy.supported.iftypes & BIT(type)))
                        return -EINVAL;
        }
 
@@ -625,7 +752,8 @@ static int nl802154_set_channel(struct sk_buff *skb, struct genl_info *info)
        channel = nla_get_u8(info->attrs[NL802154_ATTR_CHANNEL]);
 
        /* check 802.15.4 constraints */
-       if (page > IEEE802154_MAX_PAGE || channel > IEEE802154_MAX_CHANNEL)
+       if (page > IEEE802154_MAX_PAGE || channel > IEEE802154_MAX_CHANNEL ||
+           !(rdev->wpan_phy.supported.channels[page] & BIT(channel)))
                return -EINVAL;
 
        return rdev_set_channel(rdev, page, channel);
@@ -636,12 +764,17 @@ static int nl802154_set_cca_mode(struct sk_buff *skb, struct genl_info *info)
        struct cfg802154_registered_device *rdev = info->user_ptr[0];
        struct wpan_phy_cca cca;
 
+       if (!(rdev->wpan_phy.flags & WPAN_PHY_FLAG_CCA_MODE))
+               return -EOPNOTSUPP;
+
        if (!info->attrs[NL802154_ATTR_CCA_MODE])
                return -EINVAL;
 
        cca.mode = nla_get_u32(info->attrs[NL802154_ATTR_CCA_MODE]);
        /* checking 802.15.4 constraints */
-       if (cca.mode < NL802154_CCA_ENERGY || cca.mode > NL802154_CCA_ATTR_MAX)
+       if (cca.mode < NL802154_CCA_ENERGY ||
+           cca.mode > NL802154_CCA_ATTR_MAX ||
+           !(rdev->wpan_phy.supported.cca_modes & BIT(cca.mode)))
                return -EINVAL;
 
        if (cca.mode == NL802154_CCA_ENERGY_CARRIER) {
@@ -649,13 +782,58 @@ static int nl802154_set_cca_mode(struct sk_buff *skb, struct genl_info *info)
                        return -EINVAL;
 
                cca.opt = nla_get_u32(info->attrs[NL802154_ATTR_CCA_OPT]);
-               if (cca.opt > NL802154_CCA_OPT_ATTR_MAX)
+               if (cca.opt > NL802154_CCA_OPT_ATTR_MAX ||
+                   !(rdev->wpan_phy.supported.cca_opts & BIT(cca.opt)))
                        return -EINVAL;
        }
 
        return rdev_set_cca_mode(rdev, &cca);
 }
 
+static int nl802154_set_cca_ed_level(struct sk_buff *skb, struct genl_info *info)
+{
+       struct cfg802154_registered_device *rdev = info->user_ptr[0];
+       s32 ed_level;
+       int i;
+
+       if (!(rdev->wpan_phy.flags & WPAN_PHY_FLAG_CCA_ED_LEVEL))
+               return -EOPNOTSUPP;
+
+       if (!info->attrs[NL802154_ATTR_CCA_ED_LEVEL])
+               return -EINVAL;
+
+       ed_level = nla_get_s32(info->attrs[NL802154_ATTR_CCA_ED_LEVEL]);
+
+       for (i = 0; i < rdev->wpan_phy.supported.cca_ed_levels_size; i++) {
+               if (ed_level == rdev->wpan_phy.supported.cca_ed_levels[i])
+                       return rdev_set_cca_ed_level(rdev, ed_level);
+       }
+
+       return -EINVAL;
+}
+
+static int nl802154_set_tx_power(struct sk_buff *skb, struct genl_info *info)
+{
+       struct cfg802154_registered_device *rdev = info->user_ptr[0];
+       s32 power;
+       int i;
+
+       if (!(rdev->wpan_phy.flags & WPAN_PHY_FLAG_TXPOWER))
+               return -EOPNOTSUPP;
+
+       if (!info->attrs[NL802154_ATTR_TX_POWER])
+               return -EINVAL;
+
+       power = nla_get_s32(info->attrs[NL802154_ATTR_TX_POWER]);
+
+       for (i = 0; i < rdev->wpan_phy.supported.tx_powers_size; i++) {
+               if (power == rdev->wpan_phy.supported.tx_powers[i])
+                       return rdev_set_tx_power(rdev, power);
+       }
+
+       return -EINVAL;
+}
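
nl802154_set_cca_ed_level() and nl802154_set_tx_power() share the same shape: check the capability flag, require the attribute, then accept the value only if the PHY advertises it in its discrete capability list. The lookup could be factored as below (the helper is illustrative, not part of this tree):

    /* Illustrative helper for the validate-then-set pattern above:
     * accept a value only if it appears in the PHY's supported list.
     */
    static bool s32_supported_sketch(s32 value, const s32 *list, int size)
    {
            int i;

            for (i = 0; i < size; i++) {
                    if (value == list[i])
                            return true;
            }
            return false;
    }

With it, the body of each handler collapses to one supported-value check followed by the matching rdev_set_*() call.
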
+
 static int nl802154_set_pan_id(struct sk_buff *skb, struct genl_info *info)
 {
        struct cfg802154_registered_device *rdev = info->user_ptr[0];
@@ -668,14 +846,22 @@ static int nl802154_set_pan_id(struct sk_buff *skb, struct genl_info *info)
                return -EBUSY;
 
        /* don't change address fields on monitor */
-       if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
-               return -EINVAL;
-
-       if (!info->attrs[NL802154_ATTR_PAN_ID])
+       if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR ||
+           !info->attrs[NL802154_ATTR_PAN_ID])
                return -EINVAL;
 
        pan_id = nla_get_le16(info->attrs[NL802154_ATTR_PAN_ID]);
 
+       /* TODO
+        * It is unclear whether the broadcast pan_id should be rejected
+        * here. Broadcast is a valid setting; quoting 802.15.4:
+        * "If this value is 0xffff, the device is not associated."
+        *
+        * Allowing it could be useful to simply disassociate a device.
+        */
+       if (pan_id == cpu_to_le16(IEEE802154_PAN_ID_BROADCAST))
+               return -EINVAL;
+
        return rdev_set_pan_id(rdev, wpan_dev, pan_id);
 }
 
@@ -691,14 +877,27 @@ static int nl802154_set_short_addr(struct sk_buff *skb, struct genl_info *info)
                return -EBUSY;
 
        /* don't change address fields on monitor */
-       if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
-               return -EINVAL;
-
-       if (!info->attrs[NL802154_ATTR_SHORT_ADDR])
+       if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR ||
+           !info->attrs[NL802154_ATTR_SHORT_ADDR])
                return -EINVAL;
 
        short_addr = nla_get_le16(info->attrs[NL802154_ATTR_SHORT_ADDR]);
 
+       /* TODO
+        * It is unclear whether the reserved short_addr values should
+        * be rejected here. Both are valid settings; quoting 802.15.4:
+        * "A value of 0xfffe indicates that the device has
+        * associated but has not been allocated an address. A
+        * value of 0xffff indicates that the device does not
+        * have a short address."
+        *
+        * Perhaps these values should be settable, with socket
+        * communication disallowed while they are in use.
+        */
+       if (short_addr == cpu_to_le16(IEEE802154_ADDR_SHORT_UNSPEC) ||
+           short_addr == cpu_to_le16(IEEE802154_ADDR_SHORT_BROADCAST))
+               return -EINVAL;
+
        return rdev_set_short_addr(rdev, wpan_dev, short_addr);
 }
 
@@ -722,7 +921,11 @@ nl802154_set_backoff_exponent(struct sk_buff *skb, struct genl_info *info)
        max_be = nla_get_u8(info->attrs[NL802154_ATTR_MAX_BE]);
 
        /* check 802.15.4 constraints */
-       if (max_be < 3 || max_be > 8 || min_be > max_be)
+       if (min_be < rdev->wpan_phy.supported.min_minbe ||
+           min_be > rdev->wpan_phy.supported.max_minbe ||
+           max_be < rdev->wpan_phy.supported.min_maxbe ||
+           max_be > rdev->wpan_phy.supported.max_maxbe ||
+           min_be > max_be)
                return -EINVAL;
 
        return rdev_set_backoff_exponent(rdev, wpan_dev, min_be, max_be);
@@ -747,7 +950,8 @@ nl802154_set_max_csma_backoffs(struct sk_buff *skb, struct genl_info *info)
                        info->attrs[NL802154_ATTR_MAX_CSMA_BACKOFFS]);
 
        /* check 802.15.4 constraints */
-       if (max_csma_backoffs > 5)
+       if (max_csma_backoffs < rdev->wpan_phy.supported.min_csma_backoffs ||
+           max_csma_backoffs > rdev->wpan_phy.supported.max_csma_backoffs)
                return -EINVAL;
 
        return rdev_set_max_csma_backoffs(rdev, wpan_dev, max_csma_backoffs);
@@ -771,7 +975,8 @@ nl802154_set_max_frame_retries(struct sk_buff *skb, struct genl_info *info)
                        info->attrs[NL802154_ATTR_MAX_FRAME_RETRIES]);
 
        /* check 802.15.4 constraints */
-       if (max_frame_retries < -1 || max_frame_retries > 7)
+       if (max_frame_retries < rdev->wpan_phy.supported.min_frame_retries ||
+           max_frame_retries > rdev->wpan_phy.supported.max_frame_retries)
                return -EINVAL;
 
        return rdev_set_max_frame_retries(rdev, wpan_dev, max_frame_retries);
@@ -791,6 +996,9 @@ static int nl802154_set_lbt_mode(struct sk_buff *skb, struct genl_info *info)
                return -EINVAL;
 
        mode = !!nla_get_u8(info->attrs[NL802154_ATTR_LBT_MODE]);
+       if (!wpan_phy_supported_bool(mode, rdev->wpan_phy.supported.lbt))
+               return -EINVAL;
+
        return rdev_set_lbt_mode(rdev, wpan_dev, mode);
 }
 
@@ -936,6 +1144,22 @@ static const struct genl_ops nl802154_ops[] = {
                .internal_flags = NL802154_FLAG_NEED_WPAN_PHY |
                                  NL802154_FLAG_NEED_RTNL,
        },
+       {
+               .cmd = NL802154_CMD_SET_CCA_ED_LEVEL,
+               .doit = nl802154_set_cca_ed_level,
+               .policy = nl802154_policy,
+               .flags = GENL_ADMIN_PERM,
+               .internal_flags = NL802154_FLAG_NEED_WPAN_PHY |
+                                 NL802154_FLAG_NEED_RTNL,
+       },
+       {
+               .cmd = NL802154_CMD_SET_TX_POWER,
+               .doit = nl802154_set_tx_power,
+               .policy = nl802154_policy,
+               .flags = GENL_ADMIN_PERM,
+               .internal_flags = NL802154_FLAG_NEED_WPAN_PHY |
+                                 NL802154_FLAG_NEED_RTNL,
+       },
        {
                .cmd = NL802154_CMD_SET_PAN_ID,
                .doit = nl802154_set_pan_id,
index 7b5a9dd94fe5a2b55d01103aa529261141ece521..b2155a123f6c88980c180eeb7b4ffdcf68bea4fb 100644 (file)
@@ -74,6 +74,29 @@ rdev_set_cca_mode(struct cfg802154_registered_device *rdev,
        return ret;
 }
 
+static inline int
+rdev_set_cca_ed_level(struct cfg802154_registered_device *rdev, s32 ed_level)
+{
+       int ret;
+
+       trace_802154_rdev_set_cca_ed_level(&rdev->wpan_phy, ed_level);
+       ret = rdev->ops->set_cca_ed_level(&rdev->wpan_phy, ed_level);
+       trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
+       return ret;
+}
+
+static inline int
+rdev_set_tx_power(struct cfg802154_registered_device *rdev,
+                 s32 power)
+{
+       int ret;
+
+       trace_802154_rdev_set_tx_power(&rdev->wpan_phy, power);
+       ret = rdev->ops->set_tx_power(&rdev->wpan_phy, power);
+       trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
+       return ret;
+}
+
 static inline int
 rdev_set_pan_id(struct cfg802154_registered_device *rdev,
                struct wpan_dev *wpan_dev, __le16 pan_id)
index b60c65f70346a48623209dc93fb06273d445fb7b..02abef2c162187ad12be3f4e7d61fcca803883f8 100644 (file)
@@ -64,10 +64,8 @@ ieee802154_get_dev(struct net *net, const struct ieee802154_addr *addr)
                        if (tmp->type != ARPHRD_IEEE802154)
                                continue;
 
-                       pan_id = ieee802154_mlme_ops(tmp)->get_pan_id(tmp);
-                       short_addr =
-                               ieee802154_mlme_ops(tmp)->get_short_addr(tmp);
-
+                       pan_id = tmp->ieee802154_ptr->pan_id;
+                       short_addr = tmp->ieee802154_ptr->short_addr;
                        if (pan_id == addr->pan_id &&
                            short_addr == addr->short_addr) {
                                dev = tmp;
@@ -228,15 +226,9 @@ static int raw_bind(struct sock *sk, struct sockaddr *_uaddr, int len)
                goto out;
        }
 
-       if (dev->type != ARPHRD_IEEE802154) {
-               err = -ENODEV;
-               goto out_put;
-       }
-
        sk->sk_bound_dev_if = dev->ifindex;
        sk_dst_reset(sk);
 
-out_put:
        dev_put(dev);
 out:
        release_sock(sk);
@@ -286,7 +278,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 
        if (size > mtu) {
                pr_debug("size = %Zu, mtu = %u\n", size, mtu);
-               err = -EINVAL;
+               err = -EMSGSIZE;
                goto out_dev;
        }
 
@@ -797,9 +789,9 @@ static int ieee802154_dgram_deliver(struct net_device *dev, struct sk_buff *skb)
        /* Data frame processing */
        BUG_ON(dev->type != ARPHRD_IEEE802154);
 
-       pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
-       short_addr = ieee802154_mlme_ops(dev)->get_short_addr(dev);
-       hw_addr = ieee802154_devaddr_from_raw(dev->dev_addr);
+       pan_id = dev->ieee802154_ptr->pan_id;
+       short_addr = dev->ieee802154_ptr->short_addr;
+       hw_addr = dev->ieee802154_ptr->extended_addr;
 
        read_lock(&dgram_lock);
        sk_for_each(sk, &dgram_head) {
@@ -1014,7 +1006,7 @@ static int ieee802154_create(struct net *net, struct socket *sock,
        }
 
        rc = -ENOMEM;
-       sk = sk_alloc(net, PF_IEEE802154, GFP_KERNEL, proto);
+       sk = sk_alloc(net, PF_IEEE802154, GFP_KERNEL, proto, kern);
        if (!sk)
                goto out;
        rc = 0;
index 5ac25eb6ed17869821c83d5a360fcafbf41f4a4b..73eb7605c1eb64ec6d342498ff32a0be9c195c01 100644 (file)
@@ -1,4 +1,4 @@
-/* Based on net/wireless/tracing.h */
+/* Based on net/wireless/trace.h */
 
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM cfg802154
@@ -93,6 +93,21 @@ TRACE_EVENT(802154_rdev_set_channel,
                  __entry->page, __entry->channel)
 );
 
+TRACE_EVENT(802154_rdev_set_tx_power,
+       TP_PROTO(struct wpan_phy *wpan_phy, s32 power),
+       TP_ARGS(wpan_phy, power),
+       TP_STRUCT__entry(
+               WPAN_PHY_ENTRY
+               __field(s32, power)
+       ),
+       TP_fast_assign(
+               WPAN_PHY_ASSIGN;
+               __entry->power = power;
+       ),
+       TP_printk(WPAN_PHY_PR_FMT ", power: %d", WPAN_PHY_PR_ARG,
+                 __entry->power)
+);
+
 TRACE_EVENT(802154_rdev_set_cca_mode,
        TP_PROTO(struct wpan_phy *wpan_phy, const struct wpan_phy_cca *cca),
        TP_ARGS(wpan_phy, cca),
@@ -108,6 +123,21 @@ TRACE_EVENT(802154_rdev_set_cca_mode,
                  WPAN_CCA_PR_ARG)
 );
 
+TRACE_EVENT(802154_rdev_set_cca_ed_level,
+       TP_PROTO(struct wpan_phy *wpan_phy, s32 ed_level),
+       TP_ARGS(wpan_phy, ed_level),
+       TP_STRUCT__entry(
+               WPAN_PHY_ENTRY
+               __field(s32, ed_level)
+       ),
+       TP_fast_assign(
+               WPAN_PHY_ASSIGN;
+               __entry->ed_level = ed_level;
+       ),
+       TP_printk(WPAN_PHY_PR_FMT ", ed_level: %d", WPAN_PHY_PR_ARG,
+                 __entry->ed_level)
+);
+
 DECLARE_EVENT_CLASS(802154_le16_template,
        TP_PROTO(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
                 __le16 le16arg),
index bd290160484263ba1507b8ae5b05580430fa9e56..d83071dccd7421b42360d34e5e732232e73ba024 100644 (file)
@@ -331,8 +331,8 @@ config NET_FOU_IP_TUNNELS
          When this option is enabled IP tunnels can be configured to use
          FOU or GUE encapsulation.
 
-config GENEVE
-       tristate "Generic Network Virtualization Encapsulation (Geneve)"
+config GENEVE_CORE
+       tristate "Generic Network Virtualization Encapsulation library"
        depends on INET
        select NET_UDP_TUNNEL
        ---help---
index 518c04ed666eee00f436872b97b43d224c31e111..b36236dd6014d5f01706b2b3fd0c34a7ed1e9cea 100644 (file)
@@ -56,7 +56,7 @@ obj-$(CONFIG_TCP_CONG_YEAH) += tcp_yeah.o
 obj-$(CONFIG_TCP_CONG_ILLINOIS) += tcp_illinois.o
 obj-$(CONFIG_MEMCG_KMEM) += tcp_memcontrol.o
 obj-$(CONFIG_NETLABEL) += cipso_ipv4.o
-obj-$(CONFIG_GENEVE) += geneve.o
+obj-$(CONFIG_GENEVE_CORE) += geneve_core.o
 
 obj-$(CONFIG_XFRM) += xfrm4_policy.o xfrm4_state.o xfrm4_input.o \
                      xfrm4_output.o xfrm4_protocol.o
index 8b47a4d79d040e39e592d3583affb7fec2d19f3d..cc858ef44451acf3fe20bd7db3777ec65a464700 100644 (file)
@@ -317,7 +317,7 @@ lookup_protocol:
        WARN_ON(!answer_prot->slab);
 
        err = -ENOBUFS;
-       sk = sk_alloc(net, PF_INET, GFP_KERNEL, answer_prot);
+       sk = sk_alloc(net, PF_INET, GFP_KERNEL, answer_prot, kern);
        if (!sk)
                goto out;
 
@@ -488,7 +488,8 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
                inet->inet_saddr = 0;  /* Use device */
 
        /* Make sure we are allowed to bind here. */
-       if (sk->sk_prot->get_port(sk, snum)) {
+       if ((snum || !inet->bind_address_no_port) &&
+           sk->sk_prot->get_port(sk, snum)) {
                inet->inet_saddr = inet->inet_rcv_saddr = 0;
                err = -EADDRINUSE;
                goto out_release_sock;
@@ -1430,7 +1431,7 @@ int inet_ctl_sock_create(struct sock **sk, unsigned short family,
                         struct net *net)
 {
        struct socket *sock;
-       int rc = sock_create_kern(family, type, protocol, &sock);
+       int rc = sock_create_kern(net, family, type, protocol, &sock);
 
        if (rc == 0) {
                *sk = sock->sk;
@@ -1440,8 +1441,6 @@ int inet_ctl_sock_create(struct sock **sk, unsigned short family,
                 * we do not wish this socket to see incoming packets.
                 */
                (*sk)->sk_prot->unhash(*sk);
-
-               sk_change_net(*sk, net);
        }
        return rc;
 }
@@ -1597,7 +1596,7 @@ static __net_init int inet_init_net(struct net *net)
         */
        seqlock_init(&net->ipv4.ip_local_ports.lock);
        net->ipv4.ip_local_ports.range[0] =  32768;
-       net->ipv4.ip_local_ports.range[1] =  61000;
+       net->ipv4.ip_local_ports.range[1] =  60999;
 
        seqlock_init(&net->ipv4.ping_group_range.lock);
        /*
index 8d695b6659c715f89e06c31d9890532b34b2727f..28ec3c1823bf390f5508e98777483cf82cc73cee 100644 (file)
@@ -713,8 +713,6 @@ static void fib_info_hash_move(struct hlist_head *new_info_hash,
                        struct hlist_head *dest;
                        unsigned int new_hash;
 
-                       hlist_del(&fi->fib_hash);
-
                        new_hash = fib_info_hashfn(fi);
                        dest = &new_info_hash[new_hash];
                        hlist_add_head(&fi->fib_hash, dest);
@@ -731,8 +729,6 @@ static void fib_info_hash_move(struct hlist_head *new_info_hash,
                        struct hlist_head *ldest;
                        unsigned int new_hash;
 
-                       hlist_del(&fi->fib_lhash);
-
                        new_hash = fib_laddr_hashfn(fi->fib_prefsrc);
                        ldest = &new_laddrhash[new_hash];
                        hlist_add_head(&fi->fib_lhash, ldest);
index 09b62e17dd8cba4b1041de5f208180d278010604..3c699c4e90a4645bb99bdd245fdbf1cde17e0e49 100644 (file)
@@ -72,6 +72,7 @@
 #include <linux/list.h>
 #include <linux/slab.h>
 #include <linux/export.h>
+#include <linux/vmalloc.h>
 #include <net/net_namespace.h>
 #include <net/ip.h>
 #include <net/protocol.h>
@@ -324,13 +325,15 @@ static inline void empty_child_dec(struct key_vector *n)
 
 static struct key_vector *leaf_new(t_key key, struct fib_alias *fa)
 {
-       struct tnode *kv = kmem_cache_alloc(trie_leaf_kmem, GFP_KERNEL);
-       struct key_vector *l = kv->kv;
+       struct key_vector *l;
+       struct tnode *kv;
 
+       kv = kmem_cache_alloc(trie_leaf_kmem, GFP_KERNEL);
        if (!kv)
                return NULL;
 
        /* initialize key vector */
+       l = kv->kv;
        l->key = key;
        l->pos = 0;
        l->bits = 0;
@@ -345,24 +348,26 @@ static struct key_vector *leaf_new(t_key key, struct fib_alias *fa)
 
 static struct key_vector *tnode_new(t_key key, int pos, int bits)
 {
-       struct tnode *tnode = tnode_alloc(bits);
        unsigned int shift = pos + bits;
-       struct key_vector *tn = tnode->kv;
+       struct key_vector *tn;
+       struct tnode *tnode;
 
        /* verify bits and pos their msb bits clear and values are valid */
        BUG_ON(!bits || (shift > KEYLENGTH));
 
-       pr_debug("AT %p s=%zu %zu\n", tnode, TNODE_SIZE(0),
-                sizeof(struct key_vector *) << bits);
-
+       tnode = tnode_alloc(bits);
        if (!tnode)
                return NULL;
 
+       pr_debug("AT %p s=%zu %zu\n", tnode, TNODE_SIZE(0),
+                sizeof(struct key_vector *) << bits);
+
        if (bits == KEYLENGTH)
                tnode->full_children = 1;
        else
                tnode->empty_children = 1ul << bits;
 
+       tn = tnode->kv;
        tn->key = (shift < KEYLENGTH) ? (key >> shift) << shift : 0;
        tn->pos = pos;
        tn->bits = bits;
@@ -1166,13 +1171,13 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
                        new_fa->fa_slen = fa->fa_slen;
                        new_fa->tb_id = tb->tb_id;
 
-                       err = netdev_switch_fib_ipv4_add(key, plen, fi,
-                                                        new_fa->fa_tos,
-                                                        cfg->fc_type,
-                                                        cfg->fc_nlflags,
-                                                        tb->tb_id);
+                       err = switchdev_fib_ipv4_add(key, plen, fi,
+                                                    new_fa->fa_tos,
+                                                    cfg->fc_type,
+                                                    cfg->fc_nlflags,
+                                                    tb->tb_id);
                        if (err) {
-                               netdev_switch_fib_ipv4_abort(fi);
+                               switchdev_fib_ipv4_abort(fi);
                                kmem_cache_free(fn_alias_kmem, new_fa);
                                goto out;
                        }
@@ -1216,12 +1221,10 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
        new_fa->tb_id = tb->tb_id;
 
        /* (Optionally) offload fib entry to switch hardware. */
-       err = netdev_switch_fib_ipv4_add(key, plen, fi, tos,
-                                        cfg->fc_type,
-                                        cfg->fc_nlflags,
-                                        tb->tb_id);
+       err = switchdev_fib_ipv4_add(key, plen, fi, tos, cfg->fc_type,
+                                    cfg->fc_nlflags, tb->tb_id);
        if (err) {
-               netdev_switch_fib_ipv4_abort(fi);
+               switchdev_fib_ipv4_abort(fi);
                goto out_free_new_fa;
        }
 
@@ -1240,7 +1243,7 @@ succeeded:
        return 0;
 
 out_sw_fib_del:
-       netdev_switch_fib_ipv4_del(key, plen, fi, tos, cfg->fc_type, tb->tb_id);
+       switchdev_fib_ipv4_del(key, plen, fi, tos, cfg->fc_type, tb->tb_id);
 out_free_new_fa:
        kmem_cache_free(fn_alias_kmem, new_fa);
 out:
@@ -1518,8 +1521,8 @@ int fib_table_delete(struct fib_table *tb, struct fib_config *cfg)
        if (!fa_to_delete)
                return -ESRCH;
 
-       netdev_switch_fib_ipv4_del(key, plen, fa_to_delete->fa_info, tos,
-                                  cfg->fc_type, tb->tb_id);
+       switchdev_fib_ipv4_del(key, plen, fa_to_delete->fa_info, tos,
+                              cfg->fc_type, tb->tb_id);
 
        rtmsg_fib(RTM_DELROUTE, htonl(key), fa_to_delete, plen, tb->tb_id,
                  &cfg->fc_nlinfo, 0);
@@ -1768,10 +1771,9 @@ void fib_table_flush_external(struct fib_table *tb)
                        if (!fi || !(fi->fib_flags & RTNH_F_OFFLOAD))
                                continue;
 
-                       netdev_switch_fib_ipv4_del(n->key,
-                                                  KEYLENGTH - fa->fa_slen,
-                                                  fi, fa->fa_tos,
-                                                  fa->fa_type, tb->tb_id);
+                       switchdev_fib_ipv4_del(n->key, KEYLENGTH - fa->fa_slen,
+                                              fi, fa->fa_tos, fa->fa_type,
+                                              tb->tb_id);
                }
 
                /* update leaf slen */
@@ -1836,10 +1838,9 @@ int fib_table_flush(struct fib_table *tb)
                                continue;
                        }
 
-                       netdev_switch_fib_ipv4_del(n->key,
-                                                  KEYLENGTH - fa->fa_slen,
-                                                  fi, fa->fa_tos,
-                                                  fa->fa_type, tb->tb_id);
+                       switchdev_fib_ipv4_del(n->key, KEYLENGTH - fa->fa_slen,
+                                              fi, fa->fa_tos, fa->fa_type,
+                                              tb->tb_id);
                        hlist_del_rcu(&fa->fa_list);
                        fib_release_info(fa->fa_info);
                        alias_free_mem_rcu(fa);
@@ -2057,11 +2058,12 @@ static struct key_vector *fib_trie_get_next(struct fib_trie_iter *iter)
 static struct key_vector *fib_trie_get_first(struct fib_trie_iter *iter,
                                             struct trie *t)
 {
-       struct key_vector *n, *pn = t->kv;
+       struct key_vector *n, *pn;
 
        if (!t)
                return NULL;
 
+       pn = t->kv;
        n = rcu_dereference(pn->tnode[0]);
        if (!n)
                return NULL;
diff --git a/net/ipv4/geneve.c b/net/ipv4/geneve.c
deleted file mode 100644 (file)
index 8986e63..0000000
+++ /dev/null
@@ -1,453 +0,0 @@
-/*
- * Geneve: Generic Network Virtualization Encapsulation
- *
- * Copyright (c) 2014 Nicira, Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/skbuff.h>
-#include <linux/list.h>
-#include <linux/netdevice.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/udp.h>
-#include <linux/igmp.h>
-#include <linux/etherdevice.h>
-#include <linux/if_ether.h>
-#include <linux/if_vlan.h>
-#include <linux/ethtool.h>
-#include <linux/mutex.h>
-#include <net/arp.h>
-#include <net/ndisc.h>
-#include <net/ip.h>
-#include <net/ip_tunnels.h>
-#include <net/icmp.h>
-#include <net/udp.h>
-#include <net/rtnetlink.h>
-#include <net/route.h>
-#include <net/dsfield.h>
-#include <net/inet_ecn.h>
-#include <net/net_namespace.h>
-#include <net/netns/generic.h>
-#include <net/geneve.h>
-#include <net/protocol.h>
-#include <net/udp_tunnel.h>
-#if IS_ENABLED(CONFIG_IPV6)
-#include <net/ipv6.h>
-#include <net/addrconf.h>
-#include <net/ip6_tunnel.h>
-#include <net/ip6_checksum.h>
-#endif
-
-/* Protects sock_list and refcounts. */
-static DEFINE_MUTEX(geneve_mutex);
-
-/* per-network namespace private data for this module */
-struct geneve_net {
-       struct list_head        sock_list;
-};
-
-static int geneve_net_id;
-
-static inline struct genevehdr *geneve_hdr(const struct sk_buff *skb)
-{
-       return (struct genevehdr *)(udp_hdr(skb) + 1);
-}
-
-static struct geneve_sock *geneve_find_sock(struct net *net,
-                                           sa_family_t family, __be16 port)
-{
-       struct geneve_net *gn = net_generic(net, geneve_net_id);
-       struct geneve_sock *gs;
-
-       list_for_each_entry(gs, &gn->sock_list, list) {
-               if (inet_sk(gs->sock->sk)->inet_sport == port &&
-                   inet_sk(gs->sock->sk)->sk.sk_family == family)
-                       return gs;
-       }
-
-       return NULL;
-}
-
-static void geneve_build_header(struct genevehdr *geneveh,
-                               __be16 tun_flags, u8 vni[3],
-                               u8 options_len, u8 *options)
-{
-       geneveh->ver = GENEVE_VER;
-       geneveh->opt_len = options_len / 4;
-       geneveh->oam = !!(tun_flags & TUNNEL_OAM);
-       geneveh->critical = !!(tun_flags & TUNNEL_CRIT_OPT);
-       geneveh->rsvd1 = 0;
-       memcpy(geneveh->vni, vni, 3);
-       geneveh->proto_type = htons(ETH_P_TEB);
-       geneveh->rsvd2 = 0;
-
-       memcpy(geneveh->options, options, options_len);
-}
-
-/* Transmit a fully formatted Geneve frame.
- *
- * When calling this function. The skb->data should point
- * to the geneve header which is fully formed.
- *
- * This function will add other UDP tunnel headers.
- */
-int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
-                   struct sk_buff *skb, __be32 src, __be32 dst, __u8 tos,
-                   __u8 ttl, __be16 df, __be16 src_port, __be16 dst_port,
-                   __be16 tun_flags, u8 vni[3], u8 opt_len, u8 *opt,
-                   bool csum, bool xnet)
-{
-       struct genevehdr *gnvh;
-       int min_headroom;
-       int err;
-
-       min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
-                       + GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr)
-                       + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
-
-       err = skb_cow_head(skb, min_headroom);
-       if (unlikely(err)) {
-               kfree_skb(skb);
-               return err;
-       }
-
-       skb = vlan_hwaccel_push_inside(skb);
-       if (unlikely(!skb))
-               return -ENOMEM;
-
-       skb = udp_tunnel_handle_offloads(skb, csum);
-       if (IS_ERR(skb))
-               return PTR_ERR(skb);
-
-       gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
-       geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);
-
-       skb_set_inner_protocol(skb, htons(ETH_P_TEB));
-
-       return udp_tunnel_xmit_skb(rt, gs->sock->sk, skb, src, dst,
-                                  tos, ttl, df, src_port, dst_port, xnet,
-                                  !csum);
-}
-EXPORT_SYMBOL_GPL(geneve_xmit_skb);
-
-static int geneve_hlen(struct genevehdr *gh)
-{
-       return sizeof(*gh) + gh->opt_len * 4;
-}
-
-static struct sk_buff **geneve_gro_receive(struct sk_buff **head,
-                                          struct sk_buff *skb,
-                                          struct udp_offload *uoff)
-{
-       struct sk_buff *p, **pp = NULL;
-       struct genevehdr *gh, *gh2;
-       unsigned int hlen, gh_len, off_gnv;
-       const struct packet_offload *ptype;
-       __be16 type;
-       int flush = 1;
-
-       off_gnv = skb_gro_offset(skb);
-       hlen = off_gnv + sizeof(*gh);
-       gh = skb_gro_header_fast(skb, off_gnv);
-       if (skb_gro_header_hard(skb, hlen)) {
-               gh = skb_gro_header_slow(skb, hlen, off_gnv);
-               if (unlikely(!gh))
-                       goto out;
-       }
-
-       if (gh->ver != GENEVE_VER || gh->oam)
-               goto out;
-       gh_len = geneve_hlen(gh);
-
-       hlen = off_gnv + gh_len;
-       if (skb_gro_header_hard(skb, hlen)) {
-               gh = skb_gro_header_slow(skb, hlen, off_gnv);
-               if (unlikely(!gh))
-                       goto out;
-       }
-
-       flush = 0;
-
-       for (p = *head; p; p = p->next) {
-               if (!NAPI_GRO_CB(p)->same_flow)
-                       continue;
-
-               gh2 = (struct genevehdr *)(p->data + off_gnv);
-               if (gh->opt_len != gh2->opt_len ||
-                   memcmp(gh, gh2, gh_len)) {
-                       NAPI_GRO_CB(p)->same_flow = 0;
-                       continue;
-               }
-       }
-
-       type = gh->proto_type;
-
-       rcu_read_lock();
-       ptype = gro_find_receive_by_type(type);
-       if (!ptype) {
-               flush = 1;
-               goto out_unlock;
-       }
-
-       skb_gro_pull(skb, gh_len);
-       skb_gro_postpull_rcsum(skb, gh, gh_len);
-       pp = ptype->callbacks.gro_receive(head, skb);
-
-out_unlock:
-       rcu_read_unlock();
-out:
-       NAPI_GRO_CB(skb)->flush |= flush;
-
-       return pp;
-}
-
-static int geneve_gro_complete(struct sk_buff *skb, int nhoff,
-                              struct udp_offload *uoff)
-{
-       struct genevehdr *gh;
-       struct packet_offload *ptype;
-       __be16 type;
-       int gh_len;
-       int err = -ENOSYS;
-
-       udp_tunnel_gro_complete(skb, nhoff);
-
-       gh = (struct genevehdr *)(skb->data + nhoff);
-       gh_len = geneve_hlen(gh);
-       type = gh->proto_type;
-
-       rcu_read_lock();
-       ptype = gro_find_complete_by_type(type);
-       if (ptype)
-               err = ptype->callbacks.gro_complete(skb, nhoff + gh_len);
-
-       rcu_read_unlock();
-       return err;
-}
-
-static void geneve_notify_add_rx_port(struct geneve_sock *gs)
-{
-       struct sock *sk = gs->sock->sk;
-       sa_family_t sa_family = sk->sk_family;
-       int err;
-
-       if (sa_family == AF_INET) {
-               err = udp_add_offload(&gs->udp_offloads);
-               if (err)
-                       pr_warn("geneve: udp_add_offload failed with status %d\n",
-                               err);
-       }
-}
-
-static void geneve_notify_del_rx_port(struct geneve_sock *gs)
-{
-       struct sock *sk = gs->sock->sk;
-       sa_family_t sa_family = sk->sk_family;
-
-       if (sa_family == AF_INET)
-               udp_del_offload(&gs->udp_offloads);
-}
-
-/* Callback from net/ipv4/udp.c to receive packets */
-static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
-{
-       struct genevehdr *geneveh;
-       struct geneve_sock *gs;
-       int opts_len;
-
-       /* Need Geneve and inner Ethernet header to be present */
-       if (unlikely(!pskb_may_pull(skb, GENEVE_BASE_HLEN)))
-               goto error;
-
-       /* Return packets with reserved bits set */
-       geneveh = geneve_hdr(skb);
-
-       if (unlikely(geneveh->ver != GENEVE_VER))
-               goto error;
-
-       if (unlikely(geneveh->proto_type != htons(ETH_P_TEB)))
-               goto error;
-
-       opts_len = geneveh->opt_len * 4;
-       if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len,
-                                htons(ETH_P_TEB)))
-               goto drop;
-
-       gs = rcu_dereference_sk_user_data(sk);
-       if (!gs)
-               goto drop;
-
-       gs->rcv(gs, skb);
-       return 0;
-
-drop:
-       /* Consume bad packet */
-       kfree_skb(skb);
-       return 0;
-
-error:
-       /* Let the UDP layer deal with the skb */
-       return 1;
-}
-
-static struct socket *geneve_create_sock(struct net *net, bool ipv6,
-                                        __be16 port)
-{
-       struct socket *sock;
-       struct udp_port_cfg udp_conf;
-       int err;
-
-       memset(&udp_conf, 0, sizeof(udp_conf));
-
-       if (ipv6) {
-               udp_conf.family = AF_INET6;
-       } else {
-               udp_conf.family = AF_INET;
-               udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
-       }
-
-       udp_conf.local_udp_port = port;
-
-       /* Open UDP socket */
-       err = udp_sock_create(net, &udp_conf, &sock);
-       if (err < 0)
-               return ERR_PTR(err);
-
-       return sock;
-}
-
-/* Create new listen socket if needed */
-static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port,
-                                               geneve_rcv_t *rcv, void *data,
-                                               bool ipv6)
-{
-       struct geneve_net *gn = net_generic(net, geneve_net_id);
-       struct geneve_sock *gs;
-       struct socket *sock;
-       struct udp_tunnel_sock_cfg tunnel_cfg;
-
-       gs = kzalloc(sizeof(*gs), GFP_KERNEL);
-       if (!gs)
-               return ERR_PTR(-ENOMEM);
-
-       sock = geneve_create_sock(net, ipv6, port);
-       if (IS_ERR(sock)) {
-               kfree(gs);
-               return ERR_CAST(sock);
-       }
-
-       gs->sock = sock;
-       gs->refcnt = 1;
-       gs->rcv = rcv;
-       gs->rcv_data = data;
-
-       /* Initialize the geneve udp offloads structure */
-       gs->udp_offloads.port = port;
-       gs->udp_offloads.callbacks.gro_receive  = geneve_gro_receive;
-       gs->udp_offloads.callbacks.gro_complete = geneve_gro_complete;
-       geneve_notify_add_rx_port(gs);
-
-       /* Mark socket as an encapsulation socket */
-       tunnel_cfg.sk_user_data = gs;
-       tunnel_cfg.encap_type = 1;
-       tunnel_cfg.encap_rcv = geneve_udp_encap_recv;
-       tunnel_cfg.encap_destroy = NULL;
-       setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
-
-       list_add(&gs->list, &gn->sock_list);
-
-       return gs;
-}
-
-struct geneve_sock *geneve_sock_add(struct net *net, __be16 port,
-                                   geneve_rcv_t *rcv, void *data,
-                                   bool no_share, bool ipv6)
-{
-       struct geneve_sock *gs;
-
-       mutex_lock(&geneve_mutex);
-
-       gs = geneve_find_sock(net, ipv6 ? AF_INET6 : AF_INET, port);
-       if (gs) {
-               if (!no_share && gs->rcv == rcv)
-                       gs->refcnt++;
-               else
-                       gs = ERR_PTR(-EBUSY);
-       } else {
-               gs = geneve_socket_create(net, port, rcv, data, ipv6);
-       }
-
-       mutex_unlock(&geneve_mutex);
-
-       return gs;
-}
-EXPORT_SYMBOL_GPL(geneve_sock_add);
-
-void geneve_sock_release(struct geneve_sock *gs)
-{
-       mutex_lock(&geneve_mutex);
-
-       if (--gs->refcnt)
-               goto unlock;
-
-       list_del(&gs->list);
-       geneve_notify_del_rx_port(gs);
-       udp_tunnel_sock_release(gs->sock);
-       kfree_rcu(gs, rcu);
-
-unlock:
-       mutex_unlock(&geneve_mutex);
-}
-EXPORT_SYMBOL_GPL(geneve_sock_release);
-
-static __net_init int geneve_init_net(struct net *net)
-{
-       struct geneve_net *gn = net_generic(net, geneve_net_id);
-
-       INIT_LIST_HEAD(&gn->sock_list);
-
-       return 0;
-}
-
-static struct pernet_operations geneve_net_ops = {
-       .init = geneve_init_net,
-       .id   = &geneve_net_id,
-       .size = sizeof(struct geneve_net),
-};
-
-static int __init geneve_init_module(void)
-{
-       int rc;
-
-       rc = register_pernet_subsys(&geneve_net_ops);
-       if (rc)
-               return rc;
-
-       pr_info("Geneve driver\n");
-
-       return 0;
-}
-module_init(geneve_init_module);
-
-static void __exit geneve_cleanup_module(void)
-{
-       unregister_pernet_subsys(&geneve_net_ops);
-}
-module_exit(geneve_cleanup_module);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Jesse Gross <jesse@nicira.com>");
-MODULE_DESCRIPTION("Driver for GENEVE encapsulated traffic");
-MODULE_ALIAS_RTNL_LINK("geneve");
diff --git a/net/ipv4/geneve_core.c b/net/ipv4/geneve_core.c
new file mode 100644 (file)
index 0000000..311a4ba
--- /dev/null
@@ -0,0 +1,447 @@
+/*
+ * Geneve: Generic Network Virtualization Encapsulation
+ *
+ * Copyright (c) 2014 Nicira, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/igmp.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/ethtool.h>
+#include <linux/mutex.h>
+#include <net/arp.h>
+#include <net/ndisc.h>
+#include <net/ip.h>
+#include <net/ip_tunnels.h>
+#include <net/icmp.h>
+#include <net/udp.h>
+#include <net/rtnetlink.h>
+#include <net/route.h>
+#include <net/dsfield.h>
+#include <net/inet_ecn.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+#include <net/geneve.h>
+#include <net/protocol.h>
+#include <net/udp_tunnel.h>
+#if IS_ENABLED(CONFIG_IPV6)
+#include <net/ipv6.h>
+#include <net/addrconf.h>
+#include <net/ip6_tunnel.h>
+#include <net/ip6_checksum.h>
+#endif
+
+/* Protects sock_list and refcounts. */
+static DEFINE_MUTEX(geneve_mutex);
+
+/* per-network namespace private data for this module */
+struct geneve_net {
+       struct list_head        sock_list;
+};
+
+static int geneve_net_id;
+
+static struct geneve_sock *geneve_find_sock(struct net *net,
+                                           sa_family_t family, __be16 port)
+{
+       struct geneve_net *gn = net_generic(net, geneve_net_id);
+       struct geneve_sock *gs;
+
+       list_for_each_entry(gs, &gn->sock_list, list) {
+               if (inet_sk(gs->sock->sk)->inet_sport == port &&
+                   inet_sk(gs->sock->sk)->sk.sk_family == family)
+                       return gs;
+       }
+
+       return NULL;
+}
+
+static void geneve_build_header(struct genevehdr *geneveh,
+                               __be16 tun_flags, u8 vni[3],
+                               u8 options_len, u8 *options)
+{
+       geneveh->ver = GENEVE_VER;
+       geneveh->opt_len = options_len / 4;
+       geneveh->oam = !!(tun_flags & TUNNEL_OAM);
+       geneveh->critical = !!(tun_flags & TUNNEL_CRIT_OPT);
+       geneveh->rsvd1 = 0;
+       memcpy(geneveh->vni, vni, 3);
+       geneveh->proto_type = htons(ETH_P_TEB);
+       geneveh->rsvd2 = 0;
+
+       memcpy(geneveh->options, options, options_len);
+}
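
For orientation, the fields written above correspond to the Geneve base header as laid out in the Geneve Internet-Draft (a summary for the reader, not code from this patch):

/* Geneve base header, per the Geneve Internet-Draft:
 *
 *   Ver (2 bits) | Opt Len (6 bits) | O | C | Rsvd (6 bits) | Protocol Type (16 bits)
 *   Virtual Network Identifier (24 bits)                    | Reserved (8 bits)
 *   Variable-Length Options (opt_len * 4 bytes)
 *
 * opt_len counts 4-byte words, which is why options_len is divided by 4
 * above; proto_type is ETH_P_TEB because the payload is an Ethernet frame.
 */
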
+
+/* Transmit a fully formatted Geneve frame.
+ *
+ * When calling this function, skb->data should point
+ * to the fully formed Geneve header.
+ *
+ * This function will add other UDP tunnel headers.
+ */
+int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
+                   struct sk_buff *skb, __be32 src, __be32 dst, __u8 tos,
+                   __u8 ttl, __be16 df, __be16 src_port, __be16 dst_port,
+                   __be16 tun_flags, u8 vni[3], u8 opt_len, u8 *opt,
+                   bool csum, bool xnet)
+{
+       struct genevehdr *gnvh;
+       int min_headroom;
+       int err;
+
+       min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
+                       + GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr)
+                       + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
+
+       err = skb_cow_head(skb, min_headroom);
+       if (unlikely(err)) {
+               kfree_skb(skb);
+               return err;
+       }
+
+       skb = vlan_hwaccel_push_inside(skb);
+       if (unlikely(!skb))
+               return -ENOMEM;
+
+       skb = udp_tunnel_handle_offloads(skb, csum);
+       if (IS_ERR(skb))
+               return PTR_ERR(skb);
+
+       gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
+       geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);
+
+       skb_set_inner_protocol(skb, htons(ETH_P_TEB));
+
+       return udp_tunnel_xmit_skb(rt, gs->sock->sk, skb, src, dst,
+                                  tos, ttl, df, src_port, dst_port, xnet,
+                                  !csum);
+}
+EXPORT_SYMBOL_GPL(geneve_xmit_skb);
+
+static int geneve_hlen(struct genevehdr *gh)
+{
+       return sizeof(*gh) + gh->opt_len * 4;
+}
+
+static struct sk_buff **geneve_gro_receive(struct sk_buff **head,
+                                          struct sk_buff *skb,
+                                          struct udp_offload *uoff)
+{
+       struct sk_buff *p, **pp = NULL;
+       struct genevehdr *gh, *gh2;
+       unsigned int hlen, gh_len, off_gnv;
+       const struct packet_offload *ptype;
+       __be16 type;
+       int flush = 1;
+
+       off_gnv = skb_gro_offset(skb);
+       hlen = off_gnv + sizeof(*gh);
+       gh = skb_gro_header_fast(skb, off_gnv);
+       if (skb_gro_header_hard(skb, hlen)) {
+               gh = skb_gro_header_slow(skb, hlen, off_gnv);
+               if (unlikely(!gh))
+                       goto out;
+       }
+
+       if (gh->ver != GENEVE_VER || gh->oam)
+               goto out;
+       gh_len = geneve_hlen(gh);
+
+       hlen = off_gnv + gh_len;
+       if (skb_gro_header_hard(skb, hlen)) {
+               gh = skb_gro_header_slow(skb, hlen, off_gnv);
+               if (unlikely(!gh))
+                       goto out;
+       }
+
+       flush = 0;
+
+       for (p = *head; p; p = p->next) {
+               if (!NAPI_GRO_CB(p)->same_flow)
+                       continue;
+
+               gh2 = (struct genevehdr *)(p->data + off_gnv);
+               if (gh->opt_len != gh2->opt_len ||
+                   memcmp(gh, gh2, gh_len)) {
+                       NAPI_GRO_CB(p)->same_flow = 0;
+                       continue;
+               }
+       }
+
+       type = gh->proto_type;
+
+       rcu_read_lock();
+       ptype = gro_find_receive_by_type(type);
+       if (!ptype) {
+               flush = 1;
+               goto out_unlock;
+       }
+
+       skb_gro_pull(skb, gh_len);
+       skb_gro_postpull_rcsum(skb, gh, gh_len);
+       pp = ptype->callbacks.gro_receive(head, skb);
+
+out_unlock:
+       rcu_read_unlock();
+out:
+       NAPI_GRO_CB(skb)->flush |= flush;
+
+       return pp;
+}
+
+static int geneve_gro_complete(struct sk_buff *skb, int nhoff,
+                              struct udp_offload *uoff)
+{
+       struct genevehdr *gh;
+       struct packet_offload *ptype;
+       __be16 type;
+       int gh_len;
+       int err = -ENOSYS;
+
+       udp_tunnel_gro_complete(skb, nhoff);
+
+       gh = (struct genevehdr *)(skb->data + nhoff);
+       gh_len = geneve_hlen(gh);
+       type = gh->proto_type;
+
+       rcu_read_lock();
+       ptype = gro_find_complete_by_type(type);
+       if (ptype)
+               err = ptype->callbacks.gro_complete(skb, nhoff + gh_len);
+
+       rcu_read_unlock();
+       return err;
+}
+
+static void geneve_notify_add_rx_port(struct geneve_sock *gs)
+{
+       struct sock *sk = gs->sock->sk;
+       sa_family_t sa_family = sk->sk_family;
+       int err;
+
+       if (sa_family == AF_INET) {
+               err = udp_add_offload(&gs->udp_offloads);
+               if (err)
+                       pr_warn("geneve: udp_add_offload failed with status %d\n",
+                               err);
+       }
+}
+
+static void geneve_notify_del_rx_port(struct geneve_sock *gs)
+{
+       struct sock *sk = gs->sock->sk;
+       sa_family_t sa_family = sk->sk_family;
+
+       if (sa_family == AF_INET)
+               udp_del_offload(&gs->udp_offloads);
+}
+
+/* Callback from net/ipv4/udp.c to receive packets */
+static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+{
+       struct genevehdr *geneveh;
+       struct geneve_sock *gs;
+       int opts_len;
+
+       /* Need Geneve and inner Ethernet header to be present */
+       if (unlikely(!pskb_may_pull(skb, GENEVE_BASE_HLEN)))
+               goto error;
+
+       /* Return packets with an unexpected version or protocol type */
+       geneveh = geneve_hdr(skb);
+
+       if (unlikely(geneveh->ver != GENEVE_VER))
+               goto error;
+
+       if (unlikely(geneveh->proto_type != htons(ETH_P_TEB)))
+               goto error;
+
+       opts_len = geneveh->opt_len * 4;
+       if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len,
+                                htons(ETH_P_TEB)))
+               goto drop;
+
+       gs = rcu_dereference_sk_user_data(sk);
+       if (!gs)
+               goto drop;
+
+       gs->rcv(gs, skb);
+       return 0;
+
+drop:
+       /* Consume bad packet */
+       kfree_skb(skb);
+       return 0;
+
+error:
+       /* Let the UDP layer deal with the skb */
+       return 1;
+}
+
+static struct socket *geneve_create_sock(struct net *net, bool ipv6,
+                                        __be16 port)
+{
+       struct socket *sock;
+       struct udp_port_cfg udp_conf;
+       int err;
+
+       memset(&udp_conf, 0, sizeof(udp_conf));
+
+       if (ipv6) {
+               udp_conf.family = AF_INET6;
+       } else {
+               udp_conf.family = AF_INET;
+               udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
+       }
+
+       udp_conf.local_udp_port = port;
+
+       /* Open UDP socket */
+       err = udp_sock_create(net, &udp_conf, &sock);
+       if (err < 0)
+               return ERR_PTR(err);
+
+       return sock;
+}
+
+/* Create new listen socket if needed */
+static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port,
+                                               geneve_rcv_t *rcv, void *data,
+                                               bool ipv6)
+{
+       struct geneve_net *gn = net_generic(net, geneve_net_id);
+       struct geneve_sock *gs;
+       struct socket *sock;
+       struct udp_tunnel_sock_cfg tunnel_cfg;
+
+       gs = kzalloc(sizeof(*gs), GFP_KERNEL);
+       if (!gs)
+               return ERR_PTR(-ENOMEM);
+
+       sock = geneve_create_sock(net, ipv6, port);
+       if (IS_ERR(sock)) {
+               kfree(gs);
+               return ERR_CAST(sock);
+       }
+
+       gs->sock = sock;
+       gs->refcnt = 1;
+       gs->rcv = rcv;
+       gs->rcv_data = data;
+
+       /* Initialize the geneve udp offloads structure */
+       gs->udp_offloads.port = port;
+       gs->udp_offloads.callbacks.gro_receive  = geneve_gro_receive;
+       gs->udp_offloads.callbacks.gro_complete = geneve_gro_complete;
+       geneve_notify_add_rx_port(gs);
+
+       /* Mark socket as an encapsulation socket */
+       tunnel_cfg.sk_user_data = gs;
+       tunnel_cfg.encap_type = 1;
+       tunnel_cfg.encap_rcv = geneve_udp_encap_recv;
+       tunnel_cfg.encap_destroy = NULL;
+       setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
+
+       list_add(&gs->list, &gn->sock_list);
+
+       return gs;
+}
+
+struct geneve_sock *geneve_sock_add(struct net *net, __be16 port,
+                                   geneve_rcv_t *rcv, void *data,
+                                   bool no_share, bool ipv6)
+{
+       struct geneve_sock *gs;
+
+       mutex_lock(&geneve_mutex);
+
+       gs = geneve_find_sock(net, ipv6 ? AF_INET6 : AF_INET, port);
+       if (gs) {
+               if (!no_share && gs->rcv == rcv)
+                       gs->refcnt++;
+               else
+                       gs = ERR_PTR(-EBUSY);
+       } else {
+               gs = geneve_socket_create(net, port, rcv, data, ipv6);
+       }
+
+       mutex_unlock(&geneve_mutex);
+
+       return gs;
+}
+EXPORT_SYMBOL_GPL(geneve_sock_add);
+
+void geneve_sock_release(struct geneve_sock *gs)
+{
+       mutex_lock(&geneve_mutex);
+
+       if (--gs->refcnt)
+               goto unlock;
+
+       list_del(&gs->list);
+       geneve_notify_del_rx_port(gs);
+       udp_tunnel_sock_release(gs->sock);
+       kfree_rcu(gs, rcu);
+
+unlock:
+       mutex_unlock(&geneve_mutex);
+}
+EXPORT_SYMBOL_GPL(geneve_sock_release);
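
A minimal sketch of a consumer of this socket API, assuming a hypothetical receive callback and open/close pair; 6081 is the IANA-assigned Geneve port, and error handling is trimmed:

#include <linux/err.h>
#include <net/geneve.h>

static struct geneve_sock *my_gs;

/* hypothetical decap handler: a real driver would push the inner
 * frame into its netdev RX path instead of dropping it */
static void my_rcv(struct geneve_sock *gs, struct sk_buff *skb)
{
	kfree_skb(skb);
}

static int my_open(struct net *net)
{
	my_gs = geneve_sock_add(net, htons(6081), my_rcv, NULL,
				false /* no_share */, false /* ipv6 */);
	return PTR_ERR_OR_ZERO(my_gs);
}

static void my_close(void)
{
	geneve_sock_release(my_gs);
}
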
+
+static __net_init int geneve_init_net(struct net *net)
+{
+       struct geneve_net *gn = net_generic(net, geneve_net_id);
+
+       INIT_LIST_HEAD(&gn->sock_list);
+
+       return 0;
+}
+
+static struct pernet_operations geneve_net_ops = {
+       .init = geneve_init_net,
+       .id   = &geneve_net_id,
+       .size = sizeof(struct geneve_net),
+};
+
+static int __init geneve_init_module(void)
+{
+       int rc;
+
+       rc = register_pernet_subsys(&geneve_net_ops);
+       if (rc)
+               return rc;
+
+       pr_info("Geneve core logic\n");
+
+       return 0;
+}
+module_init(geneve_init_module);
+
+static void __exit geneve_cleanup_module(void)
+{
+       unregister_pernet_subsys(&geneve_net_ops);
+}
+module_exit(geneve_cleanup_module);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jesse Gross <jesse@nicira.com>");
+MODULE_DESCRIPTION("Driver library for GENEVE encapsulated traffic");
index a3a697f5ffbaba1b30db8341ea9b51b229ac29df..651cdf648ec4728bff6e709b0324b7d52ffd65ed 100644 (file)
@@ -1339,6 +1339,168 @@ out:
 }
 EXPORT_SYMBOL(ip_mc_inc_group);
 
+static int ip_mc_check_iphdr(struct sk_buff *skb)
+{
+       const struct iphdr *iph;
+       unsigned int len;
+       unsigned int offset = skb_network_offset(skb) + sizeof(*iph);
+
+       if (!pskb_may_pull(skb, offset))
+               return -EINVAL;
+
+       iph = ip_hdr(skb);
+
+       if (iph->version != 4 || ip_hdrlen(skb) < sizeof(*iph))
+               return -EINVAL;
+
+       offset += ip_hdrlen(skb) - sizeof(*iph);
+
+       if (!pskb_may_pull(skb, offset))
+               return -EINVAL;
+
+       iph = ip_hdr(skb);
+
+       if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
+               return -EINVAL;
+
+       len = skb_network_offset(skb) + ntohs(iph->tot_len);
+       if (skb->len < len || len < offset)
+               return -EINVAL;
+
+       skb_set_transport_header(skb, offset);
+
+       return 0;
+}
+
+static int ip_mc_check_igmp_reportv3(struct sk_buff *skb)
+{
+       unsigned int len = skb_transport_offset(skb);
+
+       len += sizeof(struct igmpv3_report);
+
+       return pskb_may_pull(skb, len) ? 0 : -EINVAL;
+}
+
+static int ip_mc_check_igmp_query(struct sk_buff *skb)
+{
+       unsigned int len = skb_transport_offset(skb);
+
+       len += sizeof(struct igmphdr);
+       if (skb->len < len)
+               return -EINVAL;
+
+       /* IGMPv{1,2}? */
+       if (skb->len != len) {
+               /* or IGMPv3? */
+               len += sizeof(struct igmpv3_query) - sizeof(struct igmphdr);
+               if (skb->len < len || !pskb_may_pull(skb, len))
+                       return -EINVAL;
+       }
+
+       /* RFC 2236 and RFC 3376 (IGMPv2 and IGMPv3) require the multicast
+        * link-layer all-systems destination address (224.0.0.1) for
+        * general queries.
+        */
+       if (!igmp_hdr(skb)->group &&
+           ip_hdr(skb)->daddr != htonl(INADDR_ALLHOSTS_GROUP))
+               return -EINVAL;
+
+       return 0;
+}
+
+static int ip_mc_check_igmp_msg(struct sk_buff *skb)
+{
+       switch (igmp_hdr(skb)->type) {
+       case IGMP_HOST_LEAVE_MESSAGE:
+       case IGMP_HOST_MEMBERSHIP_REPORT:
+       case IGMPV2_HOST_MEMBERSHIP_REPORT:
+               return 0;
+       case IGMPV3_HOST_MEMBERSHIP_REPORT:
+               return ip_mc_check_igmp_reportv3(skb);
+       case IGMP_HOST_MEMBERSHIP_QUERY:
+               return ip_mc_check_igmp_query(skb);
+       default:
+               return -ENOMSG;
+       }
+}
+
+static inline __sum16 ip_mc_validate_checksum(struct sk_buff *skb)
+{
+       return skb_checksum_simple_validate(skb);
+}
+
+static int __ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
+{
+       struct sk_buff *skb_chk;
+       unsigned int transport_len;
+       unsigned int len = skb_transport_offset(skb) + sizeof(struct igmphdr);
+       int ret;
+
+       transport_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
+
+       skb_get(skb);
+       skb_chk = skb_checksum_trimmed(skb, transport_len,
+                                      ip_mc_validate_checksum);
+       if (!skb_chk)
+               return -EINVAL;
+
+       if (!pskb_may_pull(skb_chk, len)) {
+               kfree_skb(skb_chk);
+               return -EINVAL;
+       }
+
+       ret = ip_mc_check_igmp_msg(skb_chk);
+       if (ret) {
+               kfree_skb(skb_chk);
+               return ret;
+       }
+
+       if (skb_trimmed)
+               *skb_trimmed = skb_chk;
+       else
+               kfree_skb(skb_chk);
+
+       return 0;
+}
+
+/**
+ * ip_mc_check_igmp - checks whether this is a sane IGMP packet
+ * @skb: the skb to validate
+ * @skb_trimmed: to store an skb pointer trimmed to IPv4 packet tail (optional)
+ *
+ * Checks whether an IPv4 packet is a valid IGMP packet. If so, sets the
+ * skb's network and transport headers accordingly and returns zero.
+ *
+ * -EINVAL: A broken packet was detected, i.e. it violates some internet
+ *  standard.
+ * -ENOMSG: IP header validation succeeded, but it is not an IGMP packet.
+ * -ENOMEM: A memory allocation failure happened.
+ *
+ * Optionally, an skb pointer might be provided via @skb_trimmed (or set it
+ * to NULL): after successfully parsing an IGMP packet, it will point to
+ * an skb whose tail is aligned to the end of the IP packet. This might
+ * either be the originally provided skb or a trimmed, cloned version if
+ * the skb frame had data beyond the IP packet. A cloned skb allows us
+ * to leave the original skb and its full frame unchanged (which might be
+ * desirable for layer 2 frame jugglers).
+ *
+ * The caller must release a reference count on any skb returned via
+ * @skb_trimmed.
+ */
+int ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
+{
+       int ret = ip_mc_check_iphdr(skb);
+
+       if (ret < 0)
+               return ret;
+
+       if (ip_hdr(skb)->protocol != IPPROTO_IGMP)
+               return -ENOMSG;
+
+       return __ip_mc_check_igmp(skb, skb_trimmed);
+}
+EXPORT_SYMBOL(ip_mc_check_igmp);
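
A hedged sketch of a caller honouring the @skb_trimmed contract above; the surrounding function is hypothetical, only ip_mc_check_igmp is from this patch:

static void my_snoop_igmp(struct sk_buff *skb)
{
	struct sk_buff *trimmed = NULL;

	/* -EINVAL: broken packet, -ENOMSG: valid IP but not IGMP */
	if (ip_mc_check_igmp(skb, &trimmed) < 0)
		return;

	/* inspect igmp_hdr(trimmed) here; its tail is aligned to the
	 * end of the IP packet even if the L2 frame carried padding */

	kfree_skb(trimmed);	/* drop the reference the helper took */
}
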
+
 /*
  *     Resend IGMP JOIN report; used by netdev notifier.
  */
index 8976ca423a074447f0d857973ab9ea3bc6bbca7c..60021d0d9326ac691dcef21e1f9c20de5f8fe7c6 100644 (file)
@@ -99,6 +99,7 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum)
        struct net *net = sock_net(sk);
        int smallest_size = -1, smallest_rover;
        kuid_t uid = sock_i_uid(sk);
+       int attempt_half = (sk->sk_reuse == SK_CAN_REUSE) ? 1 : 0;
 
        local_bh_disable();
        if (!snum) {
@@ -106,6 +107,14 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum)
 
 again:
                inet_get_local_port_range(net, &low, &high);
+               if (attempt_half) {
+                       int half = low + ((high - low) >> 1);
+
+                       if (attempt_half == 1)
+                               high = half;
+                       else
+                               low = half;
+               }
                remaining = (high - low) + 1;
                smallest_rover = rover = prandom_u32() % remaining + low;
 
@@ -127,11 +136,6 @@ again:
                                            (tb->num_owners < smallest_size || smallest_size == -1)) {
                                                smallest_size = tb->num_owners;
                                                smallest_rover = rover;
-                                               if (atomic_read(&hashinfo->bsockets) > (high - low) + 1 &&
-                                                   !inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false)) {
-                                                       snum = smallest_rover;
-                                                       goto tb_found;
-                                               }
                                        }
                                        if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false)) {
                                                snum = rover;
@@ -159,6 +163,11 @@ again:
                                snum = smallest_rover;
                                goto have_snum;
                        }
+                       if (attempt_half == 1) {
+                               /* OK we now try the upper half of the range */
+                               attempt_half = 2;
+                               goto again;
+                       }
                        goto fail;
                }
                /* OK, here is the one we will use.  HEAD is
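
As a worked example of the split, with the then-default ip_local_port_range of 32768-61000 (illustrative numbers):

/* half = 32768 + ((61000 - 32768) >> 1) = 46884
 *
 * pass 1 (attempt_half == 1): search [32768, 46884]
 * pass 2 (attempt_half == 2): search [46884, 61000]
 *
 * SO_REUSEADDR listeners are therefore autobound from the lower half
 * first, leaving the upper half to connect()'s ephemeral ports.
 */
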
index c6fb80bd5826ea840eebd033fb87d01c595ab120..5f9b063bbe8ab4f3755a5711ae19b816a3bc2026 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/wait.h>
+#include <linux/vmalloc.h>
 
 #include <net/inet_connection_sock.h>
 #include <net/inet_hashtables.h>
@@ -90,10 +91,6 @@ void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket
 void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
                    const unsigned short snum)
 {
-       struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
-
-       atomic_inc(&hashinfo->bsockets);
-
        inet_sk(sk)->inet_num = snum;
        sk_add_bind_node(sk, &tb->owners);
        tb->num_owners++;
@@ -111,8 +108,6 @@ static void __inet_put_port(struct sock *sk)
        struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash];
        struct inet_bind_bucket *tb;
 
-       atomic_dec(&hashinfo->bsockets);
-
        spin_lock(&head->lock);
        tb = inet_csk(sk)->icsk_bind_hash;
        __sk_del_bind_node(sk);
@@ -399,9 +394,10 @@ not_unique:
        return -EADDRNOTAVAIL;
 }
 
-static inline u32 inet_sk_port_offset(const struct sock *sk)
+static u32 inet_sk_port_offset(const struct sock *sk)
 {
        const struct inet_sock *inet = inet_sk(sk);
+
        return secure_ipv4_port_ephemeral(inet->inet_rcv_saddr,
                                          inet->inet_daddr,
                                          inet->inet_dport);
@@ -507,8 +503,14 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
                inet_get_local_port_range(net, &low, &high);
                remaining = (high - low) + 1;
 
+               /* By starting with offset being an even number,
+                * we tend to leave about 50% of ports for other uses,
+                * like bind(0).
+                */
+               offset &= ~1;
+
                local_bh_disable();
-               for (i = 1; i <= remaining; i++) {
+               for (i = 0; i < remaining; i++) {
                        port = low + (i + offset) % remaining;
                        if (inet_is_local_reserved_port(net, port))
                                continue;
@@ -552,7 +554,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
                return -EADDRNOTAVAIL;
 
 ok:
-               hint += i;
+               hint += (i + 2) & ~1;
 
                /* Head lock still held and bh's disabled */
                inet_bind_hash(sk, tb, port);
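
To make the parity trick concrete (illustrative values, no conflicts assumed):

/* offset is forced even, say low = 32768 and offset = 1000:
 *
 *   1st connect: i == 0 -> port 33768; hint += (0 + 2) & ~1 == 2
 *   2nd connect: i == 0 -> port 33770; hint += 2 again
 *
 * Odd offsets (33769, 33771, ...) are only consumed when an even
 * candidate is busy, so roughly half the range stays available for
 * explicit bind() with port 0.
 */
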
@@ -599,7 +601,11 @@ out:
 int inet_hash_connect(struct inet_timewait_death_row *death_row,
                      struct sock *sk)
 {
-       return __inet_hash_connect(death_row, sk, inet_sk_port_offset(sk),
+       u32 port_offset = 0;
+
+       if (!inet_sk(sk)->inet_num)
+               port_offset = inet_sk_port_offset(sk);
+       return __inet_hash_connect(death_row, sk, port_offset,
                                   __inet_check_established);
 }
 EXPORT_SYMBOL_GPL(inet_hash_connect);
@@ -608,7 +614,6 @@ void inet_hashinfo_init(struct inet_hashinfo *h)
 {
        int i;
 
-       atomic_set(&h->bsockets, 0);
        for (i = 0; i < INET_LHTABLE_SIZE; i++) {
                spin_lock_init(&h->listening_hash[i].lock);
                INIT_HLIST_NULLS_HEAD(&h->listening_hash[i].head,
@@ -616,3 +621,33 @@ void inet_hashinfo_init(struct inet_hashinfo *h)
                }
 }
 EXPORT_SYMBOL_GPL(inet_hashinfo_init);
+
+int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
+{
+       unsigned int i, nblocks = 1;
+
+       if (sizeof(spinlock_t) != 0) {
+               /* allocate 2 cache lines or at least one spinlock per cpu */
+               nblocks = max_t(unsigned int,
+                               2 * L1_CACHE_BYTES / sizeof(spinlock_t),
+                               1);
+               nblocks = roundup_pow_of_two(nblocks * num_possible_cpus());
+
+               /* no more locks than number of hash buckets */
+               nblocks = min(nblocks, hashinfo->ehash_mask + 1);
+
+               hashinfo->ehash_locks = kmalloc_array(nblocks, sizeof(spinlock_t),
+                                                     GFP_KERNEL | __GFP_NOWARN);
+               if (!hashinfo->ehash_locks)
+                       hashinfo->ehash_locks = vmalloc(nblocks * sizeof(spinlock_t));
+
+               if (!hashinfo->ehash_locks)
+                       return -ENOMEM;
+
+               for (i = 0; i < nblocks; i++)
+                       spin_lock_init(&hashinfo->ehash_locks[i]);
+       }
+       hashinfo->ehash_locks_mask = nblocks - 1;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(inet_ehash_locks_alloc);
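
Under this heuristic, a box with 64-byte cache lines, 4-byte spinlocks and 8 possible CPUs would size the array as follows (illustrative arithmetic):

/* per-cpu target : 2 * 64 / 4               = 32 locks
 * scaled         : 32 * 8                   = 256, already a power of 2
 * upper bound    : min(256, ehash_mask + 1) - never more locks than
 *                                             hash buckets
 */
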
index 00ec8d5d7e7ee2f1c79dc7446127f13c7e23a331..2ffbd16b79e00279235244c3412046062a86fec5 100644 (file)
@@ -170,7 +170,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
 }
 EXPORT_SYMBOL_GPL(__inet_twsk_hashdance);
 
-void tw_timer_handler(unsigned long data)
+static void tw_timer_handler(unsigned long data)
 {
        struct inet_timewait_sock *tw = (struct inet_timewait_sock *)data;
 
index 3674484946a5abeb14ff4d11dc9b7666458f8128..2d3aa408fbdca19230224269ebbcc7124dc7f5bb 100644 (file)
 #include <net/route.h>
 #include <net/xfrm.h>
 
-static bool ip_may_fragment(const struct sk_buff *skb)
-{
-       return unlikely((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0) ||
-               skb->ignore_df;
-}
-
 static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
 {
        if (skb->len <= mtu)
                return false;
 
+       if (unlikely((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0))
+               return false;
+
+       /* original fragment exceeds mtu and DF is set */
+       if (unlikely(IPCB(skb)->frag_max_size > mtu))
+               return true;
+
+       if (skb->ignore_df)
+               return false;
+
        if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
                return false;
 
@@ -114,7 +118,7 @@ int ip_forward(struct sk_buff *skb)
 
        IPCB(skb)->flags |= IPSKB_FORWARDED;
        mtu = ip_dst_mtu_maybe_forward(&rt->dst, true);
-       if (!ip_may_fragment(skb) && ip_exceeds_mtu(skb, mtu)) {
+       if (ip_exceeds_mtu(skb, mtu)) {
                IP_INC_STATS(dev_net(rt->dst.dev), IPSTATS_MIB_FRAGFAILS);
                icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
                          htonl(mtu));
index cc1da6d9cb351de56c7f357faebe32cdbb6f7c27..a50dc6d408d11c339b38f2436216c8568c4149cf 100644 (file)
@@ -75,6 +75,7 @@ struct ipq {
        __be16          id;
        u8              protocol;
        u8              ecn; /* RFC3168 support */
+       u16             max_df_size; /* largest frag with DF set seen */
        int             iif;
        unsigned int    rid;
        struct inet_peer *peer;
@@ -173,6 +174,15 @@ static void ipq_kill(struct ipq *ipq)
        inet_frag_kill(&ipq->q, &ip4_frags);
 }
 
+static bool frag_expire_skip_icmp(u32 user)
+{
+       return user == IP_DEFRAG_AF_PACKET ||
+              ip_defrag_user_in_between(user, IP_DEFRAG_CONNTRACK_IN,
+                                        __IP_DEFRAG_CONNTRACK_IN_END) ||
+              ip_defrag_user_in_between(user, IP_DEFRAG_CONNTRACK_BRIDGE_IN,
+                                        __IP_DEFRAG_CONNTRACK_BRIDGE_IN);
+}
+
 /*
  * Oops, a fragment queue timed out.  Kill it and send an ICMP reply.
  */
@@ -217,10 +227,8 @@ static void ip_expire(unsigned long arg)
                /* Only an end host needs to send an ICMP
                 * "Fragment Reassembly Timeout" message, per RFC792.
                 */
-               if (qp->user == IP_DEFRAG_AF_PACKET ||
-                   ((qp->user >= IP_DEFRAG_CONNTRACK_IN) &&
-                    (qp->user <= __IP_DEFRAG_CONNTRACK_IN_END) &&
-                    (skb_rtable(head)->rt_type != RTN_LOCAL)))
+               if (frag_expire_skip_icmp(qp->user) &&
+                   (skb_rtable(head)->rt_type != RTN_LOCAL))
                        goto out_rcu_unlock;
 
                /* Send an ICMP "Fragment Reassembly Timeout" message. */
@@ -319,6 +327,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
 {
        struct sk_buff *prev, *next;
        struct net_device *dev;
+       unsigned int fragsize;
        int flags, offset;
        int ihl, end;
        int err = -ENOENT;
@@ -474,9 +483,14 @@ found:
        if (offset == 0)
                qp->q.flags |= INET_FRAG_FIRST_IN;
 
+       fragsize = skb->len + ihl;
+
+       if (fragsize > qp->q.max_size)
+               qp->q.max_size = fragsize;
+
        if (ip_hdr(skb)->frag_off & htons(IP_DF) &&
-           skb->len + ihl > qp->q.max_size)
-               qp->q.max_size = skb->len + ihl;
+           fragsize > qp->max_df_size)
+               qp->max_df_size = fragsize;
 
        if (qp->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
            qp->q.meat == qp->q.len) {
@@ -606,13 +620,27 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
        head->next = NULL;
        head->dev = dev;
        head->tstamp = qp->q.stamp;
-       IPCB(head)->frag_max_size = qp->q.max_size;
+       IPCB(head)->frag_max_size = max(qp->max_df_size, qp->q.max_size);
 
        iph = ip_hdr(head);
-       /* max_size != 0 implies at least one fragment had IP_DF set */
-       iph->frag_off = qp->q.max_size ? htons(IP_DF) : 0;
        iph->tot_len = htons(len);
        iph->tos |= ecn;
+
+       /* When we set IP_DF on a refragmented skb, we must also force a
+        * call to ip_fragment to avoid forwarding a DF skb of size s while
+        * the original sender only sent fragments of size f (where f < s).
+        *
+        * We only set DF/IPSKB_FRAG_PMTU if such a DF fragment was the
+        * largest fragment seen, to avoid sending tiny DF fragments in case
+        * the skb was built from one very small DF fragment and one large
+        * non-DF fragment.
+        */
+       if (qp->max_df_size == qp->q.max_size) {
+               IPCB(head)->flags |= IPSKB_FRAG_PMTU;
+               iph->frag_off = htons(IP_DF);
+       } else {
+               iph->frag_off = 0;
+       }
+
        IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
        qp->q.fragments = NULL;
        qp->q.fragments_tail = NULL;
index c65b93a7b7113660d9f946128c0a4acee810de0f..f5f5ef1cebd518a1eedf97241bb4f375c94c9690 100644 (file)
 int sysctl_ip_default_ttl __read_mostly = IPDEFTTL;
 EXPORT_SYMBOL(sysctl_ip_default_ttl);
 
+static int ip_fragment(struct sock *sk, struct sk_buff *skb,
+                      unsigned int mtu,
+                      int (*output)(struct sock *, struct sk_buff *));
+
 /* Generate a checksum for an outgoing IP datagram. */
 void ip_send_check(struct iphdr *iph)
 {
@@ -91,7 +95,7 @@ void ip_send_check(struct iphdr *iph)
 }
 EXPORT_SYMBOL(ip_send_check);
 
-int __ip_local_out_sk(struct sock *sk, struct sk_buff *skb)
+static int __ip_local_out_sk(struct sock *sk, struct sk_buff *skb)
 {
        struct iphdr *iph = ip_hdr(skb);
 
@@ -216,7 +220,8 @@ static inline int ip_finish_output2(struct sock *sk, struct sk_buff *skb)
        return -EINVAL;
 }
 
-static int ip_finish_output_gso(struct sock *sk, struct sk_buff *skb)
+static int ip_finish_output_gso(struct sock *sk, struct sk_buff *skb,
+                               unsigned int mtu)
 {
        netdev_features_t features;
        struct sk_buff *segs;
@@ -224,7 +229,7 @@ static int ip_finish_output_gso(struct sock *sk, struct sk_buff *skb)
 
        /* common case: locally created skb or seglen is <= mtu */
        if (((IPCB(skb)->flags & IPSKB_FORWARDED) == 0) ||
-             skb_gso_network_seglen(skb) <= ip_skb_dst_mtu(skb))
+             skb_gso_network_seglen(skb) <= mtu)
                return ip_finish_output2(sk, skb);
 
        /* Slowpath -  GSO segment length is exceeding the dst MTU.
@@ -248,7 +253,7 @@ static int ip_finish_output_gso(struct sock *sk, struct sk_buff *skb)
                int err;
 
                segs->next = NULL;
-               err = ip_fragment(sk, segs, ip_finish_output2);
+               err = ip_fragment(sk, segs, mtu, ip_finish_output2);
 
                if (err && ret == 0)
                        ret = err;
@@ -260,6 +265,8 @@ static int ip_finish_output_gso(struct sock *sk, struct sk_buff *skb)
 
 static int ip_finish_output(struct sock *sk, struct sk_buff *skb)
 {
+       unsigned int mtu;
+
 #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
        /* Policy lookup after SNAT yielded a new policy */
        if (skb_dst(skb)->xfrm) {
@@ -267,11 +274,12 @@ static int ip_finish_output(struct sock *sk, struct sk_buff *skb)
                return dst_output_sk(sk, skb);
        }
 #endif
+       mtu = ip_skb_dst_mtu(skb);
        if (skb_is_gso(skb))
-               return ip_finish_output_gso(sk, skb);
+               return ip_finish_output_gso(sk, skb, mtu);
 
-       if (skb->len > ip_skb_dst_mtu(skb))
-               return ip_fragment(sk, skb, ip_finish_output2);
+       if (skb->len > mtu || (IPCB(skb)->flags & IPSKB_FRAG_PMTU))
+               return ip_fragment(sk, skb, mtu, ip_finish_output2);
 
        return ip_finish_output2(sk, skb);
 }
@@ -478,6 +486,31 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
        skb_copy_secmark(to, from);
 }
 
+static int ip_fragment(struct sock *sk, struct sk_buff *skb,
+                      unsigned int mtu,
+                      int (*output)(struct sock *, struct sk_buff *))
+{
+       struct iphdr *iph = ip_hdr(skb);
+
+       if ((iph->frag_off & htons(IP_DF)) == 0)
+               return ip_do_fragment(sk, skb, output);
+
+       if (unlikely(!skb->ignore_df ||
+                    (IPCB(skb)->frag_max_size &&
+                     IPCB(skb)->frag_max_size > mtu))) {
+               struct rtable *rt = skb_rtable(skb);
+               struct net_device *dev = rt->dst.dev;
+
+               IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
+               icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
+                         htonl(mtu));
+               kfree_skb(skb);
+               return -EMSGSIZE;
+       }
+
+       return ip_do_fragment(sk, skb, output);
+}
+
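
Spelled out, the wrapper's decision table is (a summary of the code above, not an additional path):

/* DF clear                               -> ip_do_fragment()
 * DF set, !skb->ignore_df                -> ICMP FRAG_NEEDED, -EMSGSIZE
 * DF set, ignore_df, frag_max_size > mtu -> ICMP FRAG_NEEDED, -EMSGSIZE
 *   (a reassembled fragment larger than the outgoing mtu)
 * DF set, ignore_df, otherwise           -> ip_do_fragment()
 */
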
 /*
  *     This IP datagram is too large to be sent in one piece.  Break it up into
  *     smaller pieces (each of size equal to IP header plus
@@ -485,8 +518,8 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
  *     single device frame, and queue such a frame for sending.
  */
 
-int ip_fragment(struct sock *sk, struct sk_buff *skb,
-               int (*output)(struct sock *, struct sk_buff *))
+int ip_do_fragment(struct sock *sk, struct sk_buff *skb,
+                  int (*output)(struct sock *, struct sk_buff *))
 {
        struct iphdr *iph;
        int ptr;
@@ -507,15 +540,8 @@ int ip_fragment(struct sock *sk, struct sk_buff *skb,
        iph = ip_hdr(skb);
 
        mtu = ip_skb_dst_mtu(skb);
-       if (unlikely(((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) ||
-                    (IPCB(skb)->frag_max_size &&
-                     IPCB(skb)->frag_max_size > mtu))) {
-               IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
-               icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
-                         htonl(mtu));
-               kfree_skb(skb);
-               return -EMSGSIZE;
-       }
+       if (IPCB(skb)->frag_max_size && IPCB(skb)->frag_max_size < mtu)
+               mtu = IPCB(skb)->frag_max_size;
 
        /*
         *      Setup starting values.
@@ -711,6 +737,9 @@ slow_path:
                iph = ip_hdr(skb2);
                iph->frag_off = htons((offset >> 3));
 
+               if (IPCB(skb)->flags & IPSKB_FRAG_PMTU)
+                       iph->frag_off |= htons(IP_DF);
+
                /* ANK: dirty, but effective trick. Upgrade options only if
                 * the segment to be fragmented was THE FIRST (otherwise,
                 * options are already fixed) and make it ONCE
@@ -751,7 +780,7 @@ fail:
        IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
        return err;
 }
-EXPORT_SYMBOL(ip_fragment);
+EXPORT_SYMBOL(ip_do_fragment);
 
 int
 ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
@@ -1217,11 +1246,9 @@ ssize_t  ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
        }
 
        while (size > 0) {
-               int i;
-
-               if (skb_is_gso(skb))
+               if (skb_is_gso(skb)) {
                        len = size;
-               else {
+               } else {
 
                        /* Check if the remaining data fits into current packet. */
                        len = mtu - skb->len;
@@ -1273,15 +1300,10 @@ ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
                        continue;
                }
 
-               i = skb_shinfo(skb)->nr_frags;
                if (len > size)
                        len = size;
-               if (skb_can_coalesce(skb, i, page, offset)) {
-                       skb_frag_size_add(&skb_shinfo(skb)->frags[i-1], len);
-               } else if (i < MAX_SKB_FRAGS) {
-                       get_page(page);
-                       skb_fill_page_desc(skb, i, page, offset, len);
-               } else {
+
+               if (skb_append_pagefrags(skb, page, offset, len)) {
                        err = -EMSGSIZE;
                        goto error;
                }
index 7cfb0893f2636bcc87537da3014643362f72b10f..04ae2992a5cdbf341a006f0fc7329345be7fc9e1 100644 (file)
@@ -582,6 +582,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
        case IP_TRANSPARENT:
        case IP_MINTTL:
        case IP_NODEFRAG:
+       case IP_BIND_ADDRESS_NO_PORT:
        case IP_UNICAST_IF:
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_ALL:
@@ -732,6 +733,9 @@ static int do_ip_setsockopt(struct sock *sk, int level,
                }
                inet->nodefrag = val ? 1 : 0;
                break;
+       case IP_BIND_ADDRESS_NO_PORT:
+               inet->bind_address_no_port = val ? 1 : 0;
+               break;
        case IP_MTU_DISCOVER:
                if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_OMIT)
                        goto e_inval;
@@ -1324,6 +1328,9 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
        case IP_NODEFRAG:
                val = inet->nodefrag;
                break;
+       case IP_BIND_ADDRESS_NO_PORT:
+               val = inet->bind_address_no_port;
+               break;
        case IP_MTU_DISCOVER:
                val = inet->pmtudisc;
                break;
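
From userspace, the new option defers port allocation to connect() so that binding to a source address no longer burns an ephemeral port; a minimal sketch (error handling trimmed):

#include <string.h>
#include <netinet/in.h>
#include <sys/socket.h>

#ifndef IP_BIND_ADDRESS_NO_PORT
#define IP_BIND_ADDRESS_NO_PORT 24	/* added by this series' uapi change */
#endif

int bind_src_no_port(int fd, struct in_addr src)
{
	struct sockaddr_in sin;
	int one = 1;

	setsockopt(fd, IPPROTO_IP, IP_BIND_ADDRESS_NO_PORT,
		   &one, sizeof(one));

	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_addr = src;
	sin.sin_port = 0;	/* port is picked later, at connect() time */
	return bind(fd, (struct sockaddr *)&sin, sizeof(sin));
}
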
index ce63ab21b6cda87f8caea29967a4651b7f78c909..6a51a71a6c67a0f3e48523a37e3b559306885de9 100644 (file)
@@ -98,7 +98,7 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto)
                        return -ENOMEM;
 
                eh = (struct ethhdr *)skb->data;
-               if (likely(ntohs(eh->h_proto) >= ETH_P_802_3_MIN))
+               if (likely(eth_proto_is_802_3(eh->h_proto)))
                        skb->protocol = eh->h_proto;
                else
                        skb->protocol = htons(ETH_P_802_2);
@@ -165,6 +165,8 @@ struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,
 {
        int i;
 
+       netdev_stats_to_stats64(tot, &dev->stats);
+
        for_each_possible_cpu(i) {
                const struct pcpu_sw_netstats *tstats =
                                                   per_cpu_ptr(dev->tstats, i);
@@ -185,22 +187,6 @@ struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,
                tot->tx_bytes   += tx_bytes;
        }
 
-       tot->multicast = dev->stats.multicast;
-
-       tot->rx_crc_errors = dev->stats.rx_crc_errors;
-       tot->rx_fifo_errors = dev->stats.rx_fifo_errors;
-       tot->rx_length_errors = dev->stats.rx_length_errors;
-       tot->rx_frame_errors = dev->stats.rx_frame_errors;
-       tot->rx_errors = dev->stats.rx_errors;
-
-       tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
-       tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
-       tot->tx_dropped = dev->stats.tx_dropped;
-       tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
-       tot->tx_errors = dev->stats.tx_errors;
-
-       tot->collisions  = dev->stats.collisions;
-
        return tot;
 }
 EXPORT_SYMBOL_GPL(ip_tunnel_get_stats64);
index ff96396ebec5bdf794cf84776f21505d0b7de737..254238daf58bd9f6e398609b67ca6a0b946600d2 100644 (file)
@@ -251,7 +251,8 @@ ipip_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                        return -EINVAL;
        }
 
-       p.i_key = p.o_key = p.i_flags = p.o_flags = 0;
+       p.i_key = p.o_key = 0;
+       p.i_flags = p.o_flags = 0;
        if (p.iph.ttl)
                p.iph.frag_off |= htons(IP_DF);
 
index 2d0e265fef6e7f2c657c54d4db0fd10e010fa68b..e7abf5145edc126b60524f0cecf179f7eef75d7c 100644 (file)
@@ -1444,7 +1444,6 @@ static int
 compat_find_calc_match(struct xt_entry_match *m,
                       const char *name,
                       const struct ipt_ip *ip,
-                      unsigned int hookmask,
                       int *size)
 {
        struct xt_match *match;
@@ -1513,8 +1512,7 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
        entry_offset = (void *)e - (void *)base;
        j = 0;
        xt_ematch_foreach(ematch, e) {
-               ret = compat_find_calc_match(ematch, name,
-                                            &e->ip, e->comefrom, &off);
+               ret = compat_find_calc_match(ematch, name, &e->ip, &off);
                if (ret != 0)
                        goto release_matches;
                ++j;
index 771ab3d01ad3dc303ac999e539a9c8ab5845baf2..45cb16a6a4a337c564d880485c45ca46229cb152 100644 (file)
@@ -367,6 +367,11 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par)
        struct clusterip_config *config;
        int ret;
 
+       if (par->nft_compat) {
+               pr_err("cannot use CLUSTERIP target from nftables compat\n");
+               return -EOPNOTSUPP;
+       }
+
        if (cipinfo->hash_mode != CLUSTERIP_HASHMODE_SIP &&
            cipinfo->hash_mode != CLUSTERIP_HASHMODE_SIP_SPT &&
            cipinfo->hash_mode != CLUSTERIP_HASHMODE_SIP_SPT_DPT) {
index e9e67793055fce9b20ee836275a3eb4a9437592a..fe8cc183411e052f6e0ba4afefbeaef1e77313cd 100644 (file)
@@ -18,7 +18,7 @@
 #include <net/netfilter/nf_conntrack_synproxy.h>
 
 static struct iphdr *
-synproxy_build_ip(struct sk_buff *skb, u32 saddr, u32 daddr)
+synproxy_build_ip(struct sk_buff *skb, __be32 saddr, __be32 daddr)
 {
        struct iphdr *iph;
 
@@ -220,7 +220,7 @@ synproxy_send_client_ack(const struct synproxy_net *snet,
        nth->ack_seq    = th->ack_seq;
        tcp_flag_word(nth) = TCP_FLAG_ACK;
        nth->doff       = tcp_hdr_size / 4;
-       nth->window     = ntohs(htons(th->window) >> opts->wscale);
+       nth->window     = htons(ntohs(th->window) >> opts->wscale);
        nth->check      = 0;
        nth->urg_ptr    = 0;
 
index e1f3b911dd1e3739a63e38b63a1b9a7b29bfd7f0..da5d483e236ac1e37b631c6091219fbefbe497b4 100644 (file)
@@ -298,6 +298,8 @@ static const struct snmp_mib snmp4_net_list[] = {
        SNMP_MIB_ITEM("TCPACKSkippedFinWait2", LINUX_MIB_TCPACKSKIPPEDFINWAIT2),
        SNMP_MIB_ITEM("TCPACKSkippedTimeWait", LINUX_MIB_TCPACKSKIPPEDTIMEWAIT),
        SNMP_MIB_ITEM("TCPACKSkippedChallenge", LINUX_MIB_TCPACKSKIPPEDCHALLENGE),
+       SNMP_MIB_ITEM("TCPWinProbe", LINUX_MIB_TCPWINPROBE),
+       SNMP_MIB_ITEM("TCPKeepAlive", LINUX_MIB_TCPKEEPALIVE),
        SNMP_MIB_SENTINEL
 };
 
index f45f2a12f37b25b7270560498423df9488405b1d..f6055984c3070a6b76ba79a09292bcd8f937f79d 100644 (file)
@@ -457,12 +457,9 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
 }
 
 #define IP_IDENTS_SZ 2048u
-struct ip_ident_bucket {
-       atomic_t        id;
-       u32             stamp32;
-};
 
-static struct ip_ident_bucket *ip_idents __read_mostly;
+static atomic_t *ip_idents __read_mostly;
+static u32 *ip_tstamps __read_mostly;
 
 /* In order to protect privacy, we add a perturbation to identifiers
  * if one generator is seldom used. This makes it hard for an attacker
@@ -470,15 +467,16 @@ static struct ip_ident_bucket *ip_idents __read_mostly;
  */
 u32 ip_idents_reserve(u32 hash, int segs)
 {
-       struct ip_ident_bucket *bucket = ip_idents + hash % IP_IDENTS_SZ;
-       u32 old = ACCESS_ONCE(bucket->stamp32);
+       u32 *p_tstamp = ip_tstamps + hash % IP_IDENTS_SZ;
+       atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ;
+       u32 old = ACCESS_ONCE(*p_tstamp);
        u32 now = (u32)jiffies;
        u32 delta = 0;
 
-       if (old != now && cmpxchg(&bucket->stamp32, old, now) == old)
+       if (old != now && cmpxchg(p_tstamp, old, now) == old)
                delta = prandom_u32_max(now - old);
 
-       return atomic_add_return(segs + delta, &bucket->id) - segs;
+       return atomic_add_return(segs + delta, p_id) - segs;
 }
 EXPORT_SYMBOL(ip_idents_reserve);
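
The scheme can be modelled in userspace as below: a single-threaded sketch with C11 atomics (the kernel uses cmpxchg on the timestamp instead):

#include <stdatomic.h>
#include <stdint.h>
#include <stdlib.h>

#define IDENTS_SZ 2048u

static atomic_uint ids[IDENTS_SZ];
static uint32_t stamps[IDENTS_SZ];

static uint32_t idents_reserve(uint32_t hash, uint32_t segs, uint32_t now)
{
	uint32_t slot = hash % IDENTS_SZ;
	uint32_t old = stamps[slot];
	uint32_t delta = 0;

	if (old != now) {
		stamps[slot] = now;
		/* bucket sat idle: skip a random number of IDs so an
		 * observer cannot infer the send rate from ID deltas */
		delta = (uint32_t)rand() % (now - old);
	}
	/* first ID handed out for this packet's segments */
	return atomic_fetch_add(&ids[slot], segs + delta) + delta;
}
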
 
@@ -2097,7 +2095,8 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
                        goto out;
                }
                if (ipv4_is_local_multicast(fl4->daddr) ||
-                   ipv4_is_lbcast(fl4->daddr)) {
+                   ipv4_is_lbcast(fl4->daddr) ||
+                   fl4->flowi4_proto == IPPROTO_IGMP) {
                        if (!fl4->saddr)
                                fl4->saddr = inet_select_addr(dev_out, 0,
                                                              RT_SCOPE_LINK);
@@ -2742,6 +2741,10 @@ int __init ip_rt_init(void)
 
        prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
 
+       ip_tstamps = kcalloc(IP_IDENTS_SZ, sizeof(*ip_tstamps), GFP_KERNEL);
+       if (!ip_tstamps)
+               panic("IP: failed to allocate ip_tstamps\n");
+
        for_each_possible_cpu(cpu) {
                struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
 
index df849e5a10f1d7f41fb3353a26356ede81f56f72..d70b1f603692554664c478ebb597ba73958ebd9a 100644 (file)
@@ -219,9 +219,9 @@ int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
 }
 EXPORT_SYMBOL_GPL(__cookie_v4_check);
 
-static struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
-                                   struct request_sock *req,
-                                   struct dst_entry *dst)
+struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
+                                struct request_sock *req,
+                                struct dst_entry *dst)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct sock *child;
@@ -235,7 +235,7 @@ static struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
        }
        return child;
 }
-
+EXPORT_SYMBOL(tcp_get_cookie_sock);
 
 /*
  * when syncookies are in effect and tcp timestamps are enabled we stored
@@ -391,7 +391,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
        ireq->rcv_wscale  = rcv_wscale;
        ireq->ecn_ok = cookie_ecn_ok(&tcp_opt, sock_net(sk), &rt->dst);
 
-       ret = get_cookie_sock(sk, skb, req, &rt->dst);
+       ret = tcp_get_cookie_sock(sk, skb, req, &rt->dst);
        /* ip_queue_xmit() depends on our flow being setup
         * Normal sockets get it right from inet_csk_route_child_sock()
         */
index c3852a7ff3c7630f4114cbc33a51a35fa3645e8c..433231ccfb17fc6d01179247d1d81226803d18df 100644 (file)
@@ -41,11 +41,19 @@ static int tcp_syn_retries_min = 1;
 static int tcp_syn_retries_max = MAX_TCP_SYNCNT;
 static int ip_ping_group_range_min[] = { 0, 0 };
 static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
+static int min_sndbuf = SOCK_MIN_SNDBUF;
+static int min_rcvbuf = SOCK_MIN_RCVBUF;
 
 /* Update system visible IP port range */
 static void set_local_port_range(struct net *net, int range[2])
 {
+       bool same_parity = !((range[0] ^ range[1]) & 1);
+
        write_seqlock(&net->ipv4.ip_local_ports.lock);
+       if (same_parity && !net->ipv4.ip_local_ports.warned) {
+               net->ipv4.ip_local_ports.warned = true;
+               pr_err_ratelimited("ip_local_port_range: prefer different parity for start/end values.\n");
+       }
        net->ipv4.ip_local_ports.range[0] = range[0];
        net->ipv4.ip_local_ports.range[1] = range[1];
        write_sequnlock(&net->ipv4.ip_local_ports.lock);
@@ -522,7 +530,7 @@ static struct ctl_table ipv4_table[] = {
                .maxlen         = sizeof(sysctl_tcp_wmem),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
-               .extra1         = &one,
+               .extra1         = &min_sndbuf,
        },
        {
                .procname       = "tcp_notsent_lowat",
@@ -537,7 +545,7 @@ static struct ctl_table ipv4_table[] = {
                .maxlen         = sizeof(sysctl_tcp_rmem),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
-               .extra1         = &one,
+               .extra1         = &min_rcvbuf,
        },
        {
                .procname       = "tcp_app_win",
@@ -702,7 +710,7 @@ static struct ctl_table ipv4_table[] = {
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
-               .extra1         = &zero,
+               .extra1         = &one,
                .extra2         = &gso_max_segs,
        },
        {
@@ -750,7 +758,7 @@ static struct ctl_table ipv4_table[] = {
                .maxlen         = sizeof(sysctl_udp_rmem_min),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
-               .extra1         = &one
+               .extra1         = &min_rcvbuf,
        },
        {
                .procname       = "udp_wmem_min",
@@ -758,7 +766,7 @@ static struct ctl_table ipv4_table[] = {
                .maxlen         = sizeof(sysctl_udp_wmem_min),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
-               .extra1         = &one
+               .extra1         = &min_sndbuf,
        },
        { }
 };
@@ -820,6 +828,13 @@ static struct ctl_table ipv4_net_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
+       {
+               .procname       = "tcp_ecn_fallback",
+               .data           = &init_net.ipv4.sysctl_tcp_ecn_fallback,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec
+       },
        {
                .procname       = "ip_local_port_range",
                .maxlen         = sizeof(init_net.ipv4.ip_local_ports.range),
index f1377f2a0472ec26e88b92be2346cbc3c8a69b41..65f791f74845d51045d1a1af7ff9901babbf2f83 100644 (file)
@@ -695,8 +695,9 @@ static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
        struct tcp_splice_state *tss = rd_desc->arg.data;
        int ret;
 
-       ret = skb_splice_bits(skb, offset, tss->pipe, min(rd_desc->count, len),
-                             tss->flags);
+       ret = skb_splice_bits(skb, skb->sk, offset, tss->pipe,
+                             min(rd_desc->count, len), tss->flags,
+                             skb_socket_splice);
        if (ret > 0)
                rd_desc->count -= ret;
        return ret;
@@ -809,16 +810,28 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
 }
 EXPORT_SYMBOL(tcp_splice_read);
 
-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
+struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
+                                   bool force_schedule)
 {
        struct sk_buff *skb;
 
        /* The TCP header must be at least 32-bit aligned.  */
        size = ALIGN(size, 4);
 
+       if (unlikely(tcp_under_memory_pressure(sk)))
+               sk_mem_reclaim_partial(sk);
+
        skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
-       if (skb) {
-               if (sk_wmem_schedule(sk, skb->truesize)) {
+       if (likely(skb)) {
+               bool mem_scheduled;
+
+               if (force_schedule) {
+                       mem_scheduled = true;
+                       sk_forced_mem_schedule(sk, skb->truesize);
+               } else {
+                       mem_scheduled = sk_wmem_schedule(sk, skb->truesize);
+               }
+               if (likely(mem_scheduled)) {
                        skb_reserve(skb, sk->sk_prot->max_header);
                        /*
                         * Make sure that we have exactly size bytes
@@ -908,7 +921,8 @@ new_segment:
                        if (!sk_stream_memory_free(sk))
                                goto wait_for_sndbuf;
 
-                       skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
+                       skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation,
+                                                 skb_queue_empty(&sk->sk_write_queue));
                        if (!skb)
                                goto wait_for_memory;
 
@@ -987,6 +1001,9 @@ do_error:
        if (copied)
                goto out;
 out_err:
+       /* make sure we wake any epoll edge trigger waiter */
+       if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN))
+               sk->sk_write_space(sk);
        return sk_stream_error(sk, flags, err);
 }
 
@@ -1144,7 +1161,8 @@ new_segment:
 
                        skb = sk_stream_alloc_skb(sk,
                                                  select_size(sk, sg),
-                                                 sk->sk_allocation);
+                                                 sk->sk_allocation,
+                                                 skb_queue_empty(&sk->sk_write_queue));
                        if (!skb)
                                goto wait_for_memory;
 
@@ -1275,6 +1293,9 @@ do_error:
                goto out;
 out_err:
        err = sk_stream_error(sk, flags, err);
+       /* make sure we wake any epoll edge trigger waiter */
+       if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN))
+               sk->sk_write_space(sk);
        release_sock(sk);
        return err;
 }
@@ -2483,6 +2504,13 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
                        icsk->icsk_syn_retries = val;
                break;
 
+       case TCP_SAVE_SYN:
+               if (val < 0 || val > 1)
+                       err = -EINVAL;
+               else
+                       tp->save_syn = val;
+               break;
+
        case TCP_LINGER2:
                if (val < 0)
                        tp->linger2 = -1;
@@ -2672,6 +2700,8 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
                info->tcpi_bytes_acked = tp->bytes_acked;
                info->tcpi_bytes_received = tp->bytes_received;
        } while (u64_stats_fetch_retry_irq(&tp->syncp, start));
+       info->tcpi_segs_out = tp->segs_out;
+       info->tcpi_segs_in = tp->segs_in;
 }
 EXPORT_SYMBOL_GPL(tcp_get_info);
 
@@ -2821,6 +2851,42 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
        case TCP_NOTSENT_LOWAT:
                val = tp->notsent_lowat;
                break;
+       case TCP_SAVE_SYN:
+               val = tp->save_syn;
+               break;
+       case TCP_SAVED_SYN: {
+               if (get_user(len, optlen))
+                       return -EFAULT;
+
+               lock_sock(sk);
+               if (tp->saved_syn) {
+                       if (len < tp->saved_syn[0]) {
+                               if (put_user(tp->saved_syn[0], optlen)) {
+                                       release_sock(sk);
+                                       return -EFAULT;
+                               }
+                               release_sock(sk);
+                               return -EINVAL;
+                       }
+                       len = tp->saved_syn[0];
+                       if (put_user(len, optlen)) {
+                               release_sock(sk);
+                               return -EFAULT;
+                       }
+                       if (copy_to_user(optval, tp->saved_syn + 1, len)) {
+                               release_sock(sk);
+                               return -EFAULT;
+                       }
+                       tcp_saved_syn_free(tp);
+                       release_sock(sk);
+               } else {
+                       release_sock(sk);
+                       len = 0;
+                       if (put_user(len, optlen))
+                               return -EFAULT;
+               }
+               return 0;
+       }
        default:
                return -ENOPROTOOPT;
        }
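
These two options form a pair with tcp_reqsk_record_syn() further down: TCP_SAVE_SYN is enabled on the listener before connections arrive, and TCP_SAVED_SYN is read once per accepted socket, returning the raw network-plus-TCP headers of the SYN (the kernel frees its copy after a successful read, so a second call returns length 0). A hedged userspace sketch; the fallback defines mirror this series' uapi values:

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

#ifndef TCP_SAVE_SYN
#define TCP_SAVE_SYN   27
#define TCP_SAVED_SYN  28
#endif

static int enable_save_syn(int listen_fd)
{
        int one = 1;

        return setsockopt(listen_fd, IPPROTO_TCP, TCP_SAVE_SYN,
                          &one, sizeof(one));
}

static int read_saved_syn(int conn_fd, unsigned char *hdrs, socklen_t *len)
{
        /* On success *len holds the combined header length; a buffer that
         * is too small fails with EINVAL after *len is updated to the
         * required size, per the hunk above. */
        return getsockopt(conn_fd, IPPROTO_TCP, TCP_SAVED_SYN, hdrs, len);
}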
@@ -3025,11 +3091,12 @@ __setup("thash_entries=", set_thash_entries);
 
 static void __init tcp_init_mem(void)
 {
-       unsigned long limit = nr_free_buffer_pages() / 8;
+       unsigned long limit = nr_free_buffer_pages() / 16;
+
        limit = max(limit, 128UL);
-       sysctl_tcp_mem[0] = limit / 4 * 3;
-       sysctl_tcp_mem[1] = limit;
-       sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;
+       sysctl_tcp_mem[0] = limit / 4 * 3;              /* 4.68 % */
+       sysctl_tcp_mem[1] = limit;                      /* 6.25 % */
+       sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;      /* 9.37 % */
 }
 
 void __init tcp_init(void)
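
Worked example for the new scaling (boot-time dependent, so purely illustrative): with 1,048,576 free buffer pages (4 GiB at 4 KiB/page), limit becomes 65,536 pages, giving sysctl_tcp_mem = {49152, 65536, 98304} pages, i.e. 192/256/384 MiB. As fractions of the free pages those are 4.6875 %, 6.25 % and 9.375 %, matching the annotations above, and exactly half the previous /8-based defaults.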
index c9ab964189a0162c7de19d4319f6c3e56194117b..15c4536188a4c3b9b67bb8db26c83f12da9115b0 100644 (file)
@@ -359,7 +359,7 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
        /* Check #1 */
        if (tp->rcv_ssthresh < tp->window_clamp &&
            (int)tp->rcv_ssthresh < tcp_space(sk) &&
-           !sk_under_memory_pressure(sk)) {
+           !tcp_under_memory_pressure(sk)) {
                int incr;
 
                /* Check #2. Increase window, if skb with such overhead
@@ -446,7 +446,7 @@ static void tcp_clamp_window(struct sock *sk)
 
        if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] &&
            !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
-           !sk_under_memory_pressure(sk) &&
+           !tcp_under_memory_pressure(sk) &&
            sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)) {
                sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc),
                                    sysctl_tcp_rmem[2]);
@@ -1130,7 +1130,12 @@ static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
 struct tcp_sacktag_state {
        int     reord;
        int     fack_count;
-       long    rtt_us; /* RTT measured by SACKing never-retransmitted data */
+       /* Timestamps for the earliest and latest never-retransmitted
+        * segments that were SACKed. RTO needs the earliest RTT to stay
+        * conservative, but congestion control should still get an
+        * accurate delay signal.
+        */
+       struct skb_mstamp first_sackt;
+       struct skb_mstamp last_sackt;
        int     flag;
 };
 
@@ -1233,14 +1238,9 @@ static u8 tcp_sacktag_one(struct sock *sk,
                                                           state->reord);
                                if (!after(end_seq, tp->high_seq))
                                        state->flag |= FLAG_ORIG_SACK_ACKED;
-                               /* Pick the earliest sequence sacked for RTT */
-                               if (state->rtt_us < 0) {
-                                       struct skb_mstamp now;
-
-                                       skb_mstamp_get(&now);
-                                       state->rtt_us = skb_mstamp_us_delta(&now,
-                                                               xmit_time);
-                               }
+                               if (state->first_sackt.v64 == 0)
+                                       state->first_sackt = *xmit_time;
+                               state->last_sackt = *xmit_time;
                        }
 
                        if (sacked & TCPCB_LOST) {
@@ -1634,7 +1634,7 @@ static int tcp_sack_cache_ok(const struct tcp_sock *tp, const struct tcp_sack_bl
 
 static int
 tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
-                       u32 prior_snd_una, long *sack_rtt_us)
+                       u32 prior_snd_una, struct tcp_sacktag_state *state)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        const unsigned char *ptr = (skb_transport_header(ack_skb) +
@@ -1642,7 +1642,6 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
        struct tcp_sack_block_wire *sp_wire = (struct tcp_sack_block_wire *)(ptr+2);
        struct tcp_sack_block sp[TCP_NUM_SACKS];
        struct tcp_sack_block *cache;
-       struct tcp_sacktag_state state;
        struct sk_buff *skb;
        int num_sacks = min(TCP_NUM_SACKS, (ptr[1] - TCPOLEN_SACK_BASE) >> 3);
        int used_sacks;
@@ -1650,9 +1649,8 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
        int i, j;
        int first_sack_index;
 
-       state.flag = 0;
-       state.reord = tp->packets_out;
-       state.rtt_us = -1L;
+       state->flag = 0;
+       state->reord = tp->packets_out;
 
        if (!tp->sacked_out) {
                if (WARN_ON(tp->fackets_out))
@@ -1663,7 +1661,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
        found_dup_sack = tcp_check_dsack(sk, ack_skb, sp_wire,
                                         num_sacks, prior_snd_una);
        if (found_dup_sack)
-               state.flag |= FLAG_DSACKING_ACK;
+               state->flag |= FLAG_DSACKING_ACK;
 
        /* Eliminate too old ACKs, but take into
         * account more or less fresh ones, they can
@@ -1728,7 +1726,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
        }
 
        skb = tcp_write_queue_head(sk);
-       state.fack_count = 0;
+       state->fack_count = 0;
        i = 0;
 
        if (!tp->sacked_out) {
@@ -1762,10 +1760,10 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
 
                        /* Head todo? */
                        if (before(start_seq, cache->start_seq)) {
-                               skb = tcp_sacktag_skip(skb, sk, &state,
+                               skb = tcp_sacktag_skip(skb, sk, state,
                                                       start_seq);
                                skb = tcp_sacktag_walk(skb, sk, next_dup,
-                                                      &state,
+                                                      state,
                                                       start_seq,
                                                       cache->start_seq,
                                                       dup_sack);
@@ -1776,7 +1774,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
                                goto advance_sp;
 
                        skb = tcp_maybe_skipping_dsack(skb, sk, next_dup,
-                                                      &state,
+                                                      state,
                                                       cache->end_seq);
 
                        /* ...tail remains todo... */
@@ -1785,12 +1783,12 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
                                skb = tcp_highest_sack(sk);
                                if (!skb)
                                        break;
-                               state.fack_count = tp->fackets_out;
+                               state->fack_count = tp->fackets_out;
                                cache++;
                                goto walk;
                        }
 
-                       skb = tcp_sacktag_skip(skb, sk, &state, cache->end_seq);
+                       skb = tcp_sacktag_skip(skb, sk, state, cache->end_seq);
                        /* Check overlap against next cached too (past this one already) */
                        cache++;
                        continue;
@@ -1800,12 +1798,12 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
                        skb = tcp_highest_sack(sk);
                        if (!skb)
                                break;
-                       state.fack_count = tp->fackets_out;
+                       state->fack_count = tp->fackets_out;
                }
-               skb = tcp_sacktag_skip(skb, sk, &state, start_seq);
+               skb = tcp_sacktag_skip(skb, sk, state, start_seq);
 
 walk:
-               skb = tcp_sacktag_walk(skb, sk, next_dup, &state,
+               skb = tcp_sacktag_walk(skb, sk, next_dup, state,
                                       start_seq, end_seq, dup_sack);
 
 advance_sp:
@@ -1820,9 +1818,9 @@ advance_sp:
        for (j = 0; j < used_sacks; j++)
                tp->recv_sack_cache[i++] = sp[j];
 
-       if ((state.reord < tp->fackets_out) &&
+       if ((state->reord < tp->fackets_out) &&
            ((inet_csk(sk)->icsk_ca_state != TCP_CA_Loss) || tp->undo_marker))
-               tcp_update_reordering(sk, tp->fackets_out - state.reord, 0);
+               tcp_update_reordering(sk, tp->fackets_out - state->reord, 0);
 
        tcp_mark_lost_retrans(sk);
        tcp_verify_left_out(tp);
@@ -1834,8 +1832,7 @@ out:
        WARN_ON((int)tp->retrans_out < 0);
        WARN_ON((int)tcp_packets_in_flight(tp) < 0);
 #endif
-       *sack_rtt_us = state.rtt_us;
-       return state.flag;
+       return state->flag;
 }
 
 /* Limits sacked_out so that sum with lost_out isn't ever larger than
@@ -3055,7 +3052,8 @@ static void tcp_ack_tstamp(struct sock *sk, struct sk_buff *skb,
  * arrived at the other end.
  */
 static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
-                              u32 prior_snd_una, long sack_rtt_us)
+                              u32 prior_snd_una,
+                              struct tcp_sacktag_state *sack)
 {
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct skb_mstamp first_ackt, last_ackt, now;
@@ -3063,8 +3061,9 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
        u32 prior_sacked = tp->sacked_out;
        u32 reord = tp->packets_out;
        bool fully_acked = true;
-       long ca_seq_rtt_us = -1L;
+       long sack_rtt_us = -1L;
        long seq_rtt_us = -1L;
+       long ca_rtt_us = -1L;
        struct sk_buff *skb;
        u32 pkts_acked = 0;
        bool rtt_update;
@@ -3153,15 +3152,16 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
        skb_mstamp_get(&now);
        if (likely(first_ackt.v64)) {
                seq_rtt_us = skb_mstamp_us_delta(&now, &first_ackt);
-               ca_seq_rtt_us = skb_mstamp_us_delta(&now, &last_ackt);
+               ca_rtt_us = skb_mstamp_us_delta(&now, &last_ackt);
+       }
+       if (sack->first_sackt.v64) {
+               sack_rtt_us = skb_mstamp_us_delta(&now, &sack->first_sackt);
+               ca_rtt_us = skb_mstamp_us_delta(&now, &sack->last_sackt);
        }
 
        rtt_update = tcp_ack_update_rtt(sk, flag, seq_rtt_us, sack_rtt_us);
 
        if (flag & FLAG_ACKED) {
-               const struct tcp_congestion_ops *ca_ops
-                       = inet_csk(sk)->icsk_ca_ops;
-
                tcp_rearm_rto(sk);
                if (unlikely(icsk->icsk_mtup.probe_size &&
                             !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) {
@@ -3184,11 +3184,6 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 
                tp->fackets_out -= min(pkts_acked, tp->fackets_out);
 
-               if (ca_ops->pkts_acked) {
-                       long rtt_us = min_t(ulong, ca_seq_rtt_us, sack_rtt_us);
-                       ca_ops->pkts_acked(sk, pkts_acked, rtt_us);
-               }
-
        } else if (skb && rtt_update && sack_rtt_us >= 0 &&
                   sack_rtt_us > skb_mstamp_us_delta(&now, &skb->skb_mstamp)) {
                /* Do not re-arm RTO if the sack RTT is measured from data sent
@@ -3198,6 +3193,9 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
                tcp_rearm_rto(sk);
        }
 
+       if (icsk->icsk_ca_ops->pkts_acked)
+               icsk->icsk_ca_ops->pkts_acked(sk, pkts_acked, ca_rtt_us);
+
 #if FASTRETRANS_DEBUG > 0
        WARN_ON((int)tp->sacked_out < 0);
        WARN_ON((int)tp->lost_out < 0);
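
A concrete illustration of the first_sackt/last_sackt split introduced above (numbers invented): if segments go out at t = 0, 10 and 20 ms and one SACK arriving at t = 100 ms covers the first two, first_sackt yields a 100 ms sample and last_sackt a 90 ms one. tcp_ack_update_rtt() keeps getting the conservative 100 ms (sack_rtt_us) for RTO estimation, while pkts_acked now hands congestion control the fresher 90 ms (ca_rtt_us) and, having moved out of the FLAG_ACKED branch, is invoked even for pure SACK ACKs that advance nothing.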
@@ -3238,7 +3236,7 @@ static void tcp_ack_probe(struct sock *sk)
                 * This function is not for casual use!
                 */
        } else {
-               unsigned long when = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
+               unsigned long when = tcp_probe0_when(sk, TCP_RTO_MAX);
 
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
                                          when, TCP_RTO_MAX);
@@ -3466,6 +3464,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
+       struct tcp_sacktag_state sack_state;
        u32 prior_snd_una = tp->snd_una;
        u32 ack_seq = TCP_SKB_CB(skb)->seq;
        u32 ack = TCP_SKB_CB(skb)->ack_seq;
@@ -3474,7 +3473,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
        int prior_packets = tp->packets_out;
        const int prior_unsacked = tp->packets_out - tp->sacked_out;
        int acked = 0; /* Number of packets newly acked */
-       long sack_rtt_us = -1L;
+
+       sack_state.first_sackt.v64 = 0;
 
        /* We very likely will need to access write queue head. */
        prefetchw(sk->sk_write_queue.next);
@@ -3538,7 +3538,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 
                if (TCP_SKB_CB(skb)->sacked)
                        flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una,
-                                                       &sack_rtt_us);
+                                                       &sack_state);
 
                if (tcp_ecn_rcv_ecn_echo(tp, tcp_hdr(skb))) {
                        flag |= FLAG_ECE;
@@ -3563,7 +3563,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
        /* See if we can take anything off of the retransmit queue. */
        acked = tp->packets_out;
        flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una,
-                                   sack_rtt_us);
+                                   &sack_state);
        acked -= tp->packets_out;
 
        /* Advance cwnd if state allows */
@@ -3615,7 +3615,7 @@ old_ack:
         */
        if (TCP_SKB_CB(skb)->sacked) {
                flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una,
-                                               &sack_rtt_us);
+                                               &sack_state);
                tcp_fastretrans_alert(sk, acked, prior_unsacked,
                                      is_dupack, flag);
        }
@@ -4514,10 +4514,12 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 
                if (eaten <= 0) {
 queue_and_out:
-                       if (eaten < 0 &&
-                           tcp_try_rmem_schedule(sk, skb, skb->truesize))
-                               goto drop;
-
+                       if (eaten < 0) {
+                               if (skb_queue_len(&sk->sk_receive_queue) == 0)
+                                       sk_forced_mem_schedule(sk, skb->truesize);
+                               else if (tcp_try_rmem_schedule(sk, skb, skb->truesize))
+                                       goto drop;
+                       }
                        eaten = tcp_queue_rcv(sk, skb, 0, &fragstolen);
                }
                tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq);
@@ -4788,7 +4790,7 @@ static int tcp_prune_queue(struct sock *sk)
 
        if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
                tcp_clamp_window(sk);
-       else if (sk_under_memory_pressure(sk))
+       else if (tcp_under_memory_pressure(sk))
                tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
 
        tcp_collapse_ofo_queue(sk);
@@ -4832,7 +4834,7 @@ static bool tcp_should_expand_sndbuf(const struct sock *sk)
                return false;
 
        /* If we are under global TCP memory pressure, do not expand.  */
-       if (sk_under_memory_pressure(sk))
+       if (tcp_under_memory_pressure(sk))
                return false;
 
        /* If we are under soft global TCP memory pressure, do not expand.  */
@@ -6067,6 +6069,23 @@ static bool tcp_syn_flood_action(struct sock *sk,
        return want_cookie;
 }
 
+static void tcp_reqsk_record_syn(const struct sock *sk,
+                                struct request_sock *req,
+                                const struct sk_buff *skb)
+{
+       if (tcp_sk(sk)->save_syn) {
+               u32 len = skb_network_header_len(skb) + tcp_hdrlen(skb);
+               u32 *copy;
+
+               copy = kmalloc(len + sizeof(u32), GFP_ATOMIC);
+               if (copy) {
+                       copy[0] = len;
+                       memcpy(&copy[1], skb_network_header(skb), len);
+                       req->saved_syn = copy;
+               }
+       }
+}
+
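Layout note for the buffer recorded here: saved_syn[0] holds the combined network-plus-TCP header length and the headers follow verbatim from saved_syn[1] onward, which is precisely the region do_tcp_getsockopt() copies out for TCP_SAVED_SYN earlier in this series. The GFP_ATOMIC allocation may fail silently, in which case the connection is simply established without a saved SYN.
+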
 int tcp_conn_request(struct request_sock_ops *rsk_ops,
                     const struct tcp_request_sock_ops *af_ops,
                     struct sock *sk, struct sk_buff *skb)
@@ -6199,6 +6218,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
                tcp_rsk(req)->tfo_listener = false;
                af_ops->queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
        }
+       tcp_reqsk_record_syn(sk, req, skb);
 
        return 0;
 
index fc1c658ec6c18cb1daa1cc06039a9f19df67bb5e..d7d4c2b79cf2f516f9e3f62c6fe4415e9bc137a0 100644 (file)
@@ -1400,7 +1400,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
                return 0;
        }
 
-       if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
+       if (tcp_checksum_complete(skb))
                goto csum_err;
 
        if (sk->sk_state == TCP_LISTEN) {
@@ -1626,6 +1626,7 @@ process:
        skb->dev = NULL;
 
        bh_lock_sock_nested(sk);
+       tcp_sk(sk)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
        ret = 0;
        if (!sock_owned_by_user(sk)) {
                if (!tcp_prequeue(sk, skb))
@@ -1646,7 +1647,7 @@ no_tcp_socket:
        if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
                goto discard_it;
 
-       if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
+       if (tcp_checksum_complete(skb)) {
 csum_error:
                TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
 bad_packet:
@@ -1670,10 +1671,6 @@ do_time_wait:
                goto discard_it;
        }
 
-       if (skb->len < (th->doff << 2)) {
-               inet_twsk_put(inet_twsk(sk));
-               goto bad_packet;
-       }
        if (tcp_checksum_complete(skb)) {
                inet_twsk_put(inet_twsk(sk));
                goto csum_error;
@@ -1802,6 +1799,7 @@ void tcp_v4_destroy_sock(struct sock *sk)
 
        /* If socket is aborted during connect operation */
        tcp_free_fastopen_req(tp);
+       tcp_saved_syn_free(tp);
 
        sk_sockets_allocated_dec(sk);
        sock_release_memcg(sk);
@@ -2410,12 +2408,15 @@ static int __net_init tcp_sk_init(struct net *net)
                        goto fail;
                *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
        }
+
        net->ipv4.sysctl_tcp_ecn = 2;
+       net->ipv4.sysctl_tcp_ecn_fallback = 1;
+
        net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
        net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
        net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
-       return 0;
 
+       return 0;
 fail:
        tcp_sk_exit(net);
 
index 17e7339ee5cadd077769de396b7568a7ccb73e13..4bc00cb79e603553076adf750712377586f4b2fb 100644 (file)
@@ -451,6 +451,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
 
                newtp->rcv_wup = newtp->copied_seq =
                newtp->rcv_nxt = treq->rcv_isn + 1;
+               newtp->segs_in = 0;
 
                newtp->snd_sml = newtp->snd_una =
                newtp->snd_nxt = newtp->snd_up = treq->snt_isn + 1;
@@ -539,6 +540,9 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
                newtp->fastopen_rsk = NULL;
                newtp->syn_data_acked = 0;
 
+               newtp->saved_syn = req->saved_syn;
+               req->saved_syn = NULL;
+
                TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_PASSIVEOPENS);
        }
        return newsk;
index a369e8a70b2c775bfee94d7f329ee892c2cdc895..eeb59befaf06867b00e1dd6ded7742b2f0bcd821 100644 (file)
@@ -50,8 +50,8 @@ int sysctl_tcp_retrans_collapse __read_mostly = 1;
  */
 int sysctl_tcp_workaround_signed_windows __read_mostly = 0;
 
-/* Default TSQ limit of two TSO segments */
-int sysctl_tcp_limit_output_bytes __read_mostly = 131072;
+/* Default TSQ limit of four TSO segments */
+int sysctl_tcp_limit_output_bytes __read_mostly = 262144;
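(Arithmetic check: four maximally-sized 64 KiB GSO packets are 4 * 65536 = 262144 bytes; the previous default of 131072 corresponded to two.)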
 
 /* This limits the percentage of the congestion window which we
  * will allow a single TSO frame to consume.  Building TSO frames
@@ -350,6 +350,15 @@ static void tcp_ecn_send_syn(struct sock *sk, struct sk_buff *skb)
        }
 }
 
+static void tcp_ecn_clear_syn(struct sock *sk, struct sk_buff *skb)
+{
+       if (sock_net(sk)->ipv4.sysctl_tcp_ecn_fallback)
+               /* tp->ecn_flags are cleared later, once the SYN-ACK is
+                * ultimately received.
+                */
+               TCP_SKB_CB(skb)->tcp_flags &= ~(TCPHDR_ECE | TCPHDR_CWR);
+}
+
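This helper is wired up in __tcp_retransmit_skb() below: when a SYN still carrying ECE|CWR (TCPHDR_SYN_ECN) must be retransmitted and the new net.ipv4.tcp_ecn_fallback sysctl is enabled, the ECN setup bits are stripped so the retry goes out as a plain SYN, the RFC 3168 section 6.1.1.1 fallback for paths that drop ECN-setup SYNs.
+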
 static void
 tcp_ecn_make_synack(const struct request_sock *req, struct tcphdr *th,
                    struct sock *sk)
@@ -1018,6 +1027,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
                TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
                              tcp_skb_pcount(skb));
 
+       tp->segs_out += tcp_skb_pcount(skb);
        /* OK, its time to fill skb_shinfo(skb)->gso_segs */
        skb_shinfo(skb)->gso_segs = tcp_skb_pcount(skb);
 
@@ -1163,7 +1173,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
                return -ENOMEM;
 
        /* Get a new skb... force flag on. */
-       buff = sk_stream_alloc_skb(sk, nsize, gfp);
+       buff = sk_stream_alloc_skb(sk, nsize, gfp, true);
        if (!buff)
                return -ENOMEM; /* We'll just try again later. */
 
@@ -1722,7 +1732,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
        if (skb->len != skb->data_len)
                return tcp_fragment(sk, skb, len, mss_now, gfp);
 
-       buff = sk_stream_alloc_skb(sk, 0, gfp);
+       buff = sk_stream_alloc_skb(sk, 0, gfp, true);
        if (unlikely(!buff))
                return -ENOMEM;
 
@@ -1941,7 +1951,7 @@ static int tcp_mtu_probe(struct sock *sk)
        }
 
        /* We're allowed to probe.  Build it now. */
-       nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC);
+       nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false);
        if (!nskb)
                return -1;
        sk->sk_wmem_queued += nskb->truesize;
@@ -2078,7 +2088,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
                if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
                        break;
 
-               if (tso_segs == 1 || !max_segs) {
+               if (tso_segs == 1) {
                        if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
                                                     (tcp_skb_is_last(sk, skb) ?
                                                      nonagle : TCP_NAGLE_PUSH))))
@@ -2091,7 +2101,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
                }
 
                limit = mss_now;
-               if (tso_segs > 1 && max_segs && !tcp_urg_mode(tp))
+               if (tso_segs > 1 && !tcp_urg_mode(tp))
                        limit = tcp_mss_split_point(sk, skb, mss_now,
                                                    min_t(unsigned int,
                                                          cwnd_quota,
@@ -2392,7 +2402,7 @@ u32 __tcp_select_window(struct sock *sk)
        if (free_space < (full_space >> 1)) {
                icsk->icsk_ack.quick = 0;
 
-               if (sk_under_memory_pressure(sk))
+               if (tcp_under_memory_pressure(sk))
                        tp->rcv_ssthresh = min(tp->rcv_ssthresh,
                                               4U * tp->advmss);
 
@@ -2615,6 +2625,10 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
                }
        }
 
+       /* RFC3168, section 6.1.1.1. ECN fallback */
+       if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN_ECN) == TCPHDR_SYN_ECN)
+               tcp_ecn_clear_syn(sk, skb);
+
        tcp_retrans_try_collapse(sk, skb, cur_mss);
 
        /* Make a copy, if the first transmission SKB clone we made
@@ -2816,8 +2830,10 @@ begin_fwd:
  * connection tear down and (memory) recovery.
  * Otherwise tcp_send_fin() could be tempted to either delay FIN
  * or even be forced to close flow without any FIN.
+ * In general, we want to allow one skb per socket, to avoid hangs
+ * with edge-triggered epoll().
  */
-static void sk_forced_wmem_schedule(struct sock *sk, int size)
+void sk_forced_mem_schedule(struct sock *sk, int size)
 {
        int amt, status;
 
@@ -2841,7 +2857,7 @@ void tcp_send_fin(struct sock *sk)
         * Note: in the latter case, the FIN packet will be sent after a
         * timeout, as the TCP stack thinks it has already been transmitted.
         */
-       if (tskb && (tcp_send_head(sk) || sk_under_memory_pressure(sk))) {
+       if (tskb && (tcp_send_head(sk) || tcp_under_memory_pressure(sk))) {
 coalesce:
                TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN;
                TCP_SKB_CB(tskb)->end_seq++;
@@ -2864,7 +2880,7 @@ coalesce:
                        return;
                }
                skb_reserve(skb, MAX_TCP_HEADER);
-               sk_forced_wmem_schedule(sk, skb->truesize);
+               sk_forced_mem_schedule(sk, skb->truesize);
                /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
                tcp_init_nondata_skb(skb, tp->write_seq,
                                     TCPHDR_ACK | TCPHDR_FIN);
@@ -3175,7 +3191,7 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
        /* limit to order-0 allocations */
        space = min_t(size_t, space, SKB_MAX_HEAD(MAX_TCP_HEADER));
 
-       syn_data = sk_stream_alloc_skb(sk, space, sk->sk_allocation);
+       syn_data = sk_stream_alloc_skb(sk, space, sk->sk_allocation, false);
        if (!syn_data)
                goto fallback;
        syn_data->ip_summed = CHECKSUM_PARTIAL;
@@ -3241,7 +3257,7 @@ int tcp_connect(struct sock *sk)
                return 0;
        }
 
-       buff = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
+       buff = sk_stream_alloc_skb(sk, 0, sk->sk_allocation, true);
        if (unlikely(!buff))
                return -ENOBUFS;
 
@@ -3382,7 +3398,7 @@ EXPORT_SYMBOL_GPL(tcp_send_ack);
  * one is with SEG.SEQ=SND.UNA to deliver urgent pointer, another is
  * out-of-date with SND.UNA-1 to probe window.
  */
-static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
+static int tcp_xmit_probe_skb(struct sock *sk, int urgent, int mib)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;
@@ -3400,6 +3416,7 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
         */
        tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
        skb_mstamp_get(&skb->skb_mstamp);
+       NET_INC_STATS_BH(sock_net(sk), mib);
        return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
 }
 
@@ -3407,12 +3424,12 @@ void tcp_send_window_probe(struct sock *sk)
 {
        if (sk->sk_state == TCP_ESTABLISHED) {
                tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1;
-               tcp_xmit_probe_skb(sk, 0);
+               tcp_xmit_probe_skb(sk, 0, LINUX_MIB_TCPWINPROBE);
        }
 }
 
 /* Initiate keepalive or window probe from timer. */
-int tcp_write_wakeup(struct sock *sk)
+int tcp_write_wakeup(struct sock *sk, int mib)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;
@@ -3449,8 +3466,8 @@ int tcp_write_wakeup(struct sock *sk)
                return err;
        } else {
                if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF))
-                       tcp_xmit_probe_skb(sk, 1);
-               return tcp_xmit_probe_skb(sk, 0);
+                       tcp_xmit_probe_skb(sk, 1, mib);
+               return tcp_xmit_probe_skb(sk, 0, mib);
        }
 }
 
@@ -3464,7 +3481,7 @@ void tcp_send_probe0(struct sock *sk)
        unsigned long probe_max;
        int err;
 
-       err = tcp_write_wakeup(sk);
+       err = tcp_write_wakeup(sk, LINUX_MIB_TCPWINPROBE);
 
        if (tp->packets_out || !tcp_send_head(sk)) {
                /* Cancel probe timer, if it is not required. */
@@ -3490,7 +3507,7 @@ void tcp_send_probe0(struct sock *sk)
                probe_max = TCP_RESOURCE_PROBE_INTERVAL;
        }
        inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
-                                 inet_csk_rto_backoff(icsk, probe_max),
+                                 tcp_probe0_when(sk, probe_max),
                                  TCP_RTO_MAX);
 }
 
index 8c65dc147d8bcfb58e14c20b774711ffbcc30d5a..5b752f58a90063e7468b11f2853c7c006b679e60 100644 (file)
@@ -247,7 +247,7 @@ void tcp_delack_timer_handler(struct sock *sk)
        }
 
 out:
-       if (sk_under_memory_pressure(sk))
+       if (tcp_under_memory_pressure(sk))
                sk_mem_reclaim(sk);
 }
 
@@ -616,7 +616,7 @@ static void tcp_keepalive_timer (unsigned long data)
                        tcp_write_err(sk);
                        goto out;
                }
-               if (tcp_write_wakeup(sk) <= 0) {
+               if (tcp_write_wakeup(sk, LINUX_MIB_TCPKEEPALIVE) <= 0) {
                        icsk->icsk_probes_out++;
                        elapsed = keepalive_intvl_when(tp);
                } else {
index 6bb98cc193c9a1b532b668b34e9b401357ad96e1..933ea903f7b8fffc02fb7f82deab9c98c35e2daf 100644 (file)
@@ -15,12 +15,10 @@ int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg,
        struct socket *sock = NULL;
        struct sockaddr_in udp_addr;
 
-       err = sock_create_kern(AF_INET, SOCK_DGRAM, 0, &sock);
+       err = sock_create_kern(net, AF_INET, SOCK_DGRAM, 0, &sock);
        if (err < 0)
                goto error;
 
-       sk_change_net(sock->sk, net);
-
        udp_addr.sin_family = AF_INET;
        udp_addr.sin_addr = cfg->local_ip;
        udp_addr.sin_port = cfg->local_udp_port;
@@ -47,7 +45,7 @@ int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg,
 error:
        if (sock) {
                kernel_sock_shutdown(sock, SHUT_RDWR);
-               sk_release_kernel(sock->sk);
+               sock_release(sock);
        }
        *sockp = NULL;
        return err;
@@ -101,7 +99,7 @@ void udp_tunnel_sock_release(struct socket *sock)
 {
        rcu_assign_sk_user_data(sock->sk, NULL);
        kernel_sock_shutdown(sock, SHUT_RDWR);
-       sk_release_kernel(sock->sk);
+       sock_release(sock);
 }
 EXPORT_SYMBOL_GPL(udp_tunnel_sock_release);
 
index 2e8c06108ab9b8bcd572db2372c4f0ef02672eb2..0f3f1999719ac72617b14e68c13f2662f66054a7 100644 (file)
@@ -48,4 +48,5 @@ obj-$(subst m,y,$(CONFIG_IPV6)) += inet6_hashtables.o
 
 ifneq ($(CONFIG_IPV6),)
 obj-$(CONFIG_NET_UDP_TUNNEL) += ip6_udp_tunnel.o
+obj-y += mcast_snoop.o
 endif
index 37b70e82bff8ee9b9964a0237df9d66f3b78bcc7..21c2c818df3b8379226555268ef526c08553d00d 100644 (file)
@@ -2121,6 +2121,8 @@ static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
        fn = fib6_locate(&table->tb6_root, pfx, plen, NULL, 0);
        if (!fn)
                goto out;
+
+       noflags |= RTF_CACHE;
        for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
                if (rt->dst.dev->ifindex != dev->ifindex)
                        continue;
index eef63b394c5ab9ae2e2b5c060a9960f62d671a3a..7de52b65173fa6a1b344b13e67106ad39591ed06 100644 (file)
@@ -167,7 +167,7 @@ lookup_protocol:
        WARN_ON(!answer_prot->slab);
 
        err = -ENOBUFS;
-       sk = sk_alloc(net, PF_INET6, GFP_KERNEL, answer_prot);
+       sk = sk_alloc(net, PF_INET6, GFP_KERNEL, answer_prot, kern);
        if (!sk)
                goto out;
 
@@ -362,7 +362,8 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
                np->saddr = addr->sin6_addr;
 
        /* Make sure we are allowed to bind here. */
-       if (sk->sk_prot->get_port(sk, snum)) {
+       if ((snum || !inet->bind_address_no_port) &&
+           sk->sk_prot->get_port(sk, snum)) {
                inet_reset_saddr(sk);
                err = -EADDRINUSE;
                goto out;
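
This brings inet6_bind() in line with the IPv4 path for IP_BIND_ADDRESS_NO_PORT: bind() only pins the source address and the ephemeral port is picked at connect() time, so many outgoing sockets can share one local address without exhausting the port range at bind(). A hedged AF_INET sketch of the pattern (assumes headers that define the option); the hunk above extends the same inet->bind_address_no_port check to IPv6:

#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>

static int connect_from(const struct sockaddr_in *src,   /* sin_port == 0 */
                        const struct sockaddr_in *dst)
{
        int one = 1;
        int fd = socket(AF_INET, SOCK_STREAM, 0);

        if (fd < 0)
                return -1;
        /* Defer port selection: bind() pins the address only. */
        setsockopt(fd, IPPROTO_IP, IP_BIND_ADDRESS_NO_PORT, &one, sizeof(one));
        if (bind(fd, (const struct sockaddr *)src, sizeof(*src)) ||
            connect(fd, (const struct sockaddr *)dst, sizeof(*dst))) {
                close(fd);
                return -1;
        }
        return fd;
}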
@@ -768,6 +769,7 @@ static int __net_init inet6_net_init(struct net *net)
        net->ipv6.sysctl.auto_flowlabels = 0;
        net->ipv6.sysctl.idgen_retries = 3;
        net->ipv6.sysctl.idgen_delay = 1 * HZ;
+       net->ipv6.sysctl.flowlabel_state_ranges = 1;
        atomic_set(&net->ipv6.fib6_sernum, 1);
 
        err = ipv6_init_mibs(net);
index 2c2b5d51f15cd24b7c1a5b9a416bbfd33201ec06..713d7434c9112432f800f377925ca68534094c34 100644 (file)
@@ -207,7 +207,7 @@ static bool icmpv6_xrlim_allow(struct sock *sk, u8 type,
                        struct inet_peer *peer;
 
                        peer = inet_getpeer_v6(net->ipv6.peers,
-                                              &rt->rt6i_dst.addr, 1);
+                                              &fl6->daddr, 1);
                        res = inet_peer_xrlim_allow(peer, tmo);
                        if (peer)
                                inet_putpeer(peer);
@@ -337,7 +337,7 @@ static struct dst_entry *icmpv6_route_lookup(struct net *net,
         * We won't send icmp if the destination is a known
         * anycast address.
         */
-       if (((struct rt6_info *)dst)->rt6i_flags & RTF_ANYCAST) {
+       if (ipv6_anycast_destination(dst, &fl6->daddr)) {
                net_dbg_ratelimited("icmp6_send: acast source\n");
                dst_release(dst);
                return ERR_PTR(-EINVAL);
@@ -564,7 +564,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
 
        if (!ipv6_unicast_destination(skb) &&
            !(net->ipv6.sysctl.anycast_src_echo_reply &&
-             ipv6_anycast_destination(skb)))
+             ipv6_anycast_destination(skb_dst(skb), saddr)))
                saddr = NULL;
 
        memcpy(&tmp_hdr, icmph, sizeof(tmp_hdr));
index 871641bc1ed4eb5b8f5f554c9efb75f2419fe5b6..b4fd96de97e61627003eff220e10bdd05a899e28 100644 (file)
@@ -257,7 +257,7 @@ not_unique:
        return -EADDRNOTAVAIL;
 }
 
-static inline u32 inet6_sk_port_offset(const struct sock *sk)
+static u32 inet6_sk_port_offset(const struct sock *sk)
 {
        const struct inet_sock *inet = inet_sk(sk);
 
@@ -269,7 +269,11 @@ static inline u32 inet6_sk_port_offset(const struct sock *sk)
 int inet6_hash_connect(struct inet_timewait_death_row *death_row,
                       struct sock *sk)
 {
-       return __inet_hash_connect(death_row, sk, inet6_sk_port_offset(sk),
+       u32 port_offset = 0;
+
+       if (!inet_sk(sk)->inet_num)
+               port_offset = inet6_sk_port_offset(sk);
+       return __inet_hash_connect(death_row, sk, port_offset,
                                   __inet6_check_established);
 }
 EXPORT_SYMBOL_GPL(inet6_hash_connect);
index bde57b113009794637a07b405173bef1fd3c6fb3..55d19861ab20f4a91b6b289be7ca3b0250df4531 100644 (file)
@@ -154,10 +154,32 @@ static void node_free(struct fib6_node *fn)
        kmem_cache_free(fib6_node_kmem, fn);
 }
 
+static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt)
+{
+       int cpu;
+
+       if (!non_pcpu_rt->rt6i_pcpu)
+               return;
+
+       for_each_possible_cpu(cpu) {
+               struct rt6_info **ppcpu_rt;
+               struct rt6_info *pcpu_rt;
+
+               ppcpu_rt = per_cpu_ptr(non_pcpu_rt->rt6i_pcpu, cpu);
+               pcpu_rt = *ppcpu_rt;
+               if (pcpu_rt) {
+                       dst_free(&pcpu_rt->dst);
+                       *ppcpu_rt = NULL;
+               }
+       }
+}
+
 static void rt6_release(struct rt6_info *rt)
 {
-       if (atomic_dec_and_test(&rt->rt6i_ref))
+       if (atomic_dec_and_test(&rt->rt6i_ref)) {
+               rt6_free_pcpu(rt);
                dst_free(&rt->dst);
+       }
 }
 
 static void fib6_link_table(struct net *net, struct fib6_table *tb)
@@ -738,6 +760,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
                                        rt6_clean_expires(iter);
                                else
                                        rt6_set_expires(iter, rt->dst.expires);
+                               iter->rt6i_pmtu = rt->rt6i_pmtu;
                                return -EEXIST;
                        }
                        /* If we have the same destination and the same metric,
index d491125011c4d1c47fd92180efb1cf2e22a85e22..1f9ebe3cbb4ac042edd0b05754d1fc03cdfe73cd 100644 (file)
@@ -595,6 +595,10 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
                if (freq.flr_label & ~IPV6_FLOWLABEL_MASK)
                        return -EINVAL;
 
+               if (net->ipv6.sysctl.flowlabel_state_ranges &&
+                   (freq.flr_label & IPV6_FLOWLABEL_STATELESS_FLAG))
+                       return -ERANGE;
+
                fl = fl_create(net, sk, &freq, optval, optlen, &err);
                if (!fl)
                        return err;
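
With flowlabel_state_ranges enabled (the new default set in inet6_net_init() above), the 20-bit label space is split in two: labels with the top bit set (IPV6_FLOWLABEL_STATELESS_FLAG, 0x80000 in host order) are reserved for automatically generated labels, so IPV6_FLOWLABEL_MGR requests for them now fail with -ERANGE. A hedged sketch of a userspace request that stays in the stateful half; field and constant names are the long-standing uapi ones from linux/in6.h:

#include <arpa/inet.h>
#include <linux/in6.h>          /* struct in6_flowlabel_req */
#include <string.h>
#include <sys/socket.h>

static int get_stateful_flowlabel(int fd, const struct in6_addr *dst,
                                  unsigned int label)
{
        struct in6_flowlabel_req req;

        memset(&req, 0, sizeof(req));
        req.flr_dst    = *dst;
        req.flr_label  = htonl(label & 0x7ffff);  /* keep the top bit clear */
        req.flr_action = IPV6_FL_A_GET;
        req.flr_flags  = IPV6_FL_F_CREATE;
        req.flr_share  = IPV6_FL_S_EXCL;

        return setsockopt(fd, IPPROTO_IPV6, IPV6_FLOWLABEL_MGR,
                          &req, sizeof(req));
}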
index bc09cb97b8401011c112afe469fd231382387622..d5f7716662dbc361e93ebc443e72ee5fb7343b10 100644 (file)
@@ -105,7 +105,7 @@ static int ip6_finish_output2(struct sock *sk, struct sk_buff *skb)
        }
 
        rcu_read_lock_bh();
-       nexthop = rt6_nexthop((struct rt6_info *)dst);
+       nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
        neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
        if (unlikely(!neigh))
                neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
@@ -459,7 +459,7 @@ int ip6_forward(struct sk_buff *skb)
                else
                        target = &hdr->daddr;
 
-               peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);
+               peer = inet_getpeer_v6(net->ipv6.peers, &hdr->daddr, 1);
 
                /* Limit redirects both by destination (here)
                   and by source (inside ndisc_send_redirect)
@@ -551,7 +551,7 @@ int ip6_fragment(struct sock *sk, struct sk_buff *skb,
        struct frag_hdr *fh;
        unsigned int mtu, hlen, left, len;
        int hroom, troom;
-       __be32 frag_id = 0;
+       __be32 frag_id;
        int ptr, offset = 0, err = 0;
        u8 *prevhdr, nexthdr = 0;
        struct net *net = dev_net(skb_dst(skb)->dev);
@@ -564,18 +564,17 @@ int ip6_fragment(struct sock *sk, struct sk_buff *skb,
        /* We must not fragment if the socket is set to force MTU discovery
          * or if the skb was not generated by a local socket.
         */
-       if (unlikely(!skb->ignore_df && skb->len > mtu) ||
-                    (IP6CB(skb)->frag_max_size &&
-                     IP6CB(skb)->frag_max_size > mtu)) {
-               if (skb->sk && dst_allfrag(skb_dst(skb)))
-                       sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK);
+       if (unlikely(!skb->ignore_df && skb->len > mtu))
+               goto fail_toobig;
 
-               skb->dev = skb_dst(skb)->dev;
-               icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
-               IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
-                             IPSTATS_MIB_FRAGFAILS);
-               kfree_skb(skb);
-               return -EMSGSIZE;
+       if (IP6CB(skb)->frag_max_size) {
+               if (IP6CB(skb)->frag_max_size > mtu)
+                       goto fail_toobig;
+
+               /* don't send fragments larger than what we received */
+               mtu = IP6CB(skb)->frag_max_size;
+               if (mtu < IPV6_MIN_MTU)
+                       mtu = IPV6_MIN_MTU;
        }
 
        if (np && np->frag_size < mtu) {
@@ -584,6 +583,9 @@ int ip6_fragment(struct sock *sk, struct sk_buff *skb,
        }
        mtu -= hlen + sizeof(struct frag_hdr);
 
+       frag_id = ipv6_select_ident(net, &ipv6_hdr(skb)->daddr,
+                                   &ipv6_hdr(skb)->saddr);
+
        if (skb_has_frag_list(skb)) {
                int first_len = skb_pagelen(skb);
                struct sk_buff *frag2;
@@ -632,11 +634,10 @@ int ip6_fragment(struct sock *sk, struct sk_buff *skb,
                skb_reset_network_header(skb);
                memcpy(skb_network_header(skb), tmp_hdr, hlen);
 
-               ipv6_select_ident(net, fh, rt);
                fh->nexthdr = nexthdr;
                fh->reserved = 0;
                fh->frag_off = htons(IP6_MF);
-               frag_id = fh->identification;
+               fh->identification = frag_id;
 
                first_len = skb_pagelen(skb);
                skb->data_len = first_len - skb_headlen(skb);
@@ -778,11 +779,7 @@ slow_path:
                 */
                fh->nexthdr = nexthdr;
                fh->reserved = 0;
-               if (!frag_id) {
-                       ipv6_select_ident(net, fh, rt);
-                       frag_id = fh->identification;
-               } else
-                       fh->identification = frag_id;
+               fh->identification = frag_id;
 
                /*
                 *      Copy a block of the IP datagram.
@@ -815,6 +812,14 @@ slow_path:
        consume_skb(skb);
        return err;
 
+fail_toobig:
+       if (skb->sk && dst_allfrag(skb_dst(skb)))
+               sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK);
+
+       skb->dev = skb_dst(skb)->dev;
+       icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+       err = -EMSGSIZE;
+
 fail:
        IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
                      IPSTATS_MIB_FRAGFAILS);
@@ -936,7 +941,8 @@ static int ip6_dst_lookup_tail(struct sock *sk,
         */
        rt = (struct rt6_info *) *dst;
        rcu_read_lock_bh();
-       n = __ipv6_neigh_lookup_noref(rt->dst.dev, rt6_nexthop(rt));
+       n = __ipv6_neigh_lookup_noref(rt->dst.dev,
+                                     rt6_nexthop(rt, &fl6->daddr));
        err = n && !(n->nud_state & NUD_VALID) ? -EINVAL : 0;
        rcu_read_unlock_bh();
 
@@ -1060,11 +1066,10 @@ static inline int ip6_ufo_append_data(struct sock *sk,
                        int odd, struct sk_buff *skb),
                        void *from, int length, int hh_len, int fragheaderlen,
                        int transhdrlen, int mtu, unsigned int flags,
-                       struct rt6_info *rt)
+                       const struct flowi6 *fl6)
 
 {
        struct sk_buff *skb;
-       struct frag_hdr fhdr;
        int err;
 
        /* There is support for UDP large send offload by network
@@ -1106,8 +1111,9 @@ static inline int ip6_ufo_append_data(struct sock *sk,
        skb_shinfo(skb)->gso_size = (mtu - fragheaderlen -
                                     sizeof(struct frag_hdr)) & ~7;
        skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
-       ipv6_select_ident(sock_net(sk), &fhdr, rt);
-       skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
+       skb_shinfo(skb)->ip6_frag_id = ipv6_select_ident(sock_net(sk),
+                                                        &fl6->daddr,
+                                                        &fl6->saddr);
 
 append:
        return skb_append_datato_frags(sk, skb, getfrag, from,
@@ -1332,7 +1338,7 @@ emsgsize:
            (sk->sk_type == SOCK_DGRAM)) {
                err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
                                          hh_len, fragheaderlen,
-                                         transhdrlen, mtu, flags, rt);
+                                         transhdrlen, mtu, flags, fl6);
                if (err)
                        goto error;
                return 0;
index 5cafd92c231270703af5bf948d131d99c5e9c193..2e67b660118bf7eeaf2f08033aa759190dea0c3a 100644 (file)
@@ -151,7 +151,7 @@ EXPORT_SYMBOL_GPL(ip6_tnl_dst_reset);
 void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst)
 {
        struct rt6_info *rt = (struct rt6_info *) dst;
-       t->dst_cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
+       t->dst_cookie = rt6_get_cookie(rt);
        dst_release(t->dst_cache);
        t->dst_cache = dst;
 }
index bba8903e871fabd73c217efb6250eb743f7d66d8..e1a1136bda7c8f15e589c92e7ea752ddceee70f4 100644 (file)
@@ -19,12 +19,10 @@ int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
        int err;
        struct socket *sock = NULL;
 
-       err = sock_create_kern(AF_INET6, SOCK_DGRAM, 0, &sock);
+       err = sock_create_kern(net, AF_INET6, SOCK_DGRAM, 0, &sock);
        if (err < 0)
                goto error;
 
-       sk_change_net(sock->sk, net);
-
        udp6_addr.sin6_family = AF_INET6;
        memcpy(&udp6_addr.sin6_addr, &cfg->local_ip6,
               sizeof(udp6_addr.sin6_addr));
@@ -55,7 +53,7 @@ int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
 error:
        if (sock) {
                kernel_sock_shutdown(sock, SHUT_RDWR);
-               sk_release_kernel(sock->sk);
+               sock_release(sock);
        }
        *sockp = NULL;
        return err;
diff --git a/net/ipv6/mcast_snoop.c b/net/ipv6/mcast_snoop.c
new file mode 100644 (file)
index 0000000..df8afe5
--- /dev/null
@@ -0,0 +1,213 @@
+/* Copyright (C) 2010: YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
+ * Copyright (C) 2015: Linus Lüssing <linus.luessing@c0d3.blue>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ *
+ * Based on the MLD support added to br_multicast.c by YOSHIFUJI Hideaki.
+ */
+
+#include <linux/skbuff.h>
+#include <net/ipv6.h>
+#include <net/mld.h>
+#include <net/addrconf.h>
+#include <net/ip6_checksum.h>
+
+static int ipv6_mc_check_ip6hdr(struct sk_buff *skb)
+{
+       const struct ipv6hdr *ip6h;
+       unsigned int len;
+       unsigned int offset = skb_network_offset(skb) + sizeof(*ip6h);
+
+       if (!pskb_may_pull(skb, offset))
+               return -EINVAL;
+
+       ip6h = ipv6_hdr(skb);
+
+       if (ip6h->version != 6)
+               return -EINVAL;
+
+       len = offset + ntohs(ip6h->payload_len);
+       if (skb->len < len || len <= offset)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int ipv6_mc_check_exthdrs(struct sk_buff *skb)
+{
+       const struct ipv6hdr *ip6h;
+       int offset;
+       u8 nexthdr;
+       __be16 frag_off;
+
+       ip6h = ipv6_hdr(skb);
+
+       if (ip6h->nexthdr != IPPROTO_HOPOPTS)
+               return -ENOMSG;
+
+       nexthdr = ip6h->nexthdr;
+       offset = skb_network_offset(skb) + sizeof(*ip6h);
+       offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off);
+
+       if (offset < 0)
+               return -EINVAL;
+
+       if (nexthdr != IPPROTO_ICMPV6)
+               return -ENOMSG;
+
+       skb_set_transport_header(skb, offset);
+
+       return 0;
+}
+
+static int ipv6_mc_check_mld_reportv2(struct sk_buff *skb)
+{
+       unsigned int len = skb_transport_offset(skb);
+
+       len += sizeof(struct mld2_report);
+
+       return pskb_may_pull(skb, len) ? 0 : -EINVAL;
+}
+
+static int ipv6_mc_check_mld_query(struct sk_buff *skb)
+{
+       struct mld_msg *mld;
+       unsigned int len = skb_transport_offset(skb);
+
+       /* RFC2710+RFC3810 (MLDv1+MLDv2) require link-local source addresses */
+       if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL))
+               return -EINVAL;
+
+       len += sizeof(struct mld_msg);
+       if (skb->len < len)
+               return -EINVAL;
+
+       /* MLDv1? */
+       if (skb->len != len) {
+               /* or MLDv2? */
+               len += sizeof(struct mld2_query) - sizeof(struct mld_msg);
+               if (skb->len < len || !pskb_may_pull(skb, len))
+                       return -EINVAL;
+       }
+
+       mld = (struct mld_msg *)skb_transport_header(skb);
+
+       /* RFC2710+RFC3810 (MLDv1+MLDv2) require the link-scope all-nodes
+        * multicast address (ff02::1) as the destination of general queries
+        */
+       if (ipv6_addr_any(&mld->mld_mca) &&
+           !ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
+               return -EINVAL;
+
+       return 0;
+}
+
+static int ipv6_mc_check_mld_msg(struct sk_buff *skb)
+{
+       struct mld_msg *mld = (struct mld_msg *)skb_transport_header(skb);
+
+       switch (mld->mld_type) {
+       case ICMPV6_MGM_REDUCTION:
+               /* fall through */
+       case ICMPV6_MGM_REPORT:
+               return 0;
+       case ICMPV6_MLD2_REPORT:
+               return ipv6_mc_check_mld_reportv2(skb);
+       case ICMPV6_MGM_QUERY:
+               return ipv6_mc_check_mld_query(skb);
+       default:
+               return -ENOMSG;
+       }
+}
+
+static inline __sum16 ipv6_mc_validate_checksum(struct sk_buff *skb)
+{
+       return skb_checksum_validate(skb, IPPROTO_ICMPV6, ip6_compute_pseudo);
+}
+
+static int __ipv6_mc_check_mld(struct sk_buff *skb,
+                              struct sk_buff **skb_trimmed)
+{
+       struct sk_buff *skb_chk = NULL;
+       unsigned int transport_len;
+       unsigned int len = skb_transport_offset(skb) + sizeof(struct mld_msg);
+       int ret;
+
+       transport_len = ntohs(ipv6_hdr(skb)->payload_len);
+       transport_len -= skb_transport_offset(skb) - sizeof(struct ipv6hdr);
+
+       skb_get(skb);
+       skb_chk = skb_checksum_trimmed(skb, transport_len,
+                                      ipv6_mc_validate_checksum);
+       if (!skb_chk)
+               return -EINVAL;
+
+       if (!pskb_may_pull(skb_chk, len)) {
+               kfree_skb(skb_chk);
+               return -EINVAL;
+       }
+
+       ret = ipv6_mc_check_mld_msg(skb_chk);
+       if (ret) {
+               kfree_skb(skb_chk);
+               return ret;
+       }
+
+       if (skb_trimmed)
+               *skb_trimmed = skb_chk;
+       else
+               kfree_skb(skb_chk);
+
+       return 0;
+}
+
+/**
+ * ipv6_mc_check_mld - checks whether this is a sane MLD packet
+ * @skb: the skb to validate
+ * @skb_trimmed: to store an skb pointer trimmed to IPv6 packet tail (optional)
+ *
+ * Checks whether an IPv6 packet is a valid MLD packet. If so, sets the
+ * skb's network and transport headers accordingly and returns zero.
+ *
+ * -EINVAL: A broken packet was detected, i.e. it violates some internet
+ *  standard
+ * -ENOMSG: IP header validation succeeded but it is not an MLD packet.
+ * -ENOMEM: A memory allocation failure happened.
+ *
+ * Optionally, a pointer to an skb may be passed via skb_trimmed (or set
+ * it to NULL): after an MLD packet has been parsed successfully, it will
+ * point to an skb whose tail is aligned to the IP packet end. This might
+ * either be the originally provided skb or a trimmed, cloned version if
+ * the skb frame had data beyond the IP packet. A cloned skb allows us
+ * to leave the original skb and its full frame unchanged (which might be
+ * desirable for layer 2 frame jugglers).
+ *
+ * The caller needs to release a reference count from any returned skb_trimmed.
+ */
+int ipv6_mc_check_mld(struct sk_buff *skb, struct sk_buff **skb_trimmed)
+{
+       int ret;
+
+       ret = ipv6_mc_check_ip6hdr(skb);
+       if (ret < 0)
+               return ret;
+
+       ret = ipv6_mc_check_exthdrs(skb);
+       if (ret < 0)
+               return ret;
+
+       return __ipv6_mc_check_mld(skb, skb_trimmed);
+}
+EXPORT_SYMBOL(ipv6_mc_check_mld);
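
For completeness, a hedged kernel-side sketch of how a snooping consumer (a bridge, say) would drive this helper under the contract documented above; snoop_mld() and the parsing step are illustrative placeholders:

static void snoop_mld(struct sk_buff *skb)
{
        struct sk_buff *skb_trimmed = NULL;

        switch (ipv6_mc_check_mld(skb, &skb_trimmed)) {
        case 0:
                break;          /* valid MLD, headers already set up */
        case -ENOMSG:
                return;         /* sane IPv6, just not an MLD packet */
        default:
                return;         /* broken packet or allocation failure */
        }

        /* ... parse the MLD message from skb_trimmed, whose tail is
         * aligned to the IP packet end ... */

        if (skb_trimmed)
                kfree_skb(skb_trimmed); /* drop the reference taken for us */
}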
index 96f153c0846b7abcd9b1af995c49067efbe7064d..0a05b35a90fc946dc0de3ae76e4a682ca4438932 100644 (file)
@@ -1506,7 +1506,7 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
                          "Redirect: destination is not a neighbour\n");
                goto release;
        }
-       peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);
+       peer = inet_getpeer_v6(net->ipv6.peers, &ipv6_hdr(skb)->saddr, 1);
        ret = inet_peer_xrlim_allow(peer, 1*HZ);
        if (peer)
                inet_putpeer(peer);
index 62f5b0d0bc9bfbf19940ba0c70ef9da464bf467f..cdd085f8b77011644e502c73da7b43188a267817 100644 (file)
@@ -1459,7 +1459,6 @@ static int
 compat_find_calc_match(struct xt_entry_match *m,
                       const char *name,
                       const struct ip6t_ip6 *ipv6,
-                      unsigned int hookmask,
                       int *size)
 {
        struct xt_match *match;
@@ -1528,8 +1527,7 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
        entry_offset = (void *)e - (void *)base;
        j = 0;
        xt_ematch_foreach(ematch, e) {
-               ret = compat_find_calc_match(ematch, name,
-                                            &e->ipv6, e->comefrom, &off);
+               ret = compat_find_calc_match(ematch, name, &e->ipv6, &off);
                if (ret != 0)
                        goto release_matches;
                ++j;
index 85892af5736491fef3978bb8b195045e8e6d2e2c..21678acd452165fae8a2c0b7c007ed1daa407344 100644 (file)
@@ -10,7 +10,8 @@
 #include <net/secure_seq.h>
 
 static u32 __ipv6_select_ident(struct net *net, u32 hashrnd,
-                              struct in6_addr *dst, struct in6_addr *src)
+                              const struct in6_addr *dst,
+                              const struct in6_addr *src)
 {
        u32 hash, id;
 
@@ -60,17 +61,17 @@ void ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb)
 }
 EXPORT_SYMBOL_GPL(ipv6_proxy_select_ident);
 
-void ipv6_select_ident(struct net *net, struct frag_hdr *fhdr,
-                      struct rt6_info *rt)
+__be32 ipv6_select_ident(struct net *net,
+                        const struct in6_addr *daddr,
+                        const struct in6_addr *saddr)
 {
        static u32 ip6_idents_hashrnd __read_mostly;
        u32 id;
 
        net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd));
 
-       id = __ipv6_select_ident(net, ip6_idents_hashrnd, &rt->rt6i_dst.addr,
-                                &rt->rt6i_src.addr);
-       fhdr->identification = htonl(id);
+       id = __ipv6_select_ident(net, ip6_idents_hashrnd, daddr, saddr);
+       return htonl(id);
 }
 EXPORT_SYMBOL(ipv6_select_ident);
 
index 8072bd4139b7576a797bdebd6c3c5b75c8412582..ca4700cb26c4feec258c8e5034389522125b113e 100644 (file)
@@ -865,6 +865,9 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
                fl6.flowi6_oif = np->ucast_oif;
        security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
 
+       if (inet->hdrincl)
+               fl6.flowi6_flags |= FLOWI_FLAG_KNOWN_NH;
+
        dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
        if (IS_ERR(dst)) {
                err = PTR_ERR(dst);
@@ -1324,13 +1327,7 @@ static struct inet_protosw rawv6_protosw = {
 
 int __init rawv6_init(void)
 {
-       int ret;
-
-       ret = inet6_register_protosw(&rawv6_protosw);
-       if (ret)
-               goto out;
-out:
-       return ret;
+       return inet6_register_protosw(&rawv6_protosw);
 }
 
 void rawv6_exit(void)
index c73ae5039e46d3811d60bf5df9e9482d966a4966..1a1122a6bbf5208481f81f1e2643cbc41ed2e7e9 100644 (file)
@@ -72,8 +72,7 @@ enum rt6_nud_state {
        RT6_NUD_SUCCEED = 1
 };
 
-static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
-                                   const struct in6_addr *dest);
+static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort);
 static struct dst_entry        *ip6_dst_check(struct dst_entry *dst, u32 cookie);
 static unsigned int     ip6_default_advmss(const struct dst_entry *dst);
 static unsigned int     ip6_mtu(const struct dst_entry *dst);
@@ -92,6 +91,7 @@ static void           ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
                                           struct sk_buff *skb, u32 mtu);
 static void            rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
                                        struct sk_buff *skb);
+static void            rt6_dst_from_metrics_check(struct rt6_info *rt);
 static int rt6_score_route(struct rt6_info *rt, int oif, int strict);
 
 #ifdef CONFIG_IPV6_ROUTE_INFO
@@ -104,65 +104,82 @@ static struct rt6_info *rt6_get_route_info(struct net *net,
                                           const struct in6_addr *gwaddr, int ifindex);
 #endif
 
-static void rt6_bind_peer(struct rt6_info *rt, int create)
+struct uncached_list {
+       spinlock_t              lock;
+       struct list_head        head;
+};
+
+static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);
+
+static void rt6_uncached_list_add(struct rt6_info *rt)
 {
-       struct inet_peer_base *base;
-       struct inet_peer *peer;
+       struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list);
 
-       base = inetpeer_base_ptr(rt->_rt6i_peer);
-       if (!base)
-               return;
+       rt->dst.flags |= DST_NOCACHE;
+       rt->rt6i_uncached_list = ul;
+
+       spin_lock_bh(&ul->lock);
+       list_add_tail(&rt->rt6i_uncached, &ul->head);
+       spin_unlock_bh(&ul->lock);
+}
 
-       peer = inet_getpeer_v6(base, &rt->rt6i_dst.addr, create);
-       if (peer) {
-               if (!rt6_set_peer(rt, peer))
-                       inet_putpeer(peer);
+static void rt6_uncached_list_del(struct rt6_info *rt)
+{
+       if (!list_empty(&rt->rt6i_uncached)) {
+               struct uncached_list *ul = rt->rt6i_uncached_list;
+
+               spin_lock_bh(&ul->lock);
+               list_del(&rt->rt6i_uncached);
+               spin_unlock_bh(&ul->lock);
        }
 }
 
-static struct inet_peer *__rt6_get_peer(struct rt6_info *rt, int create)
+static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev)
 {
-       if (rt6_has_peer(rt))
-               return rt6_peer_ptr(rt);
+       struct net_device *loopback_dev = net->loopback_dev;
+       int cpu;
+
+       for_each_possible_cpu(cpu) {
+               struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
+               struct rt6_info *rt;
+
+               spin_lock_bh(&ul->lock);
+               list_for_each_entry(rt, &ul->head, rt6i_uncached) {
+                       struct inet6_dev *rt_idev = rt->rt6i_idev;
+                       struct net_device *rt_dev = rt->dst.dev;
+
+                       if (rt_idev && (rt_idev->dev == dev || !dev) &&
+                           rt_idev->dev != loopback_dev) {
+                               rt->rt6i_idev = in6_dev_get(loopback_dev);
+                               in6_dev_put(rt_idev);
+                       }
 
-       rt6_bind_peer(rt, create);
-       return (rt6_has_peer(rt) ? rt6_peer_ptr(rt) : NULL);
+                       if (rt_dev && (rt_dev == dev || !dev) &&
+                           rt_dev != loopback_dev) {
+                               rt->dst.dev = loopback_dev;
+                               dev_hold(rt->dst.dev);
+                               dev_put(rt_dev);
+                       }
+               }
+               spin_unlock_bh(&ul->lock);
+       }
 }
 
-static struct inet_peer *rt6_get_peer_create(struct rt6_info *rt)
+static u32 *rt6_pcpu_cow_metrics(struct rt6_info *rt)
 {
-       return __rt6_get_peer(rt, 1);
+       return dst_metrics_write_ptr(rt->dst.from);
 }
 
 static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
 {
-       struct rt6_info *rt = (struct rt6_info *) dst;
-       struct inet_peer *peer;
-       u32 *p = NULL;
+       struct rt6_info *rt = (struct rt6_info *)dst;
 
-       if (!(rt->dst.flags & DST_HOST))
+       if (rt->rt6i_flags & RTF_PCPU)
+               return rt6_pcpu_cow_metrics(rt);
+       else if (rt->rt6i_flags & RTF_CACHE)
+               return NULL;
+       else
                return dst_cow_metrics_generic(dst, old);
-
-       peer = rt6_get_peer_create(rt);
-       if (peer) {
-               u32 *old_p = __DST_METRICS_PTR(old);
-               unsigned long prev, new;
-
-               p = peer->metrics;
-               if (inet_metrics_new(peer) ||
-                   (old & DST_METRICS_FORCE_OVERWRITE))
-                       memcpy(p, old_p, sizeof(u32) * RTAX_MAX);
-
-               new = (unsigned long) p;
-               prev = cmpxchg(&dst->_metrics, old, new);
-
-               if (prev != old) {
-                       p = __DST_METRICS_PTR(prev);
-                       if (prev & DST_METRICS_READ_ONLY)
-                               p = NULL;
-               }
-       }
-       return p;
 }
 
 static inline const void *choose_neigh_daddr(struct rt6_info *rt,
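
rt6_uncached_list_add()/rt6_uncached_list_del() above shard DST_NOCACHE routes across per-CPU lists so that teardown paths such as rt6_uncached_list_flush_dev() can find every route the FIB tree does not own. A rough userspace model of the sharding idea, with a fixed array and pthread mutexes standing in for DEFINE_PER_CPU_ALIGNED and spin_lock_bh() (deletion would take the same per-shard lock):

#include <pthread.h>
#include <stdio.h>

#define NSHARDS 4                       /* stands in for the number of CPUs */

struct node {
        struct node *prev, *next;
        int id;
};

struct uncached_list {
        pthread_mutex_t lock;
        struct node head;               /* circular doubly linked list */
};

static struct uncached_list shards[NSHARDS];

static void list_add_tail(struct node *n, struct node *h)
{
        n->prev = h->prev;
        n->next = h;
        h->prev->next = n;
        h->prev = n;
}

/* add to "this CPU's" shard; the kernel uses raw_cpu_ptr() instead */
static void uncached_add(struct node *n, int cpu)
{
        struct uncached_list *ul = &shards[cpu % NSHARDS];

        pthread_mutex_lock(&ul->lock);
        list_add_tail(n, &ul->head);
        pthread_mutex_unlock(&ul->lock);
}

/* walk every shard, like rt6_uncached_list_flush_dev() does at ifdown */
static void uncached_flush(void)
{
        for (int cpu = 0; cpu < NSHARDS; cpu++) {
                struct uncached_list *ul = &shards[cpu];

                pthread_mutex_lock(&ul->lock);
                for (struct node *n = ul->head.next; n != &ul->head; n = n->next)
                        printf("shard %d: node %d\n", cpu, n->id);
                pthread_mutex_unlock(&ul->lock);
        }
}

int main(void)
{
        struct node a = { .id = 1 }, b = { .id = 2 };

        for (int i = 0; i < NSHARDS; i++) {
                pthread_mutex_init(&shards[i].lock, NULL);
                shards[i].head.prev = shards[i].head.next = &shards[i].head;
        }
        uncached_add(&a, 0);
        uncached_add(&b, 3);
        uncached_flush();
        return 0;
}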
@@ -299,10 +316,10 @@ static const struct rt6_info ip6_blk_hole_entry_template = {
 #endif
 
 /* allocate dst with ip6_dst_ops */
-static inline struct rt6_info *ip6_dst_alloc(struct net *net,
-                                            struct net_device *dev,
-                                            int flags,
-                                            struct fib6_table *table)
+static struct rt6_info *__ip6_dst_alloc(struct net *net,
+                                       struct net_device *dev,
+                                       int flags,
+                                       struct fib6_table *table)
 {
        struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
                                        0, DST_OBSOLETE_FORCE_CHK, flags);
@@ -311,21 +328,54 @@ static inline struct rt6_info *ip6_dst_alloc(struct net *net,
                struct dst_entry *dst = &rt->dst;
 
                memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
-               rt6_init_peer(rt, table ? &table->tb6_peers : net->ipv6.peers);
                INIT_LIST_HEAD(&rt->rt6i_siblings);
+               INIT_LIST_HEAD(&rt->rt6i_uncached);
+       }
+       return rt;
+}
+
+static struct rt6_info *ip6_dst_alloc(struct net *net,
+                                     struct net_device *dev,
+                                     int flags,
+                                     struct fib6_table *table)
+{
+       struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags, table);
+
+       if (rt) {
+               rt->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_ATOMIC);
+               if (rt->rt6i_pcpu) {
+                       int cpu;
+
+                       for_each_possible_cpu(cpu) {
+                               struct rt6_info **p;
+
+                               p = per_cpu_ptr(rt->rt6i_pcpu, cpu);
+                               /* no one shares rt */
+                               *p = NULL;
+                       }
+               } else {
+                       dst_destroy((struct dst_entry *)rt);
+                       return NULL;
+               }
        }
+
        return rt;
 }
 
 static void ip6_dst_destroy(struct dst_entry *dst)
 {
        struct rt6_info *rt = (struct rt6_info *)dst;
-       struct inet6_dev *idev = rt->rt6i_idev;
        struct dst_entry *from = dst->from;
+       struct inet6_dev *idev;
+
+       dst_destroy_metrics_generic(dst);
 
-       if (!(rt->dst.flags & DST_HOST))
-               dst_destroy_metrics_generic(dst);
+       if (rt->rt6i_pcpu)
+               free_percpu(rt->rt6i_pcpu);
 
+       rt6_uncached_list_del(rt);
+
+       idev = rt->rt6i_idev;
        if (idev) {
                rt->rt6i_idev = NULL;
                in6_dev_put(idev);
@@ -333,11 +383,6 @@ static void ip6_dst_destroy(struct dst_entry *dst)
 
        dst->from = NULL;
        dst_release(from);
-
-       if (rt6_has_peer(rt)) {
-               struct inet_peer *peer = rt6_peer_ptr(rt);
-               inet_putpeer(peer);
-       }
 }
 
 static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
@@ -652,15 +697,33 @@ static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
                                     u32 metric, int oif, int strict,
                                     bool *do_rr)
 {
-       struct rt6_info *rt, *match;
+       struct rt6_info *rt, *match, *cont;
        int mpri = -1;
 
        match = NULL;
-       for (rt = rr_head; rt && rt->rt6i_metric == metric;
-            rt = rt->dst.rt6_next)
+       cont = NULL;
+       for (rt = rr_head; rt; rt = rt->dst.rt6_next) {
+               if (rt->rt6i_metric != metric) {
+                       cont = rt;
+                       break;
+               }
+
                match = find_match(rt, oif, strict, &mpri, match, do_rr);
-       for (rt = fn->leaf; rt && rt != rr_head && rt->rt6i_metric == metric;
-            rt = rt->dst.rt6_next)
+       }
+
+       for (rt = fn->leaf; rt && rt != rr_head; rt = rt->dst.rt6_next) {
+               if (rt->rt6i_metric != metric) {
+                       cont = rt;
+                       break;
+               }
+
+               match = find_match(rt, oif, strict, &mpri, match, do_rr);
+       }
+
+       if (match || !cont)
+               return match;
+
+       for (rt = cont; rt; rt = rt->dst.rt6_next)
                match = find_match(rt, oif, strict, &mpri, match, do_rr);
 
        return match;
@@ -694,6 +757,11 @@ static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict)
        return match ? match : net->ipv6.ip6_null_entry;
 }
 
+static bool rt6_is_gw_or_nonexthop(const struct rt6_info *rt)
+{
+       return (rt->rt6i_flags & (RTF_NONEXTHOP | RTF_GATEWAY));
+}
+
 #ifdef CONFIG_IPV6_ROUTE_INFO
 int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
                  const struct in6_addr *gwaddr)
@@ -872,9 +940,9 @@ int ip6_ins_rt(struct rt6_info *rt)
        return __ip6_ins_rt(rt, &info, &mxc);
 }
 
-static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort,
-                                     const struct in6_addr *daddr,
-                                     const struct in6_addr *saddr)
+static struct rt6_info *ip6_rt_cache_alloc(struct rt6_info *ort,
+                                          const struct in6_addr *daddr,
+                                          const struct in6_addr *saddr)
 {
        struct rt6_info *rt;
 
@@ -882,15 +950,26 @@ static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort,
         *      Clone the route.
         */
 
-       rt = ip6_rt_copy(ort, daddr);
+       if (ort->rt6i_flags & (RTF_CACHE | RTF_PCPU))
+               ort = (struct rt6_info *)ort->dst.from;
 
-       if (rt) {
+       rt = __ip6_dst_alloc(dev_net(ort->dst.dev), ort->dst.dev,
+                            0, ort->rt6i_table);
+
+       if (!rt)
+               return NULL;
+
+       ip6_rt_copy_init(rt, ort);
+       rt->rt6i_flags |= RTF_CACHE;
+       rt->rt6i_metric = 0;
+       rt->dst.flags |= DST_HOST;
+       rt->rt6i_dst.addr = *daddr;
+       rt->rt6i_dst.plen = 128;
+
+       if (!rt6_is_gw_or_nonexthop(ort)) {
                if (ort->rt6i_dst.plen != 128 &&
                    ipv6_addr_equal(&ort->rt6i_dst.addr, daddr))
                        rt->rt6i_flags |= RTF_ANYCAST;
-
-               rt->rt6i_flags |= RTF_CACHE;
-
 #ifdef CONFIG_IPV6_SUBTREES
                if (rt->rt6i_src.plen && saddr) {
                        rt->rt6i_src.addr = *saddr;
@@ -902,30 +981,65 @@ static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort,
        return rt;
 }
 
-static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort,
-                                       const struct in6_addr *daddr)
+static struct rt6_info *ip6_rt_pcpu_alloc(struct rt6_info *rt)
 {
-       struct rt6_info *rt = ip6_rt_copy(ort, daddr);
+       struct rt6_info *pcpu_rt;
 
-       if (rt)
-               rt->rt6i_flags |= RTF_CACHE;
-       return rt;
+       pcpu_rt = __ip6_dst_alloc(dev_net(rt->dst.dev),
+                                 rt->dst.dev, rt->dst.flags,
+                                 rt->rt6i_table);
+
+       if (!pcpu_rt)
+               return NULL;
+       ip6_rt_copy_init(pcpu_rt, rt);
+       pcpu_rt->rt6i_protocol = rt->rt6i_protocol;
+       pcpu_rt->rt6i_flags |= RTF_PCPU;
+       return pcpu_rt;
+}
+
+/* It should be called with read_lock_bh(&tb6_lock) acquired */
+static struct rt6_info *rt6_get_pcpu_route(struct rt6_info *rt)
+{
+       struct rt6_info *pcpu_rt, *prev, **p;
+
+       p = this_cpu_ptr(rt->rt6i_pcpu);
+       pcpu_rt = *p;
+
+       if (pcpu_rt)
+               goto done;
+
+       pcpu_rt = ip6_rt_pcpu_alloc(rt);
+       if (!pcpu_rt) {
+               struct net *net = dev_net(rt->dst.dev);
+
+               pcpu_rt = net->ipv6.ip6_null_entry;
+               goto done;
+       }
+
+       prev = cmpxchg(p, NULL, pcpu_rt);
+       if (prev) {
+               /* If someone did it before us, return prev instead */
+               dst_destroy(&pcpu_rt->dst);
+               pcpu_rt = prev;
+       }
+
+done:
+       dst_hold(&pcpu_rt->dst);
+       rt6_dst_from_metrics_check(pcpu_rt);
+       return pcpu_rt;
 }
 
 static struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, int oif,
                                      struct flowi6 *fl6, int flags)
 {
        struct fib6_node *fn, *saved_fn;
-       struct rt6_info *rt, *nrt;
+       struct rt6_info *rt;
        int strict = 0;
-       int attempts = 3;
-       int err;
 
        strict |= flags & RT6_LOOKUP_F_IFACE;
        if (net->ipv6.devconf_all->forwarding == 0)
                strict |= RT6_LOOKUP_F_REACHABLE;
 
-redo_fib6_lookup_lock:
        read_lock_bh(&table->tb6_lock);
 
        fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
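
rt6_get_pcpu_route() above publishes a freshly built clone into the per-CPU slot with cmpxchg() and, when another context wins the race, destroys its own copy and uses the winner. A hedged userspace sketch of that install-once pattern using GCC's __atomic builtins (the single slot and route_alloc() are stand-ins for this_cpu_ptr() and ip6_rt_pcpu_alloc()):

#include <stdio.h>
#include <stdlib.h>

struct route { int id; };

static struct route *pcpu_slot;         /* one slot standing in for this_cpu_ptr() */

static struct route *route_alloc(int id)
{
        struct route *r = malloc(sizeof(*r));

        if (r)
                r->id = id;
        return r;
}

static struct route *get_pcpu_route(int id)
{
        struct route *cur = __atomic_load_n(&pcpu_slot, __ATOMIC_ACQUIRE);
        struct route *expected = NULL;
        struct route *nrt;

        if (cur)
                return cur;             /* fast path: already cached */

        nrt = route_alloc(id);
        if (!nrt)
                return NULL;            /* the kernel falls back to ip6_null_entry */

        if (!__atomic_compare_exchange_n(&pcpu_slot, &expected, nrt, 0,
                                         __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE)) {
                /* someone installed first: drop ours, use theirs */
                free(nrt);
                return expected;
        }
        return nrt;
}

int main(void)
{
        struct route *r1 = get_pcpu_route(1);
        struct route *r2 = get_pcpu_route(2);

        printf("same slot: %s (id=%d)\n", r1 == r2 ? "yes" : "no", r1->id);
        free(r1);
        return 0;
}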
@@ -944,51 +1058,52 @@ redo_rt6_select:
                        strict &= ~RT6_LOOKUP_F_REACHABLE;
                        fn = saved_fn;
                        goto redo_rt6_select;
-               } else {
-                       dst_hold(&rt->dst);
-                       read_unlock_bh(&table->tb6_lock);
-                       goto out2;
                }
        }
 
-       dst_hold(&rt->dst);
-       read_unlock_bh(&table->tb6_lock);
 
-       if (rt->rt6i_flags & RTF_CACHE)
-               goto out2;
+       if (rt == net->ipv6.ip6_null_entry || (rt->rt6i_flags & RTF_CACHE)) {
+               dst_use(&rt->dst, jiffies);
+               read_unlock_bh(&table->tb6_lock);
 
-       if (!(rt->rt6i_flags & (RTF_NONEXTHOP | RTF_GATEWAY)))
-               nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr);
-       else if (!(rt->dst.flags & DST_HOST))
-               nrt = rt6_alloc_clone(rt, &fl6->daddr);
-       else
-               goto out2;
+               rt6_dst_from_metrics_check(rt);
+               return rt;
+       } else if (unlikely((fl6->flowi6_flags & FLOWI_FLAG_KNOWN_NH) &&
+                           !(rt->rt6i_flags & RTF_GATEWAY))) {
+               /* Create a RTF_CACHE clone which will not be
+                * owned by the fib6 tree.  It is for the special case where
+                * the daddr in the skb during the neighbor look-up is different
+                * from the fl6->daddr used to look up the route here.
+                */
 
-       ip6_rt_put(rt);
-       rt = nrt ? : net->ipv6.ip6_null_entry;
+               struct rt6_info *uncached_rt;
 
-       dst_hold(&rt->dst);
-       if (nrt) {
-               err = ip6_ins_rt(nrt);
-               if (!err)
-                       goto out2;
-       }
+               dst_use(&rt->dst, jiffies);
+               read_unlock_bh(&table->tb6_lock);
 
-       if (--attempts <= 0)
-               goto out2;
+               uncached_rt = ip6_rt_cache_alloc(rt, &fl6->daddr, NULL);
+               dst_release(&rt->dst);
 
-       /*
-        * Race condition! In the gap, when table->tb6_lock was
-        * released someone could insert this route.  Relookup.
-        */
-       ip6_rt_put(rt);
-       goto redo_fib6_lookup_lock;
+               if (uncached_rt)
+                       rt6_uncached_list_add(uncached_rt);
+               else
+                       uncached_rt = net->ipv6.ip6_null_entry;
 
-out2:
-       rt->dst.lastuse = jiffies;
-       rt->dst.__use++;
+               dst_hold(&uncached_rt->dst);
+               return uncached_rt;
 
-       return rt;
+       } else {
+               /* Get a percpu copy */
+
+               struct rt6_info *pcpu_rt;
+
+               rt->dst.lastuse = jiffies;
+               rt->dst.__use++;
+               pcpu_rt = rt6_get_pcpu_route(rt);
+               read_unlock_bh(&table->tb6_lock);
+
+               return pcpu_rt;
+       }
 }
 
 static struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *table,
@@ -1059,7 +1174,6 @@ struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_ori
                new = &rt->dst;
 
                memset(new + 1, 0, sizeof(*rt) - sizeof(*new));
-               rt6_init_peer(rt, net->ipv6.peers);
 
                new->__use = 1;
                new->input = dst_discard;
@@ -1093,6 +1207,33 @@ struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_ori
  *     Destination cache support functions
  */
 
+static void rt6_dst_from_metrics_check(struct rt6_info *rt)
+{
+       if (rt->dst.from &&
+           dst_metrics_ptr(&rt->dst) != dst_metrics_ptr(rt->dst.from))
+               dst_init_metrics(&rt->dst, dst_metrics_ptr(rt->dst.from), true);
+}
+
+static struct dst_entry *rt6_check(struct rt6_info *rt, u32 cookie)
+{
+       if (!rt->rt6i_node || (rt->rt6i_node->fn_sernum != cookie))
+               return NULL;
+
+       if (rt6_check_expired(rt))
+               return NULL;
+
+       return &rt->dst;
+}
+
+static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt, u32 cookie)
+{
+       if (rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
+           rt6_check((struct rt6_info *)(rt->dst.from), cookie))
+               return &rt->dst;
+       else
+               return NULL;
+}
+
 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
 {
        struct rt6_info *rt;
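
rt6_check() and rt6_dst_from_check() above split cache validation in two: a route is valid only while its FIB node's serial number still matches the cookie saved at lookup time, and PCPU/uncached clones validate through their parent via dst.from. The serial-number cookie idea in miniature (field and function names simplified for illustration):

#include <stdio.h>

struct fib_node { unsigned int sernum; };

struct route {
        struct fib_node *node;          /* NULL once removed from the tree */
        struct route *from;             /* parent for PCPU/uncached clones */
};

/* valid only while the tree has not changed under us */
static struct route *check(struct route *rt, unsigned int cookie)
{
        if (!rt->node || rt->node->sernum != cookie)
                return NULL;
        return rt;
}

/* clones carry no node of their own; validate the parent instead */
static struct route *check_from(struct route *rt, unsigned int cookie)
{
        return (rt->from && check(rt->from, cookie)) ? rt : NULL;
}

int main(void)
{
        struct fib_node n = { .sernum = 7 };
        struct route parent = { .node = &n }, clone = { .from = &parent };
        unsigned int cookie = n.sernum; /* saved at lookup time */

        printf("valid: %d\n", check_from(&clone, cookie) != NULL);
        n.sernum++;                     /* tree changed, e.g. ip6_ins_rt() bumps it */
        printf("valid: %d\n", check_from(&clone, cookie) != NULL);
        return 0;
}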
@@ -1103,13 +1244,13 @@ static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
         * DST_OBSOLETE_FORCE_CHK which forces validation calls down
         * into this function always.
         */
-       if (!rt->rt6i_node || (rt->rt6i_node->fn_sernum != cookie))
-               return NULL;
 
-       if (rt6_check_expired(rt))
-               return NULL;
+       rt6_dst_from_metrics_check(rt);
 
-       return dst;
+       if ((rt->rt6i_flags & RTF_PCPU) || unlikely(dst->flags & DST_NOCACHE))
+               return rt6_dst_from_check(rt, cookie);
+       else
+               return rt6_check(rt, cookie);
 }
 
 static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
@@ -1148,24 +1289,63 @@ static void ip6_link_failure(struct sk_buff *skb)
        }
 }
 
-static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
-                              struct sk_buff *skb, u32 mtu)
+static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
+{
+       struct net *net = dev_net(rt->dst.dev);
+
+       rt->rt6i_flags |= RTF_MODIFIED;
+       rt->rt6i_pmtu = mtu;
+       rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
+}
+
+static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
+                                const struct ipv6hdr *iph, u32 mtu)
 {
        struct rt6_info *rt6 = (struct rt6_info *)dst;
 
-       dst_confirm(dst);
-       if (mtu < dst_mtu(dst) && rt6->rt6i_dst.plen == 128) {
-               struct net *net = dev_net(dst->dev);
+       if (rt6->rt6i_flags & RTF_LOCAL)
+               return;
 
-               rt6->rt6i_flags |= RTF_MODIFIED;
-               if (mtu < IPV6_MIN_MTU)
-                       mtu = IPV6_MIN_MTU;
+       dst_confirm(dst);
+       mtu = max_t(u32, mtu, IPV6_MIN_MTU);
+       if (mtu >= dst_mtu(dst))
+               return;
 
-               dst_metric_set(dst, RTAX_MTU, mtu);
-               rt6_update_expires(rt6, net->ipv6.sysctl.ip6_rt_mtu_expires);
+       if (rt6->rt6i_flags & RTF_CACHE) {
+               rt6_do_update_pmtu(rt6, mtu);
+       } else {
+               const struct in6_addr *daddr, *saddr;
+               struct rt6_info *nrt6;
+
+               if (iph) {
+                       daddr = &iph->daddr;
+                       saddr = &iph->saddr;
+               } else if (sk) {
+                       daddr = &sk->sk_v6_daddr;
+                       saddr = &inet6_sk(sk)->saddr;
+               } else {
+                       return;
+               }
+               nrt6 = ip6_rt_cache_alloc(rt6, daddr, saddr);
+               if (nrt6) {
+                       rt6_do_update_pmtu(nrt6, mtu);
+
+                       /* ip6_ins_rt(nrt6) will bump the
+                        * rt6->rt6i_node->fn_sernum
+                        * which will fail the next rt6_check() and
+                        * invalidate the sk->sk_dst_cache.
+                        */
+                       ip6_ins_rt(nrt6);
+               }
        }
 }
 
+static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
+                              struct sk_buff *skb, u32 mtu)
+{
+       __ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu);
+}
+
 void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
                     int oif, u32 mark)
 {
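
__ip6_rt_update_pmtu() above clamps the reported MTU to IPV6_MIN_MTU, ignores anything that would grow the path MTU, updates RTF_CACHE entries in place, and otherwise spawns a cached clone so the shared route is never written to. The branch structure as a compact userspace sketch (the flags and the clone step are reduced to stand-ins):

#include <stdio.h>

#define IPV6_MIN_MTU 1280u

struct route {
        unsigned int mtu;
        int is_cache;                   /* RTF_CACHE stand-in */
};

static void update_pmtu(struct route *rt, unsigned int mtu)
{
        if (mtu < IPV6_MIN_MTU)
                mtu = IPV6_MIN_MTU;     /* never go below the IPv6 minimum */
        if (mtu >= rt->mtu)
                return;                 /* PMTU only ever shrinks a path */

        if (rt->is_cache) {
                rt->mtu = mtu;          /* exception entry: update in place */
        } else {
                /* shared route: would clone (ip6_rt_cache_alloc) and store
                 * the new MTU on the clone, leaving rt itself untouched */
                printf("clone with mtu=%u\n", mtu);
        }
}

int main(void)
{
        struct route cached = { .mtu = 1500, .is_cache = 1 };
        struct route shared = { .mtu = 1500, .is_cache = 0 };

        update_pmtu(&cached, 1000);     /* clamped to 1280, stored in place */
        update_pmtu(&shared, 1400);     /* triggers the clone path */
        printf("cached mtu=%u\n", cached.mtu);
        return 0;
}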
@@ -1182,7 +1362,7 @@ void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
 
        dst = ip6_route_output(net, NULL, &fl6);
        if (!dst->error)
-               ip6_rt_update_pmtu(dst, NULL, skb, ntohl(mtu));
+               __ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu));
        dst_release(dst);
 }
 EXPORT_SYMBOL_GPL(ip6_update_pmtu);
@@ -1341,9 +1521,14 @@ static unsigned int ip6_default_advmss(const struct dst_entry *dst)
 
 static unsigned int ip6_mtu(const struct dst_entry *dst)
 {
+       const struct rt6_info *rt = (const struct rt6_info *)dst;
+       unsigned int mtu = rt->rt6i_pmtu;
        struct inet6_dev *idev;
-       unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
 
+       if (mtu)
+               goto out;
+
+       mtu = dst_metric_raw(dst, RTAX_MTU);
        if (mtu)
                goto out;
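
ip6_mtu() above now consults the per-route PMTU exception (rt6i_pmtu) before the RTAX_MTU metric, with the device MTU as the last resort. The lookup order in miniature (the kernel additionally clamps against idev->cnf.mtu6, omitted here):

#include <stdio.h>

struct route {
        unsigned int pmtu;              /* rt6i_pmtu: learned PMTU, 0 if none */
        unsigned int metric;            /* RTAX_MTU metric, 0 if unset */
        unsigned int dev_mtu;           /* interface MTU as the final fallback */
};

static unsigned int route_mtu(const struct route *rt)
{
        if (rt->pmtu)
                return rt->pmtu;
        if (rt->metric)
                return rt->metric;
        return rt->dev_mtu;
}

int main(void)
{
        struct route rt = { .pmtu = 0, .metric = 0, .dev_mtu = 1500 };

        printf("%u\n", route_mtu(&rt));         /* 1500 */
        rt.pmtu = 1280;
        printf("%u\n", route_mtu(&rt));         /* 1280: exception wins */
        return 0;
}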
 
@@ -1590,10 +1775,8 @@ int ip6_route_add(struct fib6_config *cfg)
 
        ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
        rt->rt6i_dst.plen = cfg->fc_dst_len;
-       if (rt->rt6i_dst.plen == 128) {
+       if (rt->rt6i_dst.plen == 128)
                rt->dst.flags |= DST_HOST;
-               dst_metrics_set_force_overwrite(&rt->dst);
-       }
 
 #ifdef CONFIG_IPV6_SUBTREES
        ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
@@ -1651,6 +1834,16 @@ int ip6_route_add(struct fib6_config *cfg)
                int gwa_type;
 
                gw_addr = &cfg->fc_gateway;
+
+               /* if gw_addr is local we will fail to detect this in case
+                * the address is still TENTATIVE (DAD in progress). rt6_lookup()
+                * will return the already-added prefix route via the interface
+                * that the prefix route was assigned to, possibly non-loopback.
+                */
+               err = -EINVAL;
+               if (ipv6_chk_addr_and_flags(net, gw_addr, NULL, 0, 0))
+                       goto out;
+
                rt->rt6i_gateway = *gw_addr;
                gwa_type = ipv6_addr_type(gw_addr);
 
@@ -1664,7 +1857,6 @@ int ip6_route_add(struct fib6_config *cfg)
                           (SIT, PtP, NBMA NOARP links) it is handy to allow
                           some exceptions. --ANK
                         */
-                       err = -EINVAL;
                        if (!(gwa_type & IPV6_ADDR_UNICAST))
                                goto out;
 
@@ -1785,6 +1977,9 @@ static int ip6_route_del(struct fib6_config *cfg)
 
        if (fn) {
                for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
+                       if ((rt->rt6i_flags & RTF_CACHE) &&
+                           !(cfg->fc_flags & RTF_CACHE))
+                               continue;
                        if (cfg->fc_ifindex &&
                            (!rt->dst.dev ||
                             rt->dst.dev->ifindex != cfg->fc_ifindex))
@@ -1894,7 +2089,7 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
                                     NEIGH_UPDATE_F_ISROUTER))
                     );
 
-       nrt = ip6_rt_copy(rt, &msg->dest);
+       nrt = ip6_rt_cache_alloc(rt, &msg->dest, NULL);
        if (!nrt)
                goto out;
 
@@ -1926,42 +2121,35 @@ out:
  *     Misc support functions
  */
 
-static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
-                                   const struct in6_addr *dest)
+static void rt6_set_from(struct rt6_info *rt, struct rt6_info *from)
 {
-       struct net *net = dev_net(ort->dst.dev);
-       struct rt6_info *rt = ip6_dst_alloc(net, ort->dst.dev, 0,
-                                           ort->rt6i_table);
+       BUG_ON(from->dst.from);
 
-       if (rt) {
-               rt->dst.input = ort->dst.input;
-               rt->dst.output = ort->dst.output;
-               rt->dst.flags |= DST_HOST;
-
-               rt->rt6i_dst.addr = *dest;
-               rt->rt6i_dst.plen = 128;
-               dst_copy_metrics(&rt->dst, &ort->dst);
-               rt->dst.error = ort->dst.error;
-               rt->rt6i_idev = ort->rt6i_idev;
-               if (rt->rt6i_idev)
-                       in6_dev_hold(rt->rt6i_idev);
-               rt->dst.lastuse = jiffies;
-
-               if (ort->rt6i_flags & RTF_GATEWAY)
-                       rt->rt6i_gateway = ort->rt6i_gateway;
-               else
-                       rt->rt6i_gateway = *dest;
-               rt->rt6i_flags = ort->rt6i_flags;
-               rt6_set_from(rt, ort);
-               rt->rt6i_metric = 0;
+       rt->rt6i_flags &= ~RTF_EXPIRES;
+       dst_hold(&from->dst);
+       rt->dst.from = &from->dst;
+       dst_init_metrics(&rt->dst, dst_metrics_ptr(&from->dst), true);
+}
 
+static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort)
+{
+       rt->dst.input = ort->dst.input;
+       rt->dst.output = ort->dst.output;
+       rt->rt6i_dst = ort->rt6i_dst;
+       rt->dst.error = ort->dst.error;
+       rt->rt6i_idev = ort->rt6i_idev;
+       if (rt->rt6i_idev)
+               in6_dev_hold(rt->rt6i_idev);
+       rt->dst.lastuse = jiffies;
+       rt->rt6i_gateway = ort->rt6i_gateway;
+       rt->rt6i_flags = ort->rt6i_flags;
+       rt6_set_from(rt, ort);
+       rt->rt6i_metric = ort->rt6i_metric;
 #ifdef CONFIG_IPV6_SUBTREES
-               memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
+       rt->rt6i_src = ort->rt6i_src;
 #endif
-               memcpy(&rt->rt6i_prefsrc, &ort->rt6i_prefsrc, sizeof(struct rt6key));
-               rt->rt6i_table = ort->rt6i_table;
-       }
-       return rt;
+       rt->rt6i_prefsrc = ort->rt6i_prefsrc;
+       rt->rt6i_table = ort->rt6i_table;
 }
 
 #ifdef CONFIG_IPV6_ROUTE_INFO
@@ -2336,6 +2524,7 @@ void rt6_ifdown(struct net *net, struct net_device *dev)
 
        fib6_clean_all(net, fib6_ifdown, &adn);
        icmp6_clean_all(fib6_ifdown, &adn);
+       rt6_uncached_list_flush_dev(net, dev);
 }
 
 struct rt6_mtu_change_arg {
@@ -2373,11 +2562,20 @@ static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
           PMTU discovery.
         */
        if (rt->dst.dev == arg->dev &&
-           !dst_metric_locked(&rt->dst, RTAX_MTU) &&
-           (dst_mtu(&rt->dst) >= arg->mtu ||
-            (dst_mtu(&rt->dst) < arg->mtu &&
-             dst_mtu(&rt->dst) == idev->cnf.mtu6))) {
-               dst_metric_set(&rt->dst, RTAX_MTU, arg->mtu);
+           !dst_metric_locked(&rt->dst, RTAX_MTU)) {
+               if (rt->rt6i_flags & RTF_CACHE) {
+                       /* For RTF_CACHE with rt6i_pmtu == 0
+                        * (i.e. a redirected route),
+                        * the metrics of its rt->dst.from have already
+                        * been updated.
+                        */
+                       if (rt->rt6i_pmtu && rt->rt6i_pmtu > arg->mtu)
+                               rt->rt6i_pmtu = arg->mtu;
+               } else if (dst_mtu(&rt->dst) >= arg->mtu ||
+                          (dst_mtu(&rt->dst) < arg->mtu &&
+                           dst_mtu(&rt->dst) == idev->cnf.mtu6)) {
+                       dst_metric_set(&rt->dst, RTAX_MTU, arg->mtu);
+               }
        }
        return 0;
 }
@@ -2434,6 +2632,9 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
        if (rtm->rtm_type == RTN_LOCAL)
                cfg->fc_flags |= RTF_LOCAL;
 
+       if (rtm->rtm_flags & RTM_F_CLONED)
+               cfg->fc_flags |= RTF_CACHE;
+
        cfg->fc_nlinfo.portid = NETLINK_CB(skb).portid;
        cfg->fc_nlinfo.nlh = nlh;
        cfg->fc_nlinfo.nl_net = sock_net(skb->sk);
@@ -2608,6 +2809,7 @@ static int rt6_fill_node(struct net *net,
                         int iif, int type, u32 portid, u32 seq,
                         int prefix, int nowait, unsigned int flags)
 {
+       u32 metrics[RTAX_MAX];
        struct rtmsg *rtm;
        struct nlmsghdr *nlh;
        long expires;
@@ -2721,7 +2923,10 @@ static int rt6_fill_node(struct net *net,
                        goto nla_put_failure;
        }
 
-       if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
+       memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
+       if (rt->rt6i_pmtu)
+               metrics[RTAX_MTU - 1] = rt->rt6i_pmtu;
+       if (rtnetlink_put_metrics(skb, metrics) < 0)
                goto nla_put_failure;
 
        if (rt->rt6i_flags & RTF_GATEWAY) {
@@ -3216,6 +3421,7 @@ static struct notifier_block ip6_route_dev_notifier = {
 int __init ip6_route_init(void)
 {
        int ret;
+       int cpu;
 
        ret = -ENOMEM;
        ip6_dst_ops_template.kmem_cachep =
@@ -3275,6 +3481,13 @@ int __init ip6_route_init(void)
        if (ret)
                goto out_register_late_subsys;
 
+       for_each_possible_cpu(cpu) {
+               struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
+
+               INIT_LIST_HEAD(&ul->head);
+               spin_lock_init(&ul->lock);
+       }
+
 out:
        return ret;
 
index 21bc2eb53c57bce6dd0a1073bc77766eabf9ac5f..0909f4e0d53c200f6643883df29682bca8e86fbb 100644 (file)
@@ -41,23 +41,6 @@ static __u16 const msstab[] = {
        9000 - 60,
 };
 
-static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
-                                          struct request_sock *req,
-                                          struct dst_entry *dst)
-{
-       struct inet_connection_sock *icsk = inet_csk(sk);
-       struct sock *child;
-
-       child = icsk->icsk_af_ops->syn_recv_sock(sk, skb, req, dst);
-       if (child) {
-               atomic_set(&req->rsk_refcnt, 1);
-               inet_csk_reqsk_queue_add(sk, req, child);
-       } else {
-               reqsk_free(req);
-       }
-       return child;
-}
-
 static DEFINE_PER_CPU(__u32 [16 + 5 + SHA_WORKSPACE_WORDS],
                      ipv6_cookie_scratch);
 
@@ -264,7 +247,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
        ireq->rcv_wscale = rcv_wscale;
        ireq->ecn_ok = cookie_ecn_ok(&tcp_opt, sock_net(sk), dst);
 
-       ret = get_cookie_sock(sk, skb, req, dst);
+       ret = tcp_get_cookie_sock(sk, skb, req, dst);
 out:
        return ret;
 out_free:
index abcc79f649b34750ee5f25051d5197dd64d91856..4e705add4f187c69b1b11202d47808308e87cf80 100644 (file)
@@ -68,6 +68,13 @@ static struct ctl_table ipv6_table_template[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
+       {
+               .procname       = "flowlabel_state_ranges",
+               .data           = &init_net.ipv6.sysctl.flowlabel_state_ranges,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec
+       },
        { }
 };
 
@@ -109,6 +116,7 @@ static int __net_init ipv6_sysctl_net_init(struct net *net)
        ipv6_table[4].data = &net->ipv6.sysctl.fwmark_reflect;
        ipv6_table[5].data = &net->ipv6.sysctl.idgen_retries;
        ipv6_table[6].data = &net->ipv6.sysctl.idgen_delay;
+       ipv6_table[7].data = &net->ipv6.sysctl.flowlabel_state_ranges;
 
        ipv6_route_table = ipv6_route_sysctl_init(net);
        if (!ipv6_route_table)
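
The new flowlabel_state_ranges entry follows the template pattern above: the template carries init_net pointers, and ipv6_sysctl_net_init() rebinds each entry's data by index for the namespace being set up. A toy model of that rebinding, which also illustrates why the index (here ipv6_table[7]) must track the template order (struct and names reduced for illustration):

#include <stdio.h>

struct ctl_entry {
        const char *procname;
        int *data;                      /* points at the knob's storage */
};

struct netns { int flowlabel_state_ranges; };

/* template with placeholder data pointers, like ipv6_table_template[] */
static const struct ctl_entry template[] = {
        { "flowlabel_state_ranges", NULL },
        { NULL, NULL }
};

/* per-namespace init rebinds entry data by index; the index must match
 * the template order, or the knob silently points at the wrong field */
static void net_init(struct ctl_entry *table, struct netns *net)
{
        table[0].data = &net->flowlabel_state_ranges;
}

int main(void)
{
        struct netns net = { .flowlabel_state_ranges = 1 };
        struct ctl_entry table[2];

        for (int i = 0; i < 2; i++)
                table[i] = template[i];
        net_init(table, &net);
        printf("%s = %d\n", table[0].procname, *table[0].data);
        return 0;
}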
index 3adffb300238ebdaf729871bafbb348e82fbde56..45a7176ed460681558808439f20e1622423f4c32 100644 (file)
@@ -99,8 +99,7 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
                dst_hold(dst);
                sk->sk_rx_dst = dst;
                inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
-               if (rt->rt6i_node)
-                       inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
+               inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
        }
 }
 
@@ -262,7 +261,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
        rt = (struct rt6_info *) dst;
        if (tcp_death_row.sysctl_tw_recycle &&
            !tp->rx_opt.ts_recent_stamp &&
-           ipv6_addr_equal(&rt->rt6i_dst.addr, &sk->sk_v6_daddr))
+           ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
                tcp_fetch_timewait_stamp(sk, dst);
 
        icsk->icsk_ext_hdr_len = 0;
@@ -1251,7 +1250,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
                return 0;
        }
 
-       if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
+       if (tcp_checksum_complete(skb))
                goto csum_err;
 
        if (sk->sk_state == TCP_LISTEN) {
@@ -1421,6 +1420,7 @@ process:
        skb->dev = NULL;
 
        bh_lock_sock_nested(sk);
+       tcp_sk(sk)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
        ret = 0;
        if (!sock_owned_by_user(sk)) {
                if (!tcp_prequeue(sk, skb))
@@ -1442,7 +1442,7 @@ no_tcp_socket:
 
        tcp_v6_fill_cb(skb, hdr, th);
 
-       if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
+       if (tcp_checksum_complete(skb)) {
 csum_error:
                TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
 bad_packet:
@@ -1467,10 +1467,6 @@ do_time_wait:
 
        tcp_v6_fill_cb(skb, hdr, th);
 
-       if (skb->len < (th->doff<<2)) {
-               inet_twsk_put(inet_twsk(sk));
-               goto bad_packet;
-       }
        if (tcp_checksum_complete(skb)) {
                inet_twsk_put(inet_twsk(sk));
                goto csum_error;
index f337a908a76a1145c8b878c224f961d087769785..ed0583c1b9fc2e0033912e2d4c7177a12f17e8b7 100644 (file)
@@ -71,20 +71,12 @@ static int xfrm6_get_tos(const struct flowi *fl)
        return 0;
 }
 
-static void xfrm6_init_dst(struct net *net, struct xfrm_dst *xdst)
-{
-       struct rt6_info *rt = (struct rt6_info *)xdst;
-
-       rt6_init_peer(rt, net->ipv6.peers);
-}
-
 static int xfrm6_init_path(struct xfrm_dst *path, struct dst_entry *dst,
                           int nfheader_len)
 {
        if (dst->ops->family == AF_INET6) {
                struct rt6_info *rt = (struct rt6_info *)dst;
-               if (rt->rt6i_node)
-                       path->path_cookie = rt->rt6i_node->fn_sernum;
+               path->path_cookie = rt6_get_cookie(rt);
        }
 
        path->u.rt6.rt6i_nfheader_len = nfheader_len;
@@ -106,16 +98,13 @@ static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
                return -ENODEV;
        }
 
-       rt6_transfer_peer(&xdst->u.rt6, rt);
-
        /* Sheit... I remember I did this right. Apparently,
         * it was magically lost, so this code needs audit */
        xdst->u.rt6.rt6i_flags = rt->rt6i_flags & (RTF_ANYCAST |
                                                   RTF_LOCAL);
        xdst->u.rt6.rt6i_metric = rt->rt6i_metric;
        xdst->u.rt6.rt6i_node = rt->rt6i_node;
-       if (rt->rt6i_node)
-               xdst->route_cookie = rt->rt6i_node->fn_sernum;
+       xdst->route_cookie = rt6_get_cookie(rt);
        xdst->u.rt6.rt6i_gateway = rt->rt6i_gateway;
        xdst->u.rt6.rt6i_dst = rt->rt6i_dst;
        xdst->u.rt6.rt6i_src = rt->rt6i_src;
@@ -255,10 +244,6 @@ static void xfrm6_dst_destroy(struct dst_entry *dst)
        if (likely(xdst->u.rt6.rt6i_idev))
                in6_dev_put(xdst->u.rt6.rt6i_idev);
        dst_destroy_metrics_generic(dst);
-       if (rt6_has_peer(&xdst->u.rt6)) {
-               struct inet_peer *peer = rt6_peer_ptr(&xdst->u.rt6);
-               inet_putpeer(peer);
-       }
        xfrm_dst_destroy(xdst);
 }
 
@@ -308,7 +293,6 @@ static struct xfrm_policy_afinfo xfrm6_policy_afinfo = {
        .get_saddr =            xfrm6_get_saddr,
        .decode_session =       _decode_session6,
        .get_tos =              xfrm6_get_tos,
-       .init_dst =             xfrm6_init_dst,
        .init_path =            xfrm6_init_path,
        .fill_dst =             xfrm6_fill_dst,
        .blackhole_route =      ip6_blackhole_route,
index 4ea5d7497b5f29ac41a00783c48d1898aa769937..48d0dc89b58de8aae5f128145634070a77d7c281 100644 (file)
@@ -1347,7 +1347,7 @@ static int ipx_create(struct net *net, struct socket *sock, int protocol,
                goto out;
 
        rc = -ENOMEM;
-       sk = sk_alloc(net, PF_IPX, GFP_KERNEL, &ipx_proto);
+       sk = sk_alloc(net, PF_IPX, GFP_KERNEL, &ipx_proto, kern);
        if (!sk)
                goto out;
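
The sk_alloc() calls throughout this series gain a kern argument so sockets created from inside the kernel can be distinguished from user-created ones; each protocol's create() handler simply forwards the flag it received. A minimal model of the plumbing (types and names reduced for illustration):

#include <stdio.h>
#include <stdlib.h>

struct sock {
        int family;
        int kern;                       /* 1 for kernel-internal sockets */
};

/* stand-in for sk_alloc(net, family, prio, proto, kern) */
static struct sock *sk_alloc_stub(int family, int kern)
{
        struct sock *sk = calloc(1, sizeof(*sk));

        if (sk) {
                sk->family = family;
                sk->kern = kern;
        }
        return sk;
}

/* a create() handler forwards its kern flag downward unchanged */
static struct sock *proto_create(int family, int kern)
{
        return sk_alloc_stub(family, kern);
}

int main(void)
{
        struct sock *user_sk = proto_create(10 /* AF_INET6 */, 0);
        struct sock *kern_sk = proto_create(10, 1);

        printf("user kern=%d, kernel kern=%d\n", user_sk->kern, kern_sk->kern);
        free(user_sk);
        free(kern_sk);
        return 0;
}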
 
index ee0ea25c8e7aa9ddd182c0bb284c75a5c0a198bf..fae6822cc367631ca403cc6207f800cf2571dfea 100644 (file)
@@ -1100,7 +1100,7 @@ static int irda_create(struct net *net, struct socket *sock, int protocol,
        }
 
        /* Allocate networking socket */
-       sk = sk_alloc(net, PF_IRDA, GFP_KERNEL, &irda_proto);
+       sk = sk_alloc(net, PF_IRDA, GFP_KERNEL, &irda_proto, kern);
        if (sk == NULL)
                return -ENOMEM;
 
index 0c4c115a5cab5a39d8f331f446bd03f346a62edd..f2280f73b057d38c58f308c278baec6097d976c7 100644 (file)
@@ -60,8 +60,8 @@ void irlap_start_query_timer(struct irlap_cb *self, int S, int s)
         * to avoid messing with incoming connection requests and
         * to accommodate devices that perform discovery slower than us.
         * Jean II */
-       timeout = ((sysctl_slot_timeout * HZ / 1000) * (S - s)
-                  + XIDEXTRA_TIMEOUT + SMALLBUSY_TIMEOUT);
+       timeout = msecs_to_jiffies(sysctl_slot_timeout) * (S - s)
+                  + XIDEXTRA_TIMEOUT + SMALLBUSY_TIMEOUT;
 
        /* Set or re-set the timer. We reset the timer for each received
         * discovery query, which allows us to automatically adjust to
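
The replacement of the open-coded sysctl_slot_timeout * HZ / 1000 with msecs_to_jiffies() above matters for rounding: the open-coded form truncates and can yield zero jiffies for small timeouts, while msecs_to_jiffies() guarantees at least the requested delay by rounding up. A userspace demonstration, assuming HZ=250 for the sake of the example:

#include <stdio.h>

#define HZ 250

/* truncating conversion, as the old open-coded expression did */
static unsigned long open_coded(unsigned int ms)
{
        return (unsigned long)ms * HZ / 1000;
}

/* round-up conversion, modeling the guarantee msecs_to_jiffies() gives */
static unsigned long round_up_ms(unsigned int ms)
{
        return ((unsigned long)ms * HZ + 999) / 1000;
}

int main(void)
{
        for (unsigned int ms = 1; ms <= 5; ms++)
                printf("%ums: truncated=%lu rounded=%lu\n",
                       ms, open_coded(ms), round_up_ms(ms));
        return 0;
}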
index 6daa52a18d40ca2a40f702acefccd01e6b145f36..918151c113480d6651c3f2c406fea7546ebddd22 100644 (file)
@@ -535,12 +535,12 @@ static void iucv_sock_init(struct sock *sk, struct sock *parent)
                sk->sk_type = parent->sk_type;
 }
 
-static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
+static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio, int kern)
 {
        struct sock *sk;
        struct iucv_sock *iucv;
 
-       sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto);
+       sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto, kern);
        if (!sk)
                return NULL;
        iucv = iucv_sk(sk);
@@ -602,7 +602,7 @@ static int iucv_sock_create(struct net *net, struct socket *sock, int protocol,
                return -ESOCKTNOSUPPORT;
        }
 
-       sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL);
+       sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL, kern);
        if (!sk)
                return -ENOMEM;
 
@@ -1723,7 +1723,7 @@ static int iucv_callback_connreq(struct iucv_path *path,
        }
 
        /* Create the new socket */
-       nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC);
+       nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC, 0);
        if (!nsk) {
                err = pr_iucv->path_sever(path, user_data);
                iucv_path_free(path);
@@ -1933,7 +1933,7 @@ static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
                goto out;
        }
 
-       nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC);
+       nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC, 0);
        bh_lock_sock(sk);
        if ((sk->sk_state != IUCV_LISTEN) ||
            sk_acceptq_is_full(sk) ||
index f0d52d721b3a4405b47f38d8f5d6c1990cbf8b5f..9e834ec475a9c202d4427d7a2b7f21511a1d6eea 100644 (file)
@@ -149,7 +149,7 @@ static int pfkey_create(struct net *net, struct socket *sock, int protocol,
                return -EPROTONOSUPPORT;
 
        err = -ENOMEM;
-       sk = sk_alloc(net, PF_KEY, GFP_KERNEL, &key_proto);
+       sk = sk_alloc(net, PF_KEY, GFP_KERNEL, &key_proto, kern);
        if (sk == NULL)
                goto out;
 
index a29a504492af6f2c38607f2c15e123a297d565cd..f6b090df3930d32dc1fda0c8069ad1f7b3246d41 100644 (file)
@@ -1334,9 +1334,10 @@ static void l2tp_tunnel_del_work(struct work_struct *work)
                if (sock)
                        inet_shutdown(sock, 2);
        } else {
-               if (sock)
+               if (sock) {
                        kernel_sock_shutdown(sock, SHUT_RDWR);
-               sk_release_kernel(sk);
+                       sock_release(sock);
+               }
        }
 
        l2tp_tunnel_sock_put(sk);
@@ -1399,13 +1400,11 @@ static int l2tp_tunnel_sock_create(struct net *net,
                if (cfg->local_ip6 && cfg->peer_ip6) {
                        struct sockaddr_l2tpip6 ip6_addr = {0};
 
-                       err = sock_create_kern(AF_INET6, SOCK_DGRAM,
+                       err = sock_create_kern(net, AF_INET6, SOCK_DGRAM,
                                          IPPROTO_L2TP, &sock);
                        if (err < 0)
                                goto out;
 
-                       sk_change_net(sock->sk, net);
-
                        ip6_addr.l2tp_family = AF_INET6;
                        memcpy(&ip6_addr.l2tp_addr, cfg->local_ip6,
                               sizeof(ip6_addr.l2tp_addr));
@@ -1429,13 +1428,11 @@ static int l2tp_tunnel_sock_create(struct net *net,
                {
                        struct sockaddr_l2tpip ip_addr = {0};
 
-                       err = sock_create_kern(AF_INET, SOCK_DGRAM,
+                       err = sock_create_kern(net, AF_INET, SOCK_DGRAM,
                                          IPPROTO_L2TP, &sock);
                        if (err < 0)
                                goto out;
 
-                       sk_change_net(sock->sk, net);
-
                        ip_addr.l2tp_family = AF_INET;
                        ip_addr.l2tp_addr = cfg->local_ip;
                        ip_addr.l2tp_conn_id = tunnel_id;
@@ -1462,7 +1459,7 @@ out:
        *sockp = sock;
        if ((err < 0) && sock) {
                kernel_sock_shutdown(sock, SHUT_RDWR);
-               sk_release_kernel(sock->sk);
+               sock_release(sock);
                *sockp = NULL;
        }
 
index e9b0dec56b8e80e13ea780d11bb56e921814c317..f56c9f69e9f288b2d1f9da53257a5f64bf5dd067 100644 (file)
@@ -542,12 +542,12 @@ static int pppol2tp_backlog_recv(struct sock *sk, struct sk_buff *skb)
 
 /* socket() handler. Initialize a new struct sock.
  */
-static int pppol2tp_create(struct net *net, struct socket *sock)
+static int pppol2tp_create(struct net *net, struct socket *sock, int kern)
 {
        int error = -ENOMEM;
        struct sock *sk;
 
-       sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppol2tp_sk_proto);
+       sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppol2tp_sk_proto, kern);
        if (!sk)
                goto out;
 
index 17a8dff0609066e338b528328c4ed01259314d43..8fd9febaa5bad8150bdf632b97221a140e0e3cbd 100644 (file)
@@ -168,7 +168,7 @@ static int llc_ui_create(struct net *net, struct socket *sock, int protocol,
 
        if (likely(sock->type == SOCK_DGRAM || sock->type == SOCK_STREAM)) {
                rc = -ENOMEM;
-               sk = llc_sk_alloc(net, PF_LLC, GFP_KERNEL, &llc_proto);
+               sk = llc_sk_alloc(net, PF_LLC, GFP_KERNEL, &llc_proto, kern);
                if (sk) {
                        rc = 0;
                        llc_ui_sk_init(sock, sk);
index 81a61fce3afbfee10bd4e20ffecb4f861364c121..3e821daf9dd4a2fbf00550591e92b153efd4a73a 100644 (file)
@@ -768,7 +768,7 @@ static struct sock *llc_create_incoming_sock(struct sock *sk,
                                             struct llc_addr *daddr)
 {
        struct sock *newsk = llc_sk_alloc(sock_net(sk), sk->sk_family, GFP_ATOMIC,
-                                         sk->sk_prot);
+                                         sk->sk_prot, 0);
        struct llc_sock *newllc, *llc = llc_sk(sk);
 
        if (!newsk)
@@ -931,9 +931,9 @@ static void llc_sk_init(struct sock *sk)
  *     Allocates a LLC sock and initializes it. Returns the new LLC sock
  *     or %NULL if there's no memory available for one
  */
-struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority, struct proto *prot)
+struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority, struct proto *prot, int kern)
 {
-       struct sock *sk = sk_alloc(net, family, priority, prot);
+       struct sock *sk = sk_alloc(net, family, priority, prot, kern);
 
        if (!sk)
                goto out;
index 64a012a0c6e52dba4d026701402e56c6e82f73b5..086de496a4c197bb98a5e0b550a9b67d516d4902 100644 (file)
@@ -302,6 +302,20 @@ config MAC80211_DEBUG_COUNTERS
        ---help---
          Selecting this option causes mac80211 to keep additional
          and very verbose statistics about TX and RX handler use
-         and show them in debugfs.
+         as well as a few selected dot11 counters. These will be
+         exposed in debugfs.
+
+         Note that some of the counters are not concurrency safe
+         and may thus not always be accurate.
 
          If unsure, say N.
+
+config MAC80211_STA_HASH_MAX_SIZE
+       int "Station hash table maximum size" if MAC80211_DEBUG_MENU
+       default 0
+       ---help---
+         Setting this option to a low value (e.g. 4) allows testing the
+         hash table with collisions relatively deterministically (just
+         connect more stations than the number selected here.)
+
+         If unsure, leave the default of 0.
index ff347a0eebd4fdbcbd1580c8af0450c23f673f85..690b9f640b41a5a8e58e84d85b15e579121f84b4 100644 (file)
@@ -137,6 +137,9 @@ static int ieee80211_set_noack_map(struct wiphy *wiphy,
        struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 
        sdata->noack_map = noack_map;
+
+       ieee80211_check_fast_xmit_iface(sdata);
+
        return 0;
 }
 
@@ -309,6 +312,7 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
        u32 iv32;
        u16 iv16;
        int err = -ENOENT;
+       struct ieee80211_key_seq kseq = {};
 
        sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 
@@ -339,10 +343,12 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
                iv32 = key->u.tkip.tx.iv32;
                iv16 = key->u.tkip.tx.iv16;
 
-               if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)
-                       drv_get_tkip_seq(sdata->local,
-                                        key->conf.hw_key_idx,
-                                        &iv32, &iv16);
+               if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE &&
+                   !(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)) {
+                       drv_get_key_seq(sdata->local, key, &kseq);
+                       iv32 = kseq.tkip.iv32;
+                       iv16 = kseq.tkip.iv16;
+               }
 
                seq[0] = iv16 & 0xff;
                seq[1] = (iv16 >> 8) & 0xff;
@@ -355,52 +361,85 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
                break;
        case WLAN_CIPHER_SUITE_CCMP:
        case WLAN_CIPHER_SUITE_CCMP_256:
-               pn64 = atomic64_read(&key->u.ccmp.tx_pn);
-               seq[0] = pn64;
-               seq[1] = pn64 >> 8;
-               seq[2] = pn64 >> 16;
-               seq[3] = pn64 >> 24;
-               seq[4] = pn64 >> 32;
-               seq[5] = pn64 >> 40;
+               if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE &&
+                   !(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)) {
+                       drv_get_key_seq(sdata->local, key, &kseq);
+                       memcpy(seq, kseq.ccmp.pn, 6);
+               } else {
+                       pn64 = atomic64_read(&key->u.ccmp.tx_pn);
+                       seq[0] = pn64;
+                       seq[1] = pn64 >> 8;
+                       seq[2] = pn64 >> 16;
+                       seq[3] = pn64 >> 24;
+                       seq[4] = pn64 >> 32;
+                       seq[5] = pn64 >> 40;
+               }
                params.seq = seq;
                params.seq_len = 6;
                break;
        case WLAN_CIPHER_SUITE_AES_CMAC:
        case WLAN_CIPHER_SUITE_BIP_CMAC_256:
-               pn64 = atomic64_read(&key->u.aes_cmac.tx_pn);
-               seq[0] = pn64;
-               seq[1] = pn64 >> 8;
-               seq[2] = pn64 >> 16;
-               seq[3] = pn64 >> 24;
-               seq[4] = pn64 >> 32;
-               seq[5] = pn64 >> 40;
+               if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE &&
+                   !(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)) {
+                       drv_get_key_seq(sdata->local, key, &kseq);
+                       memcpy(seq, kseq.aes_cmac.pn, 6);
+               } else {
+                       pn64 = atomic64_read(&key->u.aes_cmac.tx_pn);
+                       seq[0] = pn64;
+                       seq[1] = pn64 >> 8;
+                       seq[2] = pn64 >> 16;
+                       seq[3] = pn64 >> 24;
+                       seq[4] = pn64 >> 32;
+                       seq[5] = pn64 >> 40;
+               }
                params.seq = seq;
                params.seq_len = 6;
                break;
        case WLAN_CIPHER_SUITE_BIP_GMAC_128:
        case WLAN_CIPHER_SUITE_BIP_GMAC_256:
-               pn64 = atomic64_read(&key->u.aes_gmac.tx_pn);
-               seq[0] = pn64;
-               seq[1] = pn64 >> 8;
-               seq[2] = pn64 >> 16;
-               seq[3] = pn64 >> 24;
-               seq[4] = pn64 >> 32;
-               seq[5] = pn64 >> 40;
+               if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE &&
+                   !(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)) {
+                       drv_get_key_seq(sdata->local, key, &kseq);
+                       memcpy(seq, kseq.aes_gmac.pn, 6);
+               } else {
+                       pn64 = atomic64_read(&key->u.aes_gmac.tx_pn);
+                       seq[0] = pn64;
+                       seq[1] = pn64 >> 8;
+                       seq[2] = pn64 >> 16;
+                       seq[3] = pn64 >> 24;
+                       seq[4] = pn64 >> 32;
+                       seq[5] = pn64 >> 40;
+               }
                params.seq = seq;
                params.seq_len = 6;
                break;
        case WLAN_CIPHER_SUITE_GCMP:
        case WLAN_CIPHER_SUITE_GCMP_256:
-               pn64 = atomic64_read(&key->u.gcmp.tx_pn);
-               seq[0] = pn64;
-               seq[1] = pn64 >> 8;
-               seq[2] = pn64 >> 16;
-               seq[3] = pn64 >> 24;
-               seq[4] = pn64 >> 32;
-               seq[5] = pn64 >> 40;
+               if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE &&
+                   !(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)) {
+                       drv_get_key_seq(sdata->local, key, &kseq);
+                       memcpy(seq, kseq.gcmp.pn, 6);
+               } else {
+                       pn64 = atomic64_read(&key->u.gcmp.tx_pn);
+                       seq[0] = pn64;
+                       seq[1] = pn64 >> 8;
+                       seq[2] = pn64 >> 16;
+                       seq[3] = pn64 >> 24;
+                       seq[4] = pn64 >> 32;
+                       seq[5] = pn64 >> 40;
+               }
                params.seq = seq;
                params.seq_len = 6;
                break;
+       default:
+               if (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE))
+                       break;
+               if (WARN_ON(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV))
+                       break;
+               drv_get_key_seq(sdata->local, key, &kseq);
+               params.seq = kseq.hw.seq;
+               params.seq_len = kseq.hw.seq_len;
+               break;
        }
 
        params.key = key->conf.key;
@@ -1372,6 +1411,7 @@ static int ieee80211_change_station(struct wiphy *wiphy,
                }
 
                sta->sdata = vlansdata;
+               ieee80211_check_fast_xmit(sta);
 
                if (sta->sta_state == IEEE80211_STA_AUTHORIZED &&
                    prev_4addr != new_4addr) {
@@ -2099,10 +2139,14 @@ static int ieee80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
        int err;
 
        if (changed & WIPHY_PARAM_FRAG_THRESHOLD) {
+               ieee80211_check_fast_xmit_all(local);
+
                err = drv_set_frag_threshold(local, wiphy->frag_threshold);
 
-               if (err)
+               if (err) {
+                       ieee80211_check_fast_xmit_all(local);
                        return err;
+               }
        }
 
        if ((changed & WIPHY_PARAM_COVERAGE_CLASS) ||
@@ -3299,8 +3343,14 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
                break;
        case NL80211_IFTYPE_STATION:
        case NL80211_IFTYPE_P2P_CLIENT:
-               if (!sdata->u.mgd.associated)
+               sdata_lock(sdata);
+               if (!sdata->u.mgd.associated ||
+                   (params->offchan && params->wait &&
+                    local->ops->remain_on_channel &&
+                    memcmp(sdata->u.mgd.associated->bssid,
+                           mgmt->bssid, ETH_ALEN)))
                        need_offchan = true;
+               sdata_unlock(sdata);
                break;
        case NL80211_IFTYPE_P2P_DEVICE:
                need_offchan = true;
index 5bcd4e5589d3294602c4abdeff778497afbc8de1..f01c18a3160e11d72dae9e2a0939530ec805f6a5 100644 (file)
@@ -664,6 +664,8 @@ out:
                ieee80211_bss_info_change_notify(sdata,
                                                 BSS_CHANGED_IDLE);
 
+       ieee80211_check_fast_xmit_iface(sdata);
+
        return ret;
 }
 
@@ -1008,6 +1010,8 @@ ieee80211_vif_use_reserved_reassign(struct ieee80211_sub_if_data *sdata)
        if (WARN_ON(!chandef))
                return -EINVAL;
 
+       ieee80211_change_chanctx(local, new_ctx, chandef);
+
        vif_chsw[0].vif = &sdata->vif;
        vif_chsw[0].old_ctx = &old_ctx->conf;
        vif_chsw[0].new_ctx = &new_ctx->conf;
@@ -1030,6 +1034,8 @@ ieee80211_vif_use_reserved_reassign(struct ieee80211_sub_if_data *sdata)
        if (sdata->vif.type == NL80211_IFTYPE_AP)
                __ieee80211_vif_copy_chanctx_to_vlans(sdata, false);
 
+       ieee80211_check_fast_xmit_iface(sdata);
+
        if (ieee80211_chanctx_refcount(local, old_ctx) == 0)
                ieee80211_free_chanctx(local, old_ctx);
 
@@ -1079,6 +1085,8 @@ ieee80211_vif_use_reserved_assign(struct ieee80211_sub_if_data *sdata)
        if (WARN_ON(!chandef))
                return -EINVAL;
 
+       ieee80211_change_chanctx(local, new_ctx, chandef);
+
        list_del(&sdata->reserved_chanctx_list);
        sdata->reserved_chanctx = NULL;
 
@@ -1376,6 +1384,8 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local)
                                __ieee80211_vif_copy_chanctx_to_vlans(sdata,
                                                                      false);
 
+                       ieee80211_check_fast_xmit_iface(sdata);
+
                        sdata->radar_required = sdata->reserved_radar_required;
 
                        if (sdata->vif.bss_conf.chandef.width !=
index 23813ebb349cd67c3f3248da90312695657e686f..b17206db49b44643adc1f252e089d913509ba7fc 100644 (file)
@@ -219,8 +219,8 @@ static const struct file_operations stats_ ##name## _ops = {                \
        .llseek = generic_file_llseek,                                  \
 };
 
-#define DEBUGFS_STATS_ADD(name, field)                                 \
-       debugfs_create_u32(#name, 0400, statsd, (u32 *) &field);
+#define DEBUGFS_STATS_ADD(name)                                        \
+       debugfs_create_u32(#name, 0400, statsd, &local->name);
 #define DEBUGFS_DEVSTATS_ADD(name)                                     \
        debugfs_create_file(#name, 0400, statsd, local, &stats_ ##name## _ops);
 
@@ -255,53 +255,31 @@ void debugfs_hw_add(struct ieee80211_local *local)
        if (!statsd)
                return;
 
-       DEBUGFS_STATS_ADD(transmitted_fragment_count,
-               local->dot11TransmittedFragmentCount);
-       DEBUGFS_STATS_ADD(multicast_transmitted_frame_count,
-               local->dot11MulticastTransmittedFrameCount);
-       DEBUGFS_STATS_ADD(failed_count, local->dot11FailedCount);
-       DEBUGFS_STATS_ADD(retry_count, local->dot11RetryCount);
-       DEBUGFS_STATS_ADD(multiple_retry_count,
-               local->dot11MultipleRetryCount);
-       DEBUGFS_STATS_ADD(frame_duplicate_count,
-               local->dot11FrameDuplicateCount);
-       DEBUGFS_STATS_ADD(received_fragment_count,
-               local->dot11ReceivedFragmentCount);
-       DEBUGFS_STATS_ADD(multicast_received_frame_count,
-               local->dot11MulticastReceivedFrameCount);
-       DEBUGFS_STATS_ADD(transmitted_frame_count,
-               local->dot11TransmittedFrameCount);
 #ifdef CONFIG_MAC80211_DEBUG_COUNTERS
-       DEBUGFS_STATS_ADD(tx_handlers_drop, local->tx_handlers_drop);
-       DEBUGFS_STATS_ADD(tx_handlers_queued, local->tx_handlers_queued);
-       DEBUGFS_STATS_ADD(tx_handlers_drop_fragment,
-               local->tx_handlers_drop_fragment);
-       DEBUGFS_STATS_ADD(tx_handlers_drop_wep,
-               local->tx_handlers_drop_wep);
-       DEBUGFS_STATS_ADD(tx_handlers_drop_not_assoc,
-               local->tx_handlers_drop_not_assoc);
-       DEBUGFS_STATS_ADD(tx_handlers_drop_unauth_port,
-               local->tx_handlers_drop_unauth_port);
-       DEBUGFS_STATS_ADD(rx_handlers_drop, local->rx_handlers_drop);
-       DEBUGFS_STATS_ADD(rx_handlers_queued, local->rx_handlers_queued);
-       DEBUGFS_STATS_ADD(rx_handlers_drop_nullfunc,
-               local->rx_handlers_drop_nullfunc);
-       DEBUGFS_STATS_ADD(rx_handlers_drop_defrag,
-               local->rx_handlers_drop_defrag);
-       DEBUGFS_STATS_ADD(rx_handlers_drop_short,
-               local->rx_handlers_drop_short);
-       DEBUGFS_STATS_ADD(tx_expand_skb_head,
-               local->tx_expand_skb_head);
-       DEBUGFS_STATS_ADD(tx_expand_skb_head_cloned,
-               local->tx_expand_skb_head_cloned);
-       DEBUGFS_STATS_ADD(rx_expand_skb_head,
-               local->rx_expand_skb_head);
-       DEBUGFS_STATS_ADD(rx_expand_skb_head2,
-               local->rx_expand_skb_head2);
-       DEBUGFS_STATS_ADD(rx_handlers_fragments,
-               local->rx_handlers_fragments);
-       DEBUGFS_STATS_ADD(tx_status_drop,
-               local->tx_status_drop);
+       DEBUGFS_STATS_ADD(dot11TransmittedFragmentCount);
+       DEBUGFS_STATS_ADD(dot11MulticastTransmittedFrameCount);
+       DEBUGFS_STATS_ADD(dot11FailedCount);
+       DEBUGFS_STATS_ADD(dot11RetryCount);
+       DEBUGFS_STATS_ADD(dot11MultipleRetryCount);
+       DEBUGFS_STATS_ADD(dot11FrameDuplicateCount);
+       DEBUGFS_STATS_ADD(dot11ReceivedFragmentCount);
+       DEBUGFS_STATS_ADD(dot11MulticastReceivedFrameCount);
+       DEBUGFS_STATS_ADD(dot11TransmittedFrameCount);
+       DEBUGFS_STATS_ADD(tx_handlers_drop);
+       DEBUGFS_STATS_ADD(tx_handlers_queued);
+       DEBUGFS_STATS_ADD(tx_handlers_drop_wep);
+       DEBUGFS_STATS_ADD(tx_handlers_drop_not_assoc);
+       DEBUGFS_STATS_ADD(tx_handlers_drop_unauth_port);
+       DEBUGFS_STATS_ADD(rx_handlers_drop);
+       DEBUGFS_STATS_ADD(rx_handlers_queued);
+       DEBUGFS_STATS_ADD(rx_handlers_drop_nullfunc);
+       DEBUGFS_STATS_ADD(rx_handlers_drop_defrag);
+       DEBUGFS_STATS_ADD(rx_handlers_drop_short);
+       DEBUGFS_STATS_ADD(tx_expand_skb_head);
+       DEBUGFS_STATS_ADD(tx_expand_skb_head_cloned);
+       DEBUGFS_STATS_ADD(rx_expand_skb_head_defrag);
+       DEBUGFS_STATS_ADD(rx_handlers_fragments);
+       DEBUGFS_STATS_ADD(tx_status_drop);
 #endif
        DEBUGFS_DEVSTATS_ADD(dot11ACKFailureCount);
        DEBUGFS_DEVSTATS_ADD(dot11RTSFailureCount);
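
The DEBUGFS_STATS_ADD rewrite above works because every debugfs file name now matches the struct field name exactly, so one macro argument can serve both as the #name string and as the &local->name reference. A self-contained sketch of that stringify-and-reference idiom (struct and field names are hypothetical):

/* sketch: one identifier doubles as display name and field reference */
#include <stdio.h>

struct stats {
        unsigned int tx_handlers_drop;
        unsigned int rx_handlers_queued;
};

/* #name turns the identifier into the file-name string, while
 * (s)->name references the field of the same name */
#define STATS_SHOW(s, name) printf("%s = %u\n", #name, (s)->name)

int main(void)
{
        struct stats st = { .tx_handlers_drop = 3, .rx_handlers_queued = 7 };

        STATS_SHOW(&st, tx_handlers_drop);
        STATS_SHOW(&st, rx_handlers_queued);
        return 0;
}
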
index 252859e90e8a5085f18916edda7a3bf407cdd000..06d52935036dd0875868e3db79aaebebe08c5c11 100644 (file)
@@ -29,8 +29,6 @@ static ssize_t sta_ ##name## _read(struct file *file,                 \
                                      format_string, sta->field);       \
 }
 #define STA_READ_D(name, field) STA_READ(name, field, "%d\n")
-#define STA_READ_U(name, field) STA_READ(name, field, "%u\n")
-#define STA_READ_S(name, field) STA_READ(name, field, "%s\n")
 
 #define STA_OPS(name)                                                  \
 static const struct file_operations sta_ ##name## _ops = {             \
@@ -52,10 +50,7 @@ static const struct file_operations sta_ ##name## _ops = {           \
                STA_OPS(name)
 
 STA_FILE(aid, sta.aid, D);
-STA_FILE(dev, sdata->name, S);
-STA_FILE(last_signal, last_signal, D);
 STA_FILE(last_ack_signal, last_ack_signal, D);
-STA_FILE(beacon_loss_count, beacon_loss_count, D);
 
 static ssize_t sta_flags_read(struct file *file, char __user *userbuf,
                              size_t count, loff_t *ppos)
@@ -101,40 +96,6 @@ static ssize_t sta_num_ps_buf_frames_read(struct file *file,
 }
 STA_OPS(num_ps_buf_frames);
 
-static ssize_t sta_inactive_ms_read(struct file *file, char __user *userbuf,
-                                   size_t count, loff_t *ppos)
-{
-       struct sta_info *sta = file->private_data;
-       return mac80211_format_buffer(userbuf, count, ppos, "%d\n",
-                                     jiffies_to_msecs(jiffies - sta->last_rx));
-}
-STA_OPS(inactive_ms);
-
-
-static ssize_t sta_connected_time_read(struct file *file, char __user *userbuf,
-                                       size_t count, loff_t *ppos)
-{
-       struct sta_info *sta = file->private_data;
-       struct timespec uptime;
-       struct tm result;
-       long connected_time_secs;
-       char buf[100];
-       int res;
-       ktime_get_ts(&uptime);
-       connected_time_secs = uptime.tv_sec - sta->last_connected;
-       time_to_tm(connected_time_secs, 0, &result);
-       result.tm_year -= 70;
-       result.tm_mday -= 1;
-       res = scnprintf(buf, sizeof(buf),
-               "years  - %ld\nmonths - %d\ndays   - %d\nclock  - %d:%d:%d\n\n",
-                       result.tm_year, result.tm_mon, result.tm_mday,
-                       result.tm_hour, result.tm_min, result.tm_sec);
-       return simple_read_from_buffer(userbuf, count, ppos, buf, res);
-}
-STA_OPS(connected_time);
-
-
-
 static ssize_t sta_last_seq_ctrl_read(struct file *file, char __user *userbuf,
                                      size_t count, loff_t *ppos)
 {
@@ -359,37 +320,6 @@ static ssize_t sta_vht_capa_read(struct file *file, char __user *userbuf,
 }
 STA_OPS(vht_capa);
 
-static ssize_t sta_current_tx_rate_read(struct file *file, char __user *userbuf,
-                                       size_t count, loff_t *ppos)
-{
-       struct sta_info *sta = file->private_data;
-       struct rate_info rinfo;
-       u16 rate;
-       sta_set_rate_info_tx(sta, &sta->last_tx_rate, &rinfo);
-       rate = cfg80211_calculate_bitrate(&rinfo);
-
-       return mac80211_format_buffer(userbuf, count, ppos,
-                                     "%d.%d MBit/s\n",
-                                     rate/10, rate%10);
-}
-STA_OPS(current_tx_rate);
-
-static ssize_t sta_last_rx_rate_read(struct file *file, char __user *userbuf,
-                                    size_t count, loff_t *ppos)
-{
-       struct sta_info *sta = file->private_data;
-       struct rate_info rinfo;
-       u16 rate;
-
-       sta_set_rate_info_rx(sta, &rinfo);
-
-       rate = cfg80211_calculate_bitrate(&rinfo);
-
-       return mac80211_format_buffer(userbuf, count, ppos,
-                                     "%d.%d MBit/s\n",
-                                     rate/10, rate%10);
-}
-STA_OPS(last_rx_rate);
 
 #define DEBUGFS_ADD(name) \
        debugfs_create_file(#name, 0400, \
@@ -432,30 +362,15 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta)
 
        DEBUGFS_ADD(flags);
        DEBUGFS_ADD(num_ps_buf_frames);
-       DEBUGFS_ADD(inactive_ms);
-       DEBUGFS_ADD(connected_time);
        DEBUGFS_ADD(last_seq_ctrl);
        DEBUGFS_ADD(agg_status);
-       DEBUGFS_ADD(dev);
-       DEBUGFS_ADD(last_signal);
-       DEBUGFS_ADD(beacon_loss_count);
        DEBUGFS_ADD(ht_capa);
        DEBUGFS_ADD(vht_capa);
        DEBUGFS_ADD(last_ack_signal);
-       DEBUGFS_ADD(current_tx_rate);
-       DEBUGFS_ADD(last_rx_rate);
 
-       DEBUGFS_ADD_COUNTER(rx_packets, rx_packets);
-       DEBUGFS_ADD_COUNTER(tx_packets, tx_packets);
-       DEBUGFS_ADD_COUNTER(rx_bytes, rx_bytes);
-       DEBUGFS_ADD_COUNTER(tx_bytes, tx_bytes);
        DEBUGFS_ADD_COUNTER(rx_duplicates, num_duplicates);
        DEBUGFS_ADD_COUNTER(rx_fragments, rx_fragments);
-       DEBUGFS_ADD_COUNTER(rx_dropped, rx_dropped);
-       DEBUGFS_ADD_COUNTER(tx_fragments, tx_fragments);
        DEBUGFS_ADD_COUNTER(tx_filtered, tx_filtered_count);
-       DEBUGFS_ADD_COUNTER(tx_retry_failed, tx_retry_failed);
-       DEBUGFS_ADD_COUNTER(tx_retry_count, tx_retry_count);
 
        if (sizeof(sta->driver_buffered_tids) == sizeof(u32))
                debugfs_create_x32("driver_buffered_tids", 0400,
index 26e1ca8a474af338685debf3d89b3cc3ad6d9986..c01e681b90fb4546ab29282d29cf4f77a7649338 100644 (file)
@@ -417,12 +417,13 @@ static inline int drv_get_stats(struct ieee80211_local *local,
        return ret;
 }
 
-static inline void drv_get_tkip_seq(struct ieee80211_local *local,
-                                   u8 hw_key_idx, u32 *iv32, u16 *iv16)
+static inline void drv_get_key_seq(struct ieee80211_local *local,
+                                  struct ieee80211_key *key,
+                                  struct ieee80211_key_seq *seq)
 {
-       if (local->ops->get_tkip_seq)
-               local->ops->get_tkip_seq(&local->hw, hw_key_idx, iv32, iv16);
-       trace_drv_get_tkip_seq(local, hw_key_idx, iv32, iv16);
+       if (local->ops->get_key_seq)
+               local->ops->get_key_seq(&local->hw, &key->conf, seq);
+       trace_drv_get_key_seq(local, &key->conf);
 }
 
 static inline int drv_set_frag_threshold(struct ieee80211_local *local,
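
The hunk above replaces the TKIP-only drv_get_tkip_seq() with a cipher-agnostic drv_get_key_seq(), keeping the usual driver-op wrapper shape: invoke the hardware hook only if the driver implements it, and trace the call either way. A standalone sketch of that optional-callback dispatch, with hypothetical names:

/* sketch: optional driver op with unconditional tracing */
#include <stdio.h>

struct key_seq { unsigned char pn[6]; };

struct driver_ops {
        /* optional: only set if hardware keeps the replay counter */
        void (*get_key_seq)(int key_idx, struct key_seq *seq);
};

static void trace_get_key_seq(int key_idx)
{
        printf("trace: get_key_seq(key %d)\n", key_idx);
}

static void hw_get_key_seq(int key_idx, struct key_seq *seq)
{
        seq->pn[0] = 0x42;              /* pretend hardware counter */
}

static void drv_get_key_seq(const struct driver_ops *ops,
                            int key_idx, struct key_seq *seq)
{
        if (ops->get_key_seq)           /* skip quietly if unimplemented */
                ops->get_key_seq(key_idx, seq);
        trace_get_key_seq(key_idx);
}

int main(void)
{
        struct key_seq seq = { { 0 } };
        struct driver_ops with = { .get_key_seq = hw_get_key_seq };
        struct driver_ops without = { 0 };

        drv_get_key_seq(&with, 0, &seq);
        drv_get_key_seq(&without, 0, &seq);
        return 0;
}
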
index 52bcea6ad9e8fb612a103ea61dd4dee0a31e2e53..188faab11c24573397b16296af97568afcd206cd 100644 (file)
@@ -38,7 +38,7 @@ static void ieee80211_get_ringparam(struct net_device *dev,
 static const char ieee80211_gstrings_sta_stats[][ETH_GSTRING_LEN] = {
        "rx_packets", "rx_bytes",
        "rx_duplicates", "rx_fragments", "rx_dropped",
-       "tx_packets", "tx_bytes", "tx_fragments",
+       "tx_packets", "tx_bytes",
        "tx_filtered", "tx_retry_failed", "tx_retries",
        "beacon_loss", "sta_state", "txrate", "rxrate", "signal",
        "channel", "noise", "ch_time", "ch_time_busy",
@@ -87,7 +87,6 @@ static void ieee80211_get_stats(struct net_device *dev,
                                                        \
                data[i++] += sinfo.tx_packets;          \
                data[i++] += sinfo.tx_bytes;            \
-               data[i++] += sta->tx_fragments;         \
                data[i++] += sta->tx_filtered_count;    \
                data[i++] += sta->tx_retry_failed;      \
                data[i++] += sta->tx_retry_count;       \
index bfef1b2150504fa9ed2a0b4b9a7b4b60e229f1de..21716af8bec32970a7c094b73124f5b5aa8c5d8c 100644 (file)
@@ -1031,8 +1031,11 @@ static void ieee80211_update_sta_info(struct ieee80211_sub_if_data *sdata,
                }
        }
 
-       if (sta && elems->wmm_info && local->hw.queues >= IEEE80211_NUM_ACS)
+       if (sta && !sta->sta.wme &&
+           elems->wmm_info && local->hw.queues >= IEEE80211_NUM_ACS) {
                sta->sta.wme = true;
+               ieee80211_check_fast_xmit(sta);
+       }
 
        if (sta && elems->ht_operation && elems->ht_cap_elem &&
            sdata->u.ibss.chandef.width != NL80211_CHAN_WIDTH_20_NOHT &&
index c0a9187bc3a9d579b36824fa64ecbbcbd6575110..b12f61507f9f9a4f84eaad69f98ac209af90361e 100644 (file)
@@ -181,8 +181,6 @@ typedef unsigned __bitwise__ ieee80211_rx_result;
 
 /**
  * enum ieee80211_packet_rx_flags - packet RX flags
- * @IEEE80211_RX_RA_MATCH: frame is destined to interface currently processed
- *     (incl. multicast frames)
  * @IEEE80211_RX_FRAGMENTED: fragmented frame
  * @IEEE80211_RX_AMSDU: a-MSDU packet
  * @IEEE80211_RX_MALFORMED_ACTION_FRM: action frame is malformed
@@ -192,7 +190,6 @@ typedef unsigned __bitwise__ ieee80211_rx_result;
  * @rx_flags field of &struct ieee80211_rx_status.
  */
 enum ieee80211_packet_rx_flags {
-       IEEE80211_RX_RA_MATCH                   = BIT(1),
        IEEE80211_RX_FRAGMENTED                 = BIT(2),
        IEEE80211_RX_AMSDU                      = BIT(3),
        IEEE80211_RX_MALFORMED_ACTION_FRM       = BIT(4),
@@ -722,7 +719,6 @@ struct ieee80211_if_mesh {
  * enum ieee80211_sub_if_data_flags - virtual interface flags
  *
  * @IEEE80211_SDATA_ALLMULTI: interface wants all multicast packets
- * @IEEE80211_SDATA_PROMISC: interface is promisc
  * @IEEE80211_SDATA_OPERATING_GMODE: operating in G-only mode
  * @IEEE80211_SDATA_DONT_BRIDGE_PACKETS: bridge packets between
  *     associated stations and deliver multicast frames both
@@ -732,7 +728,6 @@ struct ieee80211_if_mesh {
  */
 enum ieee80211_sub_if_data_flags {
        IEEE80211_SDATA_ALLMULTI                = BIT(0),
-       IEEE80211_SDATA_PROMISC                 = BIT(1),
        IEEE80211_SDATA_OPERATING_GMODE         = BIT(2),
        IEEE80211_SDATA_DONT_BRIDGE_PACKETS     = BIT(3),
        IEEE80211_SDATA_DISCONNECT_RESUME       = BIT(4),
@@ -1040,7 +1035,6 @@ enum queue_stop_reason {
 
 #ifdef CONFIG_MAC80211_LEDS
 struct tpt_led_trigger {
-       struct led_trigger trig;
        char name[32];
        const struct ieee80211_tpt_blink *blink_table;
        unsigned int blink_table_len;
@@ -1208,8 +1202,8 @@ struct ieee80211_local {
 
        atomic_t agg_queue_stop[IEEE80211_MAX_QUEUES];
 
-       /* number of interfaces with corresponding IFF_ flags */
-       atomic_t iff_allmultis, iff_promiscs;
+       /* number of interfaces with allmulti RX */
+       atomic_t iff_allmultis;
 
        struct rate_control_ref *rate_ctrl;
 
@@ -1261,6 +1255,15 @@ struct ieee80211_local {
        struct list_head chanctx_list;
        struct mutex chanctx_mtx;
 
+#ifdef CONFIG_MAC80211_LEDS
+       struct led_trigger tx_led, rx_led, assoc_led, radio_led;
+       struct led_trigger tpt_led;
+       atomic_t tx_led_active, rx_led_active, assoc_led_active;
+       atomic_t radio_led_active, tpt_led_active;
+       struct tpt_led_trigger *tpt_led_trigger;
+#endif
+
+#ifdef CONFIG_MAC80211_DEBUG_COUNTERS
        /* SNMP counters */
        /* dot11CountersTable */
        u32 dot11TransmittedFragmentCount;
@@ -1273,18 +1276,9 @@ struct ieee80211_local {
        u32 dot11MulticastReceivedFrameCount;
        u32 dot11TransmittedFrameCount;
 
-#ifdef CONFIG_MAC80211_LEDS
-       struct led_trigger *tx_led, *rx_led, *assoc_led, *radio_led;
-       struct tpt_led_trigger *tpt_led_trigger;
-       char tx_led_name[32], rx_led_name[32],
-            assoc_led_name[32], radio_led_name[32];
-#endif
-
-#ifdef CONFIG_MAC80211_DEBUG_COUNTERS
        /* TX/RX handler statistics */
        unsigned int tx_handlers_drop;
        unsigned int tx_handlers_queued;
-       unsigned int tx_handlers_drop_fragment;
        unsigned int tx_handlers_drop_wep;
        unsigned int tx_handlers_drop_not_assoc;
        unsigned int tx_handlers_drop_unauth_port;
@@ -1295,8 +1289,7 @@ struct ieee80211_local {
        unsigned int rx_handlers_drop_short;
        unsigned int tx_expand_skb_head;
        unsigned int tx_expand_skb_head_cloned;
-       unsigned int rx_expand_skb_head;
-       unsigned int rx_expand_skb_head2;
+       unsigned int rx_expand_skb_head_defrag;
        unsigned int rx_handlers_fragments;
        unsigned int tx_status_drop;
 #define I802_DEBUG_INC(c) (c)++
@@ -1648,6 +1641,11 @@ struct sk_buff *
 ieee80211_build_data_template(struct ieee80211_sub_if_data *sdata,
                              struct sk_buff *skb, u32 info_flags);
 
+void ieee80211_check_fast_xmit(struct sta_info *sta);
+void ieee80211_check_fast_xmit_all(struct ieee80211_local *local);
+void ieee80211_check_fast_xmit_iface(struct ieee80211_sub_if_data *sdata);
+void ieee80211_clear_fast_xmit(struct sta_info *sta);
+
 /* HT */
 void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
                                     struct ieee80211_sta_ht_cap *ht_cap);
index 84cef600c5730e74c6456e801ffa93ef55e4e47f..b2e85ffca7ed0d081c53d87ecb390d9157a691d6 100644 (file)
@@ -703,9 +703,6 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
        if (sdata->flags & IEEE80211_SDATA_ALLMULTI)
                atomic_inc(&local->iff_allmultis);
 
-       if (sdata->flags & IEEE80211_SDATA_PROMISC)
-               atomic_inc(&local->iff_promiscs);
-
        if (coming_up)
                local->open_count++;
 
@@ -835,13 +832,10 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
                     ((sdata->vif.type != NL80211_IFTYPE_WDS && flushed > 0) ||
                      (sdata->vif.type == NL80211_IFTYPE_WDS && flushed != 1)));
 
-       /* don't count this interface for promisc/allmulti while it is down */
+       /* don't count this interface for allmulti while it is down */
        if (sdata->flags & IEEE80211_SDATA_ALLMULTI)
                atomic_dec(&local->iff_allmultis);
 
-       if (sdata->flags & IEEE80211_SDATA_PROMISC)
-               atomic_dec(&local->iff_promiscs);
-
        if (sdata->vif.type == NL80211_IFTYPE_AP) {
                local->fif_pspoll--;
                local->fif_probe_req--;
@@ -1055,12 +1049,10 @@ static void ieee80211_set_multicast_list(struct net_device *dev)
 {
        struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
        struct ieee80211_local *local = sdata->local;
-       int allmulti, promisc, sdata_allmulti, sdata_promisc;
+       int allmulti, sdata_allmulti;
 
        allmulti = !!(dev->flags & IFF_ALLMULTI);
-       promisc = !!(dev->flags & IFF_PROMISC);
        sdata_allmulti = !!(sdata->flags & IEEE80211_SDATA_ALLMULTI);
-       sdata_promisc = !!(sdata->flags & IEEE80211_SDATA_PROMISC);
 
        if (allmulti != sdata_allmulti) {
                if (dev->flags & IFF_ALLMULTI)
@@ -1070,13 +1062,6 @@ static void ieee80211_set_multicast_list(struct net_device *dev)
                sdata->flags ^= IEEE80211_SDATA_ALLMULTI;
        }
 
-       if (promisc != sdata_promisc) {
-               if (dev->flags & IFF_PROMISC)
-                       atomic_inc(&local->iff_promiscs);
-               else
-                       atomic_dec(&local->iff_promiscs);
-               sdata->flags ^= IEEE80211_SDATA_PROMISC;
-       }
        spin_lock_bh(&local->filter_lock);
        __hw_addr_sync(&local->mc_list, &dev->mc, dev->addr_len);
        spin_unlock_bh(&local->filter_lock);
@@ -1117,6 +1102,35 @@ static u16 ieee80211_netdev_select_queue(struct net_device *dev,
        return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb);
 }
 
+static struct rtnl_link_stats64 *
+ieee80211_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+       int i;
+
+       for_each_possible_cpu(i) {
+               const struct pcpu_sw_netstats *tstats;
+               u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
+               unsigned int start;
+
+               tstats = per_cpu_ptr(dev->tstats, i);
+
+               do {
+                       start = u64_stats_fetch_begin_irq(&tstats->syncp);
+                       rx_packets = tstats->rx_packets;
+                       tx_packets = tstats->tx_packets;
+                       rx_bytes = tstats->rx_bytes;
+                       tx_bytes = tstats->tx_bytes;
+               } while (u64_stats_fetch_retry_irq(&tstats->syncp, start));
+
+               stats->rx_packets += rx_packets;
+               stats->tx_packets += tx_packets;
+               stats->rx_bytes   += rx_bytes;
+               stats->tx_bytes   += tx_bytes;
+       }
+
+       return stats;
+}
+
 static const struct net_device_ops ieee80211_dataif_ops = {
        .ndo_open               = ieee80211_open,
        .ndo_stop               = ieee80211_stop,
@@ -1126,6 +1140,7 @@ static const struct net_device_ops ieee80211_dataif_ops = {
        .ndo_change_mtu         = ieee80211_change_mtu,
        .ndo_set_mac_address    = ieee80211_change_mac,
        .ndo_select_queue       = ieee80211_netdev_select_queue,
+       .ndo_get_stats64        = ieee80211_get_stats64,
 };
 
 static u16 ieee80211_monitor_select_queue(struct net_device *dev,
@@ -1159,14 +1174,21 @@ static const struct net_device_ops ieee80211_monitorif_ops = {
        .ndo_change_mtu         = ieee80211_change_mtu,
        .ndo_set_mac_address    = ieee80211_change_mac,
        .ndo_select_queue       = ieee80211_monitor_select_queue,
+       .ndo_get_stats64        = ieee80211_get_stats64,
 };
 
+static void ieee80211_if_free(struct net_device *dev)
+{
+       free_percpu(dev->tstats);
+       free_netdev(dev);
+}
+
 static void ieee80211_if_setup(struct net_device *dev)
 {
        ether_setup(dev);
        dev->priv_flags &= ~IFF_TX_SKB_SHARING;
        dev->netdev_ops = &ieee80211_dataif_ops;
-       dev->destructor = free_netdev;
+       dev->destructor = ieee80211_if_free;
 }
 
 static void ieee80211_iface_work(struct work_struct *work)
@@ -1707,6 +1729,12 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
                        return -ENOMEM;
                dev_net_set(ndev, wiphy_net(local->hw.wiphy));
 
+               ndev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+               if (!ndev->tstats) {
+                       free_netdev(ndev);
+                       return -ENOMEM;
+               }
+
                ndev->needed_headroom = local->tx_headroom +
                                        4*6 /* four MAC addresses */
                                        + 2 + 2 + 2 + 2 /* ctl, dur, seq, qos */
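
The interface hunks above pair netdev_alloc_pcpu_stats() with free_percpu() in a custom destructor so the per-CPU counters cannot leak, and fold them in ieee80211_get_stats64() with the u64_stats seqcount retry loop: readers snapshot the counters and retry if a writer raced with them. A userspace analogue of that retry loop, as an illustrative sketch rather than the kernel implementation:

/* sketch: seqcount-style consistent read of a multi-word counter set;
 * a writer would bump seq to odd, update, then bump it to even */
#include <stdint.h>
#include <stdio.h>

struct sw_netstats {
        uint64_t rx_packets, rx_bytes;
        unsigned int seq;       /* even = stable, odd = write in progress */
};

static unsigned int fetch_begin(const struct sw_netstats *s)
{
        unsigned int seq;

        do {
                seq = __atomic_load_n(&s->seq, __ATOMIC_ACQUIRE);
        } while (seq & 1);      /* writer active, wait for a stable view */
        return seq;
}

static int fetch_retry(const struct sw_netstats *s, unsigned int seq)
{
        return __atomic_load_n(&s->seq, __ATOMIC_ACQUIRE) != seq;
}

int main(void)
{
        struct sw_netstats st = { .rx_packets = 10, .rx_bytes = 1400 };
        uint64_t pkts, bytes;
        unsigned int seq;

        do {
                seq = fetch_begin(&st);
                pkts = st.rx_packets;   /* snapshot both words together */
                bytes = st.rx_bytes;
        } while (fetch_retry(&st, seq));

        printf("rx: %llu packets, %llu bytes\n",
               (unsigned long long)pkts, (unsigned long long)bytes);
        return 0;
}
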
index a907f2d5c12d857bf1811af24e57f5af09eb8665..577a11a13cdf56e09f1166bf878aeb48bad4856e 100644 (file)
@@ -256,6 +256,7 @@ static void __ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata,
 
        if (uni) {
                rcu_assign_pointer(sdata->default_unicast_key, key);
+               ieee80211_check_fast_xmit_iface(sdata);
                drv_set_default_unicast_key(sdata->local, sdata, idx);
        }
 
@@ -325,6 +326,7 @@ static void ieee80211_key_replace(struct ieee80211_sub_if_data *sdata,
                if (pairwise) {
                        rcu_assign_pointer(sta->ptk[idx], new);
                        sta->ptk_idx = idx;
+                       ieee80211_check_fast_xmit(sta);
                } else {
                        rcu_assign_pointer(sta->gtk[idx], new);
                        sta->gtk_idx = idx;
@@ -510,15 +512,17 @@ ieee80211_key_alloc(u32 cipher, int idx, size_t key_len,
                break;
        default:
                if (cs) {
-                       size_t len = (seq_len > MAX_PN_LEN) ?
-                                               MAX_PN_LEN : seq_len;
+                       if (seq_len && seq_len != cs->pn_len) {
+                               kfree(key);
+                               return ERR_PTR(-EINVAL);
+                       }
 
                        key->conf.iv_len = cs->hdr_len;
                        key->conf.icv_len = cs->mic_len;
                        for (i = 0; i < IEEE80211_NUM_TIDS + 1; i++)
-                               for (j = 0; j < len; j++)
+                               for (j = 0; j < seq_len; j++)
                                        key->u.gen.rx_pn[i][j] =
-                                                       seq[len - j - 1];
+                                                       seq[seq_len - j - 1];
                        key->flags |= KEY_FLAG_CIPHER_SCHEME;
                }
        }
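
The key.c hunk above switches from clamping an over-long sequence to MAX_PN_LEN to rejecting any length that does not equal the cipher scheme's pn_len, turning a silent truncation into an explicit -EINVAL. A short sketch of that validate-don't-clamp rule, with illustrative names:

/* sketch: reject a mismatched PN length instead of clamping it */
#include <errno.h>
#include <stdio.h>

#define MAX_PN 16

struct cipher_scheme { unsigned int pn_len; };

static int set_rx_pn(unsigned char *rx_pn, const struct cipher_scheme *cs,
                     const unsigned char *seq, unsigned int seq_len)
{
        /* a short or long PN is a caller bug, not data to trim */
        if (seq_len && seq_len != cs->pn_len)
                return -EINVAL;

        /* store byte-swapped, as the hunk above does */
        for (unsigned int j = 0; j < seq_len; j++)
                rx_pn[j] = seq[seq_len - j - 1];
        return 0;
}

int main(void)
{
        struct cipher_scheme cs = { .pn_len = 6 };
        unsigned char pn[MAX_PN] = { 0 };
        unsigned char seq[6] = { 1, 2, 3, 4, 5, 6 };

        printf("%d\n", set_rx_pn(pn, &cs, seq, sizeof(seq)));   /* 0 */
        printf("%d\n", set_rx_pn(pn, &cs, seq, 4));             /* -22 */
        return 0;
}
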
index 96557dd1e77dff325072cff12b7b671aad942015..2119526db2f429f80181deefd47cefdc2ed29c34 100644 (file)
@@ -18,7 +18,6 @@
 
 #define NUM_DEFAULT_KEYS 4
 #define NUM_DEFAULT_MGMT_KEYS 2
-#define MAX_PN_LEN 16
 
 struct ieee80211_local;
 struct ieee80211_sub_if_data;
@@ -116,7 +115,7 @@ struct ieee80211_key {
                } gcmp;
                struct {
                        /* generic cipher scheme */
-                       u8 rx_pn[IEEE80211_NUM_TIDS + 1][MAX_PN_LEN];
+                       u8 rx_pn[IEEE80211_NUM_TIDS + 1][IEEE80211_MAX_PN_LEN];
                } gen;
        } u;
 
index e2b836446af386dba692304bda48fd1dcb038c5b..0505845b7ab836c15888a06b3b7d76995d46e685 100644 (file)
 #include <linux/export.h>
 #include "led.h"
 
-#define MAC80211_BLINK_DELAY 50 /* ms */
-
-void ieee80211_led_rx(struct ieee80211_local *local)
-{
-       unsigned long led_delay = MAC80211_BLINK_DELAY;
-       if (unlikely(!local->rx_led))
-               return;
-       led_trigger_blink_oneshot(local->rx_led, &led_delay, &led_delay, 0);
-}
-
-void ieee80211_led_tx(struct ieee80211_local *local)
-{
-       unsigned long led_delay = MAC80211_BLINK_DELAY;
-       if (unlikely(!local->tx_led))
-               return;
-       led_trigger_blink_oneshot(local->tx_led, &led_delay, &led_delay, 0);
-}
-
 void ieee80211_led_assoc(struct ieee80211_local *local, bool associated)
 {
-       if (unlikely(!local->assoc_led))
+       if (!atomic_read(&local->assoc_led_active))
                return;
        if (associated)
-               led_trigger_event(local->assoc_led, LED_FULL);
+               led_trigger_event(&local->assoc_led, LED_FULL);
        else
-               led_trigger_event(local->assoc_led, LED_OFF);
+               led_trigger_event(&local->assoc_led, LED_OFF);
 }
 
 void ieee80211_led_radio(struct ieee80211_local *local, bool enabled)
 {
-       if (unlikely(!local->radio_led))
+       if (!atomic_read(&local->radio_led_active))
                return;
        if (enabled)
-               led_trigger_event(local->radio_led, LED_FULL);
+               led_trigger_event(&local->radio_led, LED_FULL);
        else
-               led_trigger_event(local->radio_led, LED_OFF);
+               led_trigger_event(&local->radio_led, LED_OFF);
+}
+
+void ieee80211_alloc_led_names(struct ieee80211_local *local)
+{
+       local->rx_led.name = kasprintf(GFP_KERNEL, "%srx",
+                                      wiphy_name(local->hw.wiphy));
+       local->tx_led.name = kasprintf(GFP_KERNEL, "%stx",
+                                      wiphy_name(local->hw.wiphy));
+       local->assoc_led.name = kasprintf(GFP_KERNEL, "%sassoc",
+                                         wiphy_name(local->hw.wiphy));
+       local->radio_led.name = kasprintf(GFP_KERNEL, "%sradio",
+                                         wiphy_name(local->hw.wiphy));
+}
+
+void ieee80211_free_led_names(struct ieee80211_local *local)
+{
+       kfree(local->rx_led.name);
+       kfree(local->tx_led.name);
+       kfree(local->assoc_led.name);
+       kfree(local->radio_led.name);
+}
+
+static void ieee80211_tx_led_activate(struct led_classdev *led_cdev)
+{
+       struct ieee80211_local *local = container_of(led_cdev->trigger,
+                                                    struct ieee80211_local,
+                                                    tx_led);
+
+       atomic_inc(&local->tx_led_active);
+}
+
+static void ieee80211_tx_led_deactivate(struct led_classdev *led_cdev)
+{
+       struct ieee80211_local *local = container_of(led_cdev->trigger,
+                                                    struct ieee80211_local,
+                                                    tx_led);
+
+       atomic_dec(&local->tx_led_active);
+}
+
+static void ieee80211_rx_led_activate(struct led_classdev *led_cdev)
+{
+       struct ieee80211_local *local = container_of(led_cdev->trigger,
+                                                    struct ieee80211_local,
+                                                    rx_led);
+
+       atomic_inc(&local->rx_led_active);
+}
+
+static void ieee80211_rx_led_deactivate(struct led_classdev *led_cdev)
+{
+       struct ieee80211_local *local = container_of(led_cdev->trigger,
+                                                    struct ieee80211_local,
+                                                    rx_led);
+
+       atomic_dec(&local->rx_led_active);
+}
+
+static void ieee80211_assoc_led_activate(struct led_classdev *led_cdev)
+{
+       struct ieee80211_local *local = container_of(led_cdev->trigger,
+                                                    struct ieee80211_local,
+                                                    assoc_led);
+
+       atomic_inc(&local->assoc_led_active);
+}
+
+static void ieee80211_assoc_led_deactivate(struct led_classdev *led_cdev)
+{
+       struct ieee80211_local *local = container_of(led_cdev->trigger,
+                                                    struct ieee80211_local,
+                                                    assoc_led);
+
+       atomic_dec(&local->assoc_led_active);
+}
+
+static void ieee80211_radio_led_activate(struct led_classdev *led_cdev)
+{
+       struct ieee80211_local *local = container_of(led_cdev->trigger,
+                                                    struct ieee80211_local,
+                                                    radio_led);
+
+       atomic_inc(&local->radio_led_active);
+}
+
+static void ieee80211_radio_led_deactivate(struct led_classdev *led_cdev)
+{
+       struct ieee80211_local *local = container_of(led_cdev->trigger,
+                                                    struct ieee80211_local,
+                                                    radio_led);
+
+       atomic_dec(&local->radio_led_active);
+}
+
+static void ieee80211_tpt_led_activate(struct led_classdev *led_cdev)
+{
+       struct ieee80211_local *local = container_of(led_cdev->trigger,
+                                                    struct ieee80211_local,
+                                                    tpt_led);
+
+       atomic_inc(&local->tpt_led_active);
 }
 
-void ieee80211_led_names(struct ieee80211_local *local)
+static void ieee80211_tpt_led_deactivate(struct led_classdev *led_cdev)
 {
-       snprintf(local->rx_led_name, sizeof(local->rx_led_name),
-                "%srx", wiphy_name(local->hw.wiphy));
-       snprintf(local->tx_led_name, sizeof(local->tx_led_name),
-                "%stx", wiphy_name(local->hw.wiphy));
-       snprintf(local->assoc_led_name, sizeof(local->assoc_led_name),
-                "%sassoc", wiphy_name(local->hw.wiphy));
-       snprintf(local->radio_led_name, sizeof(local->radio_led_name),
-                "%sradio", wiphy_name(local->hw.wiphy));
+       struct ieee80211_local *local = container_of(led_cdev->trigger,
+                                                    struct ieee80211_local,
+                                                    tpt_led);
+
+       atomic_dec(&local->tpt_led_active);
 }
 
 void ieee80211_led_init(struct ieee80211_local *local)
 {
-       local->rx_led = kzalloc(sizeof(struct led_trigger), GFP_KERNEL);
-       if (local->rx_led) {
-               local->rx_led->name = local->rx_led_name;
-               if (led_trigger_register(local->rx_led)) {
-                       kfree(local->rx_led);
-                       local->rx_led = NULL;
-               }
+       atomic_set(&local->rx_led_active, 0);
+       local->rx_led.activate = ieee80211_rx_led_activate;
+       local->rx_led.deactivate = ieee80211_rx_led_deactivate;
+       if (local->rx_led.name && led_trigger_register(&local->rx_led)) {
+               kfree(local->rx_led.name);
+               local->rx_led.name = NULL;
        }
 
-       local->tx_led = kzalloc(sizeof(struct led_trigger), GFP_KERNEL);
-       if (local->tx_led) {
-               local->tx_led->name = local->tx_led_name;
-               if (led_trigger_register(local->tx_led)) {
-                       kfree(local->tx_led);
-                       local->tx_led = NULL;
-               }
+       atomic_set(&local->tx_led_active, 0);
+       local->tx_led.activate = ieee80211_tx_led_activate;
+       local->tx_led.deactivate = ieee80211_tx_led_deactivate;
+       if (local->tx_led.name && led_trigger_register(&local->tx_led)) {
+               kfree(local->tx_led.name);
+               local->tx_led.name = NULL;
        }
 
-       local->assoc_led = kzalloc(sizeof(struct led_trigger), GFP_KERNEL);
-       if (local->assoc_led) {
-               local->assoc_led->name = local->assoc_led_name;
-               if (led_trigger_register(local->assoc_led)) {
-                       kfree(local->assoc_led);
-                       local->assoc_led = NULL;
-               }
+       atomic_set(&local->assoc_led_active, 0);
+       local->assoc_led.activate = ieee80211_assoc_led_activate;
+       local->assoc_led.deactivate = ieee80211_assoc_led_deactivate;
+       if (local->assoc_led.name && led_trigger_register(&local->assoc_led)) {
+               kfree(local->assoc_led.name);
+               local->assoc_led.name = NULL;
        }
 
-       local->radio_led = kzalloc(sizeof(struct led_trigger), GFP_KERNEL);
-       if (local->radio_led) {
-               local->radio_led->name = local->radio_led_name;
-               if (led_trigger_register(local->radio_led)) {
-                       kfree(local->radio_led);
-                       local->radio_led = NULL;
-               }
+       atomic_set(&local->radio_led_active, 0);
+       local->radio_led.activate = ieee80211_radio_led_activate;
+       local->radio_led.deactivate = ieee80211_radio_led_deactivate;
+       if (local->radio_led.name && led_trigger_register(&local->radio_led)) {
+               kfree(local->radio_led.name);
+               local->radio_led.name = NULL;
        }
 
+       atomic_set(&local->tpt_led_active, 0);
        if (local->tpt_led_trigger) {
-               if (led_trigger_register(&local->tpt_led_trigger->trig)) {
+               local->tpt_led.activate = ieee80211_tpt_led_activate;
+               local->tpt_led.deactivate = ieee80211_tpt_led_deactivate;
+               if (led_trigger_register(&local->tpt_led)) {
                        kfree(local->tpt_led_trigger);
                        local->tpt_led_trigger = NULL;
                }
@@ -110,58 +189,50 @@ void ieee80211_led_init(struct ieee80211_local *local)
 
 void ieee80211_led_exit(struct ieee80211_local *local)
 {
-       if (local->radio_led) {
-               led_trigger_unregister(local->radio_led);
-               kfree(local->radio_led);
-       }
-       if (local->assoc_led) {
-               led_trigger_unregister(local->assoc_led);
-               kfree(local->assoc_led);
-       }
-       if (local->tx_led) {
-               led_trigger_unregister(local->tx_led);
-               kfree(local->tx_led);
-       }
-       if (local->rx_led) {
-               led_trigger_unregister(local->rx_led);
-               kfree(local->rx_led);
-       }
+       if (local->radio_led.name)
+               led_trigger_unregister(&local->radio_led);
+       if (local->assoc_led.name)
+               led_trigger_unregister(&local->assoc_led);
+       if (local->tx_led.name)
+               led_trigger_unregister(&local->tx_led);
+       if (local->rx_led.name)
+               led_trigger_unregister(&local->rx_led);
 
        if (local->tpt_led_trigger) {
-               led_trigger_unregister(&local->tpt_led_trigger->trig);
+               led_trigger_unregister(&local->tpt_led);
                kfree(local->tpt_led_trigger);
        }
 }
 
-char *__ieee80211_get_radio_led_name(struct ieee80211_hw *hw)
+const char *__ieee80211_get_radio_led_name(struct ieee80211_hw *hw)
 {
        struct ieee80211_local *local = hw_to_local(hw);
 
-       return local->radio_led_name;
+       return local->radio_led.name;
 }
 EXPORT_SYMBOL(__ieee80211_get_radio_led_name);
 
-char *__ieee80211_get_assoc_led_name(struct ieee80211_hw *hw)
+const char *__ieee80211_get_assoc_led_name(struct ieee80211_hw *hw)
 {
        struct ieee80211_local *local = hw_to_local(hw);
 
-       return local->assoc_led_name;
+       return local->assoc_led.name;
 }
 EXPORT_SYMBOL(__ieee80211_get_assoc_led_name);
 
-char *__ieee80211_get_tx_led_name(struct ieee80211_hw *hw)
+const char *__ieee80211_get_tx_led_name(struct ieee80211_hw *hw)
 {
        struct ieee80211_local *local = hw_to_local(hw);
 
-       return local->tx_led_name;
+       return local->tx_led.name;
 }
 EXPORT_SYMBOL(__ieee80211_get_tx_led_name);
 
-char *__ieee80211_get_rx_led_name(struct ieee80211_hw *hw)
+const char *__ieee80211_get_rx_led_name(struct ieee80211_hw *hw)
 {
        struct ieee80211_local *local = hw_to_local(hw);
 
-       return local->rx_led_name;
+       return local->rx_led.name;
 }
 EXPORT_SYMBOL(__ieee80211_get_rx_led_name);
 
@@ -205,16 +276,17 @@ static void tpt_trig_timer(unsigned long data)
                }
        }
 
-       read_lock(&tpt_trig->trig.leddev_list_lock);
-       list_for_each_entry(led_cdev, &tpt_trig->trig.led_cdevs, trig_list)
+       read_lock(&local->tpt_led.leddev_list_lock);
+       list_for_each_entry(led_cdev, &local->tpt_led.led_cdevs, trig_list)
                led_blink_set(led_cdev, &on, &off);
-       read_unlock(&tpt_trig->trig.leddev_list_lock);
+       read_unlock(&local->tpt_led.leddev_list_lock);
 }
 
-char *__ieee80211_create_tpt_led_trigger(struct ieee80211_hw *hw,
-                               unsigned int flags,
-                               const struct ieee80211_tpt_blink *blink_table,
-                               unsigned int blink_table_len)
+const char *
+__ieee80211_create_tpt_led_trigger(struct ieee80211_hw *hw,
+                                  unsigned int flags,
+                                  const struct ieee80211_tpt_blink *blink_table,
+                                  unsigned int blink_table_len)
 {
        struct ieee80211_local *local = hw_to_local(hw);
        struct tpt_led_trigger *tpt_trig;
@@ -229,7 +301,7 @@ char *__ieee80211_create_tpt_led_trigger(struct ieee80211_hw *hw,
        snprintf(tpt_trig->name, sizeof(tpt_trig->name),
                 "%stpt", wiphy_name(local->hw.wiphy));
 
-       tpt_trig->trig.name = tpt_trig->name;
+       local->tpt_led.name = tpt_trig->name;
 
        tpt_trig->blink_table = blink_table;
        tpt_trig->blink_table_len = blink_table_len;
@@ -269,10 +341,10 @@ static void ieee80211_stop_tpt_led_trig(struct ieee80211_local *local)
        tpt_trig->running = false;
        del_timer_sync(&tpt_trig->timer);
 
-       read_lock(&tpt_trig->trig.leddev_list_lock);
-       list_for_each_entry(led_cdev, &tpt_trig->trig.led_cdevs, trig_list)
+       read_lock(&local->tpt_led.leddev_list_lock);
+       list_for_each_entry(led_cdev, &local->tpt_led.led_cdevs, trig_list)
                led_set_brightness(led_cdev, LED_OFF);
-       read_unlock(&tpt_trig->trig.leddev_list_lock);
+       read_unlock(&local->tpt_led.leddev_list_lock);
 }
 
 void ieee80211_mod_tpt_led_trig(struct ieee80211_local *local,
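
The led.c rework above embeds the triggers in ieee80211_local and recovers the owner with container_of() in the activate/deactivate callbacks, keeping an atomic count of attached LEDs so the TX/RX hot paths can skip blink work when nothing is listening. A minimal standalone sketch of that pattern; the wifi_dev type and callback wiring here are illustrative:

/* sketch: embedded trigger + container_of + active-listener count */
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct led_trigger { const char *name; };

struct wifi_dev {
        struct led_trigger tx_led;      /* embedded, not allocated */
        atomic_int tx_led_active;
};

static void tx_led_activate(struct led_trigger *trig)
{
        struct wifi_dev *dev = container_of(trig, struct wifi_dev, tx_led);

        atomic_fetch_add(&dev->tx_led_active, 1);
}

static void tx_led_deactivate(struct led_trigger *trig)
{
        struct wifi_dev *dev = container_of(trig, struct wifi_dev, tx_led);

        atomic_fetch_sub(&dev->tx_led_active, 1);
}

static void led_tx(struct wifi_dev *dev)
{
        if (!atomic_load(&dev->tx_led_active))
                return;         /* cheap early-out on the TX hot path */
        printf("blink %s\n", dev->tx_led.name);
}

int main(void)
{
        struct wifi_dev dev = { .tx_led = { .name = "phy0tx" } };

        led_tx(&dev);                   /* no LED attached: no work */
        tx_led_activate(&dev.tx_led);
        led_tx(&dev);                   /* prints "blink phy0tx" */
        tx_led_deactivate(&dev.tx_led);
        return 0;
}
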
index 89f4344f13b973509344d2431960c4d51193d56e..a7893a1ac98bd1f0addfd1138fb289bd8663ef51 100644 (file)
 #include <linux/leds.h>
 #include "ieee80211_i.h"
 
+#define MAC80211_BLINK_DELAY 50 /* ms */
+
+static inline void ieee80211_led_rx(struct ieee80211_local *local)
+{
+#ifdef CONFIG_MAC80211_LEDS
+       unsigned long led_delay = MAC80211_BLINK_DELAY;
+
+       if (!atomic_read(&local->rx_led_active))
+               return;
+       led_trigger_blink_oneshot(&local->rx_led, &led_delay, &led_delay, 0);
+#endif
+}
+
+static inline void ieee80211_led_tx(struct ieee80211_local *local)
+{
+#ifdef CONFIG_MAC80211_LEDS
+       unsigned long led_delay = MAC80211_BLINK_DELAY;
+
+       if (!atomic_read(&local->tx_led_active))
+               return;
+       led_trigger_blink_oneshot(&local->tx_led, &led_delay, &led_delay, 0);
+#endif
+}
+
 #ifdef CONFIG_MAC80211_LEDS
-void ieee80211_led_rx(struct ieee80211_local *local);
-void ieee80211_led_tx(struct ieee80211_local *local);
 void ieee80211_led_assoc(struct ieee80211_local *local,
                         bool associated);
 void ieee80211_led_radio(struct ieee80211_local *local,
                         bool enabled);
-void ieee80211_led_names(struct ieee80211_local *local);
+void ieee80211_alloc_led_names(struct ieee80211_local *local);
+void ieee80211_free_led_names(struct ieee80211_local *local);
 void ieee80211_led_init(struct ieee80211_local *local);
 void ieee80211_led_exit(struct ieee80211_local *local);
 void ieee80211_mod_tpt_led_trig(struct ieee80211_local *local,
                                unsigned int types_on, unsigned int types_off);
 #else
-static inline void ieee80211_led_rx(struct ieee80211_local *local)
-{
-}
-static inline void ieee80211_led_tx(struct ieee80211_local *local)
-{
-}
 static inline void ieee80211_led_assoc(struct ieee80211_local *local,
                                       bool associated)
 {
@@ -38,7 +55,10 @@ static inline void ieee80211_led_radio(struct ieee80211_local *local,
                                       bool enabled)
 {
 }
-static inline void ieee80211_led_names(struct ieee80211_local *local)
+static inline void ieee80211_alloc_led_names(struct ieee80211_local *local)
+{
+}
+static inline void ieee80211_free_led_names(struct ieee80211_local *local)
 {
 }
 static inline void ieee80211_led_init(struct ieee80211_local *local)
@@ -58,7 +78,7 @@ static inline void
 ieee80211_tpt_led_trig_tx(struct ieee80211_local *local, __le16 fc, int bytes)
 {
 #ifdef CONFIG_MAC80211_LEDS
-       if (local->tpt_led_trigger && ieee80211_is_data(fc))
+       if (ieee80211_is_data(fc) && atomic_read(&local->tpt_led_active))
                local->tpt_led_trigger->tx_bytes += bytes;
 #endif
 }
@@ -67,7 +87,7 @@ static inline void
 ieee80211_tpt_led_trig_rx(struct ieee80211_local *local, __le16 fc, int bytes)
 {
 #ifdef CONFIG_MAC80211_LEDS
-       if (local->tpt_led_trigger && ieee80211_is_data(fc))
+       if (ieee80211_is_data(fc) && atomic_read(&local->tpt_led_active))
                local->tpt_led_trigger->rx_bytes += bytes;
 #endif
 }
index df3051d96afffbd1257442a16c3a2f198629fbcc..674164fe5cdba12156b15e8e8a493e56b90dbb6e 100644 (file)
@@ -41,9 +41,6 @@ void ieee80211_configure_filter(struct ieee80211_local *local)
        unsigned int changed_flags;
        unsigned int new_flags = 0;
 
-       if (atomic_read(&local->iff_promiscs))
-               new_flags |= FIF_PROMISC_IN_BSS;
-
        if (atomic_read(&local->iff_allmultis))
                new_flags |= FIF_ALLMULTI;
 
@@ -249,6 +246,7 @@ static void ieee80211_restart_work(struct work_struct *work)
 {
        struct ieee80211_local *local =
                container_of(work, struct ieee80211_local, restart_work);
+       struct ieee80211_sub_if_data *sdata;
 
        /* wait for scan work complete */
        flush_workqueue(local->workqueue);
@@ -257,6 +255,8 @@ static void ieee80211_restart_work(struct work_struct *work)
             "%s called with hardware scan in progress\n", __func__);
 
        rtnl_lock();
+       list_for_each_entry(sdata, &local->interfaces, list)
+               flush_delayed_work(&sdata->dec_tailroom_needed_wk);
        ieee80211_scan_cancel(local);
        ieee80211_reconfig(local);
        rtnl_unlock();
@@ -646,7 +646,7 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
        skb_queue_head_init(&local->skb_queue);
        skb_queue_head_init(&local->skb_queue_unreliable);
 
-       ieee80211_led_names(local);
+       ieee80211_alloc_led_names(local);
 
        ieee80211_roc_setup(local);
 
@@ -771,8 +771,13 @@ static int ieee80211_init_cipher_suites(struct ieee80211_local *local)
                        suites[w++] = WLAN_CIPHER_SUITE_BIP_GMAC_256;
                }
 
-               for (r = 0; r < local->hw.n_cipher_schemes; r++)
+               for (r = 0; r < local->hw.n_cipher_schemes; r++) {
                        suites[w++] = cs[r].cipher;
+                       if (WARN_ON(cs[r].pn_len > IEEE80211_MAX_PN_LEN)) {
+                               kfree(suites);
+                               return -EINVAL;
+                       }
+               }
        }
 
        local->hw.wiphy->cipher_suites = suites;
@@ -840,7 +845,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
 
        /* Only HW csum features are currently compatible with mac80211 */
        feature_whitelist = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
-                           NETIF_F_HW_CSUM;
+                           NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA |
+                           NETIF_F_GSO_SOFTWARE;
        if (WARN_ON(hw->netdev_features & ~feature_whitelist))
                return -EINVAL;
 
@@ -1209,6 +1215,8 @@ void ieee80211_free_hw(struct ieee80211_hw *hw)
 
        sta_info_stop(local);
 
+       ieee80211_free_led_names(local);
+
        wiphy_free(local->hw.wiphy);
 }
 EXPORT_SYMBOL(ieee80211_free_hw);
index 60d737f144e37563ebfaa9f7c82efafc3e2c9135..ac843fc88745252e798b8a9ddaf62d1f86f82223 100644 (file)
@@ -72,10 +72,11 @@ static bool rssi_threshold_check(struct ieee80211_sub_if_data *sdata,
  *
  * @sta: mesh peer link to restart
  *
- * Locking: this function must be called holding sta->lock
+ * Locking: this function must be called holding sta->plink_lock
  */
 static inline void mesh_plink_fsm_restart(struct sta_info *sta)
 {
+       lockdep_assert_held(&sta->plink_lock);
        sta->plink_state = NL80211_PLINK_LISTEN;
        sta->llid = sta->plid = sta->reason = 0;
        sta->plink_retries = 0;
@@ -213,13 +214,15 @@ static u32 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata)
  * All mesh paths with this peer as next hop will be flushed
  * Returns beacon changed flag if the beacon content changed.
  *
- * Locking: the caller must hold sta->lock
+ * Locking: the caller must hold sta->plink_lock
  */
 static u32 __mesh_plink_deactivate(struct sta_info *sta)
 {
        struct ieee80211_sub_if_data *sdata = sta->sdata;
        u32 changed = 0;
 
+       lockdep_assert_held(&sta->plink_lock);
+
        if (sta->plink_state == NL80211_PLINK_ESTAB)
                changed = mesh_plink_dec_estab_count(sdata);
        sta->plink_state = NL80211_PLINK_BLOCKED;
@@ -244,13 +247,13 @@ u32 mesh_plink_deactivate(struct sta_info *sta)
        struct ieee80211_sub_if_data *sdata = sta->sdata;
        u32 changed;
 
-       spin_lock_bh(&sta->lock);
+       spin_lock_bh(&sta->plink_lock);
        changed = __mesh_plink_deactivate(sta);
        sta->reason = WLAN_REASON_MESH_PEER_CANCELED;
        mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
                            sta->sta.addr, sta->llid, sta->plid,
                            sta->reason);
-       spin_unlock_bh(&sta->lock);
+       spin_unlock_bh(&sta->plink_lock);
 
        return changed;
 }
@@ -387,7 +390,7 @@ static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata,
        sband = local->hw.wiphy->bands[band];
        rates = ieee80211_sta_get_rates(sdata, elems, band, &basic_rates);
 
-       spin_lock_bh(&sta->lock);
+       spin_lock_bh(&sta->plink_lock);
        sta->last_rx = jiffies;
 
        /* rates and capabilities don't change during peering */
@@ -419,7 +422,7 @@ static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata,
        else
                rate_control_rate_update(local, sband, sta, changed);
 out:
-       spin_unlock_bh(&sta->lock);
+       spin_unlock_bh(&sta->plink_lock);
 }
 
 static struct sta_info *
@@ -552,7 +555,7 @@ static void mesh_plink_timer(unsigned long data)
        if (sta->sdata->local->quiescing)
                return;
 
-       spin_lock_bh(&sta->lock);
+       spin_lock_bh(&sta->plink_lock);
 
        /* If a timer fires just before a state transition on another CPU,
         * we may have already extended the timeout and changed state by the
@@ -563,7 +566,7 @@ static void mesh_plink_timer(unsigned long data)
                mpl_dbg(sta->sdata,
                        "Ignoring timer for %pM in state %s (timer adjusted)",
                        sta->sta.addr, mplstates[sta->plink_state]);
-               spin_unlock_bh(&sta->lock);
+               spin_unlock_bh(&sta->plink_lock);
                return;
        }
 
@@ -573,7 +576,7 @@ static void mesh_plink_timer(unsigned long data)
                mpl_dbg(sta->sdata,
                        "Ignoring timer for %pM in state %s (timer deleted)",
                        sta->sta.addr, mplstates[sta->plink_state]);
-               spin_unlock_bh(&sta->lock);
+               spin_unlock_bh(&sta->plink_lock);
                return;
        }
 
@@ -619,7 +622,7 @@ static void mesh_plink_timer(unsigned long data)
        default:
                break;
        }
-       spin_unlock_bh(&sta->lock);
+       spin_unlock_bh(&sta->plink_lock);
        if (action)
                mesh_plink_frame_tx(sdata, action, sta->sta.addr,
                                    sta->llid, sta->plid, reason);
@@ -674,16 +677,16 @@ u32 mesh_plink_open(struct sta_info *sta)
        if (!test_sta_flag(sta, WLAN_STA_AUTH))
                return 0;
 
-       spin_lock_bh(&sta->lock);
+       spin_lock_bh(&sta->plink_lock);
        sta->llid = mesh_get_new_llid(sdata);
        if (sta->plink_state != NL80211_PLINK_LISTEN &&
            sta->plink_state != NL80211_PLINK_BLOCKED) {
-               spin_unlock_bh(&sta->lock);
+               spin_unlock_bh(&sta->plink_lock);
                return 0;
        }
        sta->plink_state = NL80211_PLINK_OPN_SNT;
        mesh_plink_timer_set(sta, sdata->u.mesh.mshcfg.dot11MeshRetryTimeout);
-       spin_unlock_bh(&sta->lock);
+       spin_unlock_bh(&sta->plink_lock);
        mpl_dbg(sdata,
                "Mesh plink: starting establishment with %pM\n",
                sta->sta.addr);
@@ -700,10 +703,10 @@ u32 mesh_plink_block(struct sta_info *sta)
 {
        u32 changed;
 
-       spin_lock_bh(&sta->lock);
+       spin_lock_bh(&sta->plink_lock);
        changed = __mesh_plink_deactivate(sta);
        sta->plink_state = NL80211_PLINK_BLOCKED;
-       spin_unlock_bh(&sta->lock);
+       spin_unlock_bh(&sta->plink_lock);
 
        return changed;
 }
@@ -758,7 +761,7 @@ static u32 mesh_plink_fsm(struct ieee80211_sub_if_data *sdata,
        mpl_dbg(sdata, "peer %pM in state %s got event %s\n", sta->sta.addr,
                mplstates[sta->plink_state], mplevents[event]);
 
-       spin_lock_bh(&sta->lock);
+       spin_lock_bh(&sta->plink_lock);
        switch (sta->plink_state) {
        case NL80211_PLINK_LISTEN:
                switch (event) {
@@ -872,7 +875,7 @@ static u32 mesh_plink_fsm(struct ieee80211_sub_if_data *sdata,
                 */
                break;
        }
-       spin_unlock_bh(&sta->lock);
+       spin_unlock_bh(&sta->plink_lock);
        if (action) {
                mesh_plink_frame_tx(sdata, action, sta->sta.addr,
                                    sta->llid, sta->plid, sta->reason);
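
The mesh hunks above give the peering state machine its own plink_lock and assert it is held in helpers that are only valid under that lock (via lockdep_assert_held()). A userspace analogue of the same discipline, using a pthread mutex plus an owner field for the assertion; the peer layout and helper names are illustrative, and the owner bookkeeping assumes a Linux/glibc pthread_t:

/* sketch: dedicated lock + held-assertion for lock-private helpers */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>

struct peer {
        pthread_mutex_t plink_lock;
        pthread_t owner;        /* debug-only: who holds plink_lock */
        int plink_state;
};

static void plink_lock(struct peer *p)
{
        pthread_mutex_lock(&p->plink_lock);
        p->owner = pthread_self();
}

static void plink_unlock(struct peer *p)
{
        p->owner = 0;
        pthread_mutex_unlock(&p->plink_lock);
}

/* must be called with plink_lock held, like __mesh_plink_deactivate() */
static void __plink_deactivate(struct peer *p)
{
        assert(pthread_equal(p->owner, pthread_self()));
        p->plink_state = 0;
}

int main(void)
{
        struct peer p = { .plink_lock = PTHREAD_MUTEX_INITIALIZER,
                          .plink_state = 1 };

        plink_lock(&p);
        __plink_deactivate(&p);         /* OK: lock held */
        plink_unlock(&p);
        printf("state=%d\n", p.plink_state);
        return 0;
}
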
index 26053bf2faa8ff441d0ff0c9905536bccf914281..387fe70ab12641bcf5c3371ee36aa5320628935a 100644 (file)
@@ -1098,6 +1098,24 @@ static void ieee80211_chswitch_timer(unsigned long data)
        ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.chswitch_work);
 }
 
+static void ieee80211_teardown_tdls_peers(struct ieee80211_sub_if_data *sdata)
+{
+       struct sta_info *sta;
+       u16 reason = WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) {
+               if (!sta->sta.tdls || sta->sdata != sdata || !sta->uploaded ||
+                   !test_sta_flag(sta, WLAN_STA_AUTHORIZED))
+                       continue;
+
+               ieee80211_tdls_oper_request(&sdata->vif, sta->sta.addr,
+                                           NL80211_TDLS_TEARDOWN, reason,
+                                           GFP_ATOMIC);
+       }
+       rcu_read_unlock();
+}
+
 static void
 ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
                                 u64 timestamp, u32 device_timestamp,
@@ -1161,6 +1179,14 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
                return;
        }
 
+       /*
+        * Drop all TDLS peers - either we disconnect or move to a different
+        * channel from this point on. There's no telling what our peer will do.
+        * The TDLS WIDER_BW scenario is also problematic, as peers might now
+        * have an incompatible wider chandef.
+        */
+       ieee80211_teardown_tdls_peers(sdata);
+
        mutex_lock(&local->mtx);
        mutex_lock(&local->chanctx_mtx);
        conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
@@ -4307,15 +4333,15 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
 }
 
 static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
-                                    struct cfg80211_bss *cbss, bool assoc)
+                                    struct cfg80211_bss *cbss, bool assoc,
+                                    bool override)
 {
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
        struct ieee80211_bss *bss = (void *)cbss->priv;
        struct sta_info *new_sta = NULL;
        struct ieee80211_supported_band *sband;
-       struct ieee80211_sta_ht_cap sta_ht_cap;
-       bool have_sta = false, is_override = false;
+       bool have_sta = false;
        int err;
 
        sband = local->hw.wiphy->bands[cbss->channel->band];
@@ -4335,14 +4361,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
                        return -ENOMEM;
        }
 
-       memcpy(&sta_ht_cap, &sband->ht_cap, sizeof(sta_ht_cap));
-       ieee80211_apply_htcap_overrides(sdata, &sta_ht_cap);
-
-       is_override = (sta_ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) !=
-                     (sband->ht_cap.cap &
-                      IEEE80211_HT_CAP_SUP_WIDTH_20_40);
-
-       if (new_sta || is_override) {
+       if (new_sta || override) {
                err = ieee80211_prep_channel(sdata, cbss);
                if (err) {
                        if (new_sta)
@@ -4552,7 +4571,7 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
 
        sdata_info(sdata, "authenticate with %pM\n", req->bss->bssid);
 
-       err = ieee80211_prep_connection(sdata, req->bss, false);
+       err = ieee80211_prep_connection(sdata, req->bss, false, false);
        if (err)
                goto err_clear;
 
@@ -4624,6 +4643,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
        struct ieee80211_supported_band *sband;
        const u8 *ssidie, *ht_ie, *vht_ie;
        int i, err;
+       bool override = false;
 
        assoc_data = kzalloc(sizeof(*assoc_data) + req->ie_len, GFP_KERNEL);
        if (!assoc_data)
@@ -4728,14 +4748,6 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
                }
        }
 
-       if (req->flags & ASSOC_REQ_DISABLE_HT) {
-               ifmgd->flags |= IEEE80211_STA_DISABLE_HT;
-               ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
-       }
-
-       if (req->flags & ASSOC_REQ_DISABLE_VHT)
-               ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
-
        /* Also disable HT if we don't support it or the AP doesn't use WMM */
        sband = local->hw.wiphy->bands[req->bss->channel->band];
        if (!sband->ht_cap.ht_supported ||
@@ -4847,7 +4859,36 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
        ifmgd->dtim_period = 0;
        ifmgd->have_beacon = false;
 
-       err = ieee80211_prep_connection(sdata, req->bss, true);
+       /* override HT/VHT configuration only if the AP and we support it */
+       if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT)) {
+               struct ieee80211_sta_ht_cap sta_ht_cap;
+
+               if (req->flags & ASSOC_REQ_DISABLE_HT)
+                       override = true;
+
+               memcpy(&sta_ht_cap, &sband->ht_cap, sizeof(sta_ht_cap));
+               ieee80211_apply_htcap_overrides(sdata, &sta_ht_cap);
+
+               /* check for 40 MHz disable override */
+               if (!(ifmgd->flags & IEEE80211_STA_DISABLE_40MHZ) &&
+                   sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 &&
+                   !(sta_ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40))
+                       override = true;
+
+               if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT) &&
+                   req->flags & ASSOC_REQ_DISABLE_VHT)
+                       override = true;
+       }
+
+       if (req->flags & ASSOC_REQ_DISABLE_HT) {
+               ifmgd->flags |= IEEE80211_STA_DISABLE_HT;
+               ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
+       }
+
+       if (req->flags & ASSOC_REQ_DISABLE_VHT)
+               ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
+
+       err = ieee80211_prep_connection(sdata, req->bss, true, override);
        if (err)
                goto err_clear;
 
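A concrete reading of the override logic above (example mine, not from the patch): if both ends support 40 MHz HT but userspace requested a 20 MHz-only HT capability override, sta_ht_cap loses IEEE80211_HT_CAP_SUP_WIDTH_20_40 while sband->ht_cap keeps it, so override becomes true and ieee80211_prep_connection() re-runs the channel preparation even though a station entry may already exist. Folded into a single predicate, the decision is roughly (hypothetical helper, mirroring the checks above):

	static bool assoc_needs_override(struct ieee80211_if_managed *ifmgd,
					 struct ieee80211_supported_band *sband,
					 struct ieee80211_sta_ht_cap *ht,
					 u32 req_flags)
	{
		if (ifmgd->flags & IEEE80211_STA_DISABLE_HT)
			return false;	/* HT already off, nothing to override */
		if (req_flags & ASSOC_REQ_DISABLE_HT)
			return true;
		/* 40 MHz disable override */
		if (!(ifmgd->flags & IEEE80211_STA_DISABLE_40MHZ) &&
		    (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) &&
		    !(ht->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40))
			return true;
		return !(ifmgd->flags & IEEE80211_STA_DISABLE_VHT) &&
		       (req_flags & ASSOC_REQ_DISABLE_VHT);
	}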
index d53355b011f5cf6407367e3c5f1a3e513d1cc08e..de69adf24f53f8ab8213c1b0f4ab32e77400bf23 100644 (file)
@@ -683,7 +683,13 @@ void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
        if (sdata->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)
                return;
 
-       ref->ops->get_rate(ref->priv, ista, priv_sta, txrc);
+       if (ista) {
+               spin_lock_bh(&sta->rate_ctrl_lock);
+               ref->ops->get_rate(ref->priv, ista, priv_sta, txrc);
+               spin_unlock_bh(&sta->rate_ctrl_lock);
+       } else {
+               ref->ops->get_rate(ref->priv, NULL, NULL, txrc);
+       }
 
        if (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_RC_TABLE)
                return;
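Note the asymmetry above: rate_ctrl_lock lives in struct sta_info, so when get_rate() runs without a station (ista == NULL, e.g. for frames sent before association) there is no per-STA rate-control state to protect, and that branch is deliberately left unlocked.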
index 38652f09feaf250ec0517e23c8f147c8921f7499..25c9be5dd7fd811b32d13c792cf50c661f6f1e44 100644 (file)
@@ -42,10 +42,12 @@ static inline void rate_control_tx_status(struct ieee80211_local *local,
        if (!ref || !test_sta_flag(sta, WLAN_STA_RATE_CONTROL))
                return;
 
+       spin_lock_bh(&sta->rate_ctrl_lock);
        if (ref->ops->tx_status)
                ref->ops->tx_status(ref->priv, sband, ista, priv_sta, skb);
        else
                ref->ops->tx_status_noskb(ref->priv, sband, ista, priv_sta, info);
+       spin_unlock_bh(&sta->rate_ctrl_lock);
 }
 
 static inline void
@@ -64,7 +66,9 @@ rate_control_tx_status_noskb(struct ieee80211_local *local,
        if (WARN_ON_ONCE(!ref->ops->tx_status_noskb))
                return;
 
+       spin_lock_bh(&sta->rate_ctrl_lock);
        ref->ops->tx_status_noskb(ref->priv, sband, ista, priv_sta, info);
+       spin_unlock_bh(&sta->rate_ctrl_lock);
 }
 
 static inline void rate_control_rate_init(struct sta_info *sta)
@@ -91,8 +95,10 @@ static inline void rate_control_rate_init(struct sta_info *sta)
 
        sband = local->hw.wiphy->bands[chanctx_conf->def.chan->band];
 
+       spin_lock_bh(&sta->rate_ctrl_lock);
        ref->ops->rate_init(ref->priv, sband, &chanctx_conf->def, ista,
                            priv_sta);
+       spin_unlock_bh(&sta->rate_ctrl_lock);
        rcu_read_unlock();
        set_sta_flag(sta, WLAN_STA_RATE_CONTROL);
 }
@@ -115,18 +121,20 @@ static inline void rate_control_rate_update(struct ieee80211_local *local,
                        return;
                }
 
+               spin_lock_bh(&sta->rate_ctrl_lock);
                ref->ops->rate_update(ref->priv, sband, &chanctx_conf->def,
                                      ista, priv_sta, changed);
+               spin_unlock_bh(&sta->rate_ctrl_lock);
                rcu_read_unlock();
        }
        drv_sta_rc_update(local, sta->sdata, &sta->sta, changed);
 }
 
 static inline void *rate_control_alloc_sta(struct rate_control_ref *ref,
-                                          struct ieee80211_sta *sta,
-                                          gfp_t gfp)
+                                          struct sta_info *sta, gfp_t gfp)
 {
-       return ref->ops->alloc_sta(ref->priv, sta, gfp);
+       spin_lock_init(&sta->rate_ctrl_lock);
+       return ref->ops->alloc_sta(ref->priv, &sta->sta, gfp);
 }
 
 static inline void rate_control_free_sta(struct sta_info *sta)
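Taken together, these wrappers establish one contract: every rate-control callback that touches per-station algorithm state runs under sta->rate_ctrl_lock, which rate_control_alloc_sta() now initializes before the first callback can fire. The shape (a sketch of the pattern, not new code) is always:

	spin_lock_bh(&sta->rate_ctrl_lock);
	ref->ops->some_callback(ref->priv, ..., priv_sta, ...);
	spin_unlock_bh(&sta->rate_ctrl_lock);

so concurrent tx_status/get_rate/rate_init/rate_update calls for the same station can no longer race on priv_sta.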
index 5793f75c5ffde91de02e9698bd27500ff4640826..7d85f75163241fd242969026ef138271ecc1edfc 100644 (file)
 #include "wme.h"
 #include "rate.h"
 
+static inline void ieee80211_rx_stats(struct net_device *dev, u32 len)
+{
+       struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
+
+       u64_stats_update_begin(&tstats->syncp);
+       tstats->rx_packets++;
+       tstats->rx_bytes += len;
+       u64_stats_update_end(&tstats->syncp);
+}
+
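ieee80211_rx_stats() is only the writer side; for reference, a hedged sketch of how pcpu_sw_netstats counters like these are usually summed by a reader (the helper name is hypothetical, the u64_stats retry loop is the standard pattern):

	static void ieee80211_sum_rx_stats(struct net_device *dev,
					   u64 *packets, u64 *bytes)
	{
		int cpu;

		*packets = 0;
		*bytes = 0;
		for_each_possible_cpu(cpu) {
			const struct pcpu_sw_netstats *tstats;
			unsigned int start;
			u64 p, b;

			tstats = per_cpu_ptr(dev->tstats, cpu);
			do {	/* retry if a writer was mid-update */
				start = u64_stats_fetch_begin_irq(&tstats->syncp);
				p = tstats->rx_packets;
				b = tstats->rx_bytes;
			} while (u64_stats_fetch_retry_irq(&tstats->syncp, start));
			*packets += p;
			*bytes += b;
		}
	}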
 /*
  * monitor mode reception
  *
@@ -529,8 +539,7 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
                }
 
                prev_dev = sdata->dev;
-               sdata->dev->stats.rx_packets++;
-               sdata->dev->stats.rx_bytes += skb->len;
+               ieee80211_rx_stats(sdata->dev, skb->len);
        }
 
        if (prev_dev) {
@@ -981,7 +990,6 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
        struct sk_buff *skb = rx->skb;
        struct ieee80211_local *local = rx->local;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
-       struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
        struct sta_info *sta = rx->sta;
        struct tid_ampdu_rx *tid_agg_rx;
        u16 sc;
@@ -1016,10 +1024,6 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
            ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL)
                goto dont_reorder;
 
-       /* not actually part of this BA session */
-       if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
-               goto dont_reorder;
-
        /* new, potentially un-ordered, ampdu frame - process it */
 
        /* reset session timer */
@@ -1073,10 +1077,8 @@ ieee80211_rx_h_check_dup(struct ieee80211_rx_data *rx)
                if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
                             rx->sta->last_seq_ctrl[rx->seqno_idx] ==
                             hdr->seq_ctrl)) {
-                       if (status->rx_flags & IEEE80211_RX_RA_MATCH) {
-                               rx->local->dot11FrameDuplicateCount++;
-                               rx->sta->num_duplicates++;
-                       }
+                       I802_DEBUG_INC(rx->local->dot11FrameDuplicateCount);
+                       rx->sta->num_duplicates++;
                        return RX_DROP_UNUSABLE;
                } else if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
                        rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl;
@@ -1200,6 +1202,8 @@ static void sta_ps_start(struct sta_info *sta)
        ps_dbg(sdata, "STA %pM aid %d enters power save mode\n",
               sta->sta.addr, sta->sta.aid);
 
+       ieee80211_clear_fast_xmit(sta);
+
        if (!sta->sta.txq[0])
                return;
 
@@ -1265,7 +1269,7 @@ ieee80211_rx_h_uapsd_and_pspoll(struct ieee80211_rx_data *rx)
        struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
        int tid, ac;
 
-       if (!rx->sta || !(status->rx_flags & IEEE80211_RX_RA_MATCH))
+       if (!rx->sta)
                return RX_CONTINUE;
 
        if (sdata->vif.type != NL80211_IFTYPE_AP &&
@@ -1367,11 +1371,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
                        }
                }
        } else if (rx->sdata->vif.type == NL80211_IFTYPE_OCB) {
-               u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
-                                               NL80211_IFTYPE_OCB);
-               /* OCB uses wild-card BSSID */
-               if (is_broadcast_ether_addr(bssid))
-                       sta->last_rx = jiffies;
+               sta->last_rx = jiffies;
        } else if (!is_multicast_ether_addr(hdr->addr1)) {
                /*
                 * Mesh beacons will update last_rx if they are found to
@@ -1386,9 +1386,6 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
                }
        }
 
-       if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
-               return RX_CONTINUE;
-
        if (rx->sdata->vif.type == NL80211_IFTYPE_STATION)
                ieee80211_sta_rx_notify(rx->sdata, hdr);
 
@@ -1517,13 +1514,6 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
         * possible.
         */
 
-       /*
-        * No point in finding a key and decrypting if the frame is neither
-        * addressed to us nor a multicast frame.
-        */
-       if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
-               return RX_CONTINUE;
-
        /* start without a key */
        rx->key = NULL;
        fc = hdr->frame_control;
@@ -1795,7 +1785,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
        frag = sc & IEEE80211_SCTL_FRAG;
 
        if (is_multicast_ether_addr(hdr->addr1)) {
-               rx->local->dot11MulticastReceivedFrameCount++;
+               I802_DEBUG_INC(rx->local->dot11MulticastReceivedFrameCount);
                goto out_no_led;
        }
 
@@ -1878,7 +1868,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
 
        rx->skb = __skb_dequeue(&entry->skb_list);
        if (skb_tailroom(rx->skb) < entry->extra_len) {
-               I802_DEBUG_INC(rx->local->rx_expand_skb_head2);
+               I802_DEBUG_INC(rx->local->rx_expand_skb_head_defrag);
                if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len,
                                              GFP_ATOMIC))) {
                        I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
@@ -2054,18 +2044,15 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
        struct sk_buff *skb, *xmit_skb;
        struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
        struct sta_info *dsta;
-       struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
-
-       dev->stats.rx_packets++;
-       dev->stats.rx_bytes += rx->skb->len;
 
        skb = rx->skb;
        xmit_skb = NULL;
 
+       ieee80211_rx_stats(dev, skb->len);
+
        if ((sdata->vif.type == NL80211_IFTYPE_AP ||
             sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
            !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
-           (status->rx_flags & IEEE80211_RX_RA_MATCH) &&
            (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) {
                if (is_multicast_ether_addr(ehdr->h_dest)) {
                        /*
@@ -2207,7 +2194,6 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
        struct sk_buff *skb = rx->skb, *fwd_skb;
        struct ieee80211_local *local = rx->local;
        struct ieee80211_sub_if_data *sdata = rx->sdata;
-       struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
        u16 q, hdrlen;
 
@@ -2238,8 +2224,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
            mesh_rmc_check(rx->sdata, hdr->addr3, mesh_hdr))
                return RX_DROP_MONITOR;
 
-       if (!ieee80211_is_data(hdr->frame_control) ||
-           !(status->rx_flags & IEEE80211_RX_RA_MATCH))
+       if (!ieee80211_is_data(hdr->frame_control))
                return RX_CONTINUE;
 
        if (!mesh_hdr->ttl)
@@ -2330,11 +2315,9 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
        IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_frames);
        ieee80211_add_pending_skb(local, fwd_skb);
  out:
-       if (is_multicast_ether_addr(hdr->addr1) ||
-           sdata->dev->flags & IFF_PROMISC)
+       if (is_multicast_ether_addr(hdr->addr1))
                return RX_CONTINUE;
-       else
-               return RX_DROP_MONITOR;
+       return RX_DROP_MONITOR;
 }
 #endif
 
@@ -2445,6 +2428,9 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
                struct {
                        __le16 control, start_seq_num;
                } __packed bar_data;
+               struct ieee80211_event event = {
+                       .type = BAR_RX_EVENT,
+               };
 
                if (!rx->sta)
                        return RX_DROP_MONITOR;
@@ -2460,6 +2446,9 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
                        return RX_DROP_MONITOR;
 
                start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4;
+               event.u.ba.tid = tid;
+               event.u.ba.ssn = start_seq_num;
+               event.u.ba.sta = &rx->sta->sta;
 
                /* reset session timer */
                if (tid_agg_rx->timeout)
@@ -2472,6 +2461,8 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
                                                 start_seq_num, frames);
                spin_unlock(&tid_agg_rx->reorder_lock);
 
+               drv_event_callback(rx->local, rx->sdata, &event);
+
                kfree_skb(skb);
                return RX_QUEUED;
        }
@@ -2561,9 +2552,6 @@ ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx)
                rx->flags |= IEEE80211_RX_BEACON_REPORTED;
        }
 
-       if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
-               return RX_DROP_MONITOR;
-
        if (ieee80211_drop_unencrypted_mgmt(rx))
                return RX_DROP_UNUSABLE;
 
@@ -2591,9 +2579,6 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
            mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT)
                return RX_DROP_UNUSABLE;
 
-       if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
-               return RX_DROP_UNUSABLE;
-
        switch (mgmt->u.action.category) {
        case WLAN_CATEGORY_HT:
                /* reject HT action frames from stations not supporting HT */
@@ -3077,8 +3062,7 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
                }
 
                prev_dev = sdata->dev;
-               sdata->dev->stats.rx_packets++;
-               sdata->dev->stats.rx_bytes += skb->len;
+               ieee80211_rx_stats(sdata->dev, skb->len);
        }
 
        if (prev_dev) {
@@ -3246,16 +3230,25 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
        ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames);
        spin_unlock(&tid_agg_rx->reorder_lock);
 
+       if (!skb_queue_empty(&frames)) {
+               struct ieee80211_event event = {
+                       .type = BA_FRAME_TIMEOUT,
+                       .u.ba.tid = tid,
+                       .u.ba.sta = &sta->sta,
+               };
+               drv_event_callback(rx.local, rx.sdata, &event);
+       }
+
        ieee80211_rx_handlers(&rx, &frames);
 }
 
 /* main receive path */
 
-static bool prepare_for_handlers(struct ieee80211_rx_data *rx,
-                                struct ieee80211_hdr *hdr)
+static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
 {
        struct ieee80211_sub_if_data *sdata = rx->sdata;
        struct sk_buff *skb = rx->skb;
+       struct ieee80211_hdr *hdr = (void *)skb->data;
        struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
        u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type);
        int multicast = is_multicast_ether_addr(hdr->addr1);
@@ -3264,30 +3257,23 @@ static bool prepare_for_handlers(struct ieee80211_rx_data *rx,
        case NL80211_IFTYPE_STATION:
                if (!bssid && !sdata->u.mgd.use_4addr)
                        return false;
-               if (!multicast &&
-                   !ether_addr_equal(sdata->vif.addr, hdr->addr1)) {
-                       if (!(sdata->dev->flags & IFF_PROMISC) ||
-                           sdata->u.mgd.use_4addr)
-                               return false;
-                       status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
-               }
-               break;
+               if (multicast)
+                       return true;
+               return ether_addr_equal(sdata->vif.addr, hdr->addr1);
        case NL80211_IFTYPE_ADHOC:
                if (!bssid)
                        return false;
                if (ether_addr_equal(sdata->vif.addr, hdr->addr2) ||
                    ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2))
                        return false;
-               if (ieee80211_is_beacon(hdr->frame_control)) {
+               if (ieee80211_is_beacon(hdr->frame_control))
                        return true;
-               } else if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) {
+               if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid))
                        return false;
-               } else if (!multicast &&
-                          !ether_addr_equal(sdata->vif.addr, hdr->addr1)) {
-                       if (!(sdata->dev->flags & IFF_PROMISC))
-                               return false;
-                       status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
-               } else if (!rx->sta) {
+               if (!multicast &&
+                   !ether_addr_equal(sdata->vif.addr, hdr->addr1))
+                       return false;
+               if (!rx->sta) {
                        int rate_idx;
                        if (status->flag & (RX_FLAG_HT | RX_FLAG_VHT))
                                rate_idx = 0; /* TODO: HT/VHT rates */
@@ -3296,25 +3282,18 @@ static bool prepare_for_handlers(struct ieee80211_rx_data *rx,
                        ieee80211_ibss_rx_no_sta(sdata, bssid, hdr->addr2,
                                                 BIT(rate_idx));
                }
-               break;
+               return true;
        case NL80211_IFTYPE_OCB:
                if (!bssid)
                        return false;
-               if (ieee80211_is_beacon(hdr->frame_control)) {
+               if (ieee80211_is_beacon(hdr->frame_control))
                        return false;
-               } else if (!is_broadcast_ether_addr(bssid)) {
-                       ocb_dbg(sdata, "BSSID mismatch in OCB mode!\n");
+               if (!is_broadcast_ether_addr(bssid))
                        return false;
-               } else if (!multicast &&
-                          !ether_addr_equal(sdata->dev->dev_addr,
-                                            hdr->addr1)) {
-                       /* if we are in promisc mode we also accept
-                        * packets not destined for us
-                        */
-                       if (!(sdata->dev->flags & IFF_PROMISC))
-                               return false;
-                       rx->flags &= ~IEEE80211_RX_RA_MATCH;
-               } else if (!rx->sta) {
+               if (!multicast &&
+                   !ether_addr_equal(sdata->dev->dev_addr, hdr->addr1))
+                       return false;
+               if (!rx->sta) {
                        int rate_idx;
                        if (status->flag & RX_FLAG_HT)
                                rate_idx = 0; /* TODO: HT rates */
@@ -3323,22 +3302,17 @@ static bool prepare_for_handlers(struct ieee80211_rx_data *rx,
                        ieee80211_ocb_rx_no_sta(sdata, bssid, hdr->addr2,
                                                BIT(rate_idx));
                }
-               break;
+               return true;
        case NL80211_IFTYPE_MESH_POINT:
-               if (!multicast &&
-                   !ether_addr_equal(sdata->vif.addr, hdr->addr1)) {
-                       if (!(sdata->dev->flags & IFF_PROMISC))
-                               return false;
-
-                       status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
-               }
-               break;
+               if (multicast)
+                       return true;
+               return ether_addr_equal(sdata->vif.addr, hdr->addr1);
        case NL80211_IFTYPE_AP_VLAN:
        case NL80211_IFTYPE_AP:
-               if (!bssid) {
-                       if (!ether_addr_equal(sdata->vif.addr, hdr->addr1))
-                               return false;
-               } else if (!ieee80211_bssid_match(bssid, sdata->vif.addr)) {
+               if (!bssid)
+                       return ether_addr_equal(sdata->vif.addr, hdr->addr1);
+
+               if (!ieee80211_bssid_match(bssid, sdata->vif.addr)) {
                        /*
                         * Accept public action frames even when the
                         * BSSID doesn't match, this is used for P2P
@@ -3350,10 +3324,10 @@ static bool prepare_for_handlers(struct ieee80211_rx_data *rx,
                                return false;
                        if (ieee80211_is_public_action(hdr, skb->len))
                                return true;
-                       if (!ieee80211_is_beacon(hdr->frame_control))
-                               return false;
-                       status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
-               } else if (!ieee80211_has_tods(hdr->frame_control)) {
+                       return ieee80211_is_beacon(hdr->frame_control);
+               }
+
+               if (!ieee80211_has_tods(hdr->frame_control)) {
                        /* ignore data frames to TDLS-peers */
                        if (ieee80211_is_data(hdr->frame_control))
                                return false;
@@ -3362,30 +3336,22 @@ static bool prepare_for_handlers(struct ieee80211_rx_data *rx,
                            !ether_addr_equal(bssid, hdr->addr1))
                                return false;
                }
-               break;
+               return true;
        case NL80211_IFTYPE_WDS:
                if (bssid || !ieee80211_is_data(hdr->frame_control))
                        return false;
-               if (!ether_addr_equal(sdata->u.wds.remote_addr, hdr->addr2))
-                       return false;
-               break;
+               return ether_addr_equal(sdata->u.wds.remote_addr, hdr->addr2);
        case NL80211_IFTYPE_P2P_DEVICE:
-               if (!ieee80211_is_public_action(hdr, skb->len) &&
-                   !ieee80211_is_probe_req(hdr->frame_control) &&
-                   !ieee80211_is_probe_resp(hdr->frame_control) &&
-                   !ieee80211_is_beacon(hdr->frame_control))
-                       return false;
-               if (!ether_addr_equal(sdata->vif.addr, hdr->addr1) &&
-                   !multicast)
-                       status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
-               break;
+               return ieee80211_is_public_action(hdr, skb->len) ||
+                      ieee80211_is_probe_req(hdr->frame_control) ||
+                      ieee80211_is_probe_resp(hdr->frame_control) ||
+                      ieee80211_is_beacon(hdr->frame_control);
        default:
-               /* should never get here */
-               WARN_ON_ONCE(1);
                break;
        }
 
-       return true;
+       WARN_ON_ONCE(1);
+       return false;
 }
 
 /*
@@ -3399,13 +3365,10 @@ static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
 {
        struct ieee80211_local *local = rx->local;
        struct ieee80211_sub_if_data *sdata = rx->sdata;
-       struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
-       struct ieee80211_hdr *hdr = (void *)skb->data;
 
        rx->skb = skb;
-       status->rx_flags |= IEEE80211_RX_RA_MATCH;
 
-       if (!prepare_for_handlers(rx, hdr))
+       if (!ieee80211_accept_frame(rx))
                return false;
 
        if (!consume) {
@@ -3448,7 +3411,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
        rx.local = local;
 
        if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc))
-               local->dot11ReceivedFragmentCount++;
+               I802_DEBUG_INC(local->dot11ReceivedFragmentCount);
 
        if (ieee80211_is_mgmt(fc)) {
                /* drop frame if too short for header */
index 2880f2ae99abe3a05b6421f53340a42e11a9ca30..ce0c1662de42078a558ddc08530e96d62a604308 100644 (file)
@@ -71,6 +71,7 @@ static const struct rhashtable_params sta_rht_params = {
        .key_offset = offsetof(struct sta_info, sta.addr),
        .key_len = ETH_ALEN,
        .hashfn = sta_addr_hash,
+       .max_size = CONFIG_MAC80211_STA_HASH_MAX_SIZE,
 };
 
 /* Caller must hold local->sta_mtx */
@@ -286,7 +287,7 @@ static int sta_prepare_rate_control(struct ieee80211_local *local,
 
        sta->rate_ctrl = local->rate_ctrl;
        sta->rate_ctrl_priv = rate_control_alloc_sta(sta->rate_ctrl,
-                                                    &sta->sta, gfp);
+                                                    sta, gfp);
        if (!sta->rate_ctrl_priv)
                return -ENOMEM;
 
@@ -312,6 +313,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
        INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work);
        mutex_init(&sta->ampdu_mlme.mtx);
 #ifdef CONFIG_MAC80211_MESH
+       spin_lock_init(&sta->plink_lock);
        if (ieee80211_vif_is_mesh(&sdata->vif) &&
            !sdata->u.mesh.user_mpm)
                init_timer(&sta->plink_timer);
@@ -1217,6 +1219,8 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
        ps_dbg(sdata,
               "STA %pM aid %d sending %d filtered/%d PS frames since STA not sleeping anymore\n",
               sta->sta.addr, sta->sta.aid, filtered, buffered);
+
+       ieee80211_check_fast_xmit(sta);
 }
 
 static void ieee80211_send_null_response(struct ieee80211_sub_if_data *sdata,
@@ -1615,6 +1619,7 @@ void ieee80211_sta_block_awake(struct ieee80211_hw *hw,
 
        if (block) {
                set_sta_flag(sta, WLAN_STA_PS_DRIVER);
+               ieee80211_clear_fast_xmit(sta);
                return;
        }
 
@@ -1632,6 +1637,7 @@ void ieee80211_sta_block_awake(struct ieee80211_hw *hw,
                ieee80211_queue_work(hw, &sta->drv_deliver_wk);
        } else {
                clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
+               ieee80211_check_fast_xmit(sta);
        }
 }
 EXPORT_SYMBOL(ieee80211_sta_block_awake);
@@ -1736,6 +1742,7 @@ int sta_info_move_state(struct sta_info *sta,
                             !sta->sdata->u.vlan.sta))
                                atomic_dec(&sta->sdata->bss->num_mcast_sta);
                        clear_bit(WLAN_STA_AUTHORIZED, &sta->_flags);
+                       ieee80211_clear_fast_xmit(sta);
                }
                break;
        case IEEE80211_STA_AUTHORIZED:
@@ -1745,6 +1752,7 @@ int sta_info_move_state(struct sta_info *sta,
                             !sta->sdata->u.vlan.sta))
                                atomic_inc(&sta->sdata->bss->num_mcast_sta);
                        set_bit(WLAN_STA_AUTHORIZED, &sta->_flags);
+                       ieee80211_check_fast_xmit(sta);
                }
                break;
        default:
index 5c164fb3f6c5bd2d68b5daa3993ff369a2a8792f..9bd1e97876bd04097feb0003d56c0a1f5dc021b7 100644 (file)
@@ -241,6 +241,34 @@ struct sta_ampdu_mlme {
 /* Value to indicate no TID reservation */
 #define IEEE80211_TID_UNRESERVED       0xff
 
+#define IEEE80211_FAST_XMIT_MAX_IV     18
+
+/**
+ * struct ieee80211_fast_tx - TX fastpath information
+ * @key: key to use for hw crypto
+ * @hdr: the 802.11 header to put with the frame
+ * @hdr_len: actual 802.11 header length
+ * @sa_offs: offset of the SA
+ * @da_offs: offset of the DA
+ * @pn_offs: offset where to put PN for crypto (or 0 if not needed)
+ * @band: band this will be transmitted on, for tx_info
+ * @rcu_head: RCU head to free this struct
+ *
+ * This struct is small enough so that the common case (maximum crypto
+ * header length of 8 like for CCMP/GCMP) fits into a single 64-byte
+ * cache line.
+ */
+struct ieee80211_fast_tx {
+       struct ieee80211_key *key;
+       u8 hdr_len;
+       u8 sa_offs, da_offs, pn_offs;
+       u8 band;
+       u8 hdr[30 + 2 + IEEE80211_FAST_XMIT_MAX_IV +
+              sizeof(rfc1042_header)];
+
+       struct rcu_head rcu_head;
+};
+
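Worked sizing for the hdr[] buffer above (arithmetic mine): 30 bytes for the longest 4-address data header, 2 for the QoS control field, IEEE80211_FAST_XMIT_MAX_IV = 18 for the largest IV the fastpath will reserve space for, plus sizeof(rfc1042_header) = 6 for the SNAP header, 56 bytes in total. In the common 3-address QoS case with an 8-byte CCMP/GCMP header only 24 + 2 + 8 + 6 = 40 of those bytes are touched, which together with the key pointer and the five single-byte fields keeps the hot data inside one 64-byte cache line, as the comment claims.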
 /**
  * struct sta_info - STA information
  *
@@ -257,6 +285,8 @@ struct sta_ampdu_mlme {
  * @gtk: group keys negotiated with this station, if any
  * @gtk_idx: last installed group key index
  * @rate_ctrl: rate control algorithm reference
+ * @rate_ctrl_lock: spinlock used to protect rate control data
+ *     (data inside the algorithm, so serializes calls there)
  * @rate_ctrl_priv: rate control private per-STA pointer
  * @last_tx_rate: rate used for last transmit, to report to userspace as
  *     "the" transmit rate
@@ -295,10 +325,10 @@ struct sta_ampdu_mlme {
  * @fail_avg: moving percentage of failed MSDUs
  * @tx_packets: number of transmitted MSDUs
  * @tx_bytes: number of bytes transmitted to this STA
- * @tx_fragments: number of transmitted MPDUs
  * @tid_seq: per-TID sequence numbers for sending to this STA
  * @ampdu_mlme: A-MPDU state machine state
  * @timer_to_tid: identity mapping to ID timers
+ * @plink_lock: serialize access to plink fields
  * @llid: Local link ID
  * @plid: Peer link ID
  * @reason: Cancel reason on PLINK_HOLDING state
@@ -338,6 +368,7 @@ struct sta_ampdu_mlme {
  *     using IEEE80211_NUM_TID entry for non-QoS frames
  * @rx_msdu: MSDUs received from this station, using IEEE80211_NUM_TID
  *     entry for non-QoS frames
+ * @fast_tx: TX fastpath information
  */
 struct sta_info {
        /* General information, mostly static */
@@ -352,8 +383,11 @@ struct sta_info {
        u8 ptk_idx;
        struct rate_control_ref *rate_ctrl;
        void *rate_ctrl_priv;
+       spinlock_t rate_ctrl_lock;
        spinlock_t lock;
 
+       struct ieee80211_fast_tx __rcu *fast_tx;
+
        struct work_struct drv_deliver_wk;
 
        u16 listen_interval;
@@ -400,7 +434,6 @@ struct sta_info {
        unsigned int fail_avg;
 
        /* Updated from TX path only, no locking requirements */
-       u32 tx_fragments;
        u64 tx_packets[IEEE80211_NUM_ACS];
        u64 tx_bytes[IEEE80211_NUM_ACS];
        struct ieee80211_tx_rate last_tx_rate;
@@ -422,9 +455,10 @@ struct sta_info {
 
 #ifdef CONFIG_MAC80211_MESH
        /*
-        * Mesh peer link attributes
+        * Mesh peer link attributes, protected by plink_lock.
         * TODO: move to a sub-structure that is referenced with pointer?
         */
+       spinlock_t plink_lock;
        u16 llid;
        u16 plid;
        u16 reason;
@@ -432,6 +466,7 @@ struct sta_info {
        enum nl80211_plink_state plink_state;
        u32 plink_timeout;
        struct timer_list plink_timer;
+
        s64 t_offset;
        s64 t_offset_setpoint;
        /* mesh power save */
index 005fdbe39a8b2f694b2deaa3d55e1a2baef031f3..461594966b65ed1f356522336346add77e0835f7 100644 (file)
@@ -631,15 +631,15 @@ void ieee80211_tx_status_noskb(struct ieee80211_hw *hw,
        }
 
        if (acked || noack_success) {
-                   local->dot11TransmittedFrameCount++;
-                   if (!pubsta)
-                           local->dot11MulticastTransmittedFrameCount++;
-                   if (retry_count > 0)
-                           local->dot11RetryCount++;
-                   if (retry_count > 1)
-                           local->dot11MultipleRetryCount++;
+               I802_DEBUG_INC(local->dot11TransmittedFrameCount);
+               if (!pubsta)
+                       I802_DEBUG_INC(local->dot11MulticastTransmittedFrameCount);
+               if (retry_count > 0)
+                       I802_DEBUG_INC(local->dot11RetryCount);
+               if (retry_count > 1)
+                       I802_DEBUG_INC(local->dot11MultipleRetryCount);
        } else {
-               local->dot11FailedCount++;
+               I802_DEBUG_INC(local->dot11FailedCount);
        }
 }
 EXPORT_SYMBOL(ieee80211_tx_status_noskb);
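These conversions are not just churn: I802_DEBUG_INC() expands to a plain increment only when CONFIG_MAC80211_DEBUG_COUNTERS is enabled and to a no-op otherwise, so the dot11* counters stop costing cycles on production builds, where the open-coded ++ was unconditional.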
@@ -802,13 +802,13 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
        if ((info->flags & IEEE80211_TX_STAT_ACK) ||
            (info->flags & IEEE80211_TX_STAT_NOACK_TRANSMITTED)) {
                if (ieee80211_is_first_frag(hdr->seq_ctrl)) {
-                       local->dot11TransmittedFrameCount++;
+                       I802_DEBUG_INC(local->dot11TransmittedFrameCount);
                        if (is_multicast_ether_addr(ieee80211_get_DA(hdr)))
-                               local->dot11MulticastTransmittedFrameCount++;
+                               I802_DEBUG_INC(local->dot11MulticastTransmittedFrameCount);
                        if (retry_count > 0)
-                               local->dot11RetryCount++;
+                               I802_DEBUG_INC(local->dot11RetryCount);
                        if (retry_count > 1)
-                               local->dot11MultipleRetryCount++;
+                               I802_DEBUG_INC(local->dot11MultipleRetryCount);
                }
 
                /* This counter shall be incremented for an acknowledged MPDU
@@ -818,10 +818,10 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
                if (!is_multicast_ether_addr(hdr->addr1) ||
                    ieee80211_is_data(fc) ||
                    ieee80211_is_mgmt(fc))
-                       local->dot11TransmittedFragmentCount++;
+                       I802_DEBUG_INC(local->dot11TransmittedFragmentCount);
        } else {
                if (ieee80211_is_first_frag(hdr->seq_ctrl))
-                       local->dot11FailedCount++;
+                       I802_DEBUG_INC(local->dot11FailedCount);
        }
 
        if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc) &&
index fff0d864adfa601da2af75f226c6d5c00affb335..75e8e3bba538a3e15b8a1ec392e594df5a9dbe47 100644 (file)
@@ -527,30 +527,19 @@ ieee80211_tdls_add_setup_cfm_ies(struct ieee80211_sub_if_data *sdata,
 
        /* if HT support is only added in TDLS, we need an HT-operation IE */
        if (!ap_sta->sta.ht_cap.ht_supported && sta->sta.ht_cap.ht_supported) {
-               struct ieee80211_chanctx_conf *chanctx_conf =
-                               rcu_dereference(sdata->vif.chanctx_conf);
-               if (!WARN_ON(!chanctx_conf)) {
-                       pos = skb_put(skb, 2 +
-                                     sizeof(struct ieee80211_ht_operation));
-                       /* send an empty HT operation IE */
-                       ieee80211_ie_build_ht_oper(pos, &sta->sta.ht_cap,
-                                                  &chanctx_conf->def, 0);
-               }
+               pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_operation));
+               /* send an empty HT operation IE */
+               ieee80211_ie_build_ht_oper(pos, &sta->sta.ht_cap,
+                                          &sdata->vif.bss_conf.chandef, 0);
        }
 
        ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator);
 
        /* only include VHT-operation if not on the 2.4GHz band */
-       if (band != IEEE80211_BAND_2GHZ && !ap_sta->sta.vht_cap.vht_supported &&
-           sta->sta.vht_cap.vht_supported) {
-               struct ieee80211_chanctx_conf *chanctx_conf =
-                               rcu_dereference(sdata->vif.chanctx_conf);
-               if (!WARN_ON(!chanctx_conf)) {
-                       pos = skb_put(skb, 2 +
-                                     sizeof(struct ieee80211_vht_operation));
-                       ieee80211_ie_build_vht_oper(pos, &sta->sta.vht_cap,
-                                                   &chanctx_conf->def);
-               }
+       if (band != IEEE80211_BAND_2GHZ && sta->sta.vht_cap.vht_supported) {
+               pos = skb_put(skb, 2 + sizeof(struct ieee80211_vht_operation));
+               ieee80211_ie_build_vht_oper(pos, &sta->sta.vht_cap,
+                                           &sdata->vif.bss_conf.chandef);
        }
 
        rcu_read_unlock();
@@ -1194,6 +1183,12 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
 
        switch (oper) {
        case NL80211_TDLS_ENABLE_LINK:
+               if (sdata->vif.csa_active) {
+                       tdls_dbg(sdata, "TDLS: disallow link during CSA\n");
+                       ret = -EBUSY;
+                       break;
+               }
+
                rcu_read_lock();
                sta = sta_info_get(sdata, peer);
                if (!sta) {
index 4c2e7690226a870664ba456cb01dc7de0eb66caf..6f14591d8ca9eb7a356f54cf0a16e96f87480246 100644 (file)
 #define CHANCTX_PR_ARG CHANDEF_PR_ARG, MIN_CHANDEF_PR_ARG,                             \
                        __entry->rx_chains_static, __entry->rx_chains_dynamic
 
+#define KEY_ENTRY      __field(u32, cipher)                                            \
+                       __field(u8, hw_key_idx)                                         \
+                       __field(u8, flags)                                              \
+                       __field(s8, keyidx)
+#define KEY_ASSIGN(k)  __entry->cipher = (k)->cipher;                                  \
+                       __entry->flags = (k)->flags;                                    \
+                       __entry->keyidx = (k)->keyidx;                                  \
+                       __entry->hw_key_idx = (k)->hw_key_idx;
+#define KEY_PR_FMT     " cipher:0x%x, flags=%#x, keyidx=%d, hw_key_idx=%d"
+#define KEY_PR_ARG     __entry->cipher, __entry->flags, __entry->keyidx, __entry->hw_key_idx
+
 
 
 /*
@@ -522,25 +533,19 @@ TRACE_EVENT(drv_set_key,
                LOCAL_ENTRY
                VIF_ENTRY
                STA_ENTRY
-               __field(u32, cipher)
-               __field(u8, hw_key_idx)
-               __field(u8, flags)
-               __field(s8, keyidx)
+               KEY_ENTRY
        ),
 
        TP_fast_assign(
                LOCAL_ASSIGN;
                VIF_ASSIGN;
                STA_ASSIGN;
-               __entry->cipher = key->cipher;
-               __entry->flags = key->flags;
-               __entry->keyidx = key->keyidx;
-               __entry->hw_key_idx = key->hw_key_idx;
+               KEY_ASSIGN(key);
        ),
 
        TP_printk(
-               LOCAL_PR_FMT  VIF_PR_FMT  STA_PR_FMT,
-               LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG
+               LOCAL_PR_FMT  VIF_PR_FMT  STA_PR_FMT KEY_PR_FMT,
+               LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, KEY_PR_ARG
        )
 );
 
@@ -656,28 +661,25 @@ TRACE_EVENT(drv_get_stats,
        )
 );
 
-TRACE_EVENT(drv_get_tkip_seq,
+TRACE_EVENT(drv_get_key_seq,
        TP_PROTO(struct ieee80211_local *local,
-                u8 hw_key_idx, u32 *iv32, u16 *iv16),
+                struct ieee80211_key_conf *key),
 
-       TP_ARGS(local, hw_key_idx, iv32, iv16),
+       TP_ARGS(local, key),
 
        TP_STRUCT__entry(
                LOCAL_ENTRY
-               __field(u8, hw_key_idx)
-               __field(u32, iv32)
-               __field(u16, iv16)
+               KEY_ENTRY
        ),
 
        TP_fast_assign(
                LOCAL_ASSIGN;
-               __entry->hw_key_idx = hw_key_idx;
-               __entry->iv32 = *iv32;
-               __entry->iv16 = *iv16;
+               KEY_ASSIGN(key);
        ),
 
        TP_printk(
-               LOCAL_PR_FMT, LOCAL_PR_ARG
+               LOCAL_PR_FMT KEY_PR_FMT,
+               LOCAL_PR_ARG, KEY_PR_ARG
        )
 );
 
index 667111ee6a20fc48493f88605e8ed45e36d6d55e..8df134213adfc563660befa1960c2eaee205617c 100644 (file)
 
 /* misc utils */
 
+static inline void ieee80211_tx_stats(struct net_device *dev, u32 len)
+{
+       struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
+
+       u64_stats_update_begin(&tstats->syncp);
+       tstats->tx_packets++;
+       tstats->tx_bytes += len;
+       u64_stats_update_end(&tstats->syncp);
+}
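ieee80211_tx_stats() is the TX twin of the ieee80211_rx_stats() helper added to rx.c above; both move the interface counters from the shared dev->stats structure to per-CPU pcpu_sw_netstats updates, so TX and RX paths running on different CPUs no longer contend on one cache line.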
+
 static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
                                 struct sk_buff *skb, int group_addr,
                                 int next_frag_len)
@@ -987,7 +997,6 @@ ieee80211_tx_h_stats(struct ieee80211_tx_data *tx)
 
        skb_queue_walk(&tx->skbs, skb) {
                ac = skb_get_queue_mapping(skb);
-               tx->sta->tx_fragments++;
                tx->sta->tx_bytes[ac] += skb->len;
        }
        if (ac >= 0)
@@ -1600,7 +1609,7 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,
        if (skb_cloned(skb) &&
            (!(local->hw.flags & IEEE80211_HW_SUPPORTS_CLONED_SKBS) ||
             !skb_clone_writable(skb, ETH_HLEN) ||
-            sdata->crypto_tx_tailroom_needed_cnt))
+            (may_encrypt && sdata->crypto_tx_tailroom_needed_cnt)))
                I802_DEBUG_INC(local->tx_expand_skb_head_cloned);
        else if (head_need || tail_need)
                I802_DEBUG_INC(local->tx_expand_skb_head);
@@ -2387,12 +2396,460 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata,
        return ERR_PTR(ret);
 }
 
+/*
+ * fast-xmit overview
+ *
+ * The core idea of this fast-xmit is to remove per-packet checks by checking
+ * them out of band. ieee80211_check_fast_xmit() implements the out-of-band
+ * checks that are needed to get the sta->fast_tx pointer assigned, after which
+ * much less work can be done per packet. For example, fragmentation must be
+ * disabled or the fast_tx pointer will not be set. All the conditions are seen
+ * in the code here.
+ *
+ * Once assigned, the fast_tx data structure also caches the per-packet 802.11
+ * header and other data to aid packet processing in ieee80211_xmit_fast().
+ *
+ * The most difficult part of this is that when any of these assumptions
+ * change, an external trigger (i.e. a call to ieee80211_clear_fast_xmit(),
+ * ieee80211_check_fast_xmit() or friends) is required to reset the data,
+ * since the per-packet code no longer checks the conditions. This is reflected
+ * by the calls to these functions throughout the rest of the code, and must be
+ * maintained if any of the TX path checks change.
+ */
+
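Concretely, the per-packet cost collapses to the lookup below (simplified from the __ieee80211_subif_start_xmit() hunk later in this patch):

	fast_tx = rcu_dereference(sta->fast_tx);
	if (fast_tx && ieee80211_xmit_fast(sdata, dev, sta, fast_tx, skb))
		goto out;	/* sent; otherwise fall back to ieee80211_build_hdr() */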
+void ieee80211_check_fast_xmit(struct sta_info *sta)
+{
+       struct ieee80211_fast_tx build = {}, *fast_tx = NULL, *old;
+       struct ieee80211_local *local = sta->local;
+       struct ieee80211_sub_if_data *sdata = sta->sdata;
+       struct ieee80211_hdr *hdr = (void *)build.hdr;
+       struct ieee80211_chanctx_conf *chanctx_conf;
+       __le16 fc;
+
+       if (!(local->hw.flags & IEEE80211_HW_SUPPORT_FAST_XMIT))
+               return;
+
+       /* Locking here protects both the pointer itself, and against concurrent
+        * invocations winning data access races to, e.g., the key pointer that
+        * is used.
+        * Without it, the invocation of this function right after the key
+        * pointer changes wouldn't be sufficient, as another CPU could access
+        * the pointer, then stall, and then do the cache update after the CPU
+        * that invalidated the key.
+        * With the locking, such scenarios cannot happen as the check for the
+        * key and the fast-tx assignment are done atomically, so the CPU that
+        * modifies the key will either wait or the other one will see the key
+        * cleared/changed already.
+        */
+       spin_lock_bh(&sta->lock);
+       if (local->hw.flags & IEEE80211_HW_SUPPORTS_PS &&
+           !(local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS) &&
+           sdata->vif.type == NL80211_IFTYPE_STATION)
+               goto out;
+
+       if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED))
+               goto out;
+
+       if (test_sta_flag(sta, WLAN_STA_PS_STA) ||
+           test_sta_flag(sta, WLAN_STA_PS_DRIVER) ||
+           test_sta_flag(sta, WLAN_STA_PS_DELIVER))
+               goto out;
+
+       if (sdata->noack_map)
+               goto out;
+
+       /* fast-xmit doesn't handle fragmentation at all */
+       if (local->hw.wiphy->frag_threshold != (u32)-1 &&
+           !local->ops->set_frag_threshold)
+               goto out;
+
+       rcu_read_lock();
+       chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+       if (!chanctx_conf) {
+               rcu_read_unlock();
+               goto out;
+       }
+       build.band = chanctx_conf->def.chan->band;
+       rcu_read_unlock();
+
+       fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA);
+
+       switch (sdata->vif.type) {
+       case NL80211_IFTYPE_ADHOC:
+               /* DA SA BSSID */
+               build.da_offs = offsetof(struct ieee80211_hdr, addr1);
+               build.sa_offs = offsetof(struct ieee80211_hdr, addr2);
+               memcpy(hdr->addr3, sdata->u.ibss.bssid, ETH_ALEN);
+               build.hdr_len = 24;
+               break;
+       case NL80211_IFTYPE_STATION:
+               if (test_sta_flag(sta, WLAN_STA_TDLS_PEER)) {
+                       /* DA SA BSSID */
+                       build.da_offs = offsetof(struct ieee80211_hdr, addr1);
+                       build.sa_offs = offsetof(struct ieee80211_hdr, addr2);
+                       memcpy(hdr->addr3, sdata->u.mgd.bssid, ETH_ALEN);
+                       build.hdr_len = 24;
+                       break;
+               }
+
+               if (sdata->u.mgd.use_4addr) {
+                       /* non-regular ethertype cannot use the fastpath */
+                       fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS |
+                                         IEEE80211_FCTL_TODS);
+                       /* RA TA DA SA */
+                       memcpy(hdr->addr1, sdata->u.mgd.bssid, ETH_ALEN);
+                       memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
+                       build.da_offs = offsetof(struct ieee80211_hdr, addr3);
+                       build.sa_offs = offsetof(struct ieee80211_hdr, addr4);
+                       build.hdr_len = 30;
+                       break;
+               }
+               fc |= cpu_to_le16(IEEE80211_FCTL_TODS);
+               /* BSSID SA DA */
+               memcpy(hdr->addr1, sdata->u.mgd.bssid, ETH_ALEN);
+               build.da_offs = offsetof(struct ieee80211_hdr, addr3);
+               build.sa_offs = offsetof(struct ieee80211_hdr, addr2);
+               build.hdr_len = 24;
+               break;
+       case NL80211_IFTYPE_AP_VLAN:
+               if (sdata->wdev.use_4addr) {
+                       fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS |
+                                         IEEE80211_FCTL_TODS);
+                       /* RA TA DA SA */
+                       memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
+                       memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
+                       build.da_offs = offsetof(struct ieee80211_hdr, addr3);
+                       build.sa_offs = offsetof(struct ieee80211_hdr, addr4);
+                       build.hdr_len = 30;
+                       break;
+               }
+               /* fall through */
+       case NL80211_IFTYPE_AP:
+               fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
+               /* DA BSSID SA */
+               build.da_offs = offsetof(struct ieee80211_hdr, addr1);
+               memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
+               build.sa_offs = offsetof(struct ieee80211_hdr, addr3);
+               build.hdr_len = 24;
+               break;
+       default:
+               /* not handled on fast-xmit */
+               goto out;
+       }
+
+       if (sta->sta.wme) {
+               build.hdr_len += 2;
+               fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
+       }
+
+       /* We only take a snapshot of the key pointer here, so rcu_access_pointer()
+        * is enough; that's fine because the code that changes the pointers will call
+        * this function after doing so. For a single CPU that would be enough,
+        * for multiple see the comment above.
+        */
+       build.key = rcu_access_pointer(sta->ptk[sta->ptk_idx]);
+       if (!build.key)
+               build.key = rcu_access_pointer(sdata->default_unicast_key);
+       if (build.key) {
+               bool gen_iv, iv_spc, mmic;
+
+               gen_iv = build.key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV;
+               iv_spc = build.key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE;
+               mmic = build.key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC;
+
+               /* don't handle software crypto */
+               if (!(build.key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE))
+                       goto out;
+
+               switch (build.key->conf.cipher) {
+               case WLAN_CIPHER_SUITE_CCMP:
+               case WLAN_CIPHER_SUITE_CCMP_256:
+                       /* add fixed key ID */
+                       if (gen_iv) {
+                               (build.hdr + build.hdr_len)[3] =
+                                       0x20 | (build.key->conf.keyidx << 6);
+                               build.pn_offs = build.hdr_len;
+                       }
+                       if (gen_iv || iv_spc)
+                               build.hdr_len += IEEE80211_CCMP_HDR_LEN;
+                       break;
+               case WLAN_CIPHER_SUITE_GCMP:
+               case WLAN_CIPHER_SUITE_GCMP_256:
+                       /* add fixed key ID */
+                       if (gen_iv) {
+                               (build.hdr + build.hdr_len)[3] =
+                                       0x20 | (build.key->conf.keyidx << 6);
+                               build.pn_offs = build.hdr_len;
+                       }
+                       if (gen_iv || iv_spc)
+                               build.hdr_len += IEEE80211_GCMP_HDR_LEN;
+                       break;
+               case WLAN_CIPHER_SUITE_TKIP:
+                       /* cannot handle MMIC or IV generation in xmit-fast */
+                       if (mmic || gen_iv)
+                               goto out;
+                       if (iv_spc)
+                               build.hdr_len += IEEE80211_TKIP_IV_LEN;
+                       break;
+               case WLAN_CIPHER_SUITE_WEP40:
+               case WLAN_CIPHER_SUITE_WEP104:
+                       /* cannot handle IV generation in fast-xmit */
+                       if (gen_iv)
+                               goto out;
+                       if (iv_spc)
+                               build.hdr_len += IEEE80211_WEP_IV_LEN;
+                       break;
+               case WLAN_CIPHER_SUITE_AES_CMAC:
+               case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+               case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+               case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+                       WARN(1,
+                            "management cipher suite 0x%x enabled for data\n",
+                            build.key->conf.cipher);
+                       goto out;
+               default:
+                       /* we don't know how to generate IVs for this at all */
+                       if (WARN_ON(gen_iv))
+                               goto out;
+                       /* pure hardware keys are OK, of course */
+                       if (!(build.key->flags & KEY_FLAG_CIPHER_SCHEME))
+                               break;
+                       /* cipher scheme might require space allocation */
+                       if (iv_spc &&
+                           build.key->conf.iv_len > IEEE80211_FAST_XMIT_MAX_IV)
+                               goto out;
+                       if (iv_spc)
+                               build.hdr_len += build.key->conf.iv_len;
+               }
+
+               fc |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
+       }
+
+       hdr->frame_control = fc;
+
+       memcpy(build.hdr + build.hdr_len,
+              rfc1042_header,  sizeof(rfc1042_header));
+       build.hdr_len += sizeof(rfc1042_header);
+
+       fast_tx = kmemdup(&build, sizeof(build), GFP_ATOMIC);
+       /* if the kmemdup fails, continue w/o fast_tx */
+       if (!fast_tx)
+               goto out;
+
+ out:
+       /* we might have raced against another call to this function */
+       old = rcu_dereference_protected(sta->fast_tx,
+                                       lockdep_is_held(&sta->lock));
+       rcu_assign_pointer(sta->fast_tx, fast_tx);
+       if (old)
+               kfree_rcu(old, rcu_head);
+       spin_unlock_bh(&sta->lock);
+}
+
+void ieee80211_check_fast_xmit_all(struct ieee80211_local *local)
+{
+       struct sta_info *sta;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(sta, &local->sta_list, list)
+               ieee80211_check_fast_xmit(sta);
+       rcu_read_unlock();
+}
+
+void ieee80211_check_fast_xmit_iface(struct ieee80211_sub_if_data *sdata)
+{
+       struct ieee80211_local *local = sdata->local;
+       struct sta_info *sta;
+
+       rcu_read_lock();
+
+       list_for_each_entry_rcu(sta, &local->sta_list, list) {
+               if (sdata != sta->sdata &&
+                   (!sta->sdata->bss || sta->sdata->bss != sdata->bss))
+                       continue;
+               ieee80211_check_fast_xmit(sta);
+       }
+
+       rcu_read_unlock();
+}
+
+void ieee80211_clear_fast_xmit(struct sta_info *sta)
+{
+       struct ieee80211_fast_tx *fast_tx;
+
+       spin_lock_bh(&sta->lock);
+       fast_tx = rcu_dereference_protected(sta->fast_tx,
+                                           lockdep_is_held(&sta->lock));
+       RCU_INIT_POINTER(sta->fast_tx, NULL);
+       spin_unlock_bh(&sta->lock);
+
+       if (fast_tx)
+               kfree_rcu(fast_tx, rcu_head);
+}
+
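The typical caller pattern (taken from the sta_info_move_state() hunk earlier in this merge) is invalidate first, revalidate once the new state is in place:

	clear_bit(WLAN_STA_AUTHORIZED, &sta->_flags);
	ieee80211_clear_fast_xmit(sta);
	...
	set_bit(WLAN_STA_AUTHORIZED, &sta->_flags);
	ieee80211_check_fast_xmit(sta);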
+static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
+                               struct net_device *dev, struct sta_info *sta,
+                               struct ieee80211_fast_tx *fast_tx,
+                               struct sk_buff *skb)
+{
+       struct ieee80211_local *local = sdata->local;
+       u16 ethertype = (skb->data[12] << 8) | skb->data[13];
+       int extra_head = fast_tx->hdr_len - (ETH_HLEN - 2);
+       int hw_headroom = sdata->local->hw.extra_tx_headroom;
+       struct ethhdr eth;
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct ieee80211_hdr *hdr = (void *)fast_tx->hdr;
+       struct ieee80211_tx_data tx;
+       ieee80211_tx_result r;
+       struct tid_ampdu_tx *tid_tx = NULL;
+       u8 tid = IEEE80211_NUM_TIDS;
+
+       /* control port protocol needs a lot of special handling */
+       if (cpu_to_be16(ethertype) == sdata->control_port_protocol)
+               return false;
+
+       /* only RFC 1042 SNAP */
+       if (ethertype < ETH_P_802_3_MIN)
+               return false;
+
+       /* don't handle TX status request here either */
+       if (skb->sk && skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS)
+               return false;
+
+       if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) {
+               tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
+               tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
+               if (tid_tx &&
+                   !test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state))
+                       return false;
+       }
+
+       /* after this point (skb is modified) we cannot return false */
+
+       if (skb_shared(skb)) {
+               struct sk_buff *tmp_skb = skb;
+
+               skb = skb_clone(skb, GFP_ATOMIC);
+               kfree_skb(tmp_skb);
+
+               if (!skb)
+                       return true;
+       }
+
+       ieee80211_tx_stats(dev, skb->len + extra_head);
+
+       /* will not be crypto-handled beyond what we do here, so use false
+        * as the may-encrypt argument for the resize to not account for
+        * more room than we already have in 'extra_head'
+        */
+       if (unlikely(ieee80211_skb_resize(sdata, skb,
+                                         max_t(int, extra_head + hw_headroom -
+                                                    skb_headroom(skb), 0),
+                                         false))) {
+               kfree_skb(skb);
+               return true;
+       }
+
+       memcpy(&eth, skb->data, ETH_HLEN - 2);
+       hdr = (void *)skb_push(skb, extra_head);
+       memcpy(skb->data, fast_tx->hdr, fast_tx->hdr_len);
+       memcpy(skb->data + fast_tx->da_offs, eth.h_dest, ETH_ALEN);
+       memcpy(skb->data + fast_tx->sa_offs, eth.h_source, ETH_ALEN);
+
+       memset(info, 0, sizeof(*info));
+       info->band = fast_tx->band;
+       info->control.vif = &sdata->vif;
+       info->flags = IEEE80211_TX_CTL_FIRST_FRAGMENT |
+                     IEEE80211_TX_CTL_DONTFRAG |
+                     (tid_tx ? IEEE80211_TX_CTL_AMPDU : 0);
+
+       if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) {
+               *ieee80211_get_qos_ctl(hdr) = tid;
+               hdr->seq_ctrl = ieee80211_tx_next_seq(sta, tid);
+       } else {
+               info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
+               hdr->seq_ctrl = cpu_to_le16(sdata->sequence_number);
+               sdata->sequence_number += 0x10;
+       }
+
+       sta->tx_msdu[tid]++;
+
+       info->hw_queue = sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
+
+       __skb_queue_head_init(&tx.skbs);
+
+       tx.flags = IEEE80211_TX_UNICAST;
+       tx.local = local;
+       tx.sdata = sdata;
+       tx.sta = sta;
+       tx.key = fast_tx->key;
+
+       if (fast_tx->key)
+               info->control.hw_key = &fast_tx->key->conf;
+
+       if (!(local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)) {
+               tx.skb = skb;
+               r = ieee80211_tx_h_rate_ctrl(&tx);
+               skb = tx.skb;
+               tx.skb = NULL;
+
+               if (r != TX_CONTINUE) {
+                       if (r != TX_QUEUED)
+                               kfree_skb(skb);
+                       return true;
+               }
+       }
+
+       /* statistics normally done by ieee80211_tx_h_stats (but that
+        * has to consider fragmentation, so is more complex)
+        */
+       sta->tx_bytes[skb_get_queue_mapping(skb)] += skb->len;
+       sta->tx_packets[skb_get_queue_mapping(skb)]++;
+
+       if (fast_tx->pn_offs) {
+               u64 pn;
+               u8 *crypto_hdr = skb->data + fast_tx->pn_offs;
+
+               switch (fast_tx->key->conf.cipher) {
+               case WLAN_CIPHER_SUITE_CCMP:
+               case WLAN_CIPHER_SUITE_CCMP_256:
+                       pn = atomic64_inc_return(&fast_tx->key->u.ccmp.tx_pn);
+                       crypto_hdr[0] = pn;
+                       crypto_hdr[1] = pn >> 8;
+                       crypto_hdr[4] = pn >> 16;
+                       crypto_hdr[5] = pn >> 24;
+                       crypto_hdr[6] = pn >> 32;
+                       crypto_hdr[7] = pn >> 40;
+                       break;
+               case WLAN_CIPHER_SUITE_GCMP:
+               case WLAN_CIPHER_SUITE_GCMP_256:
+                       pn = atomic64_inc_return(&fast_tx->key->u.gcmp.tx_pn);
+                       crypto_hdr[0] = pn;
+                       crypto_hdr[1] = pn >> 8;
+                       crypto_hdr[4] = pn >> 16;
+                       crypto_hdr[5] = pn >> 24;
+                       crypto_hdr[6] = pn >> 32;
+                       crypto_hdr[7] = pn >> 40;
+                       break;
+               }
+       }
+
+       if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+               sdata = container_of(sdata->bss,
+                                    struct ieee80211_sub_if_data, u.ap);
+
+       __skb_queue_tail(&tx.skbs, skb);
+       ieee80211_tx_frags(local, &sdata->vif, &sta->sta, &tx.skbs, false);
+       return true;
+}
+
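The PN write in the hunk above skips bytes 2 and 3 of the 8-byte CCMP/GCMP
header on purpose: byte 2 is reserved and byte 3 carries the key-id/Ext-IV
bits, which the cached fast-xmit header already contains. A minimal sketch of
the serialization, factored into a hypothetical helper (not part of the patch):

        static void pn_to_crypto_hdr(u8 *crypto_hdr, u64 pn)
        {
                /* 48-bit packet number; layout is
                 * PN0 PN1 <rsvd> <keyid/ExtIV> PN2 PN3 PN4 PN5
                 */
                crypto_hdr[0] = pn;
                crypto_hdr[1] = pn >> 8;
                crypto_hdr[4] = pn >> 16;
                crypto_hdr[5] = pn >> 24;
                crypto_hdr[6] = pn >> 32;
                crypto_hdr[7] = pn >> 40;
        }
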
 void __ieee80211_subif_start_xmit(struct sk_buff *skb,
                                  struct net_device *dev,
                                  u32 info_flags)
 {
        struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
        struct sta_info *sta;
+       struct sk_buff *next;
 
        if (unlikely(skb->len < ETH_HLEN)) {
                kfree_skb(skb);
@@ -2401,20 +2858,67 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb,
 
        rcu_read_lock();
 
-       if (ieee80211_lookup_ra_sta(sdata, skb, &sta)) {
-               kfree_skb(skb);
-               goto out;
+       if (ieee80211_lookup_ra_sta(sdata, skb, &sta))
+               goto out_free;
+
+       if (!IS_ERR_OR_NULL(sta)) {
+               struct ieee80211_fast_tx *fast_tx;
+
+               fast_tx = rcu_dereference(sta->fast_tx);
+
+               if (fast_tx &&
+                   ieee80211_xmit_fast(sdata, dev, sta, fast_tx, skb))
+                       goto out;
        }
 
-       skb = ieee80211_build_hdr(sdata, skb, info_flags, sta);
-       if (IS_ERR(skb))
-               goto out;
+       if (skb_is_gso(skb)) {
+               struct sk_buff *segs;
+
+               segs = skb_gso_segment(skb, 0);
+               if (IS_ERR(segs)) {
+                       goto out_free;
+               } else if (segs) {
+                       consume_skb(skb);
+                       skb = segs;
+               }
+       } else {
+               /* we cannot process non-linear frames on this path */
+               if (skb_linearize(skb)) {
+                       kfree_skb(skb);
+                       goto out;
+               }
+
+               /* the frame could be fragmented, software-encrypted, and other
+                * things so we cannot really handle checksum offload with it -
+                * fix it up in software before we handle anything else.
+                */
+               if (skb->ip_summed == CHECKSUM_PARTIAL) {
+                       skb_set_transport_header(skb,
+                                                skb_checksum_start_offset(skb));
+                       if (skb_checksum_help(skb))
+                               goto out_free;
+               }
+       }
+
+       next = skb;
+       while (next) {
+               skb = next;
+               next = skb->next;
 
-       dev->stats.tx_packets++;
-       dev->stats.tx_bytes += skb->len;
-       dev->trans_start = jiffies;
+               skb->prev = NULL;
+               skb->next = NULL;
+
+               skb = ieee80211_build_hdr(sdata, skb, info_flags, sta);
+               if (IS_ERR(skb))
+                       goto out;
 
-       ieee80211_xmit(sdata, sta, skb);
+               ieee80211_tx_stats(dev, skb->len);
+
+               ieee80211_xmit(sdata, sta, skb);
+       }
+       goto out;
+ out_free:
+       kfree_skb(skb);
  out:
        rcu_read_unlock();
 }
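The GSO branch above relies on the three-way contract of skb_gso_segment():
an ERR_PTR on hard failure, NULL when no segmentation was needed (keep using
the original skb), or the head of a segment list (the original must then be
released). A hedged sketch of that contract, with the ownership rules spelled
out ('segment_or_pass' is illustrative, not a kernel function):

        static struct sk_buff *segment_or_pass(struct sk_buff *skb)
        {
                struct sk_buff *segs = skb_gso_segment(skb, 0);

                if (IS_ERR(segs)) {
                        kfree_skb(skb);         /* hard failure: drop */
                        return NULL;
                }
                if (segs) {
                        consume_skb(skb);       /* replaced by segments */
                        return segs;            /* chained via skb->next */
                }
                return skb;                     /* nothing to segment */
        }
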
index aa462b480a394b0081ba73e2441d9cb4619e519b..fb45287ebac35308be9202d8b6061d764c24714a 100644 (file)
@@ -2,6 +2,7 @@ config MAC802154
        tristate "Generic IEEE 802.15.4 Soft Networking Stack (mac802154)"
        depends on IEEE802154
        select CRC_CCITT
+       select CRYPTO
        select CRYPTO_AUTHENC
        select CRYPTO_CCM
        select CRYPTO_CTR
index 70be9c799f8a81596a4e753b239849549d792dd0..317c4662e544679ab37dcc8cfa92fc8108c4820b 100644 (file)
@@ -73,9 +73,9 @@ ieee802154_set_channel(struct wpan_phy *wpan_phy, u8 page, u8 channel)
 
        ASSERT_RTNL();
 
-       /* check if phy support this setting */
-       if (!(wpan_phy->channels_supported[page] & BIT(channel)))
-               return -EINVAL;
+       if (wpan_phy->current_page == page &&
+           wpan_phy->current_channel == channel)
+               return 0;
 
        ret = drv_set_channel(local, page, channel);
        if (!ret) {
@@ -95,9 +95,8 @@ ieee802154_set_cca_mode(struct wpan_phy *wpan_phy,
 
        ASSERT_RTNL();
 
-       /* check if phy support this setting */
-       if (!(local->hw.flags & IEEE802154_HW_CCA_MODE))
-               return -EOPNOTSUPP;
+       if (wpan_phy_cca_cmp(&wpan_phy->cca, cca))
+               return 0;
 
        ret = drv_set_cca_mode(local, cca);
        if (!ret)
@@ -106,21 +105,50 @@ ieee802154_set_cca_mode(struct wpan_phy *wpan_phy,
        return ret;
 }
 
+static int
+ieee802154_set_cca_ed_level(struct wpan_phy *wpan_phy, s32 ed_level)
+{
+       struct ieee802154_local *local = wpan_phy_priv(wpan_phy);
+       int ret;
+
+       ASSERT_RTNL();
+
+       if (wpan_phy->cca_ed_level == ed_level)
+               return 0;
+
+       ret = drv_set_cca_ed_level(local, ed_level);
+       if (!ret)
+               wpan_phy->cca_ed_level = ed_level;
+
+       return ret;
+}
+
+static int
+ieee802154_set_tx_power(struct wpan_phy *wpan_phy, s32 power)
+{
+       struct ieee802154_local *local = wpan_phy_priv(wpan_phy);
+       int ret;
+
+       ASSERT_RTNL();
+
+       if (wpan_phy->transmit_power == power)
+               return 0;
+
+       ret = drv_set_tx_power(local, power);
+       if (!ret)
+               wpan_phy->transmit_power = power;
+
+       return ret;
+}
+
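All of these setters follow one shape: return early when the PIB already holds
the requested value, and cache the new value only after the driver accepted
it, so the shadow state never runs ahead of the hardware. The pattern as an
illustrative helper (not a kernel API):

        static int set_cached_s32(struct ieee802154_local *local, s32 *cached,
                                  s32 value,
                                  int (*drv_set)(struct ieee802154_local *, s32))
        {
                int ret;

                if (*cached == value)
                        return 0;               /* idempotent: nothing to do */

                ret = drv_set(local, value);
                if (!ret)
                        *cached = value;        /* commit only on success */
                return ret;
        }
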
 static int
 ieee802154_set_pan_id(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
                      __le16 pan_id)
 {
        ASSERT_RTNL();
 
-       /* TODO
-        * I am not sure about to check here on broadcast pan_id.
-        * Broadcast is a valid setting, comment from 802.15.4:
-        * If this value is 0xffff, the device is not associated.
-        *
-        * This could useful to simple deassociate an device.
-        */
-       if (pan_id == cpu_to_le16(IEEE802154_PAN_ID_BROADCAST))
-               return -EINVAL;
+       if (wpan_dev->pan_id == pan_id)
+               return 0;
 
        wpan_dev->pan_id = pan_id;
        return 0;
@@ -131,12 +159,11 @@ ieee802154_set_backoff_exponent(struct wpan_phy *wpan_phy,
                                struct wpan_dev *wpan_dev,
                                u8 min_be, u8 max_be)
 {
-       struct ieee802154_local *local = wpan_phy_priv(wpan_phy);
-
        ASSERT_RTNL();
 
-       if (!(local->hw.flags & IEEE802154_HW_CSMA_PARAMS))
-               return -EOPNOTSUPP;
+       if (wpan_dev->min_be == min_be &&
+           wpan_dev->max_be == max_be)
+               return 0;
 
        wpan_dev->min_be = min_be;
        wpan_dev->max_be = max_be;
@@ -149,20 +176,8 @@ ieee802154_set_short_addr(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
 {
        ASSERT_RTNL();
 
-       /* TODO
-        * I am not sure about to check here on broadcast short_addr.
-        * Broadcast is a valid setting, comment from 802.15.4:
-        * A value of 0xfffe indicates that the device has
-        * associated but has not been allocated an address. A
-        * value of 0xffff indicates that the device does not
-        * have a short address.
-        *
-        * I think we should allow to set these settings but
-        * don't allow to allow socket communication with it.
-        */
-       if (short_addr == cpu_to_le16(IEEE802154_ADDR_SHORT_UNSPEC) ||
-           short_addr == cpu_to_le16(IEEE802154_ADDR_SHORT_BROADCAST))
-               return -EINVAL;
+       if (wpan_dev->short_addr == short_addr)
+               return 0;
 
        wpan_dev->short_addr = short_addr;
        return 0;
@@ -173,12 +188,10 @@ ieee802154_set_max_csma_backoffs(struct wpan_phy *wpan_phy,
                                 struct wpan_dev *wpan_dev,
                                 u8 max_csma_backoffs)
 {
-       struct ieee802154_local *local = wpan_phy_priv(wpan_phy);
-
        ASSERT_RTNL();
 
-       if (!(local->hw.flags & IEEE802154_HW_CSMA_PARAMS))
-               return -EOPNOTSUPP;
+       if (wpan_dev->csma_retries == max_csma_backoffs)
+               return 0;
 
        wpan_dev->csma_retries = max_csma_backoffs;
        return 0;
@@ -189,12 +202,10 @@ ieee802154_set_max_frame_retries(struct wpan_phy *wpan_phy,
                                 struct wpan_dev *wpan_dev,
                                 s8 max_frame_retries)
 {
-       struct ieee802154_local *local = wpan_phy_priv(wpan_phy);
-
        ASSERT_RTNL();
 
-       if (!(local->hw.flags & IEEE802154_HW_FRAME_RETRIES))
-               return -EOPNOTSUPP;
+       if (wpan_dev->frame_retries == max_frame_retries)
+               return 0;
 
        wpan_dev->frame_retries = max_frame_retries;
        return 0;
@@ -204,12 +215,10 @@ static int
 ieee802154_set_lbt_mode(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
                        bool mode)
 {
-       struct ieee802154_local *local = wpan_phy_priv(wpan_phy);
-
        ASSERT_RTNL();
 
-       if (!(local->hw.flags & IEEE802154_HW_LBT))
-               return -EOPNOTSUPP;
+       if (wpan_dev->lbt == mode)
+               return 0;
 
        wpan_dev->lbt = mode;
        return 0;
@@ -222,6 +231,8 @@ const struct cfg802154_ops mac802154_config_ops = {
        .del_virtual_intf = ieee802154_del_iface,
        .set_channel = ieee802154_set_channel,
        .set_cca_mode = ieee802154_set_cca_mode,
+       .set_cca_ed_level = ieee802154_set_cca_ed_level,
+       .set_tx_power = ieee802154_set_tx_power,
        .set_pan_id = ieee802154_set_pan_id,
        .set_short_addr = ieee802154_set_short_addr,
        .set_backoff_exponent = ieee802154_set_backoff_exponent,
index a0533357b9eaca22df9e7b105f17ce3440b03d4e..caecd5f43aa730341c4e3e6556636ab5f13a285f 100644 (file)
@@ -58,7 +58,7 @@ drv_set_channel(struct ieee802154_local *local, u8 page, u8 channel)
        return local->ops->set_channel(&local->hw, page, channel);
 }
 
-static inline int drv_set_tx_power(struct ieee802154_local *local, s8 dbm)
+static inline int drv_set_tx_power(struct ieee802154_local *local, s32 mbm)
 {
        might_sleep();
 
@@ -67,7 +67,7 @@ static inline int drv_set_tx_power(struct ieee802154_local *local, s8 dbm)
                return -EOPNOTSUPP;
        }
 
-       return local->ops->set_txpower(&local->hw, dbm);
+       return local->ops->set_txpower(&local->hw, mbm);
 }
 
 static inline int drv_set_cca_mode(struct ieee802154_local *local,
@@ -96,7 +96,7 @@ static inline int drv_set_lbt_mode(struct ieee802154_local *local, bool mode)
 }
 
 static inline int
-drv_set_cca_ed_level(struct ieee802154_local *local, s32 ed_level)
+drv_set_cca_ed_level(struct ieee802154_local *local, s32 mbm)
 {
        might_sleep();
 
@@ -105,7 +105,7 @@ drv_set_cca_ed_level(struct ieee802154_local *local, s32 ed_level)
                return -EOPNOTSUPP;
        }
 
-       return local->ops->set_cca_ed_level(&local->hw, ed_level);
+       return local->ops->set_cca_ed_level(&local->hw, mbm);
 }
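The s8 dBm parameters become s32 mbm here, where mbm is 1/100 dBm, mirroring
the convention cfg80211 uses for wiphy power levels; the extra resolution
matters because many 802.15.4 radios step their TX power and CCA ED threshold
in fractions of a dBm. Assuming the usual conversion macros, the mapping is:

        #define DBM_TO_MBM(dbm) ((dbm) * 100)
        #define MBM_TO_DBM(mbm) ((mbm) / 100)

        /* e.g. a -81 dBm CCA energy-detect threshold travels as -8100 mbm */
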
 
 static inline int drv_set_pan_id(struct ieee802154_local *local, __le16 pan_id)
index 127ba18386fc639aac4ccda482ebe4be11b8e6ee..eec668f3637ff37c9e12c1251cdca4cc59c75d7e 100644 (file)
@@ -86,8 +86,6 @@ struct ieee802154_sub_if_data {
        unsigned long state;
        char name[IFNAMSIZ];
 
-       spinlock_t mib_lock;
-
        /* protects sec from concurrent access by netlink. access by
         * encrypt/decrypt/header_create safe without additional protection.
         */
@@ -136,12 +134,7 @@ ieee802154_subif_start_xmit(struct sk_buff *skb, struct net_device *dev);
 enum hrtimer_restart ieee802154_xmit_ifs_timer(struct hrtimer *timer);
 
 /* MIB callbacks */
-void mac802154_dev_set_short_addr(struct net_device *dev, __le16 val);
-__le16 mac802154_dev_get_short_addr(const struct net_device *dev);
-__le16 mac802154_dev_get_pan_id(const struct net_device *dev);
-void mac802154_dev_set_pan_id(struct net_device *dev, __le16 val);
 void mac802154_dev_set_page_channel(struct net_device *dev, u8 page, u8 chan);
-u8 mac802154_dev_get_dsn(const struct net_device *dev);
 
 int mac802154_get_params(struct net_device *dev,
                         struct ieee802154_llsec_params *params);
index 91b75abbd1a1d05b3219b9089232d9f67eb73ccd..b544b5dc4bfbd1968372c65dee771d0b59cb9671 100644 (file)
@@ -62,9 +62,10 @@ mac802154_wpan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                (struct sockaddr_ieee802154 *)&ifr->ifr_addr;
        int err = -ENOIOCTLCMD;
 
-       ASSERT_RTNL();
+       if (cmd != SIOCGIFADDR && cmd != SIOCSIFADDR)
+               return err;
 
-       spin_lock_bh(&sdata->mib_lock);
+       rtnl_lock();
 
        switch (cmd) {
        case SIOCGIFADDR:
@@ -89,7 +90,7 @@ mac802154_wpan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
        }
        case SIOCSIFADDR:
                if (netif_running(dev)) {
-                       spin_unlock_bh(&sdata->mib_lock);
+                       rtnl_unlock();
                        return -EBUSY;
                }
 
@@ -111,7 +112,7 @@ mac802154_wpan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                break;
        }
 
-       spin_unlock_bh(&sdata->mib_lock);
+       rtnl_unlock();
        return err;
 }
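Replacing the mib_lock spinlock with the RTNL works because these ioctls run
in process context, where taking the sleeping rtnl mutex is legal, and it
serializes address updates against nl802154 configuration, which already
executes under the RTNL. The resulting shape, sketched:

        rtnl_lock();
        /* ... read or update wpan_dev->short_addr / wpan_dev->pan_id ... */
        rtnl_unlock();
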
 
@@ -241,7 +242,6 @@ static int mac802154_wpan_open(struct net_device *dev)
        struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
        struct ieee802154_local *local = sdata->local;
        struct wpan_dev *wpan_dev = &sdata->wpan_dev;
-       struct wpan_phy *phy = sdata->local->phy;
 
        rc = ieee802154_check_concurrent_iface(sdata, sdata->vif.type);
        if (rc < 0)
@@ -251,8 +251,6 @@ static int mac802154_wpan_open(struct net_device *dev)
        if (rc < 0)
                return rc;
 
-       mutex_lock(&phy->pib_lock);
-
        if (local->hw.flags & IEEE802154_HW_PROMISCUOUS) {
                rc = drv_set_promiscuous_mode(local,
                                              wpan_dev->promiscuous_mode);
@@ -294,11 +292,7 @@ static int mac802154_wpan_open(struct net_device *dev)
                        goto out;
        }
 
-       mutex_unlock(&phy->pib_lock);
-       return 0;
-
 out:
-       mutex_unlock(&phy->pib_lock);
        return rc;
 }
 
@@ -374,14 +368,12 @@ static int mac802154_header_create(struct sk_buff *skb,
        hdr.fc.type = cb->type;
        hdr.fc.security_enabled = cb->secen;
        hdr.fc.ack_request = cb->ackreq;
-       hdr.seq = ieee802154_mlme_ops(dev)->get_dsn(dev);
+       hdr.seq = atomic_inc_return(&dev->ieee802154_ptr->dsn) & 0xFF;
 
        if (mac802154_set_header_security(sdata, &hdr, cb) < 0)
                return -EINVAL;
 
        if (!saddr) {
-               spin_lock_bh(&sdata->mib_lock);
-
                if (wpan_dev->short_addr == cpu_to_le16(IEEE802154_ADDR_BROADCAST) ||
                    wpan_dev->short_addr == cpu_to_le16(IEEE802154_ADDR_UNDEF) ||
                    wpan_dev->pan_id == cpu_to_le16(IEEE802154_PANID_BROADCAST)) {
@@ -393,8 +385,6 @@ static int mac802154_header_create(struct sk_buff *skb,
                }
 
                hdr.source.pan_id = wpan_dev->pan_id;
-
-               spin_unlock_bh(&sdata->mib_lock);
        } else {
                hdr.source = *(const struct ieee802154_addr *)saddr;
        }
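With the DSN stored as an atomic_t in struct wpan_dev, header construction no
longer needs a lock around the sequence-number read-modify-write; the counter
wraps to 8 bits through the mask. A sketch of the accessor this open-codes
('wpan_dev_next_dsn' is a hypothetical helper):

        static u8 wpan_dev_next_dsn(struct wpan_dev *wpan_dev)
        {
                /* atomic post-increment, truncated to the 8-bit
                 * IEEE 802.15.4 data sequence number space
                 */
                return atomic_inc_return(&wpan_dev->dsn) & 0xFF;
        }
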
@@ -474,13 +464,16 @@ ieee802154_setup_sdata(struct ieee802154_sub_if_data *sdata,
                       enum nl802154_iftype type)
 {
        struct wpan_dev *wpan_dev = &sdata->wpan_dev;
+       u8 tmp;
 
        /* set some type-dependent values */
        sdata->vif.type = type;
        sdata->wpan_dev.iftype = type;
 
-       get_random_bytes(&wpan_dev->bsn, 1);
-       get_random_bytes(&wpan_dev->dsn, 1);
+       get_random_bytes(&tmp, sizeof(tmp));
+       atomic_set(&wpan_dev->bsn, tmp);
+       get_random_bytes(&tmp, sizeof(tmp));
+       atomic_set(&wpan_dev->dsn, tmp);
 
        /* defaults per 802.15.4-2011 */
        wpan_dev->min_be = 3;
@@ -503,7 +496,6 @@ ieee802154_setup_sdata(struct ieee802154_sub_if_data *sdata,
                sdata->dev->ml_priv = &mac802154_mlme_wpan;
                wpan_dev->promiscuous_mode = false;
 
-               spin_lock_init(&sdata->mib_lock);
                mutex_init(&sdata->sec_mtx);
 
                mac802154_llsec_init(&sdata->sec);
index bdccb4ecd30fed81fec03a00c1a3b6452024fcbb..8606da459ff3421f9528a059cf2d66ca98a4a579 100644 (file)
@@ -36,37 +36,30 @@ static int mac802154_mlme_start_req(struct net_device *dev,
                                    u8 pan_coord, u8 blx,
                                    u8 coord_realign)
 {
-       struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
-       int rc = 0;
+       struct ieee802154_llsec_params params;
+       int changed = 0;
 
        ASSERT_RTNL();
 
        BUG_ON(addr->mode != IEEE802154_ADDR_SHORT);
 
-       mac802154_dev_set_pan_id(dev, addr->pan_id);
-       mac802154_dev_set_short_addr(dev, addr->short_addr);
+       dev->ieee802154_ptr->pan_id = addr->pan_id;
+       dev->ieee802154_ptr->short_addr = addr->short_addr;
        mac802154_dev_set_page_channel(dev, page, channel);
 
-       if (ops->llsec) {
-               struct ieee802154_llsec_params params;
-               int changed = 0;
+       params.pan_id = addr->pan_id;
+       changed |= IEEE802154_LLSEC_PARAM_PAN_ID;
 
-               params.coord_shortaddr = addr->short_addr;
-               changed |= IEEE802154_LLSEC_PARAM_COORD_SHORTADDR;
+       params.hwaddr = ieee802154_devaddr_from_raw(dev->dev_addr);
+       changed |= IEEE802154_LLSEC_PARAM_HWADDR;
 
-               params.pan_id = addr->pan_id;
-               changed |= IEEE802154_LLSEC_PARAM_PAN_ID;
+       params.coord_hwaddr = params.hwaddr;
+       changed |= IEEE802154_LLSEC_PARAM_COORD_HWADDR;
 
-               params.hwaddr = ieee802154_devaddr_from_raw(dev->dev_addr);
-               changed |= IEEE802154_LLSEC_PARAM_HWADDR;
+       params.coord_shortaddr = addr->short_addr;
+       changed |= IEEE802154_LLSEC_PARAM_COORD_SHORTADDR;
 
-               params.coord_hwaddr = params.hwaddr;
-               changed |= IEEE802154_LLSEC_PARAM_COORD_HWADDR;
-
-               rc = ops->llsec->set_params(dev, &params, changed);
-       }
-
-       return rc;
+       return mac802154_set_params(dev, &params, changed);
 }
 
 static int mac802154_set_mac_params(struct net_device *dev,
@@ -91,19 +84,19 @@ static int mac802154_set_mac_params(struct net_device *dev,
        wpan_dev->frame_retries = params->frame_retries;
        wpan_dev->lbt = params->lbt;
 
-       if (local->hw.flags & IEEE802154_HW_TXPOWER) {
+       if (local->hw.phy->flags & WPAN_PHY_FLAG_TXPOWER) {
                ret = drv_set_tx_power(local, params->transmit_power);
                if (ret < 0)
                        return ret;
        }
 
-       if (local->hw.flags & IEEE802154_HW_CCA_MODE) {
+       if (local->hw.phy->flags & WPAN_PHY_FLAG_CCA_MODE) {
                ret = drv_set_cca_mode(local, &params->cca);
                if (ret < 0)
                        return ret;
        }
 
-       if (local->hw.flags & IEEE802154_HW_CCA_ED_LEVEL) {
+       if (local->hw.phy->flags & WPAN_PHY_FLAG_CCA_ED_LEVEL) {
                ret = drv_set_cca_ed_level(local, params->cca_ed_level);
                if (ret < 0)
                        return ret;
@@ -151,9 +144,6 @@ static struct ieee802154_llsec_ops mac802154_llsec_ops = {
 
 struct ieee802154_mlme_ops mac802154_mlme_wpan = {
        .start_req = mac802154_mlme_start_req,
-       .get_pan_id = mac802154_dev_get_pan_id,
-       .get_short_addr = mac802154_dev_get_short_addr,
-       .get_dsn = mac802154_dev_get_dsn,
 
        .llsec = &mac802154_llsec_ops,
 
index 08cb32dc8fd33e892e53f7f87f601b10ede8c38d..356b346e1ee86fdeadebf7be5d318c70dbc0d969 100644 (file)
@@ -107,6 +107,18 @@ ieee802154_alloc_hw(size_t priv_data_len, const struct ieee802154_ops *ops)
 
        skb_queue_head_init(&local->skb_queue);
 
+       /* init supported flags with 802.15.4 default ranges */
+       phy->supported.max_minbe = 8;
+       phy->supported.min_maxbe = 3;
+       phy->supported.max_maxbe = 8;
+       phy->supported.min_frame_retries = -1;
+       phy->supported.max_frame_retries = 7;
+       phy->supported.max_csma_backoffs = 5;
+       phy->supported.lbt = NL802154_SUPPORTED_BOOL_FALSE;
+
+       /* always supported */
+       phy->supported.iftypes = BIT(NL802154_IFTYPE_NODE);
+
        return &local->hw;
 }
 EXPORT_SYMBOL(ieee802154_alloc_hw);
@@ -155,6 +167,26 @@ int ieee802154_register_hw(struct ieee802154_hw *hw)
 
        ieee802154_setup_wpan_phy_pib(local->phy);
 
+       if (!(hw->flags & IEEE802154_HW_CSMA_PARAMS)) {
+               local->phy->supported.min_csma_backoffs = 4;
+               local->phy->supported.max_csma_backoffs = 4;
+               local->phy->supported.min_maxbe = 5;
+               local->phy->supported.max_maxbe = 5;
+               local->phy->supported.min_minbe = 3;
+               local->phy->supported.max_minbe = 3;
+       }
+
+       if (!(hw->flags & IEEE802154_HW_FRAME_RETRIES)) {
+               /* TODO should be 3, but our default value is -1 which means
+                * no ARET handling.
+                */
+               local->phy->supported.min_frame_retries = -1;
+               local->phy->supported.max_frame_retries = -1;
+       }
+
+       if (hw->flags & IEEE802154_HW_PROMISCUOUS)
+               local->phy->supported.iftypes |= BIT(NL802154_IFTYPE_MONITOR);
+
        rc = wpan_phy_register(local->phy);
        if (rc < 0)
                goto out_wq;
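Pinning min == max when an IEEE802154_HW_* flag is absent turns the supported
ranges into a capability description: a request for any other value falls
outside the advertised range and can be rejected generically instead of being
silently ignored. An illustrative range check built on these fields
(hypothetical helper):

        static bool csma_backoffs_in_range(const struct wpan_phy *phy, u8 n)
        {
                return n >= phy->supported.min_csma_backoffs &&
                       n <= phy->supported.max_csma_backoffs;
        }
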
index 5cf019a57fd79cd601209971c349023e6c619ea0..73f94fbf87856a5b7d81ba0c27c50a0c6f5c54fb 100644 (file)
 #include "ieee802154_i.h"
 #include "driver-ops.h"
 
-void mac802154_dev_set_short_addr(struct net_device *dev, __le16 val)
-{
-       struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
-
-       BUG_ON(dev->type != ARPHRD_IEEE802154);
-
-       spin_lock_bh(&sdata->mib_lock);
-       sdata->wpan_dev.short_addr = val;
-       spin_unlock_bh(&sdata->mib_lock);
-}
-
-__le16 mac802154_dev_get_short_addr(const struct net_device *dev)
-{
-       struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
-       __le16 ret;
-
-       BUG_ON(dev->type != ARPHRD_IEEE802154);
-
-       spin_lock_bh(&sdata->mib_lock);
-       ret = sdata->wpan_dev.short_addr;
-       spin_unlock_bh(&sdata->mib_lock);
-
-       return ret;
-}
-
-__le16 mac802154_dev_get_pan_id(const struct net_device *dev)
-{
-       struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
-       __le16 ret;
-
-       BUG_ON(dev->type != ARPHRD_IEEE802154);
-
-       spin_lock_bh(&sdata->mib_lock);
-       ret = sdata->wpan_dev.pan_id;
-       spin_unlock_bh(&sdata->mib_lock);
-
-       return ret;
-}
-
-void mac802154_dev_set_pan_id(struct net_device *dev, __le16 val)
-{
-       struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
-
-       BUG_ON(dev->type != ARPHRD_IEEE802154);
-
-       spin_lock_bh(&sdata->mib_lock);
-       sdata->wpan_dev.pan_id = val;
-       spin_unlock_bh(&sdata->mib_lock);
-}
-
-u8 mac802154_dev_get_dsn(const struct net_device *dev)
-{
-       struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
-
-       BUG_ON(dev->type != ARPHRD_IEEE802154);
-
-       return sdata->wpan_dev.dsn++;
-}
-
 void mac802154_dev_set_page_channel(struct net_device *dev, u8 page, u8 chan)
 {
        struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
        struct ieee802154_local *local = sdata->local;
        int res;
 
+       ASSERT_RTNL();
+
        BUG_ON(dev->type != ARPHRD_IEEE802154);
 
        res = drv_set_channel(local, page, chan);
        if (res) {
                pr_debug("set_channel failed\n");
        } else {
-               mutex_lock(&local->phy->pib_lock);
                local->phy->current_channel = chan;
                local->phy->current_page = page;
-               mutex_unlock(&local->phy->pib_lock);
        }
 }
 
index c0d67b2b4132b033ca28446556c5aefe7e047ee6..e0f10063cac3816fd6830376d65e955429445145 100644 (file)
@@ -47,8 +47,6 @@ ieee802154_subif_frame(struct ieee802154_sub_if_data *sdata,
 
        pr_debug("getting packet via slave interface %s\n", sdata->dev->name);
 
-       spin_lock_bh(&sdata->mib_lock);
-
        span = wpan_dev->pan_id;
        sshort = wpan_dev->short_addr;
 
@@ -83,13 +81,10 @@ ieee802154_subif_frame(struct ieee802154_sub_if_data *sdata,
                        skb->pkt_type = PACKET_OTHERHOST;
                break;
        default:
-               spin_unlock_bh(&sdata->mib_lock);
                pr_debug("invalid dest mode\n");
                goto fail;
        }
 
-       spin_unlock_bh(&sdata->mib_lock);
-
        skb->dev = sdata->dev;
 
        rc = mac802154_llsec_decrypt(&sdata->sec, skb);
index 150bf807e572eb85458371d1c8e930e2cb7ec0b2..583435f3893037e45d4a5879b66b4cda3bb6f27f 100644 (file)
@@ -85,11 +85,10 @@ void ieee802154_xmit_complete(struct ieee802154_hw *hw, struct sk_buff *skb,
                        hrtimer_start(&local->ifs_timer,
                                      ktime_set(0, hw->phy->sifs_period * NSEC_PER_USEC),
                                      HRTIMER_MODE_REL);
-
-               consume_skb(skb);
        } else {
                ieee802154_wake_queue(hw);
-               consume_skb(skb);
        }
+
+       dev_consume_skb_any(skb);
 }
 EXPORT_SYMBOL(ieee802154_xmit_complete);
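Folding the two consume_skb() calls into one dev_consume_skb_any() does two
things: the _any variant is safe from any context, including hard IRQ, where
TX-completion handlers often run, and "consume" (as opposed to kfree_skb())
tells drop monitors the frame was delivered, not lost. The rule of thumb,
sketched:

        if (tx_succeeded)
                dev_consume_skb_any(skb);       /* delivered: not a drop */
        else
                dev_kfree_skb_any(skb);         /* counted as a drop */
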
index 809df534a7204916f10203ce55dcc3fdc0fe7b51..0183b32da9427d39aa761e00e080afeff7e4b6d5 100644 (file)
@@ -62,6 +62,7 @@ out:
 
 static struct packet_offload mpls_mc_offload __read_mostly = {
        .type = cpu_to_be16(ETH_P_MPLS_MC),
+       .priority = 15,
        .callbacks = {
                .gso_segment    =       mpls_gso_segment,
        },
@@ -69,6 +70,7 @@ static struct packet_offload mpls_mc_offload __read_mostly = {
 
 static struct packet_offload mpls_uc_offload __read_mostly = {
        .type = cpu_to_be16(ETH_P_MPLS_UC),
+       .priority = 15,
        .callbacks = {
                .gso_segment    =       mpls_gso_segment,
        },
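The new .priority field lets dev_add_offload() keep the offload list sorted in
ascending order, so when two offloads register the same ethertype the entry
with the smaller value is matched first; 15 places the MPLS GSO handlers
behind lower-numbered ones. Registration itself is unchanged, roughly:

        static int __init mpls_gso_init(void)
        {
                dev_add_offload(&mpls_uc_offload);      /* sorted by .priority */
                dev_add_offload(&mpls_mc_offload);
                return 0;
        }
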
index a0f3e6a3c7d18f344d3321a83b5c11d1988d5d3d..fbc8d15c7fda7644eeba4b35d50baf7cc93da7f5 100644 (file)
@@ -1,6 +1,14 @@
 menu "Core Netfilter Configuration"
        depends on NET && INET && NETFILTER
 
+config NETFILTER_INGRESS
+       bool "Netfilter ingress support"
+       default y
+       select NET_INGRESS
+       help
+         This allows you to classify packets from ingress using the Netfilter
+         infrastructure.
+
 config NETFILTER_NETLINK
        tristate
 
@@ -448,6 +456,11 @@ config NF_TABLES_INET
        help
          This option enables support for a mixed IPv4/IPv6 "inet" table.
 
+config NF_TABLES_NETDEV
+       tristate "Netfilter nf_tables netdev tables support"
+       help
+         This option enables support for the "netdev" table.
+
 config NFT_EXTHDR
        tristate "Netfilter nf_tables IPv6 exthdr module"
        help
index a87d8b8ec730421403930c69061a2c7167db0a6a..70d026d46fe7d07a3ee1d942b7e877782bb37957 100644 (file)
@@ -75,6 +75,7 @@ nf_tables-objs += nft_bitwise.o nft_byteorder.o nft_payload.o
 
 obj-$(CONFIG_NF_TABLES)                += nf_tables.o
 obj-$(CONFIG_NF_TABLES_INET)   += nf_tables_inet.o
+obj-$(CONFIG_NF_TABLES_NETDEV) += nf_tables_netdev.o
 obj-$(CONFIG_NFT_COMPAT)       += nft_compat.o
 obj-$(CONFIG_NFT_EXTHDR)       += nft_exthdr.o
 obj-$(CONFIG_NFT_META)         += nft_meta.o
index e6163017c42db2a1d553bc7b8ac812e5c401fcba..653e32eac08ca587b8c5cd8cf9eeed1a2290906d 100644 (file)
@@ -64,10 +64,27 @@ static DEFINE_MUTEX(nf_hook_mutex);
 
 int nf_register_hook(struct nf_hook_ops *reg)
 {
+       struct list_head *nf_hook_list;
        struct nf_hook_ops *elem;
 
        mutex_lock(&nf_hook_mutex);
-       list_for_each_entry(elem, &nf_hooks[reg->pf][reg->hooknum], list) {
+       switch (reg->pf) {
+       case NFPROTO_NETDEV:
+#ifdef CONFIG_NETFILTER_INGRESS
+               if (reg->hooknum == NF_NETDEV_INGRESS) {
+                       BUG_ON(reg->dev == NULL);
+                       nf_hook_list = &reg->dev->nf_hooks_ingress;
+                       net_inc_ingress_queue();
+                       break;
+               }
+#endif
+               /* Fall through. */
+       default:
+               nf_hook_list = &nf_hooks[reg->pf][reg->hooknum];
+               break;
+       }
+
+       list_for_each_entry(elem, nf_hook_list, list) {
                if (reg->priority < elem->priority)
                        break;
        }
@@ -85,6 +102,18 @@ void nf_unregister_hook(struct nf_hook_ops *reg)
        mutex_lock(&nf_hook_mutex);
        list_del_rcu(&reg->list);
        mutex_unlock(&nf_hook_mutex);
+       switch (reg->pf) {
+       case NFPROTO_NETDEV:
+#ifdef CONFIG_NETFILTER_INGRESS
+               if (reg->hooknum == NF_NETDEV_INGRESS) {
+                       net_dec_ingress_queue();
+                       break;
+               }
+               break;
+#endif
+       default:
+               break;
+       }
 #ifdef HAVE_JUMP_LABEL
        static_key_slow_dec(&nf_hooks_needed[reg->pf][reg->hooknum]);
 #endif
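With NFPROTO_NETDEV hooks hanging off the device rather than the global
nf_hooks table, a client must supply the target device at registration time.
A hedged sketch of attaching to the new ingress point ('my_hook' and
'attach_ingress' are illustrative, not part of this patch):

        static unsigned int my_hook(const struct nf_hook_ops *ops,
                                    struct sk_buff *skb,
                                    const struct nf_hook_state *state)
        {
                return NF_ACCEPT;
        }

        static int attach_ingress(struct net_device *dev)
        {
                static struct nf_hook_ops ops = {
                        .hook     = my_hook,
                        .pf       = NFPROTO_NETDEV,
                        .hooknum  = NF_NETDEV_INGRESS,
                        .priority = 0,
                };

                ops.dev = dev;  /* required: see BUG_ON(reg->dev == NULL) */
                return nf_register_hook(&ops);
        }
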
@@ -166,11 +195,9 @@ int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state)
        /* We may already have this, but read-locks nest anyway */
        rcu_read_lock();
 
-       elem = list_entry_rcu(&nf_hooks[state->pf][state->hook],
-                             struct nf_hook_ops, list);
+       elem = list_entry_rcu(state->hook_list, struct nf_hook_ops, list);
 next_hook:
-       verdict = nf_iterate(&nf_hooks[state->pf][state->hook], skb, state,
-                            &elem);
+       verdict = nf_iterate(state->hook_list, skb, state, &elem);
        if (verdict == NF_ACCEPT || verdict == NF_STOP) {
                ret = 1;
        } else if ((verdict & NF_VERDICT_MASK) == NF_DROP) {
index 55b083ec587a617109bc2b1d4b299f0f6fbe15e3..2fe6de46f6d04dcb70a22aba17ce6e748b058234 100644 (file)
@@ -36,6 +36,7 @@ IP_SET_MODULE_DESC("bitmap:ip", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
 MODULE_ALIAS("ip_set_bitmap:ip");
 
 #define MTYPE          bitmap_ip
+#define HOST_MASK      32
 
 /* Type structure */
 struct bitmap_ip {
@@ -149,8 +150,11 @@ bitmap_ip_uadt(struct ip_set *set, struct nlattr *tb[],
        if (tb[IPSET_ATTR_LINENO])
                *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
 
-       ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip) ||
-             ip_set_get_extensions(set, tb, &ext);
+       ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
+       if (ret)
+               return ret;
+
+       ret = ip_set_get_extensions(set, tb, &ext);
        if (ret)
                return ret;
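The chained form being removed here (and in the sibling set types below) had a
real bug, not just a style problem: the || operator collapses its operands to
0 or 1, so a negative errno from the first helper reached the caller as a
meaningless 1. Illustration of the broken pattern:

        /* BROKEN: ret becomes 0 or 1, the errno is lost */
        ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip) ||
              ip_set_get_extensions(set, tb, &ext);
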
 
@@ -174,7 +178,7 @@ bitmap_ip_uadt(struct ip_set *set, struct nlattr *tb[],
        } else if (tb[IPSET_ATTR_CIDR]) {
                u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
 
-               if (!cidr || cidr > 32)
+               if (!cidr || cidr > HOST_MASK)
                        return -IPSET_ERR_INVALID_CIDR;
                ip_set_mask_from_to(ip, ip_to, cidr);
        } else
@@ -277,7 +281,7 @@ bitmap_ip_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
        } else if (tb[IPSET_ATTR_CIDR]) {
                u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
 
-               if (cidr >= 32)
+               if (cidr >= HOST_MASK)
                        return -IPSET_ERR_INVALID_CIDR;
                ip_set_mask_from_to(first_ip, last_ip, cidr);
        } else
@@ -286,7 +290,7 @@ bitmap_ip_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
        if (tb[IPSET_ATTR_NETMASK]) {
                netmask = nla_get_u8(tb[IPSET_ATTR_NETMASK]);
 
-               if (netmask > 32)
+               if (netmask > HOST_MASK)
                        return -IPSET_ERR_INVALID_NETMASK;
 
                first_ip &= ip_set_hostmask(netmask);
@@ -360,7 +364,8 @@ static struct ip_set_type bitmap_ip_type __read_mostly = {
                [IPSET_ATTR_LINENO]     = { .type = NLA_U32 },
                [IPSET_ATTR_BYTES]      = { .type = NLA_U64 },
                [IPSET_ATTR_PACKETS]    = { .type = NLA_U64 },
-               [IPSET_ATTR_COMMENT]    = { .type = NLA_NUL_STRING },
+               [IPSET_ATTR_COMMENT]    = { .type = NLA_NUL_STRING,
+                                           .len  = IPSET_MAX_COMMENT_SIZE },
                [IPSET_ATTR_SKBMARK]    = { .type = NLA_U64 },
                [IPSET_ATTR_SKBPRIO]    = { .type = NLA_U32 },
                [IPSET_ATTR_SKBQUEUE]   = { .type = NLA_U16 },
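Without a .len bound, NLA_NUL_STRING validation only guarantees NUL
termination, so an arbitrarily long comment attribute would pass nla_parse()
and have to be truncated later. Bounding it in the policy rejects oversized
input up front; in isolation the entry looks like:

        static const struct nla_policy comment_policy[] = {
                [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING,
                                         .len  = IPSET_MAX_COMMENT_SIZE },
        };
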
index 86104744b00ff67339f78db238f093874b70845a..eb188561d65f75370acaeb9761df7cd9a2290860 100644 (file)
@@ -36,6 +36,7 @@ IP_SET_MODULE_DESC("bitmap:ip,mac", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
 MODULE_ALIAS("ip_set_bitmap:ip,mac");
 
 #define MTYPE          bitmap_ipmac
+#define HOST_MASK      32
 #define IP_SET_BITMAP_STORED_TIMEOUT
 
 enum {
@@ -250,8 +251,11 @@ bitmap_ipmac_uadt(struct ip_set *set, struct nlattr *tb[],
        if (tb[IPSET_ATTR_LINENO])
                *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
 
-       ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip) ||
-             ip_set_get_extensions(set, tb, &ext);
+       ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
+       if (ret)
+               return ret;
+
+       ret = ip_set_get_extensions(set, tb, &ext);
        if (ret)
                return ret;
 
@@ -343,7 +347,7 @@ bitmap_ipmac_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
        } else if (tb[IPSET_ATTR_CIDR]) {
                u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
 
-               if (cidr >= 32)
+               if (cidr >= HOST_MASK)
                        return -IPSET_ERR_INVALID_CIDR;
                ip_set_mask_from_to(first_ip, last_ip, cidr);
        } else
@@ -397,7 +401,8 @@ static struct ip_set_type bitmap_ipmac_type = {
                [IPSET_ATTR_LINENO]     = { .type = NLA_U32 },
                [IPSET_ATTR_BYTES]      = { .type = NLA_U64 },
                [IPSET_ATTR_PACKETS]    = { .type = NLA_U64 },
-               [IPSET_ATTR_COMMENT]    = { .type = NLA_NUL_STRING },
+               [IPSET_ATTR_COMMENT]    = { .type = NLA_NUL_STRING,
+                                           .len  = IPSET_MAX_COMMENT_SIZE },
                [IPSET_ATTR_SKBMARK]    = { .type = NLA_U64 },
                [IPSET_ATTR_SKBPRIO]    = { .type = NLA_U32 },
                [IPSET_ATTR_SKBQUEUE]   = { .type = NLA_U16 },
index 005dd36444c3472b2257be6e29ada9882649c1c8..898edb693b3f16905bd56eac1179209ae31fcd96 100644 (file)
@@ -294,7 +294,8 @@ static struct ip_set_type bitmap_port_type = {
                [IPSET_ATTR_LINENO]     = { .type = NLA_U32 },
                [IPSET_ATTR_BYTES]      = { .type = NLA_U64 },
                [IPSET_ATTR_PACKETS]    = { .type = NLA_U64 },
-               [IPSET_ATTR_COMMENT]    = { .type = NLA_NUL_STRING },
+               [IPSET_ATTR_COMMENT]    = { .type = NLA_NUL_STRING,
+                                           .len  = IPSET_MAX_COMMENT_SIZE },
                [IPSET_ATTR_SKBMARK]    = { .type = NLA_U64 },
                [IPSET_ATTR_SKBPRIO]    = { .type = NLA_U32 },
                [IPSET_ATTR_SKBQUEUE]   = { .type = NLA_U16 },
index d259da3ce67a6b18f6e4a345a5729876e373efc0..475e4960a164eca8f6d48c939d6f9165a70b6886 100644 (file)
@@ -42,7 +42,7 @@ static inline struct ip_set_net *ip_set_pernet(struct net *net)
 }
 
 #define IP_SET_INC     64
-#define STREQ(a, b)    (strncmp(a, b, IPSET_MAXNAMELEN) == 0)
+#define STRNCMP(a, b)  (strncmp(a, b, IPSET_MAXNAMELEN) == 0)
 
 static unsigned int max_sets;
 
@@ -85,7 +85,7 @@ find_set_type(const char *name, u8 family, u8 revision)
        struct ip_set_type *type;
 
        list_for_each_entry_rcu(type, &ip_set_type_list, list)
-               if (STREQ(type->name, name) &&
+               if (STRNCMP(type->name, name) &&
                    (type->family == family ||
                     type->family == NFPROTO_UNSPEC) &&
                    revision >= type->revision_min &&
@@ -132,7 +132,7 @@ __find_set_type_get(const char *name, u8 family, u8 revision,
        /* Make sure the type is already loaded
         * but we don't support the revision */
        list_for_each_entry_rcu(type, &ip_set_type_list, list)
-               if (STREQ(type->name, name)) {
+               if (STRNCMP(type->name, name)) {
                        err = -IPSET_ERR_FIND_TYPE;
                        goto unlock;
                }
@@ -166,7 +166,7 @@ __find_set_type_minmax(const char *name, u8 family, u8 *min, u8 *max,
        *min = 255; *max = 0;
        rcu_read_lock();
        list_for_each_entry_rcu(type, &ip_set_type_list, list)
-               if (STREQ(type->name, name) &&
+               if (STRNCMP(type->name, name) &&
                    (type->family == family ||
                     type->family == NFPROTO_UNSPEC)) {
                        found = true;
@@ -365,7 +365,7 @@ size_t
 ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], size_t len)
 {
        enum ip_set_ext_id id;
-       size_t offset = 0;
+       size_t offset = len;
        u32 cadt_flags = 0;
 
        if (tb[IPSET_ATTR_CADT_FLAGS])
@@ -375,12 +375,12 @@ ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], size_t len)
        for (id = 0; id < IPSET_EXT_ID_MAX; id++) {
                if (!add_extension(id, cadt_flags, tb))
                        continue;
-               offset += ALIGN(len + offset, ip_set_extensions[id].align);
+               offset = ALIGN(offset, ip_set_extensions[id].align);
                set->offset[id] = offset;
                set->extensions |= ip_set_extensions[id].type;
                offset += ip_set_extensions[id].len;
        }
-       return len + offset;
+       return offset;
 }
 EXPORT_SYMBOL_GPL(ip_set_elem_len);
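The old arithmetic, offset += ALIGN(len + offset, align), re-added the base
length on every iteration, inflating both the per-extension offsets and the
returned element size. Starting from offset = len and aligning in place gives
the intended packed layout. Worked example with hypothetical sizes (10-byte
element, two 8-byte extensions, 8-byte alignment):

        size_t offset = 10;             /* offset = len */

        offset = ALIGN(offset, 8);      /* ext 1 at 16 (old code: 16) */
        offset += 8;
        offset = ALIGN(offset, 8);      /* ext 2 at 24 (old code: 64) */
        offset += 8;                    /* returns 32 (old code: 82)  */
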
 
@@ -432,6 +432,31 @@ ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[],
 }
 EXPORT_SYMBOL_GPL(ip_set_get_extensions);
 
+int
+ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set,
+                     const void *e, bool active)
+{
+       if (SET_WITH_TIMEOUT(set)) {
+               unsigned long *timeout = ext_timeout(e, set);
+
+               if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+                       htonl(active ? ip_set_timeout_get(timeout)
+                               : *timeout)))
+                       return -EMSGSIZE;
+       }
+       if (SET_WITH_COUNTER(set) &&
+           ip_set_put_counter(skb, ext_counter(e, set)))
+               return -EMSGSIZE;
+       if (SET_WITH_COMMENT(set) &&
+           ip_set_put_comment(skb, ext_comment(e, set)))
+               return -EMSGSIZE;
+       if (SET_WITH_SKBINFO(set) &&
+           ip_set_put_skbinfo(skb, ext_skbinfo(e, set)))
+               return -EMSGSIZE;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(ip_set_put_extensions);
+
 /*
  * Creating/destroying/renaming/swapping affect the existence and
  * the properties of a set. All of these can be executed from userspace
@@ -581,7 +606,7 @@ ip_set_get_byname(struct net *net, const char *name, struct ip_set **set)
        rcu_read_lock();
        for (i = 0; i < inst->ip_set_max; i++) {
                s = rcu_dereference(inst->ip_set_list)[i];
-               if (s != NULL && STREQ(s->name, name)) {
+               if (s != NULL && STRNCMP(s->name, name)) {
                        __ip_set_get(s);
                        index = i;
                        *set = s;
@@ -758,7 +783,7 @@ find_set_and_id(struct ip_set_net *inst, const char *name, ip_set_id_t *id)
        *id = IPSET_INVALID_ID;
        for (i = 0; i < inst->ip_set_max; i++) {
                set = ip_set(inst, i);
-               if (set != NULL && STREQ(set->name, name)) {
+               if (set != NULL && STRNCMP(set->name, name)) {
                        *id = i;
                        break;
                }
@@ -787,7 +812,7 @@ find_free_id(struct ip_set_net *inst, const char *name, ip_set_id_t *index,
                if (s == NULL) {
                        if (*index == IPSET_INVALID_ID)
                                *index = i;
-               } else if (STREQ(name, s->name)) {
+               } else if (STRNCMP(name, s->name)) {
                        /* Name clash */
                        *set = s;
                        return -EEXIST;
@@ -887,7 +912,7 @@ ip_set_create(struct sock *ctnl, struct sk_buff *skb,
        if (ret == -EEXIST) {
                /* If this is the same set and requested, ignore error */
                if ((flags & IPSET_FLAG_EXIST) &&
-                   STREQ(set->type->name, clash->type->name) &&
+                   STRNCMP(set->type->name, clash->type->name) &&
                    set->type->family == clash->type->family &&
                    set->type->revision_min == clash->type->revision_min &&
                    set->type->revision_max == clash->type->revision_max &&
@@ -1098,7 +1123,7 @@ ip_set_rename(struct sock *ctnl, struct sk_buff *skb,
        name2 = nla_data(attr[IPSET_ATTR_SETNAME2]);
        for (i = 0; i < inst->ip_set_max; i++) {
                s = ip_set(inst, i);
-               if (s != NULL && STREQ(s->name, name2)) {
+               if (s != NULL && STRNCMP(s->name, name2)) {
                        ret = -IPSET_ERR_EXIST_SETNAME2;
                        goto out;
                }
index 29fb01ddff93b0a0da7ad1dc3691a141376f5007..1981f021cc60a25f57c68dc498f1c67926228b1f 100644 (file)
@@ -98,7 +98,7 @@ ip_set_get_ip4_port(const struct sk_buff *skb, bool src,
                    __be16 *port, u8 *proto)
 {
        const struct iphdr *iph = ip_hdr(skb);
-       unsigned int protooff = ip_hdrlen(skb);
+       unsigned int protooff = skb_network_offset(skb) + ip_hdrlen(skb);
        int protocol = iph->protocol;
 
        /* See comments at tcp_match in ip_tables.c */
@@ -135,7 +135,9 @@ ip_set_get_ip6_port(const struct sk_buff *skb, bool src,
        __be16 frag_off = 0;
 
        nexthdr = ipv6_hdr(skb)->nexthdr;
-       protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr,
+       protoff = ipv6_skip_exthdr(skb,
+                                  skb_network_offset(skb) +
+                                       sizeof(struct ipv6hdr), &nexthdr,
                                   &frag_off);
        if (protoff < 0 || (frag_off & htons(~0x7)) != 0)
                return false;
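Both lookups here were computing protocol-header offsets relative to the
network header, while the skb_header_pointer() consumers expect offsets
relative to skb->data; the two only coincide when skb->data already points at
the network header. Adding skb_network_offset() makes the math hold on paths
(such as the bridge) where the MAC header is still in front:

        /* offset of the L4 header from skb->data, not from the IP header */
        unsigned int protooff = skb_network_offset(skb) + ip_hdrlen(skb);
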
index 974ff386db0fc118596d58311e1b6ba54cf8973a..7952869c8023f9a42b3502301973747e0ec9adcc 100644 (file)
@@ -180,6 +180,7 @@ hbucket_elem_add(struct hbucket *n, u8 ahash_max, size_t dsize)
 #undef mtype_data_equal
 #undef mtype_do_data_match
 #undef mtype_data_set_flags
+#undef mtype_data_reset_elem
 #undef mtype_data_reset_flags
 #undef mtype_data_netmask
 #undef mtype_data_list
@@ -193,7 +194,6 @@ hbucket_elem_add(struct hbucket *n, u8 ahash_max, size_t dsize)
 #undef mtype_ahash_memsize
 #undef mtype_flush
 #undef mtype_destroy
-#undef mtype_gc_init
 #undef mtype_same_set
 #undef mtype_kadt
 #undef mtype_uadt
@@ -227,6 +227,7 @@ hbucket_elem_add(struct hbucket *n, u8 ahash_max, size_t dsize)
 #define mtype_data_list                IPSET_TOKEN(MTYPE, _data_list)
 #define mtype_data_next                IPSET_TOKEN(MTYPE, _data_next)
 #define mtype_elem             IPSET_TOKEN(MTYPE, _elem)
+
 #define mtype_ahash_destroy    IPSET_TOKEN(MTYPE, _ahash_destroy)
 #define mtype_ext_cleanup      IPSET_TOKEN(MTYPE, _ext_cleanup)
 #define mtype_add_cidr         IPSET_TOKEN(MTYPE, _add_cidr)
@@ -234,7 +235,6 @@ hbucket_elem_add(struct hbucket *n, u8 ahash_max, size_t dsize)
 #define mtype_ahash_memsize    IPSET_TOKEN(MTYPE, _ahash_memsize)
 #define mtype_flush            IPSET_TOKEN(MTYPE, _flush)
 #define mtype_destroy          IPSET_TOKEN(MTYPE, _destroy)
-#define mtype_gc_init          IPSET_TOKEN(MTYPE, _gc_init)
 #define mtype_same_set         IPSET_TOKEN(MTYPE, _same_set)
 #define mtype_kadt             IPSET_TOKEN(MTYPE, _kadt)
 #define mtype_uadt             IPSET_TOKEN(MTYPE, _uadt)
@@ -249,9 +249,18 @@ hbucket_elem_add(struct hbucket *n, u8 ahash_max, size_t dsize)
 #define mtype_head             IPSET_TOKEN(MTYPE, _head)
 #define mtype_list             IPSET_TOKEN(MTYPE, _list)
 #define mtype_gc               IPSET_TOKEN(MTYPE, _gc)
+#define mtype_gc_init          IPSET_TOKEN(MTYPE, _gc_init)
 #define mtype_variant          IPSET_TOKEN(MTYPE, _variant)
 #define mtype_data_match       IPSET_TOKEN(MTYPE, _data_match)
 
+#ifndef MTYPE
+#error "MTYPE is not defined!"
+#endif
+
+#ifndef HOST_MASK
+#error "HOST_MASK is not defined!"
+#endif
+
 #ifndef HKEY_DATALEN
 #define HKEY_DATALEN           sizeof(struct mtype_elem)
 #endif
@@ -261,6 +270,9 @@ hbucket_elem_add(struct hbucket *n, u8 ahash_max, size_t dsize)
        & jhash_mask(htable_bits))
 
 #ifndef htype
+#ifndef HTYPE
+#error "HTYPE is not defined!"
+#endif /* HTYPE */
 #define htype                  HTYPE
 
 /* The generic hash structure */
@@ -287,7 +299,7 @@ struct htype {
        struct net_prefixes nets[0]; /* book-keeping of prefixes */
 #endif
 };
-#endif
+#endif /* htype */
 
 #ifdef IP_SET_HASH_WITH_NETS
 /* Network cidr size book keeping when the hash stores different
@@ -1045,7 +1057,7 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
        u8 netmask;
 #endif
        size_t hsize;
-       struct HTYPE *h;
+       struct htype *h;
        struct htable *t;
 
 #ifndef IP_SET_PROTO_UNDEF
@@ -1165,3 +1177,5 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
        return 0;
 }
 #endif /* IP_SET_EMIT_CREATE */
+
+#undef HKEY_DATALEN
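ip_set_hash_gen.h is a template header: each set type defines the parameter
macros, includes it (possibly twice, for the IPv4 and IPv6 variants), and
relies on the generator to #undef per-instance knobs such as HKEY_DATALEN on
the way out. The new #error guards make a missing parameter fail loudly at
compile time. Typical instantiation, as seen in the hash types below:

        #define MTYPE           hash_ip4
        #define HOST_MASK       32
        #include "ip_set_hash_gen.h"    /* emits the hash_ip4_* variant */
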
index 76959d79e9d1f67e4618b494bd265ceb0acb57a6..54df48b5c4558c9e7229abe6d82069af502e2562 100644 (file)
@@ -56,15 +56,15 @@ hash_ip4_data_equal(const struct hash_ip4_elem *e1,
        return e1->ip == e2->ip;
 }
 
-static inline bool
+static bool
 hash_ip4_data_list(struct sk_buff *skb, const struct hash_ip4_elem *e)
 {
        if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, e->ip))
                goto nla_put_failure;
-       return 0;
+       return false;
 
 nla_put_failure:
-       return 1;
+       return true;
 }
 
 static inline void
@@ -74,7 +74,6 @@ hash_ip4_data_next(struct hash_ip4_elem *next, const struct hash_ip4_elem *e)
 }
 
 #define MTYPE          hash_ip4
-#define PF             4
 #define HOST_MASK      32
 #include "ip_set_hash_gen.h"
 
@@ -121,8 +120,11 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
        if (tb[IPSET_ATTR_LINENO])
                *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
 
-       ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip) ||
-             ip_set_get_extensions(set, tb, &ext);
+       ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
+       if (ret)
+               return ret;
+
+       ret = ip_set_get_extensions(set, tb, &ext);
        if (ret)
                return ret;
 
@@ -145,7 +147,7 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
        } else if (tb[IPSET_ATTR_CIDR]) {
                u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
 
-               if (!cidr || cidr > 32)
+               if (!cidr || cidr > HOST_MASK)
                        return -IPSET_ERR_INVALID_CIDR;
                ip_set_mask_from_to(ip, ip_to, cidr);
        }
@@ -196,10 +198,10 @@ hash_ip6_data_list(struct sk_buff *skb, const struct hash_ip6_elem *e)
 {
        if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6))
                goto nla_put_failure;
-       return 0;
+       return false;
 
 nla_put_failure:
-       return 1;
+       return true;
 }
 
 static inline void
@@ -208,12 +210,9 @@ hash_ip6_data_next(struct hash_ip4_elem *next, const struct hash_ip6_elem *e)
 }
 
 #undef MTYPE
-#undef PF
 #undef HOST_MASK
-#undef HKEY_DATALEN
 
 #define MTYPE          hash_ip6
-#define PF             6
 #define HOST_MASK      128
 
 #define IP_SET_EMIT_CREATE
@@ -261,8 +260,11 @@ hash_ip6_uadt(struct ip_set *set, struct nlattr *tb[],
        if (tb[IPSET_ATTR_LINENO])
                *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
 
-       ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip) ||
-             ip_set_get_extensions(set, tb, &ext);
+       ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
+       if (ret)
+               return ret;
+
+       ret = ip_set_get_extensions(set, tb, &ext);
        if (ret)
                return ret;
 
@@ -301,7 +303,8 @@ static struct ip_set_type hash_ip_type __read_mostly = {
                [IPSET_ATTR_LINENO]     = { .type = NLA_U32 },
                [IPSET_ATTR_BYTES]      = { .type = NLA_U64 },
                [IPSET_ATTR_PACKETS]    = { .type = NLA_U64 },
-               [IPSET_ATTR_COMMENT]    = { .type = NLA_NUL_STRING },
+               [IPSET_ATTR_COMMENT]    = { .type = NLA_NUL_STRING,
+                                           .len  = IPSET_MAX_COMMENT_SIZE },
                [IPSET_ATTR_SKBMARK]    = { .type = NLA_U64 },
                [IPSET_ATTR_SKBPRIO]    = { .type = NLA_U32 },
                [IPSET_ATTR_SKBQUEUE]   = { .type = NLA_U16 },
index 7abf9788cfa850705bc2e5240751d8443a2a558e..d231248eb3e2b11a1dc620707ad3986672813242 100644 (file)
@@ -63,10 +63,10 @@ hash_ipmark4_data_list(struct sk_buff *skb,
        if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
            nla_put_net32(skb, IPSET_ATTR_MARK, htonl(data->mark)))
                goto nla_put_failure;
-       return 0;
+       return false;
 
 nla_put_failure:
-       return 1;
+       return true;
 }
 
 static inline void
@@ -76,10 +76,8 @@ hash_ipmark4_data_next(struct hash_ipmark4_elem *next,
        next->ip = d->ip;
 }
 
-#define MTYPE           hash_ipmark4
-#define PF              4
-#define HOST_MASK       32
-#define HKEY_DATALEN   sizeof(struct hash_ipmark4_elem)
+#define MTYPE          hash_ipmark4
+#define HOST_MASK      32
 #include "ip_set_hash_gen.h"
 
 static int
@@ -123,12 +121,15 @@ hash_ipmark4_uadt(struct ip_set *set, struct nlattr *tb[],
        if (tb[IPSET_ATTR_LINENO])
                *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
 
-       ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &e.ip) ||
-             ip_set_get_extensions(set, tb, &ext);
+       ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &e.ip);
        if (ret)
                return ret;
 
-       e.mark = ntohl(nla_get_u32(tb[IPSET_ATTR_MARK]));
+       ret = ip_set_get_extensions(set, tb, &ext);
+       if (ret)
+               return ret;
+
+       e.mark = ntohl(nla_get_be32(tb[IPSET_ATTR_MARK]));
        e.mark &= h->markmask;
 
        if (adt == IPSET_TEST ||
@@ -147,7 +148,7 @@ hash_ipmark4_uadt(struct ip_set *set, struct nlattr *tb[],
        } else if (tb[IPSET_ATTR_CIDR]) {
                u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
 
-               if (!cidr || cidr > 32)
+               if (!cidr || cidr > HOST_MASK)
                        return -IPSET_ERR_INVALID_CIDR;
                ip_set_mask_from_to(ip, ip_to, cidr);
        }
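Switching to nla_get_be32() matches the dump side, which emits the mark with
nla_put_net32(), i.e. big-endian on the wire; the typed accessor returns a
__be32, so the ntohl() conversion is explicit and sparse can verify it. Both
halves side by side:

        nla_put_net32(skb, IPSET_ATTR_MARK, htonl(mark));       /* dump  */
        mark = ntohl(nla_get_be32(tb[IPSET_ATTR_MARK]));        /* parse */
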
@@ -191,10 +192,10 @@ hash_ipmark6_data_list(struct sk_buff *skb,
        if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
            nla_put_net32(skb, IPSET_ATTR_MARK, htonl(data->mark)))
                goto nla_put_failure;
-       return 0;
+       return false;
 
 nla_put_failure:
-       return 1;
+       return true;
 }
 
 static inline void
@@ -204,15 +205,11 @@ hash_ipmark6_data_next(struct hash_ipmark4_elem *next,
 }
 
 #undef MTYPE
-#undef PF
 #undef HOST_MASK
-#undef HKEY_DATALEN
 
 #define MTYPE          hash_ipmark6
-#define PF             6
 #define HOST_MASK      128
-#define HKEY_DATALEN   sizeof(struct hash_ipmark6_elem)
-#define        IP_SET_EMIT_CREATE
+#define IP_SET_EMIT_CREATE
 #include "ip_set_hash_gen.h"
 
 
@@ -258,12 +255,15 @@ hash_ipmark6_uadt(struct ip_set *set, struct nlattr *tb[],
        if (tb[IPSET_ATTR_LINENO])
                *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
 
-       ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip) ||
-             ip_set_get_extensions(set, tb, &ext);
+       ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
+       if (ret)
+               return ret;
+
+       ret = ip_set_get_extensions(set, tb, &ext);
        if (ret)
                return ret;
 
-       e.mark = ntohl(nla_get_u32(tb[IPSET_ATTR_MARK]));
+       e.mark = ntohl(nla_get_be32(tb[IPSET_ATTR_MARK]));
        e.mark &= h->markmask;
 
        if (adt == IPSET_TEST) {
@@ -307,7 +307,8 @@ static struct ip_set_type hash_ipmark_type __read_mostly = {
                [IPSET_ATTR_LINENO]     = { .type = NLA_U32 },
                [IPSET_ATTR_BYTES]      = { .type = NLA_U64 },
                [IPSET_ATTR_PACKETS]    = { .type = NLA_U64 },
-               [IPSET_ATTR_COMMENT]    = { .type = NLA_NUL_STRING },
+               [IPSET_ATTR_COMMENT]    = { .type = NLA_NUL_STRING,
+                                           .len  = IPSET_MAX_COMMENT_SIZE },
                [IPSET_ATTR_SKBMARK]    = { .type = NLA_U64 },
                [IPSET_ATTR_SKBPRIO]    = { .type = NLA_U32 },
                [IPSET_ATTR_SKBQUEUE]   = { .type = NLA_U16 },
index dcbcceb9a52feea746d2b9d88a9a80e79d579bdb..a47c29f1209092fef680d93ec26247c9dfc8c90e 100644 (file)
@@ -69,10 +69,10 @@ hash_ipport4_data_list(struct sk_buff *skb,
            nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
            nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto))
                goto nla_put_failure;
-       return 0;
+       return false;
 
 nla_put_failure:
-       return 1;
+       return true;
 }
 
 static inline void
@@ -83,10 +83,8 @@ hash_ipport4_data_next(struct hash_ipport4_elem *next,
        next->port = d->port;
 }
 
-#define MTYPE           hash_ipport4
-#define PF              4
-#define HOST_MASK       32
-#define HKEY_DATALEN   sizeof(struct hash_ipport4_elem)
+#define MTYPE          hash_ipport4
+#define HOST_MASK      32
 #include "ip_set_hash_gen.h"
 
 static int
@@ -132,15 +130,15 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
        if (tb[IPSET_ATTR_LINENO])
                *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
 
-       ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &e.ip) ||
-             ip_set_get_extensions(set, tb, &ext);
+       ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &e.ip);
        if (ret)
                return ret;
 
-       if (tb[IPSET_ATTR_PORT])
-               e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
-       else
-               return -IPSET_ERR_PROTOCOL;
+       ret = ip_set_get_extensions(set, tb, &ext);
+       if (ret)
+               return ret;
+
+       e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
 
        if (tb[IPSET_ATTR_PROTO]) {
                e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
@@ -171,7 +169,7 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
        } else if (tb[IPSET_ATTR_CIDR]) {
                u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
 
-               if (!cidr || cidr > 32)
+               if (!cidr || cidr > HOST_MASK)
                        return -IPSET_ERR_INVALID_CIDR;
                ip_set_mask_from_to(ip, ip_to, cidr);
        }
@@ -231,10 +229,10 @@ hash_ipport6_data_list(struct sk_buff *skb,
            nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
            nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto))
                goto nla_put_failure;
-       return 0;
+       return false;
 
 nla_put_failure:
-       return 1;
+       return true;
 }
 
 static inline void
@@ -245,15 +243,11 @@ hash_ipport6_data_next(struct hash_ipport4_elem *next,
 }
 
 #undef MTYPE
-#undef PF
 #undef HOST_MASK
-#undef HKEY_DATALEN
 
 #define MTYPE          hash_ipport6
-#define PF             6
 #define HOST_MASK      128
-#define HKEY_DATALEN   sizeof(struct hash_ipport6_elem)
-#define        IP_SET_EMIT_CREATE
+#define IP_SET_EMIT_CREATE
 #include "ip_set_hash_gen.h"
 
 static int
@@ -301,15 +295,15 @@ hash_ipport6_uadt(struct ip_set *set, struct nlattr *tb[],
        if (tb[IPSET_ATTR_LINENO])
                *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
 
-       ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip) ||
-             ip_set_get_extensions(set, tb, &ext);
+       ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
        if (ret)
                return ret;
 
-       if (tb[IPSET_ATTR_PORT])
-               e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
-       else
-               return -IPSET_ERR_PROTOCOL;
+       ret = ip_set_get_extensions(set, tb, &ext);
+       if (ret)
+               return ret;
+
+       e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
 
        if (tb[IPSET_ATTR_PROTO]) {
                e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
@@ -376,7 +370,8 @@ static struct ip_set_type hash_ipport_type __read_mostly = {
                [IPSET_ATTR_LINENO]     = { .type = NLA_U32 },
                [IPSET_ATTR_BYTES]      = { .type = NLA_U64 },
                [IPSET_ATTR_PACKETS]    = { .type = NLA_U64 },
-               [IPSET_ATTR_COMMENT]    = { .type = NLA_NUL_STRING },
+               [IPSET_ATTR_COMMENT]    = { .type = NLA_NUL_STRING,
+                                           .len  = IPSET_MAX_COMMENT_SIZE },
                [IPSET_ATTR_SKBMARK]    = { .type = NLA_U64 },
                [IPSET_ATTR_SKBPRIO]    = { .type = NLA_U32 },
                [IPSET_ATTR_SKBQUEUE]   = { .type = NLA_U16 },
index 7ef93fc887a13b5e5d530a98a2bb3ae7f015d390..89615f134845d859d1bc526c5a47f025f77de249 100644 (file)
@@ -70,10 +70,10 @@ hash_ipportip4_data_list(struct sk_buff *skb,
            nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
            nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto))
                goto nla_put_failure;
-       return 0;
+       return false;
 
 nla_put_failure:
-       return 1;
+       return true;
 }
 
 static inline void
@@ -86,7 +86,6 @@ hash_ipportip4_data_next(struct hash_ipportip4_elem *next,
 
 /* Common functions */
 #define MTYPE          hash_ipportip4
-#define PF             4
 #define HOST_MASK      32
 #include "ip_set_hash_gen.h"
 
@@ -134,8 +133,11 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
        if (tb[IPSET_ATTR_LINENO])
                *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
 
-       ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &e.ip) ||
-             ip_set_get_extensions(set, tb, &ext);
+       ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &e.ip);
+       if (ret)
+               return ret;
+
+       ret = ip_set_get_extensions(set, tb, &ext);
        if (ret)
                return ret;
 
@@ -143,10 +145,7 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
        if (ret)
                return ret;
 
-       if (tb[IPSET_ATTR_PORT])
-               e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
-       else
-               return -IPSET_ERR_PROTOCOL;
+       e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
 
        if (tb[IPSET_ATTR_PROTO]) {
                e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
@@ -177,7 +176,7 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
        } else if (tb[IPSET_ATTR_CIDR]) {
                u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
 
-               if (!cidr || cidr > 32)
+               if (!cidr || cidr > HOST_MASK)
                        return -IPSET_ERR_INVALID_CIDR;
                ip_set_mask_from_to(ip, ip_to, cidr);
        }
@@ -240,10 +239,10 @@ hash_ipportip6_data_list(struct sk_buff *skb,
            nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
            nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto))
                goto nla_put_failure;
-       return 0;
+       return false;
 
 nla_put_failure:
-       return 1;
+       return true;
 }
 
 static inline void
@@ -254,11 +253,9 @@ hash_ipportip6_data_next(struct hash_ipportip4_elem *next,
 }
 
 #undef MTYPE
-#undef PF
 #undef HOST_MASK
 
 #define MTYPE          hash_ipportip6
-#define PF             6
 #define HOST_MASK      128
 #define IP_SET_EMIT_CREATE
 #include "ip_set_hash_gen.h"
@@ -309,8 +306,11 @@ hash_ipportip6_uadt(struct ip_set *set, struct nlattr *tb[],
        if (tb[IPSET_ATTR_LINENO])
                *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
 
-       ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip) ||
-             ip_set_get_extensions(set, tb, &ext);
+       ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
+       if (ret)
+               return ret;
+
+       ret = ip_set_get_extensions(set, tb, &ext);
        if (ret)
                return ret;
 
@@ -318,10 +318,7 @@ hash_ipportip6_uadt(struct ip_set *set, struct nlattr *tb[],
        if (ret)
                return ret;
 
-       if (tb[IPSET_ATTR_PORT])
-               e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
-       else
-               return -IPSET_ERR_PROTOCOL;
+       e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
 
        if (tb[IPSET_ATTR_PROTO]) {
                e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
@@ -388,7 +385,8 @@ static struct ip_set_type hash_ipportip_type __read_mostly = {
                [IPSET_ATTR_LINENO]     = { .type = NLA_U32 },
                [IPSET_ATTR_BYTES]      = { .type = NLA_U64 },
                [IPSET_ATTR_PACKETS]    = { .type = NLA_U64 },
-               [IPSET_ATTR_COMMENT]    = { .type = NLA_NUL_STRING },
+               [IPSET_ATTR_COMMENT]    = { .type = NLA_NUL_STRING,
+                                           .len  = IPSET_MAX_COMMENT_SIZE },
                [IPSET_ATTR_SKBMARK]    = { .type = NLA_U64 },
                [IPSET_ATTR_SKBPRIO]    = { .type = NLA_U32 },
                [IPSET_ATTR_SKBQUEUE]   = { .type = NLA_U16 },
index b6012ad9278113e43c98e8615a0694ef9c30c48b..6ba7a7e083f91690d47b4451dd4fca1d90b4f378 100644 (file)
@@ -114,10 +114,10 @@ hash_ipportnet4_data_list(struct sk_buff *skb,
            (flags &&
             nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
                goto nla_put_failure;
-       return 0;
+       return false;
 
 nla_put_failure:
-       return 1;
+       return true;
 }
 
 static inline void
@@ -130,7 +130,6 @@ hash_ipportnet4_data_next(struct hash_ipportnet4_elem *next,
 }
 
 #define MTYPE          hash_ipportnet4
-#define PF             4
 #define HOST_MASK      32
 #include "ip_set_hash_gen.h"
 
@@ -189,8 +188,11 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
        if (tb[IPSET_ATTR_LINENO])
                *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
 
-       ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip) ||
-             ip_set_get_extensions(set, tb, &ext);
+       ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
+       if (ret)
+               return ret;
+
+       ret = ip_set_get_extensions(set, tb, &ext);
        if (ret)
                return ret;
 
@@ -205,10 +207,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
                e.cidr = cidr - 1;
        }
 
-       if (tb[IPSET_ATTR_PORT])
-               e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
-       else
-               return -IPSET_ERR_PROTOCOL;
+       e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
 
        if (tb[IPSET_ATTR_PROTO]) {
                e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
@@ -249,7 +248,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
        } else if (tb[IPSET_ATTR_CIDR]) {
                cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
 
-               if (!cidr || cidr > 32)
+               if (!cidr || cidr > HOST_MASK)
                        return -IPSET_ERR_INVALID_CIDR;
                ip_set_mask_from_to(ip, ip_to, cidr);
        }
@@ -367,10 +366,10 @@ hash_ipportnet6_data_list(struct sk_buff *skb,
            (flags &&
             nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
                goto nla_put_failure;
-       return 0;
+       return false;
 
 nla_put_failure:
-       return 1;
+       return true;
 }
 
 static inline void
@@ -381,11 +380,9 @@ hash_ipportnet6_data_next(struct hash_ipportnet4_elem *next,
 }
 
 #undef MTYPE
-#undef PF
 #undef HOST_MASK
 
 #define MTYPE          hash_ipportnet6
-#define PF             6
 #define HOST_MASK      128
 #define IP_SET_EMIT_CREATE
 #include "ip_set_hash_gen.h"
@@ -448,8 +445,11 @@ hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
        if (tb[IPSET_ATTR_LINENO])
                *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
 
-       ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip) ||
-             ip_set_get_extensions(set, tb, &ext);
+       ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
+       if (ret)
+               return ret;
+
+       ret = ip_set_get_extensions(set, tb, &ext);
        if (ret)
                return ret;
 
@@ -466,10 +466,7 @@ hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
 
        ip6_netmask(&e.ip2, e.cidr + 1);
 
-       if (tb[IPSET_ATTR_PORT])
-               e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
-       else
-               return -IPSET_ERR_PROTOCOL;
+       e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
 
        if (tb[IPSET_ATTR_PROTO]) {
                e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
@@ -547,7 +544,8 @@ static struct ip_set_type hash_ipportnet_type __read_mostly = {
                [IPSET_ATTR_LINENO]     = { .type = NLA_U32 },
                [IPSET_ATTR_BYTES]      = { .type = NLA_U64 },
                [IPSET_ATTR_PACKETS]    = { .type = NLA_U64 },
-               [IPSET_ATTR_COMMENT]    = { .type = NLA_NUL_STRING },
+               [IPSET_ATTR_COMMENT]    = { .type = NLA_NUL_STRING,
+                                           .len  = IPSET_MAX_COMMENT_SIZE },
                [IPSET_ATTR_SKBMARK]    = { .type = NLA_U64 },
                [IPSET_ATTR_SKBPRIO]    = { .type = NLA_U32 },
                [IPSET_ATTR_SKBQUEUE]   = { .type = NLA_U16 },
index 65690b52a4d584cce8a6b7ce2864f716f4d39fd1..1f8668d7a53851556a6e1cff775ca2b4233451e2 100644 (file)
@@ -52,7 +52,12 @@ hash_mac4_data_equal(const struct hash_mac4_elem *e1,
 static inline bool
 hash_mac4_data_list(struct sk_buff *skb, const struct hash_mac4_elem *e)
 {
-       return nla_put(skb, IPSET_ATTR_ETHER, ETH_ALEN, e->ether);
+       if (nla_put(skb, IPSET_ATTR_ETHER, ETH_ALEN, e->ether))
+               goto nla_put_failure;
+       return false;
+
+nla_put_failure:
+       return true;
 }
 
 static inline void
@@ -62,7 +67,6 @@ hash_mac4_data_next(struct hash_mac4_elem *next,
 }
 
 #define MTYPE          hash_mac4
-#define PF             4
 #define HOST_MASK      32
 #define IP_SET_EMIT_CREATE
 #define IP_SET_PROTO_UNDEF
@@ -149,7 +153,8 @@ static struct ip_set_type hash_mac_type __read_mostly = {
                [IPSET_ATTR_LINENO]     = { .type = NLA_U32 },
                [IPSET_ATTR_BYTES]      = { .type = NLA_U64 },
                [IPSET_ATTR_PACKETS]    = { .type = NLA_U64 },
-               [IPSET_ATTR_COMMENT]    = { .type = NLA_NUL_STRING },
+               [IPSET_ATTR_COMMENT]    = { .type = NLA_NUL_STRING,
+                                           .len  = IPSET_MAX_COMMENT_SIZE },
                [IPSET_ATTR_SKBMARK]    = { .type = NLA_U64 },
                [IPSET_ATTR_SKBPRIO]    = { .type = NLA_U32 },
                [IPSET_ATTR_SKBQUEUE]   = { .type = NLA_U16 },
index 6b3ac10ac2f18718bda3b1b1f4108fa347eda377..2e63dad8644da6c2cd024fff2e1b38d7d758b03e 100644 (file)
@@ -95,10 +95,10 @@ hash_net4_data_list(struct sk_buff *skb, const struct hash_net4_elem *data)
            (flags &&
             nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
                goto nla_put_failure;
-       return 0;
+       return false;
 
 nla_put_failure:
-       return 1;
+       return true;
 }
 
 static inline void
@@ -109,7 +109,6 @@ hash_net4_data_next(struct hash_net4_elem *next,
 }
 
 #define MTYPE          hash_net4
-#define PF             4
 #define HOST_MASK      32
 #include "ip_set_hash_gen.h"
 
@@ -160,8 +159,11 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
        if (tb[IPSET_ATTR_LINENO])
                *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
 
-       ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip) ||
-             ip_set_get_extensions(set, tb, &ext);
+       ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
+       if (ret)
+               return ret;
+
+       ret = ip_set_get_extensions(set, tb, &ext);
        if (ret)
                return ret;
 
@@ -264,10 +266,10 @@ hash_net6_data_list(struct sk_buff *skb, const struct hash_net6_elem *data)
            (flags &&
             nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
                goto nla_put_failure;
-       return 0;
+       return false;
 
 nla_put_failure:
-       return 1;
+       return true;
 }
 
 static inline void
@@ -277,11 +279,9 @@ hash_net6_data_next(struct hash_net4_elem *next,
 }
 
 #undef MTYPE
-#undef PF
 #undef HOST_MASK
 
 #define MTYPE          hash_net6
-#define PF             6
 #define HOST_MASK      128
 #define IP_SET_EMIT_CREATE
 #include "ip_set_hash_gen.h"
@@ -333,8 +333,11 @@ hash_net6_uadt(struct ip_set *set, struct nlattr *tb[],
        if (tb[IPSET_ATTR_LINENO])
                *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
 
-       ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip) ||
-             ip_set_get_extensions(set, tb, &ext);
+       ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
+       if (ret)
+               return ret;
+
+       ret = ip_set_get_extensions(set, tb, &ext);
        if (ret)
                return ret;
 
@@ -383,7 +386,8 @@ static struct ip_set_type hash_net_type __read_mostly = {
                [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
                [IPSET_ATTR_BYTES]      = { .type = NLA_U64 },
                [IPSET_ATTR_PACKETS]    = { .type = NLA_U64 },
-               [IPSET_ATTR_COMMENT]    = { .type = NLA_NUL_STRING },
+               [IPSET_ATTR_COMMENT]    = { .type = NLA_NUL_STRING,
+                                           .len  = IPSET_MAX_COMMENT_SIZE },
                [IPSET_ATTR_SKBMARK]    = { .type = NLA_U64 },
                [IPSET_ATTR_SKBPRIO]    = { .type = NLA_U32 },
                [IPSET_ATTR_SKBQUEUE]   = { .type = NLA_U16 },
index 380ef5148ea11ef29b5e3ab8113b94905fbea389..fe481f677f56edbbb195ee75008e2ecd01ff0849 100644 (file)
@@ -193,10 +193,10 @@ hash_netiface4_data_list(struct sk_buff *skb,
            (flags &&
             nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
                goto nla_put_failure;
-       return 0;
+       return false;
 
 nla_put_failure:
-       return 1;
+       return true;
 }
 
 static inline void
@@ -207,7 +207,6 @@ hash_netiface4_data_next(struct hash_netiface4_elem *next,
 }
 
 #define MTYPE          hash_netiface4
-#define PF             4
 #define HOST_MASK      32
 #define HKEY_DATALEN   sizeof(struct hash_netiface4_elem_hashed)
 #include "ip_set_hash_gen.h"
@@ -308,8 +307,11 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
        if (tb[IPSET_ATTR_LINENO])
                *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
 
-       ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip) ||
-             ip_set_get_extensions(set, tb, &ext);
+       ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
+       if (ret)
+               return ret;
+
+       ret = ip_set_get_extensions(set, tb, &ext);
        if (ret)
                return ret;
 
@@ -444,10 +446,10 @@ hash_netiface6_data_list(struct sk_buff *skb,
            (flags &&
             nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
                goto nla_put_failure;
-       return 0;
+       return false;
 
 nla_put_failure:
-       return 1;
+       return true;
 }
 
 static inline void
@@ -457,12 +459,9 @@ hash_netiface6_data_next(struct hash_netiface4_elem *next,
 }
 
 #undef MTYPE
-#undef PF
 #undef HOST_MASK
-#undef HKEY_DATALEN
 
 #define MTYPE          hash_netiface6
-#define PF             6
 #define HOST_MASK      128
 #define HKEY_DATALEN   sizeof(struct hash_netiface6_elem_hashed)
 #define IP_SET_EMIT_CREATE
@@ -546,8 +545,11 @@ hash_netiface6_uadt(struct ip_set *set, struct nlattr *tb[],
        if (tb[IPSET_ATTR_LINENO])
                *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
 
-       ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip) ||
-             ip_set_get_extensions(set, tb, &ext);
+       ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
+       if (ret)
+               return ret;
+
+       ret = ip_set_get_extensions(set, tb, &ext);
        if (ret)
                return ret;
 
@@ -613,7 +615,8 @@ static struct ip_set_type hash_netiface_type __read_mostly = {
                [IPSET_ATTR_LINENO]     = { .type = NLA_U32 },
                [IPSET_ATTR_BYTES]      = { .type = NLA_U64 },
                [IPSET_ATTR_PACKETS]    = { .type = NLA_U64 },
-               [IPSET_ATTR_COMMENT]    = { .type = NLA_NUL_STRING },
+               [IPSET_ATTR_COMMENT]    = { .type = NLA_NUL_STRING,
+                                           .len  = IPSET_MAX_COMMENT_SIZE },
                [IPSET_ATTR_SKBMARK]    = { .type = NLA_U64 },
                [IPSET_ATTR_SKBPRIO]    = { .type = NLA_U32 },
                [IPSET_ATTR_SKBQUEUE]   = { .type = NLA_U16 },
index ea8772afb6e70c4d366220fc7654f64793e4ef88..84704748356010d680b05b35cf9afb9743e432d4 100644 (file)
@@ -128,7 +128,6 @@ hash_netnet4_data_next(struct hash_netnet4_elem *next,
 }
 
 #define MTYPE          hash_netnet4
-#define PF             4
 #define HOST_MASK      32
 #include "ip_set_hash_gen.h"
 
@@ -182,9 +181,15 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
        if (tb[IPSET_ATTR_LINENO])
                *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
 
-       ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip) ||
-             ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2], &ip2_from) ||
-             ip_set_get_extensions(set, tb, &ext);
+       ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
+       if (ret)
+               return ret;
+
+       ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2], &ip2_from);
+       if (ret)
+               return ret;
+
+       ret = ip_set_get_extensions(set, tb, &ext);
        if (ret)
                return ret;
 
@@ -354,11 +359,9 @@ hash_netnet6_data_next(struct hash_netnet4_elem *next,
 }
 
 #undef MTYPE
-#undef PF
 #undef HOST_MASK
 
 #define MTYPE          hash_netnet6
-#define PF             6
 #define HOST_MASK      128
 #define IP_SET_EMIT_CREATE
 #include "ip_set_hash_gen.h"
@@ -411,9 +414,15 @@ hash_netnet6_uadt(struct ip_set *set, struct nlattr *tb[],
        if (tb[IPSET_ATTR_LINENO])
                *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
 
-       ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip[0]) ||
-             ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &e.ip[1]) ||
-             ip_set_get_extensions(set, tb, &ext);
+       ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip[0]);
+       if (ret)
+               return ret;
+
+       ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &e.ip[1]);
+       if (ret)
+               return ret;
+
+       ret = ip_set_get_extensions(set, tb, &ext);
        if (ret)
                return ret;
 
@@ -470,7 +479,8 @@ static struct ip_set_type hash_netnet_type __read_mostly = {
                [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
                [IPSET_ATTR_BYTES]      = { .type = NLA_U64 },
                [IPSET_ATTR_PACKETS]    = { .type = NLA_U64 },
-               [IPSET_ATTR_COMMENT]    = { .type = NLA_NUL_STRING },
+               [IPSET_ATTR_COMMENT]    = { .type = NLA_NUL_STRING,
+                                           .len  = IPSET_MAX_COMMENT_SIZE },
                [IPSET_ATTR_SKBMARK]    = { .type = NLA_U64 },
                [IPSET_ATTR_SKBPRIO]    = { .type = NLA_U32 },
                [IPSET_ATTR_SKBQUEUE]   = { .type = NLA_U16 },
index c0ddb58d19dcca43818ea734cbc1ca64ef7eb1c9..8273819c1a2f638e1024528d600476dbfb8b144b 100644 (file)
@@ -110,10 +110,10 @@ hash_netport4_data_list(struct sk_buff *skb,
            (flags &&
             nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
                goto nla_put_failure;
-       return 0;
+       return false;
 
 nla_put_failure:
-       return 1;
+       return true;
 }
 
 static inline void
@@ -125,7 +125,6 @@ hash_netport4_data_next(struct hash_netport4_elem *next,
 }
 
 #define MTYPE          hash_netport4
-#define PF             4
 #define HOST_MASK      32
 #include "ip_set_hash_gen.h"
 
@@ -182,8 +181,11 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
        if (tb[IPSET_ATTR_LINENO])
                *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
 
-       ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip) ||
-             ip_set_get_extensions(set, tb, &ext);
+       ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
+       if (ret)
+               return ret;
+
+       ret = ip_set_get_extensions(set, tb, &ext);
        if (ret)
                return ret;
 
@@ -194,10 +196,7 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
                e.cidr = cidr - 1;
        }
 
-       if (tb[IPSET_ATTR_PORT])
-               e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
-       else
-               return -IPSET_ERR_PROTOCOL;
+       e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
 
        if (tb[IPSET_ATTR_PROTO]) {
                e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
@@ -326,10 +325,10 @@ hash_netport6_data_list(struct sk_buff *skb,
            (flags &&
             nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
                goto nla_put_failure;
-       return 0;
+       return false;
 
 nla_put_failure:
-       return 1;
+       return true;
 }
 
 static inline void
@@ -340,11 +339,9 @@ hash_netport6_data_next(struct hash_netport4_elem *next,
 }
 
 #undef MTYPE
-#undef PF
 #undef HOST_MASK
 
 #define MTYPE          hash_netport6
-#define PF             6
 #define HOST_MASK      128
 #define IP_SET_EMIT_CREATE
 #include "ip_set_hash_gen.h"
@@ -404,8 +401,11 @@ hash_netport6_uadt(struct ip_set *set, struct nlattr *tb[],
        if (tb[IPSET_ATTR_LINENO])
                *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
 
-       ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip) ||
-             ip_set_get_extensions(set, tb, &ext);
+       ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip);
+       if (ret)
+               return ret;
+
+       ret = ip_set_get_extensions(set, tb, &ext);
        if (ret)
                return ret;
 
@@ -417,10 +417,7 @@ hash_netport6_uadt(struct ip_set *set, struct nlattr *tb[],
        }
        ip6_netmask(&e.ip, e.cidr + 1);
 
-       if (tb[IPSET_ATTR_PORT])
-               e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
-       else
-               return -IPSET_ERR_PROTOCOL;
+       e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
 
        if (tb[IPSET_ATTR_PROTO]) {
                e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
@@ -495,7 +492,8 @@ static struct ip_set_type hash_netport_type __read_mostly = {
                [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
                [IPSET_ATTR_BYTES]      = { .type = NLA_U64 },
                [IPSET_ATTR_PACKETS]    = { .type = NLA_U64 },
-               [IPSET_ATTR_COMMENT]    = { .type = NLA_NUL_STRING },
+               [IPSET_ATTR_COMMENT]    = { .type = NLA_NUL_STRING,
+                                           .len  = IPSET_MAX_COMMENT_SIZE },
                [IPSET_ATTR_SKBMARK]    = { .type = NLA_U64 },
                [IPSET_ATTR_SKBPRIO]    = { .type = NLA_U32 },
                [IPSET_ATTR_SKBQUEUE]   = { .type = NLA_U16 },
index bfaa94c7baa79300da255f9f7761f888b79136e7..1451a8ac938f97d3c402bf1d78c4463969131f0b 100644 (file)
@@ -54,7 +54,7 @@ struct hash_netportnet4_elem {
                u16 ccmp;
        };
        u16 padding;
-       u8 nomatch:1;
+       u8 nomatch;
        u8 proto;
 };
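Widening nomatch from a one-bit field to a full u8 reads like padding churn, but one-bit fields carry a real hazard: assigning anything but bit 0 of a flags word into them silently truncates to zero, the classic bitfield trap, whereas a full byte keeps any nonzero flag value and "if (nomatch)" tests keep working. The motivating bug is not visible in this hunk, so take the following as a plausible demonstration rather than the commit's stated rationale; plain C, self-contained:

    #include <stdio.h>

    struct elem_bit  { unsigned char nomatch:1; unsigned char proto; };
    struct elem_byte { unsigned char nomatch;   unsigned char proto; };

    int main(void)
    {
            unsigned flags = 0x2;   /* a flag living above bit 0 */
            struct elem_bit  a = { 0 };
            struct elem_byte b = { 0 };

            a.nomatch = flags & 0x2;   /* 2 truncated to 1 bit: 0 (!) */
            b.nomatch = flags & 0x2;   /* stays 2; nonzero test works */

            printf("%u %u\n", a.nomatch, b.nomatch);   /* prints: 0 2 */
            return 0;
    }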
 
@@ -124,10 +124,10 @@ hash_netportnet4_data_list(struct sk_buff *skb,
            (flags &&
             nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
                goto nla_put_failure;
-       return 0;
+       return false;
 
 nla_put_failure:
-       return 1;
+       return true;
 }
 
 static inline void
@@ -139,7 +139,6 @@ hash_netportnet4_data_next(struct hash_netportnet4_elem *next,
 }
 
 #define MTYPE          hash_netportnet4
-#define PF             4
 #define HOST_MASK      32
 #include "ip_set_hash_gen.h"
 
@@ -200,9 +199,15 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
        if (tb[IPSET_ATTR_LINENO])
                *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
 
-       ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip) ||
-             ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2], &ip2_from) ||
-             ip_set_get_extensions(set, tb, &ext);
+       ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
+       if (ret)
+               return ret;
+
+       ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2], &ip2_from);
+       if (ret)
+               return ret;
+
+       ret = ip_set_get_extensions(set, tb, &ext);
        if (ret)
                return ret;
 
@@ -220,10 +225,7 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
                e.cidr[1] = cidr;
        }
 
-       if (tb[IPSET_ATTR_PORT])
-               e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
-       else
-               return -IPSET_ERR_PROTOCOL;
+       e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
 
        if (tb[IPSET_ATTR_PROTO]) {
                e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
@@ -326,7 +328,7 @@ struct hash_netportnet6_elem {
                u16 ccmp;
        };
        u16 padding;
-       u8 nomatch:1;
+       u8 nomatch;
        u8 proto;
 };
 
@@ -397,10 +399,10 @@ hash_netportnet6_data_list(struct sk_buff *skb,
            (flags &&
             nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
                goto nla_put_failure;
-       return 0;
+       return false;
 
 nla_put_failure:
-       return 1;
+       return true;
 }
 
 static inline void
@@ -411,11 +413,9 @@ hash_netportnet6_data_next(struct hash_netportnet4_elem *next,
 }
 
 #undef MTYPE
-#undef PF
 #undef HOST_MASK
 
 #define MTYPE          hash_netportnet6
-#define PF             6
 #define HOST_MASK      128
 #define IP_SET_EMIT_CREATE
 #include "ip_set_hash_gen.h"
@@ -477,9 +477,15 @@ hash_netportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
        if (tb[IPSET_ATTR_LINENO])
                *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
 
-       ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip[0]) ||
-             ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &e.ip[1]) ||
-             ip_set_get_extensions(set, tb, &ext);
+       ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip[0]);
+       if (ret)
+               return ret;
+
+       ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &e.ip[1]);
+       if (ret)
+               return ret;
+
+       ret = ip_set_get_extensions(set, tb, &ext);
        if (ret)
                return ret;
 
@@ -496,10 +502,7 @@ hash_netportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
        ip6_netmask(&e.ip[0], e.cidr[0]);
        ip6_netmask(&e.ip[1], e.cidr[1]);
 
-       if (tb[IPSET_ATTR_PORT])
-               e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
-       else
-               return -IPSET_ERR_PROTOCOL;
+       e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
 
        if (tb[IPSET_ATTR_PROTO]) {
                e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
@@ -577,7 +580,8 @@ static struct ip_set_type hash_netportnet_type __read_mostly = {
                [IPSET_ATTR_LINENO]     = { .type = NLA_U32 },
                [IPSET_ATTR_BYTES]      = { .type = NLA_U64 },
                [IPSET_ATTR_PACKETS]    = { .type = NLA_U64 },
-               [IPSET_ATTR_COMMENT]    = { .type = NLA_NUL_STRING },
+               [IPSET_ATTR_COMMENT]    = { .type = NLA_NUL_STRING,
+                                           .len  = IPSET_MAX_COMMENT_SIZE },
                [IPSET_ATTR_SKBMARK]    = { .type = NLA_U64 },
                [IPSET_ATTR_SKBPRIO]    = { .type = NLA_U32 },
                [IPSET_ATTR_SKBQUEUE]   = { .type = NLA_U16 },
index f8f682806e36df61fe6a606fcc94ed96660e2227..5bd3b1eae3fad2ad99109cf6f433ff84c0ba0862 100644 (file)
@@ -678,7 +678,8 @@ static struct ip_set_type list_set_type __read_mostly = {
                [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
                [IPSET_ATTR_BYTES]      = { .type = NLA_U64 },
                [IPSET_ATTR_PACKETS]    = { .type = NLA_U64 },
-               [IPSET_ATTR_COMMENT]    = { .type = NLA_NUL_STRING },
+               [IPSET_ATTR_COMMENT]    = { .type = NLA_NUL_STRING,
+                                           .len  = IPSET_MAX_COMMENT_SIZE },
                [IPSET_ATTR_SKBMARK]    = { .type = NLA_U64 },
                [IPSET_ATTR_SKBPRIO]    = { .type = NLA_U32 },
                [IPSET_ATTR_SKBQUEUE]   = { .type = NLA_U16 },
index 19b9cce6c210c425f3e577a22e0ac8c2e1c71804..b08ba9538d121bad95ab9aa579b2b37523f57dc2 100644 (file)
@@ -1457,18 +1457,12 @@ static struct socket *make_send_sock(struct net *net, int id)
        struct socket *sock;
        int result;
 
-       /* First create a socket move it to right name space later */
-       result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
+       /* First create a socket */
+       result = sock_create_kern(net, PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
        if (result < 0) {
                pr_err("Error during creation of socket; terminating\n");
                return ERR_PTR(result);
        }
-       /*
-        * Kernel sockets that are a part of a namespace, should not
-        * hold a reference to a namespace in order to allow to stop it.
-        * After sk_change_net should be released using sk_release_kernel.
-        */
-       sk_change_net(sock->sk, net);
        result = set_mcast_if(sock->sk, ipvs->master_mcast_ifn);
        if (result < 0) {
                pr_err("Error setting outbound mcast interface\n");
@@ -1497,7 +1491,7 @@ static struct socket *make_send_sock(struct net *net, int id)
        return sock;
 
 error:
-       sk_release_kernel(sock->sk);
+       sock_release(sock);
        return ERR_PTR(result);
 }
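These ip_vs_sync hunks track a tree-wide core change in this merge: sock_create_kern() now takes the target struct net directly, and kernel sockets no longer pin their namespace (the deleted comment explains why they must not: a held reference would keep the namespace from ever being stopped). With sk_change_net() and sk_release_kernel() gone, every setup, error, and teardown path collapses onto plain sock_release(); the same plumbing appears again further down, where __netlink_create() passes a kern flag through to sk_alloc(). The resulting shape, abridged from make_send_sock() above:

    struct socket *sock;
    int result;

    result = sock_create_kern(net, PF_INET, SOCK_DGRAM, IPPROTO_UDP,
                              &sock);
    if (result < 0)
            return ERR_PTR(result);

    result = set_mcast_if(sock->sk, ipvs->master_mcast_ifn);
    if (result < 0) {
            sock_release(sock);     /* one teardown primitive, all paths */
            return ERR_PTR(result);
    }
    return sock;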
 
@@ -1518,17 +1512,11 @@ static struct socket *make_receive_sock(struct net *net, int id)
        int result;
 
        /* First create a socket */
-       result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
+       result = sock_create_kern(net, PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
        if (result < 0) {
                pr_err("Error during creation of socket; terminating\n");
                return ERR_PTR(result);
        }
-       /*
-        * Kernel sockets that are a part of a namespace, should not
-        * hold a reference to a namespace in order to allow to stop it.
-        * After sk_change_net should be released using sk_release_kernel.
-        */
-       sk_change_net(sock->sk, net);
        /* it is equivalent to the REUSEADDR option in user-space */
        sock->sk->sk_reuse = SK_CAN_REUSE;
        result = sysctl_sync_sock_size(ipvs);
@@ -1554,7 +1542,7 @@ static struct socket *make_receive_sock(struct net *net, int id)
        return sock;
 
 error:
-       sk_release_kernel(sock->sk);
+       sock_release(sock);
        return ERR_PTR(result);
 }
 
@@ -1692,7 +1680,7 @@ done:
                ip_vs_sync_buff_release(sb);
 
        /* release the sending multicast socket */
-       sk_release_kernel(tinfo->sock->sk);
+       sock_release(tinfo->sock);
        kfree(tinfo);
 
        return 0;
@@ -1729,7 +1717,7 @@ static int sync_thread_backup(void *data)
        }
 
        /* release the sending multicast socket */
-       sk_release_kernel(tinfo->sock->sk);
+       sock_release(tinfo->sock);
        kfree(tinfo->buf);
        kfree(tinfo);
 
@@ -1854,11 +1842,11 @@ int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid)
        return 0;
 
 outsocket:
-       sk_release_kernel(sock->sk);
+       sock_release(sock);
 
 outtinfo:
        if (tinfo) {
-               sk_release_kernel(tinfo->sock->sk);
+               sock_release(tinfo->sock);
                kfree(tinfo->buf);
                kfree(tinfo);
        }
index 19986ec5f21addd110ee6deadc72398641b61998..bf66a8657a5f7c7e1a2d3adbde3a05245c598249 100644 (file)
@@ -364,13 +364,16 @@ err_unreach:
 #ifdef CONFIG_IP_VS_IPV6
 static struct dst_entry *
 __ip_vs_route_output_v6(struct net *net, struct in6_addr *daddr,
-                       struct in6_addr *ret_saddr, int do_xfrm)
+                       struct in6_addr *ret_saddr, int do_xfrm, int rt_mode)
 {
        struct dst_entry *dst;
        struct flowi6 fl6 = {
                .daddr = *daddr,
        };
 
+       if (rt_mode & IP_VS_RT_MODE_KNOWN_NH)
+               fl6.flowi6_flags = FLOWI_FLAG_KNOWN_NH;
+
        dst = ip6_route_output(net, NULL, &fl6);
        if (dst->error)
                goto out_err;
@@ -427,7 +430,7 @@ __ip_vs_get_out_rt_v6(int skb_af, struct sk_buff *skb, struct ip_vs_dest *dest,
                        }
                        dst = __ip_vs_route_output_v6(net, &dest->addr.in6,
                                                      &dest_dst->dst_saddr.in6,
-                                                     do_xfrm);
+                                                     do_xfrm, rt_mode);
                        if (!dst) {
                                __ip_vs_dst_set(dest, NULL, NULL, 0);
                                spin_unlock_bh(&dest->dst_lock);
@@ -435,7 +438,7 @@ __ip_vs_get_out_rt_v6(int skb_af, struct sk_buff *skb, struct ip_vs_dest *dest,
                                goto err_unreach;
                        }
                        rt = (struct rt6_info *) dst;
-                       cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
+                       cookie = rt6_get_cookie(rt);
                        __ip_vs_dst_set(dest, dest_dst, &rt->dst, cookie);
                        spin_unlock_bh(&dest->dst_lock);
                        IP_VS_DBG(10, "new dst %pI6, src %pI6, refcnt=%d\n",
@@ -446,7 +449,8 @@ __ip_vs_get_out_rt_v6(int skb_af, struct sk_buff *skb, struct ip_vs_dest *dest,
                        *ret_saddr = dest_dst->dst_saddr.in6;
        } else {
                noref = 0;
-               dst = __ip_vs_route_output_v6(net, daddr, ret_saddr, do_xfrm);
+               dst = __ip_vs_route_output_v6(net, daddr, ret_saddr, do_xfrm,
+                                             rt_mode);
                if (!dst)
                        goto err_unreach;
                rt = (struct rt6_info *) dst;
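Two independent route-layer changes meet in this function. First, rt_mode is plumbed into __ip_vs_route_output_v6() so that IP_VS_RT_MODE_KNOWN_NH can raise FLOWI_FLAG_KNOWN_NH on the flow; as far as the surrounding route rework suggests, the flag asks the lookup to honour the flow's daddr as the nexthop, which the direct-routing transmit path below needs, since a DR packet must reach the real server itself rather than whatever gateway the route would pick. Second, the cached-dst validity cookie now comes from the new rt6_get_cookie() helper instead of dereferencing rt->rt6i_node->fn_sernum by hand, because for per-cpu route copies the serial number lives on the parent route; relatedly, the DNAT-to-loopback checks switch from rt->rt6i_dst.addr to the connection's own daddr. The caching shape, as a fragment:

    /* Cache the route with a validity cookie; rt6_get_cookie()
     * hides where the fib6 serial number lives.
     */
    u32 cookie = rt6_get_cookie((struct rt6_info *)dst);

    __ip_vs_dst_set(dest, dest_dst, &rt->dst, cookie);
    /* On reuse, dst_check(&rt->dst, cookie) fails once routing
     * state has changed, and the cached entry is rebuilt.
     */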
@@ -781,7 +785,7 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 
        /* From world but DNAT to loopback address? */
        if (local && skb->dev && !(skb->dev->flags & IFF_LOOPBACK) &&
-           ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_LOOPBACK) {
+           ipv6_addr_type(&cp->daddr.in6) & IPV6_ADDR_LOOPBACK) {
                IP_VS_DBG_RL_PKT(1, AF_INET6, pp, skb, 0,
                                 "ip_vs_nat_xmit_v6(): "
                                 "stopping DNAT to loopback address");
@@ -1164,7 +1168,8 @@ ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
        local = __ip_vs_get_out_rt_v6(cp->af, skb, cp->dest, &cp->daddr.in6,
                                      NULL, ipvsh, 0,
                                      IP_VS_RT_MODE_LOCAL |
-                                     IP_VS_RT_MODE_NON_LOCAL);
+                                     IP_VS_RT_MODE_NON_LOCAL |
+                                     IP_VS_RT_MODE_KNOWN_NH);
        if (local < 0)
                goto tx_error;
        if (local) {
@@ -1346,7 +1351,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 
        /* From world but DNAT to loopback address? */
        if (local && skb->dev && !(skb->dev->flags & IFF_LOOPBACK) &&
-           ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_LOOPBACK) {
+           ipv6_addr_type(&cp->daddr.in6) & IPV6_ADDR_LOOPBACK) {
                IP_VS_DBG(1, "%s(): "
                          "stopping DNAT to loopback %pI6\n",
                          __func__, &cp->daddr.in6);
index 1d69f5b9748fd760630b998493d96ad84d998aba..9511af04dc814258ddc136bdaabe02fd053720fc 100644 (file)
@@ -779,8 +779,8 @@ static int callforward_do_filter(struct net *net,
                                   flowi6_to_flowi(&fl1), false)) {
                        if (!afinfo->route(net, (struct dst_entry **)&rt2,
                                           flowi6_to_flowi(&fl2), false)) {
-                               if (ipv6_addr_equal(rt6_nexthop(rt1),
-                                                   rt6_nexthop(rt2)) &&
+                               if (ipv6_addr_equal(rt6_nexthop(rt1, &fl1.daddr),
+                                                   rt6_nexthop(rt2, &fl2.daddr)) &&
                                    rt1->dst.dev == rt2->dst.dev)
                                        ret = 1;
                                dst_release(&rt2->dst);
index 34ded09317e715cc94b80ce8d918006bbe1f714b..4528f122bcd2ff79806709efdeb9ae0faded50cc 100644 (file)
@@ -399,6 +399,8 @@ static const struct nla_policy nft_table_policy[NFTA_TABLE_MAX + 1] = {
        [NFTA_TABLE_NAME]       = { .type = NLA_STRING,
                                    .len = NFT_TABLE_MAXNAMELEN - 1 },
        [NFTA_TABLE_FLAGS]      = { .type = NLA_U32 },
+       [NFTA_TABLE_DEV]        = { .type = NLA_STRING,
+                                   .len = IFNAMSIZ - 1 },
 };
 
 static int nf_tables_fill_table_info(struct sk_buff *skb, struct net *net,
@@ -423,6 +425,10 @@ static int nf_tables_fill_table_info(struct sk_buff *skb, struct net *net,
            nla_put_be32(skb, NFTA_TABLE_USE, htonl(table->use)))
                goto nla_put_failure;
 
+       if (table->dev &&
+           nla_put_string(skb, NFTA_TABLE_DEV, table->dev->name))
+               goto nla_put_failure;
+
        nlmsg_end(skb, nlh);
        return 0;
 
@@ -608,6 +614,11 @@ static int nf_tables_updtable(struct nft_ctx *ctx)
        if (flags == ctx->table->flags)
                return 0;
 
+       if ((ctx->afi->flags & NFT_AF_NEEDS_DEV) &&
+           ctx->nla[NFTA_TABLE_DEV] &&
+           nla_strcmp(ctx->nla[NFTA_TABLE_DEV], ctx->table->dev->name))
+               return -EOPNOTSUPP;
+
        trans = nft_trans_alloc(ctx, NFT_MSG_NEWTABLE,
                                sizeof(struct nft_trans_table));
        if (trans == NULL)
@@ -645,6 +656,7 @@ static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb,
        struct nft_table *table;
        struct net *net = sock_net(skb->sk);
        int family = nfmsg->nfgen_family;
+       struct net_device *dev = NULL;
        u32 flags = 0;
        struct nft_ctx ctx;
        int err;
@@ -679,30 +691,50 @@ static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb,
                        return -EINVAL;
        }
 
+       if (afi->flags & NFT_AF_NEEDS_DEV) {
+               char ifname[IFNAMSIZ];
+
+               if (!nla[NFTA_TABLE_DEV])
+                       return -EOPNOTSUPP;
+
+               nla_strlcpy(ifname, nla[NFTA_TABLE_DEV], IFNAMSIZ);
+               dev = dev_get_by_name(net, ifname);
+               if (!dev)
+                       return -ENOENT;
+       } else if (nla[NFTA_TABLE_DEV]) {
+               return -EOPNOTSUPP;
+       }
+
+       err = -EAFNOSUPPORT;
        if (!try_module_get(afi->owner))
-               return -EAFNOSUPPORT;
+               goto err1;
 
        err = -ENOMEM;
        table = kzalloc(sizeof(*table), GFP_KERNEL);
        if (table == NULL)
-               goto err1;
+               goto err2;
 
        nla_strlcpy(table->name, name, NFT_TABLE_MAXNAMELEN);
        INIT_LIST_HEAD(&table->chains);
        INIT_LIST_HEAD(&table->sets);
        table->flags = flags;
+       table->dev   = dev;
 
        nft_ctx_init(&ctx, skb, nlh, afi, table, NULL, nla);
        err = nft_trans_table_add(&ctx, NFT_MSG_NEWTABLE);
        if (err < 0)
-               goto err2;
+               goto err3;
 
        list_add_tail_rcu(&table->list, &afi->tables);
        return 0;
-err2:
+err3:
        kfree(table);
-err1:
+err2:
        module_put(afi->owner);
+err1:
+       if (dev != NULL)
+               dev_put(dev);
+
        return err;
 }
 
@@ -806,6 +838,9 @@ static void nf_tables_table_destroy(struct nft_ctx *ctx)
 {
        BUG_ON(ctx->table->use > 0);
 
+       if (ctx->table->dev)
+               dev_put(ctx->table->dev);
+
        kfree(ctx->table);
        module_put(ctx->afi->owner);
 }
@@ -1361,6 +1396,7 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
                        ops->priority   = priority;
                        ops->priv       = chain;
                        ops->hook       = afi->hooks[ops->hooknum];
+                       ops->dev        = table->dev;
                        if (hookfn)
                                ops->hook = hookfn;
                        if (afi->hook_ops_init)
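This is the nf_tables core half of the new netdev family: an af_info flagged NFT_AF_NEEDS_DEV requires its tables to name a device via NFTA_TABLE_DEV; the device is resolved and reference-counted at table creation, stored in table->dev, propagated into each chain's hook ops (ops->dev), echoed back in dumps, refused on table updates that try to change it, and released in nf_tables_table_destroy(). The err1/err2/err3 relabelling exists solely to keep the unwind order correct once dev_put() joins the error path. The ownership rule in miniature (a sketch with names abridged from the hunk, not a drop-in):

    static int newtable_sketch(struct net *net, const char *ifname)
    {
            struct net_device *dev;
            struct nft_table *table;

            dev = dev_get_by_name(net, ifname);     /* takes a reference */
            if (!dev)
                    return -ENOENT;

            table = kzalloc(sizeof(*table), GFP_KERNEL);
            if (!table) {
                    dev_put(dev);           /* unwind in reverse order */
                    return -ENOMEM;
            }

            table->dev = dev;       /* reference now owned by the table */
            return 0;
    }

    /* ...and in nf_tables_table_destroy():
     *         if (table->dev)
     *                 dev_put(table->dev);
     */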
diff --git a/net/netfilter/nf_tables_netdev.c b/net/netfilter/nf_tables_netdev.c
new file mode 100644 (file)
index 0000000..04cb170
--- /dev/null
@@ -0,0 +1,183 @@
+/*
+ * Copyright (c) 2015 Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <net/netfilter/nf_tables.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/netfilter/nf_tables_ipv4.h>
+#include <net/netfilter/nf_tables_ipv6.h>
+
+static inline void
+nft_netdev_set_pktinfo_ipv4(struct nft_pktinfo *pkt,
+                           const struct nf_hook_ops *ops, struct sk_buff *skb,
+                           const struct nf_hook_state *state)
+{
+       struct iphdr *iph, _iph;
+       u32 len, thoff;
+
+       nft_set_pktinfo(pkt, ops, skb, state);
+
+       iph = skb_header_pointer(skb, skb_network_offset(skb), sizeof(*iph),
+                                &_iph);
+       if (!iph)
+               return;
+
+       iph = ip_hdr(skb);
+       if (iph->ihl < 5 || iph->version != 4)
+               return;
+
+       len = ntohs(iph->tot_len);
+       thoff = iph->ihl * 4;
+       if (skb->len < len)
+               return;
+       else if (len < thoff)
+               return;
+
+       pkt->tprot = iph->protocol;
+       pkt->xt.thoff = thoff;
+       pkt->xt.fragoff = ntohs(iph->frag_off) & IP_OFFSET;
+}
+
+static inline void
+__nft_netdev_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
+                             const struct nf_hook_ops *ops,
+                             struct sk_buff *skb,
+                             const struct nf_hook_state *state)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+       struct ipv6hdr *ip6h, _ip6h;
+       unsigned int thoff = 0;
+       unsigned short frag_off;
+       int protohdr;
+       u32 pkt_len;
+
+       ip6h = skb_header_pointer(skb, skb_network_offset(skb), sizeof(*ip6h),
+                                 &_ip6h);
+       if (!ip6h)
+               return;
+
+       if (ip6h->version != 6)
+               return;
+
+       pkt_len = ntohs(ip6h->payload_len);
+       if (pkt_len + sizeof(*ip6h) > skb->len)
+               return;
+
+       protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, NULL);
+       if (protohdr < 0)
+               return;
+
+       pkt->tprot = protohdr;
+       pkt->xt.thoff = thoff;
+       pkt->xt.fragoff = frag_off;
+#endif
+}
+
+static inline void nft_netdev_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
+                                              const struct nf_hook_ops *ops,
+                                              struct sk_buff *skb,
+                                              const struct nf_hook_state *state)
+{
+       nft_set_pktinfo(pkt, ops, skb, state);
+       __nft_netdev_set_pktinfo_ipv6(pkt, ops, skb, state);
+}
+
+static unsigned int
+nft_do_chain_netdev(const struct nf_hook_ops *ops, struct sk_buff *skb,
+                   const struct nf_hook_state *state)
+{
+       struct nft_pktinfo pkt;
+
+       switch (eth_hdr(skb)->h_proto) {
+       case htons(ETH_P_IP):
+               nft_netdev_set_pktinfo_ipv4(&pkt, ops, skb, state);
+               break;
+       case htons(ETH_P_IPV6):
+               nft_netdev_set_pktinfo_ipv6(&pkt, ops, skb, state);
+               break;
+       default:
+               nft_set_pktinfo(&pkt, ops, skb, state);
+               break;
+       }
+
+       return nft_do_chain(&pkt, ops);
+}
+
+static struct nft_af_info nft_af_netdev __read_mostly = {
+       .family         = NFPROTO_NETDEV,
+       .nhooks         = NF_NETDEV_NUMHOOKS,
+       .owner          = THIS_MODULE,
+       .flags          = NFT_AF_NEEDS_DEV,
+       .nops           = 1,
+       .hooks          = {
+               [NF_NETDEV_INGRESS]     = nft_do_chain_netdev,
+       },
+};
+
+static int nf_tables_netdev_init_net(struct net *net)
+{
+       net->nft.netdev = kmalloc(sizeof(struct nft_af_info), GFP_KERNEL);
+       if (net->nft.netdev == NULL)
+               return -ENOMEM;
+
+       memcpy(net->nft.netdev, &nft_af_netdev, sizeof(nft_af_netdev));
+
+       if (nft_register_afinfo(net, net->nft.netdev) < 0)
+               goto err;
+
+       return 0;
+err:
+       kfree(net->nft.netdev);
+       return -ENOMEM;
+}
+
+static void nf_tables_netdev_exit_net(struct net *net)
+{
+       nft_unregister_afinfo(net->nft.netdev);
+       kfree(net->nft.netdev);
+}
+
+static struct pernet_operations nf_tables_netdev_net_ops = {
+       .init   = nf_tables_netdev_init_net,
+       .exit   = nf_tables_netdev_exit_net,
+};
+
+static const struct nf_chain_type nft_filter_chain_netdev = {
+       .name           = "filter",
+       .type           = NFT_CHAIN_T_DEFAULT,
+       .family         = NFPROTO_NETDEV,
+       .owner          = THIS_MODULE,
+       .hook_mask      = (1 << NF_NETDEV_INGRESS),
+};
+
+static int __init nf_tables_netdev_init(void)
+{
+       int ret;
+
+       nft_register_chain_type(&nft_filter_chain_netdev);
+       ret = register_pernet_subsys(&nf_tables_netdev_net_ops);
+       if (ret < 0)
+               nft_unregister_chain_type(&nft_filter_chain_netdev);
+
+       return ret;
+}
+
+static void __exit nf_tables_netdev_exit(void)
+{
+       unregister_pernet_subsys(&nf_tables_netdev_net_ops);
+       nft_unregister_chain_type(&nft_filter_chain_netdev);
+}
+
+module_init(nf_tables_netdev_init);
+module_exit(nf_tables_netdev_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_ALIAS_NFT_FAMILY(5); /* NFPROTO_NETDEV */
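The module wiring above is the standard pernet pattern: nft_af_netdev is a read-only template, each namespace registers its own mutable copy at init and unregisters it symmetrically at exit, and the h_proto switch in nft_do_chain_netdev() derives the pktinfo family from the Ethernet header because ingress hooks fire before the stack's L3 handlers run. One nit worth flagging: as written, nf_tables_netdev_init_net() reports -ENOMEM even when nft_register_afinfo() fails for some other reason. A variant that preserves the real error, swapping the kmalloc+memcpy pair for kmemdup() and assuming nft_register_afinfo() returns a negative errno on failure (its callers already imply as much):

    static int nf_tables_netdev_init_net(struct net *net)
    {
            int err;

            net->nft.netdev = kmemdup(&nft_af_netdev,
                                      sizeof(nft_af_netdev), GFP_KERNEL);
            if (net->nft.netdev == NULL)
                    return -ENOMEM;

            err = nft_register_afinfo(net, net->nft.netdev);
            if (err < 0)
                    kfree(net->nft.netdev);
            return err;
    }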
index 11c7682fa0ea1fbd13c90a38126f6a77efcac537..22a5ac76683e2b119e1d5bae44aa7ba07d9e0fea 100644 (file)
@@ -1257,7 +1257,7 @@ static int seq_show(struct seq_file *s, void *v)
                   inst->copy_mode, inst->copy_range,
                   inst->queue_dropped, inst->queue_user_dropped,
                   inst->id_sequence, 1);
-       return seq_has_overflowed(s);
+       return 0;
 }
 
 static const struct seq_operations nfqnl_seq_ops = {
index 7f29cfc76349f56d7408f4519213a98d564ab135..66def315eb5619a26b64da8a1a5525af7d4a5e3f 100644 (file)
@@ -161,6 +161,7 @@ nft_target_set_tgchk_param(struct xt_tgchk_param *par,
                par->hook_mask = 0;
        }
        par->family     = ctx->afi->family;
+       par->nft_compat = true;
 }
 
 static void target_compat_from_user(struct xt_target *t, void *in, void *out)
@@ -377,6 +378,7 @@ nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx,
                par->hook_mask = 0;
        }
        par->family     = ctx->afi->family;
+       par->nft_compat = true;
 }
 
 static void match_compat_from_user(struct xt_match *m, void *in, void *out)
index 51a459c3c6490cb7ce5080c5aa147920077dc9d5..83032464a4bdd7109b2d4016a687af822029564b 100644 (file)
@@ -947,11 +947,9 @@ static int xt_table_seq_show(struct seq_file *seq, void *v)
 {
        struct xt_table *table = list_entry(v, struct xt_table, list);
 
-       if (strlen(table->name)) {
+       if (*table->name)
                seq_printf(seq, "%s\n", table->name);
-               return seq_has_overflowed(seq);
-       } else
-               return 0;
+       return 0;
 }
 
 static const struct seq_operations xt_table_seq_ops = {
@@ -1087,10 +1085,8 @@ static int xt_match_seq_show(struct seq_file *seq, void *v)
                if (trav->curr == trav->head)
                        return 0;
                match = list_entry(trav->curr, struct xt_match, list);
-               if (*match->name == '\0')
-                       return 0;
-               seq_printf(seq, "%s\n", match->name);
-               return seq_has_overflowed(seq);
+               if (*match->name)
+                       seq_printf(seq, "%s\n", match->name);
        }
        return 0;
 }
@@ -1142,10 +1138,8 @@ static int xt_target_seq_show(struct seq_file *seq, void *v)
                if (trav->curr == trav->head)
                        return 0;
                target = list_entry(trav->curr, struct xt_target, list);
-               if (*target->name == '\0')
-                       return 0;
-               seq_printf(seq, "%s\n", target->name);
-               return seq_has_overflowed(seq);
+               if (*target->name)
+                       seq_printf(seq, "%s\n", target->name);
        }
        return 0;
 }
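These proc handlers follow the seq_file rework that landed ahead of this merge: the printing helpers no longer return a value, and buffer overflow is detected by the seq_file core itself, which simply reruns ->show() with a larger buffer. Returning seq_has_overflowed() from a show callback is therefore redundant; print and return 0. Post-rework shape, using the xt_table case above:

    static int xt_table_seq_show(struct seq_file *seq, void *v)
    {
            struct xt_table *table = list_entry(v, struct xt_table, list);

            if (*table->name)       /* skip unnamed placeholder entries */
                    seq_printf(seq, "%s\n", table->name);
            return 0;       /* overflow retry is the core's job */
    }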
index e762de5ee89bfa480b1789ce164cb024ac35e91e..8c3190e2fc6abad6394ba5498762ba0502c34d58 100644 (file)
@@ -277,6 +277,9 @@ static int tcpmss_tg4_check(const struct xt_tgchk_param *par)
                        "FORWARD, OUTPUT and POSTROUTING hooks\n");
                return -EINVAL;
        }
+       if (par->nft_compat)
+               return 0;
+
        xt_ematch_foreach(ematch, e)
                if (find_syn_match(ematch))
                        return 0;
@@ -299,6 +302,9 @@ static int tcpmss_tg6_check(const struct xt_tgchk_param *par)
                        "FORWARD, OUTPUT and POSTROUTING hooks\n");
                return -EINVAL;
        }
+       if (par->nft_compat)
+               return 0;
+
        xt_ematch_foreach(ematch, e)
                if (find_syn_match(ematch))
                        return 0;
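par->nft_compat tells an xt extension it is being instantiated through the nft compat layer rather than from an iptables ruleset, and the two tcpmss hunks show why the distinction matters: its checkentry walks the rule's ematch list to insist on a tcp --syn match, but an nft rule carries no ematch list to walk, so the legacy-only validation is skipped when the flag is set. The gated check, abridged from the surrounding file:

    static int tcpmss_tg4_check(const struct xt_tgchk_param *par)
    {
            const struct ipt_entry *e = par->entryinfo;
            const struct xt_entry_match *ematch;

            if (par->nft_compat)
                    return 0;       /* no iptables ematch list under nft */

            xt_ematch_foreach(ematch, e)
                    if (find_syn_match(ematch))
                            return 0;
            pr_info("Only works on TCP SYN packets\n");
            return -EINVAL;
    }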
index 292934d234822a4a0b0088e030b316de05eee9bc..a747eb475b68e174db6a0f3ebe41de8ec5ca7d61 100644 (file)
@@ -152,6 +152,7 @@ tee_tg_route6(struct sk_buff *skb, const struct xt_tee_tginfo *info)
        fl6.daddr = info->gw.in6;
        fl6.flowlabel = ((iph->flow_lbl[0] & 0xF) << 16) |
                           (iph->flow_lbl[1] << 8) | iph->flow_lbl[2];
+       fl6.flowi6_flags = FLOWI_FLAG_KNOWN_NH;
        dst = ip6_route_output(net, NULL, &fl6);
        if (dst->error) {
                dst_release(dst);
index fab6eea1bf382704b07449d88deaece0aa9d7d7e..5b4743cc0436105f51cca8cc4ec71cf4a0ed730e 100644 (file)
@@ -73,7 +73,7 @@ static u32 match_lookup_rt6(struct net *net, const struct net_device *dev,
 
        if (dev == NULL && rt->rt6i_flags & RTF_LOCAL)
                ret |= XT_ADDRTYPE_LOCAL;
-       if (rt->rt6i_flags & RTF_ANYCAST)
+       if (ipv6_anycast_destination((struct dst_entry *)rt, addr))
                ret |= XT_ADDRTYPE_ANYCAST;
 
        dst_release(&rt->dst);
index 23345238711b515805a63687b774cdb4b6d788f4..ebd41dc501e5033924e2555854c7e51b9b963c60 100644 (file)
@@ -23,6 +23,7 @@ MODULE_ALIAS("ipt_mark");
 MODULE_ALIAS("ip6t_mark");
 MODULE_ALIAS("ipt_MARK");
 MODULE_ALIAS("ip6t_MARK");
+MODULE_ALIAS("arpt_MARK");
 
 static unsigned int
 mark_tg(struct sk_buff *skb, const struct xt_action_param *par)
index 89045982ec9468e01c81f6d86f50d508981e4593..b103e96277169becaa78eeec6ee3c53a2083a307 100644 (file)
@@ -15,8 +15,9 @@
 #include <linux/skbuff.h>
 
 #include <linux/netfilter/x_tables.h>
-#include <linux/netfilter/xt_set.h>
+#include <linux/netfilter/ipset/ip_set.h>
 #include <linux/netfilter/ipset/ip_set_timeout.h>
+#include <uapi/linux/netfilter/xt_set.h>
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
index bf6e76643f7876d8dee4df261baf077aad837be3..69d67c300b80de2963e1ec23621b13ac4f66377f 100644 (file)
@@ -76,17 +76,18 @@ struct listeners {
 };
 
 /* state bits */
-#define NETLINK_CONGESTED      0x0
+#define NETLINK_S_CONGESTED            0x0
 
 /* flags */
-#define NETLINK_KERNEL_SOCKET  0x1
-#define NETLINK_RECV_PKTINFO   0x2
-#define NETLINK_BROADCAST_SEND_ERROR   0x4
-#define NETLINK_RECV_NO_ENOBUFS        0x8
+#define NETLINK_F_KERNEL_SOCKET                0x1
+#define NETLINK_F_RECV_PKTINFO         0x2
+#define NETLINK_F_BROADCAST_SEND_ERROR 0x4
+#define NETLINK_F_RECV_NO_ENOBUFS      0x8
+#define NETLINK_F_LISTEN_ALL_NSID      0x10
 
 static inline int netlink_is_kernel(struct sock *sk)
 {
-       return nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET;
+       return nlk_sk(sk)->flags & NETLINK_F_KERNEL_SOCKET;
 }
 
 struct netlink_table *nl_table __read_mostly;
@@ -256,8 +257,9 @@ static void netlink_overrun(struct sock *sk)
 {
        struct netlink_sock *nlk = nlk_sk(sk);
 
-       if (!(nlk->flags & NETLINK_RECV_NO_ENOBUFS)) {
-               if (!test_and_set_bit(NETLINK_CONGESTED, &nlk_sk(sk)->state)) {
+       if (!(nlk->flags & NETLINK_F_RECV_NO_ENOBUFS)) {
+               if (!test_and_set_bit(NETLINK_S_CONGESTED,
+                                     &nlk_sk(sk)->state)) {
                        sk->sk_err = ENOBUFS;
                        sk->sk_error_report(sk);
                }
@@ -270,8 +272,8 @@ static void netlink_rcv_wake(struct sock *sk)
        struct netlink_sock *nlk = nlk_sk(sk);
 
        if (skb_queue_empty(&sk->sk_receive_queue))
-               clear_bit(NETLINK_CONGESTED, &nlk->state);
-       if (!test_bit(NETLINK_CONGESTED, &nlk->state))
+               clear_bit(NETLINK_S_CONGESTED, &nlk->state);
+       if (!test_bit(NETLINK_S_CONGESTED, &nlk->state))
                wake_up_interruptible(&nlk->wait);
 }
 
@@ -1118,14 +1120,15 @@ static struct proto netlink_proto = {
 };
 
 static int __netlink_create(struct net *net, struct socket *sock,
-                           struct mutex *cb_mutex, int protocol)
+                           struct mutex *cb_mutex, int protocol,
+                           int kern)
 {
        struct sock *sk;
        struct netlink_sock *nlk;
 
        sock->ops = &netlink_ops;
 
-       sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto);
+       sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto, kern);
        if (!sk)
                return -ENOMEM;
 
@@ -1187,7 +1190,7 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol,
        if (err < 0)
                goto out;
 
-       err = __netlink_create(net, sock, cb_mutex, protocol);
+       err = __netlink_create(net, sock, cb_mutex, protocol, kern);
        if (err < 0)
                goto out_module;
 
@@ -1297,20 +1300,24 @@ static int netlink_autobind(struct socket *sock)
        struct netlink_table *table = &nl_table[sk->sk_protocol];
        s32 portid = task_tgid_vnr(current);
        int err;
-       static s32 rover = -4097;
+       s32 rover = -4096;
+       bool ok;
 
 retry:
        cond_resched();
        rcu_read_lock();
-       if (__netlink_lookup(table, portid, net)) {
+       ok = !__netlink_lookup(table, portid, net);
+       rcu_read_unlock();
+       if (!ok) {
                /* Bind collision, search negative portid values. */
-               portid = rover--;
-               if (rover > -4097)
+               if (rover == -4096)
+                       /* rover will be in range [S32_MIN, -4097] */
+                       rover = S32_MIN + prandom_u32_max(-4096 - S32_MIN);
+               else if (rover >= -4096)
                        rover = -4097;
-               rcu_read_unlock();
+               portid = rover--;
                goto retry;
        }
-       rcu_read_unlock();
 
        err = netlink_insert(sk, portid);
        if (err == -EADDRINUSE)
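The autobind rework does two things. It stops holding the RCU read lock across the retry loop (which calls cond_resched()), computing the collision verdict first and unlocking unconditionally. And it makes the fallback portid unpredictable: the old static rover was shared by every caller in every namespace and counted down from -4097, so colliding sockets received guessable ids; the new per-call rover starts at the -4096 sentinel and, on the first collision, jumps to a random point in the negative range. The arithmetic, spelled out:

    /* prandom_u32_max(n) returns a value in [0, n - 1]. Here
     * n = -4096 - S32_MIN = 0x7FFFF000, so
     *
     *     S32_MIN + prandom_u32_max(-4096 - S32_MIN)
     *
     * lies in [S32_MIN, -4097]: always negative, never clashing
     * with the pid-derived ids or the -4096 sentinel itself.
     */
    s32 rover = S32_MIN + prandom_u32_max(-4096 - S32_MIN);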
@@ -1657,7 +1664,7 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
        nlk = nlk_sk(sk);
 
        if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
-            test_bit(NETLINK_CONGESTED, &nlk->state)) &&
+            test_bit(NETLINK_S_CONGESTED, &nlk->state)) &&
            !netlink_skb_is_mmaped(skb)) {
                DECLARE_WAITQUEUE(wait, current);
                if (!*timeo) {
@@ -1672,7 +1679,7 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
                add_wait_queue(&nlk->wait, &wait);
 
                if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
-                    test_bit(NETLINK_CONGESTED, &nlk->state)) &&
+                    test_bit(NETLINK_S_CONGESTED, &nlk->state)) &&
                    !sock_flag(sk, SOCK_DEAD))
                        *timeo = schedule_timeout(*timeo);
 
@@ -1896,7 +1903,7 @@ static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
        struct netlink_sock *nlk = nlk_sk(sk);
 
        if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
-           !test_bit(NETLINK_CONGESTED, &nlk->state)) {
+           !test_bit(NETLINK_S_CONGESTED, &nlk->state)) {
                netlink_skb_set_owner_r(skb, sk);
                __netlink_sendskb(sk, skb);
                return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
@@ -1932,8 +1939,17 @@ static void do_one_broadcast(struct sock *sk,
            !test_bit(p->group - 1, nlk->groups))
                return;
 
-       if (!net_eq(sock_net(sk), p->net))
-               return;
+       if (!net_eq(sock_net(sk), p->net)) {
+               if (!(nlk->flags & NETLINK_F_LISTEN_ALL_NSID))
+                       return;
+
+               if (!peernet_has_id(sock_net(sk), p->net))
+                       return;
+
+               if (!file_ns_capable(sk->sk_socket->file, p->net->user_ns,
+                                    CAP_NET_BROADCAST))
+                       return;
+       }
 
        if (p->failure) {
                netlink_overrun(sk);
@@ -1957,23 +1973,33 @@ static void do_one_broadcast(struct sock *sk,
                netlink_overrun(sk);
                /* Clone failed. Notify ALL listeners. */
                p->failure = 1;
-               if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
+               if (nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR)
                        p->delivery_failure = 1;
-       } else if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) {
+               goto out;
+       }
+       if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) {
                kfree_skb(p->skb2);
                p->skb2 = NULL;
-       } else if (sk_filter(sk, p->skb2)) {
+               goto out;
+       }
+       if (sk_filter(sk, p->skb2)) {
                kfree_skb(p->skb2);
                p->skb2 = NULL;
-       } else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
+               goto out;
+       }
+       NETLINK_CB(p->skb2).nsid = peernet2id(sock_net(sk), p->net);
+       NETLINK_CB(p->skb2).nsid_is_set = true;
+       val = netlink_broadcast_deliver(sk, p->skb2);
+       if (val < 0) {
                netlink_overrun(sk);
-               if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
+               if (nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR)
                        p->delivery_failure = 1;
        } else {
                p->congested |= val;
                p->delivered = 1;
                p->skb2 = NULL;
        }
+out:
        sock_put(sk);
 }
 
@@ -2058,7 +2084,7 @@ static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
            !test_bit(p->group - 1, nlk->groups))
                goto out;
 
-       if (p->code == ENOBUFS && nlk->flags & NETLINK_RECV_NO_ENOBUFS) {
+       if (p->code == ENOBUFS && nlk->flags & NETLINK_F_RECV_NO_ENOBUFS) {
                ret = 1;
                goto out;
        }
@@ -2077,7 +2103,7 @@ out:
  * @code: error code, must be negative (as usual in kernelspace)
  *
  * This function returns the number of broadcast listeners that have set the
- * NETLINK_RECV_NO_ENOBUFS socket option.
+ * NETLINK_NO_ENOBUFS socket option.
  */
 int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
 {
@@ -2137,9 +2163,9 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
        switch (optname) {
        case NETLINK_PKTINFO:
                if (val)
-                       nlk->flags |= NETLINK_RECV_PKTINFO;
+                       nlk->flags |= NETLINK_F_RECV_PKTINFO;
                else
-                       nlk->flags &= ~NETLINK_RECV_PKTINFO;
+                       nlk->flags &= ~NETLINK_F_RECV_PKTINFO;
                err = 0;
                break;
        case NETLINK_ADD_MEMBERSHIP:
@@ -2168,18 +2194,18 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
        }
        case NETLINK_BROADCAST_ERROR:
                if (val)
-                       nlk->flags |= NETLINK_BROADCAST_SEND_ERROR;
+                       nlk->flags |= NETLINK_F_BROADCAST_SEND_ERROR;
                else
-                       nlk->flags &= ~NETLINK_BROADCAST_SEND_ERROR;
+                       nlk->flags &= ~NETLINK_F_BROADCAST_SEND_ERROR;
                err = 0;
                break;
        case NETLINK_NO_ENOBUFS:
                if (val) {
-                       nlk->flags |= NETLINK_RECV_NO_ENOBUFS;
-                       clear_bit(NETLINK_CONGESTED, &nlk->state);
+                       nlk->flags |= NETLINK_F_RECV_NO_ENOBUFS;
+                       clear_bit(NETLINK_S_CONGESTED, &nlk->state);
                        wake_up_interruptible(&nlk->wait);
                } else {
-                       nlk->flags &= ~NETLINK_RECV_NO_ENOBUFS;
+                       nlk->flags &= ~NETLINK_F_RECV_NO_ENOBUFS;
                }
                err = 0;
                break;
@@ -2202,6 +2228,16 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
                break;
        }
 #endif /* CONFIG_NETLINK_MMAP */
+       case NETLINK_LISTEN_ALL_NSID:
+               if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_BROADCAST))
+                       return -EPERM;
+
+               if (val)
+                       nlk->flags |= NETLINK_F_LISTEN_ALL_NSID;
+               else
+                       nlk->flags &= ~NETLINK_F_LISTEN_ALL_NSID;
+               err = 0;
+               break;
        default:
                err = -ENOPROTOOPT;
        }
@@ -2228,7 +2264,7 @@ static int netlink_getsockopt(struct socket *sock, int level, int optname,
                if (len < sizeof(int))
                        return -EINVAL;
                len = sizeof(int);
-               val = nlk->flags & NETLINK_RECV_PKTINFO ? 1 : 0;
+               val = nlk->flags & NETLINK_F_RECV_PKTINFO ? 1 : 0;
                if (put_user(len, optlen) ||
                    put_user(val, optval))
                        return -EFAULT;
@@ -2238,7 +2274,7 @@ static int netlink_getsockopt(struct socket *sock, int level, int optname,
                if (len < sizeof(int))
                        return -EINVAL;
                len = sizeof(int);
-               val = nlk->flags & NETLINK_BROADCAST_SEND_ERROR ? 1 : 0;
+               val = nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR ? 1 : 0;
                if (put_user(len, optlen) ||
                    put_user(val, optval))
                        return -EFAULT;
@@ -2248,7 +2284,7 @@ static int netlink_getsockopt(struct socket *sock, int level, int optname,
                if (len < sizeof(int))
                        return -EINVAL;
                len = sizeof(int);
-               val = nlk->flags & NETLINK_RECV_NO_ENOBUFS ? 1 : 0;
+               val = nlk->flags & NETLINK_F_RECV_NO_ENOBUFS ? 1 : 0;
                if (put_user(len, optlen) ||
                    put_user(val, optval))
                        return -EFAULT;
@@ -2268,6 +2304,16 @@ static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
        put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info);
 }
 
+static void netlink_cmsg_listen_all_nsid(struct sock *sk, struct msghdr *msg,
+                                        struct sk_buff *skb)
+{
+       if (!NETLINK_CB(skb).nsid_is_set)
+               return;
+
+       put_cmsg(msg, SOL_NETLINK, NETLINK_LISTEN_ALL_NSID, sizeof(int),
+                &NETLINK_CB(skb).nsid);
+}
+
 static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
 {
        struct sock *sk = sock->sk;
@@ -2419,8 +2465,10 @@ static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
                msg->msg_namelen = sizeof(*addr);
        }
 
-       if (nlk->flags & NETLINK_RECV_PKTINFO)
+       if (nlk->flags & NETLINK_F_RECV_PKTINFO)
                netlink_cmsg_recv_pktinfo(msg, skb);
+       if (nlk->flags & NETLINK_F_LISTEN_ALL_NSID)
+               netlink_cmsg_listen_all_nsid(sk, msg, skb);
 
        memset(&scm, 0, sizeof(scm));
        scm.creds = *NETLINK_CREDS(skb);
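
A hedged userspace sketch of the new option wired up above: enable NETLINK_LISTEN_ALL_NSID, then read the sender's nsid from the control message that netlink_cmsg_listen_all_nsid() attaches. The fallback #define values are assumptions for older headers; check the current uapi headers.

#include <stdio.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/netlink.h>

#ifndef SOL_NETLINK
#define SOL_NETLINK 270			/* from <linux/socket.h> */
#endif
#ifndef NETLINK_LISTEN_ALL_NSID
#define NETLINK_LISTEN_ALL_NSID 8	/* assumed uapi value */
#endif

static void recv_with_nsid(int fd)
{
	char buf[8192], cbuf[CMSG_SPACE(sizeof(int))];
	struct iovec iov = { buf, sizeof(buf) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg;
	int one = 1;

	/* needs CAP_NET_BROADCAST, as enforced by the setsockopt handler */
	setsockopt(fd, SOL_NETLINK, NETLINK_LISTEN_ALL_NSID, &one, sizeof(one));
	if (recvmsg(fd, &msg, 0) <= 0)
		return;
	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
		if (cmsg->cmsg_level == SOL_NETLINK &&
		    cmsg->cmsg_type == NETLINK_LISTEN_ALL_NSID)
			printf("sender nsid: %d\n", *(int *)CMSG_DATA(cmsg));
}
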
@@ -2474,17 +2522,10 @@ __netlink_kernel_create(struct net *net, int unit, struct module *module,
        if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
                return NULL;
 
-       /*
-        * We have to just have a reference on the net from sk, but don't
-        * get_net it. Besides, we cannot get and then put the net here.
-        * So we create one inside init_net and the move it to net.
-        */
-
-       if (__netlink_create(&init_net, sock, cb_mutex, unit) < 0)
+       if (__netlink_create(net, sock, cb_mutex, unit, 1) < 0)
                goto out_sock_release_nosk;
 
        sk = sock->sk;
-       sk_change_net(sk, net);
 
        if (!cfg || cfg->groups < 32)
                groups = 32;
@@ -2503,7 +2544,7 @@ __netlink_kernel_create(struct net *net, int unit, struct module *module,
                goto out_sock_release;
 
        nlk = nlk_sk(sk);
-       nlk->flags |= NETLINK_KERNEL_SOCKET;
+       nlk->flags |= NETLINK_F_KERNEL_SOCKET;
 
        netlink_table_grab();
        if (!nl_table[unit].registered) {
@@ -2540,7 +2581,10 @@ EXPORT_SYMBOL(__netlink_kernel_create);
 void
 netlink_kernel_release(struct sock *sk)
 {
-       sk_release_kernel(sk);
+       if (sk == NULL || sk->sk_socket == NULL)
+               return;
+
+       sock_release(sk->sk_socket);
 }
 EXPORT_SYMBOL(netlink_kernel_release);
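
For context, a kernel-side sketch (not compilable on its own) of the convention behind the new sk_alloc() argument threaded through this patch:

/* kern == 1 marks a kernel-internal socket, so sk_alloc() skips taking
 * a reference on the netns; that is what makes it safe to create the
 * sock directly in 'net' and delete the old init_net + sk_change_net()
 * workaround above. */
sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto, 1 /* kern */);
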
 
index b987fd56c3c52935d85a0f8f710814a7f9f5798d..ed212ffc1d9d3159ccbf4b8ac5681606b8446069 100644 (file)
@@ -433,7 +433,7 @@ static int nr_create(struct net *net, struct socket *sock, int protocol,
        if (sock->type != SOCK_SEQPACKET || protocol != 0)
                return -ESOCKTNOSUPPORT;
 
-       sk = sk_alloc(net, PF_NETROM, GFP_ATOMIC, &nr_proto);
+       sk = sk_alloc(net, PF_NETROM, GFP_ATOMIC, &nr_proto, kern);
        if (sk  == NULL)
                return -ENOMEM;
 
@@ -476,7 +476,7 @@ static struct sock *nr_make_new(struct sock *osk)
        if (osk->sk_type != SOCK_SEQPACKET)
                return NULL;
 
-       sk = sk_alloc(sock_net(osk), PF_NETROM, GFP_ATOMIC, osk->sk_prot);
+       sk = sk_alloc(sock_net(osk), PF_NETROM, GFP_ATOMIC, osk->sk_prot, 0);
        if (sk == NULL)
                return NULL;
 
index 2277276f52bcd2f375f0319476b1cfecdf6b2d76..54e40fa47822673d760862a67fa1955ebbd0143b 100644 (file)
@@ -40,7 +40,7 @@ static int nfc_sock_create(struct net *net, struct socket *sock, int proto,
 
        read_lock(&proto_tab_lock);
        if (proto_tab[proto] && try_module_get(proto_tab[proto]->owner)) {
-               rc = proto_tab[proto]->create(net, sock, proto_tab[proto]);
+               rc = proto_tab[proto]->create(net, sock, proto_tab[proto], kern);
                module_put(proto_tab[proto]->owner);
        }
        read_unlock(&proto_tab_lock);
index de1789e3cc82fc137221535fa3bdf5fa77120c77..1f68724d44d3bad684382afc04b38a01bff9bd3b 100644 (file)
@@ -225,7 +225,7 @@ void nfc_llcp_send_to_raw_sock(struct nfc_llcp_local *local,
                               struct sk_buff *skb, u8 direction);
 
 /* Sock API */
-struct sock *nfc_llcp_sock_alloc(struct socket *sock, int type, gfp_t gfp);
+struct sock *nfc_llcp_sock_alloc(struct socket *sock, int type, gfp_t gfp, int kern);
 void nfc_llcp_sock_free(struct nfc_llcp_sock *sock);
 void nfc_llcp_accept_unlink(struct sock *sk);
 void nfc_llcp_accept_enqueue(struct sock *parent, struct sock *sk);
index b18f07ccb504c0f8c2aea36be80069a0376def76..98876274a1eea22b709403dde2c5702b1387a0be 100644 (file)
@@ -934,7 +934,7 @@ static void nfc_llcp_recv_connect(struct nfc_llcp_local *local,
                sock->ssap = ssap;
        }
 
-       new_sk = nfc_llcp_sock_alloc(NULL, parent->sk_type, GFP_ATOMIC);
+       new_sk = nfc_llcp_sock_alloc(NULL, parent->sk_type, GFP_ATOMIC, 0);
        if (new_sk == NULL) {
                reason = LLCP_DM_REJ;
                release_sock(&sock->sk);
index 9578bd6a4f3e2efd565c4626a718f8ecc6f967c4..b7de0da46acddc1eaaf041dedfbc9e2cf3697559 100644 (file)
@@ -942,12 +942,12 @@ static void llcp_sock_destruct(struct sock *sk)
        }
 }
 
-struct sock *nfc_llcp_sock_alloc(struct socket *sock, int type, gfp_t gfp)
+struct sock *nfc_llcp_sock_alloc(struct socket *sock, int type, gfp_t gfp, int kern)
 {
        struct sock *sk;
        struct nfc_llcp_sock *llcp_sock;
 
-       sk = sk_alloc(&init_net, PF_NFC, gfp, &llcp_sock_proto);
+       sk = sk_alloc(&init_net, PF_NFC, gfp, &llcp_sock_proto, kern);
        if (!sk)
                return NULL;
 
@@ -993,7 +993,7 @@ void nfc_llcp_sock_free(struct nfc_llcp_sock *sock)
 }
 
 static int llcp_sock_create(struct net *net, struct socket *sock,
-                           const struct nfc_protocol *nfc_proto)
+                           const struct nfc_protocol *nfc_proto, int kern)
 {
        struct sock *sk;
 
@@ -1009,7 +1009,7 @@ static int llcp_sock_create(struct net *net, struct socket *sock,
        else
                sock->ops = &llcp_sock_ops;
 
-       sk = nfc_llcp_sock_alloc(sock, sock->type, GFP_ATOMIC);
+       sk = nfc_llcp_sock_alloc(sock, sock->type, GFP_ATOMIC, kern);
        if (sk == NULL)
                return -ENOMEM;
 
index a8ce80b47720a6425c857a660895418b1c00d84f..5c93e8412a2639f3d1c77cf7ee5e15a95991a858 100644 (file)
@@ -30,7 +30,7 @@ struct nfc_protocol {
        struct proto *proto;
        struct module *owner;
        int (*create)(struct net *net, struct socket *sock,
-                     const struct nfc_protocol *nfc_proto);
+                     const struct nfc_protocol *nfc_proto, int kern);
 };
 
 struct nfc_rawsock {
index 82b4e8024778019b80f6b59ba9a4242d015d4b2f..e9a91488fe3d388d90f24b3a2e645bbf71cdb60a 100644 (file)
@@ -334,7 +334,7 @@ static void rawsock_destruct(struct sock *sk)
 }
 
 static int rawsock_create(struct net *net, struct socket *sock,
-                         const struct nfc_protocol *nfc_proto)
+                         const struct nfc_protocol *nfc_proto, int kern)
 {
        struct sock *sk;
 
@@ -348,7 +348,7 @@ static int rawsock_create(struct net *net, struct socket *sock,
        else
                sock->ops = &rawsock_ops;
 
-       sk = sk_alloc(net, PF_NFC, GFP_ATOMIC, nfc_proto->proto);
+       sk = sk_alloc(net, PF_NFC, GFP_ATOMIC, nfc_proto->proto, kern);
        if (!sk)
                return -ENOMEM;
 
index ed6b0f8dd1bbdfa0876c3425b24c83cf4aa315a6..15840401a2ce584356a3fff7390bb11cbb8f7752 100644 (file)
@@ -59,7 +59,7 @@ config OPENVSWITCH_VXLAN
 config OPENVSWITCH_GENEVE
        tristate "Open vSwitch Geneve tunneling support"
        depends on OPENVSWITCH
-       depends on GENEVE
+       depends on GENEVE_CORE
        default OPENVSWITCH
        ---help---
          If you say Y here, then Open vSwitch will be able to create geneve vports.
index b491c1c296fe8f954872320b7aef4db00dfb6f4a..8a8c0b8b4f63a4bd8e5ff776250189558e6fcb1e 100644 (file)
@@ -608,17 +608,16 @@ static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
 }
 
 static int output_userspace(struct datapath *dp, struct sk_buff *skb,
-                           struct sw_flow_key *key, const struct nlattr *attr)
+                           struct sw_flow_key *key, const struct nlattr *attr,
+                           const struct nlattr *actions, int actions_len)
 {
        struct ovs_tunnel_info info;
        struct dp_upcall_info upcall;
        const struct nlattr *a;
        int rem;
 
+       memset(&upcall, 0, sizeof(upcall));
        upcall.cmd = OVS_PACKET_CMD_ACTION;
-       upcall.userdata = NULL;
-       upcall.portid = 0;
-       upcall.egress_tun_info = NULL;
 
        for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
                 a = nla_next(a, &rem)) {
@@ -647,6 +646,13 @@ static int output_userspace(struct datapath *dp, struct sk_buff *skb,
                        break;
                }
 
+               case OVS_USERSPACE_ATTR_ACTIONS: {
+                       /* Include actions. */
+                       upcall.actions = actions;
+                       upcall.actions_len = actions_len;
+                       break;
+               }
+
                } /* End of switch. */
        }
 
@@ -654,7 +660,8 @@ static int output_userspace(struct datapath *dp, struct sk_buff *skb,
 }
 
 static int sample(struct datapath *dp, struct sk_buff *skb,
-                 struct sw_flow_key *key, const struct nlattr *attr)
+                 struct sw_flow_key *key, const struct nlattr *attr,
+                 const struct nlattr *actions, int actions_len)
 {
        const struct nlattr *acts_list = NULL;
        const struct nlattr *a;
@@ -688,7 +695,7 @@ static int sample(struct datapath *dp, struct sk_buff *skb,
         */
        if (likely(nla_type(a) == OVS_ACTION_ATTR_USERSPACE &&
                   nla_is_last(a, rem)))
-               return output_userspace(dp, skb, key, a);
+               return output_userspace(dp, skb, key, a, actions, actions_len);
 
        skb = skb_clone(skb, GFP_ATOMIC);
        if (!skb)
@@ -872,7 +879,7 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
                        break;
 
                case OVS_ACTION_ATTR_USERSPACE:
-                       output_userspace(dp, skb, key, a);
+                       output_userspace(dp, skb, key, a, attr, len);
                        break;
 
                case OVS_ACTION_ATTR_HASH:
@@ -916,7 +923,7 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
                        break;
 
                case OVS_ACTION_ATTR_SAMPLE:
-                       err = sample(dp, skb, key, a);
+                       err = sample(dp, skb, key, a, attr, len);
                        break;
                }
 
index 096c6276e6b92680542ed1204bf396470c08caf5..ff8c4a4c160986bf206f4751860f9767e71246bf 100644 (file)
@@ -272,10 +272,9 @@ void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key)
                struct dp_upcall_info upcall;
                int error;
 
+               memset(&upcall, 0, sizeof(upcall));
                upcall.cmd = OVS_PACKET_CMD_MISS;
-               upcall.userdata = NULL;
                upcall.portid = ovs_vport_find_upcall_portid(p, skb);
-               upcall.egress_tun_info = NULL;
                error = ovs_dp_upcall(dp, skb, key, &upcall);
                if (unlikely(error))
                        kfree_skb(skb);
@@ -397,6 +396,10 @@ static size_t upcall_msg_size(const struct dp_upcall_info *upcall_info,
        if (upcall_info->egress_tun_info)
                size += nla_total_size(ovs_tun_key_attr_size());
 
+       /* OVS_PACKET_ATTR_ACTIONS */
+       if (upcall_info->actions_len)
+               size += nla_total_size(upcall_info->actions_len);
+
        return size;
 }
 
@@ -478,6 +481,17 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
                nla_nest_end(user_skb, nla);
        }
 
+       if (upcall_info->actions_len) {
+               nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_ACTIONS);
+               err = ovs_nla_put_actions(upcall_info->actions,
+                                         upcall_info->actions_len,
+                                         user_skb);
+               if (!err)
+                       nla_nest_end(user_skb, nla);
+               else
+                       nla_nest_cancel(user_skb, nla);
+       }
+
        /* Only reserve room for attribute header, packet data is added
         * in skb_zerocopy() */
        if (!(nla = nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, 0))) {
@@ -545,7 +559,7 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
        /* Normally, setting the skb 'protocol' field would be handled by a
         * call to eth_type_trans(), but it assumes there's a sending
         * device, which we may not have. */
-       if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
+       if (eth_proto_is_802_3(eth->h_proto))
                packet->protocol = eth->h_proto;
        else
                packet->protocol = htons(ETH_P_802_2);
index 4ec4a480b147030c3e2938c4ba612d4e7d5e10be..cd691e935e08c76b75e8ed90d7ec79ce9875ee3c 100644 (file)
@@ -116,6 +116,8 @@ struct ovs_skb_cb {
 struct dp_upcall_info {
        const struct ovs_tunnel_info *egress_tun_info;
        const struct nlattr *userdata;
+       const struct nlattr *actions;
+       int actions_len;
        u32 portid;
        u8 cmd;
 };
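
A brief note on the memset() conversions in the callers above, with a minimal sketch of the pattern: zero-filling the whole struct once keeps every optional field, including the two added here, at a safe default without touching each call site again.

struct dp_upcall_info upcall;

memset(&upcall, 0, sizeof(upcall));	/* actions/actions_len et al. start "absent" */
upcall.cmd = OVS_PACKET_CMD_MISS;	/* callers then set only what they use */
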
index 2dacc7b5af23a14b827785fdbc3eaf5bd6a26883..bc7b0aba994adf6f8ec8cfc2b3278b46d19621db 100644 (file)
@@ -332,7 +332,7 @@ static __be16 parse_ethertype(struct sk_buff *skb)
        proto = *(__be16 *) skb->data;
        __skb_pull(skb, sizeof(__be16));
 
-       if (ntohs(proto) >= ETH_P_802_3_MIN)
+       if (eth_proto_is_802_3(proto))
                return proto;
 
        if (skb->len < sizeof(struct llc_snap_hdr))
@@ -349,7 +349,7 @@ static __be16 parse_ethertype(struct sk_buff *skb)
 
        __skb_pull(skb, sizeof(struct llc_snap_hdr));
 
-       if (ntohs(llc->ethertype) >= ETH_P_802_3_MIN)
+       if (eth_proto_is_802_3(llc->ethertype))
                return llc->ethertype;
 
        return htons(ETH_P_802_2);
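
The repeated ntohs(proto) >= ETH_P_802_3_MIN tests collapse into eth_proto_is_802_3(); a standalone sketch of the equivalent check (the in-kernel helper also optimizes the byte-order handling, which this sketch ignores):

#include <stdio.h>
#include <arpa/inet.h>
#include <linux/if_ether.h>

/* userspace stand-in for eth_proto_is_802_3(): EtherType values start
 * at ETH_P_802_3_MIN (0x0600); anything below is an 802.3 length field */
static int proto_is_802_3(unsigned short be_proto)
{
	return ntohs(be_proto) >= ETH_P_802_3_MIN;
}

int main(void)
{
	printf("%d %d\n",
	       proto_is_802_3(htons(ETH_P_IP)), /* 1: 0x0800 is an EtherType */
	       proto_is_802_3(htons(0x0101)));  /* 0: 802.3 frame length */
	return 0;
}
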
index c691b1a1eee0ae3aeedf389a3288272417e9bcbb..624e41c4267fe0206fe94ede37e3160d80497abf 100644 (file)
@@ -816,7 +816,7 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
                if (is_mask) {
                        /* Always exact match EtherType. */
                        eth_type = htons(0xffff);
-               } else if (ntohs(eth_type) < ETH_P_802_3_MIN) {
+               } else if (!eth_proto_is_802_3(eth_type)) {
                        OVS_NLERR(log, "EtherType %x is less than min %x",
                                  ntohs(eth_type), ETH_P_802_3_MIN);
                        return -EINVAL;
index bf02fd5808c964eaefd12a2012f2f15500927c99..208c576bd1b683d909f1d9c2e2c09de610057f55 100644 (file)
@@ -46,11 +46,6 @@ static inline struct geneve_port *geneve_vport(const struct vport *vport)
        return vport_priv(vport);
 }
 
-static inline struct genevehdr *geneve_hdr(const struct sk_buff *skb)
-{
-       return (struct genevehdr *)(udp_hdr(skb) + 1);
-}
-
 /* Convert 64 bit tunnel ID to 24 bit VNI. */
 static void tunnel_id_to_vni(__be64 tun_id, __u8 *vni)
 {
index b5989c6ee5513904127a8ffec31d09589094c8f6..fd5164139bf08358ba649505fb72c33b1f9411d7 100644 (file)
@@ -1234,27 +1234,81 @@ static void packet_free_pending(struct packet_sock *po)
        free_percpu(po->tx_ring.pending_refcnt);
 }
 
-static bool packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
+#define ROOM_POW_OFF   2
+#define ROOM_NONE      0x0
+#define ROOM_LOW       0x1
+#define ROOM_NORMAL    0x2
+
+static bool __tpacket_has_room(struct packet_sock *po, int pow_off)
+{
+       int idx, len;
+
+       len = po->rx_ring.frame_max + 1;
+       idx = po->rx_ring.head;
+       if (pow_off)
+               idx += len >> pow_off;
+       if (idx >= len)
+               idx -= len;
+       return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
+}
+
+static bool __tpacket_v3_has_room(struct packet_sock *po, int pow_off)
+{
+       int idx, len;
+
+       len = po->rx_ring.prb_bdqc.knum_blocks;
+       idx = po->rx_ring.prb_bdqc.kactive_blk_num;
+       if (pow_off)
+               idx += len >> pow_off;
+       if (idx >= len)
+               idx -= len;
+       return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
+}
+
+static int __packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
 {
        struct sock *sk = &po->sk;
-       bool has_room;
+       int ret = ROOM_NONE;
+
+       if (po->prot_hook.func != tpacket_rcv) {
+               int avail = sk->sk_rcvbuf - atomic_read(&sk->sk_rmem_alloc)
+                                         - (skb ? skb->truesize : 0);
+               if (avail > (sk->sk_rcvbuf >> ROOM_POW_OFF))
+                       return ROOM_NORMAL;
+               else if (avail > 0)
+                       return ROOM_LOW;
+               else
+                       return ROOM_NONE;
+       }
+
+       if (po->tp_version == TPACKET_V3) {
+               if (__tpacket_v3_has_room(po, ROOM_POW_OFF))
+                       ret = ROOM_NORMAL;
+               else if (__tpacket_v3_has_room(po, 0))
+                       ret = ROOM_LOW;
+       } else {
+               if (__tpacket_has_room(po, ROOM_POW_OFF))
+                       ret = ROOM_NORMAL;
+               else if (__tpacket_has_room(po, 0))
+                       ret = ROOM_LOW;
+       }
 
-       if (po->prot_hook.func != tpacket_rcv)
-               return (atomic_read(&sk->sk_rmem_alloc) + skb->truesize)
-                       <= sk->sk_rcvbuf;
+       return ret;
+}
 
-       spin_lock(&sk->sk_receive_queue.lock);
-       if (po->tp_version == TPACKET_V3)
-               has_room = prb_lookup_block(po, &po->rx_ring,
-                                           po->rx_ring.prb_bdqc.kactive_blk_num,
-                                           TP_STATUS_KERNEL);
-       else
-               has_room = packet_lookup_frame(po, &po->rx_ring,
-                                              po->rx_ring.head,
-                                              TP_STATUS_KERNEL);
-       spin_unlock(&sk->sk_receive_queue.lock);
+static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
+{
+       int ret;
+       bool has_room;
+
+       spin_lock_bh(&po->sk.sk_receive_queue.lock);
+       ret = __packet_rcv_has_room(po, skb);
+       has_room = ret == ROOM_NORMAL;
+       if (po->pressure == has_room)
+               po->pressure = !has_room;
+       spin_unlock_bh(&po->sk.sk_receive_queue.lock);
 
-       return has_room;
+       return ret;
 }
 
 static void packet_sock_destruct(struct sock *sk)
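
The ROOM_POW_OFF probe above peeks len >> pow_off slots ahead of the ring head, wrapping once if needed, so ROOM_NORMAL means at least a quarter of the ring is still free. A standalone sketch of the index arithmetic:

#include <stdio.h>

/* mirror of the index math in __tpacket_has_room() */
static int probe_idx(int head, int len, int pow_off)
{
	int idx = head;

	if (pow_off)
		idx += len >> pow_off;
	if (idx >= len)
		idx -= len;
	return idx;
}

int main(void)
{
	/* 16-slot ring, head at 14: ROOM_NORMAL checks slot (14 + 4) % 16 = 2 */
	printf("%d\n", probe_idx(14, 16, 2));
	return 0;
}
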
@@ -1282,6 +1336,20 @@ static int fanout_rr_next(struct packet_fanout *f, unsigned int num)
        return x;
 }
 
+static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
+{
+       u32 rxhash;
+       int i, count = 0;
+
+       rxhash = skb_get_hash(skb);
+       for (i = 0; i < ROLLOVER_HLEN; i++)
+               if (po->rollover->history[i] == rxhash)
+                       count++;
+
+       po->rollover->history[prandom_u32() % ROLLOVER_HLEN] = rxhash;
+       return count > (ROLLOVER_HLEN >> 1);
+}
+
 static unsigned int fanout_demux_hash(struct packet_fanout *f,
                                      struct sk_buff *skb,
                                      unsigned int num)
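
fanout_flow_is_huge() above samples recent rxhashes into a small history array; a standalone sketch of the majority test, with ROLLOVER_HLEN assumed to be 16 (a 64-byte cache line of u32s):

#include <stdio.h>
#include <stdlib.h>

#define HLEN 16	/* L1_CACHE_BYTES / sizeof(u32) with 64-byte lines */

static unsigned int history[HLEN];

/* a flow counts as "huge" when its hash already fills more than half of
 * the sampled slots; one random slot is then overwritten with this hash */
static int flow_is_huge(unsigned int rxhash)
{
	int i, count = 0;

	for (i = 0; i < HLEN; i++)
		if (history[i] == rxhash)
			count++;
	history[rand() % HLEN] = rxhash;
	return count > (HLEN / 2);
}

int main(void)
{
	int i, huge = 0;

	for (i = 0; i < 32; i++)
		huge = flow_is_huge(0xabcd);	/* one flow dominates */
	printf("huge=%d\n", huge);
	return 0;
}
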
@@ -1318,22 +1386,40 @@ static unsigned int fanout_demux_rnd(struct packet_fanout *f,
 
 static unsigned int fanout_demux_rollover(struct packet_fanout *f,
                                          struct sk_buff *skb,
-                                         unsigned int idx, unsigned int skip,
+                                         unsigned int idx, bool try_self,
                                          unsigned int num)
 {
-       unsigned int i, j;
+       struct packet_sock *po, *po_next, *po_skip = NULL;
+       unsigned int i, j, room = ROOM_NONE;
 
-       i = j = min_t(int, f->next[idx], num - 1);
+       po = pkt_sk(f->arr[idx]);
+
+       if (try_self) {
+               room = packet_rcv_has_room(po, skb);
+               if (room == ROOM_NORMAL ||
+                   (room == ROOM_LOW && !fanout_flow_is_huge(po, skb)))
+                       return idx;
+               po_skip = po;
+       }
+
+       i = j = min_t(int, po->rollover->sock, num - 1);
        do {
-               if (i != skip && packet_rcv_has_room(pkt_sk(f->arr[i]), skb)) {
+               po_next = pkt_sk(f->arr[i]);
+               if (po_next != po_skip && !po_next->pressure &&
+                   packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) {
                        if (i != j)
-                               f->next[idx] = i;
+                               po->rollover->sock = i;
+                       atomic_long_inc(&po->rollover->num);
+                       if (room == ROOM_LOW)
+                               atomic_long_inc(&po->rollover->num_huge);
                        return i;
                }
+
                if (++i == num)
                        i = 0;
        } while (i != j);
 
+       atomic_long_inc(&po->rollover->num_failed);
        return idx;
 }
 
@@ -1386,17 +1472,14 @@ static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
                idx = fanout_demux_qm(f, skb, num);
                break;
        case PACKET_FANOUT_ROLLOVER:
-               idx = fanout_demux_rollover(f, skb, 0, (unsigned int) -1, num);
+               idx = fanout_demux_rollover(f, skb, 0, false, num);
                break;
        }
 
-       po = pkt_sk(f->arr[idx]);
-       if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER) &&
-           unlikely(!packet_rcv_has_room(po, skb))) {
-               idx = fanout_demux_rollover(f, skb, idx, idx, num);
-               po = pkt_sk(f->arr[idx]);
-       }
+       if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
+               idx = fanout_demux_rollover(f, skb, idx, true, num);
 
+       po = pkt_sk(f->arr[idx]);
        return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
 }
 
@@ -1467,6 +1550,16 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
        if (po->fanout)
                return -EALREADY;
 
+       if (type == PACKET_FANOUT_ROLLOVER ||
+           (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) {
+               po->rollover = kzalloc(sizeof(*po->rollover), GFP_KERNEL);
+               if (!po->rollover)
+                       return -ENOMEM;
+               atomic_long_set(&po->rollover->num, 0);
+               atomic_long_set(&po->rollover->num_huge, 0);
+               atomic_long_set(&po->rollover->num_failed, 0);
+       }
+
        mutex_lock(&fanout_mutex);
        match = NULL;
        list_for_each_entry(f, &fanout_list, list) {
@@ -1515,6 +1608,10 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
        }
 out:
        mutex_unlock(&fanout_mutex);
+       if (err) {
+               kfree(po->rollover);
+               po->rollover = NULL;
+       }
        return err;
 }
 
@@ -1536,6 +1633,8 @@ static void fanout_release(struct sock *sk)
                kfree(f);
        }
        mutex_unlock(&fanout_mutex);
+
+       kfree(po->rollover);
 }
 
 static const struct proto_ops packet_ops;
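
A hedged userspace sketch of what makes fanout_add() above allocate the rollover state: the group type and flags travel in the upper 16 bits of the PACKET_FANOUT option value.

#include <sys/socket.h>
#include <linux/if_packet.h>

/* join fanout group 7 (an arbitrary example id) in hash mode with
 * rollover as the fallback */
static int join_fanout(int fd)
{
	unsigned int arg = 7 |	/* group id in the low 16 bits */
		((PACKET_FANOUT_HASH | PACKET_FANOUT_FLAG_ROLLOVER) << 16);

	return setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &arg, sizeof(arg));
}
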
@@ -2835,7 +2934,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
        sock->state = SS_UNCONNECTED;
 
        err = -ENOBUFS;
-       sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto);
+       sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern);
        if (sk == NULL)
                goto out;
 
@@ -2865,6 +2964,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
 
        spin_lock_init(&po->bind_lock);
        mutex_init(&po->pg_vec_lock);
+       po->rollover = NULL;
        po->prot_hook.func = packet_rcv;
 
        if (sock->type == SOCK_PACKET)
@@ -2942,6 +3042,9 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
        if (skb == NULL)
                goto out;
 
+       if (pkt_sk(sk)->pressure)
+               packet_rcv_has_room(pkt_sk(sk), NULL);
+
        if (pkt_sk(sk)->has_vnet_hdr) {
                struct virtio_net_hdr vnet_hdr = { 0 };
 
@@ -3485,6 +3588,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
        struct packet_sock *po = pkt_sk(sk);
        void *data = &val;
        union tpacket_stats_u st;
+       struct tpacket_rollover_stats rstats;
 
        if (level != SOL_PACKET)
                return -ENOPROTOOPT;
@@ -3560,6 +3664,15 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
                        ((u32)po->fanout->flags << 24)) :
                       0);
                break;
+       case PACKET_ROLLOVER_STATS:
+               if (!po->rollover)
+                       return -EINVAL;
+               rstats.tp_all = atomic_long_read(&po->rollover->num);
+               rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
+               rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
+               data = &rstats;
+               lv = sizeof(rstats);
+               break;
        case PACKET_TX_HAS_OFF:
                val = po->tp_tx_has_off;
                break;
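
A hedged sketch of reading the three counters maintained in struct packet_rollover; the tpacket_rollover_stats layout and the PACKET_ROLLOVER_STATS constant are assumed to match the uapi <linux/if_packet.h> change that accompanies this code.

#include <stdio.h>
#include <sys/socket.h>
#include <linux/if_packet.h>

static void print_rollover_stats(int fd)
{
	struct tpacket_rollover_stats rs;
	socklen_t len = sizeof(rs);

	if (getsockopt(fd, SOL_PACKET, PACKET_ROLLOVER_STATS, &rs, &len) == 0)
		printf("all=%llu huge=%llu failed=%llu\n",
		       (unsigned long long)rs.tp_all,
		       (unsigned long long)rs.tp_huge,
		       (unsigned long long)rs.tp_failed);
}
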
@@ -3697,6 +3810,8 @@ static unsigned int packet_poll(struct file *file, struct socket *sock,
                        TP_STATUS_KERNEL))
                        mask |= POLLIN | POLLRDNORM;
        }
+       if (po->pressure && __packet_rcv_has_room(po, NULL) == ROOM_NORMAL)
+               po->pressure = 0;
        spin_unlock_bh(&sk->sk_receive_queue.lock);
        spin_lock_bh(&sk->sk_write_queue.lock);
        if (po->tx_ring.pg_vec) {
index fe6e20caea1d9bcd3711b3ad29a8de2ae40cd1bc..c035d263c1e8d119267633971920106b3bf627f3 100644 (file)
@@ -82,12 +82,20 @@ struct packet_fanout {
        atomic_t                rr_cur;
        struct list_head        list;
        struct sock             *arr[PACKET_FANOUT_MAX];
-       int                     next[PACKET_FANOUT_MAX];
        spinlock_t              lock;
        atomic_t                sk_ref;
        struct packet_type      prot_hook ____cacheline_aligned_in_smp;
 };
 
+struct packet_rollover {
+       int                     sock;
+       atomic_long_t           num;
+       atomic_long_t           num_huge;
+       atomic_long_t           num_failed;
+#define ROLLOVER_HLEN  (L1_CACHE_BYTES / sizeof(u32))
+       u32                     history[ROLLOVER_HLEN] ____cacheline_aligned;
+} ____cacheline_aligned_in_smp;
+
 struct packet_sock {
        /* struct sock has to be the first member of packet_sock */
        struct sock             sk;
@@ -102,8 +110,10 @@ struct packet_sock {
                                auxdata:1,
                                origdev:1,
                                has_vnet_hdr:1;
+       int                     pressure;
        int                     ifindex;        /* bound device         */
        __be16                  num;
+       struct packet_rollover  *rollover;
        struct packet_mclist    *mclist;
        atomic_t                mapped;
        enum tpacket_versions   tp_version;
index 32ab87d3482864279ed768c30a965a31f9ffc561..10d42f3220ab469b3e23050c99af72cb39c81452 100644 (file)
@@ -97,7 +97,7 @@ static int pn_socket_create(struct net *net, struct socket *sock, int protocol,
                goto out;
        }
 
-       sk = sk_alloc(net, PF_PHONET, GFP_KERNEL, pnp->prot);
+       sk = sk_alloc(net, PF_PHONET, GFP_KERNEL, pnp->prot, kern);
        if (sk == NULL) {
                err = -ENOMEM;
                goto out;
index 6de2aeb98a1fc99fa7b75430f048060fac6c191b..850a86cde0b3f6eab5b7aa09f4e6ffa66ccd6ed6 100644 (file)
@@ -845,7 +845,7 @@ static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp)
        }
 
        /* Create a new to-be-accepted sock */
-       newsk = sk_alloc(sock_net(sk), PF_PHONET, GFP_KERNEL, sk->sk_prot);
+       newsk = sk_alloc(sock_net(sk), PF_PHONET, GFP_KERNEL, sk->sk_prot, 0);
        if (!newsk) {
                pep_reject_conn(sk, skb, PN_PIPE_ERR_OVERLOAD, GFP_KERNEL);
                err = -ENOBUFS;
index 10443377fb9d8f5b5cb928647fa58c03001a072b..2ad9032372b2d3a050137451f37dff32f790ba09 100644 (file)
@@ -270,6 +270,28 @@ static int rds_cong_monitor(struct rds_sock *rs, char __user *optval,
        return ret;
 }
 
+static int rds_set_transport(struct rds_sock *rs, char __user *optval,
+                            int optlen)
+{
+       int t_type;
+
+       if (rs->rs_transport)
+               return -EOPNOTSUPP; /* previously attached to transport */
+
+       if (optlen != sizeof(int))
+               return -EINVAL;
+
+       if (copy_from_user(&t_type, (int __user *)optval, sizeof(t_type)))
+               return -EFAULT;
+
+       if (t_type < 0 || t_type >= RDS_TRANS_COUNT)
+               return -EINVAL;
+
+       rs->rs_transport = rds_trans_get(t_type);
+
+       return rs->rs_transport ? 0 : -ENOPROTOOPT;
+}
+
 static int rds_setsockopt(struct socket *sock, int level, int optname,
                          char __user *optval, unsigned int optlen)
 {
@@ -300,6 +322,11 @@ static int rds_setsockopt(struct socket *sock, int level, int optname,
        case RDS_CONG_MONITOR:
                ret = rds_cong_monitor(rs, optval, optlen);
                break;
+       case SO_RDS_TRANSPORT:
+               lock_sock(sock->sk);
+               ret = rds_set_transport(rs, optval, optlen);
+               release_sock(sock->sk);
+               break;
        default:
                ret = -ENOPROTOOPT;
        }
@@ -312,6 +339,7 @@ static int rds_getsockopt(struct socket *sock, int level, int optname,
 {
        struct rds_sock *rs = rds_sk_to_rs(sock->sk);
        int ret = -ENOPROTOOPT, len;
+       int trans;
 
        if (level != SOL_RDS)
                goto out;
@@ -337,6 +365,19 @@ static int rds_getsockopt(struct socket *sock, int level, int optname,
                else
                        ret = 0;
                break;
+       case SO_RDS_TRANSPORT:
+               if (len < sizeof(int)) {
+                       ret = -EINVAL;
+                       break;
+               }
+               trans = (rs->rs_transport ? rs->rs_transport->t_type :
+                        RDS_TRANS_NONE); /* unbound */
+               if (put_user(trans, (int __user *)optval) ||
+                   put_user(sizeof(int), optlen))
+                       ret = -EFAULT;
+               else
+                       ret = 0;
+               break;
        default:
                break;
        }
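
A hedged userspace sketch of the new option pair above; SOL_RDS, SO_RDS_TRANSPORT and the RDS_TRANS_* values are assumed to come from the uapi <linux/rds.h> this series moves them into. The transport must be pinned before bind(), and a second attempt fails with EOPNOTSUPP.

#include <unistd.h>
#include <sys/socket.h>
#include <linux/rds.h>

static int rds_socket_on_transport(int t_type)	/* e.g. RDS_TRANS_TCP */
{
	int fd = socket(PF_RDS, SOCK_SEQPACKET, 0);

	if (fd < 0)
		return -1;
	if (setsockopt(fd, SOL_RDS, SO_RDS_TRANSPORT,
		       &t_type, sizeof(t_type)) < 0) {
		close(fd);	/* ENOPROTOOPT: that transport is not loaded */
		return -1;
	}
	return fd;
}
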
@@ -440,7 +481,7 @@ static int rds_create(struct net *net, struct socket *sock, int protocol,
        if (sock->type != SOCK_SEQPACKET || protocol)
                return -ESOCKTNOSUPPORT;
 
-       sk = sk_alloc(net, AF_RDS, GFP_ATOMIC, &rds_proto);
+       sk = sk_alloc(net, AF_RDS, GFP_ATOMIC, &rds_proto, kern);
        if (!sk)
                return -ENOMEM;
 
index a2e6562da751f79bd9524c7e434f1d1d02d3b4fe..4ebd29c128b63f7c9519a421128351b5e502853a 100644 (file)
@@ -181,6 +181,10 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
        if (ret)
                goto out;
 
+       if (rs->rs_transport) { /* previously bound */
+               ret = 0;
+               goto out;
+       }
        trans = rds_trans_get_preferred(sin->sin_addr.s_addr);
        if (!trans) {
                ret = -EADDRNOTAVAIL;
index 0d41155a2258cbbd16e19171c3daa376e3a83877..a33fb4ad3535b57d305c0e5dae0e9f6bd8415104 100644 (file)
@@ -408,11 +408,6 @@ struct rds_notifier {
  *                 should try hard not to block.
  */
 
-#define RDS_TRANS_IB   0
-#define RDS_TRANS_IWARP        1
-#define RDS_TRANS_TCP  2
-#define RDS_TRANS_COUNT        3
-
 struct rds_transport {
        char                    t_name[TRANSNAMSIZ];
        struct list_head        t_item;
@@ -803,6 +798,7 @@ struct rds_transport *rds_trans_get_preferred(__be32 addr);
 void rds_trans_put(struct rds_transport *trans);
 unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter,
                                       unsigned int avail);
+struct rds_transport *rds_trans_get(int t_type);
 int rds_trans_init(void);
 void rds_trans_exit(void);
 
index 7f2ac4fec3678b28715b95094c6346bcc49333e1..8b4a6cd2c3a78f0a4c7dbbf89fbe1bd6156aeb55 100644 (file)
@@ -101,6 +101,27 @@ struct rds_transport *rds_trans_get_preferred(__be32 addr)
        return ret;
 }
 
+struct rds_transport *rds_trans_get(int t_type)
+{
+       struct rds_transport *ret = NULL;
+       struct rds_transport *trans;
+       unsigned int i;
+
+       down_read(&rds_trans_sem);
+       for (i = 0; i < RDS_TRANS_COUNT; i++) {
+               trans = transports[i];
+
+               if (trans && trans->t_type == t_type &&
+                   (!trans->t_owner || try_module_get(trans->t_owner))) {
+                       ret = trans;
+                       break;
+               }
+       }
+       up_read(&rds_trans_sem);
+
+       return ret;
+}
+
 /*
  * This returns the number of stats entries in the snapshot and only
  * copies them using the iter if there is enough space for them.  The
index fa7cd792791cbbd5234de865a53330daedb834db..f12149a29cb19b1b508b30528dc3bfd032799622 100644 (file)
@@ -794,7 +794,8 @@ void rfkill_resume_polling(struct rfkill *rfkill)
 }
 EXPORT_SYMBOL(rfkill_resume_polling);
 
-static int rfkill_suspend(struct device *dev, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int rfkill_suspend(struct device *dev)
 {
        struct rfkill *rfkill = to_rfkill(dev);
 
@@ -818,13 +819,18 @@ static int rfkill_resume(struct device *dev)
        return 0;
 }
 
+static SIMPLE_DEV_PM_OPS(rfkill_pm_ops, rfkill_suspend, rfkill_resume);
+#define RFKILL_PM_OPS (&rfkill_pm_ops)
+#else
+#define RFKILL_PM_OPS NULL
+#endif
+
 static struct class rfkill_class = {
        .name           = "rfkill",
        .dev_release    = rfkill_release,
        .dev_groups     = rfkill_dev_groups,
        .dev_uevent     = rfkill_dev_uevent,
-       .suspend        = rfkill_suspend,
-       .resume         = rfkill_resume,
+       .pm             = RFKILL_PM_OPS,
 };
 
 bool rfkill_blocked(struct rfkill *rfkill)
index d978f2f46ff35e0181e2a833e3bfab2b6d58221e..d5d58d9195524f36b03bdac0125148cb7110901a 100644 (file)
@@ -112,21 +112,17 @@ static int rfkill_gpio_probe(struct platform_device *pdev)
 
        rfkill->clk = devm_clk_get(&pdev->dev, NULL);
 
-       gpio = devm_gpiod_get(&pdev->dev, "reset");
-       if (!IS_ERR(gpio)) {
-               ret = gpiod_direction_output(gpio, 0);
-               if (ret)
-                       return ret;
-               rfkill->reset_gpio = gpio;
-       }
+       gpio = devm_gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_LOW);
+       if (IS_ERR(gpio))
+               return PTR_ERR(gpio);
 
-       gpio = devm_gpiod_get(&pdev->dev, "shutdown");
-       if (!IS_ERR(gpio)) {
-               ret = gpiod_direction_output(gpio, 0);
-               if (ret)
-                       return ret;
-               rfkill->shutdown_gpio = gpio;
-       }
+       rfkill->reset_gpio = gpio;
+
+       gpio = devm_gpiod_get_optional(&pdev->dev, "shutdown", GPIOD_OUT_LOW);
+       if (IS_ERR(gpio))
+               return PTR_ERR(gpio);
+
+       rfkill->shutdown_gpio = gpio;
 
        /* Make sure at least one of the GPIOs is defined and that
         * a name is specified for this instance
index 8ae603069a1a1706982dc0b7affd5443cb912308..36dbc2da366180b871db29e2d1af324a15d8d6e0 100644 (file)
@@ -520,7 +520,7 @@ static int rose_create(struct net *net, struct socket *sock, int protocol,
        if (sock->type != SOCK_SEQPACKET || protocol != 0)
                return -ESOCKTNOSUPPORT;
 
-       sk = sk_alloc(net, PF_ROSE, GFP_ATOMIC, &rose_proto);
+       sk = sk_alloc(net, PF_ROSE, GFP_ATOMIC, &rose_proto, kern);
        if (sk == NULL)
                return -ENOMEM;
 
@@ -559,7 +559,7 @@ static struct sock *rose_make_new(struct sock *osk)
        if (osk->sk_type != SOCK_SEQPACKET)
                return NULL;
 
-       sk = sk_alloc(sock_net(osk), PF_ROSE, GFP_ATOMIC, &rose_proto);
+       sk = sk_alloc(sock_net(osk), PF_ROSE, GFP_ATOMIC, &rose_proto, 0);
        if (sk == NULL)
                return NULL;
 
index 0095b9a0b779ca9fcc212f2c495acce399ec1550..25d60ed15284a56b1a2f6b9bc8c680f0c9f567c0 100644 (file)
@@ -632,7 +632,7 @@ static int rxrpc_create(struct net *net, struct socket *sock, int protocol,
        sock->ops = &rxrpc_rpc_ops;
        sock->state = SS_UNCONNECTED;
 
-       sk = sk_alloc(net, PF_RXRPC, GFP_KERNEL, &rxrpc_proto);
+       sk = sk_alloc(net, PF_RXRPC, GFP_KERNEL, &rxrpc_proto, kern);
        if (!sk)
                return -ENOMEM;
 
index ca904ed5400a11bd08e47fea56d0caeb30f0a442..78483b4602bf729229c160b839ca723fe60242f3 100644 (file)
@@ -73,8 +73,8 @@ static int rxrpc_create_local(struct rxrpc_local *local)
        _enter("%p{%d}", local, local->srx.transport_type);
 
        /* create a socket to represent the local endpoint */
-       ret = sock_create_kern(PF_INET, local->srx.transport_type, IPPROTO_UDP,
-                              &local->socket);
+       ret = sock_create_kern(&init_net, PF_INET, local->srx.transport_type,
+                              IPPROTO_UDP, &local->socket);
        if (ret < 0) {
                _leave(" = %d [socket]", ret);
                return ret;
index 2274e723a3df6fdf393543281cd56bcb6284b41c..daa33432b7166e1c9af818835441be1b102d8d73 100644 (file)
@@ -312,6 +312,7 @@ config NET_SCH_PIE
 config NET_SCH_INGRESS
        tristate "Ingress Qdisc"
        depends on NET_CLS_ACT
+       select NET_INGRESS
        ---help---
          Say Y here if you want to use classifiers for incoming packets.
          If unsure, say Y.
@@ -477,6 +478,16 @@ config NET_CLS_BPF
          To compile this code as a module, choose M here: the module will
          be called cls_bpf.
 
+config NET_CLS_FLOWER
+       tristate "Flower classifier"
+       select NET_CLS
+       ---help---
+         If you say Y here, you will be able to classify packets based on
+         a configurable combination of packet keys and masks.
+
+         To compile this code as a module, choose M here: the module will
+         be called cls_flower.
+
 config NET_EMATCH
        bool "Extended Matches"
        select NET_CLS
index 7ca7f4c1b8c210c9358252c61fc18fff12e3f5db..690c1689e09020cd2dfb5fa64f000231a0980ef8 100644 (file)
@@ -56,6 +56,7 @@ obj-$(CONFIG_NET_CLS_BASIC)   += cls_basic.o
 obj-$(CONFIG_NET_CLS_FLOW)     += cls_flow.o
 obj-$(CONFIG_NET_CLS_CGROUP)   += cls_cgroup.o
 obj-$(CONFIG_NET_CLS_BPF)      += cls_bpf.o
+obj-$(CONFIG_NET_CLS_FLOWER)   += cls_flower.o
 obj-$(CONFIG_NET_EMATCH)       += ematch.o
 obj-$(CONFIG_NET_EMATCH_CMP)   += em_cmp.o
 obj-$(CONFIG_NET_EMATCH_NBYTE) += em_nbyte.o
index 3d43e4979f27c8dca3083c863549592216f741fe..af427a3dbcba238103169ab2a58005feda5fa2f1 100644 (file)
@@ -392,11 +392,6 @@ int tcf_action_exec(struct sk_buff *skb, const struct list_head *actions,
        list_for_each_entry(a, actions, list) {
 repeat:
                ret = a->ops->act(skb, a, res);
-               if (TC_MUNGED & skb->tc_verd) {
-                       /* copied already, allow trampling */
-                       skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
-                       skb->tc_verd = CLR_TC_MUNGED(skb->tc_verd);
-               }
                if (ret == TC_ACT_REPEAT)
                        goto repeat;    /* we need a ttl - JHS */
                if (ret != TC_ACT_PIPE)
index dc6a2d324bd8163841e7c9e6a0defebd87792ae5..1d56903fd4c79aa008c4c540aabd8b4c099e81a1 100644 (file)
@@ -37,6 +37,7 @@ static int tcf_bpf(struct sk_buff *skb, const struct tc_action *act,
 {
        struct tcf_bpf *prog = act->priv;
        int action, filter_res;
+       bool at_ingress = G_TC_AT(skb->tc_verd) & AT_INGRESS;
 
        if (unlikely(!skb_mac_header_was_set(skb)))
                return TC_ACT_UNSPEC;
@@ -48,7 +49,13 @@ static int tcf_bpf(struct sk_buff *skb, const struct tc_action *act,
 
        /* Needed here for accessing maps. */
        rcu_read_lock();
-       filter_res = BPF_PROG_RUN(prog->filter, skb);
+       if (at_ingress) {
+               __skb_push(skb, skb->mac_len);
+               filter_res = BPF_PROG_RUN(prog->filter, skb);
+               __skb_pull(skb, skb->mac_len);
+       } else {
+               filter_res = BPF_PROG_RUN(prog->filter, skb);
+       }
        rcu_read_unlock();
 
        /* A BPF program may overwrite the default action opcode.
index 3f63ceac8e0141ee8bbe0133759340e5f29c3573..a42a3b257226178eb5af04054a17813c04368613 100644 (file)
@@ -151,7 +151,7 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
        }
 
        at = G_TC_AT(skb->tc_verd);
-       skb2 = skb_act_clone(skb, GFP_ATOMIC, m->tcf_action);
+       skb2 = skb_clone(skb, GFP_ATOMIC);
        if (skb2 == NULL)
                goto out;
 
index 59649d588d79d6d05c47e3a111fa1dd0397c7342..17e6d6669c7fdf138915ac9549d3f06d9535d745 100644 (file)
@@ -108,7 +108,7 @@ static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a,
                     struct tcf_result *res)
 {
        struct tcf_pedit *p = a->priv;
-       int i, munged = 0;
+       int i;
        unsigned int off;
 
        if (skb_unclone(skb, GFP_ATOMIC))
@@ -156,11 +156,8 @@ static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a,
                        *ptr = ((*ptr & tkey->mask) ^ tkey->val);
                        if (ptr == &_data)
                                skb_store_bits(skb, off + offset, ptr, 4);
-                       munged++;
                }
 
-               if (munged)
-                       skb->tc_verd = SET_TC_MUNGED(skb->tc_verd);
                goto done;
        } else
                WARN(1, "pedit BUG: index %d\n", p->tcf_index);
index 91bd9c19471d58218cb340a2a871c8fe0ac8cd34..c79ecfd36e0f028388ea5f96a64dbb23451b01b1 100644 (file)
@@ -64,6 +64,11 @@ static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 {
        struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
        struct cls_bpf_prog *prog;
+#ifdef CONFIG_NET_CLS_ACT
+       bool at_ingress = G_TC_AT(skb->tc_verd) & AT_INGRESS;
+#else
+       bool at_ingress = false;
+#endif
        int ret = -1;
 
        if (unlikely(!skb_mac_header_was_set(skb)))
@@ -72,7 +77,16 @@ static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
        /* Needed here for accessing maps. */
        rcu_read_lock();
        list_for_each_entry_rcu(prog, &head->plist, link) {
-               int filter_res = BPF_PROG_RUN(prog->filter, skb);
+               int filter_res;
+
+               if (at_ingress) {
+                       /* It is safe to push/pull even if skb_shared() */
+                       __skb_push(skb, skb->mac_len);
+                       filter_res = BPF_PROG_RUN(prog->filter, skb);
+                       __skb_pull(skb, skb->mac_len);
+               } else {
+                       filter_res = BPF_PROG_RUN(prog->filter, skb);
+               }
 
                if (filter_res == 0)
                        continue;
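
Both BPF call sites above depend on __skb_push()/__skb_pull() being pure pointer bookkeeping, which is why the "safe even if skb_shared()" comment holds. A simplified standalone model (toy_skb is an invented stand-in, not the real struct sk_buff):

#include <stdio.h>

struct toy_skb { unsigned char *data; unsigned int len; };

/* like __skb_push(): expose n more bytes of header in front of data */
static unsigned char *toy_push(struct toy_skb *skb, unsigned int n)
{
	skb->data -= n;
	skb->len += n;
	return skb->data;
}

/* like __skb_pull(): hide n bytes of header again */
static unsigned char *toy_pull(struct toy_skb *skb, unsigned int n)
{
	skb->len -= n;
	return skb->data += n;
}

int main(void)
{
	unsigned char frame[64];
	struct toy_skb skb = { frame + 14, 50 };	/* data past a 14-byte MAC header */

	toy_push(&skb, 14);	/* BPF now sees the full L2 frame */
	toy_pull(&skb, 14);	/* restored; no packet bytes were written */
	printf("len=%u\n", skb.len);
	return 0;
}
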
index a620c4e288a51f55771399f6c1f81328bab9f7c7..76bc3a20ffdb31bb4c9b51942de74c64928c2a3a 100644 (file)
@@ -26,7 +26,7 @@
 #include <net/pkt_cls.h>
 #include <net/ip.h>
 #include <net/route.h>
-#include <net/flow_keys.h>
+#include <net/flow_dissector.h>
 
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 #include <net/netfilter/nf_conntrack.h>
@@ -68,35 +68,41 @@ static inline u32 addr_fold(void *addr)
 
 static u32 flow_get_src(const struct sk_buff *skb, const struct flow_keys *flow)
 {
-       if (flow->src)
-               return ntohl(flow->src);
+       __be32 src = flow_get_u32_src(flow);
+
+       if (src)
+               return ntohl(src);
+
        return addr_fold(skb->sk);
 }
 
 static u32 flow_get_dst(const struct sk_buff *skb, const struct flow_keys *flow)
 {
-       if (flow->dst)
-               return ntohl(flow->dst);
+       __be32 dst = flow_get_u32_dst(flow);
+
+       if (dst)
+               return ntohl(dst);
+
        return addr_fold(skb_dst(skb)) ^ (__force u16) tc_skb_protocol(skb);
 }
 
 static u32 flow_get_proto(const struct sk_buff *skb, const struct flow_keys *flow)
 {
-       return flow->ip_proto;
+       return flow->basic.ip_proto;
 }
 
 static u32 flow_get_proto_src(const struct sk_buff *skb, const struct flow_keys *flow)
 {
-       if (flow->ports)
-               return ntohs(flow->port16[0]);
+       if (flow->ports.ports)
+               return ntohs(flow->ports.src);
 
        return addr_fold(skb->sk);
 }
 
 static u32 flow_get_proto_dst(const struct sk_buff *skb, const struct flow_keys *flow)
 {
-       if (flow->ports)
-               return ntohs(flow->port16[1]);
+       if (flow->ports.ports)
+               return ntohs(flow->ports.dst);
 
        return addr_fold(skb_dst(skb)) ^ (__force u16) tc_skb_protocol(skb);
 }
@@ -295,7 +301,7 @@ static int flow_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 
                keymask = f->keymask;
                if (keymask & FLOW_KEYS_NEEDED)
-                       skb_flow_dissect(skb, &flow_keys);
+                       skb_flow_dissect_flow_keys(skb, &flow_keys);
 
                for (n = 0; n < f->nkeys; n++) {
                        key = ffs(keymask) - 1;
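
The cls_flower classifier added below matches packets by hashing a masked copy of the dissected keys. A standalone sketch of its mask-range trick: the nonzero span of the mask is rounded out to long boundaries so the copy-and-AND loop can work a word at a time (KEY_SIZE is an arbitrary stand-in for sizeof(struct fl_flow_key)):

#include <stdio.h>

#define KEY_SIZE 32

static void mask_range(const unsigned char *mask, size_t *start, size_t *end)
{
	size_t i, first = 0, last = KEY_SIZE - 1;

	for (i = 0; i < KEY_SIZE; i++)
		if (mask[i]) {
			if (!first && i)
				first = i;
			last = i;
		}
	*start = first / sizeof(long) * sizeof(long);		/* rounddown */
	*end = (last / sizeof(long) + 1) * sizeof(long);	/* roundup(last + 1) */
}

int main(void)
{
	unsigned char mask[KEY_SIZE] = { [10] = 0xff, [13] = 0x0f };
	size_t s, e;

	mask_range(mask, &s, &e);
	printf("compare bytes [%zu, %zu)\n", s, e);	/* [8, 16) on LP64 */
	return 0;
}
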
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
new file mode 100644 (file)
index 0000000..b92d3f4
--- /dev/null
@@ -0,0 +1,691 @@
+/*
+ * net/sched/cls_flower.c              Flower classifier
+ *
+ * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/rhashtable.h>
+
+#include <linux/if_ether.h>
+#include <linux/in6.h>
+#include <linux/ip.h>
+
+#include <net/sch_generic.h>
+#include <net/pkt_cls.h>
+#include <net/ip.h>
+#include <net/flow_dissector.h>
+
+struct fl_flow_key {
+       int     indev_ifindex;
+       struct flow_dissector_key_control control;
+       struct flow_dissector_key_basic basic;
+       struct flow_dissector_key_eth_addrs eth;
+       struct flow_dissector_key_addrs ipaddrs;
+       union {
+               struct flow_dissector_key_ipv4_addrs ipv4;
+               struct flow_dissector_key_ipv6_addrs ipv6;
+       };
+       struct flow_dissector_key_ports tp;
+} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
+
+struct fl_flow_mask_range {
+       unsigned short int start;
+       unsigned short int end;
+};
+
+struct fl_flow_mask {
+       struct fl_flow_key key;
+       struct fl_flow_mask_range range;
+       struct rcu_head rcu;
+};
+
+struct cls_fl_head {
+       struct rhashtable ht;
+       struct fl_flow_mask mask;
+       struct flow_dissector dissector;
+       u32 hgen;
+       bool mask_assigned;
+       struct list_head filters;
+       struct rhashtable_params ht_params;
+       struct rcu_head rcu;
+};
+
+struct cls_fl_filter {
+       struct rhash_head ht_node;
+       struct fl_flow_key mkey;
+       struct tcf_exts exts;
+       struct tcf_result res;
+       struct fl_flow_key key;
+       struct list_head list;
+       u32 handle;
+       struct rcu_head rcu;
+};
+
+static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
+{
+       return mask->range.end - mask->range.start;
+}
+
+static void fl_mask_update_range(struct fl_flow_mask *mask)
+{
+       const u8 *bytes = (const u8 *) &mask->key;
+       size_t size = sizeof(mask->key);
+       size_t i, first = 0, last = size - 1;
+
+       for (i = 0; i < sizeof(mask->key); i++) {
+               if (bytes[i]) {
+                       if (!first && i)
+                               first = i;
+                       last = i;
+               }
+       }
+       mask->range.start = rounddown(first, sizeof(long));
+       mask->range.end = roundup(last + 1, sizeof(long));
+}
+
+static void *fl_key_get_start(struct fl_flow_key *key,
+                             const struct fl_flow_mask *mask)
+{
+       return (u8 *) key + mask->range.start;
+}
+
+static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
+                             struct fl_flow_mask *mask)
+{
+       const long *lkey = fl_key_get_start(key, mask);
+       const long *lmask = fl_key_get_start(&mask->key, mask);
+       long *lmkey = fl_key_get_start(mkey, mask);
+       int i;
+
+       for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
+               *lmkey++ = *lkey++ & *lmask++;
+}
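
fl_mask_update_range() and fl_set_masked_key() implement the trick that the
__aligned() annotation on struct fl_flow_key enables: the mask's non-zero span
is rounded out to long boundaries so that masking and comparison can run one
long at a time. A minimal userspace sketch of the same scheme (the 32-byte toy
key and all names here are hypothetical; rounddown/roundup are reimplemented
locally, and memcpy sidesteps the alignment concerns the kernel solves with
__aligned()):

#include <stdio.h>
#include <string.h>

#define ROUNDDOWN(x, y) ((x) / (y) * (y))
#define ROUNDUP(x, y)   (((x) + (y) - 1) / (y) * (y))

struct key { unsigned char bytes[32]; };

int main(void)
{
	struct key key  = { .bytes = { [4] = 0xaa, [9] = 0xbb } };
	struct key mask = { .bytes = { [4] = 0xff, [9] = 0x0f } };
	struct key mkey = { { 0 } };
	size_t i, first = 0, last = 0, start, end;

	/* Find the first and last non-zero mask byte... */
	for (i = 0; i < sizeof(mask.bytes); i++) {
		if (mask.bytes[i]) {
			if (!first && i)
				first = i;
			last = i;
		}
	}
	/* ...and round the span out to long boundaries. */
	start = ROUNDDOWN(first, sizeof(long));
	end = ROUNDUP(last + 1, sizeof(long));

	/* Mask the key one long at a time within the span. */
	for (i = start; i < end; i += sizeof(long)) {
		long k, m;

		memcpy(&k, key.bytes + i, sizeof(long));
		memcpy(&m, mask.bytes + i, sizeof(long));
		k &= m;
		memcpy(mkey.bytes + i, &k, sizeof(long));
	}

	/* 0xbb & 0x0f = 0x0b */
	printf("span [%zu, %zu), mkey[9] = 0x%02x\n", start, end, mkey.bytes[9]);
	return 0;
}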
+
+static void fl_clear_masked_range(struct fl_flow_key *key,
+                                 struct fl_flow_mask *mask)
+{
+       memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
+}
+
+static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
+                      struct tcf_result *res)
+{
+       struct cls_fl_head *head = rcu_dereference_bh(tp->root);
+       struct cls_fl_filter *f;
+       struct fl_flow_key skb_key;
+       struct fl_flow_key skb_mkey;
+
+       fl_clear_masked_range(&skb_key, &head->mask);
+       skb_key.indev_ifindex = skb->skb_iif;
+       /* skb_flow_dissect() does not set n_proto in case of an unknown
+        * protocol, so set it here instead.
+        */
+       skb_key.basic.n_proto = skb->protocol;
+       skb_flow_dissect(skb, &head->dissector, &skb_key);
+
+       fl_set_masked_key(&skb_mkey, &skb_key, &head->mask);
+
+       f = rhashtable_lookup_fast(&head->ht,
+                                  fl_key_get_start(&skb_mkey, &head->mask),
+                                  head->ht_params);
+       if (f) {
+               *res = f->res;
+               return tcf_exts_exec(skb, &f->exts, res);
+       }
+       return -1;
+}
+
+static int fl_init(struct tcf_proto *tp)
+{
+       struct cls_fl_head *head;
+
+       head = kzalloc(sizeof(*head), GFP_KERNEL);
+       if (!head)
+               return -ENOBUFS;
+
+       INIT_LIST_HEAD_RCU(&head->filters);
+       rcu_assign_pointer(tp->root, head);
+
+       return 0;
+}
+
+static void fl_destroy_filter(struct rcu_head *head)
+{
+       struct cls_fl_filter *f = container_of(head, struct cls_fl_filter, rcu);
+
+       tcf_exts_destroy(&f->exts);
+       kfree(f);
+}
+
+static bool fl_destroy(struct tcf_proto *tp, bool force)
+{
+       struct cls_fl_head *head = rtnl_dereference(tp->root);
+       struct cls_fl_filter *f, *next;
+
+       if (!force && !list_empty(&head->filters))
+               return false;
+
+       list_for_each_entry_safe(f, next, &head->filters, list) {
+               list_del_rcu(&f->list);
+               call_rcu(&f->rcu, fl_destroy_filter);
+       }
+       RCU_INIT_POINTER(tp->root, NULL);
+       if (head->mask_assigned)
+               rhashtable_destroy(&head->ht);
+       kfree_rcu(head, rcu);
+       return true;
+}
+
+static unsigned long fl_get(struct tcf_proto *tp, u32 handle)
+{
+       struct cls_fl_head *head = rtnl_dereference(tp->root);
+       struct cls_fl_filter *f;
+
+       list_for_each_entry(f, &head->filters, list)
+               if (f->handle == handle)
+                       return (unsigned long) f;
+       return 0;
+}
+
+static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
+       [TCA_FLOWER_UNSPEC]             = { .type = NLA_UNSPEC },
+       [TCA_FLOWER_CLASSID]            = { .type = NLA_U32 },
+       [TCA_FLOWER_INDEV]              = { .type = NLA_STRING,
+                                           .len = IFNAMSIZ },
+       [TCA_FLOWER_KEY_ETH_DST]        = { .len = ETH_ALEN },
+       [TCA_FLOWER_KEY_ETH_DST_MASK]   = { .len = ETH_ALEN },
+       [TCA_FLOWER_KEY_ETH_SRC]        = { .len = ETH_ALEN },
+       [TCA_FLOWER_KEY_ETH_SRC_MASK]   = { .len = ETH_ALEN },
+       [TCA_FLOWER_KEY_ETH_TYPE]       = { .type = NLA_U16 },
+       [TCA_FLOWER_KEY_IP_PROTO]       = { .type = NLA_U8 },
+       [TCA_FLOWER_KEY_IPV4_SRC]       = { .type = NLA_U32 },
+       [TCA_FLOWER_KEY_IPV4_SRC_MASK]  = { .type = NLA_U32 },
+       [TCA_FLOWER_KEY_IPV4_DST]       = { .type = NLA_U32 },
+       [TCA_FLOWER_KEY_IPV4_DST_MASK]  = { .type = NLA_U32 },
+       [TCA_FLOWER_KEY_IPV6_SRC]       = { .len = sizeof(struct in6_addr) },
+       [TCA_FLOWER_KEY_IPV6_SRC_MASK]  = { .len = sizeof(struct in6_addr) },
+       [TCA_FLOWER_KEY_IPV6_DST]       = { .len = sizeof(struct in6_addr) },
+       [TCA_FLOWER_KEY_IPV6_DST_MASK]  = { .len = sizeof(struct in6_addr) },
+       [TCA_FLOWER_KEY_TCP_SRC]        = { .type = NLA_U16 },
+       [TCA_FLOWER_KEY_TCP_DST]        = { .type = NLA_U16 },
+       [TCA_FLOWER_KEY_UDP_SRC]        = { .type = NLA_U16 },
+       [TCA_FLOWER_KEY_UDP_DST]        = { .type = NLA_U16 },
+};
+
+static void fl_set_key_val(struct nlattr **tb,
+                          void *val, int val_type,
+                          void *mask, int mask_type, int len)
+{
+       if (!tb[val_type])
+               return;
+       memcpy(val, nla_data(tb[val_type]), len);
+       if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
+               memset(mask, 0xff, len);
+       else
+               memcpy(mask, nla_data(tb[mask_type]), len);
+}
+
+static int fl_set_key(struct net *net, struct nlattr **tb,
+                     struct fl_flow_key *key, struct fl_flow_key *mask)
+{
+#ifdef CONFIG_NET_CLS_IND
+       if (tb[TCA_FLOWER_INDEV]) {
+               int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV]);
+               if (err < 0)
+                       return err;
+               key->indev_ifindex = err;
+               mask->indev_ifindex = 0xffffffff;
+       }
+#endif
+
+       fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
+                      mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
+                      sizeof(key->eth.dst));
+       fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
+                      mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
+                      sizeof(key->eth.src));
+       fl_set_key_val(tb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
+                      &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
+                      sizeof(key->basic.n_proto));
+       if (key->basic.n_proto == htons(ETH_P_IP) ||
+           key->basic.n_proto == htons(ETH_P_IPV6)) {
+               fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
+                              &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
+                              sizeof(key->basic.ip_proto));
+       }
+       if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+               fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
+                              &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
+                              sizeof(key->ipv4.src));
+               fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
+                              &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
+                              sizeof(key->ipv4.dst));
+       } else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+               fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
+                              &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
+                              sizeof(key->ipv6.src));
+               fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
+                              &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
+                              sizeof(key->ipv6.dst));
+       }
+       if (key->basic.ip_proto == IPPROTO_TCP) {
+               fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
+                              &mask->tp.src, TCA_FLOWER_UNSPEC,
+                              sizeof(key->tp.src));
+               fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
+                              &mask->tp.dst, TCA_FLOWER_UNSPEC,
+                              sizeof(key->tp.dst));
+       } else if (key->basic.ip_proto == IPPROTO_UDP) {
+               fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
+                              &mask->tp.src, TCA_FLOWER_UNSPEC,
+                              sizeof(key->tp.src));
+               fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
+                              &mask->tp.dst, TCA_FLOWER_UNSPEC,
+                              sizeof(key->tp.dst));
+       }
+
+       return 0;
+}
+
+static bool fl_mask_eq(struct fl_flow_mask *mask1,
+                      struct fl_flow_mask *mask2)
+{
+       const long *lmask1 = fl_key_get_start(&mask1->key, mask1);
+       const long *lmask2 = fl_key_get_start(&mask2->key, mask2);
+
+       return !memcmp(&mask1->range, &mask2->range, sizeof(mask1->range)) &&
+              !memcmp(lmask1, lmask2, fl_mask_range(mask1));
+}
+
+static const struct rhashtable_params fl_ht_params = {
+       .key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
+       .head_offset = offsetof(struct cls_fl_filter, ht_node),
+       .automatic_shrinking = true,
+};
+
+static int fl_init_hashtable(struct cls_fl_head *head,
+                            struct fl_flow_mask *mask)
+{
+       head->ht_params = fl_ht_params;
+       head->ht_params.key_len = fl_mask_range(mask);
+       head->ht_params.key_offset += mask->range.start;
+
+       return rhashtable_init(&head->ht, &head->ht_params);
+}
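
fl_init_hashtable() narrows the rhashtable key to exactly the masked window:
key_offset is shifted by range.start and key_len is the window size, so only
the bytes the mask can actually set are hashed and compared on lookup. A
sketch of the arithmetic with hypothetical numbers:

#include <stdio.h>
#include <stddef.h>

struct filter { char other[16]; char mkey[64]; };

int main(void)
{
	size_t range_start = 8, range_end = 24;	/* hypothetical mask span */
	size_t key_offset = offsetof(struct filter, mkey) + range_start;
	size_t key_len = range_end - range_start;

	printf("hash window: offset %zu, len %zu\n", key_offset, key_len);
	return 0;
}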
+
+#define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
+#define FL_KEY_MEMBER_SIZE(member) (sizeof(((struct fl_flow_key *) 0)->member))
+#define FL_KEY_MEMBER_END_OFFSET(member)                                       \
+       (FL_KEY_MEMBER_OFFSET(member) + FL_KEY_MEMBER_SIZE(member))
+
+#define FL_KEY_IN_RANGE(mask, member)                                          \
+        (FL_KEY_MEMBER_OFFSET(member) <= (mask)->range.end &&                  \
+         FL_KEY_MEMBER_END_OFFSET(member) >= (mask)->range.start)
+
+#define FL_KEY_SET(keys, cnt, id, member)                                      \
+       do {                                                                    \
+               keys[cnt].key_id = id;                                          \
+               keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member);                \
+               cnt++;                                                          \
+       } while (0)
+
+#define FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt, id, member)                    \
+       do {                                                                    \
+               if (FL_KEY_IN_RANGE(mask, member))                              \
+                       FL_KEY_SET(keys, cnt, id, member);                      \
+       } while (0)
+
+static void fl_init_dissector(struct cls_fl_head *head,
+                             struct fl_flow_mask *mask)
+{
+       struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
+       size_t cnt = 0;
+
+       FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
+       FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
+       FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt,
+                              FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
+       FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt,
+                              FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
+       FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt,
+                              FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
+       FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt,
+                              FLOW_DISSECTOR_KEY_PORTS, tp);
+
+       skb_flow_dissector_init(&head->dissector, keys, cnt);
+}
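
FL_KEY_SET_IF_IN_RANGE() feeds a dissector key to skb_flow_dissector_init()
only when the corresponding struct member overlaps the mask's used window
(note the test is inclusive at both boundaries). A small sketch of the
overlap test with a hypothetical window:

#include <stdio.h>

/* Mirrors FL_KEY_IN_RANGE: member [off, off + size) vs window [start, end) */
static int in_range(size_t off, size_t size, size_t start, size_t end)
{
	return off <= end && off + size >= start;
}

int main(void)
{
	size_t start = 16, end = 24;	/* hypothetical masked window */

	printf("eth in range:   %d\n", in_range(4, 12, start, end));	/* 1 */
	printf("ports in range: %d\n", in_range(32, 4, start, end));	/* 0 */
	return 0;
}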
+
+static int fl_check_assign_mask(struct cls_fl_head *head,
+                               struct fl_flow_mask *mask)
+{
+       int err;
+
+       if (head->mask_assigned) {
+               if (!fl_mask_eq(&head->mask, mask))
+                       return -EINVAL;
+               else
+                       return 0;
+       }
+
+       /* Mask is not assigned yet. Assign it and initialize the
+        * hashtable accordingly.
+        */
+       err = fl_init_hashtable(head, mask);
+       if (err)
+               return err;
+       memcpy(&head->mask, mask, sizeof(head->mask));
+       head->mask_assigned = true;
+
+       fl_init_dissector(head, mask);
+
+       return 0;
+}
+
+static int fl_set_parms(struct net *net, struct tcf_proto *tp,
+                       struct cls_fl_filter *f, struct fl_flow_mask *mask,
+                       unsigned long base, struct nlattr **tb,
+                       struct nlattr *est, bool ovr)
+{
+       struct tcf_exts e;
+       int err;
+
+       tcf_exts_init(&e, TCA_FLOWER_ACT, 0);
+       err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
+       if (err < 0)
+               return err;
+
+       if (tb[TCA_FLOWER_CLASSID]) {
+               f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
+               tcf_bind_filter(tp, &f->res, base);
+       }
+
+       err = fl_set_key(net, tb, &f->key, &mask->key);
+       if (err)
+               goto errout;
+
+       fl_mask_update_range(mask);
+       fl_set_masked_key(&f->mkey, &f->key, mask);
+
+       tcf_exts_change(tp, &f->exts, &e);
+
+       return 0;
+errout:
+       tcf_exts_destroy(&e);
+       return err;
+}
+
+static u32 fl_grab_new_handle(struct tcf_proto *tp,
+                             struct cls_fl_head *head)
+{
+       unsigned int i = 0x80000000;
+       u32 handle;
+
+       do {
+               if (++head->hgen == 0x7FFFFFFF)
+                       head->hgen = 1;
+       } while (--i > 0 && fl_get(tp, head->hgen));
+
+       if (unlikely(i == 0)) {
+               pr_err("Insufficient number of handles\n");
+               handle = 0;
+       } else {
+               handle = head->hgen;
+       }
+
+       return handle;
+}
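
fl_grab_new_handle() is a simple wrap-around allocator: it advances hgen,
wrapping before 0x7FFFFFFF, until it finds a handle no existing filter uses,
and gives up after 0x80000000 probes. A userspace sketch with a stubbed
lookup:

#include <stdio.h>

static unsigned int hgen;

/* Stub for fl_get(): pretend handles 1 and 2 are already taken. */
static int handle_in_use(unsigned int h)
{
	return h == 1 || h == 2;
}

static unsigned int grab_new_handle(void)
{
	unsigned int i = 0x80000000;

	do {
		if (++hgen == 0x7FFFFFFF)
			hgen = 1;
	} while (--i > 0 && handle_in_use(hgen));

	return i ? hgen : 0;	/* 0 => handle space exhausted */
}

int main(void)
{
	unsigned int a = grab_new_handle();
	unsigned int b = grab_new_handle();
	unsigned int c = grab_new_handle();

	printf("%u %u %u\n", a, b, c);	/* 3 4 5: skips the taken handles */
	return 0;
}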
+
+static int fl_change(struct net *net, struct sk_buff *in_skb,
+                    struct tcf_proto *tp, unsigned long base,
+                    u32 handle, struct nlattr **tca,
+                    unsigned long *arg, bool ovr)
+{
+       struct cls_fl_head *head = rtnl_dereference(tp->root);
+       struct cls_fl_filter *fold = (struct cls_fl_filter *) *arg;
+       struct cls_fl_filter *fnew;
+       struct nlattr *tb[TCA_FLOWER_MAX + 1];
+       struct fl_flow_mask mask = {};
+       int err;
+
+       if (!tca[TCA_OPTIONS])
+               return -EINVAL;
+
+       err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS], fl_policy);
+       if (err < 0)
+               return err;
+
+       if (fold && handle && fold->handle != handle)
+               return -EINVAL;
+
+       fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
+       if (!fnew)
+               return -ENOBUFS;
+
+       tcf_exts_init(&fnew->exts, TCA_FLOWER_ACT, 0);
+
+       if (!handle) {
+               handle = fl_grab_new_handle(tp, head);
+               if (!handle) {
+                       err = -EINVAL;
+                       goto errout;
+               }
+       }
+       fnew->handle = handle;
+
+       err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr);
+       if (err)
+               goto errout;
+
+       err = fl_check_assign_mask(head, &mask);
+       if (err)
+               goto errout;
+
+       err = rhashtable_insert_fast(&head->ht, &fnew->ht_node,
+                                    head->ht_params);
+       if (err)
+               goto errout;
+       if (fold)
+               rhashtable_remove_fast(&head->ht, &fold->ht_node,
+                                      head->ht_params);
+
+       *arg = (unsigned long) fnew;
+
+       if (fold) {
+               list_replace_rcu(&fold->list, &fnew->list);
+               tcf_unbind_filter(tp, &fold->res);
+               call_rcu(&fold->rcu, fl_destroy_filter);
+       } else {
+               list_add_tail_rcu(&fnew->list, &head->filters);
+       }
+
+       return 0;
+
+errout:
+       kfree(fnew);
+       return err;
+}
+
+static int fl_delete(struct tcf_proto *tp, unsigned long arg)
+{
+       struct cls_fl_head *head = rtnl_dereference(tp->root);
+       struct cls_fl_filter *f = (struct cls_fl_filter *) arg;
+
+       rhashtable_remove_fast(&head->ht, &f->ht_node,
+                              head->ht_params);
+       list_del_rcu(&f->list);
+       tcf_unbind_filter(tp, &f->res);
+       call_rcu(&f->rcu, fl_destroy_filter);
+       return 0;
+}
+
+static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg)
+{
+       struct cls_fl_head *head = rtnl_dereference(tp->root);
+       struct cls_fl_filter *f;
+
+       list_for_each_entry_rcu(f, &head->filters, list) {
+               if (arg->count < arg->skip)
+                       goto skip;
+               if (arg->fn(tp, (unsigned long) f, arg) < 0) {
+                       arg->stop = 1;
+                       break;
+               }
+skip:
+               arg->count++;
+       }
+}
+
+static int fl_dump_key_val(struct sk_buff *skb,
+                          void *val, int val_type,
+                          void *mask, int mask_type, int len)
+{
+       int err;
+
+       if (!memchr_inv(mask, 0, len))
+               return 0;
+       err = nla_put(skb, val_type, len, val);
+       if (err)
+               return err;
+       if (mask_type != TCA_FLOWER_UNSPEC) {
+               err = nla_put(skb, mask_type, len, mask);
+               if (err)
+                       return err;
+       }
+       return 0;
+}
+
+static int fl_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
+                  struct sk_buff *skb, struct tcmsg *t)
+{
+       struct cls_fl_head *head = rtnl_dereference(tp->root);
+       struct cls_fl_filter *f = (struct cls_fl_filter *) fh;
+       struct nlattr *nest;
+       struct fl_flow_key *key, *mask;
+
+       if (!f)
+               return skb->len;
+
+       t->tcm_handle = f->handle;
+
+       nest = nla_nest_start(skb, TCA_OPTIONS);
+       if (!nest)
+               goto nla_put_failure;
+
+       if (f->res.classid &&
+           nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
+               goto nla_put_failure;
+
+       key = &f->key;
+       mask = &head->mask.key;
+
+       if (mask->indev_ifindex) {
+               struct net_device *dev;
+
+               dev = __dev_get_by_index(net, key->indev_ifindex);
+               if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
+                       goto nla_put_failure;
+       }
+
+       if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
+                           mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
+                           sizeof(key->eth.dst)) ||
+           fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
+                           mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
+                           sizeof(key->eth.src)) ||
+           fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
+                           &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
+                           sizeof(key->basic.n_proto)))
+               goto nla_put_failure;
+       if ((key->basic.n_proto == htons(ETH_P_IP) ||
+            key->basic.n_proto == htons(ETH_P_IPV6)) &&
+           fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
+                           &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
+                           sizeof(key->basic.ip_proto)))
+               goto nla_put_failure;
+
+       if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
+           (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
+                            &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
+                            sizeof(key->ipv4.src)) ||
+            fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
+                            &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
+                            sizeof(key->ipv4.dst))))
+               goto nla_put_failure;
+       else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
+                (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
+                                 &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
+                                 sizeof(key->ipv6.src)) ||
+                 fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
+                                 &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
+                                 sizeof(key->ipv6.dst))))
+               goto nla_put_failure;
+
+       if (key->basic.ip_proto == IPPROTO_TCP &&
+           (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
+                            &mask->tp.src, TCA_FLOWER_UNSPEC,
+                            sizeof(key->tp.src)) ||
+            fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
+                            &mask->tp.dst, TCA_FLOWER_UNSPEC,
+                            sizeof(key->tp.dst))))
+               goto nla_put_failure;
+       else if (key->basic.ip_proto == IPPROTO_UDP &&
+                (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
+                                 &mask->tp.src, TCA_FLOWER_UNSPEC,
+                                 sizeof(key->tp.src)) ||
+                 fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
+                                 &mask->tp.dst, TCA_FLOWER_UNSPEC,
+                                 sizeof(key->tp.dst))))
+               goto nla_put_failure;
+
+       if (tcf_exts_dump(skb, &f->exts))
+               goto nla_put_failure;
+
+       nla_nest_end(skb, nest);
+
+       if (tcf_exts_dump_stats(skb, &f->exts) < 0)
+               goto nla_put_failure;
+
+       return skb->len;
+
+nla_put_failure:
+       nla_nest_cancel(skb, nest);
+       return -1;
+}
+
+static struct tcf_proto_ops cls_fl_ops __read_mostly = {
+       .kind           = "flower",
+       .classify       = fl_classify,
+       .init           = fl_init,
+       .destroy        = fl_destroy,
+       .get            = fl_get,
+       .change         = fl_change,
+       .delete         = fl_delete,
+       .walk           = fl_walk,
+       .dump           = fl_dump,
+       .owner          = THIS_MODULE,
+};
+
+static int __init cls_fl_init(void)
+{
+       return register_tcf_proto_ops(&cls_fl_ops);
+}
+
+static void __exit cls_fl_exit(void)
+{
+       unregister_tcf_proto_ops(&cls_fl_ops);
+}
+
+module_init(cls_fl_init);
+module_exit(cls_fl_exit);
+
+MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
+MODULE_DESCRIPTION("Flower classifier");
+MODULE_LICENSE("GPL v2");
index 1e1c89e51a118e79610c49412e335191fc3ba834..c5b9db84d069d4c0fca40f3bf21cfa6dd63adb56 100644 (file)
@@ -1818,13 +1818,8 @@ int tc_classify_compat(struct sk_buff *skb, const struct tcf_proto *tp,
                        continue;
                err = tp->classify(skb, tp, res);
 
-               if (err >= 0) {
-#ifdef CONFIG_NET_CLS_ACT
-                       if (err != TC_ACT_RECLASSIFY && skb->tc_verd)
-                               skb->tc_verd = SET_TC_VERD(skb->tc_verd, 0);
-#endif
+               if (err >= 0)
                        return err;
-               }
        }
        return -1;
 }
@@ -1836,23 +1831,22 @@ int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
        int err = 0;
 #ifdef CONFIG_NET_CLS_ACT
        const struct tcf_proto *otp = tp;
+       int limit = 0;
 reclassify:
 #endif
 
        err = tc_classify_compat(skb, tp, res);
 #ifdef CONFIG_NET_CLS_ACT
        if (err == TC_ACT_RECLASSIFY) {
-               u32 verd = G_TC_VERD(skb->tc_verd);
                tp = otp;
 
-               if (verd++ >= MAX_REC_LOOP) {
+               if (unlikely(limit++ >= MAX_REC_LOOP)) {
                        net_notice_ratelimited("%s: packet reclassify loop rule prio %u protocol %02x\n",
                                               tp->q->ops->id,
                                               tp->prio & 0xffff,
                                               ntohs(tp->protocol));
                        return TC_ACT_SHOT;
                }
-               skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd);
                goto reclassify;
        }
 #endif
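
The tc_classify() rework above drops the per-skb tc_verd counter in favour of
a plain local variable, bounding the reclassify loop without touching skb
state. A self-contained sketch of the bounded-retry pattern (constants and
the always-reclassifying stub are hypothetical):

#include <stdio.h>

#define MAX_REC_LOOP	4
#define ACT_RECLASSIFY	(-2)
#define ACT_SHOT	(-1)

/* Stub classifier that keeps asking for another pass. */
static int classify_once(void)
{
	return ACT_RECLASSIFY;
}

static int classify(void)
{
	int limit = 0;
	int err;

reclassify:
	err = classify_once();
	if (err == ACT_RECLASSIFY) {
		if (limit++ >= MAX_REC_LOOP) {
			fprintf(stderr, "reclassify loop, dropping\n");
			return ACT_SHOT;
		}
		goto reclassify;
	}
	return err;
}

int main(void)
{
	printf("%d\n", classify());	/* ACT_SHOT after MAX_REC_LOOP passes */
	return 0;
}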
index c009eb9045cef48adbe974e9d7d505e242213974..93d5742dc7e0f9730abd1726adf47857de1e64d4 100644 (file)
@@ -18,7 +18,7 @@
 #include <net/pkt_sched.h>
 #include <net/inet_ecn.h>
 #include <net/red.h>
-#include <net/flow_keys.h>
+#include <net/flow_dissector.h>
 
 /*
    CHOKe stateless AQM for fair bandwidth allocation
@@ -133,16 +133,10 @@ static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx)
        --sch->q.qlen;
 }
 
-/* private part of skb->cb[] that a qdisc is allowed to use
- * is limited to QDISC_CB_PRIV_LEN bytes.
- * As a flow key might be too large, we store a part of it only.
- */
-#define CHOKE_K_LEN min_t(u32, sizeof(struct flow_keys), QDISC_CB_PRIV_LEN - 3)
-
 struct choke_skb_cb {
        u16                     classid;
        u8                      keys_valid;
-       u8                      keys[QDISC_CB_PRIV_LEN - 3];
+       struct                  flow_keys_digest keys;
 };
 
 static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb)
@@ -176,19 +170,19 @@ static bool choke_match_flow(struct sk_buff *skb1,
 
        if (!choke_skb_cb(skb1)->keys_valid) {
                choke_skb_cb(skb1)->keys_valid = 1;
-               skb_flow_dissect(skb1, &temp);
-               memcpy(&choke_skb_cb(skb1)->keys, &temp, CHOKE_K_LEN);
+               skb_flow_dissect_flow_keys(skb1, &temp);
+               make_flow_keys_digest(&choke_skb_cb(skb1)->keys, &temp);
        }
 
        if (!choke_skb_cb(skb2)->keys_valid) {
                choke_skb_cb(skb2)->keys_valid = 1;
-               skb_flow_dissect(skb2, &temp);
-               memcpy(&choke_skb_cb(skb2)->keys, &temp, CHOKE_K_LEN);
+               skb_flow_dissect_flow_keys(skb2, &temp);
+               make_flow_keys_digest(&choke_skb_cb(skb2)->keys, &temp);
        }
 
        return !memcmp(&choke_skb_cb(skb1)->keys,
                       &choke_skb_cb(skb2)->keys,
-                      CHOKE_K_LEN);
+                      sizeof(choke_skb_cb(skb1)->keys));
 }
 
 /*
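
The sch_choke conversion above dissects each packet at most once and caches a
fixed-size digest in the qdisc cb area, so matching two packets to the same
flow becomes a plain memcmp() of digests. A userspace sketch of the caching
pattern (the digest layout here is hypothetical, not the kernel's
flow_keys_digest):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct digest { uint32_t src, dst, ports, proto; };

struct pkt {
	/* parsed header fields */
	uint32_t src, dst, ports, proto;
	/* cached digest, filled on first use */
	int keys_valid;
	struct digest keys;
};

static void make_digest(struct pkt *p)
{
	if (p->keys_valid)
		return;		/* dissect each packet at most once */
	p->keys = (struct digest){ p->src, p->dst, p->ports, p->proto };
	p->keys_valid = 1;
}

static int match_flow(struct pkt *a, struct pkt *b)
{
	make_digest(a);
	make_digest(b);
	return !memcmp(&a->keys, &b->keys, sizeof(a->keys));
}

int main(void)
{
	struct pkt a = { .src = 1, .dst = 2, .ports = 80, .proto = 6 };
	struct pkt b = { .src = 1, .dst = 2, .ports = 80, .proto = 6 };

	printf("same flow: %d\n", match_flow(&a, &b));	/* 1 */
	return 0;
}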
index 7a0bdb16ac92fd0a20f565295392bed8674c8d90..535007d5f0b523b13bffac7e314710f58d9ce7ef 100644 (file)
@@ -6,7 +6,7 @@
  *
  *  Implemented on linux by :
  *  Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
- *  Copyright (C) 2012 Eric Dumazet <edumazet@google.com>
+ *  Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -109,6 +109,7 @@ static const struct nla_policy codel_policy[TCA_CODEL_MAX + 1] = {
        [TCA_CODEL_LIMIT]       = { .type = NLA_U32 },
        [TCA_CODEL_INTERVAL]    = { .type = NLA_U32 },
        [TCA_CODEL_ECN]         = { .type = NLA_U32 },
+       [TCA_CODEL_CE_THRESHOLD]= { .type = NLA_U32 },
 };
 
 static int codel_change(struct Qdisc *sch, struct nlattr *opt)
@@ -133,6 +134,12 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt)
                q->params.target = ((u64)target * NSEC_PER_USEC) >> CODEL_SHIFT;
        }
 
+       if (tb[TCA_CODEL_CE_THRESHOLD]) {
+               u64 val = nla_get_u32(tb[TCA_CODEL_CE_THRESHOLD]);
+
+               q->params.ce_threshold = (val * NSEC_PER_USEC) >> CODEL_SHIFT;
+       }
+
        if (tb[TCA_CODEL_INTERVAL]) {
                u32 interval = nla_get_u32(tb[TCA_CODEL_INTERVAL]);
 
@@ -201,7 +208,10 @@ static int codel_dump(struct Qdisc *sch, struct sk_buff *skb)
            nla_put_u32(skb, TCA_CODEL_ECN,
                        q->params.ecn))
                goto nla_put_failure;
-
+       if (q->params.ce_threshold != CODEL_DISABLED_THRESHOLD &&
+           nla_put_u32(skb, TCA_CODEL_CE_THRESHOLD,
+                       codel_time_to_us(q->params.ce_threshold)))
+               goto nla_put_failure;
        return nla_nest_end(skb, opts);
 
 nla_put_failure:
@@ -220,6 +230,7 @@ static int codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
                .ldelay         = codel_time_to_us(q->vars.ldelay),
                .dropping       = q->vars.dropping,
                .ecn_mark       = q->stats.ecn_mark,
+               .ce_mark        = q->stats.ce_mark,
        };
 
        if (q->vars.dropping) {
index c244c45b78d7feca32fda3b925f7605aebf0a5b6..d75993f89facc0ce8d5df0d26aedcd016714a43e 100644 (file)
@@ -6,7 +6,7 @@
  *     as published by the Free Software Foundation; either version
  *     2 of the License, or (at your option) any later version.
  *
- *  Copyright (C) 2012 Eric Dumazet <edumazet@google.com>
+ *  Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
  */
 
 #include <linux/module.h>
@@ -23,7 +23,6 @@
 #include <linux/vmalloc.h>
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
-#include <net/flow_keys.h>
 #include <net/codel.h>
 
 /*     Fair Queue CoDel.
@@ -68,15 +67,9 @@ struct fq_codel_sched_data {
 };
 
 static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
-                                 const struct sk_buff *skb)
+                                 struct sk_buff *skb)
 {
-       struct flow_keys keys;
-       unsigned int hash;
-
-       skb_flow_dissect(skb, &keys);
-       hash = jhash_3words((__force u32)keys.dst,
-                           (__force u32)keys.src ^ keys.ip_proto,
-                           (__force u32)keys.ports, q->perturbation);
+       u32 hash = skb_get_hash_perturb(skb, q->perturbation);
 
        return reciprocal_scale(hash, q->flows_cnt);
 }
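
skb_get_hash_perturb() yields a perturbation-salted 32-bit flow hash, and
reciprocal_scale() maps it onto [0, flows_cnt) with a multiply-and-shift
rather than a modulo. A self-contained sketch (the fixed hash value stands in
for the skb hash):

#include <stdio.h>
#include <stdint.h>

/* Maps a uniform 32-bit value onto [0, n) without a division. */
static uint32_t reciprocal_scale(uint32_t val, uint32_t n)
{
	return (uint32_t)(((uint64_t)val * n) >> 32);
}

int main(void)
{
	uint32_t flows_cnt = 1024;
	uint32_t hash = 0xdeadbeef;	/* stand-in for skb_get_hash_perturb() */

	printf("bucket = %u\n", reciprocal_scale(hash, flows_cnt));
	return 0;
}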
@@ -299,6 +292,7 @@ static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
        [TCA_FQ_CODEL_ECN]      = { .type = NLA_U32 },
        [TCA_FQ_CODEL_FLOWS]    = { .type = NLA_U32 },
        [TCA_FQ_CODEL_QUANTUM]  = { .type = NLA_U32 },
+       [TCA_FQ_CODEL_CE_THRESHOLD] = { .type = NLA_U32 },
 };
 
 static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
@@ -329,6 +323,12 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
                q->cparams.target = (target * NSEC_PER_USEC) >> CODEL_SHIFT;
        }
 
+       if (tb[TCA_FQ_CODEL_CE_THRESHOLD]) {
+               u64 val = nla_get_u32(tb[TCA_FQ_CODEL_CE_THRESHOLD]);
+
+               q->cparams.ce_threshold = (val * NSEC_PER_USEC) >> CODEL_SHIFT;
+       }
+
        if (tb[TCA_FQ_CODEL_INTERVAL]) {
                u64 interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]);
 
@@ -448,6 +448,11 @@ static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
                        q->flows_cnt))
                goto nla_put_failure;
 
+       if (q->cparams.ce_threshold != CODEL_DISABLED_THRESHOLD &&
+           nla_put_u32(skb, TCA_FQ_CODEL_CE_THRESHOLD,
+                       codel_time_to_us(q->cparams.ce_threshold)))
+               goto nla_put_failure;
+
        return nla_nest_end(skb, opts);
 
 nla_put_failure:
@@ -466,6 +471,7 @@ static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
        st.qdisc_stats.drop_overlimit = q->drop_overlimit;
        st.qdisc_stats.ecn_mark = q->cstats.ecn_mark;
        st.qdisc_stats.new_flow_count = q->new_flow_count;
+       st.qdisc_stats.ce_mark = q->cstats.ce_mark;
 
        list_for_each(pos, &q->new_flows)
                st.qdisc_stats.new_flows_len++;
index 634529e0ce6bddc44b48161b6f76bd79af0a2a8e..abb9f2fec28fbd435ff89a17eb7e1937cb4c19b3 100644 (file)
@@ -165,7 +165,8 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                         * if no default DP has been configured. This
                         * allows for DP flows to be left untouched.
                         */
-                       if (skb_queue_len(&sch->q) < qdisc_dev(sch)->tx_queue_len)
+                       if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <=
+                                       sch->limit))
                                return qdisc_enqueue_tail(skb, sch);
                        else
                                goto drop;
@@ -397,7 +398,10 @@ static inline int gred_change_vq(struct Qdisc *sch, int dp,
 
        q->DP = dp;
        q->prio = prio;
-       q->limit = ctl->limit;
+       if (ctl->limit > sch->limit)
+               q->limit = sch->limit;
+       else
+               q->limit = ctl->limit;
 
        if (q->backlog == 0)
                red_end_of_idle_period(&q->vars);
@@ -414,6 +418,7 @@ static const struct nla_policy gred_policy[TCA_GRED_MAX + 1] = {
        [TCA_GRED_STAB]         = { .len = 256 },
        [TCA_GRED_DPS]          = { .len = sizeof(struct tc_gred_sopt) },
        [TCA_GRED_MAX_P]        = { .type = NLA_U32 },
+       [TCA_GRED_LIMIT]        = { .type = NLA_U32 },
 };
 
 static int gred_change(struct Qdisc *sch, struct nlattr *opt)
@@ -433,11 +438,15 @@ static int gred_change(struct Qdisc *sch, struct nlattr *opt)
        if (err < 0)
                return err;
 
-       if (tb[TCA_GRED_PARMS] == NULL && tb[TCA_GRED_STAB] == NULL)
+       if (tb[TCA_GRED_PARMS] == NULL && tb[TCA_GRED_STAB] == NULL) {
+               if (tb[TCA_GRED_LIMIT] != NULL)
+                       sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
                return gred_change_table_def(sch, opt);
+       }
 
        if (tb[TCA_GRED_PARMS] == NULL ||
-           tb[TCA_GRED_STAB] == NULL)
+           tb[TCA_GRED_STAB] == NULL ||
+           tb[TCA_GRED_LIMIT] != NULL)
                return -EINVAL;
 
        max_P = tb[TCA_GRED_MAX_P] ? nla_get_u32(tb[TCA_GRED_MAX_P]) : 0;
@@ -501,6 +510,14 @@ static int gred_init(struct Qdisc *sch, struct nlattr *opt)
        if (tb[TCA_GRED_PARMS] || tb[TCA_GRED_STAB])
                return -EINVAL;
 
+       if (tb[TCA_GRED_LIMIT])
+               sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
+       else {
+               u32 qlen = qdisc_dev(sch)->tx_queue_len ? : 1;
+
+               sch->limit = qlen * psched_mtu(qdisc_dev(sch));
+       }
+
        return gred_change_table_def(sch, tb[TCA_GRED_DPS]);
 }
 
@@ -531,6 +548,9 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
        if (nla_put(skb, TCA_GRED_MAX_P, sizeof(max_p), max_p))
                goto nla_put_failure;
 
+       if (nla_put_u32(skb, TCA_GRED_LIMIT, sch->limit))
+               goto nla_put_failure;
+
        parms = nla_nest_start(skb, TCA_GRED_PARMS);
        if (parms == NULL)
                goto nla_put_failure;
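
With this change GRED's default limit becomes a byte count instead of a
packet count: tx_queue_len packets times the device's psched MTU. A sketch of
the arithmetic with hypothetical numbers (1000-packet queue, 1514-byte
psched_mtu for a 1500-byte-MTU Ethernet device):

#include <stdio.h>

int main(void)
{
	unsigned int tx_queue_len = 1000;	/* hypothetical, in packets */
	unsigned int psched_mtu = 1500 + 14;	/* MTU + Ethernet header */
	unsigned int limit = tx_queue_len * psched_mtu;

	printf("default byte limit = %u\n", limit);	/* 1514000 */
	return 0;
}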
index 15d3aabfe2506c1483a2cdba0fe8425d152d2736..9d15cb6b8cb1f5e8424e96f6245e9dd206d92405 100644 (file)
@@ -9,7 +9,6 @@
 #include <linux/module.h>
 #include <linux/skbuff.h>
 #include <linux/vmalloc.h>
-#include <net/flow_keys.h>
 #include <net/pkt_sched.h>
 #include <net/sock.h>
 
@@ -176,22 +175,6 @@ static u32 hhf_time_stamp(void)
        return jiffies;
 }
 
-static unsigned int skb_hash(const struct hhf_sched_data *q,
-                            const struct sk_buff *skb)
-{
-       struct flow_keys keys;
-       unsigned int hash;
-
-       if (skb->sk && skb->sk->sk_hash)
-               return skb->sk->sk_hash;
-
-       skb_flow_dissect(skb, &keys);
-       hash = jhash_3words((__force u32)keys.dst,
-                           (__force u32)keys.src ^ keys.ip_proto,
-                           (__force u32)keys.ports, q->perturbation);
-       return hash;
-}
-
 /* Looks up a heavy-hitter flow in a chaining list of table T. */
 static struct hh_flow_state *seek_list(const u32 hash,
                                       struct list_head *head,
@@ -280,7 +263,7 @@ static enum wdrr_bucket_idx hhf_classify(struct sk_buff *skb, struct Qdisc *sch)
        }
 
        /* Get hashed flow-id of the skb. */
-       hash = skb_hash(q, skb);
+       hash = skb_get_hash_perturb(skb, q->perturbation);
 
        /* Check if this packet belongs to an already established HH flow. */
        flow_pos = hash & HHF_BIT_MASK;
index 4cdbfb85686a7ee55d71d0a7c1ea5cdd7e789a22..e7c648fa9dc3bfb8c8b4ae0b218d8c162aef83a5 100644 (file)
 #include <linux/list.h>
 #include <linux/skbuff.h>
 #include <linux/rtnetlink.h>
+
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
 
-
-struct ingress_qdisc_data {
-       struct tcf_proto __rcu  *filter_list;
-};
-
-/* ------------------------- Class/flow operations ------------------------- */
-
 static struct Qdisc *ingress_leaf(struct Qdisc *sch, unsigned long arg)
 {
        return NULL;
@@ -49,57 +43,24 @@ static void ingress_walk(struct Qdisc *sch, struct qdisc_walker *walker)
 static struct tcf_proto __rcu **ingress_find_tcf(struct Qdisc *sch,
                                                 unsigned long cl)
 {
-       struct ingress_qdisc_data *p = qdisc_priv(sch);
-
-       return &p->filter_list;
-}
-
-/* --------------------------- Qdisc operations ---------------------------- */
+       struct net_device *dev = qdisc_dev(sch);
 
-static int ingress_enqueue(struct sk_buff *skb, struct Qdisc *sch)
-{
-       struct ingress_qdisc_data *p = qdisc_priv(sch);
-       struct tcf_result res;
-       struct tcf_proto *fl = rcu_dereference_bh(p->filter_list);
-       int result;
-
-       result = tc_classify(skb, fl, &res);
-
-       qdisc_bstats_update(sch, skb);
-       switch (result) {
-       case TC_ACT_SHOT:
-               result = TC_ACT_SHOT;
-               qdisc_qstats_drop(sch);
-               break;
-       case TC_ACT_STOLEN:
-       case TC_ACT_QUEUED:
-               result = TC_ACT_STOLEN;
-               break;
-       case TC_ACT_RECLASSIFY:
-       case TC_ACT_OK:
-               skb->tc_index = TC_H_MIN(res.classid);
-       default:
-               result = TC_ACT_OK;
-               break;
-       }
-
-       return result;
+       return &dev->ingress_cl_list;
 }
 
-/* ------------------------------------------------------------- */
-
 static int ingress_init(struct Qdisc *sch, struct nlattr *opt)
 {
        net_inc_ingress_queue();
+       sch->flags |= TCQ_F_CPUSTATS;
 
        return 0;
 }
 
 static void ingress_destroy(struct Qdisc *sch)
 {
-       struct ingress_qdisc_data *p = qdisc_priv(sch);
+       struct net_device *dev = qdisc_dev(sch);
 
-       tcf_destroy_chain(&p->filter_list);
+       tcf_destroy_chain(&dev->ingress_cl_list);
        net_dec_ingress_queue();
 }
 
@@ -110,6 +71,7 @@ static int ingress_dump(struct Qdisc *sch, struct sk_buff *skb)
        nest = nla_nest_start(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;
+
        return nla_nest_end(skb, nest);
 
 nla_put_failure:
@@ -130,8 +92,6 @@ static const struct Qdisc_class_ops ingress_class_ops = {
 static struct Qdisc_ops ingress_qdisc_ops __read_mostly = {
        .cl_ops         =       &ingress_class_ops,
        .id             =       "ingress",
-       .priv_size      =       sizeof(struct ingress_qdisc_data),
-       .enqueue        =       ingress_enqueue,
        .init           =       ingress_init,
        .destroy        =       ingress_destroy,
        .dump           =       ingress_dump,
@@ -148,6 +108,7 @@ static void __exit ingress_module_exit(void)
        unregister_qdisc(&ingress_qdisc_ops);
 }
 
-module_init(ingress_module_init)
-module_exit(ingress_module_exit)
+module_init(ingress_module_init);
+module_exit(ingress_module_exit);
+
 MODULE_LICENSE("GPL");
index 956ead2cab9ad89f36835039a9b728d24a58ca41..5abd1d9de989e6c9777a225c03e42f8194ebda33 100644 (file)
@@ -440,9 +440,9 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
        if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
                struct Qdisc *rootq = qdisc_root(sch);
                u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
-               q->duplicate = 0;
 
-               qdisc_enqueue_root(skb2, rootq);
+               q->duplicate = 0;
+               rootq->enqueue(skb2, rootq);
                q->duplicate = dupsave;
        }
 
index 5819dd82630d2a126d2a75a3cd2f5d6a9a3534a8..4b815193326c9abae464c05d53609c28c2071b38 100644 (file)
@@ -26,7 +26,6 @@
 #include <net/ip.h>
 #include <net/pkt_sched.h>
 #include <net/inet_ecn.h>
-#include <net/flow_keys.h>
 
 /*
  * SFB uses two B[l][n] : L x N arrays of bins (L levels, N bins per level)
@@ -285,9 +284,9 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
        int i;
        u32 p_min = ~0;
        u32 minqlen = ~0;
-       u32 r, slot, salt, sfbhash;
+       u32 r, sfbhash;
+       u32 slot = q->slot;
        int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
-       struct flow_keys keys;
 
        if (unlikely(sch->q.qlen >= q->limit)) {
                qdisc_qstats_overlimit(sch);
@@ -309,22 +308,17 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
        fl = rcu_dereference_bh(q->filter_list);
        if (fl) {
+               u32 salt;
+
                /* If using external classifiers, get result and record it. */
                if (!sfb_classify(skb, fl, &ret, &salt))
                        goto other_drop;
-               keys.src = salt;
-               keys.dst = 0;
-               keys.ports = 0;
+               sfbhash = jhash_1word(salt, q->bins[slot].perturbation);
        } else {
-               skb_flow_dissect(skb, &keys);
+               sfbhash = skb_get_hash_perturb(skb, q->bins[slot].perturbation);
        }
 
-       slot = q->slot;
 
-       sfbhash = jhash_3words((__force u32)keys.dst,
-                              (__force u32)keys.src,
-                              (__force u32)keys.ports,
-                              q->bins[slot].perturbation);
        if (!sfbhash)
                sfbhash = 1;
        sfb_skb_cb(skb)->hashes[slot] = sfbhash;
@@ -356,10 +350,8 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
        if (unlikely(p_min >= SFB_MAX_PROB)) {
                /* Inelastic flow */
                if (q->double_buffering) {
-                       sfbhash = jhash_3words((__force u32)keys.dst,
-                                              (__force u32)keys.src,
-                                              (__force u32)keys.ports,
-                                              q->bins[slot].perturbation);
+                       sfbhash = skb_get_hash_perturb(skb,
+                           q->bins[slot].perturbation);
                        if (!sfbhash)
                                sfbhash = 1;
                        sfb_skb_cb(skb)->hashes[slot] = sfbhash;
index b877140beda5573b8d2dbe4a701bf24355ba7229..7d14926633601b85c2d281d914fa978c8a038e10 100644 (file)
@@ -23,7 +23,6 @@
 #include <linux/vmalloc.h>
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
-#include <net/flow_keys.h>
 #include <net/red.h>
 
 
@@ -156,30 +155,10 @@ static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index
        return &q->dep[val - SFQ_MAX_FLOWS];
 }
 
-/*
- * In order to be able to quickly rehash our queue when timer changes
- * q->perturbation, we store flow_keys in skb->cb[]
- */
-struct sfq_skb_cb {
-       struct flow_keys        keys;
-};
-
-static inline struct sfq_skb_cb *sfq_skb_cb(const struct sk_buff *skb)
-{
-       qdisc_cb_private_validate(skb, sizeof(struct sfq_skb_cb));
-       return (struct sfq_skb_cb *)qdisc_skb_cb(skb)->data;
-}
-
 static unsigned int sfq_hash(const struct sfq_sched_data *q,
                             const struct sk_buff *skb)
 {
-       const struct flow_keys *keys = &sfq_skb_cb(skb)->keys;
-       unsigned int hash;
-
-       hash = jhash_3words((__force u32)keys->dst,
-                           (__force u32)keys->src ^ keys->ip_proto,
-                           (__force u32)keys->ports, q->perturbation);
-       return hash & (q->divisor - 1);
+       return skb_get_hash_perturb(skb, q->perturbation) & (q->divisor - 1);
 }
 
 static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
@@ -196,10 +175,8 @@ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
                return TC_H_MIN(skb->priority);
 
        fl = rcu_dereference_bh(q->filter_list);
-       if (!fl) {
-               skb_flow_dissect(skb, &sfq_skb_cb(skb)->keys);
+       if (!fl)
                return sfq_hash(q, skb) + 1;
-       }
 
        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
        result = tc_classify(skb, fl, &res);
index 0e4198ee237086e01e546432b38ee49d046837a1..e917d27328ea835419ba3e4c26eae1d7b7fade77 100644 (file)
@@ -331,8 +331,9 @@ out:
 
                rt = (struct rt6_info *)dst;
                t->dst = dst;
-               t->dst_cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
-               pr_debug("rt6_dst:%pI6 rt6_src:%pI6\n", &rt->rt6i_dst.addr,
+               t->dst_cookie = rt6_get_cookie(rt);
+               pr_debug("rt6_dst:%pI6/%d rt6_src:%pI6\n",
+                        &rt->rt6i_dst.addr, rt->rt6i_dst.plen,
                         &fl6->saddr);
        } else {
                t->dst = NULL;
@@ -635,7 +636,7 @@ static struct sock *sctp_v6_create_accept_sk(struct sock *sk,
        struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
        struct sctp6_sock *newsctp6sk;
 
-       newsk = sk_alloc(sock_net(sk), PF_INET6, GFP_KERNEL, sk->sk_prot);
+       newsk = sk_alloc(sock_net(sk), PF_INET6, GFP_KERNEL, sk->sk_prot, 0);
        if (!newsk)
                goto out;
 
index 53b7acde9aa37bf3d4029c459421564d5270f4c0..59e80356672bdf89777265ae1f8c384792dfb98c 100644 (file)
@@ -550,7 +550,7 @@ static struct sock *sctp_v4_create_accept_sk(struct sock *sk,
                                             struct sctp_association *asoc)
 {
        struct sock *newsk = sk_alloc(sock_net(sk), PF_INET, GFP_KERNEL,
-                       sk->sk_prot);
+                       sk->sk_prot, 0);
        struct inet_sock *newinet;
 
        if (!newsk)
index 884e3299769840c3e05c43f1320ba849a10ad73c..9963a0b53a642f4ce600018606d1b8cb9982df9e 100644 (file)
@@ -576,9 +576,6 @@ void sock_release(struct socket *sock)
        if (rcu_dereference_protected(sock->wq, 1)->fasync_list)
                pr_err("%s: fasync list not empty!\n", __func__);
 
-       if (test_bit(SOCK_EXTERNALLY_ALLOCATED, &sock->flags))
-               return;
-
        this_cpu_sub(sockets_in_use, 1);
        if (!sock->file) {
                iput(SOCK_INODE(sock));
@@ -1213,9 +1210,9 @@ int sock_create(int family, int type, int protocol, struct socket **res)
 }
 EXPORT_SYMBOL(sock_create);
 
-int sock_create_kern(int family, int type, int protocol, struct socket **res)
+int sock_create_kern(struct net *net, int family, int type, int protocol, struct socket **res)
 {
-       return __sock_create(&init_net, family, type, protocol, res, 1);
+       return __sock_create(net, family, type, protocol, res, 1);
 }
 EXPORT_SYMBOL(sock_create_kern);
 
index 055453d486683ec19433961db220292e4f60571d..e008057dab463009514fc5714e4c8e68eed4d530 100644 (file)
 #include <linux/mutex.h>
 #include <linux/notifier.h>
 #include <linux/netdevice.h>
+#include <linux/if_bridge.h>
 #include <net/ip_fib.h>
 #include <net/switchdev.h>
 
 /**
- *     netdev_switch_parent_id_get - Get ID of a switch
+ *     switchdev_port_attr_get - Get port attribute
+ *
+ *     @dev: port device
+ *     @attr: attribute to get
+ */
+int switchdev_port_attr_get(struct net_device *dev, struct switchdev_attr *attr)
+{
+       const struct switchdev_ops *ops = dev->switchdev_ops;
+       struct net_device *lower_dev;
+       struct list_head *iter;
+       struct switchdev_attr first = {
+               .id = SWITCHDEV_ATTR_UNDEFINED
+       };
+       int err = -EOPNOTSUPP;
+
+       if (ops && ops->switchdev_port_attr_get)
+               return ops->switchdev_port_attr_get(dev, attr);
+
+       if (attr->flags & SWITCHDEV_F_NO_RECURSE)
+               return err;
+
+       /* Switch device port(s) may be stacked under
+        * bond/team/vlan dev, so recurse down to get attr on
+        * each port.  Return -ENODATA if attr values don't
+        * match across ports.
+        */
+
+       netdev_for_each_lower_dev(dev, lower_dev, iter) {
+               err = switchdev_port_attr_get(lower_dev, attr);
+               if (err)
+                       break;
+               if (first.id == SWITCHDEV_ATTR_UNDEFINED)
+                       first = *attr;
+               else if (memcmp(&first, attr, sizeof(*attr)))
+                       return -ENODATA;
+       }
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(switchdev_port_attr_get);
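
When a port has no switchdev_port_attr_get op, the getter recurses over the
lower devices and insists they all report the same attribute value, returning
-ENODATA otherwise. A userspace sketch of the consistency check over a
hypothetical two-port bond:

#include <errno.h>
#include <stdio.h>
#include <string.h>

struct attr { int id; unsigned int val; };

/* Hypothetical attrs reported by the two switch ports under a bond. */
static const struct attr ports[] = { { 1, 42 }, { 1, 42 } };

static int stacked_attr_get(struct attr *out)
{
	struct attr first = { 0 };
	size_t i;

	for (i = 0; i < sizeof(ports) / sizeof(ports[0]); i++) {
		*out = ports[i];
		if (i == 0)
			first = *out;
		else if (memcmp(&first, out, sizeof(*out)))
			return -ENODATA;	/* ports disagree */
	}
	return 0;
}

int main(void)
{
	struct attr a;

	printf("err = %d\n", stacked_attr_get(&a));	/* 0: ports agree */
	return 0;
}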
+
+static int __switchdev_port_attr_set(struct net_device *dev,
+                                    struct switchdev_attr *attr)
+{
+       const struct switchdev_ops *ops = dev->switchdev_ops;
+       struct net_device *lower_dev;
+       struct list_head *iter;
+       int err = -EOPNOTSUPP;
+
+       if (ops && ops->switchdev_port_attr_set)
+               return ops->switchdev_port_attr_set(dev, attr);
+
+       if (attr->flags & SWITCHDEV_F_NO_RECURSE)
+               return err;
+
+       /* Switch device port(s) may be stacked under
+        * bond/team/vlan dev, so recurse down to set attr on
+        * each port.
+        */
+
+       netdev_for_each_lower_dev(dev, lower_dev, iter) {
+               err = __switchdev_port_attr_set(lower_dev, attr);
+               if (err)
+                       break;
+       }
+
+       return err;
+}
+
+struct switchdev_attr_set_work {
+       struct work_struct work;
+       struct net_device *dev;
+       struct switchdev_attr attr;
+};
+
+static void switchdev_port_attr_set_work(struct work_struct *work)
+{
+       struct switchdev_attr_set_work *asw =
+               container_of(work, struct switchdev_attr_set_work, work);
+       int err;
+
+       rtnl_lock();
+       err = switchdev_port_attr_set(asw->dev, &asw->attr);
+       BUG_ON(err);
+       rtnl_unlock();
+
+       dev_put(asw->dev);
+       kfree(work);
+}
+
+static int switchdev_port_attr_set_defer(struct net_device *dev,
+                                        struct switchdev_attr *attr)
+{
+       struct switchdev_attr_set_work *asw;
+
+       asw = kmalloc(sizeof(*asw), GFP_ATOMIC);
+       if (!asw)
+               return -ENOMEM;
+
+       INIT_WORK(&asw->work, switchdev_port_attr_set_work);
+
+       dev_hold(dev);
+       asw->dev = dev;
+       memcpy(&asw->attr, attr, sizeof(asw->attr));
+
+       schedule_work(&asw->work);
+
+       return 0;
+}
+
+/**
+ *     switchdev_port_attr_set - Set port attribute
+ *
  *     @dev: port device
- *     @psid: switch ID
+ *     @attr: attribute to set
  *
- *     Get ID of a switch this port is part of.
+ *     Use a 2-phase prepare-commit transaction model to ensure
+ *     system is not left in a partially updated state due to
+ *     failure from driver/device.
  */
-int netdev_switch_parent_id_get(struct net_device *dev,
-                               struct netdev_phys_item_id *psid)
+int switchdev_port_attr_set(struct net_device *dev, struct switchdev_attr *attr)
 {
-       const struct swdev_ops *ops = dev->swdev_ops;
+       int err;
+
+       if (!rtnl_is_locked()) {
+               /* Running prepare-commit transaction across stacked
+                * devices requires nothing moves, so if rtnl_lock is
+                * not held, schedule a worker thread to hold rtnl_lock
+                * while setting attr.
+                */
+
+               return switchdev_port_attr_set_defer(dev, attr);
+       }
+
+       /* Phase I: prepare for attr set. Driver/device should fail
+        * here if there are going to be issues in the commit phase,
+        * such as lack of resources or support.  The driver/device
+        * should reserve resources needed for the commit phase here,
+        * but should not commit the attr.
+        */
+
+       attr->trans = SWITCHDEV_TRANS_PREPARE;
+       err = __switchdev_port_attr_set(dev, attr);
+       if (err) {
+               /* Prepare phase failed: abort the transaction.  Any
+                * resources reserved in the prepare phase are
+                * released.
+                */
+
+               attr->trans = SWITCHDEV_TRANS_ABORT;
+               __switchdev_port_attr_set(dev, attr);
+
+               return err;
+       }
+
+       /* Phase II: commit attr set.  This cannot fail as a fault
+        * of driver/device.  If it does, it's a bug in the driver/device
+        * because the driver said everything was OK in phase I.
+        */
+
+       attr->trans = SWITCHDEV_TRANS_COMMIT;
+       err = __switchdev_port_attr_set(dev, attr);
+       BUG_ON(err);
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(switchdev_port_attr_set);
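
switchdev_port_attr_set() drives a two-phase transaction: PREPARE may fail
and must reserve whatever the commit will need, ABORT releases the
reservation, and COMMIT is not allowed to fail. A minimal userspace sketch of
the pattern with a stub driver op:

#include <stdio.h>

enum trans { TRANS_PREPARE, TRANS_ABORT, TRANS_COMMIT };

/* Stub driver op: refuses the prepare phase when out of resources. */
static int port_attr_set(enum trans t, int have_resources)
{
	switch (t) {
	case TRANS_PREPARE:
		return have_resources ? 0 : -1;	/* reserve or refuse */
	case TRANS_ABORT:
		return 0;			/* release reservation */
	case TRANS_COMMIT:
		return 0;			/* must not fail */
	}
	return -1;
}

static int attr_set(int have_resources)
{
	if (port_attr_set(TRANS_PREPARE, have_resources)) {
		/* Prepare failed: abort so reservations are released. */
		port_attr_set(TRANS_ABORT, have_resources);
		return -1;
	}
	return port_attr_set(TRANS_COMMIT, have_resources);
}

int main(void)
{
	printf("ok=%d fail=%d\n", attr_set(1), attr_set(0));
	return 0;
}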
+
+static int __switchdev_port_obj_add(struct net_device *dev,
+                                   struct switchdev_obj *obj)
+{
+       const struct switchdev_ops *ops = dev->switchdev_ops;
+       struct net_device *lower_dev;
+       struct list_head *iter;
+       int err = -EOPNOTSUPP;
+
+       if (ops && ops->switchdev_port_obj_add)
+               return ops->switchdev_port_obj_add(dev, obj);
+
+       /* Switch device port(s) may be stacked under
+        * bond/team/vlan dev, so recurse down to add object on
+        * each port.
+        */
+
+       netdev_for_each_lower_dev(dev, lower_dev, iter) {
+               err = __switchdev_port_obj_add(lower_dev, obj);
+               if (err)
+                       break;
+       }
+
+       return err;
+}
+
+/**
+ *     switchdev_port_obj_add - Add port object
+ *
+ *     @dev: port device
+ *     @obj: object to add
+ *
+ *     Use a 2-phase prepare-commit transaction model to ensure
+ *     system is not left in a partially updated state due to
+ *     failure from driver/device.
+ *
+ *     rtnl_lock must be held.
+ */
+int switchdev_port_obj_add(struct net_device *dev, struct switchdev_obj *obj)
+{
+       int err;
 
-       if (!ops || !ops->swdev_parent_id_get)
-               return -EOPNOTSUPP;
-       return ops->swdev_parent_id_get(dev, psid);
+       ASSERT_RTNL();
+
+       /* Phase I: prepare for obj add. Driver/device should fail
+        * here if there are going to be issues in the commit phase,
+        * such as lack of resources or support.  The driver/device
+        * should reserve resources needed for the commit phase here,
+        * but should not commit the obj.
+        */
+
+       obj->trans = SWITCHDEV_TRANS_PREPARE;
+       err = __switchdev_port_obj_add(dev, obj);
+       if (err) {
+               /* Prepare phase failed: abort the transaction.  Any
+                * resources reserved in the prepare phase are
+                * released.
+                */
+
+               obj->trans = SWITCHDEV_TRANS_ABORT;
+               __switchdev_port_obj_add(dev, obj);
+
+               return err;
+       }
+
+       /* Phase II: commit obj add.  This cannot fail as a fault
+        * of driver/device.  If it does, it's a bug in the driver/device
+        * because the driver said everything was OK in phase I.
+        */
+
+       obj->trans = SWITCHDEV_TRANS_COMMIT;
+       err = __switchdev_port_obj_add(dev, obj);
+       WARN(err, "%s: Commit of object (id=%d) failed.\n", dev->name, obj->id);
+
+       return err;
 }
-EXPORT_SYMBOL_GPL(netdev_switch_parent_id_get);
+EXPORT_SYMBOL_GPL(switchdev_port_obj_add);
 
 /**
- *     netdev_switch_port_stp_update - Notify switch device port of STP
- *                                     state change
+ *     switchdev_port_obj_del - Delete port object
+ *
  *     @dev: port device
- *     @state: port STP state
+ *     @obj: object to delete
+ */
+int switchdev_port_obj_del(struct net_device *dev, struct switchdev_obj *obj)
+{
+       const struct switchdev_ops *ops = dev->switchdev_ops;
+       struct net_device *lower_dev;
+       struct list_head *iter;
+       int err = -EOPNOTSUPP;
+
+       if (ops && ops->switchdev_port_obj_del)
+               return ops->switchdev_port_obj_del(dev, obj);
+
+       /* Switch device port(s) may be stacked under
+        * bond/team/vlan dev, so recurse down to delete object on
+        * each port.
+        */
+
+       netdev_for_each_lower_dev(dev, lower_dev, iter) {
+               err = switchdev_port_obj_del(lower_dev, obj);
+               if (err)
+                       break;
+       }
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(switchdev_port_obj_del);
+
+/**
+ *     switchdev_port_obj_dump - Dump port objects
  *
- *     Notify switch device port of bridge port STP state change.
+ *     @dev: port device
+ *     @obj: object to dump
  */
-int netdev_switch_port_stp_update(struct net_device *dev, u8 state)
+int switchdev_port_obj_dump(struct net_device *dev, struct switchdev_obj *obj)
 {
-       const struct swdev_ops *ops = dev->swdev_ops;
+       const struct switchdev_ops *ops = dev->switchdev_ops;
        struct net_device *lower_dev;
        struct list_head *iter;
        int err = -EOPNOTSUPP;
 
-       if (ops && ops->swdev_port_stp_update)
-               return ops->swdev_port_stp_update(dev, state);
+       if (ops && ops->switchdev_port_obj_dump)
+               return ops->switchdev_port_obj_dump(dev, obj);
+
+       /* Switch device port(s) may be stacked under
+        * bond/team/vlan dev, so recurse down to dump objects on
+        * first port at bottom of stack.
+        */
 
        netdev_for_each_lower_dev(dev, lower_dev, iter) {
-               err = netdev_switch_port_stp_update(lower_dev, state);
-               if (err && err != -EOPNOTSUPP)
-                       return err;
+               err = switchdev_port_obj_dump(lower_dev, obj);
+               break;
        }
 
        return err;
 }
-EXPORT_SYMBOL_GPL(netdev_switch_port_stp_update);
+EXPORT_SYMBOL_GPL(switchdev_port_obj_dump);
 
-static DEFINE_MUTEX(netdev_switch_mutex);
-static RAW_NOTIFIER_HEAD(netdev_switch_notif_chain);
+static DEFINE_MUTEX(switchdev_mutex);
+static RAW_NOTIFIER_HEAD(switchdev_notif_chain);
 
 /**
- *     register_netdev_switch_notifier - Register notifier
+ *     register_switchdev_notifier - Register notifier
  *     @nb: notifier_block
  *
  *     Register switch device notifier. This should be used by code
  *     which needs to monitor events happening in particular device.
  *     Return values are same as for atomic_notifier_chain_register().
  */
-int register_netdev_switch_notifier(struct notifier_block *nb)
+int register_switchdev_notifier(struct notifier_block *nb)
 {
        int err;
 
-       mutex_lock(&netdev_switch_mutex);
-       err = raw_notifier_chain_register(&netdev_switch_notif_chain, nb);
-       mutex_unlock(&netdev_switch_mutex);
+       mutex_lock(&switchdev_mutex);
+       err = raw_notifier_chain_register(&switchdev_notif_chain, nb);
+       mutex_unlock(&switchdev_mutex);
        return err;
 }
-EXPORT_SYMBOL_GPL(register_netdev_switch_notifier);
+EXPORT_SYMBOL_GPL(register_switchdev_notifier);
 
 /**
- *     unregister_netdev_switch_notifier - Unregister notifier
+ *     unregister_switchdev_notifier - Unregister notifier
  *     @nb: notifier_block
  *
  *     Unregister switch device notifier.
  *     Return values are same as for atomic_notifier_chain_unregister().
  */
-int unregister_netdev_switch_notifier(struct notifier_block *nb)
+int unregister_switchdev_notifier(struct notifier_block *nb)
 {
        int err;
 
-       mutex_lock(&netdev_switch_mutex);
-       err = raw_notifier_chain_unregister(&netdev_switch_notif_chain, nb);
-       mutex_unlock(&netdev_switch_mutex);
+       mutex_lock(&switchdev_mutex);
+       err = raw_notifier_chain_unregister(&switchdev_notif_chain, nb);
+       mutex_unlock(&switchdev_mutex);
        return err;
 }
-EXPORT_SYMBOL_GPL(unregister_netdev_switch_notifier);
+EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);
 
 /**
- *     call_netdev_switch_notifiers - Call notifiers
+ *     call_switchdev_notifiers - Call notifiers
  *     @val: value passed unmodified to notifier function
  *     @dev: port device
  *     @info: notifier information data
@@ -114,146 +376,387 @@ EXPORT_SYMBOL_GPL(unregister_netdev_switch_notifier);
  *     when it needs to propagate hardware event.
  *     Return values are same as for atomic_notifier_call_chain().
  */
-int call_netdev_switch_notifiers(unsigned long val, struct net_device *dev,
-                                struct netdev_switch_notifier_info *info)
+int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
+                            struct switchdev_notifier_info *info)
 {
        int err;
 
        info->dev = dev;
-       mutex_lock(&netdev_switch_mutex);
-       err = raw_notifier_call_chain(&netdev_switch_notif_chain, val, info);
-       mutex_unlock(&netdev_switch_mutex);
+       mutex_lock(&switchdev_mutex);
+       err = raw_notifier_call_chain(&switchdev_notif_chain, val, info);
+       mutex_unlock(&switchdev_mutex);
        return err;
 }
-EXPORT_SYMBOL_GPL(call_netdev_switch_notifiers);
+EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
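
A minimal sketch of a consumer of this chain; the foo_* names are hypothetical, and since the chain is protected by switchdev_mutex, handlers run in process context:

static int foo_switchdev_event(struct notifier_block *nb,
			       unsigned long event, void *ptr)
{
	struct switchdev_notifier_info *info = ptr;

	/* info->dev is filled in by call_switchdev_notifiers() */
	netdev_dbg(info->dev, "switchdev event %lu\n", event);
	return NOTIFY_DONE;
}

static struct notifier_block foo_switchdev_nb = {
	.notifier_call = foo_switchdev_event,
};

/* At driver init/exit:
 *	register_switchdev_notifier(&foo_switchdev_nb);
 *	unregister_switchdev_notifier(&foo_switchdev_nb);
 */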
 
 /**
- *     netdev_switch_port_bridge_setlink - Notify switch device port of bridge
- *     port attributes
+ *     switchdev_port_bridge_getlink - Get bridge port attributes
  *
  *     @dev: port device
- *     @nlh: netlink msg with bridge port attributes
- *     @flags: bridge setlink flags
  *
- *     Notify switch device port of bridge port attributes
+ *     Called for SELF on rtnl_bridge_getlink to get bridge port
+ *     attributes.
  */
-int netdev_switch_port_bridge_setlink(struct net_device *dev,
-                                     struct nlmsghdr *nlh, u16 flags)
+int switchdev_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+                                 struct net_device *dev, u32 filter_mask,
+                                 int nlflags)
 {
-       const struct net_device_ops *ops = dev->netdev_ops;
+       struct switchdev_attr attr = {
+               .id = SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS,
+       };
+       u16 mode = BRIDGE_MODE_UNDEF;
+       u32 mask = BR_LEARNING | BR_LEARNING_SYNC;
+       int err;
 
-       if (!(dev->features & NETIF_F_HW_SWITCH_OFFLOAD))
-               return 0;
+       err = switchdev_port_attr_get(dev, &attr);
+       if (err)
+               return err;
+
+       return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode,
+                                      attr.u.brport_flags, mask, nlflags);
+}
+EXPORT_SYMBOL_GPL(switchdev_port_bridge_getlink);
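
The driver half that makes this work is a switchdev_port_attr_get implementation reporting the port's bridge flags; a sketch, with foo_port and its brport_flags field illustrative:

static int foo_port_attr_get(struct net_device *dev,
			     struct switchdev_attr *attr)
{
	struct foo_port *port = netdev_priv(dev);

	switch (attr->id) {
	case SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS:
		/* Report only the flags the hardware actually honors. */
		attr->u.brport_flags = port->brport_flags &
				       (BR_LEARNING | BR_LEARNING_SYNC);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}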
+
+static int switchdev_port_br_setflag(struct net_device *dev,
+                                    struct nlattr *nlattr,
+                                    unsigned long brport_flag)
+{
+       struct switchdev_attr attr = {
+               .id = SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS,
+       };
+       u8 flag = nla_get_u8(nlattr);
+       int err;
+
+       err = switchdev_port_attr_get(dev, &attr);
+       if (err)
+               return err;
+
+       if (flag)
+               attr.u.brport_flags |= brport_flag;
+       else
+               attr.u.brport_flags &= ~brport_flag;
+
+       return switchdev_port_attr_set(dev, &attr);
+}
+
+static const struct nla_policy
+switchdev_port_bridge_policy[IFLA_BRPORT_MAX + 1] = {
+       [IFLA_BRPORT_STATE]             = { .type = NLA_U8 },
+       [IFLA_BRPORT_COST]              = { .type = NLA_U32 },
+       [IFLA_BRPORT_PRIORITY]          = { .type = NLA_U16 },
+       [IFLA_BRPORT_MODE]              = { .type = NLA_U8 },
+       [IFLA_BRPORT_GUARD]             = { .type = NLA_U8 },
+       [IFLA_BRPORT_PROTECT]           = { .type = NLA_U8 },
+       [IFLA_BRPORT_FAST_LEAVE]        = { .type = NLA_U8 },
+       [IFLA_BRPORT_LEARNING]          = { .type = NLA_U8 },
+       [IFLA_BRPORT_LEARNING_SYNC]     = { .type = NLA_U8 },
+       [IFLA_BRPORT_UNICAST_FLOOD]     = { .type = NLA_U8 },
+};
+
+static int switchdev_port_br_setlink_protinfo(struct net_device *dev,
+                                             struct nlattr *protinfo)
+{
+       struct nlattr *attr;
+       int rem;
+       int err;
+
+       err = nla_validate_nested(protinfo, IFLA_BRPORT_MAX,
+                                 switchdev_port_bridge_policy);
+       if (err)
+               return err;
+
+       nla_for_each_nested(attr, protinfo, rem) {
+               switch (nla_type(attr)) {
+               case IFLA_BRPORT_LEARNING:
+                       err = switchdev_port_br_setflag(dev, attr,
+                                                       BR_LEARNING);
+                       break;
+               case IFLA_BRPORT_LEARNING_SYNC:
+                       err = switchdev_port_br_setflag(dev, attr,
+                                                       BR_LEARNING_SYNC);
+                       break;
+               default:
+                       err = -EOPNOTSUPP;
+                       break;
+               }
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
+
+static int switchdev_port_br_afspec(struct net_device *dev,
+                                   struct nlattr *afspec,
+                                   int (*f)(struct net_device *dev,
+                                            struct switchdev_obj *obj))
+{
+       struct nlattr *attr;
+       struct bridge_vlan_info *vinfo;
+       struct switchdev_obj obj = {
+               .id = SWITCHDEV_OBJ_PORT_VLAN,
+       };
+       struct switchdev_obj_vlan *vlan = &obj.u.vlan;
+       int rem;
+       int err;
 
-       if (!ops->ndo_bridge_setlink)
-               return -EOPNOTSUPP;
+       nla_for_each_nested(attr, afspec, rem) {
+               if (nla_type(attr) != IFLA_BRIDGE_VLAN_INFO)
+                       continue;
+               if (nla_len(attr) != sizeof(struct bridge_vlan_info))
+                       return -EINVAL;
+               vinfo = nla_data(attr);
+               vlan->flags = vinfo->flags;
+               if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
+                       if (vlan->vid_start)
+                               return -EINVAL;
+                       vlan->vid_start = vinfo->vid;
+               } else if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_END) {
+                       if (!vlan->vid_start)
+                               return -EINVAL;
+                       vlan->vid_end = vinfo->vid;
+                       if (vlan->vid_end <= vlan->vid_start)
+                               return -EINVAL;
+                       err = f(dev, &obj);
+                       if (err)
+                               return err;
+                       memset(vlan, 0, sizeof(*vlan));
+               } else {
+                       if (vlan->vid_start)
+                               return -EINVAL;
+                       vlan->vid_start = vinfo->vid;
+                       vlan->vid_end = vinfo->vid;
+                       err = f(dev, &obj);
+                       if (err)
+                               return err;
+                       memset(vlan, 0, sizeof(*vlan));
+               }
+       }
 
-       return ops->ndo_bridge_setlink(dev, nlh, flags);
+       return 0;
 }
-EXPORT_SYMBOL_GPL(netdev_switch_port_bridge_setlink);
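
For illustration, here is what switchdev_port_br_afspec() expects on the wire for a range add such as "bridge vlan add vid 10-20": two IFLA_BRIDGE_VLAN_INFO attributes bracketing the range, which collapse into one object with vid_start = 10, vid_end = 20 (the VIDs here are example values):

struct bridge_vlan_info begin = {
	.flags	= BRIDGE_VLAN_INFO_RANGE_BEGIN,
	.vid	= 10,
};
struct bridge_vlan_info end = {
	.flags	= BRIDGE_VLAN_INFO_RANGE_END,
	.vid	= 20,
};
/* A single-VID add is one attribute with neither range flag set;
 * the resulting object then has vid_start == vid_end.
 */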
 
 /**
- *     netdev_switch_port_bridge_dellink - Notify switch device port of bridge
- *     port attribute delete
+ *     switchdev_port_bridge_setlink - Set bridge port attributes
  *
  *     @dev: port device
- *     @nlh: netlink msg with bridge port attributes
- *     @flags: bridge setlink flags
+ *     @nlh: netlink header
+ *     @flags: netlink flags
  *
- *     Notify switch device port of bridge port attribute delete
+ *     Called for SELF on rtnl_bridge_setlink to set bridge port
+ *     attributes.
  */
-int netdev_switch_port_bridge_dellink(struct net_device *dev,
-                                     struct nlmsghdr *nlh, u16 flags)
+int switchdev_port_bridge_setlink(struct net_device *dev,
+                                 struct nlmsghdr *nlh, u16 flags)
 {
-       const struct net_device_ops *ops = dev->netdev_ops;
+       struct nlattr *protinfo;
+       struct nlattr *afspec;
+       int err = 0;
 
-       if (!(dev->features & NETIF_F_HW_SWITCH_OFFLOAD))
-               return 0;
+       protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
+                                  IFLA_PROTINFO);
+       if (protinfo) {
+               err = switchdev_port_br_setlink_protinfo(dev, protinfo);
+               if (err)
+                       return err;
+       }
 
-       if (!ops->ndo_bridge_dellink)
-               return -EOPNOTSUPP;
+       afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
+                                IFLA_AF_SPEC);
+       if (afspec)
+               err = switchdev_port_br_afspec(dev, afspec,
+                                              switchdev_port_obj_add);
 
-       return ops->ndo_bridge_dellink(dev, nlh, flags);
+       return err;
 }
-EXPORT_SYMBOL_GPL(netdev_switch_port_bridge_dellink);
+EXPORT_SYMBOL_GPL(switchdev_port_bridge_setlink);
 
 /**
- *     ndo_dflt_netdev_switch_port_bridge_setlink - default ndo bridge setlink
- *                                                  op for master devices
+ *     switchdev_port_bridge_dellink - Delete bridge port attributes
  *
  *     @dev: port device
- *     @nlh: netlink msg with bridge port attributes
- *     @flags: bridge setlink flags
+ *     @nlh: netlink header
+ *     @flags: netlink flags
  *
- *     Notify master device slaves of bridge port attributes
+ *     Called for SELF on rtnl_bridge_dellink to delete bridge port
+ *     attributes.
  */
-int ndo_dflt_netdev_switch_port_bridge_setlink(struct net_device *dev,
-                                              struct nlmsghdr *nlh, u16 flags)
+int switchdev_port_bridge_dellink(struct net_device *dev,
+                                 struct nlmsghdr *nlh, u16 flags)
 {
-       struct net_device *lower_dev;
-       struct list_head *iter;
-       int ret = 0, err = 0;
+       struct nlattr *afspec;
 
-       if (!(dev->features & NETIF_F_HW_SWITCH_OFFLOAD))
-               return ret;
+       afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
+                                IFLA_AF_SPEC);
+       if (afspec)
+               return switchdev_port_br_afspec(dev, afspec,
+                                               switchdev_port_obj_del);
 
-       netdev_for_each_lower_dev(dev, lower_dev, iter) {
-               err = netdev_switch_port_bridge_setlink(lower_dev, nlh, flags);
-               if (err && err != -EOPNOTSUPP)
-                       ret = err;
-       }
+       return 0;
+}
+EXPORT_SYMBOL_GPL(switchdev_port_bridge_dellink);
 
-       return ret;
+/**
+ *     switchdev_port_fdb_add - Add FDB (MAC/VLAN) entry to port
+ *
+ *     @ndm: netlink header
+ *     @tb: netlink attributes
+ *     @dev: port device
+ *     @addr: MAC address to add
+ *     @vid: VLAN to add
+ *     @nlm_flags: netlink flags passed in (NLM_F_*)
+ *
+ *     Add FDB entry to switch device.
+ */
+int switchdev_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+                          struct net_device *dev, const unsigned char *addr,
+                          u16 vid, u16 nlm_flags)
+{
+       struct switchdev_obj obj = {
+               .id = SWITCHDEV_OBJ_PORT_FDB,
+               .u.fdb = {
+                       .addr = addr,
+                       .vid = vid,
+               },
+       };
+
+       return switchdev_port_obj_add(dev, &obj);
 }
-EXPORT_SYMBOL_GPL(ndo_dflt_netdev_switch_port_bridge_setlink);
+EXPORT_SYMBOL_GPL(switchdev_port_fdb_add);
 
 /**
- *     ndo_dflt_netdev_switch_port_bridge_dellink - default ndo bridge dellink
- *                                                  op for master devices
+ *     switchdev_port_fdb_del - Delete FDB (MAC/VLAN) entry from port
  *
+ *     @ndm: netlink header
+ *     @tb: netlink attributes
  *     @dev: port device
- *     @nlh: netlink msg with bridge port attributes
- *     @flags: bridge dellink flags
+ *     @addr: MAC address to delete
+ *     @vid: VLAN to delete
  *
- *     Notify master device slaves of bridge port attribute deletes
+ *     Delete FDB entry from switch device.
  */
-int ndo_dflt_netdev_switch_port_bridge_dellink(struct net_device *dev,
-                                              struct nlmsghdr *nlh, u16 flags)
+int switchdev_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
+                          struct net_device *dev, const unsigned char *addr,
+                          u16 vid)
 {
-       struct net_device *lower_dev;
-       struct list_head *iter;
-       int ret = 0, err = 0;
+       struct switchdev_obj obj = {
+               .id = SWITCHDEV_OBJ_PORT_FDB,
+               .u.fdb = {
+                       .addr = addr,
+                       .vid = vid,
+               },
+       };
+
+       return switchdev_port_obj_del(dev, &obj);
+}
+EXPORT_SYMBOL_GPL(switchdev_port_fdb_del);
+
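Since their argument lists mirror the corresponding ndo_fdb_* hooks, a driver can wire these helpers (together with switchdev_port_fdb_dump below) straight into its net_device_ops; a sketch, with foo_port_netdev_ops hypothetical:

static const struct net_device_ops foo_port_netdev_ops = {
	/* ... */
	.ndo_fdb_add		= switchdev_port_fdb_add,
	.ndo_fdb_del		= switchdev_port_fdb_del,
	.ndo_fdb_dump		= switchdev_port_fdb_dump,
};
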
+struct switchdev_fdb_dump {
+       struct switchdev_obj obj;
+       struct sk_buff *skb;
+       struct netlink_callback *cb;
+       struct net_device *filter_dev;
+       int idx;
+};
+
+static int switchdev_port_fdb_dump_cb(struct net_device *dev,
+                                     struct switchdev_obj *obj)
+{
+       struct switchdev_fdb_dump *dump =
+               container_of(obj, struct switchdev_fdb_dump, obj);
+       u32 portid = NETLINK_CB(dump->cb->skb).portid;
+       u32 seq = dump->cb->nlh->nlmsg_seq;
+       struct nlmsghdr *nlh;
+       struct ndmsg *ndm;
+       struct net_device *master = netdev_master_upper_dev_get(dev);
+
+       if (dump->idx < dump->cb->args[0])
+               goto skip;
+
+       if (master && dump->filter_dev != master)
+               goto skip;
+
+       nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
+                       sizeof(*ndm), NLM_F_MULTI);
+       if (!nlh)
+               return -EMSGSIZE;
+
+       ndm = nlmsg_data(nlh);
+       ndm->ndm_family  = AF_BRIDGE;
+       ndm->ndm_pad1    = 0;
+       ndm->ndm_pad2    = 0;
+       ndm->ndm_flags   = NTF_SELF;
+       ndm->ndm_type    = 0;
+       ndm->ndm_ifindex = dev->ifindex;
+       ndm->ndm_state   = NUD_REACHABLE;
+
+       if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, obj->u.fdb.addr))
+               goto nla_put_failure;
+
+       if (obj->u.fdb.vid && nla_put_u16(dump->skb, NDA_VLAN, obj->u.fdb.vid))
+               goto nla_put_failure;
+
+       nlmsg_end(dump->skb, nlh);
+
+skip:
+       dump->idx++;
+       return 0;
+
+nla_put_failure:
+       nlmsg_cancel(dump->skb, nlh);
+       return -EMSGSIZE;
+}
 
-       if (!(dev->features & NETIF_F_HW_SWITCH_OFFLOAD))
-               return ret;
+/**
+ *     switchdev_port_fdb_dump - Dump port FDB (MAC/VLAN) entries
+ *
+ *     @skb: netlink skb
+ *     @cb: netlink callback
+ *     @dev: port device
+ *     @filter_dev: filter device
+ *     @idx: index to start dumping at
+ *
+ *     Dump FDB entries from switch device.
+ */
+int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
+                           struct net_device *dev,
+                           struct net_device *filter_dev, int idx)
+{
+       struct switchdev_fdb_dump dump = {
+               .obj = {
+                       .id = SWITCHDEV_OBJ_PORT_FDB,
+                       .cb = switchdev_port_fdb_dump_cb,
+               },
+               .skb = skb,
+               .cb = cb,
+               .filter_dev = filter_dev,
+               .idx = idx,
+       };
+       int err;
 
-       netdev_for_each_lower_dev(dev, lower_dev, iter) {
-               err = netdev_switch_port_bridge_dellink(lower_dev, nlh, flags);
-               if (err && err != -EOPNOTSUPP)
-                       ret = err;
-       }
+       err = switchdev_port_obj_dump(dev, &dump.obj);
+       if (err)
+               return err;
 
-       return ret;
+       return dump.idx;
 }
-EXPORT_SYMBOL_GPL(ndo_dflt_netdev_switch_port_bridge_dellink);
+EXPORT_SYMBOL_GPL(switchdev_port_fdb_dump);
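
On the driver side, the obj_dump op walks the hardware FDB and invokes obj->cb (here switchdev_port_fdb_dump_cb) once per entry. A sketch, where foo_port, its fdb_list, and foo_fdb_entry (with addr[ETH_ALEN], vid and list fields) are all illustrative:

static int foo_port_obj_dump(struct net_device *dev,
			     struct switchdev_obj *obj)
{
	struct foo_port *port = netdev_priv(dev);
	struct foo_fdb_entry *entry;
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_PORT_FDB:
		list_for_each_entry(entry, &port->fdb_list, list) {
			obj->u.fdb.addr = entry->addr;
			obj->u.fdb.vid = entry->vid;
			err = obj->cb(dev, obj);
			if (err)
				break;
		}
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}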
 
-static struct net_device *netdev_switch_get_lowest_dev(struct net_device *dev)
+static struct net_device *switchdev_get_lowest_dev(struct net_device *dev)
 {
-       const struct swdev_ops *ops = dev->swdev_ops;
+       const struct switchdev_ops *ops = dev->switchdev_ops;
        struct net_device *lower_dev;
        struct net_device *port_dev;
        struct list_head *iter;
 
        /* Recursively search down until we find a sw port dev.
-        * (A sw port dev supports swdev_parent_id_get).
+        * (A sw port dev supports switchdev_port_attr_get).
         */
 
-       if (dev->features & NETIF_F_HW_SWITCH_OFFLOAD &&
-           ops && ops->swdev_parent_id_get)
+       if (ops && ops->switchdev_port_attr_get)
                return dev;
 
        netdev_for_each_lower_dev(dev, lower_dev, iter) {
-               port_dev = netdev_switch_get_lowest_dev(lower_dev);
+               port_dev = switchdev_get_lowest_dev(lower_dev);
                if (port_dev)
                        return port_dev;
        }
@@ -261,10 +764,12 @@ static struct net_device *netdev_switch_get_lowest_dev(struct net_device *dev)
        return NULL;
 }
 
-static struct net_device *netdev_switch_get_dev_by_nhs(struct fib_info *fi)
+static struct net_device *switchdev_get_dev_by_nhs(struct fib_info *fi)
 {
-       struct netdev_phys_item_id psid;
-       struct netdev_phys_item_id prev_psid;
+       struct switchdev_attr attr = {
+               .id = SWITCHDEV_ATTR_PORT_PARENT_ID,
+       };
+       struct switchdev_attr prev_attr;
        struct net_device *dev = NULL;
        int nhsel;
 
@@ -276,28 +781,29 @@ static struct net_device *netdev_switch_get_dev_by_nhs(struct fib_info *fi)
                if (!nh->nh_dev)
                        return NULL;
 
-               dev = netdev_switch_get_lowest_dev(nh->nh_dev);
+               dev = switchdev_get_lowest_dev(nh->nh_dev);
                if (!dev)
                        return NULL;
 
-               if (netdev_switch_parent_id_get(dev, &psid))
+               if (switchdev_port_attr_get(dev, &attr))
                        return NULL;
 
                if (nhsel > 0) {
-                       if (prev_psid.id_len != psid.id_len)
+                       if (prev_attr.u.ppid.id_len != attr.u.ppid.id_len)
                                return NULL;
-                       if (memcmp(prev_psid.id, psid.id, psid.id_len))
+                       if (memcmp(prev_attr.u.ppid.id, attr.u.ppid.id,
+                                  attr.u.ppid.id_len))
                                return NULL;
                }
 
-               prev_psid = psid;
+               prev_attr = attr;
        }
 
        return dev;
 }
 
 /**
- *     netdev_switch_fib_ipv4_add - Add IPv4 route entry to switch
+ *     switchdev_fib_ipv4_add - Add/modify switch IPv4 route entry
  *
  *     @dst: route's IPv4 destination address
  *     @dst_len: destination address length (prefix length)
@@ -307,13 +813,24 @@ static struct net_device *netdev_switch_get_dev_by_nhs(struct fib_info *fi)
  *     @nlflags: netlink flags passed in (NLM_F_*)
  *     @tb_id: route table ID
  *
- *     Add IPv4 route entry to switch device.
+ *     Add/modify switch IPv4 route entry.
  */
-int netdev_switch_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
-                              u8 tos, u8 type, u32 nlflags, u32 tb_id)
+int switchdev_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
+                          u8 tos, u8 type, u32 nlflags, u32 tb_id)
 {
+       struct switchdev_obj fib_obj = {
+               .id = SWITCHDEV_OBJ_IPV4_FIB,
+               .u.ipv4_fib = {
+                       .dst = dst,
+                       .dst_len = dst_len,
+                       .fi = fi,
+                       .tos = tos,
+                       .type = type,
+                       .nlflags = nlflags,
+                       .tb_id = tb_id,
+               },
+       };
        struct net_device *dev;
-       const struct swdev_ops *ops;
        int err = 0;
 
        /* Don't offload route if using custom ip rules or if
@@ -328,25 +845,20 @@ int netdev_switch_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
        if (fi->fib_net->ipv4.fib_offload_disabled)
                return 0;
 
-       dev = netdev_switch_get_dev_by_nhs(fi);
+       dev = switchdev_get_dev_by_nhs(fi);
        if (!dev)
                return 0;
-       ops = dev->swdev_ops;
-
-       if (ops->swdev_fib_ipv4_add) {
-               err = ops->swdev_fib_ipv4_add(dev, htonl(dst), dst_len,
-                                             fi, tos, type, nlflags,
-                                             tb_id);
-               if (!err)
-                       fi->fib_flags |= RTNH_F_OFFLOAD;
-       }
+
+       err = switchdev_port_obj_add(dev, &fib_obj);
+       if (!err)
+               fi->fib_flags |= RTNH_F_OFFLOAD;
 
        return err;
 }
-EXPORT_SYMBOL_GPL(netdev_switch_fib_ipv4_add);
+EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_add);
 
 /**
- *     netdev_switch_fib_ipv4_del - Delete IPv4 route entry from switch
+ *     switchdev_fib_ipv4_del - Delete IPv4 route entry from switch
  *
  *     @dst: route's IPv4 destination address
  *     @dst_len: destination address length (prefix length)
@@ -357,38 +869,45 @@ EXPORT_SYMBOL_GPL(netdev_switch_fib_ipv4_add);
  *
  *     Delete IPv4 route entry from switch device.
  */
-int netdev_switch_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
-                              u8 tos, u8 type, u32 tb_id)
+int switchdev_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
+                          u8 tos, u8 type, u32 tb_id)
 {
+       struct switchdev_obj fib_obj = {
+               .id = SWITCHDEV_OBJ_IPV4_FIB,
+               .u.ipv4_fib = {
+                       .dst = dst,
+                       .dst_len = dst_len,
+                       .fi = fi,
+                       .tos = tos,
+                       .type = type,
+                       .nlflags = 0,
+                       .tb_id = tb_id,
+               },
+       };
        struct net_device *dev;
-       const struct swdev_ops *ops;
        int err = 0;
 
        if (!(fi->fib_flags & RTNH_F_OFFLOAD))
                return 0;
 
-       dev = netdev_switch_get_dev_by_nhs(fi);
+       dev = switchdev_get_dev_by_nhs(fi);
        if (!dev)
                return 0;
-       ops = dev->swdev_ops;
 
-       if (ops->swdev_fib_ipv4_del) {
-               err = ops->swdev_fib_ipv4_del(dev, htonl(dst), dst_len,
-                                             fi, tos, type, tb_id);
-               if (!err)
-                       fi->fib_flags &= ~RTNH_F_OFFLOAD;
-       }
+       err = switchdev_port_obj_del(dev, &fib_obj);
+       if (!err)
+               fi->fib_flags &= ~RTNH_F_OFFLOAD;
 
        return err;
 }
-EXPORT_SYMBOL_GPL(netdev_switch_fib_ipv4_del);
+EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_del);
 
 /**
- *     netdev_switch_fib_ipv4_abort - Abort an IPv4 FIB operation
+ *     switchdev_fib_ipv4_abort - Abort an IPv4 FIB operation
  *
  *     @fi: route FIB info structure
  */
-void netdev_switch_fib_ipv4_abort(struct fib_info *fi)
+void switchdev_fib_ipv4_abort(struct fib_info *fi)
 {
        /* There was a problem installing this route to the offload
         * device.  For now, until we come up with more refined
@@ -401,4 +920,4 @@ void netdev_switch_fib_ipv4_abort(struct fib_info *fi)
        fib_flush_external(fi->fib_net);
        fi->fib_net->ipv4.fib_offload_disabled = true;
 }
-EXPORT_SYMBOL_GPL(netdev_switch_fib_ipv4_abort);
+EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_abort);
index ba7daa864d44471ad6c57d630bcaa4379aabea0c..48fd3b5a73fbaf934178c444cbba07aa1a0f5b8c 100644 (file)
 #include "addr.h"
 #include "core.h"
 
-u32 tipc_own_addr(struct net *net)
-{
-       struct tipc_net *tn = net_generic(net, tipc_net_id);
-
-       return tn->own_addr;
-}
-
 /**
  * in_own_cluster - test for cluster inclusion; <0.0.0> always matches
  */
index 7ba6d5c8ae405727721ba8ace46c7051938163ef..93f7c983be3322237a8cf20985de1131a21f417f 100644 (file)
 #include <linux/tipc.h>
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
+#include "core.h"
 
 #define TIPC_ZONE_MASK         0xff000000u
 #define TIPC_CLUSTER_MASK      0xfffff000u
 
+static inline u32 tipc_own_addr(struct net *net)
+{
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+
+       return tn->own_addr;
+}
+
 static inline u32 tipc_zone_mask(u32 addr)
 {
        return addr & TIPC_ZONE_MASK;
index c5cbdcb1f0b561a22d2f5537f5cca0a80ee36486..4906ca3c0f3a576a529eacb26631f8585291ae40 100644 (file)
@@ -115,19 +115,15 @@ static void bclink_set_last_sent(struct net *net)
 {
        struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_link *bcl = tn->bcl;
-       struct sk_buff *skb = skb_peek(&bcl->backlogq);
 
-       if (skb)
-               bcl->fsm_msg_cnt = mod(buf_seqno(skb) - 1);
-       else
-               bcl->fsm_msg_cnt = mod(bcl->next_out_no - 1);
+       bcl->silent_intv_cnt = mod(bcl->snd_nxt - 1);
 }
 
 u32 tipc_bclink_get_last_sent(struct net *net)
 {
        struct tipc_net *tn = net_generic(net, tipc_net_id);
 
-       return tn->bcl->fsm_msg_cnt;
+       return tn->bcl->silent_intv_cnt;
 }
 
 static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
@@ -212,16 +208,16 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
                 * or both sent and unsent messages (otherwise)
                 */
                if (tn->bclink->bcast_nodes.count)
-                       acked = tn->bcl->fsm_msg_cnt;
+                       acked = tn->bcl->silent_intv_cnt;
                else
-                       acked = tn->bcl->next_out_no;
+                       acked = tn->bcl->snd_nxt;
        } else {
                /*
                 * Bail out if specified sequence number does not correspond
                 * to a message that has been sent and not yet acknowledged
                 */
                if (less(acked, buf_seqno(skb)) ||
-                   less(tn->bcl->fsm_msg_cnt, acked) ||
+                   less(tn->bcl->silent_intv_cnt, acked) ||
                    less_eq(acked, n_ptr->bclink.acked))
                        goto exit;
        }
@@ -803,9 +799,9 @@ int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
                goto attr_msg_full;
        if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
                goto attr_msg_full;
-       if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, bcl->next_in_no))
+       if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, bcl->rcv_nxt))
                goto attr_msg_full;
-       if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, bcl->next_out_no))
+       if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, bcl->snd_nxt))
                goto attr_msg_full;
 
        prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
@@ -866,6 +862,27 @@ int tipc_bclink_set_queue_limits(struct net *net, u32 limit)
        return 0;
 }
 
+int tipc_nl_bc_link_set(struct net *net, struct nlattr *attrs[])
+{
+       int err;
+       u32 win;
+       struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
+
+       if (!attrs[TIPC_NLA_LINK_PROP])
+               return -EINVAL;
+
+       err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], props);
+       if (err)
+               return err;
+
+       if (!props[TIPC_NLA_PROP_WIN])
+               return -EOPNOTSUPP;
+
+       win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
+
+       return tipc_bclink_set_queue_limits(net, win);
+}
+
 int tipc_bclink_init(struct net *net)
 {
        struct tipc_net *tn = net_generic(net, tipc_net_id);
@@ -893,7 +910,7 @@ int tipc_bclink_init(struct net *net)
        __skb_queue_head_init(&bcl->backlogq);
        __skb_queue_head_init(&bcl->deferdq);
        skb_queue_head_init(&bcl->wakeupq);
-       bcl->next_out_no = 1;
+       bcl->snd_nxt = 1;
        spin_lock_init(&bclink->node.lock);
        __skb_queue_head_init(&bclink->arrvq);
        skb_queue_head_init(&bclink->inputq);
index 4bdc12277d33ff8eb382b755dd1323424d9d43a2..3c290a48f72037ece5eddfb55c55d501e7f61e67 100644 (file)
@@ -131,6 +131,7 @@ uint  tipc_bclink_get_mtu(void);
 int tipc_bclink_xmit(struct net *net, struct sk_buff_head *list);
 void tipc_bclink_wakeup_users(struct net *net);
 int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg);
+int tipc_nl_bc_link_set(struct net *net, struct nlattr *attrs[]);
 void tipc_bclink_input(struct net *net);
 
 #endif
index 70e3dacbf84ab7a899298fca1fdcb2e77fd0c2d5..00bc0e6205326025212a85e1110bab48c208e73a 100644 (file)
@@ -71,8 +71,7 @@ static const struct nla_policy tipc_nl_media_policy[TIPC_NLA_MEDIA_MAX + 1] = {
        [TIPC_NLA_MEDIA_PROP]           = { .type = NLA_NESTED }
 };
 
-static void bearer_disable(struct net *net, struct tipc_bearer *b_ptr,
-                          bool shutting_down);
+static void bearer_disable(struct net *net, struct tipc_bearer *b_ptr);
 
 /**
  * tipc_media_find - locates specified media object by name
@@ -324,7 +323,7 @@ restart:
 
        res = tipc_disc_create(net, b_ptr, &b_ptr->bcast_addr);
        if (res) {
-               bearer_disable(net, b_ptr, false);
+               bearer_disable(net, b_ptr);
                pr_warn("Bearer <%s> rejected, discovery object creation failed\n",
                        name);
                return -EINVAL;
@@ -344,7 +343,7 @@ restart:
 static int tipc_reset_bearer(struct net *net, struct tipc_bearer *b_ptr)
 {
        pr_info("Resetting bearer <%s>\n", b_ptr->name);
-       tipc_link_reset_list(net, b_ptr->identity);
+       tipc_link_delete_list(net, b_ptr->identity);
        tipc_disc_reset(net, b_ptr);
        return 0;
 }
@@ -354,8 +353,7 @@ static int tipc_reset_bearer(struct net *net, struct tipc_bearer *b_ptr)
  *
  * Note: This routine assumes caller holds RTNL lock.
  */
-static void bearer_disable(struct net *net, struct tipc_bearer *b_ptr,
-                          bool shutting_down)
+static void bearer_disable(struct net *net, struct tipc_bearer *b_ptr)
 {
        struct tipc_net *tn = net_generic(net, tipc_net_id);
        u32 i;
@@ -363,7 +361,7 @@ static void bearer_disable(struct net *net, struct tipc_bearer *b_ptr,
        pr_info("Disabling bearer <%s>\n", b_ptr->name);
        b_ptr->media->disable_media(b_ptr);
 
-       tipc_link_delete_list(net, b_ptr->identity, shutting_down);
+       tipc_link_delete_list(net, b_ptr->identity);
        if (b_ptr->link_req)
                tipc_disc_delete(b_ptr->link_req);
 
@@ -541,7 +539,7 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
                break;
        case NETDEV_UNREGISTER:
        case NETDEV_CHANGENAME:
-               bearer_disable(dev_net(dev), b_ptr, false);
+               bearer_disable(dev_net(dev), b_ptr);
                break;
        }
        return NOTIFY_OK;
@@ -583,7 +581,7 @@ void tipc_bearer_stop(struct net *net)
        for (i = 0; i < MAX_BEARERS; i++) {
                b_ptr = rtnl_dereference(tn->bearer_list[i]);
                if (b_ptr) {
-                       bearer_disable(net, b_ptr, true);
+                       bearer_disable(net, b_ptr);
                        tn->bearer_list[i] = NULL;
                }
        }
@@ -747,7 +745,7 @@ int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info)
                return -EINVAL;
        }
 
-       bearer_disable(net, bearer, false);
+       bearer_disable(net, bearer);
        rtnl_unlock();
 
        return 0;
@@ -812,7 +810,7 @@ int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
        char *name;
        struct tipc_bearer *b;
        struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1];
-       struct net *net = genl_info_net(info);
+       struct net *net = sock_net(skb->sk);
 
        if (!info->attrs[TIPC_NLA_BEARER])
                return -EINVAL;
index 5cad243ee8fc646efccfe72f26ad49ebdfe42f4f..dc714d977768c105cff0b774b49be1e5ec1c59fd 100644 (file)
@@ -38,9 +38,9 @@
 #define _TIPC_BEARER_H
 
 #include "netlink.h"
+#include "core.h"
 #include <net/genetlink.h>
 
-#define MAX_BEARERS    2
 #define MAX_MEDIA      3
 #define MAX_NODES      4096
 #define WSIZE          32
index be1c9fa60b09dc713155c94e7bf6bcc6366fc7aa..005ba5eb0ea426a3a24cbfe0e74aa4c1901af903 100644 (file)
@@ -68,7 +68,7 @@ static int __net_init tipc_init_net(struct net *net)
        if (err)
                goto out_nametbl;
 
-       err = tipc_subscr_start(net);
+       err = tipc_topsrv_start(net);
        if (err)
                goto out_subscr;
        return 0;
@@ -83,7 +83,7 @@ out_sk_rht:
 
 static void __net_exit tipc_exit_net(struct net *net)
 {
-       tipc_subscr_stop(net);
+       tipc_topsrv_stop(net);
        tipc_net_stop(net);
        tipc_nametbl_stop(net);
        tipc_sk_rht_destroy(net);
index 3dc68c7a966d4d8e2d8fe6a979a33100866c520c..0fcf133d5cb7cef0f33478412cb75809b68a8223 100644 (file)
 #include <net/netns/generic.h>
 #include <linux/rhashtable.h>
 
-#include "node.h"
-#include "bearer.h"
-#include "bcast.h"
-#include "netlink.h"
-#include "link.h"
-#include "node.h"
-#include "msg.h"
+struct tipc_node;
+struct tipc_bearer;
+struct tipc_bcbearer;
+struct tipc_bclink;
+struct tipc_link;
+struct tipc_name_table;
+struct tipc_server;
 
 #define TIPC_MOD_VER "2.0.0"
 
+#define NODE_HTABLE_SIZE   512
+#define MAX_BEARERS       3
+
 extern int tipc_net_id __read_mostly;
 extern int sysctl_tipc_rmem[3] __read_mostly;
 extern int sysctl_tipc_named_timeout __read_mostly;
@@ -106,6 +109,26 @@ struct tipc_net {
        atomic_t subscription_count;
 };
 
+static inline u16 mod(u16 x)
+{
+       return x & 0xffffu;
+}
+
+static inline int less_eq(u16 left, u16 right)
+{
+       return mod(right - left) < 32768u;
+}
+
+static inline int more(u16 left, u16 right)
+{
+       return !less_eq(left, right);
+}
+
+static inline int less(u16 left, u16 right)
+{
+       return less_eq(left, right) && (mod(right) != mod(left));
+}
+
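A worked example of the mod-2^16 wraparound semantics these helpers implement (purely illustrative, not part of the patch):

static void tipc_seqno_demo(void)
{
	u16 a = 65534, b = 1;

	/* Forward distance b - a == 3 (mod 2^16) is under the 32768
	 * window, so 65534 counts as "before" 1 across the wrap.
	 */
	BUG_ON(!less(a, b));
	BUG_ON(!more(b, a));
	/* less() is strict; equal values compare false. */
	BUG_ON(less(a, a));
}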
 #ifdef CONFIG_SYSCTL
 int tipc_register_sysctl(void);
 void tipc_unregister_sysctl(void);
index 43a515dc97b0d4a2c7fca5bc9dc2951175201fff..ca8b8e0f49b526ebbf7a87e2cc89491ecfedf988 100644 (file)
@@ -86,7 +86,7 @@ static const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
  */
 #define  STARTING_EVT    856384768     /* link processing trigger */
 #define  TRAFFIC_MSG_EVT 560815u       /* rx'd ??? */
-#define  TIMEOUT_EVT     560817u       /* link timer expired */
+#define  SILENCE_EVT     560817u       /* timer discovered silence from peer */
 
 /*
  * State value stored in 'failover_pkts'
@@ -106,6 +106,7 @@ static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);
 static void tipc_link_input(struct tipc_link *l, struct sk_buff *skb);
 static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb);
 static bool tipc_link_failover_rcv(struct tipc_link *l, struct sk_buff **skb);
+static void link_set_timer(struct tipc_link *link, unsigned long time);
 /*
  *  Simple link routines
  */
@@ -197,11 +198,12 @@ static void link_timeout(unsigned long data)
        }
 
        /* do all other link processing performed on a periodic basis */
-       link_state_event(l_ptr, TIMEOUT_EVT);
-
+       if (l_ptr->silent_intv_cnt || tipc_bclink_acks_missing(l_ptr->owner))
+               link_state_event(l_ptr, SILENCE_EVT);
+       l_ptr->silent_intv_cnt++;
        if (skb_queue_len(&l_ptr->backlogq))
                tipc_link_push_packets(l_ptr);
-
+       link_set_timer(l_ptr, l_ptr->keepalive_intv);
        tipc_node_unlock(l_ptr->owner);
        tipc_link_put(l_ptr);
 }
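
Condensed, the reworked tick amounts to the following (a simplified sketch, not literal kernel code; peer_acks_missing() stands in for tipc_bclink_acks_missing(l->owner), and the backlog push is elided):

static void link_tick(struct tipc_link *l)
{
	/* One silence counter replaces checkpoint/fsm_msg_cnt: any
	 * received packet zeroes it; each quiet interval bumps it.
	 */
	if (l->silent_intv_cnt || peer_acks_missing(l))
		link_state_event(l, SILENCE_EVT);
	l->silent_intv_cnt++;
	link_set_timer(l, l->keepalive_intv);	/* re-armed here, not in FSM */
}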
@@ -233,8 +235,8 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
 
        if (n_ptr->link_cnt >= MAX_BEARERS) {
                tipc_addr_string_fill(addr_string, n_ptr->addr);
-               pr_err("Attempt to establish %uth link to %s. Max %u allowed.\n",
-                       n_ptr->link_cnt, addr_string, MAX_BEARERS);
+               pr_err("Cannot establish %uth link to %s. Max %u allowed.\n",
+                      n_ptr->link_cnt, addr_string, MAX_BEARERS);
                return NULL;
        }
 
@@ -261,7 +263,6 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
                /* note: peer i/f name is updated by reset/activate message */
        memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
        l_ptr->owner = n_ptr;
-       l_ptr->checkpoint = 1;
        l_ptr->peer_session = INVALID_SESSION;
        l_ptr->bearer_id = b_ptr->identity;
        link_set_supervision_props(l_ptr, b_ptr->tolerance);
@@ -280,7 +281,7 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
        l_ptr->mtu = l_ptr->advertised_mtu;
        l_ptr->priority = b_ptr->priority;
        tipc_link_set_queue_limits(l_ptr, b_ptr->window);
-       l_ptr->next_out_no = 1;
+       l_ptr->snd_nxt = 1;
        __skb_queue_head_init(&l_ptr->transmq);
        __skb_queue_head_init(&l_ptr->backlogq);
        __skb_queue_head_init(&l_ptr->deferdq);
@@ -311,8 +312,7 @@ void tipc_link_delete(struct tipc_link *l)
        tipc_link_put(l);
 }
 
-void tipc_link_delete_list(struct net *net, unsigned int bearer_id,
-                          bool shutting_down)
+void tipc_link_delete_list(struct net *net, unsigned int bearer_id)
 {
        struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_link *link;
@@ -451,9 +451,9 @@ void tipc_link_reset(struct tipc_link *l_ptr)
 
        if (was_active_link && tipc_node_is_up(l_ptr->owner) && (pl != l_ptr)) {
                l_ptr->flags |= LINK_FAILINGOVER;
-               l_ptr->failover_checkpt = l_ptr->next_in_no;
+               l_ptr->failover_checkpt = l_ptr->rcv_nxt;
                pl->failover_pkts = FIRST_FAILOVER;
-               pl->failover_checkpt = l_ptr->next_in_no;
+               pl->failover_checkpt = l_ptr->rcv_nxt;
                pl->failover_skb = l_ptr->reasm_buf;
        } else {
                kfree_skb(l_ptr->reasm_buf);
@@ -469,36 +469,19 @@ void tipc_link_reset(struct tipc_link *l_ptr)
        tipc_link_purge_backlog(l_ptr);
        l_ptr->reasm_buf = NULL;
        l_ptr->rcv_unacked = 0;
-       l_ptr->checkpoint = 1;
-       l_ptr->next_out_no = 1;
-       l_ptr->fsm_msg_cnt = 0;
+       l_ptr->snd_nxt = 1;
+       l_ptr->silent_intv_cnt = 0;
        l_ptr->stale_count = 0;
        link_reset_statistics(l_ptr);
 }
 
-void tipc_link_reset_list(struct net *net, unsigned int bearer_id)
-{
-       struct tipc_net *tn = net_generic(net, tipc_net_id);
-       struct tipc_link *l_ptr;
-       struct tipc_node *n_ptr;
-
-       rcu_read_lock();
-       list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
-               tipc_node_lock(n_ptr);
-               l_ptr = n_ptr->links[bearer_id];
-               if (l_ptr)
-                       tipc_link_reset(l_ptr);
-               tipc_node_unlock(n_ptr);
-       }
-       rcu_read_unlock();
-}
-
 static void link_activate(struct tipc_link *link)
 {
        struct tipc_node *node = link->owner;
 
-       link->next_in_no = 1;
+       link->rcv_nxt = 1;
        link->stats.recv_info = 1;
+       link->silent_intv_cnt = 0;
        tipc_node_link_up(node, link);
        tipc_bearer_add_dest(node->net, link->bearer_id, link->addr);
 }
@@ -511,7 +494,7 @@ static void link_activate(struct tipc_link *link)
 static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
 {
        struct tipc_link *other;
-       unsigned long cont_intv = l_ptr->cont_intv;
+       unsigned long timer_intv = l_ptr->keepalive_intv;
 
        if (l_ptr->flags & LINK_STOPPED)
                return;
@@ -519,45 +502,33 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
        if (!(l_ptr->flags & LINK_STARTED) && (event != STARTING_EVT))
                return;         /* Not yet. */
 
-       if (l_ptr->flags & LINK_FAILINGOVER) {
-               if (event == TIMEOUT_EVT)
-                       link_set_timer(l_ptr, cont_intv);
+       if (l_ptr->flags & LINK_FAILINGOVER)
                return;
-       }
 
        switch (l_ptr->state) {
        case WORKING_WORKING:
                switch (event) {
                case TRAFFIC_MSG_EVT:
                case ACTIVATE_MSG:
+                       l_ptr->silent_intv_cnt = 0;
                        break;
-               case TIMEOUT_EVT:
-                       if (l_ptr->next_in_no != l_ptr->checkpoint) {
-                               l_ptr->checkpoint = l_ptr->next_in_no;
-                               if (tipc_bclink_acks_missing(l_ptr->owner)) {
+               case SILENCE_EVT:
+                       if (!l_ptr->silent_intv_cnt) {
+                               if (tipc_bclink_acks_missing(l_ptr->owner))
                                        tipc_link_proto_xmit(l_ptr, STATE_MSG,
                                                             0, 0, 0, 0);
-                                       l_ptr->fsm_msg_cnt++;
-                               }
-                               link_set_timer(l_ptr, cont_intv);
                                break;
                        }
                        l_ptr->state = WORKING_UNKNOWN;
-                       l_ptr->fsm_msg_cnt = 0;
                        tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0);
-                       l_ptr->fsm_msg_cnt++;
-                       link_set_timer(l_ptr, cont_intv / 4);
                        break;
                case RESET_MSG:
                        pr_debug("%s<%s>, requested by peer\n",
                                 link_rst_msg, l_ptr->name);
                        tipc_link_reset(l_ptr);
                        l_ptr->state = RESET_RESET;
-                       l_ptr->fsm_msg_cnt = 0;
                        tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
                                             0, 0, 0, 0);
-                       l_ptr->fsm_msg_cnt++;
-                       link_set_timer(l_ptr, cont_intv);
                        break;
                default:
                        pr_debug("%s%u in WW state\n", link_unk_evt, event);
@@ -568,46 +539,33 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
                case TRAFFIC_MSG_EVT:
                case ACTIVATE_MSG:
                        l_ptr->state = WORKING_WORKING;
-                       l_ptr->fsm_msg_cnt = 0;
-                       link_set_timer(l_ptr, cont_intv);
+                       l_ptr->silent_intv_cnt = 0;
                        break;
                case RESET_MSG:
                        pr_debug("%s<%s>, requested by peer while probing\n",
                                 link_rst_msg, l_ptr->name);
                        tipc_link_reset(l_ptr);
                        l_ptr->state = RESET_RESET;
-                       l_ptr->fsm_msg_cnt = 0;
                        tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
                                             0, 0, 0, 0);
-                       l_ptr->fsm_msg_cnt++;
-                       link_set_timer(l_ptr, cont_intv);
                        break;
-               case TIMEOUT_EVT:
-                       if (l_ptr->next_in_no != l_ptr->checkpoint) {
+               case SILENCE_EVT:
+                       if (!l_ptr->silent_intv_cnt) {
                                l_ptr->state = WORKING_WORKING;
-                               l_ptr->fsm_msg_cnt = 0;
-                               l_ptr->checkpoint = l_ptr->next_in_no;
-                               if (tipc_bclink_acks_missing(l_ptr->owner)) {
+                               if (tipc_bclink_acks_missing(l_ptr->owner))
                                        tipc_link_proto_xmit(l_ptr, STATE_MSG,
                                                             0, 0, 0, 0);
-                                       l_ptr->fsm_msg_cnt++;
-                               }
-                               link_set_timer(l_ptr, cont_intv);
-                       } else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) {
+                       } else if (l_ptr->silent_intv_cnt <
+                                  l_ptr->abort_limit) {
                                tipc_link_proto_xmit(l_ptr, STATE_MSG,
                                                     1, 0, 0, 0);
-                               l_ptr->fsm_msg_cnt++;
-                               link_set_timer(l_ptr, cont_intv / 4);
                        } else {        /* Link has failed */
                                pr_debug("%s<%s>, peer not responding\n",
                                         link_rst_msg, l_ptr->name);
                                tipc_link_reset(l_ptr);
                                l_ptr->state = RESET_UNKNOWN;
-                               l_ptr->fsm_msg_cnt = 0;
                                tipc_link_proto_xmit(l_ptr, RESET_MSG,
                                                     0, 0, 0, 0);
-                               l_ptr->fsm_msg_cnt++;
-                               link_set_timer(l_ptr, cont_intv);
                        }
                        break;
                default:
@@ -623,31 +581,22 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
                        if (other && link_working_unknown(other))
                                break;
                        l_ptr->state = WORKING_WORKING;
-                       l_ptr->fsm_msg_cnt = 0;
                        link_activate(l_ptr);
                        tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0);
-                       l_ptr->fsm_msg_cnt++;
                        if (l_ptr->owner->working_links == 1)
                                tipc_link_sync_xmit(l_ptr);
-                       link_set_timer(l_ptr, cont_intv);
                        break;
                case RESET_MSG:
                        l_ptr->state = RESET_RESET;
-                       l_ptr->fsm_msg_cnt = 0;
                        tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
                                             1, 0, 0, 0);
-                       l_ptr->fsm_msg_cnt++;
-                       link_set_timer(l_ptr, cont_intv);
                        break;
                case STARTING_EVT:
                        l_ptr->flags |= LINK_STARTED;
-                       l_ptr->fsm_msg_cnt++;
-                       link_set_timer(l_ptr, cont_intv);
+                       link_set_timer(l_ptr, timer_intv);
                        break;
-               case TIMEOUT_EVT:
+               case SILENCE_EVT:
                        tipc_link_proto_xmit(l_ptr, RESET_MSG, 0, 0, 0, 0);
-                       l_ptr->fsm_msg_cnt++;
-                       link_set_timer(l_ptr, cont_intv);
                        break;
                default:
                        pr_err("%s%u in RU state\n", link_unk_evt, event);
@@ -661,21 +610,16 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
                        if (other && link_working_unknown(other))
                                break;
                        l_ptr->state = WORKING_WORKING;
-                       l_ptr->fsm_msg_cnt = 0;
                        link_activate(l_ptr);
                        tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0);
-                       l_ptr->fsm_msg_cnt++;
                        if (l_ptr->owner->working_links == 1)
                                tipc_link_sync_xmit(l_ptr);
-                       link_set_timer(l_ptr, cont_intv);
                        break;
                case RESET_MSG:
                        break;
-               case TIMEOUT_EVT:
+               case SILENCE_EVT:
                        tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
                                             0, 0, 0, 0);
-                       l_ptr->fsm_msg_cnt++;
-                       link_set_timer(l_ptr, cont_intv);
                        break;
                default:
                        pr_err("%s%u in RR state\n", link_unk_evt, event);
@@ -701,53 +645,58 @@ int __tipc_link_xmit(struct net *net, struct tipc_link *link,
 {
        struct tipc_msg *msg = buf_msg(skb_peek(list));
        unsigned int maxwin = link->window;
-       unsigned int imp = msg_importance(msg);
+       unsigned int i, imp = msg_importance(msg);
        uint mtu = link->mtu;
-       uint ack = mod(link->next_in_no - 1);
-       uint seqno = link->next_out_no;
-       uint bc_last_in = link->owner->bclink.last_in;
+       u16 ack = mod(link->rcv_nxt - 1);
+       u16 seqno = link->snd_nxt;
+       u16 bc_last_in = link->owner->bclink.last_in;
        struct tipc_media_addr *addr = &link->media_addr;
        struct sk_buff_head *transmq = &link->transmq;
        struct sk_buff_head *backlogq = &link->backlogq;
-       struct sk_buff *skb, *tmp;
-
-       /* Match backlog limit against msg importance: */
-       if (unlikely(link->backlog[imp].len >= link->backlog[imp].limit))
-               return link_schedule_user(link, list);
+       struct sk_buff *skb, *bskb;
 
+       /* Match msg importance against this and all higher backlog limits: */
+       for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
+               if (unlikely(link->backlog[i].len >= link->backlog[i].limit))
+                       return link_schedule_user(link, list);
+       }
        if (unlikely(msg_size(msg) > mtu)) {
                __skb_queue_purge(list);
                return -EMSGSIZE;
        }
        /* Prepare each packet for sending, and add to relevant queue: */
-       skb_queue_walk_safe(list, skb, tmp) {
-               __skb_unlink(skb, list);
+       while (skb_queue_len(list)) {
+               skb = skb_peek(list);
                msg = buf_msg(skb);
                msg_set_seqno(msg, seqno);
                msg_set_ack(msg, ack);
                msg_set_bcast_ack(msg, bc_last_in);
 
                if (likely(skb_queue_len(transmq) < maxwin)) {
+                       __skb_dequeue(list);
                        __skb_queue_tail(transmq, skb);
                        tipc_bearer_send(net, link->bearer_id, skb, addr);
                        link->rcv_unacked = 0;
                        seqno++;
                        continue;
                }
-               if (tipc_msg_bundle(skb_peek_tail(backlogq), skb, mtu)) {
+               if (tipc_msg_bundle(skb_peek_tail(backlogq), msg, mtu)) {
+                       kfree_skb(__skb_dequeue(list));
                        link->stats.sent_bundled++;
                        continue;
                }
-               if (tipc_msg_make_bundle(&skb, mtu, link->addr)) {
+               if (tipc_msg_make_bundle(&bskb, msg, mtu, link->addr)) {
+                       kfree_skb(__skb_dequeue(list));
+                       __skb_queue_tail(backlogq, bskb);
+                       link->backlog[msg_importance(buf_msg(bskb))].len++;
                        link->stats.sent_bundled++;
                        link->stats.sent_bundles++;
-                       imp = msg_importance(buf_msg(skb));
+                       continue;
                }
-               __skb_queue_tail(backlogq, skb);
-               link->backlog[imp].len++;
-               seqno++;
+               link->backlog[imp].len += skb_queue_len(list);
+               skb_queue_splice_tail_init(list, backlogq);
        }
-       link->next_out_no = seqno;
+       link->snd_nxt = seqno;
        return 0;
 }
 
@@ -877,7 +826,8 @@ void tipc_link_push_packets(struct tipc_link *link)
 {
        struct sk_buff *skb;
        struct tipc_msg *msg;
-       unsigned int ack = mod(link->next_in_no - 1);
+       u16 seqno = link->snd_nxt;
+       u16 ack = mod(link->rcv_nxt - 1);
 
        while (skb_queue_len(&link->transmq) < link->window) {
                skb = __skb_dequeue(&link->backlogq);
@@ -886,12 +836,15 @@ void tipc_link_push_packets(struct tipc_link *link)
                msg = buf_msg(skb);
                link->backlog[msg_importance(msg)].len--;
                msg_set_ack(msg, ack);
+               msg_set_seqno(msg, seqno);
+               seqno = mod(seqno + 1);
                msg_set_bcast_ack(msg, link->owner->bclink.last_in);
                link->rcv_unacked = 0;
                __skb_queue_tail(&link->transmq, skb);
                tipc_bearer_send(link->owner->net, link->bearer_id,
                                 skb, &link->media_addr);
        }
+       link->snd_nxt = seqno;
 }
 
 void tipc_link_reset_all(struct tipc_node *node)
@@ -964,13 +917,13 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb,
        msg = buf_msg(skb);
 
        /* Detect repeated retransmit failures */
-       if (l_ptr->last_retransmitted == msg_seqno(msg)) {
+       if (l_ptr->last_retransm == msg_seqno(msg)) {
                if (++l_ptr->stale_count > 100) {
                        link_retransmit_failure(l_ptr, skb);
                        return;
                }
        } else {
-               l_ptr->last_retransmitted = msg_seqno(msg);
+               l_ptr->last_retransm = msg_seqno(msg);
                l_ptr->stale_count = 1;
        }
 
@@ -978,7 +931,7 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb,
                if (!retransmits)
                        break;
                msg = buf_msg(skb);
-               msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
+               msg_set_ack(msg, mod(l_ptr->rcv_nxt - 1));
                msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
                tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, skb,
                                 &l_ptr->media_addr);
@@ -1001,11 +954,11 @@ static bool link_synch(struct tipc_link *l)
                goto synched;
 
        /* Was last pre-synch packet added to input queue ? */
-       if (less_eq(pl->next_in_no, l->synch_point))
+       if (less_eq(pl->rcv_nxt, l->synch_point))
                return false;
 
        /* Is it still in the input queue ? */
-       post_synch = mod(pl->next_in_no - l->synch_point) - 1;
+       post_synch = mod(pl->rcv_nxt - l->synch_point) - 1;
        if (skb_queue_len(&pl->inputq) > post_synch)
                return false;
 synched:
@@ -1016,13 +969,13 @@ synched:
 static void link_retrieve_defq(struct tipc_link *link,
                               struct sk_buff_head *list)
 {
-       u32 seq_no;
+       u16 seq_no;
 
        if (skb_queue_empty(&link->deferdq))
                return;
 
        seq_no = buf_seqno(skb_peek(&link->deferdq));
-       if (seq_no == mod(link->next_in_no))
+       if (seq_no == link->rcv_nxt)
                skb_queue_splice_tail_init(&link->deferdq, list);
 }
 
@@ -1043,8 +996,8 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr)
        struct tipc_link *l_ptr;
        struct sk_buff *skb1, *tmp;
        struct tipc_msg *msg;
-       u32 seq_no;
-       u32 ackd;
+       u16 seq_no;
+       u16 ackd;
        u32 released;
 
        skb2list(skb, &head);
@@ -1137,18 +1090,20 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr)
                }
 
                /* Link is now in state WORKING_WORKING */
-               if (unlikely(seq_no != mod(l_ptr->next_in_no))) {
+               if (unlikely(seq_no != l_ptr->rcv_nxt)) {
                        link_handle_out_of_seq_msg(l_ptr, skb);
                        link_retrieve_defq(l_ptr, &head);
                        skb = NULL;
                        goto unlock;
                }
+               l_ptr->silent_intv_cnt = 0;
+
                /* Synchronize with parallel link if applicable */
                if (unlikely((l_ptr->flags & LINK_SYNCHING) && !msg_dup(msg))) {
                        if (!link_synch(l_ptr))
                                goto unlock;
                }
-               l_ptr->next_in_no++;
+               l_ptr->rcv_nxt++;
                if (unlikely(!skb_queue_empty(&l_ptr->deferdq)))
                        link_retrieve_defq(l_ptr, &head);
                if (unlikely(++l_ptr->rcv_unacked >= TIPC_MIN_LINK_WIN)) {
@@ -1268,7 +1223,7 @@ static void tipc_link_input(struct tipc_link *link, struct sk_buff *skb)
 u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *skb)
 {
        struct sk_buff *skb1;
-       u32 seq_no = buf_seqno(skb);
+       u16 seq_no = buf_seqno(skb);
 
        /* Empty queue ? */
        if (skb_queue_empty(list)) {
@@ -1284,7 +1239,7 @@ u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *skb)
 
        /* Locate insertion point in queue, then insert; discard if duplicate */
        skb_queue_walk(list, skb1) {
-               u32 curr_seqno = buf_seqno(skb1);
+               u16 curr_seqno = buf_seqno(skb1);
 
                if (seq_no == curr_seqno) {
                        kfree_skb(skb);
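
tipc_link_defer_pkt() keeps the deferred queue sorted by sequence number so that link_retrieve_defq() can splice it straight back in once the head matches rcv_nxt; the remainder of the walk (elided in the hunk above) inserts just before the first larger seqno. With 16-bit numbers the ordering test has to be wrap-aware. A self-contained sketch, with a plain linked list standing in for the sk_buff queue:

    #include <stdbool.h>
    #include <stdint.h>

    /* "left precedes right" iff the forward distance left -> right is
     * less than half the 16-bit sequence space. */
    static bool seq_less(uint16_t left, uint16_t right)
    {
        return left != right && (uint16_t)(right - left) < 0x8000u;
    }

    struct node { uint16_t seqno; struct node *next; };

    /* Sorted insert; returns false on a duplicate so the caller frees it. */
    static bool defer_insert(struct node **head, struct node *n)
    {
        struct node **pp = head;

        while (*pp && seq_less((*pp)->seqno, n->seqno))
            pp = &(*pp)->next;
        if (*pp && (*pp)->seqno == n->seqno)
            return false;
        n->next = *pp;
        *pp = n;
        return true;
    }
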
@@ -1312,14 +1267,14 @@ static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
                return;
        }
 
-       /* Record OOS packet arrival (force mismatch on next timeout) */
-       l_ptr->checkpoint--;
+       /* Record OOS packet arrival */
+       l_ptr->silent_intv_cnt = 0;
 
        /*
         * Discard packet if a duplicate; otherwise add it to deferred queue
         * and notify peer of gap as per protocol specification
         */
-       if (less(seq_no, mod(l_ptr->next_in_no))) {
+       if (less(seq_no, l_ptr->rcv_nxt)) {
                l_ptr->stats.duplicates++;
                kfree_skb(buf);
                return;
@@ -1344,6 +1299,7 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
        struct tipc_msg *msg = l_ptr->pmsg;
        u32 msg_size = sizeof(l_ptr->proto_msg);
        int r_flag;
+       u16 last_rcv;
 
        /* Don't send protocol message during link failover */
        if (l_ptr->flags & LINK_FAILINGOVER)
@@ -1360,16 +1316,14 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
        msg_set_last_bcast(msg, tipc_bclink_get_last_sent(l_ptr->owner->net));
 
        if (msg_typ == STATE_MSG) {
-               u32 next_sent = mod(l_ptr->next_out_no);
+               u16 next_sent = l_ptr->snd_nxt;
 
                if (!tipc_link_is_up(l_ptr))
                        return;
-               if (skb_queue_len(&l_ptr->backlogq))
-                       next_sent = buf_seqno(skb_peek(&l_ptr->backlogq));
                msg_set_next_sent(msg, next_sent);
                if (!skb_queue_empty(&l_ptr->deferdq)) {
-                       u32 rec = buf_seqno(skb_peek(&l_ptr->deferdq));
-                       gap = mod(rec - mod(l_ptr->next_in_no));
+                       last_rcv = buf_seqno(skb_peek(&l_ptr->deferdq));
+                       gap = mod(last_rcv - l_ptr->rcv_nxt);
                }
                msg_set_seq_gap(msg, gap);
                if (gap)
@@ -1377,7 +1331,7 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
                msg_set_link_tolerance(msg, tolerance);
                msg_set_linkprio(msg, priority);
                msg_set_max_pkt(msg, l_ptr->mtu);
-               msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
+               msg_set_ack(msg, mod(l_ptr->rcv_nxt - 1));
                msg_set_probe(msg, probe_msg != 0);
                if (probe_msg)
                        l_ptr->stats.sent_probes++;
@@ -1397,7 +1351,7 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
        msg_set_linkprio(msg, l_ptr->priority);
        msg_set_size(msg, msg_size);
 
-       msg_set_seqno(msg, mod(l_ptr->next_out_no + (0xffff/2)));
+       msg_set_seqno(msg, mod(l_ptr->snd_nxt + (0xffff / 2)));
 
        buf = tipc_buf_acquire(msg_size);
        if (!buf)
@@ -1496,17 +1450,15 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr,
                }
 
                /* Record reception; force mismatch at next timeout: */
-               l_ptr->checkpoint--;
+               l_ptr->silent_intv_cnt = 0;
 
                link_state_event(l_ptr, TRAFFIC_MSG_EVT);
                l_ptr->stats.recv_states++;
                if (link_reset_unknown(l_ptr))
                        break;
 
-               if (less_eq(mod(l_ptr->next_in_no), msg_next_sent(msg))) {
-                       rec_gap = mod(msg_next_sent(msg) -
-                                     mod(l_ptr->next_in_no));
-               }
+               if (less_eq(l_ptr->rcv_nxt, msg_next_sent(msg)))
+                       rec_gap = mod(msg_next_sent(msg) - l_ptr->rcv_nxt);
 
                if (msg_probe(msg))
                        l_ptr->stats.recv_probes++;
@@ -1580,6 +1532,11 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
 
        tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, TUNNEL_PROTOCOL,
                      FAILOVER_MSG, INT_H_SIZE, l_ptr->addr);
+
+       skb_queue_walk(&l_ptr->backlogq, skb) {
+               msg_set_seqno(buf_msg(skb), l_ptr->snd_nxt);
+               l_ptr->snd_nxt = mod(l_ptr->snd_nxt + 1);
+       }
        skb_queue_splice_tail_init(&l_ptr->backlogq, &l_ptr->transmq);
        tipc_link_purge_backlog(l_ptr);
        msgcount = skb_queue_len(&l_ptr->transmq);
@@ -1640,6 +1597,7 @@ void tipc_link_dup_queue_xmit(struct tipc_link *link,
        struct tipc_msg tnl_hdr;
        struct sk_buff_head *queue = &link->transmq;
        int mcnt;
+       u16 seqno;
 
        tipc_msg_init(link_own_addr(link), &tnl_hdr, TUNNEL_PROTOCOL,
                      SYNCH_MSG, INT_H_SIZE, link->addr);
@@ -1653,7 +1611,7 @@ tunnel_queue:
                struct tipc_msg *msg = buf_msg(skb);
                u32 len = msg_size(msg);
 
-               msg_set_ack(msg, mod(link->next_in_no - 1));
+               msg_set_ack(msg, mod(link->rcv_nxt - 1));
                msg_set_bcast_ack(msg, link->owner->bclink.last_in);
                msg_set_size(&tnl_hdr, len + INT_H_SIZE);
                outskb = tipc_buf_acquire(len + INT_H_SIZE);
@@ -1671,6 +1629,11 @@ tunnel_queue:
        }
        if (queue == &link->backlogq)
                return;
+       seqno = link->snd_nxt;
+       skb_queue_walk(&link->backlogq, skb) {
+               msg_set_seqno(buf_msg(skb), seqno);
+               seqno = mod(seqno + 1);
+       }
        queue = &link->backlogq;
        goto tunnel_queue;
 }
@@ -1742,8 +1705,8 @@ static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol)
                return;
 
        l_ptr->tolerance = tol;
-       l_ptr->cont_intv = msecs_to_jiffies(intv);
-       l_ptr->abort_limit = tol / (jiffies_to_msecs(l_ptr->cont_intv) / 4);
+       l_ptr->keepalive_intv = msecs_to_jiffies(intv);
+       l_ptr->abort_limit = tol / (jiffies_to_msecs(l_ptr->keepalive_intv));
 }
 
 void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
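
The supervision arithmetic is now direct: abort_limit is the tolerance divided by the keepalive interval, instead of the old quarter-interval juggling. As a worked example, assuming the earlier part of link_set_supervision_props() (not shown in this hunk) derives intv as roughly a quarter of the tolerance: tol = 1500 ms gives a keepalive interval of about 375 ms and abort_limit = 1500 / 375 = 4, i.e. four silent intervals before the link is declared failed.
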
@@ -1803,8 +1766,8 @@ static struct tipc_node *tipc_link_find_owner(struct net *net,
 static void link_reset_statistics(struct tipc_link *l_ptr)
 {
        memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
-       l_ptr->stats.sent_info = l_ptr->next_out_no;
-       l_ptr->stats.recv_info = l_ptr->next_in_no;
+       l_ptr->stats.sent_info = l_ptr->snd_nxt;
+       l_ptr->stats.recv_info = l_ptr->rcv_nxt;
 }
 
 static void link_print(struct tipc_link *l_ptr, const char *str)
@@ -1893,6 +1856,9 @@ int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
 
        name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
 
+       if (strcmp(name, tipc_bclink_name) == 0)
+               return tipc_nl_bc_link_set(net, attrs);
+
        node = tipc_link_find_owner(net, name, &bearer_id);
        if (!node)
                return -EINVAL;
@@ -2034,9 +2000,9 @@ static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
                goto attr_msg_full;
        if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
                goto attr_msg_full;
-       if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->next_in_no))
+       if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->rcv_nxt))
                goto attr_msg_full;
-       if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->next_out_no))
+       if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->snd_nxt))
                goto attr_msg_full;
 
        if (tipc_link_is_up(link))
@@ -2175,50 +2141,53 @@ out:
 int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info)
 {
        struct net *net = genl_info_net(info);
-       struct sk_buff *ans_skb;
        struct tipc_nl_msg msg;
-       struct tipc_link *link;
-       struct tipc_node *node;
        char *name;
-       int bearer_id;
        int err;
 
+       msg.portid = info->snd_portid;
+       msg.seq = info->snd_seq;
+
        if (!info->attrs[TIPC_NLA_LINK_NAME])
                return -EINVAL;
-
        name = nla_data(info->attrs[TIPC_NLA_LINK_NAME]);
-       node = tipc_link_find_owner(net, name, &bearer_id);
-       if (!node)
-               return -EINVAL;
 
-       ans_skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
-       if (!ans_skb)
+       msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+       if (!msg.skb)
                return -ENOMEM;
 
-       msg.skb = ans_skb;
-       msg.portid = info->snd_portid;
-       msg.seq = info->snd_seq;
-
-       tipc_node_lock(node);
-       link = node->links[bearer_id];
-       if (!link) {
-               err = -EINVAL;
-               goto err_out;
-       }
-
-       err = __tipc_nl_add_link(net, &msg, link, 0);
-       if (err)
-               goto err_out;
+       if (strcmp(name, tipc_bclink_name) == 0) {
+               err = tipc_nl_add_bc_link(net, &msg);
+               if (err) {
+                       nlmsg_free(msg.skb);
+                       return err;
+               }
+       } else {
+               int bearer_id;
+               struct tipc_node *node;
+               struct tipc_link *link;
 
-       tipc_node_unlock(node);
+               node = tipc_link_find_owner(net, name, &bearer_id);
+               if (!node)
+                       return -EINVAL;
 
-       return genlmsg_reply(ans_skb, info);
+               tipc_node_lock(node);
+               link = node->links[bearer_id];
+               if (!link) {
+                       tipc_node_unlock(node);
+                       nlmsg_free(msg.skb);
+                       return -EINVAL;
+               }
 
-err_out:
-       tipc_node_unlock(node);
-       nlmsg_free(ans_skb);
+               err = __tipc_nl_add_link(net, &msg, link, 0);
+               tipc_node_unlock(node);
+               if (err) {
+                       nlmsg_free(msg.skb);
+                       return err;
+               }
+       }
 
-       return err;
+       return genlmsg_reply(msg.skb, info);
 }
 
 int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info)
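
The reshaped tipc_nl_link_get() above settles on a single ownership rule for the reply buffer: allocate msg.skb once up front, release it on every failure path, and hand it to genlmsg_reply() only on success. The same discipline in a userspace miniature (the names and the broadcast-link special case are illustrative, not the kernel API):

    #include <errno.h>
    #include <stdlib.h>
    #include <string.h>

    static int build_reply(const char *name, char **out)
    {
        char *reply = malloc(256);          /* allocated exactly once */

        if (!reply)
            return -ENOMEM;
        if (strcmp(name, "broadcast-link") == 0) {
            strcpy(reply, "broadcast link stats");
        } else {
            if (name[0] == '\0') {          /* stand-in for "no such link" */
                free(reply);                /* every error path releases */
                return -EINVAL;
            }
            strcpy(reply, "unicast link stats");
        }
        *out = reply;                       /* success: ownership passes out */
        return 0;
    }
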
index b5b4e3554d4e896873eba6c58ccb3ec48f712025..0c02c973e98558c699f006cce06b81768891b08b 100644 (file)
@@ -107,30 +107,29 @@ struct tipc_stats {
  * @owner: pointer to peer node
  * @refcnt: reference counter for permanent references (owner node & timer)
  * @flags: execution state flags for link endpoint instance
- * @checkpoint: reference point for triggering link continuity checking
  * @peer_session: link session # being used by peer end of link
  * @peer_bearer_id: bearer id used by link's peer endpoint
  * @bearer_id: local bearer id used by link
  * @tolerance: minimum link continuity loss needed to reset link [in ms]
- * @cont_intv: link continuity testing interval
+ * @keepalive_intv: link keepalive timer interval
  * @abort_limit: # of unacknowledged continuity probes needed to reset link
  * @state: current state of link FSM
- * @fsm_msg_cnt: # of protocol messages link FSM has sent in current state
+ * @silent_intv_cnt: # of timer intervals without any reception from peer
  * @proto_msg: template for control messages generated by link
  * @pmsg: convenience pointer to "proto_msg" field
  * @priority: current link priority
  * @net_plane: current link network plane ('A' through 'H')
  * @backlog_limit: backlog queue congestion thresholds (indexed by importance)
  * @exp_msg_count: # of tunnelled messages expected during link changeover
- * @reset_checkpoint: seq # of last acknowledged message at time of link reset
+ * @reset_rcv_checkpt: seq # of last acknowledged message at time of link reset
  * @mtu: current maximum packet size for this link
  * @advertised_mtu: advertised own mtu when link is being established
 * @transmq: queue for sent, non-acked messages
  * @backlogq: queue for messages waiting to be sent
- * @next_out_no: next sequence number to use for outbound messages
+ * @snd_nxt: next sequence number to use for outbound messages
- * @last_retransmitted: sequence number of most recently retransmitted message
+ * @last_retransm: sequence number of most recently retransmitted message
  * @stale_count: # of identical retransmit requests made by peer
- * @next_in_no: next sequence number to expect for inbound messages
+ * @rcv_nxt: next sequence number to expect for inbound messages
 * @deferdq: deferred queue of out-of-sequence messages received from peer
 * @rcv_unacked: # of inbound messages rx'd without ack'ing back to peer
  * @inputq: buffer queue for messages to be delivered upwards
@@ -151,15 +150,14 @@ struct tipc_link {
 
        /* Management and link supervision data */
        unsigned int flags;
-       u32 checkpoint;
        u32 peer_session;
        u32 peer_bearer_id;
        u32 bearer_id;
        u32 tolerance;
-       unsigned long cont_intv;
+       unsigned long keepalive_intv;
        u32 abort_limit;
        int state;
-       u32 fsm_msg_cnt;
+       u32 silent_intv_cnt;
        struct {
                unchar hdr[INT_H_SIZE];
                unchar body[TIPC_MAX_IF_NAME];
@@ -185,13 +183,13 @@ struct tipc_link {
                u16 len;
                u16 limit;
        } backlog[5];
-       u32 next_out_no;
+       u16 snd_nxt;
+       u16 last_retransm;
        u32 window;
-       u32 last_retransmitted;
        u32 stale_count;
 
        /* Reception */
-       u32 next_in_no;
+       u16 rcv_nxt;
        u32 rcv_unacked;
        struct sk_buff_head deferdq;
        struct sk_buff_head inputq;
@@ -213,8 +211,7 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
                              struct tipc_bearer *b_ptr,
                              const struct tipc_media_addr *media_addr);
 void tipc_link_delete(struct tipc_link *link);
-void tipc_link_delete_list(struct net *net, unsigned int bearer_id,
-                          bool shutting_down);
+void tipc_link_delete_list(struct net *net, unsigned int bearer_id);
 void tipc_link_failover_send_queue(struct tipc_link *l_ptr);
 void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr, struct tipc_link *dest);
 void tipc_link_reset_fragments(struct tipc_link *l_ptr);
@@ -223,7 +220,6 @@ int tipc_link_is_active(struct tipc_link *l_ptr);
 void tipc_link_purge_queues(struct tipc_link *l_ptr);
 void tipc_link_reset_all(struct tipc_node *node);
 void tipc_link_reset(struct tipc_link *l_ptr);
-void tipc_link_reset_list(struct net *net, unsigned int bearer_id);
 int tipc_link_xmit_skb(struct net *net, struct sk_buff *skb, u32 dest,
                       u32 selector);
 int tipc_link_xmit(struct net *net, struct sk_buff_head *list, u32 dest,
@@ -247,39 +243,6 @@ int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info);
 int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[]);
 void link_prepare_wakeup(struct tipc_link *l);
 
-/*
- * Link sequence number manipulation routines (uses modulo 2**16 arithmetic)
- */
-static inline u32 buf_seqno(struct sk_buff *buf)
-{
-       return msg_seqno(buf_msg(buf));
-}
-
-static inline u32 mod(u32 x)
-{
-       return x & 0xffffu;
-}
-
-static inline int less_eq(u32 left, u32 right)
-{
-       return mod(right - left) < 32768u;
-}
-
-static inline int more(u32 left, u32 right)
-{
-       return !less_eq(left, right);
-}
-
-static inline int less(u32 left, u32 right)
-{
-       return less_eq(left, right) && (mod(right) != mod(left));
-}
-
-static inline u32 lesser(u32 left, u32 right)
-{
-       return less_eq(left, right) ? left : right;
-}
-
 static inline u32 link_own_addr(struct tipc_link *l)
 {
        return msg_prevnode(l->pmsg);
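
The u32 helpers deleted above become unnecessary once sequence numbers are carried as u16: unsigned 16-bit arithmetic already wraps mod 2^16, so mod() is a no-op and the comparisons reduce to one subtraction against half the sequence space. A quick self-contained check of both properties:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint16_t snd_nxt = 0xfffe;

        /* Plain u16 arithmetic wraps by itself; no masking required. */
        snd_nxt += 3;
        assert(snd_nxt == 0x0001);

        /* Half-space rule: a precedes b iff (u16)(b - a) < 0x8000. */
        assert((uint16_t)(0x0001 - 0xfffe) == 3);
        assert((uint16_t)(0x0001 - 0xfffe) < 0x8000u);    /* 0xfffe < 1  */
        assert(!((uint16_t)(0xfffe - 0x0001) < 0x8000u)); /* 1 !< 0xfffe */
        return 0;
    }
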
index c3e96e8154188af27c0d5fc545fe95c54a29e9e9..08b4cc7d496d94c80fb2fcacc30ade0192fb0dc8 100644 (file)
@@ -331,16 +331,15 @@ error:
 
 /**
  * tipc_msg_bundle(): Append contents of a buffer to tail of an existing one
- * @bskb: the buffer to append to ("bundle")
- * @skb:  buffer to be appended
+ * @skb: the buffer to append to ("bundle")
+ * @msg:  message to be appended
  * @mtu:  max allowable size for the bundle buffer
  * Consumes buffer if successful
  * Returns true if bundling could be performed, otherwise false
  */
-bool tipc_msg_bundle(struct sk_buff *bskb, struct sk_buff *skb, u32 mtu)
+bool tipc_msg_bundle(struct sk_buff *skb, struct tipc_msg *msg, u32 mtu)
 {
        struct tipc_msg *bmsg;
-       struct tipc_msg *msg = buf_msg(skb);
        unsigned int bsz;
        unsigned int msz = msg_size(msg);
        u32 start, pad;
@@ -348,9 +347,9 @@ bool tipc_msg_bundle(struct sk_buff *bskb, struct sk_buff *skb, u32 mtu)
 
        if (likely(msg_user(msg) == MSG_FRAGMENTER))
                return false;
-       if (!bskb)
+       if (!skb)
                return false;
-       bmsg = buf_msg(bskb);
+       bmsg = buf_msg(skb);
        bsz = msg_size(bmsg);
        start = align(bsz);
        pad = start - bsz;
@@ -359,18 +358,20 @@ bool tipc_msg_bundle(struct sk_buff *bskb, struct sk_buff *skb, u32 mtu)
                return false;
        if (unlikely(msg_user(msg) == BCAST_PROTOCOL))
                return false;
-       if (likely(msg_user(bmsg) != MSG_BUNDLER))
+       if (unlikely(msg_user(bmsg) != MSG_BUNDLER))
                return false;
-       if (unlikely(skb_tailroom(bskb) < (pad + msz)))
+       if (unlikely(skb_tailroom(skb) < (pad + msz)))
                return false;
        if (unlikely(max < (start + msz)))
                return false;
+       if ((msg_importance(msg) < TIPC_SYSTEM_IMPORTANCE) &&
+           (msg_importance(bmsg) == TIPC_SYSTEM_IMPORTANCE))
+               return false;
 
-       skb_put(bskb, pad + msz);
-       skb_copy_to_linear_data_offset(bskb, start, skb->data, msz);
+       skb_put(skb, pad + msz);
+       skb_copy_to_linear_data_offset(skb, start, msg, msz);
        msg_set_size(bmsg, start + msz);
        msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1);
-       kfree_skb(skb);
        return true;
 }
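
Bundling appends the incoming message at the next 4-byte boundary of the bundle buffer, so the only arithmetic is an align-and-pad step against the available tailroom. A checkable miniature (the 40-byte inner header and the sample sizes are assumptions for illustration):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t align4(uint32_t x)      /* round up to a 4-byte boundary */
    {
        return (x + 3u) & ~3u;
    }

    int main(void)
    {
        uint32_t mtu = 1500, bsz = 46, msz = 100;
        uint32_t max = mtu - 40;            /* assumed INT_H_SIZE of 40 */
        uint32_t start = align4(bsz);       /* 48: offset of the new message */
        uint32_t pad = start - bsz;         /* 2 bytes of padding */

        if (start + msz <= max)
            printf("bundle grows from %u to %u bytes (pad %u)\n",
                   bsz, start + msz, pad);
        return 0;
    }
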
 
@@ -416,18 +417,18 @@ none:
 
 /**
  * tipc_msg_make_bundle(): Create bundle buf and append message to its tail
- * @list: the buffer chain
- * @skb: buffer to be appended and replaced
+ * @skb: buffer to be created, appended to, and returned on success
+ * @msg: message to be appended
  * @mtu: max allowable size for the bundle buffer, inclusive header
  * @dnode: destination node for message. (Not always present in header)
- * Replaces buffer if successful
 * Returns true on success, otherwise false
  */
-bool tipc_msg_make_bundle(struct sk_buff **skb, u32 mtu, u32 dnode)
+bool tipc_msg_make_bundle(struct sk_buff **skb, struct tipc_msg *msg,
+                         u32 mtu, u32 dnode)
 {
-       struct sk_buff *bskb;
+       struct sk_buff *_skb;
        struct tipc_msg *bmsg;
-       struct tipc_msg *msg = buf_msg(*skb);
        u32 msz = msg_size(msg);
        u32 max = mtu - INT_H_SIZE;
 
@@ -440,19 +441,23 @@ bool tipc_msg_make_bundle(struct sk_buff **skb, u32 mtu, u32 dnode)
        if (msz > (max / 2))
                return false;
 
-       bskb = tipc_buf_acquire(max);
-       if (!bskb)
+       _skb = tipc_buf_acquire(max);
+       if (!_skb)
                return false;
 
-       skb_trim(bskb, INT_H_SIZE);
-       bmsg = buf_msg(bskb);
+       skb_trim(_skb, INT_H_SIZE);
+       bmsg = buf_msg(_skb);
        tipc_msg_init(msg_prevnode(msg), bmsg, MSG_BUNDLER, 0,
                      INT_H_SIZE, dnode);
+       if (msg_isdata(msg))
+               msg_set_importance(bmsg, TIPC_CRITICAL_IMPORTANCE);
+       else
+               msg_set_importance(bmsg, TIPC_SYSTEM_IMPORTANCE);
        msg_set_seqno(bmsg, msg_seqno(msg));
        msg_set_ack(bmsg, msg_ack(msg));
        msg_set_bcast_ack(bmsg, msg_bcast_ack(msg));
-       tipc_msg_bundle(bskb, *skb, mtu);
-       *skb = bskb;
+       tipc_msg_bundle(_skb, msg, mtu);
+       *skb = _skb;
        return true;
 }
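
The two additions above work as a pair: a freshly made bundle inherits CRITICAL importance when it carries user data and SYSTEM importance otherwise, and tipc_msg_bundle() now refuses to append a sub-SYSTEM message to a SYSTEM-importance bundle. The rule in miniature (the enum mirrors TIPC's 0..4 importance ladder; treat the values as assumptions):

    #include <assert.h>
    #include <stdbool.h>

    enum imp { LOW, MEDIUM, HIGH, CRITICAL, SYSTEM };

    /* Mirror of the new admission check in tipc_msg_bundle(). */
    static bool may_bundle(enum imp msg_imp, enum imp bundle_imp)
    {
        return !(msg_imp < SYSTEM && bundle_imp == SYSTEM);
    }

    int main(void)
    {
        assert(may_bundle(LOW, CRITICAL));  /* user data into a data bundle  */
        assert(!may_bundle(LOW, SYSTEM));   /* user data must not ride SYSTEM */
        assert(may_bundle(SYSTEM, SYSTEM)); /* control into control is fine  */
        return 0;
    }
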
 
index e1d3595e2ee9577634b2bf5b215bd96f43ca473b..19c45fb66238816f6084a706e9ab7784a31faa8c 100644 (file)
@@ -313,12 +313,12 @@ static inline void msg_set_lookup_scope(struct tipc_msg *m, u32 n)
        msg_set_bits(m, 1, 19, 0x3, n);
 }
 
-static inline u32 msg_bcast_ack(struct tipc_msg *m)
+static inline u16 msg_bcast_ack(struct tipc_msg *m)
 {
        return msg_bits(m, 1, 0, 0xffff);
 }
 
-static inline void msg_set_bcast_ack(struct tipc_msg *m, u32 n)
+static inline void msg_set_bcast_ack(struct tipc_msg *m, u16 n)
 {
        msg_set_bits(m, 1, 0, 0xffff, n);
 }
@@ -327,22 +327,22 @@ static inline void msg_set_bcast_ack(struct tipc_msg *m, u32 n)
 /*
  * Word 2
  */
-static inline u32 msg_ack(struct tipc_msg *m)
+static inline u16 msg_ack(struct tipc_msg *m)
 {
        return msg_bits(m, 2, 16, 0xffff);
 }
 
-static inline void msg_set_ack(struct tipc_msg *m, u32 n)
+static inline void msg_set_ack(struct tipc_msg *m, u16 n)
 {
        msg_set_bits(m, 2, 16, 0xffff, n);
 }
 
-static inline u32 msg_seqno(struct tipc_msg *m)
+static inline u16 msg_seqno(struct tipc_msg *m)
 {
        return msg_bits(m, 2, 0, 0xffff);
 }
 
-static inline void msg_set_seqno(struct tipc_msg *m, u32 n)
+static inline void msg_set_seqno(struct tipc_msg *m, u16 n)
 {
        msg_set_bits(m, 2, 0, 0xffff, n);
 }
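
All of these accessors are thin wrappers around TIPC's generic word/position/mask helpers; only the prototypes narrow to u16 here, since a 0xffff mask already confines the value to 16 bits. A userspace rendering of the underlying scheme (layout simplified; the real header code also keeps host-endian shortcuts):

    #include <arpa/inet.h>
    #include <assert.h>
    #include <stdint.h>

    struct hdr { uint32_t w[11]; };         /* big-endian 32-bit header words */

    static uint32_t msg_bits(struct hdr *m, int w, int pos, uint32_t mask)
    {
        return (ntohl(m->w[w]) >> pos) & mask;
    }

    static void msg_set_bits(struct hdr *m, int w, int pos, uint32_t mask,
                             uint32_t val)
    {
        uint32_t word = ntohl(m->w[w]);

        word &= ~(mask << pos);             /* clear the field ... */
        word |= (val & mask) << pos;        /* ... then set the new value */
        m->w[w] = htonl(word);
    }

    int main(void)
    {
        struct hdr m = { { 0 } };

        msg_set_bits(&m, 2, 0, 0xffff, 0xabcd);  /* seqno: word 2, bits 0-15 */
        msg_set_bits(&m, 2, 16, 0xffff, 0x1234); /* ack: word 2, bits 16-31  */
        assert(msg_bits(&m, 2, 0, 0xffff) == 0xabcd);
        assert(msg_bits(&m, 2, 16, 0xffff) == 0x1234);
        return 0;
    }
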
@@ -352,18 +352,22 @@ static inline void msg_set_seqno(struct tipc_msg *m, u32 n)
  */
 static inline u32 msg_importance(struct tipc_msg *m)
 {
-       if (unlikely(msg_user(m) == MSG_FRAGMENTER))
+       int usr = msg_user(m);
+
+       if (likely((usr <= TIPC_CRITICAL_IMPORTANCE) && !msg_errcode(m)))
+               return usr;
+       if ((usr == MSG_FRAGMENTER) || (usr == MSG_BUNDLER))
                return msg_bits(m, 5, 13, 0x7);
-       if (likely(msg_isdata(m) && !msg_errcode(m)))
-               return msg_user(m);
        return TIPC_SYSTEM_IMPORTANCE;
 }
 
 static inline void msg_set_importance(struct tipc_msg *m, u32 i)
 {
-       if (unlikely(msg_user(m) == MSG_FRAGMENTER))
+       int usr = msg_user(m);
+
+       if (likely((usr == MSG_FRAGMENTER) || (usr == MSG_BUNDLER)))
                msg_set_bits(m, 5, 13, 0x7, i);
-       else if (likely(i < TIPC_SYSTEM_IMPORTANCE))
+       else if (i < TIPC_SYSTEM_IMPORTANCE)
                msg_set_user(m, i);
        else
                pr_warn("Trying to set illegal importance in message\n");
@@ -772,9 +776,9 @@ struct sk_buff *tipc_msg_create(uint user, uint type, uint hdr_sz,
                                uint data_sz, u32 dnode, u32 onode,
                                u32 dport, u32 oport, int errcode);
 int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf);
-bool tipc_msg_bundle(struct sk_buff *bskb, struct sk_buff *skb, u32 mtu);
-
-bool tipc_msg_make_bundle(struct sk_buff **skb, u32 mtu, u32 dnode);
+bool tipc_msg_bundle(struct sk_buff *skb, struct tipc_msg *msg, u32 mtu);
+bool tipc_msg_make_bundle(struct sk_buff **skb, struct tipc_msg *msg,
+                         u32 mtu, u32 dnode);
 bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos);
 int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
                   int offset, int dsz, int mtu, struct sk_buff_head *list);
@@ -782,6 +786,11 @@ bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, u32 *dnode,
                          int *err);
 struct sk_buff *tipc_msg_reassemble(struct sk_buff_head *list);
 
+static inline u16 buf_seqno(struct sk_buff *skb)
+{
+       return msg_seqno(buf_msg(skb));
+}
+
 /* tipc_skb_peek(): peek and reserve first buffer in list
  * @list: list to be peeked in
  * Returns pointer to first buffer in list, if any
index ab0ac62a12879b068ef4d34aa360bf8839676b2c..0f47f08bf38f0b093520d20e808745dbe8fd4a0e 100644 (file)
@@ -330,13 +330,9 @@ static struct publication *tipc_nameseq_insert_publ(struct net *net,
 
        /* Any subscriptions waiting for notification?  */
        list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) {
-               tipc_subscr_report_overlap(s,
-                                          publ->lower,
-                                          publ->upper,
-                                          TIPC_PUBLISHED,
-                                          publ->ref,
-                                          publ->node,
-                                          created_subseq);
+               tipc_subscrp_report_overlap(s, publ->lower, publ->upper,
+                                           TIPC_PUBLISHED, publ->ref,
+                                           publ->node, created_subseq);
        }
        return publ;
 }
@@ -404,13 +400,9 @@ found:
 
        /* Notify any waiting subscriptions */
        list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) {
-               tipc_subscr_report_overlap(s,
-                                          publ->lower,
-                                          publ->upper,
-                                          TIPC_WITHDRAWN,
-                                          publ->ref,
-                                          publ->node,
-                                          removed_subseq);
+               tipc_subscrp_report_overlap(s, publ->lower, publ->upper,
+                                           TIPC_WITHDRAWN, publ->ref,
+                                           publ->node, removed_subseq);
        }
 
        return publ;
@@ -432,19 +424,17 @@ static void tipc_nameseq_subscribe(struct name_seq *nseq,
                return;
 
        while (sseq != &nseq->sseqs[nseq->first_free]) {
-               if (tipc_subscr_overlap(s, sseq->lower, sseq->upper)) {
+               if (tipc_subscrp_check_overlap(s, sseq->lower, sseq->upper)) {
                        struct publication *crs;
                        struct name_info *info = sseq->info;
                        int must_report = 1;
 
                        list_for_each_entry(crs, &info->zone_list, zone_list) {
-                               tipc_subscr_report_overlap(s,
-                                                          sseq->lower,
-                                                          sseq->upper,
-                                                          TIPC_PUBLISHED,
-                                                          crs->ref,
-                                                          crs->node,
-                                                          must_report);
+                               tipc_subscrp_report_overlap(s, sseq->lower,
+                                                           sseq->upper,
+                                                           TIPC_PUBLISHED,
+                                                           crs->ref, crs->node,
+                                                           must_report);
                                must_report = 0;
                        }
                }
index a54f3cbe2246c8daff5d89923f31e09c2e082098..d6d1399ae22922754ba24364e21e89a6a3497d22 100644 (file)
@@ -40,6 +40,7 @@
 #include "subscr.h"
 #include "socket.h"
 #include "node.h"
+#include "bcast.h"
 
 static const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = {
        [TIPC_NLA_NET_UNSPEC]   = { .type = NLA_UNSPEC },
index ce9121e8e990302d104902b977ce7304949de0ef..53e0fee800864e4b8c29ed793d60a5823d59c773 100644 (file)
@@ -55,6 +55,7 @@ struct tipc_nl_compat_msg {
        int rep_type;
        int rep_size;
        int req_type;
+       struct net *net;
        struct sk_buff *rep;
        struct tlv_desc *req;
        struct sock *dst_sk;
@@ -68,7 +69,8 @@ struct tipc_nl_compat_cmd_dump {
 
 struct tipc_nl_compat_cmd_doit {
        int (*doit)(struct sk_buff *skb, struct genl_info *info);
-       int (*transcode)(struct sk_buff *skb, struct tipc_nl_compat_msg *msg);
+       int (*transcode)(struct tipc_nl_compat_cmd_doit *cmd,
+                        struct sk_buff *skb, struct tipc_nl_compat_msg *msg);
 };
 
 static int tipc_skb_tailroom(struct sk_buff *skb)
@@ -281,7 +283,7 @@ static int __tipc_nl_compat_doit(struct tipc_nl_compat_cmd_doit *cmd,
        if (!trans_buf)
                return -ENOMEM;
 
-       err = (*cmd->transcode)(trans_buf, msg);
+       err = (*cmd->transcode)(cmd, trans_buf, msg);
        if (err)
                goto trans_out;
 
@@ -353,7 +355,8 @@ static int tipc_nl_compat_bearer_dump(struct tipc_nl_compat_msg *msg,
                            nla_len(bearer[TIPC_NLA_BEARER_NAME]));
 }
 
-static int tipc_nl_compat_bearer_enable(struct sk_buff *skb,
+static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
+                                       struct sk_buff *skb,
                                        struct tipc_nl_compat_msg *msg)
 {
        struct nlattr *prop;
@@ -385,7 +388,8 @@ static int tipc_nl_compat_bearer_enable(struct sk_buff *skb,
        return 0;
 }
 
-static int tipc_nl_compat_bearer_disable(struct sk_buff *skb,
+static int tipc_nl_compat_bearer_disable(struct tipc_nl_compat_cmd_doit *cmd,
+                                        struct sk_buff *skb,
                                         struct tipc_nl_compat_msg *msg)
 {
        char *name;
@@ -576,11 +580,81 @@ static int tipc_nl_compat_link_dump(struct tipc_nl_compat_msg *msg,
                            &link_info, sizeof(link_info));
 }
 
-static int tipc_nl_compat_link_set(struct sk_buff *skb,
-                                  struct tipc_nl_compat_msg *msg)
+static int __tipc_add_link_prop(struct sk_buff *skb,
+                               struct tipc_nl_compat_msg *msg,
+                               struct tipc_link_config *lc)
+{
+       switch (msg->cmd) {
+       case TIPC_CMD_SET_LINK_PRI:
+               return nla_put_u32(skb, TIPC_NLA_PROP_PRIO, ntohl(lc->value));
+       case TIPC_CMD_SET_LINK_TOL:
+               return nla_put_u32(skb, TIPC_NLA_PROP_TOL, ntohl(lc->value));
+       case TIPC_CMD_SET_LINK_WINDOW:
+               return nla_put_u32(skb, TIPC_NLA_PROP_WIN, ntohl(lc->value));
+       }
+
+       return -EINVAL;
+}
+
+static int tipc_nl_compat_media_set(struct sk_buff *skb,
+                                   struct tipc_nl_compat_msg *msg)
 {
-       struct nlattr *link;
        struct nlattr *prop;
+       struct nlattr *media;
+       struct tipc_link_config *lc;
+
+       lc = (struct tipc_link_config *)TLV_DATA(msg->req);
+
+       media = nla_nest_start(skb, TIPC_NLA_MEDIA);
+       if (!media)
+               return -EMSGSIZE;
+
+       if (nla_put_string(skb, TIPC_NLA_MEDIA_NAME, lc->name))
+               return -EMSGSIZE;
+
+       prop = nla_nest_start(skb, TIPC_NLA_MEDIA_PROP);
+       if (!prop)
+               return -EMSGSIZE;
+
+       __tipc_add_link_prop(skb, msg, lc);
+       nla_nest_end(skb, prop);
+       nla_nest_end(skb, media);
+
+       return 0;
+}
+
+static int tipc_nl_compat_bearer_set(struct sk_buff *skb,
+                                    struct tipc_nl_compat_msg *msg)
+{
+       struct nlattr *prop;
+       struct nlattr *bearer;
+       struct tipc_link_config *lc;
+
+       lc = (struct tipc_link_config *)TLV_DATA(msg->req);
+
+       bearer = nla_nest_start(skb, TIPC_NLA_BEARER);
+       if (!bearer)
+               return -EMSGSIZE;
+
+       if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, lc->name))
+               return -EMSGSIZE;
+
+       prop = nla_nest_start(skb, TIPC_NLA_BEARER_PROP);
+       if (!prop)
+               return -EMSGSIZE;
+
+       __tipc_add_link_prop(skb, msg, lc);
+       nla_nest_end(skb, prop);
+       nla_nest_end(skb, bearer);
+
+       return 0;
+}
+
+static int __tipc_nl_compat_link_set(struct sk_buff *skb,
+                                    struct tipc_nl_compat_msg *msg)
+{
+       struct nlattr *prop;
+       struct nlattr *link;
        struct tipc_link_config *lc;
 
        lc = (struct tipc_link_config *)TLV_DATA(msg->req);
@@ -596,24 +670,40 @@ static int tipc_nl_compat_link_set(struct sk_buff *skb,
        if (!prop)
                return -EMSGSIZE;
 
-       if (msg->cmd == TIPC_CMD_SET_LINK_PRI) {
-               if (nla_put_u32(skb, TIPC_NLA_PROP_PRIO, ntohl(lc->value)))
-                       return -EMSGSIZE;
-       } else if (msg->cmd == TIPC_CMD_SET_LINK_TOL) {
-               if (nla_put_u32(skb, TIPC_NLA_PROP_TOL, ntohl(lc->value)))
-                       return -EMSGSIZE;
-       } else if (msg->cmd == TIPC_CMD_SET_LINK_WINDOW) {
-               if (nla_put_u32(skb, TIPC_NLA_PROP_WIN, ntohl(lc->value)))
-                       return -EMSGSIZE;
-       }
-
+       __tipc_add_link_prop(skb, msg, lc);
        nla_nest_end(skb, prop);
        nla_nest_end(skb, link);
 
        return 0;
 }
 
-static int tipc_nl_compat_link_reset_stats(struct sk_buff *skb,
+static int tipc_nl_compat_link_set(struct tipc_nl_compat_cmd_doit *cmd,
+                                  struct sk_buff *skb,
+                                  struct tipc_nl_compat_msg *msg)
+{
+       struct tipc_link_config *lc;
+       struct tipc_bearer *bearer;
+       struct tipc_media *media;
+
+       lc = (struct tipc_link_config *)TLV_DATA(msg->req);
+
+       media = tipc_media_find(lc->name);
+       if (media) {
+               cmd->doit = &tipc_nl_media_set;
+               return tipc_nl_compat_media_set(skb, msg);
+       }
+
+       bearer = tipc_bearer_find(msg->net, lc->name);
+       if (bearer) {
+               cmd->doit = &tipc_nl_bearer_set;
+               return tipc_nl_compat_bearer_set(skb, msg);
+       }
+
+       return __tipc_nl_compat_link_set(skb, msg);
+}
+
+static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd,
+                                          struct sk_buff *skb,
                                           struct tipc_nl_compat_msg *msg)
 {
        char *name;
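
tipc_nl_compat_link_set() above now recognizes that the legacy "set link" command may really be aimed at a media or a bearer: it probes the name in that order and swaps cmd->doit so the matching netlink handler runs on the transcoded request. The dispatch shape in a userspace miniature (the names and match rules are purely illustrative):

    #include <stdio.h>
    #include <string.h>

    typedef int (*doit_fn)(const char *name);

    static int media_set(const char *n)  { printf("media: %s\n", n);  return 0; }
    static int bearer_set(const char *n) { printf("bearer: %s\n", n); return 0; }
    static int link_set(const char *n)   { printf("link: %s\n", n);   return 0; }

    static doit_fn pick_doit(const char *name)
    {
        if (strcmp(name, "eth") == 0)       /* looks like a media name */
            return media_set;
        if (strncmp(name, "eth:", 4) == 0)  /* looks like a bearer name */
            return bearer_set;
        return link_set;                    /* otherwise treat as a link */
    }

    int main(void)
    {
        const char *names[] = { "eth", "eth:data0", "broadcast-link" };

        for (int i = 0; i < 3; i++)
            pick_doit(names[i])(names[i]);
        return 0;
    }
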
@@ -851,7 +941,8 @@ static int tipc_nl_compat_node_dump(struct tipc_nl_compat_msg *msg,
                            sizeof(node_info));
 }
 
-static int tipc_nl_compat_net_set(struct sk_buff *skb,
+static int tipc_nl_compat_net_set(struct tipc_nl_compat_cmd_doit *cmd,
+                                 struct sk_buff *skb,
                                  struct tipc_nl_compat_msg *msg)
 {
        u32 val;
@@ -1007,7 +1098,6 @@ static int tipc_nl_compat_recv(struct sk_buff *skb, struct genl_info *info)
        struct nlmsghdr *req_nlh;
        struct nlmsghdr *rep_nlh;
        struct tipc_genlmsghdr *req_userhdr = info->userhdr;
-       struct net *net = genl_info_net(info);
 
        memset(&msg, 0, sizeof(msg));
 
@@ -1015,6 +1105,7 @@ static int tipc_nl_compat_recv(struct sk_buff *skb, struct genl_info *info)
        msg.req = nlmsg_data(req_nlh) + GENL_HDRLEN + TIPC_GENL_HDRLEN;
        msg.cmd = req_userhdr->cmd;
        msg.dst_sk = info->dst_sk;
+       msg.net = genl_info_net(info);
 
        if ((msg.cmd & 0xC000) && (!netlink_net_capable(skb, CAP_NET_ADMIN))) {
                msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_NET_ADMIN);
@@ -1030,7 +1121,7 @@ static int tipc_nl_compat_recv(struct sk_buff *skb, struct genl_info *info)
        }
 
        err = tipc_nl_compat_handle(&msg);
-       if (err == -EOPNOTSUPP)
+       if ((err == -EOPNOTSUPP) || (err == -EPERM))
                msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED);
        else if (err == -EINVAL)
                msg.rep = tipc_get_err_tlv(TIPC_CFG_TLV_ERROR);
@@ -1043,7 +1134,7 @@ send:
        rep_nlh = nlmsg_hdr(msg.rep);
        memcpy(rep_nlh, info->nlhdr, len);
        rep_nlh->nlmsg_len = msg.rep->len;
-       genlmsg_unicast(net, msg.rep, NETLINK_CB(skb).portid);
+       genlmsg_unicast(msg.net, msg.rep, NETLINK_CB(skb).portid);
 
        return err;
 }
index 22c059ad29991abbdc40e3eca4a09de78df2c1d0..0b1d61a5f85334b3553780e8c0dd64c3f0549aa3 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * net/tipc/node.c: TIPC node management routines
  *
- * Copyright (c) 2000-2006, 2012-2014, Ericsson AB
+ * Copyright (c) 2000-2006, 2012-2015, Ericsson AB
  * Copyright (c) 2005-2006, 2010-2014, Wind River Systems
  * All rights reserved.
  *
@@ -39,6 +39,7 @@
 #include "node.h"
 #include "name_distr.h"
 #include "socket.h"
+#include "bcast.h"
 
 static void node_lost_contact(struct tipc_node *n_ptr);
 static void node_established_contact(struct tipc_node *n_ptr);
index 02d5c20dc5511a1669c0262e17d1127a5b2b6a1e..5a834cf142c8432fc4b5d65ab9ada044b342f438 100644 (file)
@@ -45,8 +45,6 @@
 /* Out-of-range value for node signature */
 #define INVALID_NODE_SIG       0x10000
 
-#define NODE_HTABLE_SIZE       512
-
 /* Flags used to take different actions according to flag type
  * TIPC_WAIT_PEER_LINKS_DOWN: wait to see that peer's links are down
  * TIPC_WAIT_OWN_LINKS_DOWN: wait until peer node is declared down
index 77ff03ed1e18d13224f086c2315d7123cc931123..922e04a43396db1f19fa6f1721a29840fb9b45d8 100644 (file)
@@ -309,6 +309,10 @@ static int tipc_accept_from_sock(struct tipc_conn *con)
 
        /* Notify that new connection is incoming */
        newcon->usr_data = s->tipc_conn_new(newcon->conid);
+       if (!newcon->usr_data) {
+               sock_release(newsock);
+               return -ENOMEM;
+       }
 
        /* Wake up receive process in case of 'SYN+' message */
        newsock->sk->sk_data_ready(newsock->sk);
@@ -321,7 +325,7 @@ static struct socket *tipc_create_listen_sock(struct tipc_conn *con)
        struct socket *sock = NULL;
        int ret;
 
-       ret = __sock_create(s->net, AF_TIPC, SOCK_SEQPACKET, 0, &sock, 1);
+       ret = sock_create_kern(s->net, AF_TIPC, SOCK_SEQPACKET, 0, &sock);
        if (ret < 0)
                return NULL;
        ret = kernel_setsockopt(sock, SOL_TIPC, TIPC_IMPORTANCE,
index 9074b5cede38b8edd75890b684a706d96b9f71ba..30ea82a9b0f13b44d34767241586a3d9648b906e 100644 (file)
@@ -41,6 +41,7 @@
 #include "link.h"
 #include "name_distr.h"
 #include "socket.h"
+#include "bcast.h"
 
 #define SS_LISTENING           -1      /* socket is listening */
 #define SS_READY               -2      /* socket is connectionless */
@@ -342,7 +343,7 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
        }
 
        /* Allocate socket's protocol area */
-       sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto);
+       sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto, kern);
        if (sk == NULL)
                return -ENOMEM;
 
@@ -409,7 +410,7 @@ static int tipc_release(struct socket *sock)
        struct net *net;
        struct tipc_sock *tsk;
        struct sk_buff *skb;
-       u32 dnode, probing_state;
+       u32 dnode;
 
        /*
         * Exit if socket isn't fully initialized (occurs when a failed accept()
@@ -447,10 +448,7 @@ static int tipc_release(struct socket *sock)
        }
 
        tipc_sk_withdraw(tsk, 0, NULL);
-       probing_state = tsk->probing_state;
-       if (del_timer_sync(&sk->sk_timer) &&
-           probing_state != TIPC_CONN_PROBING)
-               sock_put(sk);
+       sk_stop_timer(sk, &sk->sk_timer);
        tipc_sk_remove(tsk);
        if (tsk->connected) {
                skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
index 1c147c869c2e68312ee02b5275589ba0e18f7727..350cca33ee0a64b08ce6135aa130011263eba136 100644 (file)
 
 /**
  * struct tipc_subscriber - TIPC network topology subscriber
+ * @kref: reference counter to tipc_subscription object
  * @conid: connection identifier to server connecting to subscriber
  * @lock: control access to subscriber
- * @subscription_list: list of subscription objects for this subscriber
+ * @subscrp_list: list of subscription objects for this subscriber
  */
 struct tipc_subscriber {
+       struct kref kref;
        int conid;
        spinlock_t lock;
-       struct list_head subscription_list;
+       struct list_head subscrp_list;
 };
 
+static void tipc_subscrp_delete(struct tipc_subscription *sub);
+static void tipc_subscrb_put(struct tipc_subscriber *subscriber);
+
 /**
  * htohl - convert value to endianness used by destination
  * @in: value to convert
@@ -62,9 +67,9 @@ static u32 htohl(u32 in, int swap)
        return swap ? swab32(in) : in;
 }
 
-static void subscr_send_event(struct tipc_subscription *sub, u32 found_lower,
-                             u32 found_upper, u32 event, u32 port_ref,
-                             u32 node)
+static void tipc_subscrp_send_event(struct tipc_subscription *sub,
+                                   u32 found_lower, u32 found_upper,
+                                   u32 event, u32 port_ref, u32 node)
 {
        struct tipc_net *tn = net_generic(sub->net, tipc_net_id);
        struct tipc_subscriber *subscriber = sub->subscriber;
@@ -82,12 +87,13 @@ static void subscr_send_event(struct tipc_subscription *sub, u32 found_lower,
 }
 
 /**
- * tipc_subscr_overlap - test for subscription overlap with the given values
+ * tipc_subscrp_check_overlap - test for subscription overlap with the
+ * given values
  *
  * Returns 1 if there is overlap, otherwise 0.
  */
-int tipc_subscr_overlap(struct tipc_subscription *sub, u32 found_lower,
-                       u32 found_upper)
+int tipc_subscrp_check_overlap(struct tipc_subscription *sub, u32 found_lower,
+                              u32 found_upper)
 {
        if (found_lower < sub->seq.lower)
                found_lower = sub->seq.lower;
@@ -98,138 +104,121 @@ int tipc_subscr_overlap(struct tipc_subscription *sub, u32 found_lower,
        return 1;
 }
 
-/**
- * tipc_subscr_report_overlap - issue event if there is subscription overlap
- *
- * Protected by nameseq.lock in name_table.c
- */
-void tipc_subscr_report_overlap(struct tipc_subscription *sub, u32 found_lower,
-                               u32 found_upper, u32 event, u32 port_ref,
-                               u32 node, int must)
+void tipc_subscrp_report_overlap(struct tipc_subscription *sub, u32 found_lower,
+                                u32 found_upper, u32 event, u32 port_ref,
+                                u32 node, int must)
 {
-       if (!tipc_subscr_overlap(sub, found_lower, found_upper))
+       if (!tipc_subscrp_check_overlap(sub, found_lower, found_upper))
                return;
        if (!must && !(sub->filter & TIPC_SUB_PORTS))
                return;
 
-       subscr_send_event(sub, found_lower, found_upper, event, port_ref, node);
+       tipc_subscrp_send_event(sub, found_lower, found_upper, event, port_ref,
+                               node);
 }
 
-static void subscr_timeout(unsigned long data)
+static void tipc_subscrp_timeout(unsigned long data)
 {
        struct tipc_subscription *sub = (struct tipc_subscription *)data;
        struct tipc_subscriber *subscriber = sub->subscriber;
-       struct tipc_net *tn = net_generic(sub->net, tipc_net_id);
 
-       /* The spin lock per subscriber is used to protect its members */
-       spin_lock_bh(&subscriber->lock);
+       /* Notify subscriber of timeout */
+       tipc_subscrp_send_event(sub, sub->evt.s.seq.lower, sub->evt.s.seq.upper,
+                               TIPC_SUBSCR_TIMEOUT, 0, 0);
 
-       /* Validate timeout (in case subscription is being cancelled) */
-       if (sub->timeout == TIPC_WAIT_FOREVER) {
-               spin_unlock_bh(&subscriber->lock);
-               return;
-       }
+       spin_lock_bh(&subscriber->lock);
+       tipc_subscrp_delete(sub);
+       spin_unlock_bh(&subscriber->lock);
 
-       /* Unlink subscription from name table */
-       tipc_nametbl_unsubscribe(sub);
+       tipc_subscrb_put(subscriber);
+}
 
-       /* Unlink subscription from subscriber */
-       list_del(&sub->subscription_list);
+static void tipc_subscrb_kref_release(struct kref *kref)
+{
+       struct tipc_subscriber *subscriber = container_of(kref,
+                                            struct tipc_subscriber, kref);
 
-       spin_unlock_bh(&subscriber->lock);
+       kfree(subscriber);
+}
 
-       /* Notify subscriber of timeout */
-       subscr_send_event(sub, sub->evt.s.seq.lower, sub->evt.s.seq.upper,
-                         TIPC_SUBSCR_TIMEOUT, 0, 0);
+static void tipc_subscrb_put(struct tipc_subscriber *subscriber)
+{
+       kref_put(&subscriber->kref, tipc_subscrb_kref_release);
+}
 
-       /* Now destroy subscription */
-       kfree(sub);
-       atomic_dec(&tn->subscription_count);
+static void tipc_subscrb_get(struct tipc_subscriber *subscriber)
+{
+       kref_get(&subscriber->kref);
 }
 
-/**
- * subscr_del - delete a subscription within a subscription list
- *
- * Called with subscriber lock held.
- */
-static void subscr_del(struct tipc_subscription *sub)
+static struct tipc_subscriber *tipc_subscrb_create(int conid)
 {
-       struct tipc_net *tn = net_generic(sub->net, tipc_net_id);
+       struct tipc_subscriber *subscriber;
 
-       tipc_nametbl_unsubscribe(sub);
-       list_del(&sub->subscription_list);
-       kfree(sub);
-       atomic_dec(&tn->subscription_count);
+       subscriber = kzalloc(sizeof(*subscriber), GFP_ATOMIC);
+       if (!subscriber) {
+               pr_warn("Subscriber rejected, no memory\n");
+               return NULL;
+       }
+       kref_init(&subscriber->kref);
+       INIT_LIST_HEAD(&subscriber->subscrp_list);
+       subscriber->conid = conid;
+       spin_lock_init(&subscriber->lock);
+
+       return subscriber;
 }
 
-static void subscr_release(struct tipc_subscriber *subscriber)
+static void tipc_subscrb_delete(struct tipc_subscriber *subscriber)
 {
-       struct tipc_subscription *sub;
-       struct tipc_subscription *sub_temp;
+       struct tipc_subscription *sub, *temp;
 
        spin_lock_bh(&subscriber->lock);
-
        /* Destroy any existing subscriptions for subscriber */
-       list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list,
-                                subscription_list) {
-               if (sub->timeout != TIPC_WAIT_FOREVER) {
-                       spin_unlock_bh(&subscriber->lock);
-                       del_timer_sync(&sub->timer);
-                       spin_lock_bh(&subscriber->lock);
+       list_for_each_entry_safe(sub, temp, &subscriber->subscrp_list,
+                                subscrp_list) {
+               if (del_timer(&sub->timer)) {
+                       tipc_subscrp_delete(sub);
+                       tipc_subscrb_put(subscriber);
                }
-               subscr_del(sub);
        }
        spin_unlock_bh(&subscriber->lock);
 
-       /* Now destroy subscriber */
-       kfree(subscriber);
+       tipc_subscrb_put(subscriber);
 }
 
-/**
- * subscr_cancel - handle subscription cancellation request
- *
- * Called with subscriber lock held. Routine must temporarily release lock
- * to enable the subscription timeout routine to finish without deadlocking;
- * the lock is then reclaimed to allow caller to release it upon return.
- *
- * Note that fields of 's' use subscriber's endianness!
- */
-static void subscr_cancel(struct tipc_subscr *s,
-                         struct tipc_subscriber *subscriber)
+static void tipc_subscrp_delete(struct tipc_subscription *sub)
 {
-       struct tipc_subscription *sub;
-       struct tipc_subscription *sub_temp;
-       int found = 0;
+       struct tipc_net *tn = net_generic(sub->net, tipc_net_id);
+
+       tipc_nametbl_unsubscribe(sub);
+       list_del(&sub->subscrp_list);
+       kfree(sub);
+       atomic_dec(&tn->subscription_count);
+}
 
+static void tipc_subscrp_cancel(struct tipc_subscr *s,
+                               struct tipc_subscriber *subscriber)
+{
+       struct tipc_subscription *sub, *temp;
+
+       spin_lock_bh(&subscriber->lock);
        /* Find first matching subscription, exit if not found */
-       list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list,
-                                subscription_list) {
+       list_for_each_entry_safe(sub, temp, &subscriber->subscrp_list,
+                                subscrp_list) {
                if (!memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr))) {
-                       found = 1;
+                       if (del_timer(&sub->timer)) {
+                               tipc_subscrp_delete(sub);
+                               tipc_subscrb_put(subscriber);
+                       }
                        break;
                }
        }
-       if (!found)
-               return;
-
-       /* Cancel subscription timer (if used), then delete subscription */
-       if (sub->timeout != TIPC_WAIT_FOREVER) {
-               sub->timeout = TIPC_WAIT_FOREVER;
-               spin_unlock_bh(&subscriber->lock);
-               del_timer_sync(&sub->timer);
-               spin_lock_bh(&subscriber->lock);
-       }
-       subscr_del(sub);
+       spin_unlock_bh(&subscriber->lock);
 }
 
-/**
- * subscr_subscribe - create subscription for subscriber
- *
- * Called with subscriber lock held.
- */
-static int subscr_subscribe(struct net *net, struct tipc_subscr *s,
-                           struct tipc_subscriber *subscriber,
-                           struct tipc_subscription **sub_p)
+static int tipc_subscrp_create(struct net *net, struct tipc_subscr *s,
+                              struct tipc_subscriber *subscriber,
+                              struct tipc_subscription **sub_p)
 {
        struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_subscription *sub;
@@ -241,7 +230,7 @@ static int subscr_subscribe(struct net *net, struct tipc_subscr *s,
        /* Detect & process a subscription cancellation request */
        if (s->filter & htohl(TIPC_SUB_CANCEL, swap)) {
                s->filter &= ~htohl(TIPC_SUB_CANCEL, swap);
-               subscr_cancel(s, subscriber);
+               tipc_subscrp_cancel(s, subscriber);
                return 0;
        }
 
@@ -273,62 +262,51 @@ static int subscr_subscribe(struct net *net, struct tipc_subscr *s,
                kfree(sub);
                return -EINVAL;
        }
-       list_add(&sub->subscription_list, &subscriber->subscription_list);
+       spin_lock_bh(&subscriber->lock);
+       list_add(&sub->subscrp_list, &subscriber->subscrp_list);
+       spin_unlock_bh(&subscriber->lock);
        sub->subscriber = subscriber;
        sub->swap = swap;
-       memcpy(&sub->evt.s, s, sizeof(struct tipc_subscr));
+       memcpy(&sub->evt.s, s, sizeof(*s));
        atomic_inc(&tn->subscription_count);
-       if (sub->timeout != TIPC_WAIT_FOREVER) {
-               setup_timer(&sub->timer, subscr_timeout, (unsigned long)sub);
-               mod_timer(&sub->timer, jiffies + sub->timeout);
-       }
+       setup_timer(&sub->timer, tipc_subscrp_timeout, (unsigned long)sub);
+       if (sub->timeout != TIPC_WAIT_FOREVER)
+               sub->timeout += jiffies;
+       if (!mod_timer(&sub->timer, sub->timeout))
+               tipc_subscrb_get(subscriber);
        *sub_p = sub;
        return 0;
 }
 
 /* Handle one termination request for the subscriber */
-static void subscr_conn_shutdown_event(int conid, void *usr_data)
+static void tipc_subscrb_shutdown_cb(int conid, void *usr_data)
 {
-       subscr_release((struct tipc_subscriber *)usr_data);
+       tipc_subscrb_delete((struct tipc_subscriber *)usr_data);
 }
 
 /* Handle one request to create a new subscription for the subscriber */
-static void subscr_conn_msg_event(struct net *net, int conid,
-                                 struct sockaddr_tipc *addr, void *usr_data,
-                                 void *buf, size_t len)
+static void tipc_subscrb_rcv_cb(struct net *net, int conid,
+                               struct sockaddr_tipc *addr, void *usr_data,
+                               void *buf, size_t len)
 {
        struct tipc_subscriber *subscriber = usr_data;
        struct tipc_subscription *sub = NULL;
        struct tipc_net *tn = net_generic(net, tipc_net_id);
 
-       spin_lock_bh(&subscriber->lock);
-       subscr_subscribe(net, (struct tipc_subscr *)buf, subscriber, &sub);
+       tipc_subscrp_create(net, (struct tipc_subscr *)buf, subscriber, &sub);
        if (sub)
                tipc_nametbl_subscribe(sub);
        else
                tipc_conn_terminate(tn->topsrv, subscriber->conid);
-       spin_unlock_bh(&subscriber->lock);
 }
 
 /* Handle one request to establish a new subscriber */
-static void *subscr_named_msg_event(int conid)
+static void *tipc_subscrb_connect_cb(int conid)
 {
-       struct tipc_subscriber *subscriber;
-
-       /* Create subscriber object */
-       subscriber = kzalloc(sizeof(struct tipc_subscriber), GFP_ATOMIC);
-       if (subscriber == NULL) {
-               pr_warn("Subscriber rejected, no memory\n");
-               return NULL;
-       }
-       INIT_LIST_HEAD(&subscriber->subscription_list);
-       subscriber->conid = conid;
-       spin_lock_init(&subscriber->lock);
-
-       return (void *)subscriber;
+       return (void *)tipc_subscrb_create(conid);
 }
 
-int tipc_subscr_start(struct net *net)
+int tipc_topsrv_start(struct net *net)
 {
        struct tipc_net *tn = net_generic(net, tipc_net_id);
        const char name[] = "topology_server";
@@ -355,9 +333,9 @@ int tipc_subscr_start(struct net *net)
        topsrv->imp                     = TIPC_CRITICAL_IMPORTANCE;
        topsrv->type                    = SOCK_SEQPACKET;
        topsrv->max_rcvbuf_size         = sizeof(struct tipc_subscr);
-       topsrv->tipc_conn_recvmsg       = subscr_conn_msg_event;
-       topsrv->tipc_conn_new           = subscr_named_msg_event;
-       topsrv->tipc_conn_shutdown      = subscr_conn_shutdown_event;
+       topsrv->tipc_conn_recvmsg       = tipc_subscrb_rcv_cb;
+       topsrv->tipc_conn_new           = tipc_subscrb_connect_cb;
+       topsrv->tipc_conn_shutdown      = tipc_subscrb_shutdown_cb;
 
        strncpy(topsrv->name, name, strlen(name) + 1);
        tn->topsrv = topsrv;
@@ -366,7 +344,7 @@ int tipc_subscr_start(struct net *net)
        return tipc_server_start(topsrv);
 }
 
-void tipc_subscr_stop(struct net *net)
+void tipc_topsrv_stop(struct net *net)
 {
        struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_server *topsrv = tn->topsrv;
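
The subscriber rework above trades ad-hoc lock and timer juggling for reference counting: the creator holds one reference, an armed timer holds another, and whichever path actually stops the timer (del_timer() returning nonzero) is the one that drops the timer's reference, so neither subscriber nor subscription can be freed underneath a still-pending timeout. The ownership rule in a userspace miniature (no real timers; cancel_timer() merely stands in for del_timer()):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct subscriber {
        atomic_int refs;
        bool timer_pending;
    };

    static void sub_get(struct subscriber *s) { atomic_fetch_add(&s->refs, 1); }

    static void sub_put(struct subscriber *s)
    {
        if (atomic_fetch_sub(&s->refs, 1) == 1) {   /* dropped the last ref */
            puts("freeing subscriber");
            free(s);
        }
    }

    /* Stand-in for del_timer(): true iff the timer was still pending. */
    static bool cancel_timer(struct subscriber *s)
    {
        bool pending = s->timer_pending;

        s->timer_pending = false;
        return pending;
    }

    int main(void)
    {
        struct subscriber *s = calloc(1, sizeof(*s));

        if (!s)
            return 1;
        atomic_init(&s->refs, 1);           /* creator's reference */
        s->timer_pending = true;
        sub_get(s);                         /* arming the timer takes a ref */

        if (cancel_timer(s))                /* we stopped it: drop its ref */
            sub_put(s);
        sub_put(s);                         /* creator's reference */
        return 0;
    }
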
index 33488bd9fe3c9b1ebd6fe0b5545afe551121b198..92ee18cc5fe6ef5567a4e52a72e66748ed4b395a 100644 (file)
@@ -54,7 +54,7 @@ struct tipc_subscriber;
  * @filter: event filtering to be done for subscription
  * @timer: timer governing subscription duration (optional)
  * @nameseq_list: adjacent subscriptions in name sequence's subscription list
- * @subscription_list: adjacent subscriptions in subscriber's subscription list
+ * @subscrp_list: adjacent subscriptions in subscriber's subscription list
  * @server_ref: object reference of server port associated with subscription
  * @swap: indicates if subscriber uses opposite endianness in its messages
  * @evt: template for events generated by subscription
@@ -67,17 +67,17 @@ struct tipc_subscription {
        u32 filter;
        struct timer_list timer;
        struct list_head nameseq_list;
-       struct list_head subscription_list;
+       struct list_head subscrp_list;
        int swap;
        struct tipc_event evt;
 };
 
-int tipc_subscr_overlap(struct tipc_subscription *sub, u32 found_lower,
-                       u32 found_upper);
-void tipc_subscr_report_overlap(struct tipc_subscription *sub, u32 found_lower,
-                               u32 found_upper, u32 event, u32 port_ref,
-                               u32 node, int must);
-int tipc_subscr_start(struct net *net);
-void tipc_subscr_stop(struct net *net);
+int tipc_subscrp_check_overlap(struct tipc_subscription *sub, u32 found_lower,
+                              u32 found_upper);
+void tipc_subscrp_report_overlap(struct tipc_subscription *sub,
+                                u32 found_lower, u32 found_upper, u32 event,
+                                u32 port_ref, u32 node, int must);
+int tipc_topsrv_start(struct net *net);
+void tipc_topsrv_stop(struct net *net);
 
 #endif
index 06430598cf512fdaff480671620e8fa69c259bb5..f25e1675b865ac9b1823235705b7278cb914d346 100644 (file)
@@ -518,6 +518,11 @@ static int unix_ioctl(struct socket *, unsigned int, unsigned long);
 static int unix_shutdown(struct socket *, int);
 static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t);
 static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int);
+static ssize_t unix_stream_sendpage(struct socket *, struct page *, int offset,
+                                   size_t size, int flags);
+static ssize_t unix_stream_splice_read(struct socket *, loff_t *ppos,
+                                      struct pipe_inode_info *, size_t size,
+                                      unsigned int flags);
 static int unix_dgram_sendmsg(struct socket *, struct msghdr *, size_t);
 static int unix_dgram_recvmsg(struct socket *, struct msghdr *, size_t, int);
 static int unix_dgram_connect(struct socket *, struct sockaddr *,
@@ -558,7 +563,8 @@ static const struct proto_ops unix_stream_ops = {
        .sendmsg =      unix_stream_sendmsg,
        .recvmsg =      unix_stream_recvmsg,
        .mmap =         sock_no_mmap,
-       .sendpage =     sock_no_sendpage,
+       .sendpage =     unix_stream_sendpage,
+       .splice_read =  unix_stream_splice_read,
        .set_peek_off = unix_set_peek_off,
 };
 
@@ -620,7 +626,7 @@ static struct proto unix_proto = {
  */
 static struct lock_class_key af_unix_sk_receive_queue_lock_key;
 
-static struct sock *unix_create1(struct net *net, struct socket *sock)
+static struct sock *unix_create1(struct net *net, struct socket *sock, int kern)
 {
        struct sock *sk = NULL;
        struct unix_sock *u;
@@ -629,7 +635,7 @@ static struct sock *unix_create1(struct net *net, struct socket *sock)
        if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files())
                goto out;
 
-       sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto);
+       sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto, kern);
        if (!sk)
                goto out;
 
@@ -688,7 +694,7 @@ static int unix_create(struct net *net, struct socket *sock, int protocol,
                return -ESOCKTNOSUPPORT;
        }
 
-       return unix_create1(net, sock) ? 0 : -ENOMEM;
+       return unix_create1(net, sock, kern) ? 0 : -ENOMEM;
 }
 
 static int unix_release(struct socket *sock)
@@ -1088,7 +1094,7 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
        err = -ENOMEM;
 
        /* create new sock for complete connection */
-       newsk = unix_create1(sock_net(sk), NULL);
+       newsk = unix_create1(sock_net(sk), NULL, 0);
        if (newsk == NULL)
                goto out;
 
@@ -1720,6 +1726,101 @@ out_err:
        return sent ? : err;
 }
 
+static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
+                                   int offset, size_t size, int flags)
+{
+       int err = 0;
+       bool send_sigpipe = true;
+       struct sock *other, *sk = socket->sk;
+       struct sk_buff *skb, *newskb = NULL, *tail = NULL;
+
+       if (flags & MSG_OOB)
+               return -EOPNOTSUPP;
+
+       other = unix_peer(sk);
+       if (!other || sk->sk_state != TCP_ESTABLISHED)
+               return -ENOTCONN;
+
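+       /* not entered on the first pass; reached only via goto alloc_skb */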
+       if (false) {
+alloc_skb:
+               unix_state_unlock(other);
+               mutex_unlock(&unix_sk(other)->readlock);
+               newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT,
+                                             &err, 0);
+               if (!newskb)
+                       return err;
+       }
+
+       /* we must acquire readlock as we modify already present
+        * skbs in the sk_receive_queue and mess with skb->len
+        */
+       err = mutex_lock_interruptible(&unix_sk(other)->readlock);
+       if (err) {
+               err = flags & MSG_DONTWAIT ? -EAGAIN : -ERESTARTSYS;
+               send_sigpipe = false;
+               goto err;
+       }
+
+       if (sk->sk_shutdown & SEND_SHUTDOWN) {
+               err = -EPIPE;
+               goto err_unlock;
+       }
+
+       unix_state_lock(other);
+
+       if (sock_flag(other, SOCK_DEAD) ||
+           other->sk_shutdown & RCV_SHUTDOWN) {
+               err = -EPIPE;
+               goto err_state_unlock;
+       }
+
+       skb = skb_peek_tail(&other->sk_receive_queue);
+       if (tail && tail == skb) {
+               skb = newskb;
+       } else if (!skb) {
+               if (newskb)
+                       skb = newskb;
+               else
+                       goto alloc_skb;
+       } else if (newskb) {
+               /* fast path: an skb is already queued at the tail, so the
+                * speculatively allocated newskb is not needed; note that
+                * consume_skb() would do no harm even with newskb == NULL
+                */
+               consume_skb(newskb);
+       }
+
+       if (skb_append_pagefrags(skb, page, offset, size)) {
+               tail = skb;
+               goto alloc_skb;
+       }
+
+       skb->len += size;
+       skb->data_len += size;
+       skb->truesize += size;
+       atomic_add(size, &sk->sk_wmem_alloc);
+
+       if (newskb)
+               __skb_queue_tail(&other->sk_receive_queue, newskb);
+
+       unix_state_unlock(other);
+       mutex_unlock(&unix_sk(other)->readlock);
+
+       other->sk_data_ready(other);
+
+       return size;
+
+err_state_unlock:
+       unix_state_unlock(other);
+err_unlock:
+       mutex_unlock(&unix_sk(other)->readlock);
+err:
+       kfree_skb(newskb);
+       if (send_sigpipe && !(flags & MSG_NOSIGNAL))
+               send_sig(SIGPIPE, current, 0);
+       return err;
+}
+
 static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
                                  size_t len)
 {
@@ -1860,8 +1961,9 @@ out:
  *     Sleep until more data has arrived. But check for races..
  */
 static long unix_stream_data_wait(struct sock *sk, long timeo,
-                                 struct sk_buff *last)
+                                 struct sk_buff *last, unsigned int last_len)
 {
+       struct sk_buff *tail;
        DEFINE_WAIT(wait);
 
        unix_state_lock(sk);
@@ -1869,7 +1971,9 @@ static long unix_stream_data_wait(struct sock *sk, long timeo,
        for (;;) {
                prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 
-               if (skb_peek_tail(&sk->sk_receive_queue) != last ||
+               tail = skb_peek_tail(&sk->sk_receive_queue);
+               if (tail != last ||
+                   (tail && tail->len != last_len) ||
                    sk->sk_err ||
                    (sk->sk_shutdown & RCV_SHUTDOWN) ||
                    signal_pending(current) ||
@@ -1897,38 +2001,50 @@ static unsigned int unix_skb_len(const struct sk_buff *skb)
        return skb->len - UNIXCB(skb).consumed;
 }
 
-static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
-                              size_t size, int flags)
+struct unix_stream_read_state {
+       int (*recv_actor)(struct sk_buff *, int, int,
+                         struct unix_stream_read_state *);
+       struct socket *socket;
+       struct msghdr *msg;
+       struct pipe_inode_info *pipe;
+       size_t size;
+       int flags;
+       unsigned int splice_flags;
+};
+
+static int unix_stream_read_generic(struct unix_stream_read_state *state)
 {
        struct scm_cookie scm;
+       struct socket *sock = state->socket;
        struct sock *sk = sock->sk;
        struct unix_sock *u = unix_sk(sk);
-       DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
        int copied = 0;
+       int flags = state->flags;
        int noblock = flags & MSG_DONTWAIT;
-       int check_creds = 0;
+       bool check_creds = false;
        int target;
        int err = 0;
        long timeo;
        int skip;
+       size_t size = state->size;
+       unsigned int last_len;
 
        err = -EINVAL;
        if (sk->sk_state != TCP_ESTABLISHED)
                goto out;
 
        err = -EOPNOTSUPP;
-       if (flags&MSG_OOB)
+       if (flags & MSG_OOB)
                goto out;
 
-       target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
+       target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
        timeo = sock_rcvtimeo(sk, noblock);
 
+       memset(&scm, 0, sizeof(scm));
+
        /* Lock the socket to prevent queue disordering
         * while sleeps in memcpy_tomsg
         */
-
-       memset(&scm, 0, sizeof(scm));
-
        err = mutex_lock_interruptible(&u->readlock);
        if (unlikely(err)) {
                /* recvmsg() in non blocking mode is supposed to return -EAGAIN
@@ -1948,6 +2064,7 @@ static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
                        goto unlock;
                }
                last = skb = skb_peek(&sk->sk_receive_queue);
+               last_len = last ? last->len : 0;
 again:
                if (skb == NULL) {
                        unix_sk(sk)->recursion_level = 0;
@@ -1970,16 +2087,17 @@ again:
                                break;
                        mutex_unlock(&u->readlock);
 
-                       timeo = unix_stream_data_wait(sk, timeo, last);
+                       timeo = unix_stream_data_wait(sk, timeo, last,
+                                                     last_len);
 
-                       if (signal_pending(current)
-                           ||  mutex_lock_interruptible(&u->readlock)) {
+                       if (signal_pending(current) ||
+                           mutex_lock_interruptible(&u->readlock)) {
                                err = sock_intr_errno(timeo);
                                goto out;
                        }
 
                        continue;
- unlock:
+unlock:
                        unix_state_unlock(sk);
                        break;
                }
@@ -1988,6 +2106,7 @@ again:
                while (skip >= unix_skb_len(skb)) {
                        skip -= unix_skb_len(skb);
                        last = skb;
+                       last_len = skb->len;
                        skb = skb_peek_next(skb, &sk->sk_receive_queue);
                        if (!skb)
                                goto again;
@@ -2004,18 +2123,20 @@ again:
                } else if (test_bit(SOCK_PASSCRED, &sock->flags)) {
                        /* Copy credentials */
                        scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
-                       check_creds = 1;
+                       check_creds = true;
                }
 
                /* Copy address just once */
-               if (sunaddr) {
-                       unix_copy_addr(msg, skb->sk);
+               if (state->msg && state->msg->msg_name) {
+                       DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr,
+                                        state->msg->msg_name);
+                       unix_copy_addr(state->msg, skb->sk);
                        sunaddr = NULL;
                }
 
                chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
-               if (skb_copy_datagram_msg(skb, UNIXCB(skb).consumed + skip,
-                                         msg, chunk)) {
+               chunk = state->recv_actor(skb, skip, chunk, state);
+               if (chunk < 0) {
                        if (copied == 0)
                                copied = -EFAULT;
                        break;
@@ -2053,11 +2174,85 @@ again:
        } while (size);
 
        mutex_unlock(&u->readlock);
-       scm_recv(sock, msg, &scm, flags);
+       if (state->msg)
+               scm_recv(sock, state->msg, &scm, flags);
+       else
+               scm_destroy(&scm);
 out:
        return copied ? : err;
 }
 
+static int unix_stream_read_actor(struct sk_buff *skb,
+                                 int skip, int chunk,
+                                 struct unix_stream_read_state *state)
+{
+       int ret;
+
+       ret = skb_copy_datagram_msg(skb, UNIXCB(skb).consumed + skip,
+                                   state->msg, chunk);
+       return ret ?: chunk;
+}
+
+static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
+                              size_t size, int flags)
+{
+       struct unix_stream_read_state state = {
+               .recv_actor = unix_stream_read_actor,
+               .socket = sock,
+               .msg = msg,
+               .size = size,
+               .flags = flags
+       };
+
+       return unix_stream_read_generic(&state);
+}
+
+static ssize_t skb_unix_socket_splice(struct sock *sk,
+                                     struct pipe_inode_info *pipe,
+                                     struct splice_pipe_desc *spd)
+{
+       int ret;
+       struct unix_sock *u = unix_sk(sk);
+
+       mutex_unlock(&u->readlock);
+       ret = splice_to_pipe(pipe, spd);
+       mutex_lock(&u->readlock);
+
+       return ret;
+}
+
+static int unix_stream_splice_actor(struct sk_buff *skb,
+                                   int skip, int chunk,
+                                   struct unix_stream_read_state *state)
+{
+       return skb_splice_bits(skb, state->socket->sk,
+                              UNIXCB(skb).consumed + skip,
+                              state->pipe, chunk, state->splice_flags,
+                              skb_unix_socket_splice);
+}
+
+static ssize_t unix_stream_splice_read(struct socket *sock, loff_t *ppos,
+                                      struct pipe_inode_info *pipe,
+                                      size_t size, unsigned int flags)
+{
+       struct unix_stream_read_state state = {
+               .recv_actor = unix_stream_splice_actor,
+               .socket = sock,
+               .pipe = pipe,
+               .size = size,
+               .splice_flags = flags,
+       };
+
+       if (unlikely(*ppos))
+               return -ESPIPE;
+
+       if (sock->file->f_flags & O_NONBLOCK ||
+           flags & SPLICE_F_NONBLOCK)
+               state.flags = MSG_DONTWAIT;
+
+       return unix_stream_read_generic(&state);
+}
+
 static int unix_shutdown(struct socket *sock, int mode)
 {
        struct sock *sk = sock->sk;
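
The af_unix hunks above add two zero-copy paths: unix_stream_sendpage() appends page fragments to the skb already queued at the peer's tail (which is why unix_stream_data_wait() now also watches the tail skb's length, not just its identity), and unix_stream_splice_read() funnels through unix_stream_read_generic(), whose recv_actor callback abstracts "copy to msghdr" versus "splice to pipe". A minimal user-space sketch of what the new splice support enables; illustrative only, assuming a connected AF_UNIX stream fd and an open pipe, with error handling elided:

#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

/* move up to len bytes from a connected AF_UNIX stream socket into a
 * pipe without staging the data in a user-space buffer
 */
static ssize_t unix_to_pipe(int unix_fd, int pipe_wr, size_t len)
{
	/* SPLICE_F_NONBLOCK is mapped to MSG_DONTWAIT by the kernel path */
	return splice(unix_fd, NULL, pipe_wr, NULL, len, SPLICE_F_NONBLOCK);
}
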
index 2ec86e652a19802ec7f81e7443d29bbdf468b174..df5fc6b340f1bbde621fbe84fa44e0af6e2e00af 100644 (file)
@@ -581,13 +581,14 @@ struct sock *__vsock_create(struct net *net,
                            struct socket *sock,
                            struct sock *parent,
                            gfp_t priority,
-                           unsigned short type)
+                           unsigned short type,
+                           int kern)
 {
        struct sock *sk;
        struct vsock_sock *psk;
        struct vsock_sock *vsk;
 
-       sk = sk_alloc(net, AF_VSOCK, priority, &vsock_proto);
+       sk = sk_alloc(net, AF_VSOCK, priority, &vsock_proto, kern);
        if (!sk)
                return NULL;
 
@@ -1866,7 +1867,7 @@ static int vsock_create(struct net *net, struct socket *sock,
 
        sock->state = SS_UNCONNECTED;
 
-       return __vsock_create(net, sock, NULL, GFP_KERNEL, 0) ? 0 : -ENOMEM;
+       return __vsock_create(net, sock, NULL, GFP_KERNEL, 0, kern) ? 0 : -ENOMEM;
 }
 
 static const struct net_proto_family vsock_family_ops = {
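
The kern argument threaded through __vsock_create() here (and through the af_unix and x25 hunks elsewhere in this merge) forwards the flag that __sock_create() passes to the family's create handler, letting sk_alloc() tell kernel-internal sockets (kern == 1) apart from those created on behalf of user processes; paths that clone a child for an existing user socket pass 0 explicitly. A sketch of the convention with hypothetical names (PF_FOO, foo_proto):

static int foo_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	/* forward kern so the core can treat kernel sockets specially */
	struct sock *sk = sk_alloc(net, PF_FOO, GFP_KERNEL, &foo_proto, kern);

	return sk ? 0 : -ENOMEM;
}
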
index c294da095461bcf67f662d751d00d7156c092366..1f63daff39659e08561862cfd71220ffc6949291 100644 (file)
@@ -1022,7 +1022,7 @@ static int vmci_transport_recv_listen(struct sock *sk,
        }
 
        pending = __vsock_create(sock_net(sk), NULL, sk, GFP_KERNEL,
-                                sk->sk_type);
+                                sk->sk_type, 0);
        if (!pending) {
                vmci_transport_send_reset(sk, pkt);
                return -ENOMEM;
index 7aaf7415dc4cfffda21a0597a3b57fec482d2842..915b328b9ac5e71afe3a979640288ad2bfab4a22 100644 (file)
@@ -698,19 +698,20 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
 EXPORT_SYMBOL(cfg80211_chandef_usable);
 
 /*
- * For GO only, check if the channel can be used under permissive conditions
- * mandated by the some regulatory bodies, i.e., the channel is marked with
- * IEEE80211_CHAN_GO_CONCURRENT and there is an additional station interface
+ * Check if the channel can be used under permissive conditions mandated by
+ * some regulatory bodies, i.e., the channel is marked with
+ * IEEE80211_CHAN_IR_CONCURRENT and there is an additional station interface
  * associated to an AP on the same channel or on the same UNII band
  * (assuming that the AP is an authorized master).
- * In addition allow the GO to operate on a channel on which indoor operation is
+ * In addition allow operation on a channel on which indoor operation is
  * allowed, iff we are currently operating in an indoor environment.
  */
-static bool cfg80211_go_permissive_chan(struct cfg80211_registered_device *rdev,
+static bool cfg80211_ir_permissive_chan(struct wiphy *wiphy,
+                                       enum nl80211_iftype iftype,
                                        struct ieee80211_channel *chan)
 {
-       struct wireless_dev *wdev_iter;
-       struct wiphy *wiphy = wiphy_idx_to_wiphy(rdev->wiphy_idx);
+       struct wireless_dev *wdev;
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
 
        ASSERT_RTNL();
 
@@ -718,32 +719,48 @@ static bool cfg80211_go_permissive_chan(struct cfg80211_registered_device *rdev,
            !(wiphy->regulatory_flags & REGULATORY_ENABLE_RELAX_NO_IR))
                return false;
 
+       /* only valid for GO and TDLS off-channel (station/p2p-CL) */
+       if (iftype != NL80211_IFTYPE_P2P_GO &&
+           iftype != NL80211_IFTYPE_STATION &&
+           iftype != NL80211_IFTYPE_P2P_CLIENT)
+               return false;
+
        if (regulatory_indoor_allowed() &&
            (chan->flags & IEEE80211_CHAN_INDOOR_ONLY))
                return true;
 
-       if (!(chan->flags & IEEE80211_CHAN_GO_CONCURRENT))
+       if (!(chan->flags & IEEE80211_CHAN_IR_CONCURRENT))
                return false;
 
        /*
         * Generally, it is possible to rely on another device/driver to allow
-        * the GO concurrent relaxation, however, since the device can further
+        * the IR concurrent relaxation, however, since the device can further
         * enforce the relaxation (by doing verifications similar to this one),
         * and thus fail the GO instantiation, consider only the interfaces of
         * the current registered device.
         */
-       list_for_each_entry(wdev_iter, &rdev->wdev_list, list) {
+       list_for_each_entry(wdev, &rdev->wdev_list, list) {
                struct ieee80211_channel *other_chan = NULL;
                int r1, r2;
 
-               if (wdev_iter->iftype != NL80211_IFTYPE_STATION ||
-                   !netif_running(wdev_iter->netdev))
-                       continue;
-
-               wdev_lock(wdev_iter);
-               if (wdev_iter->current_bss)
-                       other_chan = wdev_iter->current_bss->pub.channel;
-               wdev_unlock(wdev_iter);
+               wdev_lock(wdev);
+               if (wdev->iftype == NL80211_IFTYPE_STATION &&
+                   wdev->current_bss)
+                       other_chan = wdev->current_bss->pub.channel;
+
+               /*
+                * If a GO already operates on the same GO_CONCURRENT channel,
+                * this one (maybe the same one) can beacon as well. We allow
+                * the operation even if the station we relied on with
+                * GO_CONCURRENT is disconnected now. But then we must make sure
+                * we're not outdoors on an indoor-only channel.
+                */
+               if (iftype == NL80211_IFTYPE_P2P_GO &&
+                   wdev->iftype == NL80211_IFTYPE_P2P_GO &&
+                   wdev->beacon_interval &&
+                   !(chan->flags & IEEE80211_CHAN_INDOOR_ONLY))
+                       other_chan = wdev->chandef.chan;
+               wdev_unlock(wdev);
 
                if (!other_chan)
                        continue;
@@ -784,7 +801,6 @@ bool cfg80211_reg_can_beacon(struct wiphy *wiphy,
                             struct cfg80211_chan_def *chandef,
                             enum nl80211_iftype iftype)
 {
-       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
        bool res;
        u32 prohibited_flags = IEEE80211_CHAN_DISABLED |
                               IEEE80211_CHAN_RADAR;
@@ -792,13 +808,12 @@ bool cfg80211_reg_can_beacon(struct wiphy *wiphy,
        trace_cfg80211_reg_can_beacon(wiphy, chandef, iftype);
 
        /*
-        * Under certain conditions suggested by the some regulatory bodies
-        * a GO can operate on channels marked with IEEE80211_NO_IR
-        * so set this flag only if such relaxations are not enabled and
-        * the conditions are not met.
+        * Under certain conditions suggested by some regulatory bodies a
+        * GO/STA can IR on channels marked with IEEE80211_NO_IR. Set this flag
+        * only if such relaxations are not enabled and the conditions are not
+        * met.
         */
-       if (iftype != NL80211_IFTYPE_P2P_GO ||
-           !cfg80211_go_permissive_chan(rdev, chandef->chan))
+       if (!cfg80211_ir_permissive_chan(wiphy, iftype, chandef->chan))
                prohibited_flags |= IEEE80211_CHAN_NO_IR;
 
        if (cfg80211_chandef_dfs_required(wiphy, chandef, iftype) > 0 &&
index 801cd49c5a0c5f2748ff4aba595d865645208745..311eef26bf88b9a0e8125678498583fa19edf688 100644 (file)
@@ -222,6 +222,7 @@ struct cfg80211_event {
                        const u8 *ie;
                        size_t ie_len;
                        u16 reason;
+                       bool locally_generated;
                } dc;
                struct {
                        u8 bssid[ETH_ALEN];
index dd78445c7d50630524b7d33b96b73a2e416c662c..c264effd00a69f97654e9a53959414321ea01668 100644 (file)
@@ -639,8 +639,8 @@ static int nl80211_msg_put_channel(struct sk_buff *msg,
                if ((chan->flags & IEEE80211_CHAN_INDOOR_ONLY) &&
                    nla_put_flag(msg, NL80211_FREQUENCY_ATTR_INDOOR_ONLY))
                        goto nla_put_failure;
-               if ((chan->flags & IEEE80211_CHAN_GO_CONCURRENT) &&
-                   nla_put_flag(msg, NL80211_FREQUENCY_ATTR_GO_CONCURRENT))
+               if ((chan->flags & IEEE80211_CHAN_IR_CONCURRENT) &&
+                   nla_put_flag(msg, NL80211_FREQUENCY_ATTR_IR_CONCURRENT))
                        goto nla_put_failure;
                if ((chan->flags & IEEE80211_CHAN_NO_20MHZ) &&
                    nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_20MHZ))
@@ -4061,7 +4061,8 @@ int cfg80211_check_station_change(struct wiphy *wiphy,
                        return -EINVAL;
                break;
        case CFG80211_STA_MESH_PEER_USER:
-               if (params->plink_action != NL80211_PLINK_ACTION_NO_ACTION)
+               if (params->plink_action != NL80211_PLINK_ACTION_NO_ACTION &&
+                   params->plink_action != NL80211_PLINK_ACTION_BLOCK)
                        return -EINVAL;
                break;
        }
index 0e347f888fe910d07e7a094058755f37bee9fcd9..d359e0610198c5c3d2094120e4ba3cf714d8dc21 100644 (file)
@@ -989,8 +989,8 @@ static u32 map_regdom_flags(u32 rd_flags)
                channel_flags |= IEEE80211_CHAN_NO_OFDM;
        if (rd_flags & NL80211_RRF_NO_OUTDOOR)
                channel_flags |= IEEE80211_CHAN_INDOOR_ONLY;
-       if (rd_flags & NL80211_RRF_GO_CONCURRENT)
-               channel_flags |= IEEE80211_CHAN_GO_CONCURRENT;
+       if (rd_flags & NL80211_RRF_IR_CONCURRENT)
+               channel_flags |= IEEE80211_CHAN_IR_CONCURRENT;
        if (rd_flags & NL80211_RRF_NO_HT40MINUS)
                channel_flags |= IEEE80211_CHAN_NO_HT40MINUS;
        if (rd_flags & NL80211_RRF_NO_HT40PLUS)
index d11454f87bacf9396241bd5bbc3ba643ef0b3302..8020b5b094d4c8fba0c0f2431f8af7d8ecc40dca 100644 (file)
@@ -938,7 +938,8 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
 }
 
 void cfg80211_disconnected(struct net_device *dev, u16 reason,
-                          const u8 *ie, size_t ie_len, gfp_t gfp)
+                          const u8 *ie, size_t ie_len,
+                          bool locally_generated, gfp_t gfp)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
@@ -954,6 +955,7 @@ void cfg80211_disconnected(struct net_device *dev, u16 reason,
        ev->dc.ie_len = ie_len;
        memcpy((void *)ev->dc.ie, ie, ie_len);
        ev->dc.reason = reason;
+       ev->dc.locally_generated = locally_generated;
 
        spin_lock_irqsave(&wdev->event_lock, flags);
        list_add_tail(&ev->list, &wdev->event_list);
index 9ee6bc1a761022dc551fd42f8e68426214d93cd4..9cee0220665d356b7f2b9734ca888ccfae8c0d1c 100644 (file)
@@ -86,7 +86,7 @@ static int wiphy_uevent(struct device *dev, struct kobj_uevent_env *env)
        return 0;
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static void cfg80211_leave_all(struct cfg80211_registered_device *rdev)
 {
        struct wireless_dev *wdev;
@@ -95,7 +95,7 @@ static void cfg80211_leave_all(struct cfg80211_registered_device *rdev)
                cfg80211_leave(rdev, wdev);
 }
 
-static int wiphy_suspend(struct device *dev, pm_message_t state)
+static int wiphy_suspend(struct device *dev)
 {
        struct cfg80211_registered_device *rdev = dev_to_rdev(dev);
        int ret = 0;
@@ -136,6 +136,11 @@ static int wiphy_resume(struct device *dev)
 
        return ret;
 }
+
+static SIMPLE_DEV_PM_OPS(wiphy_pm_ops, wiphy_suspend, wiphy_resume);
+#define WIPHY_PM_OPS (&wiphy_pm_ops)
+#else
+#define WIPHY_PM_OPS NULL
 #endif
 
 static const void *wiphy_namespace(struct device *d)
@@ -151,10 +156,7 @@ struct class ieee80211_class = {
        .dev_release = wiphy_dev_release,
        .dev_groups = ieee80211_groups,
        .dev_uevent = wiphy_uevent,
-#ifdef CONFIG_PM
-       .suspend = wiphy_suspend,
-       .resume = wiphy_resume,
-#endif
+       .pm = WIPHY_PM_OPS,
        .ns_type = &net_ns_type_operations,
        .namespace = wiphy_namespace,
 };
index 70051ab52f4f34d2817e76210aafa70bbda62e60..baf7218cec152bb9f894ba47145cec75b9a86123 100644 (file)
@@ -887,7 +887,8 @@ void cfg80211_process_wdev_events(struct wireless_dev *wdev)
                case EVENT_DISCONNECTED:
                        __cfg80211_disconnected(wdev->netdev,
                                                ev->dc.ie, ev->dc.ie_len,
-                                               ev->dc.reason, true);
+                                               ev->dc.reason,
+                                               !ev->dc.locally_generated);
                        break;
                case EVENT_IBSS_JOINED:
                        __cfg80211_ibss_joined(wdev->netdev, ev->ij.bssid,
@@ -944,7 +945,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
             ntype == NL80211_IFTYPE_P2P_CLIENT))
                return -EBUSY;
 
-       if (ntype != otype && netif_running(dev)) {
+       if (ntype != otype) {
                dev->ieee80211_ptr->use_4addr = false;
                dev->ieee80211_ptr->mesh_id_up_len = 0;
                wdev_lock(dev->ieee80211_ptr);
index c3ab230e4493fbb0d63b50c3befe1d15aeda802e..a750f330b8ddca8e0ea3e5d8eb88e68f6038e149 100644 (file)
@@ -515,10 +515,10 @@ static struct proto x25_proto = {
        .obj_size = sizeof(struct x25_sock),
 };
 
-static struct sock *x25_alloc_socket(struct net *net)
+static struct sock *x25_alloc_socket(struct net *net, int kern)
 {
        struct x25_sock *x25;
-       struct sock *sk = sk_alloc(net, AF_X25, GFP_ATOMIC, &x25_proto);
+       struct sock *sk = sk_alloc(net, AF_X25, GFP_ATOMIC, &x25_proto, kern);
 
        if (!sk)
                goto out;
@@ -553,7 +553,7 @@ static int x25_create(struct net *net, struct socket *sock, int protocol,
                goto out;
 
        rc = -ENOBUFS;
-       if ((sk = x25_alloc_socket(net)) == NULL)
+       if ((sk = x25_alloc_socket(net, kern)) == NULL)
                goto out;
 
        x25 = x25_sk(sk);
@@ -602,7 +602,7 @@ static struct sock *x25_make_new(struct sock *osk)
        if (osk->sk_type != SOCK_SEQPACKET)
                goto out;
 
-       if ((sk = x25_alloc_socket(sock_net(osk))) == NULL)
+       if ((sk = x25_alloc_socket(sock_net(osk), 0)) == NULL)
                goto out;
 
        x25 = x25_sk(sk);
index b58286ecd156fdb9de2a33ca0ede0fe3194bf289..60ce7014e1b094ef0013f27330ab8c0bec24e284 100644 (file)
@@ -31,7 +31,7 @@ int xfrm_input_register_afinfo(struct xfrm_input_afinfo *afinfo)
                return -EAFNOSUPPORT;
        spin_lock_bh(&xfrm_input_afinfo_lock);
        if (unlikely(xfrm_input_afinfo[afinfo->family] != NULL))
-               err = -ENOBUFS;
+               err = -EEXIST;
        else
                rcu_assign_pointer(xfrm_input_afinfo[afinfo->family], afinfo);
        spin_unlock_bh(&xfrm_input_afinfo_lock);
@@ -254,13 +254,13 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
                skb->sp->xvec[skb->sp->len++] = x;
 
                spin_lock(&x->lock);
-               if (unlikely(x->km.state == XFRM_STATE_ACQ)) {
-                       XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR);
-                       goto drop_unlock;
-               }
 
                if (unlikely(x->km.state != XFRM_STATE_VALID)) {
-                       XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEINVALID);
+                       if (x->km.state == XFRM_STATE_ACQ)
+                               XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR);
+                       else
+                               XFRM_INC_STATS(net,
+                                              LINUX_MIB_XFRMINSTATEINVALID);
                        goto drop_unlock;
                }
 
index fbcedbe33190346a40fc148369757a6ef64a2106..68ada2ca4b60707ac70b45aa6f0a9ea142a9dd16 100644 (file)
@@ -38,6 +38,18 @@ static int xfrm_skb_check_space(struct sk_buff *skb)
        return pskb_expand_head(skb, nhead, ntail, GFP_ATOMIC);
 }
 
+/* Children define the path of the packet through the
+ * Linux networking stack.  Thus, destinations are stackable.
+ */
+
+static struct dst_entry *skb_dst_pop(struct sk_buff *skb)
+{
+       struct dst_entry *child = dst_clone(skb_dst(skb)->child);
+
+       skb_dst_drop(skb);
+       return child;
+}
+
 static int xfrm_output_one(struct sk_buff *skb, int err)
 {
        struct dst_entry *dst = skb_dst(skb);
index 638af0655aaf8ec600ae5f6b201e252ca229d89d..18cead7645be0e75621e1b08a9e02621e75b7a4c 100644 (file)
@@ -315,14 +315,6 @@ void xfrm_policy_destroy(struct xfrm_policy *policy)
 }
 EXPORT_SYMBOL(xfrm_policy_destroy);
 
-static void xfrm_queue_purge(struct sk_buff_head *list)
-{
-       struct sk_buff *skb;
-
-       while ((skb = skb_dequeue(list)) != NULL)
-               kfree_skb(skb);
-}
-
 /* Rule must be locked. Release descendant resources, announce
  * entry dead. The rule must already be unlinked from the lists.
  */
@@ -335,7 +327,7 @@ static void xfrm_policy_kill(struct xfrm_policy *policy)
 
        if (del_timer(&policy->polq.hold_timer))
                xfrm_pol_put(policy);
-       xfrm_queue_purge(&policy->polq.hold_queue);
+       skb_queue_purge(&policy->polq.hold_queue);
 
        if (del_timer(&policy->timer))
                xfrm_pol_put(policy);
@@ -708,6 +700,9 @@ static void xfrm_policy_requeue(struct xfrm_policy *old,
        struct xfrm_policy_queue *pq = &old->polq;
        struct sk_buff_head list;
 
+       if (skb_queue_empty(&pq->hold_queue))
+               return;
+
        __skb_queue_head_init(&list);
 
        spin_lock_bh(&pq->hold_queue.lock);
@@ -716,9 +711,6 @@ static void xfrm_policy_requeue(struct xfrm_policy *old,
                xfrm_pol_put(old);
        spin_unlock_bh(&pq->hold_queue.lock);
 
-       if (skb_queue_empty(&list))
-               return;
-
        pq = &new->polq;
 
        spin_lock_bh(&pq->hold_queue.lock);
@@ -1012,7 +1004,9 @@ int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
        if (list_empty(&walk->walk.all))
                x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
        else
-               x = list_entry(&walk->walk.all, struct xfrm_policy_walk_entry, all);
+               x = list_first_entry(&walk->walk.all,
+                                    struct xfrm_policy_walk_entry, all);
+
        list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
                if (x->dead)
                        continue;
@@ -1120,6 +1114,9 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
        }
        chain = &net->xfrm.policy_inexact[dir];
        hlist_for_each_entry(pol, chain, bydst) {
+               if ((pol->priority >= priority) && ret)
+                       break;
+
                err = xfrm_policy_match(pol, fl, type, family, dir);
                if (err) {
                        if (err == -ESRCH)
@@ -1128,13 +1125,13 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
                                ret = ERR_PTR(err);
                                goto fail;
                        }
-               } else if (pol->priority < priority) {
+               } else {
                        ret = pol;
                        break;
                }
        }
-       if (ret)
-               xfrm_pol_hold(ret);
+
+       xfrm_pol_hold(ret);
 fail:
        read_unlock_bh(&net->xfrm.xfrm_policy_lock);
 
@@ -1955,7 +1952,7 @@ out:
 
 purge_queue:
        pq->timeout = 0;
-       xfrm_queue_purge(&pq->hold_queue);
+       skb_queue_purge(&pq->hold_queue);
        xfrm_pol_put(pol);
 }
 
@@ -2814,7 +2811,7 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
                return -EAFNOSUPPORT;
        spin_lock(&xfrm_policy_afinfo_lock);
        if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
-               err = -ENOBUFS;
+               err = -EEXIST;
        else {
                struct dst_ops *dst_ops = afinfo->dst_ops;
                if (likely(dst_ops->kmem_cachep == NULL))
@@ -3209,16 +3206,17 @@ static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *
        }
        chain = &net->xfrm.policy_inexact[dir];
        hlist_for_each_entry(pol, chain, bydst) {
+               if ((pol->priority >= priority) && ret)
+                       break;
+
                if (xfrm_migrate_selector_match(sel, &pol->selector) &&
-                   pol->type == type &&
-                   pol->priority < priority) {
+                   pol->type == type) {
                        ret = pol;
                        break;
                }
        }
 
-       if (ret)
-               xfrm_pol_hold(ret);
+       xfrm_pol_hold(ret);
 
        read_unlock_bh(&net->xfrm.xfrm_policy_lock);
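
The two lookup hunks above rely on the inexact policy chain being kept sorted by ascending priority value (a lower value wins), so the walk can stop as soon as a candidate is held and the next entry can no longer beat it; the now unconditional xfrm_pol_hold(ret) also depends on that helper tolerating a NULL argument. A condensed sketch of the idea, using hypothetical types rather than the kernel structures:

struct pol {
	int priority;		/* lower value == more important */
	struct pol *next;	/* chain sorted by ascending priority */
};

static struct pol *lookup(struct pol *best, int best_prio,
			  struct pol *chain, int (*match)(struct pol *))
{
	struct pol *p;

	for (p = chain; p; p = p->next) {
		/* sorted chain: once a candidate is held and the next
		 * entry's priority is not better, nothing later can win
		 */
		if (p->priority >= best_prio && best)
			break;
		if (match(p)) {
			best = p;
			break;
		}
	}
	return best;
}
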
 
index 96688cd0f6f11bddee4451de1d09a9a8e5f212dd..9895a8c56d8c2675393e1aad3a1a438e16d4d47c 100644 (file)
@@ -1626,7 +1626,7 @@ int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
        if (list_empty(&walk->all))
                x = list_first_entry(&net->xfrm.state_all, struct xfrm_state_walk, all);
        else
-               x = list_entry(&walk->all, struct xfrm_state_walk, all);
+               x = list_first_entry(&walk->all, struct xfrm_state_walk, all);
        list_for_each_entry_from(x, &net->xfrm.state_all, all) {
                if (x->state == XFRM_STATE_DEAD)
                        continue;
@@ -1908,7 +1908,7 @@ int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
                return -EAFNOSUPPORT;
        spin_lock_bh(&xfrm_state_afinfo_lock);
        if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
-               err = -ENOBUFS;
+               err = -EEXIST;
        else
                rcu_assign_pointer(xfrm_state_afinfo[afinfo->family], afinfo);
        spin_unlock_bh(&xfrm_state_afinfo_lock);
index 76e3458a5419a556b1b21ffe68d5e867d435f267..46c6a8cf74d3e56cad920616208b9d603a08153c 100644 (file)
@@ -6,29 +6,35 @@ hostprogs-y := test_verifier test_maps
 hostprogs-y += sock_example
 hostprogs-y += sockex1
 hostprogs-y += sockex2
+hostprogs-y += sockex3
 hostprogs-y += tracex1
 hostprogs-y += tracex2
 hostprogs-y += tracex3
 hostprogs-y += tracex4
+hostprogs-y += tracex5
 
 test_verifier-objs := test_verifier.o libbpf.o
 test_maps-objs := test_maps.o libbpf.o
 sock_example-objs := sock_example.o libbpf.o
 sockex1-objs := bpf_load.o libbpf.o sockex1_user.o
 sockex2-objs := bpf_load.o libbpf.o sockex2_user.o
+sockex3-objs := bpf_load.o libbpf.o sockex3_user.o
 tracex1-objs := bpf_load.o libbpf.o tracex1_user.o
 tracex2-objs := bpf_load.o libbpf.o tracex2_user.o
 tracex3-objs := bpf_load.o libbpf.o tracex3_user.o
 tracex4-objs := bpf_load.o libbpf.o tracex4_user.o
+tracex5-objs := bpf_load.o libbpf.o tracex5_user.o
 
 # Tell kbuild to always build the programs
 always := $(hostprogs-y)
 always += sockex1_kern.o
 always += sockex2_kern.o
+always += sockex3_kern.o
 always += tracex1_kern.o
 always += tracex2_kern.o
 always += tracex3_kern.o
 always += tracex4_kern.o
+always += tracex5_kern.o
 always += tcbpf1_kern.o
 
 HOSTCFLAGS += -I$(objtree)/usr/include
@@ -36,15 +42,17 @@ HOSTCFLAGS += -I$(objtree)/usr/include
 HOSTCFLAGS_bpf_load.o += -I$(objtree)/usr/include -Wno-unused-variable
 HOSTLOADLIBES_sockex1 += -lelf
 HOSTLOADLIBES_sockex2 += -lelf
+HOSTLOADLIBES_sockex3 += -lelf
 HOSTLOADLIBES_tracex1 += -lelf
 HOSTLOADLIBES_tracex2 += -lelf
 HOSTLOADLIBES_tracex3 += -lelf
 HOSTLOADLIBES_tracex4 += -lelf -lrt
+HOSTLOADLIBES_tracex5 += -lelf
 
 # point this to your LLVM backend with bpf support
 LLC=$(srctree)/tools/bpf/llvm/bld/Debug+Asserts/bin/llc
 
-%.o: %.c
+$(obj)/%.o: $(src)/%.c
        clang $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) \
                -D__KERNEL__ -Wno-unused-value -Wno-pointer-sign \
                -O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=obj -o $@
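
The pattern-rule change above anchors the clang recipe to kbuild's $(obj)/ and $(src)/ prefixes so it only matches the sample sources and keeps working when the objects live in a separate output tree; an illustrative invocation (assuming a configured tree):

	make O=/tmp/kbuild samples/bpf/
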
index f960b5fb3ed842e9adf4ddfd698fde9f9781d8ac..f531a0b3282d593dac6bbec2bfb1726c6a7bbd54 100644 (file)
@@ -21,6 +21,10 @@ static unsigned long long (*bpf_ktime_get_ns)(void) =
        (void *) BPF_FUNC_ktime_get_ns;
 static int (*bpf_trace_printk)(const char *fmt, int fmt_size, ...) =
        (void *) BPF_FUNC_trace_printk;
+static void (*bpf_tail_call)(void *ctx, void *map, int index) =
+       (void *) BPF_FUNC_tail_call;
+static unsigned long long (*bpf_get_smp_processor_id)(void) =
+       (void *) BPF_FUNC_get_smp_processor_id;
 
 /* llvm builtin functions that eBPF C program may use to
  * emit BPF_LD_ABS and BPF_LD_IND instructions
index 38dac5a53b518bc71743b32a06ad39391c60066b..da86a8e0a95afb0fcc79f96d310115803b5a9d55 100644 (file)
@@ -16,6 +16,7 @@
 #include <sys/ioctl.h>
 #include <sys/mman.h>
 #include <poll.h>
+#include <ctype.h>
 #include "libbpf.h"
 #include "bpf_helpers.h"
 #include "bpf_load.h"
@@ -29,6 +30,19 @@ int map_fd[MAX_MAPS];
 int prog_fd[MAX_PROGS];
 int event_fd[MAX_PROGS];
 int prog_cnt;
+int prog_array_fd = -1;
+
+static int populate_prog_array(const char *event, int prog_fd)
+{
+       int ind = atoi(event), err;
+
+       err = bpf_update_elem(prog_array_fd, &ind, &prog_fd, BPF_ANY);
+       if (err < 0) {
+               printf("failed to store prog_fd in prog_array\n");
+               return -1;
+       }
+       return 0;
+}
 
 static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
 {
@@ -54,12 +68,40 @@ static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
                return -1;
        }
 
+       fd = bpf_prog_load(prog_type, prog, size, license, kern_version);
+       if (fd < 0) {
+               printf("bpf_prog_load() err=%d\n%s", errno, bpf_log_buf);
+               return -1;
+       }
+
+       prog_fd[prog_cnt++] = fd;
+
+       if (is_socket) {
+               event += 6;
+               if (*event != '/')
+                       return 0;
+               event++;
+               if (!isdigit(*event)) {
+                       printf("invalid prog number\n");
+                       return -1;
+               }
+               return populate_prog_array(event, fd);
+       }
+
        if (is_kprobe || is_kretprobe) {
                if (is_kprobe)
                        event += 7;
                else
                        event += 10;
 
+               if (*event == 0) {
+                       printf("event name cannot be empty\n");
+                       return -1;
+               }
+
+               if (isdigit(*event))
+                       return populate_prog_array(event, fd);
+
                snprintf(buf, sizeof(buf),
                         "echo '%c:%s %s' >> /sys/kernel/debug/tracing/kprobe_events",
                         is_kprobe ? 'p' : 'r', event, event);
@@ -71,18 +113,6 @@ static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
                }
        }
 
-       fd = bpf_prog_load(prog_type, prog, size, license, kern_version);
-
-       if (fd < 0) {
-               printf("bpf_prog_load() err=%d\n%s", errno, bpf_log_buf);
-               return -1;
-       }
-
-       prog_fd[prog_cnt++] = fd;
-
-       if (is_socket)
-               return 0;
-
        strcpy(buf, DEBUGFS);
        strcat(buf, "events/kprobes/");
        strcat(buf, event);
@@ -130,6 +160,9 @@ static int load_maps(struct bpf_map_def *maps, int len)
                                           maps[i].max_entries);
                if (map_fd[i] < 0)
                        return 1;
+
+               if (maps[i].type == BPF_MAP_TYPE_PROG_ARRAY)
+                       prog_array_fd = map_fd[i];
        }
        return 0;
 }
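
With the loader changes above, a program's ELF section name selects both its type and, when it ends in a digit, the slot it occupies in the object's BPF_MAP_TYPE_PROG_ARRAY (whose fd the loader remembers in prog_array_fd). A sketch of the naming convention, assuming the SEC() macro from bpf_helpers.h and the usual sample includes; the section and function names are illustrative:

SEC("socket/2")			/* socket filter stored at jmp_table[2] */
int bpf_parse(struct __sk_buff *skb)
{
	return 0;
}

SEC("kprobe/5")			/* kprobe-type program stored at slot 5 */
int bpf_slot5(struct pt_regs *ctx)
{
	return 0;
}

SEC("kprobe/sys_write")		/* attached as an actual kprobe event */
int bpf_sys_write(struct pt_regs *ctx)
{
	return 0;
}
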
diff --git a/samples/bpf/sockex3_kern.c b/samples/bpf/sockex3_kern.c
new file mode 100644 (file)
index 0000000..41ae2fd
--- /dev/null
@@ -0,0 +1,290 @@
+/* Copyright (c) 2015 PLUMgrid, http://plumgrid.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <uapi/linux/bpf.h>
+#include "bpf_helpers.h"
+#include <uapi/linux/in.h>
+#include <uapi/linux/if.h>
+#include <uapi/linux/if_ether.h>
+#include <uapi/linux/ip.h>
+#include <uapi/linux/ipv6.h>
+#include <uapi/linux/if_tunnel.h>
+#include <uapi/linux/mpls.h>
+#define IP_MF          0x2000
+#define IP_OFFSET      0x1FFF
+
+#define PROG(F) SEC("socket/"__stringify(F)) int bpf_func_##F
+
+struct bpf_map_def SEC("maps") jmp_table = {
+       .type = BPF_MAP_TYPE_PROG_ARRAY,
+       .key_size = sizeof(u32),
+       .value_size = sizeof(u32),
+       .max_entries = 8,
+};
+
+#define PARSE_VLAN 1
+#define PARSE_MPLS 2
+#define PARSE_IP 3
+#define PARSE_IPV6 4
+
+/* Protocol dispatch routine: tail-calls the next BPF program based on
+ * the ethernet protocol.
+ * Note, we could have used
+ * bpf_tail_call(skb, &jmp_table, proto);
+ * directly, but that would require a much larger prog_array
+ */
+static inline void parse_eth_proto(struct __sk_buff *skb, u32 proto)
+{
+       switch (proto) {
+       case ETH_P_8021Q:
+       case ETH_P_8021AD:
+               bpf_tail_call(skb, &jmp_table, PARSE_VLAN);
+               break;
+       case ETH_P_MPLS_UC:
+       case ETH_P_MPLS_MC:
+               bpf_tail_call(skb, &jmp_table, PARSE_MPLS);
+               break;
+       case ETH_P_IP:
+               bpf_tail_call(skb, &jmp_table, PARSE_IP);
+               break;
+       case ETH_P_IPV6:
+               bpf_tail_call(skb, &jmp_table, PARSE_IPV6);
+               break;
+       }
+}
+
+struct vlan_hdr {
+       __be16 h_vlan_TCI;
+       __be16 h_vlan_encapsulated_proto;
+};
+
+struct flow_keys {
+       __be32 src;
+       __be32 dst;
+       union {
+               __be32 ports;
+               __be16 port16[2];
+       };
+       __u32 ip_proto;
+};
+
+static inline int ip_is_fragment(struct __sk_buff *ctx, __u64 nhoff)
+{
+       return load_half(ctx, nhoff + offsetof(struct iphdr, frag_off))
+               & (IP_MF | IP_OFFSET);
+}
+
+static inline __u32 ipv6_addr_hash(struct __sk_buff *ctx, __u64 off)
+{
+       __u64 w0 = load_word(ctx, off);
+       __u64 w1 = load_word(ctx, off + 4);
+       __u64 w2 = load_word(ctx, off + 8);
+       __u64 w3 = load_word(ctx, off + 12);
+
+       return (__u32)(w0 ^ w1 ^ w2 ^ w3);
+}
+
+struct globals {
+       struct flow_keys flow;
+};
+
+struct bpf_map_def SEC("maps") percpu_map = {
+       .type = BPF_MAP_TYPE_ARRAY,
+       .key_size = sizeof(__u32),
+       .value_size = sizeof(struct globals),
+       .max_entries = 32,
+};
+
+/* use poor man's per_cpu until native support is ready */
+static struct globals *this_cpu_globals(void)
+{
+       u32 key = bpf_get_smp_processor_id();
+
+       return bpf_map_lookup_elem(&percpu_map, &key);
+}
+
+/* some simple stats for user space consumption */
+struct pair {
+       __u64 packets;
+       __u64 bytes;
+};
+
+struct bpf_map_def SEC("maps") hash_map = {
+       .type = BPF_MAP_TYPE_HASH,
+       .key_size = sizeof(struct flow_keys),
+       .value_size = sizeof(struct pair),
+       .max_entries = 1024,
+};
+
+static void update_stats(struct __sk_buff *skb, struct globals *g)
+{
+       struct flow_keys key = g->flow;
+       struct pair *value;
+
+       value = bpf_map_lookup_elem(&hash_map, &key);
+       if (value) {
+               __sync_fetch_and_add(&value->packets, 1);
+               __sync_fetch_and_add(&value->bytes, skb->len);
+       } else {
+               struct pair val = {1, skb->len};
+
+               bpf_map_update_elem(&hash_map, &key, &val, BPF_ANY);
+       }
+}
+
+static __always_inline void parse_ip_proto(struct __sk_buff *skb,
+                                          struct globals *g, __u32 ip_proto)
+{
+       __u32 nhoff = skb->cb[0];
+       int poff;
+
+       switch (ip_proto) {
+       case IPPROTO_GRE: {
+               struct gre_hdr {
+                       __be16 flags;
+                       __be16 proto;
+               };
+
+               __u32 gre_flags = load_half(skb,
+                                           nhoff + offsetof(struct gre_hdr, flags));
+               __u32 gre_proto = load_half(skb,
+                                           nhoff + offsetof(struct gre_hdr, proto));
+
+               if (gre_flags & (GRE_VERSION|GRE_ROUTING))
+                       break;
+
+               nhoff += 4;
+               if (gre_flags & GRE_CSUM)
+                       nhoff += 4;
+               if (gre_flags & GRE_KEY)
+                       nhoff += 4;
+               if (gre_flags & GRE_SEQ)
+                       nhoff += 4;
+
+               skb->cb[0] = nhoff;
+               parse_eth_proto(skb, gre_proto);
+               break;
+       }
+       case IPPROTO_IPIP:
+               parse_eth_proto(skb, ETH_P_IP);
+               break;
+       case IPPROTO_IPV6:
+               parse_eth_proto(skb, ETH_P_IPV6);
+               break;
+       case IPPROTO_TCP:
+       case IPPROTO_UDP:
+               g->flow.ports = load_word(skb, nhoff);
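+               /* fall through to record ip_proto and update the stats */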
+       case IPPROTO_ICMP:
+               g->flow.ip_proto = ip_proto;
+               update_stats(skb, g);
+               break;
+       default:
+               break;
+       }
+}
+
+PROG(PARSE_IP)(struct __sk_buff *skb)
+{
+       struct globals *g = this_cpu_globals();
+       __u32 nhoff, verlen, ip_proto;
+
+       if (!g)
+               return 0;
+
+       nhoff = skb->cb[0];
+
+       if (unlikely(ip_is_fragment(skb, nhoff)))
+               return 0;
+
+       ip_proto = load_byte(skb, nhoff + offsetof(struct iphdr, protocol));
+
+       if (ip_proto != IPPROTO_GRE) {
+               g->flow.src = load_word(skb, nhoff + offsetof(struct iphdr, saddr));
+               g->flow.dst = load_word(skb, nhoff + offsetof(struct iphdr, daddr));
+       }
+
+       verlen = load_byte(skb, nhoff + 0/*offsetof(struct iphdr, ihl)*/);
+       nhoff += (verlen & 0xF) << 2;
+
+       skb->cb[0] = nhoff;
+       parse_ip_proto(skb, g, ip_proto);
+       return 0;
+}
+
+PROG(PARSE_IPV6)(struct __sk_buff *skb)
+{
+       struct globals *g = this_cpu_globals();
+       __u32 nhoff, ip_proto;
+
+       if (!g)
+               return 0;
+
+       nhoff = skb->cb[0];
+
+       ip_proto = load_byte(skb,
+                            nhoff + offsetof(struct ipv6hdr, nexthdr));
+       g->flow.src = ipv6_addr_hash(skb,
+                                    nhoff + offsetof(struct ipv6hdr, saddr));
+       g->flow.dst = ipv6_addr_hash(skb,
+                                    nhoff + offsetof(struct ipv6hdr, daddr));
+       nhoff += sizeof(struct ipv6hdr);
+
+       skb->cb[0] = nhoff;
+       parse_ip_proto(skb, g, ip_proto);
+       return 0;
+}
+
+PROG(PARSE_VLAN)(struct __sk_buff *skb)
+{
+       __u32 nhoff, proto;
+
+       nhoff = skb->cb[0];
+
+       proto = load_half(skb, nhoff + offsetof(struct vlan_hdr,
+                                               h_vlan_encapsulated_proto));
+       nhoff += sizeof(struct vlan_hdr);
+       skb->cb[0] = nhoff;
+
+       parse_eth_proto(skb, proto);
+
+       return 0;
+}
+
+PROG(PARSE_MPLS)(struct __sk_buff *skb)
+{
+       __u32 nhoff, label;
+
+       nhoff = skb->cb[0];
+
+       label = load_word(skb, nhoff);
+       nhoff += sizeof(struct mpls_label);
+       skb->cb[0] = nhoff;
+
+       if (label & MPLS_LS_S_MASK) {
+               __u8 verlen = load_byte(skb, nhoff);
+               if ((verlen & 0xF0) == 4)
+                       parse_eth_proto(skb, ETH_P_IP);
+               else
+                       parse_eth_proto(skb, ETH_P_IPV6);
+       } else {
+               parse_eth_proto(skb, ETH_P_MPLS_UC);
+       }
+
+       return 0;
+}
+
+SEC("socket/0")
+int main_prog(struct __sk_buff *skb)
+{
+       __u32 nhoff = ETH_HLEN;
+       __u32 proto = load_half(skb, 12);
+
+       skb->cb[0] = nhoff;
+       parse_eth_proto(skb, proto);
+       return 0;
+}
+
+char _license[] SEC("license") = "GPL";
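
The parser above keeps its only cross-program state, the current header offset, in skb->cb[0]: main_prog seeds it with ETH_HLEN, and every PROG() handler advances it before re-dispatching through parse_eth_proto(), so nested encapsulation (VLAN, MPLS, GRE, IPIP) is unwound by repeated tail calls rather than by loops, which the verifier does not allow.
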
diff --git a/samples/bpf/sockex3_user.c b/samples/bpf/sockex3_user.c
new file mode 100644 (file)
index 0000000..2617772
--- /dev/null
@@ -0,0 +1,66 @@
+#include <stdio.h>
+#include <assert.h>
+#include <linux/bpf.h>
+#include "libbpf.h"
+#include "bpf_load.h"
+#include <unistd.h>
+#include <string.h>
+#include <arpa/inet.h>
+
+struct flow_keys {
+       __be32 src;
+       __be32 dst;
+       union {
+               __be32 ports;
+               __be16 port16[2];
+       };
+       __u32 ip_proto;
+};
+
+struct pair {
+       __u64 packets;
+       __u64 bytes;
+};
+
+int main(int argc, char **argv)
+{
+       char filename[256];
+       FILE *f;
+       int i, sock;
+
+       snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+
+       if (load_bpf_file(filename)) {
+               printf("%s", bpf_log_buf);
+               return 1;
+       }
+
+       sock = open_raw_sock("lo");
+
+       assert(setsockopt(sock, SOL_SOCKET, SO_ATTACH_BPF, &prog_fd[4],
+                         sizeof(__u32)) == 0);
+
+       if (argc > 1)
+               f = popen("ping -c5 localhost", "r");
+       else
+               f = popen("netperf -l 4 localhost", "r");
+       (void) f;
+
+       for (i = 0; i < 5; i++) {
+               struct flow_keys key = {}, next_key;
+               struct pair value;
+
+               sleep(1);
+               printf("IP     src.port -> dst.port               bytes      packets\n");
+               while (bpf_get_next_key(map_fd[2], &key, &next_key) == 0) {
+                       char src[INET_ADDRSTRLEN], dst[INET_ADDRSTRLEN];
+
+                       bpf_lookup_elem(map_fd[2], &next_key, &value);
+                       /* inet_ntoa() reuses a static buffer, so copy the
+                        * first address out before converting the second
+                        */
+                       strcpy(src, inet_ntoa((struct in_addr){htonl(next_key.src)}));
+                       strcpy(dst, inet_ntoa((struct in_addr){htonl(next_key.dst)}));
+                       printf("%s.%05d -> %s.%05d %12lld %12lld\n",
+                              src, next_key.port16[0],
+                              dst, next_key.port16[1],
+                              value.bytes, value.packets);
+                       key = next_key;
+               }
+       }
+       return 0;
+}
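
For reference, the user-space half above loads sockex3_kern.o, attaches one of the loaded programs (prog_fd[4]) to a raw socket on lo, drives traffic over loopback (netperf by default, or ping -c5 localhost when any extra argument is given), and then dumps the per-flow byte and packet counters gathered in hash_map once a second for five seconds.
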
index 7c27710f82968e640ff02c54fb29d2cbb56cb356..9bfb2eb34563d0158dae21c97c1b37d640e8d62a 100644 (file)
@@ -21,7 +21,7 @@ static inline void set_dst_mac(struct __sk_buff *skb, char *mac)
 
 static inline void set_ip_tos(struct __sk_buff *skb, __u8 new_tos)
 {
-       __u8 old_tos = load_byte(skb, BPF_LL_OFF + TOS_OFF);
+       __u8 old_tos = load_byte(skb, TOS_OFF);
 
        bpf_l3_csum_replace(skb, IP_CSUM_OFF, htons(old_tos), htons(new_tos), 2);
        bpf_skb_store_bytes(skb, TOS_OFF, &new_tos, sizeof(new_tos), 0);
@@ -34,7 +34,7 @@ static inline void set_ip_tos(struct __sk_buff *skb, __u8 new_tos)
 
 static inline void set_tcp_ip_src(struct __sk_buff *skb, __u32 new_ip)
 {
-       __u32 old_ip = _htonl(load_word(skb, BPF_LL_OFF + IP_SRC_OFF));
+       __u32 old_ip = _htonl(load_word(skb, IP_SRC_OFF));
 
        bpf_l4_csum_replace(skb, TCP_CSUM_OFF, old_ip, new_ip, IS_PSEUDO | sizeof(new_ip));
        bpf_l3_csum_replace(skb, IP_CSUM_OFF, old_ip, new_ip, sizeof(new_ip));
@@ -44,7 +44,7 @@ static inline void set_tcp_ip_src(struct __sk_buff *skb, __u32 new_ip)
 #define TCP_DPORT_OFF (ETH_HLEN + sizeof(struct iphdr) + offsetof(struct tcphdr, dest))
 static inline void set_tcp_dest_port(struct __sk_buff *skb, __u16 new_port)
 {
-       __u16 old_port = htons(load_half(skb, BPF_LL_OFF + TCP_DPORT_OFF));
+       __u16 old_port = htons(load_half(skb, TCP_DPORT_OFF));
 
        bpf_l4_csum_replace(skb, TCP_CSUM_OFF, old_port, new_port, sizeof(new_port));
        bpf_skb_store_bytes(skb, TCP_DPORT_OFF, &new_port, sizeof(new_port), 0);
@@ -53,7 +53,7 @@ static inline void set_tcp_dest_port(struct __sk_buff *skb, __u16 new_port)
 SEC("classifier")
 int bpf_prog1(struct __sk_buff *skb)
 {
-       __u8 proto = load_byte(skb, BPF_LL_OFF + ETH_HLEN + offsetof(struct iphdr, protocol));
+       __u8 proto = load_byte(skb, ETH_HLEN + offsetof(struct iphdr, protocol));
        long *value;
 
        if (proto == IPPROTO_TCP) {
index 12f3780af73fa5b4ec60fc88ae683bae509926c3..693605997abcbb9eb7d069d2ec57b8046579263b 100644 (file)
@@ -29,6 +29,7 @@ struct bpf_test {
                ACCEPT,
                REJECT
        } result;
+       enum bpf_prog_type prog_type;
 };
 
 static struct bpf_test tests[] = {
@@ -743,6 +744,84 @@ static struct bpf_test tests[] = {
                .errstr = "different pointers",
                .result = REJECT,
        },
+       {
+               "check skb->mark is not writeable by sockets",
+               .insns = {
+                       BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+                                   offsetof(struct __sk_buff, mark)),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "invalid bpf_context access",
+               .result = REJECT,
+       },
+       {
+               "check skb->tc_index is not writeable by sockets",
+               .insns = {
+                       BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+                                   offsetof(struct __sk_buff, tc_index)),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "invalid bpf_context access",
+               .result = REJECT,
+       },
+       {
+               "check non-u32 access to cb",
+               .insns = {
+                       BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_1,
+                                   offsetof(struct __sk_buff, cb[0])),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "invalid bpf_context access",
+               .result = REJECT,
+       },
+       {
+               "check out of range skb->cb access",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, cb[60])),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "invalid bpf_context access",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_SCHED_ACT,
+       },
+       {
+               "write skb fields from socket prog",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, cb[4])),
+                       BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, mark)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, tc_index)),
+                       BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
+                       BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+                                   offsetof(struct __sk_buff, cb[0])),
+                       BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+                                   offsetof(struct __sk_buff, cb[2])),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+       },
+       {
+               "write skb fields from tc_cls_act prog",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, cb[0])),
+                       BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, mark)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, tc_index)),
+                       BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, tc_index)),
+                       BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, cb[3])),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
 };
 
 static int probe_filter_length(struct bpf_insn *fp)
@@ -775,6 +854,7 @@ static int test(void)
 
        for (i = 0; i < ARRAY_SIZE(tests); i++) {
                struct bpf_insn *prog = tests[i].insns;
+               int prog_type = tests[i].prog_type;
                int prog_len = probe_filter_length(prog);
                int *fixup = tests[i].fixup;
                int map_fd = -1;
@@ -789,8 +869,8 @@ static int test(void)
                }
                printf("#%d %s ", i, tests[i].descr);
 
-               prog_fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, prog,
-                                       prog_len * sizeof(struct bpf_insn),
+               prog_fd = bpf_prog_load(prog_type ?: BPF_PROG_TYPE_SOCKET_FILTER,
+                                       prog, prog_len * sizeof(struct bpf_insn),
                                        "GPL", 0);
 
                if (tests[i].result == ACCEPT) {
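
The new prog_type field lets each test load as the program type it targets, with the GNU ?: operator falling back to BPF_PROG_TYPE_SOCKET_FILTER when the field is unset, so per-type ctx-access rules like the skb->mark restrictions above can be exercised. Running the suite is a matter of building samples/bpf and executing the binary; a sketch, assuming the in-tree samples build and root privileges for the bpf(2) syscall:

    cd samples/bpf && make
    sudo ./test_verifier    # expected: one "#<n> <description> OK" line per test
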
diff --git a/samples/bpf/tracex5_kern.c b/samples/bpf/tracex5_kern.c
new file mode 100644 (file)
index 0000000..b71fe07
--- /dev/null
@@ -0,0 +1,75 @@
+/* Copyright (c) 2015 PLUMgrid, http://plumgrid.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <linux/ptrace.h>
+#include <linux/version.h>
+#include <uapi/linux/bpf.h>
+#include <uapi/linux/seccomp.h>
+#include "bpf_helpers.h"
+
+#define PROG(F) SEC("kprobe/"__stringify(F)) int bpf_func_##F
+
+struct bpf_map_def SEC("maps") progs = {
+       .type = BPF_MAP_TYPE_PROG_ARRAY,
+       .key_size = sizeof(u32),
+       .value_size = sizeof(u32),
+       .max_entries = 1024,
+};
+
+SEC("kprobe/seccomp_phase1")
+int bpf_prog1(struct pt_regs *ctx)
+{
+       struct seccomp_data sd = {};
+
+       bpf_probe_read(&sd, sizeof(sd), (void *)ctx->di);
+
+       /* dispatch into next BPF program depending on syscall number */
+       bpf_tail_call(ctx, &progs, sd.nr);
+
+       /* fall through -> unknown syscall */
+       if (sd.nr >= __NR_getuid && sd.nr <= __NR_getsid) {
+               char fmt[] = "syscall=%d (one of get/set uid/pid/gid)\n";
+               bpf_trace_printk(fmt, sizeof(fmt), sd.nr);
+       }
+       return 0;
+}
+
+/* we jump here when syscall number == __NR_write */
+PROG(__NR_write)(struct pt_regs *ctx)
+{
+       struct seccomp_data sd = {};
+
+       bpf_probe_read(&sd, sizeof(sd), (void *)ctx->di);
+       if (sd.args[2] == 512) {
+               char fmt[] = "write(fd=%d, buf=%p, size=%d)\n";
+               bpf_trace_printk(fmt, sizeof(fmt),
+                                sd.args[0], sd.args[1], sd.args[2]);
+       }
+       return 0;
+}
+
+PROG(__NR_read)(struct pt_regs *ctx)
+{
+       struct seccomp_data sd = {};
+
+       bpf_probe_read(&sd, sizeof(sd), (void *)ctx->di);
+       if (sd.args[2] > 128 && sd.args[2] <= 1024) {
+               char fmt[] = "read(fd=%d, buf=%p, size=%d)\n";
+               bpf_trace_printk(fmt, sizeof(fmt),
+                                sd.args[0], sd.args[1], sd.args[2]);
+       }
+       return 0;
+}
+
+PROG(__NR_mmap)(struct pt_regs *ctx)
+{
+       char fmt[] = "mmap\n";
+       bpf_trace_printk(fmt, sizeof(fmt));
+       return 0;
+}
+
+char _license[] SEC("license") = "GPL";
+u32 _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/samples/bpf/tracex5_user.c b/samples/bpf/tracex5_user.c
new file mode 100644 (file)
index 0000000..a04dd3c
--- /dev/null
@@ -0,0 +1,46 @@
+#include <stdio.h>
+#include <linux/bpf.h>
+#include <unistd.h>
+#include <linux/filter.h>
+#include <linux/seccomp.h>
+#include <sys/prctl.h>
+#include "libbpf.h"
+#include "bpf_load.h"
+
+/* install fake seccomp program to enable seccomp code path inside the kernel,
+ * so that our kprobe attached to seccomp_phase1() can be triggered
+ */
+static void install_accept_all_seccomp(void)
+{
+       struct sock_filter filter[] = {
+               BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_ALLOW),
+       };
+       struct sock_fprog prog = {
+               .len = (unsigned short)(sizeof(filter)/sizeof(filter[0])),
+               .filter = filter,
+       };
+       if (prctl(PR_SET_SECCOMP, 2, &prog))
+               perror("prctl");
+}
+
+int main(int ac, char **argv)
+{
+       FILE *f;
+       char filename[256];
+
+       snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+
+       if (load_bpf_file(filename)) {
+               printf("%s", bpf_log_buf);
+               return 1;
+       }
+
+       install_accept_all_seccomp();
+
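+       /* kick off a few read/write syscalls so the seccomp_phase1()
+        * kprobe fires; the FILE handle is deliberately left open while
+        * the trace pipe is read below
+        */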
+       f = popen("dd if=/dev/zero of=/dev/null count=5", "r");
+       (void) f;
+
+       read_trace_pipe();
+
+       return 0;
+}
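
Taken together, the two tracex5 files demonstrate bpf_tail_call() dispatch keyed on the syscall number: bpf_prog1 attaches to seccomp_phase1() and tail-calls into the PROG(__NR_*) programs through the prog array map. A typical run, as a sketch (assumes the samples/bpf build on a kernel with kprobes and seccomp filter support):

    cd samples/bpf && make
    sudo ./tracex5
    # the dd started via popen() issues read/write syscalls, and their
    # bpf_trace_printk() output is echoed by read_trace_pipe()
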
diff --git a/samples/pktgen/README.rst b/samples/pktgen/README.rst
new file mode 100644 (file)
index 0000000..8365c4e
--- /dev/null
@@ -0,0 +1,43 @@
+Sample and benchmark scripts for pktgen (packet generator)
+==========================================================
+This directory contains some pktgen sample and benchmark scripts that
+can easily be copied and adjusted for your own use case.
+
+General documentation is located in the kernel tree: Documentation/networking/pktgen.txt
+
+Helper include files
+====================
+This directory contains two helper shell files that can be "included"
+by shell sourcing, namely "functions.sh" and "parameters.sh".
+
+Common parameters
+-----------------
+The parameters.sh file supports easy and consistent parameter parsing
+across the sample scripts.  The usage text below is printed on errors::
+
+ Usage: ./pktgen_sample01_simple.sh [-vx] -i ethX
+  -i : ($DEV)       output interface/device (required)
+  -s : ($PKT_SIZE)  packet size
+  -d : ($DEST_IP)   destination IP
+  -m : ($DST_MAC)   destination MAC-addr
+  -t : ($THREADS)   threads to start
+  -c : ($CLONE_SKB) SKB clones sent before allocating a new SKB
+  -b : ($BURST)     HW level bursting of SKBs
+  -v : ($VERBOSE)   verbose
+  -x : ($DEBUG)     debug
+
+The global variable set by each option is also listed.  E.g. the
+required interface/device parameter "-i" sets the variable $DEV.
+
+Common functions
+----------------
+The functions.sh file provides three shell functions for configuring
+the different components of pktgen: pg_ctrl(), pg_thread()
+and pg_set().
+
+These functions correspond to pktgen's different components.
+ * pg_ctrl()   control "pgctrl" (/proc/net/pktgen/pgctrl)
+ * pg_thread() control the kernel threads and binding to devices
+ * pg_set()    control setup of individual devices
+
+See sample scripts for usage examples.
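
As a concrete illustration of these conventions, a minimal custom script built on the two helper files might look like the following sketch (the option flags are parsed by parameters.sh; the device comes from the required "-i" parameter):

    #!/bin/bash
    basedir=`dirname $0`
    source ${basedir}/functions.sh
    root_check_run_with_sudo "$@"
    source ${basedir}/parameters.sh    # provides $DEV, $PKT_SIZE, ...

    pg_thread 0 "rem_device_all"       # reset thread 0
    pg_thread 0 "add_device" $DEV      # bind $DEV to thread 0
    pg_set $DEV "count 1000"
    pg_set $DEV "pkt_size $PKT_SIZE"
    pg_ctrl "start"
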
diff --git a/samples/pktgen/functions.sh b/samples/pktgen/functions.sh
new file mode 100644 (file)
index 0000000..205e4cd
--- /dev/null
@@ -0,0 +1,121 @@
+#
+# Common functions used by pktgen scripts
+#  - Depending on bash 3 (or higher) syntax
+#
+# Author: Jesper Dangaard Brouer
+# License: GPL
+
+## -- General shell logging cmds --
+function err() {
+    local exitcode=$1
+    shift
+    echo "ERROR: $@" >&2
+    exit $exitcode
+}
+
+function warn() {
+    echo "WARN : $@" >&2
+}
+
+function info() {
+    if [[ -n "$VERBOSE" ]]; then
+       echo "INFO : $@" >&2
+    fi
+}
+
+## -- Pktgen proc config commands -- ##
+export PROC_DIR=/proc/net/pktgen
+#
+# Three different shell functions for configuring the different
+# components of pktgen:
+#   pg_ctrl(), pg_thread() and pg_set().
+#
+# These functions correspond to pktgens different components.
+# * pg_ctrl()   control "pgctrl" (/proc/net/pktgen/pgctrl)
+# * pg_thread() control the kernel threads and binding to devices
+# * pg_set()    control setup of individual devices
+function pg_ctrl() {
+    local proc_file="pgctrl"
+    proc_cmd ${proc_file} "$@"
+}
+
+function pg_thread() {
+    local thread=$1
+    local proc_file="kpktgend_${thread}"
+    shift
+    proc_cmd ${proc_file} "$@"
+}
+
+function pg_set() {
+    local dev=$1
+    local proc_file="$dev"
+    shift
+    proc_cmd ${proc_file} "$@"
+}
+
+# More generic replacement for pgset() that does not depend on a global
+# variable for the proc file.
+function proc_cmd() {
+    local result
+    local proc_file=$1
+    # after shift, the remaining args are contained in $@
+    shift
+    local proc_ctrl=${PROC_DIR}/$proc_file
+    if [[ ! -e "$proc_ctrl" ]]; then
+       err 3 "proc file:$proc_ctrl does not exists (dev added to thread?)"
+    else
+       if [[ ! -w "$proc_ctrl" ]]; then
+           err 4 "proc file:$proc_ctrl not writable, not root?!"
+       fi
+    fi
+
+    if [[ "$DEBUG" == "yes" ]]; then
+       echo "cmd: $@ > $proc_ctrl"
+    fi
+    # Quoting of "$@" is important for space expansion
+    echo "$@" > "$proc_ctrl"
+    local status=$?
+
+    result=$(grep "Result: OK:" $proc_ctrl)
+    # Due to pgctrl, cannot use exit code $? from grep
+    if [[ "$result" == "" ]]; then
+       grep "Result:" $proc_ctrl >&2
+    fi
+    if (( $status != 0 )); then
+       err 5 "Write error($status) occurred cmd: \"$@ > $proc_ctrl\""
+    fi
+}
+
+# Old obsolete "pgset" function, with slightly improved err handling
+function pgset() {
+    local result
+
+    if [[ "$DEBUG" == "yes" ]]; then
+       echo "cmd: $1 > $PGDEV"
+    fi
+    echo $1 > $PGDEV
+    local status=$?
+
+    result=`cat $PGDEV | fgrep "Result: OK:"`
+    if [[ "$result" == "" ]]; then
+         cat $PGDEV | fgrep Result:
+    fi
+    if (( $status != 0 )); then
+       err 5 "Write error($status) occurred cmd: \"$1 > $PGDEV\""
+    fi
+}
+
+## -- General shell tricks --
+
+function root_check_run_with_sudo() {
+    # Trick so the program can be run as a normal user; it re-runs itself
+    # with "sudo".  Call as root_check_run_with_sudo "$@"
+    if [ "$EUID" -ne 0 ]; then
+       if [ -x $0 ]; then # Directly executable, use sudo
+           info "Not root, running with sudo"
+            sudo "$0" "$@"
+            exit $?
+       fi
+       err 4 "cannot perform sudo run of $0"
+    fi
+}
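
Under the hood every helper funnels into proc_cmd(), which writes its arguments to the matching proc file and greps for "Result: OK:". For instance, assuming a device already added to thread 0 under the hypothetical name "eth0@0":

    pg_set "eth0@0" "pkt_size 60"
    # is, modulo the error checking, equivalent to:
    echo "pkt_size 60" > /proc/net/pktgen/eth0@0
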
diff --git a/samples/pktgen/parameters.sh b/samples/pktgen/parameters.sh
new file mode 100644 (file)
index 0000000..33b70fd
--- /dev/null
@@ -0,0 +1,97 @@
+#
+# Common parameter parsing for pktgen scripts
+#
+
+function usage() {
+    echo ""
+    echo "Usage: $0 [-vx] -i ethX"
+    echo "  -i : (\$DEV)       output interface/device (required)"
+    echo "  -s : (\$PKT_SIZE)  packet size"
+    echo "  -d : (\$DEST_IP)   destination IP"
+    echo "  -m : (\$DST_MAC)   destination MAC-addr"
+    echo "  -t : (\$THREADS)   threads to start"
+    echo "  -c : (\$SKB_CLONE) SKB clones send before alloc new SKB"
+    echo "  -b : (\$BURST)     HW level bursting of SKBs"
+    echo "  -v : (\$VERBOSE)   verbose"
+    echo "  -x : (\$DEBUG)     debug"
+    echo ""
+}
+
+##  --- Parse command line arguments / parameters ---
+## echo "Commandline options:"
+while getopts "s:i:d:m:t:c:b:vxh" option; do
+    case $option in
+        i) # interface
+          export DEV=$OPTARG
+         info "Output device set to: DEV=$DEV"
+          ;;
+        s)
+          export PKT_SIZE=$OPTARG
+         info "Packet size set to: PKT_SIZE=$PKT_SIZE bytes"
+          ;;
+        d) # destination IP
+          export DEST_IP=$OPTARG
+         info "Destination IP set to: DEST_IP=$DEST_IP"
+          ;;
+        m) # MAC
+          export DST_MAC=$OPTARG
+         info "Destination MAC set to: DST_MAC=$DST_MAC"
+          ;;
+        t)
+         export THREADS=$OPTARG
+          export CPU_THREADS=$OPTARG
+         let "CPU_THREADS -= 1"
+         info "Number of threads to start: $THREADS (0 to $CPU_THREADS)"
+          ;;
+        c)
+         export CLONE_SKB=$OPTARG
+         info "CLONE_SKB=$CLONE_SKB"
+          ;;
+        b)
+         export BURST=$OPTARG
+         info "SKB bursting: BURST=$BURST"
+          ;;
+        v)
+          export VERBOSE=yes
+          info "Verbose mode: VERBOSE=$VERBOSE"
+          ;;
+        x)
+          export DEBUG=yes
+          info "Debug mode: DEBUG=$DEBUG"
+          ;;
+        h|?|*)
+          usage;
+          err 2 "[ERROR] Unknown parameters!!!"
+    esac
+done
+shift $(( $OPTIND - 1 ))
+
+if [ -z "$PKT_SIZE" ]; then
+    # NIC adds 4 bytes CRC
+    export PKT_SIZE=60
+    info "Default packet size set to: set to: $PKT_SIZE bytes"
+fi
+
+if [ -z "$THREADS" ]; then
+    # Zero CPU threads means one thread, because CPU numbers are zero indexed
+    export CPU_THREADS=0
+    export THREADS=1
+fi
+
+if [ -z "$DEV" ]; then
+    usage
+    err 2 "Please specify output device"
+fi
+
+if [ -z "$DST_MAC" ]; then
+    warn "Missing destination MAC address"
+fi
+
+if [ -z "$DEST_IP" ]; then
+    warn "Missing destination IP address"
+fi
+
+if [ ! -d /proc/net/pktgen ]; then
+    info "Loading kernel module: pktgen"
+    modprobe pktgen
+fi
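
Any script sourcing this file accepts the same option set; an example invocation, with interface, MAC and IP as placeholders:

    ./pktgen_sample01_simple.sh -i eth0 -m 90:e2:ba:ff:ff:ff -d 198.18.0.42 -v
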
diff --git a/samples/pktgen/pktgen.conf-1-1 b/samples/pktgen/pktgen.conf-1-1
deleted file mode 100755 (executable)
index f91daad..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
-#!/bin/bash
-
-#modprobe pktgen
-
-
-function pgset() {
-    local result
-
-    echo $1 > $PGDEV
-
-    result=`cat $PGDEV | fgrep "Result: OK:"`
-    if [ "$result" = "" ]; then
-         cat $PGDEV | fgrep Result:
-    fi
-}
-
-# Config Start Here -----------------------------------------------------------
-
-
-# thread config
-# Each CPU has its own thread. One CPU example. We add eth1.
-
-PGDEV=/proc/net/pktgen/kpktgend_0
-  echo "Removing all devices"
- pgset "rem_device_all"
-  echo "Adding eth1"
- pgset "add_device eth1"
-
-
-# device config
-# delay 0 means maximum speed.
-
-CLONE_SKB="clone_skb 1000000"
-# NIC adds 4 bytes CRC
-PKT_SIZE="pkt_size 60"
-
-# COUNT 0 means forever
-#COUNT="count 0"
-COUNT="count 10000000"
-DELAY="delay 0"
-
-PGDEV=/proc/net/pktgen/eth1
-  echo "Configuring $PGDEV"
- pgset "$COUNT"
- pgset "$CLONE_SKB"
- pgset "$PKT_SIZE"
- pgset "$DELAY"
- pgset "dst 10.10.11.2"
- pgset "dst_mac  00:04:23:08:91:dc"
-
-
-# Time to run
-PGDEV=/proc/net/pktgen/pgctrl
-
- echo "Running... ctrl^C to stop"
- trap true INT
- pgset "start"
- echo "Done"
- cat /proc/net/pktgen/eth1
diff --git a/samples/pktgen/pktgen.conf-2-1 b/samples/pktgen/pktgen.conf-2-1
deleted file mode 100755 (executable)
index e108e97..0000000
+++ /dev/null
@@ -1,66 +0,0 @@
-#!/bin/bash
-
-#modprobe pktgen
-
-
-function pgset() {
-    local result
-
-    echo $1 > $PGDEV
-
-    result=`cat $PGDEV | fgrep "Result: OK:"`
-    if [ "$result" = "" ]; then
-         cat $PGDEV | fgrep Result:
-    fi
-}
-
-# Config Start Here -----------------------------------------------------------
-
-
-# thread config
-# Each CPU has its own thread. Two CPU example. We add eth1 to the first
-# and leave the second idle.
-
-PGDEV=/proc/net/pktgen/kpktgend_0
-  echo "Removing all devices"
- pgset "rem_device_all"
-  echo "Adding eth1"
- pgset "add_device eth1"
-
-# We need to remove old config since we dont use this thread. We can only
-# one NIC on one CPU due to affinity reasons.
-
-PGDEV=/proc/net/pktgen/kpktgend_1
-  echo "Removing all devices"
- pgset "rem_device_all"
-
-# device config
-# delay 0 means maximum speed.
-
-CLONE_SKB="clone_skb 1000000"
-# NIC adds 4 bytes CRC
-PKT_SIZE="pkt_size 60"
-
-# COUNT 0 means forever
-#COUNT="count 0"
-COUNT="count 10000000"
-DELAY="delay 0"
-
-PGDEV=/proc/net/pktgen/eth1
-  echo "Configuring $PGDEV"
- pgset "$COUNT"
- pgset "$CLONE_SKB"
- pgset "$PKT_SIZE"
- pgset "$DELAY"
- pgset "dst 10.10.11.2"
- pgset "dst_mac  00:04:23:08:91:dc"
-
-
-# Time to run
-PGDEV=/proc/net/pktgen/pgctrl
-
- echo "Running... ctrl^C to stop"
- trap true INT
- pgset "start"
- echo "Done"
- cat /proc/net/pktgen/eth1
diff --git a/samples/pktgen/pktgen.conf-2-2 b/samples/pktgen/pktgen.conf-2-2
deleted file mode 100755 (executable)
index acea155..0000000
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/bin/bash
-
-#modprobe pktgen
-
-
-function pgset() {
-    local result
-
-    echo $1 > $PGDEV
-
-    result=`cat $PGDEV | fgrep "Result: OK:"`
-    if [ "$result" = "" ]; then
-         cat $PGDEV | fgrep Result:
-    fi
-}
-
-# Config Start Here -----------------------------------------------------------
-
-
-# thread config
-# Each CPU has its own thread. Two CPU example. We add eth1, eth2 respectively.
-
-PGDEV=/proc/net/pktgen/kpktgend_0
-  echo "Removing all devices"
- pgset "rem_device_all"
-  echo "Adding eth1"
- pgset "add_device eth1"
-
-PGDEV=/proc/net/pktgen/kpktgend_1
-  echo "Removing all devices"
- pgset "rem_device_all"
-  echo "Adding eth2"
- pgset "add_device eth2"
-
-
-# device config
-# delay 0 means maximum speed.
-
-CLONE_SKB="clone_skb 1000000"
-# NIC adds 4 bytes CRC
-PKT_SIZE="pkt_size 60"
-
-# COUNT 0 means forever
-#COUNT="count 0"
-COUNT="count 10000000"
-DELAY="delay 0"
-
-PGDEV=/proc/net/pktgen/eth1
-  echo "Configuring $PGDEV"
- pgset "$COUNT"
- pgset "$CLONE_SKB"
- pgset "$PKT_SIZE"
- pgset "$DELAY"
- pgset "dst 10.10.11.2"
- pgset "dst_mac  00:04:23:08:91:dc"
-
-PGDEV=/proc/net/pktgen/eth2
-  echo "Configuring $PGDEV"
- pgset "$COUNT"
- pgset "$CLONE_SKB"
- pgset "$PKT_SIZE"
- pgset "$DELAY"
- pgset "dst 192.168.2.2"
- pgset "dst_mac  00:04:23:08:91:de"
-
-# Time to run
-PGDEV=/proc/net/pktgen/pgctrl
-
- echo "Running... ctrl^C to stop"
- trap true INT
- pgset "start"
- echo "Done"
- cat /proc/net/pktgen/eth1 /proc/net/pktgen/eth2
diff --git a/samples/pktgen/pktgen_bench_xmit_mode_netif_receive.sh b/samples/pktgen/pktgen_bench_xmit_mode_netif_receive.sh
new file mode 100755 (executable)
index 0000000..cb15903
--- /dev/null
@@ -0,0 +1,86 @@
+#!/bin/bash
+#
+# Benchmark script:
+#  - developed for benchmarking ingress qdisc path
+#
+# Script for injecting packets into the RX path of the stack with pktgen
+# "xmit_mode netif_receive".  With an invalid dst_mac this will only
+# measure the ingress code path, as packets get dropped in ip_rcv().
+#
+# This script doesn't really need any hardware.  It benchmarks the
+# software RX path just after the NIC driver level.  With bursting it
+# also "removes" the SKB alloc/free overhead.
+#
+# Setup scenarios for measuring ingress qdisc (with invalid dst_mac):
+# ------------------------------------------------------------------
+# (1) no ingress (uses static_key_false(&ingress_needed))
+#
+# (2) ingress on other dev (changes ingress_needed and calls
+#     handle_ing() but exits early)
+#
+#  config:  tc qdisc add dev $SOMEDEV handle ffff: ingress
+#
+# (3) ingress on this dev, handle_ing() -> tc_classify()
+#
+#  config:  tc qdisc add dev $DEV handle ffff: ingress
+#
+# (4) ingress on this dev + drop at u32 classifier/action.
+#
+basedir=`dirname $0`
+source ${basedir}/functions.sh
+root_check_run_with_sudo "$@"
+
+# Parameter parsing via include
+source ${basedir}/parameters.sh
+# Using invalid DST_MAC will cause the packets to get dropped in
+# ip_rcv() which is part of the test
+[ -z "$DEST_IP" ] && DEST_IP="198.18.0.42"
+[ -z "$DST_MAC" ] && DST_MAC="90:e2:ba:ff:ff:ff"
+[ -z "$BURST" ] && BURST=1024
+
+# Base Config
+DELAY="0"        # Zero means max speed
+COUNT="10000000" # Zero means indefinitely
+
+# General cleanup of everything since the last run
+pg_ctrl "reset"
+
+# Threads are specified with parameter -t; the value is stored in $THREADS
+for ((thread = 0; thread < $THREADS; thread++)); do
+    # The device name is extended with @name, using the thread number to
+    # make them unique, but any name will do.
+    dev=${DEV}@${thread}
+
+    # Remove all other devices and add_device $dev to this thread
+    pg_thread $thread "rem_device_all"
+    pg_thread $thread "add_device" $dev
+
+    # Base config of dev
+    pg_set $dev "flag QUEUE_MAP_CPU"
+    pg_set $dev "count $COUNT"
+    pg_set $dev "pkt_size $PKT_SIZE"
+    pg_set $dev "delay $DELAY"
+    pg_set $dev "flag NO_TIMESTAMP"
+
+    # Destination
+    pg_set $dev "dst_mac $DST_MAC"
+    pg_set $dev "dst $DEST_IP"
+
+    # Inject packet into RX path of stack
+    pg_set $dev "xmit_mode netif_receive"
+
+    # Bursting allows us to avoid measuring SKB alloc/free overhead
+    pg_set $dev "burst $BURST"
+done
+
+# start_run
+echo "Running... ctrl^C to stop" >&2
+pg_ctrl "start"
+echo "Done" >&2
+
+# Print results
+for ((thread = 0; thread < $THREADS; thread++)); do
+    dev=${DEV}@${thread}
+    echo "Device: $dev"
+    cat /proc/net/pktgen/$dev | grep -A2 "Result:"
+done
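
To measure e.g. setup scenario (3) from the header comment, add the ingress qdisc on the device under test and then start the script; a sketch with "eth0" as a stand-in:

    tc qdisc add dev eth0 handle ffff: ingress
    ./pktgen_bench_xmit_mode_netif_receive.sh -i eth0 -t 2 -b 1024
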
diff --git a/samples/pktgen/pktgen_sample01_simple.sh b/samples/pktgen/pktgen_sample01_simple.sh
new file mode 100755 (executable)
index 0000000..8c9d318
--- /dev/null
@@ -0,0 +1,71 @@
+#!/bin/bash
+#
+# Simple example:
+#  * pktgen sending with single thread and single interface
+#  * flow variation via random UDP source port
+#
+basedir=`dirname $0`
+source ${basedir}/functions.sh
+root_check_run_with_sudo "$@"
+
+# Parameter parsing via include
+# - see parameters.sh for which settings are available
+# - required param is the interface "-i" stored in $DEV
+source ${basedir}/parameters.sh
+#
+# Set some default params, if they didn't get set
+[ -z "$DEST_IP" ] && DEST_IP="198.18.0.42"
+[ -z "$CLONE_SKB" ] && CLONE_SKB="0"
+# Example of enforcing the param "-m" for dst_mac
+[ -z "$DST_MAC" ] && usage && err 2 "Must specify -m dst_mac"
+
+# Base Config
+DELAY="0"        # Zero means max speed
+COUNT="100000"   # Zero means indefinitely
+
+# Flow variation random source port between min and max
+UDP_MIN=9
+UDP_MAX=109
+
+# General cleanup of everything since the last run
+# (especially important if other threads were configured by other scripts)
+pg_ctrl "reset"
+
+# Remove all other devices and add_device $DEV to thread 0
+thread=0
+pg_thread $thread "rem_device_all"
+pg_thread $thread "add_device" $DEV
+
+# How many packets to send (zero means indefinitely)
+pg_set $DEV "count $COUNT"
+
+# Reduce alloc cost by sending the same SKB many times
+# - this obviously affects the randomness within the packet
+pg_set $DEV "clone_skb $CLONE_SKB"
+
+# Set packet size
+pg_set $DEV "pkt_size $PKT_SIZE"
+
+# Delay between packets (zero means max speed)
+pg_set $DEV "delay $DELAY"
+
+# Flag example disabling timestamping
+pg_set $DEV "flag NO_TIMESTAMP"
+
+# Destination
+pg_set $DEV "dst_mac $DST_MAC"
+pg_set $DEV "dst $DEST_IP"
+
+# Setup random UDP port src range
+pg_set $DEV "flag UDPSRC_RND"
+pg_set $DEV "udp_src_min $UDP_MIN"
+pg_set $DEV "udp_src_max $UDP_MAX"
+
+# start_run
+echo "Running... ctrl^C to stop" >&2
+pg_ctrl "start"
+echo "Done" >&2
+
+# Print results
+echo "Result device: $DEV"
+cat /proc/net/pktgen/$DEV
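
Since this sample enforces "-m", a minimal invocation looks like the following sketch (the MAC is a placeholder borrowed from the removed conf files):

    ./pktgen_sample01_simple.sh -i eth0 -m 00:04:23:08:91:dc
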
diff --git a/samples/pktgen/pktgen_sample02_multiqueue.sh b/samples/pktgen/pktgen_sample02_multiqueue.sh
new file mode 100755 (executable)
index 0000000..32467ae
--- /dev/null
@@ -0,0 +1,75 @@
+#!/bin/bash
+#
+# Multiqueue: Using pktgen threads for sending on multiple CPUs
+#  * adding devices to kernel threads
+#  * notice the naming scheme for keeping device names unique
+#  * naming scheme: dev@thread_number
+#  * flow variation via random UDP source port
+#
+basedir=`dirname $0`
+source ${basedir}/functions.sh
+root_check_run_with_sudo "$@"
+#
+# Required param: -i dev in $DEV
+source ${basedir}/parameters.sh
+
+# Base Config
+DELAY="0"        # Zero means max speed
+COUNT="100000"   # Zero means indefinitely
+[ -z "$CLONE_SKB" ] && CLONE_SKB="0"
+
+# Flow variation random source port between min and max
+UDP_MIN=9
+UDP_MAX=109
+
+# (example of setting default params in your script)
+[ -z "$DEST_IP" ] && DEST_IP="198.18.0.42"
+[ -z "$DST_MAC" ] && DST_MAC="90:e2:ba:ff:ff:ff"
+
+# General cleanup everything since last run
+pg_ctrl "reset"
+
+# Threads are specified with parameter -t; the value is stored in $THREADS
+for ((thread = 0; thread < $THREADS; thread++)); do
+    # The device name is extended with @name, using the thread number to
+    # make them unique, but any name will do.
+    dev=${DEV}@${thread}
+
+    # Remove all other devices and add_device $dev to this thread
+    pg_thread $thread "rem_device_all"
+    pg_thread $thread "add_device" $dev
+
+    # Notice: config queue to map to cpu (mirrors smp_processor_id())
+    # It is beneficial to map IRQs (/proc/irq/*/smp_affinity) 1:1 to the CPU number
+    pg_set $dev "flag QUEUE_MAP_CPU"
+
+    # Base config of dev
+    pg_set $dev "count $COUNT"
+    pg_set $dev "clone_skb $CLONE_SKB"
+    pg_set $dev "pkt_size $PKT_SIZE"
+    pg_set $dev "delay $DELAY"
+
+    # Flag example disabling timestamping
+    pg_set $dev "flag NO_TIMESTAMP"
+
+    # Destination
+    pg_set $dev "dst_mac $DST_MAC"
+    pg_set $dev "dst $DEST_IP"
+
+    # Setup random UDP port src range
+    pg_set $dev "flag UDPSRC_RND"
+    pg_set $dev "udp_src_min $UDP_MIN"
+    pg_set $dev "udp_src_max $UDP_MAX"
+done
+
+# start_run
+echo "Running... ctrl^C to stop" >&2
+pg_ctrl "start"
+echo "Done" >&2
+
+# Print results
+for ((thread = 0; thread < $THREADS; thread++)); do
+    dev=${DEV}@${thread}
+    echo "Device: $dev"
+    cat /proc/net/pktgen/$dev | grep -A2 "Result:"
+done
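
A multiqueue run adds the thread count on top of the usual parameters, one kernel thread per CPU; values below are placeholders:

    ./pktgen_sample02_multiqueue.sh -i eth0 -t 4 -m 90:e2:ba:ff:ff:ff
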
diff --git a/samples/pktgen/pktgen_sample03_burst_single_flow.sh b/samples/pktgen/pktgen_sample03_burst_single_flow.sh
new file mode 100755 (executable)
index 0000000..775f5d0
--- /dev/null
@@ -0,0 +1,82 @@
+#!/bin/bash
+#
+# Script for max single flow performance
+#  - If correctly tuned[1], single CPU 10G wirespeed small pkts is possible[2]
+#
+# Using pktgen "burst" option (use -b $N)
+#  - To boost max performance
+#  - Avail since: kernel v3.18
+#   * commit 38b2cf2982dc73 ("net: pktgen: packet bursting via skb->xmit_more")
+#  - This avoids writing the HW tailptr on every driver xmit
+#  - The performance boost is impressive, see commit and blog [2]
+#
+# Notice: On purpose generates a single (UDP) flow towards target,
+#   the reason behind this is to overload/activate only a single CPU on
+#   the target host.  Avoiding randomness in pktgen also makes it faster.
+#
+# Tuning see:
+#  [1] http://netoptimizer.blogspot.dk/2014/06/pktgen-for-network-overload-testing.html
+#  [2] http://netoptimizer.blogspot.dk/2014/10/unlocked-10gbps-tx-wirespeed-smallest.html
+#
+basedir=`dirname $0`
+source ${basedir}/functions.sh
+root_check_run_with_sudo "$@"
+
+# Parameter parsing via include
+source ${basedir}/parameters.sh
+# Set some default params, if they didn't get set
+[ -z "$DEST_IP" ]   && DEST_IP="198.18.0.42"
+[ -z "$DST_MAC" ]   && DST_MAC="90:e2:ba:ff:ff:ff"
+[ -z "$BURST" ]     && BURST=32
+[ -z "$CLONE_SKB" ] && CLONE_SKB="100000"
+
+# Base Config
+DELAY="0"  # Zero means max speed
+COUNT="0"  # Zero means indefinitely
+
+# General cleanup of everything since the last run
+pg_ctrl "reset"
+
+# Threads are specified with parameter -t; the value is stored in $THREADS
+for ((thread = 0; thread < $THREADS; thread++)); do
+    dev=${DEV}@${thread}
+
+    # Remove all other devices and add_device $dev to this thread
+    pg_thread $thread "rem_device_all"
+    pg_thread $thread "add_device" $dev
+
+    # Base config
+    pg_set $dev "flag QUEUE_MAP_CPU"
+    pg_set $dev "count $COUNT"
+    pg_set $dev "clone_skb $CLONE_SKB"
+    pg_set $dev "pkt_size $PKT_SIZE"
+    pg_set $dev "delay $DELAY"
+    pg_set $dev "flag NO_TIMESTAMP"
+
+    # Destination
+    pg_set $dev "dst_mac $DST_MAC"
+    pg_set $dev "dst $DEST_IP"
+
+    # Set up bursting; for easy testing, -b 0 disables bursting
+    # (internally in pktgen the default and minimum burst is 1)
+    if [[ ${BURST} -ne 0 ]]; then
+       pg_set $dev "burst $BURST"
+    else
+       info "$dev: Not using burst"
+    fi
+done
+
+# Called if the user hits control-c
+function control_c() {
+    # Print results
+    for ((thread = 0; thread < $THREADS; thread++)); do
+       dev=${DEV}@${thread}
+       echo "Device: $dev"
+       cat /proc/net/pktgen/$dev | grep -A2 "Result:"
+    done
+}
+# trap keyboard interrupt (Ctrl-C)
+trap control_c SIGINT
+
+echo "Running... ctrl^C to stop" >&2
+pg_ctrl "start"
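
Because COUNT=0 runs indefinitely here, the script relies on the control_c trap to print results, so a run is stopped interactively; values below are placeholders:

    ./pktgen_sample03_burst_single_flow.sh -i eth0 -b 32 -t 1
    # hit Ctrl-C to stop; the SIGINT trap prints the per-device results
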
index 6f6733331d9597918353464a71af1e9d9e04a251..08c2a36ef7a9b429169fa1bd2f19f9b5340348ad 100644 (file)
@@ -272,7 +272,7 @@ int main(int argc, char **argv)
        const int expect_hash[2][2]     = { { 15, 5 },  { 20, 5 } };
        const int expect_hash_rb[2][2]  = { { 15, 5 },  { 20, 15 } };
        const int expect_lb[2][2]       = { { 10, 10 }, { 18, 17 } };
-       const int expect_rb[2][2]       = { { 20, 0 },  { 20, 15 } };
+       const int expect_rb[2][2]       = { { 15, 5 },  { 20, 15 } };
        const int expect_cpu0[2][2]     = { { 20, 0 },  { 20, 0 } };
        const int expect_cpu1[2][2]     = { { 0, 20 },  { 0, 20 } };
        int port_off = 2, tries = 5, ret;